"""Concurrently download the URLs listed in missing_urls.txt into html_files/."""

import concurrent.futures
import os
import time
import urllib.parse

import requests
from requests.exceptions import RequestException

# All traffic is routed through a local proxy; adjust if it is not running.
PROXIES = {"http": "http://127.0.0.1:10808", "https": "http://127.0.0.1:10808"}
HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    )
}
MAX_RETRIES = 3
RETRY_DELAY_SECONDS = 2


def download_url(url):
    """Download *url* and save it as html_files/<last path segment>.html.

    Skips the download when the target file already exists. Retries up to
    MAX_RETRIES times on HTTP errors or network failures, sleeping
    RETRY_DELAY_SECONDS between attempts.

    Returns:
        (success, filename): bool flag and the local target path.
    """
    # Derive the local filename from the (percent-decoded) last URL segment.
    decoded_url = urllib.parse.unquote(url)
    volume_name = decoded_url.split("/")[-1]
    filename = f"html_files/{volume_name}.html"

    # Already downloaded on a previous run: nothing to do.
    if os.path.exists(filename):
        return True, filename

    for attempt in range(1, MAX_RETRIES + 1):
        retries_left = MAX_RETRIES - attempt
        try:
            response = requests.get(
                url, headers=HEADERS, proxies=PROXIES, timeout=15
            )
            if response.status_code == 200:
                with open(filename, "w", encoding="utf-8") as out_f:
                    out_f.write(response.text)
                # FIX: original printed the placeholder "(unknown)".
                print(f"Successfully downloaded {url} -> {filename}")
                return True, filename
            print(
                f"HTTP Error {response.status_code} for {url}. "
                f"Retries left: {retries_left}"
            )
        except RequestException as e:
            # FIX: original silently swallowed network errors (`pass`),
            # making failures undiagnosable.
            print(f"Request failed for {url}: {e}. Retries left: {retries_left}")
        if retries_left > 0:
            # FIX: original also slept after the final failed attempt.
            time.sleep(RETRY_DELAY_SECONDS)

    print(f"Failed to download {url} after all retries.")
    return False, filename


def main():
    """Read URLs from missing_urls.txt and fetch them with a thread pool."""
    # exist_ok avoids the check-then-create race of the original LBYL form.
    os.makedirs("html_files", exist_ok=True)

    with open("missing_urls.txt", "r", encoding="utf-8") as f:
        urls = [line.strip() for line in f if line.strip()]

    # Threads suit this I/O-bound workload: the GIL is released while
    # requests waits on the network.
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
        futures = [executor.submit(download_url, url) for url in urls]
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()
            except Exception as e:
                # Top-level boundary: log and keep draining remaining futures.
                print(f"Exception during download: {e}")


if __name__ == "__main__":
    main()