"""Download the HTML pages listed in missing_urls.txt into html_files/.

Fetches each URL through a local HTTP proxy, retrying failures a few
times, and saves one .html file per volume.
"""
import os
import time
import urllib.parse

import requests
from requests.exceptions import RequestException
def main():
    """Download every URL listed in missing_urls.txt into html_files/.

    Reads the URL list (one URL per line, blank lines ignored), skips
    volumes whose output file already exists, and fetches the rest through
    a local proxy, retrying each URL up to three times before giving up.

    Side effects: creates the html_files/ directory if needed, writes one
    ``html_files/<volume>.html`` file per successful download, and prints
    progress to stdout.
    """
    # Race-free replacement for the original exists()/makedirs() pair.
    os.makedirs("html_files", exist_ok=True)

    with open("missing_urls.txt", "r", encoding="utf-8") as f:
        urls = [line.strip() for line in f if line.strip()]

    # Route both schemes through a local proxy client listening on 10808.
    proxies = {"http": "http://127.0.0.1:10808", "https": "http://127.0.0.1:10808"}

    # Browser-like User-Agent to avoid trivial bot blocking.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }

    for url in urls:
        # Decode percent-escapes so the last path segment is readable,
        # e.g. "%E5%8D%B7001%E4%B9%8B1" -> "卷001之1".
        decoded_url = urllib.parse.unquote(url)
        # The last path segment names the volume.
        volume_name = decoded_url.split("/")[-1]

        filename = f"html_files/{volume_name}.html"

        if os.path.exists(filename):
            # BUG FIX: the original printed the literal placeholder
            # "(unknown)" here instead of the volume name.
            print(f"Skipping {volume_name}, already exists.")
            continue

        print(f"Downloading {volume_name} from {url}...")

        success = False
        retries = 3
        while not success and retries > 0:
            try:
                response = requests.get(
                    url, headers=headers, proxies=proxies, timeout=15
                )
                if response.status_code == 200:
                    with open(filename, "w", encoding="utf-8") as out_f:
                        out_f.write(response.text)
                    success = True
                    # BUG FIX: the original printed the literal placeholder
                    # "(unknown)" here instead of the volume name.
                    print(f"Successfully downloaded {volume_name}")
                else:
                    print(
                        f"HTTP Error {response.status_code} for {url}. Retries left: {retries - 1}"
                    )
            except RequestException as e:
                # Network-level failures (timeout, connection refused, ...)
                # count against the retry budget like HTTP errors do.
                print(f"Error downloading {url}: {e}. Retries left: {retries - 1}")

            if not success:
                retries -= 1
                time.sleep(2)  # Delay before retry

        if not success:
            print(f"Failed to download {url} after all retries.")

        time.sleep(0.5)  # Small delay between requests to be polite
|
|
|
# Entry point: run the downloader only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()