Help me set a reasonable request header for the following code, and add a delay:

```python
import requests
import os
from bs4 import BeautifulSoup


class NovelDownloader:
    def __init__(self, root_url):
        self.root_url = root_url
        self.book_list = []
        self.chapter_list = []

    def get_url(self, url):
        while True:
            try:
                res = requests.get(url)
                if res.status_code == 200:
                    print("Page fetched successfully!")
                    return res.text
                else:
                    print("Unexpected status code:", res.status_code)
            except:
                print("Request failed!")

    def get_book_list(self):
        res = self.get_url(self.root_url)
        html = BeautifulSoup(res, "html.parser")
        a_list = html.find_all("a", {"class": "name"})
        for a in a_list:
            self.book_list.append(a["href"])
        self.book_list = [self.root_url + i for i in self.book_list]
        self.book_list.remove('http://www.biquge5200.cc/')

    def get_chapter_list(self, url):
        res = self.get_url(url)
        html = BeautifulSoup(res, "html.parser")
        a_list = html.find_all("a", {"class": "chapter"})
        for a in a_list:
            self.chapter_list.append((a["href"], a.text.replace("\n", "")))

    def get_content(self, chapter):
        url = self.root_url + chapter[0]
        print(url)
        book_name = chapter[0].split("/")[1]
        print(book_name)
        if not os.path.exists(book_name):
            os.mkdir(book_name)
        res = self.get_url(url)
        html = BeautifulSoup(res, "html.parser")
        content = html.find("div", {"id": "content"}).text
        print(content)
        path = os.path.join(book_name, chapter[1])
        with open(path, "w", encoding="utf8") as f:
            f.write(content)

    def main(self):
        self.get_book_list()
        for book in self.book_list:
            self.get_chapter_list(book)
        for chapter in self.chapter_list:
            self.get_content(chapter)


if __name__ == '__main__':
    root_url = "http://www.biquge5200.cc/"
    nd = NovelDownloader(root_url)
    nd.main()
```
Sure, I can add a reasonable request header and a delay for you. In the `get_url` method you can add a headers dictionary so each request carries a browser-like `User-Agent`, and pause briefly around every request so the site is less likely to block you for requesting too frequently.

The modified code is shown below:
```python
import requests
import os
import time
from bs4 import BeautifulSoup


class NovelDownloader:
    def __init__(self, root_url):
        self.root_url = root_url
        self.book_list = []
        self.chapter_list = []
        # Browser-like headers so requests are not rejected as an obvious bot
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
        }

    def get_url(self, url):
        while True:
            try:
                res = requests.get(url, headers=self.headers, timeout=10)
                if res.status_code == 200:
                    print("Page fetched successfully!")
                    time.sleep(1)  # pause after every successful request
                    return res.text
                else:
                    print("Unexpected status code:", res.status_code)
            except requests.RequestException:
                print("Request failed!")
            time.sleep(1)  # wait before retrying a failed request

    def get_book_list(self):
        res = self.get_url(self.root_url)
        html = BeautifulSoup(res, "html.parser")
        a_list = html.find_all("a", {"class": "name"})
        for a in a_list:
            self.book_list.append(a["href"])
        self.book_list = [self.root_url + i for i in self.book_list]
        # Drop the entry that points back to the index page itself
        self.book_list.remove('http://www.biquge5200.cc/')

    def get_chapter_list(self, url):
        res = self.get_url(url)
        html = BeautifulSoup(res, "html.parser")
        a_list = html.find_all("a", {"class": "chapter"})
        for a in a_list:
            self.chapter_list.append((a["href"], a.text.replace("\n", "")))

    def get_content(self, chapter):
        url = self.root_url + chapter[0]
        print(url)
        book_name = chapter[0].split("/")[1]
        print(book_name)
        if not os.path.exists(book_name):
            os.mkdir(book_name)
        res = self.get_url(url)
        html = BeautifulSoup(res, "html.parser")
        content = html.find("div", {"id": "content"}).text
        print(content)
        path = os.path.join(book_name, chapter[1])
        with open(path, "w", encoding="utf8") as f:
            f.write(content)

    def main(self):
        self.get_book_list()
        for book in self.book_list:
            self.get_chapter_list(book)
        for chapter in self.chapter_list:
            self.get_content(chapter)


if __name__ == '__main__':
    root_url = "http://www.biquge5200.cc/"
    nd = NovelDownloader(root_url)
    nd.main()
```
With these changes the crawler sends browser-like requests and spaces them out, so it is much less likely to be blocked. You can adjust the delay as needed for the target site.
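If a fixed one-second pause still triggers rate limiting, a common refinement is to randomize the interval between requests so the timing looks less mechanical. Here is a minimal sketch; the `polite_sleep` helper and its 1 to 3 second default range are illustrative assumptions, not part of the code above:

```python
import random
import time


def polite_sleep(min_s=1.0, max_s=3.0):
    # Sleep for a random interval between min_s and max_s seconds.
    # The 1-3 second default is an arbitrary example; tune it per site.
    time.sleep(random.uniform(min_s, max_s))
```

Calling `polite_sleep()` wherever the code above uses `time.sleep(1)` spreads the requests out irregularly, which tends to cooperate better with simple rate limiters.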