import requests import os from bs4 import BeautifulSoup class book_spider(): def __init__(self,root_url): self.root_url=root_url self.book_list=[] #一级页面中获取的数据(二级页面地址)存放于此列表 self.chapter_list=[] #二级页面中获取的数据(三级页面地址和章节名)存放于此列表 def get_url(url): while True: try: res=requests.get(url) if res.status_code==200: res.encoding =res.apparent_encoding print("页面获取成功") return res.text else: print("页面返回异常",res.status_code) except: print("页面获取错误") def get_book_list(self.url): res = self.get_url(url) html = BeautifulSoup(res,"html.parser") a_list = html.find_all("a",{"class":"name"}) for a in a_list: self.book_list.append(a["href"]) self.book_list = [self.root_url+i for i in self.bbok_list] self.book_list.remove('http://10.1.88.252:7000/庆余年') print(book_list) def get_chapter_list(self,url): res = self.get_url(url) html = BeautifulSoup(res,"html.parser") a_list = html.find_all("a",{"class":"chapter"}) for a in a_list: self.chapter_list.append((a["href"],a.text.replace("\n",""))) def get_content(self.chapter): url = self.root_url + chapter[0] print(url) book_name = chapter[0].split("/")[1] print(book_name) if not os.path.exists(book_name): os.mkdir(book_name) res = self.get_url(url) html = BeautifulSoup(res,"html.parser") content = html.find("div",{"id":"content"}).text print(content) path = os.path.join(book_name,chapter[1]) with open(path,"w",encoding="utf8") as f: f.write(content) def main(): self.get_book_list(self.root_url) for book in self.book_list: self.get_chapter_liat(book) for chapter in chapter_list: self.get_content(chapter) book_s = book_spider("http://10.1.88.252:7000") book_s.main()这是一段爬虫代码,找出里面的错误并改正
时间: 2024-04-28 18:25:19 浏览: 137
有以下几个错误：
1. `get_url` 方法定义时没有 `self` 参数，却按普通方法写在类里，应添加 `@staticmethod` 装饰器使其成为静态方法（或补上 `self` 参数）。
2. `get_book_list` 和 `get_content` 的定义 `def get_book_list(self.url)`、`def get_content(self.chapter)` 是语法错误，参数应以逗号分隔：`(self, url)`、`(self, chapter)`。
3. 在 `get_book_list` 方法中，`self.bbok_list` 是 `self.book_list` 的拼写错误；`print(book_list)` 应改为 `print(self.book_list)`，因为 `book_list` 是实例属性。
4. 在 `get_chapter_list` 方法中，应把 `self.chapter_list.append((a["href"], …))` 改为 `self.chapter_list.append((self.root_url + a["href"], …))`，因为需要拼出完整的章节链接。
5. `main` 方法定义缺少 `self` 参数；其中 `self.get_chapter_liat` 是 `self.get_chapter_list` 的拼写错误，`for chapter in chapter_list` 应为 `for chapter in self.chapter_list`。
6. 在 `get_content` 方法中，`print(url)`、`print(book_name)`、`print(content)` 是调试信息，可以删除。
改正后的代码如下:
```
import requests
import os
from bs4 import BeautifulSoup
class book_spider():
    """Simple crawler for a LAN novel site.

    Crawls the index page for book links, each book page for chapter
    links, then downloads every chapter's text into a per-book folder.
    """

    def __init__(self, root_url):
        self.root_url = root_url
        self.book_list = []     # absolute URLs of book pages found on the index page
        self.chapter_list = []  # (chapter URL, chapter title) pairs for the current book

    @staticmethod
    def get_url(url, max_retries=3):
        """Fetch *url* and return its decoded text, retrying up to *max_retries* times.

        Returns None when every attempt fails, instead of retrying forever
        as the previous implementation did.
        """
        for _ in range(max_retries):
            try:
                res = requests.get(url, timeout=10)
            except requests.RequestException:  # narrow except: don't swallow KeyboardInterrupt
                print("页面获取错误")
                continue
            if res.status_code == 200:
                # Let requests sniff the real encoding from the body.
                res.encoding = res.apparent_encoding
                print("页面获取成功")
                return res.text
            print("页面返回异常", res.status_code)
        return None

    @staticmethod
    def _book_name(root_url, chapter_url):
        """Return the book-folder name embedded in *chapter_url*.

        Chapter URLs have the form ``<root_url>/<book>/<chapter>``.  The
        previous code split the full URL on "/" and took index 1, which is
        always "" for an absolute ``http://`` URL, so mkdir() failed.
        """
        rel = chapter_url[len(root_url):] if chapter_url.startswith(root_url) else chapter_url
        return rel.strip("/").split("/")[0]

    def get_book_list(self, url):
        """Collect absolute book-page URLs from the index page at *url*."""
        res = self.get_url(url)
        if res is None:  # fetch failed: leave book_list unchanged
            return
        html = BeautifulSoup(res, "html.parser")
        for a in html.find_all("a", {"class": "name"}):
            self.book_list.append(self.root_url + a["href"])
        # Skip this one book; guard so a missing entry no longer raises ValueError.
        unwanted = 'http://10.1.88.252:7000/庆余年'
        if unwanted in self.book_list:
            self.book_list.remove(unwanted)
        print(self.book_list)

    def get_chapter_list(self, url):
        """Collect (chapter URL, chapter title) pairs from the book page at *url*."""
        res = self.get_url(url)
        if res is None:
            return
        html = BeautifulSoup(res, "html.parser")
        for a in html.find_all("a", {"class": "chapter"}):
            self.chapter_list.append((self.root_url + a["href"], a.text.replace("\n", "")))

    def get_content(self, chapter):
        """Download one chapter and save its text under the book's folder.

        *chapter* is a (chapter URL, chapter title) pair.
        """
        url = chapter[0]
        book_name = self._book_name(self.root_url, url)
        os.makedirs(book_name, exist_ok=True)  # race-free replacement for exists() + mkdir()
        res = self.get_url(url)
        if res is None:
            return
        html = BeautifulSoup(res, "html.parser")
        node = html.find("div", {"id": "content"})
        if node is None:  # page layout changed or empty chapter: nothing to save
            return
        path = os.path.join(book_name, chapter[1])
        with open(path, "w", encoding="utf8") as f:
            f.write(node.text)

    def main(self):
        """Crawl every book: index page -> chapter lists -> chapter files."""
        self.get_book_list(self.root_url)
        for book in self.book_list:
            # Reset per book: the previous version kept appending, so every
            # pass re-downloaded all chapters of all earlier books.
            self.chapter_list = []
            self.get_chapter_list(book)
            for chapter in self.chapter_list:
                self.get_content(chapter)
# Script entry point: crawl the whole site rooted at the LAN novel server.
spider = book_spider("http://10.1.88.252:7000")
spider.main()
```
阅读全文