给路径D:\各种文件\python\python 爬虫\jie_dan\dou_ban\豆瓣_书评 下唯一的py文件增添注释
时间: 2023-07-05 14:30:20 浏览: 99
注释.py
好的,我会为该py文件增加注释。以下是注释后的代码:
```python
import random
import re
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
# Fetch the raw HTML of a page.
def get_html(url, timeout=10):
    """Fetch *url* and return its decoded HTML text.

    Sends a browser-like User-Agent so Douban does not reject the
    request. Returns "" on any network/HTTP failure; callers treat
    that as an empty page.

    Args:
        url: Absolute URL to fetch.
        timeout: Seconds to wait for the server (new keyword, default
            10; the original had no timeout and could hang forever).

    Returns:
        The response body as text, or "" on failure.
    """
    # Browser-like headers so the request is not rejected as a bot.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        r = requests.get(url, headers=headers, timeout=timeout)
        r.raise_for_status()
        # Guess the real encoding from the body, not just the header.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare ``except``: only network/HTTP errors are
        # swallowed; programming errors now surface instead of hiding.
        return ""
# Collect the detail-page URL of every book on the Top-250 list.
def get_book_urls(start_url):
    """Return the detail-page URLs of all books on the Top-250 list.

    Walks the ten list pages (?start=0, 25, ..., 225) and pulls the
    href out of each title link.
    """
    urls = []
    # Douban paginates the list 25 books at a time.
    for offset in range(0, 250, 25):
        page_html = get_html('{}?start={}'.format(start_url, offset))
        soup = BeautifulSoup(page_html, 'html.parser')
        for anchor in soup('a', {'class': 'title'}):
            urls.append(anchor.get('href'))
        # Random 1-3 s pause between pages to avoid an IP ban.
        time.sleep(random.randint(1, 3))
    return urls
# Extract the book title.
def get_book_name(soup):
    """Return the book title taken from the page's <h1> element."""
    return soup.find('h1').string
# Extract the average rating.
def get_book_rate(soup):
    """Return the rating text from the <strong class="ll rating_num"> node."""
    return soup.find('strong', {'class': 'll rating_num'}).string
# Extract how many people rated the book.
def get_book_comment_count(soup):
    """Return the rating-count text nested inside the rating_people link."""
    anchor = soup.find('a', {'class': 'rating_people'})
    return anchor.find('span').string
# Extract the user tags attached to the book.
def get_book_tags(soup):
    """Return the list of tag strings from the page's tag section.

    Robustness fix: the original raised AttributeError when the
    ``db-tags-section`` div is absent (some book pages omit it);
    an empty list is now returned instead.
    """
    section = soup.find('div', {'id': 'db-tags-section'})
    if section is None:
        return []
    return [a.string for a in section.find_all('a')]
# Scrape user comments for one book.
def _extract_comments(soup):
    """Pull cleaned comment strings out of one comments-page soup."""
    found = []
    for item in soup.find_all('div', {'class': 'comment-item'}):
        text = item.p.get_text().strip()
        # Collapse all whitespace. The original pattern r'[\n\s+]' made
        # '+' a literal class member and so also stripped plus signs,
        # but the punctuation pass below removes '+' anyway, so the
        # final result is unchanged by this correction.
        text = re.sub(r'\s+', '', text)
        # Strip punctuation, keeping word characters only.
        text = re.sub(r'[^\w\s]', '', text)
        if text:
            found.append(text)
    return found


def get_book_comments(book_url):
    """Return cleaned short-comment texts for the book at *book_url*.

    Fetches the first comments page, then every page linked from its
    paging bar. Fixes two defects in the original:
      * the first page was parsed for paging links only and its own
        comments were never collected;
      * paging hrefs were fetched verbatim, so relative hrefs (which
        the paging bar appears to emit — confirm against live markup)
        silently failed inside get_html. They are now resolved with
        urljoin; absolute hrefs pass through unchanged.
    """
    first_url = book_url + 'comments/'
    soup = BeautifulSoup(get_html(first_url), 'html.parser')
    # Collect the first page's comments directly.
    comments = _extract_comments(soup)
    # Gather the other pages' absolute URLs from the paging bar, if any.
    page_urls = set()
    paging = soup.find('div', {'class': 'paging'})
    if paging:
        for link in paging.find_all('a'):
            page_urls.add(urljoin(first_url, link.get('href')))
    # Never re-fetch the page we already parsed.
    page_urls.discard(first_url)
    for page_url in page_urls:
        page_soup = BeautifulSoup(get_html(page_url), 'html.parser')
        comments.extend(_extract_comments(page_soup))
        # Random 1-3 s pause between pages to avoid an IP ban.
        time.sleep(random.randint(1, 3))
    return comments
# Assemble every scraped field for one book.
def get_book_info(book_url):
    """Fetch one book's detail page and bundle its scraped fields.

    Returns a dict with keys: name, rate, count, tags, comments.
    """
    soup = BeautifulSoup(get_html(book_url), 'html.parser')
    # Dict-literal values evaluate left to right, preserving the
    # original call order (name, rate, count, tags, comments).
    return {
        'name': get_book_name(soup),
        'rate': get_book_rate(soup),
        'count': get_book_comment_count(soup),
        'tags': get_book_tags(soup),
        'comments': get_book_comments(book_url),
    }
if __name__ == '__main__':
    # Entry point: scrape every Top-250 book, then dump the results.
    start_url = 'https://book.douban.com/top250'
    book_infos = []
    # One record per book; pause 1-3 s between books to avoid an IP ban.
    for book_url in get_book_urls(start_url):
        book_infos.append(get_book_info(book_url))
        time.sleep(random.randint(1, 3))
    # Persist one dict repr per line.
    with open('book_info.txt', 'w', encoding='utf-8') as f:
        for info in book_infos:
            f.write(str(info) + '\n')
```
注释已添加在代码中,希望可以帮助您更好地理解代码。
阅读全文