import requests
from bs4 import BeautifulSoup
from threading import Thread


def crawl_books(start, end):
    """Crawl Dangdang search-result pages [start, end) and print, for each
    book: title, author, price and review count (the count is fetched from
    the book's own detail page).

    NOTE(review): the GB2312-encoded key %BC%C6%CB%E3%BB%FA decodes to
    "计算机" — confirm this is the intended search keyword.
    """
    for page_index in range(start, end):
        url = 'http://search.dangdang.com/?key=%BC%C6%CB%E3%BB%FA&act=input&page_index={}'.format(page_index)
        response = requests.get(url)
        # Separate names for the list page and the detail page so the two
        # soups do not shadow each other inside the inner loop.
        list_soup = BeautifulSoup(response.text, 'lxml')
        books = list_soup.find('ul', class_='bigimg')
        for book in books.find_all('li'):
            pic_anchor = book.find('a', class_='pic')
            title = pic_anchor.get('title')
            author = book.find('p', class_='search_book_author').text
            price = book.find('p', class_='price').find('span', class_='search_now_price').text
            # The href is protocol-relative ("//product.dangdang.com/...").
            book_link = 'https:' + pic_anchor.get('href')
            detail_soup = BeautifulSoup(requests.get(book_link).text, 'lxml')
            comment_num = detail_soup.find('a', class_='review_num').text
            print(title, author, price, comment_num)


threads = []
# Ten threads, each responsible for ten consecutive result pages
# (1-10, 11-20, ..., 91-100).
for i in range(1, 101, 10):
    t = Thread(target=crawl_books, args=(i, i + 10))
    threads.append(t)
    t.start()
for t in threads:
    t.join()
时间: 2023-12-12 18:04:57 浏览: 26
这段代码是一个简单的爬虫程序，用于爬取当当网上搜索关键词为“计算机”（URL 中的 GB2312 编码 %BC%C6%CB%E3%BB%FA）的图书信息，包括书名、作者、价格和评论数量等。代码使用了多线程的方式来提高爬取效率，其中 `crawl_books` 函数用于爬取一段连续的搜索结果页，通过 `Thread` 类创建多个线程并启动，最后使用 `join` 方法等待所有线程结束。
相关问题
写出增加爬取书籍评论数的代码import requests from bs4 import BeautifulSoup from threading import Thread def crawl_books(start, end): for i in range(start, end): url = 'http://search.dangdang.com/?key=%BC%C6%CB%E3%BB%FA&act=input&page_index={}'.format(i) response = requests.get(url) page = response.text soup = BeautifulSoup(page, 'lxml') books = soup.find('ul', class_='bigimg') for book in books.find_all('li'): title = book.find('a', class_='pic').get('title') author = book.find('p', class_='search_book_author').text price = book.find('p', class_='price').find('span', class_='search_now_price').text print(title, author, price) threads = [] for i in range(1, 101, 10): t = Thread(target=crawl_books, args=(i, i+10)) threads.append(t) t.start() for t in threads: t.join()
上面的代码只是用来爬取当当网中与搜索关键词“计算机”（URL 中的 GB2312 编码 %BC%C6%CB%E3%BB%FA）相关的书籍的名称、作者和价格，而没有爬取书籍评论数。如果需要爬取书籍评论数，可以先获取每本书的详情页链接，然后再进入详情页爬取评论数。具体的代码可以参考下面的示例：
```
import requests
from bs4 import BeautifulSoup
from threading import Thread
def get_book_links(start, end):
    """Collect the detail-page links of every book found on Dangdang
    search-result pages [start, end).

    Returns a list of absolute URLs.
    """
    links = []
    for page_index in range(start, end):
        url = 'http://search.dangdang.com/?key=%BC%C6%CB%E3%BB%FA&act=input&page_index={}'.format(page_index)
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'lxml')
        books = soup.find('ul', class_='bigimg')
        for book in books.find_all('li'):
            link = book.find('a', class_='pic').get('href')
            # Dangdang hrefs are protocol-relative ("//product.dangdang.com/...");
            # requests.get needs an absolute URL, so prepend the scheme.
            if link.startswith('//'):
                link = 'https:' + link
            links.append(link)
    return links
def get_comment_count(url):
    """Fetch a book's detail page and return its review-count text.

    NOTE(review): the class name 'comm_num' differs from the 'review_num'
    used by the earlier snippet on this page — verify against the live
    page markup before relying on it.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    count = soup.find('a', class_='comm_num').text
    return count
def crawl_books(start, end):
    """Print the detail-page link and review count of every book on
    search-result pages [start, end)."""
    links = get_book_links(start, end)
    for link in links:
        count = get_comment_count(link)
        print(link, count)
threads = []
# Ten threads, each crawling ten consecutive result pages (1-10, ..., 91-100).
for i in range(1, 101, 10):
    t = Thread(target=crawl_books, args=(i, i + 10))
    threads.append(t)
    t.start()
# Wait for every worker thread to finish before the script exits.
for t in threads:
    t.join()
```
上面的代码中,我们新增了两个函数:`get_book_links` 和 `get_comment_count`。`get_book_links` 用来获取每本书的详情页链接,`get_comment_count` 用来获取详情页中的评论数。在 `crawl_books` 函数中,我们先获取每个搜索结果页中的书籍链接,然后再调用 `get_comment_count` 函数获取评论数,并打印出来。最后,我们通过多线程的方式同时爬取多个搜索结果页中的书籍评论数。
增加代码对评论数量的爬取import requests from bs4 import BeautifulSoup from threading import Thread def crawl_books(start, end): for i in range(start, end): url = 'http://search.dangdang.com/?key=%BF%C6%BB%C3%D0%A1%CB%B5&act=input&page_index=1&sort_type=sort_default#J_tab'.format(i) response = requests.get(url) page = response.text soup = BeautifulSoup(page, 'lxml') books = soup.find('ul', class_='bigimg') for book in books.find_all('li'): title = book.find('a', class_='pic').get('title') author = book.find('p', class_='search_book_author').text price = book.find('p', class_='price').find('span', class_='search_now_price').text print(title, author, price) threads = [] for i in range(1, 101, 10): t = Thread(target=crawl_books, args=(i, i+10)) threads.append(t) t.start() for t in threads: t.join()
要增加代码对评论数量的爬取,您需要先确定评论数量的位置和元素,然后在循环中找到该元素并提取其文本。以下是一个修改过的爬取代码示例:
import requests
from bs4 import BeautifulSoup
from threading import Thread
def crawl_books(start, end):
    """Crawl Dangdang search-result pages [start, end) for the keyword
    科幻小说 and print each book's title, author, price and review count.
    """
    for page_index in range(start, end):
        # BUG FIX: page_index was hard-coded to 1 and the string had no {}
        # placeholder, so .format() was a no-op and every thread re-crawled
        # the same page.
        url = 'http://search.dangdang.com/?key=%BF%C6%BB%C3%D0%A1%CB%B5&act=input&page_index={}&sort_type=sort_default#J_tab'.format(page_index)
        response = requests.get(url)
        soup = BeautifulSoup(response.text, 'lxml')
        books = soup.find('ul', class_='bigimg')
        for book in books.find_all('li'):
            title = book.find('a', class_='pic').get('title')
            author = book.find('p', class_='search_book_author').text
            price = book.find('p', class_='price').find('span', class_='search_now_price').text
            # The third <a> inside search_star_line holds the review count.
            # NOTE(review): the [2] index is site-markup dependent — verify
            # against the live page.
            comment = book.find('p', class_='search_star_line').find_all('a')[2].text
            print(title, author, price, comment)
threads = []
# Ten threads, each crawling ten consecutive result pages (1-10, ..., 91-100).
for i in range(1, 101, 10):
    t = Thread(target=crawl_books, args=(i, i + 10))
    threads.append(t)
    t.start()
# Block until every worker thread has finished.
for t in threads:
    t.join()
在这个示例中,我们使用了`book.find('p', class_='search_star_line').find_all('a')[2].text`来找到评论数量元素并提取文本。您可能需要根据网站的具体结构进行调整。
相关推荐
![py](https://img-home.csdnimg.cn/images/20210720083646.png)
![py](https://img-home.csdnimg.cn/images/20210720083646.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)