```
import requests
import random
import parsel

visited_urls = set()  # set of URLs already visited
page = 1
while True:  # crawl loop
    url = f'https://travel.qunar.com/travelbook/list.htm?page={page}&order=hot_heat'
    html_data = requests.get(url).text
    selector = parsel.Selector(html_data)
    lis = selector.css('.list_item h2 a::attr(href)').getall()
    # iterate over all links on the current page
    for li in lis:
        detail_url = 'https://travel.qunar.com' + li
        if detail_url in visited_urls:  # skip links that were already visited
            continue
        visited_urls.add(detail_url)  # record the link in the set
        detail_html = requests.get(detail_url).text
        time.sleep(random.randint(3, 5))
        detail_selector = parsel.Selector(detail_html)
        title = detail_selector.css('.b_crumb_cont *:nth-child(3)::text').get()
        comment = detail_selector.css('.title.white::text').get()
        view_count = detail_selector.css('.view_count::text').get()
        date = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text').get()
        days = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text').get()
        money = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howmuch > p > span.data::text').get()
        character = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.who > p > span.data::text').get()
        play_list = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.how > p > span.data > span::text').getall()
        for play in play_list:
            print(play)
        print(title, comment, view_count, date, days, money, character, play_list, detail_url)
        time.sleep(random.randint(3, 5))
    # decide whether to keep crawling
    if len(lis) == 0 or page >= 20:
        break
    page += 1
```
How can I modify this code to save all of the scraped data to a CSV file?
You can use Python's built-in csv module to save the data to a CSV file. In your code, append each record to a list inside the loop, then write the whole list to a CSV file after the loop exits. Here is the modified code:
```
import requests
import random
import parsel
import csv
import time  # required for time.sleep below

visited_urls = set()  # set of URLs already visited
page = 1
data_list = []  # accumulates one row per travel note
while True:  # crawl loop
    url = f'https://travel.qunar.com/travelbook/list.htm?page={page}&order=hot_heat'
    html_data = requests.get(url).text
    selector = parsel.Selector(html_data)
    lis = selector.css('.list_item h2 a::attr(href)').getall()
    # iterate over all links on the current page
    for li in lis:
        detail_url = 'https://travel.qunar.com' + li
        if detail_url in visited_urls:  # skip links that were already visited
            continue
        visited_urls.add(detail_url)  # record the link in the set
        detail_html = requests.get(detail_url).text
        time.sleep(random.randint(3, 5))  # polite delay between requests
        detail_selector = parsel.Selector(detail_html)
        title = detail_selector.css('.b_crumb_cont *:nth-child(3)::text').get()
        comment = detail_selector.css('.title.white::text').get()
        view_count = detail_selector.css('.view_count::text').get()
        date = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text').get()
        days = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text').get()
        money = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.howmuch > p > span.data::text').get()
        character = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.who > p > span.data::text').get()
        play_list = detail_selector.css('#js_mainleft > div.b_foreword > ul > li.f_item.how > p > span.data > span::text').getall()
        # append the current record; join the activity tags so they
        # occupy a single CSV cell instead of a Python list repr
        data_list.append([title, comment, view_count, date, days, money,
                          character, ', '.join(play_list), detail_url])
        for play in play_list:
            print(play)
        print(title, comment, view_count, date, days, money, character, play_list, detail_url)
        time.sleep(random.randint(3, 5))
    # stop when a page returns no links or after 20 pages
    if len(lis) == 0 or page >= 20:
        break
    page += 1

# write all collected rows to the CSV file
with open('data.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['Title', 'Comment', 'Views', 'Date', 'Days',
                     'Cost', 'Companions', 'Activities', 'URL'])
    for data in data_list:
        writer.writerow(data)
```
This saves the data to a file named `data.csv`, with one row per travel note containing the title, comment, view count, date, trip length, cost, companions, activities, and link.
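One caveat: all rows are held in memory until the loop finishes, so an interruption partway through the crawl loses everything collected so far. Below is a minimal sketch of an incremental alternative that appends each record to the file as soon as it is scraped; the helper name `append_row` and the `HEADERS` constant are hypothetical, but the file name and columns match the code above:
```
import csv
import os

HEADERS = ['Title', 'Comment', 'Views', 'Date', 'Days',
           'Cost', 'Companions', 'Activities', 'URL']

def append_row(row, path='data.csv'):
    """Append one record to the CSV, writing the header row only
    when the file does not exist yet."""
    new_file = not os.path.exists(path)
    with open(path, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        if new_file:
            writer.writerow(HEADERS)
        writer.writerow(row)

# Inside the crawl loop, replace data_list.append(...) with e.g.:
# append_row([title, comment, view_count, date, days, money,
#             character, ', '.join(play_list), detail_url])
```
Reopening the file per row is slightly slower, but every scraped record reaches disk immediately; for a larger crawl you could instead keep one file handle open for the whole run.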