```python
import requests
from bs4 import BeautifulSoup
import csv

def get_top250_movies():
    url = 'https://movie.douban.com/top250'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    movie_info_list = []
    for i in range(0, 250, 25):
        params = {'start': str(i)}
        res = requests.get(url, headers=headers, params=params)
        soup = BeautifulSoup(res.text, 'html.parser')
        movie_list = soup.find_all('div', class_='info')
        for movie in movie_list:
            title = movie.find('span', class_='title').text
            info = movie.find('div', class_='bd').p.text.strip().split('\n')
            director = info[0][4:]
            actors = info[1][3:]
            year = info[1][-5:-1]
            rating = movie.find('span', class_='rating_num').text
            comment_num = movie.find('div', class_='star').find_all('span')[3].text[:-3]
            movie_info_list.append([title, director, actors, year, rating, comment_num])
    return movie_info_list

def save_to_csv(movie_info_list):
    with open('movie_info.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['电影名称', '导演', '演员', '上映年份', '评分', '评论数'])
        for movie_info in movie_info_list:
            writer.writerow(movie_info)

if __name__ == '__main__':
    movie_info_list = get_top250_movies()
    save_to_csv(movie_info_list)
    print('电影信息保存成功!')
```
After this code runs successfully, how can each of the six saved pieces of information be written out separately into its own column?
You can modify the save_to_csv function so that each piece of information is written to the CSV file separately. The modified code is shown below:
```python
import requests
from bs4 import BeautifulSoup
import csv
import os


def get_top250_movies():
    url = 'https://movie.douban.com/top250'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }
    movie_info_list = []
    # The Top 250 list is paginated 25 movies at a time via the 'start' parameter.
    for i in range(0, 250, 25):
        params = {'start': str(i)}
        res = requests.get(url, headers=headers, params=params)
        soup = BeautifulSoup(res.text, 'html.parser')
        movie_list = soup.find_all('div', class_='info')
        for movie in movie_list:
            title = movie.find('span', class_='title').text
            info = movie.find('div', class_='bd').p.text.strip().split('\n')
            director = info[0][4:]
            actors = info[1][3:]
            year = info[1][-5:-1]
            rating = movie.find('span', class_='rating_num').text
            comment_num = movie.find('div', class_='star').find_all('span')[3].text[:-3]
            movie_info_list.append([title, director, actors, year, rating, comment_num])
    return movie_info_list


def save_to_csv(movie_info_list):
    # First write the data as before: one movie per row, six columns.
    with open('movie_info.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.writer(f)
        writer.writerow(['电影名称', '导演', '演员', '上映年份', '评分', '评论数'])
        for movie_info in movie_info_list:
            writer.writerow(movie_info)
    # Rewrite the file so that every value is written out individually,
    # one cell per row; the write above has completed, so the file is flushed.
    with open('movie_info.csv', 'r', newline='', encoding='utf-8-sig') as f_read:
        with open('movie_info_new.csv', 'w', newline='', encoding='utf-8-sig') as f_write:
            reader = csv.reader(f_read)
            writer = csv.writer(f_write)
            for row in reader:
                for value in row:
                    writer.writerow([value])
    # Remove the original file and rename the new one in its place.
    os.remove('movie_info.csv')
    os.rename('movie_info_new.csv', 'movie_info.csv')


if __name__ == '__main__':
    movie_info_list = get_top250_movies()
    save_to_csv(movie_info_list)
    print('电影信息保存成功!')
```
Running the modified code generates a file named 'movie_info.csv' in the same directory, in which every piece of information is written out individually (one value per line).
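As a quick sanity check (a minimal sketch, assuming the script above has already been run and 'movie_info.csv' sits in the current working directory), the resulting file can be read back with the csv module to inspect its layout:

```python
import csv

# Minimal verification sketch: assumes movie_info.csv was produced by the
# script above and is located in the current working directory.
with open('movie_info.csv', 'r', newline='', encoding='utf-8-sig') as f:
    reader = csv.reader(f)
    for i, row in enumerate(reader):
        print(row)       # each row is printed as a list of its cell values
        if i >= 9:       # show only the first 10 rows
            break
```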