import os
import requests
from bs4 import BeautifulSoup

# Create a directory to save the images
if not os.path.exists('data'):
    os.mkdir('data')

# Fetch the listing page
url = 'https://www.sucai999.com/pic/cate/263_267.html'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')

# Collect the image URLs and download the first 20
img_tags = soup.find_all('img', class_='lazy')
for index, img_tag in enumerate(img_tags[:20]):
    img_url = img_tag['data-original']
    response = requests.get(img_url)
    with open(f'data/img{index}.jpg', 'wb') as f:
        f.write(response.content)

print('Images saved!')

Please optimize this code so that it saves 20 images one after another.
The loop already downloads 20 images in sequence; the only change needed is to report progress inside the loop, so a message is printed after each image is saved:

import os
import requests
from bs4 import BeautifulSoup

# Create a directory to save the images
if not os.path.exists('data'):
    os.mkdir('data')

# Fetch the listing page
url = 'https://www.sucai999.com/pic/cate/263_267.html'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')

# Collect the image URLs and download the first 20
img_tags = soup.find_all('img', class_='lazy')
for index, img_tag in enumerate(img_tags[:20]):
    img_url = img_tag['data-original']
    response = requests.get(img_url)
    with open(f'data/img{index}.jpg', 'wb') as f:
        f.write(response.content)
    print(f'Image {index + 1} saved!')
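A minimal, more defensive sketch of the same download loop, assuming the listing page still marks lazy-loaded images with a data-original attribute; the timeout values, the MAX_IMAGES constant, and the half-second pause are illustrative choices, not part of the original code. It checks HTTP status, skips tags without the attribute, and pauses between downloads so the scrape is less likely to be throttled:

import os
import time
import requests
from bs4 import BeautifulSoup

SAVE_DIR = 'data'          # same output directory as above
LIST_URL = 'https://www.sucai999.com/pic/cate/263_267.html'
MAX_IMAGES = 20            # assumed limit, matching the [:20] slice above

os.makedirs(SAVE_DIR, exist_ok=True)

# Fetch the listing page with a timeout so a hung connection does not block the script
page = requests.get(LIST_URL, timeout=10)
page.raise_for_status()
soup = BeautifulSoup(page.text, 'html.parser')

saved = 0
for img_tag in soup.find_all('img', class_='lazy'):
    if saved >= MAX_IMAGES:
        break
    img_url = img_tag.get('data-original')   # skip tags without the lazy-load attribute
    if not img_url:
        continue
    try:
        resp = requests.get(img_url, timeout=10)
        resp.raise_for_status()
    except requests.RequestException as exc:
        print(f'Skipping {img_url}: {exc}')
        continue
    with open(os.path.join(SAVE_DIR, f'img{saved}.jpg'), 'wb') as f:
        f.write(resp.content)
    saved += 1
    print(f'Image {saved} saved!')
    time.sleep(0.5)   # small pause between requests to avoid hammering the server

print(f'Done: {saved} images saved.')

After running either version, the data directory should contain up to 20 .jpg files named img0.jpg through img19.jpg.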