帮我寻找windows版本的chrome 63.0.3239.132 解压版
时间: 2024-02-06 17:03:23 浏览: 17
您好!您可以在以下网站上寻找并下载chrome 63.0.3239.132 的解压版:
1. https://www.slimjet.com/chrome/google-chrome-old-version.php
2. https://www.slimbrowser.net/en/google-chrome-old-version.php
3. https://www.itechtics.com/download-google-chrome-full-setup/
希望能对您有所帮助!
相关问题
from bs4 import BeautifulSoup import requests import time import pymysql conn = pymysql.connect(host='localhost', user='root', password='123456', charset='utf8') cursor = conn.cursor() cursor.execute('create database wyya;') cursor.execute('use wyya;') create_Tb = 'create table sj(地址 varchar(100),标题 varchar(100),播放量 varchar(50),作者 varchar(50));' cursor.execute(create_Tb) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36' } for i in range(0, 1505, 35): print(i) time.sleep(2) url = 'https://music.163.com/discover/playlist/?cat=华语&order=hot&limit=35&offset=' + str(i)#修改这里即可 response = requests.get(url=url, headers=headers) html = response.text soup = BeautifulSoup(html, 'html.parser') # 获取包含歌单详情页网址的标签 ids = soup.select('.dec a') # 获取包含歌单索引页信息的标签 lis = soup.select('#m-pl-container li') print(len(lis)) for j in range(len(lis)): # 获取歌单详情页地址 url = ids[j]['href'] # 获取歌单标题 title = ids[j]['title'] # 获取歌单播放量 play = lis[j].select('.nb')[0].get_text() # 获取歌单贡献者名字 user = lis[j].select('p')[1].select('a')[0].get_text() # 输出歌单索引页信息 print(url, title, play, user) insert_Tb = 'insert into sj(地址,标题,播放量,作者) values(%s,%s,%s,%s);' val = (url, title, play, user) cursor.execute(insert_Tb, val) cursor.execute("select *from sj;") conn.commit(); data = cursor.fetchall() for bases in data: print(bases) conn.close()写出优化后的这段代码,使爬取到的所有数据全部存入数据库
from bs4 import BeautifulSoup
import requests
import time
import pymysql

# Crawl NetEase Cloud Music "华语" playlist index pages and store every
# playlist's URL, title, play count and author into MySQL table wyya.sj.
# Connect to the database (charset utf8 so Chinese column names/values work).
conn = pymysql.connect(host='localhost', user='root', password='123456', charset='utf8')
cursor = conn.cursor()
# Create database and table (idempotent thanks to "if not exists")
cursor.execute('create database if not exists wyya;')
cursor.execute('use wyya;')
create_Tb = 'create table if not exists sj(地址 varchar(100),标题 varchar(100),播放量 varchar(50),作者 varchar(50));'
cursor.execute(create_Tb)
# Request headers: pretend to be a regular desktop Chrome
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}
insert_Tb = 'insert into sj(地址,标题,播放量,作者) values(%s,%s,%s,%s);'
try:
    # Each index page shows 35 playlists; offset walks through all pages.
    for i in range(0, 1505, 35):
        print(i)
        time.sleep(2)  # throttle requests to avoid being blocked
        url = 'https://music.163.com/discover/playlist/?cat=华语&order=hot&limit=35&offset=' + str(i)
        # timeout prevents a stalled connection from hanging the crawl forever
        response = requests.get(url=url, headers=headers, timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        # Anchors with playlist URL/title, and the list items with play count/author
        ids = soup.select('.dec a')
        lis = soup.select('#m-pl-container li')
        print(len(lis))
        rows = []
        # zip guards against a length mismatch between the two selectors,
        # which would raise IndexError with parallel index access.
        for anchor, li in zip(ids, lis):
            row = (
                anchor['href'],                              # playlist detail-page URL
                anchor['title'],                             # playlist title
                li.select('.nb')[0].get_text(),              # play count
                li.select('p')[1].select('a')[0].get_text(), # author name
            )
            print(*row)
            rows.append(row)
        if rows:
            cursor.executemany(insert_Tb, rows)
            # Commit after every page: a crash later in the crawl no longer
            # discards rows already fetched — all scraped data is persisted.
            conn.commit()
    # Print everything stored, for verification
    cursor.execute("select * from sj;")
    for bases in cursor.fetchall():
        print(bases)
finally:
    # Always release the connection, even if a request or parse step raised.
    conn.close()
pandas版本是2.0.1 from bs4 import BeautifulSoup import pandas as pd import requests import time df = pd.read_csv('playlist.csv', header=None,error_bad_lines=False, names=['url', 'title', 'play', 'user']) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36' } for i in df['url']: time.sleep(2) url = 'https://music.163.com' + i response = requests.get(url=url, headers=headers) html = response.text soup = BeautifulSoup(html, 'html.parser') # 获取歌单标题 title = soup.select('h2')[0].get_text().replace(',', ',') # 获取标签 tags = [] tags_message = soup.select('.u-tag i') for p in tags_message: tags.append(p.get_text()) # 对标签进行格式化 if len(tags) > 1: tag = '-'.join(tags) else: tag = tags[0] # 获取歌单介绍 if soup.select('#album-desc-more'): text = soup.select('#album-desc-more')[0].get_text().replace('\n', '').replace(',', ',') else: text = '无' # 获取歌单收藏量 collection = soup.select('#content-operation i')[1].get_text().replace('(', '').replace(')', '') # 歌单播放量 play = soup.select('.s-fc6')[0].get_text() # 歌单内歌曲数 songs = soup.select('#playlist-track-count')[0].get_text() # 歌单评论数 comments = soup.select('#cnt_comment_count')[0].get_text() # 输出歌单详情页信息 print(title, tag, text, collection, play, songs, comments) # 将详情页信息写入CSV文件中 with open('music_message.csv', 'a+', encoding='utf-8-sig') as f: f.write(title + ',' + tag + ',' + text + ',' + collection + ',' + play + ',' + songs + ',' + comments + '\n') # 获取歌单内歌曲名称 li = soup.select('.f-hide li a') for j in li: with open('music_name.csv', 'a+', encoding='utf-8-sig') as f: f.write(j.get_text() + '\n')
根据你提供的代码,似乎是在使用 pandas 读取一个名为 'playlist.csv' 的文件,并对文件中的每个 URL 访问网页获取歌单信息和歌曲名称,并将这些信息写入两个不同的 CSV 文件中。在读取 CSV 文件时,你使用了 `error_bad_lines=False` 来跳过读取失败的行。但是,`error_bad_lines` 参数在 pandas 1.3 中已被弃用,并在 2.0 中被移除,因此在你使用的 2.0.1 版本中会报错。正确的做法是改用替代参数 `on_bad_lines='skip'`(即 `pd.read_csv('playlist.csv', header=None, on_bad_lines='skip', names=[...])`),而不是升级 pandas——新版本同样不支持旧参数。另外,你的代码中没有明显的错误,但建议在向 CSV 文件写入数据时,最好使用 pandas 的 `to_csv()` 函数,这样可以更方便地进行数据写入和数据处理。