帮我将以下代码写注释# coding=gbk # -- coding:uft-8 -- # 贝壳网小区 import requests from lxml import etree from time import sleep import hashlib from urllib import parse import pandas as pd def getPosi(tar): try: ak = 'C8rQZy1askzzMtdY3ChAZUer1P0PRjI0' sk = 'shShi1VLCkH1gGR4v75d2LTnrn2Vm5Mg' add = f'/geocoding/v3/?address={tar}&output=json&ak={ak}&city=大连市' add = parse.quote(add, safe="/:=&?#+!$,;'@()*[]") sn = hashlib.md5(parse.quote_plus(add + sk).encode('utf-8')).hexdigest() url = f'https://api.map.baidu.com{add}&sn={sn}' dic = requests.get(url).json() lat = dic['result']['location']['lat'] lng = dic['result']['location']['lng'] return lat, lng except: return None, None def collect(): items = { 'ganjingzi': 22, 'zhongshan': 19, 'shahekou': 14, 'xigang': 12 } resLs = [] for key in items: for page in range(items[key]): page += 1 url = f'https://dl.ke.com/xiaoqu/{key}/pg{page}/' headers = { 'User-Agent': ua, 'Referer': url } while True: try: res = requests.get(url=url, headers=headers, timeout=(5, 5)).content.decode('utf-8') break except: print('again') tree = etree.HTML(res) for li in tree.xpath('//ul[@class="listContent"]/li'): href = li.xpath('./a/@href')[0] while True: try: res = requests.get(url=href, headers=headers, timeout=(5, 5)).content.decode('utf-8') break except: print('again') tree = etree.HTML(res) dic = { 'href': href, 'key': key, 'name': tree.xpath('//h1/@title')[0], 'price': (tree.xpath('//span[@class="xiaoquUnitPrice"]/text()') + [''])[0], 'property': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[1].strip(), 'building': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[4].strip(), 'house': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[5].strip() } dic['lat'], dic['lng'] = getPosi(dic['name']) print(dic) resLs.append(dic) sleep(3) df = pd.DataFrame(resLs) df.to_excel('贝壳网小区.xlsx', encoding='utf-8', index=False) if name == 'main': ua = 
'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/91.0.4472.106Safari/537.36' collect()
时间: 2023-10-05 18:10:00 浏览: 91
# coding=gbk # 设置文件编码为 gbk
# -- coding:utf-8 -- # 设置文件编码为 utf-8
# 贝壳网小区 # 注释,说明文件内容为贝壳网小区相关代码
import requests # 导入 requests 模块
from lxml import etree # 导入 lxml 中的 etree 模块
from time import sleep # 导入 time 模块中的 sleep 函数
import hashlib # 导入 hashlib 模块
from urllib import parse # 导入 urllib 中的 parse 模块
import pandas as pd # 导入 pandas 模块
# 定义获取位置信息的函数
def getPosi(tar):
    """Geocode an address via the Baidu Maps Geocoding v3 API.

    Args:
        tar: Address string to resolve (searched within 大连市 / Dalian).

    Returns:
        (lat, lng) floats on success, or (None, None) when the request
        fails or the response lacks a usable result.
    """
    # NOTE(review): hardcoded API credentials — move to config/env vars.
    ak = 'C8rQZy1askzzMtdY3ChAZUer1P0PRjI0'  # Baidu Maps access key (AK)
    sk = 'shShi1VLCkH1gGR4v75d2LTnrn2Vm5Mg'  # Baidu Maps secret key (SK)
    try:
        add = f'/geocoding/v3/?address={tar}&output=json&ak={ak}&city=大连市'
        # URL-encode the path+query, keeping the characters Baidu's SN
        # signing scheme expects to remain literal.
        add = parse.quote(add, safe="/:=&?#+!$,;'@()*[]")
        # sn = md5(urlencode(encoded_path_and_query + sk)), per Baidu's
        # server-side signature (SN) verification scheme.
        sn = hashlib.md5(parse.quote_plus(add + sk).encode('utf-8')).hexdigest()
        url = f'https://api.map.baidu.com{add}&sn={sn}'
        dic = requests.get(url).json()
        lat = dic['result']['location']['lat']  # latitude
        lng = dic['result']['location']['lng']  # longitude
        return lat, lng
    # Catch only the failures this path can realistically produce instead of
    # a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit.
    except (requests.RequestException, ValueError, KeyError, TypeError):
        return None, None
# 定义数据收集函数
def _fetch(url, headers):
    """GET `url` and return the UTF-8-decoded body, retrying until success."""
    while True:
        try:
            return requests.get(url=url, headers=headers,
                                timeout=(5, 5)).content.decode('utf-8')
        # Narrowed from a bare `except:` — retry only on transient
        # network/decoding failures, never on Ctrl-C.
        except (requests.RequestException, UnicodeDecodeError):
            print('again')


def collect():
    """Scrape Dalian residential-community listings from dl.ke.com.

    Walks a fixed set of district slugs with known page counts, visits each
    community's detail page, geocodes the community name via getPosi(),
    and saves all rows to 贝壳网小区.xlsx.

    Relies on the module-level `ua` User-Agent string being defined.
    """
    # District slug -> number of listing pages to crawl.
    items = {
        'ganjingzi': 22,
        'zhongshan': 19,
        'shahekou': 14,
        'xigang': 12
    }
    resLs = []  # accumulated row dicts
    for key, pages in items.items():
        for page in range(1, pages + 1):  # listing pages are 1-based
            url = f'https://dl.ke.com/xiaoqu/{key}/pg{page}/'
            headers = {
                'User-Agent': ua,
                'Referer': url
            }
            tree = etree.HTML(_fetch(url, headers))
            for li in tree.xpath('//ul[@class="listContent"]/li'):
                href = li.xpath('./a/@href')[0]  # detail-page URL
                detail = etree.HTML(_fetch(href, headers))
                # Hoist the repeated info-panel query; positions are fixed
                # by page layout — TODO(review): confirm indices 1/4/5
                # still map to property/building/house on current pages.
                info = detail.xpath('//span[@class="xiaoquInfoContent"]/text()')
                dic = {
                    'href': href,
                    'key': key,
                    'name': detail.xpath('//h1/@title')[0],
                    # Price span is absent for some communities; default ''.
                    'price': (detail.xpath('//span[@class="xiaoquUnitPrice"]/text()') + [''])[0],
                    'property': info[1].strip(),
                    'building': info[4].strip(),
                    'house': info[5].strip()
                }
                dic['lat'], dic['lng'] = getPosi(dic['name'])
                print(dic)
                resLs.append(dic)
                sleep(3)  # throttle to avoid anti-scraping blocks
    df = pd.DataFrame(resLs)
    # BUG FIX: the `encoding=` keyword was removed from DataFrame.to_excel
    # in pandas 2.0; the writer handles Unicode natively.
    df.to_excel('贝壳网小区.xlsx', index=False)
# BUG FIX: the original guard `if name == 'main':` raises NameError —
# the correct idiom uses the __name__ dunder.
if __name__ == '__main__':
    # Browser User-Agent sent with every request (read by collect()).
    ua = 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/91.0.4472.106Safari/537.36'
    collect()
阅读全文