pd.DataFrame.from_dic
时间: 2023-11-03 11:56:53 浏览: 100
pd.DataFrame.from_dic方法是不存在的(正确的方法名是 pd.DataFrame.from_dict),我犯了个错误,非常抱歉。要将字典拆分为DataFrame,您可以使用以下方法:
```python
import pandas as pd

# Source mapping: id -> list of associated code strings.
dictionary = {10000626: ['210964244', '210357576'], 10000744: ['320257863'], 10000762: ['210864888', '320082031']}

# BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
# so the original append-in-a-loop raises AttributeError on current pandas.
# Building all rows first and constructing the frame once is also O(n)
# instead of O(n^2) (append copied the whole frame on every call).
rows = [{'key': key, 'values': value}
        for key, values in dictionary.items()
        for value in values]
df = pd.DataFrame(rows, columns=['key', 'values'])

# Print the resulting DataFrame: one row per (key, value) pair.
print(df)
```
这将输出以下DataFrame:
```
key values
0 10000626 210964244
1 10000626 210357576
2 10000744 320257863
3 10000762 210864888
4 10000762 320082031
```
每个字典键和对应的值都被拆分为单独的行,并以"key"和"values"作为列名。
相关问题
import numpy as np import pandas as pd from wordcloud import WordCloud, ImageColorGenerator import matplotlib.pyplot as plt from PIL import Image def draw_cloud(read_name): image = Image.open('/Users/zhoulexin/downloads/学者.png') # 作为背景轮廓图 graph = np.array(image) # 参数分别是指定字体、背景颜色、最大的词的大小、使用给定图作为背景形状 wc = WordCloud(font_path='System/Library/Fonts/Supplemental/Arial Unicode.ttf', background_color='black', max_words=100, mask=graph) fp = pd.read_csv(read_name, encoding='gb18030') # 读取词频文件, 因为要显示中文,故编码为gbk name = list(fp.name) # 词 value = fp.val # 词的频率 for i in range(len(name)): name[i] = str(name[i]) dic = dict(zip(name, value)) # 词频以字典形式存储 wc.generate_from_frequencies(dic) # 根据给定词频生成词云 image_color = ImageColorGenerator(graph) plt.imshow(wc) plt.axis("off") # 不显示坐标轴 plt.show() wc.to_file('scholar词云.png') # 图片命名 if __name__ == '__main__': draw_cloud("/Users/zhoulexin/downloads/李学龙.csv")本段代码报错为AttributeError: 'DataFrame' object has no attribute 'name'如何修改
在这段代码中,DataFrame对象没有"name"这个属性,所以代码会报错。你需要将下面的代码:
```
name = list(fp.name) # 词
```
修改为:
```
name = list(fp["name"]) # 词
```
这样就可以获取到"name"这一列的数据了。
帮我将以下代码写注释# coding=gbk # -- coding:uft-8 -- # 贝壳网小区 import requests from lxml import etree from time import sleep import hashlib from urllib import parse import pandas as pd def getPosi(tar): try: ak = 'C8rQZy1askzzMtdY3ChAZUer1P0PRjI0' sk = 'shShi1VLCkH1gGR4v75d2LTnrn2Vm5Mg' add = f'/geocoding/v3/?address={tar}&output=json&ak={ak}&city=大连市' add = parse.quote(add, safe="/:=&?#+!$,;'@()*[]") sn = hashlib.md5(parse.quote_plus(add + sk).encode('utf-8')).hexdigest() url = f'https://api.map.baidu.com{add}&sn={sn}' dic = requests.get(url).json() lat = dic['result']['location']['lat'] lng = dic['result']['location']['lng'] return lat, lng except: return None, None def collect(): items = { 'ganjingzi': 22, 'zhongshan': 19, 'shahekou': 14, 'xigang': 12 } resLs = [] for key in items: for page in range(items[key]): page += 1 url = f'https://dl.ke.com/xiaoqu/{key}/pg{page}/' headers = { 'User-Agent': ua, 'Referer': url } while True: try: res = requests.get(url=url, headers=headers, timeout=(5, 5)).content.decode('utf-8') break except: print('again') tree = etree.HTML(res) for li in tree.xpath('//ul[@class="listContent"]/li'): href = li.xpath('./a/@href')[0] while True: try: res = requests.get(url=href, headers=headers, timeout=(5, 5)).content.decode('utf-8') break except: print('again') tree = etree.HTML(res) dic = { 'href': href, 'key': key, 'name': tree.xpath('//h1/@title')[0], 'price': (tree.xpath('//span[@class="xiaoquUnitPrice"]/text()') + [''])[0], 'property': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[1].strip(), 'building': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[4].strip(), 'house': tree.xpath('//span[@class="xiaoquInfoContent"]/text()')[5].strip() } dic['lat'], dic['lng'] = getPosi(dic['name']) print(dic) resLs.append(dic) sleep(3) df = pd.DataFrame(resLs) df.to_excel('贝壳网小区.xlsx', encoding='utf-8', index=False) if name == 'main': ua = 
'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/91.0.4472.106Safari/537.36' collect()
# coding=gbk # 设置文件编码为 gbk
# -- coding:utf-8 -- # 设置文件编码为 utf-8(原文 "uft-8" 为拼写错误)
# 贝壳网小区 # 注释,说明文件内容为贝壳网小区相关代码
import requests # 导入 requests 模块
from lxml import etree # 导入 lxml 中的 etree 模块
from time import sleep # 导入 time 模块中的 sleep 函数
import hashlib # 导入 hashlib 模块
from urllib import parse # 导入 urllib 中的 parse 模块
import pandas as pd # 导入 pandas 模块
# 定义获取位置信息的函数
def getPosi(tar):
    """Geocode an address via the Baidu Maps geocoding API (city fixed to 大连市).

    Parameters:
        tar: address string to geocode (the community name).

    Returns:
        (lat, lng) on success, or (None, None) on any failure
        (network error, bad JSON, address not found).
    """
    try:
        # NOTE(review): hard-coded API credentials — these should live in
        # configuration or environment variables, not in source control.
        ak = 'C8rQZy1askzzMtdY3ChAZUer1P0PRjI0'  # Baidu Maps access key
        sk = 'shShi1VLCkH1gGR4v75d2LTnrn2Vm5Mg'  # Baidu Maps secret key
        add = f'/geocoding/v3/?address={tar}&output=json&ak={ak}&city=大连市'
        add = parse.quote(add, safe="/:=&?#+!$,;'@()*[]")  # URL-encode the path+query
        # SN signature: md5 of quote_plus(path+query + secret key), per Baidu's scheme.
        sn = hashlib.md5(parse.quote_plus(add + sk).encode('utf-8')).hexdigest()
        url = f'https://api.map.baidu.com{add}&sn={sn}'
        dic = requests.get(url).json()
        lat = dic['result']['location']['lat']
        lng = dic['result']['location']['lng']
        return lat, lng
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any API/parse failure yields the sentinel pair.
        return None, None
# 定义数据收集函数
def collect():
    """Crawl ke.com community (小区) listings for four Dalian districts.

    For every community: scrape the detail page, geocode its name via
    ``getPosi``, and finally write all records to 贝壳网小区.xlsx.
    Relies on the module-level ``ua`` User-Agent string being set.
    """
    # District slug -> number of listing pages to crawl on the site.
    items = {
        'ganjingzi': 22,
        'zhongshan': 19,
        'shahekou': 14,
        'xigang': 12
    }
    resLs = []  # one dict per community
    for key in items:
        # Page numbers on the site are 1-based (original used range + ``page += 1``).
        for page in range(1, items[key] + 1):
            url = f'https://dl.ke.com/xiaoqu/{key}/pg{page}/'
            headers = {
                'User-Agent': ua,   # module-level UA, set in the __main__ guard
                'Referer': url
            }
            while True:  # retry the listing page until it downloads
                try:
                    res = requests.get(url=url, headers=headers, timeout=(5, 5)).content.decode('utf-8')
                    break
                except Exception:  # narrowed from bare except
                    print('again')
            tree = etree.HTML(res)
            for li in tree.xpath('//ul[@class="listContent"]/li'):
                href = li.xpath('./a/@href')[0]  # community detail-page URL
                while True:  # retry the detail page until it downloads
                    try:
                        res = requests.get(url=href, headers=headers, timeout=(5, 5)).content.decode('utf-8')
                        break
                    except Exception:
                        print('again')
                tree = etree.HTML(res)
                info = tree.xpath('//span[@class="xiaoquInfoContent"]/text()')
                dic = {
                    'href': href,
                    'key': key,
                    'name': tree.xpath('//h1/@title')[0],
                    # The unit-price span is absent for some communities;
                    # appending [''] gives a safe default instead of IndexError.
                    'price': (tree.xpath('//span[@class="xiaoquUnitPrice"]/text()') + [''])[0],
                    'property': info[1].strip(),  # assumes fixed info-span order — TODO confirm
                    'building': info[4].strip(),
                    'house': info[5].strip()
                }
                dic['lat'], dic['lng'] = getPosi(dic['name'])  # geocode by community name
                print(dic)
                resLs.append(dic)
                sleep(3)  # throttle so the site doesn't block us
    df = pd.DataFrame(resLs)
    # BUG FIX: the ``encoding`` kwarg was removed from DataFrame.to_excel
    # (deprecated in pandas 1.4, removed in 2.0) and would raise TypeError.
    df.to_excel('贝壳网小区.xlsx', index=False)
# BUG FIX: the original ``if name == 'main':`` raises NameError — the special
# variable is ``__name__`` and its script-mode value is ``'__main__'``.
if __name__ == '__main__':
    # Desktop-browser User-Agent read globally by collect().
    ua = 'Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/91.0.4472.106Safari/537.36'
    collect()
阅读全文