Can only append a dict if ignore_index=True
This error is raised when you call `DataFrame.append()` with a dictionary but without `ignore_index=True`. A dictionary has no index of its own, so pandas cannot align it with the DataFrame's existing index.
For example, if you have a dictionary `d` and a DataFrame `df`, you might try to append it like this:
```
df.append(d)
```
This raises the error above. To fix it, add `ignore_index=True` to the `append()` call:
```
df.append(d, ignore_index=True)
```
This tells pandas to discard the existing index labels and build a fresh integer index for the result, so the dictionary simply becomes a new row. Note that `DataFrame.append()` was deprecated in pandas 1.4 and removed in pandas 2.0, where `pd.concat()` is the replacement.
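A minimal, runnable illustration of both idioms (the column names are invented for the example):
```
import pandas as pd

df = pd.DataFrame({'name': ['Alice'], 'age': [30]})
d = {'name': 'Bob', 'age': 25}

# pandas < 2.0: append the dict as a new row (requires ignore_index=True)
# df = df.append(d, ignore_index=True)

# pandas >= 2.0: DataFrame.append is gone; wrap the dict in a one-row
# DataFrame and concatenate instead
df = pd.concat([df, pd.DataFrame([d])], ignore_index=True)
print(df)  # two rows, Alice and Bob, with a fresh 0..1 integer index
```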
Related questions
Add functionality to the following code so that it computes, for each row, the density of every word as well as the density of the total word count:
```
import pandas as pd
import re
from collections import Counter

# Read the dictionary txt file
with open('词典.txt', 'r', encoding='utf-8') as f:
    list_dict = [line.strip() for line in f]

# Read the column F data
df = pd.read_excel('实验体.xlsx')
list_f = df['Answer'].tolist()

# Count word frequencies in each line of text
dict_count_list = []
for text in list_f:
    # Find the dictionary words occurring in the text
    text = str(text)
    words = re.findall('|'.join(list_dict), text)
    # Count how many times each word occurs in this line
    dict_count = Counter(words)
    dict_count_list.append(dict_count)

# Merge the per-line word counts into one DataFrame
df_count = pd.DataFrame(dict_count_list)
df_count.index = df.index

# Write to an Excel file
writer = pd.ExcelWriter('数实验体100.xlsx')
df_count.to_excel(writer, sheet_name='Sheet1')
writer._save()
```
Here is the code with the added functionality:
```
import pandas as pd
import re
from collections import Counter

# Read the dictionary txt file
with open('词典.txt', 'r', encoding='utf-8') as f:
    list_dict = [line.strip() for line in f]

# Read the column F data
df = pd.read_excel('实验体.xlsx')
list_f = df['Answer'].tolist()

# Count word frequencies in each line of text
dict_count_list = []
for text in list_f:
    # Find the dictionary words occurring in the text
    # (re.escape guards against regex metacharacters in dictionary entries)
    text = str(text)
    words = re.findall('|'.join(map(re.escape, list_dict)), text)
    # Count how many times each word occurs in this line
    dict_count = Counter(words)
    dict_count_list.append(dict_count)

# Merge the per-line counts into one DataFrame; words absent from a
# line come back as NaN, so fill those cells with 0
df_count = pd.DataFrame(dict_count_list).fillna(0)
df_count.index = df.index

# Density of each word in each row: its count divided by the row total
# (vectorized; DataFrame.append is deprecated and removed in pandas 2.0)
row_totals = df_count.sum(axis=1)
df_density = df_count.div(row_totals, axis=0).add_suffix(' Density')

# Density of each row's total word count relative to the whole corpus
total_word_density = row_totals / row_totals.sum()

# Density of numeric words (interpreted here as dictionary words made
# up entirely of digits) relative to each row's total word count
numeric_cols = [c for c in df_count.columns if str(c).isdigit()]
numeric_word_density = df_count[numeric_cols].sum(axis=1) / row_totals

# Attach the density columns to the count DataFrame
df_count['Total Word Density'] = total_word_density
df_count['Numeric Word Density'] = numeric_word_density
df_count = pd.concat([df_count, df_density], axis=1)

# Write to an Excel file
writer = pd.ExcelWriter('数实验体100.xlsx')
df_count.to_excel(writer, sheet_name='Sheet1')
writer.close()
```
The density of every word in each row, each row's total word-count density, and the numeric-word density are now all computed and attached to the original DataFrame.
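As a quick sanity check of the row-wise division, here it is on a tiny hand-made frame (values invented for illustration):
```
import pandas as pd

df_count = pd.DataFrame({'apple': [2, 0], 'pear': [2, 3]})
row_totals = df_count.sum(axis=1)              # [4, 3]
df_density = df_count.div(row_totals, axis=0)
print(df_density)
# row 0 -> apple 0.5, pear 0.5
# row 1 -> apple 0.0, pear 1.0
```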
```
import jieba
import torch
from transformers import BertTokenizer, BertModel, BertConfig

# Path to the custom vocabulary
vocab_path = "output/user_vocab.txt"
count = 0
with open(vocab_path, 'r', encoding='utf-8') as file:
    for line in file:
        count += 1
user_vocab = count
print(user_vocab)

# Seed words
seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo_data.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)

# Load the BERT tokenizer with the custom vocabulary
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', vocab_file=vocab_path)
config = BertConfig.from_pretrained("bert-base-chinese", vocab_size=user_vocab)

# Load the BERT model
model = BertModel.from_pretrained('bert-base-chinese', config=config, ignore_mismatched_sizes=True)

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode with BERT
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
model.eval()
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, token_type_ids=seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/user_dict.txt')

# Build the privacy-word lexicon
privacy_words = set()
privacy_words_sim = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    model.eval()
    with torch.no_grad():
        outputs = model(token_tensor, token_type_ids=segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its cosine similarity to the seed encoding
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        if len(word) <= 1:
            continue
        sim_scores = []
        for j in range(len(seed_encoded_layers)):
            sim_scores.append(torch.cosine_similarity(seed_encoded_layers[j][0], encoded_layers[j][i], dim=0).item())
        cos_sim = sum(sim_scores) / len(sim_scores)
        print(cos_sim, word)
        if cos_sim >= 0.5:
            privacy_words.add(word)
            privacy_words_sim.add((word, cos_sim))
print(privacy_words)

# Write out the privacy-word lexicon
with open("output/privacy_words.txt", "w", encoding="utf-8") as f1:
    for word in privacy_words:
        f1.write(word + '\n')
with open("output/privacy_words_sim.txt", "w", encoding="utf-8") as f2:
    for word, cos_sim in privacy_words_sim:
        f2.write(word + "\t" + str(cos_sim) + "\n")
```
Explain the above code in detail: what does each part do, and why is it done this way?
This program extracts privacy words: it mines Weibo text data for words that may carry private information and writes them out to files.
The code breaks down into the following parts:
1. Import the required libraries and model classes
```
import jieba
import torch
from transformers import BertTokenizer, BertModel, BertConfig
```
Here, jieba is a Chinese word-segmentation library, torch is the PyTorch deep-learning framework, and transformers is Hugging Face's natural-language-processing model library.
2. Load the custom vocabulary
```
vocab_path = "output/user_vocab.txt"
count = 0
with open(vocab_path, 'r', encoding='utf-8') as file:
    for line in file:
        count += 1
user_vocab = count
print(user_vocab)
```
The custom vocabulary holds domain-specific terms, such as medical or legal terminology, that are missing from the general-purpose vocabulary and therefore have to be loaded separately. Since the file stores one token per line, counting its lines gives the vocabulary size, which is needed to configure the model below; a more compact way to count is shown after this paragraph.
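An equivalent one-liner for the line count (same behavior, just more compact):
```
with open(vocab_path, 'r', encoding='utf-8') as file:
    user_vocab = sum(1 for _ in file)
```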
3. Load the Weibo text data
```
text_data = []
with open("output/weibo_data.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)
```
This Weibo text, one post per line, is the input the program processes.
4. Load the BERT tokenizer with the custom vocabulary
```
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', vocab_file=vocab_path)
config = BertConfig.from_pretrained("bert-base-chinese", vocab_size=user_vocab)
```
The BERT tokenizer maps Chinese text to a sequence of token ids; passing the custom vocabulary file ensures that the domain-specific words it contains are converted to valid ids instead of falling back to `[UNK]`.
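A minimal sketch of the id conversion, using the stock bert-base-chinese vocabulary (the custom vocab_file is not part of this page):
```
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
tokens = ["[CLS]", "姓", "名", "[SEP]"]
print(tokenizer.convert_tokens_to_ids(tokens))
# four integer ids; any token missing from the vocabulary would map
# to the [UNK] id instead
```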
5. Load the BERT model
```
model = BertModel.from_pretrained('bert-base-chinese', config=config, ignore_mismatched_sizes=True)
```
BERT is a pretrained deep-learning model that encodes text into contextual vector representations. Because the config overrides vocab_size to match the custom vocabulary, ignore_mismatched_sizes=True is needed so that loading does not abort on the resized embedding weights.
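For orientation, a self-contained sketch (with the stock weights rather than the custom config) showing that the model's first return value is the last hidden state, one 768-dimensional vector per token:
```
import torch
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')
model.eval()

ids = torch.tensor([tokenizer.convert_tokens_to_ids(["[CLS]", "姓", "名", "[SEP]"])])
with torch.no_grad():
    hidden = model(ids)[0]   # last hidden state
print(hidden.shape)          # torch.Size([1, 4, 768]): one vector per token
```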
6. Build the seed-word encoding
```
seed_words = ['姓名']
seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
model.eval()
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, token_type_ids=seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]
```
The seed lexicon is a set of words already known to carry private information; here it contains just one word, '姓名' (name). This block converts the seed sequence into tensors and encodes it with BERT; seed_encoded_layers is the resulting last hidden state, one vector per token of the seed sequence.
7. Build the privacy-word lexicon
```
privacy_words = set()
privacy_words_sim = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    model.eval()
    with torch.no_grad():
        outputs = model(token_tensor, token_type_ids=segment_tensor)
        encoded_layers = outputs[0]
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        if len(word) <= 1:
            continue
        sim_scores = []
        for j in range(len(seed_encoded_layers)):
            sim_scores.append(torch.cosine_similarity(seed_encoded_layers[j][0], encoded_layers[j][i], dim=0).item())
        cos_sim = sum(sim_scores) / len(sim_scores)
        print(cos_sim, word)
        if cos_sim >= 0.5:
            privacy_words.add(word)
            privacy_words_sim.add((word, cos_sim))
print(privacy_words)
```
This is the core of the extraction. For each text it proceeds as follows (a short worked example of the similarity step follows the list):
1. Segment the text into words with jieba.
2. Convert the token sequence to tensors and encode it with BERT.
3. For each word, compute the cosine similarity between its embedding and the seed encoding. Note that seed_encoded_layers[j][0] is the vector at position 0 of the seed sequence, i.e. its [CLS] token, so every word is compared against the seed sentence's aggregate representation rather than the seed word's own vector.
4. If the similarity is at least 0.5, add the word to the privacy lexicon.
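The similarity itself is just the cosine of the angle between two embedding vectors; a self-contained sketch with made-up 3-dimensional vectors:
```
import torch

a = torch.tensor([1.0, 2.0, 2.0])
b = torch.tensor([2.0, 4.0, 4.0])   # same direction as a
c = torch.tensor([-1.0, 0.0, 1.0])

print(torch.cosine_similarity(a, b, dim=0).item())  # 1.0: identical direction
print(torch.cosine_similarity(a, c, dim=0).item())  # ~0.236: weakly related
```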
8. Write out the privacy-word lexicon
```
with open("output/privacy_words.txt", "w", encoding="utf-8") as f1:
    for word in privacy_words:
        f1.write(word + '\n')
with open("output/privacy_words_sim.txt", "w", encoding="utf-8") as f2:
    for word, cos_sim in privacy_words_sim:
        f2.write(word + "\t" + str(cos_sim) + "\n")
```
This final part writes the extracted privacy words to files: privacy_words.txt holds the words themselves, and privacy_words_sim.txt holds each word together with its cosine similarity to the seed.