The following code raises an error when run:

```
from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

# Seed word list
seed_words = ['个人信息', '隐私', '泄露', '安全']

# Load the Weibo user text corpus (assumed to be stored in weibo1.txt)
with open('output/weibo1.txt', 'r', encoding='utf-8') as f:
    corpus = f.readlines()

# Preprocess the corpus to get a vector for each Chinese word
corpus_vectors = []
for text in corpus:
    # Split the text into tokens with the BERT tokenizer
    tokens = tokenizer.tokenize(text)
    # Convert the tokens to their ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Convert the id sequence to a PyTorch tensor
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    # Compute vectors with the BERT model
    with torch.no_grad():
        outputs = model(input_ids)
        last_hidden_state = outputs[0][:, 1:-1, :]
        avg_pooling = torch.mean(last_hidden_state, dim=1)
        corpus_vectors.append(avg_pooling.numpy())

# Compute the cosine similarity between each word and the seed words
similarity_threshold = 0.8
privacy_words = set()
for seed_word in seed_words:
    # Convert the seed word to its ids
    seed_word_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(seed_word))
    # Convert the id sequence to a tensor and add a batch dimension
    seed_word_ids = torch.tensor(seed_word_ids).unsqueeze(0)
    # Compute the seed word vector with the BERT model
    with torch.no_grad():
        outputs = model(seed_word_ids)
        last_hidden_state = outputs[0][:, 1:-1, :]
        avg_pooling = torch.mean(last_hidden_state, dim=1)
        seed_word_vector = avg_pooling.numpy()
    # Compute the cosine similarity between each word and this seed word
    for i, vector in enumerate(corpus_vectors):
        sim = cosine_similarity([seed_word_vector], [vector])[0][0]
        if sim >= similarity_threshold:
            privacy_words.add(corpus[i])
print(privacy_words)
```

Running it fails with the error `ValueError: Found array with dim 3. check_pairwise_arrays expected <= 2.` How should I modify the code?
`sklearn`'s `cosine_similarity` only accepts arrays with at most two dimensions. `avg_pooling.numpy()` keeps the batch dimension and has shape `(1, 768)`, so wrapping it in a list as `cosine_similarity([seed_word_vector], [vector])` builds 3-D input of shape `(1, 1, 768)`, which is exactly what the `ValueError` reports. The fix is to drop the batch dimension before storing each vector: change `avg_pooling.numpy()` to `avg_pooling.numpy().squeeze(0)` both when filling `corpus_vectors` and when computing `seed_word_vector`, so every stored vector is 1-D. Two smaller bugs are worth fixing at the same time: the ids are built without `[CLS]`/`[SEP]` even though the `[1:-1]` slice assumes they are there, so encode with `add_special_tokens=True`, and lines longer than BERT's 512-token limit need truncation.
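A minimal sketch of the shape mismatch (768 is the hidden size of `bert-base-chinese`):

```
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

v = np.ones((1, 768))                # shape of avg_pooling.numpy()
# cosine_similarity([v], [v])       # [v] has shape (1, 1, 768) -> the dim-3 ValueError
w = v.squeeze(0)                     # shape (768,)
print(cosine_similarity([w], [w])[0][0])  # [w] has shape (1, 768), prints 1.0
```

With these changes applied, the corrected code looks like this: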
```
from transformers import BertTokenizer, BertModel
import torch
from sklearn.metrics.pairwise import cosine_similarity

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

# Seed word list
seed_words = ['个人信息', '隐私', '泄露', '安全']

# Load the Weibo corpus (assumed to be stored in weibo1.txt);
# skip blank lines, which would otherwise produce NaN vectors
with open('output/weibo1.txt', 'r', encoding='utf-8') as f:
    corpus = [line.strip() for line in f if line.strip()]

# Compute one vector per line of text
corpus_vectors = []
for text in corpus:
    # encode() adds [CLS]/[SEP], so the [1:-1] slice below really drops them;
    # truncate long lines to BERT's 512-token limit
    input_ids = tokenizer.encode(text, add_special_tokens=True,
                                 truncation=True, max_length=512)
    # Convert the id sequence to a tensor with a batch dimension
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    # Run BERT and mean-pool the content token embeddings
    with torch.no_grad():
        outputs = model(input_ids)
        last_hidden_state = outputs[0][:, 1:-1, :]  # drop [CLS] and [SEP]
        avg_pooling = torch.mean(last_hidden_state, dim=1)
    # squeeze(0) turns the (1, 768) array into a 1-D (768,) vector
    corpus_vectors.append(avg_pooling.numpy().squeeze(0))

# Compare every text against each seed word
similarity_threshold = 0.8
privacy_words = set()
for seed_word in seed_words:
    # Encode the seed word the same way
    seed_word_ids = tokenizer.encode(seed_word, add_special_tokens=True)
    seed_word_ids = torch.tensor(seed_word_ids).unsqueeze(0)
    # Run BERT to get the seed word vector
    with torch.no_grad():
        outputs = model(seed_word_ids)
        last_hidden_state = outputs[0][:, 1:-1, :]
        avg_pooling = torch.mean(last_hidden_state, dim=1)
    seed_word_vector = avg_pooling.numpy().squeeze(0)
    # cosine_similarity needs 2-D input, so wrap each 1-D vector in a list
    for i, vector in enumerate(corpus_vectors):
        sim = cosine_similarity([seed_word_vector], [vector])[0][0]
        if sim >= similarity_threshold:
            privacy_words.add(corpus[i])
print(privacy_words)
```
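As an aside, the inner `for i, vector in enumerate(corpus_vectors)` loop is not strictly needed. Here is a sketch of a vectorized variant, reusing the names from the code above: stack `corpus_vectors` into a single matrix and let `cosine_similarity` compare a seed vector against every text in one call:

```
import numpy as np

# Stack the 1-D per-text vectors into a (num_texts, 768) matrix, once
corpus_matrix = np.vstack(corpus_vectors)

# Inside the seed-word loop, this replaces the inner loop:
sims = cosine_similarity(seed_word_vector.reshape(1, -1), corpus_matrix)[0]
for i in np.where(sims >= similarity_threshold)[0]:
    privacy_words.add(corpus[i])
```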
Hope this solves your problem.