```
import torch
from transformers import BertTokenizer, BertModel

# Load the seed word list
seed_words = []
with open("output/base_words.txt", "r", encoding="utf-8") as f:
    for line in f:
        seed_words.append(line.strip())
print(seed_words)

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

# Build the privacy word list
privacy_words = set(seed_words)
for text in text_data:
    # Tokenize the text and add the special tokens
    tokens = ["[CLS]"] + tokenizer.tokenize(text) + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with the BERT model
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each token, compute its similarity to the seed words
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        sim = cosine_similarity(word_tensor, encoded_layers[0][1:-1])[0].max()
        # if sim > 0.5:
        #     privacy_words.add(word)

# Write out the privacy word list
with open("output/privacy_words.txt", "w", encoding="utf-8") as f:
    for word in privacy_words:
        f.write(word + "\n")
```

In the code above, which library should the `cosine_similarity()` in `sim = cosine_similarity(word_tensor, encoded_layers[0][1:-1])[0].max()` come from, and is this usage correct?

The `cosine_similarity()` function in this code should come from `sklearn.metrics.pairwise`; it computes the cosine similarity between two sets of vectors. Note that the code as posted never imports it, so it will raise a `NameError` until you add `from sklearn.metrics.pairwise import cosine_similarity`. The rest of the code uses the `torch` and `transformers` libraries to load the pretrained BERT model and tokenizer and to run the tensor operations.
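For reference, a minimal self-contained sketch of how `sklearn.metrics.pairwise.cosine_similarity` is imported and used; the vectors here are invented toy values rather than BERT outputs:

```
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Toy 4-dimensional "embeddings"; in the question these would be rows of
# the BERT last_hidden_state tensor instead.
a = np.array([[1.0, 0.0, 2.0, 0.5]])           # shape (1, 4)
b = np.array([[1.0, 0.1, 1.8, 0.4],
              [0.0, 1.0, 0.0, 0.0]])           # shape (2, 4)

# cosine_similarity returns the full (1, 2) matrix of pairwise scores:
# close to 1 for the similar row, 0 for the orthogonal one.
sim = cosine_similarity(a, b)
print(sim)
print(sim[0].max())  # the same [0].max() pattern used in the question
```

Because `cosine_similarity` converts its inputs to NumPy arrays, CPU `torch` tensors produced under `torch.no_grad()` can generally be passed to it directly, which is what the question's code does.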
Related questions

```
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

# Load the seed word list
seed_words = []
with open("output/base_words.txt", "r", encoding="utf-8") as f:
    for line in f:
        seed_words.append(line.strip())
print(seed_words)

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

# Build the privacy word list
privacy_words = set(seed_words)
for text in text_data:
    # Tokenize the text and add the special tokens
    tokens = ["[CLS]"] + tokenizer.tokenize(text) + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with the BERT model
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each token, compute its similarity to the seed words
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        sim = cosine_similarity(encoded_layers[0][1:-1], word_tensor, dense_output=False)[0].max()
        if sim > 0.5:
            privacy_words.add(word)

# Write out the privacy word list
with open("output/privacy_words.txt", "w", encoding="utf-8") as f:
    for word in privacy_words:
        f.write(word + "\n")
```

Regarding these two lines in the code above: `if sim > 0.5: privacy_words.add(word)`. The words written into the `privacy_words` set are not what I want: after running, everything added to the set is a single character. I need Chinese words of at least two characters that are not in the seed word list, and only the words from the Weibo text that are highly similar to the seed words should be written into `privacy_words`. Please modify the code above accordingly.

You can change the `if` condition in the code to `if sim > 0.5 and len(word) > 1 and word not in seed_words:`. With this change, only words that are at least two characters long and not in the seed word list are added to the `privacy_words` set. The complete modified code is as follows:

```
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

# Load the seed word list
seed_words = []
with open("output/base_words.txt", "r", encoding="utf-8") as f:
    for line in f:
        seed_words.append(line.strip())
print(seed_words)

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

# Build the privacy word list
privacy_words = set(seed_words)
for text in text_data:
    # Tokenize the text and add the special tokens
    tokens = ["[CLS]"] + tokenizer.tokenize(text) + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with the BERT model
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each token, compute its similarity to the seed words
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        sim = cosine_similarity(encoded_layers[0][1:-1], word_tensor, dense_output=False)[0].max()
        if sim > 0.5 and len(word) > 1 and word not in seed_words:
            privacy_words.add(word)

# Write out the privacy word list
with open("output/privacy_words.txt", "w", encoding="utf-8") as f:
    for word in privacy_words:
        f.write(word + "\n")
```
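One caveat worth flagging (an observation added here, not part of the original answer): `bert-base-chinese` tokenizes Chinese text character by character, so when the tokens come from `tokenizer.tokenize` every Chinese token has length 1 and the `len(word) > 1` filter discards nearly everything, which is why the later revisions of this code switch to jieba word segmentation. A quick check:

```
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

# bert-base-chinese splits Chinese text into individual characters,
# so every Chinese token has length 1.
print(tokenizer.tokenize("微博用户的姓名和电话"))
# ['微', '博', '用', '户', '的', '姓', '名', '和', '电', '话']
```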

```
import jieba
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode the seed words with BERT
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/userdict.txt')

# Build the privacy word list
privacy_words = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its similarity to the seed word
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        seed_tensors = seed_encoded_layers[0][i].reshape(1, -1)
        # Compute the similarity between the current Weibo word and the seed word
        sim = cosine_similarity(word_tensor, seed_tensors, dense_output=False)[0].max()
        print(sim, word)
        if sim > 0.5 and len(word) > 1:
            privacy_words.add(word)

print(privacy_words)
```

Running the code above fails with the following error:

```
Traceback (most recent call last):
  File "E:/PyCharm Community Edition 2020.2.2/Project/WordDict/newsim.py", line 397, in <module>
    seed_tensors = seed_encoded_layers[0][i].reshape(1, -1)
IndexError: index 3 is out of bounds for dimension 0 with size 3
```

Please help me fix it.

According to the error message, the problem is in indexing `seed_encoded_layers` with `i`: the seed sequence only contains three positions (`[CLS]`, the seed word, `[SEP]`), so `seed_encoded_layers[0]` has shape `[3, 768]`, while `i` runs over the (usually longer) Weibo sentence and quickly goes out of range. You can print the shape of `seed_encoded_layers` to confirm the dimensions, and check the values of `seed_tokens` and `seed_token_ids` to make sure they are what you expect. The fix is to take the seed word vector from a fixed position of the seed encoding rather than position `i`; since position 0 is `[CLS]`, the seed word sits at position 1. The modified code is below:

```
import jieba
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode the seed words with BERT
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/userdict.txt')

# Build the privacy word list
privacy_words = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its similarity to the seed word
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        seed_tensors = seed_encoded_layers[0][1].reshape(1, -1)  # fixed: position 1 is the seed word; position 0 is [CLS]
        # Compute the similarity between the current Weibo word and the seed word
        sim = cosine_similarity(word_tensor, seed_tensors, dense_output=False)[0].max()
        print(sim, word)
        if sim > 0.5 and len(word) > 1:
            privacy_words.add(word)

print(privacy_words)
```
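To see where the size-3 dimension in the error comes from, here is a small standalone shape check (a sketch assuming the same model as above; 768 is the hidden size of BERT-base). One further caveat, not raised in the original thread: in recent `transformers` versions the second positional argument of `BertModel` is `attention_mask`, so calls like `model(token_tensor, segment_tensor)` pass the all-zero segment ids as the attention mask; passing `token_type_ids=segment_tensor` by keyword (or omitting it) is likely what was intended.

```
import torch
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')
model.eval()

seed_tokens = ["[CLS]", "姓名", "[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)

with torch.no_grad():
    outputs = model(input_ids=torch.tensor([seed_token_ids]))
last_hidden = outputs[0]

# Only three positions were encoded ([CLS], '姓名', [SEP]), hence
# "size 3" in the IndexError: the last hidden state is [1, 3, 768].
print(last_hidden.shape)

# The seed word vector sits at position 1; position 0 is [CLS].
seed_vec = last_hidden[0][1]
print(seed_vec.shape)  # torch.Size([768])
```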

Related recommendations

```
import jieba
import torch
from transformers import BertTokenizer, BertModel, BertConfig

# Path to the custom vocabulary file
vocab_path = "output/user_vocab.txt"
count = 0
with open(vocab_path, 'r', encoding='utf-8') as file:
    for line in file:
        count += 1
user_vocab = count
print(user_vocab)

# Seed words
seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo_data.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
print(text_data)

# Load the BERT tokenizer with the custom vocabulary
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', vocab_file=vocab_path)
config = BertConfig.from_pretrained("bert-base-chinese", vocab_size=user_vocab)
# Load the BERT model
model = BertModel.from_pretrained('bert-base-chinese', config=config, ignore_mismatched_sizes=True)

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode the seed words with BERT
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
model.eval()
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/user_dict.txt')

# Build the privacy word lists
privacy_words = set()
privacy_words_sim = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    model.eval()
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its cosine similarity to the seed word
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        if len(word) <= 1:
            continue
        sim_scores = []
        for j in range(len(seed_encoded_layers)):
            sim_scores.append(torch.cosine_similarity(seed_encoded_layers[j][0], encoded_layers[j][i], dim=0).item())
        cos_sim = sum(sim_scores) / len(sim_scores)
        print(cos_sim, word)
        if cos_sim >= 0.5:
            privacy_words.add(word)
            privacy_words_sim.add((word, cos_sim))
print(privacy_words)

# Write out the privacy word lists
with open("output/privacy_words.txt", "w", encoding="utf-8") as f1:
    for word in privacy_words:
        f1.write(word + '\n')
with open("output/privacy_words_sim.txt", "w", encoding="utf-8") as f2:
    for word, cos_sim in privacy_words_sim:
        f2.write(word + "\t" + str(cos_sim) + "\n")
```

Please explain the code above in detail, including what each step does and why it is done that way.
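No answer to this question appears on this page. As a small reading aid only, here is a minimal sketch of the `torch.cosine_similarity` call that the inner loop relies on, using toy random vectors rather than the actual BERT embeddings:

```
import torch

# Stand-ins for seed_encoded_layers[j][0] and encoded_layers[j][i]:
# two 768-dimensional vectors.
a = torch.randn(768)
b = torch.randn(768)

# With two 1-D tensors and dim=0, torch.cosine_similarity returns a
# single scalar cosine similarity in [-1, 1].
sim = torch.cosine_similarity(a, b, dim=0)
print(sim.item())
```

Also worth noting when reading the code: `seed_encoded_layers` is the last hidden state with shape `[1, seq_len, 768]`, so `len(seed_encoded_layers)` equals the batch size (1), the averaging loop runs exactly once, and the comparison is against position 0 of the seed sequence, which is the `[CLS]` token rather than the seed word itself.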

```
import jieba
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

seed_words = ['姓名']
# with open("output/base_words.txt", "r", encoding="utf-8") as f:
#     for line in f:
#         seed_words.append(line.strip())
# print(seed_words)

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())
# print(text_data)

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

jieba.load_userdict('data/userdict.txt')

# Build the privacy word list
privacy_words = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    # Segment the text and add the special tokens
    tokens = ["[CLS]"] + words + ["[SEP]"]
    # print(tokens)
    # # Alternative: tokenize with the BERT tokenizer instead
    # tokens = ["[CLS]"] + tokenizer.tokenize(text) + ["[SEP]"]
    # print(tokens)
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    # print(token_ids)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # print(encoded_layers)
    # For each word, compute its similarity to the seed word
    for i in range(1, len(tokens) - 1):
        # print(tokens[i])
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        sim = cosine_similarity(encoded_layers[0][1:-1], word_tensor, dense_output=False)[0].max()
        if sim > 0.5 and len(word) > 1:
            privacy_words.add(word)
print(privacy_words)

# Write out the privacy word list
with open("output/privacy_words.txt", "w", encoding="utf-8") as f:
    for word in privacy_words:
        f.write(word + "\n")
```

If the code above fine-tuned BERT on my own Weibo data to obtain the word vectors and then computed the similarity to the seed words, would the output be more accurate? Please modify the code to implement this for me.
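No modified code follows this question on the page. As a rough sketch only: one common way to adapt `bert-base-chinese` to the Weibo corpus before extracting embeddings is continued masked-language-model pre-training with the `transformers` Trainer. The output directory and hyperparameters below are placeholders chosen for illustration, not values from the original post:

```
from transformers import (BertForMaskedLM, BertTokenizer,
                          DataCollatorForLanguageModeling,
                          LineByLineTextDataset, Trainer, TrainingArguments)

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertForMaskedLM.from_pretrained('bert-base-chinese')

# One Weibo post per line, the same file used in the question.
dataset = LineByLineTextDataset(tokenizer=tokenizer,
                                file_path="output/weibo1.txt",
                                block_size=128)

# Randomly mask 15% of tokens for the masked-language-model objective.
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer,
                                           mlm=True, mlm_probability=0.15)

args = TrainingArguments(output_dir="output/bert-weibo",  # placeholder path
                         num_train_epochs=3,
                         per_device_train_batch_size=16,
                         save_steps=1000)

trainer = Trainer(model=model, args=args,
                  data_collator=collator, train_dataset=dataset)
trainer.train()
trainer.save_model("output/bert-weibo")

# The adapted encoder can then replace 'bert-base-chinese' in the
# similarity code above, e.g.:
# model = BertModel.from_pretrained("output/bert-weibo")
```

Whether this actually makes the similarity scores more accurate depends mainly on how much Weibo text is available; with only a small corpus the gain over the off-the-shelf model may be marginal.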
