```
tokens = jieba.lcut(text)
tokens = [token for token in tokens if ((token not in stopwords) and len(token) > 1)]
return tokens
```
This code uses the jieba library to segment the text, filters out stopwords and tokens whose length is 1 or less, and returns the resulting token list; a short runnable sketch follows the list. In detail:
1. `jieba.lcut(text)`: segments the text with jieba's tokenizer and returns a list of tokens.
2. `tokens = [token for token in tokens if ((token not in stopwords) and len(token) > 1)]`: a list comprehension that keeps only the tokens that are not in the stopword list and are longer than one character, and stores them in a new list.
3. `return tokens`: returns the filtered token list.
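For context, a minimal runnable sketch of the surrounding function might look like the following; the wrapper name `tokenize` and the stopword file `stopwords.txt` are assumptions for illustration and do not come from the original snippet.
```
import jieba

# Hypothetical stopword file, one word per line (assumption, not from the original code)
with open("stopwords.txt", "r", encoding="utf-8") as f:
    stopwords = set(line.strip() for line in f)

def tokenize(text):
    # Segment with jieba, then drop stopwords and single-character tokens
    tokens = jieba.lcut(text)
    tokens = [token for token in tokens if ((token not in stopwords) and len(token) > 1)]
    return tokens

print(tokenize("今天的天气非常适合出门散步"))
```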
Related questions
```
def tokenize_text(text):
    tokens = jieba.lcut(text)
    tokens = [token.strip() for token in tokens]
    return tokens
```
This is a function that uses jieba to segment Chinese text. It works as follows:
- Use jieba.lcut to segment the text into a list of tokens.
- Strip surrounding whitespace from each token so that every token has a consistent format.
- Return the token list.
The function converts Chinese text into a sequence of meaningful tokens, which makes it easier to carry out downstream natural language processing tasks, as the short usage sketch below illustrates.
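A quick usage sketch (the sample sentence is purely illustrative):
```
import jieba

def tokenize_text(text):
    tokens = jieba.lcut(text)
    tokens = [token.strip() for token in tokens]
    return tokens

# Prints the list of whitespace-stripped tokens for the sample sentence
print(tokenize_text("我爱自然语言处理"))
```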
```
import jieba
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode with BERT
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/userdict.txt')

# Build the privacy word lexicon
privacy_words = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its similarity to the seed words
    for i in range(1, len(tokens)-1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        seed_tensors =seed_encoded_layers[0][i].reshape(1, -1)
        # Compute the similarity between the current Weibo token and the seed words
        sim = cosine_similarity(word_tensor, seed_tensors, dense_output=False)[0].max()
        print(sim, word)
        if sim > 0.5 and len(word) > 1:
            privacy_words.add(word)

print(privacy_words)
```
The code above raises an error when run. The error message is:
```
Traceback (most recent call last):
  File "E:/PyCharm Community Edition 2020.2.2/Project/WordDict/newsim.py", line 397, in <module>
    seed_tensors =seed_encoded_layers[0][i].reshape(1, -1)
IndexError: index 3 is out of bounds for dimension 0 with size 3
```
Please help me fix it.
According to the error message, the problem is in indexing `seed_encoded_layers`: the loop index `i` runs over the Weibo sentence, but the encoded seed sequence only has 3 positions (`[CLS]`, the seed word, `[SEP]`) along that dimension, so index 3 is out of range. You can print the shape of `seed_encoded_layers` to confirm that the tensor's dimensions match what you expect.
You can also check the values of `seed_tokens` and `seed_token_ids` to make sure they are correct, e.g. with a quick check like the one below.
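A minimal debugging sketch, to be placed right after `seed_encoded_layers = seed_outputs[0]` in the script above (these print statements are only a diagnostic aid, not part of the fix):
```
# Inspect the seed encoding right after it is computed
print(seed_tokens)                # ['[CLS]', '姓名', '[SEP]']
print(seed_token_ids)             # the corresponding vocabulary ids
print(seed_encoded_layers.shape)  # torch.Size([1, 3, 768]) for bert-base-chinese
```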
Here is the modified code. The seed word's embedding is taken at index 1 (index 0 is `[CLS]`), and the segment ids are passed as `token_type_ids`, since the second positional argument of `BertModel` in transformers is the attention mask:
```
import jieba
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import BertTokenizer, BertModel

seed_words = ['姓名']

# Load the Weibo text data
text_data = []
with open("output/weibo1.txt", "r", encoding="utf-8") as f:
    for line in f:
        text_data.append(line.strip())

# Load the BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model = BertModel.from_pretrained('bert-base-chinese')

seed_tokens = ["[CLS]"] + seed_words + ["[SEP]"]
seed_token_ids = tokenizer.convert_tokens_to_ids(seed_tokens)
seed_segment_ids = [0] * len(seed_token_ids)

# Convert to tensors and encode the seed sequence with BERT
# (pass the segment ids as token_type_ids; the second positional argument of
# BertModel in transformers is attention_mask, not the segment ids)
seed_token_tensor = torch.tensor([seed_token_ids])
seed_segment_tensor = torch.tensor([seed_segment_ids])
with torch.no_grad():
    seed_outputs = model(seed_token_tensor, token_type_ids=seed_segment_tensor)
    seed_encoded_layers = seed_outputs[0]

jieba.load_userdict('data/userdict.txt')

# Build the privacy word lexicon
privacy_words = set()
for text in text_data:
    words = jieba.lcut(text.strip())
    tokens = ["[CLS]"] + words + ["[SEP]"]
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    segment_ids = [0] * len(token_ids)
    # Convert to tensors and encode with BERT
    token_tensor = torch.tensor([token_ids])
    segment_tensor = torch.tensor([segment_ids])
    with torch.no_grad():
        outputs = model(token_tensor, token_type_ids=segment_tensor)
        encoded_layers = outputs[0]
    # For each word, compute its similarity to the seed word
    for i in range(1, len(tokens) - 1):
        word = tokens[i]
        if word in seed_words:
            continue
        word_tensor = encoded_layers[0][i].reshape(1, -1)
        # Fix: the seed sequence is ["[CLS]", seed word, "[SEP]"], so the seed word's
        # embedding sits at index 1; indexing with i ran past the length-3 sequence
        seed_tensors = seed_encoded_layers[0][1].reshape(1, -1)
        # Compute the similarity between the current Weibo token and the seed word
        sim = cosine_similarity(word_tensor, seed_tensors, dense_output=False)[0].max()
        print(sim, word)
        if sim > 0.5 and len(word) > 1:
            privacy_words.add(word)

print(privacy_words)
```
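As a follow-up usage note, the collected lexicon can be persisted once the loop finishes; the output file name `privacy_words.txt` below is an assumption for illustration, not part of the original code.
```
# Hypothetical output path (assumption, not from the original code)
with open("privacy_words.txt", "w", encoding="utf-8") as f:
    for word in sorted(privacy_words):
        f.write(word + "\n")
```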