I have this Doc2Vec similarity script:

```
import sys
import re
import jieba
import codecs
import gensim
import numpy as np
import pandas as pd

def segment(doc: str):
    stop_words = pd.read_csv('data/stopwords.txt', index_col=False, quoting=3,
                             names=['stopword'], sep='\n', encoding='utf-8')
    stop_words = list(stop_words.stopword)
    reg_html = re.compile(r'<[^>]+>', re.S)  # strip HTML tags, digits, etc.
    doc = reg_html.sub('', doc)
    doc = re.sub('[0-9]', '', doc)
    doc = re.sub('\s', '', doc)
    word_list = list(jieba.cut(doc))
    out_str = ''
    for word in word_list:
        if word not in stop_words:
            out_str += word
            out_str += ' '
    segments = out_str.split(sep=' ')
    return segments

def doc2vec(file_name, model):
    start_alpha = 0.01
    infer_epoch = 1000
    doc = segment(codecs.open(file_name, 'r', 'utf-8').read())
    vector = model.docvecs[doc_id]
    return model.infer_vector(doc)

# compute the cosine of two vectors
def similarity(a_vect, b_vect):
    dot_val = 0.0
    a_norm = 0.0
    b_norm = 0.0
    cos = None
    for a, b in zip(a_vect, b_vect):
        dot_val += a * b
        a_norm += a ** 2
        b_norm += b ** 2
    if a_norm == 0.0 or b_norm == 0.0:
        cos = -1
    else:
        cos = dot_val / ((a_norm * b_norm) ** 0.5)
    return cos

def test_model(file1, file2):
    print('loading model')
    model_path = 'tmp/zhwk_news.doc2vec'
    model = gensim.models.Doc2Vec.load(model_path)
    vect1 = doc2vec(file1, model)  # convert to a document vector
    vect2 = doc2vec(file2, model)
    print(sys.getsizeof(vect1))  # check how much space the variable takes
    print(sys.getsizeof(vect2))
    cos = similarity(vect1, vect2)
    print('similarity: %0.2f%%' % (cos * 100))

if __name__ == '__main__':
    file1 = 'data/corpus_test/t1.txt'
    file2 = 'data/corpus_test/t2.txt'
    test_model(file1, file2)
```

What is wrong with this code, and how do I fix it?
In `doc2vec()`, the line `vector = model.docvecs[doc_id]` references `doc_id`, which is never defined anywhere, so calling the function raises a `NameError`. If you actually need to look up a stored training vector, make `doc_id` a parameter of the function and have the caller pass in the document's identifier.
Also in `doc2vec()`, the result of that lookup is assigned to `vector` and then never used, so even with `doc_id` defined it is wasted computation and memory; the function should simply return the result of `model.infer_vector(doc)`. Note, too, that `start_alpha` and `infer_epoch` are defined but never passed to `infer_vector()`, so they currently have no effect.
Finally, in `test_model()` you print `sys.getsizeof(vect1)` to inspect memory usage. For a numpy array that reports the whole Python object's allocation (the object header plus, when the array owns its buffer, the data), not the size of the vector data itself. The `numpy.ndarray.nbytes` attribute gives exactly the size of the data payload, which is usually what you want.
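For example, with a 100-dimensional float32 array standing in for an inferred vector (the dimensionality here is just an assumption for illustration):

```
import sys
import numpy as np

vec = np.zeros(100, dtype=np.float32)  # stand-in for an inferred vector
print(vec.nbytes)          # 400: 100 elements * 4 bytes each
print(sys.getsizeof(vec))  # larger: the data plus the ndarray object header
```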
Here is the corrected code:
```
import sys
import re
import jieba
import codecs
import gensim
import numpy as np
import pandas as pd

def segment(doc: str):
    stop_words = pd.read_csv('data/stopwords.txt', index_col=False, quoting=3,
                             names=['stopword'], sep='\n', encoding='utf-8')
    stop_words = list(stop_words.stopword)
    reg_html = re.compile(r'<[^>]+>', re.S)  # strip HTML tags
    doc = reg_html.sub('', doc)
    doc = re.sub('[0-9]', '', doc)  # strip digits
    doc = re.sub(r'\s', '', doc)    # strip whitespace; raw string avoids an invalid-escape warning
    word_list = list(jieba.cut(doc))
    # keep tokens that are not stop words (building the list directly also
    # avoids the trailing empty string the old split-on-space produced)
    segments = [word for word in word_list if word not in stop_words]
    return segments

def doc2vec(file_name, model, doc_id=None):
    # doc_id is only needed if you also want to fetch a stored training
    # vector via model.docvecs[doc_id]; inference itself does not use it
    start_alpha = 0.01
    infer_epoch = 1000
    with codecs.open(file_name, 'r', 'utf-8') as f:
        doc = segment(f.read())
    return model.infer_vector(doc, alpha=start_alpha, steps=infer_epoch)

# compute the cosine similarity of two vectors
def similarity(a_vect, b_vect):
    dot_val = 0.0
    a_norm = 0.0
    b_norm = 0.0
    for a, b in zip(a_vect, b_vect):
        dot_val += a * b
        a_norm += a ** 2
        b_norm += b ** 2
    if a_norm == 0.0 or b_norm == 0.0:
        cos = -1  # sentinel value for a zero vector
    else:
        cos = dot_val / ((a_norm * b_norm) ** 0.5)
    return cos

def test_model(file1, file2):
    print('loading model')
    model_path = 'tmp/zhwk_news.doc2vec'
    model = gensim.models.Doc2Vec.load(model_path)
    vect1 = doc2vec(file1, model)  # infer a document vector
    vect2 = doc2vec(file2, model)
    print(vect1.nbytes)  # size of the vector data in bytes
    print(vect2.nbytes)
    cos = similarity(vect1, vect2)
    print('similarity: %0.2f%%' % (cos * 100))

if __name__ == '__main__':
    file1 = 'data/corpus_test/t1.txt'
    file2 = 'data/corpus_test/t2.txt'
    test_model(file1, file2)
```
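One version caveat: the `steps=` keyword above matches the gensim 3.x API. In gensim 4.0 and later, `infer_vector()` takes `epochs` instead of `steps`, and `model.docvecs` was renamed `model.dv`. If you are not sure which version you are running, a small wrapper like the sketch below (the `try/except` fallback is my own workaround, not an official gensim API) keeps the call working on both:

```
def infer_compat(model, tokens, alpha=0.01, n_epochs=1000):
    """Infer a document vector on either gensim 3.x or >= 4.0."""
    try:
        # gensim >= 4.0 spells the keyword 'epochs'
        return model.infer_vector(tokens, alpha=alpha, epochs=n_epochs)
    except TypeError:
        # older gensim 3.x spells it 'steps'
        return model.infer_vector(tokens, alpha=alpha, steps=n_epochs)
```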
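As an aside, the hand-rolled loop in `similarity()` works, but `infer_vector()` already returns numpy arrays, so the same cosine can be computed in a few lines of numpy. A sketch equivalent to the loop version, including its `-1` sentinel for zero vectors:

```
import numpy as np

def similarity_np(a_vect, b_vect):
    a = np.asarray(a_vect, dtype=np.float64)
    b = np.asarray(b_vect, dtype=np.float64)
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0.0:
        return -1  # same sentinel the loop version uses for zero vectors
    return float(a.dot(b) / denom)
```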