```python
import math
import re
from collections import Counter

import jieba

# Read the two documents to compare into strings s1 and s2
with open('1.txt', 'r', encoding='utf-8') as f:
    s1 = f.read()
with open('2.txt', 'r', encoding='utf-8') as f:
    s2 = f.read()

# Load the stopword list, one word per line
stopwords = set()
with open('stopwords.txt', 'r', encoding='utf-8') as fstop:
    for each_word in fstop:
        stopwords.add(re.sub("\n", "", each_word))

# Segment both texts with jieba (full mode), dropping stopwords and empty tokens
s1_cut = [w for w in jieba.cut(s1, cut_all=True) if w not in stopwords and w != '']
s2_cut = [w for w in jieba.cut(s2, cut_all=True) if w not in stopwords and w != '']

# TF-IDF weight of one word in one document:
# tf = raw count in that document, idf = log(N / df) over all N documents
def get_tf_idf(word, cut_list, cut_code_list, doc_num):
    tf = cut_list.count(word)
    df = sum(1 for cut_code in cut_code_list if word in cut_code)
    return tf * math.log(doc_num / df)

word_set = list(set(s1_cut) | set(s2_cut))
doc_num = 2

# TF-IDF vector of each document over the shared vocabulary
s1_cut_tfidf = [get_tf_idf(word, s1_cut, [s1_cut, s2_cut], doc_num) for word in word_set]
s2_cut_tfidf = [get_tf_idf(word, s2_cut, [s1_cut, s2_cut], doc_num) for word in word_set]

# Keep the k words with the highest TF-IDF weight in each document
k = 10
s1_cut_topk = [word_set[i] for i in sorted(range(len(word_set)), key=lambda x: s1_cut_tfidf[x], reverse=True)[:k]]
s2_cut_topk = [word_set[i] for i in sorted(range(len(word_set)), key=lambda x: s2_cut_tfidf[x], reverse=True)[:k]]

# Build both frequency vectors over the SAME vocabulary (the union of the two
# top-k sets), so each dot-product term pairs counts of the same word
vocab = list(set(s1_cut_topk) | set(s2_cut_topk))
c1, c2 = Counter(s1_cut), Counter(s2_cut)
s1_cut_code = [c1[word] for word in vocab]
s2_cut_code = [c2[word] for word in vocab]

# Cosine similarity: dot(v1, v2) / (|v1| * |v2|)
dot = sum(a * b for a, b in zip(s1_cut_code, s2_cut_code))
sq1 = math.sqrt(sum(a * a for a in s1_cut_code))
sq2 = math.sqrt(sum(b * b for b in s2_cut_code))
try:
    result = round(dot / (sq1 * sq2), 3)
except ZeroDivisionError:
    result = 0.0

print("\nCosine similarity: %f" % result)
```
The script begins with its import statements. jieba is a Chinese word-segmentation library, used here to tokenize the input texts; math is Python's standard math module, supplying the log and sqrt functions the script needs; re is Python's regular-expression module, used to strip newlines from the stopword file; and Counter, a dict subclass from the standard collections module, tallies how often each token occurs. Together these cover the text processing and the numeric work that follow.
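As a quick illustration of the two less familiar pieces, here is a minimal, self-contained sketch of jieba's full-mode segmentation and of Counter. The sample sentence is jieba's own README example; the exact token list may vary slightly across jieba versions and dictionaries.

```python
import jieba
from collections import Counter

# jieba.cut returns a generator of tokens. cut_all=True is "full mode",
# which emits every plausible word, including overlapping ones.
tokens = list(jieba.cut("我来到北京清华大学", cut_all=True))
print(tokens)  # e.g. ['我', '来到', '北京', '清华', '清华大学', '华大', '大学']

# Counter tallies how often each token appears, in one pass.
print(Counter(tokens).most_common(3))
```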
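As a sanity check on the hand-rolled computation, the same similarity can be approximated with scikit-learn. This is a minimal sketch under stated assumptions: scikit-learn is installed and the same 1.txt and 2.txt inputs exist. It applies no stopword filtering or top-k cut, and TfidfVectorizer uses a smoothed IDF, so the score will be comparable but not identical to the one printed above.

```python
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Pre-segment with jieba, then tokenize on whitespace so Chinese tokens
# (including single characters) survive vectorization.
docs = [' '.join(jieba.cut(open(p, encoding='utf-8').read()))
        for p in ('1.txt', '2.txt')]

tfidf = TfidfVectorizer(tokenizer=str.split, token_pattern=None).fit_transform(docs)
print('Cosine similarity: %.3f' % cosine_similarity(tfidf[0], tfidf[1])[0, 0])
```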