```
import math
import operator

import jieba
import jieba.posseg


def Stop_words():
    # Read the stop-word list, one word per line
    stopword = []
    with open('C:/Users/Administrator/Desktop/data/stopword.txt', encoding='utf8') as f:
        for line in f:
            stopword.append(line.replace('\n', ''))  # str.replace works much like re.sub here
    return stopword


# POS-tag with jieba, then filter the current document by POS and stop words
def Filter_word(text):
    filter_word = []
    stopword = Stop_words()
    for word, flag in jieba.posseg.cut(text):
        # startswith checks whether the POS tag begins with 'n' (a noun)
        if not flag.startswith('n'):
            continue
        if word not in stopword and len(word) > 1:
            filter_word.append(word)
    return filter_word


# Filter the whole document collection by POS and stop words
def Filter_words(data_path=r'C:/Users/Administrator/Desktop/data/corpus.txt'):
    document = []
    stopword = Stop_words()  # load once, not once per line
    for line in open(data_path, 'r', encoding='utf8'):
        filter_words = []
        for word, flag in jieba.posseg.cut(line.strip()):
            if not flag.startswith('n'):
                continue
            if word not in stopword and len(word) > 1:
                filter_words.append(word)
        document.append(filter_words)
    return document


def tf_idf():
    # `text` must hold the raw target document; it is assumed to be
    # defined at module level before this function runs
    tf_dict = {}
    idf_dict = {}
    filter_word = Filter_word(text)
    # TF: term count normalized by the character length of the raw text
    for word in filter_word:
        tf_dict[word] = tf_dict.get(word, 0) + 1
    for word in tf_dict:
        tf_dict[word] = tf_dict[word] / len(text)
    # IDF: log(total documents / (documents containing the word + 1))
    document = Filter_words()
    doc_total = len(document)
    for doc in document:
        for word in set(doc):
            idf_dict[word] = idf_dict.get(word, 0) + 1
    for word in idf_dict:
        idf_dict[word] = math.log(doc_total / (idf_dict[word] + 1))
    # TF-IDF: the product of the two weights
    tf_idf_dict = {}
    for word in filter_word:
        if word not in idf_dict:
            idf_dict[word] = 0
        tf_idf_dict[word] = tf_dict[word] * idf_dict[word]
    return tf_idf_dict


tf_idf_dict = tf_idf()
keyword = 6
print('TF-IDF model results:')
for key, value in sorted(tf_idf_dict.items(), key=operator.itemgetter(1), reverse=True)[:keyword]:
    print(key, end=' ')
print('\n')
```
This code implements a TF-IDF model for weighting the keywords in a piece of text. Stop_words reads the stop-word list, Filter_word filters a single document (keeping only nouns that are not stop words), and Filter_words applies the same filtering to the whole document collection. tf_idf computes the TF-IDF weight of each word, and the script finally prints the `keyword` highest-weighted keywords.
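To see the two weights in isolation, here is a minimal, self-contained sketch of the same formulas on a hypothetical toy corpus (made-up tokens; note it normalizes TF by token count, whereas the code above divides by the character length of the raw text):

```
import math

# Hypothetical, already-tokenized "documents"
docs = [['猫', '睡觉'], ['猫', '吃饭'], ['狗', '吃饭']]
target = docs[0]

# TF: term count over document length
tf = {w: target.count(w) / len(target) for w in set(target)}

# IDF: log(total documents / (documents containing the word + 1))
df = {}
for doc in docs:
    for w in set(doc):
        df[w] = df.get(w, 0) + 1
idf = {w: math.log(len(docs) / (df[w] + 1)) for w in df}

# TF-IDF for the target document
print({w: tf[w] * idf[w] for w in target})
# '睡觉' (in 1 doc) scores ~0.20; '猫' (in 2 of 3 docs) scores 0.0
```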
Related questions
Explain this code:

```
import re
import jieba

def load_stopwords(file_path):
    stop_words = []
    with open(file_path, encoding='UTF-8') as words:
        stop_words.extend([i.strip() for i in words.readlines()])
    return stop_words

def review_to_text(review):
    # stopword_path is assumed to be defined elsewhere in the original script
    stop_words = load_stopwords(stopword_path)
    # Strip characters other than Chinese and ASCII letters
    review = re.sub("[^\u4e00-\u9fa5^a-z^A-Z]", '', review)
    review = jieba.cut(review)
    # Remove stop words
    if stop_words:
        all_stop_words = set(stop_words)
        words = [w for w in review if w not in all_stop_words]
    # print(words)
    return words
```
This code contains two functions:
1. load_stopwords(file_path): loads the Chinese stop-word list. It works as follows:
a. Opens the file at the given path using a with-open block.
b. Strips each line and appends the resulting stop word to the stop_words list.
c. Returns the stop_words list.
2. review_to_text(review): converts a review string into a list of tokens. It works as follows:
a. Calls load_stopwords to load the Chinese stop-word list.
b. Uses a regular expression to strip non-Chinese, non-letter characters from the text (see the check below).
c. Segments the remaining text with the jieba library.
d. Filters stop words out of the token stream.
e. Returns the resulting list of words.
Note that the character class `[^\u4e00-\u9fa5^a-z^A-Z]` keeps Chinese characters and the ASCII letters a-z/A-Z, so re.sub only removes digits, punctuation, and whitespace; English words survive the substitution. The stop-word filtering uses a list comprehension that keeps every word not found in the stop-word set. One caveat: if the stop-word list is empty, the if branch never runs, `words` is never assigned, and the final return raises a NameError.
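A quick standalone check (with a hypothetical sample string) confirms that behavior:

```
import re

sample = "这部电影 very good!!! 打 5 分"
print(re.sub("[^\u4e00-\u9fa5^a-z^A-Z]", '', sample))
# -> 这部电影verygood打分 : letters survive; digits, spaces, punctuation are gone
```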
```
import jieba
def word_extract():
    # Read the file
    corpus = []
    path = 'D:/自然语言处理/第2章/data/金庸-白马啸西风.txt'
    content = ''
    for line in open(path, 'r', encoding='gbk', errors='ignore'):
    line = line.strip()
    content += line
    corpus.append(content)
    # Load the stop words
    stop_words = []
    path = 'D:/自然语言处理/第4章/data/stopword.txt'
    for line in open(path, encoding='utf8'):
    line = line.strip()
    stop_words.append(line)
    # Segment with jieba
    split_words = []
    word_list = jieba.cut(corpus[0])
    for word in word_list:
    if word not in stop_words:
    split_words.append(word)
```

What is wrong with this code?
The indentation is wrong: the bodies of the for loops and the if statement sit at the same level as the statements that introduce them, so Python raises an IndentationError before the program can run. Each body needs to move one level (one tab or four spaces) to the right; it is also worth returning split_words at the end so the caller gets the result. The corrected version:
```
import jieba

def word_extract():
    # Read the file
    corpus = []
    path = 'D:/自然语言处理/第2章/data/金庸-白马啸西风.txt'
    content = ''
    for line in open(path, 'r', encoding='gbk', errors='ignore'):
        line = line.strip()
        content += line
    corpus.append(content)
    # Load the stop words
    stop_words = []
    path = 'D:/自然语言处理/第4章/data/stopword.txt'
    for line in open(path, encoding='utf8'):
        line = line.strip()
        stop_words.append(line)
    # Segment with jieba
    split_words = []
    word_list = jieba.cut(corpus[0])
    for word in word_list:
        if word not in stop_words:
            split_words.append(word)
    return split_words
```
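For completeness, a hypothetical call, assuming both data files exist at the paths above:

```
words = word_extract()
print(words[:10])  # the first ten tokens left after stop-word filtering
```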