结合了LDA主题模型、Word2Vec词向量模型的TextRank关键词抽取算法Python代码
时间: 2024-02-05 13:10:24 浏览: 97
以下是结合了LDA主题模型、Word2Vec词向量模型的TextRank关键词抽取算法的Python代码:
```python
import jieba
import gensim
from gensim import corpora, models
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
def load_stopwords(path):
    """Load stopwords from a file, one word per line.

    :param path: path to the stopword file (UTF-8 encoded, one word per line)
    :return: list of stopwords; blank lines are skipped
    """
    with open(path, 'r', encoding='utf-8') as f:
        # Strip surrounding whitespace and drop empty lines, so a blank
        # line never produces '' as a (meaningless) stopword entry.
        return [line.strip() for line in f if line.strip()]
def get_sentences(text):
    """Split raw text into sentences.

    Splitting is purely lexical: first on newlines, then on the Chinese
    full stop '。'.  Empty fragments are discarded.

    :param text: raw text content
    :return: list of non-empty sentence strings
    """
    fragments = (
        piece.strip()
        for line in text.split('\n')
        for piece in line.strip().split('。')
    )
    return [s for s in fragments if s]
def segment(sentence, stopwords):
    """Tokenize a sentence with jieba and filter out stopwords.

    :param sentence: sentence string
    :param stopwords: collection of stopwords to drop (membership-tested)
    :return: list of remaining non-empty tokens, in original order
    """
    tokens = (tok.strip() for tok in jieba.cut(sentence))
    return [tok for tok in tokens if tok and tok not in stopwords]
def get_word2vec_model(text, size=100, window=5, min_count=5, workers=4):
    """Train a Word2Vec model on the text.

    Each non-empty line of *text* is tokenized with ``segment`` — using the
    module-level ``stopwords`` list (NOTE(review): a hidden global
    dependency; must be defined before this function is called) — and fed
    to Word2Vec as one training sentence.

    :param text: raw text content
    :param size: word-vector dimensionality
    :param window: context window size
    :param min_count: minimum word frequency
    :param workers: number of worker threads
    :return: trained ``gensim.models.Word2Vec`` model
    """
    sentences = [
        segment(line.strip(), stopwords)
        for line in text.split('\n')
        if line.strip()
    ]
    # gensim >= 4.0 renamed the ``size`` keyword to ``vector_size``; the
    # original call crashed with TypeError on modern gensim.  Try the
    # modern name first and fall back for gensim 3.x installs.
    try:
        return gensim.models.Word2Vec(
            sentences, vector_size=size, window=window,
            min_count=min_count, workers=workers)
    except TypeError:
        return gensim.models.Word2Vec(
            sentences, size=size, window=window,
            min_count=min_count, workers=workers)
def get_lda_model(text, num_topics=8, passes=10):
    """Train an LDA topic model on the text.

    Each non-empty line is tokenized with ``segment`` (using the
    module-level ``stopwords`` list) and treated as one document.

    :param text: raw text content
    :param num_topics: number of topics
    :param passes: number of training passes over the corpus
    :return: tuple of (trained LDA model, bag-of-words corpus)
    """
    docs = [
        segment(line.strip(), stopwords)
        for line in text.split('\n')
        if line.strip()
    ]
    dictionary = corpora.Dictionary(docs)
    bow_corpus = [dictionary.doc2bow(doc) for doc in docs]
    lda = models.ldamodel.LdaModel(
        corpus=bow_corpus, num_topics=num_topics,
        id2word=dictionary, passes=passes)
    return lda, bow_corpus
def get_topic_word_matrix(lda_model, num_topics, num_words):
    """Build a topic-to-word-id matrix from an LDA model.

    Row ``i`` holds the dictionary ids (not probabilities) of the top
    ``num_words`` words of topic ``i``.  If a topic yields fewer than
    ``num_words`` terms, the remaining cells stay 0 — the original code
    indexed past the returned list and raised IndexError in that case.

    :param lda_model: trained LDA model exposing ``get_topic_terms``
    :param num_topics: number of topics (matrix rows)
    :param num_words: number of top words per topic (matrix columns)
    :return: (num_topics, num_words) ndarray of word ids
    """
    topic_word_matrix = np.zeros((num_topics, num_words))
    for i in range(num_topics):
        # get_topic_terms returns [(word_id, probability), ...]; only
        # iterate over what actually came back.
        terms = lda_model.get_topic_terms(i, topn=num_words)
        for j, (word_id, _prob) in enumerate(terms):
            topic_word_matrix[i][j] = word_id
    return topic_word_matrix
def get_sentence_topic_vector(sentence, lda_model, dictionary, num_topics):
    """Infer the topic distribution of a single sentence.

    :param sentence: sentence string
    :param lda_model: trained LDA model
    :param dictionary: gensim dictionary used to build the training corpus
    :param num_topics: number of topics (length of the returned vector)
    :return: ndarray of topic probabilities; topics absent from the
        model's output stay 0
    """
    bow = dictionary.doc2bow(segment(sentence, stopwords))
    dist = dict(lda_model[bow])
    return np.array([dist.get(t, 0.0) for t in range(num_topics)])
def get_similarity_matrix(sentences, word2vec_model):
    """Compute the pairwise cosine-similarity matrix between sentences.

    A sentence is represented by the mean of its in-vocabulary word
    vectors.  Improvements over the original:

    * each sentence is segmented and averaged exactly once, instead of
      being recomputed inside the double loop (O(n^2) segment calls);
    * a sentence with no in-vocabulary words yields zero similarity to
      everything, instead of propagating NaN from ``np.mean([])``.

    :param sentences: list of sentence strings
    :param word2vec_model: trained Word2Vec model supporting
        ``word in model`` / ``model[word]`` (gensim 3.x style access)
    :return: symmetric (n, n) ndarray; the diagonal stays 0 as before
    """
    n = len(sentences)
    # Precompute one mean vector per sentence; None marks "no known words".
    vectors = []
    for sent in sentences:
        word_vecs = [word2vec_model[word]
                     for word in segment(sent, stopwords)
                     if word in word2vec_model]
        vectors.append(np.mean(word_vecs, axis=0) if word_vecs else None)
    similarity_matrix = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            if vectors[i] is None or vectors[j] is None:
                continue  # leave the similarity at 0.0
            sim = cosine_similarity([vectors[i]], [vectors[j]]).item()
            similarity_matrix[i][j] = sim
            similarity_matrix[j][i] = sim
    return similarity_matrix
def get_textrank_score(sentences, num_topics, lda_model, word2vec_model):
    """Score sentences by combining TextRank with LDA topic weights.

    :param sentences: list of sentence strings
    :param num_topics: number of LDA topics
    :param lda_model: trained LDA model (its ``id2word`` is used as the dictionary)
    :param word2vec_model: trained Word2Vec model (for sentence similarity)
    :return: ndarray of one combined score per sentence
    """
    dictionary = lda_model.id2word
    # Number of top words fetched per topic for the topic-word matrix.
    num_words = 20
    topic_word_matrix = get_topic_word_matrix(lda_model, num_topics, num_words)
    sentence_topic_vectors = np.zeros((len(sentences), num_topics))
    for i in range(len(sentences)):
        sentence_topic_vectors[i] = get_sentence_topic_vector(sentences[i], lda_model, dictionary, num_topics)
    similarity_matrix = np.zeros((len(sentences), len(sentences)))
    similarity_matrix = get_similarity_matrix(sentences, word2vec_model)
    # TextRank iteration: fixed 100 rounds of the PageRank-style update
    # with damping factor d = 0.85 (no convergence check).
    max_iter = 100
    d = 0.85
    scores = np.ones(len(sentences))
    for i in range(max_iter):
        tmp_scores = np.zeros(len(sentences))
        for j in range(len(sentences)):
            # NOTE(review): classic TextRank normalizes each neighbor's
            # contribution by its outgoing edge weights; this variant
            # uses raw similarity sums — presumably intentional, confirm.
            tmp_scores[j] = (1 - d) + d * np.sum([similarity_matrix[j][k] * scores[k] for k in range(len(sentences))])
        scores = tmp_scores
    # Combine the TextRank score with a topic-model score.
    final_scores = np.zeros(len(sentences))
    for i in range(len(sentences)):
        for j in range(num_topics):
            # NOTE(review): this counts how often the *sentence index* i
            # appears among topic j's top word ids — sentence indices and
            # dictionary word ids are unrelated spaces, so this looks like
            # a bug rather than a meaningful topical weight; verify intent
            # before relying on the combined score.
            final_scores[i] += topic_word_matrix[j].tolist().count(i) * sentence_topic_vectors[i][j]
    # Weighted blend: d * topic score + (1 - d) * TextRank score.
    final_scores = d * final_scores + (1 - d) * scores
    return final_scores
# Load the stopword list (module-level global consumed by segment()
# and, through it, by the model-training helpers above).
stopwords = load_stopwords('stopwords.txt')
# Load the text to be processed.
with open('text.txt', 'r', encoding='utf-8') as f:
    text = f.read()
# Split the text into sentences.
sentences = get_sentences(text)
# Train the Word2Vec model on the same text.
word2vec_model = get_word2vec_model(text)
# Train the LDA topic model.
lda_model, corpus = get_lda_model(text)
# Score every sentence with the combined TextRank / topic-model score.
num_topics = 8
scores = get_textrank_score(sentences, num_topics, lda_model, word2vec_model)
# Take the top-scoring items, highest score first.
# NOTE(review): despite the variable name "keywords", these are whole
# sentences — the pipeline extracts key sentences, not individual words.
num_keywords = 10
keywords = []
idx = np.argsort(scores)[::-1][:num_keywords]
for i in idx:
    keywords.append(sentences[i])
print(keywords)
```
其中,text.txt为待处理的文本文件,stopwords.txt为停用词文件,需要自行准备。代码中num_topics、num_words、num_keywords等参数需要根据实际情况进行调整。需要注意的是,上述代码最终输出的是得分最高的句子(即关键句)而非单个词语;若需抽取单个关键词,应在分词结果(词粒度)上构建相似度图并应用同样的打分流程。