Implementing a Hidden Markov Model in Python
Below is a Python implementation of a hidden Markov model:
```python
import numpy as np

class HMM:
    def __init__(self, states, observations, start_prob, transition_prob, emission_prob):
        self.states = states                    # list of hidden state names
        self.observations = observations        # list of observation symbols
        self.start_prob = start_prob            # initial state distribution, shape (N,)
        self.transition_prob = transition_prob  # state transition matrix, shape (N, N)
        self.emission_prob = emission_prob      # emission matrix, shape (N, M)

    def forward(self, obs):
        """Forward algorithm: alpha[t, j] = P(o_1, ..., o_t, state_t = j)."""
        alpha = np.zeros((len(obs), len(self.states)))
        alpha[0] = self.start_prob * self.emission_prob[:, self.observations.index(obs[0])]
        for t in range(1, len(obs)):
            for j in range(len(self.states)):
                alpha[t, j] = np.sum(alpha[t-1] * self.transition_prob[:, j]) * self.emission_prob[j, self.observations.index(obs[t])]
        return alpha

    def backward(self, obs):
        """Backward algorithm: beta[t, i] = P(o_{t+1}, ..., o_T | state_t = i)."""
        beta = np.zeros((len(obs), len(self.states)))
        beta[-1] = 1
        for t in range(len(obs)-2, -1, -1):
            for i in range(len(self.states)):
                beta[t, i] = np.sum(beta[t+1] * self.transition_prob[i, :] * self.emission_prob[:, self.observations.index(obs[t+1])])
        return beta

    def viterbi(self, obs):
        """Viterbi algorithm: most likely hidden state sequence (as state indices)."""
        delta = np.zeros((len(obs), len(self.states)))
        psi = np.zeros((len(obs), len(self.states)), dtype=int)
        delta[0] = self.start_prob * self.emission_prob[:, self.observations.index(obs[0])]
        for t in range(1, len(obs)):
            for j in range(len(self.states)):
                tmp = delta[t-1] * self.transition_prob[:, j] * self.emission_prob[j, self.observations.index(obs[t])]
                psi[t, j] = np.argmax(tmp)
                delta[t, j] = np.max(tmp)
        # Backtrack from the most likely final state
        path = [np.argmax(delta[-1])]
        for t in range(len(obs)-1, 0, -1):
            path.insert(0, psi[t, path[0]])
        return path
    def train(self, obs_seq, iterations=100):
        """Baum-Welch (EM) re-estimation of the model parameters from observation sequences."""
        n_states = len(self.states)
        n_symbols = len(self.observations)
        for it in range(iterations):
            start_num = np.zeros(n_states)
            trans_num = np.zeros((n_states, n_states))
            trans_den = np.zeros(n_states)
            emit_num = np.zeros((n_states, n_symbols))
            emit_den = np.zeros(n_states)
            for obs in obs_seq:
                alpha = self.forward(obs)
                beta = self.backward(obs)
                likelihood = np.sum(alpha[-1])      # P(O | lambda)
                gamma = alpha * beta / likelihood   # gamma[t, i] = P(state_t = i | O)
                xi = np.zeros((len(obs)-1, n_states, n_states))
                for t in range(len(obs)-1):
                    xi[t] = alpha[t].reshape((-1, 1)) * self.transition_prob * self.emission_prob[:, self.observations.index(obs[t+1])].reshape((1, -1)) * beta[t+1].reshape((1, -1))
                    xi[t] /= np.sum(xi[t])
                start_num += gamma[0]
                trans_num += np.sum(xi, axis=0)
                trans_den += np.sum(gamma[:-1], axis=0)
                emit_den += np.sum(gamma, axis=0)
                for t, o in enumerate(obs):
                    emit_num[:, self.observations.index(o)] += gamma[t]
            self.start_prob = start_num / len(obs_seq)
            self.transition_prob = trans_num / trans_den.reshape((-1, 1))
            self.emission_prob = emit_num / emit_den.reshape((-1, 1))
```
Here, `states` and `observations` are the lists of hidden states and observation symbols, while `start_prob`, `transition_prob`, and `emission_prob` are the initial, transition, and emission probability arrays. `forward`, `backward`, and `viterbi` implement the forward, backward, and Viterbi algorithms, and `train` estimates the model parameters with the Baum-Welch algorithm.
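For reference, these are the standard posteriors and re-estimation formulas that `train` computes, summed over all training sequences when more than one is given (with $\pi_i$ the initial, $a_{ij}$ the transition, and $b_j(k)$ the emission probabilities):

$$\gamma_t(i)=\frac{\alpha_t(i)\,\beta_t(i)}{P(O\mid\lambda)},\qquad \xi_t(i,j)=\frac{\alpha_t(i)\,a_{ij}\,b_j(o_{t+1})\,\beta_{t+1}(j)}{P(O\mid\lambda)}$$

$$\pi_i=\gamma_1(i),\qquad a_{ij}=\frac{\sum_{t=1}^{T-1}\xi_t(i,j)}{\sum_{t=1}^{T-1}\gamma_t(i)},\qquad b_j(k)=\frac{\sum_{t:\,o_t=k}\gamma_t(j)}{\sum_{t=1}^{T}\gamma_t(j)}$$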
Example usage:
```python
states = ["Healthy", "Fever"]
observations = ["normal", "cold", "dizzy"]
start_prob = np.array([0.6, 0.4])
transition_prob = np.array([[0.7, 0.3], [0.4, 0.6]])
emission_prob = np.array([[0.5, 0.4, 0.1], [0.1, 0.3, 0.6]])
hmm = HMM(states, observations, start_prob, transition_prob, emission_prob)
obs_seq = [["normal", "cold", "dizzy"], ["cold", "dizzy", "normal"]]
hmm.train(obs_seq)
print(hmm.start_prob)
print(hmm.transition_prob)
print(hmm.emission_prob)
```
After training, `start_prob`, `transition_prob`, and `emission_prob` hold the re-estimated parameters; `start_prob` and each row of `transition_prob` and `emission_prob` sum to 1. The exact values depend on the training sequences and the number of iterations, so expect them to have moved away from the initial parameters above.
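The trained model can also be used for decoding and scoring. A minimal sketch, reusing the `hmm` instance and `states` list from the example above: `viterbi` returns the index sequence of the most likely hidden states, and summing the last row of `forward` gives the likelihood of an observation sequence.
```python
obs = ["normal", "cold", "dizzy"]
# Most likely hidden-state sequence under the (re-estimated) parameters
path = hmm.viterbi(obs)
print([states[i] for i in path])
# Total probability of the observation sequence, P(O | lambda)
print(np.sum(hmm.forward(obs)[-1]))
```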