隱馬爾科夫模型 —— hmmlearn 套件(Python)示例

# coding=utf-8
"""Hidden Markov model demo with hmmlearn: the classic box-and-ball example.

Three hidden "box" states emit "red"/"white" ball observations.  The script
demonstrates state decoding (Viterbi), sequence scoring (forward algorithm),
and parameter estimation (Baum-Welch / EM).
"""
import numpy as np
from hmmlearn import hmm

states = ["box 1", "box 2", "box3"]
n_states = len(states)
observations = ["red", "white"]
n_observations = len(observations)

# Initial state distribution (pi), transition matrix (A), emission matrix (B).
start_probability = np.array([0.2, 0.4, 0.4])
transition_probability = np.array([
    [0.5, 0.2, 0.3],
    [0.3, 0.5, 0.2],
    [0.2, 0.3, 0.5]
])
emission_probability = np.array([
    [0.5, 0.5],
    [0.4, 0.6],
    [0.7, 0.3]
])

# NOTE: hmmlearn >= 0.3 renamed the discrete-symbol model to CategoricalHMM.
# The class still named MultinomialHMM now models multinomial *counts* and
# would silently produce wrong results for this example.
model = hmm.CategoricalHMM(n_components=n_states)
model.startprob_ = start_probability
model.transmat_ = transition_probability
model.emissionprob_ = emission_probability

seen_list = [0, 1, 0, 0, 0, 0]
# hmmlearn expects observations as a column vector of shape (n_samples, 1).
seen = np.array([seen_list]).T

# Most likely hidden-state sequence (predict decodes with Viterbi); score()
# computes the log-probability of the observations via the forward algorithm.
box2 = model.predict(seen)
print("The ball picked:", ", ".join(map(lambda x: observations[x], seen_list)))
print("The hidden box", ", ".join(map(lambda x: states[x], box2)))
print(model.score(seen))  # log P(observations | model)

# Explicit Viterbi decoding: also returns the joint log-probability.
print('-' * 100)
logprob, box = model.decode(seen, algorithm="viterbi")
print("The ball picked:", ", ".join(map(lambda x: observations[x], seen_list)))
print("The hidden box", ", ".join(map(lambda x: states[x], box)))

# Parameter estimation with Baum-Welch.  EM only finds a local optimum, so
# run it several times (each fit re-initializes randomly) and compare scores.
print('-' * 100)
# Multiple training sequences must be concatenated into one (n_samples, 1)
# array, with per-sequence lengths passed separately.
sequences = [[0, 1, 0, 1], [0, 0, 0, 1], [1, 0, 1, 1]]
X2 = np.concatenate(sequences).reshape(-1, 1)
lengths = [len(s) for s in sequences]
for _ in range(3):
    model2 = hmm.CategoricalHMM(n_components=n_states, n_iter=20, tol=0.01)
    model2.fit(X2, lengths)
    print(model2.startprob_)
    print(model2.transmat_)
    print(model2.emissionprob_)
    print(model2.score(X2, lengths))