# Hidden Markov Models
#
# Author: Ron Weiss
# and Shiqiao Du
# API changes: Jaques Grobler
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.

**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. If nobody volunteers to write documentation and
make it more stable, this module will be removed in version 0.11.
"""

import string
import warnings

import numpy as np

from .utils import check_random_state
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
    GMM, log_multivariate_normal_density, sample_gaussian,
    distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc

__all__ = ['GMMHMM',
           'GaussianHMM',
           'MultinomialHMM',
           'decoder_algorithms',
           'normalize']

ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")


def normalize(A, axis=None):
    """Normalize the input array so that it sums to 1.

    WARNING: modifies the input array A in-place.

    Parameters
    ----------
    A : array, shape (n_samples, n_features)
        Non-normalized input data.

    axis : int
        Dimension along which normalization is performed.

    Returns
    -------
    normalized_A : array, shape (n_samples, n_features)
        A with values normalized (summing to 1) along the prescribed axis.
    """
    A += EPS
    Asum = A.sum(axis)
    if axis and A.ndim > 1:
        # Make sure we don't divide by zero.
        Asum[Asum == 0] = 1
        shape = list(A.shape)
        shape[axis] = 1
        Asum.shape = shape
    return A / Asum
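
# A quick illustration of normalize() (not part of the original module).
# Because EPS is added to every entry before summing, an all-zero row is
# mapped to a uniform distribution instead of triggering a division by zero:
#
#     >>> a = np.array([1.0, 3.0])
#     >>> normalize(a)                      # approximately [0.25, 0.75]
#     >>> A = np.array([[1.0, 1.0], [0.0, 0.0]])
#     >>> normalize(A, axis=1)[1]           # approximately [0.5, 0.5]
#
# Both calls modify their argument in-place, per the warning above.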


class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    See the instance documentation for details specific to a
    particular object.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.

    startprob_prior : array, shape (`n_components`,)
        Initial state occupation prior distribution.

    algorithm : string, one of the decoder_algorithms
        Decoder algorithm.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training process.
        Can contain any combination of 's' for startprob, 't' for
        transmat, 'm' for means, and 'c' for covars, etc.  Defaults to
        all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to training.
        Can contain any combination of 's' for startprob, 't' for
        transmat, 'm' for means, and 'c' for covars, etc.  Defaults to
        all parameters.

    See Also
    --------
    GMM : Gaussian mixture model
    """

    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms.  Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self._algorithm = algorithm
        self.random_state = random_state

    def eval(self, obs):
        """Compute the log probability under the model and compute posteriors.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        logprob : float
            Log likelihood of the sequence `obs`.

        posteriors : array_like, shape (n, n_components)
            Posterior probabilities of each state for each observation.

        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float32).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors
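
    # The posterior computation above normalizes in log space.  For one row
    # g of gamma = fwdlattice + bwdlattice it is equivalent to this sketch
    # (illustrative):
    #
    #     g = np.array([-1000.0, -1001.0])   # log-domain scores
    #     p = np.exp(g - logsumexp(g))       # approx. [0.731, 0.269]
    #
    # Exponentiating g directly would underflow to zero for every entry;
    # subtracting logsumexp(g) first keeps the result well scaled.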

    def score(self, obs):
        """Compute the log probability under the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Log likelihood of `obs`.

        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        decode : Find most likely state sequence corresponding to `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob

    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM.

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence

    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the maximum a posteriori estimation.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM.

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        _, posteriors = self.eval(obs)
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence

    def decode(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.

        Uses the selected algorithm for decoding.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        algorithm : string, one of the `decoder_algorithms`
            Decoder algorithm to be used.

        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM.

        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        eval : Compute the log probability under the model and posteriors
        score : Compute the log probability under the model
        """
        if self._algorithm in decoder_algorithms:
            algorithm = self._algorithm
        elif algorithm in decoder_algorithms:
            algorithm = algorithm
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence

    def predict(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence

    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            List of n_features-dimensional data points.  Each row
            corresponds to a single data point.

        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the
            model.
        """
        _, posteriors = self.eval(obs)
        return posteriors
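
    # Decoding sketch (illustrative, assumes a fitted model `model`).  Note
    # that decode() prefers the algorithm stored on the estimator: the
    # per-call `algorithm` argument is only consulted when the stored one
    # is not a valid decoder name.
    #
    #     model.algorithm = "viterbi"
    #     logprob, states = model.decode(obs)   # joint-probability path
    #     model.algorithm = "map"
    #     states = model.predict(obs)           # per-frame MAP states
    #
    # The two criteria can disagree: Viterbi maximizes the probability of
    # the whole state sequence, while MAP maximizes each frame's posterior
    # independently and may produce transitions the model forbids.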

    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.

        random_state : RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used.

        Returns
        -------
        (obs, hidden_states)

        obs : array_like, length `n`
            List of samples.

        hidden_states : array_like, length `n`
            List of hidden states.
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)

        # Initial state.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]

        for _ in xrange(n - 1):
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))

        return np.array(obs), np.array(hidden_states, dtype=int)
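
    # Each discrete draw above is inverse-CDF sampling: with cumulative
    # probabilities cdf and a uniform draw u, the selected index is the
    # first position where cdf exceeds u.  Minimal sketch (illustrative):
    #
    #     cdf = np.cumsum([0.2, 0.5, 0.3])   # [0.2, 0.7, 1.0]
    #     (cdf > 0.65).argmax()              # -> 1, the second state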

    def fit(self, obs, **kwargs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, set the keyword argument
        init_params to the empty string ''. Likewise, if you would like
        just to do an initialization, call this method with n_iter=0.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences (shape (n_i,
            n_features)).

        Notes
        -----
        In general, `logprob` should be non-decreasing unless aggressive
        pruning is used.  Decreasing `logprob` is generally a sign of
        overfitting (e.g. a covariance parameter getting too small).  You
        can fix this by getting more training data, or decreasing
        `covars_prior`.

        **Please note that setting parameters in the `fit` method is
        deprecated and will be removed in the next release. Set it on
        initialization instead.**
        """
        if kwargs:
            warnings.warn("Setting parameters in the 'fit' method is "
                          "deprecated and will be removed in 0.14. Set it "
                          "on initialization instead.", DeprecationWarning,
                          stacklevel=2)
            # Initialisation in case the user still passes parameters to
            # fit, so things don't break.
            for name in ('n_iter', 'thresh', 'params', 'init_params'):
                if name in kwargs:
                    setattr(self, name, kwargs[name])

        if self.algorithm not in decoder_algorithms:
            self._algorithm = "viterbi"

        self._init(obs, self.init_params)

        logprob = []
        for i in xrange(self.n_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)

            # Check for convergence.
            if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
                break

            # Maximization step
            self._do_mstep(stats, self.params)

        return self
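
    # Training sketch (illustrative; seq1 and seq2 are hypothetical arrays
    # of shape (n_i, n_features)):
    #
    #     model = GaussianHMM(n_components=3, n_iter=100, thresh=1e-4)
    #     model.fit([seq1, seq2])
    #
    # To resume EM from the current parameters instead of re-initializing,
    # construct the estimator with init_params='' as described above.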

    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm

    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the "
                             "decoder_algorithms")
        self._algorithm = algorithm

    algorithm = property(_get_algorithm, _set_algorithm)

    def _get_startprob(self):
        """Mixing startprob for each state."""
        return np.exp(self._log_startprob)

    def _set_startprob(self, startprob):
        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=np.float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(startprob):
            normalize(startprob)

        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        self._log_startprob = np.log(np.asarray(startprob).copy())

    startprob_ = property(_get_startprob, _set_startprob)

    def _get_transmat(self):
        """Matrix of transition probabilities."""
        return np.exp(self._log_transmat)

    def _set_transmat(self, transmat):
        if transmat is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(transmat):
            normalize(transmat, axis=1)

        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')

        self._log_transmat = np.log(np.asarray(transmat).copy())
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF

    transmat_ = property(_get_transmat, _set_transmat)

    def _do_viterbi_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence

    def _do_forward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
        return logsumexp(fwdlattice[-1]), fwdlattice

    def _do_backward_pass(self, framelogprob):
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
        return bwdlattice
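
    # For reference, _hmmc._forward fills fwdlattice with the standard
    # log-space forward recursion; a pure-numpy equivalent would look
    # roughly like this sketch (illustrative, unoptimized):
    #
    #     fwdlattice[0] = self._log_startprob + framelogprob[0]
    #     for t in xrange(1, n_observations):
    #         for j in xrange(n_components):
    #             fwdlattice[t, j] = logsumexp(
    #                 fwdlattice[t - 1] + self._log_transmat[:, j]
    #             ) + framelogprob[t, j]
    #
    # The sequence log likelihood is then logsumexp(fwdlattice[-1]).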

    def _compute_log_likelihood(self, obs):
        pass

    def _generate_sample_from_state(self, state, random_state=None):
        pass

    def _init(self, obs, params):
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)

    # Methods used by self.fit()

    def _initialize_sufficient_statistics(self):
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice,
                                          bwdlattice, params):
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            lneta = np.zeros((n_observations - 1, n_components,
                              n_components))
            lnP = logsumexp(fwdlattice[-1])
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice,
                                 framelogprob, lnP, lneta)
            stats["trans"] += np.exp(logsumexp(lneta, 0))

    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # pp. 443-445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0

        if 's' in params:
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'],
                           1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'],
                           1e-20),
                axis=1)
            self.transmat_ = transmat_
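
# Subclassing sketch (illustrative): a new emission model only has to fill
# in the hooks listed in the class comment of _BaseHMM.  For example, a
# hypothetical HMM with a fixed emission table could start as:
#
#     class FixedEmissionHMM(_BaseHMM):
#         def __init__(self, log_emission, **kwargs):
#             _BaseHMM.__init__(self, **kwargs)
#             self.log_emission = log_emission    # (n_symbols, n_components)
#
#         def _compute_log_likelihood(self, obs):
#             return self.log_emission[obs]       # (n, n_components)
#
# Without the remaining hooks (_generate_sample_from_state and the
# sufficient-statistics machinery), such a class supports scoring and
# decoding but not sampling or fitting.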


class GaussianHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian emissions.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    n_components : int
        Number of states.

    ``_covariance_type`` : string
        String describing the type of covariance parameters to use.  Must
        be one of 'spherical', 'tied', 'diag', 'full'.  Defaults to 'diag'.

    Attributes
    ----------
    ``_covariance_type`` : string
        String describing the type of covariance parameters used by the
        model.  Must be one of 'spherical', 'tied', 'diag', 'full'.

    n_features : int
        Dimensionality of the Gaussian emissions.

    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    means : array, shape (`n_components`, `n_features`)
        Mean parameters for each state.

    covars : array
        Covariance parameters for each state.  The shape depends on
        ``_covariance_type``::

            (`n_components`,)                             if 'spherical',
            (`n_features`, `n_features`)                  if 'tied',
            (`n_components`, `n_features`)                if 'diag',
            (`n_components`, `n_features`, `n_features`)  if 'full'

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training process.
        Can contain any combination of 's' for startprob, 't' for
        transmat, 'm' for means, and 'c' for covars, etc.  Defaults to
        all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to training.  Can
        contain any combination of 's' for startprob, 't' for transmat,
        'm' for means, and 'c' for covars, etc.  Defaults to all
        parameters.

    Examples
    --------
    >>> from sklearn.hmm import GaussianHMM
    >>> GaussianHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...

    See Also
    --------
    GMM : Gaussian mixture model
    """

    def __init__(self, n_components=1, covariance_type='diag',
                 startprob=None, transmat=None, startprob_prior=None,
                 transmat_prior=None, algorithm="viterbi",
                 means_prior=None, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 random_state=None, n_iter=10, thresh=1e-2,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters):
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          thresh=thresh, params=params,
                          init_params=init_params)

        self._covariance_type = covariance_type
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('bad covariance_type')

        self.means_prior = means_prior
        self.means_weight = means_weight

        self.covars_prior = covars_prior
        self.covars_weight = covars_weight

    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type

    def _get_means(self):
        """Mean parameters for each state."""
        return self._means_

    def _set_means(self, means):
        means = np.asarray(means)
        if (hasattr(self, 'n_features')
                and means.shape != (self.n_components, self.n_features)):
            raise ValueError('means must have shape '
                             '(n_components, n_features)')
        self._means_ = means.copy()
        self.n_features = self._means_.shape[1]

    means_ = property(_get_means, _set_means)

    def _get_covars(self):
        """Return covars as a full matrix."""
        if self._covariance_type == 'full':
            return self._covars_
        elif self._covariance_type == 'diag':
            return [np.diag(cov) for cov in self._covars_]
        elif self._covariance_type == 'tied':
            return [self._covars_] * self.n_components
        elif self._covariance_type == 'spherical':
            return [np.eye(self.n_features) * f for f in self._covars_]

    def _set_covars(self, covars):
        covars = np.asarray(covars)
        _validate_covars(covars, self._covariance_type, self.n_components)
        self._covars_ = covars.copy()

    covars_ = property(_get_covars, _set_covars)

    def _compute_log_likelihood(self, obs):
        return log_multivariate_normal_density(
            obs, self._means_, self._covars_, self._covariance_type)

    def _generate_sample_from_state(self, state, random_state=None):
        if self._covariance_type == 'tied':
            cv = self._covars_
        else:
            cv = self._covars_[state]
        return sample_gaussian(self._means_[state], cv,
                               self._covariance_type,
                               random_state=random_state)

    def _init(self, obs, params='stmc'):
        super(GaussianHMM, self)._init(obs, params=params)

        if (hasattr(self, 'n_features')
                and self.n_features != obs[0].shape[1]):
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (obs[0].shape[1],
                                              self.n_features))

        self.n_features = obs[0].shape[1]

        if 'm' in params:
            self._means_ = cluster.KMeans(
                n_clusters=self.n_components).fit(obs[0]).cluster_centers_
        if 'c' in params:
            cv = np.cov(obs[0].T)
            if not cv.shape:
                cv.shape = (1, 1)
            self._covars_ = distribute_covar_matrix_to_match_covariance_type(
                cv, self._covariance_type, self.n_components)

    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
                                       self.n_features))
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice,
                                          bwdlattice, params):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)

        if 'm' in params or 'c' in params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)

        if 'c' in params:
            if self._covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self._covariance_type in ('tied', 'full'):
                for t, o in enumerate(obs):
                    obsobsT = np.outer(o, o)
                    for c in xrange(self.n_components):
                        stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT

    def _do_mstep(self, stats, params):
        super(GaussianHMM, self)._do_mstep(stats, params)

        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # pp. 443-445
        denom = stats['post'][:, np.newaxis]
        if 'm' in params:
            prior = self.means_prior
            weight = self.means_weight
            if prior is None:
                weight = 0
                prior = 0
            self._means_ = (weight * prior + stats['obs']) / (weight + denom)

        if 'c' in params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            if covars_prior is None:
                covars_weight = 0
                covars_prior = 0

            means_prior = self.means_prior
            means_weight = self.means_weight
            if means_prior is None:
                means_weight = 0
                means_prior = 0
            meandiff = self._means_ - means_prior

            if self._covariance_type in ('spherical', 'diag'):
                cv_num = (means_weight * (meandiff) ** 2
                          + stats['obs**2']
                          - 2 * self._means_ * stats['obs']
                          + self._means_ ** 2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = (covars_prior + cv_num) / cv_den
                if self._covariance_type == 'spherical':
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self._covariance_type in ('tied', 'full'):
                cvnum = np.empty((self.n_components, self.n_features,
                                  self.n_features))
                for c in xrange(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self._means_[c])

                    cvnum[c] = (means_weight * np.outer(meandiff[c],
                                                        meandiff[c])
                                + stats['obs*obs.T'][c]
                                - obsmean - obsmean.T
                                + np.outer(self._means_[c], self._means_[c])
                                * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self._covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self._covariance_type == 'full':
                    self._covars_ = ((covars_prior + cvnum) /
                                     (cvweight
                                      + stats['post'][:, None, None]))
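
# End-to-end sketch for GaussianHMM (illustrative; the exact fit depends on
# the data and the k-means initialization of the means):
#
#     X = np.concatenate([np.random.randn(100, 2),
#                         np.random.randn(100, 2) + 5.0])
#     model = GaussianHMM(n_components=2, covariance_type='diag')
#     model.fit([X])          # fit() expects a *list* of sequences
#     model.score(X)          # total log likelihood of X
#     model.predict(X)        # most likely hidden-state sequence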


class MultinomialHMM(_BaseHMM):
    """Hidden Markov Model with multinomial (discrete) emissions.

    Attributes
    ----------
    n_components : int
        Number of states in the model.

    n_symbols : int
        Number of possible symbols emitted by the model (in the
        observations).

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    emissionprob : array, shape (`n_components`, `n_symbols`)
        Probability of emitting a given symbol when in each state.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    params : string, optional
        Controls which parameters are updated in the training process.
        Can contain any combination of 's' for startprob, 't' for
        transmat, 'm' for means, and 'c' for covars, etc.  Defaults to
        all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to training.  Can
        contain any combination of 's' for startprob, 't' for transmat,
        'm' for means, and 'c' for covars, etc.  Defaults to all
        parameters.

    Examples
    --------
    >>> from sklearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with multinomial emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)

    def _get_emissionprob(self):
        """Emission probability distribution for each state."""
        return np.exp(self._log_emissionprob)

    def _set_emissionprob(self, emissionprob):
        emissionprob = np.asarray(emissionprob)
        if hasattr(self, 'n_symbols') and \
                emissionprob.shape != (self.n_components, self.n_symbols):
            raise ValueError('emissionprob must have shape '
                             '(n_components, n_symbols)')

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(emissionprob):
            normalize(emissionprob)

        self._log_emissionprob = np.log(emissionprob)
        underflow_idx = np.isnan(self._log_emissionprob)
        self._log_emissionprob[underflow_idx] = NEGINF
        self.n_symbols = self._log_emissionprob.shape[1]

    emissionprob_ = property(_get_emissionprob, _set_emissionprob)

    def _compute_log_likelihood(self, obs):
        return self._log_emissionprob[:, obs].T

    def _generate_sample_from_state(self, state, random_state=None):
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        rand = random_state.rand()
        symbol = (cdf > rand).argmax()
        return symbol

    def _init(self, obs, params='ste'):
        super(MultinomialHMM, self)._init(obs, params=params)
        self.random_state = check_random_state(self.random_state)

        if 'e' in params:
            if not hasattr(self, 'n_symbols'):
                symbols = set()
                for o in obs:
                    symbols = symbols.union(set(o))
                self.n_symbols = len(symbols)
            emissionprob = normalize(
                self.random_state.rand(self.n_components, self.n_symbols),
                1)
            self.emissionprob_ = emissionprob

    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM,
                      self)._initialize_sufficient_statistics()
        stats['obs'] = np.zeros((self.n_components, self.n_symbols))
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice,
                                          bwdlattice, params):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
            params)
        if 'e' in params:
            for t, symbol in enumerate(obs):
                stats['obs'][:, symbol] += posteriors[t]

    def _do_mstep(self, stats, params):
        super(MultinomialHMM, self)._do_mstep(stats, params)
        if 'e' in params:
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(1)[:, np.newaxis])

    def _check_input_symbols(self, obs):
        """Check if `obs` can be used for MultinomialHMM.fit.

        The input must be an array of non-negative integers, and the set
        of observed symbols must form a contiguous range, e.g.
        x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] is not.
        """
        symbols = np.asanyarray(obs).flatten()
        if symbols.dtype.kind != 'i':
            # input symbols must be integers
            return False
        if len(symbols) == 1:
            # input too short
            return False
        if np.any(symbols < 0):
            # input contains negative integers
            return False
        symbols.sort()
        if np.any(np.diff(symbols) > 1):
            # input is discontinuous
            return False
        return True

    def fit(self, obs, **kwargs):
        err_msg = ("Input must be an array of non-negative integers whose "
                   "values form a contiguous range, but %s was given.")

        if not self._check_input_symbols(obs):
            raise ValueError(err_msg % obs)

        return _BaseHMM.fit(self, obs, **kwargs)
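
# Usage sketch for MultinomialHMM (illustrative): observations are
# sequences of integer symbol indices, and fit() enforces the contiguity
# check implemented by _check_input_symbols above.
#
#     model = MultinomialHMM(n_components=2)
#     model.fit([np.array([0, 1, 1, 2, 0, 2])])   # symbols {0, 1, 2}: OK
#     model.fit([np.array([0, 5])])               # raises ValueError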


class GMMHMM(_BaseHMM):
    """Hidden Markov Model with Gaussian mixture emissions.

    Attributes
    ----------
    init_params : string, optional
        Controls which parameters are initialized prior to training.  Can
        contain any combination of 's' for startprob, 't' for transmat,
        'm' for means, and 'c' for covars, etc.  Defaults to all
        parameters.

    params : string, optional
        Controls which parameters are updated in the training process.
        Can contain any combination of 's' for startprob, 't' for
        transmat, 'm' for means, and 'c' for covars, etc.  Defaults to
        all parameters.

    n_components : int
        Number of states in the model.

    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.

    startprob : array, shape (`n_components`,)
        Initial state occupation distribution.

    gmms : array of GMM objects, length `n_components`
        GMM emission distributions for each state.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance.

    n_iter : int, optional
        Number of iterations to perform.

    thresh : float, optional
        Convergence threshold.

    Examples
    --------
    >>> from sklearn.hmm import GMMHMM
    >>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
    ...                         # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    GMMHMM(algorithm='viterbi', covariance_type='diag',...

    See Also
    --------
    GaussianHMM : HMM with Gaussian emissions
    """

    def __init__(self, n_components=1, n_mix=1, startprob=None,
                 transmat=None, startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", gmms=None, covariance_type='diag',
                 covars_prior=1e-2, random_state=None, n_iter=10,
                 thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        """Create a hidden Markov model with GMM emissions.

        Parameters
        ----------
        n_components : int
            Number of states.
        """
        _BaseHMM.__init__(self, n_components, startprob, transmat,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter,
                          thresh=thresh,
                          params=params,
                          init_params=init_params)

        # XXX: Hotfix for n_mix that is incompatible with the scikit's
        # BaseEstimator API
        self.n_mix = n_mix
        self._covariance_type = covariance_type
        self.covars_prior = covars_prior
        self.gmms = gmms
        if gmms is None:
            gmms = []
            for x in xrange(self.n_components):
                if covariance_type is None:
                    g = GMM(n_mix)
                else:
                    g = GMM(n_mix, covariance_type=covariance_type)
                gmms.append(g)
        self.gmms_ = gmms

    # Read-only properties.
    @property
    def covariance_type(self):
        """Covariance type of the model.

        Must be one of 'spherical', 'tied', 'diag', 'full'.
        """
        return self._covariance_type

    def _compute_log_likelihood(self, obs):
        return np.array([g.score(obs) for g in self.gmms_]).T

    def _generate_sample_from_state(self, state, random_state=None):
        return self.gmms_[state].sample(
            1, random_state=random_state).flatten()

    def _init(self, obs, params='stwmc'):
        super(GMMHMM, self)._init(obs, params=params)

        allobs = np.concatenate(obs, 0)

        for g in self.gmms_:
            g.set_params(init_params=params, n_iter=0)
            g.fit(allobs)
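
    # Emission likelihoods come straight from the per-state GMM objects:
    # _compute_log_likelihood above stacks g.score(obs) for every state
    # into an (n, n_components) array.  A custom set of emission models
    # can also be passed in directly (illustrative sketch):
    #
    #     gmms = [GMM(n_components=3, covariance_type='full')
    #             for _ in xrange(2)]
    #     model = GMMHMM(n_components=2, gmms=gmms)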
""" return self._covariance_type def _compute_log_likelihood(self, obs): return np.array([g.score(obs) for g in self.gmms_]).T def _generate_sample_from_state(self, state, random_state=None): return self.gmms_[state].sample(1, random_state=random_state).flatten() def _init(self, obs, params='stwmc'): super(GMMHMM, self)._init(obs, params=params) allobs = np.concatenate(obs, 0) for g in self.gmms_: g.set_params(init_params=params, n_iter=0) g.fit(allobs) def _initialize_sufficient_statistics(self): stats = super(GMMHMM, self)._initialize_sufficient_statistics() stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_] stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_] stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_] return stats def _accumulate_sufficient_statistics(self, stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params): super(GMMHMM, self)._accumulate_sufficient_statistics( stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice, params) for state, g in enumerate(self.gmms_): _, lgmm_posteriors = g.eval(obs) lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis] + np.finfo(np.float).eps) gmm_posteriors = np.exp(lgmm_posteriors) tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type) n_features = g.means_.shape[1] tmp_gmm._set_covars( distribute_covar_matrix_to_match_covariance_type( np.eye(n_features), g.covariance_type, g.n_components)) norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params) if np.any(np.isnan(tmp_gmm.covars_)): raise ValueError stats['norm'][state] += norm if 'm' in params: stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis] if 'c' in params: if tmp_gmm.covariance_type == 'tied': stats['covars'][state] += tmp_gmm.covars_ * norm.sum() else: cvnorm = np.copy(norm) shape = np.ones(tmp_gmm.covars_.ndim) shape[0] = np.shape(tmp_gmm.covars_)[0] cvnorm.shape = shape stats['covars'][state] += tmp_gmm.covars_ * cvnorm def _do_mstep(self, stats, params): super(GMMHMM, self)._do_mstep(stats, params) # All that is left to do is to apply covars_prior to the # parameters updated in _accumulate_sufficient_statistics. for state, g in enumerate(self.gmms_): n_features = g.means_.shape[1] norm = stats['norm'][state] if 'w' in params: g.weights_ = normalize(norm) if 'm' in params: g.means_ = stats['means'][state] / norm[:, np.newaxis] if 'c' in params: if g.covariance_type == 'tied': g.covars_ = ((stats['covars'][state] + self.covars_prior * np.eye(n_features)) / norm.sum()) else: cvnorm = np.copy(norm) shape = np.ones(g.covars_.ndim) shape[0] = np.shape(g.covars_)[0] cvnorm.shape = shape if (g.covariance_type in ['spherical', 'diag']): g.covars_ = (stats['covars'][state] + self.covars_prior) / cvnorm elif g.covariance_type == 'full': eye = np.eye(n_features) g.covars_ = ((stats['covars'][state] + self.covars_prior * eye[np.newaxis]) / cvnorm)