@@ -98,7 +98,11 @@ def partition_data(self):
         ll = np.tile(np.log(pi_j), (self.n, 1))
         for k in range(self.K):
             ll[:, k] += stats.multivariate_normal.logpdf(self.data_z, mu_k[k, :], cov=sigma_k, allow_singular=True)
-        Z = np.array([np.random.multinomial(1, np.exp(ll[n, :] - logsumexp(ll[n, :]))).argmax() for n in range(self.n)])
+
+        ll = np.exp(ll - logsumexp(ll, axis=1, keepdims=True))
+        ll = ll / ll.sum(axis=1, keepdims=True)
+
+        Z = np.array([np.random.multinomial(1, ll[n, :]).argmax() for n in range(self.n)])
         le = LabelEncoder()
         Z = le.fit_transform(Z)
         return Z
@@ -414,7 +418,11 @@ def partition_data(self):
         ll = np.tile(np.log(pi_j), (self.n, 1))
         for k in range(self.K):
             ll[:, k] += stats.multivariate_normal.logpdf(self.data_y, mu_k[k, :], cov=sigma_k, allow_singular=True)
-        Z = np.array([np.random.multinomial(1, np.exp(ll[n, :] - logsumexp(ll[n, :]))).argmax() for n in range(self.n)])
+
+        ll = np.exp(ll - logsumexp(ll, axis=1, keepdims=True))
+        ll = ll / ll.sum(axis=1, keepdims=True)
+
+        Z = np.array([np.random.multinomial(1, ll[n, :]).argmax() for n in range(self.n)])
         prop_Y = np.take_along_axis(ll, Z[:, None], axis=1).sum()
         le = LabelEncoder()
         Z = le.fit_transform(Z)
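
Both hunks make the same change: the per-row softmax previously computed inside the list comprehension, np.exp(ll[n, :] - logsumexp(ll[n, :])), is replaced by one vectorized softmax over all rows, followed by an explicit renormalization so each row sums to exactly 1 before it is handed to np.random.multinomial (which rejects probability vectors whose sum exceeds 1 due to rounding). A minimal standalone sketch of that pattern follows; the toy data, shapes, and the default_rng generator are assumptions for illustration only, not the repository's class attributes (self.data_z / self.data_y, self.n, self.K).

import numpy as np
from scipy import stats
from scipy.special import logsumexp

rng = np.random.default_rng(0)

# Toy setup: n points in 2-D, K Gaussian components (illustrative values only).
n, K = 5, 3
data = rng.normal(size=(n, 2))
pi_j = np.full(K, 1.0 / K)        # mixing weights
mu_k = rng.normal(size=(K, 2))    # component means
sigma_k = np.eye(2)               # shared covariance

# Per-point, per-component log joint: log pi_k + log N(x | mu_k, sigma_k).
ll = np.tile(np.log(pi_j), (n, 1))
for k in range(K):
    ll[:, k] += stats.multivariate_normal.logpdf(data, mu_k[k, :], cov=sigma_k, allow_singular=True)

# Vectorized row-wise softmax: subtract each row's logsumexp, exponentiate,
# then renormalize so floating-point error cannot push a row's sum above 1.
probs = np.exp(ll - logsumexp(ll, axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)

# Sample one component index per data point from its responsibility vector.
Z = np.array([rng.multinomial(1, probs[i]).argmax() for i in range(n)])
print(Z)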