# whether to soft clamp the input value from -value to value
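The soft clamp itself is not shown in this diff. A minimal sketch of the usual tanh-based soft clamp that comment describes, with the function name and `value` parameter assumed rather than taken from the source:

import torch

def soft_clamp(x: torch.Tensor, value: float) -> torch.Tensor:
    # smoothly squash x into (-value, value) instead of hard clamping,
    # so gradients stay nonzero everywhere
    return (x / value).tanh() * value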
@@ -305,6 +319,9 @@ def forward(
        # distribution over all available tokens in the batch

        avg_prob = reduce(per_sample_probs, '... c d -> c d', 'mean')
+
+       avg_prob = maybe_distributed_mean(avg_prob)
+
        codebook_entropy = entropy(avg_prob).mean()

        # 1. entropy will be nudged to be low for each code, to encourage the network to output confident predictions
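Neither `entropy` nor the newly added `maybe_distributed_mean` has its body in this hunk. Assuming the former is a standard Shannon entropy and the latter averages a tensor across `torch.distributed` ranks, minimal sketches might look like:

import torch
import torch.distributed as dist

def entropy(prob, eps = 1e-20):
    # Shannon entropy along the last dimension, clamped for numerical stability
    return (-prob * prob.clamp(min = eps).log()).sum(dim = -1)

def maybe_distributed_mean(t):
    # outside of a distributed run, this is a no-op
    if not (dist.is_available() and dist.is_initialized()):
        return t

    # sum across all ranks, then divide by the world size
    dist.all_reduce(t, op = dist.ReduceOp.SUM)
    return t / dist.get_world_size()

Averaging `avg_prob` across workers before taking its entropy matters because each worker only sees a slice of the batch; the diversity term should be computed over the code distribution of the global batch, not the local one.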
@@ -324,17 +341,7 @@ def forward(
        if self.training and self.commitment_loss_weight > 0.:
-           if self.use_code_agnostic_commit_loss:
-               # credit goes to @MattMcPartlon for sharing this in https://github.com/lucidrains/vector-quantize-pytorch/issues/120#issuecomment-2095089337
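The removed branch housed the code-agnostic commitment loss; what remains after this hunk is presumably the ordinary commitment term. As a reference point only, with variable names assumed rather than taken from this diff, a standard commitment loss looks like:

import torch.nn.functional as F

# pull the encoder output x toward the quantized codes, detaching the
# codes so the loss shapes the encoder rather than the codebook
commit_loss = F.mse_loss(x, quantized.detach())
loss = commit_loss * commitment_loss_weight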