
Commit 492e666

address #160
1 parent 54d29e8 commit 492e666

2 files changed (+5, -2 lines)

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.17.3"
+version = "1.17.4"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }

vector_quantize_pytorch/residual_vq.py

Lines changed: 4 additions & 1 deletion
@@ -149,8 +149,11 @@ def __init__(
         self.quantize_dropout_multiple_of = quantize_dropout_multiple_of # encodec paper proposes structured dropout, believe this was set to 4
 
         # setting up the MLPs for implicit neural codebooks
+
+        self.mlps = None
 
-        self.mlps = ModuleList([MLP(dim = codebook_dim, l2norm_output = first(self.layers).use_cosine_sim, **mlp_kwargs) for _ in range(num_quantizers - 1)])
+        if implicit_neural_codebook:
+            self.mlps = ModuleList([MLP(dim = codebook_dim, l2norm_output = first(self.layers).use_cosine_sim, **mlp_kwargs) for _ in range(num_quantizers - 1)])
 
         # sharing codebook logic