
Commit b449efc

allow for turning off norm in random projection quantizer

1 parent 483ed6a

2 files changed, 3 additions(+), 2 deletions(-)

setup.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 setup(
   name = 'vector_quantize_pytorch',
   packages = find_packages(),
-  version = '1.1.4',
+  version = '1.1.5',
   license='MIT',
   description = 'Vector Quantization - Pytorch',
   long_description_content_type = 'text/markdown',

vector_quantize_pytorch/random_projection_quantizer.py

Lines changed: 2 additions & 1 deletion

@@ -15,6 +15,7 @@ def __init__(
         codebook_size,
         codebook_dim,
         num_codebooks = 1,
+        norm = True,
         **kwargs
     ):
         super().__init__()
@@ -28,7 +29,7 @@ def __init__(
         # in section 3 of https://arxiv.org/abs/2202.01855
         # "The input data is normalized to have 0 mean and standard deviation of 1 ... to prevent collapse"

-        self.norm = nn.LayerNorm(dim, elementwise_affine = False)
+        self.norm = nn.LayerNorm(dim, elementwise_affine = False) if norm else nn.Identity()

         self.vq = VectorQuantize(
             dim = codebook_dim * num_codebooks,
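
For reference, a minimal usage sketch of the new flag. The constructor arguments mirror the parameters visible in the diff; the forward call returning codebook indices follows the library's documented usage, and the concrete dimensions below are illustrative, not from this commit:

import torch
from vector_quantize_pytorch import RandomProjectionQuantizer

quantizer = RandomProjectionQuantizer(
    dim = 512,              # input feature dimension
    codebook_size = 1024,   # number of entries per codebook
    codebook_dim = 256,     # dimension of each codebook entry
    num_codebooks = 1,
    norm = False            # new flag from this commit; True (the default) keeps the affine-free LayerNorm
)

x = torch.randn(1, 1024, 512)   # (batch, sequence, dim)
indices = quantizer(x)          # codebook indices per position

With norm = False, self.norm becomes nn.Identity(), so inputs reach the random projection without the zero-mean, unit-variance normalization described in section 3 of https://arxiv.org/abs/2202.01855.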
