Commit 2681312

able to always have projections in lfq
1 parent 1c77559 commit 2681312

2 files changed: +3 −2 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.14.36"
+version = "1.14.37"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }

vector_quantize_pytorch/lookup_free_quantization.py

Lines changed: 2 additions & 1 deletion
@@ -101,6 +101,7 @@ def __init__(
         keep_num_codebooks_dim = None,
         codebook_scale = 1., # for residual LFQ, codebook scaled down by 2x at each layer
         frac_per_sample_entropy = 1., # make less than 1. to only use a random fraction of the probs for per sample entropy
+        has_projections = None,
         projection_has_bias = True,
         soft_clamp_input_value = None,
         cosine_sim_project_in = False,
@@ -124,7 +125,7 @@ def __init__(
         codebook_dims = codebook_dim * num_codebooks
         dim = default(dim, codebook_dims)

-        has_projections = dim != codebook_dims
+        has_projections = default(has_projections, dim != codebook_dims)

         if cosine_sim_project_in:
             cosine_sim_project_in = default(cosine_sim_project_in_scale, codebook_scale)
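A short usage sketch of what this change enables: leaving has_projections = None keeps the old behavior (projections only when dim != codebook_dims), while passing has_projections = True forces the input/output projections even when the dimensions already match. The constructor values and the assumption that LFQ is importable from the package top level are illustrative, not taken from this commit.

import torch
from vector_quantize_pytorch import LFQ

# minimal sketch, assuming example dimensions below
# codebook_dim is inferred as log2(codebook_size) = 16, so dim == codebook_dims here
# before this commit, no project_in / project_out would be created in this case
quantizer = LFQ(
    dim = 16,
    codebook_size = 2 ** 16,
    has_projections = True   # new keyword: force projections regardless of dimension match
)

x = torch.randn(1, 32, 16)
quantized, indices, entropy_aux_loss = quantizer(x)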
