
Commit be995a7

ability to turn off bias in projections in LFQ
1 parent 1ea2ef6

2 files changed: 5 additions & 4 deletions

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "vector-quantize-pytorch"
-version = "1.14.15"
+version = "1.14.16"
 description = "Vector Quantization - Pytorch"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }

vector_quantize_pytorch/lookup_free_quantization.py

Lines changed: 4 additions & 3 deletions
@@ -64,7 +64,8 @@ def __init__(
         keep_num_codebooks_dim = None,
         codebook_scale = 1.,            # for residual LFQ, codebook scaled down by 2x at each layer
         frac_per_sample_entropy = 1.,   # make less than 1. to only use a random fraction of the probs for per sample entropy
-        use_code_agnostic_commit_loss = False
+        use_code_agnostic_commit_loss = False,
+        projection_has_bias = True
     ):
         super().__init__()

@@ -80,8 +81,8 @@ def __init__(
         dim = default(dim, codebook_dims)

         has_projections = dim != codebook_dims
-        self.project_in = nn.Linear(dim, codebook_dims) if has_projections else nn.Identity()
-        self.project_out = nn.Linear(codebook_dims, dim) if has_projections else nn.Identity()
+        self.project_in = nn.Linear(dim, codebook_dims, bias = projection_has_bias) if has_projections else nn.Identity()
+        self.project_out = nn.Linear(codebook_dims, dim, bias = projection_has_bias) if has_projections else nn.Identity()
         self.has_projections = has_projections

         self.dim = dim

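For context, a minimal sketch of how the new flag could be used, assuming the LFQ constructor from this diff and the forward signature documented in the library's README (quantized, indices, entropy_aux_loss); the dim and codebook_size values below are illustrative:

    import torch
    from vector_quantize_pytorch import LFQ

    # codebook_dim = log2(codebook_size) = 16 here; since dim != codebook_dim,
    # has_projections is True and the nn.Linear layers touched by this commit are created
    quantizer = LFQ(
        dim = 256,
        codebook_size = 65536,
        projection_has_bias = False   # new flag from this commit: bias-free projections
    )

    x = torch.randn(1, 1024, 256)
    quantized, indices, entropy_aux_loss = quantizer(x)

    # with bias = False, PyTorch sets the Linear layer's bias attribute to None
    assert quantizer.project_in.bias is None

Leaving projection_has_bias at its default of True preserves the previous behavior, so the change is backwards compatible.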
0 commit comments