2 parents d921ed3 + 0d03528 commit 34db26c
llmc/compression/quantization/quarot.py
@@ -32,10 +32,11 @@ def preprocess(self):
             self.model.get_embed_layers()[0].weight,
         ):
             logger.info('Tie weight! Copy embed_layer for head_layer!')
-            path = os.join(self.config.model.path, 'config.json')
-            with open(path, 'w') as f:
+            path = os.path.join(self.config.model.path, 'config.json')
+            with open(path, 'r') as f:
                 config = json.load(f)
-                config['tie_word_embeddings'] = False
+            config['tie_word_embeddings'] = False
+            with open(path, 'w') as f:
                 json.dump(config, f, indent=4)
             del self.model.get_head_layers()[0].weight
             w = self.model.get_embed_layers()[0].weight.clone()
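This commit fixes two bugs in the original lines: `os.join` does not exist (the correct call is `os.path.join`), and opening `config.json` with mode `'w'` truncates the file before `json.load` can read it, so the load would fail on empty input. The patch switches to the standard read-modify-write pattern. Below is a minimal standalone sketch of that pattern; the helper name `untie_word_embeddings` is illustrative, not from the repo:

```python
import json
import os


def untie_word_embeddings(model_dir):
    """Set tie_word_embeddings=False in a model's config.json.

    Illustrative helper mirroring the fixed logic, not part of llmc.
    """
    path = os.path.join(model_dir, 'config.json')
    # Read first: opening with 'w' here would truncate the file,
    # leaving json.load nothing to parse.
    with open(path, 'r') as f:
        config = json.load(f)
    config['tie_word_embeddings'] = False
    # Reopen for writing only after the load has succeeded.
    with open(path, 'w') as f:
        json.dump(config, f, indent=4)
```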