k-l-lambda committed on
Commit
4ad8f19
·
1 Parent(s): 4ae587b

config.json: removed fp8 quantization config.

Browse files
Files changed (1) hide show
  1. config.json +0 -9
config.json CHANGED
@@ -34,15 +34,6 @@
34
  "q_lora_rank": 1536,
35
  "qk_nope_head_dim": 128,
36
  "qk_rope_head_dim": 64,
37
- "quantization_config": {
38
- "activation_scheme": "dynamic",
39
- "fmt": "e4m3",
40
- "quant_method": "fp8",
41
- "weight_block_size": [
42
- 128,
43
- 128
44
- ]
45
- },
46
  "rms_norm_eps": 1e-06,
47
  "rope_scaling": {
48
  "beta_fast": 32,
 
34
  "q_lora_rank": 1536,
35
  "qk_nope_head_dim": 128,
36
  "qk_rope_head_dim": 64,
 
 
 
 
 
 
 
 
 
37
  "rms_norm_eps": 1e-06,
38
  "rope_scaling": {
39
  "beta_fast": 32,