sharpenb committed · Commit dceb600 · verified · 1 Parent(s): aa8b6aa

Upload folder using huggingface_hub (#7)

- d12922830bac2033b908a8d9d210fe0478fe6b2662ac1c7e675d1f54648e3d44 (339455a79075eb34cf0f7d826601579fb563bafb)
- a0dd77bbd3ad5a61ab8695d30118db0d18729f34fdfbea8fb6d4d79e58647704 (8f7659702f88fcc1afabc3f3113acc6b96148013)
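For reference, an upload like this is normally produced with the huggingface_hub client's upload_folder. A minimal sketch, assuming a hypothetical repo id and using the temp folder named in the configs below:

```python
# Minimal sketch of how a commit like this is typically created.
# The repo_id and folder_path are illustrative assumptions, not
# values recorded in the commit itself.
from huggingface_hub import upload_folder

commit_info = upload_folder(
    repo_id="PrunaAI/some-smashed-llama",   # hypothetical repo id
    folder_path="/tmp/models/tmp3fjrifao",  # local folder with the smashed model
    commit_message="Upload folder using huggingface_hub",
)
print(commit_info)
```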

Files changed (3):
  1. config.json +6 -1
  2. qmodel.pt +1 -1
  3. smash_config.json +1 -1
config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "/tmp/models/tmpgl7uz6ih/tmp4p2khk6w",
+  "_name_or_path": "/tmp/models/tmp3fjrifao/tmpizgz2wn6",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -47,9 +47,14 @@
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers.js_config": {
+    "dtype": "q4",
     "kv_cache_dtype": {
       "fp16": "float16",
       "q4f16": "float16"
+    },
+    "use_external_data_format": {
+      "model.onnx": true,
+      "model_fp16.onnx": true
     }
   },
   "transformers_version": "4.48.2",
qmodel.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d54f27b8eec5c7e905958321aee5898dae3a5223ee7775ca2fba42ffd20cd1a0
+oid sha256:8fb62cbc0610971af8d46dce37ed1b33d49c0ba267aec3cd5f5805bb65dec6e0
 size 2215166128
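qmodel.pt is stored with Git LFS, so the repository tracks only this pointer file; the oid is the SHA-256 of the actual payload. The size is unchanged at ~2.2 GB, so the re-smashed weights differ in content but not in length. A minimal sketch to check a downloaded copy against the pointer; the local filename is an assumption:

```python
# Minimal sketch: verify a downloaded qmodel.pt against its LFS pointer.
# Expected values are taken from the pointer above; the path is assumed.
import hashlib

EXPECTED_OID = "8fb62cbc0610971af8d46dce37ed1b33d49c0ba267aec3cd5f5805bb65dec6e0"
EXPECTED_SIZE = 2215166128

h = hashlib.sha256()
size = 0
with open("qmodel.pt", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert h.hexdigest() == EXPECTED_OID, f"sha256 mismatch: {h.hexdigest()}"
print("qmodel.pt matches its LFS pointer")
```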
smash_config.json CHANGED
@@ -11,7 +11,7 @@
   "quant_hqq_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/tmp/models/tmpgl7uz6ih",
+  "cache_dir": "/tmp/models/tmp3fjrifao",
   "task": "",
   "save_load_fn": "hqq",
   "save_load_fn_args": {},