Upload folder using huggingface_hub

#2
by sharpenb - opened
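
For reference, a commit with this title is typically produced by `huggingface_hub`'s `upload_folder` API. A minimal sketch, assuming a hypothetical repo id (the folder path is the temporary cache directory that appears in the diff below):

```python
# Sketch of the kind of call that produces an "Upload folder using huggingface_hub" commit.
# The repo_id below is a placeholder, not the actual model repository name.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="/covalent/.cache/models/tmpj7wgl7dbe56lg8yl",  # local model folder (path taken from the diff)
    repo_id="PrunaAI/some-smashed-llama",                       # hypothetical repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```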
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmplzo8can2wf8fall1",
+  "_name_or_path": "/covalent/.cache/models/tmpj7wgl7dbe56lg8yl",
   "architectures": [
     "LlamaForCausalLM"
   ],
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eeea2b94f2c47d0f6d88ef8d7d4d5527a8cf2f0605e8023e6901f9bfffed46b6
+oid sha256:9e790e85babcb91a4c7fddcc155baf0b3c796e4b6791ecdba1fd2d78391cb300
 size 4996772687
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f326aac1c987490c14cf7189e5d386c65266b05ad62159413d8c44d2c1f917f
+oid sha256:e96ce167e737f0bfd30b6953972aef896e5523385b84be3029445ec69c6ba224
 size 4090014257
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmplzo8can2",
+  "cache_dir": "/covalent/.cache/models/tmpj7wgl7db",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}