numen-tech committed
Commit dcadf22 · Parent: fd4421c

Add weights
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,10 @@
 ---
 license: apache-2.0
+base_model: soob3123/amoral-gemma3-1B-v2
+base_model_relation: quantized
+library_name: mlc-llm
+pipeline_tag: text-generation
 ---
+
+4-bit [OmniQuant](https://arxiv.org/abs/2308.13137) quantized version of [amoral-gemma3-1B-v2](https://huggingface.co/soob3123/amoral-gemma3-1B-v2) for inference with the [Private LLM](http://privatellm.app) app.
+
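For fetching these weights outside the Private LLM app, a minimal sketch using `huggingface_hub` is shown below. The `repo_id` is a placeholder, since the canonical repo id is not stated in this diff; substitute the actual one.

```python
# Minimal sketch: download every file in this repo, including the
# LFS-backed weight shards. The repo_id is a hypothetical placeholder.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="numen-tech/amoral-gemma3-1B-v2-w4a16g128asym",  # placeholder
    local_dir="amoral-gemma3-1B-v2-mlc",
)
print(f"Model files downloaded to: {local_dir}")
```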
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<image_soft_token>": 262144
+}
config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "quantization_config": {
+    "bits": 4
+  }
+}
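This stub `config.json` advertises only the quantization bit-width. A consumer can read it before selecting a 4-bit decode path; a minimal sketch, assuming the file sits in the current directory:

```python
# Minimal sketch: read the bit-width advertised in config.json.
import json

with open("config.json") as f:
    config = json.load(f)

bits = config["quantization_config"]["bits"]
assert bits == 4, f"expected 4-bit weights, got {bits}-bit"
```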
ndarray-cache.json ADDED
The diff for this file is too large to render. See raw diff
 
params_shard_0.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4917a670473e45b676f9d6a1e2789ad4d66e4bd23bacbd5f0a10d06194995ae5
+size 603979776
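Each `params_shard_*.bin` entry here is a Git LFS pointer: the repo stores only the spec version, a sha256 oid, and the byte size, while the payload lives in LFS storage. A downloaded shard can be checked against its pointer; a minimal sketch using the oid and size from the pointer above:

```python
# Minimal sketch: verify a downloaded shard against the oid/size
# recorded in its Git LFS pointer file.
import hashlib

EXPECTED_OID = "4917a670473e45b676f9d6a1e2789ad4d66e4bd23bacbd5f0a10d06194995ae5"
EXPECTED_SIZE = 603_979_776  # bytes, from the pointer above

h = hashlib.sha256()
size = 0
with open("params_shard_0.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} bytes"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("params_shard_0.bin matches its LFS pointer")
```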
params_shard_1.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:043b4f7dd461bd6a22592f9d7e2d9d1218a1bbf8c16e10ccbd0130ed724d49fb
+size 31804160
params_shard_10.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2268b160144df94ae1f73851a55b8a6af528d99a59e157b4fc6af8829af8c64
+size 27696128
params_shard_11.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee0082af8f6f1ca70b5fdd7e8bad3e5818ba2a7ed3134625b8995db4a57a2dfd
+size 27696128
params_shard_12.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:602e8ffa01aa3d92b4e1e5df33ec211ce58dccd41a1abc8155f472c577186484
+size 27696128
params_shard_13.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0e8e015270bff5240961cda4c32303a97b338e054af9223410b123be53b32f4
+size 23590400
params_shard_2.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bb028487569125f0d5375679342bd62f19bbeb89824458a594509172358c2df
+size 27696128
params_shard_3.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2884ce5bb11310bbc83d8a1bde4be943d354585ee3f2369fe7fff444f93b7547
+size 27696128
params_shard_4.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fab3bf6f4480c154cd7be9c8ef74ff274b7a2c2ff266177862bbd571c0d8de5
+size 27696128
params_shard_5.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68bcec3c830dd49d338c70151f2f5c26588d5dd3a29ec4b7aabdbcacfd419d9b
+size 27696128
params_shard_6.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:296e93c517bea4631d52d281d7c12e7acdd1bae1cc413d0483ff187f9d4c9e08
+size 27696128
params_shard_7.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7c378daaf3c79a6db768644bc5cb9151ccd0df0237ecc4a83cbf8ee32116955
+size 27696128
params_shard_8.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dc4928c0b24292f8a10209da7660479ff6318e097107d15f2b624541b300ee9
+size 27696128
params_shard_9.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b18e1393dd5915faca94a00e0cb10cdfa00ae3c25a36c159c482c58b5660a3e6
+size 27696128
private-llm-config.json ADDED
@@ -0,0 +1,81 @@
+{
+  "model_type": "gemma3_text",
+  "quantization": "w4a16g128asym_1",
+  "model_config": {
+    "text_config": {
+      "hidden_size": 1152,
+      "intermediate_size": 6912,
+      "num_hidden_layers": 26,
+      "attention_bias": false,
+      "num_attention_heads": 4,
+      "num_key_value_heads": 1,
+      "head_dim": 256,
+      "rms_norm_eps": 1e-06,
+      "hidden_activation": "gelu_pytorch_tanh",
+      "position_embedding_base": 1000000,
+      "rope_local_base_freq": 10000,
+      "sliding_window_pattern": 6,
+      "context_window_size": 8192,
+      "prefill_chunk_size": 8192,
+      "query_pre_attn_scalar": 256,
+      "sliding_window": 512,
+      "kwargs": {
+        "architectures": [
+          "Gemma3ForCausalLM"
+        ],
+        "attention_dropout": 0.0,
+        "attn_logit_softcapping": null,
+        "bos_token_id": 2,
+        "cache_implementation": "hybrid",
+        "eos_token_id": 106,
+        "final_logit_softcapping": null,
+        "initializer_range": 0.02,
+        "max_position_embeddings": 32768,
+        "model_type": "gemma3_text",
+        "pad_token_id": 0,
+        "rope_scaling": null,
+        "rope_theta": 1000000,
+        "torch_dtype": "bfloat16",
+        "transformers_version": "4.50.0.dev0",
+        "unsloth_fixed": true,
+        "unsloth_version": "2025.3.18",
+        "use_cache": true
+      }
+    },
+    "vocab_size": 262144,
+    "tensor_parallel_shards": 1,
+    "max_batch_size": 80,
+    "context_window_size": 32768,
+    "sliding_window_size": -1,
+    "prefill_chunk_size": 128,
+    "is_text_model": true
+  },
+  "vocab_size": 262144,
+  "context_window_size": 32768,
+  "sliding_window_size": -1,
+  "prefill_chunk_size": 128,
+  "attention_sink_size": -1,
+  "tensor_parallel_shards": 1,
+  "mean_gen_len": 128,
+  "max_gen_len": 512,
+  "shift_fill_factor": 0.3,
+  "temperature": 0.7,
+  "presence_penalty": 0.0,
+  "frequency_penalty": 0.0,
+  "repetition_penalty": 1.0,
+  "top_p": 0.95,
+  "conv_template": "gemma3_instruction",
+  "pad_token_id": 0,
+  "bos_token_id": 2,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "tokenizer_files": [
+    "tokenizer.model",
+    "tokenizer.json",
+    "added_tokens.json",
+    "tokenizer_config.json"
+  ],
+  "version": "0.1.0"
+}
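`private-llm-config.json` carries both the model architecture and the default sampling settings applied at inference time. A minimal sketch of inspecting the runtime-facing fields (plain JSON parsing only; no Private LLM API is assumed):

```python
# Minimal sketch: inspect the runtime-facing fields of the
# private-llm-config.json added by this commit.
import json

with open("private-llm-config.json") as f:
    cfg = json.load(f)

print("quantization:   ", cfg["quantization"])         # w4a16g128asym_1
print("context window: ", cfg["context_window_size"])  # 32768
print("conv template:  ", cfg["conv_template"])        # gemma3_instruction
print("temperature:    ", cfg["temperature"])          # 0.7
print("top_p:          ", cfg["top_p"])                # 0.95
print("eos token ids:  ", cfg["eos_token_id"])         # [1, 106]
print("tokenizer files:", cfg["tokenizer_files"])
```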
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff