kunato committed (verified) · Commit e3b863b · 1 Parent(s): 1cabea0

Upload folder using huggingface_hub

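The commit message above is the default written by the huggingface_hub client's `upload_folder` helper. Below is a hypothetical sketch of the kind of call that produces such a commit; the repo id and local folder path are assumptions, since neither appears in this diff.

```python
# Hypothetical reconstruction of the upload that created this commit.
# repo_id and folder_path are placeholders -- the real values are not shown here.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="<user>/<repo>",   # assumed repo id
    folder_path="./export",    # assumed local checkpoint folder
    commit_message="Upload folder using huggingface_hub",
)
```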
config.json CHANGED
```diff
@@ -1,64 +1,37 @@
 {
   "architectures": [
-    "Gemma3ForConditionalGeneration"
-  ],
-  "boi_token_index": 255999,
-  "eoi_token_index": 256000,
-  "eos_token_id": [
-    1,
-    106
+    "Gemma3ForCausalLM"
   ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
   "hidden_size": 2560,
-  "image_token_index": 262144,
   "initializer_range": 0.02,
-  "mm_tokens_per_image": 256,
-  "model_type": "gemma3",
-  "text_config": {
-    "attention_bias": false,
-    "attention_dropout": 0.0,
-    "attn_logit_softcapping": null,
-    "cache_implementation": "hybrid",
-    "final_logit_softcapping": null,
-    "head_dim": 256,
-    "hidden_activation": "gelu_pytorch_tanh",
-    "hidden_size": 2560,
-    "initializer_range": 0.02,
-    "intermediate_size": 10240,
-    "max_position_embeddings": 131072,
-    "model_type": "gemma3_text",
-    "num_attention_heads": 8,
-    "num_hidden_layers": 34,
-    "num_key_value_heads": 4,
-    "query_pre_attn_scalar": 256,
-    "rms_norm_eps": 1e-06,
-    "rope_local_base_freq": 10000.0,
-    "rope_scaling": {
-      "factor": 8.0,
-      "rope_type": "linear"
-    },
-    "rope_theta": 1000000.0,
-    "sliding_window": 1024,
-    "sliding_window_pattern": 6,
-    "torch_dtype": "bfloat16",
-    "use_cache": false,
-    "vocab_size": 262208
+  "intermediate_size": 10240,
+  "max_position_embeddings": 131072,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 34,
+  "num_key_value_heads": 4,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000.0,
+  "rope_scaling": {
+    "factor": 8.0,
+    "rope_type": "linear"
   },
+  "rope_theta": 1000000.0,
+  "sliding_window": 1024,
+  "sliding_window_pattern": 6,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.51.1",
+  "transformers_version": "4.52.4",
   "use_cache": false,
-  "vision_config": {
-    "attention_dropout": 0.0,
-    "hidden_act": "gelu_pytorch_tanh",
-    "hidden_size": 1152,
-    "image_size": 896,
-    "intermediate_size": 4304,
-    "layer_norm_eps": 1e-06,
-    "model_type": "siglip_vision_model",
-    "num_attention_heads": 16,
-    "num_channels": 3,
-    "num_hidden_layers": 27,
-    "patch_size": 14,
-    "torch_dtype": "bfloat16",
-    "vision_use_head": false
-  }
+  "vocab_size": 262208
 }
```
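The net effect of this change: the repo switches from the multimodal `Gemma3ForConditionalGeneration` layout (a nested `text_config` plus a SigLIP `vision_config`) to a flat, text-only `Gemma3ForCausalLM` config. A minimal loading sketch, assuming a transformers version with Gemma 3 support; `<repo-id>` is a placeholder, since the repo name is not part of this diff:

```python
# Sketch: after this commit the checkpoint loads as a plain causal LM.
# "<repo-id>" is a placeholder; the actual repo name is not shown in this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<repo-id>")
model = AutoModelForCausalLM.from_pretrained("<repo-id>", torch_dtype="bfloat16")
print(type(model).__name__)  # expected: Gemma3ForCausalLM, per "architectures"
```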
generation_config.json CHANGED
```diff
@@ -9,5 +9,5 @@
   "pad_token_id": 0,
   "top_k": 64,
   "top_p": 0.95,
-  "transformers_version": "4.51.1"
+  "transformers_version": "4.52.4"
 }
```
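Only the `transformers_version` stamp changes here; the sampling defaults visible in the hunk's context lines (`top_k=64`, `top_p=0.95`, `pad_token_id=0`) are unchanged and are picked up by `generate()` automatically. A sketch spelling them out explicitly, reusing the placeholder `<repo-id>` from above:

```python
# Sketch: sampling with the defaults shown in generation_config.json.
# generate() would read these on its own; they are passed explicitly for clarity.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<repo-id>")  # placeholder repo
model = AutoModelForCausalLM.from_pretrained("<repo-id>", torch_dtype="bfloat16")

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    top_k=64,          # from generation_config.json
    top_p=0.95,        # from generation_config.json
    pad_token_id=0,    # from generation_config.json
    max_new_tokens=32,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```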
model-00001-of-00002.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71bc400a21566d1cad9375e2067897b4f8e299b347e85cb095275c48368a0d68
-size 4961251752
+oid sha256:d9942fbe91486a5c2c14e2fc49608b24ad4613bf063ea288bee098e62717af3d
+size 4960531344
```
model-00002-of-00002.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b92ea547075b34f1699a46fce132018ea84e97242a5e353ed169b12f165e850f
-size 3639026128
+oid sha256:ad8361881e803001862460ef972a26824fd87455b1e90510b146373dc068f582
+size 2800046672
```
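Both shard pointers change, and the second shard shrinks from 3,639,026,128 to 2,800,046,672 bytes (roughly 0.84 GB), consistent with the vision tower being dropped. A Git LFS pointer carries only a sha256 (`oid`) and byte size, so a downloaded shard can be checked against it; a sketch, assuming the file sits in the current directory:

```python
# Sketch: verify a downloaded shard against its LFS pointer from this commit.
# The local path is an assumption.
import hashlib
import os

path = "model-00002-of-00002.safetensors"  # assumed local download
expected_oid = "ad8361881e803001862460ef972a26824fd87455b1e90510b146373dc068f582"
expected_size = 2800046672

assert os.path.getsize(path) == expected_size, "size mismatch"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
```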
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff
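The index diff is huge because `model.safetensors.index.json` maps every tensor name to a shard file, and the switch from the multimodal to the text-only layout likely renames or removes essentially every key. A sketch of inspecting the rewritten index locally; the path is an assumption:

```python
# Sketch: peek at the shard index that this commit rewrites wholesale.
import json

with open("model.safetensors.index.json") as f:  # assumed local path
    index = json.load(f)

print(index["metadata"]["total_size"])            # total parameter bytes
print(sorted(set(index["weight_map"].values())))  # the two shard files above
```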
 
tokenizer_config.json CHANGED
```diff
@@ -51328,7 +51328,6 @@
   ],
   "boi_token": "<start_of_image>",
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
   "eos_token": "<end_of_turn>",
```