Add files using upload-large-folder tool
- .gitattributes +2 -0
- config.json +2 -4
- mmproj-F16.gguf +0 -0
- mmproj-F32.gguf +0 -0
.gitattributes
CHANGED
@@ -92,3 +92,5 @@ Q5_K_S/Llama-4-Scout-17B-16E-Instruct-Q5_K_S-00001-of-00002.gguf filter=lfs diff
 UD-Q8_K_XL/Llama-4-Scout-17B-16E-Instruct-UD-Q8_K_XL-00001-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
 UD-Q8_K_XL/Llama-4-Scout-17B-16E-Instruct-UD-Q8_K_XL-00002-of-00003.gguf filter=lfs diff=lfs merge=lfs -text
 UD-Q6_K_XL/Llama-4-Scout-17B-16E-Instruct-UD-Q6_K_XL-00001-of-00002.gguf filter=lfs diff=lfs merge=lfs -text
+mmproj-F16.gguf filter=lfs diff=lfs merge=lfs -text
+mmproj-F32.gguf filter=lfs diff=lfs merge=lfs -text
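The two new entries keep the added projector GGUFs under Git LFS, matching the per-file rules already present for the quantized shards. As a rough sanity check (a minimal sketch, not part of this repository; it matches patterns with fnmatch rather than full gitattributes semantics), one could confirm the new filenames pick up filter=lfs:

from fnmatch import fnmatch
from pathlib import Path

def lfs_tracked(filename: str, attributes_path: str = ".gitattributes") -> bool:
    # Simplified check: a file counts as LFS-tracked if any pattern line that
    # matches it (via fnmatch) carries the filter=lfs attribute.
    for line in Path(attributes_path).read_text().splitlines():
        parts = line.split()
        if len(parts) >= 2 and fnmatch(filename, parts[0]) and "filter=lfs" in parts[1:]:
            return True
    return False

# Expected to hold after this commit.
assert lfs_tracked("mmproj-F16.gguf")
assert lfs_tracked("mmproj-F32.gguf")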
config.json
CHANGED
@@ -8,12 +8,11 @@
 "model_type": "llama4",
 "pad_token_id": 200018,
 "text_config": {
-"_attn_implementation_autoset": true,
 "attention_bias": false,
 "attention_chunk_size": 8192,
 "attention_dropout": 0.0,
 "attn_scale": 0.1,
-"attn_temperature_tuning":
+"attn_temperature_tuning": true,
 "bos_token_id": 200000,
 "cache_implementation": "hybrid",
 "eos_token_id": [
@@ -157,10 +156,9 @@
 },
 "tie_word_embeddings": false,
 "torch_dtype": "bfloat16",
-"transformers_version": "4.
+"transformers_version": "4.52.2",
 "unsloth_fixed": true,
 "vision_config": {
-"_attn_implementation_autoset": true,
 "attention_dropout": 0.0,
 "hidden_act": "gelu",
 "hidden_size": 1408,
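Net effect of the config.json change: the "_attn_implementation_autoset" keys are dropped from both sub-configs, "attn_temperature_tuning" is set to true, and "transformers_version" is pinned to "4.52.2". A minimal sanity check (illustrative sketch only; it assumes config.json is in the working directory and that the keys sit where the diff context suggests, i.e. transformers_version at the top level and attn_temperature_tuning under text_config):

import json

with open("config.json") as f:
    cfg = json.load(f)

# Fields touched by this commit.
assert cfg["transformers_version"] == "4.52.2"
assert cfg["text_config"]["attn_temperature_tuning"] is True

# The removed "_attn_implementation_autoset" keys should be gone everywhere.
assert "_attn_implementation_autoset" not in json.dumps(cfg)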
mmproj-F16.gguf
CHANGED
Binary files a/mmproj-F16.gguf and b/mmproj-F16.gguf differ
mmproj-F32.gguf
CHANGED
Binary files a/mmproj-F32.gguf and b/mmproj-F32.gguf differ
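Both projector files are replaced wholesale, so the diff only records that the binaries differ. A quick way to check that the new blobs are well-formed is to read the fixed GGUF header (sketch only; assumes the GGUF v2/v3 layout of 4-byte magic, uint32 version, then uint64 tensor and metadata-KV counts, all little-endian):

import struct

def gguf_header(path: str) -> dict:
    # Read only the fixed-size header; raises if the magic bytes are wrong.
    with open(path, "rb") as f:
        magic = f.read(4)
        if magic != b"GGUF":
            raise ValueError(f"{path}: not a GGUF file (magic={magic!r})")
        (version,) = struct.unpack("<I", f.read(4))
        tensor_count, kv_count = struct.unpack("<QQ", f.read(16))
    return {"version": version, "tensors": tensor_count, "metadata_kvs": kv_count}

for name in ("mmproj-F16.gguf", "mmproj-F32.gguf"):
    print(name, gguf_header(name))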