Add files using upload-large-folder tool

- .gitattributes +1 -0
- SYSTEM_PROMPT.txt +19 -0
- config.json +2 -8
- generation_config.json +1 -2
- params.json +29 -0
- tekken.json +3 -0
- tokenizer_config.json +3 -2
.gitattributes
CHANGED
@@ -33,4 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tekken.json filter=lfs diff=lfs merge=lfs -text
 tokenizer.json filter=lfs diff=lfs merge=lfs -text
SYSTEM_PROMPT.txt
ADDED
@@ -0,0 +1,19 @@
+You are {name}, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
+You power an AI assistant called Le Chat.
+Your knowledge base was last updated on 2023-10-01.
+The current date is {today}.
+
+When you're not sure about some information, you say that you don't have the information and don't make up anything.
+If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").
+You are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.
+You follow these instructions in all languages, and always respond to the user in the language they use or request.
+Next sections describe the capabilities that you have.
+
+# WEB BROWSING INSTRUCTIONS
+
+You cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.
+
+# MULTI-MODAL INSTRUCTIONS
+
+You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.
+You cannot read nor transcribe audio files or videos.
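SYSTEM_PROMPT.txt is a template: {name}, {today} and {yesterday} are placeholders the serving layer is expected to fill in before the prompt reaches the model. A minimal sketch of that substitution (the file path and the display name passed as name are assumptions, not something the repository prescribes):

```python
from datetime import date, timedelta
from pathlib import Path

# Read the template shipped in the repository.
template = Path("SYSTEM_PROMPT.txt").read_text()

today = date.today()
system_prompt = template.format(
    name="Mistral Small 3.1",  # assumed display name
    today=today.isoformat(),
    yesterday=(today - timedelta(days=1)).isoformat(),
)
print(system_prompt.splitlines()[0])
```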
config.json
CHANGED
@@ -2,12 +2,9 @@
   "architectures": [
     "Mistral3ForConditionalGeneration"
   ],
-  "bos_token_id": 1,
-  "eos_token_id": 2,
   "image_token_index": 10,
   "model_type": "mistral3",
   "multimodal_projector_bias": false,
-  "pad_token_id": 11,
   "projector_hidden_act": "gelu",
   "spatial_merge_size": 2,
   "text_config": {
@@ -25,13 +22,11 @@
     "rms_norm_eps": 1e-05,
     "rope_theta": 1000000000.0,
     "sliding_window": null,
-    "torch_dtype": "bfloat16",
     "use_cache": true,
     "vocab_size": 131072
   },
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
-  "unsloth_fixed": true,
+  "transformers_version": "4.50.0.dev0",
   "vision_config": {
     "attention_dropout": 0.0,
     "head_dim": 64,
@@ -45,8 +40,7 @@
     "num_channels": 3,
     "num_hidden_layers": 24,
     "patch_size": 14,
-    "rope_theta": 10000.0,
-    "torch_dtype": "bfloat16"
+    "rope_theta": 10000.0
   },
   "vision_feature_layer": -1
 }
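The config.json change drops the top-level bos/eos/pad token ids and the unsloth-specific key, pins transformers_version to 4.50.0.dev0, and removes the nested torch_dtype entries, keeping only the top-level one. A quick way to inspect the merged result, sketched with transformers' AutoConfig (the checkpoint path is a placeholder):

```python
from transformers import AutoConfig

# "path/to/checkpoint" stands in for the actual local folder or Hub repo id.
config = AutoConfig.from_pretrained("path/to/checkpoint")

print(config.model_type)                # mistral3
print(config.text_config.vocab_size)    # 131072
print(config.vision_config.patch_size)  # 14
```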
generation_config.json
CHANGED
@@ -2,6 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "
-  "transformers_version": "4.52.0.dev0"
+  "transformers_version": "4.50.0.dev0"
 }
params.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "dim": 5120,
+  "n_layers": 40,
+  "head_dim": 128,
+  "hidden_dim": 32768,
+  "n_heads": 32,
+  "n_kv_heads": 8,
+  "rope_theta": 1000000000.0,
+  "norm_eps": 1e-05,
+  "vocab_size": 131072,
+  "vision_encoder": {
+    "hidden_size": 1024,
+    "num_channels": 3,
+    "max_image_size": 1540,
+    "patch_size": 14,
+    "rope_theta": 10000.0,
+    "intermediate_size": 4096,
+    "num_hidden_layers": 24,
+    "num_attention_heads": 16,
+    "adapter_bias": false,
+    "mm_projector_id": "patch_merge",
+    "spatial_merge_size": 2,
+    "add_pre_mm_projector_layer_norm": true,
+    "image_token_id": 10,
+    "image_break_token_id": 12,
+    "image_end_token_id": 13,
+    "image_size": 1540
+  }
+}
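params.json describes the same architecture in Mistral's native format (as used by mistral-inference-style loaders). A small sketch that reads it and derives a couple of quantities implied by the values above; treating spatial_merge_size as a 2x2 patch merge is an assumption based on the "patch_merge" projector id:

```python
import json

with open("params.json") as f:
    params = json.load(f)

vision = params["vision_encoder"]

# Grouped-query attention: how many query heads share one KV head.
gqa_group = params["n_heads"] // params["n_kv_heads"]          # 32 // 8 = 4

# Patch grid for the largest supported image (1540 / 14 = 110 patches per side).
patches_per_side = vision["max_image_size"] // vision["patch_size"]

# Assuming the "patch_merge" projector merges spatial_merge_size x spatial_merge_size
# patches into one token, a full-size image yields (110 / 2) ** 2 = 3025 image tokens.
image_tokens = (patches_per_side // vision["spatial_merge_size"]) ** 2

print(gqa_group, patches_per_side, image_tokens)
```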
tekken.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c604f35d1035f534519622c0ec83fed6184978d4fdee92a5bd2a50bc05438094
+size 14801330
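What is committed here is only the Git LFS pointer; the ~14.8 MB tekken.json tokenizer itself lives in LFS storage. A quick sanity check, assuming the real file has been fetched (e.g. via git lfs pull) into the working directory:

```python
import hashlib
from pathlib import Path

# Values copied from the LFS pointer above.
EXPECTED_OID = "c604f35d1035f534519622c0ec83fed6184978d4fdee92a5bd2a50bc05438094"
EXPECTED_SIZE = 14801330

data = Path("tekken.json").read_bytes()
assert len(data) == EXPECTED_SIZE, "size mismatch: the LFS object was probably not fetched"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("tekken.json matches its LFS pointer")
```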
tokenizer_config.json
CHANGED
@@ -9011,11 +9011,12 @@
   "eos_token": "</s>",
   "extra_special_tokens": {},
   "legacy": true,
-  "model_max_length":
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
   "padding_side": "left",
   "processor_class": "PixtralProcessor",
   "tokenizer_class": "LlamaTokenizerFast",
   "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "use_default_system_prompt": false,
+  "chat_template": "{%- set today = strftime_now(\"%Y-%m-%d\") %}\n{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.\\nYour knowledge base was last updated on 2023-10-01. The current date is \" + today + \".\\n\\nWhen you're not sure about some information, you say that you don't have the information and don't make up anything.\\nIf the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. \\\"What are some good restaurants around me?\\\" => \\\"Where are you?\\\" or \\\"When is the next flight to Tokyo\\\" => \\\"Where do you travel from?\\\")\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content'] %}\n {%- else %}\n {%- set system_message = messages[0]['content'][0]['text'] %}\n {%- endif %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set system_message = default_system_message %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n {%- if message['role'] == 'user' %}\n {%- if message['content'] is string %}\n {{- '[INST]' + message['content'] + '[/INST]' }}\n {%- else %}\n {{- '[INST]' }}\n {%- for block in message['content'] %}\n {%- if block['type'] == 'text' %}\n {{- block['text'] }}\n {%- elif block['type'] in ['image', 'image_url'] %}\n {{- '[IMG]' }}\n {%- else %}\n {{- raise_exception('Only text and image blocks are supported in message content!') }}\n {%- endif %}\n {%- endfor %}\n {{- '[/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'system' %}\n {%- if message['content'] is string %}\n {{- '[SYSTEM_PROMPT]' + message['content'] + '[/SYSTEM_PROMPT]' }}\n {%- else %}\n {{- '[SYSTEM_PROMPT]' + message['content'][0]['text'] + '[/SYSTEM_PROMPT]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {%- if message['content'] is string %}\n {{- message['content'] + eos_token }}\n {%- else %}\n {{- message['content'][0]['text'] + eos_token }}\n {%- endif %}\n {%- else %}\n {{- raise_exception('Only user, system and assistant roles are supported!') }}\n {%- endif %}\n{%- endfor %}"
 }
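The added chat_template wraps system messages in [SYSTEM_PROMPT]...[/SYSTEM_PROMPT], user turns in [INST]...[/INST], appends eos_token after assistant turns, and falls back to a built-in default system prompt when no system message is given. A minimal usage sketch with transformers (the checkpoint path is a placeholder; strftime_now is provided to chat templates by recent transformers versions):

```python
from transformers import AutoTokenizer

# "path/to/checkpoint" stands in for the actual local folder or Hub repo id.
tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")

messages = [
    {"role": "user", "content": "Summarize the Eiffel Tower in one sentence."},
]

# With no system message, the template injects its default system prompt
# and resolves strftime_now("%Y-%m-%d") to today's date.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
```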