Upload folder using huggingface_hub
- .gitattributes +1 -0
- Modelfile +14 -0
- README.md +346 -0
- added_tokens.json +28 -0
- chat_template.jinja +86 -0
- config.json +38 -0
- generation_config.json +13 -0
- merges.txt +0 -0
- model-00001-of-00013.safetensors +3 -0
- model-00002-of-00013.safetensors +3 -0
- model-00003-of-00013.safetensors +3 -0
- model-00004-of-00013.safetensors +3 -0
- model-00005-of-00013.safetensors +3 -0
- model-00006-of-00013.safetensors +3 -0
- model-00007-of-00013.safetensors +3 -0
- model-00008-of-00013.safetensors +3 -0
- model-00009-of-00013.safetensors +3 -0
- model-00010-of-00013.safetensors +3 -0
- model-00011-of-00013.safetensors +3 -0
- model-00012-of-00013.safetensors +3 -0
- model-00013-of-00013.safetensors +3 -0
- model.safetensors.index.json +0 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +240 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
Modelfile
ADDED
@@ -0,0 +1,14 @@
# ollama modelfile auto-generated by llamafactory

FROM .

TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ range .Messages }}{{ if eq .Role "user" }}<|im_start|>user
{{ .Content }}<|im_end|>
<|im_start|>assistant
{{ else if eq .Role "assistant" }}{{ .Content }}<|im_end|>
{{ end }}{{ end }}"""

PARAMETER stop "<|im_end|>"
PARAMETER num_ctx 4096
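A minimal sketch of building and running a local Ollama model from this Modelfile (the model name `qwen3-energy-qa` is a hypothetical example, and `FROM .` assumes the current directory contains model files Ollama can load):

```shell
# Create a local Ollama model from the Modelfile, then chat with it
ollama create qwen3-energy-qa -f Modelfile
ollama run qwen3-energy-qa "Explain the difference between AC and DC transmission."
```

Note that the generated Modelfile caps the context window at 4096 tokens via `num_ctx`; raise it if you need longer prompts.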
README.md
ADDED
@@ -0,0 +1,346 @@
---
library_name: transformers
license: apache-2.0
license_link: https://huggingface.co/Qwen/Qwen3-30B-A3B-Instruct-2507/blob/main/LICENSE
pipeline_tag: text-generation
---

This is a full-parameter fine-tuned version of Qwen3-30B-A3B-Instruct-2507, trained on a large-scale energy QA expansion dataset. This checkpoint corresponds to training step 300.

# Qwen3-30B-A3B-Instruct-2507
<a href="https://chat.qwen.ai/?model=Qwen3-30B-A3B-2507" target="_blank" style="margin: 2px;">
    <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/>
</a>

## Highlights

We introduce the updated version of the **Qwen3-30B-A3B non-thinking mode**, named **Qwen3-30B-A3B-Instruct-2507**, featuring the following key enhancements:

- **Significant improvements** in general capabilities, including **instruction following, logical reasoning, text comprehension, mathematics, science, coding, and tool usage**.
- **Substantial gains** in long-tail knowledge coverage across **multiple languages**.
- **Markedly better alignment** with user preferences in **subjective and open-ended tasks**, enabling more helpful responses and higher-quality text generation.
- **Enhanced capabilities** in **256K long-context understanding**.

## Model Overview

**Qwen3-30B-A3B-Instruct-2507** has the following features:
- Type: Causal Language Models
- Training Stage: Pretraining & Post-training
- Number of Parameters: 30.5B in total and 3.3B activated
- Number of Parameters (Non-Embedding): 29.9B
- Number of Layers: 48
- Number of Attention Heads (GQA): 32 for Q and 4 for KV
- Number of Experts: 128
- Number of Activated Experts: 8
- Context Length: **262,144 natively**

**NOTE: This model supports only non-thinking mode and does not generate ``<think></think>`` blocks in its output. Meanwhile, specifying `enable_thinking=False` is no longer required.**

For more details, including benchmark evaluation, hardware requirements, and inference performance, please refer to our [blog](https://qwenlm.github.io/blog/qwen3/), [GitHub](https://github.com/QwenLM/Qwen3), and [Documentation](https://qwen.readthedocs.io/en/latest/).

## Performance

|  | Deepseek-V3-0324 | GPT-4o-0327 | Gemini-2.5-Flash Non-Thinking | Qwen3-235B-A22B Non-Thinking | Qwen3-30B-A3B Non-Thinking | Qwen3-30B-A3B-Instruct-2507 |
|--- | --- | --- | --- | --- | --- | --- |
| **Knowledge** | | | | | | |
| MMLU-Pro | **81.2** | 79.8 | 81.1 | 75.2 | 69.1 | 78.4 |
| MMLU-Redux | 90.4 | **91.3** | 90.6 | 89.2 | 84.1 | 89.3 |
| GPQA | 68.4 | 66.9 | **78.3** | 62.9 | 54.8 | 70.4 |
| SuperGPQA | **57.3** | 51.0 | 54.6 | 48.2 | 42.2 | 53.4 |
| **Reasoning** | | | | | | |
| AIME25 | 46.6 | 26.7 | **61.6** | 24.7 | 21.6 | 61.3 |
| HMMT25 | 27.5 | 7.9 | **45.8** | 10.0 | 12.0 | 43.0 |
| ZebraLogic | 83.4 | 52.6 | 57.9 | 37.7 | 33.2 | **90.0** |
| LiveBench 20241125 | 66.9 | 63.7 | **69.1** | 62.5 | 59.4 | 69.0 |
| **Coding** | | | | | | |
| LiveCodeBench v6 (25.02-25.05) | **45.2** | 35.8 | 40.1 | 32.9 | 29.0 | 43.2 |
| MultiPL-E | 82.2 | 82.7 | 77.7 | 79.3 | 74.6 | **83.8** |
| Aider-Polyglot | 55.1 | 45.3 | 44.0 | **59.6** | 24.4 | 35.6 |
| **Alignment** | | | | | | |
| IFEval | 82.3 | 83.9 | 84.3 | 83.2 | 83.7 | **84.7** |
| Arena-Hard v2* | 45.6 | 61.9 | 58.3 | 52.0 | 24.8 | **69.0** |
| Creative Writing v3 | 81.6 | 84.9 | 84.6 | 80.4 | 68.1 | **86.0** |
| WritingBench | 74.5 | 75.5 | 80.5 | 77.0 | 72.2 | **85.5** |
| **Agent** | | | | | | |
| BFCL-v3 | 64.7 | 66.5 | 66.1 | **68.0** | 58.6 | 65.1 |
| TAU1-Retail | 49.6 | 60.3# | **65.2** | 65.2 | 38.3 | 59.1 |
| TAU1-Airline | 32.0 | 42.8# | **48.0** | 32.0 | 18.0 | 40.0 |
| TAU2-Retail | **71.1** | 66.7# | 64.3 | 64.9 | 31.6 | 57.0 |
| TAU2-Airline | 36.0 | 42.0# | **42.5** | 36.0 | 18.0 | 38.0 |
| TAU2-Telecom | **34.0** | 29.8# | 16.9 | 24.6 | 18.4 | 12.3 |
| **Multilingualism** | | | | | | |
| MultiIF | 66.5 | 70.4 | 69.4 | 70.2 | **70.8** | 67.9 |
| MMLU-ProX | 75.8 | 76.2 | **78.3** | 73.2 | 65.1 | 72.0 |
| INCLUDE | 80.1 | 82.1 | **83.8** | 75.6 | 67.8 | 71.9 |
| PolyMATH | 32.2 | 25.5 | 41.9 | 27.0 | 23.3 | **43.1** |

*: For reproducibility, we report the win rates evaluated by GPT-4.1.

\#: Results were generated using GPT-4o-20241120, as access to the native function calling API of GPT-4o-0327 was unavailable.

## Quickstart

The code for Qwen3-MoE has been merged into Hugging Face `transformers`, and we advise you to use the latest version of `transformers`.

With `transformers<4.51.0`, you will encounter the following error:
```
KeyError: 'qwen3_moe'
```
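Upgrading resolves this; a minimal sketch, assuming a pip-managed environment:

```shell
# Upgrade to a version that registers the qwen3_moe architecture
pip install -U "transformers>=4.51.0"
```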
The following code snippet illustrates how to use the model to generate content from given inputs:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen3-30B-A3B-Instruct-2507"

# load the tokenizer and the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

# prepare the model input
prompt = "Give me a short introduction to large language models."
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# conduct text completion
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=16384
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

content = tokenizer.decode(output_ids, skip_special_tokens=True)

print("content:", content)
```
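For interactive use, you may prefer to see tokens as they are generated; a minimal sketch using the built-in `TextStreamer` from `transformers`, reusing `model`, `tokenizer`, and `model_inputs` from the snippet above:

```python
from transformers import TextStreamer

# Print decoded tokens to stdout as they are produced, skipping the prompt
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(**model_inputs, max_new_tokens=16384, streamer=streamer)
```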
For deployment, you can use `sglang>=0.4.6.post1` or `vllm>=0.8.5` to create an OpenAI-compatible API endpoint:
- SGLang:
```shell
python -m sglang.launch_server --model-path Qwen/Qwen3-30B-A3B-Instruct-2507 --context-length 262144
```
- vLLM:
```shell
vllm serve Qwen/Qwen3-30B-A3B-Instruct-2507 --max-model-len 262144
```
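Either server exposes an OpenAI-compatible API; a minimal client sketch, assuming the vLLM command above is listening on its default port 8000 (SGLang defaults to port 30000):

```python
from openai import OpenAI  # pip install openai

# Point the client at the local OpenAI-compatible endpoint
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="Qwen/Qwen3-30B-A3B-Instruct-2507",
    messages=[{"role": "user", "content": "Give me a short introduction to large language models."}],
    temperature=0.7,
    top_p=0.8,
)
print(response.choices[0].message.content)
```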
**Note: If you encounter out-of-memory (OOM) issues, consider reducing the context length to a shorter value, such as `32,768`.**

For local use, applications such as Ollama, LMStudio, MLX-LM, llama.cpp, and KTransformers also support Qwen3.

## Agentic Use

Qwen3 excels in tool-calling capabilities. We recommend using [Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) to make the best use of the agentic abilities of Qwen3. Qwen-Agent encapsulates tool-calling templates and tool-calling parsers internally, greatly reducing coding complexity.

To define the available tools, you can use the MCP configuration file, use the integrated tools of Qwen-Agent, or integrate other tools by yourself.
```python
from qwen_agent.agents import Assistant

# Define LLM
llm_cfg = {
    'model': 'Qwen3-30B-A3B-Instruct-2507',

    # Use a custom endpoint compatible with OpenAI API:
    'model_server': 'http://localhost:8000/v1',  # api_base
    'api_key': 'EMPTY',
}

# Define Tools
tools = [
    {'mcpServers': {  # You can specify the MCP configuration file
            'time': {
                'command': 'uvx',
                'args': ['mcp-server-time', '--local-timezone=Asia/Shanghai']
            },
            "fetch": {
                "command": "uvx",
                "args": ["mcp-server-fetch"]
            }
        }
    },
    'code_interpreter',  # Built-in tools
]

# Define Agent
bot = Assistant(llm=llm_cfg, function_list=tools)

# Streaming generation
messages = [{'role': 'user', 'content': 'https://qwenlm.github.io/blog/ Introduce the latest developments of Qwen'}]
for responses in bot.run(messages=messages):
    pass
print(responses)
```

## Processing Ultra-Long Texts

To support **ultra-long context processing** (up to **1 million tokens**), we integrate two key techniques:

- **[Dual Chunk Attention](https://arxiv.org/abs/2402.17463) (DCA)**: A length extrapolation method that splits long sequences into manageable chunks while preserving global coherence.
- **[MInference](https://arxiv.org/abs/2407.02490)**: A sparse attention mechanism that reduces computational overhead by focusing on critical token interactions.

Together, these innovations significantly improve both **generation quality** and **inference efficiency** for sequences beyond 256K tokens. On sequences approaching 1M tokens, the system achieves up to a **3× speedup** compared to standard attention implementations.

For full technical details, see the [Qwen2.5-1M Technical Report](https://arxiv.org/abs/2501.15383).

### How to Enable 1M Token Context

> [!NOTE]
> To effectively process a 1 million token context, users will require approximately **240 GB** of total GPU memory. This accounts for model weights, KV-cache storage, and peak activation memory demands.

#### Step 1: Update Configuration File

Download the model and replace the content of your `config.json` with `config_1m.json`, which includes the config for length extrapolation and sparse attention.

```bash
export MODELNAME=Qwen3-30B-A3B-Instruct-2507
huggingface-cli download Qwen/${MODELNAME} --local-dir ${MODELNAME}
mv ${MODELNAME}/config.json ${MODELNAME}/config.json.bak
mv ${MODELNAME}/config_1m.json ${MODELNAME}/config.json
```

#### Step 2: Launch Model Server

After updating the config, proceed with either **vLLM** or **SGLang** to serve the model.

#### Option 1: Using vLLM

To run Qwen with 1M context support, first install a recent vLLM build:

```bash
pip install -U vllm \
    --torch-backend=auto \
    --extra-index-url https://wheels.vllm.ai/nightly
```

Then launch the server with Dual Chunk Flash Attention enabled:

```bash
VLLM_ATTENTION_BACKEND=DUAL_CHUNK_FLASH_ATTN VLLM_USE_V1=0 \
vllm serve ./Qwen3-30B-A3B-Instruct-2507 \
  --tensor-parallel-size 4 \
  --max-model-len 1010000 \
  --enable-chunked-prefill \
  --max-num-batched-tokens 131072 \
  --enforce-eager \
  --max-num-seqs 1 \
  --gpu-memory-utilization 0.85
```

##### Key Parameters

| Parameter | Purpose |
|--------|--------|
| `VLLM_ATTENTION_BACKEND=DUAL_CHUNK_FLASH_ATTN` | Enables the custom attention kernel for long-context efficiency |
| `--max-model-len 1010000` | Sets maximum context length to ~1M tokens |
| `--enable-chunked-prefill` | Allows chunked prefill for very long inputs (avoids OOM) |
| `--max-num-batched-tokens 131072` | Controls batch size during prefill; balances throughput and memory |
| `--enforce-eager` | Disables CUDA graph capture (required for dual chunk attention) |
| `--max-num-seqs 1` | Limits concurrent sequences due to extreme memory usage |
| `--gpu-memory-utilization 0.85` | Sets the fraction of GPU memory used by the model executor |

#### Option 2: Using SGLang

First, clone and install the specialized branch:

```bash
git clone https://github.com/sgl-project/sglang.git
cd sglang
pip install -e "python[all]"
```

Launch the server with DCA support:

```bash
python3 -m sglang.launch_server \
  --model-path ./Qwen3-30B-A3B-Instruct-2507 \
  --context-length 1010000 \
  --mem-frac 0.75 \
  --attention-backend dual_chunk_flash_attn \
  --tp 4 \
  --chunked-prefill-size 131072
```

##### Key Parameters

| Parameter | Purpose |
|---------|--------|
| `--attention-backend dual_chunk_flash_attn` | Activates Dual Chunk Flash Attention |
| `--context-length 1010000` | Defines max input length |
| `--mem-frac 0.75` | The fraction of memory used for static allocation (model weights and KV-cache memory pool); use a smaller value if you see out-of-memory errors |
| `--tp 4` | Tensor parallelism size (matches model sharding) |
| `--chunked-prefill-size 131072` | Prefill chunk size for handling long inputs without OOM |

#### Troubleshooting

1. Encountering the error: "The model's max sequence length (xxxxx) is larger than the maximum number of tokens that can be stored in the KV cache." or "RuntimeError: Not enough memory. Please try to increase --mem-fraction-static."

    The VRAM reserved for the KV cache is insufficient.
    - vLLM: Consider reducing ``max_model_len`` or increasing ``tensor_parallel_size`` and ``gpu_memory_utilization``. Alternatively, you can reduce ``max_num_batched_tokens``, although this may significantly slow down inference.
    - SGLang: Consider reducing ``context-length`` or increasing ``tp`` and ``mem-frac``. Alternatively, you can reduce ``chunked-prefill-size``, although this may significantly slow down inference.

2. Encountering the error: "torch.OutOfMemoryError: CUDA out of memory."

    The VRAM reserved for activations is insufficient. You can try lowering ``gpu_memory_utilization`` or ``mem-frac``, but be aware that this might reduce the VRAM available for the KV cache.

3. Encountering the error: "Input prompt (xxxxx tokens) + lookahead slots (0) is too long and exceeds the capacity of the block manager." or "The input (xxx tokens) is longer than the model's context length (xxx tokens)."

    The input is too long. Consider using a shorter sequence or increasing ``max_model_len`` (vLLM) or ``context-length`` (SGLang).

#### Long-Context Performance

We test the model on the 1M version of the [RULER](https://arxiv.org/abs/2404.06654) benchmark.

| Model Name | Acc avg | 4k | 8k | 16k | 32k | 64k | 96k | 128k | 192k | 256k | 384k | 512k | 640k | 768k | 896k | 1000k |
|---------------------------------------------|---------|------|------|------|------|------|------|------|------|------|------|------|------|------|------|-------|
| Qwen3-30B-A3B (Non-Thinking) | 72.0 | 97.1 | 96.1 | 95.0 | 92.2 | 82.6 | 79.7 | 76.9 | 70.2 | 66.3 | 61.9 | 55.4 | 52.6 | 51.5 | 52.0 | 50.9 |
| Qwen3-30B-A3B-Instruct-2507 (Full Attention) | 86.8 | 98.0 | 96.7 | 96.9 | 97.2 | 93.4 | 91.0 | 89.1 | 89.8 | 82.5 | 83.6 | 78.4 | 79.7 | 77.6 | 75.7 | 72.8 |
| Qwen3-30B-A3B-Instruct-2507 (Sparse Attention) | 86.8 | 98.0 | 97.1 | 96.3 | 95.1 | 93.6 | 92.5 | 88.1 | 87.7 | 82.9 | 85.7 | 80.7 | 80.0 | 76.9 | 75.5 | 72.2 |

* All models are evaluated with Dual Chunk Attention enabled.
* Since the evaluation is time-consuming, we use 260 samples for each length (13 sub-tasks, 20 samples each).

## Best Practices

To achieve optimal performance, we recommend the following settings:

1. **Sampling Parameters**:
   - We suggest using `Temperature=0.7`, `TopP=0.8`, `TopK=20`, and `MinP=0` (see the sketch after this list).
   - For supported frameworks, you can adjust the `presence_penalty` parameter between 0 and 2 to reduce endless repetition. However, using a higher value may occasionally result in language mixing and a slight decrease in model performance.

2. **Adequate Output Length**: We recommend using an output length of 16,384 tokens for most queries, which is adequate for instruct models.

3. **Standardize Output Format**: We recommend using prompts to standardize model outputs when benchmarking.
   - **Math Problems**: Include "Please reason step by step, and put your final answer within \boxed{}." in the prompt.
   - **Multiple-Choice Questions**: Add the following JSON structure to the prompt to standardize responses: "Please show your choice in the `answer` field with only the choice letter, e.g., `"answer": "C"`."
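A minimal sketch of passing these sampling parameters through `transformers` (`min_p` is supported by recent versions of `model.generate`; the variable names reuse the Quickstart snippet above):

```python
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=16384,  # adequate output length for most queries
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    min_p=0.0,
)
```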
### Citation

If you find our work helpful, feel free to cite us.

```
@misc{qwen3technicalreport,
      title={Qwen3 Technical Report},
      author={Qwen Team},
      year={2025},
      eprint={2505.09388},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2505.09388},
}
```
added_tokens.json
ADDED
@@ -0,0 +1,28 @@
{
  "</think>": 151668,
  "</tool_call>": 151658,
  "</tool_response>": 151666,
  "<think>": 151667,
  "<tool_call>": 151657,
  "<tool_response>": 151665,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
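A quick sketch for verifying these IDs against the loaded tokenizer, reusing `tokenizer` from the Quickstart:

```python
# <|im_end|> terminates chat turns and serves as the primary EOS token
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151643
```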
chat_template.jinja
ADDED
@@ -0,0 +1,86 @@
{%- if tools %}
    {{- '<|im_start|>system\n' }}
    {%- if messages[0].role == 'system' %}
        {{- messages[0].content + '\n\n' }}
    {%- endif %}
    {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
    {%- for tool in tools %}
        {{- "\n" }}
        {{- tool | tojson }}
    {%- endfor %}
    {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
    {%- if messages[0].role == 'system' %}
        {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
    {%- endif %}
{%- endif %}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
    {%- set index = (messages|length - 1) - loop.index0 %}
    {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
        {%- set ns.multi_step_tool = false %}
        {%- set ns.last_query_index = index %}
    {%- endif %}
{%- endfor %}
{%- for message in messages %}
    {%- if message.content is string %}
        {%- set content = message.content %}
    {%- else %}
        {%- set content = '' %}
    {%- endif %}
    {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
        {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
    {%- elif message.role == "assistant" %}
        {%- set reasoning_content = '' %}
        {%- if message.reasoning_content is string %}
            {%- set reasoning_content = message.reasoning_content %}
        {%- else %}
            {%- if '</think>' in content %}
                {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
                {%- set content = content.split('</think>')[-1].lstrip('\n') %}
            {%- endif %}
        {%- endif %}
        {%- if loop.index0 > ns.last_query_index %}
            {%- if loop.last or (not loop.last and reasoning_content) %}
                {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
            {%- else %}
                {{- '<|im_start|>' + message.role + '\n' + content }}
            {%- endif %}
        {%- else %}
            {{- '<|im_start|>' + message.role + '\n' + content }}
        {%- endif %}
        {%- if message.tool_calls %}
            {%- for tool_call in message.tool_calls %}
                {%- if (loop.first and content) or (not loop.first) %}
                    {{- '\n' }}
                {%- endif %}
                {%- if tool_call.function %}
                    {%- set tool_call = tool_call.function %}
                {%- endif %}
                {{- '<tool_call>\n{"name": "' }}
                {{- tool_call.name }}
                {{- '", "arguments": ' }}
                {%- if tool_call.arguments is string %}
                    {{- tool_call.arguments }}
                {%- else %}
                    {{- tool_call.arguments | tojson }}
                {%- endif %}
                {{- '}\n</tool_call>' }}
            {%- endfor %}
        {%- endif %}
        {{- '<|im_end|>\n' }}
    {%- elif message.role == "tool" %}
        {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
            {{- '<|im_start|>user' }}
        {%- endif %}
        {{- '\n<tool_response>\n' }}
        {{- content }}
        {{- '\n</tool_response>' }}
        {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
            {{- '<|im_end|>\n' }}
        {%- endif %}
    {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
    {{- '<|im_start|>assistant\n' }}
{%- endif %}
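This template renders tool schemas into the system prompt when a `tools` argument is supplied. A minimal sketch of exercising that path via `tokenizer.apply_chat_template`, reusing `tokenizer` from the Quickstart (the `get_weather` schema is a hypothetical example):

```python
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool for illustration
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]
messages = [{"role": "user", "content": "What's the weather in Beijing?"}]
text = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
print(text)  # the system block now contains the <tools>...</tools> signatures
```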
config.json
ADDED
@@ -0,0 +1,38 @@
{
  "architectures": [
    "Qwen3MoeForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "decoder_sparse_step": 1,
  "eos_token_id": 151645,
  "head_dim": 128,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 6144,
  "max_position_embeddings": 262144,
  "max_window_layers": 48,
  "mlp_only_layers": [],
  "model_type": "qwen3_moe",
  "moe_intermediate_size": 768,
  "norm_topk_prob": true,
  "num_attention_heads": 32,
  "num_experts": 128,
  "num_experts_per_tok": 8,
  "num_hidden_layers": 48,
  "num_key_value_heads": 4,
  "output_router_logits": false,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 10000000,
  "router_aux_loss_coef": 0.001,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.55.0",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151936
}
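A quick way to sanity-check these MoE fields after download; a minimal sketch using `AutoConfig`, assuming the Hub is reachable or the model is cached locally:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Qwen/Qwen3-30B-A3B-Instruct-2507")
# 8 of 128 experts are routed per token, which is why only ~3.3B of the
# 30.5B total parameters are activated on any given forward pass
print(cfg.num_experts, cfg.num_experts_per_tok, cfg.max_position_embeddings)
```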
generation_config.json
ADDED
@@ -0,0 +1,13 @@
{
  "bos_token_id": 151643,
  "do_sample": true,
  "eos_token_id": [
    151645,
    151643
  ],
  "pad_token_id": 151643,
  "temperature": 0.7,
  "top_k": 20,
  "top_p": 0.8,
  "transformers_version": "4.55.0"
}
merges.txt
ADDED
The diff for this file is too large to render.
model-00001-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:620c3a15ac57494f48f7f8b484af40e858ca25941353f051731c105be8138f1d
size 4997184968
model-00002-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7622e4288cd83ca5b4fa70bd93835e445e2ec5e4d9caf42981905499c745db5
size 4997741608
model-00003-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0b66273248c3381dd8e85c3fbd7f40a560379b36689e41f458b0ea708fdcf70
size 4997742208
model-00004-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:719470dbe16aa5b4f85c991b98e9d6ddb90452159a5666bec682d69f44ae242e
size 4997743184
model-00005-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e73f25aaf43b895f05f70fa9a97a20830a6cd3c205d910d82c8e87e2d1df9714
size 4997743184
model-00006-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1300381310e23d32c338a6a4af1c44792bea8bd6279042fdcbd37284321db647
size 4997743184
model-00007-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9073df5e4d1cd30a2088153b45a17f2736106923f41af4c158bfef7e08185ca4
size 4997743184
model-00008-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af7572c4aa4631fa3be3b618e610687ebb6255737de702edb1b902a74a83976f
size 4997743184
model-00009-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fab737618c8c23352fd9bd563f9fe90f3ba1a2f814966ace5f4c71bfe34acab
size 4997743184
model-00010-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f2a3d72a7d890a4cf13b80a59041f20d5a6becdd26266d5e4c2eb410918bc8d
size 4997743184
model-00011-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9806e0389d6a60241978d362ee81b64f02e65abfb74f4f534bc7acb202ac627
size 4997743184
model-00012-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12c7ffdbccbab6197a7b7481d715cb9bf7c496fbd6653bfe23f3b626a5911b15
size 4997743184
model-00013-of-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:162d64a195caa8a6154029f5f868e02c17b8823b7d8c2a043242363c120eca00
size 1094220288
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
size 11422654
tokenizer_config.json
ADDED
@@ -0,0 +1,240 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151646": {
      "content": "<|object_ref_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151647": {
      "content": "<|object_ref_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151648": {
      "content": "<|box_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151649": {
      "content": "<|box_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151650": {
      "content": "<|quad_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151651": {
      "content": "<|quad_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151652": {
      "content": "<|vision_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151653": {
      "content": "<|vision_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151654": {
      "content": "<|vision_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151655": {
      "content": "<|image_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151656": {
      "content": "<|video_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151657": {
      "content": "<tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151658": {
      "content": "</tool_call>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151659": {
      "content": "<|fim_prefix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151660": {
      "content": "<|fim_middle|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151661": {
      "content": "<|fim_suffix|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151662": {
      "content": "<|fim_pad|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151663": {
      "content": "<|repo_name|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151664": {
      "content": "<|file_sep|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151665": {
      "content": "<tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151666": {
      "content": "</tool_response>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151667": {
      "content": "<think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    },
    "151668": {
      "content": "</think>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": false
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 1010000,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
vocab.json
ADDED
The diff for this file is too large to render.