Commit
·
61d3fe4
0
Parent(s):
Reinitialized repo with Deepseek R1 changes
Browse files- .gitattributes +35 -0
- LICENSE +21 -0
- README.md +239 -0
- config.json +70 -0
- configuration_deepseek.py +210 -0
- figures/benchmark.jpg +0 -0
- generation_config.json +9 -0
- model-00039-of-000163.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling_deepseek.py +1717 -0
- tokenizer.json +0 -0
- tokenizer_config.json +35 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
MIT License
|
2 |
+
|
3 |
+
Copyright (c) 2023 DeepSeek
|
4 |
+
|
5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6 |
+
of this software and associated documentation files (the "Software"), to deal
|
7 |
+
in the Software without restriction, including without limitation the rights
|
8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9 |
+
copies of the Software, and to permit persons to whom the Software is
|
10 |
+
furnished to do so, subject to the following conditions:
|
11 |
+
|
12 |
+
The above copyright notice and this permission notice shall be included in all
|
13 |
+
copies or substantial portions of the Software.
|
14 |
+
|
15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21 |
+
SOFTWARE.
|
README.md
ADDED
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: mit
|
3 |
+
library_name: transformers
|
4 |
+
---
|
5 |
+
# DeepSeek-R1
|
6 |
+
<!-- markdownlint-disable first-line-h1 -->
|
7 |
+
<!-- markdownlint-disable html -->
|
8 |
+
<!-- markdownlint-disable no-duplicate-header -->
|
9 |
+
|
10 |
+
<div align="center">
|
11 |
+
<img src="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true" width="60%" alt="DeepSeek-V3" />
|
12 |
+
</div>
|
13 |
+
<hr>
|
14 |
+
<div align="center" style="line-height: 1;">
|
15 |
+
<a href="https://www.deepseek.com/" target="_blank" style="margin: 2px;">
|
16 |
+
<img alt="Homepage" src="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/badge.svg?raw=true" style="display: inline-block; vertical-align: middle;"/>
|
17 |
+
</a>
|
18 |
+
<a href="https://chat.deepseek.com/" target="_blank" style="margin: 2px;">
|
19 |
+
<img alt="Chat" src="https://img.shields.io/badge/🤖%20Chat-DeepSeek%20R1-536af5?color=536af5&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
|
20 |
+
</a>
|
21 |
+
<a href="https://huggingface.co/deepseek-ai" target="_blank" style="margin: 2px;">
|
22 |
+
<img alt="Hugging Face" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
|
23 |
+
</a>
|
24 |
+
</div>
|
25 |
+
|
26 |
+
<div align="center" style="line-height: 1;">
|
27 |
+
<a href="https://discord.gg/Tc7c45Zzu5" target="_blank" style="margin: 2px;">
|
28 |
+
<img alt="Discord" src="https://img.shields.io/badge/Discord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da" style="display: inline-block; vertical-align: middle;"/>
|
29 |
+
</a>
|
30 |
+
<a href="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/qr.jpeg?raw=true" target="_blank" style="margin: 2px;">
|
31 |
+
<img alt="Wechat" src="https://img.shields.io/badge/WeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
|
32 |
+
</a>
|
33 |
+
<a href="https://twitter.com/deepseek_ai" target="_blank" style="margin: 2px;">
|
34 |
+
<img alt="Twitter Follow" src="https://img.shields.io/badge/Twitter-deepseek_ai-white?logo=x&logoColor=white" style="display: inline-block; vertical-align: middle;"/>
|
35 |
+
</a>
|
36 |
+
</div>
|
37 |
+
|
38 |
+
<div align="center" style="line-height: 1;">
|
39 |
+
<a href="https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE" style="margin: 2px;">
|
40 |
+
<img alt="License" src="https://img.shields.io/badge/License-MIT-f5de53?&color=f5de53" style="display: inline-block; vertical-align: middle;"/>
|
41 |
+
</a>
|
42 |
+
</div>
|
43 |
+
|
44 |
+
|
45 |
+
<p align="center">
|
46 |
+
<a href="https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf"><b>Paper Link</b>👁️</a>
|
47 |
+
</p>
|
48 |
+
|
49 |
+
|
50 |
+
## 1. Introduction
|
51 |
+
|
52 |
+
We introduce our first-generation reasoning models, DeepSeek-R1-Zero and DeepSeek-R1.
|
53 |
+
DeepSeek-R1-Zero, a model trained via large-scale reinforcement learning (RL) without supervised fine-tuning (SFT) as a preliminary step, demonstrated remarkable performance on reasoning.
|
54 |
+
With RL, DeepSeek-R1-Zero naturally emerged with numerous powerful and interesting reasoning behaviors.
|
55 |
+
However, DeepSeek-R1-Zero encounters challenges such as endless repetition, poor readability, and language mixing. To address these issues and further enhance reasoning performance,
|
56 |
+
we introduce DeepSeek-R1, which incorporates cold-start data before RL.
|
57 |
+
DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks.
|
58 |
+
To support the research community, we have open-sourced DeepSeek-R1-Zero, DeepSeek-R1, and six dense models distilled from DeepSeek-R1 based on Llama and Qwen. DeepSeek-R1-Distill-Qwen-32B outperforms OpenAI-o1-mini across various benchmarks, achieving new state-of-the-art results for dense models.
|
59 |
+
|
60 |
+
**NOTE: Before running DeepSeek-R1 series models locally, we kindly recommend reviewing the [Usage Recommendation](#usage-recommendations) section.**
|
61 |
+
|
62 |
+
<p align="center">
|
63 |
+
<img width="80%" src="figures/benchmark.jpg">
|
64 |
+
</p>
|
65 |
+
|
66 |
+
## 2. Model Summary
|
67 |
+
|
68 |
+
---
|
69 |
+
|
70 |
+
**Post-Training: Large-Scale Reinforcement Learning on the Base Model**
|
71 |
+
|
72 |
+
- We directly apply reinforcement learning (RL) to the base model without relying on supervised fine-tuning (SFT) as a preliminary step. This approach allows the model to explore chain-of-thought (CoT) for solving complex problems, resulting in the development of DeepSeek-R1-Zero. DeepSeek-R1-Zero demonstrates capabilities such as self-verification, reflection, and generating long CoTs, marking a significant milestone for the research community. Notably, it is the first open research to validate that reasoning capabilities of LLMs can be incentivized purely through RL, without the need for SFT. This breakthrough paves the way for future advancements in this area.
|
73 |
+
|
74 |
+
- We introduce our pipeline to develop DeepSeek-R1. The pipeline incorporates two RL stages aimed at discovering improved reasoning patterns and aligning with human preferences, as well as two SFT stages that serve as the seed for the model's reasoning and non-reasoning capabilities.
|
75 |
+
We believe the pipeline will benefit the industry by creating better models.
|
76 |
+
|
77 |
+
---
|
78 |
+
|
79 |
+
**Distillation: Smaller Models Can Be Powerful Too**
|
80 |
+
|
81 |
+
- We demonstrate that the reasoning patterns of larger models can be distilled into smaller models, resulting in better performance compared to the reasoning patterns discovered through RL on small models. The open source DeepSeek-R1, as well as its API, will benefit the research community to distill better smaller models in the future.
|
82 |
+
- Using the reasoning data generated by DeepSeek-R1, we fine-tuned several dense models that are widely used in the research community. The evaluation results demonstrate that the distilled smaller dense models perform exceptionally well on benchmarks. We open-source distilled 1.5B, 7B, 8B, 14B, 32B, and 70B checkpoints based on Qwen2.5 and Llama3 series to the community.
|
83 |
+
|
84 |
+
## 3. Model Downloads
|
85 |
+
|
86 |
+
### DeepSeek-R1 Models
|
87 |
+
|
88 |
+
<div align="center">
|
89 |
+
|
90 |
+
| **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download** |
|
91 |
+
| :------------: | :------------: | :------------: | :------------: | :------------: |
|
92 |
+
| DeepSeek-R1-Zero | 671B | 37B | 128K | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Zero) |
|
93 |
+
| DeepSeek-R1 | 671B | 37B | 128K | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1) |
|
94 |
+
|
95 |
+
</div>
|
96 |
+
|
97 |
+
DeepSeek-R1-Zero & DeepSeek-R1 are trained based on DeepSeek-V3-Base.
|
98 |
+
For more details regarding the model architecture, please refer to [DeepSeek-V3](https://github.com/deepseek-ai/DeepSeek-V3) repository.
|
99 |
+
|
100 |
+
### DeepSeek-R1-Distill Models
|
101 |
+
|
102 |
+
<div align="center">
|
103 |
+
|
104 |
+
| **Model** | **Base Model** | **Download** |
|
105 |
+
| :------------: | :------------: | :------------: |
|
106 |
+
| DeepSeek-R1-Distill-Qwen-1.5B | [Qwen2.5-Math-1.5B](https://huggingface.co/Qwen/Qwen2.5-Math-1.5B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) |
|
107 |
+
| DeepSeek-R1-Distill-Qwen-7B | [Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) |
|
108 |
+
| DeepSeek-R1-Distill-Llama-8B | [Llama-3.1-8B](https://huggingface.co/meta-llama/Llama-3.1-8B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B) |
|
109 |
+
| DeepSeek-R1-Distill-Qwen-14B | [Qwen2.5-14B](https://huggingface.co/Qwen/Qwen2.5-14B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B) |
|
110 |
+
|DeepSeek-R1-Distill-Qwen-32B | [Qwen2.5-32B](https://huggingface.co/Qwen/Qwen2.5-32B) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) |
|
111 |
+
| DeepSeek-R1-Distill-Llama-70B | [Llama-3.3-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) | [🤗 HuggingFace](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B) |
|
112 |
+
|
113 |
+
</div>
|
114 |
+
|
115 |
+
DeepSeek-R1-Distill models are fine-tuned based on open-source models, using samples generated by DeepSeek-R1.
|
116 |
+
We slightly change their configs and tokenizers. Please use our setting to run these models.
|
117 |
+
|
118 |
+
## 4. Evaluation Results
|
119 |
+
|
120 |
+
### DeepSeek-R1-Evaluation
|
121 |
+
For all our models, the maximum generation length is set to 32,768 tokens. For benchmarks requiring sampling, we use a temperature of $0.6$, a top-p value of $0.95$, and generate 64 responses per query to estimate pass@1.
|
122 |
+
<div align="center">
|
123 |
+
|
124 |
+
|
125 |
+
| Category | Benchmark (Metric) | Claude-3.5-Sonnet-1022 | GPT-4o 0513 | DeepSeek V3 | OpenAI o1-mini | OpenAI o1-1217 | DeepSeek R1 |
|
126 |
+
|----------|-------------------|----------------------|------------|--------------|----------------|------------|--------------|
|
127 |
+
| | Architecture | - | - | MoE | - | - | MoE |
|
128 |
+
| | # Activated Params | - | - | 37B | - | - | 37B |
|
129 |
+
| | # Total Params | - | - | 671B | - | - | 671B |
|
130 |
+
| English | MMLU (Pass@1) | 88.3 | 87.2 | 88.5 | 85.2 | **91.8** | 90.8 |
|
131 |
+
| | MMLU-Redux (EM) | 88.9 | 88.0 | 89.1 | 86.7 | - | **92.9** |
|
132 |
+
| | MMLU-Pro (EM) | 78.0 | 72.6 | 75.9 | 80.3 | - | **84.0** |
|
133 |
+
| | DROP (3-shot F1) | 88.3 | 83.7 | 91.6 | 83.9 | 90.2 | **92.2** |
|
134 |
+
| | IF-Eval (Prompt Strict) | **86.5** | 84.3 | 86.1 | 84.8 | - | 83.3 |
|
135 |
+
| | GPQA-Diamond (Pass@1) | 65.0 | 49.9 | 59.1 | 60.0 | **75.7** | 71.5 |
|
136 |
+
| | SimpleQA (Correct) | 28.4 | 38.2 | 24.9 | 7.0 | **47.0** | 30.1 |
|
137 |
+
| | FRAMES (Acc.) | 72.5 | 80.5 | 73.3 | 76.9 | - | **82.5** |
|
138 |
+
| | AlpacaEval2.0 (LC-winrate) | 52.0 | 51.1 | 70.0 | 57.8 | - | **87.6** |
|
139 |
+
| | ArenaHard (GPT-4-1106) | 85.2 | 80.4 | 85.5 | 92.0 | - | **92.3** |
|
140 |
+
| Code | LiveCodeBench (Pass@1-COT) | 33.8 | 34.2 | - | 53.8 | 63.4 | **65.9** |
|
141 |
+
| | Codeforces (Percentile) | 20.3 | 23.6 | 58.7 | 93.4 | **96.6** | 96.3 |
|
142 |
+
| | Codeforces (Rating) | 717 | 759 | 1134 | 1820 | **2061** | 2029 |
|
143 |
+
| | SWE Verified (Resolved) | **50.8** | 38.8 | 42.0 | 41.6 | 48.9 | 49.2 |
|
144 |
+
| | Aider-Polyglot (Acc.) | 45.3 | 16.0 | 49.6 | 32.9 | **61.7** | 53.3 |
|
145 |
+
| Math | AIME 2024 (Pass@1) | 16.0 | 9.3 | 39.2 | 63.6 | 79.2 | **79.8** |
|
146 |
+
| | MATH-500 (Pass@1) | 78.3 | 74.6 | 90.2 | 90.0 | 96.4 | **97.3** |
|
147 |
+
| | CNMO 2024 (Pass@1) | 13.1 | 10.8 | 43.2 | 67.6 | - | **78.8** |
|
148 |
+
| Chinese | CLUEWSC (EM) | 85.4 | 87.9 | 90.9 | 89.9 | - | **92.8** |
|
149 |
+
| | C-Eval (EM) | 76.7 | 76.0 | 86.5 | 68.9 | - | **91.8** |
|
150 |
+
| | C-SimpleQA (Correct) | 55.4 | 58.7 | **68.0** | 40.3 | - | 63.7 |
|
151 |
+
|
152 |
+
</div>
|
153 |
+
|
154 |
+
|
155 |
+
### Distilled Model Evaluation
|
156 |
+
|
157 |
+
|
158 |
+
<div align="center">
|
159 |
+
|
160 |
+
| Model | AIME 2024 pass@1 | AIME 2024 cons@64 | MATH-500 pass@1 | GPQA Diamond pass@1 | LiveCodeBench pass@1 | CodeForces rating |
|
161 |
+
|------------------------------------------|------------------|-------------------|-----------------|----------------------|----------------------|-------------------|
|
162 |
+
| GPT-4o-0513 | 9.3 | 13.4 | 74.6 | 49.9 | 32.9 | 759 |
|
163 |
+
| Claude-3.5-Sonnet-1022 | 16.0 | 26.7 | 78.3 | 65.0 | 38.9 | 717 |
|
164 |
+
| o1-mini | 63.6 | 80.0 | 90.0 | 60.0 | 53.8 | **1820** |
|
165 |
+
| QwQ-32B-Preview | 44.0 | 60.0 | 90.6 | 54.5 | 41.9 | 1316 |
|
166 |
+
| DeepSeek-R1-Distill-Qwen-1.5B | 28.9 | 52.7 | 83.9 | 33.8 | 16.9 | 954 |
|
167 |
+
| DeepSeek-R1-Distill-Qwen-7B | 55.5 | 83.3 | 92.8 | 49.1 | 37.6 | 1189 |
|
168 |
+
| DeepSeek-R1-Distill-Qwen-14B | 69.7 | 80.0 | 93.9 | 59.1 | 53.1 | 1481 |
|
169 |
+
| DeepSeek-R1-Distill-Qwen-32B | **72.6** | 83.3 | 94.3 | 62.1 | 57.2 | 1691 |
|
170 |
+
| DeepSeek-R1-Distill-Llama-8B | 50.4 | 80.0 | 89.1 | 49.0 | 39.6 | 1205 |
|
171 |
+
| DeepSeek-R1-Distill-Llama-70B | 70.0 | **86.7** | **94.5** | **65.2** | **57.5** | 1633 |
|
172 |
+
|
173 |
+
</div>
|
174 |
+
|
175 |
+
|
176 |
+
## 5. Chat Website & API Platform
|
177 |
+
You can chat with DeepSeek-R1 on DeepSeek's official website: [chat.deepseek.com](https://chat.deepseek.com), and switch on the button "DeepThink"
|
178 |
+
|
179 |
+
We also provide OpenAI-Compatible API at DeepSeek Platform: [platform.deepseek.com](https://platform.deepseek.com/)
|
180 |
+
|
181 |
+
## 6. How to Run Locally
|
182 |
+
|
183 |
+
### DeepSeek-R1 Models
|
184 |
+
|
185 |
+
Please visit [DeepSeek-V3](https://github.com/deepseek-ai/DeepSeek-V3) repo for more information about running DeepSeek-R1 locally.
|
186 |
+
|
187 |
+
**NOTE: Hugging Face's Transformers has not been directly supported yet.**
|
188 |
+
|
189 |
+
### DeepSeek-R1-Distill Models
|
190 |
+
|
191 |
+
DeepSeek-R1-Distill models can be utilized in the same manner as Qwen or Llama models.
|
192 |
+
|
193 |
+
For instance, you can easily start a service using [vLLM](https://github.com/vllm-project/vllm):
|
194 |
+
|
195 |
+
```shell
|
196 |
+
vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-32B --tensor-parallel-size 2 --max-model-len 32768 --enforce-eager
|
197 |
+
```
|
198 |
+
|
199 |
+
You can also easily start a service using [SGLang](https://github.com/sgl-project/sglang)
|
200 |
+
|
201 |
+
```bash
|
202 |
+
python3 -m sglang.launch_server --model deepseek-ai/DeepSeek-R1-Distill-Qwen-32B --trust-remote-code --tp 2
|
203 |
+
```
|
204 |
+
|
205 |
+
### Usage Recommendations
|
206 |
+
|
207 |
+
**We recommend adhering to the following configurations when utilizing the DeepSeek-R1 series models, including benchmarking, to achieve the expected performance:**
|
208 |
+
|
209 |
+
1. Set the temperature within the range of 0.5-0.7 (0.6 is recommended) to prevent endless repetitions or incoherent outputs.
|
210 |
+
2. **Avoid adding a system prompt; all instructions should be contained within the user prompt.**
|
211 |
+
3. For mathematical problems, it is advisable to include a directive in your prompt such as: "Please reason step by step, and put your final answer within \boxed{}."
|
212 |
+
4. When evaluating model performance, it is recommended to conduct multiple tests and average the results.
|
213 |
+
|
214 |
+
Additionally, we have observed that the DeepSeek-R1 series models tend to bypass thinking pattern (i.e., outputting "\<think\>\n\n\</think\>") when responding to certain queries, which can adversely affect the model's performance.
|
215 |
+
**To ensure that the model engages in thorough reasoning, we recommend enforcing the model to initiate its response with "\<think\>\n" at the beginning of every output.**
|
216 |
+
|
217 |
+
## 7. License
|
218 |
+
This code repository and the model weights are licensed under the [MIT License](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/LICENSE).
|
219 |
+
DeepSeek-R1 series support commercial use, allow for any modifications and derivative works, including, but not limited to, distillation for training other LLMs. Please note that:
|
220 |
+
- DeepSeek-R1-Distill-Qwen-1.5B, DeepSeek-R1-Distill-Qwen-7B, DeepSeek-R1-Distill-Qwen-14B and DeepSeek-R1-Distill-Qwen-32B are derived from [Qwen-2.5 series](https://github.com/QwenLM/Qwen2.5), which are originally licensed under [Apache 2.0 License](https://huggingface.co/Qwen/Qwen2.5-1.5B/blob/main/LICENSE), and now finetuned with 800k samples curated with DeepSeek-R1.
|
221 |
+
- DeepSeek-R1-Distill-Llama-8B is derived from Llama3.1-8B-Base and is originally licensed under [llama3.1 license](https://huggingface.co/meta-llama/Llama-3.1-8B/blob/main/LICENSE).
|
222 |
+
- DeepSeek-R1-Distill-Llama-70B is derived from Llama3.3-70B-Instruct and is originally licensed under [llama3.3 license](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct/blob/main/LICENSE).
|
223 |
+
|
224 |
+
## 8. Citation
|
225 |
+
```
|
226 |
+
@misc{deepseekai2025deepseekr1incentivizingreasoningcapability,
|
227 |
+
title={DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning},
|
228 |
+
author={DeepSeek-AI},
|
229 |
+
year={2025},
|
230 |
+
eprint={2501.12948},
|
231 |
+
archivePrefix={arXiv},
|
232 |
+
primaryClass={cs.CL},
|
233 |
+
url={https://arxiv.org/abs/2501.12948},
|
234 |
+
}
|
235 |
+
|
236 |
+
```
|
237 |
+
|
238 |
+
## 9. Contact
|
239 |
+
If you have any questions, please raise an issue or contact us at [[email protected]]([email protected]).
|
config.json
ADDED
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"architectures": [
|
3 |
+
"DeepseekV3ForCausalLM"
|
4 |
+
],
|
5 |
+
"attention_bias": false,
|
6 |
+
"attention_dropout": 0.0,
|
7 |
+
"auto_map": {
|
8 |
+
"AutoConfig": "configuration_deepseek.DeepseekV3Config",
|
9 |
+
"AutoModel": "modeling_deepseek.DeepseekV3Model",
|
10 |
+
"AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
|
11 |
+
},
|
12 |
+
"aux_loss_alpha": 0.001,
|
13 |
+
"bos_token_id": 0,
|
14 |
+
"eos_token_id": 1,
|
15 |
+
"ep_size": 1,
|
16 |
+
"first_k_dense_replace": 3,
|
17 |
+
"hidden_act": "silu",
|
18 |
+
"hidden_size": 7168,
|
19 |
+
"initializer_range": 0.02,
|
20 |
+
"intermediate_size": 18432,
|
21 |
+
"kv_lora_rank": 512,
|
22 |
+
"max_position_embeddings": 163840,
|
23 |
+
"model_type": "deepseek_v3",
|
24 |
+
"moe_intermediate_size": 2048,
|
25 |
+
"moe_layer_freq": 1,
|
26 |
+
"n_group": 8,
|
27 |
+
"n_routed_experts": 256,
|
28 |
+
"n_shared_experts": 1,
|
29 |
+
"norm_topk_prob": true,
|
30 |
+
"num_attention_heads": 128,
|
31 |
+
"num_experts_per_tok": 8,
|
32 |
+
"num_hidden_layers": 61,
|
33 |
+
"num_key_value_heads": 128,
|
34 |
+
"num_nextn_predict_layers": 1,
|
35 |
+
"pretraining_tp": 1,
|
36 |
+
"q_lora_rank": 1536,
|
37 |
+
"qk_nope_head_dim": 128,
|
38 |
+
"qk_rope_head_dim": 64,
|
39 |
+
"quantization_config": {
|
40 |
+
"activation_scheme": "dynamic",
|
41 |
+
"fmt": "e4m3",
|
42 |
+
"quant_method": "fp8",
|
43 |
+
"weight_block_size": [
|
44 |
+
128,
|
45 |
+
128
|
46 |
+
]
|
47 |
+
},
|
48 |
+
"rms_norm_eps": 1e-06,
|
49 |
+
"rope_scaling": {
|
50 |
+
"beta_fast": 32,
|
51 |
+
"beta_slow": 1,
|
52 |
+
"factor": 40,
|
53 |
+
"mscale": 1.0,
|
54 |
+
"mscale_all_dim": 1.0,
|
55 |
+
"original_max_position_embeddings": 4096,
|
56 |
+
"type": "yarn"
|
57 |
+
},
|
58 |
+
"rope_theta": 10000,
|
59 |
+
"routed_scaling_factor": 2.5,
|
60 |
+
"scoring_func": "sigmoid",
|
61 |
+
"seq_aux": true,
|
62 |
+
"tie_word_embeddings": false,
|
63 |
+
"topk_group": 4,
|
64 |
+
"topk_method": "noaux_tc",
|
65 |
+
"torch_dtype": "bfloat16",
|
66 |
+
"transformers_version": "4.46.3",
|
67 |
+
"use_cache": true,
|
68 |
+
"v_head_dim": 128,
|
69 |
+
"vocab_size": 129280
|
70 |
+
}
|
configuration_deepseek.py
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers.configuration_utils import PretrainedConfig
|
2 |
+
from transformers.utils import logging
|
3 |
+
|
4 |
+
logger = logging.get_logger(__name__)
|
5 |
+
|
6 |
+
DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
|
7 |
+
class DeepseekV3Config(PretrainedConfig):
|
8 |
+
r"""
|
9 |
+
This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate an DeepSeek
|
10 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
11 |
+
defaults will yield a similar configuration to that of the DeepSeek-V3.
|
12 |
+
|
13 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
14 |
+
documentation from [`PretrainedConfig`] for more information.
|
15 |
+
|
16 |
+
|
17 |
+
Args:
|
18 |
+
vocab_size (`int`, *optional*, defaults to 129280):
|
19 |
+
Vocabulary size of the Deep model. Defines the number of different tokens that can be represented by the
|
20 |
+
`inputs_ids` passed when calling [`DeepseekV3Model`]
|
21 |
+
hidden_size (`int`, *optional*, defaults to 4096):
|
22 |
+
Dimension of the hidden representations.
|
23 |
+
intermediate_size (`int`, *optional*, defaults to 11008):
|
24 |
+
Dimension of the MLP representations.
|
25 |
+
moe_intermediate_size (`int`, *optional*, defaults to 1407):
|
26 |
+
Dimension of the MoE representations.
|
27 |
+
num_hidden_layers (`int`, *optional*, defaults to 32):
|
28 |
+
Number of hidden layers in the Transformer decoder.
|
29 |
+
num_nextn_predict_layers (`int`, *optional*, defaults to 1):
|
30 |
+
Number of nextn predict layers in the DeepSeekV3 Model.
|
31 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
32 |
+
Number of attention heads for each attention layer in the Transformer decoder.
|
33 |
+
n_shared_experts (`int`, *optional*, defaults to None):
|
34 |
+
Number of shared experts, None means dense model.
|
35 |
+
n_routed_experts (`int`, *optional*, defaults to None):
|
36 |
+
Number of routed experts, None means dense model.
|
37 |
+
routed_scaling_factor (`float`, *optional*, defaults to 1.0):
|
38 |
+
Scaling factor or routed experts.
|
39 |
+
topk_method (`str`, *optional*, defaults to `gready`):
|
40 |
+
Topk method used in routed gate.
|
41 |
+
n_group (`int`, *optional*, defaults to None):
|
42 |
+
Number of groups for routed experts.
|
43 |
+
topk_group (`int`, *optional*, defaults to None):
|
44 |
+
Number of selected groups for each token(for each token, ensuring the selected experts is only within `topk_group` groups).
|
45 |
+
num_experts_per_tok (`int`, *optional*, defaults to None):
|
46 |
+
Number of selected experts, None means dense model.
|
47 |
+
moe_layer_freq (`int`, *optional*, defaults to 1):
|
48 |
+
The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
|
49 |
+
first_k_dense_replace (`int`, *optional*, defaults to 0):
|
50 |
+
Number of dense layers in shallow layers(embed->dense->dense->...->dense->moe->moe...->lm_head).
|
51 |
+
\--k dense layers--/
|
52 |
+
norm_topk_prob (`bool`, *optional*, defaults to False):
|
53 |
+
Whether to normalize the weights of the routed experts.
|
54 |
+
scoring_func (`str`, *optional*, defaults to 'softmax'):
|
55 |
+
Method of computing expert weights.
|
56 |
+
aux_loss_alpha (`float`, *optional*, defaults to 0.001):
|
57 |
+
Auxiliary loss weight coefficient.
|
58 |
+
seq_aux = (`bool`, *optional*, defaults to True):
|
59 |
+
Whether to compute the auxiliary loss for each individual sample.
|
60 |
+
num_key_value_heads (`int`, *optional*):
|
61 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
62 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
63 |
+
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
64 |
+
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
65 |
+
by meanpooling all the original heads within that group. For more details checkout [this
|
66 |
+
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
|
67 |
+
`num_attention_heads`.
|
68 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
69 |
+
The non-linear activation function (function or string) in the decoder.
|
70 |
+
max_position_embeddings (`int`, *optional*, defaults to 2048):
|
71 |
+
The maximum sequence length that this model might ever be used with.
|
72 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
73 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
74 |
+
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
|
75 |
+
The epsilon used by the rms normalization layers.
|
76 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
77 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
78 |
+
relevant if `config.is_decoder=True`.
|
79 |
+
pad_token_id (`int`, *optional*):
|
80 |
+
Padding token id.
|
81 |
+
bos_token_id (`int`, *optional*, defaults to 1):
|
82 |
+
Beginning of stream token id.
|
83 |
+
eos_token_id (`int`, *optional*, defaults to 2):
|
84 |
+
End of stream token id.
|
85 |
+
pretraining_tp (`int`, *optional*, defaults to 1):
|
86 |
+
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
|
87 |
+
document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
|
88 |
+
necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
|
89 |
+
issue](https://github.com/pytorch/pytorch/issues/76232).
|
90 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
91 |
+
Whether to tie weight embeddings
|
92 |
+
rope_theta (`float`, *optional*, defaults to 10000.0):
|
93 |
+
The base period of the RoPE embeddings.
|
94 |
+
rope_scaling (`Dict`, *optional*):
|
95 |
+
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
|
96 |
+
strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
|
97 |
+
`{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
|
98 |
+
`max_position_embeddings` to the expected new maximum.
|
99 |
+
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
|
100 |
+
Whether to use a bias in the query, key, value and output projection layers during self-attention.
|
101 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
102 |
+
The dropout ratio for the attention probabilities.
|
103 |
+
|
104 |
+
```python
|
105 |
+
>>> from transformers import DeepseekV3Model, DeepseekV3Config
|
106 |
+
|
107 |
+
>>> # Initializing a Deepseek-V3 style configuration
|
108 |
+
>>> configuration = DeepseekV3Config()
|
109 |
+
|
110 |
+
>>> # Accessing the model configuration
|
111 |
+
>>> configuration = model.config
|
112 |
+
```"""
|
113 |
+
|
114 |
+
model_type = "deepseek_v3"
|
115 |
+
keys_to_ignore_at_inference = ["past_key_values"]
|
116 |
+
|
117 |
+
def __init__(
|
118 |
+
self,
|
119 |
+
vocab_size=129280,
|
120 |
+
hidden_size=7168,
|
121 |
+
intermediate_size=18432,
|
122 |
+
moe_intermediate_size = 2048,
|
123 |
+
num_hidden_layers=61,
|
124 |
+
num_nextn_predict_layers=1,
|
125 |
+
num_attention_heads=128,
|
126 |
+
num_key_value_heads=128,
|
127 |
+
n_shared_experts = 1,
|
128 |
+
n_routed_experts = 256,
|
129 |
+
ep_size = 1,
|
130 |
+
routed_scaling_factor = 2.5,
|
131 |
+
kv_lora_rank = 512,
|
132 |
+
q_lora_rank = 1536,
|
133 |
+
qk_rope_head_dim = 64,
|
134 |
+
v_head_dim = 128,
|
135 |
+
qk_nope_head_dim = 128,
|
136 |
+
topk_method = 'noaux_tc',
|
137 |
+
n_group = 8,
|
138 |
+
topk_group = 4,
|
139 |
+
num_experts_per_tok = 8,
|
140 |
+
moe_layer_freq = 1,
|
141 |
+
first_k_dense_replace = 3,
|
142 |
+
norm_topk_prob = True,
|
143 |
+
scoring_func = 'sigmoid',
|
144 |
+
aux_loss_alpha = 0.001,
|
145 |
+
seq_aux = True,
|
146 |
+
hidden_act="silu",
|
147 |
+
max_position_embeddings=4096,
|
148 |
+
initializer_range=0.02,
|
149 |
+
rms_norm_eps=1e-6,
|
150 |
+
use_cache=True,
|
151 |
+
pad_token_id=None,
|
152 |
+
bos_token_id=0,
|
153 |
+
eos_token_id=1,
|
154 |
+
pretraining_tp=1,
|
155 |
+
tie_word_embeddings=False,
|
156 |
+
rope_theta=10000.0,
|
157 |
+
rope_scaling=None,
|
158 |
+
attention_bias=False,
|
159 |
+
attention_dropout=0.0,
|
160 |
+
**kwargs,
|
161 |
+
):
|
162 |
+
self.vocab_size = vocab_size
|
163 |
+
self.max_position_embeddings = max_position_embeddings
|
164 |
+
self.hidden_size = hidden_size
|
165 |
+
self.intermediate_size = intermediate_size
|
166 |
+
self.moe_intermediate_size = moe_intermediate_size
|
167 |
+
self.num_hidden_layers = num_hidden_layers
|
168 |
+
self.num_nextn_predict_layers = num_nextn_predict_layers
|
169 |
+
self.num_attention_heads = num_attention_heads
|
170 |
+
self.n_shared_experts = n_shared_experts
|
171 |
+
self.n_routed_experts = n_routed_experts
|
172 |
+
self.ep_size = ep_size
|
173 |
+
self.routed_scaling_factor = routed_scaling_factor
|
174 |
+
self.kv_lora_rank = kv_lora_rank
|
175 |
+
self.q_lora_rank = q_lora_rank
|
176 |
+
self.qk_rope_head_dim = qk_rope_head_dim
|
177 |
+
self.v_head_dim = v_head_dim
|
178 |
+
self.qk_nope_head_dim = qk_nope_head_dim
|
179 |
+
self.topk_method = topk_method
|
180 |
+
self.n_group = n_group
|
181 |
+
self.topk_group = topk_group
|
182 |
+
self.num_experts_per_tok = num_experts_per_tok
|
183 |
+
self.moe_layer_freq = moe_layer_freq
|
184 |
+
self.first_k_dense_replace = first_k_dense_replace
|
185 |
+
self.norm_topk_prob = norm_topk_prob
|
186 |
+
self.scoring_func = scoring_func
|
187 |
+
self.aux_loss_alpha = aux_loss_alpha
|
188 |
+
self.seq_aux = seq_aux
|
189 |
+
# for backward compatibility
|
190 |
+
if num_key_value_heads is None:
|
191 |
+
num_key_value_heads = num_attention_heads
|
192 |
+
|
193 |
+
self.num_key_value_heads = num_key_value_heads
|
194 |
+
self.hidden_act = hidden_act
|
195 |
+
self.initializer_range = initializer_range
|
196 |
+
self.rms_norm_eps = rms_norm_eps
|
197 |
+
self.pretraining_tp = pretraining_tp
|
198 |
+
self.use_cache = use_cache
|
199 |
+
self.rope_theta = rope_theta
|
200 |
+
self.rope_scaling = rope_scaling
|
201 |
+
self.attention_bias = attention_bias
|
202 |
+
self.attention_dropout = attention_dropout
|
203 |
+
|
204 |
+
super().__init__(
|
205 |
+
pad_token_id=pad_token_id,
|
206 |
+
bos_token_id=bos_token_id,
|
207 |
+
eos_token_id=eos_token_id,
|
208 |
+
tie_word_embeddings=tie_word_embeddings,
|
209 |
+
**kwargs,
|
210 |
+
)
|
figures/benchmark.jpg
ADDED
![]() |
generation_config.json
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 0,
|
4 |
+
"eos_token_id": 1,
|
5 |
+
"do_sample": true,
|
6 |
+
"temperature": 0.6,
|
7 |
+
"top_p": 0.95,
|
8 |
+
"transformers_version": "4.39.3"
|
9 |
+
}
|
model-00039-of-000163.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8568c45b9c205c23707eaa86543847c5e65a01a8a5d86dbb4374729ffcf436c1
|
3 |
+
size 2801795072
|
model.safetensors.index.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
modeling_deepseek.py
ADDED
@@ -0,0 +1,1717 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 DeepSeek-AI and The HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
5 |
+
# and OPT implementations in this library. It has been modified from its
|
6 |
+
# original forms to accommodate minor architectural differences compared
|
7 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
8 |
+
#
|
9 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
10 |
+
# you may not use this file except in compliance with the License.
|
11 |
+
# You may obtain a copy of the License at
|
12 |
+
#
|
13 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
14 |
+
#
|
15 |
+
# Unless required by applicable law or agreed to in writing, software
|
16 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
17 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
18 |
+
# See the License for the specific language governing permissions and
|
19 |
+
# limitations under the License.
|
20 |
+
|
21 |
+
""" PyTorch DeepSeek model."""
|
22 |
+
import math
|
23 |
+
import os
|
24 |
+
import re
|
25 |
+
import warnings
|
26 |
+
from typing import List, Optional, Tuple, Union
|
27 |
+
|
28 |
+
import requests
|
29 |
+
import torch
|
30 |
+
import torch.nn.functional as F
|
31 |
+
import torch.utils.checkpoint
|
32 |
+
from torch import nn
|
33 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
34 |
+
|
35 |
+
from transformers.activations import ACT2FN
|
36 |
+
from transformers.cache_utils import Cache, DynamicCache
|
37 |
+
from transformers.modeling_attn_mask_utils import (
|
38 |
+
AttentionMaskConverter,
|
39 |
+
_prepare_4d_attention_mask,
|
40 |
+
_prepare_4d_causal_attention_mask,
|
41 |
+
)
|
42 |
+
from transformers.modeling_outputs import (
|
43 |
+
BaseModelOutputWithPast,
|
44 |
+
CausalLMOutputWithPast,
|
45 |
+
SequenceClassifierOutputWithPast,
|
46 |
+
)
|
47 |
+
from transformers.modeling_utils import PreTrainedModel
|
48 |
+
from transformers.pytorch_utils import (
|
49 |
+
ALL_LAYERNORM_LAYERS,
|
50 |
+
is_torch_greater_or_equal_than_1_13,
|
51 |
+
)
|
52 |
+
from transformers.utils import (
|
53 |
+
add_start_docstrings,
|
54 |
+
add_start_docstrings_to_model_forward,
|
55 |
+
is_flash_attn_2_available,
|
56 |
+
is_flash_attn_greater_or_equal_2_10,
|
57 |
+
logging,
|
58 |
+
replace_return_docstrings,
|
59 |
+
)
|
60 |
+
from transformers.utils.import_utils import is_torch_fx_available
|
61 |
+
from .configuration_deepseek import DeepseekV3Config
|
62 |
+
import torch.distributed as dist
|
63 |
+
import numpy as np
|
64 |
+
|
65 |
+
if is_flash_attn_2_available():
|
66 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
67 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
68 |
+
|
69 |
+
|
70 |
+
# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
|
71 |
+
# It means that the function will not be traced through and simply appear as a node in the graph.
|
72 |
+
if is_torch_fx_available():
|
73 |
+
if not is_torch_greater_or_equal_than_1_13:
|
74 |
+
import torch.fx
|
75 |
+
|
76 |
+
_prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask)
|
77 |
+
|
78 |
+
|
79 |
+
logger = logging.get_logger(__name__)
|
80 |
+
|
81 |
+
_CONFIG_FOR_DOC = "DeepseekV3Config"
|
82 |
+
|
83 |
+
|
84 |
+
def _get_unpad_data(attention_mask):
|
85 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
86 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
87 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
88 |
+
cu_seqlens = F.pad(
|
89 |
+
torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0)
|
90 |
+
)
|
91 |
+
return (
|
92 |
+
indices,
|
93 |
+
cu_seqlens,
|
94 |
+
max_seqlen_in_batch,
|
95 |
+
)
|
96 |
+
|
97 |
+
|
98 |
+
class DeepseekV3RMSNorm(nn.Module):
|
99 |
+
def __init__(self, hidden_size, eps=1e-6):
|
100 |
+
"""
|
101 |
+
DeepseekV3RMSNorm is equivalent to T5LayerNorm
|
102 |
+
"""
|
103 |
+
super().__init__()
|
104 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
105 |
+
self.variance_epsilon = eps
|
106 |
+
|
107 |
+
def forward(self, hidden_states):
|
108 |
+
input_dtype = hidden_states.dtype
|
109 |
+
hidden_states = hidden_states.to(torch.float32)
|
110 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
111 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
112 |
+
return self.weight * hidden_states.to(input_dtype)
|
113 |
+
|
114 |
+
|
115 |
+
ALL_LAYERNORM_LAYERS.append(DeepseekV3RMSNorm)
|
116 |
+
|
117 |
+
|
118 |
+
class DeepseekV3RotaryEmbedding(nn.Module):
|
119 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
120 |
+
super().__init__()
|
121 |
+
|
122 |
+
self.dim = dim
|
123 |
+
self.max_position_embeddings = max_position_embeddings
|
124 |
+
self.base = base
|
125 |
+
inv_freq = 1.0 / (
|
126 |
+
self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
|
127 |
+
)
|
128 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
129 |
+
|
130 |
+
# Build here to make `torch.jit.trace` work.
|
131 |
+
self._set_cos_sin_cache(
|
132 |
+
seq_len=max_position_embeddings,
|
133 |
+
device=self.inv_freq.device,
|
134 |
+
dtype=torch.get_default_dtype(),
|
135 |
+
)
|
136 |
+
self.max_seq_len_cached = None
|
137 |
+
|
138 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
139 |
+
self.max_seq_len_cached = seq_len
|
140 |
+
t = torch.arange(
|
141 |
+
self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
|
142 |
+
)
|
143 |
+
|
144 |
+
freqs = torch.outer(t, self.inv_freq.to(t.device))
|
145 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
146 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
147 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
148 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
149 |
+
|
150 |
+
def forward(self, x, seq_len=None):
|
151 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
152 |
+
if self.max_seq_len_cached is None or seq_len > self.max_seq_len_cached:
|
153 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
154 |
+
|
155 |
+
return (
|
156 |
+
self.cos_cached[:seq_len].to(dtype=x.dtype),
|
157 |
+
self.sin_cached[:seq_len].to(dtype=x.dtype),
|
158 |
+
)
|
159 |
+
|
160 |
+
|
161 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->DeepseekV3
|
162 |
+
class DeepseekV3LinearScalingRotaryEmbedding(DeepseekV3RotaryEmbedding):
|
163 |
+
"""DeepseekV3RotaryEmbedding extended with linear scaling."""
|
164 |
+
|
165 |
+
def __init__(
|
166 |
+
self,
|
167 |
+
dim,
|
168 |
+
max_position_embeddings=2048,
|
169 |
+
base=10000,
|
170 |
+
device=None,
|
171 |
+
scaling_factor=1.0,
|
172 |
+
):
|
173 |
+
self.scaling_factor = scaling_factor
|
174 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
175 |
+
|
176 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
177 |
+
self.max_seq_len_cached = seq_len
|
178 |
+
t = torch.arange(
|
179 |
+
self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
|
180 |
+
)
|
181 |
+
t = t / self.scaling_factor
|
182 |
+
|
183 |
+
freqs = torch.outer(t, self.inv_freq)
|
184 |
+
# Different from paper, but it uses a different permutation in order to obtain the same calculation
|
185 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
186 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
187 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
188 |
+
|
189 |
+
|
190 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->DeepseekV3
|
191 |
+
class DeepseekV3DynamicNTKScalingRotaryEmbedding(DeepseekV3RotaryEmbedding):
|
192 |
+
"""DeepseekV3RotaryEmbedding extended with Dynamic NTK scaling."""
|
193 |
+
|
194 |
+
def __init__(
|
195 |
+
self,
|
196 |
+
dim,
|
197 |
+
max_position_embeddings=2048,
|
198 |
+
base=10000,
|
199 |
+
device=None,
|
200 |
+
scaling_factor=1.0,
|
201 |
+
):
|
202 |
+
self.scaling_factor = scaling_factor
|
203 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
204 |
+
|
205 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
206 |
+
self.max_seq_len_cached = seq_len
|
207 |
+
|
208 |
+
if seq_len > self.max_position_embeddings:
|
209 |
+
base = self.base * (
|
210 |
+
(self.scaling_factor * seq_len / self.max_position_embeddings)
|
211 |
+
- (self.scaling_factor - 1)
|
212 |
+
) ** (self.dim / (self.dim - 2))
|
213 |
+
inv_freq = 1.0 / (
|
214 |
+
base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
|
215 |
+
)
|
216 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
217 |
+
|
218 |
+
t = torch.arange(
|
219 |
+
self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype
|
220 |
+
)
|
221 |
+
|
222 |
+
freqs = torch.outer(t, self.inv_freq)
|
223 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
224 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
225 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
226 |
+
|
227 |
+
|
228 |
+
def rotate_half(x):
|
229 |
+
"""Rotates half the hidden dims of the input."""
|
230 |
+
x1 = x[..., : x.shape[-1] // 2]
|
231 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
232 |
+
return torch.cat((-x2, x1), dim=-1)
|
233 |
+
|
234 |
+
|
235 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
|
236 |
+
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
|
237 |
+
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
|
238 |
+
|
239 |
+
b, h, s, d = q.shape
|
240 |
+
q = q.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
|
241 |
+
k = k.view(b, h, s, d // 2, 2).transpose(4, 3).reshape(b, h, s, d)
|
242 |
+
|
243 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
244 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
245 |
+
return q_embed, k_embed
|
246 |
+
|
247 |
+
|
248 |
+
class DeepseekV3MLP(nn.Module):
|
249 |
+
def __init__(self, config, hidden_size=None, intermediate_size=None):
|
250 |
+
super().__init__()
|
251 |
+
self.config = config
|
252 |
+
self.hidden_size = config.hidden_size if hidden_size is None else hidden_size
|
253 |
+
self.intermediate_size = (
|
254 |
+
config.intermediate_size if intermediate_size is None else intermediate_size
|
255 |
+
)
|
256 |
+
|
257 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
258 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
259 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
260 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
261 |
+
|
262 |
+
def forward(self, x):
|
263 |
+
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
264 |
+
return down_proj
|
265 |
+
|
266 |
+
|
267 |
+
class MoEGate(nn.Module):
|
268 |
+
def __init__(self, config):
|
269 |
+
super().__init__()
|
270 |
+
self.config = config
|
271 |
+
self.top_k = config.num_experts_per_tok
|
272 |
+
self.n_routed_experts = config.n_routed_experts
|
273 |
+
self.routed_scaling_factor = config.routed_scaling_factor
|
274 |
+
self.scoring_func = config.scoring_func
|
275 |
+
self.seq_aux = config.seq_aux
|
276 |
+
self.topk_method = config.topk_method
|
277 |
+
self.n_group = config.n_group
|
278 |
+
self.topk_group = config.topk_group
|
279 |
+
|
280 |
+
# topk selection algorithm
|
281 |
+
self.norm_topk_prob = config.norm_topk_prob
|
282 |
+
self.gating_dim = config.hidden_size
|
283 |
+
self.weight = nn.Parameter(
|
284 |
+
torch.empty((self.n_routed_experts, self.gating_dim))
|
285 |
+
)
|
286 |
+
if self.topk_method == "noaux_tc":
|
287 |
+
self.e_score_correction_bias = nn.Parameter(
|
288 |
+
torch.empty((self.n_routed_experts))
|
289 |
+
)
|
290 |
+
self.reset_parameters()
|
291 |
+
|
292 |
+
def reset_parameters(self) -> None:
|
293 |
+
import torch.nn.init as init
|
294 |
+
|
295 |
+
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
|
296 |
+
|
297 |
+
def forward(self, hidden_states):
|
298 |
+
bsz, seq_len, h = hidden_states.shape
|
299 |
+
hidden_states = hidden_states.view(-1, h)
|
300 |
+
logits = F.linear(
|
301 |
+
hidden_states.type(torch.float32), self.weight.type(torch.float32), None
|
302 |
+
)
|
303 |
+
if self.scoring_func == "sigmoid":
|
304 |
+
scores = logits.sigmoid()
|
305 |
+
else:
|
306 |
+
raise NotImplementedError(
|
307 |
+
f"insupportable scoring function for MoE gating: {self.scoring_func}"
|
308 |
+
)
|
309 |
+
|
310 |
+
if self.topk_method == "noaux_tc":
|
311 |
+
assert not self.training
|
312 |
+
scores_for_choice = scores.view(bsz * seq_len, -1) + self.e_score_correction_bias.unsqueeze(0)
|
313 |
+
group_scores = (
|
314 |
+
scores_for_choice.view(bsz * seq_len, self.n_group, -1).topk(2, dim=-1)[0].sum(dim = -1)
|
315 |
+
)
|
316 |
+
group_idx = torch.topk(
|
317 |
+
group_scores, k=self.topk_group, dim=-1, sorted=False
|
318 |
+
)[1]
|
319 |
+
group_mask = torch.zeros_like(group_scores)
|
320 |
+
group_mask.scatter_(1, group_idx, 1)
|
321 |
+
score_mask = (
|
322 |
+
group_mask.unsqueeze(-1)
|
323 |
+
.expand(
|
324 |
+
bsz * seq_len, self.n_group, self.n_routed_experts // self.n_group
|
325 |
+
)
|
326 |
+
.reshape(bsz * seq_len, -1)
|
327 |
+
)
|
328 |
+
tmp_scores = scores_for_choice.masked_fill(~score_mask.bool(), 0.0)
|
329 |
+
_, topk_idx = torch.topk(
|
330 |
+
tmp_scores, k=self.top_k, dim=-1, sorted=False
|
331 |
+
)
|
332 |
+
topk_weight = scores.gather(1, topk_idx)
|
333 |
+
else:
|
334 |
+
raise NotImplementedError(
|
335 |
+
f"insupportable TopK function for MoE gating: {self.topk_method}"
|
336 |
+
)
|
337 |
+
|
338 |
+
if self.top_k > 1 and self.norm_topk_prob:
|
339 |
+
denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
|
340 |
+
topk_weight = topk_weight / denominator
|
341 |
+
topk_weight = topk_weight * self.routed_scaling_factor
|
342 |
+
|
343 |
+
return topk_idx, topk_weight
|
344 |
+
|
345 |
+
|
346 |
+
class DeepseekV3MoE(nn.Module):
|
347 |
+
"""
|
348 |
+
A mixed expert module containing shared experts.
|
349 |
+
"""
|
350 |
+
|
351 |
+
def __init__(self, config):
|
352 |
+
super().__init__()
|
353 |
+
self.config = config
|
354 |
+
self.num_experts_per_tok = config.num_experts_per_tok
|
355 |
+
|
356 |
+
if hasattr(config, "ep_size") and config.ep_size > 1:
|
357 |
+
assert config.ep_size == dist.get_world_size()
|
358 |
+
self.ep_size = config.ep_size
|
359 |
+
self.experts_per_rank = config.n_routed_experts // config.ep_size
|
360 |
+
self.ep_rank = dist.get_rank()
|
361 |
+
self.experts = nn.ModuleList(
|
362 |
+
[
|
363 |
+
(
|
364 |
+
DeepseekV3MLP(
|
365 |
+
config, intermediate_size=config.moe_intermediate_size
|
366 |
+
)
|
367 |
+
if i >= self.ep_rank * self.experts_per_rank
|
368 |
+
and i < (self.ep_rank + 1) * self.experts_per_rank
|
369 |
+
else None
|
370 |
+
)
|
371 |
+
for i in range(config.n_routed_experts)
|
372 |
+
]
|
373 |
+
)
|
374 |
+
else:
|
375 |
+
self.ep_size = 1
|
376 |
+
self.experts_per_rank = config.n_routed_experts
|
377 |
+
self.ep_rank = 0
|
378 |
+
self.experts = nn.ModuleList(
|
379 |
+
[
|
380 |
+
DeepseekV3MLP(
|
381 |
+
config, intermediate_size=config.moe_intermediate_size
|
382 |
+
)
|
383 |
+
for i in range(config.n_routed_experts)
|
384 |
+
]
|
385 |
+
)
|
386 |
+
self.gate = MoEGate(config)
|
387 |
+
if config.n_shared_experts is not None:
|
388 |
+
intermediate_size = config.moe_intermediate_size * config.n_shared_experts
|
389 |
+
self.shared_experts = DeepseekV3MLP(
|
390 |
+
config=config, intermediate_size=intermediate_size
|
391 |
+
)
|
392 |
+
|
393 |
+
def forward(self, hidden_states):
|
394 |
+
identity = hidden_states
|
395 |
+
orig_shape = hidden_states.shape
|
396 |
+
topk_idx, topk_weight = self.gate(hidden_states)
|
397 |
+
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
|
398 |
+
flat_topk_idx = topk_idx.view(-1)
|
399 |
+
if not self.training:
|
400 |
+
y = self.moe_infer(hidden_states, topk_idx, topk_weight).view(*orig_shape)
|
401 |
+
if self.config.n_shared_experts is not None:
|
402 |
+
y = y + self.shared_experts(identity)
|
403 |
+
return y
|
404 |
+
|
405 |
+
@torch.no_grad()
|
406 |
+
def moe_infer(self, x, topk_ids, topk_weight):
|
407 |
+
cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts)))
|
408 |
+
cnts.scatter_(1, topk_ids, 1)
|
409 |
+
tokens_per_expert = cnts.sum(dim=0)
|
410 |
+
idxs = topk_ids.view(-1).argsort()
|
411 |
+
sorted_tokens = x[idxs // topk_ids.shape[1]]
|
412 |
+
sorted_tokens_shape = sorted_tokens.shape
|
413 |
+
if self.ep_size > 1:
|
414 |
+
tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1)
|
415 |
+
tokens_per_expert_group = tokens_per_expert.new_empty(
|
416 |
+
tokens_per_expert.shape[0]
|
417 |
+
)
|
418 |
+
dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert)
|
419 |
+
output_splits = (
|
420 |
+
tokens_per_expert_group.view(self.ep_size, -1)
|
421 |
+
.sum(1)
|
422 |
+
.cpu()
|
423 |
+
.numpy()
|
424 |
+
.tolist()
|
425 |
+
)
|
426 |
+
gathered_tokens = sorted_tokens.new_empty(
|
427 |
+
tokens_per_expert_group.sum(dim=0).cpu().item(), sorted_tokens.shape[1]
|
428 |
+
)
|
429 |
+
input_split_sizes = tokens_per_ep_rank.cpu().numpy().tolist()
|
430 |
+
dist.all_to_all(
|
431 |
+
list(gathered_tokens.split(output_splits)),
|
432 |
+
list(sorted_tokens.split(input_split_sizes)),
|
433 |
+
)
|
434 |
+
tokens_per_expert_post_gather = tokens_per_expert_group.view(
|
435 |
+
self.ep_size, self.experts_per_rank
|
436 |
+
).sum(dim=0)
|
437 |
+
gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32)
|
438 |
+
s = 0
|
439 |
+
for i, k in enumerate(tokens_per_expert_group.cpu().numpy()):
|
440 |
+
gatherd_idxs[s : s + k] = i % self.experts_per_rank
|
441 |
+
s += k
|
442 |
+
gatherd_idxs = gatherd_idxs.argsort()
|
443 |
+
sorted_tokens = gathered_tokens[gatherd_idxs]
|
444 |
+
tokens_per_expert = tokens_per_expert_post_gather
|
445 |
+
tokens_per_expert = tokens_per_expert.cpu().numpy()
|
446 |
+
|
447 |
+
outputs = []
|
448 |
+
start_idx = 0
|
449 |
+
for i, num_tokens in enumerate(tokens_per_expert):
|
450 |
+
end_idx = start_idx + num_tokens
|
451 |
+
if num_tokens == 0:
|
452 |
+
continue
|
453 |
+
expert = self.experts[i + self.ep_rank * self.experts_per_rank]
|
454 |
+
tokens_for_this_expert = sorted_tokens[start_idx:end_idx]
|
455 |
+
expert_out = expert(tokens_for_this_expert)
|
456 |
+
outputs.append(expert_out)
|
457 |
+
start_idx = end_idx
|
458 |
+
|
459 |
+
outs = torch.cat(outputs, dim=0) if len(outputs) else sorted_tokens.new_empty(0)
|
460 |
+
if self.ep_size > 1:
|
461 |
+
new_x = torch.empty_like(outs)
|
462 |
+
new_x[gatherd_idxs] = outs
|
463 |
+
gathered_tokens = new_x.new_empty(*sorted_tokens_shape)
|
464 |
+
dist.all_to_all(
|
465 |
+
list(gathered_tokens.split(input_split_sizes)),
|
466 |
+
list(new_x.split(output_splits)),
|
467 |
+
)
|
468 |
+
outs = gathered_tokens
|
469 |
+
|
470 |
+
new_x = torch.empty_like(outs)
|
471 |
+
new_x[idxs] = outs
|
472 |
+
final_out = (
|
473 |
+
new_x.view(*topk_ids.shape, -1)
|
474 |
+
.type(topk_weight.dtype)
|
475 |
+
.mul_(topk_weight.unsqueeze(dim=-1))
|
476 |
+
.sum(dim=1)
|
477 |
+
.type(new_x.dtype)
|
478 |
+
)
|
479 |
+
return final_out
|
480 |
+
|
481 |
+
|
482 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
483 |
+
"""
|
484 |
+
repeat_kv used by grouped query attention (MQA/GQA).
|
485 |
+
"""
|
486 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
487 |
+
if n_rep == 1:
|
488 |
+
return hidden_states
|
489 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(
|
490 |
+
batch, num_key_value_heads, n_rep, slen, head_dim
|
491 |
+
)
|
492 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
493 |
+
|
494 |
+
|
495 |
+
class DeepseekV3Attention(nn.Module):
|
496 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
497 |
+
|
498 |
+
def __init__(self, config: DeepseekV3Config, layer_idx: Optional[int] = None):
|
499 |
+
super().__init__()
|
500 |
+
self.config = config
|
501 |
+
self.layer_idx = layer_idx
|
502 |
+
if layer_idx is None:
|
503 |
+
logger.warning_once(
|
504 |
+
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
|
505 |
+
"lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
|
506 |
+
"when creating this class."
|
507 |
+
)
|
508 |
+
|
509 |
+
self.attention_dropout = config.attention_dropout
|
510 |
+
self.hidden_size = config.hidden_size
|
511 |
+
self.num_heads = config.num_attention_heads
|
512 |
+
|
513 |
+
self.max_position_embeddings = config.max_position_embeddings
|
514 |
+
self.rope_theta = config.rope_theta
|
515 |
+
self.q_lora_rank = config.q_lora_rank
|
516 |
+
self.qk_rope_head_dim = config.qk_rope_head_dim
|
517 |
+
self.kv_lora_rank = config.kv_lora_rank
|
518 |
+
self.v_head_dim = config.v_head_dim
|
519 |
+
self.qk_nope_head_dim = config.qk_nope_head_dim
|
520 |
+
self.q_head_dim = config.qk_nope_head_dim + config.qk_rope_head_dim
|
521 |
+
|
522 |
+
self.is_causal = True
|
523 |
+
|
524 |
+
if self.q_lora_rank is None:
|
525 |
+
self.q_proj = nn.Linear(
|
526 |
+
self.hidden_size, self.num_heads * self.q_head_dim, bias=False
|
527 |
+
)
|
528 |
+
else:
|
529 |
+
self.q_a_proj = nn.Linear(
|
530 |
+
self.hidden_size, config.q_lora_rank, bias=config.attention_bias
|
531 |
+
)
|
532 |
+
self.q_a_layernorm = DeepseekV3RMSNorm(config.q_lora_rank)
|
533 |
+
self.q_b_proj = nn.Linear(
|
534 |
+
config.q_lora_rank, self.num_heads * self.q_head_dim, bias=False
|
535 |
+
)
|
536 |
+
|
537 |
+
self.kv_a_proj_with_mqa = nn.Linear(
|
538 |
+
self.hidden_size,
|
539 |
+
config.kv_lora_rank + config.qk_rope_head_dim,
|
540 |
+
bias=config.attention_bias,
|
541 |
+
)
|
542 |
+
self.kv_a_layernorm = DeepseekV3RMSNorm(config.kv_lora_rank)
|
543 |
+
self.kv_b_proj = nn.Linear(
|
544 |
+
config.kv_lora_rank,
|
545 |
+
self.num_heads
|
546 |
+
* (self.q_head_dim - self.qk_rope_head_dim + self.v_head_dim),
|
547 |
+
bias=False,
|
548 |
+
)
|
549 |
+
|
550 |
+
self.o_proj = nn.Linear(
|
551 |
+
self.num_heads * self.v_head_dim,
|
552 |
+
self.hidden_size,
|
553 |
+
bias=config.attention_bias,
|
554 |
+
)
|
555 |
+
self._init_rope()
|
556 |
+
|
557 |
+
self.softmax_scale = self.q_head_dim ** (-0.5)
|
558 |
+
|
559 |
+
def _init_rope(self):
|
560 |
+
# Minimal demonstration, ignoring dynamic/linear yarn scaling for brevity
|
561 |
+
self.rotary_emb = DeepseekV3RotaryEmbedding(
|
562 |
+
self.qk_rope_head_dim,
|
563 |
+
max_position_embeddings=self.config.max_position_embeddings,
|
564 |
+
base=self.config.rope_theta,
|
565 |
+
)
|
566 |
+
|
567 |
+
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
568 |
+
return (
|
569 |
+
tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim)
|
570 |
+
.transpose(1, 2)
|
571 |
+
.contiguous()
|
572 |
+
)
|
573 |
+
|
574 |
+
def forward(
|
575 |
+
self,
|
576 |
+
hidden_states: torch.Tensor,
|
577 |
+
attention_mask: Optional[torch.Tensor] = None,
|
578 |
+
position_ids: Optional[torch.LongTensor] = None,
|
579 |
+
past_key_value: Optional[Cache] = None,
|
580 |
+
output_attentions: bool = False,
|
581 |
+
use_cache: bool = False,
|
582 |
+
**kwargs,
|
583 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
584 |
+
bsz, q_len, _ = hidden_states.size()
|
585 |
+
|
586 |
+
if self.q_lora_rank is None:
|
587 |
+
q = self.q_proj(hidden_states)
|
588 |
+
else:
|
589 |
+
q = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states)))
|
590 |
+
q = q.view(bsz, q_len, self.num_heads, self.q_head_dim).transpose(1, 2)
|
591 |
+
q_nope, q_pe = torch.split(
|
592 |
+
q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
|
593 |
+
)
|
594 |
+
|
595 |
+
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
|
596 |
+
compressed_kv, k_pe = torch.split(
|
597 |
+
compressed_kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1
|
598 |
+
)
|
599 |
+
k_pe = k_pe.view(bsz, q_len, 1, self.qk_rope_head_dim).transpose(1, 2)
|
600 |
+
kv = (
|
601 |
+
self.kv_b_proj(self.kv_a_layernorm(compressed_kv))
|
602 |
+
.view(bsz, q_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
|
603 |
+
.transpose(1, 2)
|
604 |
+
)
|
605 |
+
|
606 |
+
k_nope, value_states = torch.split(
|
607 |
+
kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1
|
608 |
+
)
|
609 |
+
kv_seq_len = value_states.shape[-2]
|
610 |
+
if past_key_value is not None and self.layer_idx is not None:
|
611 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
612 |
+
|
613 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
614 |
+
q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
|
615 |
+
|
616 |
+
query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
|
617 |
+
query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
|
618 |
+
query_states[:, :, :, self.qk_nope_head_dim :] = q_pe
|
619 |
+
|
620 |
+
key_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
|
621 |
+
key_states[:, :, :, : self.qk_nope_head_dim] = k_nope
|
622 |
+
key_states[:, :, :, self.qk_nope_head_dim :] = k_pe
|
623 |
+
|
624 |
+
if past_key_value is not None and self.layer_idx is not None:
|
625 |
+
cache_kwargs = {"sin": sin, "cos": cos}
|
626 |
+
key_states, value_states = past_key_value.update(
|
627 |
+
key_states, value_states, self.layer_idx, cache_kwargs
|
628 |
+
)
|
629 |
+
|
630 |
+
attn_weights = (
|
631 |
+
torch.matmul(query_states, key_states.transpose(2, 3)) * self.softmax_scale
|
632 |
+
)
|
633 |
+
|
634 |
+
if attention_mask is not None:
|
635 |
+
attn_weights = attn_weights + attention_mask
|
636 |
+
|
637 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(
|
638 |
+
query_states.dtype
|
639 |
+
)
|
640 |
+
attn_weights = nn.functional.dropout(
|
641 |
+
attn_weights, p=self.attention_dropout, training=self.training
|
642 |
+
)
|
643 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
644 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
645 |
+
attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.v_head_dim)
|
646 |
+
attn_output = self.o_proj(attn_output)
|
647 |
+
|
648 |
+
if not output_attentions:
|
649 |
+
attn_weights = None
|
650 |
+
|
651 |
+
return attn_output, attn_weights, past_key_value
|
652 |
+
|
653 |
+
|
654 |
+
class DeepseekV3FlashAttention2(DeepseekV3Attention):
|
655 |
+
"""
|
656 |
+
Omitted for brevity - see original code above if you want flash attention integration
|
657 |
+
"""
|
658 |
+
# Implementation remains the same as above...
|
659 |
+
pass
|
660 |
+
|
661 |
+
|
662 |
+
ATTENTION_CLASSES = {
|
663 |
+
"eager": DeepseekV3Attention,
|
664 |
+
"flash_attention_2": DeepseekV3FlashAttention2,
|
665 |
+
}
|
666 |
+
|
667 |
+
|
668 |
+
class DeepseekV3DecoderLayer(nn.Module):
|
669 |
+
def __init__(self, config: DeepseekV3Config, layer_idx: int):
|
670 |
+
super().__init__()
|
671 |
+
self.hidden_size = config.hidden_size
|
672 |
+
|
673 |
+
self.self_attn = ATTENTION_CLASSES[config._attn_implementation](
|
674 |
+
config=config, layer_idx=layer_idx
|
675 |
+
)
|
676 |
+
|
677 |
+
self.mlp = (
|
678 |
+
DeepseekV3MoE(config)
|
679 |
+
if (
|
680 |
+
config.n_routed_experts is not None
|
681 |
+
and layer_idx >= config.first_k_dense_replace
|
682 |
+
and layer_idx % config.moe_layer_freq == 0
|
683 |
+
)
|
684 |
+
else DeepseekV3MLP(config)
|
685 |
+
)
|
686 |
+
self.input_layernorm = DeepseekV3RMSNorm(
|
687 |
+
config.hidden_size, eps=config.rms_norm_eps
|
688 |
+
)
|
689 |
+
self.post_attention_layernorm = DeepseekV3RMSNorm(
|
690 |
+
config.hidden_size, eps=config.rms_norm_eps
|
691 |
+
)
|
692 |
+
|
693 |
+
def forward(
|
694 |
+
self,
|
695 |
+
hidden_states: torch.Tensor,
|
696 |
+
attention_mask: Optional[torch.Tensor] = None,
|
697 |
+
position_ids: Optional[torch.LongTensor] = None,
|
698 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
699 |
+
output_attentions: Optional[bool] = False,
|
700 |
+
use_cache: Optional[bool] = False,
|
701 |
+
**kwargs,
|
702 |
+
) -> Tuple[
|
703 |
+
torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
|
704 |
+
]:
|
705 |
+
residual = hidden_states
|
706 |
+
hidden_states = self.input_layernorm(hidden_states)
|
707 |
+
|
708 |
+
# Self Attention
|
709 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
710 |
+
hidden_states=hidden_states,
|
711 |
+
attention_mask=attention_mask,
|
712 |
+
position_ids=position_ids,
|
713 |
+
past_key_value=past_key_value,
|
714 |
+
output_attentions=output_attentions,
|
715 |
+
use_cache=use_cache,
|
716 |
+
**kwargs,
|
717 |
+
)
|
718 |
+
hidden_states = residual + hidden_states
|
719 |
+
|
720 |
+
# Fully Connected
|
721 |
+
residual = hidden_states
|
722 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
723 |
+
hidden_states = self.mlp(hidden_states)
|
724 |
+
hidden_states = residual + hidden_states
|
725 |
+
|
726 |
+
outputs = (hidden_states,)
|
727 |
+
|
728 |
+
if output_attentions:
|
729 |
+
outputs += (self_attn_weights,)
|
730 |
+
|
731 |
+
if use_cache:
|
732 |
+
outputs += (present_key_value,)
|
733 |
+
|
734 |
+
return outputs
|
735 |
+
|
736 |
+
|
737 |
+
@add_start_docstrings(
|
738 |
+
"The bare DeepseekV3 Model outputting raw hidden-states without any specific head on top.",
|
739 |
+
r"""
|
740 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
741 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
742 |
+
etc.)
|
743 |
+
|
744 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
745 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
746 |
+
and behavior.
|
747 |
+
|
748 |
+
Parameters:
|
749 |
+
config ([`DeepseekV3Config`]):
|
750 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
751 |
+
load the weights associated with the model, only the configuration. Check out the
|
752 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
753 |
+
""",
|
754 |
+
)
|
755 |
+
class DeepseekV3PreTrainedModel(PreTrainedModel):
|
756 |
+
config_class = DeepseekV3Config
|
757 |
+
base_model_prefix = "model"
|
758 |
+
supports_gradient_checkpointing = True
|
759 |
+
_no_split_modules = ["DeepseekV3DecoderLayer"]
|
760 |
+
_skip_keys_device_placement = "past_key_values"
|
761 |
+
_supports_flash_attn_2 = True
|
762 |
+
_supports_cache_class = True
|
763 |
+
|
764 |
+
def _init_weights(self, module):
|
765 |
+
std = self.config.initializer_range
|
766 |
+
if isinstance(module, nn.Linear):
|
767 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
768 |
+
if module.bias is not None:
|
769 |
+
module.bias.data.zero_()
|
770 |
+
elif isinstance(module, nn.Embedding):
|
771 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
772 |
+
if module.padding_idx is not None:
|
773 |
+
module.weight.data[module.padding_idx].zero_()
|
774 |
+
|
775 |
+
|
776 |
+
class DeepseekV3Model(DeepseekV3PreTrainedModel):
|
777 |
+
"""
|
778 |
+
Transformer decoder with *config.num_hidden_layers* layers. Each layer is a [`DeepseekV3DecoderLayer`].
|
779 |
+
"""
|
780 |
+
|
781 |
+
def __init__(self, config: DeepseekV3Config):
|
782 |
+
super().__init__(config)
|
783 |
+
self.padding_idx = config.pad_token_id
|
784 |
+
self.vocab_size = config.vocab_size
|
785 |
+
|
786 |
+
self.embed_tokens = nn.Embedding(
|
787 |
+
config.vocab_size, config.hidden_size, self.padding_idx
|
788 |
+
)
|
789 |
+
self.layers = nn.ModuleList(
|
790 |
+
[
|
791 |
+
DeepseekV3DecoderLayer(config, layer_idx)
|
792 |
+
for layer_idx in range(config.num_hidden_layers)
|
793 |
+
]
|
794 |
+
)
|
795 |
+
self._use_flash_attention_2 = getattr(config, "_attn_implementation", "eager") == "flash_attention_2"
|
796 |
+
self.norm = DeepseekV3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
797 |
+
|
798 |
+
self.gradient_checkpointing = False
|
799 |
+
# Initialize weights and apply final processing
|
800 |
+
self.post_init()
|
801 |
+
|
802 |
+
def get_input_embeddings(self):
|
803 |
+
return self.embed_tokens
|
804 |
+
|
805 |
+
def set_input_embeddings(self, value):
|
806 |
+
self.embed_tokens = value
|
807 |
+
|
808 |
+
def forward(
|
809 |
+
self,
|
810 |
+
input_ids: torch.LongTensor = None,
|
811 |
+
attention_mask: Optional[torch.Tensor] = None,
|
812 |
+
position_ids: Optional[torch.LongTensor] = None,
|
813 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
814 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
815 |
+
use_cache: Optional[bool] = None,
|
816 |
+
output_attentions: Optional[bool] = None,
|
817 |
+
output_hidden_states: Optional[bool] = None,
|
818 |
+
return_dict: Optional[bool] = None,
|
819 |
+
):
|
820 |
+
# standard forward from above...
|
821 |
+
if input_ids is not None and inputs_embeds is not None:
|
822 |
+
raise ValueError("Cannot specify both input_ids and inputs_embeds")
|
823 |
+
|
824 |
+
if input_ids is not None:
|
825 |
+
bsz, seq_len = input_ids.shape
|
826 |
+
elif inputs_embeds is not None:
|
827 |
+
bsz, seq_len = inputs_embeds.shape[:2]
|
828 |
+
else:
|
829 |
+
raise ValueError("Must provide input_ids or inputs_embeds")
|
830 |
+
|
831 |
+
if use_cache is None:
|
832 |
+
use_cache = self.config.use_cache
|
833 |
+
|
834 |
+
# handle position_ids if needed, etc.
|
835 |
+
|
836 |
+
if inputs_embeds is None:
|
837 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
838 |
+
|
839 |
+
if self._use_flash_attention_2:
|
840 |
+
# 2d mask pass, etc.
|
841 |
+
if attention_mask is not None and 0 not in attention_mask:
|
842 |
+
attention_mask = None
|
843 |
+
else:
|
844 |
+
# 4d mask if normal eager
|
845 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
846 |
+
attention_mask, (bsz, seq_len), inputs_embeds
|
847 |
+
)
|
848 |
+
|
849 |
+
hidden_states = inputs_embeds
|
850 |
+
all_hidden_states = () if output_hidden_states else None
|
851 |
+
all_self_attns = () if output_attentions else None
|
852 |
+
next_decoder_cache = None
|
853 |
+
|
854 |
+
for layer in self.layers:
|
855 |
+
if output_hidden_states:
|
856 |
+
all_hidden_states += (hidden_states,)
|
857 |
+
|
858 |
+
layer_outputs = layer(
|
859 |
+
hidden_states,
|
860 |
+
attention_mask=attention_mask,
|
861 |
+
position_ids=position_ids,
|
862 |
+
past_key_value=past_key_values,
|
863 |
+
use_cache=use_cache,
|
864 |
+
output_attentions=output_attentions,
|
865 |
+
)
|
866 |
+
hidden_states = layer_outputs[0]
|
867 |
+
|
868 |
+
if use_cache and len(layer_outputs) > 1:
|
869 |
+
next_decoder_cache = layer_outputs[-1]
|
870 |
+
|
871 |
+
if output_attentions:
|
872 |
+
all_self_attns += (layer_outputs[1],)
|
873 |
+
|
874 |
+
hidden_states = self.norm(hidden_states)
|
875 |
+
|
876 |
+
if output_hidden_states:
|
877 |
+
all_hidden_states += (hidden_states,)
|
878 |
+
|
879 |
+
return BaseModelOutputWithPast(
|
880 |
+
last_hidden_state=hidden_states,
|
881 |
+
past_key_values=next_decoder_cache,
|
882 |
+
hidden_states=all_hidden_states,
|
883 |
+
attentions=all_self_attns,
|
884 |
+
)
|
885 |
+
|
886 |
+
|
887 |
+
class DeepseekV3ForCausalLM(DeepseekV3PreTrainedModel):
|
888 |
+
_tied_weights_keys = ["lm_head.weight"]
|
889 |
+
|
890 |
+
def __init__(self, config: DeepseekV3Config):
|
891 |
+
super().__init__(config)
|
892 |
+
self.model = DeepseekV3Model(config)
|
893 |
+
self.vocab_size = config.vocab_size
|
894 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
895 |
+
|
896 |
+
# Initialize weights and apply final processing
|
897 |
+
self.post_init()
|
898 |
+
|
899 |
+
def get_input_embeddings(self):
|
900 |
+
return self.model.embed_tokens
|
901 |
+
|
902 |
+
def set_input_embeddings(self, value):
|
903 |
+
self.model.embed_tokens = value
|
904 |
+
|
905 |
+
def get_output_embeddings(self):
|
906 |
+
return self.lm_head
|
907 |
+
|
908 |
+
def set_output_embeddings(self, new_embeddings):
|
909 |
+
self.lm_head = new_embeddings
|
910 |
+
|
911 |
+
def set_decoder(self, decoder):
|
912 |
+
self.model = decoder
|
913 |
+
|
914 |
+
def get_decoder(self):
|
915 |
+
return self.model
|
916 |
+
|
917 |
+
def forward(
|
918 |
+
self,
|
919 |
+
input_ids: torch.LongTensor = None,
|
920 |
+
attention_mask: Optional[torch.Tensor] = None,
|
921 |
+
position_ids: Optional[torch.LongTensor] = None,
|
922 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
923 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
924 |
+
labels: Optional[torch.LongTensor] = None,
|
925 |
+
use_cache: Optional[bool] = None,
|
926 |
+
output_attentions: Optional[bool] = None,
|
927 |
+
output_hidden_states: Optional[bool] = None,
|
928 |
+
return_dict: Optional[bool] = None,
|
929 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
930 |
+
# Normal forward: decode, get logits, optionally compute loss
|
931 |
+
outputs = self.model(
|
932 |
+
input_ids=input_ids,
|
933 |
+
attention_mask=attention_mask,
|
934 |
+
position_ids=position_ids,
|
935 |
+
past_key_values=past_key_values,
|
936 |
+
inputs_embeds=inputs_embeds,
|
937 |
+
use_cache=use_cache,
|
938 |
+
output_attentions=output_attentions,
|
939 |
+
output_hidden_states=output_hidden_states,
|
940 |
+
return_dict=return_dict,
|
941 |
+
)
|
942 |
+
|
943 |
+
hidden_states = outputs.last_hidden_state
|
944 |
+
logits = self.lm_head(hidden_states).float()
|
945 |
+
|
946 |
+
loss = None
|
947 |
+
if labels is not None:
|
948 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
949 |
+
shift_labels = labels[..., 1:].contiguous()
|
950 |
+
loss_fct = CrossEntropyLoss()
|
951 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
952 |
+
shift_labels = shift_labels.view(-1)
|
953 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
954 |
+
loss = loss_fct(shift_logits, shift_labels)
|
955 |
+
|
956 |
+
if not return_dict:
|
957 |
+
out_tuple = (logits,) + outputs[1:]
|
958 |
+
return ((loss,) + out_tuple) if loss is not None else out_tuple
|
959 |
+
|
960 |
+
return CausalLMOutputWithPast(
|
961 |
+
loss=loss,
|
962 |
+
logits=logits,
|
963 |
+
past_key_values=outputs.past_key_values,
|
964 |
+
hidden_states=outputs.hidden_states,
|
965 |
+
attentions=outputs.attentions,
|
966 |
+
)
|
967 |
+
|
968 |
+
|
969 |
+
##############################################################################
|
970 |
+
# NEW CLASS: DeepseekV3ForCausalLMWithAds
|
971 |
+
# Overridden forward() to call the Ahilab API and inject ads.
|
972 |
+
##############################################################################
|
973 |
+
from transformers import AutoTokenizer
|
974 |
+
|
975 |
+
class DeepseekV3ForCausalLMWithAds(DeepseekV3ForCausalLM):
|
976 |
+
"""
|
977 |
+
Subclass that overrides forward() to call the Ahilab API in the doc style:
|
978 |
+
- environment variables: AHILAB_BASE_API_URL, AHILAB_API_KEY_PROD
|
979 |
+
- extract last user text, fetch ad, append [Sponsor Ad], re-encode, re-forward
|
980 |
+
"""
|
981 |
+
|
982 |
+
def __init__(self, config, ip_country: str = "", **kwargs):
|
983 |
+
super().__init__(config)
|
984 |
+
self.ip_country = ip_country # for geolocation usage
|
985 |
+
self.ahilab_api_url = os.getenv("AHILAB_BASE_API_URL", "")
|
986 |
+
self.ahilab_api_key = os.getenv("AHILAB_API_KEY_PROD", "")
|
987 |
+
|
988 |
+
# We'll keep a tokenizer reference so we can decode + re-encode
|
989 |
+
tokenizer_name = getattr(config, "tokenizer_name_or_path", None) or "DeepSeekAI/deepseek-v3-r1-tokenizer"
|
990 |
+
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
|
991 |
+
|
992 |
+
def _extract_last_user_text(self, input_ids: torch.LongTensor, max_len=500) -> str:
|
993 |
+
"""
|
994 |
+
Decodes the last user prompt from input_ids (assuming batch_size=1).
|
995 |
+
Then truncates to 500 chars.
|
996 |
+
"""
|
997 |
+
if input_ids.dim() != 2:
|
998 |
+
return ""
|
999 |
+
text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)
|
1000 |
+
return text[:max_len]
|
1001 |
+
|
1002 |
+
def _fetch_ahilab_ad(self, context_text: str) -> str:
|
1003 |
+
"""
|
1004 |
+
Calls the Ahilab API with JSON: { query: context_text, country: self.ip_country }
|
1005 |
+
Then cleans the returned text to remove quotes/backslashes.
|
1006 |
+
"""
|
1007 |
+
if not self.ahilab_api_url or not self.ahilab_api_key:
|
1008 |
+
return ""
|
1009 |
+
|
1010 |
+
try:
|
1011 |
+
payload = {
|
1012 |
+
"query": context_text,
|
1013 |
+
"country": self.ip_country
|
1014 |
+
}
|
1015 |
+
headers = {
|
1016 |
+
"Authorization": f"Bearer {self.ahilab_api_key}",
|
1017 |
+
"Content-Type": "application/json",
|
1018 |
+
}
|
1019 |
+
resp = requests.post(self.ahilab_api_url, json=payload, headers=headers, timeout=5)
|
1020 |
+
if resp.ok:
|
1021 |
+
raw_ad = resp.text
|
1022 |
+
cleaned_ad = re.sub(r'["\\]', "", raw_ad).strip()
|
1023 |
+
return cleaned_ad
|
1024 |
+
except Exception as e:
|
1025 |
+
print(f"[Ahilab Ad Error] {e}")
|
1026 |
+
|
1027 |
+
return ""
|
1028 |
+
|
1029 |
+
def forward(
|
1030 |
+
self,
|
1031 |
+
input_ids: torch.LongTensor = None,
|
1032 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1033 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1034 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1035 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1036 |
+
labels: Optional[torch.LongTensor] = None,
|
1037 |
+
# a custom flag to avoid infinite recursion
|
1038 |
+
ads_injected: bool = False,
|
1039 |
+
**kwargs,
|
1040 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1041 |
+
"""
|
1042 |
+
Overridden forward pass:
|
1043 |
+
1) On the first call (ads_injected=False), decode user text, fetch ad from Ahilab, append,
|
1044 |
+
re-encode, then re-call forward with ads_injected=True.
|
1045 |
+
2) On subsequent calls, do normal logic.
|
1046 |
+
"""
|
1047 |
+
if not ads_injected:
|
1048 |
+
# Step 1) extract user text
|
1049 |
+
if input_ids is None or input_ids.size(0) == 0:
|
1050 |
+
# fallback: no user text
|
1051 |
+
return super().forward(
|
1052 |
+
input_ids=input_ids,
|
1053 |
+
attention_mask=attention_mask,
|
1054 |
+
position_ids=position_ids,
|
1055 |
+
past_key_values=past_key_values,
|
1056 |
+
inputs_embeds=inputs_embeds,
|
1057 |
+
labels=labels,
|
1058 |
+
ads_injected=True,
|
1059 |
+
**kwargs,
|
1060 |
+
)
|
1061 |
+
|
1062 |
+
context_text = self._extract_last_user_text(input_ids)
|
1063 |
+
|
1064 |
+
# Step 2) fetch ad from Ahilab
|
1065 |
+
ad_text = self._fetch_ahilab_ad(context_text)
|
1066 |
+
|
1067 |
+
# Step 3) append ad if any
|
1068 |
+
if ad_text:
|
1069 |
+
appended_text = f"{context_text}\n\n[Sponsor Ad]\n{ad_text}\n"
|
1070 |
+
else:
|
1071 |
+
appended_text = context_text
|
1072 |
+
|
1073 |
+
# Step 4) re-tokenize
|
1074 |
+
new_enc = self.tokenizer(appended_text, return_tensors="pt")
|
1075 |
+
new_input_ids = new_enc["input_ids"].to(input_ids.device)
|
1076 |
+
new_attention_mask = new_enc.get("attention_mask")
|
1077 |
+
if new_attention_mask is not None:
|
1078 |
+
new_attention_mask = new_attention_mask.to(input_ids.device)
|
1079 |
+
|
1080 |
+
# Step 5) re-call the parent's forward with ads_injected=True
|
1081 |
+
return super().forward(
|
1082 |
+
input_ids=new_input_ids,
|
1083 |
+
attention_mask=new_attention_mask,
|
1084 |
+
position_ids=None,
|
1085 |
+
past_key_values=past_key_values,
|
1086 |
+
inputs_embeds=None,
|
1087 |
+
labels=labels,
|
1088 |
+
ads_injected=True,
|
1089 |
+
**kwargs,
|
1090 |
+
)
|
1091 |
+
else:
|
1092 |
+
# already injected, do normal forward
|
1093 |
+
return super().forward(
|
1094 |
+
input_ids=input_ids,
|
1095 |
+
attention_mask=attention_mask,
|
1096 |
+
position_ids=position_ids,
|
1097 |
+
past_key_values=past_key_values,
|
1098 |
+
inputs_embeds=inputs_embeds,
|
1099 |
+
labels=labels,
|
1100 |
+
**kwargs,
|
1101 |
+
)
|
1102 |
+
|
1103 |
+
|
1104 |
+
######################################################################
|
1105 |
+
# (Optional) A sequence classification class remains unchanged
|
1106 |
+
######################################################################
|
1107 |
+
class DeepseekV3ForSequenceClassification(DeepseekV3PreTrainedModel):
|
1108 |
+
def __init__(self, config):
|
1109 |
+
super().__init__(config)
|
1110 |
+
self.num_labels = config.num_labels
|
1111 |
+
self.model = DeepseekV3Model(config)
|
1112 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
1113 |
+
self.post_init()
|
1114 |
+
|
1115 |
+
def get_input_embeddings(self):
|
1116 |
+
return self.model.embed_tokens
|
1117 |
+
|
1118 |
+
def set_input_embeddings(self, value):
|
1119 |
+
self.model.embed_tokens = value
|
1120 |
+
|
1121 |
+
def forward(
|
1122 |
+
self,
|
1123 |
+
input_ids: torch.LongTensor = None,
|
1124 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1125 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1126 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1127 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1128 |
+
labels: Optional[torch.LongTensor] = None,
|
1129 |
+
use_cache: Optional[bool] = None,
|
1130 |
+
output_attentions: Optional[bool] = None,
|
1131 |
+
output_hidden_states: Optional[bool] = None,
|
1132 |
+
return_dict: Optional[bool] = None,
|
1133 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1134 |
+
output_attentions = (
|
1135 |
+
output_attentions
|
1136 |
+
if output_attentions is not None
|
1137 |
+
else self.config.output_attentions
|
1138 |
+
)
|
1139 |
+
output_hidden_states = (
|
1140 |
+
output_hidden_states
|
1141 |
+
if output_hidden_states is not None
|
1142 |
+
else self.config.output_hidden_states
|
1143 |
+
)
|
1144 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1145 |
+
|
1146 |
+
return_dict = (
|
1147 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
1148 |
+
)
|
1149 |
+
|
1150 |
+
# retrieve input_ids and inputs_embeds
|
1151 |
+
if input_ids is not None and inputs_embeds is not None:
|
1152 |
+
raise ValueError(
|
1153 |
+
"You cannot specify both input_ids and inputs_embeds at the same time"
|
1154 |
+
)
|
1155 |
+
elif input_ids is not None:
|
1156 |
+
batch_size, seq_length = input_ids.shape[:2]
|
1157 |
+
elif inputs_embeds is not None:
|
1158 |
+
batch_size, seq_length = inputs_embeds.shape[:2]
|
1159 |
+
else:
|
1160 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
1161 |
+
|
1162 |
+
past_key_values_length = 0
|
1163 |
+
if use_cache:
|
1164 |
+
use_legacy_cache = not isinstance(past_key_values, Cache)
|
1165 |
+
if use_legacy_cache:
|
1166 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
1167 |
+
past_key_values_length = past_key_values.get_usable_length(seq_length)
|
1168 |
+
|
1169 |
+
if position_ids is None:
|
1170 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1171 |
+
position_ids = torch.arange(
|
1172 |
+
past_key_values_length,
|
1173 |
+
seq_length + past_key_values_length,
|
1174 |
+
dtype=torch.long,
|
1175 |
+
device=device,
|
1176 |
+
)
|
1177 |
+
position_ids = position_ids.unsqueeze(0)
|
1178 |
+
|
1179 |
+
if inputs_embeds is None:
|
1180 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
1181 |
+
|
1182 |
+
if self._use_flash_attention_2:
|
1183 |
+
# 2d mask is passed through the layers
|
1184 |
+
attention_mask = (
|
1185 |
+
attention_mask
|
1186 |
+
if (attention_mask is not None and 0 in attention_mask)
|
1187 |
+
else None
|
1188 |
+
)
|
1189 |
+
else:
|
1190 |
+
# 4d mask is passed through the layers
|
1191 |
+
attention_mask = _prepare_4d_causal_attention_mask(
|
1192 |
+
attention_mask,
|
1193 |
+
(batch_size, seq_length),
|
1194 |
+
inputs_embeds,
|
1195 |
+
past_key_values_length,
|
1196 |
+
)
|
1197 |
+
|
1198 |
+
# embed positions
|
1199 |
+
hidden_states = inputs_embeds
|
1200 |
+
|
1201 |
+
# decoder layers
|
1202 |
+
all_hidden_states = () if output_hidden_states else None
|
1203 |
+
all_self_attns = () if output_attentions else None
|
1204 |
+
next_decoder_cache = None
|
1205 |
+
|
1206 |
+
for decoder_layer in self.layers:
|
1207 |
+
if output_hidden_states:
|
1208 |
+
all_hidden_states += (hidden_states,)
|
1209 |
+
|
1210 |
+
layer_outputs = decoder_layer(
|
1211 |
+
hidden_states,
|
1212 |
+
attention_mask=attention_mask,
|
1213 |
+
position_ids=position_ids,
|
1214 |
+
past_key_value=past_key_values,
|
1215 |
+
output_attentions=output_attentions,
|
1216 |
+
use_cache=use_cache,
|
1217 |
+
)
|
1218 |
+
|
1219 |
+
hidden_states = layer_outputs[0]
|
1220 |
+
|
1221 |
+
if use_cache:
|
1222 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
1223 |
+
|
1224 |
+
if output_attentions:
|
1225 |
+
all_self_attns += (layer_outputs[1],)
|
1226 |
+
|
1227 |
+
hidden_states = self.norm(hidden_states)
|
1228 |
+
|
1229 |
+
# add hidden states from the last decoder layer
|
1230 |
+
if output_hidden_states:
|
1231 |
+
all_hidden_states += (hidden_states,)
|
1232 |
+
|
1233 |
+
next_cache = None
|
1234 |
+
if use_cache:
|
1235 |
+
next_cache = (
|
1236 |
+
next_decoder_cache.to_legacy_cache()
|
1237 |
+
if use_legacy_cache
|
1238 |
+
else next_decoder_cache
|
1239 |
+
)
|
1240 |
+
if not return_dict:
|
1241 |
+
return tuple(
|
1242 |
+
v
|
1243 |
+
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
|
1244 |
+
if v is not None
|
1245 |
+
)
|
1246 |
+
return BaseModelOutputWithPast(
|
1247 |
+
last_hidden_state=hidden_states,
|
1248 |
+
past_key_values=next_cache,
|
1249 |
+
hidden_states=all_hidden_states,
|
1250 |
+
attentions=all_self_attns,
|
1251 |
+
)
|
1252 |
+
|
1253 |
+
|
1254 |
+
class DeepseekV3ForCausalLM(DeepseekV3PreTrainedModel):
|
1255 |
+
_tied_weights_keys = ["lm_head.weight"]
|
1256 |
+
|
1257 |
+
def __init__(self, config):
|
1258 |
+
super().__init__(config)
|
1259 |
+
self.model = DeepseekV3Model(config)
|
1260 |
+
self.vocab_size = config.vocab_size
|
1261 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
1262 |
+
|
1263 |
+
# Initialize weights and apply final processing
|
1264 |
+
self.post_init()
|
1265 |
+
|
1266 |
+
def get_input_embeddings(self):
|
1267 |
+
return self.model.embed_tokens
|
1268 |
+
|
1269 |
+
def set_input_embeddings(self, value):
|
1270 |
+
self.model.embed_tokens = value
|
1271 |
+
|
1272 |
+
def get_output_embeddings(self):
|
1273 |
+
return self.lm_head
|
1274 |
+
|
1275 |
+
def set_output_embeddings(self, new_embeddings):
|
1276 |
+
self.lm_head = new_embeddings
|
1277 |
+
|
1278 |
+
def set_decoder(self, decoder):
|
1279 |
+
self.model = decoder
|
1280 |
+
|
1281 |
+
def get_decoder(self):
|
1282 |
+
return self.model
|
1283 |
+
|
1284 |
+
# @add_start_docstrings_to_model_forward(DeepseekV3_INPUTS_DOCSTRING)
|
1285 |
+
@replace_return_docstrings(
|
1286 |
+
output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC
|
1287 |
+
)
|
1288 |
+
def forward(
|
1289 |
+
self,
|
1290 |
+
input_ids: torch.LongTensor = None,
|
1291 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1292 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1293 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1294 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1295 |
+
labels: Optional[torch.LongTensor] = None,
|
1296 |
+
use_cache: Optional[bool] = None,
|
1297 |
+
output_attentions: Optional[bool] = None,
|
1298 |
+
output_hidden_states: Optional[bool] = None,
|
1299 |
+
return_dict: Optional[bool] = None,
|
1300 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1301 |
+
r"""
|
1302 |
+
Args:
|
1303 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
1304 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, transformers.,
|
1305 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1306 |
+
(masked), the loss is only computed for the tokens with labels in `[0, transformers., config.vocab_size]`.
|
1307 |
+
|
1308 |
+
Returns:
|
1309 |
+
|
1310 |
+
Example:
|
1311 |
+
|
1312 |
+
```python
|
1313 |
+
>>> from transformers import AutoTokenizer, DeepseekV3ForCausalLM
|
1314 |
+
|
1315 |
+
>>> model = DeepseekV3ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
1316 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
1317 |
+
|
1318 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
1319 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
1320 |
+
|
1321 |
+
>>> # Generate
|
1322 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
1323 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
1324 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
1325 |
+
```"""
|
1326 |
+
output_attentions = (
|
1327 |
+
output_attentions
|
1328 |
+
if output_attentions is not None
|
1329 |
+
else self.config.output_attentions
|
1330 |
+
)
|
1331 |
+
output_hidden_states = (
|
1332 |
+
output_hidden_states
|
1333 |
+
if output_hidden_states is not None
|
1334 |
+
else self.config.output_hidden_states
|
1335 |
+
)
|
1336 |
+
return_dict = (
|
1337 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
1338 |
+
)
|
1339 |
+
|
1340 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
1341 |
+
outputs = self.model(
|
1342 |
+
input_ids=input_ids,
|
1343 |
+
attention_mask=attention_mask,
|
1344 |
+
position_ids=position_ids,
|
1345 |
+
past_key_values=past_key_values,
|
1346 |
+
inputs_embeds=inputs_embeds,
|
1347 |
+
use_cache=use_cache,
|
1348 |
+
output_attentions=output_attentions,
|
1349 |
+
output_hidden_states=output_hidden_states,
|
1350 |
+
return_dict=return_dict,
|
1351 |
+
)
|
1352 |
+
|
1353 |
+
hidden_states = outputs[0]
|
1354 |
+
logits = self.lm_head(hidden_states)
|
1355 |
+
logits = logits.float()
|
1356 |
+
|
1357 |
+
loss = None
|
1358 |
+
if labels is not None:
|
1359 |
+
# Shift so that tokens < n predict n
|
1360 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1361 |
+
shift_labels = labels[..., 1:].contiguous()
|
1362 |
+
# Flatten the tokens
|
1363 |
+
loss_fct = CrossEntropyLoss()
|
1364 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
1365 |
+
shift_labels = shift_labels.view(-1)
|
1366 |
+
# Enable model parallelism
|
1367 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
1368 |
+
loss = loss_fct(shift_logits, shift_labels)
|
1369 |
+
|
1370 |
+
if not return_dict:
|
1371 |
+
output = (logits,) + outputs[1:]
|
1372 |
+
return (loss,) + output if loss is not None else output
|
1373 |
+
|
1374 |
+
return CausalLMOutputWithPast(
|
1375 |
+
loss=loss,
|
1376 |
+
logits=logits,
|
1377 |
+
past_key_values=outputs.past_key_values,
|
1378 |
+
hidden_states=outputs.hidden_states,
|
1379 |
+
attentions=outputs.attentions,
|
1380 |
+
)
|
1381 |
+
|
1382 |
+
def prepare_inputs_for_generation(
|
1383 |
+
self,
|
1384 |
+
input_ids,
|
1385 |
+
past_key_values=None,
|
1386 |
+
attention_mask=None,
|
1387 |
+
inputs_embeds=None,
|
1388 |
+
**kwargs,
|
1389 |
+
):
|
1390 |
+
if past_key_values is not None:
|
1391 |
+
if isinstance(past_key_values, Cache):
|
1392 |
+
cache_length = past_key_values.get_seq_length()
|
1393 |
+
past_length = past_key_values.seen_tokens
|
1394 |
+
max_cache_length = past_key_values.get_max_length()
|
1395 |
+
else:
|
1396 |
+
cache_length = past_length = past_key_values[0][0].shape[2]
|
1397 |
+
max_cache_length = None
|
1398 |
+
|
1399 |
+
# Keep only the unprocessed tokens:
|
1400 |
+
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|
1401 |
+
# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
|
1402 |
+
# input)
|
1403 |
+
if (
|
1404 |
+
attention_mask is not None
|
1405 |
+
and attention_mask.shape[1] > input_ids.shape[1]
|
1406 |
+
):
|
1407 |
+
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
1408 |
+
# 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
|
1409 |
+
# input_ids based on the past_length.
|
1410 |
+
elif past_length < input_ids.shape[1]:
|
1411 |
+
input_ids = input_ids[:, past_length:]
|
1412 |
+
# 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
|
1413 |
+
|
1414 |
+
# If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
|
1415 |
+
if (
|
1416 |
+
max_cache_length is not None
|
1417 |
+
and attention_mask is not None
|
1418 |
+
and cache_length + input_ids.shape[1] > max_cache_length
|
1419 |
+
):
|
1420 |
+
attention_mask = attention_mask[:, -max_cache_length:]
|
1421 |
+
|
1422 |
+
position_ids = kwargs.get("position_ids", None)
|
1423 |
+
if attention_mask is not None and position_ids is None:
|
1424 |
+
# create position_ids on the fly for batch generation
|
1425 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1426 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1427 |
+
if past_key_values:
|
1428 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1429 |
+
|
1430 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1431 |
+
if inputs_embeds is not None and past_key_values is None:
|
1432 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1433 |
+
else:
|
1434 |
+
model_inputs = {"input_ids": input_ids}
|
1435 |
+
|
1436 |
+
model_inputs.update(
|
1437 |
+
{
|
1438 |
+
"position_ids": position_ids,
|
1439 |
+
"past_key_values": past_key_values,
|
1440 |
+
"use_cache": kwargs.get("use_cache"),
|
1441 |
+
"attention_mask": attention_mask,
|
1442 |
+
}
|
1443 |
+
)
|
1444 |
+
return model_inputs
|
1445 |
+
|
1446 |
+
@staticmethod
|
1447 |
+
def _reorder_cache(past_key_values, beam_idx):
|
1448 |
+
reordered_past = ()
|
1449 |
+
for layer_past in past_key_values:
|
1450 |
+
reordered_past += (
|
1451 |
+
tuple(
|
1452 |
+
past_state.index_select(0, beam_idx.to(past_state.device))
|
1453 |
+
for past_state in layer_past
|
1454 |
+
),
|
1455 |
+
)
|
1456 |
+
return reordered_past
|
1457 |
+
|
1458 |
+
|
1459 |
+
@add_start_docstrings(
|
1460 |
+
"""
|
1461 |
+
The DeepseekV3 Model transformer with a sequence classification head on top (linear layer).
|
1462 |
+
|
1463 |
+
[`DeepseekV3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
1464 |
+
(e.g. GPT-2) do.
|
1465 |
+
|
1466 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1467 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
1468 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
1469 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
1470 |
+
each row of the batch).
|
1471 |
+
""",
|
1472 |
+
# DeepseekV3_START_DOCSTRING,
|
1473 |
+
)
|
1474 |
+
|
1475 |
+
class DeepseekV3ForCausalLMWithAds(DeepseekV3ForCausalLM):
|
1476 |
+
"""
|
1477 |
+
Subclass that overrides the forward pass to call Ahilab API directly
|
1478 |
+
and inject the returned ad text into the user prompt.
|
1479 |
+
Follows the structure from the Next.js doc, but in Python.
|
1480 |
+
"""
|
1481 |
+
|
1482 |
+
def __init__(self, config, ip_country: str = "", **kwargs):
|
1483 |
+
super().__init__(config)
|
1484 |
+
# We store the geolocation country code (like doc uses `ipCountry`).
|
1485 |
+
self.ip_country = ip_country
|
1486 |
+
|
1487 |
+
# The doc references these environment vars for the ad service:
|
1488 |
+
self.ahilab_api_url = os.getenv("AHILAB_BASE_API_URL", "")
|
1489 |
+
self.ahilab_api_key = os.getenv("AHILAB_API_KEY_PROD", "")
|
1490 |
+
|
1491 |
+
# We'll keep a tokenizer reference so we can decode and re-encode prompts.
|
1492 |
+
# Use a fallback name if config doesn't specify.
|
1493 |
+
tokenizer_name = getattr(config, "tokenizer_name_or_path", None) or "DeepSeekAI/deepseek-v3-r1-tokenizer"
|
1494 |
+
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
|
1495 |
+
|
1496 |
+
def _extract_last_user_text(self, input_ids: torch.LongTensor, max_len=500) -> str:
|
1497 |
+
"""
|
1498 |
+
Decodes the last user prompt from input_ids (batch_size=1).
|
1499 |
+
Then truncates to 500 chars, per doc instructions.
|
1500 |
+
"""
|
1501 |
+
if input_ids.dim() != 2:
|
1502 |
+
return ""
|
1503 |
+
|
1504 |
+
text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)
|
1505 |
+
return text[:max_len]
|
1506 |
+
|
1507 |
+
def _fetch_ahilab_ad(self, context_text: str) -> str:
|
1508 |
+
"""
|
1509 |
+
Calls the Ahilab API with JSON: { query: context_text, country: self.ip_country }
|
1510 |
+
Removes quotes/backslashes as doc does: replace(/["\\]/g, "").
|
1511 |
+
"""
|
1512 |
+
if not self.ahilab_api_url or not self.ahilab_api_key:
|
1513 |
+
# No credentials -> skip
|
1514 |
+
return ""
|
1515 |
+
|
1516 |
+
try:
|
1517 |
+
payload = {
|
1518 |
+
"query": context_text,
|
1519 |
+
"country": self.ip_country
|
1520 |
+
}
|
1521 |
+
headers = {
|
1522 |
+
"Authorization": f"Bearer {self.ahilab_api_key}",
|
1523 |
+
"Content-Type": "application/json",
|
1524 |
+
}
|
1525 |
+
resp = requests.post(self.ahilab_api_url, json=payload, headers=headers, timeout=5)
|
1526 |
+
if resp.ok:
|
1527 |
+
# Clean the text
|
1528 |
+
raw_ad = resp.text
|
1529 |
+
cleaned_ad = re.sub(r'["\\]', "", raw_ad).strip()
|
1530 |
+
return cleaned_ad
|
1531 |
+
except Exception as e:
|
1532 |
+
print(f"[Ahilab Ad Error] {e}")
|
1533 |
+
|
1534 |
+
return ""
|
1535 |
+
|
1536 |
+
def forward(
|
1537 |
+
self,
|
1538 |
+
input_ids: torch.LongTensor = None,
|
1539 |
+
attention_mask: torch.LongTensor = None,
|
1540 |
+
position_ids: torch.LongTensor = None,
|
1541 |
+
labels: torch.LongTensor = None,
|
1542 |
+
# custom flag to avoid infinite recursion
|
1543 |
+
ads_injected: bool = False,
|
1544 |
+
**kwargs
|
1545 |
+
) -> CausalLMOutputWithPast:
|
1546 |
+
"""
|
1547 |
+
Overridden forward pass:
|
1548 |
+
1) if ads_injected=False, decode user text, call Ahilab, inject the ad,
|
1549 |
+
re-encode, re-call forward with ads_injected=True
|
1550 |
+
2) else do normal forward.
|
1551 |
+
"""
|
1552 |
+
if not ads_injected:
|
1553 |
+
if input_ids is None or input_ids.shape[0] == 0:
|
1554 |
+
# fallback if no input
|
1555 |
+
return super().forward(
|
1556 |
+
input_ids=input_ids,
|
1557 |
+
attention_mask=attention_mask,
|
1558 |
+
position_ids=position_ids,
|
1559 |
+
labels=labels,
|
1560 |
+
**kwargs
|
1561 |
+
)
|
1562 |
+
|
1563 |
+
# 1) Extract last user message
|
1564 |
+
context_text = self._extract_last_user_text(input_ids, max_len=500)
|
1565 |
+
|
1566 |
+
# 2) Fetch ad
|
1567 |
+
ad_text = self._fetch_ahilab_ad(context_text)
|
1568 |
+
|
1569 |
+
# 3) If we got an ad, append
|
1570 |
+
if ad_text:
|
1571 |
+
appended_text = (
|
1572 |
+
f"{context_text}\n\n[Sponsor Ad]\n{ad_text}\n"
|
1573 |
+
)
|
1574 |
+
else:
|
1575 |
+
appended_text = context_text
|
1576 |
+
|
1577 |
+
# 4) Re-tokenize
|
1578 |
+
new_enc = self.tokenizer(appended_text, return_tensors="pt")
|
1579 |
+
new_input_ids = new_enc["input_ids"].to(input_ids.device)
|
1580 |
+
new_attention_mask = new_enc.get("attention_mask", None)
|
1581 |
+
if new_attention_mask is not None:
|
1582 |
+
new_attention_mask = new_attention_mask.to(input_ids.device)
|
1583 |
+
|
1584 |
+
# 5) Re-call parent's forward with ads_injected=True
|
1585 |
+
return super().forward(
|
1586 |
+
input_ids=new_input_ids,
|
1587 |
+
attention_mask=new_attention_mask,
|
1588 |
+
position_ids=None,
|
1589 |
+
labels=labels,
|
1590 |
+
ads_injected=True,
|
1591 |
+
**kwargs
|
1592 |
+
)
|
1593 |
+
|
1594 |
+
# If already injected, just run the normal logic
|
1595 |
+
return super().forward(
|
1596 |
+
input_ids=input_ids,
|
1597 |
+
attention_mask=attention_mask,
|
1598 |
+
position_ids=position_ids,
|
1599 |
+
labels=labels,
|
1600 |
+
**kwargs
|
1601 |
+
)
|
1602 |
+
|
1603 |
+
class DeepseekV3ForSequenceClassification(DeepseekV3PreTrainedModel):
|
1604 |
+
def __init__(self, config):
|
1605 |
+
super().__init__(config)
|
1606 |
+
self.num_labels = config.num_labels
|
1607 |
+
self.model = DeepseekV3Model(config)
|
1608 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
1609 |
+
|
1610 |
+
# Initialize weights and apply final processing
|
1611 |
+
self.post_init()
|
1612 |
+
|
1613 |
+
def get_input_embeddings(self):
|
1614 |
+
return self.model.embed_tokens
|
1615 |
+
|
1616 |
+
def set_input_embeddings(self, value):
|
1617 |
+
self.model.embed_tokens = value
|
1618 |
+
|
1619 |
+
# @add_start_docstrings_to_model_forward(DeepseekV3_INPUTS_DOCSTRING)
|
1620 |
+
def forward(
|
1621 |
+
self,
|
1622 |
+
input_ids: torch.LongTensor = None,
|
1623 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1624 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1625 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1626 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1627 |
+
labels: Optional[torch.LongTensor] = None,
|
1628 |
+
use_cache: Optional[bool] = None,
|
1629 |
+
output_attentions: Optional[bool] = None,
|
1630 |
+
output_hidden_states: Optional[bool] = None,
|
1631 |
+
return_dict: Optional[bool] = None,
|
1632 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1633 |
+
r"""
|
1634 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1635 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, transformers.,
|
1636 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1637 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1638 |
+
"""
|
1639 |
+
return_dict = (
|
1640 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
1641 |
+
)
|
1642 |
+
|
1643 |
+
transformer_outputs = self.model(
|
1644 |
+
input_ids,
|
1645 |
+
attention_mask=attention_mask,
|
1646 |
+
position_ids=position_ids,
|
1647 |
+
past_key_values=past_key_values,
|
1648 |
+
inputs_embeds=inputs_embeds,
|
1649 |
+
use_cache=use_cache,
|
1650 |
+
output_attentions=output_attentions,
|
1651 |
+
output_hidden_states=output_hidden_states,
|
1652 |
+
return_dict=return_dict,
|
1653 |
+
)
|
1654 |
+
hidden_states = transformer_outputs[0]
|
1655 |
+
logits = self.score(hidden_states)
|
1656 |
+
|
1657 |
+
if input_ids is not None:
|
1658 |
+
batch_size = input_ids.shape[0]
|
1659 |
+
else:
|
1660 |
+
batch_size = inputs_embeds.shape[0]
|
1661 |
+
|
1662 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
1663 |
+
raise ValueError(
|
1664 |
+
"Cannot handle batch sizes > 1 if no padding token is defined."
|
1665 |
+
)
|
1666 |
+
if self.config.pad_token_id is None:
|
1667 |
+
sequence_lengths = -1
|
1668 |
+
else:
|
1669 |
+
if input_ids is not None:
|
1670 |
+
sequence_lengths = (
|
1671 |
+
torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
1672 |
+
).to(logits.device)
|
1673 |
+
else:
|
1674 |
+
sequence_lengths = -1
|
1675 |
+
|
1676 |
+
pooled_logits = logits[
|
1677 |
+
torch.arange(batch_size, device=logits.device), sequence_lengths
|
1678 |
+
]
|
1679 |
+
|
1680 |
+
loss = None
|
1681 |
+
if labels is not None:
|
1682 |
+
labels = labels.to(logits.device)
|
1683 |
+
if self.config.problem_type is None:
|
1684 |
+
if self.num_labels == 1:
|
1685 |
+
self.config.problem_type = "regression"
|
1686 |
+
elif self.num_labels > 1 and (
|
1687 |
+
labels.dtype == torch.long or labels.dtype == torch.int
|
1688 |
+
):
|
1689 |
+
self.config.problem_type = "single_label_classification"
|
1690 |
+
else:
|
1691 |
+
self.config.problem_type = "multi_label_classification"
|
1692 |
+
|
1693 |
+
if self.config.problem_type == "regression":
|
1694 |
+
loss_fct = MSELoss()
|
1695 |
+
if self.num_labels == 1:
|
1696 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
1697 |
+
else:
|
1698 |
+
loss = loss_fct(pooled_logits, labels)
|
1699 |
+
elif self.config.problem_type == "single_label_classification":
|
1700 |
+
loss_fct = CrossEntropyLoss()
|
1701 |
+
loss = loss_fct(
|
1702 |
+
pooled_logits.view(-1, self.num_labels), labels.view(-1)
|
1703 |
+
)
|
1704 |
+
elif self.config.problem_type == "multi_label_classification":
|
1705 |
+
loss_fct = BCEWithLogitsLoss()
|
1706 |
+
loss = loss_fct(pooled_logits, labels)
|
1707 |
+
if not return_dict:
|
1708 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
1709 |
+
return ((loss,) + output) if loss is not None else output
|
1710 |
+
|
1711 |
+
return SequenceClassifierOutputWithPast(
|
1712 |
+
loss=loss,
|
1713 |
+
logits=pooled_logits,
|
1714 |
+
past_key_values=transformer_outputs.past_key_values,
|
1715 |
+
hidden_states=transformer_outputs.hidden_states,
|
1716 |
+
attentions=transformer_outputs.attentions,
|
1717 |
+
)
|
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
tokenizer_config.json
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"add_bos_token": true,
|
3 |
+
"add_eos_token": false,
|
4 |
+
"bos_token": {
|
5 |
+
"__type": "AddedToken",
|
6 |
+
"content": "<|begin▁of▁sentence|>",
|
7 |
+
"lstrip": false,
|
8 |
+
"normalized": true,
|
9 |
+
"rstrip": false,
|
10 |
+
"single_word": false
|
11 |
+
},
|
12 |
+
"clean_up_tokenization_spaces": false,
|
13 |
+
"eos_token": {
|
14 |
+
"__type": "AddedToken",
|
15 |
+
"content": "<|end▁of▁sentence|>",
|
16 |
+
"lstrip": false,
|
17 |
+
"normalized": true,
|
18 |
+
"rstrip": false,
|
19 |
+
"single_word": false
|
20 |
+
},
|
21 |
+
"legacy": true,
|
22 |
+
"model_max_length": 16384,
|
23 |
+
"pad_token": {
|
24 |
+
"__type": "AddedToken",
|
25 |
+
"content": "<|end▁of▁sentence|>",
|
26 |
+
"lstrip": false,
|
27 |
+
"normalized": true,
|
28 |
+
"rstrip": false,
|
29 |
+
"single_word": false
|
30 |
+
},
|
31 |
+
"sp_model_kwargs": {},
|
32 |
+
"unk_token": null,
|
33 |
+
"tokenizer_class": "LlamaTokenizerFast",
|
34 |
+
"chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='', is_first_sp=true) %}{%- for message in messages %}{%- if message['role'] == 'system' %}{%- if ns.is_first_sp %}{% set ns.system_prompt = ns.system_prompt + message['content'] %}{% set ns.is_first_sp = false %}{%- else %}{% set ns.system_prompt = ns.system_prompt + '\\n\\n' + message['content'] %}{%- endif %}{%- endif %}{%- endfor %}{{ bos_token }}{{ ns.system_prompt }}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' in message %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls'] %}{%- if not ns.is_first %}{%- if message['content'] is none %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- else %}{{'<|Assistant|>' + message['content'] + '<|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- endif %}{%- endfor %}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- if message['role'] == 'assistant' and 'tool_calls' not in message %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}"
|
35 |
+
}
|