Upload DogeForCausalLM

Files changed:
- config.json (+43 -43)
- modeling_doge.py (+351 -321)
config.json CHANGED
@@ -1,43 +1,43 @@

The old and new sides of this hunk are line-for-line identical; the file was re-uploaded with unchanged content:

```json
{
  "_name_or_path": "SmallDoge/Doge-160M",
  "architectures": [
    "DogeForCausalLM"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_doge.DogeConfig",
    "AutoModelForCausalLM": "modeling_doge.DogeForCausalLM"
  },
  "bos_token_id": 0,
  "dynamic_mask_ratio": 0.0,
  "eos_token_id": 1,
  "expert_retrieval_size": 64,
  "hidden_act": "silu",
  "hidden_bias": false,
  "hidden_dropout": 0.0,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "is_moe": false,
  "max_position_embeddings": 2048,
  "model_type": "doge",
  "num_attention_heads": 6,
  "num_cdmoe_experts": 16348,
  "num_cdmoe_experts_per_head": 8,
  "num_cdmoe_heads": 4,
  "num_hidden_layers": 24,
  "num_key_value_heads": 3,
  "pad_token_id": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "factor": 4.0,
    "original_max_position_embeddings": 2048,
    "rope_type": "dynamic"
  },
  "rope_theta": 10000.0,
  "tie_word_embeddings": true,
  "torch_dtype": "float32",
  "transformers_version": "4.48.3",
  "use_cache": true,
  "vocab_size": 32768
}
```
modeling_doge.py CHANGED

```diff
@@ -1,14 +1,9 @@
-# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
-# This file was automatically generated from src/transformers/models/doge/modular_doge.py.
-# Do NOT edit this file manually as any edits will be overwritten by the generation of
-# the file from the modular. If any change should be done, please apply the change to the
-# modular_doge.py file directly. One of our CI enforces this.
-# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 # coding=utf-8
 # Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
 #
 # This code is based on the Wonderful Matrices paper implementation.
-#
+#
+# https://arxiv.org/abs/2412.11834
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
```
```diff
@@ -21,19 +16,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""PyTorch Doge model."""
 
 import math
 from typing import Callable, List, Optional, Tuple, Union
 
 import torch
 import torch.nn.functional as F
+import torch.utils.checkpoint
 from torch import nn
 
 from transformers.activations import ACT2FN
 from transformers.cache_utils import Cache, DynamicCache, StaticCache
 from transformers.generation import GenerationMixin
-from transformers.modeling_attn_mask_utils import AttentionMaskConverter
-from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_outputs import (
+    BaseModelOutputWithPast,
+    CausalLMOutputWithPast,
+    SequenceClassifierOutputWithPast,
+)
 from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
 from transformers.modeling_utils import PreTrainedModel
 from transformers.processing_utils import Unpack
```
```diff
@@ -41,24 +41,30 @@ from transformers.utils import (
     LossKwargs,
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
-    …
+    is_torch_greater_or_equal,
     logging,
     replace_return_docstrings,
 )
 from .configuration_doge import DogeConfig
 
-…
+try:
+    from einx import add as einx_add
+except ImportError:
+    einx_add = None
+
+if is_torch_greater_or_equal("2.5"):
     from torch.nn.attention.flex_attention import flex_attention
 
+
 logger = logging.get_logger(__name__)
 
 _CONFIG_FOR_DOC = "DogeConfig"
 
 
-class DogeRMSNorm(nn.Module):
+class RMSNorm(nn.Module):
     def __init__(self, hidden_size, eps=1e-6):
         """
-        DogeRMSNorm is equivalent to T5LayerNorm
+        RMSNorm is equivalent to T5LayerNorm
         """
         super().__init__()
         self.weight = nn.Parameter(torch.ones(hidden_size))
```
```diff
@@ -75,7 +81,7 @@ class DogeRMSNorm(nn.Module):
         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
 
 
-class DogeResidual(nn.Module):
+class Residual(nn.Module):
     def __init__(self, hidden_size):
         super().__init__()
         self.weight = nn.Parameter(torch.ones(hidden_size))
```
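`Residual` holds a learnable per-channel weight, but its forward is outside the hunk. A plausible reading, assuming the weight scales the skip connection (both the call signature and the formula below are assumptions, not shown in this diff):

```python
import torch
from torch import nn

class ResidualSketch(nn.Module):
    # Assumed behavior: gate the skip connection with a learnable per-channel weight.
    def __init__(self, hidden_size: int):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))  # as in the diff's __init__

    def forward(self, residual_states: torch.Tensor, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.weight * residual_states + hidden_states
```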
```diff
@@ -87,21 +93,23 @@ class DogeResidual(nn.Module):
         return f"{tuple(self.weight.shape)}"
 
 
-class DogeRotaryEmbedding(nn.Module):
-    def __init__(self, config: DogeConfig, device=None):
+class RotaryEmbedding(nn.Module):
+    def __init__(self, config: Optional[DogeConfig] = None):
         super().__init__()
-        # BC: "rope_type" was originally "type"
-        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
+        self.rope_kwargs = {}
+
+        if config.rope_scaling is not None:
             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
         else:
             self.rope_type = "default"
         self.max_seq_len_cached = config.max_position_embeddings
         self.original_max_seq_len = config.max_position_embeddings
+        self.base = config.rope_theta
 
         self.config = config
         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
 
-        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, **self.rope_kwargs)
         self.register_buffer("inv_freq", inv_freq, persistent=False)
         self.original_inv_freq = self.inv_freq
```
```diff
@@ -113,14 +121,13 @@ class DogeRotaryEmbedding(nn.Module):
         """
         seq_len = torch.max(position_ids) + 1
         if seq_len > self.max_seq_len_cached:  # growth
-            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
+            inv_freq, self.attention_scaling = self.rope_init_fn(
+                self.config, device, seq_len=seq_len, **self.rope_kwargs
+            )
             self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
             self.max_seq_len_cached = seq_len
 
         if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
-            # This .to() is needed if the model has been moved to a device after being initialized (because
-            # the buffer is automatically moved, but not the original copy)
-            self.original_inv_freq = self.original_inv_freq.to(device)
             self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
             self.max_seq_len_cached = self.original_max_seq_len
```
```diff
@@ -129,7 +136,7 @@ class DogeRotaryEmbedding(nn.Module):
         if "dynamic" in self.rope_type:
             self._dynamic_frequency_update(position_ids, device=x.device)
 
-        # Core RoPE block
+        # core RoPE block
         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
         position_ids_expanded = position_ids[:, None, :].float()
         # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
```
```diff
@@ -149,13 +156,15 @@ class DogeRotaryEmbedding(nn.Module):
 
 
 def rotate_half(x):
-    """Rotates half the hidden dims of the input."""
+    """
+    Rotates half the hidden dims of the input.
+    """
     x1 = x[..., : x.shape[-1] // 2]
     x2 = x[..., x.shape[-1] // 2 :]
     return torch.cat((-x2, x1), dim=-1)
 
 
-def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
     """Applies Rotary Position Embedding to the query and key tensors.
 
     Args:
```
```diff
@@ -167,11 +176,10 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
             Deprecated and unused.
         unsqueeze_dim (`int`, *optional*, defaults to 1):
             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
-            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
-            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
-            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
-            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
-            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
+            For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
+            Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
+            Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
     Returns:
         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
     """
```
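Only the docstring of the renamed `apply_QK_rotary_pos_emb` appears in the hunk; the body is outside the context. A runnable sketch of the standard rotation the docstring describes, with toy shapes derived from the Doge-160M config (768 hidden / 6 heads = 128 head_dim, 3 KV heads):

```python
import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)

# unsqueeze_dim=1 broadcasts cos/sin [batch, seq, head_dim] across the differing
# head counts of q (6 heads) and k (3 KV heads)
q, k = torch.randn(2, 6, 16, 128), torch.randn(2, 3, 16, 128)
cos, sin = torch.randn(2, 16, 128), torch.randn(2, 16, 128)
q_rot, k_rot = apply_QK_rotary_pos_emb(q, k, cos, sin)
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```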
```diff
@@ -184,8 +192,8 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
 
 def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
     """
-    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
-    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
+    The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
     """
     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
     if n_rep == 1:
```
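The middle of `repeat_kv` falls outside the hunk. A self-contained version; the expand-then-reshape step is taken from the standard transformers implementation of this helper:

```python
import torch

def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # duplicate each KV head n_rep times along a new axis, then fold into the head axis
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

# Doge-160M: 6 attention heads over 3 KV heads, so n_rep == 2
kv = torch.randn(1, 3, 10, 128)
assert repeat_kv(kv, 2).shape == (1, 6, 10, 128)
```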
```diff
@@ -194,148 +202,6 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
 
 
-def eager_attention_forward(
-    module: nn.Module,
-    query: torch.Tensor,
-    key: torch.Tensor,
-    value: torch.Tensor,
-    attention_mask: Optional[torch.Tensor],
-    scaling: float,
-    dropout: float = 0.0,
-    **kwargs,
-) -> Tuple[torch.Tensor, torch.Tensor]:
-    key_states = repeat_kv(key, module.num_key_value_groups)
-    value_states = repeat_kv(value, module.num_key_value_groups)
-
-    attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
-    if attention_mask is not None:
-        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
-        attn_weights = attn_weights + causal_mask
-
-    attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
-    attn_weights = F.dropout(attn_weights, p=dropout, training=module.training)
-    attn_output = torch.matmul(attn_weights, value_states)
-    attn_output = attn_output.transpose(1, 2).contiguous()
-
-    return attn_output, attn_weights
-
-
-def sdpa_attention_forward(
-    module: nn.Module,
-    query: torch.Tensor,
-    key: torch.Tensor,
-    value: torch.Tensor,
-    attention_mask: Optional[torch.Tensor],
-    dropout: float = 0.0,
-    scaling: Optional[float] = None,
-    is_causal: Optional[bool] = None,
-    **kwargs,
-) -> Tuple[torch.Tensor, None]:
-    key = repeat_kv(key, module.num_key_value_groups)
-    value = repeat_kv(value, module.num_key_value_groups)
-
-    causal_mask = attention_mask
-    if attention_mask is not None:
-        causal_mask = causal_mask[:, :, :, : key.shape[-2]]
-
-    # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
-    # Reference: https://github.com/pytorch/pytorch/issues/112577.
-    query = query.contiguous()
-    key = key.contiguous()
-    value = value.contiguous()
-
-    # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
-    # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
-    if is_causal is None:
-        is_causal = causal_mask is None and query.shape[2] > 1
-
-    # Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
-    # We convert it to a bool for the SDPA kernel that only accepts bools.
-    if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
-        is_causal = is_causal.item()
-
-    # NOTE: As of pytorch 2.5.1, SDPA backward pass of cuDNN is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
-    torch.backends.cuda.enable_cudnn_sdp(False)
-    attn_output = F.scaled_dot_product_attention(
-        query=query,
-        key=key,
-        value=value,
-        attn_mask=causal_mask,
-        dropout_p=dropout,
-        scale=scaling,
-        is_causal=is_causal,
-    )
-    attn_output = attn_output.transpose(1, 2).contiguous()
-
-    return attn_output, None
-
-
-def flex_attention_forward(
-    module: nn.Module,
-    query: torch.Tensor,
-    key: torch.Tensor,
-    value: torch.Tensor,
-    attention_mask: Optional[torch.Tensor],
-    scaling: Optional[float] = None,
-    is_causal: Optional[bool] = None,
-    softcap: Optional[float] = None,
-    head_mask: Optional[torch.Tensor] = None,
-    **kwargs,
-) -> Tuple[torch.Tensor, torch.Tensor]:
-    causal_mask = attention_mask
-    if attention_mask is not None:
-        causal_mask = causal_mask[:, :, :, : key.shape[-2]]
-
-    if is_causal is None:
-        is_causal = causal_mask is None and query.shape[2] > 1
-
-    def causal_mod(score, batch, head, q_idx, kv_idx):
-        if softcap is not None:
-            score = softcap * torch.tanh(score / softcap)
-        if causal_mask is not None:
-            score = score + causal_mask[batch][0][q_idx][kv_idx]
-        if head_mask is not None:
-            score = score + head_mask[batch][head][0][0]
-        return score
-
-    def dynamic_mod(score, batch, head, q_idx, kv_idx):
-        if softcap is not None:
-            score = softcap * torch.tanh(score / softcap)
-        if causal_mask is not None:
-            score = score + causal_mask[batch][head][q_idx][kv_idx]
-        if head_mask is not None:
-            score = score + head_mask[batch][head][0][0]
-        return score
-
-    # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
-    # NOTE: So we only use flex_attention in inference mode.
-    mask_mod = causal_mod if is_causal or module.training else dynamic_mod
-
-    attn_output, attention_weights = flex_attention(
-        query=query,
-        key=key,
-        value=value,
-        score_mod=mask_mod,
-        enable_gqa=True,
-        scale=scaling,
-        # Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
-        # For simplification, we thus always return it as no additional computations are introduced.
-        return_lse=True,
-    )
-    # lse is returned in float32
-    attention_weights = attention_weights.to(value.dtype)
-    attn_output = attn_output.transpose(1, 2).contiguous()
-
-    return attn_output, attention_weights
-
-
-ALL_ATTENTION_FUNCTIONS = {
-    "eager": eager_attention_forward,
-    "sdpa": sdpa_attention_forward,
-    "flex_attention": flex_attention_forward,
-}
-
-
 class DogeDynamicMaskAttention(nn.Module):
     """Dynamic Mask Attention from 'Wonderful Matrices' paper."""
 
```
```diff
@@ -343,28 +209,47 @@ class DogeDynamicMaskAttention(nn.Module):
         super().__init__()
         self.config = config
         self.layer_idx = layer_idx
-        self.head_dim = …
+        self.head_dim = config.hidden_size // config.num_attention_heads
         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
-        self.scaling = self.head_dim …
+        self.scaling = self.head_dim ** -0.5
         self.attention_dropout = config.attention_dropout
         self.dynamic_mask_ratio = config.dynamic_mask_ratio
 
+        self.ALL_ATTENTION_FUNCTIONS = {
+            "eager": self.eager_attention_forward,
+            "flex_attention": self.flex_attention_forward,
+            "sdpa": self.sdpa_attention_forward,
+        }
+
+        # Q K V O projections
         self.q_proj = nn.Linear(
-            config.hidden_size, …
+            config.hidden_size,
+            config.num_attention_heads * self.head_dim,
+            bias=config.hidden_bias
         )
         self.k_proj = nn.Linear(
-            config.hidden_size, …
+            config.hidden_size,
+            config.num_key_value_heads * self.head_dim,
+            bias=config.hidden_bias
         )
         self.v_proj = nn.Linear(
-            config.hidden_size, …
+            config.hidden_size,
+            config.num_key_value_heads * self.head_dim,
+            bias=config.hidden_bias
         )
-        # dynamic mask for the QK^T attention weights matrix
-        self.A = nn.Parameter(torch.zeros(config.num_attention_heads))
+        # dynamic mask for the QK^T attention score matrix
+        self.A = nn.Parameter(
+            torch.zeros(config.num_attention_heads)
+        )
         self.dt_proj = nn.Linear(
-            config.num_key_value_heads * self.head_dim, …
+            config.num_key_value_heads * self.head_dim,
+            config.num_attention_heads,
+            bias=config.hidden_bias
         )
         self.o_proj = nn.Linear(
-            config.num_attention_heads * self.head_dim, …
+            config.num_attention_heads * self.head_dim,
+            config.hidden_size,
+            bias=config.hidden_bias
         )
```
```diff
@@ -375,7 +260,7 @@ class DogeDynamicMaskAttention(nn.Module):
         past_key_value: Optional[Cache] = None,
         cache_position: Optional[torch.LongTensor] = None,
         **kwargs,
-    ) -> Tuple[torch.Tensor, Optional[ …
+    ) -> Tuple[torch.Tensor, Optional[Cache]]:
         input_shape = hidden_states.shape[:-1]
         hidden_shape = (*input_shape, -1, self.head_dim)
 
@@ -384,7 +269,7 @@ class DogeDynamicMaskAttention(nn.Module):
         value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
 
         cos, sin = position_embeddings
-        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+        query_states, key_states = apply_QK_rotary_pos_emb(query_states, key_states, cos, sin)
 
         if past_key_value is not None:
             # sin and cos are specific to RoPE models; cache_position needed for the static cache
@@ -392,9 +277,9 @@ class DogeDynamicMaskAttention(nn.Module):
             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 
         # calculate dynamic mask from value_states
-        dt_states = self.dt_proj(
-            value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1)
-        )
+        # NOTE: If these weights are not trained in causal mode, a mask of all ones will be returned, which will not affect the training results of causal mode
+        # TODO: The main reason for setting causal mode is that the Flex Attention kernel does not yet support score_mod functions with learnable parameters. However, we can continue training from the causal checkpoint later.
+        dt_states = self.dt_proj(value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1))
         dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
         attn_mask = self.prepare_dynamic_mask(
             hidden_states=hidden_states,
```
```diff
@@ -403,18 +288,11 @@ class DogeDynamicMaskAttention(nn.Module):
             attention_mask=attention_mask,
         )
 
-        attention_interface: Callable = eager_attention_forward
+        attention_interface: Callable = self.eager_attention_forward
         if self.config._attn_implementation != "eager":
-            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
-                logger.warning_once(
-                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
-                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
-                )
-            else:
-                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
-
-        attn_output, attn_weights = attention_interface(
-            self,
+            attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+        attn_output = attention_interface(
             query_states,
             key_states,
             value_states,
@@ -426,7 +304,7 @@ class DogeDynamicMaskAttention(nn.Module):
 
         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
         attn_output = self.o_proj(attn_output)
-        return attn_output, attn_weights
+        return attn_output
 
     def prepare_dynamic_mask(
         self,
```
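The dynamic-mask computation is compact; the sketch below replays it with Doge-160M shapes (6 heads, 3 KV heads, head_dim 128) and shows why the added NOTE holds: with `A` initialized to zeros, the mask is all ones and leaves attention scores untouched.

```python
import torch
import torch.nn.functional as F

bsz, seq_len, num_heads, kv_heads, head_dim = 2, 16, 6, 3, 128
value_states = torch.randn(bsz, kv_heads, seq_len, head_dim)
dt_proj = torch.nn.Linear(kv_heads * head_dim, num_heads)
A = torch.zeros(num_heads)  # initialized to zeros, as in __init__

dt_states = dt_proj(value_states.transpose(1, 2).reshape(bsz, seq_len, -1))  # [bsz, seq_len, num_heads]
dynamic_mask = torch.exp(A * F.softplus(dt_states)).transpose(-1, -2)        # [bsz, num_heads, seq_len]
assert torch.allclose(dynamic_mask, torch.ones_like(dynamic_mask))  # exp(0 * softplus(.)) == 1
```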
```diff
@@ -459,9 +337,110 @@ class DogeDynamicMaskAttention(nn.Module):
         attn_mask = attention_mask
 
         return attn_mask
+
+    def eager_attention_forward(
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        attention_mask: Optional[torch.Tensor],
+        scaling: float,
+        dropout: float = 0.0,
+        **kwargs,
+    ) -> torch.Tensor:
+        key_states = repeat_kv(key, self.num_key_value_groups)
+        value_states = repeat_kv(value, self.num_key_value_groups)
+
+        # compute attention scores matrix
+        attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
+        if attention_mask is not None:
+            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+            attn_weights = attn_weights + causal_mask
+
+        # upcast attention scores to fp32
+        attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+        attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
+
+        # apply attention scores to value states
+        attn_output = torch.matmul(attn_weights, value_states)
+        attn_output = attn_output.transpose(1, 2).contiguous()
+        return attn_output
+
+    def sdpa_attention_forward(
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        attention_mask: Optional[torch.Tensor],
+        scaling: float,
+        dropout: float = 0.0,
+        **kwargs,
+    ) -> torch.Tensor:
+        causal_mask = attention_mask
+        if attention_mask is not None:
+            causal_mask = causal_mask[:, :, :, : key.shape[-2]]
+
+        # SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
+        # Reference: https://github.com/pytorch/pytorch/issues/112577.
+        query = query.contiguous()
+        key = key.contiguous()
+        value = value.contiguous()
+
+        # NOTE: As of pytorch 2.5.1, cuDNN's SDPA backward pass is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
+        torch.backends.cuda.enable_cudnn_sdp(False)
+        attn_output = F.scaled_dot_product_attention(
+            query,
+            key,
+            value,
+            attn_mask=causal_mask,
+            dropout_p=dropout,
+            scale=scaling,
+            enable_gqa=True,
+        )
+        attn_output = attn_output.transpose(1, 2).contiguous()
+        return attn_output
+
+    def flex_attention_forward(
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        attention_mask: Optional[torch.Tensor],
+        scaling: float,
+        dropout: float = 0.0,
+        **kwargs,
+    ) -> torch.Tensor:
+        causal_mask = attention_mask
+        if attention_mask is not None:
+            causal_mask = causal_mask[:, :, :, : key.shape[-2]]
+
+        # TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
+        # NOTE: So we only use flex_attention in inference mode.
+
+        def causal_mod(score, batch, head, q_idx, kv_idx):
+            score = score + causal_mask[batch][0][q_idx][kv_idx]
+            return score
+
+        def dynamic_mod(score, batch, head, q_idx, kv_idx):
+            score = score + causal_mask[batch][head][q_idx][kv_idx]
+            return score
+
+        mask_mod = causal_mod if self.is_causal else dynamic_mod
+
+        attn_output = flex_attention(
+            query,
+            key,
+            value,
+            score_mod=mask_mod,
+            scale=scaling,
+            enable_gqa=True,
+        )
+        attn_output = attn_output.transpose(1, 2).contiguous()
+        return attn_output
 
 
 class DogeMLP(nn.Module):
+
     def __init__(self, config: DogeConfig):
         super().__init__()
         self.hidden_dim = config.hidden_size
```
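Since the attention backends are now instance methods keyed by `config._attn_implementation`, the backend can be picked at load time. A hypothetical loading snippet; the repo id comes from `_name_or_path` in config.json, and `trust_remote_code=True` is needed because `auto_map` points at configuration_doge.py / modeling_doge.py inside the repo rather than at classes shipped with transformers:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "SmallDoge/Doge-160M",
    trust_remote_code=True,
    attn_implementation="sdpa",  # "eager", "sdpa", or "flex_attention" (torch >= 2.5) per ALL_ATTENTION_FUNCTIONS
)
```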
```diff
@@ -496,11 +475,11 @@ class DogeCDMoE(DogeMLP):
         self.num_keys = int(math.sqrt(self.num_cdmoe_experts))
 
         # queries and keys for retrieval experts
-        self. …
-        self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self. …
+        self.queries = nn.Linear(self.hidden_dim, self.num_cdmoe_heads * self.expert_retrieval_dim, bias=False)
+        self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
 
         # experts
-        self.down_embed …
+        self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
         self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
 
@@ -510,28 +489,30 @@ class DogeCDMoE(DogeMLP):
     ) -> torch.Tensor:
         bsz, seq_len, _ = hidden_states.shape
 
-        # get …
-        queries = self. …
-        queries = queries.view(2, self.num_cdmoe_heads, …
-        …
+        # get similarity with queries and keys
+        queries = self.queries(hidden_states)
+        queries = queries.view(bsz, seq_len, 2, self.num_cdmoe_heads, -1).permute(2, 0, 1, 3, 4)
+        sim = torch.einsum("p b t h n, h k p n -> p b t h k", queries, self.keys)
+
+        # get experts with the highest similarity
+        (scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
+        if einx_add is not None:
+            all_scores = einx_add("... i, ... j -> ... (i j)", scores_x, scores_y)
+            all_indices = einx_add("... i, ... j -> ... (i j)", indices_x * self.num_keys, indices_y)
+        else:
+            all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
+            all_scores = all_scores.view(*scores_x.shape[:-1], -1)
+            all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
+            all_indices = all_indices.view(*indices_x.shape[:-1], -1)
         scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
         indices = all_indices.gather(-1, pk_indices)
         down_embed = self.down_embed(indices)
         up_embed = self.up_embed(indices)
 
         # mix experts states with cross domain states
-        experts_weights = torch. …
+        experts_weights = torch.einsum("b t d, b t h k d -> b t h k", hidden_states, down_embed)
         experts_weights = self.act_fn(experts_weights) * scores.softmax(dim=-1)
-        experts_states = torch. …
+        experts_states = torch.einsum("b t h k, b t h k d -> b t d", experts_weights, up_embed)
         hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
         hidden_states = hidden_states + experts_states
         return hidden_states
```
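The CDMoE retrieval composes two top-k searches over `num_keys` sub-keys into a search over `num_keys**2` experts, with `indices_x * num_keys + indices_y` as the mixed-radix expert id. A self-contained replay of the einx-free fallback branch, with the head dimensions dropped for brevity:

```python
import math
import torch

# config.json: num_cdmoe_experts = 16348, so num_keys = int(math.sqrt(16348)) = 127
num_keys, k = int(math.sqrt(16348)), 8
scores_x, indices_x = torch.randn(4, num_keys).topk(k, dim=-1)  # best "row" sub-keys
scores_y, indices_y = torch.randn(4, num_keys).topk(k, dim=-1)  # best "column" sub-keys

all_scores = (scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)).view(4, -1)                # k*k candidate sums
all_indices = (indices_x.unsqueeze(-1) * num_keys + indices_y.unsqueeze(-2)).view(4, -1)  # mixed-radix expert ids
scores, pk_indices = all_scores.topk(k, dim=-1)  # final top-k among the k*k candidates
indices = all_indices.gather(-1, pk_indices)     # expert ids fed to down_embed / up_embed
```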
```diff
@@ -542,13 +523,13 @@ class DogeDecoderLayer(nn.Module):
         super().__init__()
         self.hidden_dropout = config.hidden_dropout
 
-        self.pre_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.pre_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.self_attn = DogeDynamicMaskAttention(config=config, layer_idx=layer_idx)
-        self.pre_residual = DogeResidual(config.hidden_size)
+        self.pre_residual = Residual(config.hidden_size)
 
-        self.post_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-        self.feed_forward = DogeMLP(config) if …
-        self.post_residual = DogeResidual(config.hidden_size)
+        self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.feed_forward = DogeMLP(config) if config.is_moe == False else DogeCDMoE(config)
+        self.post_residual = Residual(config.hidden_size)
 
@@ -562,16 +543,15 @@ class DogeDecoderLayer(nn.Module):
         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
         **kwargs,
     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
         # sequence transformation
         residual = hidden_states
         hidden_states = self.pre_layernorm(hidden_states)
-        hidden_states, self_attn_weights = self.self_attn(
+        hidden_states = self.self_attn(
             hidden_states=hidden_states,
             attention_mask=attention_mask,
             position_ids=position_ids,
             past_key_value=past_key_value,
-            output_attentions=output_attentions,
-            use_cache=use_cache,
             cache_position=cache_position,
             position_embeddings=position_embeddings,
             **kwargs,
```
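Read together with the `Residual` sketch above, the decoder layer runs a pre-norm attention block and a pre-norm feed-forward block, each closed by a weighted residual. A condensed trace; the residual-application lines sit outside the hunk, so the `pre_residual(residual, hidden_states)` call signature is an assumption:

```python
def decoder_layer_step(layer, hidden_states, **attn_kwargs):
    # attention block: pre-norm -> dynamic-mask attention -> weighted residual (assumed signature)
    residual = hidden_states
    hidden_states = layer.pre_layernorm(hidden_states)
    hidden_states = layer.self_attn(hidden_states=hidden_states, **attn_kwargs)
    hidden_states = layer.pre_residual(residual, hidden_states)

    # feed-forward block: DogeMLP, or DogeCDMoE when config.is_moe is true
    residual = hidden_states
    hidden_states = layer.post_layernorm(hidden_states)
    hidden_states = layer.feed_forward(hidden_states)
    hidden_states = layer.post_residual(residual, hidden_states)
    return hidden_states
```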
```diff
@@ -609,8 +589,6 @@ DOGE_START_DOCSTRING = r"""
         load the weights associated with the model, only the configuration. Check out the
         [`~PreTrainedModel.from_pretrained`] method to load the model weights.
 """
-
-
 @add_start_docstrings(
     "The bare Doge Model outputting raw hidden-states without any specific head on top.",
     DOGE_START_DOCSTRING,
@@ -622,7 +600,7 @@ class DogePreTrainedModel(PreTrainedModel):
     _no_split_modules = ["DogeDecoderLayer"]
     _skip_keys_device_placement = ["past_key_values"]
     _supports_sdpa = True
-    # _supports_flex_attn = True
+    # _supports_flex_attn = True
     _supports_cache_class = True
     _supports_quantized_cache = True
     _supports_static_cache = True
@@ -733,11 +711,11 @@ class DogeModel(DogePreTrainedModel):
         self.vocab_size = config.vocab_size
 
         self.word_embed = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
-        self.rotary_emb = DogeRotaryEmbedding(config)
+        self.rotary_emb = RotaryEmbedding(config)
         self.layers = nn.ModuleList(
             [DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
         )
-        self.final_layernorm = DogeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
         self.gradient_checkpointing = False
 
         # Initialize weights and apply final processing
```
```diff
@@ -864,27 +842,9 @@ class DogeModel(DogePreTrainedModel):
         past_key_values: Cache,
         output_attentions: bool,
     ):
-        if self.config._attn_implementation == "flash_attention_2":
-            if attention_mask is not None and (attention_mask == 0.0).any():
-                return attention_mask
-            return None
-
-        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
-        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
-        # to infer the attention mask.
         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
         using_static_cache = isinstance(past_key_values, StaticCache)
 
-        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
-        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
-            if AttentionMaskConverter._ignore_causal_mask_sdpa(
-                attention_mask,
-                inputs_embeds=input_tensor,
-                past_key_values_length=past_seen_tokens,
-                is_training=self.training,
-            ):
-                return None
-
         dtype, device = input_tensor.dtype, input_tensor.device
         sequence_length = input_tensor.shape[1]
         if using_static_cache:
@@ -896,9 +856,9 @@ class DogeModel(DogePreTrainedModel):
             else past_seen_tokens + sequence_length + 1
         )
 
-        # …
+        # in case the provided `attention` mask is 2D, we generate a causal mask here (4D).
         causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
-            attention_mask,
+            attention_mask=attention_mask,
             sequence_length=sequence_length,
             target_length=target_length,
             dtype=dtype,
@@ -907,29 +867,17 @@ class DogeModel(DogePreTrainedModel):
             batch_size=input_tensor.shape[0],
         )
 
-        if (
-            self.config._attn_implementation == "sdpa"
-            and attention_mask is not None
-            and attention_mask.device.type in ["cuda", "xpu"]
-            and not output_attentions
-        ):
-            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
-            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
-            # Details: https://github.com/pytorch/pytorch/issues/110213
-            min_dtype = torch.finfo(dtype).min
-            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
-
         return causal_mask
 
     @staticmethod
     def _prepare_4d_causal_attention_mask_with_cache_position(
-        attention_mask: torch.Tensor,
-        sequence_length: int,
-        target_length: int,
-        dtype: torch.dtype,
-        device: torch.device,
-        cache_position: torch.Tensor,
-        batch_size: int,
+        attention_mask: torch.Tensor = None,
+        sequence_length: int = None,
+        target_length: int = None,
+        dtype: torch.dtype = None,
+        device: torch.device = None,
+        cache_position: torch.Tensor = None,
+        batch_size: int = None,
         **kwargs,
     ):
         """
@@ -960,7 +908,8 @@ class DogeModel(DogePreTrainedModel):
         else:
             min_dtype = torch.finfo(dtype).min
             causal_mask = torch.full(
-                (sequence_length, target_length), …
+                (sequence_length, target_length),
+                fill_value=min_dtype, dtype=dtype, device=device,
             )
             if sequence_length != 1:
                 causal_mask = torch.triu(causal_mask, diagonal=1)
@@ -978,6 +927,9 @@ class DogeModel(DogePreTrainedModel):
         return causal_mask
 
 
+class KwargsForCausalLM(LossKwargs): ...
+
+
 class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
     _tied_weights_keys = ["lm_head.weight"]
     _tp_plan = {"lm_head": "colwise_rep"}
```
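A toy replay of the dense 4D mask this helper builds: 4 new tokens attending over 6 key/value slots, 2 of which are already cached. The final `cache_position` comparison sits outside the hunk; it follows the stock transformers helper and is an assumption here:

```python
import torch

dtype, device = torch.float32, "cpu"
sequence_length, target_length = 4, 6
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(2, 6, device=device)  # absolute positions of the new tokens

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
causal_mask = torch.triu(causal_mask, diagonal=1)  # block strictly-future slots
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :]        # -> broadcastable [batch, head, q_len, kv_len]
```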
````diff
@@ -1003,7 +955,7 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
 
     def set_output_embeddings(self, new_embeddings):
         self.lm_head = new_embeddings
 
     def get_decoder(self):
         return self.model
 
@@ -1025,8 +977,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
         cache_position: Optional[torch.LongTensor] = None,
-        …
-        **kwargs: Unpack[ …
+        num_logits_to_keep: int = 0,
+        **kwargs: Unpack[KwargsForCausalLM],
     ) -> Union[Tuple, CausalLMOutputWithPast]:
         r"""
         Args:
@@ -1035,12 +987,10 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
 
-        …
-        …
+        num_logits_to_keep (`int`, *optional*):
+            Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
             `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
             token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
-            If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
-            This is useful when using packed tensor format (single dimension for batch and sequence length).
 
         Returns:
 
@@ -1049,8 +999,8 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
         ```python
         >>> from transformers import AutoTokenizer, AutoModelForCausalLM
 
-        >>> model = AutoModelForCausalLM.from_pretrained("…")
-        >>> tokenizer = AutoTokenizer.from_pretrained("…")
+        >>> model = AutoModelForCausalLM.from_pretrained("JingzeShi/Doge-20M-Instruct")
+        >>> tokenizer = AutoTokenizer.from_pretrained("JingzeShi/Doge-20M-Instruct")
 
         >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")
@@ -1082,9 +1032,9 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
         )
 
         hidden_states = outputs[0]
+
         # only compute necessary logits, and do not upcast them to float if we are not computing the loss
-        …
-        logits = self.lm_head(hidden_states[:, slice_indices, :])
+        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
 
         loss = None
         if labels is not None:
````
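A quick check of why the single slice `hidden_states[:, -num_logits_to_keep:, :]` covers both cases the docstring describes:

```python
import torch

hidden_states = torch.randn(1, 10, 768)

print(hidden_states[:, -0:, :].shape)  # num_logits_to_keep == 0: -0 == 0, full sequence -> torch.Size([1, 10, 768])
print(hidden_states[:, -1:, :].shape)  # num_logits_to_keep == 1: last position only   -> torch.Size([1, 1, 768])
```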
```diff
@@ -1103,32 +1053,111 @@ class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
         )
 
 
+class DogePatchEmbedding(nn.Module):
+    """
+    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` of shape `(batch_size, seq_len, hidden_size)` to be consumed by a Transformer.
+    """
+
+    def __init__(self, config: DogeConfig):
+        super().__init__()
+
+        self.num_channels = config.num_channels
+        self.patch_size = config.patch_size
+        self.hidden_dim = config.hidden_size
+
+        self.sequence_proj = nn.Conv2d(self.num_channels, self.hidden_dim, kernel_size=self.patch_size, stride=self.patch_size)
+        self.state_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=config.hidden_bias)
+
+    def forward(
+        self,
+        pixel_values: torch.Tensor,
+    ) -> torch.Tensor:
+        image_embedding = self.sequence_proj(pixel_values).flatten(2).transpose(1, 2)
+        image_embedding = self.state_proj(image_embedding)
+        return image_embedding
+
+
+class DogeForCausalVLM(DogeForCausalLM):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config: DogeConfig):
+        super().__init__(config)
+        self.config = config
+        self.pixel_embed = DogePatchEmbedding(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        pixel_values: torch.FloatTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        num_logits_to_keep: int = 0,
+        **loss_kwargs,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        # TODO: @wubingheng111: refer to Llava for implementating the forward method
+        ...
+
+    def prepare_inputs_for_generation(
+        self,
+        input_ids=None,
+        pixel_values=None,
+        past_key_values=None,
+        input_embeds=None,
+        attention_mask=None,
+        cache_position=None,
+        num_logits_to_keep=None,
+        **kwargs,
+    …
 @add_start_docstrings(
     """
     The Doge Model transformer with a sequence classification head on top (linear layer).
 
-    [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
-    (e.g. GPT-2) do.
 
-    Since it does classification on the last token, it requires to know the position of the last token. If a
-    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
-    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
-    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
-    each row of the batch).
-    """,
-    DOGE_START_DOCSTRING,
+    …
 )
 class DogeForSequenceClassification(DogePreTrainedModel):
     def __init__(self, config: DogeConfig):
         super().__init__(config)
         self.num_labels = config.num_labels
 
         self.model = DogeModel(config)
-        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
-        self.config = config
+        …
 
         # Initialize weights and apply final processing
-        self.post_init()
+        …
 
     def get_input_embeddings(self):
         return self.model.word_embed
```
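A toy run of the new `DogePatchEmbedding`, with assumed values: `config.num_channels=3` and `config.patch_size=16` are read by the class but are not present in the uploaded config.json; `hidden_size=768` is.

```python
import torch
from torch import nn

sequence_proj = nn.Conv2d(3, 768, kernel_size=16, stride=16)
state_proj = nn.Linear(768, 768, bias=False)  # bias follows config.hidden_bias = false

pixel_values = torch.randn(1, 3, 224, 224)
image_embedding = sequence_proj(pixel_values).flatten(2).transpose(1, 2)  # [1, 196, 768] patch tokens
image_embedding = state_proj(image_embedding)
```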
```diff
@@ -1152,14 +1181,14 @@ class DogeForSequenceClassification(DogePreTrainedModel):
     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
-            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
-            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+            …
         """
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
-        transformer_outputs = self.model(
-            input_ids,
+        …
             attention_mask=attention_mask,
             position_ids=position_ids,
             past_key_values=past_key_values,
@@ -1169,8 +1198,8 @@ class DogeForSequenceClassification(DogePreTrainedModel):
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
         )
-        hidden_states = …
-        logits = self. …
+        …
 
         if input_ids is not None:
             batch_size = input_ids.shape[0]
@@ -1180,36 +1209,37 @@ class DogeForSequenceClassification(DogePreTrainedModel):
         if self.config.pad_token_id is None and batch_size != 1:
             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
         if self.config.pad_token_id is None:
-            …
-        elif input_ids is not None:
-            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
-            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
-            token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
-            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
         else:
-            …
+            …
 
-        pooled_logits = logits[torch.arange(batch_size, device=logits.device), …
+        …
 
         loss = None
         if labels is not None:
-            loss = self.loss_function( …
+            …
 
         if not return_dict:
-            output = (pooled_logits,) + …
+            …
             return ((loss,) + output) if loss is not None else output
 
         return SequenceClassifierOutputWithPast(
             loss=loss,
             logits=pooled_logits,
-            past_key_values=…,
-            hidden_states=…,
-            attentions=…,
+            …
         )
 
-
 __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]
```
1 |
# coding=utf-8
|
2 |
# Copyright 2024 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
|
3 |
#
|
4 |
# This code is based on the Wonderful Matrices paper implementation.
|
5 |
+
#
|
6 |
+
# https://arxiv.org/abs/2412.11834
|
7 |
#
|
8 |
# Licensed under the Apache License, Version 2.0 (the "License");
|
9 |
# you may not use this file except in compliance with the License.
|
|
|
16 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
17 |
# See the License for the specific language governing permissions and
|
18 |
# limitations under the License.
|
19 |
+
"""PyTorch Doge model."""
|
20 |
|
21 |
import math
|
22 |
from typing import Callable, List, Optional, Tuple, Union
|
23 |
|
24 |
import torch
|
25 |
import torch.nn.functional as F
|
26 |
+
import torch.utils.checkpoint
|
27 |
from torch import nn
|
28 |
|
29 |
from transformers.activations import ACT2FN
|
30 |
from transformers.cache_utils import Cache, DynamicCache, StaticCache
|
31 |
from transformers.generation import GenerationMixin
|
32 |
+
from transformers.modeling_outputs import (
|
33 |
+
BaseModelOutputWithPast,
|
34 |
+
CausalLMOutputWithPast,
|
35 |
+
SequenceClassifierOutputWithPast,
|
36 |
+
)
|
37 |
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
|
38 |
from transformers.modeling_utils import PreTrainedModel
|
39 |
from transformers.processing_utils import Unpack
|
|
|
41 |
LossKwargs,
|
42 |
add_start_docstrings,
|
43 |
add_start_docstrings_to_model_forward,
|
44 |
+
is_torch_greater_or_equal,
|
45 |
logging,
|
46 |
replace_return_docstrings,
|
47 |
)
|
48 |
from .configuration_doge import DogeConfig
|
49 |
|
50 |
+
try:
|
51 |
+
from einx import add as einx_add
|
52 |
+
except ImportError:
|
53 |
+
einx_add = None
|
54 |
+
|
55 |
+
if is_torch_greater_or_equal("2.5"):
|
56 |
from torch.nn.attention.flex_attention import flex_attention
|
57 |
|
58 |
+
|
59 |
logger = logging.get_logger(__name__)
|
60 |
|
61 |
_CONFIG_FOR_DOC = "DogeConfig"
|
62 |
|
63 |
|
64 |
+
class RMSNorm(nn.Module):
|
65 |
def __init__(self, hidden_size, eps=1e-6):
|
66 |
"""
|
67 |
+
RMSNorm is equivalent to T5LayerNorm
|
68 |
"""
|
69 |
super().__init__()
|
70 |
self.weight = nn.Parameter(torch.ones(hidden_size))
|
|
|
81 |
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
82 |
|
83 |
|
84 |
+
class Residual(nn.Module):
|
85 |
def __init__(self, hidden_size):
|
86 |
super().__init__()
|
87 |
self.weight = nn.Parameter(torch.ones(hidden_size))
|
|
|
93 |
return f"{tuple(self.weight.shape)}"
|
94 |
|
95 |
|
96 |
+
class RotaryEmbedding(nn.Module):
|
97 |
+
def __init__(self, config: Optional[DogeConfig] = None):
|
98 |
super().__init__()
|
99 |
+
self.rope_kwargs = {}
|
100 |
+
|
101 |
+
if config.rope_scaling is not None:
|
102 |
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
|
103 |
else:
|
104 |
self.rope_type = "default"
|
105 |
self.max_seq_len_cached = config.max_position_embeddings
|
106 |
self.original_max_seq_len = config.max_position_embeddings
|
107 |
+
self.base = config.rope_theta
|
108 |
|
109 |
self.config = config
|
110 |
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
|
111 |
|
112 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, **self.rope_kwargs)
|
113 |
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
114 |
self.original_inv_freq = self.inv_freq
|
115 |
|
|
|
121 |
"""
|
122 |
seq_len = torch.max(position_ids) + 1
|
123 |
if seq_len > self.max_seq_len_cached: # growth
|
124 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(
|
125 |
+
self.config, device, seq_len=seq_len, **self.rope_kwargs
|
126 |
+
)
|
127 |
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
|
128 |
self.max_seq_len_cached = seq_len
|
129 |
|
130 |
if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
|
|
|
|
|
|
|
131 |
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
|
132 |
self.max_seq_len_cached = self.original_max_seq_len
|
133 |
|
|
|
136 |
if "dynamic" in self.rope_type:
|
137 |
self._dynamic_frequency_update(position_ids, device=x.device)
|
138 |
|
139 |
+
# core RoPE block
|
140 |
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
|
141 |
position_ids_expanded = position_ids[:, None, :].float()
|
142 |
# Force float32 (see https://github.com/huggingface/transformers/pull/29285)
|
|
|
156 |
|
157 |
|
158 |
def rotate_half(x):
|
159 |
+
"""
|
160 |
+
Rotates half the hidden dims of the input.
|
161 |
+
"""
|
162 |
x1 = x[..., : x.shape[-1] // 2]
|
163 |
x2 = x[..., x.shape[-1] // 2 :]
|
164 |
return torch.cat((-x2, x1), dim=-1)
|
165 |
|
166 |
|
167 |
+
def apply_QK_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
|
168 |
"""Applies Rotary Position Embedding to the query and key tensors.
|
169 |
|
170 |
Args:
|
|
|
176 |
Deprecated and unused.
|
177 |
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
178 |
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
179 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k.
|
180 |
+
For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim].
|
181 |
+
Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k.
|
182 |
+
Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
|
|
183 |
Returns:
|
184 |
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
185 |
"""
|
|
|
192 |
|
193 |
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
194 |
"""
|
195 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).
|
196 |
+
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
197 |
"""
|
198 |
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
199 |
if n_rep == 1:
|
|
|
202 |
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
203 |
|
204 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
205 |
class DogeDynamicMaskAttention(nn.Module):
|
206 |
"""Dynamic Mask Attention from 'Wonderful Matrices' paper."""
|
207 |
|
|
|
209 |
super().__init__()
|
210 |
self.config = config
|
211 |
self.layer_idx = layer_idx
|
212 |
+
self.head_dim = config.hidden_size // config.num_attention_heads
|
213 |
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
|
214 |
+
self.scaling = self.head_dim ** -0.5
|
215 |
self.attention_dropout = config.attention_dropout
|
216 |
self.dynamic_mask_ratio = config.dynamic_mask_ratio
|
217 |
|
218 |
+
self.ALL_ATTENTION_FUNCTIONS = {
|
219 |
+
"eager": self.eager_attention_forward,
|
220 |
+
"flex_attention": self.flex_attention_forward,
|
221 |
+
"sdpa": self.sdpa_attention_forward,
|
222 |
+
}
|
223 |
+
|
224 |
+
# Q K V O projections
|
225 |
self.q_proj = nn.Linear(
|
226 |
+
config.hidden_size,
|
227 |
+
config.num_attention_heads * self.head_dim,
|
228 |
+
bias=config.hidden_bias
|
229 |
)
|
230 |
self.k_proj = nn.Linear(
|
231 |
+
config.hidden_size,
|
232 |
+
config.num_key_value_heads * self.head_dim,
|
233 |
+
bias=config.hidden_bias
|
234 |
)
|
235 |
self.v_proj = nn.Linear(
|
236 |
+
config.hidden_size,
|
237 |
+
config.num_key_value_heads * self.head_dim,
|
238 |
+
bias=config.hidden_bias
|
239 |
+
)
|
240 |
+
# dynamic mask for the QK^T attention score matrix
|
241 |
+
self.A = nn.Parameter(
|
242 |
+
torch.zeros(config.num_attention_heads)
|
243 |
)
|
|
|
|
|
244 |
self.dt_proj = nn.Linear(
|
245 |
+
config.num_key_value_heads * self.head_dim,
|
246 |
+
config.num_attention_heads,
|
247 |
+
bias=config.hidden_bias
|
248 |
)
|
249 |
self.o_proj = nn.Linear(
|
250 |
+
config.num_attention_heads * self.head_dim,
|
251 |
+
config.hidden_size,
|
252 |
+
bias=config.hidden_bias
|
253 |
)
|
254 |
|
255 |
def forward(
|
|
|
260 |
past_key_value: Optional[Cache] = None,
|
261 |
cache_position: Optional[torch.LongTensor] = None,
|
262 |
**kwargs,
|
263 |
+
) -> Tuple[torch.Tensor, Optional[Cache]]:
|
264 |
input_shape = hidden_states.shape[:-1]
|
265 |
hidden_shape = (*input_shape, -1, self.head_dim)
|
266 |
|
|
|
269 |
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
|
270 |
|
271 |
cos, sin = position_embeddings
|
272 |
+
query_states, key_states = apply_QK_rotary_pos_emb(query_states, key_states, cos, sin)
|
273 |
|
274 |
if past_key_value is not None:
|
275 |
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
|
|
277 |
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
278 |
|
279 |
# calculate dynamic mask from value_states
|
280 |
+
# NOTE: If these weights are not trained in causal mode, a mask of all ones will be returned, which will not affect the training results of causal mode
|
281 |
+
# TODO: The main reason for setting causal mode is that the Flex Attention kernel does not yet support score_mod functions with learnable parameters. However, we can continue training from the causal checkpoint later.
|
282 |
+
dt_states = self.dt_proj(value_states.transpose(1, 2).reshape(value_states.shape[0], value_states.shape[-2], -1))
|
283 |
dynamic_mask = torch.exp(self.A * F.softplus(dt_states)).transpose(-1, -2)
|
284 |
attn_mask = self.prepare_dynamic_mask(
|
285 |
hidden_states=hidden_states,
|
|
|
288 |
attention_mask=attention_mask,
|
289 |
)
|
290 |
|
291 |
+
attention_interface: Callable = self.eager_attention_forward
|
292 |
if self.config._attn_implementation != "eager":
|
293 |
+
attention_interface = self.ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
|
294 |
+
|
295 |
+
attn_output = attention_interface(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
296 |
query_states,
|
297 |
key_states,
|
298 |
value_states,
|
|
|
304 |
|
305 |
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
|
306 |
attn_output = self.o_proj(attn_output)
|
307 |
+
return attn_output
|
308 |
|
309 |
def prepare_dynamic_mask(
|
310 |
self,
|
|
|
337 |
attn_mask = attention_mask
|
338 |
|
339 |
return attn_mask
|
340 |
+
|
341 |
+
def eager_attention_forward(
|
342 |
+
self,
|
343 |
+
query: torch.Tensor,
|
344 |
+
key: torch.Tensor,
|
345 |
+
value: torch.Tensor,
|
346 |
+
attention_mask: Optional[torch.Tensor],
|
347 |
+
scaling: float,
|
348 |
+
dropout: float = 0.0,
|
349 |
+
**kwargs,
|
350 |
+
) -> torch.Tensor:
|
351 |
+
key_states = repeat_kv(key, self.num_key_value_groups)
|
352 |
+
value_states = repeat_kv(value, self.num_key_value_groups)
|
353 |
+
|
354 |
+
# compute attention scores matrix
|
355 |
+
attn_weights = torch.matmul(query, key_states.transpose(-1, -2)) * scaling
|
356 |
+
if attention_mask is not None:
|
357 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
358 |
+
attn_weights = attn_weights + causal_mask
|
359 |
+
|
360 |
+
# upcast attention scores to fp32
|
361 |
+
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
|
362 |
+
attn_weights = F.dropout(attn_weights, p=dropout, training=self.training)
|
363 |
+
|
364 |
+
# apply attention scores to value states
|
365 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
366 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
367 |
+
return attn_output
|
368 |
+
|
369 |
+
def sdpa_attention_forward(
|
370 |
+
self,
|
371 |
+
query: torch.Tensor,
|
372 |
+
key: torch.Tensor,
|
373 |
+
value: torch.Tensor,
|
374 |
+
attention_mask: Optional[torch.Tensor],
|
375 |
+
scaling: float,
|
376 |
+
dropout: float = 0.0,
|
377 |
+
**kwargs,
|
378 |
+
) -> torch.Tensor:
|
379 |
+
causal_mask = attention_mask
|
380 |
+
if attention_mask is not None:
|
381 |
+
causal_mask = causal_mask[:, :, :, : key.shape[-2]]
|
382 |
+
|
383 |
+
# SDPA with memory-efficient backend is bugged with non-contiguous inputs and custom attn_mask for some torch versions
|
384 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
385 |
+
query = query.contiguous()
|
386 |
+
key = key.contiguous()
|
387 |
+
value = value.contiguous()
|
388 |
+
|
389 |
+
# NOTE: As of pytorch 2.5.1, cuDNN's SDPA backward pass is still incorrect, so we disable cuDNN SDPA (see https://github.com/pytorch/pytorch/issues/138581)
|
390 |
+
torch.backends.cuda.enable_cudnn_sdp(False)
|
391 |
+
attn_output = F.scaled_dot_product_attention(
|
392 |
+
query,
|
393 |
+
key,
|
394 |
+
value,
|
395 |
+
attn_mask=causal_mask,
|
396 |
+
dropout_p=dropout,
|
397 |
+
scale=scaling,
|
398 |
+
enable_gqa=True,
|
399 |
+
)
|
400 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
401 |
+
return attn_output
|
402 |
+
|
403 |
+
def flex_attention_forward(
|
404 |
+
self,
|
405 |
+
query: torch.Tensor,
|
406 |
+
key: torch.Tensor,
|
407 |
+
value: torch.Tensor,
|
408 |
+
attention_mask: Optional[torch.Tensor],
|
409 |
+
scaling: float,
|
410 |
+
dropout: float = 0.0,
|
411 |
+
**kwargs,
|
412 |
+
) -> torch.Tensor:
|
413 |
+
causal_mask = attention_mask
|
414 |
+
if attention_mask is not None:
|
415 |
+
causal_mask = causal_mask[:, :, :, : key.shape[-2]]
|
416 |
+
|
417 |
+
# TODO: flex_attention: As of pytorch 2.5.1, captured buffers that require grad are not yet supported.
|
418 |
+
# NOTE: So we only use flex_attention in inference mode.
|
419 |
+
|
420 |
+
def causal_mod(score, batch, head, q_idx, kv_idx):
|
421 |
+
score = score + causal_mask[batch][0][q_idx][kv_idx]
|
422 |
+
return score
|
423 |
+
|
424 |
+
def dynamic_mod(score, batch, head, q_idx, kv_idx):
|
425 |
+
score = score + causal_mask[batch][head][q_idx][kv_idx]
|
426 |
+
return score
|
427 |
+
|
428 |
+
mask_mod = causal_mod if self.is_causal else dynamic_mod
|
429 |
+
|
430 |
+
attn_output = flex_attention(
|
431 |
+
query,
|
432 |
+
key,
|
433 |
+
value,
|
434 |
+
score_mod=mask_mod,
|
435 |
+
scale=scaling,
|
436 |
+
enable_gqa=True,
|
437 |
+
)
|
438 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
439 |
+
return attn_output
|
440 |
|
441 |
|
442 |
class DogeMLP(nn.Module):
|
443 |
+
|
444 |
def __init__(self, config: DogeConfig):
|
445 |
super().__init__()
|
446 |
self.hidden_dim = config.hidden_size
|
|
|
475 |
self.num_keys = int(math.sqrt(self.num_cdmoe_experts))
|
476 |
|
477 |
# queries and keys for retrieval experts
|
478 |
+
self.queries = nn.Linear(self.hidden_dim, self.num_cdmoe_heads * self.expert_retrieval_dim, bias=False)
|
479 |
+
self.keys = nn.Parameter(torch.zeros(self.num_cdmoe_heads, self.num_keys, 2, self.expert_retrieval_dim // 2))
|
480 |
|
481 |
# experts
|
482 |
+
self.down_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
|
483 |
self.up_embed = nn.Embedding(self.num_cdmoe_experts, self.hidden_dim)
|
484 |
|
485 |
def forward(
|
|
|
489 |
) -> torch.Tensor:
|
490 |
bsz, seq_len, _ = hidden_states.shape
|
491 |
|
492 |
+
# get similarity with queries and keys
|
493 |
+
queries = self.queries(hidden_states)
|
494 |
+
queries = queries.view(bsz, seq_len, 2, self.num_cdmoe_heads, -1).permute(2, 0, 1, 3, 4)
|
495 |
+
sim = torch.einsum("p b t h n, h k p n -> p b t h k", queries, self.keys)
|
496 |
+
|
497 |
+
# get experts with the highest similarity
|
498 |
+
(scores_x, scores_y), (indices_x, indices_y) = sim.topk(self.num_cdmoe_experts_per_head, dim=-1)
|
499 |
+
if einx_add is not None:
|
500 |
+
all_scores = einx_add("... i, ... j -> ... (i j)", scores_x, scores_y)
|
501 |
+
all_indices = einx_add("... i, ... j -> ... (i j)", indices_x * self.num_keys, indices_y)
|
502 |
+
else:
|
503 |
+
all_scores = scores_x.unsqueeze(-1) + scores_y.unsqueeze(-2)
|
504 |
+
all_scores = all_scores.view(*scores_x.shape[:-1], -1)
|
505 |
+
all_indices = (indices_x.unsqueeze(-1) * self.num_keys) + indices_y.unsqueeze(-2)
|
506 |
+
all_indices = all_indices.view(*indices_x.shape[:-1], -1)
|
507 |
scores, pk_indices = all_scores.topk(self.num_cdmoe_experts_per_head, dim=-1)
|
508 |
indices = all_indices.gather(-1, pk_indices)
|
509 |
down_embed = self.down_embed(indices)
|
510 |
up_embed = self.up_embed(indices)
|
511 |
|
512 |
# mix experts states with cross domain states
|
513 |
+
experts_weights = torch.einsum("b t d, b t h k d -> b t h k", hidden_states, down_embed)
|
514 |
experts_weights = self.act_fn(experts_weights) * scores.softmax(dim=-1)
|
515 |
+
experts_states = torch.einsum("b t h k, b t h k d -> b t d", experts_weights, up_embed)
|
516 |
hidden_states = self.down_proj(self.act_fn(self.gate_proj(hidden_states)) * self.up_proj(hidden_states))
|
517 |
hidden_states = hidden_states + experts_states
|
518 |
return hidden_states
|
|
|
523 |
super().__init__()
|
524 |
self.hidden_dropout = config.hidden_dropout
|
525 |
|
526 |
+
self.pre_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
527 |
self.self_attn = DogeDynamicMaskAttention(config=config, layer_idx=layer_idx)
|
528 |
+
self.pre_residual = Residual(config.hidden_size)
|
529 |
|
530 |
+
self.post_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
531 |
+
self.feed_forward = DogeMLP(config) if config.is_moe == False else DogeCDMoE(config)
|
532 |
+
self.post_residual = Residual(config.hidden_size)
|
533 |
|
534 |
def forward(
|
535 |
self,
|
|
|
543 |
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
|
544 |
**kwargs,
|
545 |
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
546 |
+
|
547 |
# sequence transformation
|
548 |
residual = hidden_states
|
549 |
hidden_states = self.pre_layernorm(hidden_states)
|
550 |
+
hidden_states = self.self_attn(
|
551 |
hidden_states=hidden_states,
|
552 |
attention_mask=attention_mask,
|
553 |
position_ids=position_ids,
|
554 |
past_key_value=past_key_value,
|
|
|
|
|
555 |
cache_position=cache_position,
|
556 |
position_embeddings=position_embeddings,
|
557 |
**kwargs,
|
|
|
589 |
load the weights associated with the model, only the configuration. Check out the
|
590 |
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
591 |
"""
|
|
|
|
|
592 |
@add_start_docstrings(
|
593 |
"The bare Doge Model outputting raw hidden-states without any specific head on top.",
|
594 |
DOGE_START_DOCSTRING,
|
|
|
600 |
_no_split_modules = ["DogeDecoderLayer"]
|
601 |
_skip_keys_device_placement = ["past_key_values"]
|
602 |
_supports_sdpa = True
|
603 |
+
# _supports_flex_attn = True
|
604 |
_supports_cache_class = True
|
605 |
_supports_quantized_cache = True
|
606 |
_supports_static_cache = True
|
|
|
711 |
self.vocab_size = config.vocab_size
|
712 |
|
713 |
self.word_embed = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
|
714 |
+
self.rotary_emb = RotaryEmbedding(config)
|
715 |
self.layers = nn.ModuleList(
|
716 |
[DogeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
717 |
)
|
718 |
+
self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
719 |
self.gradient_checkpointing = False
|
720 |
|
721 |
# Initialize weights and apply final processing
|
|
|
842 |
past_key_values: Cache,
|
843 |
output_attentions: bool,
|
844 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
845 |
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
846 |
using_static_cache = isinstance(past_key_values, StaticCache)
|
847 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
848 |
dtype, device = input_tensor.dtype, input_tensor.device
|
849 |
sequence_length = input_tensor.shape[1]
|
850 |
if using_static_cache:
|
|
|
856 |
else past_seen_tokens + sequence_length + 1
|
857 |
)
|
858 |
|
859 |
+
# in case the provided `attention` mask is 2D, we generate a causal mask here (4D).
|
860 |
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
|
861 |
+
attention_mask=attention_mask,
|
862 |
sequence_length=sequence_length,
|
863 |
target_length=target_length,
|
864 |
dtype=dtype,
|
|
|
867 |
batch_size=input_tensor.shape[0],
|
868 |
)
|
869 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
870 |
return causal_mask
|
871 |
+
|
872 |
@staticmethod
|
873 |
def _prepare_4d_causal_attention_mask_with_cache_position(
|
874 |
+
attention_mask: torch.Tensor = None,
|
875 |
+
sequence_length: int = None,
|
876 |
+
target_length: int = None,
|
877 |
+
dtype: torch.dtype = None,
|
878 |
+
device: torch.device = None,
|
879 |
+
cache_position: torch.Tensor = None,
|
880 |
+
batch_size: int = None,
|
881 |
**kwargs,
|
882 |
):
|
883 |
"""
|
|
|
908 |
else:
|
909 |
min_dtype = torch.finfo(dtype).min
|
910 |
causal_mask = torch.full(
|
911 |
+
(sequence_length, target_length),
|
912 |
+
fill_value=min_dtype, dtype=dtype, device=device,
|
913 |
)
|
914 |
if sequence_length != 1:
|
915 |
causal_mask = torch.triu(causal_mask, diagonal=1)
|
|
|
927 |
return causal_mask
|
928 |
|
929 |
|
930 |
+
class KwargsForCausalLM(LossKwargs): ...
|
931 |
+
|
932 |
+
|
933 |
class DogeForCausalLM(DogePreTrainedModel, GenerationMixin):
|
934 |
_tied_weights_keys = ["lm_head.weight"]
|
935 |
_tp_plan = {"lm_head": "colwise_rep"}
|
|
|
955 |
|
956 |
def set_output_embeddings(self, new_embeddings):
|
957 |
self.lm_head = new_embeddings
|
958 |
+
|
959 |
def get_decoder(self):
|
960 |
return self.model
|
961 |
|
|
|
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
         cache_position: Optional[torch.LongTensor] = None,
+        num_logits_to_keep: int = 0,
+        **kwargs: Unpack[KwargsForCausalLM],
     ) -> Union[Tuple, CausalLMOutputWithPast]:
         r"""
         Args:
             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

+            num_logits_to_keep (`int`, *optional*):
+                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                 `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                 token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

         Returns:

         Example:

         ```python
         >>> from transformers import AutoTokenizer, AutoModelForCausalLM

+        >>> model = AutoModelForCausalLM.from_pretrained("JingzeShi/Doge-20M-Instruct")
+        >>> tokenizer = AutoTokenizer.from_pretrained("JingzeShi/Doge-20M-Instruct")

         >>> prompt = "Hey, are you conscious? Can you talk to me?"
         >>> inputs = tokenizer(prompt, return_tensors="pt")
@@
         )

         hidden_states = outputs[0]
+
         # only compute necessary logits, and do not upcast them to float if we are not computing the loss
+        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
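A quick check of the slicing trick above: `-0` is just `0`, so `num_logits_to_keep=0` keeps every position, while `1` keeps only the final token needed for decoding (sizes are illustrative):

```python
import torch

hidden_states = torch.randn(1, 5, 8)  # (batch, seq, hidden)

for num_logits_to_keep in (0, 1):
    kept = hidden_states[:, -num_logits_to_keep:, :]
    print(num_logits_to_keep, kept.shape)
# 0 torch.Size([1, 5, 8])  -- all positions (the `-0:` special case)
# 1 torch.Size([1, 1, 8])  -- only the last position, enough for generation
```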

         loss = None
         if labels is not None:
@@
             )

+class DogePatchEmbedding(nn.Module):
+    """
+    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` of shape `(batch_size, seq_len, hidden_size)` to be consumed by a Transformer.
+    """
+
+    def __init__(self, config: DogeConfig):
+        super().__init__()
+
+        self.num_channels = config.num_channels
+        self.patch_size = config.patch_size
+        self.hidden_dim = config.hidden_size
+
+        self.sequence_proj = nn.Conv2d(self.num_channels, self.hidden_dim, kernel_size=self.patch_size, stride=self.patch_size)
+        self.state_proj = nn.Linear(self.hidden_dim, self.hidden_dim, bias=config.hidden_bias)
+
+    def forward(
+        self,
+        pixel_values: torch.Tensor,
+    ) -> torch.Tensor:
+        image_embedding = self.sequence_proj(pixel_values).flatten(2).transpose(1, 2)
+        image_embedding = self.state_proj(image_embedding)
+        return image_embedding
+
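Shape walk-through of the patchify step above. The sizes (`num_channels=3`, `patch_size=16`, `hidden_size=768`) are assumptions for the demo, not values read from this repo's config:

```python
import torch
import torch.nn as nn

num_channels, patch_size, hidden_dim = 3, 16, 768  # assumed demo values
sequence_proj = nn.Conv2d(num_channels, hidden_dim, kernel_size=patch_size, stride=patch_size)
state_proj = nn.Linear(hidden_dim, hidden_dim, bias=False)

pixel_values = torch.randn(2, num_channels, 224, 224)  # (batch, C, H, W)
x = sequence_proj(pixel_values)   # (2, 768, 14, 14): one feature column per 16x16 patch
x = x.flatten(2).transpose(1, 2)  # (2, 196, 768): the patch grid becomes a token sequence
x = state_proj(x)
print(x.shape)  # torch.Size([2, 196, 768]) -- (224/16) * (224/16) = 196 visual tokens
```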
+class DogeForCausalVLM(DogeForCausalLM):
+    _tied_weights_keys = ["lm_head.weight"]
+
+    def __init__(self, config: DogeConfig):
+        super().__init__(config)
+        self.config = config
+        self.pixel_embed = DogePatchEmbedding(config)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        pixel_values: torch.FloatTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        num_logits_to_keep: int = 0,
+        **loss_kwargs,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        # TODO: @wubingheng111: refer to Llava for implementing the forward method
+        ...
+
+    def prepare_inputs_for_generation(
+        self,
+        input_ids=None,
+        pixel_values=None,
+        past_key_values=None,
+        input_embeds=None,
+        attention_mask=None,
+        cache_position=None,
+        num_logits_to_keep=None,
+        **kwargs,
+    ):
+        # delegate to the causal-LM implementation inherited from GenerationMixin
+        model_inputs = super().prepare_inputs_for_generation(
+            input_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=input_embeds,
+            attention_mask=attention_mask,
+            cache_position=cache_position,
+            num_logits_to_keep=num_logits_to_keep,
+            **kwargs,
+        )
+
+        if cache_position[0] == 0:
+            model_inputs["pixel_values"] = pixel_values
+
+        return model_inputs
+
+
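Note the `cache_position[0] == 0` guard above: the image is only fed to the model at the prefill step; during later decode steps its tokens are already in the KV cache, so `pixel_values` is dropped from the inputs. A schematic of that call pattern (hypothetical helper, not part of this file):

```python
import torch

def step_inputs(cache_position, pixel_values):
    # mirrors prepare_inputs_for_generation: attach the image only on the first step
    model_inputs = {"cache_position": cache_position}
    if cache_position[0] == 0:
        model_inputs["pixel_values"] = pixel_values
    return model_inputs

print(step_inputs(torch.tensor([0]), "img"))  # prefill: includes 'pixel_values'
print(step_inputs(torch.tensor([7]), "img"))  # decode step: image omitted
```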
 @add_start_docstrings(
     """
     The Doge Model transformer with a sequence classification head on top (linear layer).

+    [`DogeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do.

+    Since it does classification on the last token, it needs to know the position of the last token.
+    If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
+    If no `pad_token_id` is defined, it simply takes the last value in each row of the batch.
+    Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in each row of the batch).
+    """
 )
 class DogeForSequenceClassification(DogePreTrainedModel):
     def __init__(self, config: DogeConfig):
         super().__init__(config)
+        self.config = config
         self.num_labels = config.num_labels

         self.model = DogeModel(config)
+        self.classifier = nn.Linear(config.hidden_size, self.num_labels, bias=False)

         # Initialize weights and apply final processing
+        self.init_weights()

     def get_input_embeddings(self):
         return self.model.word_embed
@@
     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss.
+            Indices should be in `[0, ..., config.num_labels - 1]`.
+            If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
         """
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict

+        outputs = self.model(
+            input_ids=input_ids,
             attention_mask=attention_mask,
             position_ids=position_ids,
             past_key_values=past_key_values,
             inputs_embeds=inputs_embeds,
             use_cache=use_cache,
             output_attentions=output_attentions,
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
         )
+        hidden_states = outputs[0]
+        logits = self.classifier(hidden_states)

         if input_ids is not None:
             batch_size = input_ids.shape[0]
         else:
             batch_size = inputs_embeds.shape[0]

         if self.config.pad_token_id is None and batch_size != 1:
             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
         if self.config.pad_token_id is None:
+            sequence_lengths = -1
         else:
+            if input_ids is not None:
+                # if no pad token is found, use modulo instead of reverse indexing for ONNX compatibility
+                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+                sequence_lengths = sequence_lengths % input_ids.shape[-1]
+                sequence_lengths = sequence_lengths.to(logits.device)
+            else:
+                sequence_lengths = -1

+        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
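A toy run of the pooling above, showing how the last non-pad position is found per row (`pad_token_id=2` is an assumption for the demo):

```python
import torch

pad_token_id = 2
input_ids = torch.tensor([[5, 6, 7, 2, 2],
                          [8, 9, 2, 2, 2]])
logits = torch.randn(2, 5, 3)  # (batch, seq, num_labels)

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1  # index of first pad, minus one
sequence_lengths = sequence_lengths % input_ids.shape[-1]  # rows with no pad wrap from -1 to the last index
print(sequence_lengths)  # tensor([2, 1]) -- last real token of each row

pooled_logits = logits[torch.arange(2), sequence_lengths]
print(pooled_logits.shape)  # torch.Size([2, 3]): one row of class logits per sequence
```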

         loss = None
         if labels is not None:
+            loss = self.loss_function(
+                logits=logits,
+                labels=labels,
+                pooled_logits=pooled_logits,
+                config=self.config,
+            )

         if not return_dict:
+            output = (pooled_logits,) + outputs[1:]
             return ((loss,) + output) if loss is not None else output

         return SequenceClassifierOutputWithPast(
             loss=loss,
             logits=pooled_logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
         )


 __all__ = ["DogeForCausalLM", "DogeModel", "DogePreTrainedModel", "DogeForSequenceClassification"]