ykzhang721 committed
Commit 3d673be · verified · 1 Parent(s): 6ebb1b0

Upload modelforseminat_v3.py with huggingface_hub

Files changed (1)
  1. modelforseminat_v3.py +1543 -0
modelforseminat_v3.py ADDED
@@ -0,0 +1,1543 @@
1
+ from transformers import OlmoModel, OlmoForCausalLM, AutoTokenizer
2
+ from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
3
+ from transformers.modeling_outputs import (
4
+ CausalLMOutputWithPast,
5
+ BaseModelOutputWithPast,
6
+ )
7
+ import numpy as np
8
+ import math
9
+ from torch import nn
10
+ import pandas as pd
11
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
12
+ from dataclasses import dataclass
13
+
14
+ # Olmo
15
+ from transformers.models.olmo.configuration_olmo import OlmoConfig
16
+ from transformers.models.olmo.modeling_olmo import OlmoMLP, OlmoAttention, apply_rotary_pos_emb, repeat_kv, OlmoRotaryEmbedding
18
+
19
+ # Olmoe
20
+ from transformers.models.olmoe.modeling_olmoe import OlmoeRMSNorm
21
+ # from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
22
+ # from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
23
+ # from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
24
+
25
+ import os
26
+ import sys
27
+ import json
28
+ import pdb
29
+ import torch.distributed as dist
30
+ from tqdm import tqdm
31
+ from torch.utils.data.distributed import DistributedSampler
32
+ import transformers
33
+ import pickle
34
+ from dataset import *
35
+ from peft import (get_peft_model, PeftModel)
36
+ import random
37
+ from config import *
38
+ from datasets import Dataset, DatasetDict, load_dataset
39
+ import wandb
40
+ import argparse
41
+ import torch
42
+ import torch.nn as nn
43
+ import torch.nn.functional as F
44
+ import torch.optim as optim
45
+ import functools
46
+ from torch.optim.lr_scheduler import StepLR
47
+ import torch.multiprocessing as mp
+ from torch.nn.parallel import DistributedDataParallel as DDP
52
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
53
+ checkpoint_wrapper, CheckpointImpl)
54
+ from torch.distributed.fsdp import (
55
+ FullyShardedDataParallel as FSDP,
56
+ MixedPrecision,
57
+ BackwardPrefetch,
58
+ ShardingStrategy,
59
+ FullStateDictConfig,
60
+ StateDictType,
61
+ )
62
+ from torch.distributed.fsdp.wrap import (
63
+ transformer_auto_wrap_policy,
64
+ enable_wrap,
65
+ wrap,
66
+ )
67
+ from functools import partial
68
+ from torch.utils.data import DataLoader
69
+ from pathlib import Path
70
+ from typing import Type, List, Optional, Tuple, Union, Callable, Dict, Any
71
+
72
+
73
+ ############ specially for generate() #################
74
+ import inspect
+ import warnings
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)  # used by the warnings emitted in generate()
75
+ from transformers.generation.configuration_utils import (
76
+ NEED_SETUP_CACHE_CLASSES_MAPPING,
77
+ QUANT_BACKEND_CLASSES_MAPPING,
78
+ GenerationConfig,
79
+ GenerationMode,
80
+ )
81
+ from transformers.generation.logits_process import LogitsProcessorList
82
+ from transformers.generation.stopping_criteria import StoppingCriteriaList
83
+ from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
84
+ from transformers.integrations.fsdp import is_fsdp_managed_module
85
+
86
+ from transformers.generation.utils import (
87
+ is_torchdynamo_compiling, ModelOutput, GenerateDecoderOnlyOutput,
88
+ GenerateEncoderDecoderOutput, GenerateBeamDecoderOnlyOutput,
89
+ GenerateBeamEncoderDecoderOutput, GreedySearchDecoderOnlyOutput,
90
+ ContrastiveSearchDecoderOnlyOutput, SampleDecoderOnlyOutput,
91
+ ContrastiveSearchEncoderDecoderOutput, GreedySearchEncoderDecoderOutput,
92
+ SampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput,
93
+ BeamSampleDecoderOnlyOutput, BeamSearchEncoderDecoderOutput,
94
+ BeamSampleEncoderDecoderOutput, GreedySearchOutput, SampleOutput,
95
+ BeamSearchOutput, BeamSampleOutput, ContrastiveSearchOutput,
96
+ GenerateNonBeamOutput, GenerateBeamOutput, GenerateOutput)
97
+
98
+ ############ specially for generate() #################
99
+
100
+
101
+ @dataclass
102
+ class ModelOutputWithPastForSemiNAT(BaseModelOutputWithPast):
103
+
104
+ chunk_hidden_state: torch.FloatTensor = None
105
+ length_ground_truth: Optional[torch.FloatTensor] = None
106
+ length_logits: Optional[torch.FloatTensor] = None
107
+ position_embeddings: Optional[torch.FloatTensor] = None # ?
108
+ nar_hidden_state: torch.FloatTensor = None # ?
109
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
110
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
111
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
112
+
113
+
114
+ class OlmoAttentionForSemiNAT(nn.Module):
115
+
116
+ def __init__(
117
+ self,
118
+ config: OlmoConfig,
119
+ layer_idx: Optional[int] = None,
120
+ ):
121
+ super().__init__()
122
+ self.config = config
123
+ self.layer_idx = layer_idx
124
+ if layer_idx is None:
125
+ print(
126
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` will lead to errors during the forward call if caching is used."
127
+ )
128
+ self.attention_dropout = config.attention_dropout
129
+ self.hidden_size = config.hidden_size
130
+ self.num_heads = config.num_attention_heads
131
+ self.head_dim = self.hidden_size // self.num_heads
132
+
133
+ # GQA
134
+ # n_k_v_h is the number of key/value heads
135
+ # n_k_v_g is the number of query heads per k/v head
136
+ self.num_key_value_heads = config.num_key_value_heads
137
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
138
+
139
+ self.max_position_embeddings = config.max_position_embeddings
140
+ self.rope_theta = config.rope_theta
141
+
142
+ if (self.head_dim * self.num_heads) != self.hidden_size:
143
+ raise ValueError(
144
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
145
+ f" and `num_heads`: {self.num_heads}).")
146
+
147
+ self.q_proj = nn.Linear(self.hidden_size,
148
+ self.num_heads * self.head_dim,
149
+ bias=config.attention_bias)
150
+ self.k_proj = nn.Linear(self.hidden_size,
151
+ self.num_key_value_heads * self.head_dim,
152
+ bias=config.attention_bias)
153
+ self.v_proj = nn.Linear(self.hidden_size,
154
+ self.num_key_value_heads * self.head_dim,
155
+ bias=config.attention_bias)
156
+ self.o_proj = nn.Linear(self.hidden_size,
157
+ self.hidden_size,
158
+ bias=config.attention_bias)
159
+ # pdb.set_trace()
160
+ self.q_norm = OlmoeRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
161
+ self.k_norm = OlmoeRMSNorm(
162
+ (self.hidden_size // self.num_heads) * self.num_key_value_heads,
163
+ eps=config.rms_norm_eps)
164
+
165
+ def forward(
166
+ self,
167
+ hidden_states: torch.Tensor,
168
+ attention_mask: Optional[torch.Tensor] = None,
169
+ past_key_value: Optional[Cache] = None,
170
+ output_attentions: bool = False,
171
+ cache_position: Optional[torch.LongTensor] = None,
172
+ position_embeddings: Optional[Tuple[torch.Tensor,
173
+ torch.Tensor]] = None,
174
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
175
+ Optional[Tuple[torch.Tensor]]]:
176
+ bsz, q_len, _ = hidden_states.size() # bs * length * hidden_size
177
+ query_states = self.q_norm(
178
+ self.q_proj(hidden_states)) # bs * length * hidden_size
179
+ key_states = self.k_norm(self.k_proj(
180
+ hidden_states)) # bs * length * (num_key_value_heads * head_dim)
181
+ value_states = self.v_proj(
182
+ hidden_states) # bs * length * (num_key_value_heads * head_dim)
183
+
184
+ if self.config.clip_qkv is not None:
185
+ query_states.clamp_(min=-self.config.clip_qkv,
186
+ max=self.config.clip_qkv)
187
+ key_states.clamp_(min=-self.config.clip_qkv,
188
+ max=self.config.clip_qkv)
189
+ value_states.clamp_(min=-self.config.clip_qkv,
190
+ max=self.config.clip_qkv)
191
+
192
+ # split into the individual attention heads
193
+ query_states = query_states.view(
194
+ bsz, q_len, self.num_heads,
195
+ self.head_dim).transpose(1,
196
+ 2) # bs * num_heads * length * head_dim
197
+ key_states = key_states.view(
198
+ bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(
199
+ 1, 2) # bs * num_key_value_heads * length * head_dim
200
+ value_states = value_states.view(
201
+ bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(
202
+ 1, 2) # bs * num_key_value_heads * length * head_dim
203
+
204
+ cos, sin = position_embeddings # bs * length * head_dim
205
+ query_states, key_states = apply_rotary_pos_emb(
206
+ query_states, key_states, cos,
207
+ sin) # bs * num_heads (or num_key_value_heads) * length * head_dim
208
+
209
+ # TODO: check the exact implementation of past_key_value.update (specific to RoPE)
210
+ if past_key_value is not None:
211
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
212
+ cache_kwargs = {
213
+ "sin": sin,
214
+ "cos": cos,
215
+ "cache_position": cache_position
216
+ }
217
+ key_states, value_states = past_key_value.update(
218
+ key_states, value_states, self.layer_idx, cache_kwargs)
219
+
220
+ key_states = repeat_kv(
221
+ key_states,
222
+ self.num_key_value_groups) # bs * num_heads * length * head_dim
223
+ value_states = repeat_kv(
224
+ value_states,
225
+ self.num_key_value_groups) # bs * num_heads * length * head_dim
226
+ attn_weights = torch.matmul(
227
+ query_states, key_states.transpose(2, 3)) / math.sqrt(
228
+ self.head_dim) # bs * num_heads * length * length
229
+
230
+ # try:
231
+ # TODO: check what is passed in as attention_mask
232
+ if attention_mask is not None: # no matter the length, we just slice it
233
+ causal_mask = attention_mask[:, :, :, :key_states.shape[
234
+ -2]] # bs * 1 * (q_)length * (k_)length
235
+ attn_weights = attn_weights + causal_mask
236
+ # except:
237
+ # pdb.set_trace()
238
+
239
+ attn_weights = nn.functional.softmax(
240
+ attn_weights, dim=-1, dtype=torch.float32).to(
241
+ query_states.dtype) # bs * num_heads * length * length
242
+ attn_weights = nn.functional.dropout(
243
+ attn_weights, p=self.attention_dropout,
244
+ training=self.training) # bs * num_heads * length * length
245
+ attn_output = torch.matmul(
246
+ attn_weights, value_states) # bs * num_heads * length * head_dim
247
+
248
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
249
+ raise ValueError(
250
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
251
+ f" {attn_output.size()}")
252
+
253
+ attn_output = attn_output.transpose(
254
+ 1, 2).contiguous() # bs * length * num_heads * head_dim
255
+ attn_output = attn_output.reshape(
256
+ bsz, q_len, self.hidden_size) # bs * length * hidden_size
257
+ attn_output = self.o_proj(attn_output) # bs * length * hidden_size
258
+
259
+ if not output_attentions:
260
+ attn_weights = None
261
+ return attn_output, attn_weights, past_key_value
262
+
263
+
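# --- Editor's illustrative sketch (not part of the uploaded file) ---
# The eager attention above (repeat_kv, QK^T / sqrt(d), additive causal mask,
# softmax, weighted sum over V) should match torch's fused attention kernel
# numerically. The helper name below is hypothetical; it only sanity-checks
# that equivalence on random tensors.
import math
import torch
import torch.nn.functional as F

def _check_eager_vs_sdpa(bsz=2, num_heads=4, q_len=8, head_dim=16):
    q = torch.randn(bsz, num_heads, q_len, head_dim)
    k = torch.randn(bsz, num_heads, q_len, head_dim)
    v = torch.randn(bsz, num_heads, q_len, head_dim)
    causal_mask = torch.full((q_len, q_len), float("-inf")).triu(1)  # additive causal mask
    attn = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_dim) + causal_mask
    eager = torch.matmul(torch.softmax(attn, dim=-1), v)
    fused = F.scaled_dot_product_attention(q, k, v, is_causal=True)
    return torch.allclose(eager, fused, atol=1e-5)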
264
+ class OlmoDecoderLayerForSemiNAT(nn.Module):
265
+
266
+ def __init__(
267
+ self,
268
+ config: OlmoConfig,
269
+ layer_idx: int,
270
+ ):
271
+ super().__init__()
272
+ self.hidden_size = config.hidden_size
273
+ self.self_attn = OlmoAttentionForSemiNAT(config=config,
274
+ layer_idx=layer_idx)
275
+ self.mlp = OlmoMLP(config)
276
+ self.input_layernorm = OlmoeRMSNorm(config.hidden_size,
277
+ eps=config.rms_norm_eps)
278
+ self.post_attention_layernorm = OlmoeRMSNorm(config.hidden_size,
279
+ eps=config.rms_norm_eps)
280
+
281
+ def forward(
282
+ self,
283
+ hidden_states: torch.Tensor,
284
+ attention_mask: Optional[torch.Tensor] = None,
285
+ past_key_value: Optional[Cache] = None,
286
+ output_attentions: Optional[bool] = False,
287
+ use_cache: Optional[bool] = False,
288
+ cache_position: Optional[torch.LongTensor] = None,
289
+ position_embeddings: Optional[Tuple[torch.Tensor,
290
+ torch.Tensor]] = None,
291
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
292
+ torch.FloatTensor]]]:
293
+ """
294
+ attention_mask: (bs, seq_len) if flash attention or (bs, 1, q_seq_len, k_seq_len) if default
295
+
296
+ past_key_value: Tuple(torch.FloatTensor)
297
+
298
+ position_embeddings `Tuple[torch.FloatTensor, torch.FloatTensor]`, cos and sin of shape (batch_size, seq_len, head_dim)
299
+ """
300
+
301
+ residual = hidden_states # bs * length * hidden_size
302
+ # pdb.set_trace()
303
+ hidden_states = self.input_layernorm(hidden_states)
304
+
305
+ # Self Attention
306
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
307
+ hidden_states=hidden_states,
308
+ attention_mask=attention_mask,
309
+ past_key_value=past_key_value,
310
+ output_attentions=output_attentions,
311
+ cache_position=cache_position,
312
+ position_embeddings=position_embeddings,
313
+ )
314
+ hidden_states = residual + hidden_states # bs * length * hidden_size
315
+
316
+ # MLP
317
+ residual = hidden_states
318
+ hidden_states = self.post_attention_layernorm(hidden_states)
319
+ hidden_states = self.mlp(hidden_states)
320
+ hidden_states = residual + hidden_states
321
+
322
+ outputs = (hidden_states, )
323
+ if output_attentions:
324
+ outputs += (self_attn_weights, )
325
+ if use_cache:
326
+ outputs += (present_key_value, )
327
+ return outputs
328
+
329
+
330
+ class NATEncoderForSemiNAT(nn.Module):
331
+
332
+ def __init__(self, config: OlmoConfig, num_layer: int = 1):
333
+ super().__init__()
334
+ self.num_layer = num_layer
335
+ self.encoder_layers = nn.ModuleList([
336
+ OlmoDecoderLayerForSemiNAT(config, layer_idx)
337
+ for layer_idx in range(self.num_layer)
338
+ ])
339
+
340
+ def forward(
341
+ self,
342
+ hidden_states: torch.Tensor,
343
+ attention_mask: Optional[torch.Tensor] = None,
344
+ past_key_value: Optional[Cache] = None,
345
+ output_attentions: Optional[bool] = False,
346
+ use_cache: Optional[bool] = False,
347
+ cache_position: Optional[torch.LongTensor] = None,
348
+ position_embeddings: Optional[Tuple[torch.Tensor,
349
+ torch.Tensor]] = None,
350
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
351
+ torch.FloatTensor]]]:
352
+ # pdb.set_trace()
353
+ for layer in self.encoder_layers:
354
+ outputs = layer(hidden_states=hidden_states,
355
+ output_attentions=output_attentions,
356
+ position_embeddings=position_embeddings)
357
+ hidden_states = outputs[0]
358
+ # only the last layer attn_weights and present_key_value are stored
359
+ # mean pool the hidden states across sequence (chunk)
360
+ hidden_states = torch.mean(hidden_states, dim=1)
361
+ return hidden_states
362
+
363
+
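# --- Editor's illustrative sketch (not part of the uploaded file) ---
# NATEncoderForSemiNAT compresses each chunk of token states into one vector:
# its layers attend within the chunk and the result is mean-pooled over the
# chunk length (torch.mean(hidden_states, dim=1)). The helper below shows only
# the slicing-and-pooling idea for given cut points; the name is hypothetical
# and it skips the decoder layers.
import torch

def _pool_chunks(token_states: torch.Tensor, cut_points):
    # token_states: (seq_len, hidden_size); cut_points: inclusive end index of
    # each chunk, e.g. [2, 5, 9] -> chunks [0:3], [3:6], [6:10]
    pooled, start = [], 0
    for cut in cut_points:
        pooled.append(token_states[start:cut + 1].mean(dim=0))
        start = cut + 1
    return torch.stack(pooled)  # (num_chunks, hidden_size)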
364
+ class NATDecoderForSemiNAT(nn.Module):
365
+
366
+ def __init__(self, config: OlmoConfig, num_layer: int = 1):
367
+ super().__init__()
368
+ self.num_layer = num_layer
369
+ self.decoder_layers = nn.ModuleList([
370
+ OlmoDecoderLayerForSemiNAT(config, layer_idx)
371
+ for layer_idx in range(self.num_layer)
372
+ ])
373
+
374
+ def forward(
375
+ self,
376
+ hidden_states: torch.Tensor,
377
+ attention_mask: Optional[torch.Tensor] = None,
378
+ past_key_value: Optional[Cache] = None,
379
+ output_attentions: Optional[bool] = False,
380
+ use_cache: Optional[bool] = False,
381
+ cache_position: Optional[torch.LongTensor] = None,
382
+ position_embeddings: Optional[Tuple[torch.Tensor,
383
+ torch.Tensor]] = None,
384
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
385
+ torch.FloatTensor]]]:
386
+
387
+ for layer in self.decoder_layers:
388
+ # pdb.set_trace()
389
+ outputs = layer(hidden_states=hidden_states,
390
+ output_attentions=output_attentions,
391
+ position_embeddings=position_embeddings)
392
+ hidden_states = outputs[0]
393
+ return hidden_states
394
+
395
+
396
+ class OlmoModelForSemiNAT(OlmoModel):
397
+
398
+ def __init__(self, config):
399
+ super().__init__(config)
400
+ self.layers = nn.ModuleList([
401
+ OlmoDecoderLayerForSemiNAT(config, layer_idx)
402
+ for layer_idx in range(config.num_hidden_layers)
403
+ ])
404
+
405
+ self.decoder = NATDecoderForSemiNAT(config, 1)
406
+ self.encoder = NATEncoderForSemiNAT(config, 1)
407
+ self.chunk_size_limit = config.chunk_size_limit
408
+
409
+ # self.decoder = NATDecoderForSemiNAT(config, 1)
410
+ self.length_predictor = nn.Linear(config.hidden_size,
411
+ self.chunk_size_limit)
412
+
413
+ def forward(
414
+ self,
415
+ input_ids: torch.LongTensor = None,
416
+ attention_mask: Optional[torch.Tensor] = None,
417
+ position_ids: Optional[torch.LongTensor] = None,
418
+ slice_pos: torch.Tensor = None,
419
+ past_key_values: Optional[Union[Cache,
420
+ List[torch.FloatTensor]]] = None,
421
+ inputs_embeds: Optional[torch.FloatTensor] = None,
422
+ use_cache: Optional[bool] = None,
423
+ output_attentions: Optional[bool] = None,
424
+ output_hidden_states: Optional[bool] = None,
425
+ cache_position: Optional[torch.LongTensor] = None,
426
+ inference: Optional[bool] = None,
427
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
428
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
429
+ output_hidden_states = (output_hidden_states
430
+ if output_hidden_states is not None else
431
+ self.config.output_hidden_states)
432
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
433
+
434
+ if (input_ids is None) ^ (inputs_embeds is not None):
435
+ raise ValueError(
436
+ "You must specify exactly one of input_ids or inputs_embeds")
437
+
438
+ if self.gradient_checkpointing and self.training and use_cache:
439
+ print(
440
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
441
+ )
442
+ use_cache = False
443
+
444
+ if inputs_embeds is None:
445
+ inputs_embeds = self.embed_tokens(input_ids)
446
+
447
+ # kept for BC (non `Cache` `past_key_values` inputs)
448
+ return_legacy_cache = False
449
+
450
+ if use_cache and not isinstance(past_key_values, Cache):
451
+ return_legacy_cache = True
452
+ if past_key_values is None:
453
+ past_key_values = DynamicCache()
454
+ else:
455
+ past_key_values = DynamicCache.from_legacy_cache(
456
+ past_key_values)
457
+ print(
458
+ "Passing `past_key_values` as a tuple of tuples has been deprecated."
459
+ )
460
+ if cache_position is None:
461
+ past_seen_tokens = past_key_values.get_seq_length(
462
+ ) if past_key_values is not None else 0
463
+ cache_position = torch.arange(
464
+ past_seen_tokens,
465
+ past_seen_tokens + inputs_embeds.shape[1],
466
+ device=inputs_embeds.device # 0-255, length
467
+ )
468
+
469
+ if position_ids is None:
470
+ position_ids = cache_position.unsqueeze(0) #0-255, length
471
+
472
+ if inference:
473
+ position_ids = cache_position.unsqueeze(0)
474
+
475
+ position_embeddings = self.rotary_emb(inputs_embeds, position_ids)
476
+
477
+ all_hidden_states = () if output_hidden_states else None
478
+ all_self_attns = () if output_attentions else None
479
+ next_decoder_cache = None
480
+
481
+ # pdb.set_trace()
482
+
483
+ # initialize chunk inputs as embedding of [pad]
484
+ pad_token_id = 1
485
+ batch_size, seq_len, hidden_size = inputs_embeds.shape
486
+ pad_embedding = self.embed_tokens(
487
+ torch.tensor([pad_token_id]).to(inputs_embeds.device)) # 1, 2048
488
+ # pad_chunk_emb = self.encoder(
489
+ # pad_embedding.unsqueeze(0),
490
+ # attention_mask=None,
491
+ # position_embeddings=position_embeddings[:, :1, :],
492
+ # ) # 1 * 1 * hidden_size
493
+ chunk_inputs_embeds = pad_embedding.expand(
494
+ batch_size, seq_len, hidden_size).clone().to(
495
+ inputs_embeds.device) # bs * length * hidden_size, pre-filled with the [pad] embedding
496
+
497
+ # iterate over the batch and the sequence
498
+ length_ground_truth = []
499
+ chunk_attention_mask = []
500
+ chunk_labels = []
501
+ # max_chunk_num = 0
502
+ accumu_num = 0
503
+ slice_nums = []
504
+
505
+ # pdb.set_trace()
506
+ for b in range(batch_size):
507
+ slice_num = 0
508
+ start_position = 0
509
+ slice_length = []
510
+ for i in range(seq_len):
511
+ cut = slice_pos[b, i].item() # get the split position
+ if cut == -1: # -1 means no split at this position
+ pass
+ else:
+ cut += 1 # +1 so that the cut falls right after this position
516
+ # pdb.set_trace()
517
+ chunk_inputs_embeds[b, i] = self.encoder(
518
+ inputs_embeds[b, start_position:cut].unsqueeze(0),
519
+ position_embeddings=tuple(
520
+ tensor[0, start_position:cut, :].unsqueeze(0)
521
+ for tensor in position_embeddings))
522
+ slice_num += 1
523
+ slice_length.append(cut - start_position)
524
+ if cut - start_position > 10 or cut - start_position < 0:
525
+ pdb.set_trace()
526
+ start_position = cut # move the chunk start forward
+ slice_nums.append(slice_num) # number of chunks in this sample
+ # max_chunk_num = max(max_chunk_num, slice_num) # not used; the accumulated chunk count is used instead
529
+ accumu_num += slice_num
530
+ chunk_attention_mask.append(
531
+ torch.tensor([1] * slice_num + [0] *
532
+ (seq_len - slice_num)).unsqueeze(
533
+ 0)) # 1 = real chunk, 0 = padding
534
+ length_ground_truth.append(
535
+ torch.tensor(slice_length + [-100] *
536
+ (seq_len - slice_num)).unsqueeze(0)) # -100 = padding (no chunk)
537
+ accumu_num -= batch_size
538
+ # pdb.set_trace()
539
+
540
+ chunk_attention_mask = torch.cat(chunk_attention_mask, dim=0).to(
541
+ inputs_embeds.device) # torch.Size([1, 256]) bs * length
542
+
543
+ length_ground_truth = torch.cat(length_ground_truth,
544
+ dim=0).to(inputs_embeds.device)
545
+
546
+ # only slice the first max_chunk_num chunks for each sample
547
+ # chunk_inputs_embeds = chunk_inputs_embeds[:, :max_chunk_num, :]
548
+ # chunk_attention_mask = chunk_attention_mask[:, :max_chunk_num]
549
+ # length_ground_truth = length_ground_truth[:max_chunk_num]
550
+
551
+ chunk_cache_position = cache_position
552
+ chunk_position_embeddings = self.rotary_emb(
553
+ chunk_inputs_embeds, position_ids
554
+ ) # tuple; the first element has shape torch.Size([1, 256, 128]), the last dim is hidden_size / num_heads, with 64 dims each for cos and sin
555
+
556
+ hidden_states = chunk_inputs_embeds # bs * max_chunk_num * hidden_size
557
+
558
+ # pdb.set_trace()
559
+
560
+ if inference:
561
+ # at inference time, strip the padded positions
562
+ mask_bool = chunk_attention_mask.bool()
563
+ chunk_inputs_embeds = chunk_inputs_embeds[mask_bool.unsqueeze(
564
+ -1).expand_as(chunk_inputs_embeds)].view(
565
+ chunk_inputs_embeds.size(0), -1,
566
+ chunk_inputs_embeds.size(2))
567
+ chunk_attention_mask = chunk_attention_mask[mask_bool].view(
568
+ chunk_attention_mask.size(0), -1)
569
+
570
+ # pdb.set_trace()
571
+ chunk_inputs_embeds = chunk_inputs_embeds[:,
572
+ chunk_cache_position, :]
573
+ chunk_attention_mask = chunk_attention_mask[:,
574
+ chunk_cache_position]
575
+
576
+ hidden_states = chunk_inputs_embeds
577
+
578
+ causal_mask = self._update_causal_mask(chunk_attention_mask,
579
+ chunk_inputs_embeds,
580
+ chunk_cache_position,
581
+ past_key_values,
582
+ output_attentions)
583
+
584
+ # pdb.set_trace()
585
+ for decoder_layer in self.layers:
586
+ if output_hidden_states:
587
+ all_hidden_states += (hidden_states, )
588
+ if self.gradient_checkpointing and self.training:
589
+ layer_outputs = self._gradient_checkpointing_func(
590
+ decoder_layer.__call__,
591
+ hidden_states,
592
+ causal_mask,
593
+ position_ids,
594
+ past_key_values,
595
+ output_attentions,
596
+ use_cache,
597
+ cache_position,
598
+ chunk_position_embeddings,
599
+ )
600
+ else:
601
+ layer_outputs = decoder_layer(
602
+ hidden_states,
603
+ attention_mask=causal_mask,
604
+ # position_ids=position_ids,
605
+ past_key_value=past_key_values,
606
+ output_attentions=output_attentions,
607
+ use_cache=use_cache,
608
+ cache_position=cache_position,
609
+ position_embeddings=chunk_position_embeddings,
610
+ )
611
+
612
+ hidden_states = layer_outputs[0]
613
+
614
+ if use_cache:
615
+ next_decoder_cache = layer_outputs[
616
+ 2 if output_attentions else 1]
617
+ if output_attentions:
618
+ all_self_attns += (layer_outputs[1], )
619
+
620
+ # pdb.set_trace()
621
+ # add hidden states from the last decoder layer
622
+ if output_hidden_states:
623
+ all_hidden_states += (hidden_states, )
624
+
625
+ hidden_states = self.norm(
626
+ hidden_states) # bs * max_chunk_num * hidden_size, hidden states of all chunks
627
+
628
+ # pdb.set_trace()
629
+
630
+ # length prediction (feeds the length loss)
+ self.length_predictor = self.length_predictor.to(
+ hidden_states.device).to(torch.bfloat16) # forced to bf16 here because that is the training dtype
633
+ length_logits = self.length_predictor(
634
+ hidden_states.to(
635
+ hidden_states.device)) # bs * length * chunk_size_limit
636
+
637
+ # pdb.set_trace()
638
+
639
+ next_cache = next_decoder_cache if use_cache else None # DynamicCache()
640
+ if return_legacy_cache:
641
+ next_cache = next_cache.to_legacy_cache()
642
+
643
+ nar_hidden_states = None
644
+ if not inference:
645
+ # NAR decoder
646
+ bs, length, hidden_size = hidden_states.size()
647
+ # assert length == max_chunk_num # TODO: remove this
648
+
649
+ # shape: (bs * max_chunk_num) * chunk_size_limit * hidden_size
650
+ nat_input_embeddings = torch.zeros(
651
+ accumu_num, self.chunk_size_limit,
652
+ hidden_size).to(hidden_states.device).to(torch.bfloat16)
653
+ nat_attention_mask = torch.zeros(
654
+ accumu_num, self.chunk_size_limit).to(hidden_states.device).to(
655
+ torch.bfloat16)
656
+ tot_chunk_num = 0
657
+ for b in range(bs):
658
+ for i in range(slice_nums[b]):
659
+ # slice_nums[b] is the number of chunks in sample b
+ # length_ground_truth[b] holds the true chunk lengths of sample b
+ # copy hidden_states length_ground_truth times into nat_input_embeddings
662
+
663
+ if length_ground_truth[b, i + 1] != -100:
664
+ nat_input_embeddings[
665
+ tot_chunk_num, :length_ground_truth[
666
+ b, i +
667
+ 1], :] = hidden_states[b, i:i + 1, :].expand(
668
+ length_ground_truth[b, i + 1], hidden_size)
669
+ nat_attention_mask[tot_chunk_num, :length_ground_truth[
670
+ b, i + 1]] = torch.tensor(
671
+ [1] * length_ground_truth[b, i + 1])
672
+ tot_chunk_num += 1
673
+ else:
674
+ break
675
+
676
+ nar_chunk_position = torch.arange(
677
+ 1, self.chunk_size_limit + 1).unsqueeze(0).repeat(
678
+ accumu_num,
679
+ 1).to(hidden_states.device) # bs * max_chunk_num
680
+
681
+ nar_position_embeddings = self.rotary_emb(nat_attention_mask,
682
+ nar_chunk_position)
683
+
684
+ # pdb.set_trace()
685
+
686
+ self.decoder = self.decoder.to(dtype=torch.bfloat16)
687
+
688
+ nar_hidden_states = self.decoder(
689
+ nat_input_embeddings,
690
+ attention_mask=nat_attention_mask,
691
+ position_embeddings=nar_position_embeddings,
692
+ output_attentions=output_attentions,
693
+ use_cache=use_cache,
694
+ cache_position=None,
695
+ )
696
+
697
+ nar_hidden_states = self.norm(
698
+ nar_hidden_states) # bs * max_chunk_num * hidden_size
699
+
700
+ # pdb.set_trace()
701
+
702
+ return ModelOutputWithPastForSemiNAT(
703
+ chunk_hidden_state=hidden_states,
704
+ length_ground_truth=length_ground_truth,
705
+ length_logits=length_logits,
706
+ position_embeddings=position_embeddings,
707
+ nar_hidden_state=nar_hidden_states,
708
+ past_key_values=next_cache,
709
+ hidden_states=all_hidden_states,
710
+ attentions=all_self_attns,
711
+ )
712
+
713
+
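# --- Editor's illustrative sketch (not part of the uploaded file) ---
# During training the forward above feeds the NAR decoder one row per chunk:
# the chunk's hidden state repeated `length` times, with a mask over the first
# `length` of chunk_size_limit slots. A stand-alone version of that packing
# (the helper name is hypothetical):
import torch

def _build_nar_inputs(chunk_hidden: torch.Tensor, chunk_lengths, chunk_size_limit: int = 10):
    # chunk_hidden: (num_chunks, hidden_size); chunk_lengths: ints <= chunk_size_limit
    num_chunks, hidden_size = chunk_hidden.shape
    embeds = chunk_hidden.new_zeros(num_chunks, chunk_size_limit, hidden_size)
    mask = chunk_hidden.new_zeros(num_chunks, chunk_size_limit)
    for idx, length in enumerate(chunk_lengths):
        embeds[idx, :length] = chunk_hidden[idx].expand(length, hidden_size)
        mask[idx, :length] = 1
    return embeds, mask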
714
+
715
+
716
+
717
+ class OlmoForCausalLMForSemiNAT(OlmoForCausalLM):
718
+
719
+ def __init__(self, config, *args, **kwargs):
720
+ super().__init__(config, *args, **kwargs)
721
+ self.model = OlmoModelForSemiNAT(config)
722
+ self.rotary_emb = OlmoRotaryEmbedding(config=config)
723
+ self.config = config
724
+ self.padding_idx = config.pad_token_id
725
+ self.vocab_size = config.vocab_size
726
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
727
+ self.padding_idx)
728
+
729
+ self.chunk_size_limit = config.chunk_size_limit
730
+
731
+ def forward(
732
+ self,
733
+ input_ids: torch.LongTensor = None,
734
+ attention_mask: Optional[torch.Tensor] = None,
735
+ position_ids: Optional[torch.LongTensor] = None,
736
+ slice_pos: Optional[torch.Tensor] = None,
737
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
738
+ inputs_embeds: Optional[torch.FloatTensor] = None,
739
+ labels: Optional[torch.LongTensor] = None,
740
+ use_cache: Optional[bool] = None,
741
+ output_attentions: Optional[bool] = None,
742
+ output_hidden_states: Optional[bool] = None,
743
+ cache_position: Optional[torch.LongTensor] = None,
744
+ logits_to_keep: Union[int, torch.Tensor] = 0,
745
+ **loss_kwargs,
746
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
747
+
748
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
749
+ output_hidden_states = (output_hidden_states
750
+ if output_hidden_states is not None else
751
+ self.config.output_hidden_states)
752
+
753
+ # pdb.set_trace()
754
+
755
+ if labels is not None:
756
+ outputs = self.model(
757
+ input_ids=input_ids, # bs * length
758
+ attention_mask=attention_mask, # bs * length
759
+ position_ids=position_ids,
760
+ slice_pos=slice_pos,
761
+ past_key_values=past_key_values,
762
+ inputs_embeds=inputs_embeds,
763
+ use_cache=use_cache,
764
+ output_attentions=output_attentions,
765
+ output_hidden_states=output_hidden_states,
766
+ cache_position=cache_position,
767
+ )
768
+ else:
769
+ outputs = self.model(
770
+ input_ids=input_ids, # bs * length
771
+ attention_mask=attention_mask, # bs * length
772
+ position_ids=position_ids,
773
+ slice_pos=slice_pos,
774
+ past_key_values=past_key_values,
775
+ inputs_embeds=inputs_embeds,
776
+ use_cache=use_cache,
777
+ output_attentions=output_attentions,
778
+ output_hidden_states=output_hidden_states,
779
+ cache_position=cache_position,
780
+ inference=True,
781
+ )
782
+
783
+ chunk_hidden_states = outputs.chunk_hidden_state
784
+ bs, length, hidden_size = chunk_hidden_states.size()
785
+
786
+ ############################# loss computation, two parts #############################
787
+ loss = None
788
+ loss1 = None
789
+ loss2 = None
790
+ ############################# part 1: attach an MLP to predict the chunk length (loss over 10 classes) #############################
791
+
792
+ if labels is not None:
793
+
794
+ length_ground_truth = outputs.length_ground_truth
795
+ length_logits = outputs.length_logits
796
+
797
+ new_length_ground_truth = torch.where(
798
+ length_ground_truth != -100, # condition: not equal to -100
+ length_ground_truth - 1, # if true, use the value minus 1
+ length_ground_truth # otherwise keep the original value
801
+ )
802
+
803
+ # pdb.set_trace()
804
+
805
+ shift_length_logits = length_logits[:, :-1, :]
806
+ shift_new_length_ground_truth = new_length_ground_truth[:, 1:]
807
+
808
+ logits_flat = shift_length_logits.reshape(
809
+ -1,
810
+ self.chunk_size_limit) # reshaped to [bs * length, chunk_size_limit]
811
+ labels_flat = shift_new_length_ground_truth.reshape(
812
+ -1) # [bs * length]
813
+
814
+ # softmax logits to get probability
815
+ logits_flat = torch.nn.functional.softmax(logits_flat, dim=-1)
816
+
817
+ # use an MSE loss: take the probability-weighted (expected) length from the logits (not the argmax), then compute MSE against the label
+
+ # pdb.set_trace()
+ # compute the expected (predicted) length
+ predicted_lengths = torch.sum(
+ logits_flat * torch.arange(self.chunk_size_limit).to(
+ chunk_hidden_states.device).to(torch.bfloat16),
+ dim=1)
+ # mean squared error between the predicted and true lengths
826
+
827
+ loss1 = torch.mean((predicted_lengths[labels_flat != -100] -
828
+ labels_flat[labels_flat != -100].float())**2)
829
+
830
+ # pdb.set_trace()
831
+
832
+ nar_hidden_state = outputs.nar_hidden_state
833
+
834
+ ############################# part 2: recover all tokens from the chunk hidden states and compute the loss against the ground truth #############################
835
+
836
+ nar_labels = torch.full(
837
+ (nar_hidden_state.size(0), nar_hidden_state.size(1)),
838
+ -100).to(nar_hidden_state.device) # bs * length
839
+
840
+ nar_labels = self.update_nar_labels(nar_labels, labels, slice_pos,
841
+ length_ground_truth, input_ids,
842
+ self.chunk_size_limit)
843
+
844
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
845
+ slice_indices = slice(-logits_to_keep, None) if isinstance(
846
+ logits_to_keep, int) else logits_to_keep
847
+ logits = self.lm_head(
848
+ nar_hidden_state[:, slice_indices, :]) # 1* seq_len * 50304
849
+ # logits = logits.float()
850
+ # pdb.set_trace()
851
+ # if labels is not None:
852
+ loss2 = self.loss_function_seminat(logits, nar_labels,
853
+ self.vocab_size, **loss_kwargs)
854
+
855
+ else: # for inference
856
+ softmaxed = torch.softmax(outputs.length_logits[:, -1, :], dim=-1)
857
+ length = torch.argmax(softmaxed, dim=-1).item() + 1
858
+ # pdb.set_trace()
859
+
860
+ nat_input_embeddings = torch.zeros(
861
+ 1, self.chunk_size_limit,
862
+ hidden_size).to(input_ids.device).to(torch.bfloat16)
863
+ nat_attention_mask = torch.zeros(1, self.chunk_size_limit).to(
864
+ input_ids.device).to(torch.bfloat16)
865
+
866
+ nat_input_embeddings[:, :
867
+ length, :] = outputs.chunk_hidden_state[:, -1, :].expand(
868
+ length, -1).to(input_ids.device).to(
869
+ torch.bfloat16)
870
+
871
+ nat_attention_mask[:, :length] = torch.tensor([1] * length).to(
872
+ input_ids.device).to(torch.bfloat16)
873
+
874
+ nar_chunk_position = torch.arange(
875
+ 0, self.chunk_size_limit).unsqueeze(0).to(
876
+ input_ids.device) # bs * max_chunk_num
877
+
878
+ nar_position_embeddings = self.rotary_emb(nat_attention_mask,
879
+ nar_chunk_position)
880
+
881
+ # pdb.set_trace()
882
+ nar_hidden_states = self.model.decoder(
883
+ nat_input_embeddings,
884
+ attention_mask=None,
885
+ position_embeddings=nar_position_embeddings,
886
+ output_attentions=output_attentions,
887
+ use_cache=False,
888
+ cache_position=None,
889
+ )
890
+
891
+ nar_hidden_states = self.model.norm(nar_hidden_states)
892
+ # pdb.set_trace()
893
+ return CausalLMOutputWithPast(
894
+ loss=(loss1, loss2),
895
+ logits=nar_hidden_states[:, :length, :],
896
+ past_key_values=outputs.past_key_values,
897
+ hidden_states=outputs.hidden_states,
898
+ attentions=outputs.attentions,
899
+ )
900
+
901
+ ############################# loss computation, two parts #############################
902
+
903
+ # if not return_dict:
904
+ # output = (logits, ) + outputs[1:]
905
+ # if output_router_logits:
906
+ # output = (aux_loss, ) + output
907
+ # return (loss, ) + output if loss is not None else output
908
+ # pdb.set_trace()
909
+ return CausalLMOutputWithPast(
910
+ loss=(loss1, loss2),
911
+ logits=logits,
912
+ past_key_values=outputs.past_key_values,
913
+ hidden_states=outputs.hidden_states,
914
+ attentions=outputs.attentions,
915
+ )
916
+
917
+ def update_nar_labels(self, nar_labels, labels, slice_pos,
918
+ length_ground_truth, input_ids, chunk_size_limit):
919
+ bs, length = input_ids.size()
920
+ chunk = 0
921
+ for b in range(bs):
922
+ last_cut = slice_pos[b][0] # position of the first cut
923
+ for i in range(1, length):
924
+ if slice_pos[b, i] != -1:
925
+ # pdb.set_trace()
926
+ try:
927
+ nar_labels[chunk, :length_ground_truth[b, i]] = labels[
928
+ b, last_cut + 1:slice_pos[b, i] + 1]
929
+ except:
930
+ pdb.set_trace()
931
+ last_cut = slice_pos[b, i]
932
+ chunk += 1
933
+ else:
934
+ break
935
+ return nar_labels
936
+
937
+ def fixed_cross_entropy(self,
938
+ source,
939
+ target,
940
+ num_items_in_batch: int = None,
941
+ ignore_index: int = -100,
942
+ **kwargs):
943
+ reduction = "sum" if num_items_in_batch is not None else "mean"
944
+ loss = F.cross_entropy(source,
945
+ target,
946
+ ignore_index=ignore_index,
947
+ reduction=reduction)
948
+ if reduction == "sum":
949
+ loss = loss / num_items_in_batch
950
+ return loss
951
+
952
+ def loss_function_seminat(self,
953
+ logits,
954
+ labels,
955
+ vocab_size: int,
956
+ num_items_in_batch: int = None,
957
+ ignore_index: int = -100,
958
+ **kwargs):
959
+ # logits: (B, L, V)
960
+ # labels: (B, L)
961
+
962
+ logits = logits.float()
963
+ labels = labels.to(logits.device)
964
+
965
+ # Flatten the tokens (no shift)
966
+ logits = logits.view(-1, vocab_size) # (B*L, V)
967
+ labels = labels.view(-1) # (B*L)
968
+
969
+ # Ensure device alignment
970
+ labels = labels.to(logits.device)
971
+
972
+ # Compute loss
973
+ loss = self.fixed_cross_entropy(logits, labels, num_items_in_batch,
974
+ ignore_index, **kwargs)
975
+ return loss
976
+
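# --- Editor's illustrative sketch (not part of the uploaded file) ---
# The length loss above is an MSE on the *expected* chunk length: the length
# logits are softmaxed, the expectation over the class indices gives a
# differentiable length estimate, and -100 entries are masked out. Stand-alone
# version (hypothetical helper name):
import torch

def _expected_length_mse(length_logits: torch.Tensor, target_lengths: torch.Tensor) -> torch.Tensor:
    # length_logits: (N, chunk_size_limit); target_lengths: (N,), -100 = ignore,
    # already shifted so that class k corresponds to a chunk of length k + 1
    probs = torch.softmax(length_logits.float(), dim=-1)
    bins = torch.arange(length_logits.size(-1), dtype=probs.dtype, device=probs.device)
    expected = (probs * bins).sum(dim=-1)
    keep = target_lengths != -100
    return torch.mean((expected[keep] - target_lengths[keep].float()) ** 2)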
977
+ def generate(
978
+ self,
979
+ inputs: Optional[torch.Tensor] = None,
980
+ generation_config: Optional[GenerationConfig] = None,
981
+ logits_processor: Optional[LogitsProcessorList] = None,
982
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
983
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor],
984
+ List[int]]] = None,
985
+ synced_gpus: Optional[bool] = None,
986
+ assistant_model: Optional["PreTrainedModel"] = None,
987
+ streamer: Optional["BaseStreamer"] = None,
988
+ negative_prompt_ids: Optional[torch.Tensor] = None,
989
+ negative_prompt_attention_mask: Optional[torch.Tensor] = None,
990
+ **kwargs,
991
+ ) -> Union[GenerateOutput, torch.LongTensor]:
992
+
993
+ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
994
+ self._validate_model_class()
995
+ tokenizer = kwargs.pop(
996
+ "tokenizer",
997
+ None) # Pull this out first, we only use it for stopping criteria
998
+ assistant_tokenizer = kwargs.pop(
999
+ "assistant_tokenizer", None) # only used for assisted generation
1000
+
1001
+ generation_config, model_kwargs = self._prepare_generation_config(
1002
+ generation_config, **kwargs)
1003
+
1004
+ # GenerationConfig {
1005
+ # "eos_token_id": 50279,
1006
+ # "max_length": 2048,
1007
+ # "pad_token_id": 1
1008
+ # }
1009
+
1010
+ self._validate_model_kwargs(model_kwargs.copy())
1011
+ self._validate_assistant(assistant_model, tokenizer,
1012
+ assistant_tokenizer)
1013
+
1014
+ # 2. Set generation parameters if not already defined
1015
+ # decide whether generation must stay in sync across GPUs (e.g. DeepSpeed ZeRO-3 or FSDP)
+ if synced_gpus is None:
+ synced_gpus = (
+ is_deepspeed_zero3_enabled()
+ or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
+
+ # initialize the logits processors and stopping criteria
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList(
+ ) # rules that modify the output logits (e.g. banning repeats, forcing specific tokens)
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList(
+ ) # conditions that stop generation (e.g. max length reached, EOS token seen)
1026
+
1027
+ accepts_attention_mask = "attention_mask" in set(
1028
+ inspect.signature(self.forward).parameters.keys()) # True
1029
+ requires_attention_mask = "encoder_outputs" not in model_kwargs # True
1030
+ kwargs_has_attention_mask = model_kwargs.get("attention_mask",
1031
+ None) is not None # False
1032
+
1033
+ # pdb.set_trace()
1034
+
1035
+ # 3. Define model inputs
1036
+ inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
1037
+ inputs, generation_config.bos_token_id, model_kwargs)
1038
+ batch_size = inputs_tensor.shape[0]
1039
+
1040
+ # inputs_tensor bs * input_length; model_input_name:"input_ids";
1041
+
1042
+ device = inputs_tensor.device
1043
+ self._prepare_special_tokens(generation_config,
1044
+ kwargs_has_attention_mask,
1045
+ device=device)
1046
+
1047
+ # decoder-only models must use left-padding for batched generation.
1048
+ # used for batched generation
1049
+ if not self.config.is_encoder_decoder and not is_torchdynamo_compiling(
1050
+ ):
1051
+ # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
1052
+ # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
1053
+ if (generation_config._pad_token_tensor is not None
1054
+ and batch_size > 1 and len(inputs_tensor.shape) == 2
1055
+ and torch.sum(inputs_tensor[:, -1] ==
1056
+ generation_config._pad_token_tensor) > 0):
1057
+ logger.warning(
1058
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
1059
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
1060
+ )
1061
+ # pdb.set_trace()
1062
+ # 4. Define other model kwargs
1063
+ # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
1064
+ # generating the first new token or not, and we only want to use the embeddings for the first new token)
1065
+ if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
1066
+ generation_config.use_cache = True
1067
+ # the cache is needed to tell whether we are generating the first new token; subsequent tokens rely on the cache for speed
1068
+
1069
+ # build the attention mask
1070
+ if not kwargs_has_attention_mask and requires_attention_mask and accepts_attention_mask:
1071
+ model_kwargs[
1072
+ "attention_mask"] = self._prepare_attention_mask_for_generation(
1073
+ inputs_tensor, generation_config, model_kwargs)
1074
+
1075
+ # an attention mask was passed in; sanity-check it
1076
+ elif kwargs_has_attention_mask:
1077
+ # TODO (joao): generalize this check with other types of inputs
1078
+ if model_input_name == "input_ids" and len(
1079
+ model_kwargs["attention_mask"].shape) > 2:
1080
+ raise ValueError(
1081
+ "`attention_mask` passed to `generate` must be 2D.")
1082
+
1083
+ # encoder-decoder model setup
1084
+ if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
1085
+ # if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
1086
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
1087
+ inputs_tensor, model_kwargs, model_input_name,
1088
+ generation_config)
1089
+
1090
+ # 5. Prepare `input_ids` which will be used for auto-regressive generation
1091
+ # encoder-decoder model
1092
+ if self.config.is_encoder_decoder:
1093
+ input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
1094
+ batch_size=batch_size,
1095
+ model_input_name=model_input_name,
1096
+ model_kwargs=model_kwargs,
1097
+ decoder_start_token_id=generation_config.
1098
+ _decoder_start_token_tensor,
1099
+ device=inputs_tensor.device,
1100
+ )
1101
+ else:
1102
+ input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop(
1103
+ "input_ids") # torch.Size([1, 25]) # torch.Size([1, 25])
1104
+
1105
+ # heal incomplete tokens
1106
+ if generation_config.token_healing:
1107
+ input_ids = self.heal_tokens(input_ids, tokenizer)
1108
+
1109
+ # streaming output
1110
+ if streamer is not None:
1111
+ streamer.put(input_ids.cpu())
1112
+
1113
+ # pdb.set_trace()
1114
+
1115
+ # 6. Prepare `max_length` depending on other stopping criteria.
1116
+ input_ids_length = input_ids.shape[-1]
1117
+ has_default_max_length = kwargs.get(
1118
+ "max_length") is None and generation_config.max_length is not None
1119
+ has_default_min_length = kwargs.get(
1120
+ "min_length") is None and generation_config.min_length is not None
1121
+ # min_length is 0
+
+ # prepare the generation-length config
+ generation_config = self._prepare_generated_length(
+ generation_config=generation_config,
+ has_default_max_length=has_default_max_length,
+ has_default_min_length=has_default_min_length,
+ model_input_name=model_input_name, # "input_ids"
+ inputs_tensor=inputs_tensor,
+ input_ids_length=input_ids_length, # prompt length
1131
+ )
1132
+
1133
+ # If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
1134
+ # logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
1135
+ # dynamically overrides this value as it can need more than the last token logits
1136
+ if self._supports_logits_to_keep(
1137
+ ) and "logits_to_keep" not in model_kwargs:
1138
+ model_kwargs["logits_to_keep"] = 1
1139
+ # the model keeps only the last token's logits instead of logits for every position, which greatly reduces memory. With a beam width of 5, assisted decoding would override this with logits_to_keep=5 to keep logits for several candidate tokens.
+
+ # validate the generation length
1142
+ self._validate_generated_length(generation_config, input_ids_length,
1143
+ has_default_max_length)
1144
+
1145
+ # 7. Prepare the cache.
1146
+ # - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
1147
+ # - different models have a different cache name expected by the model (default = "past_key_values")
1148
+ # - `max_length`, prepared above, is used to determine the maximum cache length
1149
+ max_cache_length = generation_config.max_length - 1 # cache at most max_length - 1 tokens
+
+ # if the input is inputs_embeds
1152
+ if (inputs_tensor.shape[1] != input_ids_length
1153
+ and model_input_name == "inputs_embeds"
1154
+ and not self.config.is_encoder_decoder):
1155
+ max_cache_length += inputs_tensor.shape[1]
1156
+ self._prepare_cache_for_generation(generation_config, model_kwargs,
1157
+ assistant_model, batch_size,
1158
+ max_cache_length, device)
1159
+
1160
+ # 8. determine generation mode
1161
+ generation_mode = generation_config.get_generation_mode(
1162
+ assistant_model) # assisted decoding
1163
+
1164
+ if streamer is not None and (generation_config.num_beams > 1):
1165
+ raise ValueError(
1166
+ "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
1167
+ )
1168
+
1169
+ # device check
1170
+ if not is_torchdynamo_compiling(
1171
+ ) and self.device.type != input_ids.device.type:
1172
+ warnings.warn(
1173
+ "You are calling .generate() with the `input_ids` being on a device type different"
1174
+ f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
1175
+ f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
1176
+ " Please make sure that you have put `input_ids` to the"
1177
+ f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
1178
+ " running `.generate()`.",
1179
+ UserWarning,
1180
+ )
1181
+
1182
+ # pdb.set_trace()
1183
+
1184
+ # 9. prepare logits processors and stopping criteria
1185
+ prepared_logits_processor = self._get_logits_processor(
1186
+ generation_config=generation_config,
1187
+ input_ids_seq_length=input_ids_length,
1188
+ encoder_input_ids=inputs_tensor,
1189
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1190
+ logits_processor=logits_processor,
1191
+ device=inputs_tensor.device,
1192
+ model_kwargs=model_kwargs,
1193
+ negative_prompt_ids=negative_prompt_ids,
1194
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1195
+ )
1196
+ prepared_stopping_criteria = self._get_stopping_criteria(
1197
+ generation_config=generation_config,
1198
+ stopping_criteria=stopping_criteria,
1199
+ tokenizer=tokenizer,
1200
+ **kwargs)
1201
+
1202
+ # Set model_kwargs `use_cache` so we can use it later in forward runs
1203
+ model_kwargs["use_cache"] = generation_config.use_cache
1204
+
1205
+ input_ids, model_kwargs = self._expand_inputs_for_generation(
1206
+ input_ids=input_ids,
1207
+ expand_size=generation_config.num_return_sequences, # 1
1208
+ is_encoder_decoder=self.config.is_encoder_decoder, # false
1209
+ **model_kwargs,
1210
+ )
1211
+
1212
+ result = self._sampleforseminat(
1213
+ input_ids,
1214
+ logits_processor=prepared_logits_processor,
1215
+ stopping_criteria=prepared_stopping_criteria,
1216
+ generation_config=generation_config,
1217
+ synced_gpus=synced_gpus,
1218
+ streamer=streamer,
1219
+ **model_kwargs,
1220
+ )
1221
+
1222
+ # Convert to legacy cache format if requested
1223
+ if (generation_config.return_legacy_cache is True
1224
+ and not is_torchdynamo_compiling()
1225
+ and hasattr(result, "past_key_values") and getattr(
1226
+ result.past_key_values, "to_legacy_cache") is not None):
1227
+ result.past_key_values = result.past_key_values.to_legacy_cache()
1228
+ return result
1229
+
1230
+ def _sampleforseminat(
1231
+ self,
1232
+ input_ids: torch.LongTensor,
1233
+ logits_processor: LogitsProcessorList,
1234
+ stopping_criteria: StoppingCriteriaList,
1235
+ generation_config: GenerationConfig,
1236
+ synced_gpus: bool,
1237
+ streamer: Optional["BaseStreamer"],
1238
+ **model_kwargs,
1239
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
1240
+
1241
+ # init values
1242
+ pad_token_id = generation_config._pad_token_tensor # padding token id
+ output_attentions = generation_config.output_attentions # whether to output attention weights
+ output_hidden_states = generation_config.output_hidden_states # whether to output hidden states
+ output_scores = generation_config.output_scores # whether to output scores
+ output_logits = generation_config.output_logits # whether to output raw logits
+ return_dict_in_generate = generation_config.return_dict_in_generate # whether to return a structured dict
+ max_length = generation_config.max_length # maximum generation length
+ has_eos_stopping_criteria = any(
+ hasattr(criteria, "eos_token_id")
+ for criteria in stopping_criteria) # whether any stopping criterion involves an EOS token
+ do_sample = generation_config.do_sample # whether to use sampling
1253
+
1254
+ # initialize the result collectors
1255
+ # init attention / hidden states / scores tuples
1256
+ scores = () if (return_dict_in_generate and output_scores) else None
1257
+ raw_logits = () if (return_dict_in_generate
1258
+ and output_logits) else None
1259
+ decoder_attentions = () if (return_dict_in_generate
1260
+ and output_attentions) else None
1261
+ cross_attentions = () if (return_dict_in_generate
1262
+ and output_attentions) else None
1263
+ decoder_hidden_states = () if (return_dict_in_generate
1264
+ and output_hidden_states) else None
1265
+
1266
+ # # special handling for encoder-decoder models, not relevant here
1267
+ # if model is an encoder-decoder, retrieve encoder attention weights and hidden states
1268
+ if return_dict_in_generate and self.config.is_encoder_decoder:
1269
+ encoder_attentions = model_kwargs["encoder_outputs"].get(
1270
+ "attentions") if output_attentions else None
1271
+ encoder_hidden_states = (
1272
+ model_kwargs["encoder_outputs"].get("hidden_states")
1273
+ if output_hidden_states else None)
1274
+
1275
+ # pdb.set_trace()
1276
+
1277
+ # initialize sequence tracking
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ this_peer_finished = False
+ unfinished_sequences = torch.ones(
+ batch_size, dtype=torch.long,
+ device=input_ids.device) # mark all sequences as unfinished, torch.Size([1])
+ model_kwargs = self._get_initial_cache_position(
+ input_ids, model_kwargs) # initialize the cache position
+
+ model_forward = self.__call__ # forward function used in the loop
+ ############ swap in the new forward
1289
+ # model_forward = self.forward
1290
+
1291
+ if isinstance(model_kwargs.get("past_key_values"), Cache):
1292
+ is_compileable = model_kwargs[
1293
+ "past_key_values"].is_compileable and self._supports_static_cache # compile optimization
1294
+ is_compileable = is_compileable and not self.generation_config.disable_compile
1295
+ if is_compileable and (
1296
+ self.device.type == "cuda"
1297
+ or generation_config.compile_config._compile_all_devices):
1298
+ os.environ["TOKENIZERS_PARALLELISM"] = "0"
1299
+ model_forward = self.get_compiled_call(
1300
+ generation_config.compile_config)
1301
+
1302
+ ############ extra cache added for the NAR decoder ############
1303
+ # model_kwargs["nar_kv_cache"] = DynamicCache()
1304
+ # model_kwargs["slice_pos"] = torch.tensor([[4] + [-1] * (max_length - 1)
1305
+ # ])
1306
+
1307
+ start = 4
1308
+ s_pos = [start]
1309
+ while True:
1310
+ start += 5
1311
+ if start > input_ids.shape[1] - 1:
1312
+ s_pos.append(input_ids.shape[1] - 1)
1313
+ break
1314
+ else:
1315
+ s_pos.append(start)
1316
+
1317
+ slice_pos = torch.tensor(s_pos + [-1] *
1318
+ (max_length - len(s_pos))).unsqueeze(0).to(
1319
+ input_ids.device)
1320
+
1321
+ model_kwargs['slice_pos'] = slice_pos
1322
+ count = (slice_pos != -1).sum().item()
1323
+ new_cache_position = torch.arange(0, count).to(input_ids.device)
1324
+ model_kwargs[
1325
+ 'cache_position'] = new_cache_position # update the cache position
+
+ ############ extra cache added for the NAR decoder ############
1328
+
1329
+ is_prefill = True
1330
+ while self._has_unfinished_sequences(
1331
+ this_peer_finished,
1332
+ synced_gpus,
1333
+ device=input_ids.device,
1334
+ cur_len=cur_len,
1335
+ max_length=max_length): # loop until every sequence is finished
1336
+ # prepare model inputs
1337
+
1338
+ # pdb.set_trace()
1339
+
1340
+ # model_kwargs.keys(): dict_keys(['attention_mask', 'logits_to_keep', 'past_key_values', 'use_cache', 'cache_position', 'nar_kv_cache', 'slice_pos'])
1341
+ model_inputs = self.prepare_inputs_for_generation( # adds position_ids and input_ids
1342
+ input_ids, **model_kwargs
1343
+ ) #dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
1344
+ # pdb.set_trace()
1345
+
1346
+ # position_ids = torch.arange(
1347
+ # input_ids.shape[1], device=input_ids.device).unsqueeze(0).to(input_ids.device)
1348
+ # model_inputs.update({"position_ids": position_ids})
1349
+
1350
+ model_inputs.update({"input_ids": input_ids})
1351
+
1352
+ # prepare variable output controls (note: some models won't accept all output controls)
1353
+ model_inputs.update({"output_attentions": output_attentions}
1354
+ if output_attentions else {})
1355
+ model_inputs.update({"output_hidden_states": output_hidden_states}
1356
+ if output_hidden_states else {})
1357
+
1358
+ if is_prefill:
1359
+ # pdb.set_trace()
1360
+ # outputs = self(**model_inputs, return_dict=True)
1361
+ # dict_keys(['cache_position', 'past_key_values', 'input_ids', 'inputs_embeds', 'position_ids', 'attention_mask', 'logits_to_keep', 'use_cache'])
1362
+ outputs = self.forward(**model_inputs, return_dict=True)
1363
+ is_prefill = False
1364
+ else:
1365
+ # pdb.set_trace()
1366
+ outputs = model_forward(**model_inputs, return_dict=True)
1367
+
1368
+ # pdb.set_trace()
1369
+
1370
+ ################ seminat ###########################
1371
+ # model_kwargs['slice_pos'] = outputs.slice_pos
1372
+ ################ seminat ###########################
1373
+
1374
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
1375
+ model_kwargs = self._update_model_kwargs_for_generation_for_seminat(
1376
+ outputs,
1377
+ model_kwargs,
1378
+ is_encoder_decoder=self.config.is_encoder_decoder,
1379
+ num_new_tokens=outputs.logits.size(1))
1380
+ if synced_gpus and this_peer_finished:
1381
+ continue
1382
+
1383
+ # pdb.set_trace()
1384
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
1385
+ # (the clone itself is always small)
1386
+
1387
+ # next_token_logits = outputs.logits[:, -1, :].clone().float()
1388
+ next_token_logits = outputs.logits[:, :, :].clone().float(
1389
+ ) # k new tokens were generated this step
1390
+
1391
+ next_token_logits = next_token_logits.to(input_ids.device)
1392
+
1393
+ # pre-process distribution
1394
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1395
+
1396
+ # token selection
1397
+ if do_sample:
1398
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1399
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
1400
+ next_tokens = torch.multinomial(probs,
1401
+ num_samples=1).squeeze(1)
1402
+ else:
1403
+ next_tokens = torch.argmax(
1404
+ next_token_scores,
1405
+ dim=-1) # tensor([9281], device='cuda:0') token id
1406
+
1407
+ # pdb.set_trace()
1408
+ # update slice_pos
1409
+ count = (model_kwargs['slice_pos'] != -1).sum().item()
1410
+ model_kwargs['slice_pos'][:,count] = model_kwargs['slice_pos'][:,
1411
+ count - 1] + outputs.logits.size(1)
1412
+
1413
+ # pdb.set_trace()
1414
+
1415
+
1416
+ # finished sentences should have their next token be a padding token
1417
+ if has_eos_stopping_criteria:
1418
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (
1419
+ 1 - unfinished_sequences
1420
+ ) # once a sequence is finished, unfinished_sequences is 0, so the remainder is filled with padding
1421
+
1422
+ # pdb.set_trace()
1423
+ # update generated ids, model inputs, and length for next step
1424
+ # input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1425
+ input_ids = torch.cat([input_ids, next_tokens], dim=-1)
1426
+ if streamer is not None:
1427
+ streamer.put(next_tokens.cpu())
1428
+
1429
+ # update the finished status
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(
+ input_ids, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+ cur_len += outputs.logits.size(1) # advance the length by the number of new tokens
1434
+
1435
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
1436
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
1437
+ del outputs
1438
+
1439
+ if streamer is not None:
1440
+ streamer.end()
1441
+
1442
+ if return_dict_in_generate:
1443
+ if self.config.is_encoder_decoder:
1444
+ return GenerateEncoderDecoderOutput(
1445
+ sequences=input_ids,
1446
+ scores=scores,
1447
+ logits=raw_logits,
1448
+ encoder_attentions=encoder_attentions,
1449
+ encoder_hidden_states=encoder_hidden_states,
1450
+ decoder_attentions=decoder_attentions,
1451
+ cross_attentions=cross_attentions,
1452
+ decoder_hidden_states=decoder_hidden_states,
1453
+ past_key_values=model_kwargs.get("past_key_values"),
1454
+ )
1455
+ else:
1456
+ return GenerateDecoderOnlyOutput(
1457
+ sequences=input_ids,
1458
+ scores=scores,
1459
+ logits=raw_logits,
1460
+ attentions=decoder_attentions,
1461
+ hidden_states=decoder_hidden_states,
1462
+ past_key_values=model_kwargs.get("past_key_values"),
1463
+ )
1464
+ else:
1465
+ return input_ids
1466
+
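# --- Editor's illustrative sketch (not part of the uploaded file) ---
# _sampleforseminat pre-chunks the prompt with a fixed stride: the first cut at
# index 4, then a cut every 5 tokens, with the final cut clamped to the last
# prompt position and the rest padded with -1. Re-implemented stand-alone
# (hypothetical name):
import torch

def _initial_slice_pos(prompt_len: int, max_length: int, first_cut: int = 4, stride: int = 5):
    cuts, pos = [first_cut], first_cut
    while True:
        pos += stride
        if pos > prompt_len - 1:
            cuts.append(prompt_len - 1)
            break
        cuts.append(pos)
    return torch.tensor(cuts + [-1] * (max_length - len(cuts))).unsqueeze(0)

# e.g. _initial_slice_pos(prompt_len=23, max_length=32)[0, :6] -> tensor([ 4,  9, 14, 19, 22, -1])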
1467
+ def _update_model_kwargs_for_generation_for_seminat(
1468
+ self,
1469
+ outputs: ModelOutput,
1470
+ model_kwargs: Dict[str, Any],
1471
+ is_encoder_decoder: bool = False,
1472
+ num_new_tokens: int = 1,
1473
+ ) -> Dict[str, Any]:
1474
+ ALL_CACHE_NAMES = [
1475
+ "past_key_values", # default
1476
+ "cache_params", # mamba-based models
1477
+ "state", # rwkv
1478
+ "mems", # xlnet
1479
+ "past_buckets_states", # reformer
1480
+ ]
1481
+ # update past_key_values keeping its naming used in model code
1482
+ for possible_cache_name in ALL_CACHE_NAMES:
1483
+ if possible_cache_name in outputs:
1484
+ # TODO (joao): remove output/input mismatch when these old models (xlnet, reformer) are deprecated
1485
+ if possible_cache_name in ("past_buckets_states", "mems"):
1486
+ cache_name = "past_key_values"
1487
+ else:
1488
+ cache_name = possible_cache_name
1489
+ model_kwargs[cache_name] = getattr(outputs,
1490
+ possible_cache_name)
1491
+ break
1492
+
1493
+ # pdb.set_trace()
1494
+
1495
+ # update token_type_ids with last value
1496
+ # false
1497
+ if "token_type_ids" in model_kwargs:
1498
+ token_type_ids = model_kwargs["token_type_ids"]
1499
+ model_kwargs["token_type_ids"] = torch.cat(
1500
+ [token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1)
1501
+
1502
+ if not is_encoder_decoder:
1503
+ # update attention mask
1504
+ # this is the important part
1505
+ # pdb.set_trace()
1506
+ if "attention_mask" in model_kwargs:
1507
+ attention_mask = model_kwargs["attention_mask"]
1508
+ model_kwargs["attention_mask"] = torch.cat(
1509
+ [
1510
+ attention_mask,
1511
+ attention_mask.new_ones(
1512
+ (attention_mask.shape[0], num_new_tokens
1513
+ )) # 1 -> num_new_tokens: extend the attention mask by several tokens at once
1514
+ ],
1515
+ dim=-1)
1516
+ else:
1517
+ # update decoder attention mask
1518
+ if "decoder_attention_mask" in model_kwargs:
1519
+ decoder_attention_mask = model_kwargs["decoder_attention_mask"]
1520
+ model_kwargs["decoder_attention_mask"] = torch.cat(
1521
+ [
1522
+ decoder_attention_mask,
1523
+ decoder_attention_mask.new_ones(
1524
+ (decoder_attention_mask.shape[0], 1))
1525
+ ],
1526
+ dim=-1,
1527
+ )
1528
+
1529
+ # pdb.set_trace()
1530
+ if model_kwargs.get("use_cache", True):
1531
+ # model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + num_new_tokens
1532
+ model_kwargs["cache_position"] = torch.tensor([
1533
+ model_kwargs["cache_position"][-1:].item() + 1
1534
+ ]).to(model_kwargs["cache_position"].device)
1535
+ else:
1536
+ past_positions = model_kwargs.pop("cache_position")
1537
+ new_positions = torch.arange(
1538
+ past_positions[-1] + 1,
1539
+ past_positions[-1] + num_new_tokens + 1,
1540
+ dtype=past_positions.dtype).to(past_positions.device)
1541
+ model_kwargs["cache_position"] = torch.cat(
1542
+ (past_positions, new_positions))
1543
+ return model_kwargs
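# --- Editor's illustrative sketch (not part of the uploaded file) ---
# In the cached path above, each decoding step appends num_new_tokens ones to
# the token-level attention mask but advances cache_position by a single slot,
# since one step adds exactly one new chunk to the chunk-level cache.
# Stand-alone version (hypothetical name):
import torch

def _advance_mask_and_cache_position(attention_mask, cache_position, num_new_tokens):
    new_mask = torch.cat(
        [attention_mask,
         attention_mask.new_ones((attention_mask.shape[0], num_new_tokens))],
        dim=-1)
    new_cache_position = torch.tensor([cache_position[-1:].item() + 1],
                                      device=cache_position.device)
    return new_mask, new_cache_position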