jumelet committed · commit 06104e2 (verified) · parent 1088af1

Add main & ema weights for srp

README.md ADDED
@@ -0,0 +1,73 @@
---
library_name: transformers
pipeline_tag: fill-mask
tags: [gpt-bert, babylm, remote-code]
license: other
---
# jumelet/gptbert-srp-100steps-small

GPT-BERT-style BabyBabyLLM model for the language **srp** (Serbian).

This repository includes both the *main* and *EMA* (exponential moving average) weight variants.

**Default variant exposed to generic loaders:** `ema`

## Variants Available
`ema`, `main`

## Files
- `model.safetensors` (alias of the default variant, i.e. the EMA weights)
- `model_ema.safetensors` (EMA variant)
- `pytorch_model.bin` (legacy PyTorch format)
- `srp-2gpu-100steps.bin` (raw training checkpoint, main)
- `srp-2gpu-100steps_ema.bin` (raw training checkpoint, EMA)

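For generic loaders, `model.safetensors` already resolves to the default (EMA) variant, so `from_pretrained` needs no extra arguments. If you want to inspect a specific weights file directly, here is a minimal sketch, assuming `huggingface_hub` and `safetensors` are installed:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Download just the EMA weights file and load it as a plain state dict.
path = hf_hub_download('jumelet/gptbert-srp-100steps-small', 'model_ema.safetensors')
state_dict = load_file(path)
print(len(state_dict), 'tensors')
```
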
## Configuration
```json
{
  "attention_probs_dropout_prob": 0.1,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "intermediate_size": 1280,
  "max_position_embeddings": 512,
  "position_bucket_size": 32,
  "num_attention_heads": 6,
  "num_hidden_layers": 12,
  "vocab_size": 8192,
  "layer_norm_eps": 1e-05,
  "force_causal_mask": true,
  "classifier_dropout": 0.1,
  "classifier_layer_norm_eps": 1e-05,
  "num_labels": 2
}
```
Tokenizer file: `tokenizer_srp_vs8192.json`
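
As a quick sanity check that this configuration round-trips through `transformers`, a minimal sketch (the expected values in the comments are taken from the JSON above):

```python
from transformers import AutoConfig

# auto_map in config.json routes AutoConfig to configuration_gpt_bert.GPTBertConfig
cfg = AutoConfig.from_pretrained('jumelet/gptbert-srp-100steps-small', trust_remote_code=True)
print(cfg.model_type)                                          # gpt_bert
print(cfg.hidden_size, cfg.num_hidden_layers, cfg.vocab_size)  # 384 12 8192
print(cfg.force_causal_mask)                                   # True
```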

## Quick Usage
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM

model_id = 'jumelet/gptbert-srp-100steps-small'
tok = AutoTokenizer.from_pretrained(model_id)
# trust_remote_code=True is required: the architecture lives in this repository.
model = AutoModelForMaskedLM.from_pretrained(model_id, trust_remote_code=True)
out = model(**tok('Hello world', return_tensors='pt'))
```
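
Since the pipeline tag is `fill-mask`, the logits can be read out at a masked position. A minimal sketch (the example text and top-k choice are illustrative, and this is an early 100-step checkpoint, so the predictions may not be meaningful):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

model_id = 'jumelet/gptbert-srp-100steps-small'
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id, trust_remote_code=True)

enc = tok(f"Hello {tok.mask_token} world", return_tensors='pt')
with torch.no_grad():
    logits = model(input_ids=enc['input_ids'], attention_mask=enc['attention_mask']).logits

# Top-5 vocabulary items predicted at the <mask> position
mask_pos = (enc['input_ids'][0] == tok.mask_token_id).nonzero(as_tuple=True)[0]
top5 = logits[0, mask_pos].topk(5, dim=-1).indices[0]
print(tok.convert_ids_to_tokens(top5.tolist()))
```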

### Forced Causal Attention
Causal attention is enforced during inference by applying a triangular future mask inside the remote code.
This prevents the hybrid GPT-BERT layers from attending to future tokens even when a bidirectional attention mask is provided.

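A minimal sketch of how such a mask can be built, mirroring the `_build_future_causal_mask` helper and the padding-mask handling in `modeling_gpt_bert.py` (the boolean convention is `True` = position is masked out):

```python
import torch

def future_causal_mask(batch_size: int, seq_len: int, device=None) -> torch.Tensor:
    # True marks strictly-future positions that must not be attended to.
    base = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device), diagonal=1)
    return base.unsqueeze(0).expand(batch_size, -1, -1)

# Combine with a padding mask (1 = real token, 0 = padding):
attention_mask = torch.tensor([[1, 1, 1, 0]])    # batch of 1, seq_len 4
pad = attention_mask == 0                        # True where padding
pad_2d = pad.unsqueeze(1) | pad.unsqueeze(2)     # broadcast to [batch, seq, seq]
combined = pad_2d | future_causal_mask(1, 4)     # mask future *and* padded positions
print(combined.int())
```

The remote code additionally clears the diagonal of any row that ends up fully masked, so the softmax never sees an all-masked row.
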
### Sequence Classification
`GPTBertForSequenceClassification` mirrors the original GLUE classifier head and is intended for downstream fine-tuning.
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = 'jumelet/gptbert-srp-100steps-small'
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, trust_remote_code=True)
outputs = model(**tok('This movie was great!', return_tensors='pt'))
print(outputs.logits)  # shape: [batch_size, num_labels]
```
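
Continuing the snippet above, the logits can be converted into class probabilities; the head has `num_labels: 2` by default and mirrors the GLUE setup, so it is meant to be fine-tuned before its predictions are used:

```python
import torch

# `outputs` comes from the sequence-classification snippet above
probs = torch.softmax(outputs.logits, dim=-1)
print(probs.argmax(dim=-1).item(), probs.tolist())
```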

## Notes
- Converted on 2025-10-04T22:22:24.212092+00:00
- Weights are the exact trained parameters; no new layers were initialized.
- Requires `trust_remote_code=True` due to the custom architecture.
config.json ADDED
@@ -0,0 +1,33 @@
{
  "architectures": [
    "GPTBertForMaskedLM",
    "GPTBertForCausalLM",
    "GPTBertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "auto_map": {
    "AutoConfig": "configuration_gpt_bert.GPTBertConfig",
    "AutoModel": "modeling_gpt_bert.GPTBertForMaskedLM",
    "AutoModelForCausalLM": "modeling_gpt_bert.GPTBertForCausalLM",
    "AutoModelForMaskedLM": "modeling_gpt_bert.GPTBertForMaskedLM",
    "AutoModelForSequenceClassification": "modeling_gpt_bert.GPTBertForSequenceClassification"
  },
  "bos_token_id": 1,
  "classifier_dropout": 0.1,
  "classifier_layer_norm_eps": 1e-05,
  "eos_token_id": 2,
  "force_causal_mask": true,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "intermediate_size": 1280,
  "layer_norm_eps": 1e-05,
  "mask_token_id": 4,
  "max_position_embeddings": 512,
  "model_type": "gpt_bert",
  "num_attention_heads": 6,
  "num_hidden_layers": 12,
  "num_labels": 2,
  "pad_token_id": 3,
  "position_bucket_size": 32,
  "vocab_size": 8192
}
configuration_gpt_bert.py ADDED
@@ -0,0 +1,30 @@
from transformers import PretrainedConfig


class GPTBertConfig(PretrainedConfig):
    model_type = 'gpt_bert'

    def __init__(self, **kwargs):
        self.attention_probs_dropout_prob = kwargs.pop('attention_probs_dropout_prob', 0.1)
        self.hidden_dropout_prob = kwargs.pop('hidden_dropout_prob', 0.1)
        self.hidden_size = kwargs.pop('hidden_size', 768)
        self.intermediate_size = kwargs.pop('intermediate_size', 2560)
        self.max_position_embeddings = kwargs.pop('max_position_embeddings', 512)
        self.position_bucket_size = kwargs.pop('position_bucket_size', 32)
        self.num_attention_heads = kwargs.pop('num_attention_heads', 12)
        self.num_hidden_layers = kwargs.pop('num_hidden_layers', 12)
        self.vocab_size = kwargs.pop('vocab_size', 16384)
        self.layer_norm_eps = kwargs.pop('layer_norm_eps', 1e-5)
        self.force_causal_mask = kwargs.pop('force_causal_mask', True)
        self.classifier_dropout = kwargs.pop('classifier_dropout', 0.1)
        self.classifier_layer_norm_eps = kwargs.pop('classifier_layer_norm_eps', 1e-05)
        self.num_labels = kwargs.pop('num_labels', 2)
        self.problem_type = kwargs.pop('problem_type', None)
        self.auto_map = {
            'AutoConfig': 'configuration_gpt_bert.GPTBertConfig',
            'AutoModel': 'modeling_gpt_bert.GPTBertForMaskedLM',
            'AutoModelForCausalLM': 'modeling_gpt_bert.GPTBertForCausalLM',
            'AutoModelForMaskedLM': 'modeling_gpt_bert.GPTBertForMaskedLM',
            'AutoModelForSequenceClassification': 'modeling_gpt_bert.GPTBertForSequenceClassification',
        }
        super().__init__(**kwargs)
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:907c55a08c0d03f8fa0292d7323f69e35539490bbf63bcc5d09e52c30f882bfd
size 157333928
model_ema.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:907c55a08c0d03f8fa0292d7323f69e35539490bbf63bcc5d09e52c30f882bfd
size 157333928
modeling_gpt_bert.py ADDED
@@ -0,0 +1,630 @@
1
+ # Original training architecture (verbatim)
2
+ import math
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from torch import _softmax_backward_data as _softmax_backward_data
8
+
9
+
10
+ class Bert(nn.Module):
11
+ def __init__(self, config, activation_checkpointing=False):
12
+ super().__init__()
13
+ self.embedding = Embedding(config)
14
+ self.transformer = Encoder(config, activation_checkpointing)
15
+ self.classifier = MaskClassifier(config, self.embedding.word_embedding.weight)
16
+
17
+ def get_contextualized(self, input_ids, attention_mask):
18
+ static_embeddings, relative_embedding = self.embedding(input_ids)
19
+ contextualized_embeddings = self.transformer(static_embeddings, attention_mask.unsqueeze(1), relative_embedding)
20
+ return contextualized_embeddings
21
+
22
+ def forward(self, input_ids, attention_mask, masked_lm_labels, num_masked=None, ratio=None):
23
+ contextualized_embeddings = self.get_contextualized(input_ids, attention_mask)
24
+
25
+ if num_masked is None:
26
+ subword_prediction = self.classifier(contextualized_embeddings, masked_lm_labels, num_masked)
27
+
28
+ gold_labels = masked_lm_labels.flatten()
29
+ gold_labels = gold_labels[gold_labels != -100]
30
+
31
+ loss = F.cross_entropy(subword_prediction, gold_labels, reduction="none").mean()
32
+ z_loss = torch.logsumexp(subword_prediction, dim=-1).pow(2).mean()
33
+
34
+ with torch.no_grad():
35
+ accuracy = (subword_prediction.argmax(-1) == gold_labels).float().mean()
36
+
37
+ num_tokens = gold_labels.size(0)
38
+
39
+ return loss, accuracy, z_loss, num_tokens
40
+ else:
41
+ masked_subword_prediction, causal_subword_prediction = self.classifier(contextualized_embeddings, masked_lm_labels, num_masked)
42
+
43
+ if masked_subword_prediction is not None:
44
+ masked_gold_labels = masked_lm_labels[:, :num_masked].flatten()
45
+ masked_gold_labels = masked_gold_labels[masked_gold_labels != -100]
46
+
47
+ masked_loss = F.cross_entropy(masked_subword_prediction, masked_gold_labels)
48
+ masked_z_loss = torch.logsumexp(masked_subword_prediction, dim=-1).pow(2).mean()
49
+
50
+ with torch.no_grad():
51
+ masked_accuracy = (masked_subword_prediction.argmax(-1) == masked_gold_labels).float().mean()
52
+
53
+ num_masked_tokens = masked_gold_labels.size(0)
54
+ else:
55
+ masked_loss = 0.0
56
+ masked_z_loss = 0.0
57
+ masked_accuracy = 0.0
58
+ num_masked_tokens = 0
59
+
60
+ if causal_subword_prediction is not None:
61
+ causal_gold_labels = masked_lm_labels[:, num_masked:].flatten()
62
+ causal_gold_labels = causal_gold_labels[causal_gold_labels != -100]
63
+
64
+ causal_loss = F.cross_entropy(causal_subword_prediction, causal_gold_labels)
65
+ causal_z_loss = torch.logsumexp(causal_subword_prediction, dim=-1).pow(2).mean()
66
+
67
+ with torch.no_grad():
68
+ causal_accuracy = (causal_subword_prediction.argmax(-1) == causal_gold_labels).float().mean()
69
+
70
+ num_causal_tokens = causal_gold_labels.size(0)
71
+ else:
72
+ causal_loss = 0.0
73
+ causal_z_loss = 0.0
74
+ causal_accuracy = 0.0
75
+ num_causal_tokens = 0
76
+
77
+ loss = ratio * masked_loss + (1 - ratio) * causal_loss
78
+ z_loss = ratio * masked_z_loss + (1 - ratio) * causal_z_loss
79
+
80
+ with torch.no_grad():
81
+ accuracy = ratio * masked_accuracy + (1 - ratio) * causal_accuracy
82
+
83
+ num_tokens = num_masked_tokens + num_causal_tokens
84
+
85
+ return loss, masked_loss, causal_loss, accuracy, masked_accuracy, causal_accuracy, z_loss, num_tokens
86
+
87
+
88
+ # From https://github.com/epfml/DenseFormer
89
+ class InPlaceSetSlice(torch.autograd.Function):
90
+ @staticmethod
91
+ def forward(ctx, full_tensor, last_slice, x_idx, x_val):
92
+ full_tensor[x_idx] = x_val
93
+ ctx.x_idx = x_idx
94
+ ret = torch.Tensor().to(full_tensor.device)
95
+ ret.set_(full_tensor[:x_idx + 1])
96
+ return ret
97
+
98
+ @staticmethod
99
+ def backward(ctx, grad_out):
100
+ if ctx.x_idx == 0:
101
+ return None, None, None, grad_out[ctx.x_idx]
102
+ else:
103
+ return None, grad_out[:ctx.x_idx], None, grad_out[ctx.x_idx]
104
+
105
+
106
+ def apply_inplace_set(x_acc, x_idx, x_val):
107
+ full_tensor, last_slice = x_acc
108
+ new_slice = InPlaceSetSlice.apply(full_tensor, last_slice, x_idx, x_val)
109
+ return full_tensor, new_slice
110
+
111
+
112
+ class DWAModules(torch.nn.Module):
113
+ def __init__(self, hidden_size, n_blocks):
114
+ super().__init__()
115
+ self.n_blocks = n_blocks
116
+ self.alphas = nn.ParameterList([nn.Parameter(torch.zeros(i + 2)) for i in range(n_blocks)])
117
+ self.accumulator = None
118
+ self._init_weights()
119
+
120
+ def _init_weights(self):
121
+ for module in self.alphas:
122
+ module.data.zero_()
123
+ module.data[-1] = 1.0
124
+
125
+ def init_accumulator(self, x):
126
+ self.accumulator = (torch.zeros((self.n_blocks + 1, *x.shape), device=x.device, dtype=x.dtype), None)
127
+ self.accumulator = apply_inplace_set(self.accumulator, 0, x)
128
+
129
+ def forward(self, x, block_idx):
130
+ assert self.accumulator is not None, "`init_accumulator(x)` needs to be called first"
131
+ self.accumulator = apply_inplace_set(
132
+ self.accumulator,
133
+ block_idx + 1,
134
+ x
135
+ )
136
+ x = torch.tensordot(self.alphas[block_idx], self.accumulator[1], dims=1)
137
+ return x
138
+
139
+
140
+ class Encoder(nn.Module):
141
+ def __init__(self, config, activation_checkpointing=False):
142
+ super().__init__()
143
+ self.attention_layers = nn.ModuleList([Attention(config) for _ in range(config.num_hidden_layers)])
144
+ self.mlp_layers = nn.ModuleList([FeedForward(config) for _ in range(config.num_hidden_layers)])
145
+ self.dwa_modules = DWAModules(config.hidden_size, config.num_hidden_layers * 2)
146
+
147
+ for i, layer in enumerate(self.mlp_layers):
148
+ layer.mlp[1].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i)))
149
+ layer.mlp[-2].weight.data *= math.sqrt(1.0 / (2.0 * (1 + i)))
150
+
151
+ self.activation_checkpointing = activation_checkpointing
152
+
153
+ def forward(self, x, attention_mask, relative_embedding):
154
+ self.dwa_modules.init_accumulator(x)
155
+ for i, (attention_layer, mlp_layer) in enumerate(zip(self.attention_layers, self.mlp_layers)):
156
+ x = x + attention_layer(x, attention_mask, relative_embedding)
157
+ x = self.dwa_modules(x, block_idx=i * 2)
158
+
159
+ x = x + mlp_layer(x)
160
+ x = self.dwa_modules(x, block_idx=i * 2 + 1)
161
+
162
+ return x
163
+
164
+
165
+ class MaskClassifier(nn.Module):
166
+ def __init__(self, config, subword_embedding):
167
+ super().__init__()
168
+ self.nonlinearity = nn.Sequential(
169
+ nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
170
+ nn.Linear(config.hidden_size, config.hidden_size),
171
+ nn.GELU(),
172
+ nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False),
173
+ nn.Dropout(config.hidden_dropout_prob),
174
+ nn.Linear(subword_embedding.size(1), subword_embedding.size(0))
175
+ )
176
+ self.initialize(config.hidden_size, subword_embedding)
177
+
178
+ def initialize(self, hidden_size, embedding):
179
+ std = math.sqrt(2.0 / (5.0 * hidden_size))
180
+ nn.init.trunc_normal_(self.nonlinearity[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
181
+ self.nonlinearity[-1].weight = embedding
182
+ self.nonlinearity[1].bias.data.zero_()
183
+ self.nonlinearity[-1].bias.data.zero_()
184
+
185
+ def forward(self, x, masked_lm_labels, num_masked=None):
186
+ if num_masked is None:
187
+ x = torch.index_select(x.flatten(0, 1), 0, torch.nonzero(masked_lm_labels.flatten() != -100).squeeze())
188
+ x = self.nonlinearity(x)
189
+ return x
190
+ else:
191
+ masked_x, causal_x = torch.tensor_split(x, (num_masked,), dim=1)
192
+ mntp_masked_lm_labels, causal_masked_lm_labels = torch.tensor_split(masked_lm_labels, (num_masked,), dim=1)
193
+
194
+ if masked_x.size(1) != 0:
195
+ masked_x = torch.index_select(masked_x.flatten(0, 1), 0, torch.nonzero(mntp_masked_lm_labels.flatten() != -100).squeeze())
196
+ masked_x = self.nonlinearity(masked_x)
197
+ else:
198
+ masked_x = None
199
+
200
+ if causal_x.size(1) != 0:
201
+ causal_x = torch.index_select(causal_x.flatten(0, 1), 0, torch.nonzero(causal_masked_lm_labels.flatten() != -100).squeeze())
202
+ causal_x = self.nonlinearity(causal_x)
203
+ else:
204
+ causal_x = None
205
+
206
+ return masked_x, causal_x
207
+
208
+
209
+ class GeGLU(nn.Module):
210
+ def forward(self, x):
211
+ x, gate = x.chunk(2, dim=-1)
212
+ x = x * F.gelu(gate, approximate='tanh')
213
+ return x
214
+
215
+
216
+ class FeedForward(nn.Module):
217
+ def __init__(self, config):
218
+ super().__init__()
219
+ self.mlp = nn.Sequential(
220
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False),
221
+ nn.Linear(config.hidden_size, 2*config.intermediate_size, bias=False),
222
+ GeGLU(),
223
+ nn.LayerNorm(config.intermediate_size, eps=config.layer_norm_eps, elementwise_affine=False),
224
+ nn.Linear(config.intermediate_size, config.hidden_size, bias=False),
225
+ nn.Dropout(config.hidden_dropout_prob)
226
+ )
227
+ self.initialize(config.hidden_size)
228
+
229
+ def initialize(self, hidden_size):
230
+ std = math.sqrt(2.0 / (5.0 * hidden_size))
231
+ nn.init.trunc_normal_(self.mlp[1].weight, mean=0.0, std=std, a=-2*std, b=2*std)
232
+ nn.init.trunc_normal_(self.mlp[-2].weight, mean=0.0, std=std, a=-2*std, b=2*std)
233
+
234
+ def forward(self, x):
235
+ return self.mlp(x)
236
+
237
+
238
+ class MaskedSoftmax(torch.autograd.Function):
239
+ @staticmethod
240
+ def forward(self, x, mask, dim):
241
+ self.dim = dim
242
+ x.masked_fill_(mask, float('-inf'))
243
+ x = torch.softmax(x, self.dim)
244
+ x.masked_fill_(mask, 0.0)
245
+ self.save_for_backward(x)
246
+ return x
247
+
248
+ @staticmethod
249
+ def backward(self, grad_output):
250
+ output, = self.saved_tensors
251
+ inputGrad = _softmax_backward_data(grad_output, output, self.dim, output.dtype)
252
+ return inputGrad, None, None
253
+
254
+
255
+ class Attention(nn.Module):
256
+ def __init__(self, config):
257
+ super().__init__()
258
+
259
+ self.config = config
260
+
261
+ if config.hidden_size % config.num_attention_heads != 0:
262
+ raise ValueError(f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}")
263
+
264
+ self.hidden_size = config.hidden_size
265
+ self.num_heads = config.num_attention_heads
266
+ self.head_size = config.hidden_size // config.num_attention_heads
267
+
268
+ self.in_proj_qk = nn.Linear(config.hidden_size, 2*config.hidden_size, bias=True)
269
+ self.in_proj_vg = nn.Linear(config.hidden_size, 2*config.hidden_size, bias=True)
270
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=True)
271
+
272
+ self.pre_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False)
273
+ self.post_layer_norm = nn.LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=False)
274
+
275
+ position_indices = torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(1) \
276
+ - torch.arange(config.max_position_embeddings, dtype=torch.long).unsqueeze(0)
277
+ position_indices = self.make_log_bucket_position(position_indices, config.position_bucket_size, config.max_position_embeddings)
278
+ position_indices = config.position_bucket_size - 1 + position_indices
279
+ self.register_buffer("position_indices", position_indices, persistent=True)
280
+
281
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
282
+ self.scale = 1.0 / math.sqrt(3 * self.head_size)
283
+ self.initialize()
284
+
285
+ def make_log_bucket_position(self, relative_pos, bucket_size, max_position):
286
+ sign = torch.sign(relative_pos)
287
+ mid = bucket_size // 2
288
+ abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, torch.abs(relative_pos).clamp(max=max_position - 1))
289
+ log_pos = torch.ceil(torch.log(abs_pos / mid) / math.log((max_position-1) / mid) * (mid - 1)).int() + mid
290
+ bucket_pos = torch.where(abs_pos <= mid, relative_pos, log_pos * sign).long()
291
+ return bucket_pos
292
+
293
+ def initialize(self):
294
+ std = math.sqrt(2.0 / (5.0 * self.hidden_size))
295
+ nn.init.trunc_normal_(self.in_proj_qk.weight, mean=0.0, std=std, a=-2*std, b=2*std)
296
+ nn.init.trunc_normal_(self.in_proj_vg.weight, mean=0.0, std=std, a=-2*std, b=2*std)
297
+ nn.init.trunc_normal_(self.out_proj.weight, mean=0.0, std=std, a=-2*std, b=2*std)
298
+ self.in_proj_qk.bias.data.zero_()
299
+ self.in_proj_vg.bias.data.zero_()
300
+ self.out_proj.bias.data.zero_()
301
+
302
+ def forward(self, hidden_states, attention_mask, relative_embedding):
303
+ key_len, batch_size, _ = hidden_states.size()
304
+ query_len = key_len
305
+
306
+ if self.position_indices.size(0) < query_len:
307
+ position_indices = torch.arange(query_len, dtype=torch.long).unsqueeze(1) \
308
+ - torch.arange(query_len, dtype=torch.long).unsqueeze(0)
309
+ position_indices = self.make_log_bucket_position(position_indices, self.config.position_bucket_size, 512)
310
+ position_indices = self.config.position_bucket_size - 1 + position_indices
311
+ self.register_buffer("position_indices", position_indices.to(hidden_states.device), persistent=True)
312
+
313
+ hidden_states = self.pre_layer_norm(hidden_states)
314
+ query, key = self.in_proj_qk(hidden_states).chunk(2, dim=2) # shape: [T, B, D]
315
+ value, gate = self.in_proj_vg(hidden_states).chunk(2, dim=2) # shape: [T, B, D]
316
+ gate = F.gelu(gate)
317
+
318
+ pos = self.in_proj_qk(self.dropout(relative_embedding)) # shape: [2T-1, 2D]
319
+ pos = F.embedding(self.position_indices[:query_len, :key_len], pos) # shape: [T, T, 2D]
320
+ query_pos, key_pos = pos.chunk(2, dim=-1)
321
+ query_pos = query_pos.view(query_len, key_len, self.num_heads, self.head_size)
322
+ key_pos = key_pos.view(query_len, key_len, self.num_heads, self.head_size)
323
+
324
+ query = query.reshape(query_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)
325
+ key = key.reshape(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)
326
+ value = value.reshape(key_len, batch_size * self.num_heads, self.head_size).transpose(0, 1)
327
+
328
+ attention_scores = torch.bmm(query, key.transpose(1, 2) * self.scale)
329
+
330
+ query = query.view(batch_size, self.num_heads, query_len, self.head_size)
331
+ key = key.view(batch_size, self.num_heads, query_len, self.head_size)
332
+ attention_scores = attention_scores.view(batch_size, self.num_heads, query_len, key_len)
333
+ attention_scores.add_(torch.einsum("bhqd,qkhd->bhqk", query, key_pos * self.scale))
334
+ attention_scores.add_(torch.einsum("bhkd,qkhd->bhqk", key * self.scale, query_pos))
335
+
336
+ attention_probs = MaskedSoftmax.apply(attention_scores, attention_mask, -1)
337
+
338
+ attention_probs = self.dropout(attention_probs)
339
+ context = torch.bmm(attention_probs.flatten(0, 1), value) # shape: [B*H, Q, D]
340
+ context = context.transpose(0, 1).reshape(context.size(1), -1, self.hidden_size) # shape: [Q, B, H*D]
341
+ context = context * gate
342
+ context = self.post_layer_norm(context)
343
+ context = self.out_proj(context)
344
+ context = self.dropout(context)
345
+
346
+ return context
347
+
348
+
349
+ class Embedding(nn.Module):
350
+ def __init__(self, config):
351
+ super().__init__()
352
+ self.hidden_size = config.hidden_size
353
+
354
+ self.word_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
355
+ self.word_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=False)
356
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
357
+
358
+ self.relative_embedding = nn.Parameter(torch.empty(2 * config.position_bucket_size - 1, config.hidden_size))
359
+ self.relative_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
360
+
361
+ self.initialize()
362
+
363
+ def initialize(self):
364
+ std = math.sqrt(2.0 / (5.0 * self.hidden_size))
365
+ nn.init.trunc_normal_(self.relative_embedding, mean=0.0, std=std, a=-2*std, b=2*std)
366
+ nn.init.trunc_normal_(self.word_embedding.weight, mean=0.0, std=std, a=-2*std, b=2*std)
367
+
368
+ def forward(self, input_ids):
369
+ word_embedding = self.dropout(self.word_layer_norm(self.word_embedding(input_ids)))
370
+ relative_embeddings = self.relative_layer_norm(self.relative_embedding)
371
+ return word_embedding, relative_embeddings
372
+
373
+
374
+ # HF wrappers that preserve state dict keys and behavior
375
+
376
+ from transformers import PreTrainedModel
377
+ from transformers.modeling_outputs import MaskedLMOutput, CausalLMOutputWithCrossAttentions, SequenceClassifierOutput
378
+ from .configuration_gpt_bert import GPTBertConfig
379
+ import torch
380
+ import torch.nn as nn
381
+
382
+ DEFAULT_FORCE_CAUSAL_MASK = True
383
+ EMIT_HIDDEN_STATES_DEFAULT = True
384
+
385
+
386
+ def _normalize_mask_tensor(mask):
387
+ if mask.dtype == torch.bool:
388
+ if mask.numel() == 0:
389
+ return mask
390
+ true_fraction = mask.float().mean().item()
391
+ if true_fraction > 0.5:
392
+ mask = ~mask
393
+ else:
394
+ mask = mask <= 0
395
+ return mask.to(torch.bool)
396
+
397
+
398
+ def _ensure_valid_rows(mask):
399
+ row_masked = mask.all(dim=-1)
400
+ if row_masked.any():
401
+ idx = row_masked.nonzero(as_tuple=False)
402
+ mask[idx[:, 0], idx[:, 1], idx[:, 1]] = False
403
+ return mask
404
+
405
+
406
+ def _build_future_causal_mask(batch_size, seq_len, device):
407
+ base = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device), diagonal=1)
408
+ return base.unsqueeze(0).expand(batch_size, -1, -1)
409
+
410
+
411
+ def _build_babylm_attention_mask(input_ids, attention_mask, force_causal=False):
412
+ batch_size, seq_len = input_ids.shape[:2]
413
+ device = input_ids.device
414
+ if attention_mask is None:
415
+ mask = torch.zeros(batch_size, seq_len, seq_len, dtype=torch.bool, device=device)
416
+ else:
417
+ mask = attention_mask
418
+ if mask.dim() == 0:
419
+ mask = mask.unsqueeze(0)
420
+ if mask.dim() == 1:
421
+ mask = mask.unsqueeze(0)
422
+ if mask.dim() == 2:
423
+ mask = _normalize_mask_tensor(mask)
424
+ mask = mask.unsqueeze(1) | mask.unsqueeze(2)
425
+ elif mask.dim() == 3:
426
+ if mask.size(1) == 1 and mask.size(2) == seq_len:
427
+ mask = _normalize_mask_tensor(mask.squeeze(1))
428
+ mask = mask.unsqueeze(1) | mask.unsqueeze(2)
429
+ elif mask.size(1) == seq_len and mask.size(2) == 1:
430
+ mask = _normalize_mask_tensor(mask.squeeze(2))
431
+ mask = mask.unsqueeze(1) | mask.unsqueeze(2)
432
+ else:
433
+ mask = _normalize_mask_tensor(mask)
434
+ elif mask.dim() == 4:
435
+ if mask.size(1) == 1:
436
+ mask = mask[:, 0]
437
+ else:
438
+ mask = mask.any(dim=1)
439
+ mask = _normalize_mask_tensor(mask)
440
+ else:
441
+ raise ValueError("Unsupported attention_mask dimensions: {}".format(mask.dim()))
442
+ mask = mask.to(device=device, dtype=torch.bool)
443
+ if mask.dim() == 2:
444
+ mask = mask.unsqueeze(1) | mask.unsqueeze(2)
445
+ if mask.dim() != 3:
446
+ raise ValueError("attention_mask must broadcast to a square matrix")
447
+ if mask.size(0) == 1 and batch_size > 1:
448
+ mask = mask.expand(batch_size, -1, -1).clone()
449
+ elif mask.size(0) != batch_size:
450
+ raise ValueError("attention_mask batch dimension {} does not match inputs {}".format(mask.size(0), batch_size))
451
+ rows = min(mask.size(1), seq_len)
452
+ cols = min(mask.size(2), seq_len)
453
+ if mask.size(1) != seq_len or mask.size(2) != seq_len:
454
+ new_mask = torch.ones(batch_size, seq_len, seq_len, dtype=torch.bool, device=device)
455
+ new_mask[:, :rows, :cols] = mask[:, :rows, :cols]
456
+ mask = new_mask
457
+ if force_causal:
458
+ future_mask = _build_future_causal_mask(mask.size(0), seq_len, device)
459
+ mask = mask | future_mask
460
+ mask = _ensure_valid_rows(mask)
461
+ return mask.unsqueeze(1)
462
+
463
+
464
+ class GPTBertForMaskedLM(PreTrainedModel):
465
+ config_class = GPTBertConfig
466
+ base_model_prefix = 'gpt_bert'
467
+
468
+ def __init__(self, config: GPTBertConfig):
469
+ super().__init__(config)
470
+ self.model = Bert(config)
471
+ self.force_causal_mask = getattr(config, "force_causal_mask", DEFAULT_FORCE_CAUSAL_MASK)
472
+
473
+ def tie_weights(self):
474
+ try:
475
+ self.model.classifier.nonlinearity[-1].weight = self.model.embedding.word_embedding.weight
476
+ except Exception:
477
+ pass
478
+ return super().tie_weights()
479
+
480
+ def forward(self, input_ids, attention_mask=None, labels=None, output_hidden_states=None, return_dict=None):
481
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else (self.config.output_hidden_states or EMIT_HIDDEN_STATES_DEFAULT)
482
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
483
+
484
+ mask_4d = _build_babylm_attention_mask(input_ids, attention_mask, force_causal=self.force_causal_mask)
485
+ static_embeddings, relative_embedding = self.model.embedding(input_ids)
486
+ if static_embeddings.dim() == 3 and static_embeddings.shape[0] == input_ids.shape[0]:
487
+ static_embeddings = static_embeddings.transpose(0, 1)
488
+ contextualized = self.model.transformer(static_embeddings, mask_4d, relative_embedding)
489
+ hs = contextualized.transpose(0, 1)
490
+ B, S, H = hs.shape
491
+ flat = hs.reshape(B * S, H)
492
+ logits_flat = self.model.classifier.nonlinearity(flat)
493
+ vocab = logits_flat.size(-1)
494
+ logits = logits_flat.view(B, S, vocab)
495
+
496
+ loss = None
497
+ if labels is not None:
498
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
499
+ loss = loss_fct(logits.view(-1, vocab), labels.view(-1))
500
+
501
+ hidden_states = (hs,) if output_hidden_states else None
502
+
503
+ if not return_dict:
504
+ outputs = (logits,)
505
+ if hidden_states is not None:
506
+ outputs = outputs + (hidden_states,)
507
+ return ((loss,) + outputs) if loss is not None else outputs
508
+
509
+ return MaskedLMOutput(loss=loss, logits=logits, hidden_states=hidden_states)
510
+
511
+
512
+ class GPTBertForCausalLM(PreTrainedModel):
513
+ config_class = GPTBertConfig
514
+ base_model_prefix = 'gpt_bert'
515
+
516
+ def __init__(self, config: GPTBertConfig):
517
+ super().__init__(config)
518
+ self.model = Bert(config)
519
+ self.force_causal_mask = getattr(config, "force_causal_mask", DEFAULT_FORCE_CAUSAL_MASK)
520
+
521
+ def prepare_inputs_for_generation(self, input_ids, **kwargs):
522
+ return {'input_ids': input_ids, 'attention_mask': kwargs.get('attention_mask', None)}
523
+
524
+ def forward(self, input_ids, attention_mask=None, labels=None, output_hidden_states=None, return_dict=None):
525
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else (self.config.output_hidden_states or EMIT_HIDDEN_STATES_DEFAULT)
526
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
527
+
528
+ mask_4d = _build_babylm_attention_mask(input_ids, attention_mask, force_causal=self.force_causal_mask)
529
+ static_embeddings, relative_embedding = self.model.embedding(input_ids)
530
+ if static_embeddings.dim() == 3 and static_embeddings.shape[0] == input_ids.shape[0]:
531
+ static_embeddings = static_embeddings.transpose(0, 1)
532
+ contextualized = self.model.transformer(static_embeddings, mask_4d, relative_embedding)
533
+ hs = contextualized.transpose(0, 1)
534
+ B, S, H = hs.shape
535
+ flat = hs.reshape(B * S, H)
536
+ logits_flat = self.model.classifier.nonlinearity(flat)
537
+ vocab = logits_flat.size(-1)
538
+ logits = logits_flat.view(B, S, vocab)
539
+
540
+ loss = None
541
+ if labels is not None:
542
+ shift_logits = logits[..., :-1, :].contiguous()
543
+ shift_labels = labels[..., 1:].contiguous()
544
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
545
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
546
+
547
+ hidden_states = (hs,) if output_hidden_states else None
548
+
549
+ if not return_dict:
550
+ outputs = (logits,)
551
+ if hidden_states is not None:
552
+ outputs = outputs + (hidden_states,)
553
+ return ((loss,) + outputs) if loss is not None else outputs
554
+
555
+ return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, hidden_states=hidden_states)
556
+
557
+
558
+
559
+ class ClassifierHead(nn.Module):
560
+ def __init__(self, config):
561
+ super().__init__()
562
+ self.nonlinearity = nn.Sequential(
563
+ nn.LayerNorm(config.hidden_size, config.classifier_layer_norm_eps, elementwise_affine=False),
564
+ nn.Linear(config.hidden_size, config.hidden_size),
565
+ nn.GELU(),
566
+ nn.LayerNorm(config.hidden_size, config.classifier_layer_norm_eps, elementwise_affine=False),
567
+ nn.Dropout(config.classifier_dropout),
568
+ nn.Linear(config.hidden_size, config.num_labels)
569
+ )
570
+
571
+ def forward(self, embeddings):
572
+ return self.nonlinearity(embeddings)
573
+
574
+
575
+ class GPTBertForSequenceClassification(PreTrainedModel):
576
+ config_class = GPTBertConfig
577
+ base_model_prefix = 'gpt_bert'
578
+
579
+ def __init__(self, config: GPTBertConfig):
580
+ super().__init__(config)
581
+ self.model = Bert(config)
582
+ self.force_causal_mask = getattr(config, "force_causal_mask", DEFAULT_FORCE_CAUSAL_MASK)
583
+ self.sequence_classifier = ClassifierHead(config)
584
+
585
+ def forward(self, input_ids, attention_mask=None, labels=None, output_hidden_states=None, return_dict=None):
586
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else (self.config.output_hidden_states or EMIT_HIDDEN_STATES_DEFAULT)
587
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
588
+
589
+ mask_4d = _build_babylm_attention_mask(input_ids, attention_mask, force_causal=self.force_causal_mask)
590
+ static_embeddings, relative_embedding = self.model.embedding(input_ids)
591
+ if static_embeddings.dim() == 3 and static_embeddings.shape[0] == input_ids.shape[0]:
592
+ static_embeddings = static_embeddings.transpose(0, 1)
593
+ contextualized = self.model.transformer(static_embeddings, mask_4d, relative_embedding)
594
+ hs = contextualized.transpose(0, 1)
595
+ pooled_output = hs[:, 0, :]
596
+ logits = self.sequence_classifier(pooled_output)
597
+
598
+ loss = None
599
+ if labels is not None:
600
+ labels = labels.to(logits.device)
601
+ problem_type = self.config.problem_type
602
+ if problem_type is None:
603
+ if self.config.num_labels == 1:
604
+ problem_type = "regression"
605
+ elif labels.dtype in (torch.long, torch.int):
606
+ problem_type = "single_label_classification"
607
+ else:
608
+ problem_type = "multilabel_classification"
609
+
610
+ if problem_type == "regression":
611
+ logits = logits.squeeze(-1)
612
+ loss_fct = nn.MSELoss()
613
+ loss = loss_fct(logits, labels.float())
614
+ elif problem_type == "single_label_classification":
615
+ loss_fct = nn.CrossEntropyLoss()
616
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
617
+ else:
618
+ loss_fct = nn.BCEWithLogitsLoss()
619
+ loss = loss_fct(logits, labels.float())
620
+
621
+ hidden_states = (hs,) if output_hidden_states else None
622
+
623
+ if not return_dict:
624
+ outputs = (logits,)
625
+ if hidden_states is not None:
626
+ outputs = outputs + (hidden_states,)
627
+ return ((loss,) + outputs) if loss is not None else outputs
628
+
629
+ return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=hidden_states)
630
+
original_project_config.json ADDED
@@ -0,0 +1,16 @@
{
  "attention_probs_dropout_prob": 0.1,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "intermediate_size": 1280,
  "max_position_embeddings": 512,
  "position_bucket_size": 32,
  "num_attention_heads": 6,
  "num_hidden_layers": 12,
  "vocab_size": 8192,
  "layer_norm_eps": 1e-05,
  "force_causal_mask": true,
  "classifier_dropout": 0.1,
  "classifier_layer_norm_eps": 1e-05,
  "num_labels": 2
}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:456bd4098409dacd6f23319f8498ac6a31314a0c9aec4c79d33a5564a28a9620
size 144780150
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "pad_token": "<pad>",
  "unk_token": "<unk>"
}
srp-2gpu-100steps.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9ab48a9deb18bacf94e1863768f0ec6cbdf82f74684dfd5521174ae7e0fcaf39
size 144793266
srp-2gpu-100steps_ema.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4950ee251bc951bf752fcc532bf4e59dc74ea80994b86f6abb5bb8cf556896a7
size 144793966
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,141 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "3": {
28
+ "content": "<pad>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "4": {
36
+ "content": "<mask>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "5": {
44
+ "content": "<special_0>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "6": {
52
+ "content": "<special_1>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "7": {
60
+ "content": "<special_2>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "8": {
68
+ "content": "<special_3>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "9": {
76
+ "content": "<special_4>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "10": {
84
+ "content": "<special_5>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "11": {
92
+ "content": "<special_6>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "12": {
100
+ "content": "<special_7>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "13": {
108
+ "content": "<special_8>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "14": {
116
+ "content": "<special_9>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "15": {
124
+ "content": "<special_10>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ }
131
+ },
132
+ "bos_token": "<s>",
133
+ "clean_up_tokenization_spaces": false,
134
+ "eos_token": "</s>",
135
+ "extra_special_tokens": {},
136
+ "mask_token": "<mask>",
137
+ "model_max_length": 1000000000000000019884624838656,
138
+ "pad_token": "<pad>",
139
+ "tokenizer_class": "PreTrainedTokenizerFast",
140
+ "unk_token": "<unk>"
141
+ }