codebyzeb committed · verified
Commit 1a4ac26 · 1 parent: 2aa2526

Upload folder using huggingface_hub
llm/fw57M-multi-tied/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
llm/fw57M-multi-tied/README.md ADDED
@@ -0,0 +1,92 @@
+ ---
+ {}
+ ---
+ ## Experiment Configuration
+ ```yaml
+ callbacks:
+   grad_accum:
+     _target_: src.callbacks.gradient_accumulation.GradientAccumulationScheduler
+     scheduling:
+       0: 2
+   grad_norm:
+     _target_: src.callbacks.grad_norm.GradNorm
+     check_clipping: false
+     group_separator: /
+     histogram_freq: null
+     log_weight_distribution: false
+     norm_type: 2
+     only_total: true
+   lr_monitor:
+     _target_: src.callbacks.lr_monitor.SimpleLearningRateMonitor
+   model_checkpoint:
+     _target_: src.callbacks.model_checkpoint.ModelCheckpoint
+     dirpath: .checkpoints
+     enable_version_counter: false
+     every_n_train_steps: 1000
+     filename: '{step}'
+     save_initial_checkpoint: true
+     save_last: link
+     save_top_k: -1
+     verbose: true
+   speed_monitor:
+     _target_: src.callbacks.speed_monitor.SpeedMonitor
+ data:
+   batch_size: 64
+   drop_last: false
+   eval_batch_size: 64
+   multiprocessing_context: null
+   num_workers: 12
+   persistent_workers: false
+   pin_memory: true
+   prefetch_factor: 2
+   shuffle: true
+ dataset: common-corpus
+ evaluation:
+   blimp: false
+ loggers:
+   tensorboard:
+     _target_: src.trainer.TensorBoardLogger
+     name: ''
+     save_dir: ./
+     version: null
+ model: fw57M-tied
+ optim:
+   lr: 0.0006
+   num_warmup_steps: 2000
+   optim_kwargs:
+     betas:
+     - 0.9
+     - 0.95
+     eps: 1.0e-08
+     fused: true
+   optim_name: adamw
+   scheduler_kwargs:
+     min_lr_ratio: 0.01
+     num_decay_steps: 4000
+     num_stable_steps: 44000
+   scheduler_name: warmup_stable_decay
+   weight_decay: 0.01
+ out_parent_folder: model_train
+ pwd: /home/zg258/rds/hpc-work/infotokenization
+ resume_from_checkpoint: .checkpoints/last.ckpt
+ run_folder: .
+ save_initial_checkpoint: true
+ seed: 42
+ tok_name: bytelevel2
+ torch_compile: true
+ train_data_path: /home/zg258/rds/hpc-work/infotokenization/data/common-corpus/bytelevel2/train
+ trainer:
+   accelerator: gpu
+   deterministic: false
+   devices: 1
+   enable_progress_bar: true
+   fast_dev_run: false
+   gradient_clip_algorithm: norm
+   gradient_clip_val: 1.0
+   limit_val_batches: 500
+   log_every_n_steps: 1
+   max_steps: 50000
+   precision: bf16-true
+   val_check_interval: 1000
+ val_data_path: /home/zg258/rds/hpc-work/infotokenization/data/common-corpus/bytelevel2/validation
+ ```
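The `warmup_stable_decay` schedule above partitions the 50,000 `max_steps` exactly: 2,000 warmup + 44,000 stable + 4,000 decay. A minimal sketch of what such a schedule computes, assuming linear warmup and linear decay (the actual shape lives in the repo's `src` code, which is not part of this upload):

```python
def wsd_lr(step: int, peak_lr: float = 6e-4, warmup: int = 2_000,
           stable: int = 44_000, decay: int = 4_000,
           min_lr_ratio: float = 0.01) -> float:
    """Warmup-stable-decay LR: ramp up, hold, then anneal to min_lr_ratio * peak."""
    if step < warmup:                       # linear ramp: 0 -> peak_lr
        return peak_lr * step / warmup
    if step < warmup + stable:              # plateau at peak_lr
        return peak_lr
    frac = min((step - warmup - stable) / decay, 1.0)
    return peak_lr * (1.0 - frac * (1.0 - min_lr_ratio))  # anneal toward 6e-6
```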
llm/fw57M-multi-tied/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": null,
+   "eos_token_id": 1,
+   "head_dim": 32,
+   "hidden_act": "silu",
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 24,
+   "num_hidden_layers": 6,
+   "num_key_value_heads": 24,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 258
+ }
llm/fw57M-multi-tied/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.51.3"
+ }
llm/fw57M-multi-tied/hparams.yaml ADDED
@@ -0,0 +1,86 @@
+ loggers:
+   tensorboard:
+     _target_: src.trainer.TensorBoardLogger
+     save_dir: ./
+     name: ''
+     version: null
+ callbacks:
+   lr_monitor:
+     _target_: src.callbacks.lr_monitor.SimpleLearningRateMonitor
+   grad_norm:
+     _target_: src.callbacks.grad_norm.GradNorm
+     norm_type: 2
+     group_separator: /
+     histogram_freq: null
+     check_clipping: false
+     log_weight_distribution: false
+     only_total: true
+   speed_monitor:
+     _target_: src.callbacks.speed_monitor.SpeedMonitor
+   grad_accum:
+     _target_: src.callbacks.gradient_accumulation.GradientAccumulationScheduler
+     scheduling:
+       0: 2
+   model_checkpoint:
+     _target_: src.callbacks.model_checkpoint.ModelCheckpoint
+     dirpath: .checkpoints
+     filename: '{step}'
+     enable_version_counter: false
+     every_n_train_steps: 1000
+     save_top_k: -1
+     save_last: link
+     verbose: true
+     save_initial_checkpoint: true
+ out_parent_folder: model_train
+ tok_name: bytelevel2
+ run_folder: .
+ dataset: common-corpus
+ pwd: /home/zg258/rds/hpc-work/infotokenization
+ train_data_path: /home/zg258/rds/hpc-work/infotokenization/data/common-corpus/bytelevel2/train
+ val_data_path: /home/zg258/rds/hpc-work/infotokenization/data/common-corpus/bytelevel2/validation
+ model: fw57M-tied
+ resume_from_checkpoint: .checkpoints/last.ckpt
+ save_initial_checkpoint: true
+ seed: 42
+ torch_compile: true
+ data:
+   batch_size: 64
+   eval_batch_size: 64
+   shuffle: true
+   drop_last: false
+   num_workers: 12
+   pin_memory: true
+   persistent_workers: false
+   prefetch_factor: 2
+   multiprocessing_context: null
+ optim:
+   optim_name: adamw
+   lr: 0.0006
+   weight_decay: 0.01
+   optim_kwargs:
+     fused: true
+     eps: 1.0e-08
+     betas:
+     - 0.9
+     - 0.95
+   scheduler_name: warmup_stable_decay
+   num_warmup_steps: 2000
+   scheduler_kwargs:
+     num_stable_steps: 44000
+     num_decay_steps: 4000
+     min_lr_ratio: 0.01
+ trainer:
+   accelerator: gpu
+   devices: 1
+   precision: bf16-true
+   deterministic: false
+   log_every_n_steps: 1
+   enable_progress_bar: true
+   fast_dev_run: false
+   gradient_clip_val: 1.0
+   gradient_clip_algorithm: norm
+   val_check_interval: 1000
+   max_steps: 50000
+   limit_val_batches: 500
+ evaluation:
+   blimp: false
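Combined with the gradient-accumulation factor of 2 applied from the start of training, these hyperparameters pin down the training token budget, assuming each sequence is packed to the model's 2048-token context (the sequence length itself is not recorded in this file). A rough sketch:

```python
batch_size, grad_accum, seq_len, max_steps = 64, 2, 2048, 50_000

tokens_per_step = batch_size * grad_accum * seq_len  # 262,144 tokens per optimizer step
total_tokens = tokens_per_step * max_steps           # ~13.1B tokens over the full run
print(f"{tokens_per_step:,} tokens/step, {total_tokens / 1e9:.1f}B tokens total")
```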
llm/fw57M-multi-tied/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d46e8376f4ccad540d1d57130e2d242b2e1f39f9544d3d4ea4f9b0a16ae187b0
+ size 113668624
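The 113,668,624-byte payload squares with the architecture in config.json. A back-of-the-envelope count (ignoring the small safetensors header) gives ~56.8M parameters, i.e. ~113.7 MB at two bytes per bf16 parameter, which is where the fw57M name comes from:

```python
hidden, inter, layers, vocab = 768, 3072, 6, 258  # from config.json

attn = 4 * hidden * hidden        # q, k, v, o projections (attention_bias: false)
mlp = 3 * hidden * inter          # gate, up, down projections (mlp_bias: false)
norms = 2 * hidden                # two RMSNorm weight vectors per layer
per_layer = attn + mlp + norms    # 9,438,720

embed = vocab * hidden            # tie_word_embeddings: true, so counted once
total = layers * per_layer + embed + hidden  # + final RMSNorm
print(f"{total:,} params, ~{total * 2:,} bytes in bf16")  # 56,831,232 / 113,662,464
```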
llm/fw57M-multi-tied/special_tokens_map.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|padding|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
llm/fw57M-multi-tied/tokenizer.json ADDED
@@ -0,0 +1,315 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<|padding|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "<|endoftext|>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "post_processor": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "decoder": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "model": {
+     "type": "BPE",
+     "dropout": null,
+     "unk_token": null,
+     "continuing_subword_prefix": null,
+     "end_of_word_suffix": null,
+     "fuse_unk": false,
+     "byte_fallback": false,
+     "ignore_merges": false,
+     "vocab": {
+       "<|padding|>": 0,
+       "<|endoftext|>": 1,
+       "!": 2,
+       "\"": 3,
+       "#": 4,
+       "$": 5,
+       "%": 6,
+       "&": 7,
+       "'": 8,
+       "(": 9,
+       ")": 10,
+       "*": 11,
+       "+": 12,
+       ",": 13,
+       "-": 14,
+       ".": 15,
+       "/": 16,
+       "0": 17,
+       "1": 18,
+       "2": 19,
+       "3": 20,
+       "4": 21,
+       "5": 22,
+       "6": 23,
+       "7": 24,
+       "8": 25,
+       "9": 26,
+       ":": 27,
+       ";": 28,
+       "<": 29,
+       "=": 30,
+       ">": 31,
+       "?": 32,
+       "@": 33,
+       "A": 34,
+       "B": 35,
+       "C": 36,
+       "D": 37,
+       "E": 38,
+       "F": 39,
+       "G": 40,
+       "H": 41,
+       "I": 42,
+       "J": 43,
+       "K": 44,
+       "L": 45,
+       "M": 46,
+       "N": 47,
+       "O": 48,
+       "P": 49,
+       "Q": 50,
+       "R": 51,
+       "S": 52,
+       "T": 53,
+       "U": 54,
+       "V": 55,
+       "W": 56,
+       "X": 57,
+       "Y": 58,
+       "Z": 59,
+       "[": 60,
+       "\\": 61,
+       "]": 62,
+       "^": 63,
+       "_": 64,
+       "`": 65,
+       "a": 66,
+       "b": 67,
+       "c": 68,
+       "d": 69,
+       "e": 70,
+       "f": 71,
+       "g": 72,
+       "h": 73,
+       "i": 74,
+       "j": 75,
+       "k": 76,
+       "l": 77,
+       "m": 78,
+       "n": 79,
+       "o": 80,
+       "p": 81,
+       "q": 82,
+       "r": 83,
+       "s": 84,
+       "t": 85,
+       "u": 86,
+       "v": 87,
+       "w": 88,
+       "x": 89,
+       "y": 90,
+       "z": 91,
+       "{": 92,
+       "|": 93,
+       "}": 94,
+       "~": 95,
+       "¡": 96,
+       "¢": 97,
+       "£": 98,
+       "¤": 99,
+       "¥": 100,
+       "¦": 101,
+       "§": 102,
+       "¨": 103,
+       "©": 104,
+       "ª": 105,
+       "«": 106,
+       "¬": 107,
+       "®": 108,
+       "¯": 109,
+       "°": 110,
+       "±": 111,
+       "²": 112,
+       "³": 113,
+       "´": 114,
+       "µ": 115,
+       "¶": 116,
+       "·": 117,
+       "¸": 118,
+       "¹": 119,
+       "º": 120,
+       "»": 121,
+       "¼": 122,
+       "½": 123,
+       "¾": 124,
+       "¿": 125,
+       "À": 126,
+       "Á": 127,
+       "Â": 128,
+       "Ã": 129,
+       "Ä": 130,
+       "Å": 131,
+       "Æ": 132,
+       "Ç": 133,
+       "È": 134,
+       "É": 135,
+       "Ê": 136,
+       "Ë": 137,
+       "Ì": 138,
+       "Í": 139,
+       "Î": 140,
+       "Ï": 141,
+       "Ð": 142,
+       "Ñ": 143,
+       "Ò": 144,
+       "Ó": 145,
+       "Ô": 146,
+       "Õ": 147,
+       "Ö": 148,
+       "×": 149,
+       "Ø": 150,
+       "Ù": 151,
+       "Ú": 152,
+       "Û": 153,
+       "Ü": 154,
+       "Ý": 155,
+       "Þ": 156,
+       "ß": 157,
+       "à": 158,
+       "á": 159,
+       "â": 160,
+       "ã": 161,
+       "ä": 162,
+       "å": 163,
+       "æ": 164,
+       "ç": 165,
+       "è": 166,
+       "é": 167,
+       "ê": 168,
+       "ë": 169,
+       "ì": 170,
+       "í": 171,
+       "î": 172,
+       "ï": 173,
+       "ð": 174,
+       "ñ": 175,
+       "ò": 176,
+       "ó": 177,
+       "ô": 178,
+       "õ": 179,
+       "ö": 180,
+       "÷": 181,
+       "ø": 182,
+       "ù": 183,
+       "ú": 184,
+       "û": 185,
+       "ü": 186,
+       "ý": 187,
+       "þ": 188,
+       "ÿ": 189,
+       "Ā": 190,
+       "ā": 191,
+       "Ă": 192,
+       "ă": 193,
+       "Ą": 194,
+       "ą": 195,
+       "Ć": 196,
+       "ć": 197,
+       "Ĉ": 198,
+       "ĉ": 199,
+       "Ċ": 200,
+       "ċ": 201,
+       "Č": 202,
+       "č": 203,
+       "Ď": 204,
+       "ď": 205,
+       "Đ": 206,
+       "đ": 207,
+       "Ē": 208,
+       "ē": 209,
+       "Ĕ": 210,
+       "ĕ": 211,
+       "Ė": 212,
+       "ė": 213,
+       "Ę": 214,
+       "ę": 215,
+       "Ě": 216,
+       "ě": 217,
+       "Ĝ": 218,
+       "ĝ": 219,
+       "Ğ": 220,
+       "ğ": 221,
+       "Ġ": 222,
+       "ġ": 223,
+       "Ģ": 224,
+       "ģ": 225,
+       "Ĥ": 226,
+       "ĥ": 227,
+       "Ħ": 228,
+       "ħ": 229,
+       "Ĩ": 230,
+       "ĩ": 231,
+       "Ī": 232,
+       "ī": 233,
+       "Ĭ": 234,
+       "ĭ": 235,
+       "Į": 236,
+       "į": 237,
+       "İ": 238,
+       "ı": 239,
+       "Ĳ": 240,
+       "ĳ": 241,
+       "Ĵ": 242,
+       "ĵ": 243,
+       "Ķ": 244,
+       "ķ": 245,
+       "ĸ": 246,
+       "Ĺ": 247,
+       "ĺ": 248,
+       "Ļ": 249,
+       "ļ": 250,
+       "Ľ": 251,
+       "ľ": 252,
+       "Ŀ": 253,
+       "ŀ": 254,
+       "Ł": 255,
+       "ł": 256,
+       "Ń": 257
+     },
+     "merges": []
+   }
+ }
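Because `merges` is empty, this BPE model never merges anything: the ByteLevel pre-tokenizer maps raw bytes to printable symbols and the model emits exactly one id per byte, hence the 258-entry vocabulary (256 byte symbols plus the two special tokens) and the `bytelevel2` tok_name. A quick check with the `tokenizers` library, assuming the file has been fetched locally:

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("llm/fw57M-multi-tied/tokenizer.json")  # local path, assumed

enc = tok.encode("Hi!")
print(enc.tokens)  # ['Ġ', 'H', 'i', '!'] -- one symbol per byte; Ġ is the prefix space
print(enc.ids)     # [222, 41, 74, 2], matching the vocab above
```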
llm/fw57M-multi-tied/tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|padding|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": null,
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|padding|>",
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": null
+ }
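Taken together, the uploaded folder is a self-contained checkpoint: weights, model config, generation defaults, and tokenizer. A hedged end-to-end sketch (the local path is an assumption; substitute the hub repo id to load remotely):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "llm/fw57M-multi-tied"  # folder from this commit, assumed to be on disk
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)  # LlamaForCausalLM, bf16 weights

inputs = tokenizer("The quick brown fox", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```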