flyingbugs committed
Commit 64b73a6 · verified · 1 Parent(s): be56f63

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: Qwen/Qwen2.5-Math-1.5B-Instruct
+ library_name: transformers
+ model_name: Qwen2.5-math-1.5B-Open-R1-Distill-eos
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-math-1.5B-Open-R1-Distill-eos
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-Math-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Math-1.5B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ # Load the fine-tuned checkpoint as a chat-style text-generation pipeline on GPU
+ generator = pipeline("text-generation", model="flyingbugs/Qwen2.5-math-1.5B-Open-R1-Distill-eos", device="cuda")
+ # Pass the prompt in chat format and print only the newly generated text
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jjh233/huggingface/runs/cxuzlvpx)
+
+ This model was trained with SFT.
+
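+ The run covered 3 epochs (804 optimizer steps) with a peak learning rate of about 5e-5 and a recorded train_batch_size of 16 (see `trainer_state.json` below). A minimal sketch of an equivalent setup with TRL's `SFTTrainer` follows; the dataset name and any values not listed above are illustrative assumptions rather than the exact configuration behind this checkpoint.
+
+ ```python
+ from datasets import load_dataset
+ from trl import SFTConfig, SFTTrainer
+
+ # Dataset choice is an assumption; substitute the actual SFT dataset
+ dataset = load_dataset("open-r1/OpenR1-Math-220k", split="train")
+
+ training_args = SFTConfig(
+     output_dir="Qwen2.5-math-1.5B-Open-R1-Distill-eos",
+     num_train_epochs=3,              # matches trainer_state.json
+     per_device_train_batch_size=16,  # matches the recorded train_batch_size (per-device vs. global is an assumption)
+     learning_rate=5e-5,              # consistent with the peak of the logged LR schedule
+     logging_steps=5,                 # matches the logging interval in trainer_state.json
+     save_steps=100,
+     report_to="wandb",
+ )
+
+ trainer = SFTTrainer(
+     model="Qwen/Qwen2.5-Math-1.5B-Instruct",
+     args=training_args,
+     train_dataset=dataset,
+ )
+ trainer.train()
+ ```
+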
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.54.0
+ - Pytorch: 2.7.1
+ - Datasets: 4.0.0
+ - Tokenizers: 0.21.2
+
+ ## Citations
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title = {{TRL: Transformer Reinforcement Learning}},
+     author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year = 2020,
+     journal = {GitHub repository},
+     publisher = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 1465863748190208.0,
+ "train_loss": 0.5050300278177309,
+ "train_runtime": 14495.1223,
+ "train_samples": 93733,
+ "train_samples_per_second": 7.098,
+ "train_steps_per_second": 0.055
+ }
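
A couple of convenience figures fall straight out of the numbers above (pure arithmetic on the reported values, no new measurements): the run took roughly 4 hours of wall-clock time, and dividing samples/s by steps/s gives an effective batch of roughly 129 samples per optimizer step.

```python
import json

# Derive wall-clock time and effective batch size from the reported metrics
with open("all_results.json") as f:
    results = json.load(f)

hours = results["train_runtime"] / 3600                  # 14495.1223 s ≈ 4.03 h
effective_batch = (results["train_samples_per_second"]
                   / results["train_steps_per_second"])  # 7.098 / 0.055 ≈ 129

print(f"wall-clock: {hours:.2f} h, effective batch size: {effective_batch:.0f}")
```
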
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "bos_token_id": 151643,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "transformers_version": "4.54.0"
+ }
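
The generation config registers two end-of-sequence ids, so decoding stops on either one, and reuses 151643 as the padding id. A quick way to inspect these defaults without loading the model weights (the token strings in the comment are what the Qwen2.5 tokenizer maps these ids to):

```python
from transformers import AutoTokenizer, GenerationConfig

repo = "flyingbugs/Qwen2.5-math-1.5B-Open-R1-Distill-eos"

# Read only the saved generation defaults, not the weights
gen_cfg = GenerationConfig.from_pretrained(repo)
print(gen_cfg.eos_token_id)   # [151645, 151643]
print(gen_cfg.pad_token_id)   # 151643

# Map the ids back to token strings; for Qwen2.5 these are '<|im_end|>' and '<|endoftext|>'
tok = AutoTokenizer.from_pretrained(repo)
print(tok.convert_ids_to_tokens([151645, 151643]))
```
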
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "total_flos": 1465863748190208.0,
+ "train_loss": 0.5050300278177309,
+ "train_runtime": 14495.1223,
+ "train_samples": 93733,
+ "train_samples_per_second": 7.098,
+ "train_steps_per_second": 0.055
+ }
trainer_state.json ADDED
@@ -0,0 +1,1163 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 804,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.018656716417910446,
14
+ "grad_norm": 11.399012167837801,
15
+ "learning_rate": 4.8780487804878055e-06,
16
+ "loss": 1.6662,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.03731343283582089,
21
+ "grad_norm": 2.7376566823470316,
22
+ "learning_rate": 1.0975609756097562e-05,
23
+ "loss": 1.2958,
24
+ "step": 10
25
+ },
26
+ {
27
+ "epoch": 0.055970149253731345,
28
+ "grad_norm": 0.7789979993380964,
29
+ "learning_rate": 1.707317073170732e-05,
30
+ "loss": 0.9493,
31
+ "step": 15
32
+ },
33
+ {
34
+ "epoch": 0.07462686567164178,
35
+ "grad_norm": 0.5128569616334213,
36
+ "learning_rate": 2.3170731707317075e-05,
37
+ "loss": 0.7989,
38
+ "step": 20
39
+ },
40
+ {
41
+ "epoch": 0.09328358208955224,
42
+ "grad_norm": 0.2952286751449961,
43
+ "learning_rate": 2.926829268292683e-05,
44
+ "loss": 0.7254,
45
+ "step": 25
46
+ },
47
+ {
48
+ "epoch": 0.11194029850746269,
49
+ "grad_norm": 0.2341313903311179,
50
+ "learning_rate": 3.5365853658536584e-05,
51
+ "loss": 0.6846,
52
+ "step": 30
53
+ },
54
+ {
55
+ "epoch": 0.13059701492537312,
56
+ "grad_norm": 0.19391262472859572,
57
+ "learning_rate": 4.146341463414634e-05,
58
+ "loss": 0.6538,
59
+ "step": 35
60
+ },
61
+ {
62
+ "epoch": 0.14925373134328357,
63
+ "grad_norm": 0.16560726663004716,
64
+ "learning_rate": 4.75609756097561e-05,
65
+ "loss": 0.6315,
66
+ "step": 40
67
+ },
68
+ {
69
+ "epoch": 0.16791044776119404,
70
+ "grad_norm": 0.13927004882169236,
71
+ "learning_rate": 4.999828351434079e-05,
72
+ "loss": 0.6127,
73
+ "step": 45
74
+ },
75
+ {
76
+ "epoch": 0.1865671641791045,
77
+ "grad_norm": 0.1455738320729682,
78
+ "learning_rate": 4.998779482816942e-05,
79
+ "loss": 0.6095,
80
+ "step": 50
81
+ },
82
+ {
83
+ "epoch": 0.20522388059701493,
84
+ "grad_norm": 0.12130331155218448,
85
+ "learning_rate": 4.996777549883426e-05,
86
+ "loss": 0.5824,
87
+ "step": 55
88
+ },
89
+ {
90
+ "epoch": 0.22388059701492538,
91
+ "grad_norm": 0.11003515209479892,
92
+ "learning_rate": 4.9938234010808136e-05,
93
+ "loss": 0.5861,
94
+ "step": 60
95
+ },
96
+ {
97
+ "epoch": 0.24253731343283583,
98
+ "grad_norm": 0.11060375781263691,
99
+ "learning_rate": 4.989918288418841e-05,
100
+ "loss": 0.5754,
101
+ "step": 65
102
+ },
103
+ {
104
+ "epoch": 0.26119402985074625,
105
+ "grad_norm": 0.1099882855423578,
106
+ "learning_rate": 4.9850638669390816e-05,
107
+ "loss": 0.5724,
108
+ "step": 70
109
+ },
110
+ {
111
+ "epoch": 0.2798507462686567,
112
+ "grad_norm": 0.10766352810165353,
113
+ "learning_rate": 4.97926219401351e-05,
114
+ "loss": 0.5608,
115
+ "step": 75
116
+ },
117
+ {
118
+ "epoch": 0.29850746268656714,
119
+ "grad_norm": 0.10140806004752735,
120
+ "learning_rate": 4.9725157284725665e-05,
121
+ "loss": 0.5623,
122
+ "step": 80
123
+ },
124
+ {
125
+ "epoch": 0.31716417910447764,
126
+ "grad_norm": 0.10445356548807731,
127
+ "learning_rate": 4.964827329563061e-05,
128
+ "loss": 0.5605,
129
+ "step": 85
130
+ },
131
+ {
132
+ "epoch": 0.3358208955223881,
133
+ "grad_norm": 0.09473190185241998,
134
+ "learning_rate": 4.956200255736394e-05,
135
+ "loss": 0.5492,
136
+ "step": 90
137
+ },
138
+ {
139
+ "epoch": 0.35447761194029853,
140
+ "grad_norm": 0.1104055037751782,
141
+ "learning_rate": 4.9466381632675714e-05,
142
+ "loss": 0.5452,
143
+ "step": 95
144
+ },
145
+ {
146
+ "epoch": 0.373134328358209,
147
+ "grad_norm": 0.11728567466154544,
148
+ "learning_rate": 4.936145104705629e-05,
149
+ "loss": 0.5431,
150
+ "step": 100
151
+ },
152
+ {
153
+ "epoch": 0.3917910447761194,
154
+ "grad_norm": 0.12324393210746246,
155
+ "learning_rate": 4.9247255271560994e-05,
156
+ "loss": 0.542,
157
+ "step": 105
158
+ },
159
+ {
160
+ "epoch": 0.41044776119402987,
161
+ "grad_norm": 0.10189522170485131,
162
+ "learning_rate": 4.9123842703962754e-05,
163
+ "loss": 0.5376,
164
+ "step": 110
165
+ },
166
+ {
167
+ "epoch": 0.4291044776119403,
168
+ "grad_norm": 0.09928359820416677,
169
+ "learning_rate": 4.899126564824033e-05,
170
+ "loss": 0.5386,
171
+ "step": 115
172
+ },
173
+ {
174
+ "epoch": 0.44776119402985076,
175
+ "grad_norm": 0.08893348279887758,
176
+ "learning_rate": 4.884958029241127e-05,
177
+ "loss": 0.5364,
178
+ "step": 120
179
+ },
180
+ {
181
+ "epoch": 0.4664179104477612,
182
+ "grad_norm": 0.11606708496242767,
183
+ "learning_rate": 4.869884668471853e-05,
184
+ "loss": 0.5352,
185
+ "step": 125
186
+ },
187
+ {
188
+ "epoch": 0.48507462686567165,
189
+ "grad_norm": 0.10286612666181358,
190
+ "learning_rate": 4.8539128708181276e-05,
191
+ "loss": 0.528,
192
+ "step": 130
193
+ },
194
+ {
195
+ "epoch": 0.503731343283582,
196
+ "grad_norm": 0.11732114273017366,
197
+ "learning_rate": 4.8370494053520316e-05,
198
+ "loss": 0.5365,
199
+ "step": 135
200
+ },
201
+ {
202
+ "epoch": 0.5223880597014925,
203
+ "grad_norm": 0.11109874771027929,
204
+ "learning_rate": 4.8193014190469815e-05,
205
+ "loss": 0.5304,
206
+ "step": 140
207
+ },
208
+ {
209
+ "epoch": 0.5410447761194029,
210
+ "grad_norm": 0.10629574732330986,
211
+ "learning_rate": 4.800676433748746e-05,
212
+ "loss": 0.5176,
213
+ "step": 145
214
+ },
215
+ {
216
+ "epoch": 0.5597014925373134,
217
+ "grad_norm": 0.1225754806353354,
218
+ "learning_rate": 4.781182342987577e-05,
219
+ "loss": 0.5244,
220
+ "step": 150
221
+ },
222
+ {
223
+ "epoch": 0.5783582089552238,
224
+ "grad_norm": 0.11545798098173879,
225
+ "learning_rate": 4.7608274086328275e-05,
226
+ "loss": 0.5197,
227
+ "step": 155
228
+ },
229
+ {
230
+ "epoch": 0.5970149253731343,
231
+ "grad_norm": 0.10984434834330462,
232
+ "learning_rate": 4.739620257391446e-05,
233
+ "loss": 0.5273,
234
+ "step": 160
235
+ },
236
+ {
237
+ "epoch": 0.6156716417910447,
238
+ "grad_norm": 0.09384895592282082,
239
+ "learning_rate": 4.7175698771518656e-05,
240
+ "loss": 0.5193,
241
+ "step": 165
242
+ },
243
+ {
244
+ "epoch": 0.6343283582089553,
245
+ "grad_norm": 0.13242720525922766,
246
+ "learning_rate": 4.6946856131748076e-05,
247
+ "loss": 0.5193,
248
+ "step": 170
249
+ },
250
+ {
251
+ "epoch": 0.6529850746268657,
252
+ "grad_norm": 0.10950233032891146,
253
+ "learning_rate": 4.6709771641326244e-05,
254
+ "loss": 0.5145,
255
+ "step": 175
256
+ },
257
+ {
258
+ "epoch": 0.6716417910447762,
259
+ "grad_norm": 0.1069644733597589,
260
+ "learning_rate": 4.6464545779988757e-05,
261
+ "loss": 0.5167,
262
+ "step": 180
263
+ },
264
+ {
265
+ "epoch": 0.6902985074626866,
266
+ "grad_norm": 0.12573511591329042,
267
+ "learning_rate": 4.621128247789846e-05,
268
+ "loss": 0.5141,
269
+ "step": 185
270
+ },
271
+ {
272
+ "epoch": 0.7089552238805971,
273
+ "grad_norm": 0.10595364884016444,
274
+ "learning_rate": 4.595008907159847e-05,
275
+ "loss": 0.5081,
276
+ "step": 190
277
+ },
278
+ {
279
+ "epoch": 0.7276119402985075,
280
+ "grad_norm": 0.11269588046403602,
281
+ "learning_rate": 4.568107625852136e-05,
282
+ "loss": 0.503,
283
+ "step": 195
284
+ },
285
+ {
286
+ "epoch": 0.746268656716418,
287
+ "grad_norm": 0.09406398116395229,
288
+ "learning_rate": 4.5404358050074115e-05,
289
+ "loss": 0.5075,
290
+ "step": 200
291
+ },
292
+ {
293
+ "epoch": 0.7649253731343284,
294
+ "grad_norm": 0.09993211084306963,
295
+ "learning_rate": 4.512005172331842e-05,
296
+ "loss": 0.5107,
297
+ "step": 205
298
+ },
299
+ {
300
+ "epoch": 0.7835820895522388,
301
+ "grad_norm": 0.11331241080632606,
302
+ "learning_rate": 4.482827777126706e-05,
303
+ "loss": 0.507,
304
+ "step": 210
305
+ },
306
+ {
307
+ "epoch": 0.8022388059701493,
308
+ "grad_norm": 0.11221212086046371,
309
+ "learning_rate": 4.4529159851817255e-05,
310
+ "loss": 0.5041,
311
+ "step": 215
312
+ },
313
+ {
314
+ "epoch": 0.8208955223880597,
315
+ "grad_norm": 0.11707450296579959,
316
+ "learning_rate": 4.422282473534271e-05,
317
+ "loss": 0.4989,
318
+ "step": 220
319
+ },
320
+ {
321
+ "epoch": 0.8395522388059702,
322
+ "grad_norm": 0.11427168269212384,
323
+ "learning_rate": 4.3909402250966534e-05,
324
+ "loss": 0.5151,
325
+ "step": 225
326
+ },
327
+ {
328
+ "epoch": 0.8582089552238806,
329
+ "grad_norm": 0.10434919827802294,
330
+ "learning_rate": 4.358902523153791e-05,
331
+ "loss": 0.5003,
332
+ "step": 230
333
+ },
334
+ {
335
+ "epoch": 0.8768656716417911,
336
+ "grad_norm": 0.09522718572477468,
337
+ "learning_rate": 4.326182945733555e-05,
338
+ "loss": 0.5083,
339
+ "step": 235
340
+ },
341
+ {
342
+ "epoch": 0.8955223880597015,
343
+ "grad_norm": 0.10303101606186355,
344
+ "learning_rate": 4.292795359852221e-05,
345
+ "loss": 0.5,
346
+ "step": 240
347
+ },
348
+ {
349
+ "epoch": 0.914179104477612,
350
+ "grad_norm": 0.11645760699383664,
351
+ "learning_rate": 4.2587539156374295e-05,
352
+ "loss": 0.5078,
353
+ "step": 245
354
+ },
355
+ {
356
+ "epoch": 0.9328358208955224,
357
+ "grad_norm": 0.11868485154529083,
358
+ "learning_rate": 4.2240730403311586e-05,
359
+ "loss": 0.5005,
360
+ "step": 250
361
+ },
362
+ {
363
+ "epoch": 0.9514925373134329,
364
+ "grad_norm": 0.1135775018055789,
365
+ "learning_rate": 4.188767432175263e-05,
366
+ "loss": 0.501,
367
+ "step": 255
368
+ },
369
+ {
370
+ "epoch": 0.9701492537313433,
371
+ "grad_norm": 0.10759776645567355,
372
+ "learning_rate": 4.1528520541821506e-05,
373
+ "loss": 0.4954,
374
+ "step": 260
375
+ },
376
+ {
377
+ "epoch": 0.9888059701492538,
378
+ "grad_norm": 0.11164466025815273,
379
+ "learning_rate": 4.116342127793245e-05,
380
+ "loss": 0.5027,
381
+ "step": 265
382
+ },
383
+ {
384
+ "epoch": 1.007462686567164,
385
+ "grad_norm": 0.1134164118788949,
386
+ "learning_rate": 4.0792531264279285e-05,
387
+ "loss": 0.4952,
388
+ "step": 270
389
+ },
390
+ {
391
+ "epoch": 1.0261194029850746,
392
+ "grad_norm": 0.1224166510766843,
393
+ "learning_rate": 4.041600768925687e-05,
394
+ "loss": 0.4755,
395
+ "step": 275
396
+ },
397
+ {
398
+ "epoch": 1.044776119402985,
399
+ "grad_norm": 0.10342344313016362,
400
+ "learning_rate": 4.0034010128842484e-05,
401
+ "loss": 0.4812,
402
+ "step": 280
403
+ },
404
+ {
405
+ "epoch": 1.0634328358208955,
406
+ "grad_norm": 0.09619486513423638,
407
+ "learning_rate": 3.964670047896525e-05,
408
+ "loss": 0.4832,
409
+ "step": 285
410
+ },
411
+ {
412
+ "epoch": 1.0820895522388059,
413
+ "grad_norm": 0.1148240507474565,
414
+ "learning_rate": 3.925424288689239e-05,
415
+ "loss": 0.4764,
416
+ "step": 290
417
+ },
418
+ {
419
+ "epoch": 1.1007462686567164,
420
+ "grad_norm": 0.09558761238740061,
421
+ "learning_rate": 3.8856803681661296e-05,
422
+ "loss": 0.4822,
423
+ "step": 295
424
+ },
425
+ {
426
+ "epoch": 1.1194029850746268,
427
+ "grad_norm": 0.10240411198398687,
428
+ "learning_rate": 3.8454551303586964e-05,
429
+ "loss": 0.4808,
430
+ "step": 300
431
+ },
432
+ {
433
+ "epoch": 1.1380597014925373,
434
+ "grad_norm": 0.11127995228587867,
435
+ "learning_rate": 3.8047656232874624e-05,
436
+ "loss": 0.484,
437
+ "step": 305
438
+ },
439
+ {
440
+ "epoch": 1.1567164179104479,
441
+ "grad_norm": 0.09882015115635996,
442
+ "learning_rate": 3.763629091736781e-05,
443
+ "loss": 0.4768,
444
+ "step": 310
445
+ },
446
+ {
447
+ "epoch": 1.1753731343283582,
448
+ "grad_norm": 0.09479435890268799,
449
+ "learning_rate": 3.722062969946254e-05,
450
+ "loss": 0.483,
451
+ "step": 315
452
+ },
453
+ {
454
+ "epoch": 1.1940298507462686,
455
+ "grad_norm": 0.09215846819136608,
456
+ "learning_rate": 3.6800848742218644e-05,
457
+ "loss": 0.4798,
458
+ "step": 320
459
+ },
460
+ {
461
+ "epoch": 1.212686567164179,
462
+ "grad_norm": 0.09072929789796806,
463
+ "learning_rate": 3.6377125954699254e-05,
464
+ "loss": 0.4847,
465
+ "step": 325
466
+ },
467
+ {
468
+ "epoch": 1.2313432835820897,
469
+ "grad_norm": 0.09912376706624307,
470
+ "learning_rate": 3.5949640916570566e-05,
471
+ "loss": 0.4788,
472
+ "step": 330
473
+ },
474
+ {
475
+ "epoch": 1.25,
476
+ "grad_norm": 0.09761642906266785,
477
+ "learning_rate": 3.551857480199336e-05,
478
+ "loss": 0.4807,
479
+ "step": 335
480
+ },
481
+ {
482
+ "epoch": 1.2686567164179103,
483
+ "grad_norm": 0.09469785325303005,
484
+ "learning_rate": 3.5084110302838916e-05,
485
+ "loss": 0.4773,
486
+ "step": 340
487
+ },
488
+ {
489
+ "epoch": 1.287313432835821,
490
+ "grad_norm": 0.09477509976342904,
491
+ "learning_rate": 3.464643155126162e-05,
492
+ "loss": 0.4731,
493
+ "step": 345
494
+ },
495
+ {
496
+ "epoch": 1.3059701492537314,
497
+ "grad_norm": 0.10492492806033109,
498
+ "learning_rate": 3.4205724041661135e-05,
499
+ "loss": 0.4748,
500
+ "step": 350
501
+ },
502
+ {
503
+ "epoch": 1.3246268656716418,
504
+ "grad_norm": 0.09366312938984638,
505
+ "learning_rate": 3.376217455206732e-05,
506
+ "loss": 0.4742,
507
+ "step": 355
508
+ },
509
+ {
510
+ "epoch": 1.3432835820895521,
511
+ "grad_norm": 0.08024810662631264,
512
+ "learning_rate": 3.3315971064981025e-05,
513
+ "loss": 0.4764,
514
+ "step": 360
515
+ },
516
+ {
517
+ "epoch": 1.3619402985074627,
518
+ "grad_norm": 0.09717797386193038,
519
+ "learning_rate": 3.286730268770452e-05,
520
+ "loss": 0.4785,
521
+ "step": 365
522
+ },
523
+ {
524
+ "epoch": 1.3805970149253732,
525
+ "grad_norm": 0.0939200638469148,
526
+ "learning_rate": 3.2416359572195155e-05,
527
+ "loss": 0.4844,
528
+ "step": 370
529
+ },
530
+ {
531
+ "epoch": 1.3992537313432836,
532
+ "grad_norm": 0.09294770035243786,
533
+ "learning_rate": 3.1963332834476247e-05,
534
+ "loss": 0.4775,
535
+ "step": 375
536
+ },
537
+ {
538
+ "epoch": 1.417910447761194,
539
+ "grad_norm": 0.08413530596038701,
540
+ "learning_rate": 3.150841447363948e-05,
541
+ "loss": 0.4803,
542
+ "step": 380
543
+ },
544
+ {
545
+ "epoch": 1.4365671641791045,
546
+ "grad_norm": 0.0865856672794535,
547
+ "learning_rate": 3.1051797290472966e-05,
548
+ "loss": 0.4721,
549
+ "step": 385
550
+ },
551
+ {
552
+ "epoch": 1.455223880597015,
553
+ "grad_norm": 0.08763506569892904,
554
+ "learning_rate": 3.059367480574958e-05,
555
+ "loss": 0.4742,
556
+ "step": 390
557
+ },
558
+ {
559
+ "epoch": 1.4738805970149254,
560
+ "grad_norm": 0.09707220296579894,
561
+ "learning_rate": 3.0134241178210103e-05,
562
+ "loss": 0.4703,
563
+ "step": 395
564
+ },
565
+ {
566
+ "epoch": 1.4925373134328357,
567
+ "grad_norm": 0.09647084631774662,
568
+ "learning_rate": 2.9673691122276086e-05,
569
+ "loss": 0.4716,
570
+ "step": 400
571
+ },
572
+ {
573
+ "epoch": 1.5111940298507462,
574
+ "grad_norm": 0.10414671302567495,
575
+ "learning_rate": 2.9212219825527075e-05,
576
+ "loss": 0.4741,
577
+ "step": 405
578
+ },
579
+ {
580
+ "epoch": 1.5298507462686568,
581
+ "grad_norm": 0.0964804559345882,
582
+ "learning_rate": 2.8750022865977443e-05,
583
+ "loss": 0.4773,
584
+ "step": 410
585
+ },
586
+ {
587
+ "epoch": 1.5485074626865671,
588
+ "grad_norm": 0.08300671453326233,
589
+ "learning_rate": 2.82872961291876e-05,
590
+ "loss": 0.4647,
591
+ "step": 415
592
+ },
593
+ {
594
+ "epoch": 1.5671641791044775,
595
+ "grad_norm": 0.09370690516566732,
596
+ "learning_rate": 2.7824235725245042e-05,
597
+ "loss": 0.4664,
598
+ "step": 420
599
+ },
600
+ {
601
+ "epoch": 1.585820895522388,
602
+ "grad_norm": 0.09401332348942805,
603
+ "learning_rate": 2.7361037905650032e-05,
604
+ "loss": 0.4697,
605
+ "step": 425
606
+ },
607
+ {
608
+ "epoch": 1.6044776119402986,
609
+ "grad_norm": 0.08965158350497503,
610
+ "learning_rate": 2.689789898014155e-05,
611
+ "loss": 0.4683,
612
+ "step": 430
613
+ },
614
+ {
615
+ "epoch": 1.623134328358209,
616
+ "grad_norm": 0.08803489279640653,
617
+ "learning_rate": 2.6435015233498443e-05,
618
+ "loss": 0.4721,
619
+ "step": 435
620
+ },
621
+ {
622
+ "epoch": 1.6417910447761193,
623
+ "grad_norm": 0.08576868029341782,
624
+ "learning_rate": 2.5972582842351156e-05,
625
+ "loss": 0.4664,
626
+ "step": 440
627
+ },
628
+ {
629
+ "epoch": 1.6604477611940298,
630
+ "grad_norm": 0.08790658081775465,
631
+ "learning_rate": 2.551079779203932e-05,
632
+ "loss": 0.4666,
633
+ "step": 445
634
+ },
635
+ {
636
+ "epoch": 1.6791044776119404,
637
+ "grad_norm": 0.09761982809729165,
638
+ "learning_rate": 2.504985579355047e-05,
639
+ "loss": 0.4708,
640
+ "step": 450
641
+ },
642
+ {
643
+ "epoch": 1.6977611940298507,
644
+ "grad_norm": 0.08600266340290913,
645
+ "learning_rate": 2.458995220057491e-05,
646
+ "loss": 0.4691,
647
+ "step": 455
648
+ },
649
+ {
650
+ "epoch": 1.716417910447761,
651
+ "grad_norm": 0.08117488167122497,
652
+ "learning_rate": 2.4131281926712146e-05,
653
+ "loss": 0.4735,
654
+ "step": 460
655
+ },
656
+ {
657
+ "epoch": 1.7350746268656716,
658
+ "grad_norm": 0.08203479577414656,
659
+ "learning_rate": 2.3674039362863687e-05,
660
+ "loss": 0.4687,
661
+ "step": 465
662
+ },
663
+ {
664
+ "epoch": 1.7537313432835822,
665
+ "grad_norm": 0.07800658853978873,
666
+ "learning_rate": 2.3218418294847517e-05,
667
+ "loss": 0.4683,
668
+ "step": 470
669
+ },
670
+ {
671
+ "epoch": 1.7723880597014925,
672
+ "grad_norm": 0.09056596240340838,
673
+ "learning_rate": 2.2764611821268918e-05,
674
+ "loss": 0.4648,
675
+ "step": 475
676
+ },
677
+ {
678
+ "epoch": 1.7910447761194028,
679
+ "grad_norm": 0.0964354324970012,
680
+ "learning_rate": 2.231281227168257e-05,
681
+ "loss": 0.4733,
682
+ "step": 480
683
+ },
684
+ {
685
+ "epoch": 1.8097014925373134,
686
+ "grad_norm": 0.08758293289634715,
687
+ "learning_rate": 2.18632111250806e-05,
688
+ "loss": 0.475,
689
+ "step": 485
690
+ },
691
+ {
692
+ "epoch": 1.828358208955224,
693
+ "grad_norm": 0.09150711680284437,
694
+ "learning_rate": 2.141599892874107e-05,
695
+ "loss": 0.4692,
696
+ "step": 490
697
+ },
698
+ {
699
+ "epoch": 1.8470149253731343,
700
+ "grad_norm": 0.08550519308452513,
701
+ "learning_rate": 2.09713652174714e-05,
702
+ "loss": 0.4652,
703
+ "step": 495
704
+ },
705
+ {
706
+ "epoch": 1.8656716417910446,
707
+ "grad_norm": 0.0871164016396725,
708
+ "learning_rate": 2.0529498433280807e-05,
709
+ "loss": 0.4674,
710
+ "step": 500
711
+ },
712
+ {
713
+ "epoch": 1.8843283582089554,
714
+ "grad_norm": 0.09480068977033189,
715
+ "learning_rate": 2.0090585845516012e-05,
716
+ "loss": 0.4708,
717
+ "step": 505
718
+ },
719
+ {
720
+ "epoch": 1.9029850746268657,
721
+ "grad_norm": 0.09417802812416548,
722
+ "learning_rate": 1.965481347149376e-05,
723
+ "loss": 0.4695,
724
+ "step": 510
725
+ },
726
+ {
727
+ "epoch": 1.921641791044776,
728
+ "grad_norm": 0.08791315041891376,
729
+ "learning_rate": 1.9222365997664165e-05,
730
+ "loss": 0.4676,
731
+ "step": 515
732
+ },
733
+ {
734
+ "epoch": 1.9402985074626866,
735
+ "grad_norm": 0.0821183823198808,
736
+ "learning_rate": 1.8793426701337947e-05,
737
+ "loss": 0.4648,
738
+ "step": 520
739
+ },
740
+ {
741
+ "epoch": 1.9589552238805972,
742
+ "grad_norm": 0.0744561938489921,
743
+ "learning_rate": 1.8368177373010954e-05,
744
+ "loss": 0.4732,
745
+ "step": 525
746
+ },
747
+ {
748
+ "epoch": 1.9776119402985075,
749
+ "grad_norm": 0.07817933947596517,
750
+ "learning_rate": 1.7946798239318775e-05,
751
+ "loss": 0.4664,
752
+ "step": 530
753
+ },
754
+ {
755
+ "epoch": 1.9962686567164178,
756
+ "grad_norm": 0.08379421148696033,
757
+ "learning_rate": 1.75294678866542e-05,
758
+ "loss": 0.4679,
759
+ "step": 535
760
+ },
761
+ {
762
+ "epoch": 2.014925373134328,
763
+ "grad_norm": 0.07984701953647755,
764
+ "learning_rate": 1.7116363185479754e-05,
765
+ "loss": 0.4602,
766
+ "step": 540
767
+ },
768
+ {
769
+ "epoch": 2.033582089552239,
770
+ "grad_norm": 0.08449268753051078,
771
+ "learning_rate": 1.670765921536755e-05,
772
+ "loss": 0.4569,
773
+ "step": 545
774
+ },
775
+ {
776
+ "epoch": 2.0522388059701493,
777
+ "grad_norm": 0.08095757523241218,
778
+ "learning_rate": 1.6303529190798088e-05,
779
+ "loss": 0.4545,
780
+ "step": 550
781
+ },
782
+ {
783
+ "epoch": 2.0708955223880596,
784
+ "grad_norm": 0.08363343932299715,
785
+ "learning_rate": 1.590414438774954e-05,
786
+ "loss": 0.4518,
787
+ "step": 555
788
+ },
789
+ {
790
+ "epoch": 2.08955223880597,
791
+ "grad_norm": 0.07463565896489559,
792
+ "learning_rate": 1.550967407110856e-05,
793
+ "loss": 0.4489,
794
+ "step": 560
795
+ },
796
+ {
797
+ "epoch": 2.1082089552238807,
798
+ "grad_norm": 0.08433265404002505,
799
+ "learning_rate": 1.5120285422933478e-05,
800
+ "loss": 0.4519,
801
+ "step": 565
802
+ },
803
+ {
804
+ "epoch": 2.126865671641791,
805
+ "grad_norm": 0.07395268788672169,
806
+ "learning_rate": 1.4736143471600173e-05,
807
+ "loss": 0.4491,
808
+ "step": 570
809
+ },
810
+ {
811
+ "epoch": 2.1455223880597014,
812
+ "grad_norm": 0.08342447109766311,
813
+ "learning_rate": 1.4357411021860773e-05,
814
+ "loss": 0.4544,
815
+ "step": 575
816
+ },
817
+ {
818
+ "epoch": 2.1641791044776117,
819
+ "grad_norm": 0.07519841036516427,
820
+ "learning_rate": 1.3984248585844645e-05,
821
+ "loss": 0.4602,
822
+ "step": 580
823
+ },
824
+ {
825
+ "epoch": 2.1828358208955225,
826
+ "grad_norm": 0.07504816361245825,
827
+ "learning_rate": 1.3616814315031146e-05,
828
+ "loss": 0.4531,
829
+ "step": 585
830
+ },
831
+ {
832
+ "epoch": 2.201492537313433,
833
+ "grad_norm": 0.08212433520197311,
834
+ "learning_rate": 1.3255263933222833e-05,
835
+ "loss": 0.4555,
836
+ "step": 590
837
+ },
838
+ {
839
+ "epoch": 2.220149253731343,
840
+ "grad_norm": 0.08163078572745201,
841
+ "learning_rate": 1.2899750670547473e-05,
842
+ "loss": 0.4521,
843
+ "step": 595
844
+ },
845
+ {
846
+ "epoch": 2.2388059701492535,
847
+ "grad_norm": 0.07590759096220778,
848
+ "learning_rate": 1.2550425198516973e-05,
849
+ "loss": 0.4495,
850
+ "step": 600
851
+ },
852
+ {
853
+ "epoch": 2.2574626865671643,
854
+ "grad_norm": 0.06904365467338676,
855
+ "learning_rate": 1.2207435566170722e-05,
856
+ "loss": 0.4491,
857
+ "step": 605
858
+ },
859
+ {
860
+ "epoch": 2.2761194029850746,
861
+ "grad_norm": 0.07342202054906058,
862
+ "learning_rate": 1.1870927137330267e-05,
863
+ "loss": 0.4558,
864
+ "step": 610
865
+ },
866
+ {
867
+ "epoch": 2.294776119402985,
868
+ "grad_norm": 0.08175407881974946,
869
+ "learning_rate": 1.1541042528992152e-05,
870
+ "loss": 0.4505,
871
+ "step": 615
872
+ },
873
+ {
874
+ "epoch": 2.3134328358208958,
875
+ "grad_norm": 0.07091832226253358,
876
+ "learning_rate": 1.1217921550884774e-05,
877
+ "loss": 0.4563,
878
+ "step": 620
879
+ },
880
+ {
881
+ "epoch": 2.332089552238806,
882
+ "grad_norm": 0.0735381212475432,
883
+ "learning_rate": 1.0901701146215085e-05,
884
+ "loss": 0.4503,
885
+ "step": 625
886
+ },
887
+ {
888
+ "epoch": 2.3507462686567164,
889
+ "grad_norm": 0.07454458223706102,
890
+ "learning_rate": 1.0592515333630128e-05,
891
+ "loss": 0.4471,
892
+ "step": 630
893
+ },
894
+ {
895
+ "epoch": 2.3694029850746268,
896
+ "grad_norm": 0.06918781672394263,
897
+ "learning_rate": 1.029049515041808e-05,
898
+ "loss": 0.4459,
899
+ "step": 635
900
+ },
901
+ {
902
+ "epoch": 2.388059701492537,
903
+ "grad_norm": 0.07042349253392675,
904
+ "learning_rate": 9.99576859697277e-06,
905
+ "loss": 0.4527,
906
+ "step": 640
907
+ },
908
+ {
909
+ "epoch": 2.406716417910448,
910
+ "grad_norm": 0.06990000224390586,
911
+ "learning_rate": 9.708460582545337e-06,
912
+ "loss": 0.4542,
913
+ "step": 645
914
+ },
915
+ {
916
+ "epoch": 2.425373134328358,
917
+ "grad_norm": 0.07589637943114948,
918
+ "learning_rate": 9.428692872305925e-06,
919
+ "loss": 0.4486,
920
+ "step": 650
921
+ },
922
+ {
923
+ "epoch": 2.4440298507462686,
924
+ "grad_norm": 0.07305499937497431,
925
+ "learning_rate": 9.15658403573792e-06,
926
+ "loss": 0.4613,
927
+ "step": 655
928
+ },
929
+ {
930
+ "epoch": 2.4626865671641793,
931
+ "grad_norm": 0.06847095800068362,
932
+ "learning_rate": 8.892249396386513e-06,
933
+ "loss": 0.4489,
934
+ "step": 660
935
+ },
936
+ {
937
+ "epoch": 2.4813432835820897,
938
+ "grad_norm": 0.07099198770234746,
939
+ "learning_rate": 8.635800982982958e-06,
940
+ "loss": 0.457,
941
+ "step": 665
942
+ },
943
+ {
944
+ "epoch": 2.5,
945
+ "grad_norm": 0.06902375733484567,
946
+ "learning_rate": 8.387347481965244e-06,
947
+ "loss": 0.4475,
948
+ "step": 670
949
+ },
950
+ {
951
+ "epoch": 2.5186567164179103,
952
+ "grad_norm": 0.07032630308928171,
953
+ "learning_rate": 8.14699419141525e-06,
954
+ "loss": 0.4553,
955
+ "step": 675
956
+ },
957
+ {
958
+ "epoch": 2.5373134328358207,
959
+ "grad_norm": 0.07177818923129327,
960
+ "learning_rate": 7.914842976431932e-06,
961
+ "loss": 0.4533,
962
+ "step": 680
963
+ },
964
+ {
965
+ "epoch": 2.5559701492537314,
966
+ "grad_norm": 0.07394610672019591,
967
+ "learning_rate": 7.690992225959465e-06,
968
+ "loss": 0.453,
969
+ "step": 685
970
+ },
971
+ {
972
+ "epoch": 2.574626865671642,
973
+ "grad_norm": 0.06821411644108501,
974
+ "learning_rate": 7.4755368110886366e-06,
975
+ "loss": 0.4515,
976
+ "step": 690
977
+ },
978
+ {
979
+ "epoch": 2.593283582089552,
980
+ "grad_norm": 0.0722058283703491,
981
+ "learning_rate": 7.268568044849132e-06,
982
+ "loss": 0.4594,
983
+ "step": 695
984
+ },
985
+ {
986
+ "epoch": 2.611940298507463,
987
+ "grad_norm": 0.0704583684801643,
988
+ "learning_rate": 7.0701736435098155e-06,
989
+ "loss": 0.4524,
990
+ "step": 700
991
+ },
992
+ {
993
+ "epoch": 2.6305970149253732,
994
+ "grad_norm": 0.06988823803890779,
995
+ "learning_rate": 6.880437689403316e-06,
996
+ "loss": 0.4526,
997
+ "step": 705
998
+ },
999
+ {
1000
+ "epoch": 2.6492537313432836,
1001
+ "grad_norm": 0.07259674165968212,
1002
+ "learning_rate": 6.699440595290754e-06,
1003
+ "loss": 0.4515,
1004
+ "step": 710
1005
+ },
1006
+ {
1007
+ "epoch": 2.667910447761194,
1008
+ "grad_norm": 0.06745674865987111,
1009
+ "learning_rate": 6.527259070281722e-06,
1010
+ "loss": 0.4562,
1011
+ "step": 715
1012
+ },
1013
+ {
1014
+ "epoch": 2.6865671641791042,
1015
+ "grad_norm": 0.09708148259376258,
1016
+ "learning_rate": 6.363966087323844e-06,
1017
+ "loss": 0.4544,
1018
+ "step": 720
1019
+ },
1020
+ {
1021
+ "epoch": 2.705223880597015,
1022
+ "grad_norm": 0.0696892401955707,
1023
+ "learning_rate": 6.209630852275836e-06,
1024
+ "loss": 0.4459,
1025
+ "step": 725
1026
+ },
1027
+ {
1028
+ "epoch": 2.7238805970149254,
1029
+ "grad_norm": 0.07185576991588484,
1030
+ "learning_rate": 6.06431877457709e-06,
1031
+ "loss": 0.4503,
1032
+ "step": 730
1033
+ },
1034
+ {
1035
+ "epoch": 2.7425373134328357,
1036
+ "grad_norm": 0.07116338247532543,
1037
+ "learning_rate": 5.928091439526226e-06,
1038
+ "loss": 0.4472,
1039
+ "step": 735
1040
+ },
1041
+ {
1042
+ "epoch": 2.7611940298507465,
1043
+ "grad_norm": 0.07012522976921934,
1044
+ "learning_rate": 5.801006582180398e-06,
1045
+ "loss": 0.4505,
1046
+ "step": 740
1047
+ },
1048
+ {
1049
+ "epoch": 2.779850746268657,
1050
+ "grad_norm": 0.06996189785913512,
1051
+ "learning_rate": 5.683118062886346e-06,
1052
+ "loss": 0.4536,
1053
+ "step": 745
1054
+ },
1055
+ {
1056
+ "epoch": 2.798507462686567,
1057
+ "grad_norm": 0.07136894351410067,
1058
+ "learning_rate": 5.574475844453634e-06,
1059
+ "loss": 0.4505,
1060
+ "step": 750
1061
+ },
1062
+ {
1063
+ "epoch": 2.8171641791044775,
1064
+ "grad_norm": 0.07002535300791056,
1065
+ "learning_rate": 5.475125970979702e-06,
1066
+ "loss": 0.4515,
1067
+ "step": 755
1068
+ },
1069
+ {
1070
+ "epoch": 2.835820895522388,
1071
+ "grad_norm": 0.07149166693243972,
1072
+ "learning_rate": 5.385110548335753e-06,
1073
+ "loss": 0.4568,
1074
+ "step": 760
1075
+ },
1076
+ {
1077
+ "epoch": 2.8544776119402986,
1078
+ "grad_norm": 0.07020725124001985,
1079
+ "learning_rate": 5.30446772632166e-06,
1080
+ "loss": 0.4555,
1081
+ "step": 765
1082
+ },
1083
+ {
1084
+ "epoch": 2.873134328358209,
1085
+ "grad_norm": 0.07259046375882247,
1086
+ "learning_rate": 5.233231682497572e-06,
1087
+ "loss": 0.4481,
1088
+ "step": 770
1089
+ },
1090
+ {
1091
+ "epoch": 2.8917910447761193,
1092
+ "grad_norm": 0.06964811084889956,
1093
+ "learning_rate": 5.171432607698975e-06,
1094
+ "loss": 0.4478,
1095
+ "step": 775
1096
+ },
1097
+ {
1098
+ "epoch": 2.91044776119403,
1099
+ "grad_norm": 0.06725150018988699,
1100
+ "learning_rate": 5.119096693241395e-06,
1101
+ "loss": 0.4524,
1102
+ "step": 780
1103
+ },
1104
+ {
1105
+ "epoch": 2.9291044776119404,
1106
+ "grad_norm": 0.06616781886399527,
1107
+ "learning_rate": 5.07624611982014e-06,
1108
+ "loss": 0.4471,
1109
+ "step": 785
1110
+ },
1111
+ {
1112
+ "epoch": 2.9477611940298507,
1113
+ "grad_norm": 0.06597264304324678,
1114
+ "learning_rate": 5.0428990481098275e-06,
1115
+ "loss": 0.4476,
1116
+ "step": 790
1117
+ },
1118
+ {
1119
+ "epoch": 2.966417910447761,
1120
+ "grad_norm": 0.06838206427405408,
1121
+ "learning_rate": 5.01906961106762e-06,
1122
+ "loss": 0.4472,
1123
+ "step": 795
1124
+ },
1125
+ {
1126
+ "epoch": 2.9850746268656714,
1127
+ "grad_norm": 0.06614701992126826,
1128
+ "learning_rate": 5.004767907943488e-06,
1129
+ "loss": 0.4458,
1130
+ "step": 800
1131
+ },
1132
+ {
1133
+ "epoch": 3.0,
1134
+ "step": 804,
1135
+ "total_flos": 1465863748190208.0,
1136
+ "train_loss": 0.5050300278177309,
1137
+ "train_runtime": 14495.1223,
1138
+ "train_samples_per_second": 7.098,
1139
+ "train_steps_per_second": 0.055
1140
+ }
1141
+ ],
1142
+ "logging_steps": 5,
1143
+ "max_steps": 804,
1144
+ "num_input_tokens_seen": 0,
1145
+ "num_train_epochs": 3,
1146
+ "save_steps": 100,
1147
+ "stateful_callbacks": {
1148
+ "TrainerControl": {
1149
+ "args": {
1150
+ "should_epoch_stop": false,
1151
+ "should_evaluate": false,
1152
+ "should_log": false,
1153
+ "should_save": true,
1154
+ "should_training_stop": true
1155
+ },
1156
+ "attributes": {}
1157
+ }
1158
+ },
1159
+ "total_flos": 1465863748190208.0,
1160
+ "train_batch_size": 16,
1161
+ "trial_name": null,
1162
+ "trial_params": null
1163
+ }
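
The `log_history` list above records the training loss every 5 steps, so the loss curve can be reproduced directly from this file; a minimal sketch (matplotlib assumed to be installed):

```python
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries that carry a loss value
# (the final summary entry has no "loss" key and is skipped)
entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("Qwen2.5-math-1.5B-Open-R1-Distill-eos SFT loss")
plt.savefig("loss_curve.png")
```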