DeeLearning committed
Commit f3667a6 · verified · 1 Parent(s): 2ea3562

Model save
README.md CHANGED
@@ -1,10 +1,8 @@
 ---
-datasets: HuggingFaceH4/Bespoke-Stratos-17k
 library_name: transformers
 model_name: Qwen2.5-1.5B-Open-R1-Distill
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -12,7 +10,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-Open-R1-Distill
 
-This model is a fine-tuned version of [None](https://huggingface.co/None) on the [HuggingFaceH4/Bespoke-Stratos-17k](https://huggingface.co/datasets/HuggingFaceH4/Bespoke-Stratos-17k) dataset.
+This model is a fine-tuned version of [None](https://huggingface.co/None).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -28,7 +26,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/xt0521/huggingface/runs/glyjssry)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/xt0521/huggingface/runs/f4zrdgcm)
 
 
 This model was trained with SFT.
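For reference, the README's quick-start section, which this commit leaves untouched, ends with `print(output["generated_text"])`. A minimal sketch of that usage pattern with the Transformers `pipeline` API is shown below; the repository id is a placeholder, since the card itself lists the base model link as `None`:

```python
# Minimal quick-start sketch (assumption: the model is available locally or on the Hub
# under the placeholder id below; swap in the real path or repo id before running).
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="DeeLearning/Qwen2.5-1.5B-Open-R1-Distill",  # placeholder repo id
    device_map="auto",
)

question = "Which is larger, 9.9 or 9.11? Explain your reasoning."
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=256,
    return_full_text=False,
)[0]
print(output["generated_text"])
```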
all_results.json CHANGED
@@ -1,14 +1,9 @@
 {
-    "epoch": 1.0,
-    "eval_loss": 0.7578943967819214,
-    "eval_runtime": 1.9314,
-    "eval_samples": 100,
-    "eval_samples_per_second": 66.273,
-    "eval_steps_per_second": 2.071,
-    "total_flos": 76916824473600.0,
-    "train_loss": 0.7337305870281874,
-    "train_runtime": 1235.0855,
+    "epoch": 4.0,
+    "total_flos": 308009150447616.0,
+    "train_loss": 0.7185798888728463,
+    "train_runtime": 4924.3182,
     "train_samples": 16610,
-    "train_samples_per_second": 17.497,
+    "train_samples_per_second": 17.554,
     "train_steps_per_second": 0.137
 }
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/data/share8/cdn/open-r1/data/Qwen2.5-1.5B-Open-R1-Distill",
+  "_name_or_path": "/data/share/huggingface/models--Qwen--Qwen2.5-1.5B-Instruct/snapshots/989aa7980e4cf806f80c7fef2b1adb7bc71aa306",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
@@ -23,7 +23,7 @@
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.48.2",
-  "use_cache": true,
+  "use_cache": false,
   "use_sliding_window": false,
   "vocab_size": 151936
 }
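The substantive change here is `use_cache` flipping from `true` to `false`, which is typical for a checkpoint written from a training run: the KV cache is usually disabled while training (for example alongside gradient checkpointing) and re-enabled for generation. A hedged sketch of loading this checkpoint and restoring the cache, with a placeholder path, might look like:

```python
# Sketch only: the local path below is a placeholder for wherever this checkpoint is stored.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

ckpt = "path/to/Qwen2.5-1.5B-Open-R1-Distill"  # placeholder path

config = AutoConfig.from_pretrained(ckpt)
print(config.architectures, config.use_cache)  # e.g. ['Qwen2ForCausalLM'] False

model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)
model.config.use_cache = True  # re-enable the KV cache for faster generation at inference time
```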
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dddd44bff605045e709f6628512c5276aea5b9b04fe1662e05e295dabaee4f2
+oid sha256:964acb82a92cb2a01b9e079e67a989bd1b47ef3a24f4bc59f94c2666e67a2b66
 size 3087467144
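Only the Git LFS pointer changes: new weights, same size (3087467144 bytes). To check that a downloaded `model.safetensors` matches this pointer, a small sketch like the following works; the file path is assumed to be local:

```python
# Sketch: verify a downloaded weight file against the sha256 recorded in the LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

expected = "964acb82a92cb2a01b9e079e67a989bd1b47ef3a24f4bc59f94c2666e67a2b66"  # from the new pointer
print(sha256_of("model.safetensors") == expected)
```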
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-    "epoch": 1.0,
-    "total_flos": 76916824473600.0,
-    "train_loss": 0.7337305870281874,
-    "train_runtime": 1235.0855,
+    "epoch": 4.0,
+    "total_flos": 308009150447616.0,
+    "train_loss": 0.7185798888728463,
+    "train_runtime": 4924.3182,
     "train_samples": 16610,
-    "train_samples_per_second": 17.497,
+    "train_samples_per_second": 17.554,
     "train_steps_per_second": 0.137
 }
trainer_state.json CHANGED
@@ -1,267 +1,1021 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.0,
5
  "eval_steps": 100,
6
- "global_step": 169,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.029585798816568046,
13
- "grad_norm": 0.07426696918416174,
14
- "learning_rate": 5.882352941176471e-06,
15
- "loss": 0.7515,
16
  "step": 5
17
  },
18
  {
19
  "epoch": 0.05917159763313609,
20
- "grad_norm": 0.08040276233943516,
21
- "learning_rate": 1.1764705882352942e-05,
22
- "loss": 0.755,
23
  "step": 10
24
  },
25
  {
26
  "epoch": 0.08875739644970414,
27
- "grad_norm": 0.09568613129719651,
28
- "learning_rate": 1.7647058823529414e-05,
29
- "loss": 0.7553,
30
  "step": 15
31
  },
32
  {
33
  "epoch": 0.11834319526627218,
34
- "grad_norm": 0.11073070319325445,
35
- "learning_rate": 1.9980782984658682e-05,
36
- "loss": 0.7481,
37
  "step": 20
38
  },
39
  {
40
  "epoch": 0.14792899408284024,
41
- "grad_norm": 0.1085253681695973,
42
- "learning_rate": 1.9863613034027224e-05,
43
- "loss": 0.742,
44
  "step": 25
45
  },
46
  {
47
  "epoch": 0.17751479289940827,
48
- "grad_norm": 0.10317427695342557,
49
- "learning_rate": 1.9641197940012136e-05,
50
- "loss": 0.7317,
51
  "step": 30
52
  },
53
  {
54
  "epoch": 0.20710059171597633,
55
- "grad_norm": 0.1095919075191608,
56
- "learning_rate": 1.9315910880512792e-05,
57
- "loss": 0.7394,
58
  "step": 35
59
  },
60
  {
61
  "epoch": 0.23668639053254437,
62
- "grad_norm": 0.10584984713775991,
63
- "learning_rate": 1.8891222681391853e-05,
64
- "loss": 0.7445,
65
  "step": 40
66
  },
67
  {
68
  "epoch": 0.26627218934911245,
69
- "grad_norm": 0.1052036771871902,
70
- "learning_rate": 1.8371664782625287e-05,
71
- "loss": 0.7382,
72
  "step": 45
73
  },
74
  {
75
  "epoch": 0.2958579881656805,
76
- "grad_norm": 0.10185995684228026,
77
- "learning_rate": 1.7762780887657576e-05,
78
- "loss": 0.7327,
79
  "step": 50
80
  },
81
  {
82
  "epoch": 0.3254437869822485,
83
- "grad_norm": 0.11605723315982408,
84
- "learning_rate": 1.7071067811865477e-05,
85
- "loss": 0.7265,
86
  "step": 55
87
  },
88
  {
89
  "epoch": 0.35502958579881655,
90
- "grad_norm": 0.09496481679546319,
91
- "learning_rate": 1.6303906161279554e-05,
92
- "loss": 0.7404,
93
  "step": 60
94
  },
95
  {
96
  "epoch": 0.38461538461538464,
97
- "grad_norm": 0.09399123086469602,
98
- "learning_rate": 1.5469481581224274e-05,
99
- "loss": 0.7183,
100
  "step": 65
101
  },
102
  {
103
  "epoch": 0.41420118343195267,
104
- "grad_norm": 0.09454325149914487,
105
- "learning_rate": 1.4576697415156818e-05,
106
- "loss": 0.7239,
107
  "step": 70
108
  },
109
  {
110
  "epoch": 0.4437869822485207,
111
- "grad_norm": 0.09665316804682329,
112
- "learning_rate": 1.3635079705638298e-05,
113
- "loss": 0.7374,
114
  "step": 75
115
  },
116
  {
117
  "epoch": 0.47337278106508873,
118
- "grad_norm": 0.0931576465014015,
119
- "learning_rate": 1.2654675551080724e-05,
120
- "loss": 0.7206,
121
  "step": 80
122
  },
123
  {
124
  "epoch": 0.5029585798816568,
125
- "grad_norm": 0.09445002041196521,
126
- "learning_rate": 1.164594590280734e-05,
127
- "loss": 0.727,
128
  "step": 85
129
  },
130
  {
131
  "epoch": 0.5325443786982249,
132
- "grad_norm": 0.08585910164532164,
133
- "learning_rate": 1.0619653946285948e-05,
134
- "loss": 0.7287,
135
  "step": 90
136
  },
137
  {
138
  "epoch": 0.5621301775147929,
139
- "grad_norm": 0.0926566404725236,
140
- "learning_rate": 9.586750257511868e-06,
141
- "loss": 0.7378,
142
  "step": 95
143
  },
144
  {
145
  "epoch": 0.591715976331361,
146
- "grad_norm": 0.08672021822123627,
147
- "learning_rate": 8.558255959926533e-06,
148
- "loss": 0.7164,
149
  "step": 100
150
  },
151
  {
152
  "epoch": 0.591715976331361,
153
- "eval_loss": 0.7641129493713379,
154
- "eval_runtime": 1.9602,
155
- "eval_samples_per_second": 65.298,
156
- "eval_steps_per_second": 2.041,
157
  "step": 100
158
  },
159
  {
160
  "epoch": 0.621301775147929,
161
- "grad_norm": 0.08622854416758327,
162
- "learning_rate": 7.545145128592009e-06,
163
- "loss": 0.7216,
164
  "step": 105
165
  },
166
  {
167
  "epoch": 0.650887573964497,
168
- "grad_norm": 0.08204894186576381,
169
- "learning_rate": 6.558227696373617e-06,
170
- "loss": 0.7346,
171
  "step": 110
172
  },
173
  {
174
  "epoch": 0.6804733727810651,
175
- "grad_norm": 0.08097299352958673,
176
- "learning_rate": 5.608034111526298e-06,
177
- "loss": 0.73,
178
  "step": 115
179
  },
180
  {
181
  "epoch": 0.7100591715976331,
182
- "grad_norm": 0.07682552342075682,
183
- "learning_rate": 4.704702977392914e-06,
184
- "loss": 0.7212,
185
  "step": 120
186
  },
187
  {
188
  "epoch": 0.7396449704142012,
189
- "grad_norm": 0.08086018117086849,
190
- "learning_rate": 3.857872873103322e-06,
191
- "loss": 0.7248,
192
  "step": 125
193
  },
194
  {
195
  "epoch": 0.7692307692307693,
196
- "grad_norm": 0.07606171025014277,
197
- "learning_rate": 3.0765795095517026e-06,
198
- "loss": 0.728,
199
  "step": 130
200
  },
201
  {
202
  "epoch": 0.7988165680473372,
203
- "grad_norm": 0.08257799296717885,
204
- "learning_rate": 2.369159318001937e-06,
205
- "loss": 0.7323,
206
  "step": 135
207
  },
208
  {
209
  "epoch": 0.8284023668639053,
210
- "grad_norm": 0.07889584661038457,
211
- "learning_rate": 1.743160500034443e-06,
212
- "loss": 0.7251,
213
  "step": 140
214
  },
215
  {
216
  "epoch": 0.8579881656804734,
217
- "grad_norm": 0.08171166394737774,
218
- "learning_rate": 1.2052624879351105e-06,
219
- "loss": 0.7324,
220
  "step": 145
221
  },
222
  {
223
  "epoch": 0.8875739644970414,
224
- "grad_norm": 0.07432343760979292,
225
- "learning_rate": 7.612046748871327e-07,
226
- "loss": 0.743,
227
  "step": 150
228
  },
229
  {
230
  "epoch": 0.9171597633136095,
231
- "grad_norm": 0.07790477411393834,
232
- "learning_rate": 4.1572517541747294e-07,
233
- "loss": 0.7386,
234
  "step": 155
235
  },
236
  {
237
  "epoch": 0.9467455621301775,
238
- "grad_norm": 0.07339780497225679,
239
- "learning_rate": 1.7251026952640583e-07,
240
- "loss": 0.7383,
241
  "step": 160
242
  },
243
  {
244
  "epoch": 0.9763313609467456,
245
- "grad_norm": 0.07176968893681754,
246
- "learning_rate": 3.4155069933301535e-08,
247
- "loss": 0.7225,
248
  "step": 165
249
  },
250
  {
251
- "epoch": 1.0,
252
- "step": 169,
253
- "total_flos": 76916824473600.0,
254
- "train_loss": 0.7337305870281874,
255
- "train_runtime": 1235.0855,
256
- "train_samples_per_second": 17.497,
257
  "train_steps_per_second": 0.137
258
  }
259
  ],
260
  "logging_steps": 5,
261
- "max_steps": 169,
262
  "num_input_tokens_seen": 0,
263
- "num_train_epochs": 1,
264
- "save_steps": 500,
265
  "stateful_callbacks": {
266
  "TrainerControl": {
267
  "args": {
@@ -274,7 +1028,7 @@
274
  "attributes": {}
275
  }
276
  },
277
- "total_flos": 76916824473600.0,
278
  "train_batch_size": 4,
279
  "trial_name": null,
280
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 4.0,
5
  "eval_steps": 100,
6
+ "global_step": 676,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
  "epoch": 0.029585798816568046,
13
+ "grad_norm": 0.6439659821018778,
14
+ "learning_rate": 1.4705882352941177e-06,
15
+ "loss": 1.1017,
16
  "step": 5
17
  },
18
  {
19
  "epoch": 0.05917159763313609,
20
+ "grad_norm": 0.5057877785583942,
21
+ "learning_rate": 2.9411764705882355e-06,
22
+ "loss": 1.1007,
23
  "step": 10
24
  },
25
  {
26
  "epoch": 0.08875739644970414,
27
+ "grad_norm": 0.34275947536219825,
28
+ "learning_rate": 4.411764705882353e-06,
29
+ "loss": 1.0629,
30
  "step": 15
31
  },
32
  {
33
  "epoch": 0.11834319526627218,
34
+ "grad_norm": 0.33577063948976177,
35
+ "learning_rate": 5.882352941176471e-06,
36
+ "loss": 1.0051,
37
  "step": 20
38
  },
39
  {
40
  "epoch": 0.14792899408284024,
41
+ "grad_norm": 0.2949471201474794,
42
+ "learning_rate": 7.352941176470589e-06,
43
+ "loss": 0.949,
44
  "step": 25
45
  },
46
  {
47
  "epoch": 0.17751479289940827,
48
+ "grad_norm": 0.21159349877560912,
49
+ "learning_rate": 8.823529411764707e-06,
50
+ "loss": 0.9046,
51
  "step": 30
52
  },
53
  {
54
  "epoch": 0.20710059171597633,
55
+ "grad_norm": 0.15856411969263493,
56
+ "learning_rate": 1.0294117647058823e-05,
57
+ "loss": 0.888,
58
  "step": 35
59
  },
60
  {
61
  "epoch": 0.23668639053254437,
62
+ "grad_norm": 0.1353999900027582,
63
+ "learning_rate": 1.1764705882352942e-05,
64
+ "loss": 0.8801,
65
  "step": 40
66
  },
67
  {
68
  "epoch": 0.26627218934911245,
69
+ "grad_norm": 0.13487701799423293,
70
+ "learning_rate": 1.323529411764706e-05,
71
+ "loss": 0.8569,
72
  "step": 45
73
  },
74
  {
75
  "epoch": 0.2958579881656805,
76
+ "grad_norm": 0.10937715780925006,
77
+ "learning_rate": 1.4705882352941179e-05,
78
+ "loss": 0.8411,
79
  "step": 50
80
  },
81
  {
82
  "epoch": 0.3254437869822485,
83
+ "grad_norm": 0.10116789827792308,
84
+ "learning_rate": 1.6176470588235296e-05,
85
+ "loss": 0.8256,
86
  "step": 55
87
  },
88
  {
89
  "epoch": 0.35502958579881655,
90
+ "grad_norm": 0.09486346656051839,
91
+ "learning_rate": 1.7647058823529414e-05,
92
+ "loss": 0.8314,
93
  "step": 60
94
  },
95
  {
96
  "epoch": 0.38461538461538464,
97
+ "grad_norm": 0.09783173154524022,
98
+ "learning_rate": 1.911764705882353e-05,
99
+ "loss": 0.8011,
100
  "step": 65
101
  },
102
  {
103
  "epoch": 0.41420118343195267,
104
+ "grad_norm": 0.09449363160697857,
105
+ "learning_rate": 1.999946602771351e-05,
106
+ "loss": 0.7999,
107
  "step": 70
108
  },
109
  {
110
  "epoch": 0.4437869822485207,
111
+ "grad_norm": 0.09797892714489181,
112
+ "learning_rate": 1.9993459494370938e-05,
113
+ "loss": 0.8084,
114
  "step": 75
115
  },
116
  {
117
  "epoch": 0.47337278106508873,
118
+ "grad_norm": 0.09083934620130703,
119
+ "learning_rate": 1.9980782984658682e-05,
120
+ "loss": 0.7872,
121
  "step": 80
122
  },
123
  {
124
  "epoch": 0.5029585798816568,
125
+ "grad_norm": 0.09674625831588454,
126
+ "learning_rate": 1.996144495931251e-05,
127
+ "loss": 0.789,
128
  "step": 85
129
  },
130
  {
131
  "epoch": 0.5325443786982249,
132
+ "grad_norm": 0.09153750269543345,
133
+ "learning_rate": 1.9935458325191365e-05,
134
+ "loss": 0.7865,
135
  "step": 90
136
  },
137
  {
138
  "epoch": 0.5621301775147929,
139
+ "grad_norm": 0.0957908326296121,
140
+ "learning_rate": 1.9902840426662897e-05,
141
+ "loss": 0.7924,
142
  "step": 95
143
  },
144
  {
145
  "epoch": 0.591715976331361,
146
+ "grad_norm": 0.09259701132768274,
147
+ "learning_rate": 1.9863613034027224e-05,
148
+ "loss": 0.7665,
149
  "step": 100
150
  },
151
  {
152
  "epoch": 0.591715976331361,
153
+ "eval_loss": 0.8009439706802368,
154
+ "eval_runtime": 1.9876,
155
+ "eval_samples_per_second": 64.399,
156
+ "eval_steps_per_second": 2.012,
157
  "step": 100
158
  },
159
  {
160
  "epoch": 0.621301775147929,
161
+ "grad_norm": 0.09861544877125988,
162
+ "learning_rate": 1.9817802328986696e-05,
163
+ "loss": 0.7689,
164
  "step": 105
165
  },
166
  {
167
  "epoch": 0.650887573964497,
168
+ "grad_norm": 0.09818067763821876,
169
+ "learning_rate": 1.9765438887171327e-05,
170
+ "loss": 0.7788,
171
  "step": 110
172
  },
173
  {
174
  "epoch": 0.6804733727810651,
175
+ "grad_norm": 0.10112115466155701,
176
+ "learning_rate": 1.970655765773159e-05,
177
+ "loss": 0.772,
178
  "step": 115
179
  },
180
  {
181
  "epoch": 0.7100591715976331,
182
+ "grad_norm": 0.0914598701096024,
183
+ "learning_rate": 1.9641197940012136e-05,
184
+ "loss": 0.7596,
185
  "step": 120
186
  },
187
  {
188
  "epoch": 0.7396449704142012,
189
+ "grad_norm": 0.10299550351836897,
190
+ "learning_rate": 1.956940335732209e-05,
191
+ "loss": 0.7608,
192
  "step": 125
193
  },
194
  {
195
  "epoch": 0.7692307692307693,
196
+ "grad_norm": 0.09676217855032584,
197
+ "learning_rate": 1.9491221827819348e-05,
198
+ "loss": 0.7608,
199
  "step": 130
200
  },
201
  {
202
  "epoch": 0.7988165680473372,
203
+ "grad_norm": 0.10054271920883129,
204
+ "learning_rate": 1.9406705532528373e-05,
205
+ "loss": 0.7625,
206
  "step": 135
207
  },
208
  {
209
  "epoch": 0.8284023668639053,
210
+ "grad_norm": 0.09316749954420439,
211
+ "learning_rate": 1.9315910880512792e-05,
212
+ "loss": 0.7526,
213
  "step": 140
214
  },
215
  {
216
  "epoch": 0.8579881656804734,
217
+ "grad_norm": 0.09924380627210336,
218
+ "learning_rate": 1.921889847122605e-05,
219
+ "loss": 0.758,
220
  "step": 145
221
  },
222
  {
223
  "epoch": 0.8875739644970414,
224
+ "grad_norm": 0.0935394258583913,
225
+ "learning_rate": 1.911573305406528e-05,
226
+ "loss": 0.7667,
227
  "step": 150
228
  },
229
  {
230
  "epoch": 0.9171597633136095,
231
+ "grad_norm": 0.0941062409493051,
232
+ "learning_rate": 1.9006483485155338e-05,
233
+ "loss": 0.7597,
234
  "step": 155
235
  },
236
  {
237
  "epoch": 0.9467455621301775,
238
+ "grad_norm": 0.0945515154197286,
239
+ "learning_rate": 1.8891222681391853e-05,
240
+ "loss": 0.7583,
241
  "step": 160
242
  },
243
  {
244
  "epoch": 0.9763313609467456,
245
+ "grad_norm": 0.0944093533101716,
246
+ "learning_rate": 1.877002757177403e-05,
247
+ "loss": 0.7402,
248
  "step": 165
249
  },
250
  {
251
+ "epoch": 1.0059171597633136,
252
+ "grad_norm": 0.1026209727213443,
253
+ "learning_rate": 1.8642979046059595e-05,
254
+ "loss": 0.7451,
255
+ "step": 170
256
+ },
257
+ {
258
+ "epoch": 1.0355029585798816,
259
+ "grad_norm": 0.09253733372936898,
260
+ "learning_rate": 1.8510161900776186e-05,
261
+ "loss": 0.7227,
262
+ "step": 175
263
+ },
264
+ {
265
+ "epoch": 1.0650887573964498,
266
+ "grad_norm": 0.10701582057186095,
267
+ "learning_rate": 1.8371664782625287e-05,
268
+ "loss": 0.7235,
269
+ "step": 180
270
+ },
271
+ {
272
+ "epoch": 1.0946745562130178,
273
+ "grad_norm": 0.10901317779074045,
274
+ "learning_rate": 1.8227580129316368e-05,
275
+ "loss": 0.7297,
276
+ "step": 185
277
+ },
278
+ {
279
+ "epoch": 1.1242603550295858,
280
+ "grad_norm": 0.09272510989071414,
281
+ "learning_rate": 1.8078004107870797e-05,
282
+ "loss": 0.7269,
283
+ "step": 190
284
+ },
285
+ {
286
+ "epoch": 1.1538461538461537,
287
+ "grad_norm": 0.09753000927785783,
288
+ "learning_rate": 1.7923036550436706e-05,
289
+ "loss": 0.7225,
290
+ "step": 195
291
+ },
292
+ {
293
+ "epoch": 1.183431952662722,
294
+ "grad_norm": 0.10006006351926447,
295
+ "learning_rate": 1.7762780887657576e-05,
296
+ "loss": 0.7224,
297
+ "step": 200
298
+ },
299
+ {
300
+ "epoch": 1.183431952662722,
301
+ "eval_loss": 0.7682048082351685,
302
+ "eval_runtime": 1.9381,
303
+ "eval_samples_per_second": 66.045,
304
+ "eval_steps_per_second": 2.064,
305
+ "step": 200
306
+ },
307
+ {
308
+ "epoch": 1.21301775147929,
309
+ "grad_norm": 0.08514109288732025,
310
+ "learning_rate": 1.759734407963911e-05,
311
+ "loss": 0.7141,
312
+ "step": 205
313
+ },
314
+ {
315
+ "epoch": 1.242603550295858,
316
+ "grad_norm": 0.10153944071881804,
317
+ "learning_rate": 1.74268365445604e-05,
318
+ "loss": 0.7251,
319
+ "step": 210
320
+ },
321
+ {
322
+ "epoch": 1.272189349112426,
323
+ "grad_norm": 0.08516674487530249,
324
+ "learning_rate": 1.725137208497705e-05,
325
+ "loss": 0.7095,
326
+ "step": 215
327
+ },
328
+ {
329
+ "epoch": 1.301775147928994,
330
+ "grad_norm": 0.09000557413388141,
331
+ "learning_rate": 1.7071067811865477e-05,
332
+ "loss": 0.7221,
333
+ "step": 220
334
+ },
335
+ {
336
+ "epoch": 1.331360946745562,
337
+ "grad_norm": 0.10261984193674742,
338
+ "learning_rate": 1.688604406645903e-05,
339
+ "loss": 0.7248,
340
+ "step": 225
341
+ },
342
+ {
343
+ "epoch": 1.3609467455621302,
344
+ "grad_norm": 0.09063724453079985,
345
+ "learning_rate": 1.6696424339928153e-05,
346
+ "loss": 0.7189,
347
+ "step": 230
348
+ },
349
+ {
350
+ "epoch": 1.3905325443786982,
351
+ "grad_norm": 0.0876664358809029,
352
+ "learning_rate": 1.6502335190958135e-05,
353
+ "loss": 0.7115,
354
+ "step": 235
355
+ },
356
+ {
357
+ "epoch": 1.4201183431952662,
358
+ "grad_norm": 0.08926784554788682,
359
+ "learning_rate": 1.6303906161279554e-05,
360
+ "loss": 0.7052,
361
+ "step": 240
362
+ },
363
+ {
364
+ "epoch": 1.4497041420118344,
365
+ "grad_norm": 0.09216166902850643,
366
+ "learning_rate": 1.6101269689207656e-05,
367
+ "loss": 0.7304,
368
+ "step": 245
369
+ },
370
+ {
371
+ "epoch": 1.4792899408284024,
372
+ "grad_norm": 0.09489888582394233,
373
+ "learning_rate": 1.5894561021248535e-05,
374
+ "loss": 0.7094,
375
+ "step": 250
376
+ },
377
+ {
378
+ "epoch": 1.5088757396449703,
379
+ "grad_norm": 0.09815715101407019,
380
+ "learning_rate": 1.568391812183097e-05,
381
+ "loss": 0.7125,
382
+ "step": 255
383
+ },
384
+ {
385
+ "epoch": 1.5384615384615383,
386
+ "grad_norm": 0.11023233541754712,
387
+ "learning_rate": 1.5469481581224274e-05,
388
+ "loss": 0.7203,
389
+ "step": 260
390
+ },
391
+ {
392
+ "epoch": 1.5680473372781065,
393
+ "grad_norm": 0.0990016959835567,
394
+ "learning_rate": 1.5251394521703496e-05,
395
+ "loss": 0.7162,
396
+ "step": 265
397
+ },
398
+ {
399
+ "epoch": 1.5976331360946747,
400
+ "grad_norm": 0.09361562420520224,
401
+ "learning_rate": 1.5029802502024788e-05,
402
+ "loss": 0.7143,
403
+ "step": 270
404
+ },
405
+ {
406
+ "epoch": 1.6272189349112427,
407
+ "grad_norm": 0.09061363853326403,
408
+ "learning_rate": 1.4804853420274471e-05,
409
+ "loss": 0.7178,
410
+ "step": 275
411
+ },
412
+ {
413
+ "epoch": 1.6568047337278107,
414
+ "grad_norm": 0.09054748720794353,
415
+ "learning_rate": 1.4576697415156818e-05,
416
+ "loss": 0.7072,
417
+ "step": 280
418
+ },
419
+ {
420
+ "epoch": 1.6863905325443787,
421
+ "grad_norm": 0.09231829692501531,
422
+ "learning_rate": 1.434548676578634e-05,
423
+ "loss": 0.7154,
424
+ "step": 285
425
+ },
426
+ {
427
+ "epoch": 1.7159763313609466,
428
+ "grad_norm": 0.08703936573542412,
429
+ "learning_rate": 1.4111375790051511e-05,
430
+ "loss": 0.7011,
431
+ "step": 290
432
+ },
433
+ {
434
+ "epoch": 1.7455621301775148,
435
+ "grad_norm": 0.08968479884070231,
436
+ "learning_rate": 1.3874520741617734e-05,
437
+ "loss": 0.7122,
438
+ "step": 295
439
+ },
440
+ {
441
+ "epoch": 1.7751479289940828,
442
+ "grad_norm": 0.09437837766978704,
443
+ "learning_rate": 1.3635079705638298e-05,
444
+ "loss": 0.7017,
445
+ "step": 300
446
+ },
447
+ {
448
+ "epoch": 1.7751479289940828,
449
+ "eval_loss": 0.752805233001709,
450
+ "eval_runtime": 1.9496,
451
+ "eval_samples_per_second": 65.655,
452
+ "eval_steps_per_second": 2.052,
453
+ "step": 300
454
+ },
455
+ {
456
+ "epoch": 1.804733727810651,
457
+ "grad_norm": 0.08903936053110759,
458
+ "learning_rate": 1.3393212493242964e-05,
459
+ "loss": 0.725,
460
+ "step": 305
461
+ },
462
+ {
463
+ "epoch": 1.834319526627219,
464
+ "grad_norm": 0.08414153400589117,
465
+ "learning_rate": 1.3149080534874519e-05,
466
+ "loss": 0.7054,
467
+ "step": 310
468
+ },
469
+ {
470
+ "epoch": 1.863905325443787,
471
+ "grad_norm": 0.09439034181825776,
472
+ "learning_rate": 1.2902846772544625e-05,
473
+ "loss": 0.7021,
474
+ "step": 315
475
+ },
476
+ {
477
+ "epoch": 1.893491124260355,
478
+ "grad_norm": 0.08964239172806014,
479
+ "learning_rate": 1.2654675551080724e-05,
480
+ "loss": 0.7064,
481
+ "step": 320
482
+ },
483
+ {
484
+ "epoch": 1.9230769230769231,
485
+ "grad_norm": 0.09358273223772219,
486
+ "learning_rate": 1.2404732508436693e-05,
487
+ "loss": 0.6996,
488
+ "step": 325
489
+ },
490
+ {
491
+ "epoch": 1.952662721893491,
492
+ "grad_norm": 0.08800666939014279,
493
+ "learning_rate": 1.2153184465140413e-05,
494
+ "loss": 0.7137,
495
+ "step": 330
496
+ },
497
+ {
498
+ "epoch": 1.9822485207100593,
499
+ "grad_norm": 0.09777074659138213,
500
+ "learning_rate": 1.1900199312952047e-05,
501
+ "loss": 0.7061,
502
+ "step": 335
503
+ },
504
+ {
505
+ "epoch": 2.0118343195266273,
506
+ "grad_norm": 0.08420032391185793,
507
+ "learning_rate": 1.164594590280734e-05,
508
+ "loss": 0.6848,
509
+ "step": 340
510
+ },
511
+ {
512
+ "epoch": 2.0414201183431953,
513
+ "grad_norm": 0.09709124491718055,
514
+ "learning_rate": 1.1390593932120742e-05,
515
+ "loss": 0.6797,
516
+ "step": 345
517
+ },
518
+ {
519
+ "epoch": 2.0710059171597632,
520
+ "grad_norm": 0.08513494025885673,
521
+ "learning_rate": 1.1134313831523547e-05,
522
+ "loss": 0.6783,
523
+ "step": 350
524
+ },
525
+ {
526
+ "epoch": 2.100591715976331,
527
+ "grad_norm": 0.08526343643130264,
528
+ "learning_rate": 1.0877276651112662e-05,
529
+ "loss": 0.683,
530
+ "step": 355
531
+ },
532
+ {
533
+ "epoch": 2.1301775147928996,
534
+ "grad_norm": 0.08415048962920803,
535
+ "learning_rate": 1.0619653946285948e-05,
536
+ "loss": 0.6699,
537
+ "step": 360
538
+ },
539
+ {
540
+ "epoch": 2.1597633136094676,
541
+ "grad_norm": 0.08993894149758148,
542
+ "learning_rate": 1.0361617663240253e-05,
543
+ "loss": 0.6616,
544
+ "step": 365
545
+ },
546
+ {
547
+ "epoch": 2.1893491124260356,
548
+ "grad_norm": 0.08779637319471659,
549
+ "learning_rate": 1.0103340024208674e-05,
550
+ "loss": 0.6681,
551
+ "step": 370
552
+ },
553
+ {
554
+ "epoch": 2.2189349112426036,
555
+ "grad_norm": 0.07716142615588464,
556
+ "learning_rate": 9.844993412513533e-06,
557
+ "loss": 0.6847,
558
+ "step": 375
559
+ },
560
+ {
561
+ "epoch": 2.2485207100591715,
562
+ "grad_norm": 0.09680115255291638,
563
+ "learning_rate": 9.586750257511868e-06,
564
+ "loss": 0.6748,
565
+ "step": 380
566
+ },
567
+ {
568
+ "epoch": 2.2781065088757395,
569
+ "grad_norm": 0.08727553994241195,
570
+ "learning_rate": 9.328782919510186e-06,
571
+ "loss": 0.6727,
572
+ "step": 385
573
+ },
574
+ {
575
+ "epoch": 2.3076923076923075,
576
+ "grad_norm": 0.08569274603060249,
577
+ "learning_rate": 9.0712635747253e-06,
578
+ "loss": 0.6722,
579
+ "step": 390
580
+ },
581
+ {
582
+ "epoch": 2.337278106508876,
583
+ "grad_norm": 0.08838223625297882,
584
+ "learning_rate": 8.81436410036804e-06,
585
+ "loss": 0.6792,
586
+ "step": 395
587
+ },
588
+ {
589
+ "epoch": 2.366863905325444,
590
+ "grad_norm": 0.08243129604747186,
591
+ "learning_rate": 8.558255959926533e-06,
592
+ "loss": 0.6775,
593
+ "step": 400
594
+ },
595
+ {
596
+ "epoch": 2.366863905325444,
597
+ "eval_loss": 0.7467715740203857,
598
+ "eval_runtime": 1.954,
599
+ "eval_samples_per_second": 65.506,
600
+ "eval_steps_per_second": 2.047,
601
+ "step": 400
602
+ },
603
+ {
604
+ "epoch": 2.396449704142012,
605
+ "grad_norm": 0.08372814834591441,
606
+ "learning_rate": 8.30311008872561e-06,
607
+ "loss": 0.6867,
608
+ "step": 405
609
+ },
610
+ {
611
+ "epoch": 2.42603550295858,
612
+ "grad_norm": 0.08520010748643847,
613
+ "learning_rate": 8.04909677983872e-06,
614
+ "loss": 0.677,
615
+ "step": 410
616
+ },
617
+ {
618
+ "epoch": 2.455621301775148,
619
+ "grad_norm": 0.08436113404879492,
620
+ "learning_rate": 7.796385570428527e-06,
621
+ "loss": 0.6811,
622
+ "step": 415
623
+ },
624
+ {
625
+ "epoch": 2.485207100591716,
626
+ "grad_norm": 0.07770487004133524,
627
+ "learning_rate": 7.545145128592009e-06,
628
+ "loss": 0.6775,
629
+ "step": 420
630
+ },
631
+ {
632
+ "epoch": 2.5147928994082838,
633
+ "grad_norm": 0.07873507288955481,
634
+ "learning_rate": 7.295543140785604e-06,
635
+ "loss": 0.6607,
636
+ "step": 425
637
+ },
638
+ {
639
+ "epoch": 2.544378698224852,
640
+ "grad_norm": 0.08216416278696723,
641
+ "learning_rate": 7.0477461999055365e-06,
642
+ "loss": 0.6721,
643
+ "step": 430
644
+ },
645
+ {
646
+ "epoch": 2.57396449704142,
647
+ "grad_norm": 0.07808001669903655,
648
+ "learning_rate": 6.801919694098034e-06,
649
+ "loss": 0.669,
650
+ "step": 435
651
+ },
652
+ {
653
+ "epoch": 2.603550295857988,
654
+ "grad_norm": 0.07827371958864436,
655
+ "learning_rate": 6.558227696373617e-06,
656
+ "loss": 0.6737,
657
+ "step": 440
658
+ },
659
+ {
660
+ "epoch": 2.633136094674556,
661
+ "grad_norm": 0.09029552025645185,
662
+ "learning_rate": 6.316832855099173e-06,
663
+ "loss": 0.68,
664
+ "step": 445
665
+ },
666
+ {
667
+ "epoch": 2.662721893491124,
668
+ "grad_norm": 0.08627523522424184,
669
+ "learning_rate": 6.077896285440874e-06,
670
+ "loss": 0.6781,
671
+ "step": 450
672
+ },
673
+ {
674
+ "epoch": 2.6923076923076925,
675
+ "grad_norm": 0.0944458646122902,
676
+ "learning_rate": 5.841577461830408e-06,
677
+ "loss": 0.6856,
678
+ "step": 455
679
+ },
680
+ {
681
+ "epoch": 2.7218934911242605,
682
+ "grad_norm": 0.08627859291249917,
683
+ "learning_rate": 5.608034111526298e-06,
684
+ "loss": 0.6828,
685
+ "step": 460
686
+ },
687
+ {
688
+ "epoch": 2.7514792899408285,
689
+ "grad_norm": 0.07636613357565422,
690
+ "learning_rate": 5.377422109341332e-06,
691
+ "loss": 0.6631,
692
+ "step": 465
693
+ },
694
+ {
695
+ "epoch": 2.7810650887573964,
696
+ "grad_norm": 0.07763420602167485,
697
+ "learning_rate": 5.149895373606405e-06,
698
+ "loss": 0.6706,
699
+ "step": 470
700
+ },
701
+ {
702
+ "epoch": 2.8106508875739644,
703
+ "grad_norm": 0.0797684563299413,
704
+ "learning_rate": 4.92560576344013e-06,
705
+ "loss": 0.6726,
706
+ "step": 475
707
+ },
708
+ {
709
+ "epoch": 2.8402366863905324,
710
+ "grad_norm": 0.0743258532275492,
711
+ "learning_rate": 4.704702977392914e-06,
712
+ "loss": 0.6681,
713
+ "step": 480
714
+ },
715
+ {
716
+ "epoch": 2.8698224852071004,
717
+ "grad_norm": 0.08260380529165902,
718
+ "learning_rate": 4.487334453532998e-06,
719
+ "loss": 0.6732,
720
+ "step": 485
721
+ },
722
+ {
723
+ "epoch": 2.899408284023669,
724
+ "grad_norm": 0.07603345763322027,
725
+ "learning_rate": 4.2736452710412645e-06,
726
+ "loss": 0.6759,
727
+ "step": 490
728
+ },
729
+ {
730
+ "epoch": 2.9289940828402368,
731
+ "grad_norm": 0.0788588593958065,
732
+ "learning_rate": 4.063778053380446e-06,
733
+ "loss": 0.6669,
734
+ "step": 495
735
+ },
736
+ {
737
+ "epoch": 2.9585798816568047,
738
+ "grad_norm": 0.0765652404714636,
739
+ "learning_rate": 3.857872873103322e-06,
740
+ "loss": 0.6694,
741
+ "step": 500
742
+ },
743
+ {
744
+ "epoch": 2.9585798816568047,
745
+ "eval_loss": 0.7420404553413391,
746
+ "eval_runtime": 1.949,
747
+ "eval_samples_per_second": 65.673,
748
+ "eval_steps_per_second": 2.052,
749
+ "step": 500
750
+ },
751
+ {
752
+ "epoch": 2.9881656804733727,
753
+ "grad_norm": 0.07687126382252668,
754
+ "learning_rate": 3.6560671583635467e-06,
755
+ "loss": 0.6723,
756
+ "step": 505
757
+ },
758
+ {
759
+ "epoch": 3.0177514792899407,
760
+ "grad_norm": 0.07912130181111991,
761
+ "learning_rate": 3.4584956011913693e-06,
762
+ "loss": 0.6602,
763
+ "step": 510
764
+ },
765
+ {
766
+ "epoch": 3.0473372781065087,
767
+ "grad_norm": 0.07360557876355839,
768
+ "learning_rate": 3.2652900675956e-06,
769
+ "loss": 0.6566,
770
+ "step": 515
771
+ },
772
+ {
773
+ "epoch": 3.076923076923077,
774
+ "grad_norm": 0.0787952110142718,
775
+ "learning_rate": 3.0765795095517026e-06,
776
+ "loss": 0.6599,
777
+ "step": 520
778
+ },
779
+ {
780
+ "epoch": 3.106508875739645,
781
+ "grad_norm": 0.07557678307758718,
782
+ "learning_rate": 2.8924898789348645e-06,
783
+ "loss": 0.6659,
784
+ "step": 525
785
+ },
786
+ {
787
+ "epoch": 3.136094674556213,
788
+ "grad_norm": 0.07175247734719413,
789
+ "learning_rate": 2.713144043455388e-06,
790
+ "loss": 0.6646,
791
+ "step": 530
792
+ },
793
+ {
794
+ "epoch": 3.165680473372781,
795
+ "grad_norm": 0.07747698601868783,
796
+ "learning_rate": 2.538661704652595e-06,
797
+ "loss": 0.6605,
798
+ "step": 535
799
+ },
800
+ {
801
+ "epoch": 3.195266272189349,
802
+ "grad_norm": 0.0718076928664842,
803
+ "learning_rate": 2.369159318001937e-06,
804
+ "loss": 0.6543,
805
+ "step": 540
806
+ },
807
+ {
808
+ "epoch": 3.224852071005917,
809
+ "grad_norm": 0.07533978001859822,
810
+ "learning_rate": 2.2047500151886047e-06,
811
+ "loss": 0.6531,
812
+ "step": 545
813
+ },
814
+ {
815
+ "epoch": 3.2544378698224854,
816
+ "grad_norm": 0.07485842542784214,
817
+ "learning_rate": 2.045543528599607e-06,
818
+ "loss": 0.6646,
819
+ "step": 550
820
+ },
821
+ {
822
+ "epoch": 3.2840236686390534,
823
+ "grad_norm": 0.0708662804937619,
824
+ "learning_rate": 1.8916461180845968e-06,
825
+ "loss": 0.648,
826
+ "step": 555
827
+ },
828
+ {
829
+ "epoch": 3.3136094674556213,
830
+ "grad_norm": 0.0757464412979693,
831
+ "learning_rate": 1.743160500034443e-06,
832
+ "loss": 0.6395,
833
+ "step": 560
834
+ },
835
+ {
836
+ "epoch": 3.3431952662721893,
837
+ "grad_norm": 0.07734435238783051,
838
+ "learning_rate": 1.6001857788247755e-06,
839
+ "loss": 0.6495,
840
+ "step": 565
841
+ },
842
+ {
843
+ "epoch": 3.3727810650887573,
844
+ "grad_norm": 0.0725938998129201,
845
+ "learning_rate": 1.4628173806703594e-06,
846
+ "loss": 0.6552,
847
+ "step": 570
848
+ },
849
+ {
850
+ "epoch": 3.4023668639053253,
851
+ "grad_norm": 0.07170585944252475,
852
+ "learning_rate": 1.3311469899343698e-06,
853
+ "loss": 0.6556,
854
+ "step": 575
855
+ },
856
+ {
857
+ "epoch": 3.4319526627218933,
858
+ "grad_norm": 0.07035425662199751,
859
+ "learning_rate": 1.2052624879351105e-06,
860
+ "loss": 0.6514,
861
+ "step": 580
862
+ },
863
+ {
864
+ "epoch": 3.4615384615384617,
865
+ "grad_norm": 0.0716406331513175,
866
+ "learning_rate": 1.0852478942910228e-06,
867
+ "loss": 0.6556,
868
+ "step": 585
869
+ },
870
+ {
871
+ "epoch": 3.4911242603550297,
872
+ "grad_norm": 0.07161356638036934,
873
+ "learning_rate": 9.711833108431234e-07,
874
+ "loss": 0.6501,
875
+ "step": 590
876
+ },
877
+ {
878
+ "epoch": 3.5207100591715976,
879
+ "grad_norm": 0.0707175122399628,
880
+ "learning_rate": 8.631448681922994e-07,
881
+ "loss": 0.6607,
882
+ "step": 595
883
+ },
884
+ {
885
+ "epoch": 3.5502958579881656,
886
+ "grad_norm": 0.07021197999680215,
887
+ "learning_rate": 7.612046748871327e-07,
888
+ "loss": 0.6649,
889
+ "step": 600
890
+ },
891
+ {
892
+ "epoch": 3.5502958579881656,
893
+ "eval_loss": 0.7433957457542419,
894
+ "eval_runtime": 1.9404,
895
+ "eval_samples_per_second": 65.967,
896
+ "eval_steps_per_second": 2.061,
897
+ "step": 600
898
+ },
899
+ {
900
+ "epoch": 3.5798816568047336,
901
+ "grad_norm": 0.06889588969409026,
902
+ "learning_rate": 6.65430769296207e-07,
903
+ "loss": 0.6427,
904
+ "step": 605
905
+ },
906
+ {
907
+ "epoch": 3.609467455621302,
908
+ "grad_norm": 0.07042732460501529,
909
+ "learning_rate": 5.758870741969635e-07,
910
+ "loss": 0.6524,
911
+ "step": 610
912
+ },
913
+ {
914
+ "epoch": 3.63905325443787,
915
+ "grad_norm": 0.06830708719143527,
916
+ "learning_rate": 4.926333541114558e-07,
917
+ "loss": 0.6604,
918
+ "step": 615
919
+ },
920
+ {
921
+ "epoch": 3.668639053254438,
922
+ "grad_norm": 0.07062137013338904,
923
+ "learning_rate": 4.1572517541747294e-07,
924
+ "loss": 0.6541,
925
+ "step": 620
926
+ },
927
+ {
928
+ "epoch": 3.698224852071006,
929
+ "grad_norm": 0.06967910127337004,
930
+ "learning_rate": 3.4521386926163134e-07,
931
+ "loss": 0.6491,
932
+ "step": 625
933
+ },
934
+ {
935
+ "epoch": 3.727810650887574,
936
+ "grad_norm": 0.06931589660694865,
937
+ "learning_rate": 2.811464972992195e-07,
938
+ "loss": 0.6444,
939
+ "step": 630
940
+ },
941
+ {
942
+ "epoch": 3.757396449704142,
943
+ "grad_norm": 0.06857941794190696,
944
+ "learning_rate": 2.2356582028363548e-07,
945
+ "loss": 0.652,
946
+ "step": 635
947
+ },
948
+ {
949
+ "epoch": 3.78698224852071,
950
+ "grad_norm": 0.07146304965594258,
951
+ "learning_rate": 1.7251026952640583e-07,
952
+ "loss": 0.6547,
953
+ "step": 640
954
+ },
955
+ {
956
+ "epoch": 3.8165680473372783,
957
+ "grad_norm": 0.06914829105737026,
958
+ "learning_rate": 1.2801392124681233e-07,
959
+ "loss": 0.6419,
960
+ "step": 645
961
+ },
962
+ {
963
+ "epoch": 3.8461538461538463,
964
+ "grad_norm": 0.06976851389737697,
965
+ "learning_rate": 9.010647382825421e-08,
966
+ "loss": 0.6559,
967
+ "step": 650
968
+ },
969
+ {
970
+ "epoch": 3.8757396449704142,
971
+ "grad_norm": 0.06949483068766184,
972
+ "learning_rate": 5.881322799653699e-08,
973
+ "loss": 0.6559,
974
+ "step": 655
975
+ },
976
+ {
977
+ "epoch": 3.905325443786982,
978
+ "grad_norm": 0.06853851606881345,
979
+ "learning_rate": 3.4155069933301535e-08,
980
+ "loss": 0.6515,
981
+ "step": 660
982
+ },
983
+ {
984
+ "epoch": 3.93491124260355,
985
+ "grad_norm": 0.07221419070779167,
986
+ "learning_rate": 1.6148457335876112e-08,
987
+ "loss": 0.646,
988
+ "step": 665
989
+ },
990
+ {
991
+ "epoch": 3.9644970414201186,
992
+ "grad_norm": 0.06942961780673317,
993
+ "learning_rate": 4.80540843283972e-09,
994
+ "loss": 0.6653,
995
+ "step": 670
996
+ },
997
+ {
998
+ "epoch": 3.994082840236686,
999
+ "grad_norm": 0.0680541699351626,
1000
+ "learning_rate": 1.3349396265516235e-10,
1001
+ "loss": 0.6561,
1002
+ "step": 675
1003
+ },
1004
+ {
1005
+ "epoch": 4.0,
1006
+ "step": 676,
1007
+ "total_flos": 308009150447616.0,
1008
+ "train_loss": 0.7185798888728463,
1009
+ "train_runtime": 4924.3182,
1010
+ "train_samples_per_second": 17.554,
1011
  "train_steps_per_second": 0.137
1012
  }
1013
  ],
1014
  "logging_steps": 5,
1015
+ "max_steps": 676,
1016
  "num_input_tokens_seen": 0,
1017
+ "num_train_epochs": 4,
1018
+ "save_steps": 100,
1019
  "stateful_callbacks": {
1020
  "TrainerControl": {
1021
  "args": {
 
1028
  "attributes": {}
1029
  }
1030
  },
1031
+ "total_flos": 308009150447616.0,
1032
  "train_batch_size": 4,
1033
  "trial_name": null,
1034
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dbaa3b15f07865886766a303e293d83c8ac37c11d3cc809fc3bdc3ab96eb0eba
+oid sha256:b9931b66a5b4f3880efa3f24ac13f089fb67757696a9cc3435130b4c76671c84
 size 7352