zzhang1987 committed
Commit 7335995 · verified · 1 Parent(s): a217473

Model save

Files changed (4)
  1. README.md +1 -1
  2. all_results.json +4 -4
  3. train_results.json +4 -4
  4. trainer_state.json +236 -236
README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/causalai/huggingface/runs/tjguz7bm)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/causalai/huggingface/runs/1xpwmfi8)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
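The only README change is the Weights & Biases badge, which now points at the run that produced this save; the GRPO description is unchanged. For orientation only, the sketch below shows what a GRPO fine-tune with separate accuracy and format rewards can look like with TRL's `GRPOTrainer`. The base model, toy dataset, and both reward functions are placeholders, not this repository's actual training recipe; the logged `rewards/accuracy_reward` and `rewards/format_reward` keys in trainer_state.json merely suggest this shape.

```python
# Hypothetical sketch of GRPO training with accuracy/format rewards using TRL.
# Model name, dataset, and reward logic are placeholders, not this repo's recipe.
from datasets import Dataset
from trl import GRPOConfig, GRPOTrainer

train_dataset = Dataset.from_dict({
    "prompt": ["What is 2 + 3?", "What is 10 - 4?"],
    "answer": ["5", "6"],  # extra columns are forwarded to reward functions as kwargs
})

def accuracy_reward(completions, answer, **kwargs):
    # Toy criterion: 1.0 when the reference answer appears in the completion.
    return [1.0 if ans in text else 0.0 for text, ans in zip(completions, answer)]

def format_reward(completions, **kwargs):
    # Toy criterion: 1.0 when the completion uses a <think>/<answer> template.
    return [1.0 if "<think>" in text and "<answer>" in text else 0.0 for text in completions]

training_args = GRPOConfig(
    output_dir="grpo-sketch",
    learning_rate=2e-5,
    num_train_epochs=1,
    per_device_train_batch_size=2,
    num_generations=2,
    max_completion_length=64,
    logging_steps=5,
)

trainer = GRPOTrainer(
    model="Qwen/Qwen2-0.5B-Instruct",  # placeholder base model
    reward_funcs=[accuracy_reward, format_reward],
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()
```

In the logged metrics below, the total `reward` equals the sum of the accuracy and format components, which is consistent with two reward functions being combined this way.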
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.17424644481669768,
-    "train_runtime": 23830.442,
+    "train_loss": 0.0634942120351993,
+    "train_runtime": 123112.1165,
     "train_samples": 4377,
-    "train_samples_per_second": 0.184,
-    "train_steps_per_second": 0.006
+    "train_samples_per_second": 0.036,
+    "train_steps_per_second": 0.001
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.17424644481669768,
-    "train_runtime": 23830.442,
+    "train_loss": 0.0634942120351993,
+    "train_runtime": 123112.1165,
     "train_samples": 4377,
-    "train_samples_per_second": 0.184,
-    "train_steps_per_second": 0.006
+    "train_samples_per_second": 0.036,
+    "train_steps_per_second": 0.001
 }
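all_results.json and train_results.json record the same summary metrics; the updated values are internally consistent with the same 4377 training samples processed over a much longer runtime. A quick sanity check of how the derived throughput fields relate, assuming roughly one epoch and the 136 optimizer steps recorded in trainer_state.json below:

```python
# Sanity check of the derived throughput fields, assuming ~1 epoch and 136 steps.
train_samples = 4377
train_runtime = 123112.1165  # seconds, new run
total_steps = 136

print(round(train_samples / train_runtime, 3))  # 0.036 -> train_samples_per_second
print(round(total_steps / train_runtime, 3))    # 0.001 -> train_steps_per_second
```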
trainer_state.json CHANGED
@@ -9,384 +9,384 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "completion_length": 2.359375,
+      "completion_length": 15.59375,
       "epoch": 0.0365296803652968,
-      "grad_norm": 0.1389911025762558,
-      "kl": 0.003872394561767578,
+      "grad_norm": 10.368077278137207,
+      "kl": 0.15047144889831543,
       "learning_rate": 7.1428571428571436e-06,
-      "loss": 0.0002,
-      "reward": 0.0015625,
-      "reward_std": 0.004419417306780815,
-      "rewards/accuracy_reward": 0.0015625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0295,
+      "reward": 0.13515625,
+      "reward_std": 0.19950873199850322,
+      "rewards/accuracy_reward": 0.13203125,
+      "rewards/format_reward": 0.003125,
       "step": 5
     },
     {
-      "completion_length": 35.79453125,
+      "completion_length": 74.09296875,
       "epoch": 0.0730593607305936,
-      "grad_norm": 110.78824615478516,
-      "kl": 4.882113647460938,
+      "grad_norm": 6.168639183044434,
+      "kl": 1.47294921875,
       "learning_rate": 1.4285714285714287e-05,
-      "loss": 0.2065,
-      "reward": 0.14296875,
-      "reward_std": 0.18075319454073907,
-      "rewards/accuracy_reward": 0.14296875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1038,
+      "reward": 0.51015625,
+      "reward_std": 0.4266123466193676,
+      "rewards/accuracy_reward": 0.23203125,
+      "rewards/format_reward": 0.278125,
       "step": 10
     },
     {
-      "completion_length": 15.19375,
+      "completion_length": 109.55703125,
       "epoch": 0.1095890410958904,
-      "grad_norm": 80.90298461914062,
-      "kl": 9.19599609375,
+      "grad_norm": 10.966035842895508,
+      "kl": 0.523876953125,
       "learning_rate": 1.9996684675143132e-05,
-      "loss": 0.3634,
-      "reward": 0.246875,
-      "reward_std": 0.11894329264760017,
-      "rewards/accuracy_reward": 0.24609375,
-      "rewards/format_reward": 0.00078125,
+      "loss": 0.027,
+      "reward": 1.046875,
+      "reward_std": 0.46739846989512446,
+      "rewards/accuracy_reward": 0.215625,
+      "rewards/format_reward": 0.83125,
       "step": 15
     },
     {
-      "completion_length": 133.64609375,
+      "completion_length": 125.8359375,
       "epoch": 0.1461187214611872,
-      "grad_norm": 6.2705769538879395,
-      "kl": 10.17109375,
+      "grad_norm": 5.292937278747559,
+      "kl": 0.46650390625,
       "learning_rate": 1.9880878960910772e-05,
-      "loss": 0.4318,
-      "reward": 0.046875,
-      "reward_std": 0.09154205303639174,
-      "rewards/accuracy_reward": 0.0453125,
-      "rewards/format_reward": 0.0015625,
+      "loss": 0.0322,
+      "reward": 1.22734375,
+      "reward_std": 0.3152993652969599,
+      "rewards/accuracy_reward": 0.2421875,
+      "rewards/format_reward": 0.98515625,
       "step": 20
     },
     {
-      "completion_length": 37.11171875,
+      "completion_length": 271.60078125,
       "epoch": 0.182648401826484,
-      "grad_norm": 5.193119525909424,
-      "kl": 1.910546875,
+      "grad_norm": 2.0227677822113037,
+      "kl": 0.32373046875,
       "learning_rate": 1.960149873671602e-05,
-      "loss": 0.088,
-      "reward": 0.1921875,
-      "reward_std": 0.2920613231137395,
-      "rewards/accuracy_reward": 0.1921875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.005,
+      "reward": 1.23359375,
+      "reward_std": 0.41616563573479654,
+      "rewards/accuracy_reward": 0.30078125,
+      "rewards/format_reward": 0.9328125,
       "step": 25
     },
     {
-      "completion_length": 59.3875,
+      "completion_length": 134.89609375,
       "epoch": 0.2191780821917808,
-      "grad_norm": 7.289978504180908,
-      "kl": 12.04296875,
+      "grad_norm": 2.7977349758148193,
+      "kl": 0.432275390625,
       "learning_rate": 1.916316904487005e-05,
-      "loss": 0.4808,
-      "reward": 0.27890625,
-      "reward_std": 0.12941365838050842,
-      "rewards/accuracy_reward": 0.27890625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0631,
+      "reward": 1.353125,
+      "reward_std": 0.26668183971196413,
+      "rewards/accuracy_reward": 0.3578125,
+      "rewards/format_reward": 0.9953125,
       "step": 30
     },
     {
-      "completion_length": 7.16484375,
+      "completion_length": 93.9765625,
       "epoch": 0.2557077625570776,
-      "grad_norm": 8.31716537475586,
-      "kl": 3.0578125,
+      "grad_norm": 3.0016226768493652,
+      "kl": 0.50341796875,
       "learning_rate": 1.8573146280763327e-05,
-      "loss": 0.1107,
-      "reward": 0.2359375,
-      "reward_std": 0.14111573789268733,
-      "rewards/accuracy_reward": 0.2359375,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0361,
+      "reward": 1.3046875,
+      "reward_std": 0.26828942373394965,
+      "rewards/accuracy_reward": 0.31640625,
+      "rewards/format_reward": 0.98828125,
       "step": 35
     },
     {
-      "completion_length": 16.390625,
+      "completion_length": 117.1453125,
       "epoch": 0.2922374429223744,
-      "grad_norm": 5.783950328826904,
-      "kl": 2.4962890625,
+      "grad_norm": 2.1085522174835205,
+      "kl": 0.496875,
       "learning_rate": 1.7841198065767107e-05,
-      "loss": 0.1111,
-      "reward": 0.09921875,
-      "reward_std": 0.10410562343895435,
-      "rewards/accuracy_reward": 0.09921875,
-      "rewards/format_reward": 0.0,
+      "loss": -0.0058,
+      "reward": 1.340625,
+      "reward_std": 0.29750518389046193,
+      "rewards/accuracy_reward": 0.353125,
+      "rewards/format_reward": 0.9875,
       "step": 40
     },
     {
-      "completion_length": 8.3453125,
+      "completion_length": 77.484375,
       "epoch": 0.3287671232876712,
-      "grad_norm": 3.301870107650757,
-      "kl": 2.53984375,
+      "grad_norm": 1.8194400072097778,
+      "kl": 0.4939453125,
       "learning_rate": 1.6979441547663434e-05,
-      "loss": 0.112,
-      "reward": 0.24921875,
-      "reward_std": 0.08787706252187491,
-      "rewards/accuracy_reward": 0.24921875,
-      "rewards/format_reward": 0.0,
+      "loss": -0.02,
+      "reward": 1.3625,
+      "reward_std": 0.2868320092558861,
+      "rewards/accuracy_reward": 0.3734375,
+      "rewards/format_reward": 0.9890625,
       "step": 45
     },
     {
-      "completion_length": 7.115625,
+      "completion_length": 57.68671875,
       "epoch": 0.365296803652968,
-      "grad_norm": 23.468732833862305,
-      "kl": 1.8494140625,
+      "grad_norm": 1.7973569631576538,
+      "kl": 0.652001953125,
       "learning_rate": 1.6002142805483686e-05,
-      "loss": 0.0745,
-      "reward": 0.30078125,
-      "reward_std": 0.16333960443735124,
-      "rewards/accuracy_reward": 0.30078125,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0459,
+      "reward": 1.3265625,
+      "reward_std": 0.2394094867631793,
+      "rewards/accuracy_reward": 0.33671875,
+      "rewards/format_reward": 0.98984375,
       "step": 50
     },
     {
-      "completion_length": 8.0453125,
+      "completion_length": 64.959375,
       "epoch": 0.4018264840182648,
-      "grad_norm": 753.44921875,
-      "kl": 19.584765625,
+      "grad_norm": 2.802725076675415,
+      "kl": 0.602783203125,
       "learning_rate": 1.4925480679538646e-05,
-      "loss": 0.7832,
-      "reward": 0.37109375,
-      "reward_std": 0.1843588210642338,
-      "rewards/accuracy_reward": 0.37109375,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0103,
+      "reward": 1.3625,
+      "reward_std": 0.1871818160638213,
+      "rewards/accuracy_reward": 0.37734375,
+      "rewards/format_reward": 0.98515625,
       "step": 55
     },
     {
-      "completion_length": 7.13046875,
+      "completion_length": 94.5359375,
       "epoch": 0.4383561643835616,
-      "grad_norm": 6.451023578643799,
-      "kl": 2.43876953125,
+      "grad_norm": 2.600384473800659,
+      "kl": 0.4912109375,
       "learning_rate": 1.3767278936351853e-05,
-      "loss": 0.1025,
-      "reward": 0.35,
-      "reward_std": 0.18619631081819535,
-      "rewards/accuracy_reward": 0.35,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0076,
+      "reward": 1.33203125,
+      "reward_std": 0.1461961718276143,
+      "rewards/accuracy_reward": 0.33828125,
+      "rewards/format_reward": 0.99375,
       "step": 60
     },
     {
-      "completion_length": 6.37109375,
+      "completion_length": 60.94140625,
       "epoch": 0.4748858447488584,
-      "grad_norm": 323.1124267578125,
-      "kl": 7.830859375,
+      "grad_norm": 2.3066651821136475,
+      "kl": 0.913916015625,
       "learning_rate": 1.2546711202412287e-05,
-      "loss": 0.3246,
-      "reward": 0.16640625,
-      "reward_std": 0.13669583052396775,
-      "rewards/accuracy_reward": 0.16640625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0148,
+      "reward": 1.25546875,
+      "reward_std": 0.26173087432980535,
+      "rewards/accuracy_reward": 0.3359375,
+      "rewards/format_reward": 0.91953125,
       "step": 65
     },
     {
-      "completion_length": 9.84453125,
+      "completion_length": 20.51796875,
       "epoch": 0.5114155251141552,
-      "grad_norm": 26.08424186706543,
-      "kl": 3.50234375,
+      "grad_norm": 13.061442375183105,
+      "kl": 6.19736328125,
       "learning_rate": 1.1283983551465512e-05,
-      "loss": 0.1512,
-      "reward": 0.1640625,
-      "reward_std": 0.1588148871436715,
-      "rewards/accuracy_reward": 0.1640625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.2759,
+      "reward": 1.31796875,
+      "reward_std": 0.2789971936494112,
+      "rewards/accuracy_reward": 0.39375,
+      "rewards/format_reward": 0.92421875,
       "step": 70
     },
     {
-      "completion_length": 9.3984375,
+      "completion_length": 43.41796875,
       "epoch": 0.547945205479452,
-      "grad_norm": 9.223825454711914,
-      "kl": 2.7298828125,
+      "grad_norm": 441.85443115234375,
+      "kl": 2.45595703125,
       "learning_rate": 1e-05,
-      "loss": 0.1352,
-      "reward": 0.3015625,
-      "reward_std": 0.13396240305155516,
-      "rewards/accuracy_reward": 0.3015625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1115,
+      "reward": 1.0578125,
+      "reward_std": 0.48872011750936506,
+      "rewards/accuracy_reward": 0.278125,
+      "rewards/format_reward": 0.7796875,
       "step": 75
     },
     {
-      "completion_length": 7.01796875,
+      "completion_length": 46.90703125,
       "epoch": 0.5844748858447488,
-      "grad_norm": 2.7007009983062744,
-      "kl": 2.04384765625,
+      "grad_norm": 1.9799107313156128,
+      "kl": 1.23125,
       "learning_rate": 8.71601644853449e-06,
-      "loss": 0.07,
-      "reward": 0.35390625,
-      "reward_std": 0.14382591843605042,
-      "rewards/accuracy_reward": 0.35390625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0292,
+      "reward": 0.984375,
+      "reward_std": 0.4891943013295531,
+      "rewards/accuracy_reward": 0.25703125,
+      "rewards/format_reward": 0.72734375,
       "step": 80
     },
     {
-      "completion_length": 6.99140625,
+      "completion_length": 13.75546875,
       "epoch": 0.6210045662100456,
-      "grad_norm": 2.9610671997070312,
-      "kl": 1.62041015625,
+      "grad_norm": 4.793896198272705,
+      "kl": 1.34541015625,
       "learning_rate": 7.453288797587714e-06,
-      "loss": 0.0512,
-      "reward": 0.4578125,
-      "reward_std": 0.13595965169370175,
-      "rewards/accuracy_reward": 0.4578125,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0482,
+      "reward": 1.31484375,
+      "reward_std": 0.14597927127033472,
+      "rewards/accuracy_reward": 0.33671875,
+      "rewards/format_reward": 0.978125,
       "step": 85
     },
     {
-      "completion_length": 7.00859375,
+      "completion_length": 13.13359375,
       "epoch": 0.6575342465753424,
-      "grad_norm": 89.31245422363281,
-      "kl": 4.942578125,
+      "grad_norm": 12.646719932556152,
+      "kl": 1.4255859375,
       "learning_rate": 6.232721063648148e-06,
-      "loss": 0.2069,
-      "reward": 0.33359375,
-      "reward_std": 0.057549753412604335,
-      "rewards/accuracy_reward": 0.33359375,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0557,
+      "reward": 1.34296875,
+      "reward_std": 0.10623529590666295,
+      "rewards/accuracy_reward": 0.3515625,
+      "rewards/format_reward": 0.99140625,
       "step": 90
     },
     {
-      "completion_length": 7.11484375,
+      "completion_length": 15.2046875,
       "epoch": 0.6940639269406392,
-      "grad_norm": 9.96738052368164,
-      "kl": 1.862890625,
+      "grad_norm": 2.1448068618774414,
+      "kl": 1.299609375,
       "learning_rate": 5.074519320461358e-06,
-      "loss": 0.0776,
-      "reward": 0.34140625,
-      "reward_std": 0.12425038442015648,
-      "rewards/accuracy_reward": 0.34140625,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0404,
+      "reward": 1.3171875,
+      "reward_std": 0.14243474025279285,
+      "rewards/accuracy_reward": 0.33125,
+      "rewards/format_reward": 0.9859375,
       "step": 95
     },
     {
-      "completion_length": 7.18671875,
+      "completion_length": 22.12265625,
       "epoch": 0.730593607305936,
-      "grad_norm": 2.228224992752075,
-      "kl": 1.60244140625,
+      "grad_norm": 13.79468822479248,
+      "kl": 1.4185546875,
       "learning_rate": 3.997857194516319e-06,
-      "loss": 0.0645,
-      "reward": 0.3796875,
-      "reward_std": 0.10868480261415243,
-      "rewards/accuracy_reward": 0.3796875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0562,
+      "reward": 1.3484375,
+      "reward_std": 0.19392489716410638,
+      "rewards/accuracy_reward": 0.3953125,
+      "rewards/format_reward": 0.953125,
       "step": 100
     },
     {
       "epoch": 0.730593607305936,
-      "eval_completion_length": 7.259560502283105,
-      "eval_kl": 2.0344320776255707,
-      "eval_loss": 0.0820033997297287,
-      "eval_reward": 0.40156963470319634,
-      "eval_reward_std": 0.1660658570729434,
-      "eval_rewards/accuracy_reward": 0.40156963470319634,
-      "eval_rewards/format_reward": 0.0,
-      "eval_runtime": 1129.5631,
-      "eval_samples_per_second": 3.875,
-      "eval_steps_per_second": 0.969,
+      "eval_completion_length": 61.36258561643836,
+      "eval_kl": 2.0426084474885844,
+      "eval_loss": 0.08902593702077866,
+      "eval_reward": 1.2051084474885845,
+      "eval_reward_std": 0.4570505227672455,
+      "eval_rewards/accuracy_reward": 0.39023972602739726,
+      "eval_rewards/format_reward": 0.8148687214611872,
+      "eval_runtime": 25621.0164,
+      "eval_samples_per_second": 0.171,
+      "eval_steps_per_second": 0.043,
       "step": 100
     },
     {
-      "completion_length": 7.7234375,
+      "completion_length": 50.4015625,
       "epoch": 0.7671232876712328,
-      "grad_norm": 28.94325828552246,
-      "kl": 2.4732421875,
+      "grad_norm": 3.6549317836761475,
+      "kl": 1.721875,
       "learning_rate": 3.0205584523365626e-06,
-      "loss": 0.0945,
-      "reward": 0.36171875,
-      "reward_std": 0.13725076355040072,
-      "rewards/accuracy_reward": 0.36171875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.035,
+      "reward": 1.27421875,
+      "reward_std": 0.3447163349017501,
+      "rewards/accuracy_reward": 0.38671875,
+      "rewards/format_reward": 0.8875,
       "step": 105
     },
     {
-      "completion_length": 7.7703125,
+      "completion_length": 43.98359375,
       "epoch": 0.8036529680365296,
-      "grad_norm": 4.636743068695068,
-      "kl": 2.106640625,
+      "grad_norm": 2.842573404312134,
+      "kl": 1.8482421875,
       "learning_rate": 2.158801934232897e-06,
-      "loss": 0.1132,
-      "reward": 0.32734375,
-      "reward_std": 0.14027298595756293,
-      "rewards/accuracy_reward": 0.32734375,
-      "rewards/format_reward": 0.0,
+      "loss": 0.0649,
+      "reward": 1.240625,
+      "reward_std": 0.2724386781454086,
+      "rewards/accuracy_reward": 0.321875,
+      "rewards/format_reward": 0.91875,
       "step": 110
     },
     {
-      "completion_length": 7.78359375,
+      "completion_length": 50.07265625,
       "epoch": 0.8401826484018264,
-      "grad_norm": 22.67841911315918,
-      "kl": 2.292578125,
+      "grad_norm": 5.093225479125977,
+      "kl": 2.44453125,
       "learning_rate": 1.426853719236676e-06,
-      "loss": 0.0942,
-      "reward": 0.3296875,
-      "reward_std": 0.14898151364177464,
-      "rewards/accuracy_reward": 0.3296875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1351,
+      "reward": 1.1875,
+      "reward_std": 0.3050973150879145,
+      "rewards/accuracy_reward": 0.29296875,
+      "rewards/format_reward": 0.89453125,
       "step": 115
     },
     {
-      "completion_length": 7.56015625,
+      "completion_length": 66.58828125,
       "epoch": 0.8767123287671232,
-      "grad_norm": 2.22091007232666,
-      "kl": 1.76455078125,
+      "grad_norm": 3.9608869552612305,
+      "kl": 2.5376953125,
       "learning_rate": 8.368309551299536e-07,
-      "loss": 0.0637,
-      "reward": 0.396875,
-      "reward_std": 0.11804735492914915,
-      "rewards/accuracy_reward": 0.396875,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1227,
+      "reward": 1.29765625,
+      "reward_std": 0.35280441734939816,
+      "rewards/accuracy_reward": 0.40390625,
+      "rewards/format_reward": 0.89375,
       "step": 120
     },
     {
-      "completion_length": 7.64296875,
+      "completion_length": 86.17265625,
       "epoch": 0.91324200913242,
-      "grad_norm": 574.54296875,
-      "kl": 6.1111328125,
+      "grad_norm": 4.511857509613037,
+      "kl": 2.7826171875,
       "learning_rate": 3.985012632839824e-07,
-      "loss": 0.2768,
-      "reward": 0.38984375,
-      "reward_std": 0.15423668939620255,
-      "rewards/accuracy_reward": 0.38984375,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1011,
+      "reward": 1.24921875,
+      "reward_std": 0.31032323855906724,
+      "rewards/accuracy_reward": 0.33828125,
+      "rewards/format_reward": 0.9109375,
       "step": 125
     },
     {
-      "completion_length": 7.64296875,
+      "completion_length": 81.040625,
       "epoch": 0.9497716894977168,
-      "grad_norm": 2.4676826000213623,
-      "kl": 1.837109375,
+      "grad_norm": 3.5829803943634033,
+      "kl": 3.201953125,
       "learning_rate": 1.1912103908922945e-07,
-      "loss": 0.0508,
-      "reward": 0.403125,
-      "reward_std": 0.12604196835309267,
-      "rewards/accuracy_reward": 0.403125,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1484,
+      "reward": 1.275,
+      "reward_std": 0.31014324594289067,
+      "rewards/accuracy_reward": 0.35859375,
+      "rewards/format_reward": 0.91640625,
       "step": 130
     },
     {
-      "completion_length": 7.83671875,
+      "completion_length": 89.2578125,
       "epoch": 0.9863013698630136,
-      "grad_norm": 3.2783923149108887,
-      "kl": 1.899609375,
+      "grad_norm": 6.557438373565674,
+      "kl": 3.0515625,
       "learning_rate": 3.315324856869584e-09,
-      "loss": 0.0806,
-      "reward": 0.33828125,
-      "reward_std": 0.13399234507232904,
-      "rewards/accuracy_reward": 0.33828125,
-      "rewards/format_reward": 0.0,
+      "loss": 0.1109,
+      "reward": 1.20390625,
+      "reward_std": 0.32457808069884775,
+      "rewards/accuracy_reward": 0.3,
+      "rewards/format_reward": 0.90390625,
       "step": 135
     },
     {
-      "completion_length": 7.59375,
+      "completion_length": 95.453125,
       "epoch": 0.993607305936073,
-      "kl": 5.6884765625,
-      "reward": 0.31640625,
-      "reward_std": 0.11390282679349184,
-      "rewards/accuracy_reward": 0.31640625,
-      "rewards/format_reward": 0.0,
+      "kl": 3.2041015625,
+      "reward": 1.2265625,
+      "reward_std": 0.43310215324163437,
+      "rewards/accuracy_reward": 0.3359375,
+      "rewards/format_reward": 0.890625,
       "step": 136,
       "total_flos": 0.0,
-      "train_loss": 0.17424644481669768,
-      "train_runtime": 23830.442,
-      "train_samples_per_second": 0.184,
-      "train_steps_per_second": 0.006
+      "train_loss": 0.0634942120351993,
+      "train_runtime": 123112.1165,
+      "train_samples_per_second": 0.036,
+      "train_steps_per_second": 0.001
     }
   ],
   "logging_steps": 5,