{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3161794977356937,
  "eval_steps": 500,
  "global_step": 24,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.5010076761245728,
      "learning_rate": 6.25e-08,
      "logits/chosen": 9.988622665405273,
      "logits/rejected": 10.698101997375488,
      "logps/chosen": -102.88545989990234,
      "logps/ref_chosen": -102.88545989990234,
      "logps/ref_rejected": -121.84871673583984,
      "logps/rejected": -121.84871673583984,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.7802621126174927,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.208279609680176,
      "logits/rejected": 11.06594467163086,
      "logps/chosen": -107.70349884033203,
      "logps/ref_chosen": -107.70349884033203,
      "logps/ref_rejected": -121.89966583251953,
      "logps/rejected": -121.89966583251953,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.5995805859565735,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.029329299926758,
      "logits/rejected": 11.023927688598633,
      "logps/chosen": -107.6470947265625,
      "logps/ref_chosen": -107.98188781738281,
      "logps/ref_rejected": -124.51527404785156,
      "logps/rejected": -124.29098510742188,
      "loss": 0.6926,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0033478327095508575,
      "rewards/margins": 0.001104944385588169,
      "rewards/rejected": 0.0022428883239626884,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.5383147597312927,
      "learning_rate": 2.5e-07,
      "logits/chosen": 9.823471069335938,
      "logits/rejected": 10.842323303222656,
      "logps/chosen": -109.73627471923828,
      "logps/ref_chosen": -109.20836639404297,
      "logps/ref_rejected": -119.23908996582031,
      "logps/rejected": -119.65444946289062,
      "loss": 0.6937,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.005279023200273514,
      "rewards/margins": -0.001125341048464179,
      "rewards/rejected": -0.004153682850301266,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.5302512645721436,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.209351539611816,
      "logits/rejected": 10.967523574829102,
      "logps/chosen": -103.73981475830078,
      "logps/ref_chosen": -103.87680053710938,
      "logps/ref_rejected": -118.41618347167969,
      "logps/rejected": -118.11978149414062,
      "loss": 0.694,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": 0.0013697518734261394,
      "rewards/margins": -0.001594369299709797,
      "rewards/rejected": 0.002964121289551258,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.6919645667076111,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.676691055297852,
      "logits/rejected": 11.460196495056152,
      "logps/chosen": -108.08129119873047,
      "logps/ref_chosen": -107.58968353271484,
      "logps/ref_rejected": -122.07303619384766,
      "logps/rejected": -122.37925720214844,
      "loss": 0.6941,
      "rewards/accuracies": 0.421875,
      "rewards/chosen": -0.004916056990623474,
      "rewards/margins": -0.0018538986332714558,
      "rewards/rejected": -0.0030621583573520184,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.40329915285110474,
      "learning_rate": 4.375e-07,
      "logits/chosen": 10.017489433288574,
      "logits/rejected": 10.722752571105957,
      "logps/chosen": -107.77075958251953,
      "logps/ref_chosen": -107.42727661132812,
      "logps/ref_rejected": -116.87063598632812,
      "logps/rejected": -116.98759460449219,
      "loss": 0.6943,
      "rewards/accuracies": 0.4140625,
      "rewards/chosen": -0.0034348624758422375,
      "rewards/margins": -0.0022651171311736107,
      "rewards/rejected": -0.0011697453446686268,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.4481956362724304,
      "learning_rate": 5e-07,
      "logits/chosen": 10.191514015197754,
      "logits/rejected": 11.094213485717773,
      "logps/chosen": -106.06684112548828,
      "logps/ref_chosen": -105.60282135009766,
      "logps/ref_rejected": -119.53916931152344,
      "logps/rejected": -119.9333267211914,
      "loss": 0.6935,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.004640196915715933,
      "rewards/margins": -0.0006986188236624002,
      "rewards/rejected": -0.003941578324884176,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.5002302527427673,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.164933204650879,
      "logits/rejected": 11.139327049255371,
      "logps/chosen": -106.06144714355469,
      "logps/ref_chosen": -105.46086120605469,
      "logps/ref_rejected": -119.00373840332031,
      "logps/rejected": -119.59027862548828,
      "loss": 0.6932,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.006005657836794853,
      "rewards/margins": -0.00014029807061888278,
      "rewards/rejected": -0.005865359678864479,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.6467388868331909,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 9.98875904083252,
      "logits/rejected": 10.815544128417969,
      "logps/chosen": -105.14952850341797,
      "logps/ref_chosen": -104.21009826660156,
      "logps/ref_rejected": -118.9209213256836,
      "logps/rejected": -119.72019958496094,
      "loss": 0.6939,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.00939436536282301,
      "rewards/margins": -0.0014015533961355686,
      "rewards/rejected": -0.007992811501026154,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.8090001344680786,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 9.946345329284668,
      "logits/rejected": 11.13135814666748,
      "logps/chosen": -107.09854125976562,
      "logps/ref_chosen": -105.94319152832031,
      "logps/ref_rejected": -122.76007843017578,
      "logps/rejected": -123.9129409790039,
      "loss": 0.6932,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.01155336108058691,
      "rewards/margins": -2.4825334548950195e-05,
      "rewards/rejected": -0.011528536677360535,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.49643445014953613,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 9.876545906066895,
      "logits/rejected": 10.567835807800293,
      "logps/chosen": -109.7830810546875,
      "logps/ref_chosen": -109.08442687988281,
      "logps/ref_rejected": -121.41947174072266,
      "logps/rejected": -122.12468719482422,
      "loss": 0.6931,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.006986413151025772,
      "rewards/margins": 6.572058191522956e-05,
      "rewards/rejected": -0.00705213425680995,
      "step": 12
    },
    {
      "epoch": 0.17126389460683408,
      "grad_norm": 0.5409023761749268,
      "learning_rate": 4.931607263312032e-07,
      "logits/chosen": 9.916489601135254,
      "logits/rejected": 10.99366283416748,
      "logps/chosen": -105.78418731689453,
      "logps/ref_chosen": -104.62150573730469,
      "logps/ref_rejected": -119.55384063720703,
      "logps/rejected": -120.60539245605469,
      "loss": 0.6937,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.011626748368144035,
      "rewards/margins": -0.0011113437358289957,
      "rewards/rejected": -0.010515404865145683,
      "step": 13
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 0.9010350108146667,
      "learning_rate": 4.9017129689421e-07,
      "logits/chosen": 10.480968475341797,
      "logits/rejected": 11.599580764770508,
      "logps/chosen": -107.57891845703125,
      "logps/ref_chosen": -106.179443359375,
      "logps/ref_rejected": -120.73036193847656,
      "logps/rejected": -122.02151489257812,
      "loss": 0.6937,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.013994838111102581,
      "rewards/margins": -0.0010832665720954537,
      "rewards/rejected": -0.012911571189761162,
      "step": 14
    },
    {
      "epoch": 0.19761218608480857,
      "grad_norm": 0.8957933187484741,
      "learning_rate": 4.866539188226085e-07,
      "logits/chosen": 9.80737018585205,
      "logits/rejected": 10.738137245178223,
      "logps/chosen": -107.41307067871094,
      "logps/ref_chosen": -105.70547485351562,
      "logps/ref_rejected": -118.89997863769531,
      "logps/rejected": -120.64563751220703,
      "loss": 0.693,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.01707591488957405,
      "rewards/margins": 0.00038063188549131155,
      "rewards/rejected": -0.017456548288464546,
      "step": 15
    },
    {
      "epoch": 0.2107863318237958,
      "grad_norm": 0.7111092805862427,
      "learning_rate": 4.826163240767716e-07,
      "logits/chosen": 10.634671211242676,
      "logits/rejected": 11.238730430603027,
      "logps/chosen": -110.74053955078125,
      "logps/ref_chosen": -108.86376953125,
      "logps/ref_rejected": -122.1635513305664,
      "logps/rejected": -124.17098999023438,
      "loss": 0.6925,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.018767736852169037,
      "rewards/margins": 0.0013066575629636645,
      "rewards/rejected": -0.02007439360022545,
      "step": 16
    },
    {
      "epoch": 0.22396047756278303,
      "grad_norm": 0.5599011778831482,
      "learning_rate": 4.780673881662242e-07,
      "logits/chosen": 10.138323783874512,
      "logits/rejected": 10.76909065246582,
      "logps/chosen": -104.49694061279297,
      "logps/ref_chosen": -102.93986511230469,
      "logps/ref_rejected": -119.43718719482422,
      "logps/rejected": -121.1658935546875,
      "loss": 0.6923,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.015570812858641148,
      "rewards/margins": 0.0017161847790703177,
      "rewards/rejected": -0.0172869972884655,
      "step": 17
    },
    {
      "epoch": 0.23713462330177026,
      "grad_norm": 0.7006244659423828,
      "learning_rate": 4.730171106393466e-07,
      "logits/chosen": 10.374225616455078,
      "logits/rejected": 11.157809257507324,
      "logps/chosen": -105.8244400024414,
      "logps/ref_chosen": -103.81341552734375,
      "logps/ref_rejected": -117.45123291015625,
      "logps/rejected": -119.37814331054688,
      "loss": 0.6936,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.020110249519348145,
      "rewards/margins": -0.0008410783484578133,
      "rewards/rejected": -0.019269172102212906,
      "step": 18
    },
    {
      "epoch": 0.2503087690407575,
      "grad_norm": 0.49562451243400574,
      "learning_rate": 4.6747659310219757e-07,
      "logits/chosen": 10.303974151611328,
      "logits/rejected": 10.965604782104492,
      "logps/chosen": -109.81462860107422,
      "logps/ref_chosen": -107.85797119140625,
      "logps/ref_rejected": -121.88042449951172,
      "logps/rejected": -124.41007232666016,
      "loss": 0.6903,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.019566601142287254,
      "rewards/margins": 0.005729879252612591,
      "rewards/rejected": -0.02529647946357727,
      "step": 19
    },
    {
      "epoch": 0.2634829147797447,
      "grad_norm": 0.48442593216896057,
      "learning_rate": 4.6145801481477433e-07,
      "logits/chosen": 10.682939529418945,
      "logits/rejected": 11.487958908081055,
      "logps/chosen": -105.8414077758789,
      "logps/ref_chosen": -103.42721557617188,
      "logps/ref_rejected": -116.7796630859375,
      "logps/rejected": -119.14535522460938,
      "loss": 0.6934,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.024141818284988403,
      "rewards/margins": -0.00048486533341929317,
      "rewards/rejected": -0.023656953126192093,
      "step": 20
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 0.6605204343795776,
      "learning_rate": 4.549746059183561e-07,
      "logits/chosen": 9.703460693359375,
      "logits/rejected": 10.792010307312012,
      "logps/chosen": -109.3312759399414,
      "logps/ref_chosen": -106.60163879394531,
      "logps/ref_rejected": -124.56562805175781,
      "logps/rejected": -127.45460510253906,
      "loss": 0.6924,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.027296334505081177,
      "rewards/margins": 0.0015935557894408703,
      "rewards/rejected": -0.028889887034893036,
      "step": 21
    },
    {
      "epoch": 0.28983120625771924,
      "grad_norm": 0.8831092715263367,
      "learning_rate": 4.480406183527823e-07,
      "logits/chosen": 10.168815612792969,
      "logits/rejected": 11.040711402893066,
      "logps/chosen": -107.1961669921875,
      "logps/ref_chosen": -103.77696228027344,
      "logps/ref_rejected": -118.73616027832031,
      "logps/rejected": -121.80266571044922,
      "loss": 0.695,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.03419206291437149,
      "rewards/margins": -0.0035268948413431644,
      "rewards/rejected": -0.030665166676044464,
      "step": 22
    },
    {
      "epoch": 0.3030053519967065,
      "grad_norm": 0.37249696254730225,
      "learning_rate": 4.4067129452759546e-07,
      "logits/chosen": 10.050610542297363,
      "logits/rejected": 11.06921100616455,
      "logps/chosen": -108.137451171875,
      "logps/ref_chosen": -104.72956085205078,
      "logps/ref_rejected": -121.35556030273438,
      "logps/rejected": -124.6715316772461,
      "loss": 0.6937,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.03407883644104004,
      "rewards/margins": -0.0009191110148094594,
      "rewards/rejected": -0.03315972909331322,
      "step": 23
    },
    {
      "epoch": 0.3161794977356937,
      "grad_norm": 0.5831263661384583,
      "learning_rate": 4.3288283381591725e-07,
      "logits/chosen": 10.146599769592285,
      "logits/rejected": 10.979142189025879,
      "logps/chosen": -109.36030578613281,
      "logps/ref_chosen": -105.88758087158203,
      "logps/ref_rejected": -125.69054412841797,
      "logps/rejected": -129.528076171875,
      "loss": 0.6914,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.034727297723293304,
      "rewards/margins": 0.003648004261776805,
      "rewards/rejected": -0.03837530314922333,
      "step": 24
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}