{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.47426924660354053,
  "eval_steps": 500,
  "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.4815484285354614,
      "learning_rate": 6.25e-08,
      "logits/chosen": 10.088521957397461,
      "logits/rejected": 10.263787269592285,
      "logps/chosen": -163.12940979003906,
      "logps/ref_chosen": -163.12940979003906,
      "logps/ref_rejected": -171.48428344726562,
      "logps/rejected": -171.48428344726562,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.627070426940918,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.592972755432129,
      "logits/rejected": 10.720216751098633,
      "logps/chosen": -155.91574096679688,
      "logps/ref_chosen": -155.91574096679688,
      "logps/ref_rejected": -161.34078979492188,
      "logps/rejected": -161.34078979492188,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.4148138165473938,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.043272972106934,
      "logits/rejected": 10.398024559020996,
      "logps/chosen": -158.3568115234375,
      "logps/ref_chosen": -157.65640258789062,
      "logps/ref_rejected": -168.5882110595703,
      "logps/rejected": -168.91085815429688,
      "loss": 0.6951,
      "rewards/accuracies": 0.3828125,
      "rewards/chosen": -0.007004000246524811,
      "rewards/margins": -0.0037774655502289534,
      "rewards/rejected": -0.003226534929126501,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.7029770612716675,
      "learning_rate": 2.5e-07,
      "logits/chosen": 10.250253677368164,
      "logits/rejected": 10.45008659362793,
      "logps/chosen": -164.01119995117188,
      "logps/ref_chosen": -162.89878845214844,
      "logps/ref_rejected": -168.30462646484375,
      "logps/rejected": -169.1818389892578,
      "loss": 0.6944,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": -0.01112416572868824,
      "rewards/margins": -0.0023521997500211,
      "rewards/rejected": -0.008771965280175209,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.4063253104686737,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.442557334899902,
      "logits/rejected": 10.740192413330078,
      "logps/chosen": -156.1859130859375,
      "logps/ref_chosen": -156.03257751464844,
      "logps/ref_rejected": -165.37911987304688,
      "logps/rejected": -165.6518096923828,
      "loss": 0.6926,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.001533512957394123,
      "rewards/margins": 0.001193464733660221,
      "rewards/rejected": -0.002726977691054344,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.4845049977302551,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.906261444091797,
      "logits/rejected": 11.201122283935547,
      "logps/chosen": -162.45692443847656,
      "logps/ref_chosen": -161.98570251464844,
      "logps/ref_rejected": -169.72560119628906,
      "logps/rejected": -170.18275451660156,
      "loss": 0.6932,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.004712029360234737,
      "rewards/margins": -0.00014030117017682642,
      "rewards/rejected": -0.004571728408336639,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.8172655701637268,
      "learning_rate": 4.375e-07,
      "logits/chosen": 9.883949279785156,
      "logits/rejected": 10.030972480773926,
      "logps/chosen": -157.43295288085938,
      "logps/ref_chosen": -157.26968383789062,
      "logps/ref_rejected": -167.37155151367188,
      "logps/rejected": -167.53939819335938,
      "loss": 0.6931,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.0016327811172232032,
      "rewards/margins": 4.5756096369586885e-05,
      "rewards/rejected": -0.0016785369953140616,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.588524341583252,
      "learning_rate": 5e-07,
      "logits/chosen": 10.633930206298828,
      "logits/rejected": 10.81590747833252,
      "logps/chosen": -162.8237762451172,
      "logps/ref_chosen": -162.2948455810547,
      "logps/ref_rejected": -172.98866271972656,
      "logps/rejected": -173.56680297851562,
      "loss": 0.6929,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.005289244465529919,
      "rewards/margins": 0.0004922347725369036,
      "rewards/rejected": -0.005781479645520449,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.46077635884284973,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.326555252075195,
      "logits/rejected": 10.736672401428223,
      "logps/chosen": -164.5288543701172,
      "logps/ref_chosen": -163.37091064453125,
      "logps/ref_rejected": -173.1500701904297,
      "logps/rejected": -174.08392333984375,
      "loss": 0.6943,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.011579334735870361,
      "rewards/margins": -0.0022407739888876677,
      "rewards/rejected": -0.00933856051415205,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.673312783241272,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 10.325155258178711,
      "logits/rejected": 10.473593711853027,
      "logps/chosen": -157.8944091796875,
      "logps/ref_chosen": -156.87838745117188,
      "logps/ref_rejected": -165.17373657226562,
      "logps/rejected": -166.20751953125,
      "loss": 0.6931,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.010160216130316257,
      "rewards/margins": 0.00017760891932994127,
      "rewards/rejected": -0.010337824933230877,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.6500194668769836,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 10.476134300231934,
      "logits/rejected": 10.66375732421875,
      "logps/chosen": -161.99935913085938,
      "logps/ref_chosen": -160.73855590820312,
      "logps/ref_rejected": -173.1862030029297,
      "logps/rejected": -174.4076385498047,
      "loss": 0.6934,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.0126079972833395,
      "rewards/margins": -0.00039388981531374156,
      "rewards/rejected": -0.012214107438921928,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.5539909601211548,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 10.318845748901367,
      "logits/rejected": 10.355680465698242,
      "logps/chosen": -167.43121337890625,
      "logps/ref_chosen": -165.21177673339844,
      "logps/ref_rejected": -170.47381591796875,
      "logps/rejected": -172.76483154296875,
      "loss": 0.6928,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.022194450721144676,
      "rewards/margins": 0.0007156741339713335,
      "rewards/rejected": -0.02291012369096279,
      "step": 12
    },
    {
      "epoch": 0.17126389460683408,
      "grad_norm": 0.5150694251060486,
      "learning_rate": 4.931607263312032e-07,
      "logits/chosen": 9.89578914642334,
      "logits/rejected": 10.236948013305664,
      "logps/chosen": -161.29905700683594,
      "logps/ref_chosen": -158.68667602539062,
      "logps/ref_rejected": -168.57968139648438,
      "logps/rejected": -171.51979064941406,
      "loss": 0.6916,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.026123855262994766,
      "rewards/margins": 0.0032772955019026995,
      "rewards/rejected": -0.029401153326034546,
      "step": 13
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 0.6596489548683167,
      "learning_rate": 4.9017129689421e-07,
      "logits/chosen": 10.484810829162598,
      "logits/rejected": 10.99763298034668,
      "logps/chosen": -157.44769287109375,
      "logps/ref_chosen": -153.92340087890625,
      "logps/ref_rejected": -167.03564453125,
      "logps/rejected": -170.4557647705078,
      "loss": 0.6937,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.03524318337440491,
      "rewards/margins": -0.0010417333105579019,
      "rewards/rejected": -0.034201446920633316,
      "step": 14
    },
    {
      "epoch": 0.19761218608480857,
      "grad_norm": 0.39550018310546875,
      "learning_rate": 4.866539188226085e-07,
      "logits/chosen": 10.189282417297363,
      "logits/rejected": 10.43722152709961,
      "logps/chosen": -166.56544494628906,
      "logps/ref_chosen": -162.66110229492188,
      "logps/ref_rejected": -168.7485809326172,
      "logps/rejected": -172.78038024902344,
      "loss": 0.6926,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.03904342278838158,
      "rewards/margins": 0.0012746157590299845,
      "rewards/rejected": -0.04031803831458092,
      "step": 15
    },
    {
      "epoch": 0.2107863318237958,
      "grad_norm": 0.6276482939720154,
      "learning_rate": 4.826163240767716e-07,
      "logits/chosen": 10.743437767028809,
      "logits/rejected": 11.031370162963867,
      "logps/chosen": -166.45135498046875,
      "logps/ref_chosen": -163.39239501953125,
      "logps/ref_rejected": -172.29183959960938,
      "logps/rejected": -175.4534912109375,
      "loss": 0.6927,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.03058951534330845,
      "rewards/margins": 0.0010271857026964426,
      "rewards/rejected": -0.0316167026758194,
      "step": 16
    },
    {
      "epoch": 0.22396047756278303,
      "grad_norm": 0.516729474067688,
      "learning_rate": 4.780673881662242e-07,
      "logits/chosen": 10.332087516784668,
      "logits/rejected": 10.48865032196045,
      "logps/chosen": -157.08522033691406,
      "logps/ref_chosen": -153.6072540283203,
      "logps/ref_rejected": -161.9541473388672,
      "logps/rejected": -165.6874542236328,
      "loss": 0.6919,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.03477972373366356,
      "rewards/margins": 0.0025533493608236313,
      "rewards/rejected": -0.03733307495713234,
      "step": 17
    },
    {
      "epoch": 0.23713462330177026,
      "grad_norm": 0.70009446144104,
      "learning_rate": 4.730171106393466e-07,
      "logits/chosen": 10.40684986114502,
      "logits/rejected": 10.725347518920898,
      "logps/chosen": -158.2038116455078,
      "logps/ref_chosen": -154.3197021484375,
      "logps/ref_rejected": -161.81753540039062,
      "logps/rejected": -165.58631896972656,
      "loss": 0.6938,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.03884127736091614,
      "rewards/margins": -0.0011533537181094289,
      "rewards/rejected": -0.03768792748451233,
      "step": 18
    },
    {
      "epoch": 0.2503087690407575,
      "grad_norm": 0.47613224387168884,
      "learning_rate": 4.6747659310219757e-07,
      "logits/chosen": 10.489011764526367,
      "logits/rejected": 10.455073356628418,
      "logps/chosen": -171.99160766601562,
      "logps/ref_chosen": -167.8755340576172,
      "logps/ref_rejected": -175.09603881835938,
      "logps/rejected": -179.593994140625,
      "loss": 0.6913,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.04116089642047882,
      "rewards/margins": 0.003818710334599018,
      "rewards/rejected": -0.04497961327433586,
      "step": 19
    },
    {
      "epoch": 0.2634829147797447,
      "grad_norm": 0.6483292579650879,
      "learning_rate": 4.6145801481477433e-07,
      "logits/chosen": 10.415058135986328,
      "logits/rejected": 10.774059295654297,
      "logps/chosen": -163.5430450439453,
      "logps/ref_chosen": -159.07583618164062,
      "logps/ref_rejected": -169.23069763183594,
      "logps/rejected": -173.73776245117188,
      "loss": 0.693,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.04467229172587395,
      "rewards/margins": 0.00039826278225518763,
      "rewards/rejected": -0.04507055878639221,
      "step": 20
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 0.5634174942970276,
      "learning_rate": 4.549746059183561e-07,
      "logits/chosen": 10.342830657958984,
      "logits/rejected": 10.680377960205078,
      "logps/chosen": -163.2490997314453,
      "logps/ref_chosen": -159.25521850585938,
      "logps/ref_rejected": -169.57681274414062,
      "logps/rejected": -173.69276428222656,
      "loss": 0.6926,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.03993882238864899,
      "rewards/margins": 0.0012204290833324194,
      "rewards/rejected": -0.0411592535674572,
      "step": 21
    },
    {
      "epoch": 0.28983120625771924,
      "grad_norm": 0.3794897198677063,
      "learning_rate": 4.480406183527823e-07,
      "logits/chosen": 10.29517936706543,
      "logits/rejected": 10.647847175598145,
      "logps/chosen": -161.54783630371094,
      "logps/ref_chosen": -157.0568084716797,
      "logps/ref_rejected": -163.96209716796875,
      "logps/rejected": -168.51736450195312,
      "loss": 0.6929,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.04491012915968895,
      "rewards/margins": 0.0006425387691706419,
      "rewards/rejected": -0.04555266723036766,
      "step": 22
    },
    {
      "epoch": 0.3030053519967065,
      "grad_norm": 0.4016757607460022,
      "learning_rate": 4.4067129452759546e-07,
      "logits/chosen": 10.357274055480957,
      "logits/rejected": 10.63122844696045,
      "logps/chosen": -162.94578552246094,
      "logps/ref_chosen": -158.10250854492188,
      "logps/ref_rejected": -169.85250854492188,
      "logps/rejected": -174.79525756835938,
      "loss": 0.6927,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.04843292012810707,
      "rewards/margins": 0.0009945080382749438,
      "rewards/rejected": -0.04942742735147476,
      "step": 23
    },
    {
      "epoch": 0.3161794977356937,
      "grad_norm": 0.46131113171577454,
      "learning_rate": 4.3288283381591725e-07,
      "logits/chosen": 10.260627746582031,
      "logits/rejected": 10.424566268920898,
      "logps/chosen": -163.2139129638672,
      "logps/ref_chosen": -158.93540954589844,
      "logps/ref_rejected": -168.12344360351562,
      "logps/rejected": -172.52456665039062,
      "loss": 0.6926,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.042785100638866425,
      "rewards/margins": 0.0012263581156730652,
      "rewards/rejected": -0.04401145875453949,
      "step": 24
    },
    {
      "epoch": 0.32935364347468093,
      "grad_norm": 0.4610799252986908,
      "learning_rate": 4.246923569447104e-07,
      "logits/chosen": 10.461551666259766,
      "logits/rejected": 10.855925559997559,
      "logps/chosen": -165.60084533691406,
      "logps/ref_chosen": -161.0833740234375,
      "logps/ref_rejected": -174.85760498046875,
      "logps/rejected": -179.20965576171875,
      "loss": 0.694,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.0451747290790081,
      "rewards/margins": -0.0016541833756491542,
      "rewards/rejected": -0.043520547449588776,
      "step": 25
    },
    {
      "epoch": 0.34252778921366817,
      "grad_norm": 0.41953545808792114,
      "learning_rate": 4.161178683597054e-07,
      "logits/chosen": 10.611435890197754,
      "logits/rejected": 10.745625495910645,
      "logps/chosen": -160.7465057373047,
      "logps/ref_chosen": -156.07315063476562,
      "logps/ref_rejected": -161.84292602539062,
      "logps/rejected": -166.85279846191406,
      "loss": 0.6915,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.046733610332012177,
      "rewards/margins": 0.0033648861572146416,
      "rewards/rejected": -0.05009850114583969,
      "step": 26
    },
    {
      "epoch": 0.3557019349526554,
      "grad_norm": 0.3880956470966339,
      "learning_rate": 4.0717821664772124e-07,
      "logits/chosen": 10.590215682983398,
      "logits/rejected": 10.893061637878418,
      "logps/chosen": -168.67279052734375,
      "logps/ref_chosen": -163.2271728515625,
      "logps/ref_rejected": -171.53738403320312,
      "logps/rejected": -176.9310302734375,
      "loss": 0.6935,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.05445636808872223,
      "rewards/margins": -0.00051975465612486,
      "rewards/rejected": -0.05393661558628082,
      "step": 27
    },
    {
      "epoch": 0.3688760806916426,
      "grad_norm": 0.5345169901847839,
      "learning_rate": 3.978930531033806e-07,
      "logits/chosen": 9.953861236572266,
      "logits/rejected": 10.416406631469727,
      "logps/chosen": -162.322021484375,
      "logps/ref_chosen": -157.08795166015625,
      "logps/ref_rejected": -167.1195068359375,
      "logps/rejected": -172.4853973388672,
      "loss": 0.6925,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.0523407980799675,
      "rewards/margins": 0.001318173250183463,
      "rewards/rejected": -0.053658969700336456,
      "step": 28
    },
    {
      "epoch": 0.3820502264306299,
      "grad_norm": 0.5297831296920776,
      "learning_rate": 3.882827885312998e-07,
      "logits/chosen": 10.323142051696777,
      "logits/rejected": 10.501938819885254,
      "logps/chosen": -168.2061004638672,
      "logps/ref_chosen": -163.59707641601562,
      "logps/ref_rejected": -171.89508056640625,
      "logps/rejected": -176.55738830566406,
      "loss": 0.6929,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.04609035328030586,
      "rewards/margins": 0.0005328265833668411,
      "rewards/rejected": -0.04662318155169487,
      "step": 29
    },
    {
      "epoch": 0.39522437216961714,
      "grad_norm": 0.35810208320617676,
      "learning_rate": 3.7836854837871044e-07,
      "logits/chosen": 10.40945053100586,
      "logits/rejected": 10.931025505065918,
      "logps/chosen": -169.71910095214844,
      "logps/ref_chosen": -164.91160583496094,
      "logps/ref_rejected": -176.66453552246094,
      "logps/rejected": -181.69285583496094,
      "loss": 0.6921,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.04807499051094055,
      "rewards/margins": 0.0022084712982177734,
      "rewards/rejected": -0.050283461809158325,
      "step": 30
    },
    {
      "epoch": 0.4083985179086044,
      "grad_norm": 0.44152358174324036,
      "learning_rate": 3.681721262971413e-07,
      "logits/chosen": 10.004611015319824,
      "logits/rejected": 10.491472244262695,
      "logps/chosen": -161.24798583984375,
      "logps/ref_chosen": -155.95877075195312,
      "logps/ref_rejected": -166.5852508544922,
      "logps/rejected": -172.22703552246094,
      "loss": 0.6915,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.052892111241817474,
      "rewards/margins": 0.003525771899148822,
      "rewards/rejected": -0.05641787871718407,
      "step": 31
    },
    {
      "epoch": 0.4215726636475916,
      "grad_norm": 0.5185390114784241,
      "learning_rate": 3.577159362352426e-07,
      "logits/chosen": 10.27377986907959,
      "logits/rejected": 10.56481647491455,
      "logps/chosen": -167.19921875,
      "logps/ref_chosen": -161.83575439453125,
      "logps/ref_rejected": -169.53759765625,
      "logps/rejected": -174.91342163085938,
      "loss": 0.6932,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.05363469570875168,
      "rewards/margins": 0.00012344191782176495,
      "rewards/rejected": -0.05375813692808151,
      "step": 32
    },
    {
      "epoch": 0.43474680938657884,
      "grad_norm": 1.1196942329406738,
      "learning_rate": 3.470229631680624e-07,
      "logits/chosen": 10.063702583312988,
      "logits/rejected": 10.693009376525879,
      "logps/chosen": -164.40225219726562,
      "logps/ref_chosen": -158.7517547607422,
      "logps/ref_rejected": -168.51002502441406,
      "logps/rejected": -174.00901794433594,
      "loss": 0.694,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.056504976004362106,
      "rewards/margins": -0.0015149968676269054,
      "rewards/rejected": -0.054989978671073914,
      "step": 33
    },
    {
      "epoch": 0.44792095512556607,
      "grad_norm": 0.593383252620697,
      "learning_rate": 3.361167125710832e-07,
      "logits/chosen": 9.993782043457031,
      "logits/rejected": 10.380085945129395,
      "logps/chosen": -170.81832885742188,
      "logps/ref_chosen": -165.12754821777344,
      "logps/ref_rejected": -177.654296875,
      "logps/rejected": -183.54998779296875,
      "loss": 0.6922,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.05690779164433479,
      "rewards/margins": 0.0020490488968789577,
      "rewards/rejected": -0.05895683914422989,
      "step": 34
    },
    {
      "epoch": 0.4610951008645533,
      "grad_norm": 0.4870688319206238,
      "learning_rate": 3.2502115875008516e-07,
      "logits/chosen": 10.418455123901367,
      "logits/rejected": 10.655467987060547,
      "logps/chosen": -165.74551391601562,
      "logps/ref_chosen": -159.895751953125,
      "logps/ref_rejected": -167.39785766601562,
      "logps/rejected": -173.5540008544922,
      "loss": 0.6917,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.058497510850429535,
      "rewards/margins": 0.0030638885218650103,
      "rewards/rejected": -0.06156139820814133,
      "step": 35
    },
    {
      "epoch": 0.47426924660354053,
      "grad_norm": 0.9538622498512268,
      "learning_rate": 3.137606921404191e-07,
      "logits/chosen": 10.286083221435547,
      "logits/rejected": 10.6614408493042,
      "logps/chosen": -170.4355926513672,
      "logps/ref_chosen": -165.02023315429688,
      "logps/ref_rejected": -172.28997802734375,
      "logps/rejected": -177.64358520507812,
      "loss": 0.6935,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.05415371432900429,
      "rewards/margins": -0.0006177356699481606,
      "rewards/rejected": -0.053535979241132736,
      "step": 36
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}