{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9880609304240429,
  "eval_steps": 500,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.5010076761245728,
      "learning_rate": 6.25e-08,
      "logits/chosen": 9.988622665405273,
      "logits/rejected": 10.698101997375488,
      "logps/chosen": -102.88545989990234,
      "logps/ref_chosen": -102.88545989990234,
      "logps/ref_rejected": -121.84871673583984,
      "logps/rejected": -121.84871673583984,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.7802621126174927,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.208279609680176,
      "logits/rejected": 11.06594467163086,
      "logps/chosen": -107.70349884033203,
      "logps/ref_chosen": -107.70349884033203,
      "logps/ref_rejected": -121.89966583251953,
      "logps/rejected": -121.89966583251953,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.5995805859565735,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.029329299926758,
      "logits/rejected": 11.023927688598633,
      "logps/chosen": -107.6470947265625,
      "logps/ref_chosen": -107.98188781738281,
      "logps/ref_rejected": -124.51527404785156,
      "logps/rejected": -124.29098510742188,
      "loss": 0.6926,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0033478327095508575,
      "rewards/margins": 0.001104944385588169,
      "rewards/rejected": 0.0022428883239626884,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.5383147597312927,
      "learning_rate": 2.5e-07,
      "logits/chosen": 9.823471069335938,
      "logits/rejected": 10.842323303222656,
      "logps/chosen": -109.73627471923828,
      "logps/ref_chosen": -109.20836639404297,
      "logps/ref_rejected": -119.23908996582031,
      "logps/rejected": -119.65444946289062,
      "loss": 0.6937,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.005279023200273514,
      "rewards/margins": -0.001125341048464179,
      "rewards/rejected": -0.004153682850301266,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.5302512645721436,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.209351539611816,
      "logits/rejected": 10.967523574829102,
      "logps/chosen": -103.73981475830078,
      "logps/ref_chosen": -103.87680053710938,
      "logps/ref_rejected": -118.41618347167969,
      "logps/rejected": -118.11978149414062,
      "loss": 0.694,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": 0.0013697518734261394,
      "rewards/margins": -0.001594369299709797,
      "rewards/rejected": 0.002964121289551258,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.6919645667076111,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.676691055297852,
      "logits/rejected": 11.460196495056152,
      "logps/chosen": -108.08129119873047,
      "logps/ref_chosen": -107.58968353271484,
      "logps/ref_rejected": -122.07303619384766,
      "logps/rejected": -122.37925720214844,
      "loss": 0.6941,
      "rewards/accuracies": 0.421875,
      "rewards/chosen": -0.004916056990623474,
      "rewards/margins": -0.0018538986332714558,
      "rewards/rejected": -0.0030621583573520184,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.40329915285110474,
      "learning_rate": 4.375e-07,
      "logits/chosen": 10.017489433288574,
      "logits/rejected": 10.722752571105957,
      "logps/chosen": -107.77075958251953,
      "logps/ref_chosen": -107.42727661132812,
      "logps/ref_rejected": -116.87063598632812,
      "logps/rejected": -116.98759460449219,
      "loss": 0.6943,
      "rewards/accuracies": 0.4140625,
      "rewards/chosen": -0.0034348624758422375,
      "rewards/margins": -0.0022651171311736107,
      "rewards/rejected": -0.0011697453446686268,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.4481956362724304,
      "learning_rate": 5e-07,
      "logits/chosen": 10.191514015197754,
      "logits/rejected": 11.094213485717773,
      "logps/chosen": -106.06684112548828,
      "logps/ref_chosen": -105.60282135009766,
      "logps/ref_rejected": -119.53916931152344,
      "logps/rejected": -119.9333267211914,
      "loss": 0.6935,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.004640196915715933,
      "rewards/margins": -0.0006986188236624002,
      "rewards/rejected": -0.003941578324884176,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.5002302527427673,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.164933204650879,
      "logits/rejected": 11.139327049255371,
      "logps/chosen": -106.06144714355469,
      "logps/ref_chosen": -105.46086120605469,
      "logps/ref_rejected": -119.00373840332031,
      "logps/rejected": -119.59027862548828,
      "loss": 0.6932,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.006005657836794853,
      "rewards/margins": -0.00014029807061888278,
      "rewards/rejected": -0.005865359678864479,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.6467388868331909,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 9.98875904083252,
      "logits/rejected": 10.815544128417969,
      "logps/chosen": -105.14952850341797,
      "logps/ref_chosen": -104.21009826660156,
      "logps/ref_rejected": -118.9209213256836,
      "logps/rejected": -119.72019958496094,
      "loss": 0.6939,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.00939436536282301,
      "rewards/margins": -0.0014015533961355686,
      "rewards/rejected": -0.007992811501026154,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.8090001344680786,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 9.946345329284668,
      "logits/rejected": 11.13135814666748,
      "logps/chosen": -107.09854125976562,
      "logps/ref_chosen": -105.94319152832031,
      "logps/ref_rejected": -122.76007843017578,
      "logps/rejected": -123.9129409790039,
      "loss": 0.6932,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.01155336108058691,
      "rewards/margins": -2.4825334548950195e-05,
      "rewards/rejected": -0.011528536677360535,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.49643445014953613,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 9.876545906066895,
      "logits/rejected": 10.567835807800293,
      "logps/chosen": -109.7830810546875,
      "logps/ref_chosen": -109.08442687988281,
      "logps/ref_rejected": -121.41947174072266,
      "logps/rejected": -122.12468719482422,
      "loss": 0.6931,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.006986413151025772,
      "rewards/margins": 6.572058191522956e-05,
      "rewards/rejected": -0.00705213425680995,
      "step": 12
    },
    {
      "epoch": 0.17126389460683408,
      "grad_norm": 0.5409023761749268,
      "learning_rate": 4.931607263312032e-07,
      "logits/chosen": 9.916489601135254,
      "logits/rejected": 10.99366283416748,
      "logps/chosen": -105.78418731689453,
      "logps/ref_chosen": -104.62150573730469,
      "logps/ref_rejected": -119.55384063720703,
      "logps/rejected": -120.60539245605469,
      "loss": 0.6937,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.011626748368144035,
      "rewards/margins": -0.0011113437358289957,
      "rewards/rejected": -0.010515404865145683,
      "step": 13
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 0.9010350108146667,
      "learning_rate": 4.9017129689421e-07,
      "logits/chosen": 10.480968475341797,
      "logits/rejected": 11.599580764770508,
      "logps/chosen": -107.57891845703125,
      "logps/ref_chosen": -106.179443359375,
      "logps/ref_rejected": -120.73036193847656,
      "logps/rejected": -122.02151489257812,
      "loss": 0.6937,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.013994838111102581,
      "rewards/margins": -0.0010832665720954537,
      "rewards/rejected": -0.012911571189761162,
      "step": 14
    },
    {
      "epoch": 0.19761218608480857,
      "grad_norm": 0.8957933187484741,
      "learning_rate": 4.866539188226085e-07,
      "logits/chosen": 9.80737018585205,
      "logits/rejected": 10.738137245178223,
      "logps/chosen": -107.41307067871094,
      "logps/ref_chosen": -105.70547485351562,
      "logps/ref_rejected": -118.89997863769531,
      "logps/rejected": -120.64563751220703,
      "loss": 0.693,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.01707591488957405,
      "rewards/margins": 0.00038063188549131155,
      "rewards/rejected": -0.017456548288464546,
      "step": 15
    },
    {
      "epoch": 0.2107863318237958,
      "grad_norm": 0.7111092805862427,
      "learning_rate": 4.826163240767716e-07,
      "logits/chosen": 10.634671211242676,
      "logits/rejected": 11.238730430603027,
      "logps/chosen": -110.74053955078125,
      "logps/ref_chosen": -108.86376953125,
      "logps/ref_rejected": -122.1635513305664,
      "logps/rejected": -124.17098999023438,
      "loss": 0.6925,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.018767736852169037,
      "rewards/margins": 0.0013066575629636645,
      "rewards/rejected": -0.02007439360022545,
      "step": 16
    },
    {
      "epoch": 0.22396047756278303,
      "grad_norm": 0.5599011778831482,
      "learning_rate": 4.780673881662242e-07,
      "logits/chosen": 10.138323783874512,
      "logits/rejected": 10.76909065246582,
      "logps/chosen": -104.49694061279297,
      "logps/ref_chosen": -102.93986511230469,
      "logps/ref_rejected": -119.43718719482422,
      "logps/rejected": -121.1658935546875,
      "loss": 0.6923,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.015570812858641148,
      "rewards/margins": 0.0017161847790703177,
      "rewards/rejected": -0.0172869972884655,
      "step": 17
    },
    {
      "epoch": 0.23713462330177026,
      "grad_norm": 0.7006244659423828,
      "learning_rate": 4.730171106393466e-07,
      "logits/chosen": 10.374225616455078,
      "logits/rejected": 11.157809257507324,
      "logps/chosen": -105.8244400024414,
      "logps/ref_chosen": -103.81341552734375,
      "logps/ref_rejected": -117.45123291015625,
      "logps/rejected": -119.37814331054688,
      "loss": 0.6936,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.020110249519348145,
      "rewards/margins": -0.0008410783484578133,
      "rewards/rejected": -0.019269172102212906,
      "step": 18
    },
    {
      "epoch": 0.2503087690407575,
      "grad_norm": 0.49562451243400574,
      "learning_rate": 4.6747659310219757e-07,
      "logits/chosen": 10.303974151611328,
      "logits/rejected": 10.965604782104492,
      "logps/chosen": -109.81462860107422,
      "logps/ref_chosen": -107.85797119140625,
      "logps/ref_rejected": -121.88042449951172,
      "logps/rejected": -124.41007232666016,
      "loss": 0.6903,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.019566601142287254,
      "rewards/margins": 0.005729879252612591,
      "rewards/rejected": -0.02529647946357727,
      "step": 19
    },
    {
      "epoch": 0.2634829147797447,
      "grad_norm": 0.48442593216896057,
      "learning_rate": 4.6145801481477433e-07,
      "logits/chosen": 10.682939529418945,
      "logits/rejected": 11.487958908081055,
      "logps/chosen": -105.8414077758789,
      "logps/ref_chosen": -103.42721557617188,
      "logps/ref_rejected": -116.7796630859375,
      "logps/rejected": -119.14535522460938,
      "loss": 0.6934,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.024141818284988403,
      "rewards/margins": -0.00048486533341929317,
      "rewards/rejected": -0.023656953126192093,
      "step": 20
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 0.6605204343795776,
      "learning_rate": 4.549746059183561e-07,
      "logits/chosen": 9.703460693359375,
      "logits/rejected": 10.792010307312012,
      "logps/chosen": -109.3312759399414,
      "logps/ref_chosen": -106.60163879394531,
      "logps/ref_rejected": -124.56562805175781,
      "logps/rejected": -127.45460510253906,
      "loss": 0.6924,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.027296334505081177,
      "rewards/margins": 0.0015935557894408703,
      "rewards/rejected": -0.028889887034893036,
      "step": 21
    },
    {
      "epoch": 0.28983120625771924,
      "grad_norm": 0.8831092715263367,
      "learning_rate": 4.480406183527823e-07,
      "logits/chosen": 10.168815612792969,
      "logits/rejected": 11.040711402893066,
      "logps/chosen": -107.1961669921875,
      "logps/ref_chosen": -103.77696228027344,
      "logps/ref_rejected": -118.73616027832031,
      "logps/rejected": -121.80266571044922,
      "loss": 0.695,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.03419206291437149,
      "rewards/margins": -0.0035268948413431644,
      "rewards/rejected": -0.030665166676044464,
      "step": 22
    },
    {
      "epoch": 0.3030053519967065,
      "grad_norm": 0.37249696254730225,
      "learning_rate": 4.4067129452759546e-07,
      "logits/chosen": 10.050610542297363,
      "logits/rejected": 11.06921100616455,
      "logps/chosen": -108.137451171875,
      "logps/ref_chosen": -104.72956085205078,
      "logps/ref_rejected": -121.35556030273438,
      "logps/rejected": -124.6715316772461,
      "loss": 0.6937,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.03407883644104004,
      "rewards/margins": -0.0009191110148094594,
      "rewards/rejected": -0.03315972909331322,
      "step": 23
    },
    {
      "epoch": 0.3161794977356937,
      "grad_norm": 0.5831263661384583,
      "learning_rate": 4.3288283381591725e-07,
      "logits/chosen": 10.146599769592285,
      "logits/rejected": 10.979142189025879,
      "logps/chosen": -109.36030578613281,
      "logps/ref_chosen": -105.88758087158203,
      "logps/ref_rejected": -125.69054412841797,
      "logps/rejected": -129.528076171875,
      "loss": 0.6914,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.034727297723293304,
      "rewards/margins": 0.003648004261776805,
      "rewards/rejected": -0.03837530314922333,
      "step": 24
    },
    {
      "epoch": 0.32935364347468093,
      "grad_norm": 0.574914276599884,
      "learning_rate": 4.246923569447104e-07,
      "logits/chosen": 10.25512409210205,
      "logits/rejected": 10.956289291381836,
      "logps/chosen": -113.96874237060547,
      "logps/ref_chosen": -110.0761489868164,
      "logps/ref_rejected": -129.10540771484375,
      "logps/rejected": -133.20553588867188,
      "loss": 0.6922,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.03892593830823898,
      "rewards/margins": 0.0020754451397806406,
      "rewards/rejected": -0.04100137948989868,
      "step": 25
    },
    {
      "epoch": 0.34252778921366817,
      "grad_norm": 0.42842620611190796,
      "learning_rate": 4.161178683597054e-07,
      "logits/chosen": 10.368014335632324,
      "logits/rejected": 11.450322151184082,
      "logps/chosen": -108.39615631103516,
      "logps/ref_chosen": -103.74571990966797,
      "logps/ref_rejected": -120.73832702636719,
      "logps/rejected": -125.09370422363281,
      "loss": 0.6947,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": -0.046504296362400055,
      "rewards/margins": -0.002950500464066863,
      "rewards/rejected": -0.04355379566550255,
      "step": 26
    },
    {
      "epoch": 0.3557019349526554,
      "grad_norm": 0.5645923018455505,
      "learning_rate": 4.0717821664772124e-07,
      "logits/chosen": 10.042550086975098,
      "logits/rejected": 11.243234634399414,
      "logps/chosen": -110.1788330078125,
      "logps/ref_chosen": -105.47428131103516,
      "logps/ref_rejected": -120.5193099975586,
      "logps/rejected": -125.00762939453125,
      "loss": 0.6944,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.04704552888870239,
      "rewards/margins": -0.0021622537169605494,
      "rewards/rejected": -0.044883277267217636,
      "step": 27
    },
    {
      "epoch": 0.3688760806916426,
      "grad_norm": 0.680381178855896,
      "learning_rate": 3.978930531033806e-07,
      "logits/chosen": 9.559943199157715,
      "logits/rejected": 10.705163955688477,
      "logps/chosen": -107.98367309570312,
      "logps/ref_chosen": -103.72540283203125,
      "logps/ref_rejected": -119.79557800292969,
      "logps/rejected": -124.30613708496094,
      "loss": 0.692,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.04258278012275696,
      "rewards/margins": 0.0025227584410458803,
      "rewards/rejected": -0.04510553926229477,
      "step": 28
    },
    {
      "epoch": 0.3820502264306299,
      "grad_norm": 0.569850504398346,
      "learning_rate": 3.882827885312998e-07,
      "logits/chosen": 10.065170288085938,
      "logits/rejected": 11.1397123336792,
      "logps/chosen": -112.4742431640625,
      "logps/ref_chosen": -108.65434265136719,
      "logps/ref_rejected": -121.46784973144531,
      "logps/rejected": -125.24649047851562,
      "loss": 0.6935,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.03819899260997772,
      "rewards/margins": -0.0004125672858208418,
      "rewards/rejected": -0.03778642416000366,
      "step": 29
    },
    {
      "epoch": 0.39522437216961714,
      "grad_norm": 0.5246622562408447,
      "learning_rate": 3.7836854837871044e-07,
      "logits/chosen": 10.21100902557373,
      "logits/rejected": 11.58244800567627,
      "logps/chosen": -108.34341430664062,
      "logps/ref_chosen": -103.62174224853516,
      "logps/ref_rejected": -126.73807525634766,
      "logps/rejected": -131.23304748535156,
      "loss": 0.6944,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": -0.04721669480204582,
      "rewards/margins": -0.0022669308818876743,
      "rewards/rejected": -0.044949766248464584,
      "step": 30
    },
    {
      "epoch": 0.4083985179086044,
      "grad_norm": 0.37346869707107544,
      "learning_rate": 3.681721262971413e-07,
      "logits/chosen": 9.762397766113281,
      "logits/rejected": 10.784234046936035,
      "logps/chosen": -110.99979400634766,
      "logps/ref_chosen": -106.10479736328125,
      "logps/ref_rejected": -120.6382827758789,
      "logps/rejected": -125.51140594482422,
      "loss": 0.6934,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.04894987493753433,
      "rewards/margins": -0.00021872523939236999,
      "rewards/rejected": -0.04873115196824074,
      "step": 31
    },
    {
      "epoch": 0.4215726636475916,
      "grad_norm": 0.4927314221858978,
      "learning_rate": 3.577159362352426e-07,
      "logits/chosen": 10.023344993591309,
      "logits/rejected": 11.392967224121094,
      "logps/chosen": -110.29427337646484,
      "logps/ref_chosen": -105.99569702148438,
      "logps/ref_rejected": -128.34303283691406,
      "logps/rejected": -132.72604370117188,
      "loss": 0.6929,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.04298572242259979,
      "rewards/margins": 0.0008442200487479568,
      "rewards/rejected": -0.04382994398474693,
      "step": 32
    },
    {
      "epoch": 0.43474680938657884,
      "grad_norm": 0.9654183983802795,
      "learning_rate": 3.470229631680624e-07,
      "logits/chosen": 9.917928695678711,
      "logits/rejected": 10.762564659118652,
      "logps/chosen": -110.05650329589844,
      "logps/ref_chosen": -105.72196197509766,
      "logps/ref_rejected": -121.59507751464844,
      "logps/rejected": -126.74140167236328,
      "loss": 0.6892,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.04334544017910957,
      "rewards/margins": 0.008117962628602982,
      "rewards/rejected": -0.05146340653300285,
      "step": 33
    },
    {
      "epoch": 0.44792095512556607,
      "grad_norm": 0.5439296960830688,
      "learning_rate": 3.361167125710832e-07,
      "logits/chosen": 10.135066032409668,
      "logits/rejected": 10.976527214050293,
      "logps/chosen": -115.52051544189453,
      "logps/ref_chosen": -111.4834976196289,
      "logps/ref_rejected": -130.48089599609375,
      "logps/rejected": -135.11973571777344,
      "loss": 0.6903,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.04037024453282356,
      "rewards/margins": 0.0060182418674230576,
      "rewards/rejected": -0.04638848453760147,
      "step": 34
    },
    {
      "epoch": 0.4610951008645533,
      "grad_norm": 0.6634539365768433,
      "learning_rate": 3.2502115875008516e-07,
      "logits/chosen": 10.4329252243042,
      "logits/rejected": 11.376739501953125,
      "logps/chosen": -112.54086303710938,
      "logps/ref_chosen": -108.9183349609375,
      "logps/ref_rejected": -121.32493591308594,
      "logps/rejected": -125.75639343261719,
      "loss": 0.6893,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.03622515872120857,
      "rewards/margins": 0.008089457638561726,
      "rewards/rejected": -0.04431461915373802,
      "step": 35
    },
    {
      "epoch": 0.47426924660354053,
      "grad_norm": 0.6542326807975769,
      "learning_rate": 3.137606921404191e-07,
      "logits/chosen": 10.058025360107422,
      "logits/rejected": 10.712655067443848,
      "logps/chosen": -111.80799865722656,
      "logps/ref_chosen": -107.1411361694336,
      "logps/ref_rejected": -118.66165161132812,
      "logps/rejected": -123.4068832397461,
      "loss": 0.6929,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.046668585389852524,
      "rewards/margins": 0.000783862778916955,
      "rewards/rejected": -0.047452446073293686,
      "step": 36
    },
    {
      "epoch": 0.4874433923425278,
      "grad_norm": 0.4339970350265503,
      "learning_rate": 3.0236006569153616e-07,
      "logits/chosen": 10.282718658447266,
      "logits/rejected": 11.08802604675293,
      "logps/chosen": -110.58891296386719,
      "logps/ref_chosen": -106.6348876953125,
      "logps/ref_rejected": -121.37834167480469,
      "logps/rejected": -125.47262573242188,
      "loss": 0.6926,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.03954017534852028,
      "rewards/margins": 0.0014026057906448841,
      "rewards/rejected": -0.040942780673503876,
      "step": 37
    },
    {
      "epoch": 0.500617538081515,
      "grad_norm": 0.6856023073196411,
      "learning_rate": 2.9084434045463254e-07,
      "logits/chosen": 9.826108932495117,
      "logits/rejected": 10.918371200561523,
      "logps/chosen": -107.96115112304688,
      "logps/ref_chosen": -104.01033782958984,
      "logps/ref_rejected": -119.02666473388672,
      "logps/rejected": -123.3695068359375,
      "loss": 0.6913,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.039508119225502014,
      "rewards/margins": 0.003920318093150854,
      "rewards/rejected": -0.043428435921669006,
      "step": 38
    },
    {
      "epoch": 0.5137916838205022,
      "grad_norm": 0.5166105628013611,
      "learning_rate": 2.7923883049302066e-07,
      "logits/chosen": 10.282295227050781,
      "logits/rejected": 11.047395706176758,
      "logps/chosen": -113.61396026611328,
      "logps/ref_chosen": -109.76485443115234,
      "logps/ref_rejected": -122.25163269042969,
      "logps/rejected": -126.30699157714844,
      "loss": 0.6922,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.038491036742925644,
      "rewards/margins": 0.002062497427687049,
      "rewards/rejected": -0.040553539991378784,
      "step": 39
    },
    {
      "epoch": 0.5269658295594895,
      "grad_norm": 0.7613899111747742,
      "learning_rate": 2.6756904723632324e-07,
      "logits/chosen": 10.10784912109375,
      "logits/rejected": 11.276141166687012,
      "logps/chosen": -111.32815551757812,
      "logps/ref_chosen": -107.18782806396484,
      "logps/ref_rejected": -124.24542236328125,
      "logps/rejected": -128.5751495361328,
      "loss": 0.6924,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.04140327125787735,
      "rewards/margins": 0.001893932232633233,
      "rewards/rejected": -0.04329720139503479,
      "step": 40
    },
    {
      "epoch": 0.5401399752984768,
      "grad_norm": 0.5243136882781982,
      "learning_rate": 2.5586064340081516e-07,
      "logits/chosen": 10.432905197143555,
      "logits/rejected": 11.059402465820312,
      "logps/chosen": -109.8786849975586,
      "logps/ref_chosen": -106.42051696777344,
      "logps/ref_rejected": -122.25247192382812,
      "logps/rejected": -126.91265106201172,
      "loss": 0.6873,
      "rewards/accuracies": 0.6171875,
      "rewards/chosen": -0.034581609070301056,
      "rewards/margins": 0.012020176276564598,
      "rewards/rejected": -0.0466017909348011,
      "step": 41
    },
    {
      "epoch": 0.553314121037464,
      "grad_norm": 0.6438937187194824,
      "learning_rate": 2.4413935659918487e-07,
      "logits/chosen": 9.422279357910156,
      "logits/rejected": 10.50160026550293,
      "logps/chosen": -106.31170654296875,
      "logps/ref_chosen": -103.1148452758789,
      "logps/ref_rejected": -116.55464935302734,
      "logps/rejected": -120.24292755126953,
      "loss": 0.6908,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.03196856006979942,
      "rewards/margins": 0.004914162214845419,
      "rewards/rejected": -0.03688272088766098,
      "step": 42
    },
    {
      "epoch": 0.5664882667764513,
      "grad_norm": 0.5491278171539307,
      "learning_rate": 2.3243095276367684e-07,
      "logits/chosen": 9.590739250183105,
      "logits/rejected": 10.710611343383789,
      "logps/chosen": -107.79342651367188,
      "logps/ref_chosen": -104.21064758300781,
      "logps/ref_rejected": -118.7614974975586,
      "logps/rejected": -122.59417724609375,
      "loss": 0.6921,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.03582778573036194,
      "rewards/margins": 0.002499072812497616,
      "rewards/rejected": -0.03832685574889183,
      "step": 43
    },
    {
      "epoch": 0.5796624125154385,
      "grad_norm": 0.39058002829551697,
      "learning_rate": 2.2076116950697937e-07,
      "logits/chosen": 9.714144706726074,
      "logits/rejected": 10.525476455688477,
      "logps/chosen": -104.20186614990234,
      "logps/ref_chosen": -100.59449005126953,
      "logps/ref_rejected": -115.95166778564453,
      "logps/rejected": -120.36408233642578,
      "loss": 0.6893,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.03607375919818878,
      "rewards/margins": 0.008050307631492615,
      "rewards/rejected": -0.044124066829681396,
      "step": 44
    },
    {
      "epoch": 0.5928365582544257,
      "grad_norm": 0.49462494254112244,
      "learning_rate": 2.091556595453674e-07,
      "logits/chosen": 9.886950492858887,
      "logits/rejected": 10.70908260345459,
      "logps/chosen": -110.35658264160156,
      "logps/ref_chosen": -106.96060943603516,
      "logps/ref_rejected": -125.49449157714844,
      "logps/rejected": -129.1283416748047,
      "loss": 0.6921,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.033959683030843735,
      "rewards/margins": 0.0023788013495504856,
      "rewards/rejected": -0.03633848577737808,
      "step": 45
    },
    {
      "epoch": 0.606010703993413,
      "grad_norm": 0.5809131860733032,
      "learning_rate": 1.9763993430846392e-07,
      "logits/chosen": 9.98457145690918,
      "logits/rejected": 10.643254280090332,
      "logps/chosen": -110.51849365234375,
      "logps/ref_chosen": -107.08544158935547,
      "logps/ref_rejected": -120.38542175292969,
      "logps/rejected": -124.85211944580078,
      "loss": 0.6881,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.034330543130636215,
      "rewards/margins": 0.010336533188819885,
      "rewards/rejected": -0.0446670763194561,
      "step": 46
    },
    {
      "epoch": 0.6191848497324002,
      "grad_norm": 0.6774134039878845,
      "learning_rate": 1.862393078595809e-07,
      "logits/chosen": 9.864917755126953,
      "logits/rejected": 11.103195190429688,
      "logps/chosen": -109.32252502441406,
      "logps/ref_chosen": -105.74787902832031,
      "logps/ref_rejected": -122.93606567382812,
      "logps/rejected": -126.75948333740234,
      "loss": 0.692,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.03574639558792114,
      "rewards/margins": 0.0024877344258129597,
      "rewards/rejected": -0.038234129548072815,
      "step": 47
    },
    {
      "epoch": 0.6323589954713874,
      "grad_norm": 0.6952683925628662,
      "learning_rate": 1.7497884124991485e-07,
      "logits/chosen": 10.426509857177734,
      "logits/rejected": 11.333501815795898,
      "logps/chosen": -108.989013671875,
      "logps/ref_chosen": -105.3005599975586,
      "logps/ref_rejected": -123.93569946289062,
      "logps/rejected": -127.78026580810547,
      "loss": 0.6925,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.036884430795907974,
      "rewards/margins": 0.001561243087053299,
      "rewards/rejected": -0.03844567388296127,
      "step": 48
    },
    {
      "epoch": 0.6455331412103746,
      "grad_norm": 0.5686694383621216,
      "learning_rate": 1.6388328742891678e-07,
      "logits/chosen": 10.41826057434082,
      "logits/rejected": 11.283549308776855,
      "logps/chosen": -107.89359283447266,
      "logps/ref_chosen": -104.30430603027344,
      "logps/ref_rejected": -115.85497283935547,
      "logps/rejected": -119.6902847290039,
      "loss": 0.692,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.035892829298973083,
      "rewards/margins": 0.0024602406192570925,
      "rewards/rejected": -0.038353074342012405,
      "step": 49
    },
    {
      "epoch": 0.6587072869493619,
      "grad_norm": 0.357233464717865,
      "learning_rate": 1.5297703683193753e-07,
      "logits/chosen": 10.0699462890625,
      "logits/rejected": 10.928385734558105,
      "logps/chosen": -108.0082015991211,
      "logps/ref_chosen": -104.65946960449219,
      "logps/ref_rejected": -118.84170532226562,
      "logps/rejected": -123.09530639648438,
      "loss": 0.6888,
      "rewards/accuracies": 0.6328125,
      "rewards/chosen": -0.03348737210035324,
      "rewards/margins": 0.009048594161868095,
      "rewards/rejected": -0.04253596439957619,
      "step": 50
    },
    {
      "epoch": 0.6718814326883491,
      "grad_norm": 0.5144482851028442,
      "learning_rate": 1.422840637647574e-07,
      "logits/chosen": 10.165593147277832,
      "logits/rejected": 10.671769142150879,
      "logps/chosen": -107.86333465576172,
      "logps/ref_chosen": -104.4243392944336,
      "logps/ref_rejected": -117.16233825683594,
      "logps/rejected": -120.98663330078125,
      "loss": 0.6913,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.034389954060316086,
      "rewards/margins": 0.0038530200254172087,
      "rewards/rejected": -0.038242973387241364,
      "step": 51
    },
    {
      "epoch": 0.6850555784273363,
      "grad_norm": 0.3699370324611664,
      "learning_rate": 1.3182787370285865e-07,
      "logits/chosen": 9.598160743713379,
      "logits/rejected": 10.737981796264648,
      "logps/chosen": -105.96324157714844,
      "logps/ref_chosen": -101.99165344238281,
      "logps/ref_rejected": -123.20516204833984,
      "logps/rejected": -127.92692565917969,
      "loss": 0.6896,
      "rewards/accuracies": 0.6484375,
      "rewards/chosen": -0.03971587121486664,
      "rewards/margins": 0.0075019346550107,
      "rewards/rejected": -0.04721780866384506,
      "step": 52
    },
    {
      "epoch": 0.6982297241663236,
      "grad_norm": 0.8531109094619751,
      "learning_rate": 1.2163145162128946e-07,
      "logits/chosen": 10.053773880004883,
      "logits/rejected": 10.923731803894043,
      "logps/chosen": -111.67724609375,
      "logps/ref_chosen": -108.26175689697266,
      "logps/ref_rejected": -118.12374114990234,
      "logps/rejected": -122.3751449584961,
      "loss": 0.6891,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.03415490314364433,
      "rewards/margins": 0.008359100669622421,
      "rewards/rejected": -0.042514000087976456,
      "step": 53
    },
    {
      "epoch": 0.7114038699053108,
      "grad_norm": 0.5698924660682678,
      "learning_rate": 1.1171721146870014e-07,
      "logits/chosen": 10.148171424865723,
      "logits/rejected": 11.11772632598877,
      "logps/chosen": -112.66647338867188,
      "logps/ref_chosen": -108.5864028930664,
      "logps/ref_rejected": -130.25155639648438,
      "logps/rejected": -135.24679565429688,
      "loss": 0.6888,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.04080072045326233,
      "rewards/margins": 0.009151678532361984,
      "rewards/rejected": -0.049952395260334015,
      "step": 54
    },
    {
      "epoch": 0.724578015644298,
      "grad_norm": 0.3433741629123688,
      "learning_rate": 1.0210694689661939e-07,
      "logits/chosen": 10.15285873413086,
      "logits/rejected": 10.966636657714844,
      "logps/chosen": -109.76239013671875,
      "logps/ref_chosen": -105.69741821289062,
      "logps/ref_rejected": -122.07044219970703,
      "logps/rejected": -126.95681762695312,
      "loss": 0.6892,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.04064972698688507,
      "rewards/margins": 0.008214111439883709,
      "rewards/rejected": -0.048863835632801056,
      "step": 55
    },
    {
      "epoch": 0.7377521613832853,
      "grad_norm": 0.5420451164245605,
      "learning_rate": 9.282178335227883e-08,
      "logits/chosen": 9.80726432800293,
      "logits/rejected": 10.977776527404785,
      "logps/chosen": -110.36256408691406,
      "logps/ref_chosen": -106.5007095336914,
      "logps/ref_rejected": -123.01736450195312,
      "logps/rejected": -127.21353912353516,
      "loss": 0.6916,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.03861851990222931,
      "rewards/margins": 0.003343143267557025,
      "rewards/rejected": -0.0419616661965847,
      "step": 56
    },
    {
      "epoch": 0.7509263071222725,
      "grad_norm": 0.4269044101238251,
      "learning_rate": 8.388213164029459e-08,
      "logits/chosen": 10.584318161010742,
      "logits/rejected": 11.365358352661133,
      "logps/chosen": -113.1204833984375,
      "logps/ref_chosen": -109.18460083007812,
      "logps/ref_rejected": -124.3697280883789,
      "logps/rejected": -128.9630584716797,
      "loss": 0.6901,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.03935876488685608,
      "rewards/margins": 0.006574348546564579,
      "rewards/rejected": -0.045933112502098083,
      "step": 57
    },
    {
      "epoch": 0.7641004528612598,
      "grad_norm": 0.7017854452133179,
      "learning_rate": 7.530764305528958e-08,
      "logits/chosen": 9.92167854309082,
      "logits/rejected": 10.487292289733887,
      "logps/chosen": -108.01974487304688,
      "logps/ref_chosen": -104.43944549560547,
      "logps/ref_rejected": -118.44985961914062,
      "logps/rejected": -123.08047485351562,
      "loss": 0.688,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.03580301254987717,
      "rewards/margins": 0.010503105819225311,
      "rewards/rejected": -0.04630611836910248,
      "step": 58
    },
    {
      "epoch": 0.7772745986002471,
      "grad_norm": 0.47289225459098816,
      "learning_rate": 6.711716618408281e-08,
      "logits/chosen": 10.168304443359375,
      "logits/rejected": 11.070626258850098,
      "logps/chosen": -108.20891571044922,
      "logps/ref_chosen": -103.32658386230469,
      "logps/ref_rejected": -121.63726806640625,
      "logps/rejected": -126.62254333496094,
      "loss": 0.6928,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.04882337152957916,
      "rewards/margins": 0.001029324484989047,
      "rewards/rejected": -0.04985269159078598,
      "step": 59
    },
    {
      "epoch": 0.7904487443392343,
      "grad_norm": 0.5885170102119446,
      "learning_rate": 5.932870547240454e-08,
      "logits/chosen": 9.90310001373291,
      "logits/rejected": 10.99573802947998,
      "logps/chosen": -106.88694763183594,
      "logps/ref_chosen": -102.98921966552734,
      "logps/ref_rejected": -124.47185516357422,
      "logps/rejected": -129.24810791015625,
      "loss": 0.689,
      "rewards/accuracies": 0.6171875,
      "rewards/chosen": -0.03897739574313164,
      "rewards/margins": 0.008785145357251167,
      "rewards/rejected": -0.047762542963027954,
      "step": 60
    },
    {
      "epoch": 0.8036228900782215,
      "grad_norm": 0.8734719157218933,
      "learning_rate": 5.1959381647217665e-08,
      "logits/chosen": 9.963869094848633,
      "logits/rejected": 10.929679870605469,
      "logps/chosen": -110.50978088378906,
      "logps/ref_chosen": -106.28311157226562,
      "logps/ref_rejected": -121.47750854492188,
      "logps/rejected": -126.24954986572266,
      "loss": 0.6906,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.04226662591099739,
      "rewards/margins": 0.0054537225514650345,
      "rewards/rejected": -0.047720346599817276,
      "step": 61
    },
    {
      "epoch": 0.8167970358172087,
      "grad_norm": 0.5499524474143982,
      "learning_rate": 4.502539408164385e-08,
      "logits/chosen": 10.091617584228516,
      "logits/rejected": 10.90080451965332,
      "logps/chosen": -113.84786987304688,
      "logps/ref_chosen": -109.67979431152344,
      "logps/ref_rejected": -120.36711120605469,
      "logps/rejected": -124.17977905273438,
      "loss": 0.6951,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.041680797934532166,
      "rewards/margins": -0.003554222173988819,
      "rewards/rejected": -0.03812657669186592,
      "step": 62
    },
    {
      "epoch": 0.829971181556196,
      "grad_norm": 0.542752206325531,
      "learning_rate": 3.854198518522564e-08,
      "logits/chosen": 10.232728004455566,
      "logits/rejected": 11.329594612121582,
      "logps/chosen": -110.89617156982422,
      "logps/ref_chosen": -106.88896942138672,
      "logps/ref_rejected": -122.57796478271484,
      "logps/rejected": -127.40741729736328,
      "loss": 0.6893,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.04007202014327049,
      "rewards/margins": 0.008222556672990322,
      "rewards/rejected": -0.04829457774758339,
      "step": 63
    },
    {
      "epoch": 0.8431453272951832,
      "grad_norm": 0.38359031081199646,
      "learning_rate": 3.2523406897802444e-08,
      "logits/chosen": 10.233137130737305,
      "logits/rejected": 10.861595153808594,
      "logps/chosen": -113.31096649169922,
      "logps/ref_chosen": -109.29510498046875,
      "logps/ref_rejected": -121.51821899414062,
      "logps/rejected": -126.56211853027344,
      "loss": 0.6882,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.04015868529677391,
      "rewards/margins": 0.010280282236635685,
      "rewards/rejected": -0.05043896287679672,
      "step": 64
    },
    {
      "epoch": 0.8563194730341704,
      "grad_norm": 0.552734911441803,
      "learning_rate": 2.6982889360653376e-08,
      "logits/chosen": 9.759366035461426,
      "logits/rejected": 11.070873260498047,
      "logps/chosen": -114.16085815429688,
      "logps/ref_chosen": -110.15232849121094,
      "logps/ref_rejected": -127.54279327392578,
      "logps/rejected": -131.6808319091797,
      "loss": 0.6927,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.04008534550666809,
      "rewards/margins": 0.0012950068339705467,
      "rewards/rejected": -0.041380349546670914,
      "step": 65
    },
    {
      "epoch": 0.8694936187731577,
      "grad_norm": 0.7490947246551514,
      "learning_rate": 2.1932611833775843e-08,
      "logits/chosen": 9.782209396362305,
      "logits/rejected": 10.816038131713867,
      "logps/chosen": -108.0797119140625,
      "logps/ref_chosen": -104.0207748413086,
      "logps/ref_rejected": -126.93211364746094,
      "logps/rejected": -131.68116760253906,
      "loss": 0.6899,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.04058944806456566,
      "rewards/margins": 0.006901042070239782,
      "rewards/rejected": -0.04749048873782158,
      "step": 66
    },
    {
      "epoch": 0.8826677645121449,
      "grad_norm": 0.9180015921592712,
      "learning_rate": 1.738367592322837e-08,
      "logits/chosen": 10.090204238891602,
      "logits/rejected": 11.038689613342285,
      "logps/chosen": -108.19171142578125,
      "logps/ref_chosen": -104.55751037597656,
      "logps/ref_rejected": -119.71514892578125,
      "logps/rejected": -123.98207092285156,
      "loss": 0.6901,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.03634200245141983,
      "rewards/margins": 0.006327103357762098,
      "rewards/rejected": -0.042669106274843216,
      "step": 67
    },
    {
      "epoch": 0.8958419102511321,
      "grad_norm": 0.5030494928359985,
      "learning_rate": 1.3346081177391472e-08,
      "logits/chosen": 10.458399772644043,
      "logits/rejected": 10.807394027709961,
      "logps/chosen": -110.72022247314453,
      "logps/ref_chosen": -107.26033020019531,
      "logps/ref_rejected": -115.8590087890625,
      "logps/rejected": -119.74439239501953,
      "loss": 0.6912,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.03459898754954338,
      "rewards/margins": 0.004254964645951986,
      "rewards/rejected": -0.038853950798511505,
      "step": 68
    },
    {
      "epoch": 0.9090160559901194,
      "grad_norm": 0.4641689658164978,
      "learning_rate": 9.828703105789981e-09,
      "logits/chosen": 10.361639022827148,
      "logits/rejected": 11.253664016723633,
      "logps/chosen": -110.26504516601562,
      "logps/ref_chosen": -106.8610610961914,
      "logps/ref_rejected": -122.44428253173828,
      "logps/rejected": -126.94345092773438,
      "loss": 0.6879,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.03403985872864723,
      "rewards/margins": 0.010951781645417213,
      "rewards/rejected": -0.044991642236709595,
      "step": 69
    },
    {
      "epoch": 0.9221902017291066,
      "grad_norm": 0.7975694537162781,
      "learning_rate": 6.839273668796747e-09,
      "logits/chosen": 10.03953742980957,
      "logits/rejected": 10.584858894348145,
      "logps/chosen": -110.47731018066406,
      "logps/ref_chosen": -106.58778381347656,
      "logps/ref_rejected": -124.77790832519531,
      "logps/rejected": -129.56314086914062,
      "loss": 0.6889,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.038895271718502045,
      "rewards/margins": 0.008957008831202984,
      "rewards/rejected": -0.047852277755737305,
      "step": 70
    },
    {
      "epoch": 0.9353643474680938,
      "grad_norm": 0.47530707716941833,
      "learning_rate": 4.384364281105973e-09,
      "logits/chosen": 10.095258712768555,
      "logits/rejected": 11.301887512207031,
      "logps/chosen": -108.50511169433594,
      "logps/ref_chosen": -104.39148712158203,
      "logps/ref_rejected": -120.59461212158203,
      "logps/rejected": -125.61332702636719,
      "loss": 0.6888,
      "rewards/accuracies": 0.6015625,
      "rewards/chosen": -0.041136160492897034,
      "rewards/margins": 0.00905083678662777,
      "rewards/rejected": -0.05018700286746025,
      "step": 71
    },
    {
      "epoch": 0.9485384932070811,
      "grad_norm": 0.5963804125785828,
      "learning_rate": 2.469371366337264e-09,
      "logits/chosen": 10.218768119812012,
      "logits/rejected": 11.212346076965332,
      "logps/chosen": -112.40518188476562,
      "logps/ref_chosen": -108.53898620605469,
      "logps/ref_rejected": -123.26167297363281,
      "logps/rejected": -128.02452087402344,
      "loss": 0.6888,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.03866204991936684,
      "rewards/margins": 0.008966410532593727,
      "rewards/rejected": -0.047628454864025116,
      "step": 72
    },
    {
      "epoch": 0.9617126389460683,
      "grad_norm": 0.484179824590683,
      "learning_rate": 1.0985044945254762e-09,
      "logits/chosen": 9.987685203552246,
      "logits/rejected": 10.82412338256836,
      "logps/chosen": -113.74256134033203,
      "logps/ref_chosen": -110.50595092773438,
      "logps/ref_rejected": -124.92510223388672,
      "logps/rejected": -129.0068359375,
      "loss": 0.6891,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.03236612305045128,
      "rewards/margins": 0.008451285772025585,
      "rewards/rejected": -0.04081741347908974,
      "step": 73
    },
    {
      "epoch": 0.9748867846850556,
      "grad_norm": 0.6778624653816223,
      "learning_rate": 2.7477712857215675e-10,
      "logits/chosen": 10.2771577835083,
      "logits/rejected": 11.107219696044922,
      "logps/chosen": -110.37413787841797,
      "logps/ref_chosen": -107.1200942993164,
      "logps/ref_rejected": -120.22421264648438,
      "logps/rejected": -123.97468566894531,
      "loss": 0.6908,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.032540448009967804,
      "rewards/margins": 0.004964269232004881,
      "rewards/rejected": -0.03750471770763397,
      "step": 74
    },
    {
      "epoch": 0.9880609304240429,
      "grad_norm": 0.620089590549469,
      "learning_rate": 0.0,
      "logits/chosen": 10.067145347595215,
      "logits/rejected": 10.837061882019043,
      "logps/chosen": -111.20693969726562,
      "logps/ref_chosen": -107.40764617919922,
      "logps/ref_rejected": -120.6578369140625,
      "logps/rejected": -125.20486450195312,
      "loss": 0.6896,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.037992849946022034,
      "rewards/margins": 0.007477432489395142,
      "rewards/rejected": -0.045470282435417175,
      "step": 75
    },
    {
      "epoch": 0.9880609304240429,
      "step": 75,
      "total_flos": 0.0,
      "train_loss": 0.6916100986798605,
      "train_runtime": 8570.6786,
      "train_samples_per_second": 1.134,
      "train_steps_per_second": 0.009
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}