{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9880609304240429,
  "eval_steps": 500,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.39343583583831787,
      "learning_rate": 6.25e-08,
      "logits/chosen": 10.071717262268066,
      "logits/rejected": 10.610974311828613,
      "logps/chosen": -121.14067077636719,
      "logps/ref_chosen": -121.14067077636719,
      "logps/ref_rejected": -137.65684509277344,
      "logps/rejected": -137.65684509277344,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.7521647214889526,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.222262382507324,
      "logits/rejected": 10.754176139831543,
      "logps/chosen": -116.48068237304688,
      "logps/ref_chosen": -116.48068237304688,
      "logps/ref_rejected": -130.27796936035156,
      "logps/rejected": -130.27796936035156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.5880491137504578,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.01984977722168,
      "logits/rejected": 10.62405776977539,
      "logps/chosen": -123.36822509765625,
      "logps/ref_chosen": -122.6683349609375,
      "logps/ref_rejected": -132.69850158691406,
      "logps/rejected": -133.207275390625,
      "loss": 0.6941,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.006998830940574408,
      "rewards/margins": -0.0019110905705019832,
      "rewards/rejected": -0.005087739787995815,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.4605408310890198,
      "learning_rate": 2.5e-07,
      "logits/chosen": 9.99990463256836,
      "logits/rejected": 10.736846923828125,
      "logps/chosen": -123.02133178710938,
      "logps/ref_chosen": -122.59739685058594,
      "logps/ref_rejected": -129.70767211914062,
      "logps/rejected": -129.98374938964844,
      "loss": 0.6939,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.004239337984472513,
      "rewards/margins": -0.0014785109087824821,
      "rewards/rejected": -0.0027608266100287437,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.469856321811676,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.075482368469238,
      "logits/rejected": 10.892666816711426,
      "logps/chosen": -117.554931640625,
      "logps/ref_chosen": -117.5941162109375,
      "logps/ref_rejected": -132.1708984375,
      "logps/rejected": -132.1227264404297,
      "loss": 0.6932,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": 0.0003917121794074774,
      "rewards/margins": -9.010493522509933e-05,
      "rewards/rejected": 0.0004818170564249158,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.5541566610336304,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.660999298095703,
      "logits/rejected": 11.290507316589355,
      "logps/chosen": -127.00320434570312,
      "logps/ref_chosen": -126.12411499023438,
      "logps/ref_rejected": -136.9976043701172,
      "logps/rejected": -137.75950622558594,
      "loss": 0.6938,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.00879070907831192,
      "rewards/margins": -0.0011717069428414106,
      "rewards/rejected": -0.007619001902639866,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.4422788619995117,
      "learning_rate": 4.375e-07,
      "logits/chosen": 9.958097457885742,
      "logits/rejected": 10.642163276672363,
      "logps/chosen": -115.61244201660156,
      "logps/ref_chosen": -115.08863830566406,
      "logps/ref_rejected": -125.91255187988281,
      "logps/rejected": -126.15577697753906,
      "loss": 0.6946,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.005238103214651346,
      "rewards/margins": -0.0028058765456080437,
      "rewards/rejected": -0.002432226436212659,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.5495327115058899,
      "learning_rate": 5e-07,
      "logits/chosen": 10.345503807067871,
      "logits/rejected": 10.97708797454834,
      "logps/chosen": -121.48406982421875,
      "logps/ref_chosen": -121.4114761352539,
      "logps/ref_rejected": -134.62770080566406,
      "logps/rejected": -134.7586212158203,
      "loss": 0.6929,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.0007259202538989484,
      "rewards/margins": 0.0005832896567881107,
      "rewards/rejected": -0.0013092098524793983,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.43922048807144165,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.159126281738281,
      "logits/rejected": 11.002123832702637,
      "logps/chosen": -122.51399230957031,
      "logps/ref_chosen": -121.59207153320312,
      "logps/ref_rejected": -134.70025634765625,
      "logps/rejected": -135.55740356445312,
      "loss": 0.6935,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.00921926274895668,
      "rewards/margins": -0.0006479143630713224,
      "rewards/rejected": -0.008571348153054714,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.6643485426902771,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 9.9464111328125,
      "logits/rejected": 10.739057540893555,
      "logps/chosen": -117.91310119628906,
      "logps/ref_chosen": -117.16349029541016,
      "logps/ref_rejected": -129.98167419433594,
      "logps/rejected": -130.65924072265625,
      "loss": 0.6935,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.007496046833693981,
      "rewards/margins": -0.0007203805143944919,
      "rewards/rejected": -0.00677566509693861,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.4432956576347351,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 10.095781326293945,
      "logits/rejected": 11.001167297363281,
      "logps/chosen": -123.26506042480469,
      "logps/ref_chosen": -121.95927429199219,
      "logps/ref_rejected": -136.18655395507812,
      "logps/rejected": -137.39865112304688,
      "loss": 0.6936,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.01305788941681385,
      "rewards/margins": -0.0009369202307425439,
      "rewards/rejected": -0.012120969593524933,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.3955974280834198,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 10.129995346069336,
      "logits/rejected": 10.673677444458008,
      "logps/chosen": -126.05357360839844,
      "logps/ref_chosen": -124.12315368652344,
      "logps/ref_rejected": -134.275390625,
      "logps/rejected": -136.24940490722656,
      "loss": 0.693,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.01930420845746994,
      "rewards/margins": 0.00043603626545518637,
      "rewards/rejected": -0.01974024437367916,
      "step": 12
    },
    {
      "epoch": 0.17126389460683408,
      "grad_norm": 0.4873650074005127,
      "learning_rate": 4.931607263312032e-07,
      "logits/chosen": 9.8243989944458,
      "logits/rejected": 10.843228340148926,
      "logps/chosen": -119.30509185791016,
      "logps/ref_chosen": -116.83765411376953,
      "logps/ref_rejected": -130.78997802734375,
      "logps/rejected": -133.15672302246094,
      "loss": 0.6937,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.024674497544765472,
      "rewards/margins": -0.0010069820564240217,
      "rewards/rejected": -0.02366751804947853,
      "step": 13
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 0.8769639134407043,
      "learning_rate": 4.9017129689421e-07,
      "logits/chosen": 10.427848815917969,
      "logits/rejected": 11.37716293334961,
      "logps/chosen": -120.9067611694336,
      "logps/ref_chosen": -118.43791961669922,
      "logps/ref_rejected": -132.5309600830078,
      "logps/rejected": -134.91236877441406,
      "loss": 0.6936,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.02468838170170784,
      "rewards/margins": -0.0008744060760363936,
      "rewards/rejected": -0.023813974112272263,
      "step": 14
    },
    {
      "epoch": 0.19761218608480857,
      "grad_norm": 0.8736073970794678,
      "learning_rate": 4.866539188226085e-07,
      "logits/chosen": 9.956391334533691,
      "logits/rejected": 10.684510231018066,
      "logps/chosen": -128.198486328125,
      "logps/ref_chosen": -124.82101440429688,
      "logps/ref_rejected": -135.32565307617188,
      "logps/rejected": -138.8860626220703,
      "loss": 0.6923,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.033774565905332565,
      "rewards/margins": 0.0018295131158083677,
      "rewards/rejected": -0.0356040820479393,
      "step": 15
    },
    {
      "epoch": 0.2107863318237958,
      "grad_norm": 0.7293412089347839,
      "learning_rate": 4.826163240767716e-07,
      "logits/chosen": 10.677412033081055,
      "logits/rejected": 11.254134178161621,
      "logps/chosen": -122.79344940185547,
      "logps/ref_chosen": -119.69990539550781,
      "logps/ref_rejected": -130.34449768066406,
      "logps/rejected": -133.39932250976562,
      "loss": 0.6934,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.030935294926166534,
      "rewards/margins": -0.0003869622596539557,
      "rewards/rejected": -0.030548332259058952,
      "step": 16
    },
    {
      "epoch": 0.22396047756278303,
      "grad_norm": 0.6406402587890625,
      "learning_rate": 4.780673881662242e-07,
      "logits/chosen": 10.172043800354004,
      "logits/rejected": 10.859190940856934,
      "logps/chosen": -118.11813354492188,
      "logps/ref_chosen": -114.52975463867188,
      "logps/ref_rejected": -129.5846405029297,
      "logps/rejected": -133.20779418945312,
      "loss": 0.6931,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.035883828997612,
      "rewards/margins": 0.00034771906211972237,
      "rewards/rejected": -0.036231543868780136,
      "step": 17
    },
    {
      "epoch": 0.23713462330177026,
      "grad_norm": 0.7559142708778381,
      "learning_rate": 4.730171106393466e-07,
      "logits/chosen": 10.747884750366211,
      "logits/rejected": 11.173224449157715,
      "logps/chosen": -121.89351654052734,
      "logps/ref_chosen": -117.67997741699219,
      "logps/ref_rejected": -128.00650024414062,
      "logps/rejected": -132.31655883789062,
      "loss": 0.6928,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.042135339230298996,
      "rewards/margins": 0.0009651560103520751,
      "rewards/rejected": -0.043100498616695404,
      "step": 18
    },
    {
      "epoch": 0.2503087690407575,
      "grad_norm": 0.49808746576309204,
      "learning_rate": 4.6747659310219757e-07,
      "logits/chosen": 10.380050659179688,
      "logits/rejected": 10.91146469116211,
      "logps/chosen": -125.04395294189453,
      "logps/ref_chosen": -120.92308044433594,
      "logps/ref_rejected": -133.8301544189453,
      "logps/rejected": -138.79052734375,
      "loss": 0.689,
      "rewards/accuracies": 0.6328125,
      "rewards/chosen": -0.0412086620926857,
      "rewards/margins": 0.008395083248615265,
      "rewards/rejected": -0.04960374906659126,
      "step": 19
    },
    {
      "epoch": 0.2634829147797447,
      "grad_norm": 0.5601547956466675,
      "learning_rate": 4.6145801481477433e-07,
      "logits/chosen": 10.563058853149414,
      "logits/rejected": 11.37236499786377,
      "logps/chosen": -126.93672180175781,
      "logps/ref_chosen": -121.84554290771484,
      "logps/ref_rejected": -133.343017578125,
      "logps/rejected": -138.12808227539062,
      "loss": 0.6948,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.050911907106637955,
      "rewards/margins": -0.0030612414702773094,
      "rewards/rejected": -0.04785066470503807,
      "step": 20
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 0.41351014375686646,
      "learning_rate": 4.549746059183561e-07,
      "logits/chosen": 9.687071800231934,
      "logits/rejected": 10.572582244873047,
      "logps/chosen": -124.77998352050781,
      "logps/ref_chosen": -119.20828247070312,
      "logps/ref_rejected": -134.38436889648438,
      "logps/rejected": -140.02293395996094,
      "loss": 0.6929,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.055717017501592636,
      "rewards/margins": 0.0006685962434858084,
      "rewards/rejected": -0.056385621428489685,
      "step": 21
    },
    {
      "epoch": 0.28983120625771924,
      "grad_norm": 0.6027271747589111,
      "learning_rate": 4.480406183527823e-07,
      "logits/chosen": 10.03482437133789,
      "logits/rejected": 10.820859909057617,
      "logps/chosen": -121.17003631591797,
      "logps/ref_chosen": -114.62059020996094,
      "logps/ref_rejected": -128.0896759033203,
      "logps/rejected": -134.1069793701172,
      "loss": 0.6959,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.06549445539712906,
      "rewards/margins": -0.005321440752595663,
      "rewards/rejected": -0.06017300859093666,
      "step": 22
    },
    {
      "epoch": 0.3030053519967065,
      "grad_norm": 0.5877695083618164,
      "learning_rate": 4.4067129452759546e-07,
      "logits/chosen": 10.00536060333252,
      "logits/rejected": 10.877461433410645,
      "logps/chosen": -124.2905044555664,
      "logps/ref_chosen": -117.84042358398438,
      "logps/ref_rejected": -131.79171752929688,
      "logps/rejected": -137.91160583496094,
      "loss": 0.6949,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.06450086086988449,
      "rewards/margins": -0.0033018956892192364,
      "rewards/rejected": -0.061198972165584564,
      "step": 23
    },
    {
      "epoch": 0.3161794977356937,
      "grad_norm": 0.5177574157714844,
      "learning_rate": 4.3288283381591725e-07,
      "logits/chosen": 10.104101181030273,
      "logits/rejected": 10.70304012298584,
      "logps/chosen": -129.96224975585938,
      "logps/ref_chosen": -123.75523376464844,
      "logps/ref_rejected": -138.6237030029297,
      "logps/rejected": -144.98983764648438,
      "loss": 0.6925,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.062070075422525406,
      "rewards/margins": 0.0015914504183456302,
      "rewards/rejected": -0.06366152316331863,
      "step": 24
    },
    {
      "epoch": 0.32935364347468093,
      "grad_norm": 0.57741379737854,
      "learning_rate": 4.246923569447104e-07,
      "logits/chosen": 10.097904205322266,
      "logits/rejected": 10.884883880615234,
      "logps/chosen": -130.04393005371094,
      "logps/ref_chosen": -122.53610229492188,
      "logps/ref_rejected": -141.00828552246094,
      "logps/rejected": -148.18405151367188,
      "loss": 0.695,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.07507827132940292,
      "rewards/margins": -0.003320657880976796,
      "rewards/rejected": -0.07175761461257935,
      "step": 25
    },
    {
      "epoch": 0.34252778921366817,
      "grad_norm": 0.5819231867790222,
      "learning_rate": 4.161178683597054e-07,
      "logits/chosen": 10.32345199584961,
      "logits/rejected": 11.267877578735352,
      "logps/chosen": -132.95077514648438,
      "logps/ref_chosen": -124.1744384765625,
      "logps/ref_rejected": -139.68605041503906,
      "logps/rejected": -148.0929718017578,
      "loss": 0.6952,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.08776339888572693,
      "rewards/margins": -0.0036940835416316986,
      "rewards/rejected": -0.08406931161880493,
      "step": 26
    },
    {
      "epoch": 0.3557019349526554,
      "grad_norm": 0.6049931645393372,
      "learning_rate": 4.0717821664772124e-07,
      "logits/chosen": 10.15705394744873,
      "logits/rejected": 11.114046096801758,
      "logps/chosen": -125.23123931884766,
      "logps/ref_chosen": -117.1941146850586,
      "logps/ref_rejected": -129.4031982421875,
      "logps/rejected": -137.76333618164062,
      "loss": 0.6918,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.08037131279706955,
      "rewards/margins": 0.0032299975864589214,
      "rewards/rejected": -0.08360131084918976,
      "step": 27
    },
    {
      "epoch": 0.3688760806916426,
      "grad_norm": 0.5475514531135559,
      "learning_rate": 3.978930531033806e-07,
      "logits/chosen": 9.436954498291016,
      "logits/rejected": 10.49735164642334,
      "logps/chosen": -125.24280548095703,
      "logps/ref_chosen": -117.17620086669922,
      "logps/ref_rejected": -130.76107788085938,
      "logps/rejected": -138.6796875,
      "loss": 0.6941,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.08066616207361221,
      "rewards/margins": -0.001479951897636056,
      "rewards/rejected": -0.07918620854616165,
      "step": 28
    },
    {
      "epoch": 0.3820502264306299,
      "grad_norm": 0.599656343460083,
      "learning_rate": 3.882827885312998e-07,
      "logits/chosen": 10.068204879760742,
      "logits/rejected": 10.938701629638672,
      "logps/chosen": -130.98472595214844,
      "logps/ref_chosen": -123.2500228881836,
      "logps/ref_rejected": -133.91970825195312,
      "logps/rejected": -141.19216918945312,
      "loss": 0.6957,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": -0.07734709233045578,
      "rewards/margins": -0.004622358828783035,
      "rewards/rejected": -0.07272473722696304,
      "step": 29
    },
    {
      "epoch": 0.39522437216961714,
      "grad_norm": 0.4804610311985016,
      "learning_rate": 3.7836854837871044e-07,
      "logits/chosen": 10.131808280944824,
      "logits/rejected": 11.42764663696289,
      "logps/chosen": -127.49568176269531,
      "logps/ref_chosen": -118.52604675292969,
      "logps/ref_rejected": -139.1146240234375,
      "logps/rejected": -147.99575805664062,
      "loss": 0.6938,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.08969634026288986,
      "rewards/margins": -0.0008849686128087342,
      "rewards/rejected": -0.08881138265132904,
      "step": 30
    },
    {
      "epoch": 0.4083985179086044,
      "grad_norm": 0.4208291471004486,
      "learning_rate": 3.681721262971413e-07,
      "logits/chosen": 9.75320816040039,
      "logits/rejected": 10.846428871154785,
      "logps/chosen": -119.75810241699219,
      "logps/ref_chosen": -111.525146484375,
      "logps/ref_rejected": -128.1785888671875,
      "logps/rejected": -136.38653564453125,
      "loss": 0.6935,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.08232954144477844,
      "rewards/margins": -0.00024991348618641496,
      "rewards/rejected": -0.08207963407039642,
      "step": 31
    },
    {
      "epoch": 0.4215726636475916,
      "grad_norm": 0.7168395519256592,
      "learning_rate": 3.577159362352426e-07,
      "logits/chosen": 9.947325706481934,
      "logits/rejected": 11.155643463134766,
      "logps/chosen": -124.97373962402344,
      "logps/ref_chosen": -116.80255126953125,
      "logps/ref_rejected": -134.80767822265625,
      "logps/rejected": -142.88174438476562,
      "loss": 0.6939,
      "rewards/accuracies": 0.4296875,
      "rewards/chosen": -0.08171181380748749,
      "rewards/margins": -0.0009711601305752993,
      "rewards/rejected": -0.08074064552783966,
      "step": 32
    },
    {
      "epoch": 0.43474680938657884,
      "grad_norm": 1.1801953315734863,
      "learning_rate": 3.470229631680624e-07,
      "logits/chosen": 10.00582504272461,
      "logits/rejected": 10.817474365234375,
      "logps/chosen": -127.24813842773438,
      "logps/ref_chosen": -118.635009765625,
      "logps/ref_rejected": -133.8279571533203,
      "logps/rejected": -142.4803924560547,
      "loss": 0.6932,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.08613133430480957,
      "rewards/margins": 0.0003930249949917197,
      "rewards/rejected": -0.0865243598818779,
      "step": 33
    },
    {
      "epoch": 0.44792095512556607,
      "grad_norm": 0.5862898230552673,
      "learning_rate": 3.361167125710832e-07,
      "logits/chosen": 10.005105972290039,
      "logits/rejected": 10.864886283874512,
      "logps/chosen": -135.84164428710938,
      "logps/ref_chosen": -126.42659759521484,
      "logps/ref_rejected": -143.6361846923828,
      "logps/rejected": -153.5630340576172,
      "loss": 0.6909,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.09415031969547272,
      "rewards/margins": 0.005118116270750761,
      "rewards/rejected": -0.09926842898130417,
      "step": 34
    },
    {
      "epoch": 0.4610951008645533,
      "grad_norm": 0.7007870674133301,
      "learning_rate": 3.2502115875008516e-07,
      "logits/chosen": 10.54973030090332,
      "logits/rejected": 11.243837356567383,
      "logps/chosen": -131.86981201171875,
      "logps/ref_chosen": -123.14965057373047,
      "logps/ref_rejected": -134.22947692871094,
      "logps/rejected": -143.7413787841797,
      "loss": 0.6895,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.0872015431523323,
      "rewards/margins": 0.007917709648609161,
      "rewards/rejected": -0.09511925280094147,
      "step": 35
    },
    {
      "epoch": 0.47426924660354053,
      "grad_norm": 0.7300606966018677,
      "learning_rate": 3.137606921404191e-07,
      "logits/chosen": 10.125234603881836,
      "logits/rejected": 10.481938362121582,
      "logps/chosen": -127.8335189819336,
      "logps/ref_chosen": -118.79129791259766,
      "logps/ref_rejected": -126.64965057373047,
      "logps/rejected": -135.0525665283203,
      "loss": 0.6966,
      "rewards/accuracies": 0.4296875,
      "rewards/chosen": -0.09042223542928696,
      "rewards/margins": -0.006393034942448139,
      "rewards/rejected": -0.0840291976928711,
      "step": 36
    },
    {
      "epoch": 0.4874433923425278,
      "grad_norm": 0.456684947013855,
      "learning_rate": 3.0236006569153616e-07,
      "logits/chosen": 10.322757720947266,
      "logits/rejected": 10.932893753051758,
      "logps/chosen": -130.44381713867188,
      "logps/ref_chosen": -121.899169921875,
      "logps/ref_rejected": -136.85321044921875,
      "logps/rejected": -145.57542419433594,
      "loss": 0.6925,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.08544634282588959,
      "rewards/margins": 0.00177572516258806,
      "rewards/rejected": -0.08722206950187683,
      "step": 37
    },
    {
      "epoch": 0.500617538081515,
      "grad_norm": 0.6402201056480408,
      "learning_rate": 2.9084434045463254e-07,
      "logits/chosen": 10.014504432678223,
      "logits/rejected": 10.873030662536621,
      "logps/chosen": -127.30659484863281,
      "logps/ref_chosen": -118.75363159179688,
      "logps/ref_rejected": -132.82818603515625,
      "logps/rejected": -141.9580535888672,
      "loss": 0.6905,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.08552955836057663,
      "rewards/margins": 0.005769058130681515,
      "rewards/rejected": -0.09129861742258072,
      "step": 38
    },
    {
      "epoch": 0.5137916838205022,
      "grad_norm": 0.5188658833503723,
      "learning_rate": 2.7923883049302066e-07,
      "logits/chosen": 10.178832054138184,
      "logits/rejected": 10.932839393615723,
      "logps/chosen": -137.26002502441406,
      "logps/ref_chosen": -128.23240661621094,
      "logps/ref_rejected": -140.05517578125,
      "logps/rejected": -149.08457946777344,
      "loss": 0.6934,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.09027623385190964,
      "rewards/margins": 1.788110239431262e-05,
      "rewards/rejected": -0.09029410034418106,
      "step": 39
    },
    {
      "epoch": 0.5269658295594895,
      "grad_norm": 0.7729969024658203,
      "learning_rate": 2.6756904723632324e-07,
      "logits/chosen": 10.076566696166992,
      "logits/rejected": 11.339007377624512,
      "logps/chosen": -127.69380187988281,
      "logps/ref_chosen": -118.38938903808594,
      "logps/ref_rejected": -138.8988037109375,
      "logps/rejected": -147.9398956298828,
      "loss": 0.6947,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.09304402768611908,
      "rewards/margins": -0.0026330682449042797,
      "rewards/rejected": -0.09041095525026321,
      "step": 40
    },
    {
      "epoch": 0.5401399752984768,
      "grad_norm": 0.6295668482780457,
      "learning_rate": 2.5586064340081516e-07,
      "logits/chosen": 10.364312171936035,
      "logits/rejected": 10.864094734191895,
      "logps/chosen": -126.18923950195312,
      "logps/ref_chosen": -117.82182312011719,
      "logps/ref_rejected": -132.89553833007812,
      "logps/rejected": -142.4484405517578,
      "loss": 0.6875,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.08367416262626648,
      "rewards/margins": 0.011854931712150574,
      "rewards/rejected": -0.09552909433841705,
      "step": 41
    },
    {
      "epoch": 0.553314121037464,
      "grad_norm": 0.6686902046203613,
      "learning_rate": 2.4413935659918487e-07,
      "logits/chosen": 9.487873077392578,
      "logits/rejected": 10.435559272766113,
      "logps/chosen": -125.37492370605469,
      "logps/ref_chosen": -117.0536117553711,
      "logps/ref_rejected": -130.59812927246094,
      "logps/rejected": -139.56155395507812,
      "loss": 0.6903,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.08321310579776764,
      "rewards/margins": 0.006421199534088373,
      "rewards/rejected": -0.08963430672883987,
      "step": 42
    },
    {
      "epoch": 0.5664882667764513,
      "grad_norm": 0.5477384924888611,
      "learning_rate": 2.3243095276367684e-07,
      "logits/chosen": 9.582094192504883,
      "logits/rejected": 10.506773948669434,
      "logps/chosen": -124.29164123535156,
      "logps/ref_chosen": -115.851806640625,
      "logps/ref_rejected": -130.8096160888672,
      "logps/rejected": -139.1727294921875,
      "loss": 0.6938,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.08439842611551285,
      "rewards/margins": -0.0007674552034586668,
      "rewards/rejected": -0.08363097161054611,
      "step": 43
    },
    {
      "epoch": 0.5796624125154385,
      "grad_norm": 0.4657438397407532,
      "learning_rate": 2.2076116950697937e-07,
      "logits/chosen": 9.838776588439941,
      "logits/rejected": 10.566040992736816,
      "logps/chosen": -126.56464385986328,
      "logps/ref_chosen": -117.90168762207031,
      "logps/ref_rejected": -132.960693359375,
      "logps/rejected": -142.1176300048828,
      "loss": 0.6909,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.08662942796945572,
      "rewards/margins": 0.004940015729516745,
      "rewards/rejected": -0.0915694385766983,
      "step": 44
    },
    {
      "epoch": 0.5928365582544257,
      "grad_norm": 0.4596782922744751,
      "learning_rate": 2.091556595453674e-07,
      "logits/chosen": 9.941704750061035,
      "logits/rejected": 10.477331161499023,
      "logps/chosen": -127.3547592163086,
      "logps/ref_chosen": -118.84819030761719,
      "logps/ref_rejected": -134.4351806640625,
      "logps/rejected": -143.46334838867188,
      "loss": 0.6908,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.08506564050912857,
      "rewards/margins": 0.005216139368712902,
      "rewards/rejected": -0.09028176963329315,
      "step": 45
    },
    {
      "epoch": 0.606010703993413,
      "grad_norm": 0.7369089722633362,
      "learning_rate": 1.9763993430846392e-07,
      "logits/chosen": 9.956421852111816,
      "logits/rejected": 10.419254302978516,
      "logps/chosen": -124.58769989013672,
      "logps/ref_chosen": -116.23255920410156,
      "logps/ref_rejected": -127.91217041015625,
      "logps/rejected": -137.61070251464844,
      "loss": 0.6867,
      "rewards/accuracies": 0.609375,
      "rewards/chosen": -0.08355137705802917,
      "rewards/margins": 0.013433952815830708,
      "rewards/rejected": -0.0969853326678276,
      "step": 46
    },
    {
      "epoch": 0.6191848497324002,
      "grad_norm": 0.6935871243476868,
      "learning_rate": 1.862393078595809e-07,
      "logits/chosen": 9.769856452941895,
      "logits/rejected": 10.882231712341309,
      "logps/chosen": -133.2343292236328,
      "logps/ref_chosen": -123.88105773925781,
      "logps/ref_rejected": -140.62420654296875,
      "logps/rejected": -150.24195861816406,
      "loss": 0.692,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.09353267401456833,
      "rewards/margins": 0.0026449086144566536,
      "rewards/rejected": -0.0961775854229927,
      "step": 47
    },
    {
      "epoch": 0.6323589954713874,
      "grad_norm": 0.9227128624916077,
      "learning_rate": 1.7497884124991485e-07,
      "logits/chosen": 10.461740493774414,
      "logits/rejected": 11.23796272277832,
      "logps/chosen": -129.02268981933594,
      "logps/ref_chosen": -119.78947448730469,
      "logps/ref_rejected": -134.12991333007812,
      "logps/rejected": -143.42581176757812,
      "loss": 0.693,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.09233222156763077,
      "rewards/margins": 0.0006269109435379505,
      "rewards/rejected": -0.09295912832021713,
      "step": 48
    },
    {
      "epoch": 0.6455331412103746,
      "grad_norm": 0.6264681816101074,
      "learning_rate": 1.6388328742891678e-07,
      "logits/chosen": 10.495881080627441,
      "logits/rejected": 11.157975196838379,
      "logps/chosen": -129.8017578125,
      "logps/ref_chosen": -121.04522705078125,
      "logps/ref_rejected": -132.03546142578125,
      "logps/rejected": -141.43582153320312,
      "loss": 0.6902,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.08756528049707413,
      "rewards/margins": 0.00643829395994544,
      "rewards/rejected": -0.09400356560945511,
      "step": 49
    },
    {
      "epoch": 0.6587072869493619,
      "grad_norm": 0.4232679009437561,
      "learning_rate": 1.5297703683193753e-07,
      "logits/chosen": 10.161746978759766,
      "logits/rejected": 10.93832015991211,
      "logps/chosen": -117.66082000732422,
      "logps/ref_chosen": -109.65755462646484,
      "logps/ref_rejected": -123.57516479492188,
      "logps/rejected": -132.5537872314453,
      "loss": 0.6886,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.08003270626068115,
      "rewards/margins": 0.009753433056175709,
      "rewards/rejected": -0.08978613466024399,
      "step": 50
    },
    {
      "epoch": 0.6718814326883491,
      "grad_norm": 0.5076274275779724,
      "learning_rate": 1.422840637647574e-07,
      "logits/chosen": 10.146079063415527,
      "logits/rejected": 10.626852035522461,
      "logps/chosen": -126.62266540527344,
      "logps/ref_chosen": -117.81820678710938,
      "logps/ref_rejected": -132.0400390625,
      "logps/rejected": -141.37049865722656,
      "loss": 0.6908,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.08804453164339066,
      "rewards/margins": 0.00526001350954175,
      "rewards/rejected": -0.09330454468727112,
      "step": 51
    },
    {
      "epoch": 0.6850555784273363,
      "grad_norm": 0.41372328996658325,
      "learning_rate": 1.3182787370285865e-07,
      "logits/chosen": 9.487447738647461,
      "logits/rejected": 10.429905891418457,
      "logps/chosen": -121.18466186523438,
      "logps/ref_chosen": -112.1561508178711,
      "logps/ref_rejected": -131.75282287597656,
      "logps/rejected": -141.48947143554688,
      "loss": 0.69,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.09028512984514236,
      "rewards/margins": 0.007081407587975264,
      "rewards/rejected": -0.09736653417348862,
      "step": 52
    },
    {
      "epoch": 0.6982297241663236,
      "grad_norm": 0.7098668217658997,
      "learning_rate": 1.2163145162128946e-07,
      "logits/chosen": 9.994986534118652,
      "logits/rejected": 10.834747314453125,
      "logps/chosen": -129.7722625732422,
      "logps/ref_chosen": -120.89462280273438,
      "logps/ref_rejected": -131.4730682373047,
      "logps/rejected": -140.58242797851562,
      "loss": 0.6923,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.08877623826265335,
      "rewards/margins": 0.0023174649104475975,
      "rewards/rejected": -0.09109370410442352,
      "step": 53
    },
    {
      "epoch": 0.7114038699053108,
      "grad_norm": 0.6442684531211853,
      "learning_rate": 1.1171721146870014e-07,
      "logits/chosen": 10.26471996307373,
      "logits/rejected": 10.965478897094727,
      "logps/chosen": -132.84579467773438,
      "logps/ref_chosen": -122.57451629638672,
      "logps/ref_rejected": -140.17933654785156,
      "logps/rejected": -150.3111572265625,
      "loss": 0.6942,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.10271281003952026,
      "rewards/margins": -0.001394760562106967,
      "rewards/rejected": -0.10131805390119553,
      "step": 54
    },
    {
      "epoch": 0.724578015644298,
      "grad_norm": 0.43760424852371216,
      "learning_rate": 1.0210694689661939e-07,
      "logits/chosen": 10.27353572845459,
      "logits/rejected": 10.934257507324219,
      "logps/chosen": -128.82815551757812,
      "logps/ref_chosen": -119.25200653076172,
      "logps/ref_rejected": -132.94203186035156,
      "logps/rejected": -143.47572326660156,
      "loss": 0.6887,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -0.09576155245304108,
      "rewards/margins": 0.00957532785832882,
      "rewards/rejected": -0.10533688962459564,
      "step": 55
    },
    {
      "epoch": 0.7377521613832853,
      "grad_norm": 0.5378979444503784,
      "learning_rate": 9.282178335227883e-08,
      "logits/chosen": 9.814579010009766,
      "logits/rejected": 10.796792030334473,
      "logps/chosen": -129.24404907226562,
      "logps/ref_chosen": -119.66452026367188,
      "logps/ref_rejected": -134.48817443847656,
      "logps/rejected": -144.27352905273438,
      "loss": 0.6923,
      "rewards/accuracies": 0.515625,
      "rewards/chosen": -0.09579524397850037,
      "rewards/margins": 0.002058264799416065,
      "rewards/rejected": -0.09785348922014236,
      "step": 56
    },
    {
      "epoch": 0.7509263071222725,
      "grad_norm": 0.486501008272171,
      "learning_rate": 8.388213164029459e-08,
      "logits/chosen": 10.484691619873047,
      "logits/rejected": 11.245904922485352,
      "logps/chosen": -129.30291748046875,
      "logps/ref_chosen": -119.82748413085938,
      "logps/ref_rejected": -132.91250610351562,
      "logps/rejected": -142.54058837890625,
      "loss": 0.6927,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.09475436806678772,
      "rewards/margins": 0.0015264188405126333,
      "rewards/rejected": -0.09628079831600189,
      "step": 57
    },
    {
      "epoch": 0.7641004528612598,
      "grad_norm": 0.5657970309257507,
      "learning_rate": 7.530764305528958e-08,
      "logits/chosen": 9.822986602783203,
|
"logits/rejected": 10.402128219604492, |
|
"logps/chosen": -126.34912872314453, |
|
"logps/ref_chosen": -116.2895278930664, |
|
"logps/ref_rejected": -130.56680297851562, |
|
"logps/rejected": -140.8634033203125, |
|
"loss": 0.6922, |
|
"rewards/accuracies": 0.5546875, |
|
"rewards/chosen": -0.1005958765745163, |
|
"rewards/margins": 0.002369978930801153, |
|
"rewards/rejected": -0.10296584665775299, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.7772745986002471, |
|
"grad_norm": 0.4375581741333008, |
|
"learning_rate": 6.711716618408281e-08, |
|
"logits/chosen": 9.962854385375977, |
|
"logits/rejected": 10.844944953918457, |
|
"logps/chosen": -128.9604034423828, |
|
"logps/ref_chosen": -118.71016693115234, |
|
"logps/ref_rejected": -136.29736328125, |
|
"logps/rejected": -146.73483276367188, |
|
"loss": 0.6925, |
|
"rewards/accuracies": 0.4765625, |
|
"rewards/chosen": -0.10250238329172134, |
|
"rewards/margins": 0.0018722245004028082, |
|
"rewards/rejected": -0.10437458753585815, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.7904487443392343, |
|
"grad_norm": 0.4616609513759613, |
|
"learning_rate": 5.932870547240454e-08, |
|
"logits/chosen": 10.142905235290527, |
|
"logits/rejected": 11.08536148071289, |
|
"logps/chosen": -129.35299682617188, |
|
"logps/ref_chosen": -119.55867767333984, |
|
"logps/ref_rejected": -136.89230346679688, |
|
"logps/rejected": -147.8154754638672, |
|
"loss": 0.6879, |
|
"rewards/accuracies": 0.5703125, |
|
"rewards/chosen": -0.09794314205646515, |
|
"rewards/margins": 0.011288601905107498, |
|
"rewards/rejected": -0.10923174023628235, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.8036228900782215, |
|
"grad_norm": 0.7305423617362976, |
|
"learning_rate": 5.1959381647217665e-08, |
|
"logits/chosen": 9.79527473449707, |
|
"logits/rejected": 10.603086471557617, |
|
"logps/chosen": -129.09518432617188, |
|
"logps/ref_chosen": -118.58332061767578, |
|
"logps/ref_rejected": -132.3976287841797, |
|
"logps/rejected": -143.35614013671875, |
|
"loss": 0.6912, |
|
"rewards/accuracies": 0.5078125, |
|
"rewards/chosen": -0.10511861741542816, |
|
"rewards/margins": 0.004466457758098841, |
|
"rewards/rejected": -0.10958506911993027, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.8167970358172087, |
|
"grad_norm": 0.5044032335281372, |
|
"learning_rate": 4.502539408164385e-08, |
|
"logits/chosen": 9.983121871948242, |
|
"logits/rejected": 10.669804573059082, |
|
"logps/chosen": -132.02255249023438, |
|
"logps/ref_chosen": -122.35013580322266, |
|
"logps/ref_rejected": -131.44406127929688, |
|
"logps/rejected": -140.59652709960938, |
|
"loss": 0.696, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.09672413021326065, |
|
"rewards/margins": -0.0051995753310620785, |
|
"rewards/rejected": -0.09152455627918243, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.829971181556196, |
|
"grad_norm": 0.4333811402320862, |
|
"learning_rate": 3.854198518522564e-08, |
|
"logits/chosen": 9.989500045776367, |
|
"logits/rejected": 10.936676979064941, |
|
"logps/chosen": -129.6297149658203, |
|
"logps/ref_chosen": -119.4000015258789, |
|
"logps/ref_rejected": -133.5243682861328, |
|
"logps/rejected": -144.4652557373047, |
|
"loss": 0.69, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.10229712724685669, |
|
"rewards/margins": 0.007111664395779371, |
|
"rewards/rejected": -0.10940880328416824, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.8431453272951832, |
|
"grad_norm": 0.49472713470458984, |
|
"learning_rate": 3.2523406897802444e-08, |
|
"logits/chosen": 10.366823196411133, |
|
"logits/rejected": 10.912262916564941, |
|
"logps/chosen": -131.99964904785156, |
|
"logps/ref_chosen": -122.98699188232422, |
|
"logps/ref_rejected": -134.78515625, |
|
"logps/rejected": -145.1294708251953, |
|
"loss": 0.6868, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.09012652933597565, |
|
"rewards/margins": 0.013316763564944267, |
|
"rewards/rejected": -0.10344328731298447, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.8563194730341704, |
|
"grad_norm": 0.41725417971611023, |
|
"learning_rate": 2.6982889360653376e-08, |
|
"logits/chosen": 9.988856315612793, |
|
"logits/rejected": 10.997881889343262, |
|
"logps/chosen": -135.51519775390625, |
|
"logps/ref_chosen": -125.97425079345703, |
|
"logps/ref_rejected": -139.60340881347656, |
|
"logps/rejected": -149.2346954345703, |
|
"loss": 0.6929, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.09540940076112747, |
|
"rewards/margins": 0.0009036160772666335, |
|
"rewards/rejected": -0.09631301462650299, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.8694936187731577, |
|
"grad_norm": 0.4466845393180847, |
|
"learning_rate": 2.1932611833775843e-08, |
|
"logits/chosen": 9.85665512084961, |
|
"logits/rejected": 10.717552185058594, |
|
"logps/chosen": -129.7871551513672, |
|
"logps/ref_chosen": -119.83372497558594, |
|
"logps/ref_rejected": -138.48184204101562, |
|
"logps/rejected": -149.67349243164062, |
|
"loss": 0.6873, |
|
"rewards/accuracies": 0.5859375, |
|
"rewards/chosen": -0.09953439235687256, |
|
"rewards/margins": 0.012382248416543007, |
|
"rewards/rejected": -0.11191663891077042, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.8826677645121449, |
|
"grad_norm": 1.0083649158477783, |
|
"learning_rate": 1.738367592322837e-08, |
|
"logits/chosen": 9.97527027130127, |
|
"logits/rejected": 10.858278274536133, |
|
"logps/chosen": -129.06263732910156, |
|
"logps/ref_chosen": -119.95507049560547, |
|
"logps/ref_rejected": -135.12179565429688, |
|
"logps/rejected": -144.7335662841797, |
|
"loss": 0.6909, |
|
"rewards/accuracies": 0.515625, |
|
"rewards/chosen": -0.09107556939125061, |
|
"rewards/margins": 0.005042179953306913, |
|
"rewards/rejected": -0.09611774981021881, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.8958419102511321, |
|
"grad_norm": 0.9535924792289734, |
|
"learning_rate": 1.3346081177391472e-08, |
|
"logits/chosen": 10.458059310913086, |
|
"logits/rejected": 10.82265567779541, |
|
"logps/chosen": -136.9637451171875, |
|
"logps/ref_chosen": -127.0732421875, |
|
"logps/ref_rejected": -134.5887908935547, |
|
"logps/rejected": -144.5718994140625, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.453125, |
|
"rewards/chosen": -0.09890485554933548, |
|
"rewards/margins": 0.0009263246320188046, |
|
"rewards/rejected": -0.09983118623495102, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.9090160559901194, |
|
"grad_norm": 0.6569604873657227, |
|
"learning_rate": 9.828703105789981e-09, |
|
"logits/chosen": 10.355306625366211, |
|
"logits/rejected": 11.095146179199219, |
|
"logps/chosen": -127.32408142089844, |
|
"logps/ref_chosen": -118.01029968261719, |
|
"logps/ref_rejected": -131.78138732910156, |
|
"logps/rejected": -141.7933807373047, |
|
"loss": 0.69, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.09313800930976868, |
|
"rewards/margins": 0.006981834769248962, |
|
"rewards/rejected": -0.10011984407901764, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.9221902017291066, |
|
"grad_norm": 0.832196056842804, |
|
"learning_rate": 6.839273668796747e-09, |
|
"logits/chosen": 9.993429183959961, |
|
"logits/rejected": 10.590087890625, |
|
"logps/chosen": -127.79402160644531, |
|
"logps/ref_chosen": -117.97954559326172, |
|
"logps/ref_rejected": -134.86376953125, |
|
"logps/rejected": -145.40037536621094, |
|
"loss": 0.6899, |
|
"rewards/accuracies": 0.5546875, |
|
"rewards/chosen": -0.09814472496509552, |
|
"rewards/margins": 0.00722128339111805, |
|
"rewards/rejected": -0.10536602139472961, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.9353643474680938, |
|
"grad_norm": 0.5360449552536011, |
|
"learning_rate": 4.384364281105973e-09, |
|
"logits/chosen": 9.948233604431152, |
|
"logits/rejected": 11.157342910766602, |
|
"logps/chosen": -121.68891906738281, |
|
"logps/ref_chosen": -112.08084869384766, |
|
"logps/ref_rejected": -127.31222534179688, |
|
"logps/rejected": -137.7093963623047, |
|
"loss": 0.6896, |
|
"rewards/accuracies": 0.5859375, |
|
"rewards/chosen": -0.09608055651187897, |
|
"rewards/margins": 0.007891225628554821, |
|
"rewards/rejected": -0.10397178679704666, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.9485384932070811, |
|
"grad_norm": 0.6390266418457031, |
|
"learning_rate": 2.469371366337264e-09, |
|
"logits/chosen": 10.44871711730957, |
|
"logits/rejected": 11.414918899536133, |
|
"logps/chosen": -129.57125854492188, |
|
"logps/ref_chosen": -120.54607391357422, |
|
"logps/ref_rejected": -133.43760681152344, |
|
"logps/rejected": -143.47120666503906, |
|
"loss": 0.6884, |
|
"rewards/accuracies": 0.5390625, |
|
"rewards/chosen": -0.09025204181671143, |
|
"rewards/margins": 0.010083984583616257, |
|
"rewards/rejected": -0.10033603012561798, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.9617126389460683, |
|
"grad_norm": 0.49776870012283325, |
|
"learning_rate": 1.0985044945254762e-09, |
|
"logits/chosen": 9.985913276672363, |
|
"logits/rejected": 10.681262969970703, |
|
"logps/chosen": -136.79798889160156, |
|
"logps/ref_chosen": -127.98259735107422, |
|
"logps/ref_rejected": -140.61898803710938, |
|
"logps/rejected": -150.4745330810547, |
|
"loss": 0.6882, |
|
"rewards/accuracies": 0.59375, |
|
"rewards/chosen": -0.08815396577119827, |
|
"rewards/margins": 0.01040155254304409, |
|
"rewards/rejected": -0.09855551272630692, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.9748867846850556, |
|
"grad_norm": 0.6873400211334229, |
|
"learning_rate": 2.7477712857215675e-10, |
|
"logits/chosen": 10.209442138671875, |
|
"logits/rejected": 10.793924331665039, |
|
"logps/chosen": -126.40692138671875, |
|
"logps/ref_chosen": -117.43082427978516, |
|
"logps/ref_rejected": -129.7013702392578, |
|
"logps/rejected": -139.93634033203125, |
|
"loss": 0.6871, |
|
"rewards/accuracies": 0.6015625, |
|
"rewards/chosen": -0.08976094424724579, |
|
"rewards/margins": 0.012588722631335258, |
|
"rewards/rejected": -0.1023496687412262, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.9880609304240429, |
|
"grad_norm": 0.5661839246749878, |
|
"learning_rate": 0.0, |
|
"logits/chosen": 10.061009407043457, |
|
"logits/rejected": 10.860876083374023, |
|
"logps/chosen": -129.668212890625, |
|
"logps/ref_chosen": -120.09527587890625, |
|
"logps/ref_rejected": -133.85191345214844, |
|
"logps/rejected": -144.8162841796875, |
|
"loss": 0.6866, |
|
"rewards/accuracies": 0.5546875, |
|
"rewards/chosen": -0.09572924673557281, |
|
"rewards/margins": 0.013914356008172035, |
|
"rewards/rejected": -0.1096436083316803, |
|
"step": 75 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 75, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 12, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|