math_phi3_dpo_100_0 / checkpoint-48 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6323589954713874,
"eval_steps": 500,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013174145738987238,
"grad_norm": 0.5010076761245728,
"learning_rate": 6.25e-08,
"logits/chosen": 9.988622665405273,
"logits/rejected": 10.698101997375488,
"logps/chosen": -102.88545989990234,
"logps/ref_chosen": -102.88545989990234,
"logps/ref_rejected": -121.84871673583984,
"logps/rejected": -121.84871673583984,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.026348291477974475,
"grad_norm": 0.7802621126174927,
"learning_rate": 1.25e-07,
"logits/chosen": 10.208279609680176,
"logits/rejected": 11.06594467163086,
"logps/chosen": -107.70349884033203,
"logps/ref_chosen": -107.70349884033203,
"logps/ref_rejected": -121.89966583251953,
"logps/rejected": -121.89966583251953,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 2
},
{
"epoch": 0.03952243721696171,
"grad_norm": 0.5995805859565735,
"learning_rate": 1.875e-07,
"logits/chosen": 10.029329299926758,
"logits/rejected": 11.023927688598633,
"logps/chosen": -107.6470947265625,
"logps/ref_chosen": -107.98188781738281,
"logps/ref_rejected": -124.51527404785156,
"logps/rejected": -124.29098510742188,
"loss": 0.6926,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.0033478327095508575,
"rewards/margins": 0.001104944385588169,
"rewards/rejected": 0.0022428883239626884,
"step": 3
},
{
"epoch": 0.05269658295594895,
"grad_norm": 0.5383147597312927,
"learning_rate": 2.5e-07,
"logits/chosen": 9.823471069335938,
"logits/rejected": 10.842323303222656,
"logps/chosen": -109.73627471923828,
"logps/ref_chosen": -109.20836639404297,
"logps/ref_rejected": -119.23908996582031,
"logps/rejected": -119.65444946289062,
"loss": 0.6937,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.005279023200273514,
"rewards/margins": -0.001125341048464179,
"rewards/rejected": -0.004153682850301266,
"step": 4
},
{
"epoch": 0.06587072869493618,
"grad_norm": 0.5302512645721436,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 10.209351539611816,
"logits/rejected": 10.967523574829102,
"logps/chosen": -103.73981475830078,
"logps/ref_chosen": -103.87680053710938,
"logps/ref_rejected": -118.41618347167969,
"logps/rejected": -118.11978149414062,
"loss": 0.694,
"rewards/accuracies": 0.4453125,
"rewards/chosen": 0.0013697518734261394,
"rewards/margins": -0.001594369299709797,
"rewards/rejected": 0.002964121289551258,
"step": 5
},
{
"epoch": 0.07904487443392343,
"grad_norm": 0.6919645667076111,
"learning_rate": 3.75e-07,
"logits/chosen": 10.676691055297852,
"logits/rejected": 11.460196495056152,
"logps/chosen": -108.08129119873047,
"logps/ref_chosen": -107.58968353271484,
"logps/ref_rejected": -122.07303619384766,
"logps/rejected": -122.37925720214844,
"loss": 0.6941,
"rewards/accuracies": 0.421875,
"rewards/chosen": -0.004916056990623474,
"rewards/margins": -0.0018538986332714558,
"rewards/rejected": -0.0030621583573520184,
"step": 6
},
{
"epoch": 0.09221902017291066,
"grad_norm": 0.40329915285110474,
"learning_rate": 4.375e-07,
"logits/chosen": 10.017489433288574,
"logits/rejected": 10.722752571105957,
"logps/chosen": -107.77075958251953,
"logps/ref_chosen": -107.42727661132812,
"logps/ref_rejected": -116.87063598632812,
"logps/rejected": -116.98759460449219,
"loss": 0.6943,
"rewards/accuracies": 0.4140625,
"rewards/chosen": -0.0034348624758422375,
"rewards/margins": -0.0022651171311736107,
"rewards/rejected": -0.0011697453446686268,
"step": 7
},
{
"epoch": 0.1053931659118979,
"grad_norm": 0.4481956362724304,
"learning_rate": 5e-07,
"logits/chosen": 10.191514015197754,
"logits/rejected": 11.094213485717773,
"logps/chosen": -106.06684112548828,
"logps/ref_chosen": -105.60282135009766,
"logps/ref_rejected": -119.53916931152344,
"logps/rejected": -119.9333267211914,
"loss": 0.6935,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.004640196915715933,
"rewards/margins": -0.0006986188236624002,
"rewards/rejected": -0.003941578324884176,
"step": 8
},
{
"epoch": 0.11856731165088513,
"grad_norm": 0.5002302527427673,
"learning_rate": 4.997252228714278e-07,
"logits/chosen": 10.164933204650879,
"logits/rejected": 11.139327049255371,
"logps/chosen": -106.06144714355469,
"logps/ref_chosen": -105.46086120605469,
"logps/ref_rejected": -119.00373840332031,
"logps/rejected": -119.59027862548828,
"loss": 0.6932,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.006005657836794853,
"rewards/margins": -0.00014029807061888278,
"rewards/rejected": -0.005865359678864479,
"step": 9
},
{
"epoch": 0.13174145738987236,
"grad_norm": 0.6467388868331909,
"learning_rate": 4.989014955054745e-07,
"logits/chosen": 9.98875904083252,
"logits/rejected": 10.815544128417969,
"logps/chosen": -105.14952850341797,
"logps/ref_chosen": -104.21009826660156,
"logps/ref_rejected": -118.9209213256836,
"logps/rejected": -119.72019958496094,
"loss": 0.6939,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.00939436536282301,
"rewards/margins": -0.0014015533961355686,
"rewards/rejected": -0.007992811501026154,
"step": 10
},
{
"epoch": 0.14491560312885962,
"grad_norm": 0.8090001344680786,
"learning_rate": 4.975306286336627e-07,
"logits/chosen": 9.946345329284668,
"logits/rejected": 11.13135814666748,
"logps/chosen": -107.09854125976562,
"logps/ref_chosen": -105.94319152832031,
"logps/ref_rejected": -122.76007843017578,
"logps/rejected": -123.9129409790039,
"loss": 0.6932,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.01155336108058691,
"rewards/margins": -2.4825334548950195e-05,
"rewards/rejected": -0.011528536677360535,
"step": 11
},
{
"epoch": 0.15808974886784685,
"grad_norm": 0.49643445014953613,
"learning_rate": 4.956156357188939e-07,
"logits/chosen": 9.876545906066895,
"logits/rejected": 10.567835807800293,
"logps/chosen": -109.7830810546875,
"logps/ref_chosen": -109.08442687988281,
"logps/ref_rejected": -121.41947174072266,
"logps/rejected": -122.12468719482422,
"loss": 0.6931,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.006986413151025772,
"rewards/margins": 6.572058191522956e-05,
"rewards/rejected": -0.00705213425680995,
"step": 12
},
{
"epoch": 0.17126389460683408,
"grad_norm": 0.5409023761749268,
"learning_rate": 4.931607263312032e-07,
"logits/chosen": 9.916489601135254,
"logits/rejected": 10.99366283416748,
"logps/chosen": -105.78418731689453,
"logps/ref_chosen": -104.62150573730469,
"logps/ref_rejected": -119.55384063720703,
"logps/rejected": -120.60539245605469,
"loss": 0.6937,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.011626748368144035,
"rewards/margins": -0.0011113437358289957,
"rewards/rejected": -0.010515404865145683,
"step": 13
},
{
"epoch": 0.1844380403458213,
"grad_norm": 0.9010350108146667,
"learning_rate": 4.9017129689421e-07,
"logits/chosen": 10.480968475341797,
"logits/rejected": 11.599580764770508,
"logps/chosen": -107.57891845703125,
"logps/ref_chosen": -106.179443359375,
"logps/ref_rejected": -120.73036193847656,
"logps/rejected": -122.02151489257812,
"loss": 0.6937,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.013994838111102581,
"rewards/margins": -0.0010832665720954537,
"rewards/rejected": -0.012911571189761162,
"step": 14
},
{
"epoch": 0.19761218608480857,
"grad_norm": 0.8957933187484741,
"learning_rate": 4.866539188226085e-07,
"logits/chosen": 9.80737018585205,
"logits/rejected": 10.738137245178223,
"logps/chosen": -107.41307067871094,
"logps/ref_chosen": -105.70547485351562,
"logps/ref_rejected": -118.89997863769531,
"logps/rejected": -120.64563751220703,
"loss": 0.693,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.01707591488957405,
"rewards/margins": 0.00038063188549131155,
"rewards/rejected": -0.017456548288464546,
"step": 15
},
{
"epoch": 0.2107863318237958,
"grad_norm": 0.7111092805862427,
"learning_rate": 4.826163240767716e-07,
"logits/chosen": 10.634671211242676,
"logits/rejected": 11.238730430603027,
"logps/chosen": -110.74053955078125,
"logps/ref_chosen": -108.86376953125,
"logps/ref_rejected": -122.1635513305664,
"logps/rejected": -124.17098999023438,
"loss": 0.6925,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.018767736852169037,
"rewards/margins": 0.0013066575629636645,
"rewards/rejected": -0.02007439360022545,
"step": 16
},
{
"epoch": 0.22396047756278303,
"grad_norm": 0.5599011778831482,
"learning_rate": 4.780673881662242e-07,
"logits/chosen": 10.138323783874512,
"logits/rejected": 10.76909065246582,
"logps/chosen": -104.49694061279297,
"logps/ref_chosen": -102.93986511230469,
"logps/ref_rejected": -119.43718719482422,
"logps/rejected": -121.1658935546875,
"loss": 0.6923,
"rewards/accuracies": 0.5859375,
"rewards/chosen": -0.015570812858641148,
"rewards/margins": 0.0017161847790703177,
"rewards/rejected": -0.0172869972884655,
"step": 17
},
{
"epoch": 0.23713462330177026,
"grad_norm": 0.7006244659423828,
"learning_rate": 4.730171106393466e-07,
"logits/chosen": 10.374225616455078,
"logits/rejected": 11.157809257507324,
"logps/chosen": -105.8244400024414,
"logps/ref_chosen": -103.81341552734375,
"logps/ref_rejected": -117.45123291015625,
"logps/rejected": -119.37814331054688,
"loss": 0.6936,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.020110249519348145,
"rewards/margins": -0.0008410783484578133,
"rewards/rejected": -0.019269172102212906,
"step": 18
},
{
"epoch": 0.2503087690407575,
"grad_norm": 0.49562451243400574,
"learning_rate": 4.6747659310219757e-07,
"logits/chosen": 10.303974151611328,
"logits/rejected": 10.965604782104492,
"logps/chosen": -109.81462860107422,
"logps/ref_chosen": -107.85797119140625,
"logps/ref_rejected": -121.88042449951172,
"logps/rejected": -124.41007232666016,
"loss": 0.6903,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.019566601142287254,
"rewards/margins": 0.005729879252612591,
"rewards/rejected": -0.02529647946357727,
"step": 19
},
{
"epoch": 0.2634829147797447,
"grad_norm": 0.48442593216896057,
"learning_rate": 4.6145801481477433e-07,
"logits/chosen": 10.682939529418945,
"logits/rejected": 11.487958908081055,
"logps/chosen": -105.8414077758789,
"logps/ref_chosen": -103.42721557617188,
"logps/ref_rejected": -116.7796630859375,
"logps/rejected": -119.14535522460938,
"loss": 0.6934,
"rewards/accuracies": 0.4609375,
"rewards/chosen": -0.024141818284988403,
"rewards/margins": -0.00048486533341929317,
"rewards/rejected": -0.023656953126192093,
"step": 20
},
{
"epoch": 0.276657060518732,
"grad_norm": 0.6605204343795776,
"learning_rate": 4.549746059183561e-07,
"logits/chosen": 9.703460693359375,
"logits/rejected": 10.792010307312012,
"logps/chosen": -109.3312759399414,
"logps/ref_chosen": -106.60163879394531,
"logps/ref_rejected": -124.56562805175781,
"logps/rejected": -127.45460510253906,
"loss": 0.6924,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.027296334505081177,
"rewards/margins": 0.0015935557894408703,
"rewards/rejected": -0.028889887034893036,
"step": 21
},
{
"epoch": 0.28983120625771924,
"grad_norm": 0.8831092715263367,
"learning_rate": 4.480406183527823e-07,
"logits/chosen": 10.168815612792969,
"logits/rejected": 11.040711402893066,
"logps/chosen": -107.1961669921875,
"logps/ref_chosen": -103.77696228027344,
"logps/ref_rejected": -118.73616027832031,
"logps/rejected": -121.80266571044922,
"loss": 0.695,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.03419206291437149,
"rewards/margins": -0.0035268948413431644,
"rewards/rejected": -0.030665166676044464,
"step": 22
},
{
"epoch": 0.3030053519967065,
"grad_norm": 0.37249696254730225,
"learning_rate": 4.4067129452759546e-07,
"logits/chosen": 10.050610542297363,
"logits/rejected": 11.06921100616455,
"logps/chosen": -108.137451171875,
"logps/ref_chosen": -104.72956085205078,
"logps/ref_rejected": -121.35556030273438,
"logps/rejected": -124.6715316772461,
"loss": 0.6937,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.03407883644104004,
"rewards/margins": -0.0009191110148094594,
"rewards/rejected": -0.03315972909331322,
"step": 23
},
{
"epoch": 0.3161794977356937,
"grad_norm": 0.5831263661384583,
"learning_rate": 4.3288283381591725e-07,
"logits/chosen": 10.146599769592285,
"logits/rejected": 10.979142189025879,
"logps/chosen": -109.36030578613281,
"logps/ref_chosen": -105.88758087158203,
"logps/ref_rejected": -125.69054412841797,
"logps/rejected": -129.528076171875,
"loss": 0.6914,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.034727297723293304,
"rewards/margins": 0.003648004261776805,
"rewards/rejected": -0.03837530314922333,
"step": 24
},
{
"epoch": 0.32935364347468093,
"grad_norm": 0.574914276599884,
"learning_rate": 4.246923569447104e-07,
"logits/chosen": 10.25512409210205,
"logits/rejected": 10.956289291381836,
"logps/chosen": -113.96874237060547,
"logps/ref_chosen": -110.0761489868164,
"logps/ref_rejected": -129.10540771484375,
"logps/rejected": -133.20553588867188,
"loss": 0.6922,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.03892593830823898,
"rewards/margins": 0.0020754451397806406,
"rewards/rejected": -0.04100137948989868,
"step": 25
},
{
"epoch": 0.34252778921366817,
"grad_norm": 0.42842620611190796,
"learning_rate": 4.161178683597054e-07,
"logits/chosen": 10.368014335632324,
"logits/rejected": 11.450322151184082,
"logps/chosen": -108.39615631103516,
"logps/ref_chosen": -103.74571990966797,
"logps/ref_rejected": -120.73832702636719,
"logps/rejected": -125.09370422363281,
"loss": 0.6947,
"rewards/accuracies": 0.453125,
"rewards/chosen": -0.046504296362400055,
"rewards/margins": -0.002950500464066863,
"rewards/rejected": -0.04355379566550255,
"step": 26
},
{
"epoch": 0.3557019349526554,
"grad_norm": 0.5645923018455505,
"learning_rate": 4.0717821664772124e-07,
"logits/chosen": 10.042550086975098,
"logits/rejected": 11.243234634399414,
"logps/chosen": -110.1788330078125,
"logps/ref_chosen": -105.47428131103516,
"logps/ref_rejected": -120.5193099975586,
"logps/rejected": -125.00762939453125,
"loss": 0.6944,
"rewards/accuracies": 0.4609375,
"rewards/chosen": -0.04704552888870239,
"rewards/margins": -0.0021622537169605494,
"rewards/rejected": -0.044883277267217636,
"step": 27
},
{
"epoch": 0.3688760806916426,
"grad_norm": 0.680381178855896,
"learning_rate": 3.978930531033806e-07,
"logits/chosen": 9.559943199157715,
"logits/rejected": 10.705163955688477,
"logps/chosen": -107.98367309570312,
"logps/ref_chosen": -103.72540283203125,
"logps/ref_rejected": -119.79557800292969,
"logps/rejected": -124.30613708496094,
"loss": 0.692,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.04258278012275696,
"rewards/margins": 0.0025227584410458803,
"rewards/rejected": -0.04510553926229477,
"step": 28
},
{
"epoch": 0.3820502264306299,
"grad_norm": 0.569850504398346,
"learning_rate": 3.882827885312998e-07,
"logits/chosen": 10.065170288085938,
"logits/rejected": 11.1397123336792,
"logps/chosen": -112.4742431640625,
"logps/ref_chosen": -108.65434265136719,
"logps/ref_rejected": -121.46784973144531,
"logps/rejected": -125.24649047851562,
"loss": 0.6935,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.03819899260997772,
"rewards/margins": -0.0004125672858208418,
"rewards/rejected": -0.03778642416000366,
"step": 29
},
{
"epoch": 0.39522437216961714,
"grad_norm": 0.5246622562408447,
"learning_rate": 3.7836854837871044e-07,
"logits/chosen": 10.21100902557373,
"logits/rejected": 11.58244800567627,
"logps/chosen": -108.34341430664062,
"logps/ref_chosen": -103.62174224853516,
"logps/ref_rejected": -126.73807525634766,
"logps/rejected": -131.23304748535156,
"loss": 0.6944,
"rewards/accuracies": 0.453125,
"rewards/chosen": -0.04721669480204582,
"rewards/margins": -0.0022669308818876743,
"rewards/rejected": -0.044949766248464584,
"step": 30
},
{
"epoch": 0.4083985179086044,
"grad_norm": 0.37346869707107544,
"learning_rate": 3.681721262971413e-07,
"logits/chosen": 9.762397766113281,
"logits/rejected": 10.784234046936035,
"logps/chosen": -110.99979400634766,
"logps/ref_chosen": -106.10479736328125,
"logps/ref_rejected": -120.6382827758789,
"logps/rejected": -125.51140594482422,
"loss": 0.6934,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.04894987493753433,
"rewards/margins": -0.00021872523939236999,
"rewards/rejected": -0.04873115196824074,
"step": 31
},
{
"epoch": 0.4215726636475916,
"grad_norm": 0.4927314221858978,
"learning_rate": 3.577159362352426e-07,
"logits/chosen": 10.023344993591309,
"logits/rejected": 11.392967224121094,
"logps/chosen": -110.29427337646484,
"logps/ref_chosen": -105.99569702148438,
"logps/ref_rejected": -128.34303283691406,
"logps/rejected": -132.72604370117188,
"loss": 0.6929,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.04298572242259979,
"rewards/margins": 0.0008442200487479568,
"rewards/rejected": -0.04382994398474693,
"step": 32
},
{
"epoch": 0.43474680938657884,
"grad_norm": 0.9654183983802795,
"learning_rate": 3.470229631680624e-07,
"logits/chosen": 9.917928695678711,
"logits/rejected": 10.762564659118652,
"logps/chosen": -110.05650329589844,
"logps/ref_chosen": -105.72196197509766,
"logps/ref_rejected": -121.59507751464844,
"logps/rejected": -126.74140167236328,
"loss": 0.6892,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.04334544017910957,
"rewards/margins": 0.008117962628602982,
"rewards/rejected": -0.05146340653300285,
"step": 33
},
{
"epoch": 0.44792095512556607,
"grad_norm": 0.5439296960830688,
"learning_rate": 3.361167125710832e-07,
"logits/chosen": 10.135066032409668,
"logits/rejected": 10.976527214050293,
"logps/chosen": -115.52051544189453,
"logps/ref_chosen": -111.4834976196289,
"logps/ref_rejected": -130.48089599609375,
"logps/rejected": -135.11973571777344,
"loss": 0.6903,
"rewards/accuracies": 0.59375,
"rewards/chosen": -0.04037024453282356,
"rewards/margins": 0.0060182418674230576,
"rewards/rejected": -0.04638848453760147,
"step": 34
},
{
"epoch": 0.4610951008645533,
"grad_norm": 0.6634539365768433,
"learning_rate": 3.2502115875008516e-07,
"logits/chosen": 10.4329252243042,
"logits/rejected": 11.376739501953125,
"logps/chosen": -112.54086303710938,
"logps/ref_chosen": -108.9183349609375,
"logps/ref_rejected": -121.32493591308594,
"logps/rejected": -125.75639343261719,
"loss": 0.6893,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.03622515872120857,
"rewards/margins": 0.008089457638561726,
"rewards/rejected": -0.04431461915373802,
"step": 35
},
{
"epoch": 0.47426924660354053,
"grad_norm": 0.6542326807975769,
"learning_rate": 3.137606921404191e-07,
"logits/chosen": 10.058025360107422,
"logits/rejected": 10.712655067443848,
"logps/chosen": -111.80799865722656,
"logps/ref_chosen": -107.1411361694336,
"logps/ref_rejected": -118.66165161132812,
"logps/rejected": -123.4068832397461,
"loss": 0.6929,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.046668585389852524,
"rewards/margins": 0.000783862778916955,
"rewards/rejected": -0.047452446073293686,
"step": 36
},
{
"epoch": 0.4874433923425278,
"grad_norm": 0.4339970350265503,
"learning_rate": 3.0236006569153616e-07,
"logits/chosen": 10.282718658447266,
"logits/rejected": 11.08802604675293,
"logps/chosen": -110.58891296386719,
"logps/ref_chosen": -106.6348876953125,
"logps/ref_rejected": -121.37834167480469,
"logps/rejected": -125.47262573242188,
"loss": 0.6926,
"rewards/accuracies": 0.5703125,
"rewards/chosen": -0.03954017534852028,
"rewards/margins": 0.0014026057906448841,
"rewards/rejected": -0.040942780673503876,
"step": 37
},
{
"epoch": 0.500617538081515,
"grad_norm": 0.6856023073196411,
"learning_rate": 2.9084434045463254e-07,
"logits/chosen": 9.826108932495117,
"logits/rejected": 10.918371200561523,
"logps/chosen": -107.96115112304688,
"logps/ref_chosen": -104.01033782958984,
"logps/ref_rejected": -119.02666473388672,
"logps/rejected": -123.3695068359375,
"loss": 0.6913,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.039508119225502014,
"rewards/margins": 0.003920318093150854,
"rewards/rejected": -0.043428435921669006,
"step": 38
},
{
"epoch": 0.5137916838205022,
"grad_norm": 0.5166105628013611,
"learning_rate": 2.7923883049302066e-07,
"logits/chosen": 10.282295227050781,
"logits/rejected": 11.047395706176758,
"logps/chosen": -113.61396026611328,
"logps/ref_chosen": -109.76485443115234,
"logps/ref_rejected": -122.25163269042969,
"logps/rejected": -126.30699157714844,
"loss": 0.6922,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.038491036742925644,
"rewards/margins": 0.002062497427687049,
"rewards/rejected": -0.040553539991378784,
"step": 39
},
{
"epoch": 0.5269658295594895,
"grad_norm": 0.7613899111747742,
"learning_rate": 2.6756904723632324e-07,
"logits/chosen": 10.10784912109375,
"logits/rejected": 11.276141166687012,
"logps/chosen": -111.32815551757812,
"logps/ref_chosen": -107.18782806396484,
"logps/ref_rejected": -124.24542236328125,
"logps/rejected": -128.5751495361328,
"loss": 0.6924,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.04140327125787735,
"rewards/margins": 0.001893932232633233,
"rewards/rejected": -0.04329720139503479,
"step": 40
},
{
"epoch": 0.5401399752984768,
"grad_norm": 0.5243136882781982,
"learning_rate": 2.5586064340081516e-07,
"logits/chosen": 10.432905197143555,
"logits/rejected": 11.059402465820312,
"logps/chosen": -109.8786849975586,
"logps/ref_chosen": -106.42051696777344,
"logps/ref_rejected": -122.25247192382812,
"logps/rejected": -126.91265106201172,
"loss": 0.6873,
"rewards/accuracies": 0.6171875,
"rewards/chosen": -0.034581609070301056,
"rewards/margins": 0.012020176276564598,
"rewards/rejected": -0.0466017909348011,
"step": 41
},
{
"epoch": 0.553314121037464,
"grad_norm": 0.6438937187194824,
"learning_rate": 2.4413935659918487e-07,
"logits/chosen": 9.422279357910156,
"logits/rejected": 10.50160026550293,
"logps/chosen": -106.31170654296875,
"logps/ref_chosen": -103.1148452758789,
"logps/ref_rejected": -116.55464935302734,
"logps/rejected": -120.24292755126953,
"loss": 0.6908,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.03196856006979942,
"rewards/margins": 0.004914162214845419,
"rewards/rejected": -0.03688272088766098,
"step": 42
},
{
"epoch": 0.5664882667764513,
"grad_norm": 0.5491278171539307,
"learning_rate": 2.3243095276367684e-07,
"logits/chosen": 9.590739250183105,
"logits/rejected": 10.710611343383789,
"logps/chosen": -107.79342651367188,
"logps/ref_chosen": -104.21064758300781,
"logps/ref_rejected": -118.7614974975586,
"logps/rejected": -122.59417724609375,
"loss": 0.6921,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.03582778573036194,
"rewards/margins": 0.002499072812497616,
"rewards/rejected": -0.03832685574889183,
"step": 43
},
{
"epoch": 0.5796624125154385,
"grad_norm": 0.39058002829551697,
"learning_rate": 2.2076116950697937e-07,
"logits/chosen": 9.714144706726074,
"logits/rejected": 10.525476455688477,
"logps/chosen": -104.20186614990234,
"logps/ref_chosen": -100.59449005126953,
"logps/ref_rejected": -115.95166778564453,
"logps/rejected": -120.36408233642578,
"loss": 0.6893,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.03607375919818878,
"rewards/margins": 0.008050307631492615,
"rewards/rejected": -0.044124066829681396,
"step": 44
},
{
"epoch": 0.5928365582544257,
"grad_norm": 0.49462494254112244,
"learning_rate": 2.091556595453674e-07,
"logits/chosen": 9.886950492858887,
"logits/rejected": 10.70908260345459,
"logps/chosen": -110.35658264160156,
"logps/ref_chosen": -106.96060943603516,
"logps/ref_rejected": -125.49449157714844,
"logps/rejected": -129.1283416748047,
"loss": 0.6921,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.033959683030843735,
"rewards/margins": 0.0023788013495504856,
"rewards/rejected": -0.03633848577737808,
"step": 45
},
{
"epoch": 0.606010703993413,
"grad_norm": 0.5809131860733032,
"learning_rate": 1.9763993430846392e-07,
"logits/chosen": 9.98457145690918,
"logits/rejected": 10.643254280090332,
"logps/chosen": -110.51849365234375,
"logps/ref_chosen": -107.08544158935547,
"logps/ref_rejected": -120.38542175292969,
"logps/rejected": -124.85211944580078,
"loss": 0.6881,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.034330543130636215,
"rewards/margins": 0.010336533188819885,
"rewards/rejected": -0.0446670763194561,
"step": 46
},
{
"epoch": 0.6191848497324002,
"grad_norm": 0.6774134039878845,
"learning_rate": 1.862393078595809e-07,
"logits/chosen": 9.864917755126953,
"logits/rejected": 11.103195190429688,
"logps/chosen": -109.32252502441406,
"logps/ref_chosen": -105.74787902832031,
"logps/ref_rejected": -122.93606567382812,
"logps/rejected": -126.75948333740234,
"loss": 0.692,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.03574639558792114,
"rewards/margins": 0.0024877344258129597,
"rewards/rejected": -0.038234129548072815,
"step": 47
},
{
"epoch": 0.6323589954713874,
"grad_norm": 0.6952683925628662,
"learning_rate": 1.7497884124991485e-07,
"logits/chosen": 10.426509857177734,
"logits/rejected": 11.333501815795898,
"logps/chosen": -108.989013671875,
"logps/ref_chosen": -105.3005599975586,
"logps/ref_rejected": -123.93569946289062,
"logps/rejected": -127.78026580810547,
"loss": 0.6925,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.036884430795907974,
"rewards/margins": 0.001561243087053299,
"rewards/rejected": -0.03844567388296127,
"step": 48
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 12,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
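
A minimal sketch of how this log could be inspected offline, assuming the checkpoint folder has been downloaded locally (the `checkpoint-48/trainer_state.json` path below is illustrative). It reads the `log_history` entries shown above and prints the DPO loss, reward margin, and reward accuracy per logged step, plus a rough average over the last ten steps:

```python
import json

# Load the trainer state exported by the Hugging Face Trainer.
# Path is an assumption; point it at wherever checkpoint-48 was downloaded.
with open("checkpoint-48/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Each entry logs DPO statistics for one optimizer step (logging_steps = 1).
for entry in history:
    print(
        f"step {entry['step']:>3} | "
        f"loss {entry['loss']:.4f} | "
        f"margin {entry['rewards/margins']:+.5f} | "
        f"acc {entry['rewards/accuracies']:.3f}"
    )

# Rough summary over the last 10 logged steps.
tail = history[-10:]
avg_margin = sum(e["rewards/margins"] for e in tail) / len(tail)
avg_acc = sum(e["rewards/accuracies"] for e in tail) / len(tail)
print(f"last-10 avg margin: {avg_margin:+.5f}, avg accuracy: {avg_acc:.3f}")
```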