math_phi3_dpo_0_0/checkpoint-36/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.47426924660354053,
"eval_steps": 500,
"global_step": 36,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013174145738987238,
"grad_norm": 0.48070791363716125,
"learning_rate": 6.25e-08,
"logits/chosen": 10.32492733001709,
"logits/rejected": 10.282785415649414,
"logps/chosen": -176.09544372558594,
"logps/ref_chosen": -176.09544372558594,
"logps/ref_rejected": -181.75552368164062,
"logps/rejected": -181.75552368164062,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.026348291477974475,
"grad_norm": 0.4185059070587158,
"learning_rate": 1.25e-07,
"logits/chosen": 10.530074119567871,
"logits/rejected": 10.672085762023926,
"logps/chosen": -173.2084503173828,
"logps/ref_chosen": -173.2084503173828,
"logps/ref_rejected": -187.02206420898438,
"logps/rejected": -187.02206420898438,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 2
},
{
"epoch": 0.03952243721696171,
"grad_norm": 0.3848375082015991,
"learning_rate": 1.875e-07,
"logits/chosen": 10.340475082397461,
"logits/rejected": 10.486526489257812,
"logps/chosen": -169.6623992919922,
"logps/ref_chosen": -169.080810546875,
"logps/ref_rejected": -182.52792358398438,
"logps/rejected": -182.80642700195312,
"loss": 0.6947,
"rewards/accuracies": 0.3828125,
"rewards/chosen": -0.005815833806991577,
"rewards/margins": -0.0030306153930723667,
"rewards/rejected": -0.002785218646749854,
"step": 3
},
{
"epoch": 0.05269658295594895,
"grad_norm": 0.7261964678764343,
"learning_rate": 2.5e-07,
"logits/chosen": 10.3652925491333,
"logits/rejected": 10.412069320678711,
"logps/chosen": -177.03428649902344,
"logps/ref_chosen": -176.78369140625,
"logps/ref_rejected": -180.0931396484375,
"logps/rejected": -180.21463012695312,
"loss": 0.6938,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.0025058696046471596,
"rewards/margins": -0.0012909012148156762,
"rewards/rejected": -0.0012149682734161615,
"step": 4
},
{
"epoch": 0.06587072869493618,
"grad_norm": 0.3572319746017456,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 10.757452011108398,
"logits/rejected": 10.791389465332031,
"logps/chosen": -169.12631225585938,
"logps/ref_chosen": -168.522216796875,
"logps/ref_rejected": -176.08982849121094,
"logps/rejected": -176.59901428222656,
"loss": 0.6936,
"rewards/accuracies": 0.4609375,
"rewards/chosen": -0.0060411859303712845,
"rewards/margins": -0.0009493756806477904,
"rewards/rejected": -0.0050918105989694595,
"step": 5
},
{
"epoch": 0.07904487443392343,
"grad_norm": 0.4071093201637268,
"learning_rate": 3.75e-07,
"logits/chosen": 11.003137588500977,
"logits/rejected": 11.056475639343262,
"logps/chosen": -174.662353515625,
"logps/ref_chosen": -174.5843048095703,
"logps/ref_rejected": -181.65040588378906,
"logps/rejected": -181.72401428222656,
"loss": 0.6932,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.0007804610067978501,
"rewards/margins": -4.43047538283281e-05,
"rewards/rejected": -0.0007361561874859035,
"step": 6
},
{
"epoch": 0.09221902017291066,
"grad_norm": 0.7951124906539917,
"learning_rate": 4.375e-07,
"logits/chosen": 10.003807067871094,
"logits/rejected": 10.067156791687012,
"logps/chosen": -171.4049530029297,
"logps/ref_chosen": -170.4031219482422,
"logps/ref_rejected": -181.00917053222656,
"logps/rejected": -181.8280792236328,
"loss": 0.6941,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.010018287226557732,
"rewards/margins": -0.0018291514134034514,
"rewards/rejected": -0.008189136162400246,
"step": 7
},
{
"epoch": 0.1053931659118979,
"grad_norm": 0.5591660737991333,
"learning_rate": 5e-07,
"logits/chosen": 10.874868392944336,
"logits/rejected": 10.79828929901123,
"logps/chosen": -179.36700439453125,
"logps/ref_chosen": -178.47369384765625,
"logps/ref_rejected": -184.52243041992188,
"logps/rejected": -185.44284057617188,
"loss": 0.693,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.008933190256357193,
"rewards/margins": 0.0002710025873966515,
"rewards/rejected": -0.009204192087054253,
"step": 8
},
{
"epoch": 0.11856731165088513,
"grad_norm": 0.45744186639785767,
"learning_rate": 4.997252228714278e-07,
"logits/chosen": 10.472944259643555,
"logits/rejected": 10.640350341796875,
"logps/chosen": -175.36553955078125,
"logps/ref_chosen": -174.38418579101562,
"logps/ref_rejected": -182.0985565185547,
"logps/rejected": -183.1609649658203,
"loss": 0.6928,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.009813489392399788,
"rewards/margins": 0.0008107352769002318,
"rewards/rejected": -0.010624224320054054,
"step": 9
},
{
"epoch": 0.13174145738987236,
"grad_norm": 0.3784727156162262,
"learning_rate": 4.989014955054745e-07,
"logits/chosen": 10.471797943115234,
"logits/rejected": 10.57437515258789,
"logps/chosen": -173.78768920898438,
"logps/ref_chosen": -172.48837280273438,
"logps/ref_rejected": -179.625,
"logps/rejected": -180.91595458984375,
"loss": 0.6932,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.012993087992072105,
"rewards/margins": -8.335959864780307e-05,
"rewards/rejected": -0.012909728102385998,
"step": 10
},
{
"epoch": 0.14491560312885962,
"grad_norm": 0.4041476547718048,
"learning_rate": 4.975306286336627e-07,
"logits/chosen": 10.539186477661133,
"logits/rejected": 10.477883338928223,
"logps/chosen": -172.62416076660156,
"logps/ref_chosen": -169.97216796875,
"logps/ref_rejected": -180.00955200195312,
"logps/rejected": -182.72525024414062,
"loss": 0.6929,
"rewards/accuracies": 0.546875,
"rewards/chosen": -0.026519589126110077,
"rewards/margins": 0.0006374535150825977,
"rewards/rejected": -0.027157040312886238,
"step": 11
},
{
"epoch": 0.15808974886784685,
"grad_norm": 0.5964677333831787,
"learning_rate": 4.956156357188939e-07,
"logits/chosen": 10.28010082244873,
"logits/rejected": 10.401304244995117,
"logps/chosen": -178.43714904785156,
"logps/ref_chosen": -175.83639526367188,
"logps/ref_rejected": -181.73045349121094,
"logps/rejected": -184.20223999023438,
"loss": 0.6938,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.026007818058133125,
"rewards/margins": -0.0012897354317829013,
"rewards/rejected": -0.024718083441257477,
"step": 12
},
{
"epoch": 0.17126389460683408,
"grad_norm": 0.488361120223999,
"learning_rate": 4.931607263312032e-07,
"logits/chosen": 10.292243957519531,
"logits/rejected": 10.386707305908203,
"logps/chosen": -173.99058532714844,
"logps/ref_chosen": -170.8806610107422,
"logps/ref_rejected": -180.6468963623047,
"logps/rejected": -184.01853942871094,
"loss": 0.6919,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.03109920769929886,
"rewards/margins": 0.002617327030748129,
"rewards/rejected": -0.033716537058353424,
"step": 13
},
{
"epoch": 0.1844380403458213,
"grad_norm": 0.45636966824531555,
"learning_rate": 4.9017129689421e-07,
"logits/chosen": 10.729025840759277,
"logits/rejected": 10.921786308288574,
"logps/chosen": -174.65345764160156,
"logps/ref_chosen": -171.15963745117188,
"logps/ref_rejected": -180.73471069335938,
"logps/rejected": -184.30807495117188,
"loss": 0.6928,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.03493800014257431,
"rewards/margins": 0.0007955244509503245,
"rewards/rejected": -0.03573352098464966,
"step": 14
},
{
"epoch": 0.19761218608480857,
"grad_norm": 0.3838670551776886,
"learning_rate": 4.866539188226085e-07,
"logits/chosen": 10.265353202819824,
"logits/rejected": 10.378411293029785,
"logps/chosen": -177.72991943359375,
"logps/ref_chosen": -173.84222412109375,
"logps/ref_rejected": -182.6996612548828,
"logps/rejected": -186.89825439453125,
"loss": 0.6916,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.03887684643268585,
"rewards/margins": 0.003108714707195759,
"rewards/rejected": -0.04198555648326874,
"step": 15
},
{
"epoch": 0.2107863318237958,
"grad_norm": 0.3698700964450836,
"learning_rate": 4.826163240767716e-07,
"logits/chosen": 10.751184463500977,
"logits/rejected": 10.945128440856934,
"logps/chosen": -181.60301208496094,
"logps/ref_chosen": -178.638671875,
"logps/ref_rejected": -185.8243408203125,
"logps/rejected": -188.93057250976562,
"loss": 0.6925,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.029643364250659943,
"rewards/margins": 0.0014189946232363582,
"rewards/rejected": -0.03106236271560192,
"step": 16
},
{
"epoch": 0.22396047756278303,
"grad_norm": 0.5419145822525024,
"learning_rate": 4.780673881662242e-07,
"logits/chosen": 10.279337882995605,
"logits/rejected": 10.265542984008789,
"logps/chosen": -176.72821044921875,
"logps/ref_chosen": -172.87332153320312,
"logps/ref_rejected": -177.30355834960938,
"logps/rejected": -181.23831176757812,
"loss": 0.6928,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.038549020886421204,
"rewards/margins": 0.0007985997945070267,
"rewards/rejected": -0.03934762626886368,
"step": 17
},
{
"epoch": 0.23713462330177026,
"grad_norm": 0.5288915038108826,
"learning_rate": 4.730171106393466e-07,
"logits/chosen": 10.508716583251953,
"logits/rejected": 10.601346969604492,
"logps/chosen": -176.69735717773438,
"logps/ref_chosen": -172.82644653320312,
"logps/ref_rejected": -180.93768310546875,
"logps/rejected": -184.82861328125,
"loss": 0.6931,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.038708824664354324,
"rewards/margins": 0.00020039017545059323,
"rewards/rejected": -0.03890921175479889,
"step": 18
},
{
"epoch": 0.2503087690407575,
"grad_norm": 0.45863935351371765,
"learning_rate": 4.6747659310219757e-07,
"logits/chosen": 10.509140014648438,
"logits/rejected": 10.403264999389648,
"logps/chosen": -183.62411499023438,
"logps/ref_chosen": -179.5069122314453,
"logps/ref_rejected": -185.01487731933594,
"logps/rejected": -189.2952880859375,
"loss": 0.6924,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.041171781718730927,
"rewards/margins": 0.0016320659779012203,
"rewards/rejected": -0.04280385002493858,
"step": 19
},
{
"epoch": 0.2634829147797447,
"grad_norm": 0.6024115085601807,
"learning_rate": 4.6145801481477433e-07,
"logits/chosen": 10.452649116516113,
"logits/rejected": 10.525249481201172,
"logps/chosen": -174.3675994873047,
"logps/ref_chosen": -170.23623657226562,
"logps/ref_rejected": -177.55618286132812,
"logps/rejected": -181.85946655273438,
"loss": 0.6923,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.04131368175148964,
"rewards/margins": 0.0017191548831760883,
"rewards/rejected": -0.04303283616900444,
"step": 20
},
{
"epoch": 0.276657060518732,
"grad_norm": 0.4595341384410858,
"learning_rate": 4.549746059183561e-07,
"logits/chosen": 10.422194480895996,
"logits/rejected": 10.483356475830078,
"logps/chosen": -180.44906616210938,
"logps/ref_chosen": -176.3700408935547,
"logps/ref_rejected": -182.80032348632812,
"logps/rejected": -187.17276000976562,
"loss": 0.6917,
"rewards/accuracies": 0.5703125,
"rewards/chosen": -0.04079030081629753,
"rewards/margins": 0.0029342519119381905,
"rewards/rejected": -0.04372455179691315,
"step": 21
},
{
"epoch": 0.28983120625771924,
"grad_norm": 0.3111652731895447,
"learning_rate": 4.480406183527823e-07,
"logits/chosen": 10.481276512145996,
"logits/rejected": 10.559721946716309,
"logps/chosen": -177.63865661621094,
"logps/ref_chosen": -173.29742431640625,
"logps/ref_rejected": -177.54916381835938,
"logps/rejected": -182.2364044189453,
"loss": 0.6915,
"rewards/accuracies": 0.6015625,
"rewards/chosen": -0.043412309139966965,
"rewards/margins": 0.003460067557170987,
"rewards/rejected": -0.04687237739562988,
"step": 22
},
{
"epoch": 0.3030053519967065,
"grad_norm": 0.520114541053772,
"learning_rate": 4.4067129452759546e-07,
"logits/chosen": 10.153634071350098,
"logits/rejected": 10.433453559875488,
"logps/chosen": -174.9644012451172,
"logps/ref_chosen": -170.41177368164062,
"logps/ref_rejected": -182.7073974609375,
"logps/rejected": -187.5763397216797,
"loss": 0.6916,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.04552610218524933,
"rewards/margins": 0.0031630881130695343,
"rewards/rejected": -0.04868919029831886,
"step": 23
},
{
"epoch": 0.3161794977356937,
"grad_norm": 0.38157743215560913,
"learning_rate": 4.3288283381591725e-07,
"logits/chosen": 10.212328910827637,
"logits/rejected": 10.149028778076172,
"logps/chosen": -178.75759887695312,
"logps/ref_chosen": -174.15003967285156,
"logps/ref_rejected": -181.65594482421875,
"logps/rejected": -186.58934020996094,
"loss": 0.6916,
"rewards/accuracies": 0.5703125,
"rewards/chosen": -0.046075694262981415,
"rewards/margins": 0.003258442971855402,
"rewards/rejected": -0.04933413863182068,
"step": 24
},
{
"epoch": 0.32935364347468093,
"grad_norm": 0.5030648112297058,
"learning_rate": 4.246923569447104e-07,
"logits/chosen": 10.425580978393555,
"logits/rejected": 10.567748069763184,
"logps/chosen": -181.23281860351562,
"logps/ref_chosen": -177.0232391357422,
"logps/ref_rejected": -184.0133056640625,
"logps/rejected": -188.3357696533203,
"loss": 0.6926,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.04209586977958679,
"rewards/margins": 0.0011287340894341469,
"rewards/rejected": -0.04322460666298866,
"step": 25
},
{
"epoch": 0.34252778921366817,
"grad_norm": 0.3347052335739136,
"learning_rate": 4.161178683597054e-07,
"logits/chosen": 10.395452499389648,
"logits/rejected": 10.5776948928833,
"logps/chosen": -177.6590576171875,
"logps/ref_chosen": -172.2715606689453,
"logps/ref_rejected": -180.46673583984375,
"logps/rejected": -186.09349060058594,
"loss": 0.692,
"rewards/accuracies": 0.5703125,
"rewards/chosen": -0.05387478694319725,
"rewards/margins": 0.002392976311966777,
"rewards/rejected": -0.056267764419317245,
"step": 26
},
{
"epoch": 0.3557019349526554,
"grad_norm": 0.40187934041023254,
"learning_rate": 4.0717821664772124e-07,
"logits/chosen": 10.416847229003906,
"logits/rejected": 10.693917274475098,
"logps/chosen": -180.66729736328125,
"logps/ref_chosen": -174.87091064453125,
"logps/ref_rejected": -183.6770477294922,
"logps/rejected": -189.53399658203125,
"loss": 0.6929,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.057963769882917404,
"rewards/margins": 0.0006057576974853873,
"rewards/rejected": -0.0585695244371891,
"step": 27
},
{
"epoch": 0.3688760806916426,
"grad_norm": 0.537835419178009,
"learning_rate": 3.978930531033806e-07,
"logits/chosen": 10.315240859985352,
"logits/rejected": 10.52540111541748,
"logps/chosen": -178.289794921875,
"logps/ref_chosen": -172.75225830078125,
"logps/ref_rejected": -180.3443145751953,
"logps/rejected": -186.29811096191406,
"loss": 0.6911,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.05537542328238487,
"rewards/margins": 0.004162484314292669,
"rewards/rejected": -0.05953790992498398,
"step": 28
},
{
"epoch": 0.3820502264306299,
"grad_norm": 0.9168035984039307,
"learning_rate": 3.882827885312998e-07,
"logits/chosen": 10.197809219360352,
"logits/rejected": 10.199935913085938,
"logps/chosen": -181.69606018066406,
"logps/ref_chosen": -176.17613220214844,
"logps/ref_rejected": -180.6341552734375,
"logps/rejected": -186.46124267578125,
"loss": 0.6917,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.05519918352365494,
"rewards/margins": 0.0030717107001692057,
"rewards/rejected": -0.0582708939909935,
"step": 29
},
{
"epoch": 0.39522437216961714,
"grad_norm": 0.3577282428741455,
"learning_rate": 3.7836854837871044e-07,
"logits/chosen": 10.497368812561035,
"logits/rejected": 10.725122451782227,
"logps/chosen": -184.75241088867188,
"logps/ref_chosen": -178.8612060546875,
"logps/ref_rejected": -187.1546173095703,
"logps/rejected": -193.5741424560547,
"loss": 0.6906,
"rewards/accuracies": 0.5859375,
"rewards/chosen": -0.058911994099617004,
"rewards/margins": 0.0052833897061645985,
"rewards/rejected": -0.06419539451599121,
"step": 30
},
{
"epoch": 0.4083985179086044,
"grad_norm": 0.46136385202407837,
"learning_rate": 3.681721262971413e-07,
"logits/chosen": 10.34453010559082,
"logits/rejected": 10.35668659210205,
"logps/chosen": -178.43930053710938,
"logps/ref_chosen": -172.60848999023438,
"logps/ref_rejected": -179.45013427734375,
"logps/rejected": -185.967529296875,
"loss": 0.6898,
"rewards/accuracies": 0.5625,
"rewards/chosen": -0.0583079531788826,
"rewards/margins": 0.006865999195724726,
"rewards/rejected": -0.06517395377159119,
"step": 31
},
{
"epoch": 0.4215726636475916,
"grad_norm": 0.3592880368232727,
"learning_rate": 3.577159362352426e-07,
"logits/chosen": 10.357152938842773,
"logits/rejected": 10.40371322631836,
"logps/chosen": -181.8780975341797,
"logps/ref_chosen": -175.29119873046875,
"logps/ref_rejected": -182.2250213623047,
"logps/rejected": -189.01063537597656,
"loss": 0.6923,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.06586895883083344,
"rewards/margins": 0.001987436320632696,
"rewards/rejected": -0.06785639375448227,
"step": 32
},
{
"epoch": 0.43474680938657884,
"grad_norm": 0.4338357746601105,
"learning_rate": 3.470229631680624e-07,
"logits/chosen": 10.255874633789062,
"logits/rejected": 10.296948432922363,
"logps/chosen": -183.95484924316406,
"logps/ref_chosen": -177.42674255371094,
"logps/ref_rejected": -181.18801879882812,
"logps/rejected": -188.07952880859375,
"loss": 0.6914,
"rewards/accuracies": 0.5859375,
"rewards/chosen": -0.065280981361866,
"rewards/margins": 0.003634275868535042,
"rewards/rejected": -0.06891525536775589,
"step": 33
},
{
"epoch": 0.44792095512556607,
"grad_norm": 0.4542549252510071,
"learning_rate": 3.361167125710832e-07,
"logits/chosen": 10.056829452514648,
"logits/rejected": 10.121377944946289,
"logps/chosen": -181.4767303466797,
"logps/ref_chosen": -173.72467041015625,
"logps/ref_rejected": -180.5912322998047,
"logps/rejected": -188.53164672851562,
"loss": 0.6923,
"rewards/accuracies": 0.4921875,
"rewards/chosen": -0.07752064615488052,
"rewards/margins": 0.0018832057248800993,
"rewards/rejected": -0.0794038400053978,
"step": 34
},
{
"epoch": 0.4610951008645533,
"grad_norm": 0.4729287028312683,
"learning_rate": 3.2502115875008516e-07,
"logits/chosen": 10.46947193145752,
"logits/rejected": 10.667445182800293,
"logps/chosen": -182.939208984375,
"logps/ref_chosen": -175.0980987548828,
"logps/ref_rejected": -181.65821838378906,
"logps/rejected": -189.89309692382812,
"loss": 0.6913,
"rewards/accuracies": 0.578125,
"rewards/chosen": -0.07841099798679352,
"rewards/margins": 0.00393773103132844,
"rewards/rejected": -0.08234872668981552,
"step": 35
},
{
"epoch": 0.47426924660354053,
"grad_norm": 0.8570416569709778,
"learning_rate": 3.137606921404191e-07,
"logits/chosen": 10.280503273010254,
"logits/rejected": 10.623340606689453,
"logps/chosen": -182.2921142578125,
"logps/ref_chosen": -174.0312042236328,
"logps/ref_rejected": -181.84324645996094,
"logps/rejected": -190.1889190673828,
"loss": 0.6928,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.08260920643806458,
"rewards/margins": 0.0008472882909700274,
"rewards/rejected": -0.08345648646354675,
"step": 36
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 12,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
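
The JSON above is the standard state file written by the Hugging Face Trainer during this DPO run (one log entry per step, since logging_steps is 1). A minimal sketch for inspecting it, assuming the checkpoint folder has been downloaded locally so that checkpoint-36/trainer_state.json is readable:

import json

# Load the trainer state for this checkpoint (local path is an assumption).
with open("checkpoint-36/trainer_state.json") as f:
    state = json.load(f)

# Print the DPO loss, reward margin, and accuracy logged at each step.
for entry in state["log_history"]:
    print(
        f"step {entry['step']:>2}: "
        f"loss={entry['loss']:.4f}  "
        f"margin={entry['rewards/margins']:+.5f}  "
        f"acc={entry['rewards/accuracies']:.3f}"
    )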