math_phi3_dpo_80_0 / checkpoint-36 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.47426924660354053,
"eval_steps": 500,
"global_step": 36,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013174145738987238,
"grad_norm": 0.39343583583831787,
"learning_rate": 6.25e-08,
"logits/chosen": 10.071717262268066,
"logits/rejected": 10.610974311828613,
"logps/chosen": -121.14067077636719,
"logps/ref_chosen": -121.14067077636719,
"logps/ref_rejected": -137.65684509277344,
"logps/rejected": -137.65684509277344,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.026348291477974475,
"grad_norm": 0.7521647214889526,
"learning_rate": 1.25e-07,
"logits/chosen": 10.222262382507324,
"logits/rejected": 10.754176139831543,
"logps/chosen": -116.48068237304688,
"logps/ref_chosen": -116.48068237304688,
"logps/ref_rejected": -130.27796936035156,
"logps/rejected": -130.27796936035156,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 2
},
{
"epoch": 0.03952243721696171,
"grad_norm": 0.5880491137504578,
"learning_rate": 1.875e-07,
"logits/chosen": 10.01984977722168,
"logits/rejected": 10.62405776977539,
"logps/chosen": -123.36822509765625,
"logps/ref_chosen": -122.6683349609375,
"logps/ref_rejected": -132.69850158691406,
"logps/rejected": -133.207275390625,
"loss": 0.6941,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.006998830940574408,
"rewards/margins": -0.0019110905705019832,
"rewards/rejected": -0.005087739787995815,
"step": 3
},
{
"epoch": 0.05269658295594895,
"grad_norm": 0.4605408310890198,
"learning_rate": 2.5e-07,
"logits/chosen": 9.99990463256836,
"logits/rejected": 10.736846923828125,
"logps/chosen": -123.02133178710938,
"logps/ref_chosen": -122.59739685058594,
"logps/ref_rejected": -129.70767211914062,
"logps/rejected": -129.98374938964844,
"loss": 0.6939,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.004239337984472513,
"rewards/margins": -0.0014785109087824821,
"rewards/rejected": -0.0027608266100287437,
"step": 4
},
{
"epoch": 0.06587072869493618,
"grad_norm": 0.469856321811676,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 10.075482368469238,
"logits/rejected": 10.892666816711426,
"logps/chosen": -117.554931640625,
"logps/ref_chosen": -117.5941162109375,
"logps/ref_rejected": -132.1708984375,
"logps/rejected": -132.1227264404297,
"loss": 0.6932,
"rewards/accuracies": 0.546875,
"rewards/chosen": 0.0003917121794074774,
"rewards/margins": -9.010493522509933e-05,
"rewards/rejected": 0.0004818170564249158,
"step": 5
},
{
"epoch": 0.07904487443392343,
"grad_norm": 0.5541566610336304,
"learning_rate": 3.75e-07,
"logits/chosen": 10.660999298095703,
"logits/rejected": 11.290507316589355,
"logps/chosen": -127.00320434570312,
"logps/ref_chosen": -126.12411499023438,
"logps/ref_rejected": -136.9976043701172,
"logps/rejected": -137.75950622558594,
"loss": 0.6938,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.00879070907831192,
"rewards/margins": -0.0011717069428414106,
"rewards/rejected": -0.007619001902639866,
"step": 6
},
{
"epoch": 0.09221902017291066,
"grad_norm": 0.4422788619995117,
"learning_rate": 4.375e-07,
"logits/chosen": 9.958097457885742,
"logits/rejected": 10.642163276672363,
"logps/chosen": -115.61244201660156,
"logps/ref_chosen": -115.08863830566406,
"logps/ref_rejected": -125.91255187988281,
"logps/rejected": -126.15577697753906,
"loss": 0.6946,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.005238103214651346,
"rewards/margins": -0.0028058765456080437,
"rewards/rejected": -0.002432226436212659,
"step": 7
},
{
"epoch": 0.1053931659118979,
"grad_norm": 0.5495327115058899,
"learning_rate": 5e-07,
"logits/chosen": 10.345503807067871,
"logits/rejected": 10.97708797454834,
"logps/chosen": -121.48406982421875,
"logps/ref_chosen": -121.4114761352539,
"logps/ref_rejected": -134.62770080566406,
"logps/rejected": -134.7586212158203,
"loss": 0.6929,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.0007259202538989484,
"rewards/margins": 0.0005832896567881107,
"rewards/rejected": -0.0013092098524793983,
"step": 8
},
{
"epoch": 0.11856731165088513,
"grad_norm": 0.43922048807144165,
"learning_rate": 4.997252228714278e-07,
"logits/chosen": 10.159126281738281,
"logits/rejected": 11.002123832702637,
"logps/chosen": -122.51399230957031,
"logps/ref_chosen": -121.59207153320312,
"logps/ref_rejected": -134.70025634765625,
"logps/rejected": -135.55740356445312,
"loss": 0.6935,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.00921926274895668,
"rewards/margins": -0.0006479143630713224,
"rewards/rejected": -0.008571348153054714,
"step": 9
},
{
"epoch": 0.13174145738987236,
"grad_norm": 0.6643485426902771,
"learning_rate": 4.989014955054745e-07,
"logits/chosen": 9.9464111328125,
"logits/rejected": 10.739057540893555,
"logps/chosen": -117.91310119628906,
"logps/ref_chosen": -117.16349029541016,
"logps/ref_rejected": -129.98167419433594,
"logps/rejected": -130.65924072265625,
"loss": 0.6935,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.007496046833693981,
"rewards/margins": -0.0007203805143944919,
"rewards/rejected": -0.00677566509693861,
"step": 10
},
{
"epoch": 0.14491560312885962,
"grad_norm": 0.4432956576347351,
"learning_rate": 4.975306286336627e-07,
"logits/chosen": 10.095781326293945,
"logits/rejected": 11.001167297363281,
"logps/chosen": -123.26506042480469,
"logps/ref_chosen": -121.95927429199219,
"logps/ref_rejected": -136.18655395507812,
"logps/rejected": -137.39865112304688,
"loss": 0.6936,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.01305788941681385,
"rewards/margins": -0.0009369202307425439,
"rewards/rejected": -0.012120969593524933,
"step": 11
},
{
"epoch": 0.15808974886784685,
"grad_norm": 0.3955974280834198,
"learning_rate": 4.956156357188939e-07,
"logits/chosen": 10.129995346069336,
"logits/rejected": 10.673677444458008,
"logps/chosen": -126.05357360839844,
"logps/ref_chosen": -124.12315368652344,
"logps/ref_rejected": -134.275390625,
"logps/rejected": -136.24940490722656,
"loss": 0.693,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.01930420845746994,
"rewards/margins": 0.00043603626545518637,
"rewards/rejected": -0.01974024437367916,
"step": 12
},
{
"epoch": 0.17126389460683408,
"grad_norm": 0.4873650074005127,
"learning_rate": 4.931607263312032e-07,
"logits/chosen": 9.8243989944458,
"logits/rejected": 10.843228340148926,
"logps/chosen": -119.30509185791016,
"logps/ref_chosen": -116.83765411376953,
"logps/ref_rejected": -130.78997802734375,
"logps/rejected": -133.15672302246094,
"loss": 0.6937,
"rewards/accuracies": 0.4453125,
"rewards/chosen": -0.024674497544765472,
"rewards/margins": -0.0010069820564240217,
"rewards/rejected": -0.02366751804947853,
"step": 13
},
{
"epoch": 0.1844380403458213,
"grad_norm": 0.8769639134407043,
"learning_rate": 4.9017129689421e-07,
"logits/chosen": 10.427848815917969,
"logits/rejected": 11.37716293334961,
"logps/chosen": -120.9067611694336,
"logps/ref_chosen": -118.43791961669922,
"logps/ref_rejected": -132.5309600830078,
"logps/rejected": -134.91236877441406,
"loss": 0.6936,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.02468838170170784,
"rewards/margins": -0.0008744060760363936,
"rewards/rejected": -0.023813974112272263,
"step": 14
},
{
"epoch": 0.19761218608480857,
"grad_norm": 0.8736073970794678,
"learning_rate": 4.866539188226085e-07,
"logits/chosen": 9.956391334533691,
"logits/rejected": 10.684510231018066,
"logps/chosen": -128.198486328125,
"logps/ref_chosen": -124.82101440429688,
"logps/ref_rejected": -135.32565307617188,
"logps/rejected": -138.8860626220703,
"loss": 0.6923,
"rewards/accuracies": 0.5234375,
"rewards/chosen": -0.033774565905332565,
"rewards/margins": 0.0018295131158083677,
"rewards/rejected": -0.0356040820479393,
"step": 15
},
{
"epoch": 0.2107863318237958,
"grad_norm": 0.7293412089347839,
"learning_rate": 4.826163240767716e-07,
"logits/chosen": 10.677412033081055,
"logits/rejected": 11.254134178161621,
"logps/chosen": -122.79344940185547,
"logps/ref_chosen": -119.69990539550781,
"logps/ref_rejected": -130.34449768066406,
"logps/rejected": -133.39932250976562,
"loss": 0.6934,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.030935294926166534,
"rewards/margins": -0.0003869622596539557,
"rewards/rejected": -0.030548332259058952,
"step": 16
},
{
"epoch": 0.22396047756278303,
"grad_norm": 0.6406402587890625,
"learning_rate": 4.780673881662242e-07,
"logits/chosen": 10.172043800354004,
"logits/rejected": 10.859190940856934,
"logps/chosen": -118.11813354492188,
"logps/ref_chosen": -114.52975463867188,
"logps/ref_rejected": -129.5846405029297,
"logps/rejected": -133.20779418945312,
"loss": 0.6931,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.035883828997612,
"rewards/margins": 0.00034771906211972237,
"rewards/rejected": -0.036231543868780136,
"step": 17
},
{
"epoch": 0.23713462330177026,
"grad_norm": 0.7559142708778381,
"learning_rate": 4.730171106393466e-07,
"logits/chosen": 10.747884750366211,
"logits/rejected": 11.173224449157715,
"logps/chosen": -121.89351654052734,
"logps/ref_chosen": -117.67997741699219,
"logps/ref_rejected": -128.00650024414062,
"logps/rejected": -132.31655883789062,
"loss": 0.6928,
"rewards/accuracies": 0.546875,
"rewards/chosen": -0.042135339230298996,
"rewards/margins": 0.0009651560103520751,
"rewards/rejected": -0.043100498616695404,
"step": 18
},
{
"epoch": 0.2503087690407575,
"grad_norm": 0.49808746576309204,
"learning_rate": 4.6747659310219757e-07,
"logits/chosen": 10.380050659179688,
"logits/rejected": 10.91146469116211,
"logps/chosen": -125.04395294189453,
"logps/ref_chosen": -120.92308044433594,
"logps/ref_rejected": -133.8301544189453,
"logps/rejected": -138.79052734375,
"loss": 0.689,
"rewards/accuracies": 0.6328125,
"rewards/chosen": -0.0412086620926857,
"rewards/margins": 0.008395083248615265,
"rewards/rejected": -0.04960374906659126,
"step": 19
},
{
"epoch": 0.2634829147797447,
"grad_norm": 0.5601547956466675,
"learning_rate": 4.6145801481477433e-07,
"logits/chosen": 10.563058853149414,
"logits/rejected": 11.37236499786377,
"logps/chosen": -126.93672180175781,
"logps/ref_chosen": -121.84554290771484,
"logps/ref_rejected": -133.343017578125,
"logps/rejected": -138.12808227539062,
"loss": 0.6948,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.050911907106637955,
"rewards/margins": -0.0030612414702773094,
"rewards/rejected": -0.04785066470503807,
"step": 20
},
{
"epoch": 0.276657060518732,
"grad_norm": 0.41351014375686646,
"learning_rate": 4.549746059183561e-07,
"logits/chosen": 9.687071800231934,
"logits/rejected": 10.572582244873047,
"logps/chosen": -124.77998352050781,
"logps/ref_chosen": -119.20828247070312,
"logps/ref_rejected": -134.38436889648438,
"logps/rejected": -140.02293395996094,
"loss": 0.6929,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.055717017501592636,
"rewards/margins": 0.0006685962434858084,
"rewards/rejected": -0.056385621428489685,
"step": 21
},
{
"epoch": 0.28983120625771924,
"grad_norm": 0.6027271747589111,
"learning_rate": 4.480406183527823e-07,
"logits/chosen": 10.03482437133789,
"logits/rejected": 10.820859909057617,
"logps/chosen": -121.17003631591797,
"logps/ref_chosen": -114.62059020996094,
"logps/ref_rejected": -128.0896759033203,
"logps/rejected": -134.1069793701172,
"loss": 0.6959,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.06549445539712906,
"rewards/margins": -0.005321440752595663,
"rewards/rejected": -0.06017300859093666,
"step": 22
},
{
"epoch": 0.3030053519967065,
"grad_norm": 0.5877695083618164,
"learning_rate": 4.4067129452759546e-07,
"logits/chosen": 10.00536060333252,
"logits/rejected": 10.877461433410645,
"logps/chosen": -124.2905044555664,
"logps/ref_chosen": -117.84042358398438,
"logps/ref_rejected": -131.79171752929688,
"logps/rejected": -137.91160583496094,
"loss": 0.6949,
"rewards/accuracies": 0.46875,
"rewards/chosen": -0.06450086086988449,
"rewards/margins": -0.0033018956892192364,
"rewards/rejected": -0.061198972165584564,
"step": 23
},
{
"epoch": 0.3161794977356937,
"grad_norm": 0.5177574157714844,
"learning_rate": 4.3288283381591725e-07,
"logits/chosen": 10.104101181030273,
"logits/rejected": 10.70304012298584,
"logps/chosen": -129.96224975585938,
"logps/ref_chosen": -123.75523376464844,
"logps/ref_rejected": -138.6237030029297,
"logps/rejected": -144.98983764648438,
"loss": 0.6925,
"rewards/accuracies": 0.53125,
"rewards/chosen": -0.062070075422525406,
"rewards/margins": 0.0015914504183456302,
"rewards/rejected": -0.06366152316331863,
"step": 24
},
{
"epoch": 0.32935364347468093,
"grad_norm": 0.57741379737854,
"learning_rate": 4.246923569447104e-07,
"logits/chosen": 10.097904205322266,
"logits/rejected": 10.884883880615234,
"logps/chosen": -130.04393005371094,
"logps/ref_chosen": -122.53610229492188,
"logps/ref_rejected": -141.00828552246094,
"logps/rejected": -148.18405151367188,
"loss": 0.695,
"rewards/accuracies": 0.4609375,
"rewards/chosen": -0.07507827132940292,
"rewards/margins": -0.003320657880976796,
"rewards/rejected": -0.07175761461257935,
"step": 25
},
{
"epoch": 0.34252778921366817,
"grad_norm": 0.5819231867790222,
"learning_rate": 4.161178683597054e-07,
"logits/chosen": 10.32345199584961,
"logits/rejected": 11.267877578735352,
"logps/chosen": -132.95077514648438,
"logps/ref_chosen": -124.1744384765625,
"logps/ref_rejected": -139.68605041503906,
"logps/rejected": -148.0929718017578,
"loss": 0.6952,
"rewards/accuracies": 0.484375,
"rewards/chosen": -0.08776339888572693,
"rewards/margins": -0.0036940835416316986,
"rewards/rejected": -0.08406931161880493,
"step": 26
},
{
"epoch": 0.3557019349526554,
"grad_norm": 0.6049931645393372,
"learning_rate": 4.0717821664772124e-07,
"logits/chosen": 10.15705394744873,
"logits/rejected": 11.114046096801758,
"logps/chosen": -125.23123931884766,
"logps/ref_chosen": -117.1941146850586,
"logps/ref_rejected": -129.4031982421875,
"logps/rejected": -137.76333618164062,
"loss": 0.6918,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.08037131279706955,
"rewards/margins": 0.0032299975864589214,
"rewards/rejected": -0.08360131084918976,
"step": 27
},
{
"epoch": 0.3688760806916426,
"grad_norm": 0.5475514531135559,
"learning_rate": 3.978930531033806e-07,
"logits/chosen": 9.436954498291016,
"logits/rejected": 10.49735164642334,
"logps/chosen": -125.24280548095703,
"logps/ref_chosen": -117.17620086669922,
"logps/ref_rejected": -130.76107788085938,
"logps/rejected": -138.6796875,
"loss": 0.6941,
"rewards/accuracies": 0.5078125,
"rewards/chosen": -0.08066616207361221,
"rewards/margins": -0.001479951897636056,
"rewards/rejected": -0.07918620854616165,
"step": 28
},
{
"epoch": 0.3820502264306299,
"grad_norm": 0.599656343460083,
"learning_rate": 3.882827885312998e-07,
"logits/chosen": 10.068204879760742,
"logits/rejected": 10.938701629638672,
"logps/chosen": -130.98472595214844,
"logps/ref_chosen": -123.2500228881836,
"logps/ref_rejected": -133.91970825195312,
"logps/rejected": -141.19216918945312,
"loss": 0.6957,
"rewards/accuracies": 0.453125,
"rewards/chosen": -0.07734709233045578,
"rewards/margins": -0.004622358828783035,
"rewards/rejected": -0.07272473722696304,
"step": 29
},
{
"epoch": 0.39522437216961714,
"grad_norm": 0.4804610311985016,
"learning_rate": 3.7836854837871044e-07,
"logits/chosen": 10.131808280944824,
"logits/rejected": 11.42764663696289,
"logps/chosen": -127.49568176269531,
"logps/ref_chosen": -118.52604675292969,
"logps/ref_rejected": -139.1146240234375,
"logps/rejected": -147.99575805664062,
"loss": 0.6938,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.08969634026288986,
"rewards/margins": -0.0008849686128087342,
"rewards/rejected": -0.08881138265132904,
"step": 30
},
{
"epoch": 0.4083985179086044,
"grad_norm": 0.4208291471004486,
"learning_rate": 3.681721262971413e-07,
"logits/chosen": 9.75320816040039,
"logits/rejected": 10.846428871154785,
"logps/chosen": -119.75810241699219,
"logps/ref_chosen": -111.525146484375,
"logps/ref_rejected": -128.1785888671875,
"logps/rejected": -136.38653564453125,
"loss": 0.6935,
"rewards/accuracies": 0.515625,
"rewards/chosen": -0.08232954144477844,
"rewards/margins": -0.00024991348618641496,
"rewards/rejected": -0.08207963407039642,
"step": 31
},
{
"epoch": 0.4215726636475916,
"grad_norm": 0.7168395519256592,
"learning_rate": 3.577159362352426e-07,
"logits/chosen": 9.947325706481934,
"logits/rejected": 11.155643463134766,
"logps/chosen": -124.97373962402344,
"logps/ref_chosen": -116.80255126953125,
"logps/ref_rejected": -134.80767822265625,
"logps/rejected": -142.88174438476562,
"loss": 0.6939,
"rewards/accuracies": 0.4296875,
"rewards/chosen": -0.08171181380748749,
"rewards/margins": -0.0009711601305752993,
"rewards/rejected": -0.08074064552783966,
"step": 32
},
{
"epoch": 0.43474680938657884,
"grad_norm": 1.1801953315734863,
"learning_rate": 3.470229631680624e-07,
"logits/chosen": 10.00582504272461,
"logits/rejected": 10.817474365234375,
"logps/chosen": -127.24813842773438,
"logps/ref_chosen": -118.635009765625,
"logps/ref_rejected": -133.8279571533203,
"logps/rejected": -142.4803924560547,
"loss": 0.6932,
"rewards/accuracies": 0.4765625,
"rewards/chosen": -0.08613133430480957,
"rewards/margins": 0.0003930249949917197,
"rewards/rejected": -0.0865243598818779,
"step": 33
},
{
"epoch": 0.44792095512556607,
"grad_norm": 0.5862898230552673,
"learning_rate": 3.361167125710832e-07,
"logits/chosen": 10.005105972290039,
"logits/rejected": 10.864886283874512,
"logps/chosen": -135.84164428710938,
"logps/ref_chosen": -126.42659759521484,
"logps/ref_rejected": -143.6361846923828,
"logps/rejected": -153.5630340576172,
"loss": 0.6909,
"rewards/accuracies": 0.5390625,
"rewards/chosen": -0.09415031969547272,
"rewards/margins": 0.005118116270750761,
"rewards/rejected": -0.09926842898130417,
"step": 34
},
{
"epoch": 0.4610951008645533,
"grad_norm": 0.7007870674133301,
"learning_rate": 3.2502115875008516e-07,
"logits/chosen": 10.54973030090332,
"logits/rejected": 11.243837356567383,
"logps/chosen": -131.86981201171875,
"logps/ref_chosen": -123.14965057373047,
"logps/ref_rejected": -134.22947692871094,
"logps/rejected": -143.7413787841797,
"loss": 0.6895,
"rewards/accuracies": 0.5703125,
"rewards/chosen": -0.0872015431523323,
"rewards/margins": 0.007917709648609161,
"rewards/rejected": -0.09511925280094147,
"step": 35
},
{
"epoch": 0.47426924660354053,
"grad_norm": 0.7300606966018677,
"learning_rate": 3.137606921404191e-07,
"logits/chosen": 10.125234603881836,
"logits/rejected": 10.481938362121582,
"logps/chosen": -127.8335189819336,
"logps/ref_chosen": -118.79129791259766,
"logps/ref_rejected": -126.64965057373047,
"logps/rejected": -135.0525665283203,
"loss": 0.6966,
"rewards/accuracies": 0.4296875,
"rewards/chosen": -0.09042223542928696,
"rewards/margins": -0.006393034942448139,
"rewards/rejected": -0.0840291976928711,
"step": 36
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 12,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
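The JSON above follows the Hugging Face Trainer state layout: "log_history" is a list of per-step dictionaries keyed by the metric names shown (DPO loss, reward margins, accuracies, log-probabilities). A minimal sketch of reading it back for inspection, assuming the file is saved locally as "trainer_state.json" (the path and printed columns here are illustrative, not part of the checkpoint):

import json

# Load the Trainer state and print the DPO metrics logged at each step.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    step = entry["step"]
    loss = entry["loss"]
    margin = entry["rewards/margins"]      # rewards/chosen - rewards/rejected
    acc = entry["rewards/accuracies"]      # fraction of pairs where chosen is scored higher
    print(f"step {step:3d}  loss {loss:.4f}  margin {margin:+.5f}  acc {acc:.3f}")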