diff --git "a/checkpoint-455/trainer_state.json" "b/checkpoint-455/trainer_state.json" new file mode 100644--- /dev/null +++ "b/checkpoint-455/trainer_state.json" @@ -0,0 +1,6858 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.9989023051591658, + "eval_steps": 500, + "global_step": 455, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0021953896816684962, + "grad_norm": 1.1034454107284546, + "learning_rate": 5.0000000000000004e-08, + "logits/chosen": -2.2846126556396484, + "logits/rejected": -2.358996868133545, + "logps/chosen": -12.224668502807617, + "logps/rejected": -9.611772537231445, + "loss": 0.953, + "rewards/accuracies": 0.0, + "rewards/chosen": 0.0, + "rewards/margins": 0.0, + "rewards/rejected": 0.0, + "step": 1 + }, + { + "epoch": 0.0043907793633369925, + "grad_norm": 1.043821096420288, + "learning_rate": 1.0000000000000001e-07, + "logits/chosen": -2.293553352355957, + "logits/rejected": -2.2400941848754883, + "logps/chosen": -15.258804321289062, + "logps/rejected": -14.146197319030762, + "loss": 1.0789, + "rewards/accuracies": 0.0, + "rewards/chosen": 0.0, + "rewards/margins": 0.0, + "rewards/rejected": 0.0, + "step": 2 + }, + { + "epoch": 0.006586169045005488, + "grad_norm": 1.0436196327209473, + "learning_rate": 1.5000000000000002e-07, + "logits/chosen": -2.3567137718200684, + "logits/rejected": -2.3624215126037598, + "logps/chosen": -10.726156234741211, + "logps/rejected": -9.462532043457031, + "loss": 0.9529, + "rewards/accuracies": 0.40625, + "rewards/chosen": 0.002363908104598522, + "rewards/margins": -0.003392255399376154, + "rewards/rejected": 0.005756163503974676, + "step": 3 + }, + { + "epoch": 0.008781558726673985, + "grad_norm": 1.0475765466690063, + "learning_rate": 2.0000000000000002e-07, + "logits/chosen": -2.2790515422821045, + "logits/rejected": -2.2733278274536133, + "logps/chosen": -12.457335472106934, + "logps/rejected": -11.852964401245117, + "loss": 0.9939, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.0019623569678515196, + "rewards/margins": 0.005730569828301668, + "rewards/rejected": -0.003768211929127574, + "step": 4 + }, + { + "epoch": 0.010976948408342482, + "grad_norm": 1.0972965955734253, + "learning_rate": 2.5000000000000004e-07, + "logits/chosen": -2.3039631843566895, + "logits/rejected": -2.3130176067352295, + "logps/chosen": -12.775261878967285, + "logps/rejected": -12.0330171585083, + "loss": 1.0389, + "rewards/accuracies": 0.53125, + "rewards/chosen": -0.0011096245143562555, + "rewards/margins": -0.004510534927248955, + "rewards/rejected": 0.0034009101800620556, + "step": 5 + }, + { + "epoch": 0.013172338090010977, + "grad_norm": 0.9643481373786926, + "learning_rate": 3.0000000000000004e-07, + "logits/chosen": -2.324848175048828, + "logits/rejected": -2.429684638977051, + "logps/chosen": -18.116445541381836, + "logps/rejected": -15.265850067138672, + "loss": 1.0742, + "rewards/accuracies": 0.5, + "rewards/chosen": -0.0029701264575123787, + "rewards/margins": -0.003988922107964754, + "rewards/rejected": 0.0010187956504523754, + "step": 6 + }, + { + "epoch": 0.015367727771679473, + "grad_norm": 1.1616390943527222, + "learning_rate": 3.5000000000000004e-07, + "logits/chosen": -2.2589993476867676, + "logits/rejected": -2.3558740615844727, + "logps/chosen": -13.876771926879883, + "logps/rejected": -13.108857154846191, + "loss": 1.0832, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0004574268823489547, + 
"rewards/margins": 0.00017213402315974236, + "rewards/rejected": 0.00028529250994324684, + "step": 7 + }, + { + "epoch": 0.01756311745334797, + "grad_norm": 1.283825397491455, + "learning_rate": 4.0000000000000003e-07, + "logits/chosen": -2.4278955459594727, + "logits/rejected": -2.3070013523101807, + "logps/chosen": -11.154717445373535, + "logps/rejected": -9.15652084350586, + "loss": 0.9298, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.00048030121251940727, + "rewards/margins": -0.006442471407353878, + "rewards/rejected": 0.006922773085534573, + "step": 8 + }, + { + "epoch": 0.019758507135016465, + "grad_norm": 1.4349981546401978, + "learning_rate": 4.5000000000000003e-07, + "logits/chosen": -2.3945982456207275, + "logits/rejected": -2.3926279544830322, + "logps/chosen": -12.6943359375, + "logps/rejected": -13.573047637939453, + "loss": 1.0278, + "rewards/accuracies": 0.53125, + "rewards/chosen": -3.980204928666353e-05, + "rewards/margins": 0.001762162777595222, + "rewards/rejected": -0.0018019648268818855, + "step": 9 + }, + { + "epoch": 0.021953896816684963, + "grad_norm": 1.1675068140029907, + "learning_rate": 5.000000000000001e-07, + "logits/chosen": -2.3849003314971924, + "logits/rejected": -2.360934257507324, + "logps/chosen": -18.73386001586914, + "logps/rejected": -16.180574417114258, + "loss": 1.1387, + "rewards/accuracies": 0.40625, + "rewards/chosen": -0.0015319950180128217, + "rewards/margins": -0.002129682805389166, + "rewards/rejected": 0.0005976876709610224, + "step": 10 + }, + { + "epoch": 0.024149286498353458, + "grad_norm": 1.2748152017593384, + "learning_rate": 5.5e-07, + "logits/chosen": -2.306387424468994, + "logits/rejected": -2.354555130004883, + "logps/chosen": -21.361164093017578, + "logps/rejected": -16.099632263183594, + "loss": 1.0941, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.00022546492982655764, + "rewards/margins": 0.004793249536305666, + "rewards/rejected": -0.004567783325910568, + "step": 11 + }, + { + "epoch": 0.026344676180021953, + "grad_norm": 1.2641266584396362, + "learning_rate": 6.000000000000001e-07, + "logits/chosen": -2.3452396392822266, + "logits/rejected": -2.358135223388672, + "logps/chosen": -11.315267562866211, + "logps/rejected": -11.734415054321289, + "loss": 1.0026, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.00597212091088295, + "rewards/margins": 0.010203271172940731, + "rewards/rejected": -0.004231149330735207, + "step": 12 + }, + { + "epoch": 0.02854006586169045, + "grad_norm": 0.9660422801971436, + "learning_rate": 6.5e-07, + "logits/chosen": -2.441119909286499, + "logits/rejected": -2.358609676361084, + "logps/chosen": -10.97406005859375, + "logps/rejected": -13.421890258789062, + "loss": 1.0251, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.0005808405112475157, + "rewards/margins": -0.00016933592269197106, + "rewards/rejected": 0.000750176259316504, + "step": 13 + }, + { + "epoch": 0.030735455543358946, + "grad_norm": 1.2691547870635986, + "learning_rate": 7.000000000000001e-07, + "logits/chosen": -2.3512799739837646, + "logits/rejected": -2.320150375366211, + "logps/chosen": -15.537447929382324, + "logps/rejected": -12.599323272705078, + "loss": 1.0558, + "rewards/accuracies": 0.34375, + "rewards/chosen": 0.0010906599927693605, + "rewards/margins": -0.0010261544957756996, + "rewards/rejected": 0.0021168149542063475, + "step": 14 + }, + { + "epoch": 0.03293084522502744, + "grad_norm": 1.1735515594482422, + "learning_rate": 7.5e-07, + "logits/chosen": -2.282367706298828, + "logits/rejected": 
-2.344204902648926, + "logps/chosen": -13.633934020996094, + "logps/rejected": -12.655315399169922, + "loss": 1.0283, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.0009840509155765176, + "rewards/margins": 0.0035789774265140295, + "rewards/rejected": -0.0025949266273528337, + "step": 15 + }, + { + "epoch": 0.03512623490669594, + "grad_norm": 1.2377164363861084, + "learning_rate": 8.000000000000001e-07, + "logits/chosen": -2.357888698577881, + "logits/rejected": -2.3939902782440186, + "logps/chosen": -16.876571655273438, + "logps/rejected": -16.07628631591797, + "loss": 1.1144, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.0009345448925159872, + "rewards/margins": 2.571078948676586e-05, + "rewards/rejected": 0.0009088342194445431, + "step": 16 + }, + { + "epoch": 0.03732162458836443, + "grad_norm": 1.1274445056915283, + "learning_rate": 8.500000000000001e-07, + "logits/chosen": -2.3163723945617676, + "logits/rejected": -2.316263198852539, + "logps/chosen": -19.3470401763916, + "logps/rejected": -18.514925003051758, + "loss": 1.1999, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.003927948419004679, + "rewards/margins": 0.006356396712362766, + "rewards/rejected": -0.002428448526188731, + "step": 17 + }, + { + "epoch": 0.03951701427003293, + "grad_norm": 1.112360954284668, + "learning_rate": 9.000000000000001e-07, + "logits/chosen": -2.395773410797119, + "logits/rejected": -2.353379011154175, + "logps/chosen": -12.884025573730469, + "logps/rejected": -10.73438835144043, + "loss": 0.9752, + "rewards/accuracies": 0.375, + "rewards/chosen": 0.00025042565539479256, + "rewards/margins": 0.0016602440737187862, + "rewards/rejected": -0.0014098185347393155, + "step": 18 + }, + { + "epoch": 0.04171240395170143, + "grad_norm": 1.1286046504974365, + "learning_rate": 9.500000000000001e-07, + "logits/chosen": -2.387399673461914, + "logits/rejected": -2.3302669525146484, + "logps/chosen": -10.059947967529297, + "logps/rejected": -11.219734191894531, + "loss": 1.0013, + "rewards/accuracies": 0.40625, + "rewards/chosen": -0.0020648539066314697, + "rewards/margins": -0.0009576906450092793, + "rewards/rejected": -0.0011071630287915468, + "step": 19 + }, + { + "epoch": 0.043907793633369926, + "grad_norm": 1.3268272876739502, + "learning_rate": 1.0000000000000002e-06, + "logits/chosen": -2.305075168609619, + "logits/rejected": -2.3672685623168945, + "logps/chosen": -11.384998321533203, + "logps/rejected": -9.560636520385742, + "loss": 0.9753, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.0010676621459424496, + "rewards/margins": -0.003087827702984214, + "rewards/rejected": 0.004155490081757307, + "step": 20 + }, + { + "epoch": 0.04610318331503842, + "grad_norm": 1.0563515424728394, + "learning_rate": 1.0500000000000001e-06, + "logits/chosen": -2.2942605018615723, + "logits/rejected": -2.363292694091797, + "logps/chosen": -15.190650939941406, + "logps/rejected": -16.022212982177734, + "loss": 1.1478, + "rewards/accuracies": 0.5, + "rewards/chosen": 3.312993794679642e-05, + "rewards/margins": 0.004593318793922663, + "rewards/rejected": -0.004560189321637154, + "step": 21 + }, + { + "epoch": 0.048298572996706916, + "grad_norm": 1.1235427856445312, + "learning_rate": 1.1e-06, + "logits/chosen": -2.3363523483276367, + "logits/rejected": -2.35919189453125, + "logps/chosen": -13.10991096496582, + "logps/rejected": -12.458131790161133, + "loss": 1.045, + "rewards/accuracies": 0.59375, + "rewards/chosen": 9.548966772854328e-05, + "rewards/margins": -2.5561952497810125e-05, + 
"rewards/rejected": 0.00012105179484933615, + "step": 22 + }, + { + "epoch": 0.050493962678375415, + "grad_norm": 1.0452659130096436, + "learning_rate": 1.1500000000000002e-06, + "logits/chosen": -2.2859370708465576, + "logits/rejected": -2.472400665283203, + "logps/chosen": -13.347756385803223, + "logps/rejected": -11.171856880187988, + "loss": 1.0528, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.0030131523963063955, + "rewards/margins": 0.0002964767627418041, + "rewards/rejected": 0.0027166756335645914, + "step": 23 + }, + { + "epoch": 0.052689352360043906, + "grad_norm": 1.1159127950668335, + "learning_rate": 1.2000000000000002e-06, + "logits/chosen": -2.381169319152832, + "logits/rejected": -2.315974712371826, + "logps/chosen": -16.52190399169922, + "logps/rejected": -13.986876487731934, + "loss": 1.1217, + "rewards/accuracies": 0.46875, + "rewards/chosen": -0.002126524457708001, + "rewards/margins": -0.0030577415600419044, + "rewards/rejected": 0.0009312168695032597, + "step": 24 + }, + { + "epoch": 0.054884742041712405, + "grad_norm": 1.2672061920166016, + "learning_rate": 1.25e-06, + "logits/chosen": -2.32135272026062, + "logits/rejected": -2.2802534103393555, + "logps/chosen": -14.381074905395508, + "logps/rejected": -12.72801399230957, + "loss": 1.0435, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.0030421048868447542, + "rewards/margins": -0.00030010956106707454, + "rewards/rejected": 0.003342214971780777, + "step": 25 + }, + { + "epoch": 0.0570801317233809, + "grad_norm": 1.1465411186218262, + "learning_rate": 1.3e-06, + "logits/chosen": -2.2586517333984375, + "logits/rejected": -2.377082586288452, + "logps/chosen": -11.207740783691406, + "logps/rejected": -13.343524932861328, + "loss": 1.0674, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.0017482821131125093, + "rewards/margins": 0.002273987280204892, + "rewards/rejected": -0.0005257053999230266, + "step": 26 + }, + { + "epoch": 0.059275521405049394, + "grad_norm": 1.0358684062957764, + "learning_rate": 1.3500000000000002e-06, + "logits/chosen": -2.31711745262146, + "logits/rejected": -2.3493282794952393, + "logps/chosen": -14.618535995483398, + "logps/rejected": -12.928030967712402, + "loss": 1.0852, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.004235025029629469, + "rewards/margins": -0.0006879566935822368, + "rewards/rejected": 0.00492298137396574, + "step": 27 + }, + { + "epoch": 0.06147091108671789, + "grad_norm": 1.1591858863830566, + "learning_rate": 1.4000000000000001e-06, + "logits/chosen": -2.310506582260132, + "logits/rejected": -2.2369637489318848, + "logps/chosen": -11.844167709350586, + "logps/rejected": -13.115419387817383, + "loss": 1.0665, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.003957665525376797, + "rewards/margins": 0.007251821458339691, + "rewards/rejected": -0.0032941552344709635, + "step": 28 + }, + { + "epoch": 0.06366630076838639, + "grad_norm": 1.2100194692611694, + "learning_rate": 1.45e-06, + "logits/chosen": -2.384784698486328, + "logits/rejected": -2.348393201828003, + "logps/chosen": -12.626264572143555, + "logps/rejected": -11.345318794250488, + "loss": 0.9866, + "rewards/accuracies": 0.5, + "rewards/chosen": 1.857941970229149e-05, + "rewards/margins": -8.993363007903099e-05, + "rewards/rejected": 0.00010851328261196613, + "step": 29 + }, + { + "epoch": 0.06586169045005488, + "grad_norm": 1.105622410774231, + "learning_rate": 1.5e-06, + "logits/chosen": -2.317814826965332, + "logits/rejected": -2.313504695892334, + "logps/chosen": 
-14.957558631896973, + "logps/rejected": -13.618270874023438, + "loss": 1.0818, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.0022040181793272495, + "rewards/margins": 0.0015496157575398684, + "rewards/rejected": 0.0006544026546180248, + "step": 30 + }, + { + "epoch": 0.06805708013172337, + "grad_norm": 1.2689664363861084, + "learning_rate": 1.5500000000000002e-06, + "logits/chosen": -2.418194532394409, + "logits/rejected": -2.4411373138427734, + "logps/chosen": -11.708237648010254, + "logps/rejected": -13.883282661437988, + "loss": 1.058, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.004741518292576075, + "rewards/margins": -0.0009018751443363726, + "rewards/rejected": 0.005643392913043499, + "step": 31 + }, + { + "epoch": 0.07025246981339188, + "grad_norm": 1.3097610473632812, + "learning_rate": 1.6000000000000001e-06, + "logits/chosen": -2.3038244247436523, + "logits/rejected": -2.458163261413574, + "logps/chosen": -12.498970031738281, + "logps/rejected": -10.449270248413086, + "loss": 1.0015, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.0064272889867424965, + "rewards/margins": 0.009535685181617737, + "rewards/rejected": -0.0031083961948752403, + "step": 32 + }, + { + "epoch": 0.07244785949506037, + "grad_norm": 1.3711282014846802, + "learning_rate": 1.6500000000000003e-06, + "logits/chosen": -2.2958526611328125, + "logits/rejected": -2.357262372970581, + "logps/chosen": -9.87129020690918, + "logps/rejected": -9.644401550292969, + "loss": 0.9782, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.006264886818826199, + "rewards/margins": 0.007479904685169458, + "rewards/rejected": -0.001215018448419869, + "step": 33 + }, + { + "epoch": 0.07464324917672886, + "grad_norm": 1.0014218091964722, + "learning_rate": 1.7000000000000002e-06, + "logits/chosen": -2.3077902793884277, + "logits/rejected": -2.32836651802063, + "logps/chosen": -13.210323333740234, + "logps/rejected": -10.678458213806152, + "loss": 1.0215, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.003474751953035593, + "rewards/margins": 0.005452706944197416, + "rewards/rejected": -0.0019779547583311796, + "step": 34 + }, + { + "epoch": 0.07683863885839737, + "grad_norm": 1.5878692865371704, + "learning_rate": 1.75e-06, + "logits/chosen": -2.3073747158050537, + "logits/rejected": -2.2542941570281982, + "logps/chosen": -11.676526069641113, + "logps/rejected": -12.513848304748535, + "loss": 1.0121, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.0010510723805055022, + "rewards/margins": -0.0036773495376110077, + "rewards/rejected": 0.004728421568870544, + "step": 35 + }, + { + "epoch": 0.07903402854006586, + "grad_norm": 1.313846468925476, + "learning_rate": 1.8000000000000001e-06, + "logits/chosen": -2.402480125427246, + "logits/rejected": -2.351649284362793, + "logps/chosen": -12.576855659484863, + "logps/rejected": -11.105303764343262, + "loss": 0.9857, + "rewards/accuracies": 0.5, + "rewards/chosen": -0.0032139169052243233, + "rewards/margins": -0.005750270560383797, + "rewards/rejected": 0.0025363534223288298, + "step": 36 + }, + { + "epoch": 0.08122941822173436, + "grad_norm": 1.3616929054260254, + "learning_rate": 1.85e-06, + "logits/chosen": -2.2891414165496826, + "logits/rejected": -2.3991661071777344, + "logps/chosen": -14.187095642089844, + "logps/rejected": -11.036577224731445, + "loss": 0.9868, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.006025272887200117, + "rewards/margins": 0.008252888917922974, + "rewards/rejected": -0.0022276148665696383, + "step": 37 + }, + { + 
"epoch": 0.08342480790340286, + "grad_norm": 1.3414860963821411, + "learning_rate": 1.9000000000000002e-06, + "logits/chosen": -2.377753973007202, + "logits/rejected": -2.4487216472625732, + "logps/chosen": -18.255245208740234, + "logps/rejected": -12.83712387084961, + "loss": 1.0333, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.0022521866485476494, + "rewards/margins": 0.004233865533024073, + "rewards/rejected": -0.001981678418815136, + "step": 38 + }, + { + "epoch": 0.08562019758507135, + "grad_norm": 1.0859737396240234, + "learning_rate": 1.9500000000000004e-06, + "logits/chosen": -2.312309980392456, + "logits/rejected": -2.2821502685546875, + "logps/chosen": -14.578563690185547, + "logps/rejected": -15.732474327087402, + "loss": 1.0939, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.0022968475241214037, + "rewards/margins": 0.0003764451830647886, + "rewards/rejected": 0.0019204019336029887, + "step": 39 + }, + { + "epoch": 0.08781558726673985, + "grad_norm": 1.2043111324310303, + "learning_rate": 2.0000000000000003e-06, + "logits/chosen": -2.3180580139160156, + "logits/rejected": -2.3157169818878174, + "logps/chosen": -12.22863483428955, + "logps/rejected": -11.449076652526855, + "loss": 1.0204, + "rewards/accuracies": 0.40625, + "rewards/chosen": 0.00017814873717725277, + "rewards/margins": -0.004647126421332359, + "rewards/rejected": 0.004825274925678968, + "step": 40 + }, + { + "epoch": 0.09001097694840834, + "grad_norm": 1.2361137866973877, + "learning_rate": 2.05e-06, + "logits/chosen": -2.343757152557373, + "logits/rejected": -2.3358938694000244, + "logps/chosen": -12.45596694946289, + "logps/rejected": -12.058631896972656, + "loss": 1.0172, + "rewards/accuracies": 0.40625, + "rewards/chosen": 0.0018289226572960615, + "rewards/margins": 0.000773195642977953, + "rewards/rejected": 0.0010557263158261776, + "step": 41 + }, + { + "epoch": 0.09220636663007684, + "grad_norm": 1.1400678157806396, + "learning_rate": 2.1000000000000002e-06, + "logits/chosen": -2.3396897315979004, + "logits/rejected": -2.262922763824463, + "logps/chosen": -11.612154006958008, + "logps/rejected": -10.86433219909668, + "loss": 0.9884, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.003359719179570675, + "rewards/margins": 0.0006888797506690025, + "rewards/rejected": 0.0026708384975790977, + "step": 42 + }, + { + "epoch": 0.09440175631174534, + "grad_norm": 1.0593880414962769, + "learning_rate": 2.15e-06, + "logits/chosen": -2.27569580078125, + "logits/rejected": -2.345578670501709, + "logps/chosen": -16.460983276367188, + "logps/rejected": -10.75358772277832, + "loss": 1.006, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.008105424232780933, + "rewards/margins": 0.011831814423203468, + "rewards/rejected": -0.003726390190422535, + "step": 43 + }, + { + "epoch": 0.09659714599341383, + "grad_norm": 1.3210548162460327, + "learning_rate": 2.2e-06, + "logits/chosen": -2.3760862350463867, + "logits/rejected": -2.2515220642089844, + "logps/chosen": -14.993949890136719, + "logps/rejected": -13.207966804504395, + "loss": 1.0259, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.00593589898198843, + "rewards/margins": 0.00230758311226964, + "rewards/rejected": 0.0036283163353800774, + "step": 44 + }, + { + "epoch": 0.09879253567508232, + "grad_norm": 0.9838950634002686, + "learning_rate": 2.25e-06, + "logits/chosen": -2.3689630031585693, + "logits/rejected": -2.3999180793762207, + "logps/chosen": -13.113698959350586, + "logps/rejected": -11.06283950805664, + "loss": 0.9872, + 
"rewards/accuracies": 0.75, + "rewards/chosen": 0.008952252566814423, + "rewards/margins": 0.010060377418994904, + "rewards/rejected": -0.0011081250850111246, + "step": 45 + }, + { + "epoch": 0.10098792535675083, + "grad_norm": 1.2060089111328125, + "learning_rate": 2.3000000000000004e-06, + "logits/chosen": -2.3371407985687256, + "logits/rejected": -2.3095602989196777, + "logps/chosen": -10.346152305603027, + "logps/rejected": -10.529175758361816, + "loss": 0.9568, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.005232983268797398, + "rewards/margins": 0.006484426558017731, + "rewards/rejected": -0.0012514438712969422, + "step": 46 + }, + { + "epoch": 0.10318331503841932, + "grad_norm": 1.1937215328216553, + "learning_rate": 2.35e-06, + "logits/chosen": -2.383635997772217, + "logits/rejected": -2.251980781555176, + "logps/chosen": -14.678833961486816, + "logps/rejected": -14.274091720581055, + "loss": 1.1045, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.0001039394992403686, + "rewards/margins": 0.0014265230856835842, + "rewards/rejected": -0.00153046241030097, + "step": 47 + }, + { + "epoch": 0.10537870472008781, + "grad_norm": 1.2106831073760986, + "learning_rate": 2.4000000000000003e-06, + "logits/chosen": -2.307473659515381, + "logits/rejected": -2.270702362060547, + "logps/chosen": -14.072576522827148, + "logps/rejected": -10.658452987670898, + "loss": 1.0071, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.0023170302156358957, + "rewards/margins": 0.0011877454817295074, + "rewards/rejected": 0.0011292845010757446, + "step": 48 + }, + { + "epoch": 0.10757409440175632, + "grad_norm": 1.1354262828826904, + "learning_rate": 2.4500000000000003e-06, + "logits/chosen": -2.248020887374878, + "logits/rejected": -2.2451305389404297, + "logps/chosen": -15.853403091430664, + "logps/rejected": -15.368898391723633, + "loss": 1.1177, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.0036325519904494286, + "rewards/margins": 0.00015486334450542927, + "rewards/rejected": 0.0034776891116052866, + "step": 49 + }, + { + "epoch": 0.10976948408342481, + "grad_norm": 1.0705238580703735, + "learning_rate": 2.5e-06, + "logits/chosen": -2.4041616916656494, + "logits/rejected": -2.383108139038086, + "logps/chosen": -18.875839233398438, + "logps/rejected": -12.37950325012207, + "loss": 1.0141, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.003123737871646881, + "rewards/margins": -0.0004079572972841561, + "rewards/rejected": 0.00353169534355402, + "step": 50 + }, + { + "epoch": 0.1119648737650933, + "grad_norm": 1.26189386844635, + "learning_rate": 2.55e-06, + "logits/chosen": -2.3076915740966797, + "logits/rejected": -2.283825397491455, + "logps/chosen": -13.641839027404785, + "logps/rejected": -11.214380264282227, + "loss": 0.9837, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.002529280027374625, + "rewards/margins": 8.014822378754616e-05, + "rewards/rejected": 0.002449131803587079, + "step": 51 + }, + { + "epoch": 0.1141602634467618, + "grad_norm": 1.302383542060852, + "learning_rate": 2.6e-06, + "logits/chosen": -2.2420313358306885, + "logits/rejected": -2.3660824298858643, + "logps/chosen": -11.503904342651367, + "logps/rejected": -11.648876190185547, + "loss": 1.0438, + "rewards/accuracies": 0.53125, + "rewards/chosen": -0.000531287572812289, + "rewards/margins": 0.0005554634844884276, + "rewards/rejected": -0.001086751464754343, + "step": 52 + }, + { + "epoch": 0.1163556531284303, + "grad_norm": 1.2518279552459717, + "learning_rate": 2.6500000000000005e-06, + 
"logits/chosen": -2.3373780250549316, + "logits/rejected": -2.279087543487549, + "logps/chosen": -16.882110595703125, + "logps/rejected": -15.901355743408203, + "loss": 1.1307, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.0010501868091523647, + "rewards/margins": 0.00016966351540759206, + "rewards/rejected": 0.0008805230027064681, + "step": 53 + }, + { + "epoch": 0.11855104281009879, + "grad_norm": 1.1796338558197021, + "learning_rate": 2.7000000000000004e-06, + "logits/chosen": -2.3147006034851074, + "logits/rejected": -2.245007276535034, + "logps/chosen": -13.884672164916992, + "logps/rejected": -12.487886428833008, + "loss": 1.0253, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.007091285195201635, + "rewards/margins": 0.004812297876924276, + "rewards/rejected": 0.002278987318277359, + "step": 54 + }, + { + "epoch": 0.1207464324917673, + "grad_norm": 1.2227309942245483, + "learning_rate": 2.7500000000000004e-06, + "logits/chosen": -2.2176713943481445, + "logits/rejected": -2.2463364601135254, + "logps/chosen": -13.619998931884766, + "logps/rejected": -12.540321350097656, + "loss": 1.0239, + "rewards/accuracies": 0.375, + "rewards/chosen": -0.0013588806614279747, + "rewards/margins": -0.004556029103696346, + "rewards/rejected": 0.0031971491407603025, + "step": 55 + }, + { + "epoch": 0.12294182217343579, + "grad_norm": 0.9918203949928284, + "learning_rate": 2.8000000000000003e-06, + "logits/chosen": -2.3580827713012695, + "logits/rejected": -2.3567821979522705, + "logps/chosen": -12.459843635559082, + "logps/rejected": -9.410993576049805, + "loss": 0.9476, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.005544860381633043, + "rewards/margins": 0.0024246471002697945, + "rewards/rejected": 0.003120213747024536, + "step": 56 + }, + { + "epoch": 0.1251372118551043, + "grad_norm": 1.6529310941696167, + "learning_rate": 2.85e-06, + "logits/chosen": -2.3447060585021973, + "logits/rejected": -2.249061107635498, + "logps/chosen": -11.913742065429688, + "logps/rejected": -11.23048210144043, + "loss": 0.9893, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.004630698822438717, + "rewards/margins": 0.005256508477032185, + "rewards/rejected": -0.0006258084904402494, + "step": 57 + }, + { + "epoch": 0.12733260153677278, + "grad_norm": 1.0494027137756348, + "learning_rate": 2.9e-06, + "logits/chosen": -2.2313132286071777, + "logits/rejected": -2.401317596435547, + "logps/chosen": -18.439748764038086, + "logps/rejected": -11.610260009765625, + "loss": 1.0282, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.0071330079808831215, + "rewards/margins": 0.006482796743512154, + "rewards/rejected": 0.0006502112373709679, + "step": 58 + }, + { + "epoch": 0.12952799121844127, + "grad_norm": 1.4393824338912964, + "learning_rate": 2.95e-06, + "logits/chosen": -2.3876466751098633, + "logits/rejected": -2.399052143096924, + "logps/chosen": -14.200356483459473, + "logps/rejected": -13.737493515014648, + "loss": 1.0595, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.001972971251234412, + "rewards/margins": 0.002361137419939041, + "rewards/rejected": -0.0003881664015352726, + "step": 59 + }, + { + "epoch": 0.13172338090010977, + "grad_norm": 1.3774553537368774, + "learning_rate": 3e-06, + "logits/chosen": -2.210278034210205, + "logits/rejected": -2.2798633575439453, + "logps/chosen": -13.26652717590332, + "logps/rejected": -11.69228458404541, + "loss": 0.9983, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.0036805993877351284, + "rewards/margins": 0.0023272125981748104, + 
"rewards/rejected": 0.001353386789560318, + "step": 60 + }, + { + "epoch": 0.13391877058177826, + "grad_norm": 1.3280311822891235, + "learning_rate": 3.05e-06, + "logits/chosen": -2.2844367027282715, + "logits/rejected": -2.416640281677246, + "logps/chosen": -13.028985023498535, + "logps/rejected": -10.592105865478516, + "loss": 0.984, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.00871636625379324, + "rewards/margins": 0.01077802013605833, + "rewards/rejected": -0.0020616534166038036, + "step": 61 + }, + { + "epoch": 0.13611416026344675, + "grad_norm": 1.5957064628601074, + "learning_rate": 3.1000000000000004e-06, + "logits/chosen": -2.2388463020324707, + "logits/rejected": -2.429072856903076, + "logps/chosen": -16.77328109741211, + "logps/rejected": -11.056230545043945, + "loss": 0.9916, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.011861161328852177, + "rewards/margins": 0.015179607085883617, + "rewards/rejected": -0.003318445524200797, + "step": 62 + }, + { + "epoch": 0.13830954994511527, + "grad_norm": 1.3382227420806885, + "learning_rate": 3.1500000000000003e-06, + "logits/chosen": -2.3019542694091797, + "logits/rejected": -2.380207061767578, + "logps/chosen": -15.527379035949707, + "logps/rejected": -13.00551986694336, + "loss": 1.0461, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.014199727214872837, + "rewards/margins": 0.004217819310724735, + "rewards/rejected": 0.009981908835470676, + "step": 63 + }, + { + "epoch": 0.14050493962678376, + "grad_norm": 1.4465454816818237, + "learning_rate": 3.2000000000000003e-06, + "logits/chosen": -2.293320417404175, + "logits/rejected": -2.3460850715637207, + "logps/chosen": -12.336854934692383, + "logps/rejected": -12.035362243652344, + "loss": 1.0247, + "rewards/accuracies": 0.40625, + "rewards/chosen": 0.0019360918086022139, + "rewards/margins": -0.004731356166303158, + "rewards/rejected": 0.006667448207736015, + "step": 64 + }, + { + "epoch": 0.14270032930845225, + "grad_norm": 1.438913106918335, + "learning_rate": 3.2500000000000002e-06, + "logits/chosen": -2.356405258178711, + "logits/rejected": -2.381514072418213, + "logps/chosen": -16.52509117126465, + "logps/rejected": -11.303572654724121, + "loss": 1.0138, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.012714626267552376, + "rewards/margins": 0.010571034625172615, + "rewards/rejected": 0.0021435916423797607, + "step": 65 + }, + { + "epoch": 0.14489571899012074, + "grad_norm": 1.352678656578064, + "learning_rate": 3.3000000000000006e-06, + "logits/chosen": -2.3136496543884277, + "logits/rejected": -2.3507792949676514, + "logps/chosen": -13.596353530883789, + "logps/rejected": -9.79554557800293, + "loss": 0.9822, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.012121602892875671, + "rewards/margins": 0.015090488828718662, + "rewards/rejected": -0.0029688861686736345, + "step": 66 + }, + { + "epoch": 0.14709110867178923, + "grad_norm": 1.079323410987854, + "learning_rate": 3.3500000000000005e-06, + "logits/chosen": -2.3352060317993164, + "logits/rejected": -2.3463046550750732, + "logps/chosen": -12.48592758178711, + "logps/rejected": -11.822932243347168, + "loss": 1.0085, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.008349809795618057, + "rewards/margins": -0.00189165398478508, + "rewards/rejected": 0.010241463780403137, + "step": 67 + }, + { + "epoch": 0.14928649835345773, + "grad_norm": 1.618390440940857, + "learning_rate": 3.4000000000000005e-06, + "logits/chosen": -2.333874225616455, + "logits/rejected": -2.4121692180633545, + 
"logps/chosen": -14.930086135864258, + "logps/rejected": -11.14267349243164, + "loss": 0.9708, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.015850193798542023, + "rewards/margins": 0.010664023458957672, + "rewards/rejected": 0.005186168942600489, + "step": 68 + }, + { + "epoch": 0.15148188803512624, + "grad_norm": 1.1727185249328613, + "learning_rate": 3.45e-06, + "logits/chosen": -2.327249765396118, + "logits/rejected": -2.3228392601013184, + "logps/chosen": -17.513296127319336, + "logps/rejected": -11.67719841003418, + "loss": 1.0356, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.00726369908079505, + "rewards/margins": -0.003262670710682869, + "rewards/rejected": 0.010526370257139206, + "step": 69 + }, + { + "epoch": 0.15367727771679474, + "grad_norm": 1.5009926557540894, + "learning_rate": 3.5e-06, + "logits/chosen": -2.381566047668457, + "logits/rejected": -2.2929749488830566, + "logps/chosen": -13.011417388916016, + "logps/rejected": -13.374524116516113, + "loss": 1.0615, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.00458131218329072, + "rewards/margins": 0.0018186615779995918, + "rewards/rejected": 0.002762650139629841, + "step": 70 + }, + { + "epoch": 0.15587266739846323, + "grad_norm": 1.0766772031784058, + "learning_rate": 3.5500000000000003e-06, + "logits/chosen": -2.3763010501861572, + "logits/rejected": -2.3684229850769043, + "logps/chosen": -12.743946075439453, + "logps/rejected": -9.790771484375, + "loss": 0.985, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.00604588445276022, + "rewards/margins": -0.0017442656680941582, + "rewards/rejected": 0.007790150120854378, + "step": 71 + }, + { + "epoch": 0.15806805708013172, + "grad_norm": 1.3915514945983887, + "learning_rate": 3.6000000000000003e-06, + "logits/chosen": -2.255495071411133, + "logits/rejected": -2.207207679748535, + "logps/chosen": -13.701947212219238, + "logps/rejected": -15.165384292602539, + "loss": 1.0743, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.011631378903985023, + "rewards/margins": 0.005737782455980778, + "rewards/rejected": 0.00589359737932682, + "step": 72 + }, + { + "epoch": 0.1602634467618002, + "grad_norm": 1.7364999055862427, + "learning_rate": 3.65e-06, + "logits/chosen": -2.413259506225586, + "logits/rejected": -2.469926357269287, + "logps/chosen": -17.120332717895508, + "logps/rejected": -10.822717666625977, + "loss": 0.9883, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.017608050256967545, + "rewards/margins": 0.004501630552113056, + "rewards/rejected": 0.013106418773531914, + "step": 73 + }, + { + "epoch": 0.16245883644346873, + "grad_norm": 1.3227636814117432, + "learning_rate": 3.7e-06, + "logits/chosen": -2.308757781982422, + "logits/rejected": -2.3778414726257324, + "logps/chosen": -15.324178695678711, + "logps/rejected": -9.740997314453125, + "loss": 0.9552, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.008803250268101692, + "rewards/margins": -0.003404664108529687, + "rewards/rejected": 0.012207916006445885, + "step": 74 + }, + { + "epoch": 0.16465422612513722, + "grad_norm": 1.4592341184616089, + "learning_rate": 3.7500000000000005e-06, + "logits/chosen": -2.389984130859375, + "logits/rejected": -2.463046073913574, + "logps/chosen": -14.928573608398438, + "logps/rejected": -10.570882797241211, + "loss": 0.9779, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.01354161836206913, + "rewards/margins": 0.005444116424769163, + "rewards/rejected": 0.00809750147163868, + "step": 75 + }, + { + "epoch": 0.1668496158068057, + 
"grad_norm": 1.2499895095825195, + "learning_rate": 3.8000000000000005e-06, + "logits/chosen": -2.319549322128296, + "logits/rejected": -2.4380674362182617, + "logps/chosen": -12.940937995910645, + "logps/rejected": -10.083399772644043, + "loss": 0.9944, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.009357284754514694, + "rewards/margins": -0.006600930355489254, + "rewards/rejected": 0.015958216041326523, + "step": 76 + }, + { + "epoch": 0.1690450054884742, + "grad_norm": 1.7567248344421387, + "learning_rate": 3.85e-06, + "logits/chosen": -2.393270969390869, + "logits/rejected": -2.2500927448272705, + "logps/chosen": -11.881815910339355, + "logps/rejected": -12.437444686889648, + "loss": 1.0173, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.02429443597793579, + "rewards/margins": 0.014743359759449959, + "rewards/rejected": 0.009551076218485832, + "step": 77 + }, + { + "epoch": 0.1712403951701427, + "grad_norm": 1.4113529920578003, + "learning_rate": 3.900000000000001e-06, + "logits/chosen": -2.3604631423950195, + "logits/rejected": -2.360464334487915, + "logps/chosen": -13.314837455749512, + "logps/rejected": -10.539698600769043, + "loss": 0.9734, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.012358862906694412, + "rewards/margins": -0.004256892018020153, + "rewards/rejected": 0.01661575585603714, + "step": 78 + }, + { + "epoch": 0.1734357848518112, + "grad_norm": 1.5121917724609375, + "learning_rate": 3.95e-06, + "logits/chosen": -2.3660640716552734, + "logits/rejected": -2.3211538791656494, + "logps/chosen": -13.57307243347168, + "logps/rejected": -11.361422538757324, + "loss": 1.0219, + "rewards/accuracies": 0.375, + "rewards/chosen": 0.0046729762107133865, + "rewards/margins": -0.007223246619105339, + "rewards/rejected": 0.011896222829818726, + "step": 79 + }, + { + "epoch": 0.1756311745334797, + "grad_norm": 1.6794378757476807, + "learning_rate": 4.000000000000001e-06, + "logits/chosen": -2.333510398864746, + "logits/rejected": -2.325746774673462, + "logps/chosen": -12.330310821533203, + "logps/rejected": -13.084382057189941, + "loss": 1.0129, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.015081634745001793, + "rewards/margins": -0.00490574212744832, + "rewards/rejected": 0.019987378269433975, + "step": 80 + }, + { + "epoch": 0.1778265642151482, + "grad_norm": 1.487237572669983, + "learning_rate": 4.05e-06, + "logits/chosen": -2.41157865524292, + "logits/rejected": -2.300631523132324, + "logps/chosen": -13.241340637207031, + "logps/rejected": -13.954183578491211, + "loss": 1.0723, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.022465569898486137, + "rewards/margins": 0.0036638593301177025, + "rewards/rejected": 0.01880171149969101, + "step": 81 + }, + { + "epoch": 0.1800219538968167, + "grad_norm": 1.2064540386199951, + "learning_rate": 4.1e-06, + "logits/chosen": -2.2773404121398926, + "logits/rejected": -2.4026074409484863, + "logps/chosen": -16.395071029663086, + "logps/rejected": -11.761744499206543, + "loss": 1.049, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.010554658249020576, + "rewards/margins": 0.014174232259392738, + "rewards/rejected": -0.003619574476033449, + "step": 82 + }, + { + "epoch": 0.18221734357848518, + "grad_norm": 1.4717923402786255, + "learning_rate": 4.15e-06, + "logits/chosen": -2.3314809799194336, + "logits/rejected": -2.4027044773101807, + "logps/chosen": -13.08586311340332, + "logps/rejected": -12.964557647705078, + "loss": 1.0318, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.022713469341397285, + 
"rewards/margins": 0.01534382812678814, + "rewards/rejected": 0.007369642611593008, + "step": 83 + }, + { + "epoch": 0.18441273326015367, + "grad_norm": 1.2905617952346802, + "learning_rate": 4.2000000000000004e-06, + "logits/chosen": -2.3403005599975586, + "logits/rejected": -2.3712852001190186, + "logps/chosen": -13.521167755126953, + "logps/rejected": -9.760311126708984, + "loss": 0.963, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.026714814826846123, + "rewards/margins": 0.017565395683050156, + "rewards/rejected": 0.009149417281150818, + "step": 84 + }, + { + "epoch": 0.18660812294182216, + "grad_norm": 1.4235241413116455, + "learning_rate": 4.25e-06, + "logits/chosen": -2.3429999351501465, + "logits/rejected": -2.3919286727905273, + "logps/chosen": -12.980363845825195, + "logps/rejected": -10.314764976501465, + "loss": 0.9704, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.04204602539539337, + "rewards/margins": 0.01917717233300209, + "rewards/rejected": 0.022868849337100983, + "step": 85 + }, + { + "epoch": 0.18880351262349068, + "grad_norm": 1.1854406595230103, + "learning_rate": 4.3e-06, + "logits/chosen": -2.269547939300537, + "logits/rejected": -2.2928738594055176, + "logps/chosen": -13.91822624206543, + "logps/rejected": -11.471654891967773, + "loss": 0.9748, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.03401780128479004, + "rewards/margins": 0.021458495408296585, + "rewards/rejected": 0.012559305876493454, + "step": 86 + }, + { + "epoch": 0.19099890230515917, + "grad_norm": 1.9034478664398193, + "learning_rate": 4.350000000000001e-06, + "logits/chosen": -2.300184488296509, + "logits/rejected": -2.249917984008789, + "logps/chosen": -11.432275772094727, + "logps/rejected": -12.034334182739258, + "loss": 0.996, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.03009907528758049, + "rewards/margins": 0.015358498319983482, + "rewards/rejected": 0.014740575104951859, + "step": 87 + }, + { + "epoch": 0.19319429198682767, + "grad_norm": 2.1222307682037354, + "learning_rate": 4.4e-06, + "logits/chosen": -2.331315517425537, + "logits/rejected": -2.329355478286743, + "logps/chosen": -15.621415138244629, + "logps/rejected": -11.350690841674805, + "loss": 0.9767, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.044887036085128784, + "rewards/margins": 0.01875218376517296, + "rewards/rejected": 0.026134852319955826, + "step": 88 + }, + { + "epoch": 0.19538968166849616, + "grad_norm": 1.4220390319824219, + "learning_rate": 4.450000000000001e-06, + "logits/chosen": -2.336406707763672, + "logits/rejected": -2.3903348445892334, + "logps/chosen": -15.872965812683105, + "logps/rejected": -12.38022232055664, + "loss": 1.0457, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.01367251481860876, + "rewards/margins": -0.001530464505776763, + "rewards/rejected": 0.015202978625893593, + "step": 89 + }, + { + "epoch": 0.19758507135016465, + "grad_norm": 1.282646656036377, + "learning_rate": 4.5e-06, + "logits/chosen": -2.361445903778076, + "logits/rejected": -2.476417064666748, + "logps/chosen": -14.19363021850586, + "logps/rejected": -15.457003593444824, + "loss": 1.1384, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.02910601533949375, + "rewards/margins": -0.002174281282350421, + "rewards/rejected": 0.03128029778599739, + "step": 90 + }, + { + "epoch": 0.19978046103183314, + "grad_norm": 1.2941416501998901, + "learning_rate": 4.5500000000000005e-06, + "logits/chosen": -2.299647331237793, + "logits/rejected": -2.2841219902038574, + "logps/chosen": 
-12.112016677856445, + "logps/rejected": -10.684823989868164, + "loss": 0.9786, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.041696906089782715, + "rewards/margins": 0.029532143846154213, + "rewards/rejected": 0.012164760380983353, + "step": 91 + }, + { + "epoch": 0.20197585071350166, + "grad_norm": 2.0074691772460938, + "learning_rate": 4.600000000000001e-06, + "logits/chosen": -2.437091588973999, + "logits/rejected": -2.2995917797088623, + "logps/chosen": -9.002144813537598, + "logps/rejected": -13.91781234741211, + "loss": 1.0479, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.03345324099063873, + "rewards/margins": 0.010070934891700745, + "rewards/rejected": 0.023382307961583138, + "step": 92 + }, + { + "epoch": 0.20417124039517015, + "grad_norm": 1.4986943006515503, + "learning_rate": 4.65e-06, + "logits/chosen": -2.3514928817749023, + "logits/rejected": -2.3139781951904297, + "logps/chosen": -13.505949020385742, + "logps/rejected": -12.242940902709961, + "loss": 1.0188, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.03294369950890541, + "rewards/margins": 0.011969504877924919, + "rewards/rejected": 0.020974192768335342, + "step": 93 + }, + { + "epoch": 0.20636663007683864, + "grad_norm": 1.2458827495574951, + "learning_rate": 4.7e-06, + "logits/chosen": -2.314868927001953, + "logits/rejected": -2.3728139400482178, + "logps/chosen": -11.619760513305664, + "logps/rejected": -11.497888565063477, + "loss": 0.9931, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.02766992524266243, + "rewards/margins": 0.009395633824169636, + "rewards/rejected": 0.01827428862452507, + "step": 94 + }, + { + "epoch": 0.20856201975850713, + "grad_norm": 1.6697496175765991, + "learning_rate": 4.75e-06, + "logits/chosen": -2.3906478881835938, + "logits/rejected": -2.3166065216064453, + "logps/chosen": -13.515888214111328, + "logps/rejected": -9.598672866821289, + "loss": 0.9525, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.047668956220149994, + "rewards/margins": 0.002992257010191679, + "rewards/rejected": 0.04467669874429703, + "step": 95 + }, + { + "epoch": 0.21075740944017562, + "grad_norm": 1.3917086124420166, + "learning_rate": 4.800000000000001e-06, + "logits/chosen": -2.2934889793395996, + "logits/rejected": -2.2676901817321777, + "logps/chosen": -13.526961326599121, + "logps/rejected": -11.483183860778809, + "loss": 0.9959, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.04153824597597122, + "rewards/margins": 0.022040612995624542, + "rewards/rejected": 0.01949763298034668, + "step": 96 + }, + { + "epoch": 0.21295279912184412, + "grad_norm": 1.4579229354858398, + "learning_rate": 4.85e-06, + "logits/chosen": -2.30719256401062, + "logits/rejected": -2.408965826034546, + "logps/chosen": -11.994451522827148, + "logps/rejected": -10.733743667602539, + "loss": 0.9979, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.04512687399983406, + "rewards/margins": -0.0012154264841228724, + "rewards/rejected": 0.04634230211377144, + "step": 97 + }, + { + "epoch": 0.21514818880351264, + "grad_norm": 1.4664539098739624, + "learning_rate": 4.9000000000000005e-06, + "logits/chosen": -2.3413281440734863, + "logits/rejected": -2.3405394554138184, + "logps/chosen": -13.540508270263672, + "logps/rejected": -12.272972106933594, + "loss": 1.0106, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.0718052089214325, + "rewards/margins": 0.014313442632555962, + "rewards/rejected": 0.057491764426231384, + "step": 98 + }, + { + "epoch": 0.21734357848518113, + "grad_norm": 
1.4156373739242554, + "learning_rate": 4.95e-06, + "logits/chosen": -2.3612160682678223, + "logits/rejected": -2.297560691833496, + "logps/chosen": -14.15035343170166, + "logps/rejected": -11.349997520446777, + "loss": 1.0359, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.03496279940009117, + "rewards/margins": -0.026759572327136993, + "rewards/rejected": 0.06172237545251846, + "step": 99 + }, + { + "epoch": 0.21953896816684962, + "grad_norm": 1.8833767175674438, + "learning_rate": 5e-06, + "logits/chosen": -2.3501124382019043, + "logits/rejected": -2.3347649574279785, + "logps/chosen": -9.56277084350586, + "logps/rejected": -8.914642333984375, + "loss": 0.9279, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.06373147666454315, + "rewards/margins": 0.04788838326931, + "rewards/rejected": 0.015843091532588005, + "step": 100 + }, + { + "epoch": 0.2217343578485181, + "grad_norm": 1.5216327905654907, + "learning_rate": 4.999981196479141e-06, + "logits/chosen": -2.28043794631958, + "logits/rejected": -2.2953336238861084, + "logps/chosen": -16.370956420898438, + "logps/rejected": -14.55150032043457, + "loss": 1.0965, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.05121631175279617, + "rewards/margins": 0.013376165181398392, + "rewards/rejected": 0.03784014284610748, + "step": 101 + }, + { + "epoch": 0.2239297475301866, + "grad_norm": 1.3872263431549072, + "learning_rate": 4.99992478619942e-06, + "logits/chosen": -2.279304265975952, + "logits/rejected": -2.268181800842285, + "logps/chosen": -15.490918159484863, + "logps/rejected": -13.121916770935059, + "loss": 1.0741, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.05697057023644447, + "rewards/margins": -0.002729692729189992, + "rewards/rejected": 0.059700265526771545, + "step": 102 + }, + { + "epoch": 0.2261251372118551, + "grad_norm": 1.5503108501434326, + "learning_rate": 4.999830770009406e-06, + "logits/chosen": -2.27740478515625, + "logits/rejected": -2.2930843830108643, + "logps/chosen": -11.542291641235352, + "logps/rejected": -10.724907875061035, + "loss": 0.9791, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.03553184121847153, + "rewards/margins": 0.014793504029512405, + "rewards/rejected": 0.020738335326313972, + "step": 103 + }, + { + "epoch": 0.2283205268935236, + "grad_norm": 1.5652024745941162, + "learning_rate": 4.999699149323369e-06, + "logits/chosen": -2.307811737060547, + "logits/rejected": -2.338045597076416, + "logps/chosen": -14.059741973876953, + "logps/rejected": -12.458868026733398, + "loss": 1.0213, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.08312933892011642, + "rewards/margins": 0.034912753850221634, + "rewards/rejected": 0.04821658134460449, + "step": 104 + }, + { + "epoch": 0.2305159165751921, + "grad_norm": 1.4373472929000854, + "learning_rate": 4.999529926121254e-06, + "logits/chosen": -2.3321356773376465, + "logits/rejected": -2.316896438598633, + "logps/chosen": -14.563620567321777, + "logps/rejected": -15.855158805847168, + "loss": 1.1342, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.07377764582633972, + "rewards/margins": 0.02341487631201744, + "rewards/rejected": 0.05036277323961258, + "step": 105 + }, + { + "epoch": 0.2327113062568606, + "grad_norm": 1.5053890943527222, + "learning_rate": 4.999323102948655e-06, + "logits/chosen": -2.3042502403259277, + "logits/rejected": -2.339005947113037, + "logps/chosen": -12.038972854614258, + "logps/rejected": -14.546398162841797, + "loss": 1.043, + "rewards/accuracies": 0.53125, + "rewards/chosen": 
0.06436577439308167, + "rewards/margins": 0.012802021577954292, + "rewards/rejected": 0.051563750952482224, + "step": 106 + }, + { + "epoch": 0.2349066959385291, + "grad_norm": 1.228031039237976, + "learning_rate": 4.999078682916774e-06, + "logits/chosen": -2.3684043884277344, + "logits/rejected": -2.3670406341552734, + "logps/chosen": -12.876321792602539, + "logps/rejected": -9.346763610839844, + "loss": 0.9602, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.019609777256846428, + "rewards/margins": -0.001786000095307827, + "rewards/rejected": 0.021395772695541382, + "step": 107 + }, + { + "epoch": 0.23710208562019758, + "grad_norm": 1.644127607345581, + "learning_rate": 4.998796669702378e-06, + "logits/chosen": -2.303502082824707, + "logits/rejected": -2.3413586616516113, + "logps/chosen": -12.201820373535156, + "logps/rejected": -9.633750915527344, + "loss": 0.9549, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.06634039431810379, + "rewards/margins": 0.019073188304901123, + "rewards/rejected": 0.04726720601320267, + "step": 108 + }, + { + "epoch": 0.23929747530186607, + "grad_norm": 1.7933754920959473, + "learning_rate": 4.99847706754774e-06, + "logits/chosen": -2.438173294067383, + "logits/rejected": -2.274287700653076, + "logps/chosen": -17.797473907470703, + "logps/rejected": -16.017791748046875, + "loss": 1.0189, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.09196966886520386, + "rewards/margins": 0.05703253298997879, + "rewards/rejected": 0.034937139600515366, + "step": 109 + }, + { + "epoch": 0.2414928649835346, + "grad_norm": 1.5571792125701904, + "learning_rate": 4.998119881260576e-06, + "logits/chosen": -2.316495418548584, + "logits/rejected": -2.309934616088867, + "logps/chosen": -12.530914306640625, + "logps/rejected": -9.023895263671875, + "loss": 0.9321, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.05945971608161926, + "rewards/margins": 0.03105064295232296, + "rewards/rejected": 0.02840907871723175, + "step": 110 + }, + { + "epoch": 0.24368825466520308, + "grad_norm": 1.5516993999481201, + "learning_rate": 4.997725116213974e-06, + "logits/chosen": -2.220071315765381, + "logits/rejected": -2.2800261974334717, + "logps/chosen": -9.25605583190918, + "logps/rejected": -9.267400741577148, + "loss": 0.9622, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.05536666512489319, + "rewards/margins": 0.012987159192562103, + "rewards/rejected": 0.042379505932331085, + "step": 111 + }, + { + "epoch": 0.24588364434687157, + "grad_norm": 1.7297463417053223, + "learning_rate": 4.997292778346312e-06, + "logits/chosen": -2.2448363304138184, + "logits/rejected": -2.297600269317627, + "logps/chosen": -11.284765243530273, + "logps/rejected": -12.576556205749512, + "loss": 1.0317, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.03251136094331741, + "rewards/margins": 0.019679736346006393, + "rewards/rejected": 0.012831625528633595, + "step": 112 + }, + { + "epoch": 0.24807903402854006, + "grad_norm": 1.5127956867218018, + "learning_rate": 4.99682287416117e-06, + "logits/chosen": -2.2955853939056396, + "logits/rejected": -2.3179407119750977, + "logps/chosen": -11.177826881408691, + "logps/rejected": -9.537398338317871, + "loss": 0.9662, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.06957294791936874, + "rewards/margins": 0.022395242005586624, + "rewards/rejected": 0.04717769846320152, + "step": 113 + }, + { + "epoch": 0.2502744237102086, + "grad_norm": 1.649693489074707, + "learning_rate": 4.996315410727229e-06, + "logits/chosen": -2.377718448638916, 
+ "logits/rejected": -2.32913875579834, + "logps/chosen": -9.667226791381836, + "logps/rejected": -12.393421173095703, + "loss": 1.0772, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.04138043522834778, + "rewards/margins": -0.03883221000432968, + "rewards/rejected": 0.08021265268325806, + "step": 114 + }, + { + "epoch": 0.2524698133918771, + "grad_norm": 1.8851720094680786, + "learning_rate": 4.995770395678171e-06, + "logits/chosen": -2.2822062969207764, + "logits/rejected": -2.3547139167785645, + "logps/chosen": -12.832666397094727, + "logps/rejected": -10.308067321777344, + "loss": 0.9975, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.03939870372414589, + "rewards/margins": 0.013363707810640335, + "rewards/rejected": 0.026034995913505554, + "step": 115 + }, + { + "epoch": 0.25466520307354557, + "grad_norm": 1.5929930210113525, + "learning_rate": 4.995187837212555e-06, + "logits/chosen": -2.455446720123291, + "logits/rejected": -2.4105300903320312, + "logps/chosen": -16.525413513183594, + "logps/rejected": -11.672962188720703, + "loss": 1.0094, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.031660135835409164, + "rewards/margins": 0.0169740691781044, + "rewards/rejected": 0.014686062932014465, + "step": 116 + }, + { + "epoch": 0.25686059275521406, + "grad_norm": 1.4787267446517944, + "learning_rate": 4.994567744093703e-06, + "logits/chosen": -2.366934061050415, + "logits/rejected": -2.276400566101074, + "logps/chosen": -11.443145751953125, + "logps/rejected": -12.336971282958984, + "loss": 1.0239, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.024969782680273056, + "rewards/margins": 0.013532733544707298, + "rewards/rejected": 0.011437049135565758, + "step": 117 + }, + { + "epoch": 0.25905598243688255, + "grad_norm": 1.632069706916809, + "learning_rate": 4.993910125649561e-06, + "logits/chosen": -2.3295586109161377, + "logits/rejected": -2.3176655769348145, + "logps/chosen": -15.515420913696289, + "logps/rejected": -12.011621475219727, + "loss": 1.0096, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.0905236303806305, + "rewards/margins": 0.0291280597448349, + "rewards/rejected": 0.06139557063579559, + "step": 118 + }, + { + "epoch": 0.26125137211855104, + "grad_norm": 1.6845340728759766, + "learning_rate": 4.993214991772563e-06, + "logits/chosen": -2.3398208618164062, + "logits/rejected": -2.3458690643310547, + "logps/chosen": -12.432971954345703, + "logps/rejected": -8.691479682922363, + "loss": 0.9449, + "rewards/accuracies": 0.40625, + "rewards/chosen": 0.07083068788051605, + "rewards/margins": 0.0008336303289979696, + "rewards/rejected": 0.06999707221984863, + "step": 119 + }, + { + "epoch": 0.26344676180021953, + "grad_norm": 1.4854542016983032, + "learning_rate": 4.99248235291948e-06, + "logits/chosen": -2.3151345252990723, + "logits/rejected": -2.3466830253601074, + "logps/chosen": -12.065990447998047, + "logps/rejected": -11.669464111328125, + "loss": 0.9982, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.03543815016746521, + "rewards/margins": 0.05088028684258461, + "rewards/rejected": -0.01544213853776455, + "step": 120 + }, + { + "epoch": 0.265642151481888, + "grad_norm": 1.7356363534927368, + "learning_rate": 4.991712220111265e-06, + "logits/chosen": -2.3189940452575684, + "logits/rejected": -2.3774425983428955, + "logps/chosen": -14.105989456176758, + "logps/rejected": -10.266246795654297, + "loss": 0.9667, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.09679834544658661, + "rewards/margins": 0.047457098960876465, + 
"rewards/rejected": 0.049341246485710144, + "step": 121 + }, + { + "epoch": 0.2678375411635565, + "grad_norm": 1.601832628250122, + "learning_rate": 4.990904604932885e-06, + "logits/chosen": -2.347449541091919, + "logits/rejected": -2.352397918701172, + "logps/chosen": -15.789161682128906, + "logps/rejected": -13.354789733886719, + "loss": 1.0227, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.09421166777610779, + "rewards/margins": 0.055517613887786865, + "rewards/rejected": 0.03869405388832092, + "step": 122 + }, + { + "epoch": 0.270032930845225, + "grad_norm": 1.4014909267425537, + "learning_rate": 4.990059519533145e-06, + "logits/chosen": -2.2689881324768066, + "logits/rejected": -2.283444404602051, + "logps/chosen": -11.951663970947266, + "logps/rejected": -13.521688461303711, + "loss": 1.077, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.07996637374162674, + "rewards/margins": 0.0017875898629426956, + "rewards/rejected": 0.0781787857413292, + "step": 123 + }, + { + "epoch": 0.2722283205268935, + "grad_norm": 2.307819128036499, + "learning_rate": 4.989176976624511e-06, + "logits/chosen": -2.313141107559204, + "logits/rejected": -2.3250885009765625, + "logps/chosen": -15.183206558227539, + "logps/rejected": -14.606795310974121, + "loss": 1.1025, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.06334234774112701, + "rewards/margins": 0.03910152241587639, + "rewards/rejected": 0.024240825325250626, + "step": 124 + }, + { + "epoch": 0.27442371020856204, + "grad_norm": 1.768710970878601, + "learning_rate": 4.9882569894829146e-06, + "logits/chosen": -2.3245012760162354, + "logits/rejected": -2.4108376502990723, + "logps/chosen": -11.341304779052734, + "logps/rejected": -13.046493530273438, + "loss": 1.0653, + "rewards/accuracies": 0.375, + "rewards/chosen": 0.04837167263031006, + "rewards/margins": -0.03817473351955414, + "rewards/rejected": 0.0865464061498642, + "step": 125 + }, + { + "epoch": 0.27661909989023054, + "grad_norm": 1.8172316551208496, + "learning_rate": 4.987299571947554e-06, + "logits/chosen": -2.2869110107421875, + "logits/rejected": -2.3253166675567627, + "logps/chosen": -12.174118041992188, + "logps/rejected": -9.856450080871582, + "loss": 0.9751, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.06659601628780365, + "rewards/margins": -0.006406224332749844, + "rewards/rejected": 0.07300224900245667, + "step": 126 + }, + { + "epoch": 0.278814489571899, + "grad_norm": 1.9271728992462158, + "learning_rate": 4.986304738420684e-06, + "logits/chosen": -2.3674209117889404, + "logits/rejected": -2.3308446407318115, + "logps/chosen": -10.563587188720703, + "logps/rejected": -11.918947219848633, + "loss": 0.9901, + "rewards/accuracies": 0.46875, + "rewards/chosen": 0.06925193965435028, + "rewards/margins": -0.0023591145873069763, + "rewards/rejected": 0.07161105424165726, + "step": 127 + }, + { + "epoch": 0.2810098792535675, + "grad_norm": 1.8149971961975098, + "learning_rate": 4.9852725038674035e-06, + "logits/chosen": -2.33549427986145, + "logits/rejected": -2.2321507930755615, + "logps/chosen": -9.894359588623047, + "logps/rejected": -13.391302108764648, + "loss": 1.0184, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.06098070740699768, + "rewards/margins": 0.007608053274452686, + "rewards/rejected": 0.05337265133857727, + "step": 128 + }, + { + "epoch": 0.283205268935236, + "grad_norm": 1.5623228549957275, + "learning_rate": 4.9842028838154285e-06, + "logits/chosen": -2.355159282684326, + "logits/rejected": -2.4218826293945312, + "logps/chosen": 
-16.222064971923828, + "logps/rejected": -9.621826171875, + "loss": 0.9459, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.04742385074496269, + "rewards/margins": 0.04059514030814171, + "rewards/rejected": 0.006828710436820984, + "step": 129 + }, + { + "epoch": 0.2854006586169045, + "grad_norm": 1.62009859085083, + "learning_rate": 4.983095894354858e-06, + "logits/chosen": -2.2709784507751465, + "logits/rejected": -2.30684757232666, + "logps/chosen": -11.814016342163086, + "logps/rejected": -13.255616188049316, + "loss": 1.0494, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0587792843580246, + "rewards/margins": 0.020036276429891586, + "rewards/rejected": 0.03874301165342331, + "step": 130 + }, + { + "epoch": 0.287596048298573, + "grad_norm": 1.7169865369796753, + "learning_rate": 4.9819515521379295e-06, + "logits/chosen": -2.3297133445739746, + "logits/rejected": -2.3426923751831055, + "logps/chosen": -19.01955223083496, + "logps/rejected": -13.051504135131836, + "loss": 1.0026, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.10157382488250732, + "rewards/margins": 0.054719578474760056, + "rewards/rejected": 0.04685423523187637, + "step": 131 + }, + { + "epoch": 0.2897914379802415, + "grad_norm": 2.0506138801574707, + "learning_rate": 4.980769874378775e-06, + "logits/chosen": -2.2926626205444336, + "logits/rejected": -2.304621696472168, + "logps/chosen": -15.2979097366333, + "logps/rejected": -14.070364952087402, + "loss": 1.045, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.06757767498493195, + "rewards/margins": 0.027771957218647003, + "rewards/rejected": 0.039805714040994644, + "step": 132 + }, + { + "epoch": 0.29198682766191, + "grad_norm": 1.728746771812439, + "learning_rate": 4.979550878853154e-06, + "logits/chosen": -2.335216999053955, + "logits/rejected": -2.3271849155426025, + "logps/chosen": -12.738531112670898, + "logps/rejected": -10.87563705444336, + "loss": 0.9702, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.09164644777774811, + "rewards/margins": 0.03838293254375458, + "rewards/rejected": 0.05326350778341293, + "step": 133 + }, + { + "epoch": 0.29418221734357847, + "grad_norm": 1.667359709739685, + "learning_rate": 4.978294583898196e-06, + "logits/chosen": -2.3262503147125244, + "logits/rejected": -2.3686673641204834, + "logps/chosen": -11.43099308013916, + "logps/rejected": -7.651641368865967, + "loss": 0.8551, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.09794393181800842, + "rewards/margins": 0.10966614633798599, + "rewards/rejected": -0.011722217313945293, + "step": 134 + }, + { + "epoch": 0.29637760702524696, + "grad_norm": 1.7627335786819458, + "learning_rate": 4.977001008412113e-06, + "logits/chosen": -2.2389612197875977, + "logits/rejected": -2.309508800506592, + "logps/chosen": -14.33803653717041, + "logps/rejected": -10.298504829406738, + "loss": 1.0181, + "rewards/accuracies": 0.375, + "rewards/chosen": 0.034048259258270264, + "rewards/margins": -0.03645710274577141, + "rewards/rejected": 0.07050535827875137, + "step": 135 + }, + { + "epoch": 0.29857299670691545, + "grad_norm": 1.8202238082885742, + "learning_rate": 4.975670171853926e-06, + "logits/chosen": -2.2767112255096436, + "logits/rejected": -2.3150486946105957, + "logps/chosen": -11.937854766845703, + "logps/rejected": -14.83053207397461, + "loss": 1.0772, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.07502683997154236, + "rewards/margins": 0.028351005166769028, + "rewards/rejected": 0.04667583480477333, + "step": 136 + }, + { + "epoch": 
0.300768386388584, + "grad_norm": 1.7850346565246582, + "learning_rate": 4.974302094243164e-06, + "logits/chosen": -2.2587356567382812, + "logits/rejected": -2.370450496673584, + "logps/chosen": -14.766382217407227, + "logps/rejected": -11.365856170654297, + "loss": 1.0113, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.024949384853243828, + "rewards/margins": 0.01844519004225731, + "rewards/rejected": 0.00650419294834137, + "step": 137 + }, + { + "epoch": 0.3029637760702525, + "grad_norm": 1.7888866662979126, + "learning_rate": 4.972896796159568e-06, + "logits/chosen": -2.2733049392700195, + "logits/rejected": -2.3269832134246826, + "logps/chosen": -14.468865394592285, + "logps/rejected": -10.19251823425293, + "loss": 0.9104, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.11349797993898392, + "rewards/margins": 0.13075613975524902, + "rewards/rejected": -0.01725815236568451, + "step": 138 + }, + { + "epoch": 0.305159165751921, + "grad_norm": 1.5196716785430908, + "learning_rate": 4.971454298742779e-06, + "logits/chosen": -2.30745267868042, + "logits/rejected": -2.258906602859497, + "logps/chosen": -13.690924644470215, + "logps/rejected": -10.550579071044922, + "loss": 0.9217, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.09285424649715424, + "rewards/margins": 0.08185766637325287, + "rewards/rejected": 0.010996590368449688, + "step": 139 + }, + { + "epoch": 0.30735455543358947, + "grad_norm": 1.8636034727096558, + "learning_rate": 4.969974623692023e-06, + "logits/chosen": -2.2885866165161133, + "logits/rejected": -2.3624653816223145, + "logps/chosen": -15.346282005310059, + "logps/rejected": -13.977486610412598, + "loss": 1.0218, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.06509467214345932, + "rewards/margins": 0.07762052118778229, + "rewards/rejected": -0.012525845319032669, + "step": 140 + }, + { + "epoch": 0.30954994511525796, + "grad_norm": 1.990831971168518, + "learning_rate": 4.968457793265778e-06, + "logits/chosen": -2.2647054195404053, + "logits/rejected": -2.266617774963379, + "logps/chosen": -12.410196304321289, + "logps/rejected": -10.732589721679688, + "loss": 0.9477, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.07372469455003738, + "rewards/margins": 0.06349782645702362, + "rewards/rejected": 0.010226882994174957, + "step": 141 + }, + { + "epoch": 0.31174533479692645, + "grad_norm": 1.7295087575912476, + "learning_rate": 4.966903830281449e-06, + "logits/chosen": -2.266833782196045, + "logits/rejected": -2.2585251331329346, + "logps/chosen": -12.859150886535645, + "logps/rejected": -10.204259872436523, + "loss": 0.955, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.05344117432832718, + "rewards/margins": 0.021849703043699265, + "rewards/rejected": 0.03159147500991821, + "step": 142 + }, + { + "epoch": 0.31394072447859495, + "grad_norm": 2.444378137588501, + "learning_rate": 4.965312758115013e-06, + "logits/chosen": -2.391078233718872, + "logits/rejected": -2.3234777450561523, + "logps/chosen": -10.306347846984863, + "logps/rejected": -10.704697608947754, + "loss": 0.9832, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0930076315999031, + "rewards/margins": 0.04472024738788605, + "rewards/rejected": 0.04828737676143646, + "step": 143 + }, + { + "epoch": 0.31613611416026344, + "grad_norm": 1.8083428144454956, + "learning_rate": 4.963684600700679e-06, + "logits/chosen": -2.3120150566101074, + "logits/rejected": -2.2713851928710938, + "logps/chosen": -19.03668785095215, + "logps/rejected": -14.014898300170898, + "loss": 
1.0903, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.035560242831707, + "rewards/margins": 0.000476097222417593, + "rewards/rejected": 0.03508414700627327, + "step": 144 + }, + { + "epoch": 0.31833150384193193, + "grad_norm": 1.7758944034576416, + "learning_rate": 4.962019382530521e-06, + "logits/chosen": -2.300203800201416, + "logits/rejected": -2.3651368618011475, + "logps/chosen": -13.401725769042969, + "logps/rejected": -12.91569709777832, + "loss": 0.993, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.08328074961900711, + "rewards/margins": 0.12566398084163666, + "rewards/rejected": -0.04238323122262955, + "step": 145 + }, + { + "epoch": 0.3205268935236004, + "grad_norm": 2.1212151050567627, + "learning_rate": 4.9603171286541085e-06, + "logits/chosen": -2.247659206390381, + "logits/rejected": -2.3920531272888184, + "logps/chosen": -12.737861633300781, + "logps/rejected": -14.069719314575195, + "loss": 1.0722, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.055058207362890244, + "rewards/margins": 0.011270837858319283, + "rewards/rejected": 0.04378737136721611, + "step": 146 + }, + { + "epoch": 0.3227222832052689, + "grad_norm": 2.6743414402008057, + "learning_rate": 4.958577864678137e-06, + "logits/chosen": -2.3820812702178955, + "logits/rejected": -2.429454803466797, + "logps/chosen": -10.022626876831055, + "logps/rejected": -10.506009101867676, + "loss": 0.986, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.07218223810195923, + "rewards/margins": 0.053717922419309616, + "rewards/rejected": 0.018464315682649612, + "step": 147 + }, + { + "epoch": 0.32491767288693746, + "grad_norm": 2.1124470233917236, + "learning_rate": 4.956801616766033e-06, + "logits/chosen": -2.208386182785034, + "logits/rejected": -2.425445556640625, + "logps/chosen": -17.890907287597656, + "logps/rejected": -15.792840003967285, + "loss": 1.051, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.08094897866249084, + "rewards/margins": 0.1441258043050766, + "rewards/rejected": -0.06317683309316635, + "step": 148 + }, + { + "epoch": 0.32711306256860595, + "grad_norm": 1.9295481443405151, + "learning_rate": 4.9549884116375714e-06, + "logits/chosen": -2.2656877040863037, + "logits/rejected": -2.328075885772705, + "logps/chosen": -13.346929550170898, + "logps/rejected": -10.23320484161377, + "loss": 0.9307, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.07452508807182312, + "rewards/margins": 0.08161447942256927, + "rewards/rejected": -0.007089395076036453, + "step": 149 + }, + { + "epoch": 0.32930845225027444, + "grad_norm": 2.049334764480591, + "learning_rate": 4.953138276568462e-06, + "logits/chosen": -2.3590216636657715, + "logits/rejected": -2.3576831817626953, + "logps/chosen": -11.798664093017578, + "logps/rejected": -9.424067497253418, + "loss": 0.9288, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.07175838202238083, + "rewards/margins": 0.10401518642902374, + "rewards/rejected": -0.032256804406642914, + "step": 150 + }, + { + "epoch": 0.33150384193194293, + "grad_norm": 2.2335071563720703, + "learning_rate": 4.951251239389949e-06, + "logits/chosen": -2.2458577156066895, + "logits/rejected": -2.3633508682250977, + "logps/chosen": -12.633405685424805, + "logps/rejected": -10.60333251953125, + "loss": 0.9717, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.03975841775536537, + "rewards/margins": 0.08428110182285309, + "rewards/rejected": -0.04452267661690712, + "step": 151 + }, + { + "epoch": 0.3336992316136114, + "grad_norm": 1.876927137374878, + 
"learning_rate": 4.949327328488385e-06, + "logits/chosen": -2.308839797973633, + "logits/rejected": -2.2872886657714844, + "logps/chosen": -15.553834915161133, + "logps/rejected": -10.210927963256836, + "loss": 0.9448, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.05748458206653595, + "rewards/margins": 0.07385220378637314, + "rewards/rejected": -0.01636761613190174, + "step": 152 + }, + { + "epoch": 0.3358946212952799, + "grad_norm": 1.784837007522583, + "learning_rate": 4.9473665728048115e-06, + "logits/chosen": -2.364820957183838, + "logits/rejected": -2.425748348236084, + "logps/chosen": -18.728487014770508, + "logps/rejected": -13.96826171875, + "loss": 1.0259, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.07712395489215851, + "rewards/margins": 0.1511487364768982, + "rewards/rejected": -0.07402478903532028, + "step": 153 + }, + { + "epoch": 0.3380900109769484, + "grad_norm": 2.1200637817382812, + "learning_rate": 4.9453690018345144e-06, + "logits/chosen": -2.302483558654785, + "logits/rejected": -2.368330717086792, + "logps/chosen": -12.537941932678223, + "logps/rejected": -10.460428237915039, + "loss": 0.9672, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.044165655970573425, + "rewards/margins": 0.0792531818151474, + "rewards/rejected": -0.035087522119283676, + "step": 154 + }, + { + "epoch": 0.3402854006586169, + "grad_norm": 1.7176671028137207, + "learning_rate": 4.943334645626589e-06, + "logits/chosen": -2.2893455028533936, + "logits/rejected": -2.327376365661621, + "logps/chosen": -14.962259292602539, + "logps/rejected": -13.632776260375977, + "loss": 1.0493, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.08821810781955719, + "rewards/margins": 0.08609063923358917, + "rewards/rejected": 0.002127467654645443, + "step": 155 + }, + { + "epoch": 0.3424807903402854, + "grad_norm": 1.8398821353912354, + "learning_rate": 4.941263534783482e-06, + "logits/chosen": -2.328542709350586, + "logits/rejected": -2.3318638801574707, + "logps/chosen": -16.296972274780273, + "logps/rejected": -10.691251754760742, + "loss": 0.9273, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.09197843074798584, + "rewards/margins": 0.14689640700817108, + "rewards/rejected": -0.05491796135902405, + "step": 156 + }, + { + "epoch": 0.3446761800219539, + "grad_norm": 1.9241065979003906, + "learning_rate": 4.939155700460536e-06, + "logits/chosen": -2.3499631881713867, + "logits/rejected": -2.3470518589019775, + "logps/chosen": -12.203739166259766, + "logps/rejected": -10.567827224731445, + "loss": 0.9536, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.09050009399652481, + "rewards/margins": 0.05735006183385849, + "rewards/rejected": 0.03315002843737602, + "step": 157 + }, + { + "epoch": 0.3468715697036224, + "grad_norm": 2.576176166534424, + "learning_rate": 4.937011174365515e-06, + "logits/chosen": -2.324387550354004, + "logits/rejected": -2.363718032836914, + "logps/chosen": -17.173086166381836, + "logps/rejected": -11.478891372680664, + "loss": 0.9861, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.05878392606973648, + "rewards/margins": 0.060986313968896866, + "rewards/rejected": -0.002202393487095833, + "step": 158 + }, + { + "epoch": 0.34906695938529086, + "grad_norm": 2.130946159362793, + "learning_rate": 4.934829988758131e-06, + "logits/chosen": -2.356431007385254, + "logits/rejected": -2.423962354660034, + "logps/chosen": -12.604738235473633, + "logps/rejected": -10.564749717712402, + "loss": 0.9622, + "rewards/accuracies": 0.625, + "rewards/chosen": 
0.047928761690855026, + "rewards/margins": 0.08084459602832794, + "rewards/rejected": -0.032915834337472916, + "step": 159 + }, + { + "epoch": 0.3512623490669594, + "grad_norm": 1.962509274482727, + "learning_rate": 4.93261217644956e-06, + "logits/chosen": -2.357512950897217, + "logits/rejected": -2.3551905155181885, + "logps/chosen": -12.000711441040039, + "logps/rejected": -11.452027320861816, + "loss": 1.0009, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.0928552895784378, + "rewards/margins": 0.09134387969970703, + "rewards/rejected": 0.001511402428150177, + "step": 160 + }, + { + "epoch": 0.3534577387486279, + "grad_norm": 2.2321617603302, + "learning_rate": 4.930357770801946e-06, + "logits/chosen": -2.3161845207214355, + "logits/rejected": -2.3024725914001465, + "logps/chosen": -14.647942543029785, + "logps/rejected": -11.785874366760254, + "loss": 0.9646, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.057551611214876175, + "rewards/margins": 0.09231199324131012, + "rewards/rejected": -0.034760378301143646, + "step": 161 + }, + { + "epoch": 0.3556531284302964, + "grad_norm": 2.041982889175415, + "learning_rate": 4.928066805727901e-06, + "logits/chosen": -2.3434221744537354, + "logits/rejected": -2.3381900787353516, + "logps/chosen": -17.95254135131836, + "logps/rejected": -12.574861526489258, + "loss": 0.9744, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0844038724899292, + "rewards/margins": 0.11732687801122665, + "rewards/rejected": -0.032922983169555664, + "step": 162 + }, + { + "epoch": 0.3578485181119649, + "grad_norm": 1.9384185075759888, + "learning_rate": 4.925739315689991e-06, + "logits/chosen": -2.372062921524048, + "logits/rejected": -2.296802282333374, + "logps/chosen": -12.097003936767578, + "logps/rejected": -15.5323486328125, + "loss": 1.0853, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.03156058490276337, + "rewards/margins": 0.09192690998315811, + "rewards/rejected": -0.06036633625626564, + "step": 163 + }, + { + "epoch": 0.3600439077936334, + "grad_norm": 2.985081911087036, + "learning_rate": 4.923375335700224e-06, + "logits/chosen": -2.35788631439209, + "logits/rejected": -2.3205502033233643, + "logps/chosen": -15.22624397277832, + "logps/rejected": -14.541189193725586, + "loss": 1.0442, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.035873882472515106, + "rewards/margins": 0.061657097190618515, + "rewards/rejected": -0.02578321285545826, + "step": 164 + }, + { + "epoch": 0.36223929747530187, + "grad_norm": 2.4644775390625, + "learning_rate": 4.9209749013195155e-06, + "logits/chosen": -2.4986884593963623, + "logits/rejected": -2.368385076522827, + "logps/chosen": -13.233194351196289, + "logps/rejected": -12.936639785766602, + "loss": 0.9809, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.047733619809150696, + "rewards/margins": 0.08755230903625488, + "rewards/rejected": -0.03981868550181389, + "step": 165 + }, + { + "epoch": 0.36443468715697036, + "grad_norm": 2.0228378772735596, + "learning_rate": 4.91853804865716e-06, + "logits/chosen": -2.2875237464904785, + "logits/rejected": -2.245499849319458, + "logps/chosen": -13.337098121643066, + "logps/rejected": -11.761133193969727, + "loss": 0.9976, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.07397015392780304, + "rewards/margins": 0.016120310872793198, + "rewards/rejected": 0.05784984305500984, + "step": 166 + }, + { + "epoch": 0.36663007683863885, + "grad_norm": 2.2122652530670166, + "learning_rate": 4.916064814370287e-06, + "logits/chosen": 
-2.361201763153076, + "logits/rejected": -2.31585431098938, + "logps/chosen": -21.456907272338867, + "logps/rejected": -13.594123840332031, + "loss": 1.0081, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.09469065815210342, + "rewards/margins": 0.11242985725402832, + "rewards/rejected": -0.017739199101924896, + "step": 167 + }, + { + "epoch": 0.36882546652030734, + "grad_norm": 2.4223556518554688, + "learning_rate": 4.913555235663306e-06, + "logits/chosen": -2.3535289764404297, + "logits/rejected": -2.330043315887451, + "logps/chosen": -12.632347106933594, + "logps/rejected": -11.649641036987305, + "loss": 0.9651, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.022385867312550545, + "rewards/margins": 0.11026401072740555, + "rewards/rejected": -0.08787814527750015, + "step": 168 + }, + { + "epoch": 0.37102085620197583, + "grad_norm": 2.7966530323028564, + "learning_rate": 4.911009350287348e-06, + "logits/chosen": -2.3594818115234375, + "logits/rejected": -2.4108834266662598, + "logps/chosen": -14.320435523986816, + "logps/rejected": -8.055628776550293, + "loss": 0.8865, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.024796217679977417, + "rewards/margins": 0.1393408477306366, + "rewards/rejected": -0.11454464495182037, + "step": 169 + }, + { + "epoch": 0.3732162458836443, + "grad_norm": 3.0437235832214355, + "learning_rate": 4.908427196539701e-06, + "logits/chosen": -2.3775222301483154, + "logits/rejected": -2.3491196632385254, + "logps/chosen": -12.23366928100586, + "logps/rejected": -11.17028522491455, + "loss": 0.9193, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.0801595002412796, + "rewards/margins": 0.1766997128725052, + "rewards/rejected": -0.09654020518064499, + "step": 170 + }, + { + "epoch": 0.3754116355653128, + "grad_norm": 1.9359002113342285, + "learning_rate": 4.905808813263231e-06, + "logits/chosen": -2.2803163528442383, + "logits/rejected": -2.38509202003479, + "logps/chosen": -14.892003059387207, + "logps/rejected": -9.033920288085938, + "loss": 0.8289, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.14654552936553955, + "rewards/margins": 0.2833712100982666, + "rewards/rejected": -0.13682566583156586, + "step": 171 + }, + { + "epoch": 0.37760702524698136, + "grad_norm": 2.1269936561584473, + "learning_rate": 4.903154239845798e-06, + "logits/chosen": -2.3570709228515625, + "logits/rejected": -2.3689980506896973, + "logps/chosen": -17.01831817626953, + "logps/rejected": -10.841741561889648, + "loss": 0.9478, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.05472090095281601, + "rewards/margins": 0.12769103050231934, + "rewards/rejected": -0.07297012954950333, + "step": 172 + }, + { + "epoch": 0.37980241492864986, + "grad_norm": 2.4285571575164795, + "learning_rate": 4.900463516219661e-06, + "logits/chosen": -2.378610610961914, + "logits/rejected": -2.311692714691162, + "logps/chosen": -13.103206634521484, + "logps/rejected": -11.916963577270508, + "loss": 1.0449, + "rewards/accuracies": 0.4375, + "rewards/chosen": 0.01136775128543377, + "rewards/margins": 0.06381823122501373, + "rewards/rejected": -0.05245048552751541, + "step": 173 + }, + { + "epoch": 0.38199780461031835, + "grad_norm": 2.457991600036621, + "learning_rate": 4.897736682860885e-06, + "logits/chosen": -2.392366409301758, + "logits/rejected": -2.4145660400390625, + "logps/chosen": -19.734670639038086, + "logps/rejected": -12.913403511047363, + "loss": 1.0055, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.005804223008453846, + "rewards/margins": 
0.050883978605270386, + "rewards/rejected": -0.04507976025342941, + "step": 174 + }, + { + "epoch": 0.38419319429198684, + "grad_norm": 2.509645462036133, + "learning_rate": 4.894973780788722e-06, + "logits/chosen": -2.38358736038208, + "logits/rejected": -2.460597038269043, + "logps/chosen": -15.367487907409668, + "logps/rejected": -12.089338302612305, + "loss": 0.9987, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.03781091794371605, + "rewards/margins": 0.16976892948150635, + "rewards/rejected": -0.1319580227136612, + "step": 175 + }, + { + "epoch": 0.38638858397365533, + "grad_norm": 3.260399341583252, + "learning_rate": 4.892174851565004e-06, + "logits/chosen": -2.232401132583618, + "logits/rejected": -2.3769664764404297, + "logps/chosen": -18.106616973876953, + "logps/rejected": -16.98265266418457, + "loss": 1.0731, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.055242907255887985, + "rewards/margins": 0.12538807094097137, + "rewards/rejected": -0.07014517486095428, + "step": 176 + }, + { + "epoch": 0.3885839736553238, + "grad_norm": 2.5841009616851807, + "learning_rate": 4.889339937293508e-06, + "logits/chosen": -2.274055004119873, + "logits/rejected": -2.2682862281799316, + "logps/chosen": -14.28443717956543, + "logps/rejected": -14.35059642791748, + "loss": 0.9869, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.07298749685287476, + "rewards/margins": 0.15667203068733215, + "rewards/rejected": -0.0836845338344574, + "step": 177 + }, + { + "epoch": 0.3907793633369923, + "grad_norm": 2.899467945098877, + "learning_rate": 4.88646908061933e-06, + "logits/chosen": -2.4936909675598145, + "logits/rejected": -2.4529361724853516, + "logps/chosen": -15.615601539611816, + "logps/rejected": -12.965082168579102, + "loss": 0.9921, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.025315597653388977, + "rewards/margins": 0.050315871834754944, + "rewards/rejected": -0.07563146948814392, + "step": 178 + }, + { + "epoch": 0.3929747530186608, + "grad_norm": 2.1207797527313232, + "learning_rate": 4.883562324728242e-06, + "logits/chosen": -2.365647792816162, + "logits/rejected": -2.4170095920562744, + "logps/chosen": -15.290233612060547, + "logps/rejected": -11.950180053710938, + "loss": 0.9684, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.027094591408967972, + "rewards/margins": 0.14753341674804688, + "rewards/rejected": -0.1204388216137886, + "step": 179 + }, + { + "epoch": 0.3951701427003293, + "grad_norm": 2.1420328617095947, + "learning_rate": 4.8806197133460385e-06, + "logits/chosen": -2.2818522453308105, + "logits/rejected": -2.344179153442383, + "logps/chosen": -13.106456756591797, + "logps/rejected": -9.099660873413086, + "loss": 0.8689, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.1166091114282608, + "rewards/margins": 0.20032554864883423, + "rewards/rejected": -0.08371645212173462, + "step": 180 + }, + { + "epoch": 0.3973655323819978, + "grad_norm": 2.639103651046753, + "learning_rate": 4.8776412907378845e-06, + "logits/chosen": -2.381890296936035, + "logits/rejected": -2.4541854858398438, + "logps/chosen": -18.474212646484375, + "logps/rejected": -13.981678009033203, + "loss": 1.043, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.037654660642147064, + "rewards/margins": 0.1330977976322174, + "rewards/rejected": -0.09544314444065094, + "step": 181 + }, + { + "epoch": 0.3995609220636663, + "grad_norm": 2.6630892753601074, + "learning_rate": 4.874627101707644e-06, + "logits/chosen": -2.2812371253967285, + "logits/rejected": 
-2.385373830795288, + "logps/chosen": -15.278980255126953, + "logps/rejected": -11.584487915039062, + "loss": 0.9284, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.06725232303142548, + "rewards/margins": 0.22635605931282043, + "rewards/rejected": -0.15910373628139496, + "step": 182 + }, + { + "epoch": 0.40175631174533477, + "grad_norm": 2.6720497608184814, + "learning_rate": 4.871577191597211e-06, + "logits/chosen": -2.365732192993164, + "logits/rejected": -2.291288137435913, + "logps/chosen": -14.112415313720703, + "logps/rejected": -9.701397895812988, + "loss": 0.8559, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.04575119540095329, + "rewards/margins": 0.22594983875751495, + "rewards/rejected": -0.18019863963127136, + "step": 183 + }, + { + "epoch": 0.4039517014270033, + "grad_norm": 2.456178903579712, + "learning_rate": 4.868491606285823e-06, + "logits/chosen": -2.3805899620056152, + "logits/rejected": -2.33213472366333, + "logps/chosen": -11.890176773071289, + "logps/rejected": -10.625673294067383, + "loss": 0.9725, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.023435473442077637, + "rewards/margins": 0.060660067945718765, + "rewards/rejected": -0.03722459450364113, + "step": 184 + }, + { + "epoch": 0.4061470911086718, + "grad_norm": 2.2524774074554443, + "learning_rate": 4.865370392189377e-06, + "logits/chosen": -2.2781782150268555, + "logits/rejected": -2.4160242080688477, + "logps/chosen": -14.2537841796875, + "logps/rejected": -12.460886001586914, + "loss": 0.9863, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.046875663101673126, + "rewards/margins": 0.1632286012172699, + "rewards/rejected": -0.11635293066501617, + "step": 185 + }, + { + "epoch": 0.4083424807903403, + "grad_norm": 2.891005277633667, + "learning_rate": 4.86221359625972e-06, + "logits/chosen": -2.384573459625244, + "logits/rejected": -2.367485523223877, + "logps/chosen": -18.918542861938477, + "logps/rejected": -11.104225158691406, + "loss": 0.8836, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.013368407264351845, + "rewards/margins": 0.2821967601776123, + "rewards/rejected": -0.2688283622264862, + "step": 186 + }, + { + "epoch": 0.4105378704720088, + "grad_norm": 2.5016818046569824, + "learning_rate": 4.859021265983959e-06, + "logits/chosen": -2.316922187805176, + "logits/rejected": -2.4440698623657227, + "logps/chosen": -16.58301544189453, + "logps/rejected": -12.081414222717285, + "loss": 0.9647, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.0013476097956299782, + "rewards/margins": 0.21520553529262543, + "rewards/rejected": -0.21655313670635223, + "step": 187 + }, + { + "epoch": 0.4127332601536773, + "grad_norm": 3.1846582889556885, + "learning_rate": 4.855793449383731e-06, + "logits/chosen": -2.359532356262207, + "logits/rejected": -2.2862062454223633, + "logps/chosen": -13.504922866821289, + "logps/rejected": -11.573217391967773, + "loss": 0.9578, + "rewards/accuracies": 0.5625, + "rewards/chosen": -0.00021760258823633194, + "rewards/margins": 0.11105790734291077, + "rewards/rejected": -0.11127550899982452, + "step": 188 + }, + { + "epoch": 0.4149286498353458, + "grad_norm": 3.166029930114746, + "learning_rate": 4.852530195014489e-06, + "logits/chosen": -2.372904062271118, + "logits/rejected": -2.4071719646453857, + "logps/chosen": -12.410667419433594, + "logps/rejected": -11.904781341552734, + "loss": 0.9805, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.017314447090029716, + "rewards/margins": 0.07836966216564178, + "rewards/rejected": 
-0.09568411111831665, + "step": 189 + }, + { + "epoch": 0.41712403951701427, + "grad_norm": 3.263563394546509, + "learning_rate": 4.849231551964771e-06, + "logits/chosen": -2.2438108921051025, + "logits/rejected": -2.3375155925750732, + "logps/chosen": -15.391338348388672, + "logps/rejected": -11.07363510131836, + "loss": 0.9475, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.02428864873945713, + "rewards/margins": 0.20056959986686707, + "rewards/rejected": -0.17628096044063568, + "step": 190 + }, + { + "epoch": 0.41931942919868276, + "grad_norm": 2.4130396842956543, + "learning_rate": 4.84589756985546e-06, + "logits/chosen": -2.3162131309509277, + "logits/rejected": -2.284975290298462, + "logps/chosen": -16.690502166748047, + "logps/rejected": -13.928424835205078, + "loss": 0.9419, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.08059751987457275, + "rewards/margins": 0.2925419807434082, + "rewards/rejected": -0.21194449067115784, + "step": 191 + }, + { + "epoch": 0.42151481888035125, + "grad_norm": 4.085582733154297, + "learning_rate": 4.8425282988390376e-06, + "logits/chosen": -2.3132638931274414, + "logits/rejected": -2.3993396759033203, + "logps/chosen": -17.231279373168945, + "logps/rejected": -15.828384399414062, + "loss": 1.1129, + "rewards/accuracies": 0.5625, + "rewards/chosen": -0.004977023229002953, + "rewards/margins": 0.10474638640880585, + "rewards/rejected": -0.10972341150045395, + "step": 192 + }, + { + "epoch": 0.42371020856201974, + "grad_norm": 2.4137845039367676, + "learning_rate": 4.839123789598829e-06, + "logits/chosen": -2.38979434967041, + "logits/rejected": -2.422316074371338, + "logps/chosen": -15.439363479614258, + "logps/rejected": -13.799774169921875, + "loss": 0.9543, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.01842639595270157, + "rewards/margins": 0.24182362854480743, + "rewards/rejected": -0.2602500319480896, + "step": 193 + }, + { + "epoch": 0.42590559824368823, + "grad_norm": 2.383225202560425, + "learning_rate": 4.835684093348244e-06, + "logits/chosen": -2.4397788047790527, + "logits/rejected": -2.3394742012023926, + "logps/chosen": -14.448860168457031, + "logps/rejected": -10.81193733215332, + "loss": 0.8818, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.053237978368997574, + "rewards/margins": 0.23682299256324768, + "rewards/rejected": -0.1835850328207016, + "step": 194 + }, + { + "epoch": 0.4281009879253567, + "grad_norm": 2.947072744369507, + "learning_rate": 4.832209261830002e-06, + "logits/chosen": -2.294675827026367, + "logits/rejected": -2.3386130332946777, + "logps/chosen": -14.494888305664062, + "logps/rejected": -11.394132614135742, + "loss": 0.9588, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.025634365156292915, + "rewards/margins": 0.13824237883090973, + "rewards/rejected": -0.11260801553726196, + "step": 195 + }, + { + "epoch": 0.43029637760702527, + "grad_norm": 2.2794811725616455, + "learning_rate": 4.828699347315357e-06, + "logits/chosen": -2.387575626373291, + "logits/rejected": -2.407410144805908, + "logps/chosen": -16.202159881591797, + "logps/rejected": -10.102056503295898, + "loss": 0.9207, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.045964036136865616, + "rewards/margins": 0.18496856093406677, + "rewards/rejected": -0.13900452852249146, + "step": 196 + }, + { + "epoch": 0.43249176728869376, + "grad_norm": 2.803621292114258, + "learning_rate": 4.825154402603308e-06, + "logits/chosen": -2.3108205795288086, + "logits/rejected": -2.353505849838257, + "logps/chosen": 
-17.8997745513916, + "logps/rejected": -12.011569023132324, + "loss": 0.9036, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.06393789499998093, + "rewards/margins": 0.2662951350212097, + "rewards/rejected": -0.20235726237297058, + "step": 197 + }, + { + "epoch": 0.43468715697036225, + "grad_norm": 2.4866416454315186, + "learning_rate": 4.821574481019811e-06, + "logits/chosen": -2.3187174797058105, + "logits/rejected": -2.2712206840515137, + "logps/chosen": -13.027259826660156, + "logps/rejected": -10.942176818847656, + "loss": 0.9522, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.003394428174942732, + "rewards/margins": 0.1506178379058838, + "rewards/rejected": -0.14722341299057007, + "step": 198 + }, + { + "epoch": 0.43688254665203075, + "grad_norm": 2.9680466651916504, + "learning_rate": 4.817959636416969e-06, + "logits/chosen": -2.3756744861602783, + "logits/rejected": -2.3005049228668213, + "logps/chosen": -11.833829879760742, + "logps/rejected": -11.094474792480469, + "loss": 0.9724, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.015104535967111588, + "rewards/margins": 0.09097721427679062, + "rewards/rejected": -0.07587268203496933, + "step": 199 + }, + { + "epoch": 0.43907793633369924, + "grad_norm": 2.566525936126709, + "learning_rate": 4.814309923172227e-06, + "logits/chosen": -2.3382346630096436, + "logits/rejected": -2.504864454269409, + "logps/chosen": -15.685405731201172, + "logps/rejected": -11.08444595336914, + "loss": 0.874, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.05831221491098404, + "rewards/margins": 0.4754192531108856, + "rewards/rejected": -0.4171070158481598, + "step": 200 + }, + { + "epoch": 0.44127332601536773, + "grad_norm": 3.3313262462615967, + "learning_rate": 4.81062539618755e-06, + "logits/chosen": -2.3377225399017334, + "logits/rejected": -2.3022823333740234, + "logps/chosen": -14.626267433166504, + "logps/rejected": -13.372673034667969, + "loss": 0.9706, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.029092811048030853, + "rewards/margins": 0.19668377935886383, + "rewards/rejected": -0.16759096086025238, + "step": 201 + }, + { + "epoch": 0.4434687156970362, + "grad_norm": 2.3386690616607666, + "learning_rate": 4.806906110888606e-06, + "logits/chosen": -2.3637309074401855, + "logits/rejected": -2.3318238258361816, + "logps/chosen": -16.152206420898438, + "logps/rejected": -13.979665756225586, + "loss": 0.9951, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.06894893944263458, + "rewards/margins": 0.15928399562835693, + "rewards/rejected": -0.09033505618572235, + "step": 202 + }, + { + "epoch": 0.4456641053787047, + "grad_norm": 3.0604872703552246, + "learning_rate": 4.80315212322392e-06, + "logits/chosen": -2.3710412979125977, + "logits/rejected": -2.3815903663635254, + "logps/chosen": -14.69096565246582, + "logps/rejected": -11.106029510498047, + "loss": 0.9239, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.012477545998990536, + "rewards/margins": 0.23910902440547943, + "rewards/rejected": -0.25158655643463135, + "step": 203 + }, + { + "epoch": 0.4478594950603732, + "grad_norm": 4.03364896774292, + "learning_rate": 4.799363489664039e-06, + "logits/chosen": -2.3909597396850586, + "logits/rejected": -2.368131160736084, + "logps/chosen": -13.064891815185547, + "logps/rejected": -13.199899673461914, + "loss": 1.0319, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.029866386204957962, + "rewards/margins": 0.08630535006523132, + "rewards/rejected": -0.05643896758556366, + "step": 204 + }, + { + 
"epoch": 0.4500548847420417, + "grad_norm": 3.311805248260498, + "learning_rate": 4.7955402672006855e-06, + "logits/chosen": -2.2783560752868652, + "logits/rejected": -2.4314398765563965, + "logps/chosen": -18.015016555786133, + "logps/rejected": -12.451406478881836, + "loss": 0.9464, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.03410699963569641, + "rewards/margins": 0.30977538228034973, + "rewards/rejected": -0.34388238191604614, + "step": 205 + }, + { + "epoch": 0.4522502744237102, + "grad_norm": 2.7831180095672607, + "learning_rate": 4.7916825133458925e-06, + "logits/chosen": -2.4114174842834473, + "logits/rejected": -2.263611078262329, + "logps/chosen": -15.84518814086914, + "logps/rejected": -12.624719619750977, + "loss": 0.9274, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.025281542912125587, + "rewards/margins": 0.3231895864009857, + "rewards/rejected": -0.2979080379009247, + "step": 206 + }, + { + "epoch": 0.4544456641053787, + "grad_norm": 3.6785452365875244, + "learning_rate": 4.787790286131145e-06, + "logits/chosen": -2.3511180877685547, + "logits/rejected": -2.340761661529541, + "logps/chosen": -14.801597595214844, + "logps/rejected": -13.564799308776855, + "loss": 0.9737, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.03549131006002426, + "rewards/margins": 0.19460512697696686, + "rewards/rejected": -0.23009642958641052, + "step": 207 + }, + { + "epoch": 0.4566410537870472, + "grad_norm": 3.273648977279663, + "learning_rate": 4.783863644106502e-06, + "logits/chosen": -2.340733051300049, + "logits/rejected": -2.3513708114624023, + "logps/chosen": -14.800409317016602, + "logps/rejected": -13.056378364562988, + "loss": 1.0409, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.04681443050503731, + "rewards/margins": 0.048753753304481506, + "rewards/rejected": -0.09556818753480911, + "step": 208 + }, + { + "epoch": 0.4588364434687157, + "grad_norm": 3.515641450881958, + "learning_rate": 4.779902646339722e-06, + "logits/chosen": -2.385969638824463, + "logits/rejected": -2.296189308166504, + "logps/chosen": -13.113666534423828, + "logps/rejected": -10.047690391540527, + "loss": 0.945, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.012289118953049183, + "rewards/margins": 0.09538790583610535, + "rewards/rejected": -0.08309878408908844, + "step": 209 + }, + { + "epoch": 0.4610318331503842, + "grad_norm": 4.205086708068848, + "learning_rate": 4.775907352415367e-06, + "logits/chosen": -2.3117594718933105, + "logits/rejected": -2.3786885738372803, + "logps/chosen": -13.62367057800293, + "logps/rejected": -10.471162796020508, + "loss": 0.926, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.009793296456336975, + "rewards/margins": 0.14466646313667297, + "rewards/rejected": -0.15445974469184875, + "step": 210 + }, + { + "epoch": 0.4632272228320527, + "grad_norm": 4.027493476867676, + "learning_rate": 4.7718778224339115e-06, + "logits/chosen": -2.3667526245117188, + "logits/rejected": -2.3443357944488525, + "logps/chosen": -14.223626136779785, + "logps/rejected": -17.153335571289062, + "loss": 1.1377, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.012900881469249725, + "rewards/margins": 0.1315092295408249, + "rewards/rejected": -0.1444101184606552, + "step": 211 + }, + { + "epoch": 0.4654226125137212, + "grad_norm": 4.006818771362305, + "learning_rate": 4.767814117010835e-06, + "logits/chosen": -2.4249491691589355, + "logits/rejected": -2.280056953430176, + "logps/chosen": -13.195720672607422, + "logps/rejected": -8.659795761108398, 
+ "loss": 0.8623, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.005751830525696278, + "rewards/margins": 0.2284858226776123, + "rewards/rejected": -0.2342376559972763, + "step": 212 + }, + { + "epoch": 0.4676180021953897, + "grad_norm": 2.848663568496704, + "learning_rate": 4.763716297275715e-06, + "logits/chosen": -2.271615982055664, + "logits/rejected": -2.3262381553649902, + "logps/chosen": -13.699186325073242, + "logps/rejected": -10.280715942382812, + "loss": 0.8926, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.0830225721001625, + "rewards/margins": 0.2727515995502472, + "rewards/rejected": -0.18972903490066528, + "step": 213 + }, + { + "epoch": 0.4698133918770582, + "grad_norm": 2.8376808166503906, + "learning_rate": 4.759584424871302e-06, + "logits/chosen": -2.308769941329956, + "logits/rejected": -2.313756227493286, + "logps/chosen": -15.752427101135254, + "logps/rejected": -10.513671875, + "loss": 0.8535, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.17882277071475983, + "rewards/margins": 0.2885429561138153, + "rewards/rejected": -0.10972017049789429, + "step": 214 + }, + { + "epoch": 0.47200878155872666, + "grad_norm": 2.9638876914978027, + "learning_rate": 4.755418561952595e-06, + "logits/chosen": -2.3878135681152344, + "logits/rejected": -2.4203245639801025, + "logps/chosen": -15.204294204711914, + "logps/rejected": -15.910358428955078, + "loss": 1.0671, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.03764279559254646, + "rewards/margins": 0.19274656474590302, + "rewards/rejected": -0.15510375797748566, + "step": 215 + }, + { + "epoch": 0.47420417124039516, + "grad_norm": 3.096339464187622, + "learning_rate": 4.751218771185906e-06, + "logits/chosen": -2.3016912937164307, + "logits/rejected": -2.4041194915771484, + "logps/chosen": -17.33173179626465, + "logps/rejected": -11.76982307434082, + "loss": 0.9436, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.051540665328502655, + "rewards/margins": 0.16486641764640808, + "rewards/rejected": -0.11332575976848602, + "step": 216 + }, + { + "epoch": 0.47639956092206365, + "grad_norm": 2.5373213291168213, + "learning_rate": 4.746985115747918e-06, + "logits/chosen": -2.3217809200286865, + "logits/rejected": -2.3968961238861084, + "logps/chosen": -15.51959228515625, + "logps/rejected": -9.341879844665527, + "loss": 0.8685, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.01756952330470085, + "rewards/margins": 0.3904784321784973, + "rewards/rejected": -0.40804794430732727, + "step": 217 + }, + { + "epoch": 0.47859495060373214, + "grad_norm": 3.3946480751037598, + "learning_rate": 4.742717659324734e-06, + "logits/chosen": -2.3248724937438965, + "logits/rejected": -2.3879940509796143, + "logps/chosen": -16.362497329711914, + "logps/rejected": -11.980215072631836, + "loss": 0.9029, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.07993088662624359, + "rewards/margins": 0.30833542346954346, + "rewards/rejected": -0.22840453684329987, + "step": 218 + }, + { + "epoch": 0.4807903402854007, + "grad_norm": 3.332834482192993, + "learning_rate": 4.738416466110918e-06, + "logits/chosen": -2.2557213306427, + "logits/rejected": -2.289715051651001, + "logps/chosen": -15.303863525390625, + "logps/rejected": -11.47325325012207, + "loss": 0.974, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.01493862271308899, + "rewards/margins": 0.1497972160577774, + "rewards/rejected": -0.1348586082458496, + "step": 219 + }, + { + "epoch": 0.4829857299670692, + "grad_norm": 3.7161571979522705, + "learning_rate": 
4.734081600808531e-06, + "logits/chosen": -2.410463809967041, + "logits/rejected": -2.3640031814575195, + "logps/chosen": -17.304058074951172, + "logps/rejected": -9.83676528930664, + "loss": 0.8968, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.09258735924959183, + "rewards/margins": 0.16851232945919037, + "rewards/rejected": -0.07592497020959854, + "step": 220 + }, + { + "epoch": 0.48518111964873767, + "grad_norm": 4.089512348175049, + "learning_rate": 4.729713128626158e-06, + "logits/chosen": -2.3158750534057617, + "logits/rejected": -2.3622100353240967, + "logps/chosen": -15.238099098205566, + "logps/rejected": -12.259599685668945, + "loss": 0.9982, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.05108126252889633, + "rewards/margins": 0.20846769213676453, + "rewards/rejected": -0.25954896211624146, + "step": 221 + }, + { + "epoch": 0.48737650933040616, + "grad_norm": 3.147043466567993, + "learning_rate": 4.725311115277924e-06, + "logits/chosen": -2.339972972869873, + "logits/rejected": -2.3009352684020996, + "logps/chosen": -19.9134521484375, + "logps/rejected": -19.152158737182617, + "loss": 1.1412, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.03218366950750351, + "rewards/margins": 0.30710160732269287, + "rewards/rejected": -0.27491796016693115, + "step": 222 + }, + { + "epoch": 0.48957189901207465, + "grad_norm": 3.934567451477051, + "learning_rate": 4.720875626982511e-06, + "logits/chosen": -2.3560261726379395, + "logits/rejected": -2.2281508445739746, + "logps/chosen": -12.923276901245117, + "logps/rejected": -9.938714981079102, + "loss": 0.893, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.04361802339553833, + "rewards/margins": 0.1802201271057129, + "rewards/rejected": -0.13660210371017456, + "step": 223 + }, + { + "epoch": 0.49176728869374314, + "grad_norm": 3.16060733795166, + "learning_rate": 4.716406730462154e-06, + "logits/chosen": -2.377735137939453, + "logits/rejected": -2.425727128982544, + "logps/chosen": -16.463150024414062, + "logps/rejected": -11.011037826538086, + "loss": 0.936, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.021465934813022614, + "rewards/margins": 0.27524274587631226, + "rewards/rejected": -0.25377681851387024, + "step": 224 + }, + { + "epoch": 0.49396267837541163, + "grad_norm": 2.8043062686920166, + "learning_rate": 4.711904492941644e-06, + "logits/chosen": -2.3855056762695312, + "logits/rejected": -2.2830467224121094, + "logps/chosen": -11.66223430633545, + "logps/rejected": -9.482926368713379, + "loss": 0.908, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.007894640788435936, + "rewards/margins": 0.11858849227428436, + "rewards/rejected": -0.11069385707378387, + "step": 225 + }, + { + "epoch": 0.4961580680570801, + "grad_norm": 2.8552234172821045, + "learning_rate": 4.707368982147318e-06, + "logits/chosen": -2.360535144805908, + "logits/rejected": -2.43141508102417, + "logps/chosen": -12.100669860839844, + "logps/rejected": -9.97754955291748, + "loss": 0.9084, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.005531699396669865, + "rewards/margins": 0.20804524421691895, + "rewards/rejected": -0.20251353085041046, + "step": 226 + }, + { + "epoch": 0.4983534577387486, + "grad_norm": 2.832981824874878, + "learning_rate": 4.7028002663060305e-06, + "logits/chosen": -2.2898478507995605, + "logits/rejected": -2.3290865421295166, + "logps/chosen": -13.829479217529297, + "logps/rejected": -9.624303817749023, + "loss": 0.8996, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0005602305755019188, + 
"rewards/margins": 0.21266406774520874, + "rewards/rejected": -0.21210385859012604, + "step": 227 + }, + { + "epoch": 0.5005488474204172, + "grad_norm": 3.762120008468628, + "learning_rate": 4.698198414144136e-06, + "logits/chosen": -2.3622140884399414, + "logits/rejected": -2.3753628730773926, + "logps/chosen": -16.06410789489746, + "logps/rejected": -9.198554992675781, + "loss": 0.8361, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.011656716465950012, + "rewards/margins": 0.3140515983104706, + "rewards/rejected": -0.3257082998752594, + "step": 228 + }, + { + "epoch": 0.5027442371020856, + "grad_norm": 2.261446237564087, + "learning_rate": 4.693563494886455e-06, + "logits/chosen": -2.3106729984283447, + "logits/rejected": -2.3571410179138184, + "logps/chosen": -15.244176864624023, + "logps/rejected": -12.582070350646973, + "loss": 0.9288, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.08472658693790436, + "rewards/margins": 0.2612376809120178, + "rewards/rejected": -0.17651110887527466, + "step": 229 + }, + { + "epoch": 0.5049396267837541, + "grad_norm": 4.166963577270508, + "learning_rate": 4.688895578255228e-06, + "logits/chosen": -2.3666067123413086, + "logits/rejected": -2.3383450508117676, + "logps/chosen": -15.07994556427002, + "logps/rejected": -10.779797554016113, + "loss": 0.8831, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.017674017697572708, + "rewards/margins": 0.330182820558548, + "rewards/rejected": -0.3478568494319916, + "step": 230 + }, + { + "epoch": 0.5071350164654226, + "grad_norm": 3.2102549076080322, + "learning_rate": 4.684194734469067e-06, + "logits/chosen": -2.310776710510254, + "logits/rejected": -2.341068744659424, + "logps/chosen": -16.106998443603516, + "logps/rejected": -12.700186729431152, + "loss": 0.9787, + "rewards/accuracies": 0.5625, + "rewards/chosen": -0.006546609103679657, + "rewards/margins": 0.25047093629837036, + "rewards/rejected": -0.2570175528526306, + "step": 231 + }, + { + "epoch": 0.5093304061470911, + "grad_norm": 3.613203287124634, + "learning_rate": 4.679461034241906e-06, + "logits/chosen": -2.3264687061309814, + "logits/rejected": -2.2634031772613525, + "logps/chosen": -15.22059154510498, + "logps/rejected": -14.103819847106934, + "loss": 1.0352, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.014797629788517952, + "rewards/margins": 0.1425546407699585, + "rewards/rejected": -0.1277570128440857, + "step": 232 + }, + { + "epoch": 0.5115257958287596, + "grad_norm": 2.7349843978881836, + "learning_rate": 4.674694548781929e-06, + "logits/chosen": -2.39654541015625, + "logits/rejected": -2.4142558574676514, + "logps/chosen": -17.5054931640625, + "logps/rejected": -12.546804428100586, + "loss": 0.9873, + "rewards/accuracies": 0.53125, + "rewards/chosen": 0.007637964561581612, + "rewards/margins": 0.15906468033790588, + "rewards/rejected": -0.15142671763896942, + "step": 233 + }, + { + "epoch": 0.5137211855104281, + "grad_norm": 4.179075241088867, + "learning_rate": 4.669895349790502e-06, + "logits/chosen": -2.4446206092834473, + "logits/rejected": -2.3479771614074707, + "logps/chosen": -15.197080612182617, + "logps/rejected": -10.906547546386719, + "loss": 0.9273, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.011189254932105541, + "rewards/margins": 0.14327660202980042, + "rewards/rejected": -0.1320873498916626, + "step": 234 + }, + { + "epoch": 0.5159165751920965, + "grad_norm": 2.9067695140838623, + "learning_rate": 4.665063509461098e-06, + "logits/chosen": -2.3557796478271484, + "logits/rejected": 
-2.271129608154297, + "logps/chosen": -13.00634765625, + "logps/rejected": -11.378231048583984, + "loss": 0.9386, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.07278364151716232, + "rewards/margins": 0.18158170580863953, + "rewards/rejected": -0.10879804939031601, + "step": 235 + }, + { + "epoch": 0.5181119648737651, + "grad_norm": 3.2804477214813232, + "learning_rate": 4.660199100478201e-06, + "logits/chosen": -2.3077774047851562, + "logits/rejected": -2.364297389984131, + "logps/chosen": -16.013919830322266, + "logps/rejected": -12.880232810974121, + "loss": 0.9551, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.02138105407357216, + "rewards/margins": 0.24333827197551727, + "rewards/rejected": -0.2219572216272354, + "step": 236 + }, + { + "epoch": 0.5203073545554336, + "grad_norm": 3.5774881839752197, + "learning_rate": 4.655302196016228e-06, + "logits/chosen": -2.3459157943725586, + "logits/rejected": -2.348891019821167, + "logps/chosen": -12.117788314819336, + "logps/rejected": -12.449483871459961, + "loss": 0.9679, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.02035675011575222, + "rewards/margins": 0.2209654599428177, + "rewards/rejected": -0.20060870051383972, + "step": 237 + }, + { + "epoch": 0.5225027442371021, + "grad_norm": 3.279064893722534, + "learning_rate": 4.650372869738415e-06, + "logits/chosen": -2.3156638145446777, + "logits/rejected": -2.3022217750549316, + "logps/chosen": -14.40345573425293, + "logps/rejected": -13.11058235168457, + "loss": 0.9551, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.05910070985555649, + "rewards/margins": 0.19128769636154175, + "rewards/rejected": -0.13218699395656586, + "step": 238 + }, + { + "epoch": 0.5246981339187706, + "grad_norm": 3.685303211212158, + "learning_rate": 4.645411195795709e-06, + "logits/chosen": -2.257229804992676, + "logits/rejected": -2.2816812992095947, + "logps/chosen": -14.475461959838867, + "logps/rejected": -8.206293106079102, + "loss": 0.8199, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.013962533324956894, + "rewards/margins": 0.29057538509368896, + "rewards/rejected": -0.27661287784576416, + "step": 239 + }, + { + "epoch": 0.5268935236004391, + "grad_norm": 2.90091609954834, + "learning_rate": 4.640417248825667e-06, + "logits/chosen": -2.3027405738830566, + "logits/rejected": -2.364433765411377, + "logps/chosen": -12.266948699951172, + "logps/rejected": -12.551021575927734, + "loss": 0.9237, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.10769608616828918, + "rewards/margins": 0.23384669423103333, + "rewards/rejected": -0.12615060806274414, + "step": 240 + }, + { + "epoch": 0.5290889132821076, + "grad_norm": 3.0138938426971436, + "learning_rate": 4.635391103951315e-06, + "logits/chosen": -2.3460533618927, + "logits/rejected": -2.292667865753174, + "logps/chosen": -18.938508987426758, + "logps/rejected": -16.88079261779785, + "loss": 0.9813, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.07211557030677795, + "rewards/margins": 0.37093961238861084, + "rewards/rejected": -0.2988240718841553, + "step": 241 + }, + { + "epoch": 0.531284302963776, + "grad_norm": 3.351609945297241, + "learning_rate": 4.630332836780029e-06, + "logits/chosen": -2.3035905361175537, + "logits/rejected": -2.323580026626587, + "logps/chosen": -16.21466827392578, + "logps/rejected": -13.248044967651367, + "loss": 0.9519, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.01800696738064289, + "rewards/margins": 0.2727009952068329, + "rewards/rejected": -0.25469401478767395, + "step": 
242 + }, + { + "epoch": 0.5334796926454446, + "grad_norm": 3.892470121383667, + "learning_rate": 4.625242523402395e-06, + "logits/chosen": -2.347764730453491, + "logits/rejected": -2.4785423278808594, + "logps/chosen": -21.10007095336914, + "logps/rejected": -13.007464408874512, + "loss": 0.9527, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.026116278022527695, + "rewards/margins": 0.23999722301959991, + "rewards/rejected": -0.2661135196685791, + "step": 243 + }, + { + "epoch": 0.535675082327113, + "grad_norm": 4.164974212646484, + "learning_rate": 4.620120240391065e-06, + "logits/chosen": -2.343069553375244, + "logits/rejected": -2.4068026542663574, + "logps/chosen": -14.178921699523926, + "logps/rejected": -9.567798614501953, + "loss": 0.9327, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.03248784318566322, + "rewards/margins": 0.1319328397512436, + "rewards/rejected": -0.1644206941127777, + "step": 244 + }, + { + "epoch": 0.5378704720087816, + "grad_norm": 3.250154972076416, + "learning_rate": 4.614966064799603e-06, + "logits/chosen": -2.3875789642333984, + "logits/rejected": -2.350403308868408, + "logps/chosen": -14.76641845703125, + "logps/rejected": -11.900374412536621, + "loss": 0.8904, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.07907561212778091, + "rewards/margins": 0.2815009355545044, + "rewards/rejected": -0.20242534577846527, + "step": 245 + }, + { + "epoch": 0.54006586169045, + "grad_norm": 3.4762589931488037, + "learning_rate": 4.609780074161327e-06, + "logits/chosen": -2.3251564502716064, + "logits/rejected": -2.4273359775543213, + "logps/chosen": -15.190896034240723, + "logps/rejected": -10.03155517578125, + "loss": 0.8642, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.002804434858262539, + "rewards/margins": 0.3568550646305084, + "rewards/rejected": -0.3596595227718353, + "step": 246 + }, + { + "epoch": 0.5422612513721186, + "grad_norm": 2.9021172523498535, + "learning_rate": 4.604562346488144e-06, + "logits/chosen": -2.3237853050231934, + "logits/rejected": -2.3288207054138184, + "logps/chosen": -16.11346435546875, + "logps/rejected": -9.360347747802734, + "loss": 0.7929, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.0847928449511528, + "rewards/margins": 0.44311946630477905, + "rewards/rejected": -0.35832661390304565, + "step": 247 + }, + { + "epoch": 0.544456641053787, + "grad_norm": 5.830170154571533, + "learning_rate": 4.599312960269375e-06, + "logits/chosen": -2.366364002227783, + "logits/rejected": -2.4149107933044434, + "logps/chosen": -17.478118896484375, + "logps/rejected": -13.510588645935059, + "loss": 1.0225, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.06305301934480667, + "rewards/margins": 0.21186582744121552, + "rewards/rejected": -0.2749188542366028, + "step": 248 + }, + { + "epoch": 0.5466520307354555, + "grad_norm": 3.584066867828369, + "learning_rate": 4.594031994470574e-06, + "logits/chosen": -2.325279474258423, + "logits/rejected": -2.456352710723877, + "logps/chosen": -15.935647964477539, + "logps/rejected": -15.036849975585938, + "loss": 1.0898, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.015486141666769981, + "rewards/margins": 0.21688403189182281, + "rewards/rejected": -0.23237019777297974, + "step": 249 + }, + { + "epoch": 0.5488474204171241, + "grad_norm": 3.663219928741455, + "learning_rate": 4.588719528532342e-06, + "logits/chosen": -2.4341042041778564, + "logits/rejected": -2.4564149379730225, + "logps/chosen": -16.782196044921875, + "logps/rejected": -10.696434020996094, + 
"loss": 0.8959, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.04107421636581421, + "rewards/margins": 0.36102810502052307, + "rewards/rejected": -0.31995388865470886, + "step": 250 + }, + { + "epoch": 0.5510428100987925, + "grad_norm": 3.9447195529937744, + "learning_rate": 4.583375642369129e-06, + "logits/chosen": -2.3444766998291016, + "logits/rejected": -2.3383076190948486, + "logps/chosen": -20.937177658081055, + "logps/rejected": -13.723505020141602, + "loss": 1.0103, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.021991277113556862, + "rewards/margins": 0.34398943185806274, + "rewards/rejected": -0.32199811935424805, + "step": 251 + }, + { + "epoch": 0.5532381997804611, + "grad_norm": 3.336824417114258, + "learning_rate": 4.5780004163680365e-06, + "logits/chosen": -2.304708480834961, + "logits/rejected": -2.3940112590789795, + "logps/chosen": -18.0738525390625, + "logps/rejected": -11.277729034423828, + "loss": 0.8595, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.0800776332616806, + "rewards/margins": 0.38721710443496704, + "rewards/rejected": -0.30713948607444763, + "step": 252 + }, + { + "epoch": 0.5554335894621295, + "grad_norm": 3.5766406059265137, + "learning_rate": 4.572593931387604e-06, + "logits/chosen": -2.2744007110595703, + "logits/rejected": -2.3453307151794434, + "logps/chosen": -16.072280883789062, + "logps/rejected": -11.154439926147461, + "loss": 0.956, + "rewards/accuracies": 0.53125, + "rewards/chosen": -0.03496705740690231, + "rewards/margins": 0.132221058011055, + "rewards/rejected": -0.16718809306621552, + "step": 253 + }, + { + "epoch": 0.557628979143798, + "grad_norm": 3.511826515197754, + "learning_rate": 4.567156268756594e-06, + "logits/chosen": -2.4133963584899902, + "logits/rejected": -2.383863687515259, + "logps/chosen": -14.50227165222168, + "logps/rejected": -10.799485206604004, + "loss": 0.8542, + "rewards/accuracies": 0.90625, + "rewards/chosen": 0.021454015746712685, + "rewards/margins": 0.36027950048446655, + "rewards/rejected": -0.3388254642486572, + "step": 254 + }, + { + "epoch": 0.5598243688254665, + "grad_norm": 4.181152820587158, + "learning_rate": 4.561687510272767e-06, + "logits/chosen": -2.2828900814056396, + "logits/rejected": -2.370487928390503, + "logps/chosen": -15.107242584228516, + "logps/rejected": -9.730294227600098, + "loss": 0.9018, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.022096818313002586, + "rewards/margins": 0.29161518812179565, + "rewards/rejected": -0.3137120008468628, + "step": 255 + }, + { + "epoch": 0.562019758507135, + "grad_norm": 4.829276084899902, + "learning_rate": 4.556187738201656e-06, + "logits/chosen": -2.430100917816162, + "logits/rejected": -2.4558093547821045, + "logps/chosen": -14.387591361999512, + "logps/rejected": -11.638925552368164, + "loss": 0.931, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.004484318196773529, + "rewards/margins": 0.24601556360721588, + "rewards/rejected": -0.2504999041557312, + "step": 256 + }, + { + "epoch": 0.5642151481888035, + "grad_norm": 3.4663209915161133, + "learning_rate": 4.550657035275323e-06, + "logits/chosen": -2.2517337799072266, + "logits/rejected": -2.3557732105255127, + "logps/chosen": -15.434856414794922, + "logps/rejected": -10.484879493713379, + "loss": 0.8677, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.014802820980548859, + "rewards/margins": 0.32399383187294006, + "rewards/rejected": -0.3091909885406494, + "step": 257 + }, + { + "epoch": 0.566410537870472, + "grad_norm": 4.792181491851807, + 
"learning_rate": 4.54509548469112e-06, + "logits/chosen": -2.375314712524414, + "logits/rejected": -2.382444381713867, + "logps/chosen": -15.463499069213867, + "logps/rejected": -12.133369445800781, + "loss": 0.9093, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.06232909485697746, + "rewards/margins": 0.34396061301231384, + "rewards/rejected": -0.4062896966934204, + "step": 258 + }, + { + "epoch": 0.5686059275521405, + "grad_norm": 3.6746764183044434, + "learning_rate": 4.539503170110431e-06, + "logits/chosen": -2.3717360496520996, + "logits/rejected": -2.4131274223327637, + "logps/chosen": -17.220232009887695, + "logps/rejected": -13.375675201416016, + "loss": 0.9775, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.01915721222758293, + "rewards/margins": 0.328391969203949, + "rewards/rejected": -0.3475492000579834, + "step": 259 + }, + { + "epoch": 0.570801317233809, + "grad_norm": 3.5798373222351074, + "learning_rate": 4.533880175657419e-06, + "logits/chosen": -2.3118844032287598, + "logits/rejected": -2.2956976890563965, + "logps/chosen": -17.010141372680664, + "logps/rejected": -11.295709609985352, + "loss": 0.8533, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.05849292501807213, + "rewards/margins": 0.3737761080265045, + "rewards/rejected": -0.3152831792831421, + "step": 260 + }, + { + "epoch": 0.5729967069154775, + "grad_norm": 3.199009418487549, + "learning_rate": 4.528226585917761e-06, + "logits/chosen": -2.2721354961395264, + "logits/rejected": -2.3081626892089844, + "logps/chosen": -17.70292091369629, + "logps/rejected": -13.543802261352539, + "loss": 0.8967, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.05505692958831787, + "rewards/margins": 0.4563320279121399, + "rewards/rejected": -0.401275098323822, + "step": 261 + }, + { + "epoch": 0.575192096597146, + "grad_norm": 3.1294350624084473, + "learning_rate": 4.522542485937369e-06, + "logits/chosen": -2.3046679496765137, + "logits/rejected": -2.344747304916382, + "logps/chosen": -15.264352798461914, + "logps/rejected": -13.952584266662598, + "loss": 0.9476, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.028087537735700607, + "rewards/margins": 0.25781309604644775, + "rewards/rejected": -0.22972553968429565, + "step": 262 + }, + { + "epoch": 0.5773874862788145, + "grad_norm": 2.8935720920562744, + "learning_rate": 4.516827961221118e-06, + "logits/chosen": -2.311985492706299, + "logits/rejected": -2.3693246841430664, + "logps/chosen": -17.099782943725586, + "logps/rejected": -12.623132705688477, + "loss": 0.9125, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.10635387897491455, + "rewards/margins": 0.6242843270301819, + "rewards/rejected": -0.5179304480552673, + "step": 263 + }, + { + "epoch": 0.579582875960483, + "grad_norm": 3.569817304611206, + "learning_rate": 4.511083097731556e-06, + "logits/chosen": -2.2364590167999268, + "logits/rejected": -2.280050754547119, + "logps/chosen": -16.519317626953125, + "logps/rejected": -10.577292442321777, + "loss": 0.9376, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.02913329005241394, + "rewards/margins": 0.15598690509796143, + "rewards/rejected": -0.12685361504554749, + "step": 264 + }, + { + "epoch": 0.5817782656421515, + "grad_norm": 3.893734931945801, + "learning_rate": 4.50530798188761e-06, + "logits/chosen": -2.3067941665649414, + "logits/rejected": -2.3932323455810547, + "logps/chosen": -13.595678329467773, + "logps/rejected": -13.469207763671875, + "loss": 1.0158, + "rewards/accuracies": 0.5, + "rewards/chosen": 0.011006025597453117, + 
"rewards/margins": 0.11442138999700546, + "rewards/rejected": -0.1034153550863266, + "step": 265 + }, + { + "epoch": 0.58397365532382, + "grad_norm": 3.6261839866638184, + "learning_rate": 4.49950270056329e-06, + "logits/chosen": -2.437346935272217, + "logits/rejected": -2.344280958175659, + "logps/chosen": -16.588623046875, + "logps/rejected": -12.61369800567627, + "loss": 0.8737, + "rewards/accuracies": 0.90625, + "rewards/chosen": 0.05177614837884903, + "rewards/margins": 0.39123424887657166, + "rewards/rejected": -0.3394581079483032, + "step": 266 + }, + { + "epoch": 0.5861690450054885, + "grad_norm": 3.539958953857422, + "learning_rate": 4.4936673410863794e-06, + "logits/chosen": -2.4072844982147217, + "logits/rejected": -2.4355082511901855, + "logps/chosen": -21.93024444580078, + "logps/rejected": -12.424371719360352, + "loss": 0.9278, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.030360229313373566, + "rewards/margins": 0.3428768217563629, + "rewards/rejected": -0.31251657009124756, + "step": 267 + }, + { + "epoch": 0.5883644346871569, + "grad_norm": 4.157306671142578, + "learning_rate": 4.48780199123712e-06, + "logits/chosen": -2.3129444122314453, + "logits/rejected": -2.3434619903564453, + "logps/chosen": -18.06573486328125, + "logps/rejected": -12.912382125854492, + "loss": 0.9321, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.03493726998567581, + "rewards/margins": 0.283815860748291, + "rewards/rejected": -0.3187531530857086, + "step": 268 + }, + { + "epoch": 0.5905598243688255, + "grad_norm": 4.955633163452148, + "learning_rate": 4.481906739246894e-06, + "logits/chosen": -2.4333293437957764, + "logits/rejected": -2.3597402572631836, + "logps/chosen": -17.311777114868164, + "logps/rejected": -14.16006088256836, + "loss": 1.036, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.01734822988510132, + "rewards/margins": 0.1342713087797165, + "rewards/rejected": -0.1516195386648178, + "step": 269 + }, + { + "epoch": 0.5927552140504939, + "grad_norm": 4.0596022605896, + "learning_rate": 4.475981673796899e-06, + "logits/chosen": -2.4821219444274902, + "logits/rejected": -2.3936054706573486, + "logps/chosen": -15.369222640991211, + "logps/rejected": -11.958950996398926, + "loss": 0.9301, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.05403044819831848, + "rewards/margins": 0.20187188684940338, + "rewards/rejected": -0.1478414386510849, + "step": 270 + }, + { + "epoch": 0.5949506037321625, + "grad_norm": 3.270981788635254, + "learning_rate": 4.470026884016805e-06, + "logits/chosen": -2.354869842529297, + "logits/rejected": -2.3129818439483643, + "logps/chosen": -13.78475570678711, + "logps/rejected": -11.741534233093262, + "loss": 0.9557, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.0010744757018983364, + "rewards/margins": 0.22390666604042053, + "rewards/rejected": -0.22283217310905457, + "step": 271 + }, + { + "epoch": 0.5971459934138309, + "grad_norm": 4.795289993286133, + "learning_rate": 4.464042459483425e-06, + "logits/chosen": -2.3651349544525146, + "logits/rejected": -2.383972644805908, + "logps/chosen": -16.043739318847656, + "logps/rejected": -12.472469329833984, + "loss": 0.9447, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.0016098134219646454, + "rewards/margins": 0.2757399380207062, + "rewards/rejected": -0.2741301357746124, + "step": 272 + }, + { + "epoch": 0.5993413830954994, + "grad_norm": 3.419529438018799, + "learning_rate": 4.458028490219361e-06, + "logits/chosen": -2.3934783935546875, + "logits/rejected": 
-2.30633807182312, + "logps/chosen": -16.26495933532715, + "logps/rejected": -10.53646469116211, + "loss": 0.8953, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.11163412034511566, + "rewards/margins": 0.28469380736351013, + "rewards/rejected": -0.17305967211723328, + "step": 273 + }, + { + "epoch": 0.601536772777168, + "grad_norm": 3.859889268875122, + "learning_rate": 4.451985066691649e-06, + "logits/chosen": -2.3641891479492188, + "logits/rejected": -2.296032428741455, + "logps/chosen": -12.692946434020996, + "logps/rejected": -10.588760375976562, + "loss": 0.9138, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.07239022850990295, + "rewards/margins": 0.19928386807441711, + "rewards/rejected": -0.12689363956451416, + "step": 274 + }, + { + "epoch": 0.6037321624588364, + "grad_norm": 3.1660425662994385, + "learning_rate": 4.445912279810401e-06, + "logits/chosen": -2.313688278198242, + "logits/rejected": -2.2848196029663086, + "logps/chosen": -18.03732681274414, + "logps/rejected": -13.442127227783203, + "loss": 0.9861, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.0005273278802633286, + "rewards/margins": 0.19775639474391937, + "rewards/rejected": -0.1972290724515915, + "step": 275 + }, + { + "epoch": 0.605927552140505, + "grad_norm": 3.7765965461730957, + "learning_rate": 4.439810220927436e-06, + "logits/chosen": -2.4024770259857178, + "logits/rejected": -2.3250467777252197, + "logps/chosen": -16.64607810974121, + "logps/rejected": -14.17449951171875, + "loss": 0.9478, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.026453014463186264, + "rewards/margins": 0.30205315351486206, + "rewards/rejected": -0.3285061717033386, + "step": 276 + }, + { + "epoch": 0.6081229418221734, + "grad_norm": 4.055322170257568, + "learning_rate": 4.4336789818349105e-06, + "logits/chosen": -2.289247751235962, + "logits/rejected": -2.4132933616638184, + "logps/chosen": -19.57721519470215, + "logps/rejected": -11.495363235473633, + "loss": 0.8171, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.03967863321304321, + "rewards/margins": 0.4562482535839081, + "rewards/rejected": -0.41656967997550964, + "step": 277 + }, + { + "epoch": 0.610318331503842, + "grad_norm": 3.6051321029663086, + "learning_rate": 4.427518654763927e-06, + "logits/chosen": -2.358933448791504, + "logits/rejected": -2.4015445709228516, + "logps/chosen": -15.9168119430542, + "logps/rejected": -10.469045639038086, + "loss": 0.8713, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.08597777783870697, + "rewards/margins": 0.32388389110565186, + "rewards/rejected": -0.23790611326694489, + "step": 278 + }, + { + "epoch": 0.6125137211855104, + "grad_norm": 2.8481502532958984, + "learning_rate": 4.421329332383158e-06, + "logits/chosen": -2.40431547164917, + "logits/rejected": -2.313837766647339, + "logps/chosen": -16.356674194335938, + "logps/rejected": -11.987436294555664, + "loss": 0.9411, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.0003596879541873932, + "rewards/margins": 0.22185388207435608, + "rewards/rejected": -0.2214941829442978, + "step": 279 + }, + { + "epoch": 0.6147091108671789, + "grad_norm": 2.792403221130371, + "learning_rate": 4.415111107797445e-06, + "logits/chosen": -2.369375228881836, + "logits/rejected": -2.410369634628296, + "logps/chosen": -15.887426376342773, + "logps/rejected": -10.735150337219238, + "loss": 0.8151, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.1267678141593933, + "rewards/margins": 0.3933025598526001, + "rewards/rejected": -0.2665347456932068, + "step": 
280 + }, + { + "epoch": 0.6169045005488474, + "grad_norm": 3.9273605346679688, + "learning_rate": 4.408864074546402e-06, + "logits/chosen": -2.300583839416504, + "logits/rejected": -2.2920844554901123, + "logps/chosen": -17.110836029052734, + "logps/rejected": -11.550253868103027, + "loss": 0.864, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.015616696327924728, + "rewards/margins": 0.36540573835372925, + "rewards/rejected": -0.3810224235057831, + "step": 281 + }, + { + "epoch": 0.6190998902305159, + "grad_norm": 3.407844066619873, + "learning_rate": 4.402588326603002e-06, + "logits/chosen": -2.3792104721069336, + "logits/rejected": -2.3388185501098633, + "logps/chosen": -15.377979278564453, + "logps/rejected": -13.580524444580078, + "loss": 1.0169, + "rewards/accuracies": 0.5625, + "rewards/chosen": -0.0523822084069252, + "rewards/margins": 0.22621504962444305, + "rewards/rejected": -0.27859723567962646, + "step": 282 + }, + { + "epoch": 0.6212952799121844, + "grad_norm": 2.764402151107788, + "learning_rate": 4.396283958372173e-06, + "logits/chosen": -2.3100624084472656, + "logits/rejected": -2.379979133605957, + "logps/chosen": -14.444868087768555, + "logps/rejected": -11.091350555419922, + "loss": 0.9187, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0230597835034132, + "rewards/margins": 0.31620121002197266, + "rewards/rejected": -0.2931414246559143, + "step": 283 + }, + { + "epoch": 0.6234906695938529, + "grad_norm": 4.228127956390381, + "learning_rate": 4.38995106468937e-06, + "logits/chosen": -2.333906412124634, + "logits/rejected": -2.3636553287506104, + "logps/chosen": -14.673521041870117, + "logps/rejected": -9.182047843933105, + "loss": 0.821, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.10073263198137283, + "rewards/margins": 0.3078424334526062, + "rewards/rejected": -0.20710980892181396, + "step": 284 + }, + { + "epoch": 0.6256860592755215, + "grad_norm": 3.735618829727173, + "learning_rate": 4.3835897408191515e-06, + "logits/chosen": -2.3141889572143555, + "logits/rejected": -2.357758045196533, + "logps/chosen": -16.62046241760254, + "logps/rejected": -10.231853485107422, + "loss": 0.841, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.0775555670261383, + "rewards/margins": 0.38503366708755493, + "rewards/rejected": -0.30747807025909424, + "step": 285 + }, + { + "epoch": 0.6278814489571899, + "grad_norm": 3.217437982559204, + "learning_rate": 4.377200082453748e-06, + "logits/chosen": -2.2870125770568848, + "logits/rejected": -2.3641347885131836, + "logps/chosen": -18.92882537841797, + "logps/rejected": -12.308082580566406, + "loss": 0.9247, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.09097970277070999, + "rewards/margins": 0.43049687147140503, + "rewards/rejected": -0.33951717615127563, + "step": 286 + }, + { + "epoch": 0.6300768386388584, + "grad_norm": 2.8335301876068115, + "learning_rate": 4.370782185711618e-06, + "logits/chosen": -2.322899341583252, + "logits/rejected": -2.3178961277008057, + "logps/chosen": -13.624707221984863, + "logps/rejected": -12.783918380737305, + "loss": 0.9819, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.046398602426052094, + "rewards/margins": 0.20317067205905914, + "rewards/rejected": -0.15677204728126526, + "step": 287 + }, + { + "epoch": 0.6322722283205269, + "grad_norm": 4.198422431945801, + "learning_rate": 4.3643361471360045e-06, + "logits/chosen": -2.253892421722412, + "logits/rejected": -2.351269245147705, + "logps/chosen": -15.725739479064941, + "logps/rejected": -13.157400131225586, + 
"loss": 0.9817, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.0788668841123581, + "rewards/margins": 0.3620368242263794, + "rewards/rejected": -0.2831699252128601, + "step": 288 + }, + { + "epoch": 0.6344676180021954, + "grad_norm": 3.027604341506958, + "learning_rate": 4.357862063693486e-06, + "logits/chosen": -2.249347448348999, + "logits/rejected": -2.27540922164917, + "logps/chosen": -20.544876098632812, + "logps/rejected": -17.780059814453125, + "loss": 1.0549, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.07425278425216675, + "rewards/margins": 0.4821486473083496, + "rewards/rejected": -0.40789586305618286, + "step": 289 + }, + { + "epoch": 0.6366630076838639, + "grad_norm": 3.498201847076416, + "learning_rate": 4.351360032772512e-06, + "logits/chosen": -2.291285514831543, + "logits/rejected": -2.4294636249542236, + "logps/chosen": -16.40362548828125, + "logps/rejected": -9.61184310913086, + "loss": 0.8461, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.058083947747945786, + "rewards/margins": 0.33970099687576294, + "rewards/rejected": -0.28161704540252686, + "step": 290 + }, + { + "epoch": 0.6388583973655324, + "grad_norm": 4.065576076507568, + "learning_rate": 4.344830152181941e-06, + "logits/chosen": -2.2679929733276367, + "logits/rejected": -2.359039306640625, + "logps/chosen": -17.067501068115234, + "logps/rejected": -11.277329444885254, + "loss": 0.9347, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.01216885820031166, + "rewards/margins": 0.23538747429847717, + "rewards/rejected": -0.2232186198234558, + "step": 291 + }, + { + "epoch": 0.6410537870472008, + "grad_norm": 4.737266540527344, + "learning_rate": 4.338272520149572e-06, + "logits/chosen": -2.2955241203308105, + "logits/rejected": -2.382547616958618, + "logps/chosen": -17.920217514038086, + "logps/rejected": -15.063626289367676, + "loss": 1.0549, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.04992213100194931, + "rewards/margins": 0.10247353464365005, + "rewards/rejected": -0.15239566564559937, + "step": 292 + }, + { + "epoch": 0.6432491767288694, + "grad_norm": 4.056772708892822, + "learning_rate": 4.3316872353206595e-06, + "logits/chosen": -2.4128801822662354, + "logits/rejected": -2.3561244010925293, + "logps/chosen": -13.71163558959961, + "logps/rejected": -10.349092483520508, + "loss": 0.8511, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.007009743247181177, + "rewards/margins": 0.2926795482635498, + "rewards/rejected": -0.28566980361938477, + "step": 293 + }, + { + "epoch": 0.6454445664105378, + "grad_norm": 3.3059873580932617, + "learning_rate": 4.325074396756437e-06, + "logits/chosen": -2.3915586471557617, + "logits/rejected": -2.242203712463379, + "logps/chosen": -17.347070693969727, + "logps/rejected": -10.363704681396484, + "loss": 0.8055, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.05488693714141846, + "rewards/margins": 0.3893585205078125, + "rewards/rejected": -0.33447155356407166, + "step": 294 + }, + { + "epoch": 0.6476399560922064, + "grad_norm": 3.126178026199341, + "learning_rate": 4.318434103932622e-06, + "logits/chosen": -2.377377510070801, + "logits/rejected": -2.3431365489959717, + "logps/chosen": -17.024227142333984, + "logps/rejected": -11.137166023254395, + "loss": 0.8691, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.11715853214263916, + "rewards/margins": 0.39744699001312256, + "rewards/rejected": -0.280288428068161, + "step": 295 + }, + { + "epoch": 0.6498353457738749, + "grad_norm": 3.4686014652252197, + "learning_rate": 
4.3117664567379235e-06, + "logits/chosen": -2.3375179767608643, + "logits/rejected": -2.2733144760131836, + "logps/chosen": -17.800548553466797, + "logps/rejected": -10.393657684326172, + "loss": 0.8631, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.01607682555913925, + "rewards/margins": 0.31594520807266235, + "rewards/rejected": -0.2998683452606201, + "step": 296 + }, + { + "epoch": 0.6520307354555434, + "grad_norm": 3.257317304611206, + "learning_rate": 4.305071555472534e-06, + "logits/chosen": -2.3289194107055664, + "logits/rejected": -2.4071145057678223, + "logps/chosen": -14.666779518127441, + "logps/rejected": -9.785927772521973, + "loss": 0.8539, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.029782062396407127, + "rewards/margins": 0.368835985660553, + "rewards/rejected": -0.3390539288520813, + "step": 297 + }, + { + "epoch": 0.6542261251372119, + "grad_norm": 3.4349687099456787, + "learning_rate": 4.2983495008466285e-06, + "logits/chosen": -2.3456382751464844, + "logits/rejected": -2.421517848968506, + "logps/chosen": -16.462114334106445, + "logps/rejected": -14.361862182617188, + "loss": 0.9191, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.03672860935330391, + "rewards/margins": 0.4619566798210144, + "rewards/rejected": -0.425228089094162, + "step": 298 + }, + { + "epoch": 0.6564215148188803, + "grad_norm": 4.040913105010986, + "learning_rate": 4.29160039397884e-06, + "logits/chosen": -2.3418679237365723, + "logits/rejected": -2.2938270568847656, + "logps/chosen": -11.77094554901123, + "logps/rejected": -13.91958999633789, + "loss": 0.9844, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.06349484622478485, + "rewards/margins": 0.26532605290412903, + "rewards/rejected": -0.20183119177818298, + "step": 299 + }, + { + "epoch": 0.6586169045005489, + "grad_norm": 4.860092639923096, + "learning_rate": 4.284824336394748e-06, + "logits/chosen": -2.3495969772338867, + "logits/rejected": -2.376173496246338, + "logps/chosen": -14.834826469421387, + "logps/rejected": -13.061057090759277, + "loss": 0.9285, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.03790712356567383, + "rewards/margins": 0.3323096036911011, + "rewards/rejected": -0.29440245032310486, + "step": 300 + }, + { + "epoch": 0.6608122941822173, + "grad_norm": 3.8926429748535156, + "learning_rate": 4.278021430025343e-06, + "logits/chosen": -2.4280104637145996, + "logits/rejected": -2.384596586227417, + "logps/chosen": -15.310932159423828, + "logps/rejected": -9.883270263671875, + "loss": 0.8233, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.03349144011735916, + "rewards/margins": 0.47141653299331665, + "rewards/rejected": -0.4379251003265381, + "step": 301 + }, + { + "epoch": 0.6630076838638859, + "grad_norm": 4.136510848999023, + "learning_rate": 4.2711917772055e-06, + "logits/chosen": -2.349405288696289, + "logits/rejected": -2.4378554821014404, + "logps/chosen": -20.67293930053711, + "logps/rejected": -17.613826751708984, + "loss": 1.0321, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.02313861809670925, + "rewards/margins": 0.3090207576751709, + "rewards/rejected": -0.2858821451663971, + "step": 302 + }, + { + "epoch": 0.6652030735455543, + "grad_norm": 3.6378984451293945, + "learning_rate": 4.264335480672433e-06, + "logits/chosen": -2.339221954345703, + "logits/rejected": -2.355464458465576, + "logps/chosen": -14.879115104675293, + "logps/rejected": -11.936575889587402, + "loss": 0.9292, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.11328764259815216, + 
"rewards/margins": 0.28763386607170105, + "rewards/rejected": -0.1743462085723877, + "step": 303 + }, + { + "epoch": 0.6673984632272228, + "grad_norm": 4.390617847442627, + "learning_rate": 4.257452643564155e-06, + "logits/chosen": -2.3608078956604004, + "logits/rejected": -2.2916340827941895, + "logps/chosen": -16.132726669311523, + "logps/rejected": -11.27886962890625, + "loss": 0.8445, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.013227814808487892, + "rewards/margins": 0.38369402289390564, + "rewards/rejected": -0.3704661726951599, + "step": 304 + }, + { + "epoch": 0.6695938529088913, + "grad_norm": 4.490567684173584, + "learning_rate": 4.250543369417921e-06, + "logits/chosen": -2.334174156188965, + "logits/rejected": -2.340644359588623, + "logps/chosen": -13.944917678833008, + "logps/rejected": -10.003447532653809, + "loss": 0.9194, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.014632996171712875, + "rewards/margins": 0.17052799463272095, + "rewards/rejected": -0.15589500963687897, + "step": 305 + }, + { + "epoch": 0.6717892425905598, + "grad_norm": 3.990586042404175, + "learning_rate": 4.2436077621686785e-06, + "logits/chosen": -2.324025869369507, + "logits/rejected": -2.400771379470825, + "logps/chosen": -18.718528747558594, + "logps/rejected": -9.990530014038086, + "loss": 0.7944, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.056847233325242996, + "rewards/margins": 0.49278268218040466, + "rewards/rejected": -0.43593543767929077, + "step": 306 + }, + { + "epoch": 0.6739846322722283, + "grad_norm": 4.3013505935668945, + "learning_rate": 4.236645926147493e-06, + "logits/chosen": -2.38020920753479, + "logits/rejected": -2.3586490154266357, + "logps/chosen": -13.938803672790527, + "logps/rejected": -12.504024505615234, + "loss": 0.9618, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.051238127052783966, + "rewards/margins": 0.1698467880487442, + "rewards/rejected": -0.11860863119363785, + "step": 307 + }, + { + "epoch": 0.6761800219538968, + "grad_norm": 4.09398078918457, + "learning_rate": 4.22965796607999e-06, + "logits/chosen": -2.253674268722534, + "logits/rejected": -2.3258652687072754, + "logps/chosen": -20.03459930419922, + "logps/rejected": -14.505156517028809, + "loss": 0.9795, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.04597073420882225, + "rewards/margins": 0.39578795433044434, + "rewards/rejected": -0.3498172163963318, + "step": 308 + }, + { + "epoch": 0.6783754116355654, + "grad_norm": 3.907806634902954, + "learning_rate": 4.22264398708477e-06, + "logits/chosen": -2.3766307830810547, + "logits/rejected": -2.3582723140716553, + "logps/chosen": -14.181205749511719, + "logps/rejected": -10.14924430847168, + "loss": 0.9015, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.028858356177806854, + "rewards/margins": 0.2582564055919647, + "rewards/rejected": -0.22939805686473846, + "step": 309 + }, + { + "epoch": 0.6805708013172338, + "grad_norm": 3.5894784927368164, + "learning_rate": 4.215604094671835e-06, + "logits/chosen": -2.3014726638793945, + "logits/rejected": -2.3518590927124023, + "logps/chosen": -14.997878074645996, + "logps/rejected": -10.144777297973633, + "loss": 0.7828, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.04976440966129303, + "rewards/margins": 0.513340950012207, + "rewards/rejected": -0.4635765254497528, + "step": 310 + }, + { + "epoch": 0.6827661909989023, + "grad_norm": 4.483096122741699, + "learning_rate": 4.208538394740993e-06, + "logits/chosen": -2.2711143493652344, + "logits/rejected": 
-2.3518011569976807, + "logps/chosen": -17.233510971069336, + "logps/rejected": -12.07126235961914, + "loss": 0.9666, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.055232856422662735, + "rewards/margins": 0.24555866420269012, + "rewards/rejected": -0.1903257966041565, + "step": 311 + }, + { + "epoch": 0.6849615806805708, + "grad_norm": 4.8697967529296875, + "learning_rate": 4.201446993580277e-06, + "logits/chosen": -2.255201816558838, + "logits/rejected": -2.418351650238037, + "logps/chosen": -16.337543487548828, + "logps/rejected": -10.240762710571289, + "loss": 0.9028, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.04591435566544533, + "rewards/margins": 0.3530338406562805, + "rewards/rejected": -0.39894816279411316, + "step": 312 + }, + { + "epoch": 0.6871569703622393, + "grad_norm": 4.00418758392334, + "learning_rate": 4.194329997864331e-06, + "logits/chosen": -2.4171342849731445, + "logits/rejected": -2.378293037414551, + "logps/chosen": -18.34632110595703, + "logps/rejected": -12.369073867797852, + "loss": 0.8811, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.001896400935947895, + "rewards/margins": 0.48826920986175537, + "rewards/rejected": -0.48637276887893677, + "step": 313 + }, + { + "epoch": 0.6893523600439078, + "grad_norm": 4.626528739929199, + "learning_rate": 4.18718751465282e-06, + "logits/chosen": -2.329511880874634, + "logits/rejected": -2.3417491912841797, + "logps/chosen": -14.74193000793457, + "logps/rejected": -10.454490661621094, + "loss": 0.8867, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.04431447759270668, + "rewards/margins": 0.42057913541793823, + "rewards/rejected": -0.37626469135284424, + "step": 314 + }, + { + "epoch": 0.6915477497255763, + "grad_norm": 3.7983834743499756, + "learning_rate": 4.180019651388807e-06, + "logits/chosen": -2.2895097732543945, + "logits/rejected": -2.3450984954833984, + "logps/chosen": -15.74697208404541, + "logps/rejected": -11.167444229125977, + "loss": 0.8818, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.03174567595124245, + "rewards/margins": 0.35864511132240295, + "rewards/rejected": -0.3268994688987732, + "step": 315 + }, + { + "epoch": 0.6937431394072447, + "grad_norm": 3.4797542095184326, + "learning_rate": 4.172826515897146e-06, + "logits/chosen": -2.311906337738037, + "logits/rejected": -2.3090274333953857, + "logps/chosen": -13.851794242858887, + "logps/rejected": -12.625475883483887, + "loss": 0.9461, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.017236392945051193, + "rewards/margins": 0.2493745982646942, + "rewards/rejected": -0.23213821649551392, + "step": 316 + }, + { + "epoch": 0.6959385290889133, + "grad_norm": 4.422733306884766, + "learning_rate": 4.165608216382857e-06, + "logits/chosen": -2.4253227710723877, + "logits/rejected": -2.3780248165130615, + "logps/chosen": -16.252634048461914, + "logps/rejected": -13.770538330078125, + "loss": 0.9458, + "rewards/accuracies": 0.59375, + "rewards/chosen": 0.06439048796892166, + "rewards/margins": 0.30147218704223633, + "rewards/rejected": -0.23708167672157288, + "step": 317 + }, + { + "epoch": 0.6981339187705817, + "grad_norm": 3.7859911918640137, + "learning_rate": 4.158364861429493e-06, + "logits/chosen": -2.322923421859741, + "logits/rejected": -2.2539587020874023, + "logps/chosen": -18.831212997436523, + "logps/rejected": -11.069580078125, + "loss": 0.8346, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.03058037906885147, + "rewards/margins": 0.34845486283302307, + "rewards/rejected": -0.3178745210170746, + 
"step": 318 + }, + { + "epoch": 0.7003293084522503, + "grad_norm": 3.731484889984131, + "learning_rate": 4.151096559997519e-06, + "logits/chosen": -2.3627357482910156, + "logits/rejected": -2.3674192428588867, + "logps/chosen": -15.868612289428711, + "logps/rejected": -10.797269821166992, + "loss": 0.8928, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.02595650777220726, + "rewards/margins": 0.3343779742717743, + "rewards/rejected": -0.30842143297195435, + "step": 319 + }, + { + "epoch": 0.7025246981339188, + "grad_norm": 3.9332573413848877, + "learning_rate": 4.14380342142266e-06, + "logits/chosen": -2.3744566440582275, + "logits/rejected": -2.309058904647827, + "logps/chosen": -12.979777336120605, + "logps/rejected": -12.636903762817383, + "loss": 0.9292, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.08498867601156235, + "rewards/margins": 0.41657087206840515, + "rewards/rejected": -0.3315821886062622, + "step": 320 + }, + { + "epoch": 0.7047200878155873, + "grad_norm": 5.405649185180664, + "learning_rate": 4.1364855554142604e-06, + "logits/chosen": -2.3514606952667236, + "logits/rejected": -2.2680933475494385, + "logps/chosen": -15.088423728942871, + "logps/rejected": -11.752235412597656, + "loss": 0.881, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.05181232839822769, + "rewards/margins": 0.3780396580696106, + "rewards/rejected": -0.4298520088195801, + "step": 321 + }, + { + "epoch": 0.7069154774972558, + "grad_norm": 4.955357551574707, + "learning_rate": 4.129143072053639e-06, + "logits/chosen": -2.3219053745269775, + "logits/rejected": -2.38175630569458, + "logps/chosen": -15.868922233581543, + "logps/rejected": -10.88913345336914, + "loss": 0.8665, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.052466217428445816, + "rewards/margins": 0.5013300776481628, + "rewards/rejected": -0.5537962913513184, + "step": 322 + }, + { + "epoch": 0.7091108671789242, + "grad_norm": 4.574494361877441, + "learning_rate": 4.121776081792426e-06, + "logits/chosen": -2.3393287658691406, + "logits/rejected": -2.381781578063965, + "logps/chosen": -18.754756927490234, + "logps/rejected": -14.181581497192383, + "loss": 0.8978, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.04621858894824982, + "rewards/margins": 0.47777223587036133, + "rewards/rejected": -0.4315536618232727, + "step": 323 + }, + { + "epoch": 0.7113062568605928, + "grad_norm": 4.758660316467285, + "learning_rate": 4.114384695450906e-06, + "logits/chosen": -2.3381195068359375, + "logits/rejected": -2.3528432846069336, + "logps/chosen": -16.682260513305664, + "logps/rejected": -10.463571548461914, + "loss": 0.8577, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.05263161659240723, + "rewards/margins": 0.49307867884635925, + "rewards/rejected": -0.440447062253952, + "step": 324 + }, + { + "epoch": 0.7135016465422612, + "grad_norm": 5.131170749664307, + "learning_rate": 4.106969024216348e-06, + "logits/chosen": -2.304837703704834, + "logits/rejected": -2.3518829345703125, + "logps/chosen": -17.55645179748535, + "logps/rejected": -11.162080764770508, + "loss": 0.7799, + "rewards/accuracies": 0.90625, + "rewards/chosen": 0.09621885418891907, + "rewards/margins": 0.5682495832443237, + "rewards/rejected": -0.4720306396484375, + "step": 325 + }, + { + "epoch": 0.7156970362239298, + "grad_norm": 4.7023844718933105, + "learning_rate": 4.099529179641337e-06, + "logits/chosen": -2.31040358543396, + "logits/rejected": -2.3163864612579346, + "logps/chosen": -14.483461380004883, + "logps/rejected": 
-11.768637657165527, + "loss": 0.9377, + "rewards/accuracies": 0.5625, + "rewards/chosen": 0.0021479413844645023, + "rewards/margins": 0.3606518507003784, + "rewards/rejected": -0.35850390791893005, + "step": 326 + }, + { + "epoch": 0.7178924259055982, + "grad_norm": 3.8112165927886963, + "learning_rate": 4.09206527364209e-06, + "logits/chosen": -2.382301092147827, + "logits/rejected": -2.365294933319092, + "logps/chosen": -16.85517120361328, + "logps/rejected": -12.415393829345703, + "loss": 0.9548, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.035888537764549255, + "rewards/margins": 0.31409651041030884, + "rewards/rejected": -0.3499850034713745, + "step": 327 + }, + { + "epoch": 0.7200878155872668, + "grad_norm": 3.9805612564086914, + "learning_rate": 4.084577418496775e-06, + "logits/chosen": -2.3480184078216553, + "logits/rejected": -2.3130125999450684, + "logps/chosen": -13.996624946594238, + "logps/rejected": -10.171143531799316, + "loss": 0.7951, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.09903817623853683, + "rewards/margins": 0.4770241379737854, + "rewards/rejected": -0.37798595428466797, + "step": 328 + }, + { + "epoch": 0.7222832052689352, + "grad_norm": 4.388843536376953, + "learning_rate": 4.0770657268438285e-06, + "logits/chosen": -2.250278949737549, + "logits/rejected": -2.3578574657440186, + "logps/chosen": -17.343597412109375, + "logps/rejected": -7.6419196128845215, + "loss": 0.721, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.09454993903636932, + "rewards/margins": 0.7629677057266235, + "rewards/rejected": -0.6684178113937378, + "step": 329 + }, + { + "epoch": 0.7244785949506037, + "grad_norm": 6.914097309112549, + "learning_rate": 4.069530311680247e-06, + "logits/chosen": -2.3882980346679688, + "logits/rejected": -2.374696969985962, + "logps/chosen": -15.285945892333984, + "logps/rejected": -11.684650421142578, + "loss": 0.9118, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.05909008905291557, + "rewards/margins": 0.3279057443141937, + "rewards/rejected": -0.3869958221912384, + "step": 330 + }, + { + "epoch": 0.7266739846322722, + "grad_norm": 5.314255237579346, + "learning_rate": 4.0619712863599005e-06, + "logits/chosen": -2.278480291366577, + "logits/rejected": -2.2412896156311035, + "logps/chosen": -15.4207124710083, + "logps/rejected": -11.311100959777832, + "loss": 0.8939, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.008756492286920547, + "rewards/margins": 0.43041741847991943, + "rewards/rejected": -0.4216609001159668, + "step": 331 + }, + { + "epoch": 0.7288693743139407, + "grad_norm": 4.677224636077881, + "learning_rate": 4.054388764591822e-06, + "logits/chosen": -2.344648838043213, + "logits/rejected": -2.395387887954712, + "logps/chosen": -17.03143882751465, + "logps/rejected": -10.78937816619873, + "loss": 0.8664, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.012906208634376526, + "rewards/margins": 0.3622843325138092, + "rewards/rejected": -0.3751905858516693, + "step": 332 + }, + { + "epoch": 0.7310647639956093, + "grad_norm": 4.577656269073486, + "learning_rate": 4.046782860438497e-06, + "logits/chosen": -2.3226308822631836, + "logits/rejected": -2.265408992767334, + "logps/chosen": -15.200016021728516, + "logps/rejected": -10.772403717041016, + "loss": 0.9198, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.07568801939487457, + "rewards/margins": 0.40591323375701904, + "rewards/rejected": -0.4816012382507324, + "step": 333 + }, + { + "epoch": 0.7332601536772777, + "grad_norm": 
4.119585990905762, + "learning_rate": 4.039153688314146e-06, + "logits/chosen": -2.3442039489746094, + "logits/rejected": -2.359684467315674, + "logps/chosen": -18.90411376953125, + "logps/rejected": -10.934317588806152, + "loss": 0.7644, + "rewards/accuracies": 0.90625, + "rewards/chosen": 0.03230892866849899, + "rewards/margins": 0.6441935300827026, + "rewards/rejected": -0.611884593963623, + "step": 334 + }, + { + "epoch": 0.7354555433589463, + "grad_norm": 5.220119476318359, + "learning_rate": 4.031501362983007e-06, + "logits/chosen": -2.297410726547241, + "logits/rejected": -2.481637716293335, + "logps/chosen": -15.908681869506836, + "logps/rejected": -11.298460006713867, + "loss": 0.9041, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.017940856516361237, + "rewards/margins": 0.29080140590667725, + "rewards/rejected": -0.272860586643219, + "step": 335 + }, + { + "epoch": 0.7376509330406147, + "grad_norm": 4.522210597991943, + "learning_rate": 4.023825999557608e-06, + "logits/chosen": -2.3637242317199707, + "logits/rejected": -2.355468273162842, + "logps/chosen": -17.870847702026367, + "logps/rejected": -12.528518676757812, + "loss": 0.9398, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.06956735253334045, + "rewards/margins": 0.298219233751297, + "rewards/rejected": -0.36778658628463745, + "step": 336 + }, + { + "epoch": 0.7398463227222832, + "grad_norm": 6.380414009094238, + "learning_rate": 4.016127713497034e-06, + "logits/chosen": -2.298593521118164, + "logits/rejected": -2.343686103820801, + "logps/chosen": -16.275299072265625, + "logps/rejected": -12.0189847946167, + "loss": 0.9632, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.0297526977956295, + "rewards/margins": 0.38488292694091797, + "rewards/rejected": -0.4146355986595154, + "step": 337 + }, + { + "epoch": 0.7420417124039517, + "grad_norm": 5.236372470855713, + "learning_rate": 4.00840662060519e-06, + "logits/chosen": -2.359950542449951, + "logits/rejected": -2.2415010929107666, + "logps/chosen": -24.300395965576172, + "logps/rejected": -15.511738777160645, + "loss": 1.0485, + "rewards/accuracies": 0.53125, + "rewards/chosen": -0.06561987102031708, + "rewards/margins": 0.27457380294799805, + "rewards/rejected": -0.3401937186717987, + "step": 338 + }, + { + "epoch": 0.7442371020856202, + "grad_norm": 4.642820358276367, + "learning_rate": 4.000662837029062e-06, + "logits/chosen": -2.4099583625793457, + "logits/rejected": -2.354393243789673, + "logps/chosen": -22.309005737304688, + "logps/rejected": -11.256224632263184, + "loss": 0.7581, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.010493040084838867, + "rewards/margins": 0.7678316831588745, + "rewards/rejected": -0.7573386430740356, + "step": 339 + }, + { + "epoch": 0.7464324917672887, + "grad_norm": 3.9868204593658447, + "learning_rate": 3.992896479256966e-06, + "logits/chosen": -2.310539722442627, + "logits/rejected": -2.306994915008545, + "logps/chosen": -18.193031311035156, + "logps/rejected": -9.452789306640625, + "loss": 0.7584, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.021110711619257927, + "rewards/margins": 0.8151036500930786, + "rewards/rejected": -0.7939929366111755, + "step": 340 + }, + { + "epoch": 0.7486278814489572, + "grad_norm": 4.947543621063232, + "learning_rate": 3.985107664116798e-06, + "logits/chosen": -2.3655166625976562, + "logits/rejected": -2.3199362754821777, + "logps/chosen": -16.473398208618164, + "logps/rejected": -9.421045303344727, + "loss": 0.8089, + "rewards/accuracies": 0.71875, + "rewards/chosen": 
0.05412997305393219, + "rewards/margins": 0.39927852153778076, + "rewards/rejected": -0.345148503780365, + "step": 341 + }, + { + "epoch": 0.7508232711306256, + "grad_norm": 4.09507942199707, + "learning_rate": 3.977296508774278e-06, + "logits/chosen": -2.2579798698425293, + "logits/rejected": -2.3313004970550537, + "logps/chosen": -17.961259841918945, + "logps/rejected": -8.732832908630371, + "loss": 0.7648, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.051630470901727676, + "rewards/margins": 0.7588289976119995, + "rewards/rejected": -0.7071985006332397, + "step": 342 + }, + { + "epoch": 0.7530186608122942, + "grad_norm": 6.222644329071045, + "learning_rate": 3.969463130731183e-06, + "logits/chosen": -2.3719406127929688, + "logits/rejected": -2.3723111152648926, + "logps/chosen": -18.259965896606445, + "logps/rejected": -10.765856742858887, + "loss": 0.867, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.09137167036533356, + "rewards/margins": 0.42636412382125854, + "rewards/rejected": -0.3349924087524414, + "step": 343 + }, + { + "epoch": 0.7552140504939627, + "grad_norm": 5.629691123962402, + "learning_rate": 3.9616076478235835e-06, + "logits/chosen": -2.3341000080108643, + "logits/rejected": -2.320246696472168, + "logps/chosen": -14.3938570022583, + "logps/rejected": -9.880708694458008, + "loss": 0.8374, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.06494603306055069, + "rewards/margins": 0.4671281576156616, + "rewards/rejected": -0.4021821916103363, + "step": 344 + }, + { + "epoch": 0.7574094401756312, + "grad_norm": 3.4992260932922363, + "learning_rate": 3.953730178220067e-06, + "logits/chosen": -2.349565267562866, + "logits/rejected": -2.394681930541992, + "logps/chosen": -16.43452262878418, + "logps/rejected": -11.140144348144531, + "loss": 0.8392, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.0837668776512146, + "rewards/margins": 0.46383053064346313, + "rewards/rejected": -0.38006365299224854, + "step": 345 + }, + { + "epoch": 0.7596048298572997, + "grad_norm": 5.090843677520752, + "learning_rate": 3.945830840419966e-06, + "logits/chosen": -2.3477964401245117, + "logits/rejected": -2.3646602630615234, + "logps/chosen": -16.911046981811523, + "logps/rejected": -8.938126564025879, + "loss": 0.8339, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.07053576409816742, + "rewards/margins": 0.6060460805892944, + "rewards/rejected": -0.676581859588623, + "step": 346 + }, + { + "epoch": 0.7618002195389681, + "grad_norm": 5.51276159286499, + "learning_rate": 3.937909753251572e-06, + "logits/chosen": -2.2657523155212402, + "logits/rejected": -2.330665111541748, + "logps/chosen": -16.019929885864258, + "logps/rejected": -9.611878395080566, + "loss": 0.8075, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.07287417352199554, + "rewards/margins": 0.5842018127441406, + "rewards/rejected": -0.5113277435302734, + "step": 347 + }, + { + "epoch": 0.7639956092206367, + "grad_norm": 3.379443645477295, + "learning_rate": 3.929967035870346e-06, + "logits/chosen": -2.3243355751037598, + "logits/rejected": -2.2666611671447754, + "logps/chosen": -15.123114585876465, + "logps/rejected": -10.203433990478516, + "loss": 0.8297, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.11292536556720734, + "rewards/margins": 0.45466747879981995, + "rewards/rejected": -0.3417420983314514, + "step": 348 + }, + { + "epoch": 0.7661909989023051, + "grad_norm": 8.115785598754883, + "learning_rate": 3.92200280775713e-06, + "logits/chosen": -2.35408878326416, + "logits/rejected": 
-2.3558552265167236, + "logps/chosen": -18.513586044311523, + "logps/rejected": -11.50524616241455, + "loss": 0.9296, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.06223009154200554, + "rewards/margins": 0.28361374139785767, + "rewards/rejected": -0.3458438515663147, + "step": 349 + }, + { + "epoch": 0.7683863885839737, + "grad_norm": 4.38709831237793, + "learning_rate": 3.914017188716347e-06, + "logits/chosen": -2.296597480773926, + "logits/rejected": -2.41675066947937, + "logps/chosen": -16.949928283691406, + "logps/rejected": -11.099538803100586, + "loss": 0.8672, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.11809515953063965, + "rewards/margins": 0.43611082434654236, + "rewards/rejected": -0.3180156648159027, + "step": 350 + }, + { + "epoch": 0.7705817782656421, + "grad_norm": 5.365295886993408, + "learning_rate": 3.9060102988742e-06, + "logits/chosen": -2.2918920516967773, + "logits/rejected": -2.329606056213379, + "logps/chosen": -18.76449203491211, + "logps/rejected": -12.69644546508789, + "loss": 0.8634, + "rewards/accuracies": 0.78125, + "rewards/chosen": -0.009303185157477856, + "rewards/margins": 0.6541052460670471, + "rewards/rejected": -0.6634083986282349, + "step": 351 + }, + { + "epoch": 0.7727771679473107, + "grad_norm": 3.892143964767456, + "learning_rate": 3.897982258676867e-06, + "logits/chosen": -2.3476452827453613, + "logits/rejected": -2.373446226119995, + "logps/chosen": -19.90625, + "logps/rejected": -12.041308403015137, + "loss": 0.8741, + "rewards/accuracies": 0.625, + "rewards/chosen": 0.06443409621715546, + "rewards/margins": 0.49310898780822754, + "rewards/rejected": -0.4286748766899109, + "step": 352 + }, + { + "epoch": 0.7749725576289791, + "grad_norm": 3.8393607139587402, + "learning_rate": 3.889933188888684e-06, + "logits/chosen": -2.273186206817627, + "logits/rejected": -2.268983840942383, + "logps/chosen": -14.942765235900879, + "logps/rejected": -10.225128173828125, + "loss": 0.8367, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.13203339278697968, + "rewards/margins": 0.38842177391052246, + "rewards/rejected": -0.2563883662223816, + "step": 353 + }, + { + "epoch": 0.7771679473106476, + "grad_norm": 5.566308975219727, + "learning_rate": 3.881863210590332e-06, + "logits/chosen": -2.408592700958252, + "logits/rejected": -2.4445698261260986, + "logps/chosen": -16.196022033691406, + "logps/rejected": -10.760068893432617, + "loss": 0.8831, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.021768394857645035, + "rewards/margins": 0.4853077232837677, + "rewards/rejected": -0.5070760846138, + "step": 354 + }, + { + "epoch": 0.7793633369923162, + "grad_norm": 4.365970134735107, + "learning_rate": 3.8737724451770155e-06, + "logits/chosen": -2.3492016792297363, + "logits/rejected": -2.309647560119629, + "logps/chosen": -19.536739349365234, + "logps/rejected": -12.805992126464844, + "loss": 0.9172, + "rewards/accuracies": 0.78125, + "rewards/chosen": -0.07249265909194946, + "rewards/margins": 0.5484910011291504, + "rewards/rejected": -0.6209836602210999, + "step": 355 + }, + { + "epoch": 0.7815587266739846, + "grad_norm": 4.709788799285889, + "learning_rate": 3.865661014356635e-06, + "logits/chosen": -2.3180618286132812, + "logits/rejected": -2.2893731594085693, + "logps/chosen": -20.79785919189453, + "logps/rejected": -13.704549789428711, + "loss": 0.8771, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.12575146555900574, + "rewards/margins": 0.5705665946006775, + "rewards/rejected": -0.44481509923934937, + "step": 356 + }, + { + 
"epoch": 0.7837541163556532, + "grad_norm": 4.544501304626465, + "learning_rate": 3.857529040147959e-06, + "logits/chosen": -2.3178446292877197, + "logits/rejected": -2.2923343181610107, + "logps/chosen": -17.59714126586914, + "logps/rejected": -10.660222053527832, + "loss": 0.7999, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.03826873004436493, + "rewards/margins": 0.501665472984314, + "rewards/rejected": -0.4633967876434326, + "step": 357 + }, + { + "epoch": 0.7859495060373216, + "grad_norm": 5.793648719787598, + "learning_rate": 3.849376644878783e-06, + "logits/chosen": -2.2837600708007812, + "logits/rejected": -2.2606236934661865, + "logps/chosen": -19.073780059814453, + "logps/rejected": -13.95520305633545, + "loss": 0.9672, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.04491977393627167, + "rewards/margins": 0.4374791979789734, + "rewards/rejected": -0.3925594091415405, + "step": 358 + }, + { + "epoch": 0.7881448957189902, + "grad_norm": 7.747844219207764, + "learning_rate": 3.841203951184095e-06, + "logits/chosen": -2.341078281402588, + "logits/rejected": -2.320150852203369, + "logps/chosen": -17.046415328979492, + "logps/rejected": -14.675268173217773, + "loss": 1.0073, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.12396389245986938, + "rewards/margins": 0.3983825445175171, + "rewards/rejected": -0.5223464369773865, + "step": 359 + }, + { + "epoch": 0.7903402854006586, + "grad_norm": 6.052748680114746, + "learning_rate": 3.833011082004229e-06, + "logits/chosen": -2.2902965545654297, + "logits/rejected": -2.296034336090088, + "logps/chosen": -18.75448989868164, + "logps/rejected": -11.371520042419434, + "loss": 0.7632, + "rewards/accuracies": 0.9375, + "rewards/chosen": 0.03268669173121452, + "rewards/margins": 0.6433117389678955, + "rewards/rejected": -0.6106250286102295, + "step": 360 + }, + { + "epoch": 0.7925356750823271, + "grad_norm": 5.396251678466797, + "learning_rate": 3.824798160583012e-06, + "logits/chosen": -2.332122564315796, + "logits/rejected": -2.350968360900879, + "logps/chosen": -23.035541534423828, + "logps/rejected": -17.014999389648438, + "loss": 0.9551, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.036588456481695175, + "rewards/margins": 0.7014162540435791, + "rewards/rejected": -0.6648277640342712, + "step": 361 + }, + { + "epoch": 0.7947310647639956, + "grad_norm": 5.20027494430542, + "learning_rate": 3.816565310465919e-06, + "logits/chosen": -2.394535541534424, + "logits/rejected": -2.278174877166748, + "logps/chosen": -19.75065803527832, + "logps/rejected": -12.300567626953125, + "loss": 0.9152, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.08690013736486435, + "rewards/margins": 0.452328085899353, + "rewards/rejected": -0.5392282605171204, + "step": 362 + }, + { + "epoch": 0.7969264544456641, + "grad_norm": 4.143304347991943, + "learning_rate": 3.8083126554982026e-06, + "logits/chosen": -2.3074147701263428, + "logits/rejected": -2.3378899097442627, + "logps/chosen": -20.852561950683594, + "logps/rejected": -10.050654411315918, + "loss": 0.8272, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.06546229869127274, + "rewards/margins": 0.758281409740448, + "rewards/rejected": -0.8237438201904297, + "step": 363 + }, + { + "epoch": 0.7991218441273326, + "grad_norm": 4.118852138519287, + "learning_rate": 3.8000403198230385e-06, + "logits/chosen": -2.343893051147461, + "logits/rejected": -2.3883817195892334, + "logps/chosen": -24.624427795410156, + "logps/rejected": -15.104917526245117, + "loss": 0.9192, + 
"rewards/accuracies": 0.65625, + "rewards/chosen": 0.12434723973274231, + "rewards/margins": 0.4829878509044647, + "rewards/rejected": -0.3586406111717224, + "step": 364 + }, + { + "epoch": 0.8013172338090011, + "grad_norm": 5.058257579803467, + "learning_rate": 3.7917484278796578e-06, + "logits/chosen": -2.4088287353515625, + "logits/rejected": -2.468463659286499, + "logps/chosen": -18.072254180908203, + "logps/rejected": -9.308141708374023, + "loss": 0.7411, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.022563787177205086, + "rewards/margins": 0.6568112373352051, + "rewards/rejected": -0.6342474222183228, + "step": 365 + }, + { + "epoch": 0.8035126234906695, + "grad_norm": 6.365854740142822, + "learning_rate": 3.7834371044014695e-06, + "logits/chosen": -2.3097360134124756, + "logits/rejected": -2.4446170330047607, + "logps/chosen": -17.25137710571289, + "logps/rejected": -13.248261451721191, + "loss": 0.8844, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.03468625992536545, + "rewards/margins": 0.5087876915931702, + "rewards/rejected": -0.4741014242172241, + "step": 366 + }, + { + "epoch": 0.8057080131723381, + "grad_norm": 4.799102783203125, + "learning_rate": 3.7751064744141886e-06, + "logits/chosen": -2.3456978797912598, + "logits/rejected": -2.2777140140533447, + "logps/chosen": -16.881250381469727, + "logps/rejected": -8.925056457519531, + "loss": 0.7494, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.0991620272397995, + "rewards/margins": 0.5507904887199402, + "rewards/rejected": -0.4516284465789795, + "step": 367 + }, + { + "epoch": 0.8079034028540066, + "grad_norm": 5.138694763183594, + "learning_rate": 3.7667566632339557e-06, + "logits/chosen": -2.25407338142395, + "logits/rejected": -2.3604111671447754, + "logps/chosen": -16.577442169189453, + "logps/rejected": -10.655071258544922, + "loss": 0.883, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.039467353373765945, + "rewards/margins": 0.4447363317012787, + "rewards/rejected": -0.40526896715164185, + "step": 368 + }, + { + "epoch": 0.8100987925356751, + "grad_norm": 6.1413397789001465, + "learning_rate": 3.75838779646545e-06, + "logits/chosen": -2.2719509601593018, + "logits/rejected": -2.270545721054077, + "logps/chosen": -16.89019203186035, + "logps/rejected": -10.996526718139648, + "loss": 0.8201, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.10040238499641418, + "rewards/margins": 0.5125969052314758, + "rewards/rejected": -0.41219455003738403, + "step": 369 + }, + { + "epoch": 0.8122941822173436, + "grad_norm": 4.1379499435424805, + "learning_rate": 3.7500000000000005e-06, + "logits/chosen": -2.3188323974609375, + "logits/rejected": -2.276256561279297, + "logps/chosen": -11.709390640258789, + "logps/rejected": -8.320552825927734, + "loss": 0.8101, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.06503432989120483, + "rewards/margins": 0.35664820671081543, + "rewards/rejected": -0.2916138768196106, + "step": 370 + }, + { + "epoch": 0.814489571899012, + "grad_norm": 4.974375247955322, + "learning_rate": 3.7415934000136907e-06, + "logits/chosen": -2.3040988445281982, + "logits/rejected": -2.378429412841797, + "logps/chosen": -16.86901092529297, + "logps/rejected": -10.178057670593262, + "loss": 0.7936, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.0352807454764843, + "rewards/margins": 0.6562831401824951, + "rewards/rejected": -0.6210023760795593, + "step": 371 + }, + { + "epoch": 0.8166849615806806, + "grad_norm": 5.170460224151611, + "learning_rate": 
3.7331681229654637e-06, + "logits/chosen": -2.3687679767608643, + "logits/rejected": -2.406930923461914, + "logps/chosen": -19.379417419433594, + "logps/rejected": -12.089089393615723, + "loss": 0.8487, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.03307601809501648, + "rewards/margins": 0.6642818450927734, + "rewards/rejected": -0.6312057971954346, + "step": 372 + }, + { + "epoch": 0.818880351262349, + "grad_norm": 6.528963088989258, + "learning_rate": 3.724724295595218e-06, + "logits/chosen": -2.318371534347534, + "logits/rejected": -2.340467929840088, + "logps/chosen": -17.74517822265625, + "logps/rejected": -13.328447341918945, + "loss": 0.903, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.042347729206085205, + "rewards/margins": 0.4485569894313812, + "rewards/rejected": -0.4062092900276184, + "step": 373 + }, + { + "epoch": 0.8210757409440176, + "grad_norm": 3.9055440425872803, + "learning_rate": 3.7162620449219e-06, + "logits/chosen": -2.387302875518799, + "logits/rejected": -2.3294808864593506, + "logps/chosen": -15.384821891784668, + "logps/rejected": -9.518472671508789, + "loss": 0.802, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.06279882788658142, + "rewards/margins": 0.3927484154701233, + "rewards/rejected": -0.32994961738586426, + "step": 374 + }, + { + "epoch": 0.823271130625686, + "grad_norm": 4.305942058563232, + "learning_rate": 3.7077814982415966e-06, + "logits/chosen": -2.2834930419921875, + "logits/rejected": -2.1856932640075684, + "logps/chosen": -15.38658332824707, + "logps/rejected": -9.867746353149414, + "loss": 0.8521, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.12428806722164154, + "rewards/margins": 0.3732988238334656, + "rewards/rejected": -0.24901077151298523, + "step": 375 + }, + { + "epoch": 0.8254665203073546, + "grad_norm": 4.016692161560059, + "learning_rate": 3.699282783125616e-06, + "logits/chosen": -2.3968873023986816, + "logits/rejected": -2.3753557205200195, + "logps/chosen": -15.990100860595703, + "logps/rejected": -9.94045639038086, + "loss": 0.7915, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.0717865377664566, + "rewards/margins": 0.5052440166473389, + "rewards/rejected": -0.43345755338668823, + "step": 376 + }, + { + "epoch": 0.827661909989023, + "grad_norm": 5.630831241607666, + "learning_rate": 3.6907660274185723e-06, + "logits/chosen": -2.2964377403259277, + "logits/rejected": -2.360926389694214, + "logps/chosen": -20.974613189697266, + "logps/rejected": -11.94788646697998, + "loss": 0.8318, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.12485136836767197, + "rewards/margins": 0.8055430054664612, + "rewards/rejected": -0.9303942918777466, + "step": 377 + }, + { + "epoch": 0.8298572996706916, + "grad_norm": 4.928332805633545, + "learning_rate": 3.6822313592364594e-06, + "logits/chosen": -2.358898401260376, + "logits/rejected": -2.3022220134735107, + "logps/chosen": -19.222505569458008, + "logps/rejected": -9.495677947998047, + "loss": 0.7366, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.0382472425699234, + "rewards/margins": 0.7630552053451538, + "rewards/rejected": -0.7248079776763916, + "step": 378 + }, + { + "epoch": 0.8320526893523601, + "grad_norm": 11.10118293762207, + "learning_rate": 3.6736789069647273e-06, + "logits/chosen": -2.3210902214050293, + "logits/rejected": -2.2445425987243652, + "logps/chosen": -17.241561889648438, + "logps/rejected": -12.101102828979492, + "loss": 0.9336, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.012493027374148369, + "rewards/margins": 
0.4085111916065216, + "rewards/rejected": -0.4210042655467987, + "step": 379 + }, + { + "epoch": 0.8342480790340285, + "grad_norm": 6.272946834564209, + "learning_rate": 3.665108799256348e-06, + "logits/chosen": -2.1888327598571777, + "logits/rejected": -2.325762987136841, + "logps/chosen": -20.95864486694336, + "logps/rejected": -8.965181350708008, + "loss": 0.7478, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.08120650053024292, + "rewards/margins": 0.6599326133728027, + "rewards/rejected": -0.5787261128425598, + "step": 380 + }, + { + "epoch": 0.8364434687156971, + "grad_norm": 5.793782711029053, + "learning_rate": 3.6565211650298787e-06, + "logits/chosen": -2.251807689666748, + "logits/rejected": -2.2617058753967285, + "logps/chosen": -28.702529907226562, + "logps/rejected": -15.13422966003418, + "loss": 0.8766, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.003576405346393585, + "rewards/margins": 0.8133214116096497, + "rewards/rejected": -0.8097449541091919, + "step": 381 + }, + { + "epoch": 0.8386388583973655, + "grad_norm": 6.972100734710693, + "learning_rate": 3.6479161334675294e-06, + "logits/chosen": -2.285806179046631, + "logits/rejected": -2.4609298706054688, + "logps/chosen": -18.277084350585938, + "logps/rejected": -12.115345001220703, + "loss": 0.8225, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.0019068922847509384, + "rewards/margins": 0.6515531539916992, + "rewards/rejected": -0.6496463418006897, + "step": 382 + }, + { + "epoch": 0.8408342480790341, + "grad_norm": 4.72037935256958, + "learning_rate": 3.639293834013211e-06, + "logits/chosen": -2.3857169151306152, + "logits/rejected": -2.2780425548553467, + "logps/chosen": -23.369903564453125, + "logps/rejected": -15.874404907226562, + "loss": 0.9017, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.0180068202316761, + "rewards/margins": 0.7297979593276978, + "rewards/rejected": -0.7117910385131836, + "step": 383 + }, + { + "epoch": 0.8430296377607025, + "grad_norm": 6.700748443603516, + "learning_rate": 3.6306543963705943e-06, + "logits/chosen": -2.333336591720581, + "logits/rejected": -2.3500876426696777, + "logps/chosen": -20.27480125427246, + "logps/rejected": -11.234090805053711, + "loss": 0.7802, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.02051311358809471, + "rewards/margins": 0.6360718011856079, + "rewards/rejected": -0.6155586242675781, + "step": 384 + }, + { + "epoch": 0.845225027442371, + "grad_norm": 4.831250190734863, + "learning_rate": 3.621997950501156e-06, + "logits/chosen": -2.2861812114715576, + "logits/rejected": -2.3506617546081543, + "logps/chosen": -18.32001304626465, + "logps/rejected": -11.220377922058105, + "loss": 0.8039, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.03276555612683296, + "rewards/margins": 0.6321474313735962, + "rewards/rejected": -0.5993818640708923, + "step": 385 + }, + { + "epoch": 0.8474204171240395, + "grad_norm": 7.575469017028809, + "learning_rate": 3.613324626622224e-06, + "logits/chosen": -2.3208696842193604, + "logits/rejected": -2.335655689239502, + "logps/chosen": -16.124113082885742, + "logps/rejected": -13.462167739868164, + "loss": 1.0063, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.0590025819838047, + "rewards/margins": 0.3688778877258301, + "rewards/rejected": -0.4278804659843445, + "step": 386 + }, + { + "epoch": 0.849615806805708, + "grad_norm": 9.63671875, + "learning_rate": 3.60463455520502e-06, + "logits/chosen": -2.388012647628784, + "logits/rejected": -2.3075461387634277, + "logps/chosen": 
-18.78927230834961, + "logps/rejected": -14.575672149658203, + "loss": 0.9509, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.1962830275297165, + "rewards/margins": 0.41697803139686584, + "rewards/rejected": -0.6132611036300659, + "step": 387 + }, + { + "epoch": 0.8518111964873765, + "grad_norm": 5.019967079162598, + "learning_rate": 3.595927866972694e-06, + "logits/chosen": -2.301435708999634, + "logits/rejected": -2.2961511611938477, + "logps/chosen": -20.059595108032227, + "logps/rejected": -13.45213794708252, + "loss": 0.916, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.027182452380657196, + "rewards/margins": 0.6479660868644714, + "rewards/rejected": -0.6751485466957092, + "step": 388 + }, + { + "epoch": 0.854006586169045, + "grad_norm": 7.656889915466309, + "learning_rate": 3.587204692898363e-06, + "logits/chosen": -2.2869014739990234, + "logits/rejected": -2.3017992973327637, + "logps/chosen": -20.62851905822754, + "logps/rejected": -9.424978256225586, + "loss": 0.7464, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.03314004838466644, + "rewards/margins": 0.8571538925170898, + "rewards/rejected": -0.8240138292312622, + "step": 389 + }, + { + "epoch": 0.8562019758507134, + "grad_norm": 5.941650390625, + "learning_rate": 3.578465164203134e-06, + "logits/chosen": -2.2174339294433594, + "logits/rejected": -2.256338357925415, + "logps/chosen": -17.569561004638672, + "logps/rejected": -12.100666046142578, + "loss": 0.8744, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.05165117233991623, + "rewards/margins": 0.6384047269821167, + "rewards/rejected": -0.5867536067962646, + "step": 390 + }, + { + "epoch": 0.858397365532382, + "grad_norm": 4.409201622009277, + "learning_rate": 3.5697094123541357e-06, + "logits/chosen": -2.264608860015869, + "logits/rejected": -2.2907967567443848, + "logps/chosen": -19.413509368896484, + "logps/rejected": -10.549042701721191, + "loss": 0.8083, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.005588045343756676, + "rewards/margins": 0.5789726972579956, + "rewards/rejected": -0.584560751914978, + "step": 391 + }, + { + "epoch": 0.8605927552140505, + "grad_norm": 5.469266891479492, + "learning_rate": 3.5609375690625384e-06, + "logits/chosen": -2.2814698219299316, + "logits/rejected": -2.2320809364318848, + "logps/chosen": -20.82750701904297, + "logps/rejected": -10.755342483520508, + "loss": 0.9039, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.09153535217046738, + "rewards/margins": 0.5601884126663208, + "rewards/rejected": -0.6517237424850464, + "step": 392 + }, + { + "epoch": 0.862788144895719, + "grad_norm": 4.7851176261901855, + "learning_rate": 3.552149766281573e-06, + "logits/chosen": -2.3673245906829834, + "logits/rejected": -2.288597583770752, + "logps/chosen": -17.121660232543945, + "logps/rejected": -10.34488296508789, + "loss": 0.7823, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.08655978739261627, + "rewards/margins": 0.6128506660461426, + "rewards/rejected": -0.5262908935546875, + "step": 393 + }, + { + "epoch": 0.8649835345773875, + "grad_norm": 4.078333854675293, + "learning_rate": 3.543346136204545e-06, + "logits/chosen": -2.2628226280212402, + "logits/rejected": -2.4437499046325684, + "logps/chosen": -21.973432540893555, + "logps/rejected": -11.049983024597168, + "loss": 0.7704, + "rewards/accuracies": 0.875, + "rewards/chosen": 0.13885267078876495, + "rewards/margins": 0.9487197995185852, + "rewards/rejected": -0.8098670840263367, + "step": 394 + }, + { + "epoch": 0.867178924259056, + 
"grad_norm": 5.391472339630127, + "learning_rate": 3.5345268112628485e-06, + "logits/chosen": -2.2225139141082764, + "logits/rejected": -2.41272234916687, + "logps/chosen": -20.41250991821289, + "logps/rejected": -9.64039421081543, + "loss": 0.7121, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.0024206284433603287, + "rewards/margins": 0.9180695414543152, + "rewards/rejected": -0.915648877620697, + "step": 395 + }, + { + "epoch": 0.8693743139407245, + "grad_norm": 4.787206172943115, + "learning_rate": 3.5256919241239712e-06, + "logits/chosen": -2.36836576461792, + "logits/rejected": -2.396129608154297, + "logps/chosen": -20.147674560546875, + "logps/rejected": -12.847021102905273, + "loss": 0.865, + "rewards/accuracies": 0.84375, + "rewards/chosen": -0.02502620406448841, + "rewards/margins": 0.755872368812561, + "rewards/rejected": -0.7808985114097595, + "step": 396 + }, + { + "epoch": 0.8715697036223929, + "grad_norm": 5.62094259262085, + "learning_rate": 3.516841607689501e-06, + "logits/chosen": -2.3200628757476807, + "logits/rejected": -2.369778633117676, + "logps/chosen": -21.13600730895996, + "logps/rejected": -13.838922500610352, + "loss": 0.8936, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.09514954686164856, + "rewards/margins": 0.6872110962867737, + "rewards/rejected": -0.7823606729507446, + "step": 397 + }, + { + "epoch": 0.8737650933040615, + "grad_norm": 6.053920269012451, + "learning_rate": 3.5079759950931257e-06, + "logits/chosen": -2.302199363708496, + "logits/rejected": -2.4088432788848877, + "logps/chosen": -19.452991485595703, + "logps/rejected": -10.391996383666992, + "loss": 0.7719, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.04446466267108917, + "rewards/margins": 0.6437113881111145, + "rewards/rejected": -0.6881760358810425, + "step": 398 + }, + { + "epoch": 0.8759604829857299, + "grad_norm": 5.638072967529297, + "learning_rate": 3.499095219698631e-06, + "logits/chosen": -2.2861905097961426, + "logits/rejected": -2.3225479125976562, + "logps/chosen": -19.642776489257812, + "logps/rejected": -10.90954875946045, + "loss": 0.7718, + "rewards/accuracies": 0.875, + "rewards/chosen": 0.08755317330360413, + "rewards/margins": 0.6167296171188354, + "rewards/rejected": -0.5291764736175537, + "step": 399 + }, + { + "epoch": 0.8781558726673985, + "grad_norm": 6.903406620025635, + "learning_rate": 3.4901994150978926e-06, + "logits/chosen": -2.307342529296875, + "logits/rejected": -2.337017059326172, + "logps/chosen": -19.130626678466797, + "logps/rejected": -10.1241455078125, + "loss": 0.8591, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.03486700728535652, + "rewards/margins": 0.5795115232467651, + "rewards/rejected": -0.6143784523010254, + "step": 400 + }, + { + "epoch": 0.8803512623490669, + "grad_norm": 4.02300500869751, + "learning_rate": 3.481288715108868e-06, + "logits/chosen": -2.3123385906219482, + "logits/rejected": -2.2987749576568604, + "logps/chosen": -20.062665939331055, + "logps/rejected": -11.380792617797852, + "loss": 0.8085, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.026614921167492867, + "rewards/margins": 0.7673150300979614, + "rewards/rejected": -0.7407000064849854, + "step": 401 + }, + { + "epoch": 0.8825466520307355, + "grad_norm": 3.974309206008911, + "learning_rate": 3.4723632537735846e-06, + "logits/chosen": -2.3131566047668457, + "logits/rejected": -2.3440208435058594, + "logps/chosen": -16.116439819335938, + "logps/rejected": -11.196529388427734, + "loss": 0.8536, + "rewards/accuracies": 0.75, + 
"rewards/chosen": 0.033491406589746475, + "rewards/margins": 0.460104376077652, + "rewards/rejected": -0.4266130030155182, + "step": 402 + }, + { + "epoch": 0.884742041712404, + "grad_norm": 9.103083610534668, + "learning_rate": 3.4634231653561213e-06, + "logits/chosen": -2.26672625541687, + "logits/rejected": -2.3151166439056396, + "logps/chosen": -18.1038875579834, + "logps/rejected": -10.783166885375977, + "loss": 0.9248, + "rewards/accuracies": 0.5, + "rewards/chosen": -0.09668270498514175, + "rewards/margins": 0.36572739481925964, + "rewards/rejected": -0.4624100923538208, + "step": 403 + }, + { + "epoch": 0.8869374313940724, + "grad_norm": 8.053287506103516, + "learning_rate": 3.454468584340588e-06, + "logits/chosen": -2.4167327880859375, + "logits/rejected": -2.391662120819092, + "logps/chosen": -16.634321212768555, + "logps/rejected": -11.324498176574707, + "loss": 0.9218, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.11378289759159088, + "rewards/margins": 0.2954775393009186, + "rewards/rejected": -0.40926042199134827, + "step": 404 + }, + { + "epoch": 0.889132821075741, + "grad_norm": 10.441896438598633, + "learning_rate": 3.4454996454291066e-06, + "logits/chosen": -2.254704236984253, + "logits/rejected": -2.3380165100097656, + "logps/chosen": -19.833484649658203, + "logps/rejected": -12.246344566345215, + "loss": 0.9228, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.06332497298717499, + "rewards/margins": 0.4893012046813965, + "rewards/rejected": -0.5526261329650879, + "step": 405 + }, + { + "epoch": 0.8913282107574094, + "grad_norm": 6.624183654785156, + "learning_rate": 3.436516483539781e-06, + "logits/chosen": -2.284971237182617, + "logits/rejected": -2.3368124961853027, + "logps/chosen": -16.181856155395508, + "logps/rejected": -10.476940155029297, + "loss": 0.8555, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.09641481190919876, + "rewards/margins": 0.5150280594825745, + "rewards/rejected": -0.4186131954193115, + "step": 406 + }, + { + "epoch": 0.893523600439078, + "grad_norm": 15.161913871765137, + "learning_rate": 3.4275192338046677e-06, + "logits/chosen": -2.441598415374756, + "logits/rejected": -2.354428768157959, + "logps/chosen": -15.579620361328125, + "logps/rejected": -15.171042442321777, + "loss": 1.1264, + "rewards/accuracies": 0.59375, + "rewards/chosen": -0.20871956646442413, + "rewards/margins": 0.14662565290927887, + "rewards/rejected": -0.355345219373703, + "step": 407 + }, + { + "epoch": 0.8957189901207464, + "grad_norm": 4.21067476272583, + "learning_rate": 3.4185080315677454e-06, + "logits/chosen": -2.347850799560547, + "logits/rejected": -2.31205677986145, + "logps/chosen": -21.443716049194336, + "logps/rejected": -13.075089454650879, + "loss": 0.7881, + "rewards/accuracies": 0.90625, + "rewards/chosen": 0.07311268150806427, + "rewards/margins": 0.9533619284629822, + "rewards/rejected": -0.8802492618560791, + "step": 408 + }, + { + "epoch": 0.897914379802415, + "grad_norm": 7.904491424560547, + "learning_rate": 3.409483012382879e-06, + "logits/chosen": -2.24735689163208, + "logits/rejected": -2.2707090377807617, + "logps/chosen": -22.61405372619629, + "logps/rejected": -13.15417766571045, + "loss": 0.8115, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.1565491259098053, + "rewards/margins": 0.9658958911895752, + "rewards/rejected": -1.122444987297058, + "step": 409 + }, + { + "epoch": 0.9001097694840834, + "grad_norm": 6.310301303863525, + "learning_rate": 3.400444312011776e-06, + "logits/chosen": -2.3683767318725586, + 
"logits/rejected": -2.3440041542053223, + "logps/chosen": -17.640872955322266, + "logps/rejected": -10.977629661560059, + "loss": 0.8158, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.028235310688614845, + "rewards/margins": 0.590648353099823, + "rewards/rejected": -0.5624130368232727, + "step": 410 + }, + { + "epoch": 0.9023051591657519, + "grad_norm": 5.326711654663086, + "learning_rate": 3.3913920664219504e-06, + "logits/chosen": -2.447878837585449, + "logits/rejected": -2.4349117279052734, + "logps/chosen": -19.840436935424805, + "logps/rejected": -8.293453216552734, + "loss": 0.7169, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.06985548138618469, + "rewards/margins": 0.8774117231369019, + "rewards/rejected": -0.8075562715530396, + "step": 411 + }, + { + "epoch": 0.9045005488474204, + "grad_norm": 14.260117530822754, + "learning_rate": 3.3823264117846722e-06, + "logits/chosen": -2.368217945098877, + "logits/rejected": -2.411201238632202, + "logps/chosen": -18.50075912475586, + "logps/rejected": -12.620443344116211, + "loss": 0.9106, + "rewards/accuracies": 0.625, + "rewards/chosen": -0.04634511470794678, + "rewards/margins": 0.660761833190918, + "rewards/rejected": -0.7071069478988647, + "step": 412 + }, + { + "epoch": 0.9066959385290889, + "grad_norm": 6.360561847686768, + "learning_rate": 3.3732474844729235e-06, + "logits/chosen": -2.2475552558898926, + "logits/rejected": -2.3121492862701416, + "logps/chosen": -19.70197296142578, + "logps/rejected": -11.163705825805664, + "loss": 0.8478, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.06956078857183456, + "rewards/margins": 0.6349106431007385, + "rewards/rejected": -0.7044714093208313, + "step": 413 + }, + { + "epoch": 0.9088913282107574, + "grad_norm": 6.33591365814209, + "learning_rate": 3.3641554210593417e-06, + "logits/chosen": -2.2870852947235107, + "logits/rejected": -2.4032349586486816, + "logps/chosen": -21.093215942382812, + "logps/rejected": -10.273629188537598, + "loss": 0.7941, + "rewards/accuracies": 0.78125, + "rewards/chosen": -0.019855815917253494, + "rewards/margins": 0.6967600584030151, + "rewards/rejected": -0.7166157960891724, + "step": 414 + }, + { + "epoch": 0.9110867178924259, + "grad_norm": 6.404439926147461, + "learning_rate": 3.3550503583141726e-06, + "logits/chosen": -2.3385732173919678, + "logits/rejected": -2.3589258193969727, + "logps/chosen": -20.491695404052734, + "logps/rejected": -14.065013885498047, + "loss": 0.9211, + "rewards/accuracies": 0.6875, + "rewards/chosen": -0.04469328746199608, + "rewards/margins": 0.5574424862861633, + "rewards/rejected": -0.6021357774734497, + "step": 415 + }, + { + "epoch": 0.9132821075740944, + "grad_norm": 5.92446231842041, + "learning_rate": 3.3459324332032035e-06, + "logits/chosen": -2.390655279159546, + "logits/rejected": -2.313477039337158, + "logps/chosen": -20.9737548828125, + "logps/rejected": -11.82977294921875, + "loss": 0.791, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.037018876522779465, + "rewards/margins": 0.7438760995864868, + "rewards/rejected": -0.7808948755264282, + "step": 416 + }, + { + "epoch": 0.9154774972557629, + "grad_norm": 7.234256267547607, + "learning_rate": 3.3368017828857117e-06, + "logits/chosen": -2.3002657890319824, + "logits/rejected": -2.3773255348205566, + "logps/chosen": -26.463184356689453, + "logps/rejected": -14.281316757202148, + "loss": 0.8885, + "rewards/accuracies": 0.78125, + "rewards/chosen": -0.0562882199883461, + "rewards/margins": 0.8361290693283081, + "rewards/rejected": 
-0.8924172520637512, + "step": 417 + }, + { + "epoch": 0.9176728869374314, + "grad_norm": 8.097594261169434, + "learning_rate": 3.3276585447123957e-06, + "logits/chosen": -2.2877120971679688, + "logits/rejected": -2.381965398788452, + "logps/chosen": -18.890178680419922, + "logps/rejected": -15.372221946716309, + "loss": 1.0001, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.12048324942588806, + "rewards/margins": 0.5791645646095276, + "rewards/rejected": -0.6996477842330933, + "step": 418 + }, + { + "epoch": 0.9198682766190999, + "grad_norm": 5.180337905883789, + "learning_rate": 3.318502856223311e-06, + "logits/chosen": -2.308959484100342, + "logits/rejected": -2.3321940898895264, + "logps/chosen": -22.121414184570312, + "logps/rejected": -15.669811248779297, + "loss": 0.9739, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.04433257132768631, + "rewards/margins": 0.5762405395507812, + "rewards/rejected": -0.620573103427887, + "step": 419 + }, + { + "epoch": 0.9220636663007684, + "grad_norm": 5.768693447113037, + "learning_rate": 3.3093348551458033e-06, + "logits/chosen": -2.293351411819458, + "logits/rejected": -2.3098649978637695, + "logps/chosen": -19.407869338989258, + "logps/rejected": -13.331077575683594, + "loss": 0.9355, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.013614770025014877, + "rewards/margins": 0.37932369112968445, + "rewards/rejected": -0.3929384648799896, + "step": 420 + }, + { + "epoch": 0.9242590559824369, + "grad_norm": 5.084871292114258, + "learning_rate": 3.300154679392429e-06, + "logits/chosen": -2.372471809387207, + "logits/rejected": -2.372732162475586, + "logps/chosen": -18.58143424987793, + "logps/rejected": -10.904744148254395, + "loss": 0.8029, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.06652218848466873, + "rewards/margins": 0.5077332854270935, + "rewards/rejected": -0.4412110447883606, + "step": 421 + }, + { + "epoch": 0.9264544456641054, + "grad_norm": 5.599188327789307, + "learning_rate": 3.2909624670588915e-06, + "logits/chosen": -2.2680983543395996, + "logits/rejected": -2.403550386428833, + "logps/chosen": -22.577377319335938, + "logps/rejected": -16.19898223876953, + "loss": 0.9287, + "rewards/accuracies": 0.8125, + "rewards/chosen": -0.008984517306089401, + "rewards/margins": 0.7310069799423218, + "rewards/rejected": -0.7399914264678955, + "step": 422 + }, + { + "epoch": 0.9286498353457738, + "grad_norm": 6.535630226135254, + "learning_rate": 3.281758356421955e-06, + "logits/chosen": -2.2871334552764893, + "logits/rejected": -2.379993438720703, + "logps/chosen": -23.122037887573242, + "logps/rejected": -11.495004653930664, + "loss": 0.7905, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.10842779278755188, + "rewards/margins": 0.7593700289726257, + "rewards/rejected": -0.6509422063827515, + "step": 423 + }, + { + "epoch": 0.9308452250274424, + "grad_norm": 4.752484321594238, + "learning_rate": 3.272542485937369e-06, + "logits/chosen": -2.317404270172119, + "logits/rejected": -2.3489813804626465, + "logps/chosen": -19.68863296508789, + "logps/rejected": -11.732726097106934, + "loss": 0.8028, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.04726914316415787, + "rewards/margins": 0.7166731953620911, + "rewards/rejected": -0.6694040298461914, + "step": 424 + }, + { + "epoch": 0.9330406147091108, + "grad_norm": 5.882680416107178, + "learning_rate": 3.2633149942377835e-06, + "logits/chosen": -2.2992334365844727, + "logits/rejected": -2.4041848182678223, + "logps/chosen": -19.215017318725586, + 
"logps/rejected": -13.420120239257812, + "loss": 0.8508, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.1038583368062973, + "rewards/margins": 0.7461327910423279, + "rewards/rejected": -0.6422744393348694, + "step": 425 + }, + { + "epoch": 0.9352360043907794, + "grad_norm": 7.522842884063721, + "learning_rate": 3.2540760201306643e-06, + "logits/chosen": -2.331803798675537, + "logits/rejected": -2.3895986080169678, + "logps/chosen": -21.03508758544922, + "logps/rejected": -12.05073356628418, + "loss": 0.7629, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.06679324805736542, + "rewards/margins": 0.789514422416687, + "rewards/rejected": -0.7227212190628052, + "step": 426 + }, + { + "epoch": 0.9374313940724479, + "grad_norm": 11.598859786987305, + "learning_rate": 3.244825702596205e-06, + "logits/chosen": -2.3380470275878906, + "logits/rejected": -2.36613392829895, + "logps/chosen": -16.171180725097656, + "logps/rejected": -13.282466888427734, + "loss": 1.0535, + "rewards/accuracies": 0.65625, + "rewards/chosen": -0.14831915497779846, + "rewards/margins": 0.219946950674057, + "rewards/rejected": -0.36826610565185547, + "step": 427 + }, + { + "epoch": 0.9396267837541163, + "grad_norm": 5.60055685043335, + "learning_rate": 3.2355641807852377e-06, + "logits/chosen": -2.3118081092834473, + "logits/rejected": -2.328697681427002, + "logps/chosen": -21.737201690673828, + "logps/rejected": -8.444561958312988, + "loss": 0.6397, + "rewards/accuracies": 0.875, + "rewards/chosen": 0.16470521688461304, + "rewards/margins": 1.143484115600586, + "rewards/rejected": -0.9787789583206177, + "step": 428 + }, + { + "epoch": 0.9418221734357849, + "grad_norm": 7.712846279144287, + "learning_rate": 3.2262915940171373e-06, + "logits/chosen": -2.3408477306365967, + "logits/rejected": -2.258118152618408, + "logps/chosen": -18.901592254638672, + "logps/rejected": -9.10704231262207, + "loss": 0.7194, + "rewards/accuracies": 0.84375, + "rewards/chosen": 0.12541034817695618, + "rewards/margins": 0.7345283031463623, + "rewards/rejected": -0.6091179847717285, + "step": 429 + }, + { + "epoch": 0.9440175631174533, + "grad_norm": 5.265126705169678, + "learning_rate": 3.217008081777726e-06, + "logits/chosen": -2.3288192749023438, + "logits/rejected": -2.3424811363220215, + "logps/chosen": -17.280908584594727, + "logps/rejected": -12.148330688476562, + "loss": 0.8629, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.03913713991641998, + "rewards/margins": 0.5172592401504517, + "rewards/rejected": -0.47812211513519287, + "step": 430 + }, + { + "epoch": 0.9462129527991219, + "grad_norm": 6.47225284576416, + "learning_rate": 3.2077137837171764e-06, + "logits/chosen": -2.3356518745422363, + "logits/rejected": -2.3391013145446777, + "logps/chosen": -24.702030181884766, + "logps/rejected": -14.061824798583984, + "loss": 0.8684, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.02924323081970215, + "rewards/margins": 0.9203999042510986, + "rewards/rejected": -0.8911566734313965, + "step": 431 + }, + { + "epoch": 0.9484083424807903, + "grad_norm": 7.641210556030273, + "learning_rate": 3.1984088396479113e-06, + "logits/chosen": -2.343367338180542, + "logits/rejected": -2.301558494567871, + "logps/chosen": -18.52604103088379, + "logps/rejected": -10.527841567993164, + "loss": 0.8235, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.03001457452774048, + "rewards/margins": 0.6643778085708618, + "rewards/rejected": -0.6343631744384766, + "step": 432 + }, + { + "epoch": 0.9506037321624589, + "grad_norm": 
5.307621955871582, + "learning_rate": 3.189093389542498e-06, + "logits/chosen": -2.230017900466919, + "logits/rejected": -2.2622296810150146, + "logps/chosen": -15.772552490234375, + "logps/rejected": -10.832294464111328, + "loss": 0.9047, + "rewards/accuracies": 0.65625, + "rewards/chosen": 0.02474249340593815, + "rewards/margins": 0.5498454570770264, + "rewards/rejected": -0.525102972984314, + "step": 433 + }, + { + "epoch": 0.9527991218441273, + "grad_norm": 8.881011962890625, + "learning_rate": 3.179767573531546e-06, + "logits/chosen": -2.3451647758483887, + "logits/rejected": -2.2926852703094482, + "logps/chosen": -16.5456600189209, + "logps/rejected": -11.103011131286621, + "loss": 0.8291, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.05186466500163078, + "rewards/margins": 0.5443666577339172, + "rewards/rejected": -0.49250200390815735, + "step": 434 + }, + { + "epoch": 0.9549945115257958, + "grad_norm": 5.801877498626709, + "learning_rate": 3.1704315319015936e-06, + "logits/chosen": -2.3372323513031006, + "logits/rejected": -2.3091893196105957, + "logps/chosen": -23.148170471191406, + "logps/rejected": -16.703672409057617, + "loss": 0.8901, + "rewards/accuracies": 0.78125, + "rewards/chosen": -0.027854733169078827, + "rewards/margins": 0.7050666809082031, + "rewards/rejected": -0.7329213619232178, + "step": 435 + }, + { + "epoch": 0.9571899012074643, + "grad_norm": 5.348484039306641, + "learning_rate": 3.1610854050930063e-06, + "logits/chosen": -2.368006944656372, + "logits/rejected": -2.418947458267212, + "logps/chosen": -18.732362747192383, + "logps/rejected": -9.964558601379395, + "loss": 0.7543, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.043328557163476944, + "rewards/margins": 0.7478787302970886, + "rewards/rejected": -0.7045502066612244, + "step": 436 + }, + { + "epoch": 0.9593852908891328, + "grad_norm": 9.07236099243164, + "learning_rate": 3.1517293336978538e-06, + "logits/chosen": -2.2696292400360107, + "logits/rejected": -2.3099236488342285, + "logps/chosen": -15.583741188049316, + "logps/rejected": -17.732797622680664, + "loss": 1.1379, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.1528353989124298, + "rewards/margins": 0.2265656292438507, + "rewards/rejected": -0.3794010579586029, + "step": 437 + }, + { + "epoch": 0.9615806805708014, + "grad_norm": 6.247621536254883, + "learning_rate": 3.1423634584578056e-06, + "logits/chosen": -2.392490863800049, + "logits/rejected": -2.3658711910247803, + "logps/chosen": -18.44986343383789, + "logps/rejected": -14.234761238098145, + "loss": 0.8623, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.04860144108533859, + "rewards/margins": 0.7451760768890381, + "rewards/rejected": -0.6965745687484741, + "step": 438 + }, + { + "epoch": 0.9637760702524698, + "grad_norm": 5.417273998260498, + "learning_rate": 3.132987920262005e-06, + "logits/chosen": -2.3367209434509277, + "logits/rejected": -2.4058802127838135, + "logps/chosen": -17.406082153320312, + "logps/rejected": -8.535592079162598, + "loss": 0.7611, + "rewards/accuracies": 0.6875, + "rewards/chosen": 0.14041852951049805, + "rewards/margins": 0.6607614755630493, + "rewards/rejected": -0.5203429460525513, + "step": 439 + }, + { + "epoch": 0.9659714599341384, + "grad_norm": 7.8660359382629395, + "learning_rate": 3.1236028601449534e-06, + "logits/chosen": -2.2924911975860596, + "logits/rejected": -2.309866189956665, + "logps/chosen": -25.160966873168945, + "logps/rejected": -11.313871383666992, + "loss": 0.764, + "rewards/accuracies": 0.78125, + 
"rewards/chosen": 0.008873052895069122, + "rewards/margins": 1.0792553424835205, + "rewards/rejected": -1.0703822374343872, + "step": 440 + }, + { + "epoch": 0.9681668496158068, + "grad_norm": 7.840826034545898, + "learning_rate": 3.114208419284391e-06, + "logits/chosen": -2.3387792110443115, + "logits/rejected": -2.3440520763397217, + "logps/chosen": -18.8978271484375, + "logps/rejected": -12.489431381225586, + "loss": 0.8975, + "rewards/accuracies": 0.75, + "rewards/chosen": -0.06741246581077576, + "rewards/margins": 0.6665923595428467, + "rewards/rejected": -0.7340048551559448, + "step": 441 + }, + { + "epoch": 0.9703622392974753, + "grad_norm": 6.152930736541748, + "learning_rate": 3.1048047389991693e-06, + "logits/chosen": -2.3817009925842285, + "logits/rejected": -2.357701301574707, + "logps/chosen": -16.76677894592285, + "logps/rejected": -11.620492935180664, + "loss": 0.8242, + "rewards/accuracies": 0.75, + "rewards/chosen": 0.0717247724533081, + "rewards/margins": 0.5190142393112183, + "rewards/rejected": -0.44728946685791016, + "step": 442 + }, + { + "epoch": 0.9725576289791438, + "grad_norm": 5.161471366882324, + "learning_rate": 3.0953919607471276e-06, + "logits/chosen": -2.2455554008483887, + "logits/rejected": -2.2900137901306152, + "logps/chosen": -18.165666580200195, + "logps/rejected": -11.611608505249023, + "loss": 0.8377, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.08798500150442123, + "rewards/margins": 0.5571097135543823, + "rewards/rejected": -0.4691247344017029, + "step": 443 + }, + { + "epoch": 0.9747530186608123, + "grad_norm": 4.925886154174805, + "learning_rate": 3.0859702261229616e-06, + "logits/chosen": -2.255220651626587, + "logits/rejected": -2.2815744876861572, + "logps/chosen": -17.18117332458496, + "logps/rejected": -8.485081672668457, + "loss": 0.7083, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.16568660736083984, + "rewards/margins": 0.7109737396240234, + "rewards/rejected": -0.5452871322631836, + "step": 444 + }, + { + "epoch": 0.9769484083424808, + "grad_norm": 8.389778137207031, + "learning_rate": 3.0765396768561005e-06, + "logits/chosen": -2.2690978050231934, + "logits/rejected": -2.2897820472717285, + "logps/chosen": -16.80807876586914, + "logps/rejected": -12.906442642211914, + "loss": 0.9586, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.05918867886066437, + "rewards/margins": 0.3266395628452301, + "rewards/rejected": -0.26745086908340454, + "step": 445 + }, + { + "epoch": 0.9791437980241493, + "grad_norm": 9.920949935913086, + "learning_rate": 3.0671004548085675e-06, + "logits/chosen": -2.3090577125549316, + "logits/rejected": -2.2865028381347656, + "logps/chosen": -20.217845916748047, + "logps/rejected": -16.117212295532227, + "loss": 0.8778, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.07724549621343613, + "rewards/margins": 0.706253170967102, + "rewards/rejected": -0.6290077567100525, + "step": 446 + }, + { + "epoch": 0.9813391877058177, + "grad_norm": 8.199292182922363, + "learning_rate": 3.0576527019728485e-06, + "logits/chosen": -2.2236199378967285, + "logits/rejected": -2.326417922973633, + "logps/chosen": -24.821739196777344, + "logps/rejected": -13.166486740112305, + "loss": 0.9256, + "rewards/accuracies": 0.71875, + "rewards/chosen": -0.04876386374235153, + "rewards/margins": 0.7376790046691895, + "rewards/rejected": -0.7864428758621216, + "step": 447 + }, + { + "epoch": 0.9835345773874863, + "grad_norm": 3.362391710281372, + "learning_rate": 3.0481965604697582e-06, + "logits/chosen": 
-2.3568668365478516, + "logits/rejected": -2.349731206893921, + "logps/chosen": -21.91687774658203, + "logps/rejected": -9.241042137145996, + "loss": 0.7078, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.14988428354263306, + "rewards/margins": 0.9810056686401367, + "rewards/rejected": -0.8311214447021484, + "step": 448 + }, + { + "epoch": 0.9857299670691547, + "grad_norm": 6.33512020111084, + "learning_rate": 3.0387321725463003e-06, + "logits/chosen": -2.307635545730591, + "logits/rejected": -2.2977371215820312, + "logps/chosen": -17.04680633544922, + "logps/rejected": -9.844083786010742, + "loss": 0.7663, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.12432314455509186, + "rewards/margins": 0.5705389976501465, + "rewards/rejected": -0.4462158679962158, + "step": 449 + }, + { + "epoch": 0.9879253567508233, + "grad_norm": 5.216354846954346, + "learning_rate": 3.0292596805735275e-06, + "logits/chosen": -2.2773430347442627, + "logits/rejected": -2.349567413330078, + "logps/chosen": -18.42083740234375, + "logps/rejected": -8.746360778808594, + "loss": 0.7209, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.09063999354839325, + "rewards/margins": 0.7439864873886108, + "rewards/rejected": -0.6533465385437012, + "step": 450 + }, + { + "epoch": 0.9901207464324918, + "grad_norm": 5.72788143157959, + "learning_rate": 3.019779227044398e-06, + "logits/chosen": -2.26011061668396, + "logits/rejected": -2.315460443496704, + "logps/chosen": -19.606307983398438, + "logps/rejected": -12.85045051574707, + "loss": 0.8855, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.05291711166501045, + "rewards/margins": 0.5900472402572632, + "rewards/rejected": -0.5371301770210266, + "step": 451 + }, + { + "epoch": 0.9923161361141603, + "grad_norm": 4.74468469619751, + "learning_rate": 3.0102909545716395e-06, + "logits/chosen": -2.370033025741577, + "logits/rejected": -2.3583390712738037, + "logps/chosen": -19.241247177124023, + "logps/rejected": -13.944446563720703, + "loss": 0.878, + "rewards/accuracies": 0.71875, + "rewards/chosen": 0.06830352544784546, + "rewards/margins": 0.6422482132911682, + "rewards/rejected": -0.5739446878433228, + "step": 452 + }, + { + "epoch": 0.9945115257958288, + "grad_norm": 4.331366539001465, + "learning_rate": 3.000795005885594e-06, + "logits/chosen": -2.29002046585083, + "logits/rejected": -2.368769884109497, + "logps/chosen": -19.165603637695312, + "logps/rejected": -14.322245597839355, + "loss": 0.846, + "rewards/accuracies": 0.8125, + "rewards/chosen": 0.0362529531121254, + "rewards/margins": 0.645837664604187, + "rewards/rejected": -0.6095846891403198, + "step": 453 + }, + { + "epoch": 0.9967069154774972, + "grad_norm": 6.133034706115723, + "learning_rate": 2.9912915238320755e-06, + "logits/chosen": -2.343209743499756, + "logits/rejected": -2.2830896377563477, + "logps/chosen": -20.217222213745117, + "logps/rejected": -15.0652437210083, + "loss": 0.9206, + "rewards/accuracies": 0.78125, + "rewards/chosen": 0.018072601407766342, + "rewards/margins": 0.5864449739456177, + "rewards/rejected": -0.568372368812561, + "step": 454 + }, + { + "epoch": 0.9989023051591658, + "grad_norm": 5.871572971343994, + "learning_rate": 2.9817806513702247e-06, + "logits/chosen": -2.2338972091674805, + "logits/rejected": -2.330247402191162, + "logps/chosen": -24.56080436706543, + "logps/rejected": -14.950778007507324, + "loss": 0.8771, + "rewards/accuracies": 0.875, + "rewards/chosen": 0.06272906064987183, + "rewards/margins": 0.8400812149047852, + "rewards/rejected": 
-0.7773522138595581, + "step": 455 + } + ], + "logging_steps": 1.0, + "max_steps": 910, + "num_input_tokens_seen": 0, + "num_train_epochs": 2, + "save_steps": 455, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": false + }, + "attributes": {} + } + }, + "total_flos": 3.894805998874919e+18, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +}