{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 285,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 2.671875,
      "learning_rate": 1.7241379310344828e-07,
      "logits/chosen": -2.715578079223633,
      "logits/rejected": -3.198270320892334,
      "logps/chosen": -181.673828125,
      "logps/rejected": -250.83145141601562,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.04,
      "grad_norm": 2.75,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": -2.6499929428100586,
      "logits/rejected": -3.116462230682373,
      "logps/chosen": -179.62359619140625,
      "logps/rejected": -252.53973388671875,
      "loss": 0.6925,
      "rewards/accuracies": 0.6111111044883728,
      "rewards/chosen": 0.0010814073029905558,
      "rewards/margins": 0.0013816155260428786,
      "rewards/rejected": -0.00030020822305232286,
      "step": 10
    },
    {
      "epoch": 0.07,
      "grad_norm": 2.5625,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": -2.6711201667785645,
      "logits/rejected": -3.1086363792419434,
      "logps/chosen": -176.72164916992188,
      "logps/rejected": -252.67080688476562,
      "loss": 0.6743,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.023805765435099602,
      "rewards/margins": 0.03818682208657265,
      "rewards/rejected": -0.014381052926182747,
      "step": 20
    },
    {
      "epoch": 0.11,
      "grad_norm": 1.78125,
      "learning_rate": 4.999811754597862e-06,
      "logits/chosen": -2.642437696456909,
      "logits/rejected": -3.1196322441101074,
      "logps/chosen": -167.26516723632812,
      "logps/rejected": -260.6128845214844,
      "loss": 0.6003,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.1062479019165039,
      "rewards/margins": 0.19668832421302795,
      "rewards/rejected": -0.09044040739536285,
      "step": 30
    },
    {
      "epoch": 0.14,
      "grad_norm": 1.4921875,
      "learning_rate": 4.97725658856945e-06,
      "logits/chosen": -2.6548495292663574,
      "logits/rejected": -3.105985403060913,
      "logps/chosen": -157.27615356445312,
      "logps/rejected": -282.587646484375,
      "loss": 0.4653,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.22060635685920715,
      "rewards/margins": 0.5277924537658691,
      "rewards/rejected": -0.30718618631362915,
      "step": 40
    },
    {
      "epoch": 0.18,
      "grad_norm": 1.15625,
      "learning_rate": 4.917441177612131e-06,
      "logits/chosen": -2.6114723682403564,
      "logits/rejected": -3.054443120956421,
      "logps/chosen": -146.25564575195312,
      "logps/rejected": -317.78265380859375,
      "loss": 0.3246,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.314788281917572,
      "rewards/margins": 0.967236340045929,
      "rewards/rejected": -0.6524480581283569,
      "step": 50
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.97265625,
      "learning_rate": 4.82126520118304e-06,
      "logits/chosen": -2.6283130645751953,
      "logits/rejected": -3.036774158477783,
      "logps/chosen": -138.53135681152344,
      "logps/rejected": -358.7559509277344,
      "loss": 0.2051,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.41738367080688477,
      "rewards/margins": 1.4921326637268066,
      "rewards/rejected": -1.0747489929199219,
      "step": 60
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.69921875,
      "learning_rate": 4.6901752354885166e-06,
      "logits/chosen": -2.6115269660949707,
      "logits/rejected": -3.0476138591766357,
      "logps/chosen": -129.70721435546875,
      "logps/rejected": -413.200927734375,
      "loss": 0.1157,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.5094510316848755,
      "rewards/margins": 2.1198441982269287,
      "rewards/rejected": -1.6103931665420532,
      "step": 70
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.439453125,
      "learning_rate": 4.526142995631488e-06,
      "logits/chosen": -2.642693281173706,
      "logits/rejected": -3.0582263469696045,
      "logps/chosen": -123.13417053222656,
      "logps/rejected": -475.8509216308594,
      "loss": 0.0593,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.572167694568634,
      "rewards/margins": 2.8161401748657227,
      "rewards/rejected": -2.2439725399017334,
      "step": 80
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.306640625,
      "learning_rate": 4.331635679181032e-06,
      "logits/chosen": -2.616441249847412,
      "logits/rejected": -3.0742900371551514,
      "logps/chosen": -119.6812744140625,
      "logps/rejected": -535.9256591796875,
      "loss": 0.0314,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.61542809009552,
      "rewards/margins": 3.4634997844696045,
      "rewards/rejected": -2.848071813583374,
      "step": 90
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.224609375,
      "learning_rate": 4.109578857224478e-06,
      "logits/chosen": -2.6391441822052,
      "logits/rejected": -3.0866036415100098,
      "logps/chosen": -115.92496490478516,
      "logps/rejected": -598.8836059570312,
      "loss": 0.0166,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6349981427192688,
      "rewards/margins": 4.107515811920166,
      "rewards/rejected": -3.472517728805542,
      "step": 100
    },
    {
      "epoch": 0.35,
      "eval_logits/chosen": -2.680793523788452,
      "eval_logits/rejected": -3.105761766433716,
      "eval_logps/chosen": -96.58148956298828,
      "eval_logps/rejected": -579.1800537109375,
      "eval_loss": 0.01978849433362484,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.4884725511074066,
      "eval_rewards/margins": 3.9327571392059326,
      "eval_rewards/rejected": -3.444284439086914,
      "eval_runtime": 1.2795,
      "eval_samples_per_second": 3.908,
      "eval_steps_per_second": 2.345,
      "step": 100
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.1416015625,
      "learning_rate": 3.863312471055116e-06,
      "logits/chosen": -2.5967559814453125,
      "logits/rejected": -3.022758722305298,
      "logps/chosen": -115.83799743652344,
      "logps/rejected": -666.7772827148438,
      "loss": 0.0084,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6381937265396118,
      "rewards/margins": 4.796027183532715,
      "rewards/rejected": -4.157833099365234,
      "step": 110
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.10400390625,
      "learning_rate": 3.5965405963463197e-06,
      "logits/chosen": -2.6488442420959473,
      "logits/rejected": -3.06384015083313,
      "logps/chosen": -113.5273208618164,
      "logps/rejected": -722.9757080078125,
      "loss": 0.0047,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6482743620872498,
      "rewards/margins": 5.373518943786621,
      "rewards/rejected": -4.725245475769043,
      "step": 120
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.07958984375,
      "learning_rate": 3.313275730405658e-06,
      "logits/chosen": -2.6456308364868164,
      "logits/rejected": -3.099287509918213,
      "logps/chosen": -114.421630859375,
      "logps/rejected": -765.133544921875,
      "loss": 0.0031,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6431270837783813,
      "rewards/margins": 5.790853977203369,
      "rewards/rejected": -5.147727012634277,
      "step": 130
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.056396484375,
      "learning_rate": 3.0177784404805466e-06,
      "logits/chosen": -2.6465344429016113,
      "logits/rejected": -3.0662121772766113,
      "logps/chosen": -115.8419418334961,
      "logps/rejected": -792.1008911132812,
      "loss": 0.0024,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.642734169960022,
      "rewards/margins": 6.044482231140137,
      "rewards/rejected": -5.401747703552246,
      "step": 140
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.06591796875,
      "learning_rate": 2.7144932808611002e-06,
      "logits/chosen": -2.597626209259033,
      "logits/rejected": -3.0344064235687256,
      "logps/chosen": -115.4371337890625,
      "logps/rejected": -800.051025390625,
      "loss": 0.0022,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6369476914405823,
      "rewards/margins": 6.132936000823975,
      "rewards/rejected": -5.495988368988037,
      "step": 150
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.057373046875,
      "learning_rate": 2.407981942646603e-06,
      "logits/chosen": -2.659702777862549,
      "logits/rejected": -3.115243434906006,
      "logps/chosen": -115.50006103515625,
      "logps/rejected": -816.8475341796875,
      "loss": 0.0019,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6319659948348999,
      "rewards/margins": 6.285677909851074,
      "rewards/rejected": -5.653711795806885,
      "step": 160
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.05029296875,
      "learning_rate": 2.102854641665347e-06,
      "logits/chosen": -2.6373775005340576,
      "logits/rejected": -3.08939790725708,
      "logps/chosen": -115.76806640625,
      "logps/rejected": -823.41650390625,
      "loss": 0.0018,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6388428807258606,
      "rewards/margins": 6.353511810302734,
      "rewards/rejected": -5.7146687507629395,
      "step": 170
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.052734375,
      "learning_rate": 1.8037007765373677e-06,
      "logits/chosen": -2.663148880004883,
      "logits/rejected": -3.0792253017425537,
      "logps/chosen": -116.19815826416016,
      "logps/rejected": -830.9599609375,
      "loss": 0.0016,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6323127150535583,
      "rewards/margins": 6.430493354797363,
      "rewards/rejected": -5.798180103302002,
      "step": 180
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.042724609375,
      "learning_rate": 1.5150198998473802e-06,
      "logits/chosen": -2.6086456775665283,
      "logits/rejected": -3.0565147399902344,
      "logps/chosen": -116.57249450683594,
      "logps/rejected": -834.7310791015625,
      "loss": 0.0016,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6343868970870972,
      "rewards/margins": 6.452385902404785,
      "rewards/rejected": -5.817999362945557,
      "step": 190
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.04443359375,
      "learning_rate": 1.2411540406857064e-06,
      "logits/chosen": -2.620572566986084,
      "logits/rejected": -3.073166608810425,
      "logps/chosen": -114.90836334228516,
      "logps/rejected": -838.9874877929688,
      "loss": 0.0015,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6308305263519287,
      "rewards/margins": 6.5031633377075195,
      "rewards/rejected": -5.872332572937012,
      "step": 200
    },
    {
      "epoch": 0.7,
      "eval_logits/chosen": -2.6815271377563477,
      "eval_logits/rejected": -3.1155121326446533,
      "eval_logps/chosen": -96.9991683959961,
      "eval_logps/rejected": -740.9360961914062,
      "eval_loss": 0.005451836623251438,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 0.4842957556247711,
      "eval_rewards/margins": 5.546140670776367,
      "eval_rewards/rejected": -5.061844825744629,
      "eval_runtime": 1.2858,
      "eval_samples_per_second": 3.889,
      "eval_steps_per_second": 2.333,
      "step": 200
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.044921875,
      "learning_rate": 9.862223964891864e-07,
      "logits/chosen": -2.659987449645996,
      "logits/rejected": -3.083799362182617,
      "logps/chosen": -114.6219482421875,
      "logps/rejected": -828.9215087890625,
      "loss": 0.002,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6267526149749756,
      "rewards/margins": 6.408988952636719,
      "rewards/rejected": -5.782236576080322,
      "step": 210
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.041748046875,
      "learning_rate": 7.54059376477568e-07,
      "logits/chosen": -2.6489715576171875,
      "logits/rejected": -3.0990288257598877,
      "logps/chosen": -116.3112564086914,
      "logps/rejected": -840.1389770507812,
      "loss": 0.0015,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6445231437683105,
      "rewards/margins": 6.526413917541504,
      "rewards/rejected": -5.881890773773193,
      "step": 220
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.03955078125,
      "learning_rate": 5.481569285697638e-07,
      "logits/chosen": -2.6626932621002197,
      "logits/rejected": -3.06781005859375,
      "logps/chosen": -115.39896392822266,
      "logps/rejected": -836.1140747070312,
      "loss": 0.0016,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6213386058807373,
      "rewards/margins": 6.4759931564331055,
      "rewards/rejected": -5.854653835296631,
      "step": 230
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.038818359375,
      "learning_rate": 3.71612017236837e-07,
      "logits/chosen": -2.6279659271240234,
      "logits/rejected": -3.0500528812408447,
      "logps/chosen": -116.8084945678711,
      "logps/rejected": -845.3555908203125,
      "loss": 0.0014,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6305276155471802,
      "rewards/margins": 6.566590309143066,
      "rewards/rejected": -5.936062335968018,
      "step": 240
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.04443359375,
      "learning_rate": 2.2708004227369407e-07,
      "logits/chosen": -2.6384856700897217,
      "logits/rejected": -3.0602006912231445,
      "logps/chosen": -115.619140625,
      "logps/rejected": -842.5126953125,
      "loss": 0.0015,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6297703981399536,
      "rewards/margins": 6.527515411376953,
      "rewards/rejected": -5.897744655609131,
      "step": 250
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.041015625,
      "learning_rate": 1.1673489911451536e-07,
      "logits/chosen": -2.635697603225708,
      "logits/rejected": -3.0929272174835205,
      "logps/chosen": -116.23138427734375,
      "logps/rejected": -843.4679565429688,
      "loss": 0.0015,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6368875503540039,
      "rewards/margins": 6.544775485992432,
      "rewards/rejected": -5.907887935638428,
      "step": 260
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.037353515625,
      "learning_rate": 4.223628142195929e-08,
      "logits/chosen": -2.6346564292907715,
      "logits/rejected": -3.0773911476135254,
      "logps/chosen": -115.94686126708984,
      "logps/rejected": -848.7951049804688,
      "loss": 0.0014,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6426523923873901,
      "rewards/margins": 6.600114345550537,
      "rewards/rejected": -5.957463264465332,
      "step": 270
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.044677734375,
      "learning_rate": 4.704717749627052e-09,
      "logits/chosen": -2.6303486824035645,
      "logits/rejected": -3.084428310394287,
      "logps/chosen": -115.82963562011719,
      "logps/rejected": -837.4007568359375,
      "loss": 0.0015,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.6384350061416626,
      "rewards/margins": 6.50278377532959,
      "rewards/rejected": -5.864348888397217,
      "step": 280
    },
    {
      "epoch": 1.0,
      "step": 285,
      "total_flos": 0.0,
      "train_loss": 0.11324987118704277,
      "train_runtime": 748.1588,
      "train_samples_per_second": 1.522,
      "train_steps_per_second": 0.381
    }
  ],
  "logging_steps": 10,
  "max_steps": 285,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}