{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.15808974886784685,
  "eval_steps": 500,
  "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.8317140340805054,
      "learning_rate": 6.25e-08,
      "logits/chosen": 9.990612030029297,
      "logits/rejected": 10.698101997375488,
      "logps/chosen": -102.88545989990234,
      "logps/ref_chosen": -102.88545989990234,
      "logps/ref_rejected": -121.84871673583984,
      "logps/rejected": -121.84871673583984,
      "loss": 0.4978,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "sft_loss": 0.36753880977630615,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.2926611006259918,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.211905479431152,
      "logits/rejected": 11.06594467163086,
      "logps/chosen": -107.70349884033203,
      "logps/ref_chosen": -107.70349884033203,
      "logps/ref_rejected": -121.89966583251953,
      "logps/rejected": -121.89966583251953,
      "loss": 0.5233,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "sft_loss": 0.41013145446777344,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.3214672803878784,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.036933898925781,
      "logits/rejected": 11.024795532226562,
      "logps/chosen": -108.28660583496094,
      "logps/ref_chosen": -107.98188781738281,
      "logps/ref_rejected": -124.51527404785156,
      "logps/rejected": -124.7075424194336,
      "loss": 0.5249,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -0.0030471612699329853,
      "rewards/margins": -0.001124453847296536,
      "rewards/rejected": -0.0019227075390517712,
      "sft_loss": 0.4123927652835846,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.7239967584609985,
      "learning_rate": 2.5e-07,
      "logits/chosen": 9.835367202758789,
      "logits/rejected": 10.854362487792969,
      "logps/chosen": -109.89208221435547,
      "logps/ref_chosen": -109.20836639404297,
      "logps/ref_rejected": -119.23908996582031,
      "logps/rejected": -119.69357299804688,
      "loss": 0.5208,
      "rewards/accuracies": 0.453125,
      "rewards/chosen": -0.006837163120508194,
      "rewards/margins": -0.0022922407370060682,
      "rewards/rejected": -0.004544922150671482,
      "sft_loss": 0.4051341712474823,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.9211171269416809,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.207865715026855,
      "logits/rejected": 10.963621139526367,
      "logps/chosen": -104.02088928222656,
      "logps/ref_chosen": -103.87680053710938,
      "logps/ref_rejected": -118.41618347167969,
      "logps/rejected": -118.34123992919922,
      "loss": 0.5001,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.0014409449649974704,
      "rewards/margins": -0.002190487692132592,
      "rewards/rejected": 0.0007495426689274609,
      "sft_loss": 0.3706103563308716,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.7005686163902283,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.686239242553711,
      "logits/rejected": 11.462547302246094,
      "logps/chosen": -108.1670913696289,
      "logps/ref_chosen": -107.58968353271484,
      "logps/ref_rejected": -122.07303619384766,
      "logps/rejected": -122.58065032958984,
      "loss": 0.5134,
      "rewards/accuracies": 0.484375,
      "rewards/chosen": -0.005774094723165035,
      "rewards/margins": -0.0006979470490477979,
      "rewards/rejected": -0.005076148081570864,
      "sft_loss": 0.39328432083129883,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.8737779259681702,
      "learning_rate": 4.375e-07,
      "logits/chosen": 10.040006637573242,
      "logits/rejected": 10.747206687927246,
      "logps/chosen": -107.037353515625,
      "logps/ref_chosen": -107.42727661132812,
      "logps/ref_rejected": -116.87063598632812,
      "logps/rejected": -116.42378997802734,
      "loss": 0.5022,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": 0.0038992553018033504,
      "rewards/margins": -0.0005691752885468304,
      "rewards/rejected": 0.004468431230634451,
      "sft_loss": 0.37478095293045044,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.46314188838005066,
      "learning_rate": 5e-07,
      "logits/chosen": 10.196634292602539,
      "logits/rejected": 11.089151382446289,
      "logps/chosen": -104.9737548828125,
      "logps/ref_chosen": -105.60282135009766,
      "logps/ref_rejected": -119.53916931152344,
      "logps/rejected": -118.9640884399414,
      "loss": 0.5047,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": 0.006290654186159372,
      "rewards/margins": 0.0005399012006819248,
      "rewards/rejected": 0.0057507529854774475,
      "sft_loss": 0.37916287779808044,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.6739105582237244,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.174893379211426,
      "logits/rejected": 11.141225814819336,
      "logps/chosen": -104.830078125,
      "logps/ref_chosen": -105.46086120605469,
      "logps/ref_rejected": -119.00373840332031,
      "logps/rejected": -118.41084289550781,
      "loss": 0.5073,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": 0.006307927425950766,
      "rewards/margins": 0.00037892413092777133,
      "rewards/rejected": 0.005929003469645977,
      "sft_loss": 0.3834277391433716,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.32560959458351135,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 10.061932563781738,
      "logits/rejected": 10.884778022766113,
      "logps/chosen": -102.10572052001953,
      "logps/ref_chosen": -104.21009826660156,
      "logps/ref_rejected": -118.9209213256836,
      "logps/rejected": -117.1087646484375,
      "loss": 0.4814,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.02104363776743412,
      "rewards/margins": 0.00292214541696012,
      "rewards/rejected": 0.018121493980288506,
      "sft_loss": 0.34126415848731995,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.8538271188735962,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 9.989534378051758,
      "logits/rejected": 11.177312850952148,
      "logps/chosen": -102.71998596191406,
      "logps/ref_chosen": -105.94319152832031,
      "logps/ref_rejected": -122.76007843017578,
      "logps/rejected": -119.8812026977539,
      "loss": 0.5112,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": 0.03223201259970665,
      "rewards/margins": 0.0034431489184498787,
      "rewards/rejected": 0.028788862749934196,
      "sft_loss": 0.3909299075603485,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.599084734916687,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 9.907474517822266,
      "logits/rejected": 10.599059104919434,
      "logps/chosen": -105.25791931152344,
      "logps/ref_chosen": -109.08442687988281,
      "logps/ref_rejected": -121.41947174072266,
      "logps/rejected": -117.7214584350586,
      "loss": 0.4934,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": 0.03826504573225975,
      "rewards/margins": 0.00128496577963233,
      "rewards/rejected": 0.036980077624320984,
      "sft_loss": 0.3606122136116028,
      "step": 12
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}