{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9221902017291066,
  "eval_steps": 500,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.3389199674129486,
      "learning_rate": 5e-07,
      "logits/chosen": 0.14791223406791687,
      "logits/rejected": 0.28315749764442444,
      "logps/chosen": -134.61978149414062,
      "logps/ref_chosen": -134.61978149414062,
      "logps/ref_rejected": -173.30970764160156,
      "logps/rejected": -173.30970764160156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.1844380403458213,
      "grad_norm": 0.2829269468784332,
      "learning_rate": 4.849231551964771e-07,
      "logits/chosen": 0.14062762260437012,
      "logits/rejected": 0.2469622939825058,
      "logps/chosen": -130.12448120117188,
      "logps/ref_chosen": -130.12448120117188,
      "logps/ref_rejected": -163.5547637939453,
      "logps/rejected": -163.5547637939453,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.276657060518732,
      "grad_norm": 0.32888761162757874,
      "learning_rate": 4.415111107797445e-07,
      "logits/chosen": 0.06239762529730797,
      "logits/rejected": 0.24671903252601624,
      "logps/chosen": -129.64842224121094,
      "logps/ref_chosen": -129.6482391357422,
      "logps/ref_rejected": -167.99598693847656,
      "logps/rejected": -167.98696899414062,
      "loss": 0.6932,
      "rewards/accuracies": 0.4921875,
      "rewards/chosen": -1.7636848497204483e-06,
      "rewards/margins": -9.190676792059094e-05,
      "rewards/rejected": 9.01430903468281e-05,
      "step": 3
    },
    {
      "epoch": 0.3688760806916426,
      "grad_norm": 0.39236217737197876,
      "learning_rate": 3.75e-07,
      "logits/chosen": 0.11659017950296402,
      "logits/rejected": 0.26041609048843384,
      "logps/chosen": -132.6394500732422,
      "logps/ref_chosen": -132.77403259277344,
      "logps/ref_rejected": -168.05410766601562,
      "logps/rejected": -167.96951293945312,
      "loss": 0.6929,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": 0.0013458358589559793,
      "rewards/margins": 0.0004997455980628729,
      "rewards/rejected": 0.0008460902608931065,
      "step": 4
    },
    {
      "epoch": 0.4610951008645533,
      "grad_norm": 0.41242164373397827,
      "learning_rate": 2.934120444167326e-07,
      "logits/chosen": 0.13683654367923737,
      "logits/rejected": 0.28962600231170654,
      "logps/chosen": -135.99209594726562,
      "logps/ref_chosen": -135.82839965820312,
      "logps/ref_rejected": -161.7675323486328,
      "logps/rejected": -162.00277709960938,
      "loss": 0.6928,
      "rewards/accuracies": 0.5546875,
      "rewards/chosen": -0.0016370582161471248,
      "rewards/margins": 0.0007155549246817827,
      "rewards/rejected": -0.0023526130244135857,
      "step": 5
    },
    {
      "epoch": 0.553314121037464,
      "grad_norm": 0.37831488251686096,
      "learning_rate": 2.065879555832674e-07,
      "logits/chosen": 0.09734513610601425,
      "logits/rejected": 0.20619672536849976,
      "logps/chosen": -137.687744140625,
      "logps/ref_chosen": -137.62083435058594,
      "logps/ref_rejected": -165.1855926513672,
      "logps/rejected": -165.27906799316406,
      "loss": 0.693,
      "rewards/accuracies": 0.5390625,
      "rewards/chosen": -0.0006691145244985819,
      "rewards/margins": 0.0002655756543390453,
      "rewards/rejected": -0.0009346901206299663,
      "step": 6
    },
    {
      "epoch": 0.6455331412103746,
      "grad_norm": 0.44487157464027405,
      "learning_rate": 1.2500000000000005e-07,
      "logits/chosen": 0.1292731761932373,
      "logits/rejected": 0.26378369331359863,
      "logps/chosen": -140.36293029785156,
      "logps/ref_chosen": -140.14544677734375,
      "logps/ref_rejected": -173.2746124267578,
      "logps/rejected": -173.66172790527344,
      "loss": 0.6923,
      "rewards/accuracies": 0.5859375,
      "rewards/chosen": -0.0021748137660324574,
      "rewards/margins": 0.0016963660018518567,
      "rewards/rejected": -0.0038711796514689922,
      "step": 7
    },
    {
      "epoch": 0.7377521613832853,
      "grad_norm": 0.3908422887325287,
      "learning_rate": 5.848888922025552e-08,
      "logits/chosen": 0.08686134964227676,
      "logits/rejected": 0.25962409377098083,
      "logps/chosen": -132.61734008789062,
      "logps/ref_chosen": -132.29315185546875,
      "logps/ref_rejected": -175.901611328125,
      "logps/rejected": -176.36581420898438,
      "loss": 0.6925,
      "rewards/accuracies": 0.6015625,
      "rewards/chosen": -0.0032418149057775736,
      "rewards/margins": 0.0014001112431287766,
      "rewards/rejected": -0.0046419259160757065,
      "step": 8
    },
    {
      "epoch": 0.829971181556196,
      "grad_norm": 0.28832051157951355,
      "learning_rate": 1.507684480352292e-08,
      "logits/chosen": 0.11615337431430817,
      "logits/rejected": 0.2154950648546219,
      "logps/chosen": -132.73593139648438,
      "logps/ref_chosen": -132.26475524902344,
      "logps/ref_rejected": -158.7421112060547,
      "logps/rejected": -159.4190673828125,
      "loss": 0.6921,
      "rewards/accuracies": 0.6171875,
      "rewards/chosen": -0.004711654037237167,
      "rewards/margins": 0.0020579977426677942,
      "rewards/rejected": -0.006769651547074318,
      "step": 9
    },
    {
      "epoch": 0.9221902017291066,
      "grad_norm": 0.3114199936389923,
      "learning_rate": 0.0,
      "logits/chosen": 0.13599035143852234,
      "logits/rejected": 0.26238346099853516,
      "logps/chosen": -138.9654541015625,
      "logps/ref_chosen": -138.58920288085938,
      "logps/ref_rejected": -173.31874084472656,
      "logps/rejected": -173.79449462890625,
      "loss": 0.6927,
      "rewards/accuracies": 0.5703125,
      "rewards/chosen": -0.0037624253891408443,
      "rewards/margins": 0.0009951507672667503,
      "rewards/rejected": -0.004757576156407595,
      "step": 10
    },
    {
      "epoch": 0.9221902017291066,
      "step": 10,
      "total_flos": 0.0,
      "train_loss": 0.692774099111557,
      "train_runtime": 614.9448,
      "train_samples_per_second": 2.255,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}