{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2742857142857143,
"eval_steps": 500,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009142857142857144,
"grad_norm": 1.5446112155914307,
"learning_rate": 0.0,
"logits/chosen": -2.09375,
"logits/rejected": -1.9130859375,
"logps/chosen": -171.0,
"logps/rejected": -164.25,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.018285714285714287,
"grad_norm": 1.517324447631836,
"learning_rate": 6e-06,
"logits/chosen": -2.052734375,
"logits/rejected": -1.8740234375,
"logps/chosen": -161.875,
"logps/rejected": -162.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 2
},
{
"epoch": 0.027428571428571427,
"grad_norm": 1.5487558841705322,
"learning_rate": 1.2e-05,
"logits/chosen": -2.14453125,
"logits/rejected": -1.91796875,
"logps/chosen": -168.125,
"logps/rejected": -163.875,
"loss": 0.6941,
"rewards/accuracies": 0.1953125,
"rewards/chosen": -0.001964569091796875,
"rewards/margins": -0.0023555755615234375,
"rewards/rejected": 0.00038909912109375,
"step": 3
},
{
"epoch": 0.036571428571428574,
"grad_norm": 1.6327964067459106,
"learning_rate": 1.8e-05,
"logits/chosen": -2.080078125,
"logits/rejected": -1.8662109375,
"logps/chosen": -165.875,
"logps/rejected": -164.25,
"loss": 0.6873,
"rewards/accuracies": 0.3125,
"rewards/chosen": 0.0077304840087890625,
"rewards/margins": 0.011632919311523438,
"rewards/rejected": -0.0039081573486328125,
"step": 4
},
{
"epoch": 0.045714285714285714,
"grad_norm": 1.6707208156585693,
"learning_rate": 2.4e-05,
"logits/chosen": -2.134765625,
"logits/rejected": -1.8623046875,
"logps/chosen": -167.0,
"logps/rejected": -164.5,
"loss": 0.6895,
"rewards/accuracies": 0.296875,
"rewards/chosen": 0.0017681121826171875,
"rewards/margins": 0.00839996337890625,
"rewards/rejected": -0.00664520263671875,
"step": 5
},
{
"epoch": 0.054857142857142854,
"grad_norm": 1.641469120979309,
"learning_rate": 3e-05,
"logits/chosen": -2.13671875,
"logits/rejected": -1.892578125,
"logps/chosen": -173.5,
"logps/rejected": -174.25,
"loss": 0.6798,
"rewards/accuracies": 0.390625,
"rewards/chosen": -0.0023403167724609375,
"rewards/margins": 0.026607513427734375,
"rewards/rejected": -0.028924942016601562,
"step": 6
},
{
"epoch": 0.064,
"grad_norm": 1.6482449769973755,
"learning_rate": 2.988172051971717e-05,
"logits/chosen": -2.125,
"logits/rejected": -1.9150390625,
"logps/chosen": -171.0,
"logps/rejected": -170.375,
"loss": 0.6661,
"rewards/accuracies": 0.5546875,
"rewards/chosen": 0.007883071899414062,
"rewards/margins": 0.055572509765625,
"rewards/rejected": -0.0476837158203125,
"step": 7
},
{
"epoch": 0.07314285714285715,
"grad_norm": 1.6890122890472412,
"learning_rate": 2.9528747416929467e-05,
"logits/chosen": -2.1015625,
"logits/rejected": -1.8837890625,
"logps/chosen": -170.25,
"logps/rejected": -167.125,
"loss": 0.6536,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.0128936767578125,
"rewards/margins": 0.082977294921875,
"rewards/rejected": -0.070037841796875,
"step": 8
},
{
"epoch": 0.08228571428571428,
"grad_norm": 1.704086422920227,
"learning_rate": 2.894664728832377e-05,
"logits/chosen": -2.1376953125,
"logits/rejected": -1.93359375,
"logps/chosen": -178.75,
"logps/rejected": -171.0,
"loss": 0.6328,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.0140838623046875,
"rewards/margins": 0.12908935546875,
"rewards/rejected": -0.11492919921875,
"step": 9
},
{
"epoch": 0.09142857142857143,
"grad_norm": 1.6337130069732666,
"learning_rate": 2.8144600200657953e-05,
"logits/chosen": -2.01953125,
"logits/rejected": -1.8466796875,
"logps/chosen": -164.5,
"logps/rejected": -163.125,
"loss": 0.6089,
"rewards/accuracies": 0.8359375,
"rewards/chosen": 0.017217636108398438,
"rewards/margins": 0.18310546875,
"rewards/rejected": -0.1658935546875,
"step": 10
},
{
"epoch": 0.10057142857142858,
"grad_norm": 1.685200810432434,
"learning_rate": 2.7135254915624213e-05,
"logits/chosen": -2.123046875,
"logits/rejected": -1.9365234375,
"logps/chosen": -177.125,
"logps/rejected": -166.625,
"loss": 0.5797,
"rewards/accuracies": 0.8984375,
"rewards/chosen": 0.03128814697265625,
"rewards/margins": 0.249267578125,
"rewards/rejected": -0.21826171875,
"step": 11
},
{
"epoch": 0.10971428571428571,
"grad_norm": 1.6057872772216797,
"learning_rate": 2.5934529411321174e-05,
"logits/chosen": -2.12109375,
"logits/rejected": -1.900390625,
"logps/chosen": -165.5,
"logps/rejected": -172.125,
"loss": 0.5458,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.0287322998046875,
"rewards/margins": 0.33935546875,
"rewards/rejected": -0.310546875,
"step": 12
},
{
"epoch": 0.11885714285714286,
"grad_norm": 1.7491884231567383,
"learning_rate": 2.4561359846230346e-05,
"logits/chosen": -2.1162109375,
"logits/rejected": -1.9013671875,
"logps/chosen": -163.0,
"logps/rejected": -171.625,
"loss": 0.4861,
"rewards/accuracies": 0.9765625,
"rewards/chosen": 0.046966552734375,
"rewards/margins": 0.488525390625,
"rewards/rejected": -0.441162109375,
"step": 13
},
{
"epoch": 0.128,
"grad_norm": 1.6292978525161743,
"learning_rate": 2.303740192468495e-05,
"logits/chosen": -2.119140625,
"logits/rejected": -1.833984375,
"logps/chosen": -172.125,
"logps/rejected": -176.5,
"loss": 0.4913,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.008152008056640625,
"rewards/margins": 0.478515625,
"rewards/rejected": -0.486328125,
"step": 14
},
{
"epoch": 0.13714285714285715,
"grad_norm": 1.6105176210403442,
"learning_rate": 2.138668937347609e-05,
"logits/chosen": -2.0908203125,
"logits/rejected": -1.8671875,
"logps/chosen": -161.75,
"logps/rejected": -165.75,
"loss": 0.4545,
"rewards/accuracies": 0.953125,
"rewards/chosen": 0.03673362731933594,
"rewards/margins": 0.589111328125,
"rewards/rejected": -0.551025390625,
"step": 15
},
{
"epoch": 0.1462857142857143,
"grad_norm": 1.5813552141189575,
"learning_rate": 1.963525491562421e-05,
"logits/chosen": -2.0673828125,
"logits/rejected": -1.837890625,
"logps/chosen": -171.5,
"logps/rejected": -174.0,
"loss": 0.4158,
"rewards/accuracies": 0.9765625,
"rewards/chosen": -0.025379180908203125,
"rewards/margins": 0.70458984375,
"rewards/rejected": -0.73095703125,
"step": 16
},
{
"epoch": 0.15542857142857142,
"grad_norm": 1.8560917377471924,
"learning_rate": 1.781071971878587e-05,
"logits/chosen": -2.140625,
"logits/rejected": -1.8603515625,
"logps/chosen": -173.25,
"logps/rejected": -177.125,
"loss": 0.4231,
"rewards/accuracies": 0.90625,
"rewards/chosen": -0.037933349609375,
"rewards/margins": 0.70654296875,
"rewards/rejected": -0.7451171875,
"step": 17
},
{
"epoch": 0.16457142857142856,
"grad_norm": 1.6670029163360596,
"learning_rate": 1.5941857792939702e-05,
"logits/chosen": -2.111328125,
"logits/rejected": -1.8349609375,
"logps/chosen": -172.125,
"logps/rejected": -173.0,
"loss": 0.3809,
"rewards/accuracies": 0.9765625,
"rewards/chosen": -0.0503997802734375,
"rewards/margins": 0.837890625,
"rewards/rejected": -0.88818359375,
"step": 18
},
{
"epoch": 0.1737142857142857,
"grad_norm": 1.6855627298355103,
"learning_rate": 1.40581422070603e-05,
"logits/chosen": -2.0361328125,
"logits/rejected": -1.7958984375,
"logps/chosen": -175.5,
"logps/rejected": -179.75,
"loss": 0.3473,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.0648040771484375,
"rewards/margins": 0.98095703125,
"rewards/rejected": -1.044921875,
"step": 19
},
{
"epoch": 0.18285714285714286,
"grad_norm": 1.6223222017288208,
"learning_rate": 1.2189280281214128e-05,
"logits/chosen": -2.0947265625,
"logits/rejected": -1.8310546875,
"logps/chosen": -165.375,
"logps/rejected": -179.25,
"loss": 0.3286,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.139434814453125,
"rewards/margins": 1.03759765625,
"rewards/rejected": -1.177734375,
"step": 20
},
{
"epoch": 0.192,
"grad_norm": 1.4339145421981812,
"learning_rate": 1.036474508437579e-05,
"logits/chosen": -2.091796875,
"logits/rejected": -1.888671875,
"logps/chosen": -166.875,
"logps/rejected": -175.25,
"loss": 0.3566,
"rewards/accuracies": 0.9296875,
"rewards/chosen": -0.17706298828125,
"rewards/margins": 0.97216796875,
"rewards/rejected": -1.1484375,
"step": 21
},
{
"epoch": 0.20114285714285715,
"grad_norm": 1.6682533025741577,
"learning_rate": 8.61331062652391e-06,
"logits/chosen": -2.091796875,
"logits/rejected": -1.853515625,
"logps/chosen": -168.0,
"logps/rejected": -176.625,
"loss": 0.3369,
"rewards/accuracies": 0.9765625,
"rewards/chosen": -0.11624336242675781,
"rewards/margins": 1.03857421875,
"rewards/rejected": -1.1552734375,
"step": 22
},
{
"epoch": 0.2102857142857143,
"grad_norm": 1.6513200998306274,
"learning_rate": 6.962598075315047e-06,
"logits/chosen": -2.078125,
"logits/rejected": -1.884765625,
"logps/chosen": -165.25,
"logps/rejected": -177.75,
"loss": 0.329,
"rewards/accuracies": 0.953125,
"rewards/chosen": -0.180419921875,
"rewards/margins": 1.0673828125,
"rewards/rejected": -1.248046875,
"step": 23
},
{
"epoch": 0.21942857142857142,
"grad_norm": 1.5114519596099854,
"learning_rate": 5.438640153769654e-06,
"logits/chosen": -2.09375,
"logits/rejected": -1.85546875,
"logps/chosen": -168.375,
"logps/rejected": -177.25,
"loss": 0.3037,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.1710205078125,
"rewards/margins": 1.16796875,
"rewards/rejected": -1.3388671875,
"step": 24
},
{
"epoch": 0.22857142857142856,
"grad_norm": 1.4401888847351074,
"learning_rate": 4.06547058867883e-06,
"logits/chosen": -2.0791015625,
"logits/rejected": -1.8212890625,
"logps/chosen": -167.625,
"logps/rejected": -181.0,
"loss": 0.297,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.1322021484375,
"rewards/margins": 1.23388671875,
"rewards/rejected": -1.365234375,
"step": 25
},
{
"epoch": 0.2377142857142857,
"grad_norm": 1.4569915533065796,
"learning_rate": 2.86474508437579e-06,
"logits/chosen": -2.0400390625,
"logits/rejected": -1.822265625,
"logps/chosen": -168.125,
"logps/rejected": -179.375,
"loss": 0.2977,
"rewards/accuracies": 0.9765625,
"rewards/chosen": -0.184814453125,
"rewards/margins": 1.21728515625,
"rewards/rejected": -1.40234375,
"step": 26
},
{
"epoch": 0.24685714285714286,
"grad_norm": 1.4981111288070679,
"learning_rate": 1.8553997993420495e-06,
"logits/chosen": -2.107421875,
"logits/rejected": -1.8564453125,
"logps/chosen": -162.875,
"logps/rejected": -176.875,
"loss": 0.2708,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.13507080078125,
"rewards/margins": 1.3369140625,
"rewards/rejected": -1.47265625,
"step": 27
},
{
"epoch": 0.256,
"grad_norm": 1.4918054342269897,
"learning_rate": 1.0533527116762298e-06,
"logits/chosen": -2.083984375,
"logits/rejected": -1.8564453125,
"logps/chosen": -171.75,
"logps/rejected": -180.875,
"loss": 0.279,
"rewards/accuracies": 0.984375,
"rewards/chosen": -0.164794921875,
"rewards/margins": 1.2939453125,
"rewards/rejected": -1.4560546875,
"step": 28
},
{
"epoch": 0.2651428571428571,
"grad_norm": 1.4401075839996338,
"learning_rate": 4.7125258307053385e-07,
"logits/chosen": -2.0546875,
"logits/rejected": -1.83984375,
"logps/chosen": -170.25,
"logps/rejected": -180.375,
"loss": 0.2681,
"rewards/accuracies": 0.984375,
"rewards/chosen": -0.218505859375,
"rewards/margins": 1.3427734375,
"rewards/rejected": -1.5595703125,
"step": 29
},
{
"epoch": 0.2742857142857143,
"grad_norm": 1.3953702449798584,
"learning_rate": 1.1827948028283353e-07,
"logits/chosen": -2.158203125,
"logits/rejected": -1.90625,
"logps/chosen": -167.625,
"logps/rejected": -180.0,
"loss": 0.2901,
"rewards/accuracies": 0.96875,
"rewards/chosen": -0.16025543212890625,
"rewards/margins": 1.291015625,
"rewards/rejected": -1.4521484375,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}