{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.993607305936073,
  "eval_steps": 100,
  "global_step": 136,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 15.59375,
      "epoch": 0.0365296803652968,
      "grad_norm": 10.368077278137207,
      "kl": 0.15047144889831543,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.0295,
      "reward": 0.13515625,
      "reward_std": 0.19950873199850322,
      "rewards/accuracy_reward": 0.13203125,
      "rewards/format_reward": 0.003125,
      "step": 5
    },
    {
      "completion_length": 74.09296875,
      "epoch": 0.0730593607305936,
      "grad_norm": 6.168639183044434,
      "kl": 1.47294921875,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.1038,
      "reward": 0.51015625,
      "reward_std": 0.4266123466193676,
      "rewards/accuracy_reward": 0.23203125,
      "rewards/format_reward": 0.278125,
      "step": 10
    },
    {
      "completion_length": 109.55703125,
      "epoch": 0.1095890410958904,
      "grad_norm": 10.966035842895508,
      "kl": 0.523876953125,
      "learning_rate": 1.9996684675143132e-05,
      "loss": 0.027,
      "reward": 1.046875,
      "reward_std": 0.46739846989512446,
      "rewards/accuracy_reward": 0.215625,
      "rewards/format_reward": 0.83125,
      "step": 15
    },
    {
      "completion_length": 125.8359375,
      "epoch": 0.1461187214611872,
      "grad_norm": 5.292937278747559,
      "kl": 0.46650390625,
      "learning_rate": 1.9880878960910772e-05,
      "loss": 0.0322,
      "reward": 1.22734375,
      "reward_std": 0.3152993652969599,
      "rewards/accuracy_reward": 0.2421875,
      "rewards/format_reward": 0.98515625,
      "step": 20
    },
    {
      "completion_length": 271.60078125,
      "epoch": 0.182648401826484,
      "grad_norm": 2.0227677822113037,
      "kl": 0.32373046875,
      "learning_rate": 1.960149873671602e-05,
      "loss": 0.005,
      "reward": 1.23359375,
      "reward_std": 0.41616563573479654,
      "rewards/accuracy_reward": 0.30078125,
      "rewards/format_reward": 0.9328125,
      "step": 25
    },
    {
      "completion_length": 134.89609375,
      "epoch": 0.2191780821917808,
      "grad_norm": 2.7977349758148193,
      "kl": 0.432275390625,
      "learning_rate": 1.916316904487005e-05,
      "loss": 0.0631,
      "reward": 1.353125,
      "reward_std": 0.26668183971196413,
      "rewards/accuracy_reward": 0.3578125,
      "rewards/format_reward": 0.9953125,
      "step": 30
    },
    {
      "completion_length": 93.9765625,
      "epoch": 0.2557077625570776,
      "grad_norm": 3.0016226768493652,
      "kl": 0.50341796875,
      "learning_rate": 1.8573146280763327e-05,
      "loss": 0.0361,
      "reward": 1.3046875,
      "reward_std": 0.26828942373394965,
      "rewards/accuracy_reward": 0.31640625,
      "rewards/format_reward": 0.98828125,
      "step": 35
    },
    {
      "completion_length": 117.1453125,
      "epoch": 0.2922374429223744,
      "grad_norm": 2.1085522174835205,
      "kl": 0.496875,
      "learning_rate": 1.7841198065767107e-05,
      "loss": -0.0058,
      "reward": 1.340625,
      "reward_std": 0.29750518389046193,
      "rewards/accuracy_reward": 0.353125,
      "rewards/format_reward": 0.9875,
      "step": 40
    },
    {
      "completion_length": 77.484375,
      "epoch": 0.3287671232876712,
      "grad_norm": 1.8194400072097778,
      "kl": 0.4939453125,
      "learning_rate": 1.6979441547663434e-05,
      "loss": -0.02,
      "reward": 1.3625,
      "reward_std": 0.2868320092558861,
      "rewards/accuracy_reward": 0.3734375,
      "rewards/format_reward": 0.9890625,
      "step": 45
    },
    {
      "completion_length": 57.68671875,
      "epoch": 0.365296803652968,
      "grad_norm": 1.7973569631576538,
      "kl": 0.652001953125,
      "learning_rate": 1.6002142805483686e-05,
      "loss": 0.0459,
      "reward": 1.3265625,
      "reward_std": 0.2394094867631793,
      "rewards/accuracy_reward": 0.33671875,
      "rewards/format_reward": 0.98984375,
      "step": 50
    },
    {
      "completion_length": 64.959375,
      "epoch": 0.4018264840182648,
      "grad_norm": 2.802725076675415,
      "kl": 0.602783203125,
      "learning_rate": 1.4925480679538646e-05,
      "loss": 0.0103,
      "reward": 1.3625,
      "reward_std": 0.1871818160638213,
      "rewards/accuracy_reward": 0.37734375,
      "rewards/format_reward": 0.98515625,
      "step": 55
    },
    {
      "completion_length": 94.5359375,
      "epoch": 0.4383561643835616,
      "grad_norm": 2.600384473800659,
      "kl": 0.4912109375,
      "learning_rate": 1.3767278936351853e-05,
      "loss": 0.0076,
      "reward": 1.33203125,
      "reward_std": 0.1461961718276143,
      "rewards/accuracy_reward": 0.33828125,
      "rewards/format_reward": 0.99375,
      "step": 60
    },
    {
      "completion_length": 60.94140625,
      "epoch": 0.4748858447488584,
      "grad_norm": 2.3066651821136475,
      "kl": 0.913916015625,
      "learning_rate": 1.2546711202412287e-05,
      "loss": 0.0148,
      "reward": 1.25546875,
      "reward_std": 0.26173087432980535,
      "rewards/accuracy_reward": 0.3359375,
      "rewards/format_reward": 0.91953125,
      "step": 65
    },
    {
      "completion_length": 20.51796875,
      "epoch": 0.5114155251141552,
      "grad_norm": 13.061442375183105,
      "kl": 6.19736328125,
      "learning_rate": 1.1283983551465512e-05,
      "loss": 0.2759,
      "reward": 1.31796875,
      "reward_std": 0.2789971936494112,
      "rewards/accuracy_reward": 0.39375,
      "rewards/format_reward": 0.92421875,
      "step": 70
    },
    {
      "completion_length": 43.41796875,
      "epoch": 0.547945205479452,
      "grad_norm": 441.85443115234375,
      "kl": 2.45595703125,
      "learning_rate": 1e-05,
      "loss": 0.1115,
      "reward": 1.0578125,
      "reward_std": 0.48872011750936506,
      "rewards/accuracy_reward": 0.278125,
      "rewards/format_reward": 0.7796875,
      "step": 75
    },
    {
      "completion_length": 46.90703125,
      "epoch": 0.5844748858447488,
      "grad_norm": 1.9799107313156128,
      "kl": 1.23125,
      "learning_rate": 8.71601644853449e-06,
      "loss": 0.0292,
      "reward": 0.984375,
      "reward_std": 0.4891943013295531,
      "rewards/accuracy_reward": 0.25703125,
      "rewards/format_reward": 0.72734375,
      "step": 80
    },
    {
      "completion_length": 13.75546875,
      "epoch": 0.6210045662100456,
      "grad_norm": 4.793896198272705,
      "kl": 1.34541015625,
      "learning_rate": 7.453288797587714e-06,
      "loss": 0.0482,
      "reward": 1.31484375,
      "reward_std": 0.14597927127033472,
      "rewards/accuracy_reward": 0.33671875,
      "rewards/format_reward": 0.978125,
      "step": 85
    },
    {
      "completion_length": 13.13359375,
      "epoch": 0.6575342465753424,
      "grad_norm": 12.646719932556152,
      "kl": 1.4255859375,
      "learning_rate": 6.232721063648148e-06,
      "loss": 0.0557,
      "reward": 1.34296875,
      "reward_std": 0.10623529590666295,
      "rewards/accuracy_reward": 0.3515625,
      "rewards/format_reward": 0.99140625,
      "step": 90
    },
    {
      "completion_length": 15.2046875,
      "epoch": 0.6940639269406392,
      "grad_norm": 2.1448068618774414,
      "kl": 1.299609375,
      "learning_rate": 5.074519320461358e-06,
      "loss": 0.0404,
      "reward": 1.3171875,
      "reward_std": 0.14243474025279285,
      "rewards/accuracy_reward": 0.33125,
      "rewards/format_reward": 0.9859375,
      "step": 95
    },
    {
      "completion_length": 22.12265625,
      "epoch": 0.730593607305936,
      "grad_norm": 13.79468822479248,
      "kl": 1.4185546875,
      "learning_rate": 3.997857194516319e-06,
      "loss": 0.0562,
      "reward": 1.3484375,
      "reward_std": 0.19392489716410638,
      "rewards/accuracy_reward": 0.3953125,
      "rewards/format_reward": 0.953125,
      "step": 100
    },
    {
      "epoch": 0.730593607305936,
      "eval_completion_length": 61.36258561643836,
      "eval_kl": 2.0426084474885844,
      "eval_loss": 0.08902593702077866,
      "eval_reward": 1.2051084474885845,
      "eval_reward_std": 0.4570505227672455,
      "eval_rewards/accuracy_reward": 0.39023972602739726,
      "eval_rewards/format_reward": 0.8148687214611872,
      "eval_runtime": 25621.0164,
      "eval_samples_per_second": 0.171,
      "eval_steps_per_second": 0.043,
      "step": 100
    },
    {
      "completion_length": 50.4015625,
      "epoch": 0.7671232876712328,
      "grad_norm": 3.6549317836761475,
      "kl": 1.721875,
      "learning_rate": 3.0205584523365626e-06,
      "loss": 0.035,
      "reward": 1.27421875,
      "reward_std": 0.3447163349017501,
      "rewards/accuracy_reward": 0.38671875,
      "rewards/format_reward": 0.8875,
      "step": 105
    },
    {
      "completion_length": 43.98359375,
      "epoch": 0.8036529680365296,
      "grad_norm": 2.842573404312134,
      "kl": 1.8482421875,
      "learning_rate": 2.158801934232897e-06,
      "loss": 0.0649,
      "reward": 1.240625,
      "reward_std": 0.2724386781454086,
      "rewards/accuracy_reward": 0.321875,
      "rewards/format_reward": 0.91875,
      "step": 110
    },
    {
      "completion_length": 50.07265625,
      "epoch": 0.8401826484018264,
      "grad_norm": 5.093225479125977,
      "kl": 2.44453125,
      "learning_rate": 1.426853719236676e-06,
      "loss": 0.1351,
      "reward": 1.1875,
      "reward_std": 0.3050973150879145,
      "rewards/accuracy_reward": 0.29296875,
      "rewards/format_reward": 0.89453125,
      "step": 115
    },
    {
      "completion_length": 66.58828125,
      "epoch": 0.8767123287671232,
      "grad_norm": 3.9608869552612305,
      "kl": 2.5376953125,
      "learning_rate": 8.368309551299536e-07,
      "loss": 0.1227,
      "reward": 1.29765625,
      "reward_std": 0.35280441734939816,
      "rewards/accuracy_reward": 0.40390625,
      "rewards/format_reward": 0.89375,
      "step": 120
    },
    {
      "completion_length": 86.17265625,
      "epoch": 0.91324200913242,
      "grad_norm": 4.511857509613037,
      "kl": 2.7826171875,
      "learning_rate": 3.985012632839824e-07,
      "loss": 0.1011,
      "reward": 1.24921875,
      "reward_std": 0.31032323855906724,
      "rewards/accuracy_reward": 0.33828125,
      "rewards/format_reward": 0.9109375,
      "step": 125
    },
    {
      "completion_length": 81.040625,
      "epoch": 0.9497716894977168,
      "grad_norm": 3.5829803943634033,
      "kl": 3.201953125,
      "learning_rate": 1.1912103908922945e-07,
      "loss": 0.1484,
      "reward": 1.275,
      "reward_std": 0.31014324594289067,
      "rewards/accuracy_reward": 0.35859375,
      "rewards/format_reward": 0.91640625,
      "step": 130
    },
    {
      "completion_length": 89.2578125,
      "epoch": 0.9863013698630136,
      "grad_norm": 6.557438373565674,
      "kl": 3.0515625,
      "learning_rate": 3.315324856869584e-09,
      "loss": 0.1109,
      "reward": 1.20390625,
      "reward_std": 0.32457808069884775,
      "rewards/accuracy_reward": 0.3,
      "rewards/format_reward": 0.90390625,
      "step": 135
    },
    {
      "completion_length": 95.453125,
      "epoch": 0.993607305936073,
      "kl": 3.2041015625,
      "reward": 1.2265625,
      "reward_std": 0.43310215324163437,
      "rewards/accuracy_reward": 0.3359375,
      "rewards/format_reward": 0.890625,
      "step": 136,
      "total_flos": 0.0,
      "train_loss": 0.0634942120351993,
      "train_runtime": 123112.1165,
      "train_samples_per_second": 0.036,
      "train_steps_per_second": 0.001
    }
  ],
  "logging_steps": 5,
  "max_steps": 136,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 15,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}