{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.964936886395512,
"eval_steps": 100,
"global_step": 356,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11220196353436185,
"grad_norm": 1.1029870510101318,
"learning_rate": 5e-05,
"loss": 2.5659,
"mean_token_accuracy": 0.47926636449992654,
"num_tokens": 37367.0,
"step": 10
},
{
"epoch": 0.2244039270687237,
"grad_norm": 2.981795310974121,
"learning_rate": 0.00010555555555555557,
"loss": 2.0395,
"mean_token_accuracy": 0.5484138417989015,
"num_tokens": 73517.0,
"step": 20
},
{
"epoch": 0.33660589060308554,
"grad_norm": 0.5173138976097107,
"learning_rate": 0.0001611111111111111,
"loss": 1.5375,
"mean_token_accuracy": 0.6410307548940182,
"num_tokens": 109542.0,
"step": 30
},
{
"epoch": 0.4488078541374474,
"grad_norm": 0.4973823130130768,
"learning_rate": 0.000198125,
"loss": 1.481,
"mean_token_accuracy": 0.6483925141394138,
"num_tokens": 145327.0,
"step": 40
},
{
"epoch": 0.5610098176718092,
"grad_norm": 0.605583131313324,
"learning_rate": 0.00019187500000000002,
"loss": 1.3906,
"mean_token_accuracy": 0.661427416652441,
"num_tokens": 180932.0,
"step": 50
},
{
"epoch": 0.6732117812061711,
"grad_norm": 0.5373088717460632,
"learning_rate": 0.000185625,
"loss": 1.3255,
"mean_token_accuracy": 0.669424007833004,
"num_tokens": 217215.0,
"step": 60
},
{
"epoch": 0.7854137447405329,
"grad_norm": 0.5484739542007446,
"learning_rate": 0.000179375,
"loss": 1.315,
"mean_token_accuracy": 0.6726369611918926,
"num_tokens": 254200.0,
"step": 70
},
{
"epoch": 0.8976157082748948,
"grad_norm": 0.5383081436157227,
"learning_rate": 0.000173125,
"loss": 1.2989,
"mean_token_accuracy": 0.6707686208188534,
"num_tokens": 290771.0,
"step": 80
},
{
"epoch": 1.0,
"grad_norm": 2.1042652130126953,
"learning_rate": 0.000166875,
"loss": 1.353,
"mean_token_accuracy": 0.6659621212580432,
"num_tokens": 323338.0,
"step": 90
},
{
"epoch": 1.1122019635343618,
"grad_norm": 0.5776804089546204,
"learning_rate": 0.00016062500000000001,
"loss": 1.2447,
"mean_token_accuracy": 0.6804150842130184,
"num_tokens": 360543.0,
"step": 100
},
{
"epoch": 1.2244039270687237,
"grad_norm": 0.6110250949859619,
"learning_rate": 0.000154375,
"loss": 1.2908,
"mean_token_accuracy": 0.6726092718541622,
"num_tokens": 397295.0,
"step": 110
},
{
"epoch": 1.3366058906030855,
"grad_norm": 0.5095610022544861,
"learning_rate": 0.000148125,
"loss": 1.2549,
"mean_token_accuracy": 0.6794664606451988,
"num_tokens": 434747.0,
"step": 120
},
{
"epoch": 1.4488078541374474,
"grad_norm": 0.6921575665473938,
"learning_rate": 0.000141875,
"loss": 1.2156,
"mean_token_accuracy": 0.6838382914662361,
"num_tokens": 471034.0,
"step": 130
},
{
"epoch": 1.5610098176718092,
"grad_norm": 0.6114996075630188,
"learning_rate": 0.000135625,
"loss": 1.2229,
"mean_token_accuracy": 0.6844779521226882,
"num_tokens": 507771.0,
"step": 140
},
{
"epoch": 1.673211781206171,
"grad_norm": 0.7110440731048584,
"learning_rate": 0.00012937500000000001,
"loss": 1.1964,
"mean_token_accuracy": 0.6940846800804138,
"num_tokens": 543317.0,
"step": 150
},
{
"epoch": 1.785413744740533,
"grad_norm": 0.6362674236297607,
"learning_rate": 0.000123125,
"loss": 1.2173,
"mean_token_accuracy": 0.6877869322896004,
"num_tokens": 579484.0,
"step": 160
},
{
"epoch": 1.8976157082748948,
"grad_norm": 0.5968947410583496,
"learning_rate": 0.000116875,
"loss": 1.1781,
"mean_token_accuracy": 0.6954927623271943,
"num_tokens": 614929.0,
"step": 170
},
{
"epoch": 2.0,
"grad_norm": 2.4199445247650146,
"learning_rate": 0.000110625,
"loss": 1.1646,
"mean_token_accuracy": 0.6967783880560365,
"num_tokens": 646676.0,
"step": 180
},
{
"epoch": 2.112201963534362,
"grad_norm": 0.7061767578125,
"learning_rate": 0.000104375,
"loss": 1.1006,
"mean_token_accuracy": 0.7113809175789356,
"num_tokens": 682566.0,
"step": 190
},
{
"epoch": 2.2244039270687237,
"grad_norm": 0.7149446606636047,
"learning_rate": 9.8125e-05,
"loss": 1.1557,
"mean_token_accuracy": 0.6986343055963516,
"num_tokens": 719538.0,
"step": 200
},
{
"epoch": 2.3366058906030855,
"grad_norm": 0.7239488959312439,
"learning_rate": 9.1875e-05,
"loss": 1.1258,
"mean_token_accuracy": 0.7038173094391823,
"num_tokens": 756020.0,
"step": 210
},
{
"epoch": 2.4488078541374474,
"grad_norm": 0.818574070930481,
"learning_rate": 8.5625e-05,
"loss": 1.0903,
"mean_token_accuracy": 0.7100009344518184,
"num_tokens": 791787.0,
"step": 220
},
{
"epoch": 2.5610098176718092,
"grad_norm": 0.6714032292366028,
"learning_rate": 7.9375e-05,
"loss": 1.1066,
"mean_token_accuracy": 0.7094940036535263,
"num_tokens": 827575.0,
"step": 230
},
{
"epoch": 2.673211781206171,
"grad_norm": 0.7658008933067322,
"learning_rate": 7.3125e-05,
"loss": 1.1222,
"mean_token_accuracy": 0.7040122233331203,
"num_tokens": 864519.0,
"step": 240
},
{
"epoch": 2.785413744740533,
"grad_norm": 0.7659981846809387,
"learning_rate": 6.6875e-05,
"loss": 1.1106,
"mean_token_accuracy": 0.7076892741024494,
"num_tokens": 900626.0,
"step": 250
},
{
"epoch": 2.897615708274895,
"grad_norm": 0.7530002593994141,
"learning_rate": 6.0624999999999996e-05,
"loss": 1.1112,
"mean_token_accuracy": 0.7089583992958068,
"num_tokens": 936428.0,
"step": 260
},
{
"epoch": 3.0,
"grad_norm": 2.5998833179473877,
"learning_rate": 5.4375e-05,
"loss": 1.1442,
"mean_token_accuracy": 0.7058552944496886,
"num_tokens": 970014.0,
"step": 270
},
{
"epoch": 3.112201963534362,
"grad_norm": 0.8477901220321655,
"learning_rate": 4.8125000000000004e-05,
"loss": 1.0422,
"mean_token_accuracy": 0.7182675130665302,
"num_tokens": 1005733.0,
"step": 280
},
{
"epoch": 3.2244039270687237,
"grad_norm": 0.832841157913208,
"learning_rate": 4.1875e-05,
"loss": 1.0555,
"mean_token_accuracy": 0.7220007188618183,
"num_tokens": 1042301.0,
"step": 290
},
{
"epoch": 3.3366058906030855,
"grad_norm": 0.855139970779419,
"learning_rate": 3.5625000000000005e-05,
"loss": 1.0527,
"mean_token_accuracy": 0.7194023981690407,
"num_tokens": 1079434.0,
"step": 300
},
{
"epoch": 3.4488078541374474,
"grad_norm": 0.9042227864265442,
"learning_rate": 2.9375000000000003e-05,
"loss": 1.0098,
"mean_token_accuracy": 0.7282766819000244,
"num_tokens": 1114936.0,
"step": 310
},
{
"epoch": 3.5610098176718092,
"grad_norm": 0.8887130618095398,
"learning_rate": 2.3125000000000003e-05,
"loss": 1.0106,
"mean_token_accuracy": 0.7245936967432499,
"num_tokens": 1151776.0,
"step": 320
},
{
"epoch": 3.673211781206171,
"grad_norm": 0.8810842633247375,
"learning_rate": 1.6875000000000004e-05,
"loss": 1.0078,
"mean_token_accuracy": 0.7305177293717862,
"num_tokens": 1187017.0,
"step": 330
},
{
"epoch": 3.785413744740533,
"grad_norm": 0.9642019867897034,
"learning_rate": 1.0625e-05,
"loss": 1.0294,
"mean_token_accuracy": 0.7268829271197319,
"num_tokens": 1223241.0,
"step": 340
},
{
"epoch": 3.897615708274895,
"grad_norm": 0.8219223022460938,
"learning_rate": 4.375e-06,
"loss": 1.0295,
"mean_token_accuracy": 0.7224250309169292,
"num_tokens": 1260361.0,
"step": 350
}
],
"logging_steps": 10,
"max_steps": 356,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.817406737670144e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}