{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9978143668949438,
"eval_steps": 22,
"global_step": 428,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0512895235319831,
"grad_norm": 1.549578309059143,
"learning_rate": 0.0015811388300841895,
"loss": 1.5548,
"step": 22
},
{
"epoch": 0.1025790470639662,
"grad_norm": 0.3865428864955902,
"learning_rate": 0.0008304547985373997,
"loss": 1.3048,
"step": 44
},
{
"epoch": 0.1538685705959493,
"grad_norm": 0.4011533260345459,
"learning_rate": 0.0006262242910851496,
"loss": 1.1538,
"step": 66
},
{
"epoch": 0.2051580941279324,
"grad_norm": 0.4274662435054779,
"learning_rate": 0.0005234239225902136,
"loss": 1.1093,
"step": 88
},
{
"epoch": 0.25644761765991547,
"grad_norm": 0.33565089106559753,
"learning_rate": 0.00046126560401444256,
"loss": 1.0236,
"step": 110
},
{
"epoch": 0.3077371411918986,
"grad_norm": 0.39469897747039795,
"learning_rate": 0.00041522739926869986,
"loss": 1.0122,
"step": 132
},
{
"epoch": 0.35902666472388167,
"grad_norm": 0.4309040307998657,
"learning_rate": 0.0003806934938134405,
"loss": 0.9694,
"step": 154
},
{
"epoch": 0.4103161882558648,
"grad_norm": 0.4251372218132019,
"learning_rate": 0.00035355339059327376,
"loss": 0.9781,
"step": 176
},
{
"epoch": 0.46160571178784787,
"grad_norm": 0.3717755079269409,
"learning_rate": 0.00033149677206589795,
"loss": 0.9694,
"step": 198
},
{
"epoch": 0.5128952353198309,
"grad_norm": 0.39009973406791687,
"learning_rate": 0.0003131121455425748,
"loss": 0.9111,
"step": 220
},
{
"epoch": 0.5641847588518141,
"grad_norm": 0.3686709403991699,
"learning_rate": 0.0002974820586543648,
"loss": 0.9077,
"step": 242
},
{
"epoch": 0.6154742823837972,
"grad_norm": 0.4044100344181061,
"learning_rate": 0.0002839809171235324,
"loss": 0.8875,
"step": 264
},
{
"epoch": 0.6667638059157802,
"grad_norm": 0.3807663917541504,
"learning_rate": 0.0002721655269759087,
"loss": 0.8618,
"step": 286
},
{
"epoch": 0.7180533294477633,
"grad_norm": 0.4056909680366516,
"learning_rate": 0.0002617119612951068,
"loss": 0.8406,
"step": 308
},
{
"epoch": 0.7693428529797465,
"grad_norm": 0.3789248466491699,
"learning_rate": 0.00025237723256253436,
"loss": 0.8282,
"step": 330
},
{
"epoch": 0.8206323765117296,
"grad_norm": 0.3973967134952545,
"learning_rate": 0.00024397501823713327,
"loss": 0.8097,
"step": 352
},
{
"epoch": 0.8719219000437126,
"grad_norm": 0.4864844083786011,
"learning_rate": 0.00023635972962353274,
"loss": 0.7718,
"step": 374
},
{
"epoch": 0.9232114235756957,
"grad_norm": 0.571861207485199,
"learning_rate": 0.00022941573387056174,
"loss": 0.77,
"step": 396
},
{
"epoch": 0.9745009471076789,
"grad_norm": 0.3887988328933716,
"learning_rate": 0.00022304986837273525,
"loss": 0.7795,
"step": 418
},
{
"epoch": 0.9978143668949438,
"step": 428,
"total_flos": 4.3469664994124104e+18,
"train_loss": 0.9656199194560541,
"train_runtime": 3172.7582,
"train_samples_per_second": 17.303,
"train_steps_per_second": 0.135
}
],
"logging_steps": 22,
"max_steps": 428,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 22,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.3469664994124104e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}