{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0119760479041915,
"eval_steps": 500,
"global_step": 336,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.059880239520958084,
"grad_norm": 6.566332162525977,
"learning_rate": 7.692307692307694e-06,
"loss": 4.3864,
"step": 10
},
{
"epoch": 0.11976047904191617,
"grad_norm": 2.0437379261117132,
"learning_rate": 1.5384615384615387e-05,
"loss": 3.4794,
"step": 20
},
{
"epoch": 0.17964071856287425,
"grad_norm": 0.8566260346858398,
"learning_rate": 1.9996500732179695e-05,
"loss": 2.5375,
"step": 30
},
{
"epoch": 0.23952095808383234,
"grad_norm": 0.6117118904297301,
"learning_rate": 1.995716208873644e-05,
"loss": 1.8752,
"step": 40
},
{
"epoch": 0.2994011976047904,
"grad_norm": 0.3923875723199884,
"learning_rate": 1.9874283308955058e-05,
"loss": 1.4434,
"step": 50
},
{
"epoch": 0.3592814371257485,
"grad_norm": 0.2726465509781607,
"learning_rate": 1.9748226800652062e-05,
"loss": 1.2232,
"step": 60
},
{
"epoch": 0.41916167664670656,
"grad_norm": 0.22048277219015358,
"learning_rate": 1.957954377686475e-05,
"loss": 1.1031,
"step": 70
},
{
"epoch": 0.47904191616766467,
"grad_norm": 0.19131964365908752,
"learning_rate": 1.9368971845536844e-05,
"loss": 1.0318,
"step": 80
},
{
"epoch": 0.5389221556886228,
"grad_norm": 0.19325047947069812,
"learning_rate": 1.911743178414665e-05,
"loss": 0.9724,
"step": 90
},
{
"epoch": 0.5988023952095808,
"grad_norm": 0.18220999359482376,
"learning_rate": 1.8826023513381372e-05,
"loss": 0.942,
"step": 100
},
{
"epoch": 0.6586826347305389,
"grad_norm": 0.17750272843534337,
"learning_rate": 1.849602128746387e-05,
"loss": 0.9106,
"step": 110
},
{
"epoch": 0.718562874251497,
"grad_norm": 0.16658108783370879,
"learning_rate": 1.8128868122163125e-05,
"loss": 0.8876,
"step": 120
},
{
"epoch": 0.7784431137724551,
"grad_norm": 0.1461861518555333,
"learning_rate": 1.7726169484853438e-05,
"loss": 0.872,
"step": 130
},
{
"epoch": 0.8383233532934131,
"grad_norm": 0.14457472093324725,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.8576,
"step": 140
},
{
"epoch": 0.8982035928143712,
"grad_norm": 0.1544690749108873,
"learning_rate": 1.6821327120267567e-05,
"loss": 0.8467,
"step": 150
},
{
"epoch": 0.9580838323353293,
"grad_norm": 0.1429893393640351,
"learning_rate": 1.6323140038425842e-05,
"loss": 0.8252,
"step": 160
},
{
"epoch": 1.0179640718562875,
"grad_norm": 0.11444623635062835,
"learning_rate": 1.5797303474040332e-05,
"loss": 0.8209,
"step": 170
},
{
"epoch": 1.0778443113772456,
"grad_norm": 0.12105393067348605,
"learning_rate": 1.524611677661454e-05,
"loss": 0.8064,
"step": 180
},
{
"epoch": 1.1377245508982037,
"grad_norm": 0.11410840909819214,
"learning_rate": 1.4671990145333697e-05,
"loss": 0.7997,
"step": 190
},
{
"epoch": 1.1976047904191618,
"grad_norm": 0.1145244272492734,
"learning_rate": 1.4077434089877038e-05,
"loss": 0.7938,
"step": 200
},
{
"epoch": 1.2574850299401197,
"grad_norm": 0.12645762812759187,
"learning_rate": 1.3465048452597682e-05,
"loss": 0.7775,
"step": 210
},
{
"epoch": 1.3173652694610778,
"grad_norm": 0.11488140079430814,
"learning_rate": 1.283751104007355e-05,
"loss": 0.7767,
"step": 220
},
{
"epoch": 1.377245508982036,
"grad_norm": 0.13144847588925945,
"learning_rate": 1.2197565913740531e-05,
"loss": 0.7754,
"step": 230
},
{
"epoch": 1.437125748502994,
"grad_norm": 0.10603932803573335,
"learning_rate": 1.1548011390810016e-05,
"loss": 0.7644,
"step": 240
},
{
"epoch": 1.4970059880239521,
"grad_norm": 0.12250644964069578,
"learning_rate": 1.0891687807939707e-05,
"loss": 0.7735,
"step": 250
},
{
"epoch": 1.55688622754491,
"grad_norm": 0.1092716735846818,
"learning_rate": 1.023146510116414e-05,
"loss": 0.7646,
"step": 260
},
{
"epoch": 1.6167664670658684,
"grad_norm": 0.10437029986936892,
"learning_rate": 9.570230256394595e-06,
"loss": 0.7589,
"step": 270
},
{
"epoch": 1.6766467065868262,
"grad_norm": 0.10262505227638254,
"learning_rate": 8.910874685364275e-06,
"loss": 0.7579,
"step": 280
},
{
"epoch": 1.7365269461077846,
"grad_norm": 0.10634904902257739,
"learning_rate": 8.256281582220486e-06,
"loss": 0.7563,
"step": 290
},
{
"epoch": 1.7964071856287425,
"grad_norm": 0.10308159473170038,
"learning_rate": 7.6093133160502e-06,
"loss": 0.7541,
"step": 300
},
{
"epoch": 1.8562874251497006,
"grad_norm": 0.11967986382799979,
"learning_rate": 6.972798914468369e-06,
"loss": 0.7535,
"step": 310
},
{
"epoch": 1.9161676646706587,
"grad_norm": 0.09790252223897214,
"learning_rate": 6.349521692999945e-06,
"loss": 0.7521,
"step": 320
},
{
"epoch": 1.9760479041916168,
"grad_norm": 0.09917905729981547,
"learning_rate": 5.742207084349274e-06,
"loss": 0.748,
"step": 330
}
],
"logging_steps": 10,
"max_steps": 501,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 56,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 612599775363072.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}