{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 501,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.059880239520958084,
"grad_norm": 6.566332162525977,
"learning_rate": 7.692307692307694e-06,
"loss": 4.3864,
"step": 10
},
{
"epoch": 0.11976047904191617,
"grad_norm": 2.0437379261117132,
"learning_rate": 1.5384615384615387e-05,
"loss": 3.4794,
"step": 20
},
{
"epoch": 0.17964071856287425,
"grad_norm": 0.8566260346858398,
"learning_rate": 1.9996500732179695e-05,
"loss": 2.5375,
"step": 30
},
{
"epoch": 0.23952095808383234,
"grad_norm": 0.6117118904297301,
"learning_rate": 1.995716208873644e-05,
"loss": 1.8752,
"step": 40
},
{
"epoch": 0.2994011976047904,
"grad_norm": 0.3923875723199884,
"learning_rate": 1.9874283308955058e-05,
"loss": 1.4434,
"step": 50
},
{
"epoch": 0.3592814371257485,
"grad_norm": 0.2726465509781607,
"learning_rate": 1.9748226800652062e-05,
"loss": 1.2232,
"step": 60
},
{
"epoch": 0.41916167664670656,
"grad_norm": 0.22048277219015358,
"learning_rate": 1.957954377686475e-05,
"loss": 1.1031,
"step": 70
},
{
"epoch": 0.47904191616766467,
"grad_norm": 0.19131964365908752,
"learning_rate": 1.9368971845536844e-05,
"loss": 1.0318,
"step": 80
},
{
"epoch": 0.5389221556886228,
"grad_norm": 0.19325047947069812,
"learning_rate": 1.911743178414665e-05,
"loss": 0.9724,
"step": 90
},
{
"epoch": 0.5988023952095808,
"grad_norm": 0.18220999359482376,
"learning_rate": 1.8826023513381372e-05,
"loss": 0.942,
"step": 100
},
{
"epoch": 0.6586826347305389,
"grad_norm": 0.17750272843534337,
"learning_rate": 1.849602128746387e-05,
"loss": 0.9106,
"step": 110
},
{
"epoch": 0.718562874251497,
"grad_norm": 0.16658108783370879,
"learning_rate": 1.8128868122163125e-05,
"loss": 0.8876,
"step": 120
},
{
"epoch": 0.7784431137724551,
"grad_norm": 0.1461861518555333,
"learning_rate": 1.7726169484853438e-05,
"loss": 0.872,
"step": 130
},
{
"epoch": 0.8383233532934131,
"grad_norm": 0.14457472093324725,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.8576,
"step": 140
},
{
"epoch": 0.8982035928143712,
"grad_norm": 0.1544690749108873,
"learning_rate": 1.6821327120267567e-05,
"loss": 0.8467,
"step": 150
},
{
"epoch": 0.9580838323353293,
"grad_norm": 0.1429893393640351,
"learning_rate": 1.6323140038425842e-05,
"loss": 0.8252,
"step": 160
},
{
"epoch": 1.0179640718562875,
"grad_norm": 0.11444623635062835,
"learning_rate": 1.5797303474040332e-05,
"loss": 0.8209,
"step": 170
},
{
"epoch": 1.0778443113772456,
"grad_norm": 0.12105393067348605,
"learning_rate": 1.524611677661454e-05,
"loss": 0.8064,
"step": 180
},
{
"epoch": 1.1377245508982037,
"grad_norm": 0.11410840909819214,
"learning_rate": 1.4671990145333697e-05,
"loss": 0.7997,
"step": 190
},
{
"epoch": 1.1976047904191618,
"grad_norm": 0.1145244272492734,
"learning_rate": 1.4077434089877038e-05,
"loss": 0.7938,
"step": 200
},
{
"epoch": 1.2574850299401197,
"grad_norm": 0.12645762812759187,
"learning_rate": 1.3465048452597682e-05,
"loss": 0.7775,
"step": 210
},
{
"epoch": 1.3173652694610778,
"grad_norm": 0.11488140079430814,
"learning_rate": 1.283751104007355e-05,
"loss": 0.7767,
"step": 220
},
{
"epoch": 1.377245508982036,
"grad_norm": 0.13144847588925945,
"learning_rate": 1.2197565913740531e-05,
"loss": 0.7754,
"step": 230
},
{
"epoch": 1.437125748502994,
"grad_norm": 0.10603932803573335,
"learning_rate": 1.1548011390810016e-05,
"loss": 0.7644,
"step": 240
},
{
"epoch": 1.4970059880239521,
"grad_norm": 0.12250644964069578,
"learning_rate": 1.0891687807939707e-05,
"loss": 0.7735,
"step": 250
},
{
"epoch": 1.55688622754491,
"grad_norm": 0.1092716735846818,
"learning_rate": 1.023146510116414e-05,
"loss": 0.7646,
"step": 260
},
{
"epoch": 1.6167664670658684,
"grad_norm": 0.10437029986936892,
"learning_rate": 9.570230256394595e-06,
"loss": 0.7589,
"step": 270
},
{
"epoch": 1.6766467065868262,
"grad_norm": 0.10262505227638254,
"learning_rate": 8.910874685364275e-06,
"loss": 0.7579,
"step": 280
},
{
"epoch": 1.7365269461077846,
"grad_norm": 0.10634904902257739,
"learning_rate": 8.256281582220486e-06,
"loss": 0.7563,
"step": 290
},
{
"epoch": 1.7964071856287425,
"grad_norm": 0.10308159473170038,
"learning_rate": 7.6093133160502e-06,
"loss": 0.7541,
"step": 300
},
{
"epoch": 1.8562874251497006,
"grad_norm": 0.11967986382799979,
"learning_rate": 6.972798914468369e-06,
"loss": 0.7535,
"step": 310
},
{
"epoch": 1.9161676646706587,
"grad_norm": 0.09790252223897214,
"learning_rate": 6.349521692999945e-06,
"loss": 0.7521,
"step": 320
},
{
"epoch": 1.9760479041916168,
"grad_norm": 0.09917905729981547,
"learning_rate": 5.742207084349274e-06,
"loss": 0.748,
"step": 330
},
{
"epoch": 2.035928143712575,
"grad_norm": 0.09219293703087832,
"learning_rate": 5.153510720776354e-06,
"loss": 0.7458,
"step": 340
},
{
"epoch": 2.095808383233533,
"grad_norm": 0.09531637506680916,
"learning_rate": 4.58600682169262e-06,
"loss": 0.7417,
"step": 350
},
{
"epoch": 2.155688622754491,
"grad_norm": 0.099401468944997,
"learning_rate": 4.042176937254474e-06,
"loss": 0.7381,
"step": 360
},
{
"epoch": 2.215568862275449,
"grad_norm": 0.09333850738653798,
"learning_rate": 3.5243990971758124e-06,
"loss": 0.7368,
"step": 370
},
{
"epoch": 2.2754491017964074,
"grad_norm": 0.10118930739249864,
"learning_rate": 3.034937412209178e-06,
"loss": 0.7407,
"step": 380
},
{
"epoch": 2.3353293413173652,
"grad_norm": 0.09991557691778238,
"learning_rate": 2.5759321737655017e-06,
"loss": 0.7348,
"step": 390
},
{
"epoch": 2.3952095808383236,
"grad_norm": 0.08992918338455534,
"learning_rate": 2.149390494964323e-06,
"loss": 0.73,
"step": 400
},
{
"epoch": 2.4550898203592815,
"grad_norm": 0.0976318233532428,
"learning_rate": 1.7571775340388275e-06,
"loss": 0.7327,
"step": 410
},
{
"epoch": 2.5149700598802394,
"grad_norm": 0.0925071697104646,
"learning_rate": 1.4010083384734308e-06,
"loss": 0.7249,
"step": 420
},
{
"epoch": 2.5748502994011977,
"grad_norm": 0.09013756739384593,
"learning_rate": 1.0824403455375287e-06,
"loss": 0.7361,
"step": 430
},
{
"epoch": 2.6347305389221556,
"grad_norm": 0.10044956018733328,
"learning_rate": 8.02866572008566e-07,
"loss": 0.7403,
"step": 440
},
{
"epoch": 2.694610778443114,
"grad_norm": 0.09704814807171967,
"learning_rate": 5.63509522864123e-07,
"loss": 0.7332,
"step": 450
},
{
"epoch": 2.754491017964072,
"grad_norm": 0.11375533603468758,
"learning_rate": 3.6541584557868604e-07,
"loss": 0.7329,
"step": 460
},
{
"epoch": 2.81437125748503,
"grad_norm": 0.10095526024482018,
"learning_rate": 2.0945175340055356e-07,
"loss": 0.7348,
"step": 470
},
{
"epoch": 2.874251497005988,
"grad_norm": 0.0869826588136221,
"learning_rate": 9.629923762170091e-08,
"loss": 0.7303,
"step": 480
},
{
"epoch": 2.934131736526946,
"grad_norm": 0.08752359244376111,
"learning_rate": 2.645308540337843e-08,
"loss": 0.7358,
"step": 490
},
{
"epoch": 2.9940119760479043,
"grad_norm": 0.09120038510133631,
"learning_rate": 2.1871619775404308e-10,
"loss": 0.7293,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 501,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 56,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 913430022193152.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}