{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.2786873824287605,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0027868738242876052,
"grad_norm": 3.341921806335449,
"learning_rate": 2.2222222222222225e-06,
"loss": 3.1415,
"step": 5
},
{
"epoch": 0.0055737476485752105,
"grad_norm": 3.4862101078033447,
"learning_rate": 5e-06,
"loss": 3.04,
"step": 10
},
{
"epoch": 0.008360621472862817,
"grad_norm": 3.363572597503662,
"learning_rate": 7.777777777777777e-06,
"loss": 3.019,
"step": 15
},
{
"epoch": 0.011147495297150421,
"grad_norm": 3.5916693210601807,
"learning_rate": 1.0555555555555555e-05,
"loss": 3.073,
"step": 20
},
{
"epoch": 0.013934369121438027,
"grad_norm": 3.8997910022735596,
"learning_rate": 1.3333333333333333e-05,
"loss": 3.0359,
"step": 25
},
{
"epoch": 0.016721242945725634,
"grad_norm": 3.789405345916748,
"learning_rate": 1.6111111111111115e-05,
"loss": 2.9568,
"step": 30
},
{
"epoch": 0.019508116770013236,
"grad_norm": 2.785924196243286,
"learning_rate": 1.888888888888889e-05,
"loss": 2.6633,
"step": 35
},
{
"epoch": 0.022294990594300842,
"grad_norm": 2.715925693511963,
"learning_rate": 2.1666666666666667e-05,
"loss": 2.3416,
"step": 40
},
{
"epoch": 0.025081864418588447,
"grad_norm": 1.851042628288269,
"learning_rate": 2.4444444444444445e-05,
"loss": 2.304,
"step": 45
},
{
"epoch": 0.027868738242876053,
"grad_norm": 1.1627306938171387,
"learning_rate": 2.7222222222222223e-05,
"loss": 2.0913,
"step": 50
},
{
"epoch": 0.03065561206716366,
"grad_norm": 0.8980898857116699,
"learning_rate": 3e-05,
"loss": 1.9108,
"step": 55
},
{
"epoch": 0.03344248589145127,
"grad_norm": 0.741324245929718,
"learning_rate": 3.277777777777778e-05,
"loss": 1.989,
"step": 60
},
{
"epoch": 0.03622935971573887,
"grad_norm": 0.6890819668769836,
"learning_rate": 3.555555555555556e-05,
"loss": 1.8792,
"step": 65
},
{
"epoch": 0.03901623354002647,
"grad_norm": 0.6898017525672913,
"learning_rate": 3.8333333333333334e-05,
"loss": 1.7895,
"step": 70
},
{
"epoch": 0.04180310736431408,
"grad_norm": 0.6752148866653442,
"learning_rate": 4.111111111111111e-05,
"loss": 1.8742,
"step": 75
},
{
"epoch": 0.044589981188601684,
"grad_norm": 0.6477967500686646,
"learning_rate": 4.388888888888889e-05,
"loss": 1.7473,
"step": 80
},
{
"epoch": 0.04737685501288929,
"grad_norm": 0.6459152698516846,
"learning_rate": 4.666666666666667e-05,
"loss": 1.8558,
"step": 85
},
{
"epoch": 0.050163728837176895,
"grad_norm": 0.6237108707427979,
"learning_rate": 4.9444444444444446e-05,
"loss": 1.7931,
"step": 90
},
{
"epoch": 0.0529506026614645,
"grad_norm": 0.6388494968414307,
"learning_rate": 5.222222222222223e-05,
"loss": 1.7712,
"step": 95
},
{
"epoch": 0.055737476485752106,
"grad_norm": 0.686677098274231,
"learning_rate": 5.500000000000001e-05,
"loss": 1.7316,
"step": 100
},
{
"epoch": 0.05852435031003971,
"grad_norm": 0.612424910068512,
"learning_rate": 5.7777777777777776e-05,
"loss": 1.7602,
"step": 105
},
{
"epoch": 0.06131122413432732,
"grad_norm": 0.8961934447288513,
"learning_rate": 6.055555555555555e-05,
"loss": 1.8706,
"step": 110
},
{
"epoch": 0.06409809795861493,
"grad_norm": 0.7244030833244324,
"learning_rate": 6.333333333333333e-05,
"loss": 1.7701,
"step": 115
},
{
"epoch": 0.06688497178290254,
"grad_norm": 0.6804441809654236,
"learning_rate": 6.611111111111111e-05,
"loss": 1.7833,
"step": 120
},
{
"epoch": 0.06967184560719013,
"grad_norm": 0.6717159152030945,
"learning_rate": 6.88888888888889e-05,
"loss": 1.8445,
"step": 125
},
{
"epoch": 0.07245871943147773,
"grad_norm": 0.6639799475669861,
"learning_rate": 7.166666666666667e-05,
"loss": 1.7847,
"step": 130
},
{
"epoch": 0.07524559325576534,
"grad_norm": 0.7095969915390015,
"learning_rate": 7.444444444444444e-05,
"loss": 1.7281,
"step": 135
},
{
"epoch": 0.07803246708005294,
"grad_norm": 0.8189035654067993,
"learning_rate": 7.722222222222223e-05,
"loss": 1.7678,
"step": 140
},
{
"epoch": 0.08081934090434055,
"grad_norm": 0.6709702610969543,
"learning_rate": 8e-05,
"loss": 1.8645,
"step": 145
},
{
"epoch": 0.08360621472862816,
"grad_norm": 0.743190348148346,
"learning_rate": 8.277777777777778e-05,
"loss": 1.8086,
"step": 150
},
{
"epoch": 0.08639308855291576,
"grad_norm": 0.669023871421814,
"learning_rate": 8.555555555555556e-05,
"loss": 1.7844,
"step": 155
},
{
"epoch": 0.08917996237720337,
"grad_norm": 0.8071898221969604,
"learning_rate": 8.833333333333333e-05,
"loss": 1.7379,
"step": 160
},
{
"epoch": 0.09196683620149097,
"grad_norm": 0.7679445743560791,
"learning_rate": 9.111111111111112e-05,
"loss": 1.706,
"step": 165
},
{
"epoch": 0.09475371002577858,
"grad_norm": 0.7896930575370789,
"learning_rate": 9.388888888888889e-05,
"loss": 1.7752,
"step": 170
},
{
"epoch": 0.09754058385006618,
"grad_norm": 0.7258396744728088,
"learning_rate": 9.666666666666667e-05,
"loss": 1.7635,
"step": 175
},
{
"epoch": 0.10032745767435379,
"grad_norm": 0.8085169196128845,
"learning_rate": 9.944444444444446e-05,
"loss": 1.7449,
"step": 180
},
{
"epoch": 0.1031143314986414,
"grad_norm": 0.7673636674880981,
"learning_rate": 9.999848639521432e-05,
"loss": 1.7286,
"step": 185
},
{
"epoch": 0.105901205322929,
"grad_norm": 0.7478857040405273,
"learning_rate": 9.999233753283091e-05,
"loss": 1.7203,
"step": 190
},
{
"epoch": 0.1086880791472166,
"grad_norm": 0.8994764685630798,
"learning_rate": 9.998145939378577e-05,
"loss": 1.6964,
"step": 195
},
{
"epoch": 0.11147495297150421,
"grad_norm": 0.7851970791816711,
"learning_rate": 9.996585300715116e-05,
"loss": 1.7513,
"step": 200
},
{
"epoch": 0.11426182679579182,
"grad_norm": 0.7743813395500183,
"learning_rate": 9.994551984929175e-05,
"loss": 1.7099,
"step": 205
},
{
"epoch": 0.11704870062007942,
"grad_norm": 0.7080249786376953,
"learning_rate": 9.992046184372492e-05,
"loss": 1.7431,
"step": 210
},
{
"epoch": 0.11983557444436703,
"grad_norm": 0.744627058506012,
"learning_rate": 9.989068136093873e-05,
"loss": 1.7159,
"step": 215
},
{
"epoch": 0.12262244826865464,
"grad_norm": 0.7715389132499695,
"learning_rate": 9.985618121816779e-05,
"loss": 1.7696,
"step": 220
},
{
"epoch": 0.12540932209294225,
"grad_norm": 0.7663665413856506,
"learning_rate": 9.981696467912664e-05,
"loss": 1.7205,
"step": 225
},
{
"epoch": 0.12819619591722986,
"grad_norm": 0.7923425436019897,
"learning_rate": 9.97730354537011e-05,
"loss": 1.7136,
"step": 230
},
{
"epoch": 0.13098306974151747,
"grad_norm": 0.7929289937019348,
"learning_rate": 9.972439769759722e-05,
"loss": 1.6462,
"step": 235
},
{
"epoch": 0.13376994356580507,
"grad_norm": 0.845177948474884,
"learning_rate": 9.967105601194823e-05,
"loss": 1.8031,
"step": 240
},
{
"epoch": 0.13655681739009268,
"grad_norm": 0.7540026307106018,
"learning_rate": 9.961301544287922e-05,
"loss": 1.6899,
"step": 245
},
{
"epoch": 0.13934369121438026,
"grad_norm": 0.8806189894676208,
"learning_rate": 9.955028148102979e-05,
"loss": 1.6526,
"step": 250
},
{
"epoch": 0.14213056503866786,
"grad_norm": 0.8206574320793152,
"learning_rate": 9.948286006103466e-05,
"loss": 1.8233,
"step": 255
},
{
"epoch": 0.14491743886295547,
"grad_norm": 0.8984567523002625,
"learning_rate": 9.941075756096226e-05,
"loss": 1.7252,
"step": 260
},
{
"epoch": 0.14770431268724307,
"grad_norm": 0.7411919832229614,
"learning_rate": 9.933398080171123e-05,
"loss": 1.7339,
"step": 265
},
{
"epoch": 0.15049118651153068,
"grad_norm": 0.8030174970626831,
"learning_rate": 9.925253704636543e-05,
"loss": 1.6934,
"step": 270
},
{
"epoch": 0.15327806033581828,
"grad_norm": 0.7879084944725037,
"learning_rate": 9.916643399950656e-05,
"loss": 1.7197,
"step": 275
},
{
"epoch": 0.1560649341601059,
"grad_norm": 0.8534114956855774,
"learning_rate": 9.907567980648549e-05,
"loss": 1.6941,
"step": 280
},
{
"epoch": 0.1588518079843935,
"grad_norm": 0.74381023645401,
"learning_rate": 9.898028305265169e-05,
"loss": 1.7919,
"step": 285
},
{
"epoch": 0.1616386818086811,
"grad_norm": 0.8476956486701965,
"learning_rate": 9.888025276254096e-05,
"loss": 1.7417,
"step": 290
},
{
"epoch": 0.1644255556329687,
"grad_norm": 0.8212200403213501,
"learning_rate": 9.877559839902184e-05,
"loss": 1.7452,
"step": 295
},
{
"epoch": 0.1672124294572563,
"grad_norm": 0.8340197801589966,
"learning_rate": 9.86663298624003e-05,
"loss": 1.6687,
"step": 300
},
{
"epoch": 0.16999930328154392,
"grad_norm": 0.858363151550293,
"learning_rate": 9.855245748948326e-05,
"loss": 1.7669,
"step": 305
},
{
"epoch": 0.17278617710583152,
"grad_norm": 0.8392812013626099,
"learning_rate": 9.843399205260068e-05,
"loss": 1.736,
"step": 310
},
{
"epoch": 0.17557305093011913,
"grad_norm": 0.8345137238502502,
"learning_rate": 9.831094475858652e-05,
"loss": 1.7264,
"step": 315
},
{
"epoch": 0.17835992475440673,
"grad_norm": 0.8243482708930969,
"learning_rate": 9.818332724771857e-05,
"loss": 1.7748,
"step": 320
},
{
"epoch": 0.18114679857869434,
"grad_norm": 0.8255311846733093,
"learning_rate": 9.805115159261726e-05,
"loss": 1.6798,
"step": 325
},
{
"epoch": 0.18393367240298195,
"grad_norm": 0.8170781135559082,
"learning_rate": 9.791443029710361e-05,
"loss": 1.6542,
"step": 330
},
{
"epoch": 0.18672054622726955,
"grad_norm": 0.8476420044898987,
"learning_rate": 9.777317629501636e-05,
"loss": 1.6875,
"step": 335
},
{
"epoch": 0.18950742005155716,
"grad_norm": 0.7547033429145813,
"learning_rate": 9.762740294898846e-05,
"loss": 1.646,
"step": 340
},
{
"epoch": 0.19229429387584476,
"grad_norm": 0.826404869556427,
"learning_rate": 9.747712404918286e-05,
"loss": 1.6187,
"step": 345
},
{
"epoch": 0.19508116770013237,
"grad_norm": 0.964885950088501,
"learning_rate": 9.732235381198813e-05,
"loss": 1.7507,
"step": 350
},
{
"epoch": 0.19786804152441997,
"grad_norm": 0.8207951188087463,
"learning_rate": 9.716310687867342e-05,
"loss": 1.5745,
"step": 355
},
{
"epoch": 0.20065491534870758,
"grad_norm": 0.8143700361251831,
"learning_rate": 9.699939831400351e-05,
"loss": 1.7754,
"step": 360
},
{
"epoch": 0.20344178917299519,
"grad_norm": 0.811844527721405,
"learning_rate": 9.683124360481364e-05,
"loss": 1.6993,
"step": 365
},
{
"epoch": 0.2062286629972828,
"grad_norm": 0.8975111842155457,
"learning_rate": 9.665865865854445e-05,
"loss": 1.6901,
"step": 370
},
{
"epoch": 0.2090155368215704,
"grad_norm": 0.7903837561607361,
"learning_rate": 9.648165980173712e-05,
"loss": 1.6895,
"step": 375
},
{
"epoch": 0.211802410645858,
"grad_norm": 0.8718327879905701,
"learning_rate": 9.630026377848892e-05,
"loss": 1.6725,
"step": 380
},
{
"epoch": 0.2145892844701456,
"grad_norm": 0.8131148219108582,
"learning_rate": 9.611448774886924e-05,
"loss": 1.6874,
"step": 385
},
{
"epoch": 0.2173761582944332,
"grad_norm": 0.8331175446510315,
"learning_rate": 9.592434928729616e-05,
"loss": 1.7542,
"step": 390
},
{
"epoch": 0.22016303211872082,
"grad_norm": 0.792988121509552,
"learning_rate": 9.572986638087396e-05,
"loss": 1.7311,
"step": 395
},
{
"epoch": 0.22294990594300843,
"grad_norm": 0.7799636721611023,
"learning_rate": 9.553105742769154e-05,
"loss": 1.6567,
"step": 400
},
{
"epoch": 0.22573677976729603,
"grad_norm": 0.7864108085632324,
"learning_rate": 9.532794123508197e-05,
"loss": 1.5611,
"step": 405
},
{
"epoch": 0.22852365359158364,
"grad_norm": 0.8886444568634033,
"learning_rate": 9.512053701784329e-05,
"loss": 1.6995,
"step": 410
},
{
"epoch": 0.23131052741587124,
"grad_norm": 0.7999454736709595,
"learning_rate": 9.490886439642081e-05,
"loss": 1.6898,
"step": 415
},
{
"epoch": 0.23409740124015885,
"grad_norm": 0.7729182243347168,
"learning_rate": 9.469294339505098e-05,
"loss": 1.7204,
"step": 420
},
{
"epoch": 0.23688427506444645,
"grad_norm": 0.8300912380218506,
"learning_rate": 9.447279443986716e-05,
"loss": 1.6668,
"step": 425
},
{
"epoch": 0.23967114888873406,
"grad_norm": 0.840481698513031,
"learning_rate": 9.424843835696724e-05,
"loss": 1.704,
"step": 430
},
{
"epoch": 0.24245802271302166,
"grad_norm": 0.9396219253540039,
"learning_rate": 9.401989637044355e-05,
"loss": 1.6713,
"step": 435
},
{
"epoch": 0.24524489653730927,
"grad_norm": 0.8775554299354553,
"learning_rate": 9.3787190100375e-05,
"loss": 1.6883,
"step": 440
},
{
"epoch": 0.24803177036159688,
"grad_norm": 0.8623490333557129,
"learning_rate": 9.355034156078188e-05,
"loss": 1.6783,
"step": 445
},
{
"epoch": 0.2508186441858845,
"grad_norm": 0.806533694267273,
"learning_rate": 9.330937315754329e-05,
"loss": 1.6853,
"step": 450
},
{
"epoch": 0.2536055180101721,
"grad_norm": 0.8504590392112732,
"learning_rate": 9.306430768627753e-05,
"loss": 1.6594,
"step": 455
},
{
"epoch": 0.2563923918344597,
"grad_norm": 0.9382562637329102,
"learning_rate": 9.281516833018571e-05,
"loss": 1.6827,
"step": 460
},
{
"epoch": 0.2591792656587473,
"grad_norm": 0.821978747844696,
"learning_rate": 9.256197865785854e-05,
"loss": 1.752,
"step": 465
},
{
"epoch": 0.26196613948303493,
"grad_norm": 0.8061692714691162,
"learning_rate": 9.230476262104677e-05,
"loss": 1.6773,
"step": 470
},
{
"epoch": 0.26475301330732254,
"grad_norm": 0.8759399652481079,
"learning_rate": 9.204354455239539e-05,
"loss": 1.7003,
"step": 475
},
{
"epoch": 0.26753988713161014,
"grad_norm": 0.8721809387207031,
"learning_rate": 9.177834916314165e-05,
"loss": 1.6898,
"step": 480
},
{
"epoch": 0.27032676095589775,
"grad_norm": 0.7904046773910522,
"learning_rate": 9.150920154077754e-05,
"loss": 1.6636,
"step": 485
},
{
"epoch": 0.27311363478018535,
"grad_norm": 0.8828450441360474,
"learning_rate": 9.123612714667634e-05,
"loss": 1.6945,
"step": 490
},
{
"epoch": 0.2759005086044729,
"grad_norm": 0.7389103174209595,
"learning_rate": 9.095915181368412e-05,
"loss": 1.7459,
"step": 495
},
{
"epoch": 0.2786873824287605,
"grad_norm": 0.975307285785675,
"learning_rate": 9.067830174367586e-05,
"loss": 1.7296,
"step": 500
}
],
"logging_steps": 5,
"max_steps": 1795,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5663020915064832.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}