prem-1B-SQL / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0213386832026263,
"eval_steps": 500,
"global_step": 1400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007295276308590188,
"grad_norm": 0.9992304444313049,
"learning_rate": 9.999671349822887e-06,
"loss": 0.087,
"step": 10
},
{
"epoch": 0.014590552617180376,
"grad_norm": 1.0456160306930542,
"learning_rate": 9.998685442495921e-06,
"loss": 0.0757,
"step": 20
},
{
"epoch": 0.021885828925770564,
"grad_norm": 1.014310598373413,
"learning_rate": 9.99704240762655e-06,
"loss": 0.0901,
"step": 30
},
{
"epoch": 0.029181105234360752,
"grad_norm": 0.9391605257987976,
"learning_rate": 9.994742461208251e-06,
"loss": 0.0811,
"step": 40
},
{
"epoch": 0.036476381542950936,
"grad_norm": 0.7982305884361267,
"learning_rate": 9.991785905592149e-06,
"loss": 0.0818,
"step": 50
},
{
"epoch": 0.04377165785154113,
"grad_norm": 0.9946795701980591,
"learning_rate": 9.988173129447251e-06,
"loss": 0.0905,
"step": 60
},
{
"epoch": 0.05106693416013131,
"grad_norm": 0.8562415242195129,
"learning_rate": 9.983904607709365e-06,
"loss": 0.0831,
"step": 70
},
{
"epoch": 0.058362210468721504,
"grad_norm": 1.054521918296814,
"learning_rate": 9.978980901518663e-06,
"loss": 0.083,
"step": 80
},
{
"epoch": 0.06565748677731169,
"grad_norm": 1.0189253091812134,
"learning_rate": 9.973402658145908e-06,
"loss": 0.0891,
"step": 90
},
{
"epoch": 0.07295276308590187,
"grad_norm": 0.7745731472969055,
"learning_rate": 9.96717061090737e-06,
"loss": 0.0846,
"step": 100
},
{
"epoch": 0.08024803939449207,
"grad_norm": 0.8214731812477112,
"learning_rate": 9.960285579068419e-06,
"loss": 0.0895,
"step": 110
},
{
"epoch": 0.08754331570308226,
"grad_norm": 0.9114850759506226,
"learning_rate": 9.95274846773583e-06,
"loss": 0.0862,
"step": 120
},
{
"epoch": 0.09483859201167244,
"grad_norm": 1.216098427772522,
"learning_rate": 9.944560267738792e-06,
"loss": 0.0896,
"step": 130
},
{
"epoch": 0.10213386832026262,
"grad_norm": 0.7587252855300903,
"learning_rate": 9.935722055498655e-06,
"loss": 0.0888,
"step": 140
},
{
"epoch": 0.10942914462885282,
"grad_norm": 0.8791028261184692,
"learning_rate": 9.92623499288743e-06,
"loss": 0.0832,
"step": 150
},
{
"epoch": 0.11672442093744301,
"grad_norm": 1.133716344833374,
"learning_rate": 9.916100327075038e-06,
"loss": 0.0854,
"step": 160
},
{
"epoch": 0.12401969724603319,
"grad_norm": 0.7774285674095154,
"learning_rate": 9.905319390365364e-06,
"loss": 0.0873,
"step": 170
},
{
"epoch": 0.13131497355462338,
"grad_norm": 0.7029784321784973,
"learning_rate": 9.893893600021112e-06,
"loss": 0.0786,
"step": 180
},
{
"epoch": 0.13861024986321357,
"grad_norm": 1.0894945859909058,
"learning_rate": 9.881824458077491e-06,
"loss": 0.089,
"step": 190
},
{
"epoch": 0.14590552617180375,
"grad_norm": 0.7053947448730469,
"learning_rate": 9.869113551144754e-06,
"loss": 0.0843,
"step": 200
},
{
"epoch": 0.15320080248039394,
"grad_norm": 0.9423328638076782,
"learning_rate": 9.85576255019963e-06,
"loss": 0.0887,
"step": 210
},
{
"epoch": 0.16049607878898414,
"grad_norm": 1.1778377294540405,
"learning_rate": 9.841773210365646e-06,
"loss": 0.0939,
"step": 220
},
{
"epoch": 0.1677913550975743,
"grad_norm": 0.8132289052009583,
"learning_rate": 9.82714737068241e-06,
"loss": 0.0812,
"step": 230
},
{
"epoch": 0.1750866314061645,
"grad_norm": 0.925722062587738,
"learning_rate": 9.811886953863841e-06,
"loss": 0.091,
"step": 240
},
{
"epoch": 0.1823819077147547,
"grad_norm": 0.8174653649330139,
"learning_rate": 9.795993966045418e-06,
"loss": 0.0868,
"step": 250
},
{
"epoch": 0.18967718402334488,
"grad_norm": 1.086427092552185,
"learning_rate": 9.779470496520442e-06,
"loss": 0.0882,
"step": 260
},
{
"epoch": 0.19697246033193508,
"grad_norm": 0.8431033492088318,
"learning_rate": 9.76231871746539e-06,
"loss": 0.0819,
"step": 270
},
{
"epoch": 0.20426773664052525,
"grad_norm": 0.9793428778648376,
"learning_rate": 9.744540883654348e-06,
"loss": 0.0838,
"step": 280
},
{
"epoch": 0.21156301294911545,
"grad_norm": 1.1424306631088257,
"learning_rate": 9.726139332162613e-06,
"loss": 0.0923,
"step": 290
},
{
"epoch": 0.21885828925770565,
"grad_norm": 0.8779186010360718,
"learning_rate": 9.707116482059447e-06,
"loss": 0.0909,
"step": 300
},
{
"epoch": 0.22615356556629582,
"grad_norm": 0.862575113773346,
"learning_rate": 9.68747483409007e-06,
"loss": 0.094,
"step": 310
},
{
"epoch": 0.23344884187488602,
"grad_norm": 0.8033697009086609,
"learning_rate": 9.667216970346916e-06,
"loss": 0.0821,
"step": 320
},
{
"epoch": 0.24074411818347619,
"grad_norm": 1.136168360710144,
"learning_rate": 9.646345553930187e-06,
"loss": 0.084,
"step": 330
},
{
"epoch": 0.24803939449206638,
"grad_norm": 1.227287769317627,
"learning_rate": 9.624863328597767e-06,
"loss": 0.0867,
"step": 340
},
{
"epoch": 0.2553346708006566,
"grad_norm": 0.8906837105751038,
"learning_rate": 9.602773118404518e-06,
"loss": 0.0845,
"step": 350
},
{
"epoch": 0.26262994710924675,
"grad_norm": 0.9557612538337708,
"learning_rate": 9.580077827331038e-06,
"loss": 0.0896,
"step": 360
},
{
"epoch": 0.2699252234178369,
"grad_norm": 0.8804728984832764,
"learning_rate": 9.556780438901899e-06,
"loss": 0.0799,
"step": 370
},
{
"epoch": 0.27722049972642715,
"grad_norm": 0.8825680017471313,
"learning_rate": 9.532884015793432e-06,
"loss": 0.0879,
"step": 380
},
{
"epoch": 0.2845157760350173,
"grad_norm": 1.216217041015625,
"learning_rate": 9.508391699431114e-06,
"loss": 0.0878,
"step": 390
},
{
"epoch": 0.2918110523436075,
"grad_norm": 1.0918773412704468,
"learning_rate": 9.48330670957659e-06,
"loss": 0.0842,
"step": 400
},
{
"epoch": 0.2991063286521977,
"grad_norm": 0.8797096014022827,
"learning_rate": 9.457632343904404e-06,
"loss": 0.075,
"step": 410
},
{
"epoch": 0.3064016049607879,
"grad_norm": 0.9638001322746277,
"learning_rate": 9.431371977568483e-06,
"loss": 0.0834,
"step": 420
},
{
"epoch": 0.31369688126937806,
"grad_norm": 0.7447642683982849,
"learning_rate": 9.404529062758447e-06,
"loss": 0.0909,
"step": 430
},
{
"epoch": 0.3209921575779683,
"grad_norm": 1.179291844367981,
"learning_rate": 9.377107128245782e-06,
"loss": 0.0832,
"step": 440
},
{
"epoch": 0.32828743388655846,
"grad_norm": 0.6798911094665527,
"learning_rate": 9.349109778919938e-06,
"loss": 0.0842,
"step": 450
},
{
"epoch": 0.3355827101951486,
"grad_norm": 0.9468401670455933,
"learning_rate": 9.32054069531444e-06,
"loss": 0.0937,
"step": 460
},
{
"epoch": 0.34287798650373885,
"grad_norm": 0.9600223898887634,
"learning_rate": 9.291403633123046e-06,
"loss": 0.0909,
"step": 470
},
{
"epoch": 0.350173262812329,
"grad_norm": 0.7957492470741272,
"learning_rate": 9.261702422706014e-06,
"loss": 0.0924,
"step": 480
},
{
"epoch": 0.3574685391209192,
"grad_norm": 0.9197902083396912,
"learning_rate": 9.231440968586572e-06,
"loss": 0.0852,
"step": 490
},
{
"execution_accuracy": 37.0,
"selected_difficulty": "challenging",
"step": 500
},
{
"epoch": 0.3647638154295094,
"grad_norm": 1.0779783725738525,
"learning_rate": 9.200623248937619e-06,
"loss": 0.0872,
"step": 500
},
{
"epoch": 0.3720590917380996,
"grad_norm": 1.0330190658569336,
"learning_rate": 9.169253315058764e-06,
"loss": 0.0884,
"step": 510
},
{
"epoch": 0.37935436804668976,
"grad_norm": 0.8854564428329468,
"learning_rate": 9.13733529084374e-06,
"loss": 0.0888,
"step": 520
},
{
"epoch": 0.38664964435527993,
"grad_norm": 0.7928789258003235,
"learning_rate": 9.104873372238269e-06,
"loss": 0.0858,
"step": 530
},
{
"epoch": 0.39394492066387016,
"grad_norm": 1.1074901819229126,
"learning_rate": 9.071871826688472e-06,
"loss": 0.0866,
"step": 540
},
{
"epoch": 0.40124019697246033,
"grad_norm": 0.9030365347862244,
"learning_rate": 9.038334992579863e-06,
"loss": 0.084,
"step": 550
},
{
"epoch": 0.4085354732810505,
"grad_norm": 0.9121033549308777,
"learning_rate": 9.004267278667032e-06,
"loss": 0.0841,
"step": 560
},
{
"epoch": 0.4158307495896407,
"grad_norm": 0.7888039350509644,
"learning_rate": 8.969673163494063e-06,
"loss": 0.0844,
"step": 570
},
{
"epoch": 0.4231260258982309,
"grad_norm": 0.8982441425323486,
"learning_rate": 8.934557194805787e-06,
"loss": 0.0881,
"step": 580
},
{
"epoch": 0.43042130220682107,
"grad_norm": 0.9907477498054504,
"learning_rate": 8.898923988949936e-06,
"loss": 0.0829,
"step": 590
},
{
"epoch": 0.4377165785154113,
"grad_norm": 0.7510169744491577,
"learning_rate": 8.862778230270276e-06,
"loss": 0.0812,
"step": 600
},
{
"epoch": 0.44501185482400146,
"grad_norm": 0.8274700045585632,
"learning_rate": 8.826124670490804e-06,
"loss": 0.0872,
"step": 610
},
{
"epoch": 0.45230713113259163,
"grad_norm": 1.118033766746521,
"learning_rate": 8.788968128091084e-06,
"loss": 0.0934,
"step": 620
},
{
"epoch": 0.45960240744118186,
"grad_norm": 0.7703680992126465,
"learning_rate": 8.751313487672815e-06,
"loss": 0.0891,
"step": 630
},
{
"epoch": 0.46689768374977203,
"grad_norm": 1.0380890369415283,
"learning_rate": 8.71316569931769e-06,
"loss": 0.0931,
"step": 640
},
{
"epoch": 0.4741929600583622,
"grad_norm": 0.9548109769821167,
"learning_rate": 8.674529777936674e-06,
"loss": 0.0816,
"step": 650
},
{
"epoch": 0.48148823636695237,
"grad_norm": 1.2412773370742798,
"learning_rate": 8.635410802610724e-06,
"loss": 0.0872,
"step": 660
},
{
"epoch": 0.4887835126755426,
"grad_norm": 0.8023186922073364,
"learning_rate": 8.595813915923113e-06,
"loss": 0.0857,
"step": 670
},
{
"epoch": 0.49607878898413277,
"grad_norm": 0.829302966594696,
"learning_rate": 8.555744323283364e-06,
"loss": 0.0932,
"step": 680
},
{
"epoch": 0.503374065292723,
"grad_norm": 1.0218778848648071,
"learning_rate": 8.515207292242969e-06,
"loss": 0.0791,
"step": 690
},
{
"epoch": 0.5106693416013132,
"grad_norm": 0.9110598564147949,
"learning_rate": 8.474208151802898e-06,
"loss": 0.0917,
"step": 700
},
{
"epoch": 0.5179646179099033,
"grad_norm": 0.8709658980369568,
"learning_rate": 8.432752291713058e-06,
"loss": 0.0805,
"step": 710
},
{
"epoch": 0.5252598942184935,
"grad_norm": 0.9375218749046326,
"learning_rate": 8.390845161763756e-06,
"loss": 0.0887,
"step": 720
},
{
"epoch": 0.5325551705270837,
"grad_norm": 0.819020688533783,
"learning_rate": 8.34849227106926e-06,
"loss": 0.0823,
"step": 730
},
{
"epoch": 0.5398504468356738,
"grad_norm": 0.7377147078514099,
"learning_rate": 8.305699187343586e-06,
"loss": 0.0867,
"step": 740
},
{
"epoch": 0.5471457231442641,
"grad_norm": 0.9633333086967468,
"learning_rate": 8.262471536168547e-06,
"loss": 0.0893,
"step": 750
},
{
"epoch": 0.5544409994528543,
"grad_norm": 0.894578218460083,
"learning_rate": 8.218815000254233e-06,
"loss": 0.0874,
"step": 760
},
{
"epoch": 0.5617362757614445,
"grad_norm": 0.9977262616157532,
"learning_rate": 8.174735318691946e-06,
"loss": 0.0822,
"step": 770
},
{
"epoch": 0.5690315520700346,
"grad_norm": 0.8501657247543335,
"learning_rate": 8.130238286199747e-06,
"loss": 0.0874,
"step": 780
},
{
"epoch": 0.5763268283786248,
"grad_norm": 0.7603849172592163,
"learning_rate": 8.085329752360683e-06,
"loss": 0.0784,
"step": 790
},
{
"epoch": 0.583622104687215,
"grad_norm": 0.8487511277198792,
"learning_rate": 8.04001562085379e-06,
"loss": 0.0878,
"step": 800
},
{
"epoch": 0.5909173809958053,
"grad_norm": 0.9253877401351929,
"learning_rate": 7.994301848678006e-06,
"loss": 0.0815,
"step": 810
},
{
"epoch": 0.5982126573043954,
"grad_norm": 0.9946874976158142,
"learning_rate": 7.948194445369065e-06,
"loss": 0.0901,
"step": 820
},
{
"epoch": 0.6055079336129856,
"grad_norm": 1.006040334701538,
"learning_rate": 7.901699472209467e-06,
"loss": 0.0792,
"step": 830
},
{
"epoch": 0.6128032099215758,
"grad_norm": 0.8797623515129089,
"learning_rate": 7.85482304143168e-06,
"loss": 0.0816,
"step": 840
},
{
"epoch": 0.620098486230166,
"grad_norm": 0.9626962542533875,
"learning_rate": 7.807571315414616e-06,
"loss": 0.08,
"step": 850
},
{
"epoch": 0.6273937625387561,
"grad_norm": 1.075498104095459,
"learning_rate": 7.759950505873523e-06,
"loss": 0.0802,
"step": 860
},
{
"epoch": 0.6346890388473464,
"grad_norm": 0.90218585729599,
"learning_rate": 7.711966873043396e-06,
"loss": 0.0824,
"step": 870
},
{
"epoch": 0.6419843151559366,
"grad_norm": 0.8692035675048828,
"learning_rate": 7.66362672485601e-06,
"loss": 0.0769,
"step": 880
},
{
"epoch": 0.6492795914645267,
"grad_norm": 1.062687635421753,
"learning_rate": 7.614936416110668e-06,
"loss": 0.0834,
"step": 890
},
{
"epoch": 0.6565748677731169,
"grad_norm": 0.9357954859733582,
"learning_rate": 7.565902347638806e-06,
"loss": 0.0833,
"step": 900
},
{
"epoch": 0.6638701440817071,
"grad_norm": 0.873466432094574,
"learning_rate": 7.5165309654625405e-06,
"loss": 0.0803,
"step": 910
},
{
"epoch": 0.6711654203902973,
"grad_norm": 0.8454645276069641,
"learning_rate": 7.466828759947271e-06,
"loss": 0.0793,
"step": 920
},
{
"epoch": 0.6784606966988874,
"grad_norm": 1.1247007846832275,
"learning_rate": 7.416802264948455e-06,
"loss": 0.0757,
"step": 930
},
{
"epoch": 0.6857559730074777,
"grad_norm": 0.9007195234298706,
"learning_rate": 7.366458056952668e-06,
"loss": 0.0855,
"step": 940
},
{
"epoch": 0.6930512493160679,
"grad_norm": 0.775996208190918,
"learning_rate": 7.315802754213062e-06,
"loss": 0.079,
"step": 950
},
{
"epoch": 0.700346525624658,
"grad_norm": 1.1148663759231567,
"learning_rate": 7.264843015879321e-06,
"loss": 0.0881,
"step": 960
},
{
"epoch": 0.7076418019332482,
"grad_norm": 0.8462682366371155,
"learning_rate": 7.213585541122261e-06,
"loss": 0.0848,
"step": 970
},
{
"epoch": 0.7149370782418384,
"grad_norm": 0.8557429313659668,
"learning_rate": 7.162037068253141e-06,
"loss": 0.0839,
"step": 980
},
{
"epoch": 0.7222323545504286,
"grad_norm": 0.8856000900268555,
"learning_rate": 7.110204373837857e-06,
"loss": 0.0804,
"step": 990
},
{
"execution_accuracy": 47.0,
"selected_difficulty": "challenging",
"step": 1000
},
{
"epoch": 0.7295276308590188,
"grad_norm": 0.7584331631660461,
"learning_rate": 7.058094271806091e-06,
"loss": 0.0818,
"step": 1000
},
{
"epoch": 0.736822907167609,
"grad_norm": 0.9286295175552368,
"learning_rate": 7.0057136125555456e-06,
"loss": 0.0766,
"step": 1010
},
{
"epoch": 0.7441181834761992,
"grad_norm": 1.1081056594848633,
"learning_rate": 6.953069282051397e-06,
"loss": 0.0835,
"step": 1020
},
{
"epoch": 0.7514134597847894,
"grad_norm": 0.9020804762840271,
"learning_rate": 6.900168200921065e-06,
"loss": 0.0791,
"step": 1030
},
{
"epoch": 0.7587087360933795,
"grad_norm": 1.0643606185913086,
"learning_rate": 6.84701732354442e-06,
"loss": 0.0866,
"step": 1040
},
{
"epoch": 0.7660040124019697,
"grad_norm": 0.9293026924133301,
"learning_rate": 6.79362363713957e-06,
"loss": 0.0828,
"step": 1050
},
{
"epoch": 0.7732992887105599,
"grad_norm": 0.9997085332870483,
"learning_rate": 6.7399941608443096e-06,
"loss": 0.0737,
"step": 1060
},
{
"epoch": 0.7805945650191501,
"grad_norm": 0.8557327389717102,
"learning_rate": 6.686135944793395e-06,
"loss": 0.0836,
"step": 1070
},
{
"epoch": 0.7878898413277403,
"grad_norm": 0.8752471208572388,
"learning_rate": 6.632056069191723e-06,
"loss": 0.0897,
"step": 1080
},
{
"epoch": 0.7951851176363305,
"grad_norm": 0.9777392148971558,
"learning_rate": 6.57776164338357e-06,
"loss": 0.0805,
"step": 1090
},
{
"epoch": 0.8024803939449207,
"grad_norm": 0.9013363122940063,
"learning_rate": 6.523259804918001e-06,
"loss": 0.0764,
"step": 1100
},
{
"epoch": 0.8097756702535108,
"grad_norm": 0.930316686630249,
"learning_rate": 6.4685577186105595e-06,
"loss": 0.0858,
"step": 1110
},
{
"epoch": 0.817070946562101,
"grad_norm": 1.2016055583953857,
"learning_rate": 6.413662575601391e-06,
"loss": 0.0809,
"step": 1120
},
{
"epoch": 0.8243662228706913,
"grad_norm": 0.8230682611465454,
"learning_rate": 6.358581592409881e-06,
"loss": 0.0771,
"step": 1130
},
{
"epoch": 0.8316614991792814,
"grad_norm": 0.9575796127319336,
"learning_rate": 6.303322009985984e-06,
"loss": 0.0893,
"step": 1140
},
{
"epoch": 0.8389567754878716,
"grad_norm": 0.7613864541053772,
"learning_rate": 6.247891092758319e-06,
"loss": 0.0802,
"step": 1150
},
{
"epoch": 0.8462520517964618,
"grad_norm": 0.8168739080429077,
"learning_rate": 6.1922961276791925e-06,
"loss": 0.0718,
"step": 1160
},
{
"epoch": 0.853547328105052,
"grad_norm": 0.719241201877594,
"learning_rate": 6.136544423266651e-06,
"loss": 0.073,
"step": 1170
},
{
"epoch": 0.8608426044136421,
"grad_norm": 1.0418319702148438,
"learning_rate": 6.08064330864371e-06,
"loss": 0.0816,
"step": 1180
},
{
"epoch": 0.8681378807222323,
"grad_norm": 0.7831118106842041,
"learning_rate": 6.024600132574855e-06,
"loss": 0.0775,
"step": 1190
},
{
"epoch": 0.8754331570308226,
"grad_norm": 0.9321058988571167,
"learning_rate": 5.968422262499983e-06,
"loss": 0.0777,
"step": 1200
},
{
"epoch": 0.8827284333394128,
"grad_norm": 0.9959325194358826,
"learning_rate": 5.912117083565874e-06,
"loss": 0.0743,
"step": 1210
},
{
"epoch": 0.8900237096480029,
"grad_norm": 0.7858604192733765,
"learning_rate": 5.85569199765534e-06,
"loss": 0.0742,
"step": 1220
},
{
"epoch": 0.8973189859565931,
"grad_norm": 1.0771974325180054,
"learning_rate": 5.799154422414174e-06,
"loss": 0.0784,
"step": 1230
},
{
"epoch": 0.9046142622651833,
"grad_norm": 1.0624542236328125,
"learning_rate": 5.7425117902760195e-06,
"loss": 0.0784,
"step": 1240
},
{
"epoch": 0.9119095385737734,
"grad_norm": 0.7207911014556885,
"learning_rate": 5.685771547485312e-06,
"loss": 0.0694,
"step": 1250
},
{
"epoch": 0.9192048148823637,
"grad_norm": 1.0225133895874023,
"learning_rate": 5.628941153118388e-06,
"loss": 0.0747,
"step": 1260
},
{
"epoch": 0.9265000911909539,
"grad_norm": 1.0003268718719482,
"learning_rate": 5.572028078102917e-06,
"loss": 0.077,
"step": 1270
},
{
"epoch": 0.9337953674995441,
"grad_norm": 0.9205290079116821,
"learning_rate": 5.515039804235772e-06,
"loss": 0.0764,
"step": 1280
},
{
"epoch": 0.9410906438081342,
"grad_norm": 1.0423191785812378,
"learning_rate": 5.457983823199475e-06,
"loss": 0.0801,
"step": 1290
},
{
"epoch": 0.9483859201167244,
"grad_norm": 0.8814120888710022,
"learning_rate": 5.400867635577335e-06,
"loss": 0.0801,
"step": 1300
},
{
"epoch": 0.9556811964253146,
"grad_norm": 0.9548910856246948,
"learning_rate": 5.343698749867421e-06,
"loss": 0.0802,
"step": 1310
},
{
"epoch": 0.9629764727339047,
"grad_norm": 0.7712908983230591,
"learning_rate": 5.2864846814955e-06,
"loss": 0.077,
"step": 1320
},
{
"epoch": 0.970271749042495,
"grad_norm": 0.9300876259803772,
"learning_rate": 5.229232951827054e-06,
"loss": 0.081,
"step": 1330
},
{
"epoch": 0.9775670253510852,
"grad_norm": 0.932421863079071,
"learning_rate": 5.17195108717852e-06,
"loss": 0.0841,
"step": 1340
},
{
"epoch": 0.9848623016596754,
"grad_norm": 0.9440054297447205,
"learning_rate": 5.114646617827884e-06,
"loss": 0.0714,
"step": 1350
},
{
"epoch": 0.9921575779682655,
"grad_norm": 0.8986610174179077,
"learning_rate": 5.057327077024745e-06,
"loss": 0.0781,
"step": 1360
},
{
"epoch": 0.9994528542768557,
"grad_norm": 0.9697067141532898,
"learning_rate": 5e-06,
"loss": 0.0863,
"step": 1370
},
{
"epoch": 1.006748130585446,
"grad_norm": 0.5880870223045349,
"learning_rate": 4.942672922975255e-06,
"loss": 0.0443,
"step": 1380
},
{
"epoch": 1.014043406894036,
"grad_norm": 0.8269129395484924,
"learning_rate": 4.8853533821721175e-06,
"loss": 0.0464,
"step": 1390
},
{
"epoch": 1.0213386832026263,
"grad_norm": 0.9354344606399536,
"learning_rate": 4.82804891282148e-06,
"loss": 0.0436,
"step": 1400
}
],
"logging_steps": 10,
"max_steps": 2740,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.849931290145456e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
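
The JSON above is the standard `trainer_state.json` written by the Hugging Face `Trainer`: `log_history` interleaves the periodic training logs (loss, grad_norm, learning_rate every 10 steps, per `logging_steps`) with the custom `execution_accuracy` evaluation entries recorded at steps 500 and 1000. Below is a minimal sketch of how such a file can be parsed and plotted; the local file path and the use of matplotlib are assumptions for illustration, not part of the checkpoint.

```python
import json

import matplotlib.pyplot as plt

# Illustrative sketch: load the trainer state exported by the HF Trainer
# and plot the training-loss curve. The path is an assumption; point it at
# a local copy of the file.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes regular training logs with custom evaluation entries,
# so filter on the keys that are actually present in each record.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "execution_accuracy" in e]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

plt.plot(steps, losses, label="train loss")
for e in eval_logs:
    plt.axvline(
        e["step"], linestyle="--", alpha=0.5,
        label=f"eval @ step {e['step']}: {e['execution_accuracy']}% exec. acc.",
    )
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()
```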