{
"best_metric": 0.030531086027622223,
"best_model_checkpoint": "saves/psy-course/MentaLLaMA-chat-7B/train/fold9/checkpoint-1450",
"epoch": 4.997121473805412,
"eval_steps": 50,
"global_step": 3255,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01535213970447131,
"grad_norm": 1.986027717590332,
"learning_rate": 3.067484662576687e-06,
"loss": 1.7207,
"step": 10
},
{
"epoch": 0.03070427940894262,
"grad_norm": 1.8678655624389648,
"learning_rate": 6.134969325153374e-06,
"loss": 1.5868,
"step": 20
},
{
"epoch": 0.04605641911341393,
"grad_norm": 2.281944751739502,
"learning_rate": 9.202453987730062e-06,
"loss": 1.6864,
"step": 30
},
{
"epoch": 0.06140855881788524,
"grad_norm": 2.9772989749908447,
"learning_rate": 1.2269938650306748e-05,
"loss": 1.3561,
"step": 40
},
{
"epoch": 0.07676069852235655,
"grad_norm": 1.116693377494812,
"learning_rate": 1.5337423312883436e-05,
"loss": 0.8389,
"step": 50
},
{
"epoch": 0.07676069852235655,
"eval_loss": 0.6106540560722351,
"eval_runtime": 210.0434,
"eval_samples_per_second": 5.518,
"eval_steps_per_second": 5.518,
"step": 50
},
{
"epoch": 0.09211283822682786,
"grad_norm": 0.8406881093978882,
"learning_rate": 1.8404907975460123e-05,
"loss": 0.668,
"step": 60
},
{
"epoch": 0.10746497793129918,
"grad_norm": 0.87685626745224,
"learning_rate": 2.1472392638036813e-05,
"loss": 0.5046,
"step": 70
},
{
"epoch": 0.12281711763577048,
"grad_norm": 0.7217749953269958,
"learning_rate": 2.4539877300613496e-05,
"loss": 0.2741,
"step": 80
},
{
"epoch": 0.1381692573402418,
"grad_norm": 0.6710460782051086,
"learning_rate": 2.7607361963190186e-05,
"loss": 0.1939,
"step": 90
},
{
"epoch": 0.1535213970447131,
"grad_norm": 0.46170759201049805,
"learning_rate": 3.067484662576687e-05,
"loss": 0.1391,
"step": 100
},
{
"epoch": 0.1535213970447131,
"eval_loss": 0.10637813061475754,
"eval_runtime": 209.8019,
"eval_samples_per_second": 5.524,
"eval_steps_per_second": 5.524,
"step": 100
},
{
"epoch": 0.16887353674918443,
"grad_norm": 0.4916459023952484,
"learning_rate": 3.3742331288343556e-05,
"loss": 0.098,
"step": 110
},
{
"epoch": 0.18422567645365573,
"grad_norm": 0.3704639673233032,
"learning_rate": 3.6809815950920246e-05,
"loss": 0.089,
"step": 120
},
{
"epoch": 0.19957781615812703,
"grad_norm": 0.8016656041145325,
"learning_rate": 3.987730061349693e-05,
"loss": 0.0972,
"step": 130
},
{
"epoch": 0.21492995586259836,
"grad_norm": 0.8712407350540161,
"learning_rate": 4.2944785276073626e-05,
"loss": 0.0683,
"step": 140
},
{
"epoch": 0.23028209556706966,
"grad_norm": 0.7326760292053223,
"learning_rate": 4.601226993865031e-05,
"loss": 0.0757,
"step": 150
},
{
"epoch": 0.23028209556706966,
"eval_loss": 0.06941775232553482,
"eval_runtime": 209.7144,
"eval_samples_per_second": 5.527,
"eval_steps_per_second": 5.527,
"step": 150
},
{
"epoch": 0.24563423527154096,
"grad_norm": 0.8024667501449585,
"learning_rate": 4.907975460122699e-05,
"loss": 0.0869,
"step": 160
},
{
"epoch": 0.2609863749760123,
"grad_norm": 0.7557644844055176,
"learning_rate": 5.214723926380368e-05,
"loss": 0.0752,
"step": 170
},
{
"epoch": 0.2763385146804836,
"grad_norm": 0.8181012868881226,
"learning_rate": 5.521472392638037e-05,
"loss": 0.0704,
"step": 180
},
{
"epoch": 0.2916906543849549,
"grad_norm": 0.8826432228088379,
"learning_rate": 5.8282208588957056e-05,
"loss": 0.0583,
"step": 190
},
{
"epoch": 0.3070427940894262,
"grad_norm": 0.5068766474723816,
"learning_rate": 6.134969325153375e-05,
"loss": 0.0639,
"step": 200
},
{
"epoch": 0.3070427940894262,
"eval_loss": 0.058139290660619736,
"eval_runtime": 209.743,
"eval_samples_per_second": 5.526,
"eval_steps_per_second": 5.526,
"step": 200
},
{
"epoch": 0.3223949337938975,
"grad_norm": 0.660163402557373,
"learning_rate": 6.441717791411042e-05,
"loss": 0.0634,
"step": 210
},
{
"epoch": 0.33774707349836885,
"grad_norm": 0.4490329325199127,
"learning_rate": 6.748466257668711e-05,
"loss": 0.0456,
"step": 220
},
{
"epoch": 0.35309921320284016,
"grad_norm": 0.5326518416404724,
"learning_rate": 7.055214723926382e-05,
"loss": 0.0758,
"step": 230
},
{
"epoch": 0.36845135290731146,
"grad_norm": 0.527441143989563,
"learning_rate": 7.361963190184049e-05,
"loss": 0.0547,
"step": 240
},
{
"epoch": 0.38380349261178276,
"grad_norm": 0.6042910814285278,
"learning_rate": 7.668711656441718e-05,
"loss": 0.0739,
"step": 250
},
{
"epoch": 0.38380349261178276,
"eval_loss": 0.0467386394739151,
"eval_runtime": 209.5833,
"eval_samples_per_second": 5.53,
"eval_steps_per_second": 5.53,
"step": 250
},
{
"epoch": 0.39915563231625406,
"grad_norm": 0.6599677205085754,
"learning_rate": 7.975460122699386e-05,
"loss": 0.0542,
"step": 260
},
{
"epoch": 0.41450777202072536,
"grad_norm": 0.4332006275653839,
"learning_rate": 8.282208588957055e-05,
"loss": 0.0519,
"step": 270
},
{
"epoch": 0.4298599117251967,
"grad_norm": 0.7430977821350098,
"learning_rate": 8.588957055214725e-05,
"loss": 0.0575,
"step": 280
},
{
"epoch": 0.445212051429668,
"grad_norm": 0.4669485092163086,
"learning_rate": 8.895705521472393e-05,
"loss": 0.0456,
"step": 290
},
{
"epoch": 0.4605641911341393,
"grad_norm": 0.5974306464195251,
"learning_rate": 9.202453987730062e-05,
"loss": 0.0558,
"step": 300
},
{
"epoch": 0.4605641911341393,
"eval_loss": 0.04392043128609657,
"eval_runtime": 209.0171,
"eval_samples_per_second": 5.545,
"eval_steps_per_second": 5.545,
"step": 300
},
{
"epoch": 0.4759163308386106,
"grad_norm": 0.5303856134414673,
"learning_rate": 9.50920245398773e-05,
"loss": 0.0436,
"step": 310
},
{
"epoch": 0.4912684705430819,
"grad_norm": 0.30366232991218567,
"learning_rate": 9.815950920245399e-05,
"loss": 0.0447,
"step": 320
},
{
"epoch": 0.5066206102475532,
"grad_norm": 0.31800657510757446,
"learning_rate": 9.999953982785432e-05,
"loss": 0.0523,
"step": 330
},
{
"epoch": 0.5219727499520246,
"grad_norm": 0.38302093744277954,
"learning_rate": 9.999436298849151e-05,
"loss": 0.0413,
"step": 340
},
{
"epoch": 0.5373248896564958,
"grad_norm": 0.25141751766204834,
"learning_rate": 9.998343469212352e-05,
"loss": 0.0394,
"step": 350
},
{
"epoch": 0.5373248896564958,
"eval_loss": 0.04250750318169594,
"eval_runtime": 209.0742,
"eval_samples_per_second": 5.543,
"eval_steps_per_second": 5.543,
"step": 350
},
{
"epoch": 0.5526770293609672,
"grad_norm": 0.5167840719223022,
"learning_rate": 9.996675619596465e-05,
"loss": 0.0382,
"step": 360
},
{
"epoch": 0.5680291690654385,
"grad_norm": 0.4687771201133728,
"learning_rate": 9.99443294187443e-05,
"loss": 0.053,
"step": 370
},
{
"epoch": 0.5833813087699098,
"grad_norm": 0.16902729868888855,
"learning_rate": 9.991615694048621e-05,
"loss": 0.039,
"step": 380
},
{
"epoch": 0.5987334484743811,
"grad_norm": 0.48972830176353455,
"learning_rate": 9.988224200221172e-05,
"loss": 0.0537,
"step": 390
},
{
"epoch": 0.6140855881788524,
"grad_norm": 0.2853284478187561,
"learning_rate": 9.984258850556693e-05,
"loss": 0.0477,
"step": 400
},
{
"epoch": 0.6140855881788524,
"eval_loss": 0.04190515726804733,
"eval_runtime": 209.6671,
"eval_samples_per_second": 5.528,
"eval_steps_per_second": 5.528,
"step": 400
},
{
"epoch": 0.6294377278833237,
"grad_norm": 0.560493528842926,
"learning_rate": 9.979720101237375e-05,
"loss": 0.0418,
"step": 410
},
{
"epoch": 0.644789867587795,
"grad_norm": 0.36185890436172485,
"learning_rate": 9.974608474410512e-05,
"loss": 0.04,
"step": 420
},
{
"epoch": 0.6601420072922664,
"grad_norm": 0.6417878270149231,
"learning_rate": 9.968924558128445e-05,
"loss": 0.0423,
"step": 430
},
{
"epoch": 0.6754941469967377,
"grad_norm": 0.4333862066268921,
"learning_rate": 9.962669006280894e-05,
"loss": 0.0384,
"step": 440
},
{
"epoch": 0.690846286701209,
"grad_norm": 0.3614458739757538,
"learning_rate": 9.95584253851974e-05,
"loss": 0.047,
"step": 450
},
{
"epoch": 0.690846286701209,
"eval_loss": 0.043166350573301315,
"eval_runtime": 209.6238,
"eval_samples_per_second": 5.529,
"eval_steps_per_second": 5.529,
"step": 450
},
{
"epoch": 0.7061984264056803,
"grad_norm": 0.16041065752506256,
"learning_rate": 9.948445940176243e-05,
"loss": 0.0444,
"step": 460
},
{
"epoch": 0.7215505661101516,
"grad_norm": 0.2849539518356323,
"learning_rate": 9.940480062170679e-05,
"loss": 0.0407,
"step": 470
},
{
"epoch": 0.7369027058146229,
"grad_norm": 0.2027292102575302,
"learning_rate": 9.931945820914462e-05,
"loss": 0.0537,
"step": 480
},
{
"epoch": 0.7522548455190943,
"grad_norm": 0.3879440426826477,
"learning_rate": 9.922844198204715e-05,
"loss": 0.037,
"step": 490
},
{
"epoch": 0.7676069852235655,
"grad_norm": 0.3453935980796814,
"learning_rate": 9.913176241111319e-05,
"loss": 0.0416,
"step": 500
},
{
"epoch": 0.7676069852235655,
"eval_loss": 0.03759530559182167,
"eval_runtime": 206.4796,
"eval_samples_per_second": 5.613,
"eval_steps_per_second": 5.613,
"step": 500
},
{
"epoch": 0.7829591249280369,
"grad_norm": 0.3907797932624817,
"learning_rate": 9.902943061856456e-05,
"loss": 0.0356,
"step": 510
},
{
"epoch": 0.7983112646325081,
"grad_norm": 0.27914416790008545,
"learning_rate": 9.892145837686657e-05,
"loss": 0.052,
"step": 520
},
{
"epoch": 0.8136634043369795,
"grad_norm": 0.2834685742855072,
"learning_rate": 9.880785810737378e-05,
"loss": 0.048,
"step": 530
},
{
"epoch": 0.8290155440414507,
"grad_norm": 0.11335510015487671,
"learning_rate": 9.868864287890083e-05,
"loss": 0.037,
"step": 540
},
{
"epoch": 0.8443676837459221,
"grad_norm": 0.26555532217025757,
"learning_rate": 9.856382640621917e-05,
"loss": 0.0637,
"step": 550
},
{
"epoch": 0.8443676837459221,
"eval_loss": 0.03945336863398552,
"eval_runtime": 202.7088,
"eval_samples_per_second": 5.718,
"eval_steps_per_second": 5.718,
"step": 550
},
{
"epoch": 0.8597198234503934,
"grad_norm": 0.3383285701274872,
"learning_rate": 9.84334230484792e-05,
"loss": 0.0344,
"step": 560
},
{
"epoch": 0.8750719631548647,
"grad_norm": 0.11692766845226288,
"learning_rate": 9.82974478075583e-05,
"loss": 0.0392,
"step": 570
},
{
"epoch": 0.890424102859336,
"grad_norm": 0.3442629873752594,
"learning_rate": 9.815591632633509e-05,
"loss": 0.0343,
"step": 580
},
{
"epoch": 0.9057762425638073,
"grad_norm": 0.25957638025283813,
"learning_rate": 9.800884488688985e-05,
"loss": 0.0465,
"step": 590
},
{
"epoch": 0.9211283822682786,
"grad_norm": 0.20577019453048706,
"learning_rate": 9.785625040863124e-05,
"loss": 0.0377,
"step": 600
},
{
"epoch": 0.9211283822682786,
"eval_loss": 0.035671208053827286,
"eval_runtime": 200.7483,
"eval_samples_per_second": 5.773,
"eval_steps_per_second": 5.773,
"step": 600
},
{
"epoch": 0.93648052197275,
"grad_norm": 0.24516840279102325,
"learning_rate": 9.769815044635005e-05,
"loss": 0.0289,
"step": 610
},
{
"epoch": 0.9518326616772212,
"grad_norm": 0.3754214942455292,
"learning_rate": 9.753456318819946e-05,
"loss": 0.042,
"step": 620
},
{
"epoch": 0.9671848013816926,
"grad_norm": 0.35276058316230774,
"learning_rate": 9.736550745360292e-05,
"loss": 0.037,
"step": 630
},
{
"epoch": 0.9825369410861639,
"grad_norm": 0.14712929725646973,
"learning_rate": 9.719100269108872e-05,
"loss": 0.0341,
"step": 640
},
{
"epoch": 0.9978890807906352,
"grad_norm": 0.16145889461040497,
"learning_rate": 9.701106897605304e-05,
"loss": 0.0315,
"step": 650
},
{
"epoch": 0.9978890807906352,
"eval_loss": 0.036111604422330856,
"eval_runtime": 199.7828,
"eval_samples_per_second": 5.801,
"eval_steps_per_second": 5.801,
"step": 650
},
{
"epoch": 1.0132412204951065,
"grad_norm": 0.21693025529384613,
"learning_rate": 9.682572700845006e-05,
"loss": 0.0254,
"step": 660
},
{
"epoch": 1.0285933601995778,
"grad_norm": 0.2023308128118515,
"learning_rate": 9.663499811041082e-05,
"loss": 0.0339,
"step": 670
},
{
"epoch": 1.0439454999040492,
"grad_norm": 0.3511405885219574,
"learning_rate": 9.643890422379018e-05,
"loss": 0.0334,
"step": 680
},
{
"epoch": 1.0592976396085205,
"grad_norm": 0.23378199338912964,
"learning_rate": 9.623746790764261e-05,
"loss": 0.0312,
"step": 690
},
{
"epoch": 1.0746497793129917,
"grad_norm": 0.3404940664768219,
"learning_rate": 9.603071233562695e-05,
"loss": 0.0332,
"step": 700
},
{
"epoch": 1.0746497793129917,
"eval_loss": 0.0359707772731781,
"eval_runtime": 199.3622,
"eval_samples_per_second": 5.814,
"eval_steps_per_second": 5.814,
"step": 700
},
{
"epoch": 1.090001919017463,
"grad_norm": 0.10679006576538086,
"learning_rate": 9.581866129334044e-05,
"loss": 0.0407,
"step": 710
},
{
"epoch": 1.1053540587219344,
"grad_norm": 0.1712823063135147,
"learning_rate": 9.560133917558242e-05,
"loss": 0.0431,
"step": 720
},
{
"epoch": 1.1207061984264057,
"grad_norm": 0.3909640610218048,
"learning_rate": 9.537877098354786e-05,
"loss": 0.033,
"step": 730
},
{
"epoch": 1.136058338130877,
"grad_norm": 0.18858745694160461,
"learning_rate": 9.51509823219512e-05,
"loss": 0.0278,
"step": 740
},
{
"epoch": 1.1514104778353482,
"grad_norm": 0.3083574175834656,
"learning_rate": 9.491799939608065e-05,
"loss": 0.0322,
"step": 750
},
{
"epoch": 1.1514104778353482,
"eval_loss": 0.035209909081459045,
"eval_runtime": 199.0974,
"eval_samples_per_second": 5.821,
"eval_steps_per_second": 5.821,
"step": 750
},
{
"epoch": 1.1667626175398196,
"grad_norm": 0.30565544962882996,
"learning_rate": 9.467984900878364e-05,
"loss": 0.0321,
"step": 760
},
{
"epoch": 1.182114757244291,
"grad_norm": 0.18242968618869781,
"learning_rate": 9.443655855738321e-05,
"loss": 0.0295,
"step": 770
},
{
"epoch": 1.1974668969487623,
"grad_norm": 0.269875168800354,
"learning_rate": 9.41881560305262e-05,
"loss": 0.0279,
"step": 780
},
{
"epoch": 1.2128190366532334,
"grad_norm": 0.30843907594680786,
"learning_rate": 9.393467000496344e-05,
"loss": 0.0288,
"step": 790
},
{
"epoch": 1.2281711763577048,
"grad_norm": 0.21833810210227966,
"learning_rate": 9.367612964226218e-05,
"loss": 0.0321,
"step": 800
},
{
"epoch": 1.2281711763577048,
"eval_loss": 0.03326805680990219,
"eval_runtime": 198.7649,
"eval_samples_per_second": 5.831,
"eval_steps_per_second": 5.831,
"step": 800
},
{
"epoch": 1.2435233160621761,
"grad_norm": 0.08195030689239502,
"learning_rate": 9.341256468545122e-05,
"loss": 0.0211,
"step": 810
},
{
"epoch": 1.2588754557666475,
"grad_norm": 0.22833506762981415,
"learning_rate": 9.314400545559934e-05,
"loss": 0.029,
"step": 820
},
{
"epoch": 1.2742275954711189,
"grad_norm": 0.3788512945175171,
"learning_rate": 9.287048284832698e-05,
"loss": 0.0291,
"step": 830
},
{
"epoch": 1.28957973517559,
"grad_norm": 0.31537115573883057,
"learning_rate": 9.2592028330252e-05,
"loss": 0.033,
"step": 840
},
{
"epoch": 1.3049318748800613,
"grad_norm": 0.21554504334926605,
"learning_rate": 9.230867393536972e-05,
"loss": 0.0299,
"step": 850
},
{
"epoch": 1.3049318748800613,
"eval_loss": 0.03233664855360985,
"eval_runtime": 198.455,
"eval_samples_per_second": 5.84,
"eval_steps_per_second": 5.84,
"step": 850
},
{
"epoch": 1.3202840145845327,
"grad_norm": 0.2527925670146942,
"learning_rate": 9.202045226136757e-05,
"loss": 0.0253,
"step": 860
},
{
"epoch": 1.335636154289004,
"grad_norm": 0.16620400547981262,
"learning_rate": 9.172739646587509e-05,
"loss": 0.0222,
"step": 870
},
{
"epoch": 1.3509882939934754,
"grad_norm": 0.12310031056404114,
"learning_rate": 9.142954026264931e-05,
"loss": 0.0262,
"step": 880
},
{
"epoch": 1.3663404336979466,
"grad_norm": 0.16346514225006104,
"learning_rate": 9.112691791769634e-05,
"loss": 0.028,
"step": 890
},
{
"epoch": 1.381692573402418,
"grad_norm": 0.31864503026008606,
"learning_rate": 9.081956424532926e-05,
"loss": 0.0286,
"step": 900
},
{
"epoch": 1.381692573402418,
"eval_loss": 0.034016843885183334,
"eval_runtime": 198.4293,
"eval_samples_per_second": 5.841,
"eval_steps_per_second": 5.841,
"step": 900
},
{
"epoch": 1.3970447131068893,
"grad_norm": 0.36539778113365173,
"learning_rate": 9.050751460416305e-05,
"loss": 0.032,
"step": 910
},
{
"epoch": 1.4123968528113606,
"grad_norm": 0.13684117794036865,
"learning_rate": 9.019080489304685e-05,
"loss": 0.0305,
"step": 920
},
{
"epoch": 1.427748992515832,
"grad_norm": 0.32496410608291626,
"learning_rate": 8.986947154693408e-05,
"loss": 0.0395,
"step": 930
},
{
"epoch": 1.4431011322203031,
"grad_norm": 0.29375186562538147,
"learning_rate": 8.954355153269088e-05,
"loss": 0.0381,
"step": 940
},
{
"epoch": 1.4584532719247745,
"grad_norm": 0.20962753891944885,
"learning_rate": 8.921308234484336e-05,
"loss": 0.0266,
"step": 950
},
{
"epoch": 1.4584532719247745,
"eval_loss": 0.03320714086294174,
"eval_runtime": 198.354,
"eval_samples_per_second": 5.843,
"eval_steps_per_second": 5.843,
"step": 950
},
{
"epoch": 1.4738054116292458,
"grad_norm": 0.13867303729057312,
"learning_rate": 8.887810200126419e-05,
"loss": 0.0266,
"step": 960
},
{
"epoch": 1.4891575513337172,
"grad_norm": 0.26519712805747986,
"learning_rate": 8.853864903879889e-05,
"loss": 0.0323,
"step": 970
},
{
"epoch": 1.5045096910381885,
"grad_norm": 0.11606111377477646,
"learning_rate": 8.81947625088325e-05,
"loss": 0.0304,
"step": 980
},
{
"epoch": 1.5198618307426597,
"grad_norm": 0.1499972641468048,
"learning_rate": 8.784648197279701e-05,
"loss": 0.0297,
"step": 990
},
{
"epoch": 1.535213970447131,
"grad_norm": 0.2889319062232971,
"learning_rate": 8.749384749762015e-05,
"loss": 0.0296,
"step": 1000
},
{
"epoch": 1.535213970447131,
"eval_loss": 0.032004013657569885,
"eval_runtime": 197.6915,
"eval_samples_per_second": 5.863,
"eval_steps_per_second": 5.863,
"step": 1000
},
{
"epoch": 1.5505661101516024,
"grad_norm": 0.18784977495670319,
"learning_rate": 8.713689965111602e-05,
"loss": 0.022,
"step": 1010
},
{
"epoch": 1.5659182498560735,
"grad_norm": 0.21896380186080933,
"learning_rate": 8.677567949731801e-05,
"loss": 0.0315,
"step": 1020
},
{
"epoch": 1.581270389560545,
"grad_norm": 0.3686976730823517,
"learning_rate": 8.64102285917548e-05,
"loss": 0.0445,
"step": 1030
},
{
"epoch": 1.5966225292650162,
"grad_norm": 0.16907860338687897,
"learning_rate": 8.604058897666962e-05,
"loss": 0.0327,
"step": 1040
},
{
"epoch": 1.6119746689694876,
"grad_norm": 0.20729367434978485,
"learning_rate": 8.566680317618377e-05,
"loss": 0.022,
"step": 1050
},
{
"epoch": 1.6119746689694876,
"eval_loss": 0.03071741759777069,
"eval_runtime": 197.565,
"eval_samples_per_second": 5.866,
"eval_steps_per_second": 5.866,
"step": 1050
},
{
"epoch": 1.627326808673959,
"grad_norm": 0.13343840837478638,
"learning_rate": 8.528891419140438e-05,
"loss": 0.0327,
"step": 1060
},
{
"epoch": 1.64267894837843,
"grad_norm": 0.12018793076276779,
"learning_rate": 8.490696549547761e-05,
"loss": 0.0212,
"step": 1070
},
{
"epoch": 1.6580310880829017,
"grad_norm": 0.36419177055358887,
"learning_rate": 8.452100102858734e-05,
"loss": 0.0389,
"step": 1080
},
{
"epoch": 1.6733832277873728,
"grad_norm": 0.18621623516082764,
"learning_rate": 8.413106519290023e-05,
"loss": 0.028,
"step": 1090
},
{
"epoch": 1.6887353674918442,
"grad_norm": 0.13192850351333618,
"learning_rate": 8.373720284745757e-05,
"loss": 0.0292,
"step": 1100
},
{
"epoch": 1.6887353674918442,
"eval_loss": 0.031201008707284927,
"eval_runtime": 197.3374,
"eval_samples_per_second": 5.873,
"eval_steps_per_second": 5.873,
"step": 1100
},
{
"epoch": 1.7040875071963155,
"grad_norm": 0.15397463738918304,
"learning_rate": 8.333945930301459e-05,
"loss": 0.0275,
"step": 1110
},
{
"epoch": 1.7194396469007867,
"grad_norm": 0.1929481476545334,
"learning_rate": 8.293788031682789e-05,
"loss": 0.0301,
"step": 1120
},
{
"epoch": 1.7347917866052582,
"grad_norm": 0.1465729922056198,
"learning_rate": 8.253251208739137e-05,
"loss": 0.0236,
"step": 1130
},
{
"epoch": 1.7501439263097294,
"grad_norm": 0.27143338322639465,
"learning_rate": 8.21234012491215e-05,
"loss": 0.0238,
"step": 1140
},
{
"epoch": 1.7654960660142007,
"grad_norm": 0.16392782330513,
"learning_rate": 8.171059486699224e-05,
"loss": 0.0269,
"step": 1150
},
{
"epoch": 1.7654960660142007,
"eval_loss": 0.03296918794512749,
"eval_runtime": 197.4211,
"eval_samples_per_second": 5.871,
"eval_steps_per_second": 5.871,
"step": 1150
},
{
"epoch": 1.780848205718672,
"grad_norm": 0.12399645149707794,
"learning_rate": 8.129414043112087e-05,
"loss": 0.029,
"step": 1160
},
{
"epoch": 1.7962003454231432,
"grad_norm": 0.15697214007377625,
"learning_rate": 8.087408585130438e-05,
"loss": 0.0271,
"step": 1170
},
{
"epoch": 1.8115524851276148,
"grad_norm": 0.11034557968378067,
"learning_rate": 8.045047945150796e-05,
"loss": 0.0312,
"step": 1180
},
{
"epoch": 1.826904624832086,
"grad_norm": 0.12432292848825455,
"learning_rate": 8.002336996430561e-05,
"loss": 0.0265,
"step": 1190
},
{
"epoch": 1.8422567645365573,
"grad_norm": 0.14914625883102417,
"learning_rate": 7.959280652527394e-05,
"loss": 0.0204,
"step": 1200
},
{
"epoch": 1.8422567645365573,
"eval_loss": 0.030618194490671158,
"eval_runtime": 197.353,
"eval_samples_per_second": 5.873,
"eval_steps_per_second": 5.873,
"step": 1200
},
{
"epoch": 1.8576089042410286,
"grad_norm": 0.14602138102054596,
"learning_rate": 7.915883866733946e-05,
"loss": 0.0295,
"step": 1210
},
{
"epoch": 1.8729610439454998,
"grad_norm": 0.30296677350997925,
"learning_rate": 7.872151631508022e-05,
"loss": 0.0301,
"step": 1220
},
{
"epoch": 1.8883131836499714,
"grad_norm": 0.1722898930311203,
"learning_rate": 7.828088977898234e-05,
"loss": 0.0292,
"step": 1230
},
{
"epoch": 1.9036653233544425,
"grad_norm": 0.21365255117416382,
"learning_rate": 7.783700974965225e-05,
"loss": 0.036,
"step": 1240
},
{
"epoch": 1.9190174630589139,
"grad_norm": 0.15060608088970184,
"learning_rate": 7.738992729198511e-05,
"loss": 0.0306,
"step": 1250
},
{
"epoch": 1.9190174630589139,
"eval_loss": 0.030947549268603325,
"eval_runtime": 197.5552,
"eval_samples_per_second": 5.867,
"eval_steps_per_second": 5.867,
"step": 1250
},
{
"epoch": 1.9343696027633852,
"grad_norm": 0.1173550933599472,
"learning_rate": 7.693969383929017e-05,
"loss": 0.02,
"step": 1260
},
{
"epoch": 1.9497217424678563,
"grad_norm": 0.2480284571647644,
"learning_rate": 7.648636118737385e-05,
"loss": 0.0353,
"step": 1270
},
{
"epoch": 1.965073882172328,
"grad_norm": 0.2729029655456543,
"learning_rate": 7.602998148858089e-05,
"loss": 0.024,
"step": 1280
},
{
"epoch": 1.980426021876799,
"grad_norm": 0.29488909244537354,
"learning_rate": 7.557060724579484e-05,
"loss": 0.0276,
"step": 1290
},
{
"epoch": 1.9957781615812704,
"grad_norm": 0.451770544052124,
"learning_rate": 7.51082913063978e-05,
"loss": 0.0364,
"step": 1300
},
{
"epoch": 1.9957781615812704,
"eval_loss": 0.03137281537055969,
"eval_runtime": 197.4511,
"eval_samples_per_second": 5.87,
"eval_steps_per_second": 5.87,
"step": 1300
},
{
"epoch": 2.0111303012857418,
"grad_norm": 0.20203380286693573,
"learning_rate": 7.464308685619099e-05,
"loss": 0.0225,
"step": 1310
},
{
"epoch": 2.026482440990213,
"grad_norm": 0.2726834714412689,
"learning_rate": 7.417504741327587e-05,
"loss": 0.017,
"step": 1320
},
{
"epoch": 2.0418345806946845,
"grad_norm": 0.30587098002433777,
"learning_rate": 7.370422682189755e-05,
"loss": 0.0183,
"step": 1330
},
{
"epoch": 2.0571867203991556,
"grad_norm": 0.11235737800598145,
"learning_rate": 7.323067924625024e-05,
"loss": 0.0171,
"step": 1340
},
{
"epoch": 2.0725388601036268,
"grad_norm": 0.19976893067359924,
"learning_rate": 7.275445916424627e-05,
"loss": 0.0194,
"step": 1350
},
{
"epoch": 2.0725388601036268,
"eval_loss": 0.0319148451089859,
"eval_runtime": 193.9364,
"eval_samples_per_second": 5.976,
"eval_steps_per_second": 5.976,
"step": 1350
},
{
"epoch": 2.0878909998080983,
"grad_norm": 0.26515206694602966,
"learning_rate": 7.227562136124864e-05,
"loss": 0.0141,
"step": 1360
},
{
"epoch": 2.1032431395125695,
"grad_norm": 0.15498653054237366,
"learning_rate": 7.179422092376856e-05,
"loss": 0.0159,
"step": 1370
},
{
"epoch": 2.118595279217041,
"grad_norm": 0.32679131627082825,
"learning_rate": 7.13103132331281e-05,
"loss": 0.0198,
"step": 1380
},
{
"epoch": 2.133947418921512,
"grad_norm": 0.28593504428863525,
"learning_rate": 7.082395395908903e-05,
"loss": 0.0221,
"step": 1390
},
{
"epoch": 2.1492995586259833,
"grad_norm": 0.30774274468421936,
"learning_rate": 7.033519905344846e-05,
"loss": 0.0148,
"step": 1400
},
{
"epoch": 2.1492995586259833,
"eval_loss": 0.03176383674144745,
"eval_runtime": 196.6326,
"eval_samples_per_second": 5.894,
"eval_steps_per_second": 5.894,
"step": 1400
},
{
"epoch": 2.164651698330455,
"grad_norm": 0.15440446138381958,
"learning_rate": 6.984410474360195e-05,
"loss": 0.0181,
"step": 1410
},
{
"epoch": 2.180003838034926,
"grad_norm": 0.17429502308368683,
"learning_rate": 6.935072752607511e-05,
"loss": 0.0228,
"step": 1420
},
{
"epoch": 2.1953559777393976,
"grad_norm": 0.1579236090183258,
"learning_rate": 6.885512416002412e-05,
"loss": 0.0247,
"step": 1430
},
{
"epoch": 2.2107081174438687,
"grad_norm": 0.18410032987594604,
"learning_rate": 6.835735166070587e-05,
"loss": 0.0179,
"step": 1440
},
{
"epoch": 2.22606025714834,
"grad_norm": 0.21408940851688385,
"learning_rate": 6.785746729291897e-05,
"loss": 0.0161,
"step": 1450
},
{
"epoch": 2.22606025714834,
"eval_loss": 0.030531086027622223,
"eval_runtime": 196.8522,
"eval_samples_per_second": 5.888,
"eval_steps_per_second": 5.888,
"step": 1450
},
{
"epoch": 2.2414123968528115,
"grad_norm": 0.1800948679447174,
"learning_rate": 6.735552856441585e-05,
"loss": 0.0216,
"step": 1460
},
{
"epoch": 2.2567645365572826,
"grad_norm": 0.19375424087047577,
"learning_rate": 6.685159321928691e-05,
"loss": 0.0189,
"step": 1470
},
{
"epoch": 2.272116676261754,
"grad_norm": 0.2888384163379669,
"learning_rate": 6.634571923131756e-05,
"loss": 0.0177,
"step": 1480
},
{
"epoch": 2.2874688159662253,
"grad_norm": 0.1332111358642578,
"learning_rate": 6.583796479731872e-05,
"loss": 0.0172,
"step": 1490
},
{
"epoch": 2.3028209556706964,
"grad_norm": 0.09404625743627548,
"learning_rate": 6.532838833043189e-05,
"loss": 0.0293,
"step": 1500
},
{
"epoch": 2.3028209556706964,
"eval_loss": 0.03225649148225784,
"eval_runtime": 196.8453,
"eval_samples_per_second": 5.888,
"eval_steps_per_second": 5.888,
"step": 1500
},
{
"epoch": 2.318173095375168,
"grad_norm": 0.1909513920545578,
"learning_rate": 6.481704845340894e-05,
"loss": 0.0207,
"step": 1510
},
{
"epoch": 2.333525235079639,
"grad_norm": 0.34329941868782043,
"learning_rate": 6.43040039918683e-05,
"loss": 0.0178,
"step": 1520
},
{
"epoch": 2.3488773747841103,
"grad_norm": 0.28039267659187317,
"learning_rate": 6.37893139675273e-05,
"loss": 0.0178,
"step": 1530
},
{
"epoch": 2.364229514488582,
"grad_norm": 0.2201843112707138,
"learning_rate": 6.327303759141235e-05,
"loss": 0.0146,
"step": 1540
},
{
"epoch": 2.379581654193053,
"grad_norm": 0.37142327427864075,
"learning_rate": 6.275523425704707e-05,
"loss": 0.0203,
"step": 1550
},
{
"epoch": 2.379581654193053,
"eval_loss": 0.03294830769300461,
"eval_runtime": 197.0925,
"eval_samples_per_second": 5.88,
"eval_steps_per_second": 5.88,
"step": 1550
},
{
"epoch": 2.3949337938975246,
"grad_norm": 0.135298952460289,
"learning_rate": 6.223596353361961e-05,
"loss": 0.0174,
"step": 1560
},
{
"epoch": 2.4102859336019957,
"grad_norm": 0.07320668548345566,
"learning_rate": 6.171528515912965e-05,
"loss": 0.0144,
"step": 1570
},
{
"epoch": 2.425638073306467,
"grad_norm": 0.2828701436519623,
"learning_rate": 6.119325903351599e-05,
"loss": 0.019,
"step": 1580
},
{
"epoch": 2.4409902130109384,
"grad_norm": 0.2670722007751465,
"learning_rate": 6.0669945211765585e-05,
"loss": 0.027,
"step": 1590
},
{
"epoch": 2.4563423527154096,
"grad_norm": 0.2617368698120117,
"learning_rate": 6.0145403897004696e-05,
"loss": 0.0235,
"step": 1600
},
{
"epoch": 2.4563423527154096,
"eval_loss": 0.032713185995817184,
"eval_runtime": 196.9905,
"eval_samples_per_second": 5.884,
"eval_steps_per_second": 5.884,
"step": 1600
},
{
"epoch": 2.471694492419881,
"grad_norm": 0.29310551285743713,
"learning_rate": 5.961969543357292e-05,
"loss": 0.0233,
"step": 1610
},
{
"epoch": 2.4870466321243523,
"grad_norm": 0.16717247664928436,
"learning_rate": 5.9092880300081123e-05,
"loss": 0.0243,
"step": 1620
},
{
"epoch": 2.5023987718288234,
"grad_norm": 0.36758190393447876,
"learning_rate": 5.8565019102453844e-05,
"loss": 0.0221,
"step": 1630
},
{
"epoch": 2.517750911533295,
"grad_norm": 0.21562321484088898,
"learning_rate": 5.8036172566957006e-05,
"loss": 0.0193,
"step": 1640
},
{
"epoch": 2.533103051237766,
"grad_norm": 0.20650765299797058,
"learning_rate": 5.750640153321194e-05,
"loss": 0.0234,
"step": 1650
},
{
"epoch": 2.533103051237766,
"eval_loss": 0.031079526990652084,
"eval_runtime": 197.1562,
"eval_samples_per_second": 5.879,
"eval_steps_per_second": 5.879,
"step": 1650
},
{
"epoch": 2.5484551909422377,
"grad_norm": 0.43781033158302307,
"learning_rate": 5.697576694719616e-05,
"loss": 0.0183,
"step": 1660
},
{
"epoch": 2.563807330646709,
"grad_norm": 0.14761963486671448,
"learning_rate": 5.644432985423206e-05,
"loss": 0.0185,
"step": 1670
},
{
"epoch": 2.57915947035118,
"grad_norm": 0.32953789830207825,
"learning_rate": 5.591215139196414e-05,
"loss": 0.0246,
"step": 1680
},
{
"epoch": 2.5945116100556516,
"grad_norm": 0.2492523342370987,
"learning_rate": 5.5379292783325585e-05,
"loss": 0.0239,
"step": 1690
},
{
"epoch": 2.6098637497601227,
"grad_norm": 0.38178038597106934,
"learning_rate": 5.4845815329495054e-05,
"loss": 0.0227,
"step": 1700
},
{
"epoch": 2.6098637497601227,
"eval_loss": 0.030748112127184868,
"eval_runtime": 197.0897,
"eval_samples_per_second": 5.881,
"eval_steps_per_second": 5.881,
"step": 1700
},
{
"epoch": 2.6252158894645943,
"grad_norm": 0.11092711985111237,
"learning_rate": 5.431178040284446e-05,
"loss": 0.0184,
"step": 1710
},
{
"epoch": 2.6405680291690654,
"grad_norm": 0.25804007053375244,
"learning_rate": 5.377724943987855e-05,
"loss": 0.0162,
"step": 1720
},
{
"epoch": 2.6559201688735365,
"grad_norm": 0.22017376124858856,
"learning_rate": 5.324228393416718e-05,
"loss": 0.0163,
"step": 1730
},
{
"epoch": 2.671272308578008,
"grad_norm": 0.515049934387207,
"learning_rate": 5.270694542927088e-05,
"loss": 0.0191,
"step": 1740
},
{
"epoch": 2.6866244482824793,
"grad_norm": 0.16297389566898346,
"learning_rate": 5.21712955116608e-05,
"loss": 0.0147,
"step": 1750
},
{
"epoch": 2.6866244482824793,
"eval_loss": 0.031325813382864,
"eval_runtime": 196.8694,
"eval_samples_per_second": 5.887,
"eval_steps_per_second": 5.887,
"step": 1750
},
{
"epoch": 2.701976587986951,
"grad_norm": 0.1251741498708725,
"learning_rate": 5.1635395803633666e-05,
"loss": 0.0183,
"step": 1760
},
{
"epoch": 2.717328727691422,
"grad_norm": 0.0920635238289833,
"learning_rate": 5.109930795622265e-05,
"loss": 0.0139,
"step": 1770
},
{
"epoch": 2.732680867395893,
"grad_norm": 0.08902430534362793,
"learning_rate": 5.056309364210483e-05,
"loss": 0.0155,
"step": 1780
},
{
"epoch": 2.7480330071003647,
"grad_norm": 0.2637597322463989,
"learning_rate": 5.002681454850632e-05,
"loss": 0.0259,
"step": 1790
},
{
"epoch": 2.763385146804836,
"grad_norm": 0.17477376759052277,
"learning_rate": 4.949053237010554e-05,
"loss": 0.0202,
"step": 1800
},
{
"epoch": 2.763385146804836,
"eval_loss": 0.032162439078092575,
"eval_runtime": 196.5492,
"eval_samples_per_second": 5.897,
"eval_steps_per_second": 5.897,
"step": 1800
},
{
"epoch": 2.7787372865093074,
"grad_norm": 0.17457661032676697,
"learning_rate": 4.89543088019359e-05,
"loss": 0.0141,
"step": 1810
},
{
"epoch": 2.7940894262137785,
"grad_norm": 0.22364993393421173,
"learning_rate": 4.841820553228805e-05,
"loss": 0.0236,
"step": 1820
},
{
"epoch": 2.8094415659182497,
"grad_norm": 0.25010716915130615,
"learning_rate": 4.7882284235613324e-05,
"loss": 0.0269,
"step": 1830
},
{
"epoch": 2.8247937056227213,
"grad_norm": 0.08725937455892563,
"learning_rate": 4.734660656542846e-05,
"loss": 0.0172,
"step": 1840
},
{
"epoch": 2.8401458453271924,
"grad_norm": 0.17154793441295624,
"learning_rate": 4.681123414722291e-05,
"loss": 0.0203,
"step": 1850
},
{
"epoch": 2.8401458453271924,
"eval_loss": 0.031308364123106,
"eval_runtime": 196.5695,
"eval_samples_per_second": 5.896,
"eval_steps_per_second": 5.896,
"step": 1850
},
{
"epoch": 2.855497985031664,
"grad_norm": 0.14235727488994598,
"learning_rate": 4.627622857136929e-05,
"loss": 0.0157,
"step": 1860
},
{
"epoch": 2.870850124736135,
"grad_norm": 0.3842136561870575,
"learning_rate": 4.5741651386037883e-05,
"loss": 0.0216,
"step": 1870
},
{
"epoch": 2.8862022644406062,
"grad_norm": 0.25185850262641907,
"learning_rate": 4.5207564090116064e-05,
"loss": 0.0151,
"step": 1880
},
{
"epoch": 2.901554404145078,
"grad_norm": 0.23601651191711426,
"learning_rate": 4.467402812613323e-05,
"loss": 0.0148,
"step": 1890
},
{
"epoch": 2.916906543849549,
"grad_norm": 0.17588479816913605,
"learning_rate": 4.414110487319244e-05,
"loss": 0.0199,
"step": 1900
},
{
"epoch": 2.916906543849549,
"eval_loss": 0.031001152470707893,
"eval_runtime": 196.7891,
"eval_samples_per_second": 5.89,
"eval_steps_per_second": 5.89,
"step": 1900
},
{
"epoch": 2.9322586835540205,
"grad_norm": 0.21399495005607605,
"learning_rate": 4.360885563990919e-05,
"loss": 0.0174,
"step": 1910
},
{
"epoch": 2.9476108232584917,
"grad_norm": 0.18000049889087677,
"learning_rate": 4.307734165735829e-05,
"loss": 0.0123,
"step": 1920
},
{
"epoch": 2.962962962962963,
"grad_norm": 0.19970905780792236,
"learning_rate": 4.254662407202976e-05,
"loss": 0.0175,
"step": 1930
},
{
"epoch": 2.9783151026674344,
"grad_norm": 0.21200795471668243,
"learning_rate": 4.201676393879446e-05,
"loss": 0.0197,
"step": 1940
},
{
"epoch": 2.9936672423719055,
"grad_norm": 0.15390953421592712,
"learning_rate": 4.148782221388007e-05,
"loss": 0.0152,
"step": 1950
},
{
"epoch": 2.9936672423719055,
"eval_loss": 0.0314854197204113,
"eval_runtime": 197.0087,
"eval_samples_per_second": 5.883,
"eval_steps_per_second": 5.883,
"step": 1950
},
{
"epoch": 3.009019382076377,
"grad_norm": 0.1752149611711502,
"learning_rate": 4.0959859747858706e-05,
"loss": 0.0116,
"step": 1960
},
{
"epoch": 3.0243715217808482,
"grad_norm": 0.22070646286010742,
"learning_rate": 4.043293727864644e-05,
"loss": 0.0081,
"step": 1970
},
{
"epoch": 3.0397236614853194,
"grad_norm": 0.39557531476020813,
"learning_rate": 3.990711542451591e-05,
"loss": 0.0103,
"step": 1980
},
{
"epoch": 3.055075801189791,
"grad_norm": 0.1403462290763855,
"learning_rate": 3.9382454677122704e-05,
"loss": 0.0105,
"step": 1990
},
{
"epoch": 3.070427940894262,
"grad_norm": 0.15649639070034027,
"learning_rate": 3.885901539454623e-05,
"loss": 0.0065,
"step": 2000
},
{
"epoch": 3.070427940894262,
"eval_loss": 0.034673649817705154,
"eval_runtime": 196.6895,
"eval_samples_per_second": 5.893,
"eval_steps_per_second": 5.893,
"step": 2000
},
{
"epoch": 3.0857800805987337,
"grad_norm": 0.26310181617736816,
"learning_rate": 3.833685779434597e-05,
"loss": 0.0124,
"step": 2010
},
{
"epoch": 3.101132220303205,
"grad_norm": 0.12814761698246002,
"learning_rate": 3.7816041946634024e-05,
"loss": 0.0092,
"step": 2020
},
{
"epoch": 3.116484360007676,
"grad_norm": 0.2951210141181946,
"learning_rate": 3.729662776716439e-05,
"loss": 0.0085,
"step": 2030
},
{
"epoch": 3.1318364997121475,
"grad_norm": 0.13861867785453796,
"learning_rate": 3.677867501044029e-05,
"loss": 0.0067,
"step": 2040
},
{
"epoch": 3.1471886394166186,
"grad_norm": 0.17344604432582855,
"learning_rate": 3.6262243262839654e-05,
"loss": 0.0155,
"step": 2050
},
{
"epoch": 3.1471886394166186,
"eval_loss": 0.034481730312108994,
"eval_runtime": 197.6943,
"eval_samples_per_second": 5.863,
"eval_steps_per_second": 5.863,
"step": 2050
},
{
"epoch": 3.16254077912109,
"grad_norm": 0.09056749194860458,
"learning_rate": 3.574739193576042e-05,
"loss": 0.0077,
"step": 2060
},
{
"epoch": 3.1778929188255614,
"grad_norm": 0.10517852753400803,
"learning_rate": 3.5234180258785554e-05,
"loss": 0.0092,
"step": 2070
},
{
"epoch": 3.1932450585300325,
"grad_norm": 0.12550629675388336,
"learning_rate": 3.472266727286928e-05,
"loss": 0.0077,
"step": 2080
},
{
"epoch": 3.208597198234504,
"grad_norm": 0.15013106167316437,
"learning_rate": 3.4212911823544746e-05,
"loss": 0.011,
"step": 2090
},
{
"epoch": 3.223949337938975,
"grad_norm": 0.10141454637050629,
"learning_rate": 3.370497255415443e-05,
"loss": 0.0087,
"step": 2100
},
{
"epoch": 3.223949337938975,
"eval_loss": 0.03667592629790306,
"eval_runtime": 198.0424,
"eval_samples_per_second": 5.852,
"eval_steps_per_second": 5.852,
"step": 2100
},
{
"epoch": 3.239301477643447,
"grad_norm": 0.34847912192344666,
"learning_rate": 3.319890789910364e-05,
"loss": 0.009,
"step": 2110
},
{
"epoch": 3.254653617347918,
"grad_norm": 0.09903811663389206,
"learning_rate": 3.269477607713802e-05,
"loss": 0.0129,
"step": 2120
},
{
"epoch": 3.270005757052389,
"grad_norm": 0.35761070251464844,
"learning_rate": 3.219263508464614e-05,
"loss": 0.0109,
"step": 2130
},
{
"epoch": 3.2853578967568606,
"grad_norm": 0.8299241662025452,
"learning_rate": 3.169254268898725e-05,
"loss": 0.0097,
"step": 2140
},
{
"epoch": 3.3007100364613318,
"grad_norm": 0.034059587866067886,
"learning_rate": 3.119455642184572e-05,
"loss": 0.0107,
"step": 2150
},
{
"epoch": 3.3007100364613318,
"eval_loss": 0.03527729585766792,
"eval_runtime": 197.9624,
"eval_samples_per_second": 5.855,
"eval_steps_per_second": 5.855,
"step": 2150
},
{
"epoch": 3.3160621761658033,
"grad_norm": 0.17827175557613373,
"learning_rate": 3.069873357261249e-05,
"loss": 0.0101,
"step": 2160
},
{
"epoch": 3.3314143158702745,
"grad_norm": 0.14705407619476318,
"learning_rate": 3.020513118179428e-05,
"loss": 0.0128,
"step": 2170
},
{
"epoch": 3.3467664555747456,
"grad_norm": 0.3249284327030182,
"learning_rate": 2.9713806034451652e-05,
"loss": 0.0085,
"step": 2180
},
{
"epoch": 3.362118595279217,
"grad_norm": 0.035756666213274,
"learning_rate": 2.9224814653666242e-05,
"loss": 0.0075,
"step": 2190
},
{
"epoch": 3.3774707349836883,
"grad_norm": 0.1439007669687271,
"learning_rate": 2.8738213294038212e-05,
"loss": 0.0113,
"step": 2200
},
{
"epoch": 3.3774707349836883,
"eval_loss": 0.03770239278674126,
"eval_runtime": 197.5106,
"eval_samples_per_second": 5.868,
"eval_steps_per_second": 5.868,
"step": 2200
},
{
"epoch": 3.39282287468816,
"grad_norm": 0.29063209891319275,
"learning_rate": 2.8254057935214735e-05,
"loss": 0.0108,
"step": 2210
},
{
"epoch": 3.408175014392631,
"grad_norm": 0.23443534970283508,
"learning_rate": 2.7772404275449825e-05,
"loss": 0.0109,
"step": 2220
},
{
"epoch": 3.423527154097102,
"grad_norm": 0.2173459231853485,
"learning_rate": 2.7293307725196793e-05,
"loss": 0.0095,
"step": 2230
},
{
"epoch": 3.4388792938015738,
"grad_norm": 0.19652967154979706,
"learning_rate": 2.6816823400733625e-05,
"loss": 0.0119,
"step": 2240
},
{
"epoch": 3.454231433506045,
"grad_norm": 0.49155566096305847,
"learning_rate": 2.6343006117822437e-05,
"loss": 0.0115,
"step": 2250
},
{
"epoch": 3.454231433506045,
"eval_loss": 0.03576047718524933,
"eval_runtime": 198.0048,
"eval_samples_per_second": 5.853,
"eval_steps_per_second": 5.853,
"step": 2250
},
{
"epoch": 3.469583573210516,
"grad_norm": 0.04775138571858406,
"learning_rate": 2.587191038540317e-05,
"loss": 0.0079,
"step": 2260
},
{
"epoch": 3.4849357129149876,
"grad_norm": 0.3148171603679657,
"learning_rate": 2.5403590399322886e-05,
"loss": 0.0057,
"step": 2270
},
{
"epoch": 3.5002878526194587,
"grad_norm": 0.296872615814209,
"learning_rate": 2.493810003610092e-05,
"loss": 0.0143,
"step": 2280
},
{
"epoch": 3.51563999232393,
"grad_norm": 0.27500900626182556,
"learning_rate": 2.4475492846730835e-05,
"loss": 0.0096,
"step": 2290
},
{
"epoch": 3.5309921320284015,
"grad_norm": 0.2594282031059265,
"learning_rate": 2.4015822050519794e-05,
"loss": 0.0087,
"step": 2300
},
{
"epoch": 3.5309921320284015,
"eval_loss": 0.03767295554280281,
"eval_runtime": 198.052,
"eval_samples_per_second": 5.852,
"eval_steps_per_second": 5.852,
"step": 2300
},
{
"epoch": 3.546344271732873,
"grad_norm": 0.24976056814193726,
"learning_rate": 2.3559140528966074e-05,
"loss": 0.0072,
"step": 2310
},
{
"epoch": 3.561696411437344,
"grad_norm": 0.1786753237247467,
"learning_rate": 2.3105500819675495e-05,
"loss": 0.0096,
"step": 2320
},
{
"epoch": 3.5770485511418153,
"grad_norm": 0.23791159689426422,
"learning_rate": 2.265495511031742e-05,
"loss": 0.0129,
"step": 2330
},
{
"epoch": 3.5924006908462864,
"grad_norm": 0.21382766962051392,
"learning_rate": 2.2207555232620893e-05,
"loss": 0.0125,
"step": 2340
},
{
"epoch": 3.607752830550758,
"grad_norm": 0.06649874895811081,
"learning_rate": 2.1763352656411785e-05,
"loss": 0.0099,
"step": 2350
},
{
"epoch": 3.607752830550758,
"eval_loss": 0.03737160563468933,
"eval_runtime": 198.0602,
"eval_samples_per_second": 5.852,
"eval_steps_per_second": 5.852,
"step": 2350
},
{
"epoch": 3.6231049702552296,
"grad_norm": 0.295950710773468,
"learning_rate": 2.1322398483691787e-05,
"loss": 0.0063,
"step": 2360
},
{
"epoch": 3.6384571099597007,
"grad_norm": 0.25773197412490845,
"learning_rate": 2.08847434427593e-05,
"loss": 0.0101,
"step": 2370
},
{
"epoch": 3.653809249664172,
"grad_norm": 0.4787651002407074,
"learning_rate": 2.0450437882373697e-05,
"loss": 0.0099,
"step": 2380
},
{
"epoch": 3.669161389368643,
"grad_norm": 0.1840725690126419,
"learning_rate": 2.0019531765962995e-05,
"loss": 0.0078,
"step": 2390
},
{
"epoch": 3.6845135290731146,
"grad_norm": 0.30393171310424805,
"learning_rate": 1.9592074665876026e-05,
"loss": 0.0075,
"step": 2400
},
{
"epoch": 3.6845135290731146,
"eval_loss": 0.038148432970047,
"eval_runtime": 198.4246,
"eval_samples_per_second": 5.841,
"eval_steps_per_second": 5.841,
"step": 2400
},
{
"epoch": 3.6998656687775857,
"grad_norm": 0.07026088237762451,
"learning_rate": 1.9168115757679535e-05,
"loss": 0.0127,
"step": 2410
},
{
"epoch": 3.7152178084820573,
"grad_norm": 0.27099308371543884,
"learning_rate": 1.8747703814500866e-05,
"loss": 0.0086,
"step": 2420
},
{
"epoch": 3.7305699481865284,
"grad_norm": 0.2625258266925812,
"learning_rate": 1.833088720141698e-05,
"loss": 0.0085,
"step": 2430
},
{
"epoch": 3.7459220878909996,
"grad_norm": 0.38898608088493347,
"learning_rate": 1.7917713869890557e-05,
"loss": 0.0088,
"step": 2440
},
{
"epoch": 3.761274227595471,
"grad_norm": 0.18794888257980347,
"learning_rate": 1.7508231352253435e-05,
"loss": 0.0064,
"step": 2450
},
{
"epoch": 3.761274227595471,
"eval_loss": 0.038376811891794205,
"eval_runtime": 198.1296,
"eval_samples_per_second": 5.85,
"eval_steps_per_second": 5.85,
"step": 2450
},
{
"epoch": 3.7766263672999423,
"grad_norm": 0.07355306297540665,
"learning_rate": 1.7102486756238435e-05,
"loss": 0.0086,
"step": 2460
},
{
"epoch": 3.791978507004414,
"grad_norm": 0.2125598043203354,
"learning_rate": 1.6700526759560002e-05,
"loss": 0.0109,
"step": 2470
},
{
"epoch": 3.807330646708885,
"grad_norm": 0.12951001524925232,
"learning_rate": 1.6302397604544257e-05,
"loss": 0.0119,
"step": 2480
},
{
"epoch": 3.822682786413356,
"grad_norm": 0.15507496893405914,
"learning_rate": 1.5908145092809272e-05,
"loss": 0.0066,
"step": 2490
},
{
"epoch": 3.8380349261178277,
"grad_norm": 0.08303289115428925,
"learning_rate": 1.551781457999586e-05,
"loss": 0.0111,
"step": 2500
},
{
"epoch": 3.8380349261178277,
"eval_loss": 0.0382462814450264,
"eval_runtime": 197.7703,
"eval_samples_per_second": 5.86,
"eval_steps_per_second": 5.86,
"step": 2500
},
{
"epoch": 3.853387065822299,
"grad_norm": 0.11872826516628265,
"learning_rate": 1.513145097054977e-05,
"loss": 0.0098,
"step": 2510
},
{
"epoch": 3.8687392055267704,
"grad_norm": 0.14837545156478882,
"learning_rate": 1.4749098712555854e-05,
"loss": 0.0094,
"step": 2520
},
{
"epoch": 3.8840913452312416,
"grad_norm": 0.1325119286775589,
"learning_rate": 1.4370801792624656e-05,
"loss": 0.0079,
"step": 2530
},
{
"epoch": 3.8994434849357127,
"grad_norm": 0.35325750708580017,
"learning_rate": 1.399660373083203e-05,
"loss": 0.0096,
"step": 2540
},
{
"epoch": 3.9147956246401843,
"grad_norm": 0.5195595622062683,
"learning_rate": 1.3626547575712545e-05,
"loss": 0.0154,
"step": 2550
},
{
"epoch": 3.9147956246401843,
"eval_loss": 0.03803320229053497,
"eval_runtime": 197.0492,
"eval_samples_per_second": 5.882,
"eval_steps_per_second": 5.882,
"step": 2550
},
{
"epoch": 3.9301477643446554,
"grad_norm": 0.5137061476707458,
"learning_rate": 1.3260675899307096e-05,
"loss": 0.0167,
"step": 2560
},
{
"epoch": 3.945499904049127,
"grad_norm": 0.12673397362232208,
"learning_rate": 1.2899030792265349e-05,
"loss": 0.0079,
"step": 2570
},
{
"epoch": 3.960852043753598,
"grad_norm": 0.15755097568035126,
"learning_rate": 1.2541653859003437e-05,
"loss": 0.0108,
"step": 2580
},
{
"epoch": 3.9762041834580693,
"grad_norm": 0.23179736733436584,
"learning_rate": 1.2188586212917846e-05,
"loss": 0.0108,
"step": 2590
},
{
"epoch": 3.991556323162541,
"grad_norm": 0.23065230250358582,
"learning_rate": 1.1839868471655523e-05,
"loss": 0.0087,
"step": 2600
},
{
"epoch": 3.991556323162541,
"eval_loss": 0.03791119158267975,
"eval_runtime": 196.3594,
"eval_samples_per_second": 5.902,
"eval_steps_per_second": 5.902,
"step": 2600
},
{
"epoch": 4.006908462867012,
"grad_norm": 0.07067421078681946,
"learning_rate": 1.1495540752441235e-05,
"loss": 0.0085,
"step": 2610
},
{
"epoch": 4.0222606025714835,
"grad_norm": 0.06453356891870499,
"learning_rate": 1.1155642667462318e-05,
"loss": 0.0062,
"step": 2620
},
{
"epoch": 4.037612742275955,
"grad_norm": 0.10147488862276077,
"learning_rate": 1.082021331931164e-05,
"loss": 0.005,
"step": 2630
},
{
"epoch": 4.052964881980426,
"grad_norm": 0.18546722829341888,
"learning_rate": 1.0489291296489152e-05,
"loss": 0.0056,
"step": 2640
},
{
"epoch": 4.068317021684897,
"grad_norm": 0.09176095575094223,
"learning_rate": 1.0162914668962631e-05,
"loss": 0.0042,
"step": 2650
},
{
"epoch": 4.068317021684897,
"eval_loss": 0.039211783558130264,
"eval_runtime": 196.0944,
"eval_samples_per_second": 5.91,
"eval_steps_per_second": 5.91,
"step": 2650
},
{
"epoch": 4.083669161389369,
"grad_norm": 0.22541296482086182,
"learning_rate": 9.841120983787915e-06,
"loss": 0.0039,
"step": 2660
},
{
"epoch": 4.09902130109384,
"grad_norm": 0.12679553031921387,
"learning_rate": 9.523947260789546e-06,
"loss": 0.0052,
"step": 2670
},
{
"epoch": 4.114373440798311,
"grad_norm": 0.30610185861587524,
"learning_rate": 9.211429988301823e-06,
"loss": 0.0046,
"step": 2680
},
{
"epoch": 4.129725580502782,
"grad_norm": 0.2688978910446167,
"learning_rate": 8.90360511897121e-06,
"loss": 0.0049,
"step": 2690
},
{
"epoch": 4.1450777202072535,
"grad_norm": 0.11524350196123123,
"learning_rate": 8.600508065620161e-06,
"loss": 0.0029,
"step": 2700
},
{
"epoch": 4.1450777202072535,
"eval_loss": 0.041070595383644104,
"eval_runtime": 195.9711,
"eval_samples_per_second": 5.914,
"eval_steps_per_second": 5.914,
"step": 2700
},
{
"epoch": 4.1604298599117255,
"grad_norm": 0.6059698462486267,
"learning_rate": 8.302173697173226e-06,
"loss": 0.0081,
"step": 2710
},
{
"epoch": 4.175781999616197,
"grad_norm": 0.09381125122308731,
"learning_rate": 8.008636334645631e-06,
"loss": 0.0042,
"step": 2720
},
{
"epoch": 4.191134139320668,
"grad_norm": 0.08234836161136627,
"learning_rate": 7.71992974719491e-06,
"loss": 0.0038,
"step": 2730
},
{
"epoch": 4.206486279025139,
"grad_norm": 0.05469619482755661,
"learning_rate": 7.436087148236054e-06,
"loss": 0.0026,
"step": 2740
},
{
"epoch": 4.22183841872961,
"grad_norm": 0.08177103847265244,
"learning_rate": 7.157141191620548e-06,
"loss": 0.0044,
"step": 2750
},
{
"epoch": 4.22183841872961,
"eval_loss": 0.04218851029872894,
"eval_runtime": 196.6612,
"eval_samples_per_second": 5.893,
"eval_steps_per_second": 5.893,
"step": 2750
},
{
"epoch": 4.237190558434082,
"grad_norm": 0.119593545794487,
"learning_rate": 6.883123967879796e-06,
"loss": 0.004,
"step": 2760
},
{
"epoch": 4.252542698138553,
"grad_norm": 0.1299428641796112,
"learning_rate": 6.6140670005334136e-06,
"loss": 0.0035,
"step": 2770
},
{
"epoch": 4.267894837843024,
"grad_norm": 0.19888031482696533,
"learning_rate": 6.350001242462617e-06,
"loss": 0.0056,
"step": 2780
},
{
"epoch": 4.2832469775474955,
"grad_norm": 0.17250359058380127,
"learning_rate": 6.090957072349385e-06,
"loss": 0.0042,
"step": 2790
},
{
"epoch": 4.298599117251967,
"grad_norm": 0.1387476772069931,
"learning_rate": 5.836964291181624e-06,
"loss": 0.0035,
"step": 2800
},
{
"epoch": 4.298599117251967,
"eval_loss": 0.04301900044083595,
"eval_runtime": 196.9142,
"eval_samples_per_second": 5.886,
"eval_steps_per_second": 5.886,
"step": 2800
},
{
"epoch": 4.313951256956439,
"grad_norm": 0.16032198071479797,
"learning_rate": 5.588052118824804e-06,
"loss": 0.0054,
"step": 2810
},
{
"epoch": 4.32930339666091,
"grad_norm": 0.10156236588954926,
"learning_rate": 5.344249190660428e-06,
"loss": 0.0027,
"step": 2820
},
{
"epoch": 4.344655536365381,
"grad_norm": 0.2252165526151657,
"learning_rate": 5.105583554291765e-06,
"loss": 0.0044,
"step": 2830
},
{
"epoch": 4.360007676069852,
"grad_norm": 0.016164055094122887,
"learning_rate": 4.872082666317207e-06,
"loss": 0.0023,
"step": 2840
},
{
"epoch": 4.375359815774323,
"grad_norm": 0.14848938584327698,
"learning_rate": 4.6437733891715905e-06,
"loss": 0.0031,
"step": 2850
},
{
"epoch": 4.375359815774323,
"eval_loss": 0.044064514338970184,
"eval_runtime": 197.355,
"eval_samples_per_second": 5.873,
"eval_steps_per_second": 5.873,
"step": 2850
},
{
"epoch": 4.390711955478795,
"grad_norm": 0.0969080924987793,
"learning_rate": 4.420681988035891e-06,
"loss": 0.0036,
"step": 2860
},
{
"epoch": 4.406064095183266,
"grad_norm": 0.1187598705291748,
"learning_rate": 4.2028341278156026e-06,
"loss": 0.0063,
"step": 2870
},
{
"epoch": 4.4214162348877375,
"grad_norm": 0.07551722228527069,
"learning_rate": 3.990254870188221e-06,
"loss": 0.0055,
"step": 2880
},
{
"epoch": 4.436768374592209,
"grad_norm": 0.09198061376810074,
"learning_rate": 3.7829686707200827e-06,
"loss": 0.0041,
"step": 2890
},
{
"epoch": 4.45212051429668,
"grad_norm": 0.09851433336734772,
"learning_rate": 3.580999376052946e-06,
"loss": 0.004,
"step": 2900
},
{
"epoch": 4.45212051429668,
"eval_loss": 0.044529687613248825,
"eval_runtime": 197.1011,
"eval_samples_per_second": 5.88,
"eval_steps_per_second": 5.88,
"step": 2900
},
{
"epoch": 4.467472654001152,
"grad_norm": 0.3292700946331024,
"learning_rate": 3.3843702211606153e-06,
"loss": 0.0045,
"step": 2910
},
{
"epoch": 4.482824793705623,
"grad_norm": 0.028438210487365723,
"learning_rate": 3.193103826675947e-06,
"loss": 0.0052,
"step": 2920
},
{
"epoch": 4.498176933410094,
"grad_norm": 0.2217072695493698,
"learning_rate": 3.007222196288545e-06,
"loss": 0.0032,
"step": 2930
},
{
"epoch": 4.513529073114565,
"grad_norm": 0.06991735100746155,
"learning_rate": 2.8267467142133687e-06,
"loss": 0.0031,
"step": 2940
},
{
"epoch": 4.528881212819036,
"grad_norm": 0.12266076356172562,
"learning_rate": 2.651698142730674e-06,
"loss": 0.0035,
"step": 2950
},
{
"epoch": 4.528881212819036,
"eval_loss": 0.04456078261137009,
"eval_runtime": 197.006,
"eval_samples_per_second": 5.883,
"eval_steps_per_second": 5.883,
"step": 2950
},
{
"epoch": 4.544233352523508,
"grad_norm": 0.013956692069768906,
"learning_rate": 2.4820966197974748e-06,
"loss": 0.0036,
"step": 2960
},
{
"epoch": 4.5595854922279795,
"grad_norm": 0.03814029321074486,
"learning_rate": 2.3179616567308216e-06,
"loss": 0.0029,
"step": 2970
},
{
"epoch": 4.574937631932451,
"grad_norm": 0.02994069829583168,
"learning_rate": 2.1593121359631873e-06,
"loss": 0.0025,
"step": 2980
},
{
"epoch": 4.590289771636922,
"grad_norm": 0.08673041313886642,
"learning_rate": 2.006166308870189e-06,
"loss": 0.0037,
"step": 2990
},
{
"epoch": 4.605641911341393,
"grad_norm": 0.027347547933459282,
"learning_rate": 1.8585417936709038e-06,
"loss": 0.0021,
"step": 3000
},
{
"epoch": 4.605641911341393,
"eval_loss": 0.045384086668491364,
"eval_runtime": 197.3936,
"eval_samples_per_second": 5.872,
"eval_steps_per_second": 5.872,
"step": 3000
},
{
"epoch": 4.620994051045864,
"grad_norm": 0.10114165395498276,
"learning_rate": 1.7164555734010545e-06,
"loss": 0.0085,
"step": 3010
},
{
"epoch": 4.636346190750336,
"grad_norm": 0.1202889010310173,
"learning_rate": 1.5799239939592204e-06,
"loss": 0.0051,
"step": 3020
},
{
"epoch": 4.651698330454807,
"grad_norm": 0.07459268718957901,
"learning_rate": 1.4489627622263747e-06,
"loss": 0.0052,
"step": 3030
},
{
"epoch": 4.667050470159278,
"grad_norm": 0.22482830286026,
"learning_rate": 1.3235869442589255e-06,
"loss": 0.0086,
"step": 3040
},
{
"epoch": 4.6824026098637495,
"grad_norm": 0.17435556650161743,
"learning_rate": 1.2038109635555406e-06,
"loss": 0.0041,
"step": 3050
},
{
"epoch": 4.6824026098637495,
"eval_loss": 0.04585828632116318,
"eval_runtime": 197.2718,
"eval_samples_per_second": 5.875,
"eval_steps_per_second": 5.875,
"step": 3050
},
{
"epoch": 4.697754749568221,
"grad_norm": 0.034258339554071426,
"learning_rate": 1.0896485993977467e-06,
"loss": 0.0047,
"step": 3060
},
{
"epoch": 4.713106889272693,
"grad_norm": 0.058088961988687515,
"learning_rate": 9.811129852647982e-07,
"loss": 0.0022,
"step": 3070
},
{
"epoch": 4.728459028977164,
"grad_norm": 0.37871047854423523,
"learning_rate": 8.782166073227515e-07,
"loss": 0.0066,
"step": 3080
},
{
"epoch": 4.743811168681635,
"grad_norm": 0.023409299552440643,
"learning_rate": 7.809713029880428e-07,
"loss": 0.0038,
"step": 3090
},
{
"epoch": 4.759163308386106,
"grad_norm": 0.10357584804296494,
"learning_rate": 6.893882595656598e-07,
"loss": 0.006,
"step": 3100
},
{
"epoch": 4.759163308386106,
"eval_loss": 0.04564342275261879,
"eval_runtime": 197.4178,
"eval_samples_per_second": 5.871,
"eval_steps_per_second": 5.871,
"step": 3100
},
{
"epoch": 4.774515448090577,
"grad_norm": 0.20756296813488007,
"learning_rate": 6.034780129621664e-07,
"loss": 0.0067,
"step": 3110
},
{
"epoch": 4.789867587795049,
"grad_norm": 0.32027533650398254,
"learning_rate": 5.232504464735833e-07,
"loss": 0.0039,
"step": 3120
},
{
"epoch": 4.80521972749952,
"grad_norm": 0.3811037838459015,
"learning_rate": 4.487147896484523e-07,
"loss": 0.007,
"step": 3130
},
{
"epoch": 4.8205718672039914,
"grad_norm": 0.0528700053691864,
"learning_rate": 3.7987961722599773e-07,
"loss": 0.0063,
"step": 3140
},
{
"epoch": 4.835924006908463,
"grad_norm": 0.03822799026966095,
"learning_rate": 3.167528481496984e-07,
"loss": 0.0043,
"step": 3150
},
{
"epoch": 4.835924006908463,
"eval_loss": 0.04553144425153732,
"eval_runtime": 197.6602,
"eval_samples_per_second": 5.864,
"eval_steps_per_second": 5.864,
"step": 3150
},
{
"epoch": 4.851276146612934,
"grad_norm": 0.2740378975868225,
"learning_rate": 2.593417446562607e-07,
"loss": 0.0083,
"step": 3160
},
{
"epoch": 4.866628286317406,
"grad_norm": 0.2202148288488388,
"learning_rate": 2.0765291144016486e-07,
"loss": 0.0043,
"step": 3170
},
{
"epoch": 4.881980426021877,
"grad_norm": 0.12892213463783264,
"learning_rate": 1.6169229489385595e-07,
"loss": 0.0042,
"step": 3180
},
{
"epoch": 4.897332565726348,
"grad_norm": 0.006994785740971565,
"learning_rate": 1.2146518242363014e-07,
"loss": 0.0027,
"step": 3190
},
{
"epoch": 4.912684705430819,
"grad_norm": 0.1142883449792862,
"learning_rate": 8.697620184138222e-08,
"loss": 0.0031,
"step": 3200
},
{
"epoch": 4.912684705430819,
"eval_loss": 0.04559043049812317,
"eval_runtime": 197.5618,
"eval_samples_per_second": 5.867,
"eval_steps_per_second": 5.867,
"step": 3200
},
{
"epoch": 4.92803684513529,
"grad_norm": 0.10935990512371063,
"learning_rate": 5.822932083221488e-08,
"loss": 0.0054,
"step": 3210
},
{
"epoch": 4.943388984839762,
"grad_norm": 0.2467465102672577,
"learning_rate": 3.5227846497970504e-08,
"loss": 0.0037,
"step": 3220
},
{
"epoch": 4.958741124544233,
"grad_norm": 0.15151652693748474,
"learning_rate": 1.7974424976796577e-08,
"loss": 0.0031,
"step": 3230
},
{
"epoch": 4.974093264248705,
"grad_norm": 0.09973540902137756,
"learning_rate": 6.47104113870034e-09,
"loss": 0.0045,
"step": 3240
},
{
"epoch": 4.989445403953176,
"grad_norm": 0.1923939734697342,
"learning_rate": 7.190183572314269e-10,
"loss": 0.0073,
"step": 3250
},
{
"epoch": 4.989445403953176,
"eval_loss": 0.04562755674123764,
"eval_runtime": 197.9034,
"eval_samples_per_second": 5.856,
"eval_steps_per_second": 5.856,
"step": 3250
},
{
"epoch": 4.997121473805412,
"step": 3255,
"total_flos": 8.80470547326763e+17,
"train_loss": 0.04897872828713943,
"train_runtime": 41700.3864,
"train_samples_per_second": 1.25,
"train_steps_per_second": 0.078
}
],
"logging_steps": 10,
"max_steps": 3255,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.80470547326763e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}