{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 2.988009592326139, "eval_steps": 200, "global_step": 624, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.047961630695443645, "grad_norm": 0.48529842495918274, "learning_rate": 5.625e-06, "loss": 2.5422, "step": 10 }, { "epoch": 0.09592326139088729, "grad_norm": 0.5231515169143677, "learning_rate": 1.1875e-05, "loss": 2.4974, "step": 20 }, { "epoch": 0.14388489208633093, "grad_norm": 0.4471396803855896, "learning_rate": 1.8125e-05, "loss": 2.4382, "step": 30 }, { "epoch": 0.19184652278177458, "grad_norm": 0.250693142414093, "learning_rate": 1.999310121715203e-05, "loss": 2.3647, "step": 40 }, { "epoch": 0.23980815347721823, "grad_norm": 0.2678900361061096, "learning_rate": 1.9959334172857852e-05, "loss": 2.2756, "step": 50 }, { "epoch": 0.28776978417266186, "grad_norm": 0.24975475668907166, "learning_rate": 1.9897526695650458e-05, "loss": 2.2132, "step": 60 }, { "epoch": 0.33573141486810554, "grad_norm": 0.25826334953308105, "learning_rate": 1.9807852804032306e-05, "loss": 2.1933, "step": 70 }, { "epoch": 0.38369304556354916, "grad_norm": 0.28782331943511963, "learning_rate": 1.9690564974192893e-05, "loss": 2.1697, "step": 80 }, { "epoch": 0.4316546762589928, "grad_norm": 0.35406047105789185, "learning_rate": 1.9545993429163913e-05, "loss": 2.1666, "step": 90 }, { "epoch": 0.47961630695443647, "grad_norm": 0.4792511463165283, "learning_rate": 1.9374545209078687e-05, "loss": 2.1381, "step": 100 }, { "epoch": 0.5275779376498801, "grad_norm": 0.5407695174217224, "learning_rate": 1.9176703025153643e-05, "loss": 2.1527, "step": 110 }, { "epoch": 0.5755395683453237, "grad_norm": 0.6309888362884521, "learning_rate": 1.8953023900618395e-05, "loss": 2.0777, "step": 120 }, { "epoch": 0.6235011990407674, "grad_norm": 0.34664326906204224, "learning_rate": 1.870413760242089e-05, "loss": 2.0752, "step": 130 }, { "epoch": 0.6714628297362111, "grad_norm": 0.25117745995521545, "learning_rate": 1.8430744868123146e-05, "loss": 2.0563, "step": 140 }, { "epoch": 0.7194244604316546, "grad_norm": 0.38084152340888977, "learning_rate": 1.8133615432979742e-05, "loss": 2.0572, "step": 150 }, { "epoch": 0.7673860911270983, "grad_norm": 0.4128628075122833, "learning_rate": 1.7813585862753832e-05, "loss": 2.0694, "step": 160 }, { "epoch": 0.815347721822542, "grad_norm": 0.2934734523296356, "learning_rate": 1.7471557198372277e-05, "loss": 2.0339, "step": 170 }, { "epoch": 0.8633093525179856, "grad_norm": 0.31436148285865784, "learning_rate": 1.710849241905151e-05, "loss": 2.0482, "step": 180 }, { "epoch": 0.9112709832134293, "grad_norm": 0.3247852623462677, "learning_rate": 1.6725413731036562e-05, "loss": 2.0122, "step": 190 }, { "epoch": 0.9592326139088729, "grad_norm": 0.29776260256767273, "learning_rate": 1.632339968958677e-05, "loss": 2.0158, "step": 200 }, { "epoch": 1.0047961630695443, "grad_norm": 0.44077572226524353, "learning_rate": 1.590358216231134e-05, "loss": 2.0056, "step": 210 }, { "epoch": 1.052757793764988, "grad_norm": 0.31072619557380676, "learning_rate": 1.546714314240429e-05, "loss": 2.0118, "step": 220 }, { "epoch": 1.1007194244604317, "grad_norm": 0.4204866588115692, "learning_rate": 1.5015311420751243e-05, "loss": 1.9779, "step": 230 }, { "epoch": 1.1486810551558753, "grad_norm": 0.38263222575187683, "learning_rate": 1.454935912627761e-05, "loss": 1.969, "step": 240 }, { "epoch": 1.196642685851319, "grad_norm": 0.3890452980995178, 
"learning_rate": 1.407059814427884e-05, "loss": 1.9723, "step": 250 }, { "epoch": 1.2446043165467626, "grad_norm": 0.4386310875415802, "learning_rate": 1.3580376422816945e-05, "loss": 1.9566, "step": 260 }, { "epoch": 1.2925659472422062, "grad_norm": 0.41672301292419434, "learning_rate": 1.3080074177582527e-05, "loss": 1.9365, "step": 270 }, { "epoch": 1.34052757793765, "grad_norm": 0.45278477668762207, "learning_rate": 1.2571100005907522e-05, "loss": 1.9445, "step": 280 }, { "epoch": 1.3884892086330936, "grad_norm": 0.4160229563713074, "learning_rate": 1.2054886920869682e-05, "loss": 1.9696, "step": 290 }, { "epoch": 1.4364508393285371, "grad_norm": 0.46161898970603943, "learning_rate": 1.1532888316654675e-05, "loss": 1.9435, "step": 300 }, { "epoch": 1.484412470023981, "grad_norm": 0.4921852648258209, "learning_rate": 1.1006573876535322e-05, "loss": 1.9428, "step": 310 }, { "epoch": 1.5323741007194245, "grad_norm": 0.5233998894691467, "learning_rate": 1.0477425434989038e-05, "loss": 1.9241, "step": 320 }, { "epoch": 1.580335731414868, "grad_norm": 0.5583417415618896, "learning_rate": 9.946932805603635e-06, "loss": 1.9444, "step": 330 }, { "epoch": 1.6282973621103118, "grad_norm": 0.48175185918807983, "learning_rate": 9.416589586518009e-06, "loss": 1.8964, "step": 340 }, { "epoch": 1.6762589928057554, "grad_norm": 0.5539340376853943, "learning_rate": 8.887888955207444e-06, "loss": 1.894, "step": 350 }, { "epoch": 1.724220623501199, "grad_norm": 0.5731947422027588, "learning_rate": 8.362319464453301e-06, "loss": 1.9005, "step": 360 }, { "epoch": 1.7721822541966428, "grad_norm": 0.6053941249847412, "learning_rate": 7.84136085133347e-06, "loss": 1.8858, "step": 370 }, { "epoch": 1.8201438848920863, "grad_norm": 0.6071736812591553, "learning_rate": 7.326479871033408e-06, "loss": 1.8869, "step": 380 }, { "epoch": 1.86810551558753, "grad_norm": 0.6168338060379028, "learning_rate": 6.819126167207586e-06, "loss": 1.8783, "step": 390 }, { "epoch": 1.9160671462829737, "grad_norm": 0.697503387928009, "learning_rate": 6.320728190518308e-06, "loss": 1.8764, "step": 400 }, { "epoch": 1.9640287769784173, "grad_norm": 0.6182295680046082, "learning_rate": 5.832689176843291e-06, "loss": 1.865, "step": 410 }, { "epoch": 2.0095923261390887, "grad_norm": 0.5304880738258362, "learning_rate": 5.356383196475226e-06, "loss": 1.879, "step": 420 }, { "epoch": 2.0575539568345325, "grad_norm": 0.5878423452377319, "learning_rate": 4.893151285436891e-06, "loss": 1.8399, "step": 430 }, { "epoch": 2.105515587529976, "grad_norm": 0.6464638710021973, "learning_rate": 4.444297669803981e-06, "loss": 1.8491, "step": 440 }, { "epoch": 2.1534772182254196, "grad_norm": 0.6010664701461792, "learning_rate": 4.011086093666057e-06, "loss": 1.8552, "step": 450 }, { "epoch": 2.2014388489208634, "grad_norm": 0.6692630052566528, "learning_rate": 3.5947362610642854e-06, "loss": 1.8616, "step": 460 }, { "epoch": 2.249400479616307, "grad_norm": 0.6113582849502563, "learning_rate": 3.196420401923567e-06, "loss": 1.8552, "step": 470 }, { "epoch": 2.2973621103117505, "grad_norm": 0.6936492919921875, "learning_rate": 2.8172599716477145e-06, "loss": 1.8145, "step": 480 }, { "epoch": 2.3453237410071943, "grad_norm": 0.6148903369903564, "learning_rate": 2.458322493669911e-06, "loss": 1.8115, "step": 490 }, { "epoch": 2.393285371702638, "grad_norm": 0.679800808429718, "learning_rate": 2.1206185538482704e-06, "loss": 1.8344, "step": 500 }, { "epoch": 2.4412470023980815, "grad_norm": 0.6589268445968628, "learning_rate": 1.8050989551686915e-06, 
"loss": 1.8587, "step": 510 }, { "epoch": 2.4892086330935252, "grad_norm": 0.7750632166862488, "learning_rate": 1.5126520407659618e-06, "loss": 1.8445, "step": 520 }, { "epoch": 2.537170263788969, "grad_norm": 0.89366215467453, "learning_rate": 1.2441011928001435e-06, "loss": 1.8361, "step": 530 }, { "epoch": 2.5851318944844124, "grad_norm": 0.6380264163017273, "learning_rate": 1.0002025142300764e-06, "loss": 1.8379, "step": 540 }, { "epoch": 2.633093525179856, "grad_norm": 0.7115680575370789, "learning_rate": 7.816427000110016e-07, "loss": 1.8221, "step": 550 }, { "epoch": 2.6810551558753, "grad_norm": 0.6720678806304932, "learning_rate": 5.890371037099107e-07, "loss": 1.8332, "step": 560 }, { "epoch": 2.7290167865707433, "grad_norm": 0.6737195253372192, "learning_rate": 4.229280049820561e-07, "loss": 1.8309, "step": 570 }, { "epoch": 2.776978417266187, "grad_norm": 0.7314239144325256, "learning_rate": 2.837830827865229e-07, "loss": 1.8679, "step": 580 }, { "epoch": 2.824940047961631, "grad_norm": 0.6641331911087036, "learning_rate": 1.719940986395252e-07, "loss": 1.8305, "step": 590 }, { "epoch": 2.8729016786570742, "grad_norm": 0.8098570108413696, "learning_rate": 8.787579361270616e-08, "loss": 1.8158, "step": 600 }, { "epoch": 2.920863309352518, "grad_norm": 0.6149157881736755, "learning_rate": 3.1665002181937575e-08, "loss": 1.8446, "step": 610 }, { "epoch": 2.968824940047962, "grad_norm": 0.6571503281593323, "learning_rate": 3.5199854215817176e-09, "loss": 1.8486, "step": 620 } ], "logging_steps": 10, "max_steps": 624, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 200, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 4.071044597856338e+17, "train_batch_size": 8, "trial_name": null, "trial_params": null }