{
"best_metric": 3.4529993534088135,
"best_model_checkpoint": "./results/models/checkpoint-250176",
"epoch": 16.0,
"eval_steps": 500,
"global_step": 250176,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.031977487848554614,
"grad_norm": 1.1953125,
"learning_rate": 0.000999360450243029,
"loss": 3.6459,
"step": 500
},
{
"epoch": 0.06395497569710923,
"grad_norm": 1.390625,
"learning_rate": 0.0009987209004860577,
"loss": 3.6508,
"step": 1000
},
{
"epoch": 0.09593246354566386,
"grad_norm": 1.28125,
"learning_rate": 0.0009980813507290869,
"loss": 3.6601,
"step": 1500
},
{
"epoch": 0.12790995139421846,
"grad_norm": 1.15625,
"learning_rate": 0.0009974418009721156,
"loss": 3.6421,
"step": 2000
},
{
"epoch": 0.1598874392427731,
"grad_norm": 3.734375,
"learning_rate": 0.0009968022512151446,
"loss": 3.6367,
"step": 2500
},
{
"epoch": 0.1918649270913277,
"grad_norm": 1.890625,
"learning_rate": 0.0009961627014581735,
"loss": 3.6313,
"step": 3000
},
{
"epoch": 0.22384241493988233,
"grad_norm": 0.95703125,
"learning_rate": 0.0009955231517012023,
"loss": 3.6215,
"step": 3500
},
{
"epoch": 0.2558199027884369,
"grad_norm": 0.7890625,
"learning_rate": 0.0009948836019442312,
"loss": 3.6111,
"step": 4000
},
{
"epoch": 0.28779739063699156,
"grad_norm": 0.625,
"learning_rate": 0.0009942440521872602,
"loss": 3.6104,
"step": 4500
},
{
"epoch": 0.3197748784855462,
"grad_norm": 0.66796875,
"learning_rate": 0.0009936045024302891,
"loss": 3.6049,
"step": 5000
},
{
"epoch": 0.3517523663341008,
"grad_norm": 1.03125,
"learning_rate": 0.000992964952673318,
"loss": 3.5984,
"step": 5500
},
{
"epoch": 0.3837298541826554,
"grad_norm": 1.2890625,
"learning_rate": 0.0009923254029163468,
"loss": 3.5988,
"step": 6000
},
{
"epoch": 0.41570734203121,
"grad_norm": 1.1171875,
"learning_rate": 0.0009916858531593758,
"loss": 3.5947,
"step": 6500
},
{
"epoch": 0.44768482987976466,
"grad_norm": 0.9453125,
"learning_rate": 0.0009910463034024047,
"loss": 3.5954,
"step": 7000
},
{
"epoch": 0.47966231772831924,
"grad_norm": 0.59765625,
"learning_rate": 0.0009904067536454337,
"loss": 3.5857,
"step": 7500
},
{
"epoch": 0.5116398055768738,
"grad_norm": 0.6328125,
"learning_rate": 0.0009897672038884624,
"loss": 3.5745,
"step": 8000
},
{
"epoch": 0.5436172934254285,
"grad_norm": 0.6875,
"learning_rate": 0.0009891276541314914,
"loss": 3.5707,
"step": 8500
},
{
"epoch": 0.5755947812739831,
"grad_norm": 0.6484375,
"learning_rate": 0.0009884881043745204,
"loss": 3.5728,
"step": 9000
},
{
"epoch": 0.6075722691225377,
"grad_norm": 0.984375,
"learning_rate": 0.0009878485546175493,
"loss": 3.5684,
"step": 9500
},
{
"epoch": 0.6395497569710924,
"grad_norm": 0.640625,
"learning_rate": 0.0009872090048605783,
"loss": 3.567,
"step": 10000
},
{
"epoch": 0.671527244819647,
"grad_norm": 0.828125,
"learning_rate": 0.000986569455103607,
"loss": 3.5684,
"step": 10500
},
{
"epoch": 0.7035047326682016,
"grad_norm": 0.53125,
"learning_rate": 0.000985929905346636,
"loss": 3.5629,
"step": 11000
},
{
"epoch": 0.7354822205167562,
"grad_norm": 0.609375,
"learning_rate": 0.000985290355589665,
"loss": 3.5613,
"step": 11500
},
{
"epoch": 0.7674597083653109,
"grad_norm": 0.75390625,
"learning_rate": 0.0009846508058326939,
"loss": 3.5583,
"step": 12000
},
{
"epoch": 0.7994371962138654,
"grad_norm": 1.2265625,
"learning_rate": 0.0009840112560757228,
"loss": 3.5577,
"step": 12500
},
{
"epoch": 0.83141468406242,
"grad_norm": 0.65625,
"learning_rate": 0.0009833717063187516,
"loss": 3.5556,
"step": 13000
},
{
"epoch": 0.8633921719109747,
"grad_norm": 0.61328125,
"learning_rate": 0.0009827321565617805,
"loss": 3.5537,
"step": 13500
},
{
"epoch": 0.8953696597595293,
"grad_norm": 0.70703125,
"learning_rate": 0.0009820926068048095,
"loss": 3.552,
"step": 14000
},
{
"epoch": 0.9273471476080839,
"grad_norm": 0.5859375,
"learning_rate": 0.0009814530570478384,
"loss": 3.551,
"step": 14500
},
{
"epoch": 0.9593246354566385,
"grad_norm": 1.1171875,
"learning_rate": 0.0009808135072908672,
"loss": 3.5503,
"step": 15000
},
{
"epoch": 0.9913021233051932,
"grad_norm": 0.62109375,
"learning_rate": 0.0009801739575338961,
"loss": 3.5515,
"step": 15500
},
{
"epoch": 1.0,
"eval_loss": 3.5521419048309326,
"eval_runtime": 1.1975,
"eval_samples_per_second": 417.533,
"eval_steps_per_second": 6.681,
"step": 15636
},
{
"epoch": 1.0232796111537477,
"grad_norm": 0.5703125,
"learning_rate": 0.000979534407776925,
"loss": 3.5519,
"step": 16000
},
{
"epoch": 1.0552570990023025,
"grad_norm": 0.60546875,
"learning_rate": 0.000978894858019954,
"loss": 3.5563,
"step": 16500
},
{
"epoch": 1.087234586850857,
"grad_norm": 0.6328125,
"learning_rate": 0.000978255308262983,
"loss": 3.5507,
"step": 17000
},
{
"epoch": 1.1192120746994116,
"grad_norm": 0.69921875,
"learning_rate": 0.0009776157585060117,
"loss": 3.5472,
"step": 17500
},
{
"epoch": 1.1511895625479662,
"grad_norm": 0.76953125,
"learning_rate": 0.0009769762087490407,
"loss": 3.5455,
"step": 18000
},
{
"epoch": 1.1831670503965208,
"grad_norm": 0.65234375,
"learning_rate": 0.0009763366589920695,
"loss": 3.5457,
"step": 18500
},
{
"epoch": 1.2151445382450754,
"grad_norm": 0.921875,
"learning_rate": 0.0009756971092350985,
"loss": 3.5489,
"step": 19000
},
{
"epoch": 1.24712202609363,
"grad_norm": 13.0625,
"learning_rate": 0.0009750575594781274,
"loss": 3.5469,
"step": 19500
},
{
"epoch": 1.2790995139421848,
"grad_norm": 0.9375,
"learning_rate": 0.0009744180097211564,
"loss": 3.5463,
"step": 20000
},
{
"epoch": 1.3110770017907394,
"grad_norm": 0.9375,
"learning_rate": 0.0009737784599641852,
"loss": 3.5458,
"step": 20500
},
{
"epoch": 1.343054489639294,
"grad_norm": 1.4296875,
"learning_rate": 0.0009731389102072141,
"loss": 3.5453,
"step": 21000
},
{
"epoch": 1.3750319774878486,
"grad_norm": 0.671875,
"learning_rate": 0.000972499360450243,
"loss": 3.5427,
"step": 21500
},
{
"epoch": 1.4070094653364031,
"grad_norm": 1.7421875,
"learning_rate": 0.000971859810693272,
"loss": 3.5428,
"step": 22000
},
{
"epoch": 1.4389869531849577,
"grad_norm": 0.70703125,
"learning_rate": 0.0009712202609363008,
"loss": 3.5423,
"step": 22500
},
{
"epoch": 1.4709644410335123,
"grad_norm": 0.5390625,
"learning_rate": 0.0009705807111793298,
"loss": 3.5411,
"step": 23000
},
{
"epoch": 1.5029419288820671,
"grad_norm": 0.74609375,
"learning_rate": 0.0009699411614223587,
"loss": 3.5401,
"step": 23500
},
{
"epoch": 1.5349194167306215,
"grad_norm": 0.66015625,
"learning_rate": 0.0009693016116653875,
"loss": 3.5418,
"step": 24000
},
{
"epoch": 1.5668969045791763,
"grad_norm": 0.64453125,
"learning_rate": 0.0009686620619084166,
"loss": 3.5409,
"step": 24500
},
{
"epoch": 1.5988743924277309,
"grad_norm": 0.73046875,
"learning_rate": 0.0009680225121514454,
"loss": 3.5366,
"step": 25000
},
{
"epoch": 1.6308518802762855,
"grad_norm": 0.640625,
"learning_rate": 0.0009673829623944744,
"loss": 3.5385,
"step": 25500
},
{
"epoch": 1.66282936812484,
"grad_norm": 0.89453125,
"learning_rate": 0.0009667434126375032,
"loss": 3.5404,
"step": 26000
},
{
"epoch": 1.6948068559733946,
"grad_norm": 0.73046875,
"learning_rate": 0.0009661038628805321,
"loss": 3.5416,
"step": 26500
},
{
"epoch": 1.7267843438219495,
"grad_norm": 0.9609375,
"learning_rate": 0.000965464313123561,
"loss": 3.5379,
"step": 27000
},
{
"epoch": 1.7587618316705038,
"grad_norm": 0.84765625,
"learning_rate": 0.00096482476336659,
"loss": 3.5389,
"step": 27500
},
{
"epoch": 1.7907393195190586,
"grad_norm": 1.1875,
"learning_rate": 0.0009641852136096188,
"loss": 3.5383,
"step": 28000
},
{
"epoch": 1.8227168073676132,
"grad_norm": 0.72265625,
"learning_rate": 0.0009635456638526478,
"loss": 3.5396,
"step": 28500
},
{
"epoch": 1.8546942952161678,
"grad_norm": 0.71875,
"learning_rate": 0.0009629061140956766,
"loss": 3.5381,
"step": 29000
},
{
"epoch": 1.8866717830647224,
"grad_norm": 0.8828125,
"learning_rate": 0.0009622665643387055,
"loss": 3.538,
"step": 29500
},
{
"epoch": 1.918649270913277,
"grad_norm": 0.71875,
"learning_rate": 0.0009616270145817345,
"loss": 3.5374,
"step": 30000
},
{
"epoch": 1.9506267587618318,
"grad_norm": 0.6328125,
"learning_rate": 0.0009609874648247634,
"loss": 3.536,
"step": 30500
},
{
"epoch": 1.9826042466103861,
"grad_norm": 0.92578125,
"learning_rate": 0.0009603479150677923,
"loss": 3.5373,
"step": 31000
},
{
"epoch": 2.0,
"eval_loss": 3.534449815750122,
"eval_runtime": 0.9864,
"eval_samples_per_second": 506.888,
"eval_steps_per_second": 8.11,
"step": 31272
},
{
"epoch": 2.014581734458941,
"grad_norm": 1.1953125,
"learning_rate": 0.0009597083653108212,
"loss": 3.5387,
"step": 31500
},
{
"epoch": 2.0465592223074953,
"grad_norm": 0.78125,
"learning_rate": 0.00095906881555385,
"loss": 3.5357,
"step": 32000
},
{
"epoch": 2.07853671015605,
"grad_norm": 0.76171875,
"learning_rate": 0.0009584292657968791,
"loss": 3.5349,
"step": 32500
},
{
"epoch": 2.110514198004605,
"grad_norm": 0.57421875,
"learning_rate": 0.0009577897160399079,
"loss": 3.5316,
"step": 33000
},
{
"epoch": 2.1424916858531593,
"grad_norm": 0.66796875,
"learning_rate": 0.0009571501662829368,
"loss": 3.5354,
"step": 33500
},
{
"epoch": 2.174469173701714,
"grad_norm": 0.625,
"learning_rate": 0.0009565106165259657,
"loss": 3.5308,
"step": 34000
},
{
"epoch": 2.2064466615502685,
"grad_norm": 1.140625,
"learning_rate": 0.0009558710667689946,
"loss": 3.53,
"step": 34500
},
{
"epoch": 2.2384241493988233,
"grad_norm": 0.58984375,
"learning_rate": 0.0009552315170120236,
"loss": 3.5282,
"step": 35000
},
{
"epoch": 2.270401637247378,
"grad_norm": 0.6640625,
"learning_rate": 0.0009545919672550525,
"loss": 3.5304,
"step": 35500
},
{
"epoch": 2.3023791250959325,
"grad_norm": 1.53125,
"learning_rate": 0.0009539524174980813,
"loss": 3.5317,
"step": 36000
},
{
"epoch": 2.3343566129444873,
"grad_norm": 1.0390625,
"learning_rate": 0.0009533128677411103,
"loss": 3.539,
"step": 36500
},
{
"epoch": 2.3663341007930416,
"grad_norm": 1.15625,
"learning_rate": 0.0009526733179841391,
"loss": 3.5338,
"step": 37000
},
{
"epoch": 2.3983115886415964,
"grad_norm": 10.25,
"learning_rate": 0.0009520337682271681,
"loss": 3.5336,
"step": 37500
},
{
"epoch": 2.430289076490151,
"grad_norm": 0.6328125,
"learning_rate": 0.0009513942184701971,
"loss": 3.5343,
"step": 38000
},
{
"epoch": 2.4622665643387056,
"grad_norm": 0.80859375,
"learning_rate": 0.0009507546687132259,
"loss": 3.5305,
"step": 38500
},
{
"epoch": 2.49424405218726,
"grad_norm": 0.828125,
"learning_rate": 0.0009501151189562548,
"loss": 3.5314,
"step": 39000
},
{
"epoch": 2.526221540035815,
"grad_norm": 0.69140625,
"learning_rate": 0.0009494755691992837,
"loss": 3.5328,
"step": 39500
},
{
"epoch": 2.5581990278843696,
"grad_norm": 0.83203125,
"learning_rate": 0.0009488360194423127,
"loss": 3.532,
"step": 40000
},
{
"epoch": 2.590176515732924,
"grad_norm": 0.84375,
"learning_rate": 0.0009481964696853416,
"loss": 3.5302,
"step": 40500
},
{
"epoch": 2.6221540035814788,
"grad_norm": 0.7890625,
"learning_rate": 0.0009475569199283705,
"loss": 3.529,
"step": 41000
},
{
"epoch": 2.654131491430033,
"grad_norm": 0.859375,
"learning_rate": 0.0009469173701713993,
"loss": 3.5312,
"step": 41500
},
{
"epoch": 2.686108979278588,
"grad_norm": 0.92578125,
"learning_rate": 0.0009462778204144283,
"loss": 3.5327,
"step": 42000
},
{
"epoch": 2.7180864671271427,
"grad_norm": 0.8125,
"learning_rate": 0.0009456382706574572,
"loss": 3.5339,
"step": 42500
},
{
"epoch": 2.750063954975697,
"grad_norm": 1.0546875,
"learning_rate": 0.0009449987209004861,
"loss": 3.5341,
"step": 43000
},
{
"epoch": 2.7820414428242515,
"grad_norm": 0.6953125,
"learning_rate": 0.000944359171143515,
"loss": 3.5348,
"step": 43500
},
{
"epoch": 2.8140189306728063,
"grad_norm": 0.953125,
"learning_rate": 0.0009437196213865439,
"loss": 3.534,
"step": 44000
},
{
"epoch": 2.845996418521361,
"grad_norm": 0.6875,
"learning_rate": 0.0009430800716295727,
"loss": 3.5337,
"step": 44500
},
{
"epoch": 2.8779739063699155,
"grad_norm": 0.9296875,
"learning_rate": 0.0009424405218726017,
"loss": 3.5319,
"step": 45000
},
{
"epoch": 2.9099513942184703,
"grad_norm": 1.3984375,
"learning_rate": 0.0009418009721156306,
"loss": 3.5303,
"step": 45500
},
{
"epoch": 2.9419288820670246,
"grad_norm": 0.74609375,
"learning_rate": 0.0009411614223586596,
"loss": 3.5294,
"step": 46000
},
{
"epoch": 2.9739063699155794,
"grad_norm": 1.03125,
"learning_rate": 0.0009405218726016884,
"loss": 3.5279,
"step": 46500
},
{
"epoch": 3.0,
"eval_loss": 3.5263726711273193,
"eval_runtime": 0.9849,
"eval_samples_per_second": 507.644,
"eval_steps_per_second": 8.122,
"step": 46908
},
{
"epoch": 3.0058838577641342,
"grad_norm": 1.515625,
"learning_rate": 0.0009398823228447173,
"loss": 3.5273,
"step": 47000
},
{
"epoch": 3.0378613456126886,
"grad_norm": 0.828125,
"learning_rate": 0.0009392427730877462,
"loss": 3.5272,
"step": 47500
},
{
"epoch": 3.0698388334612434,
"grad_norm": 1.109375,
"learning_rate": 0.0009386032233307752,
"loss": 3.5225,
"step": 48000
},
{
"epoch": 3.101816321309798,
"grad_norm": 0.8828125,
"learning_rate": 0.000937963673573804,
"loss": 3.5254,
"step": 48500
},
{
"epoch": 3.1337938091583526,
"grad_norm": 0.6796875,
"learning_rate": 0.000937324123816833,
"loss": 3.526,
"step": 49000
},
{
"epoch": 3.165771297006907,
"grad_norm": 1.3046875,
"learning_rate": 0.0009366845740598618,
"loss": 3.5281,
"step": 49500
},
{
"epoch": 3.1977487848554618,
"grad_norm": 2.28125,
"learning_rate": 0.0009360450243028907,
"loss": 3.5243,
"step": 50000
},
{
"epoch": 3.2297262727040166,
"grad_norm": 0.59375,
"learning_rate": 0.0009354054745459198,
"loss": 3.5223,
"step": 50500
},
{
"epoch": 3.261703760552571,
"grad_norm": 1.4921875,
"learning_rate": 0.0009347659247889486,
"loss": 3.5238,
"step": 51000
},
{
"epoch": 3.2936812484011257,
"grad_norm": 2.703125,
"learning_rate": 0.0009341263750319776,
"loss": 3.5231,
"step": 51500
},
{
"epoch": 3.32565873624968,
"grad_norm": 0.6796875,
"learning_rate": 0.0009334868252750064,
"loss": 3.5234,
"step": 52000
},
{
"epoch": 3.357636224098235,
"grad_norm": 5.34375,
"learning_rate": 0.0009328472755180352,
"loss": 3.5238,
"step": 52500
},
{
"epoch": 3.3896137119467893,
"grad_norm": 0.82421875,
"learning_rate": 0.0009322077257610643,
"loss": 3.5205,
"step": 53000
},
{
"epoch": 3.421591199795344,
"grad_norm": 0.6015625,
"learning_rate": 0.0009315681760040932,
"loss": 3.5206,
"step": 53500
},
{
"epoch": 3.453568687643899,
"grad_norm": 0.70703125,
"learning_rate": 0.000930928626247122,
"loss": 3.5225,
"step": 54000
},
{
"epoch": 3.4855461754924533,
"grad_norm": 0.578125,
"learning_rate": 0.000930289076490151,
"loss": 3.5223,
"step": 54500
},
{
"epoch": 3.517523663341008,
"grad_norm": 0.671875,
"learning_rate": 0.0009296495267331798,
"loss": 3.5215,
"step": 55000
},
{
"epoch": 3.5495011511895624,
"grad_norm": 0.65625,
"learning_rate": 0.0009290099769762088,
"loss": 3.5224,
"step": 55500
},
{
"epoch": 3.5814786390381173,
"grad_norm": 0.55859375,
"learning_rate": 0.0009283704272192377,
"loss": 3.5194,
"step": 56000
},
{
"epoch": 3.613456126886672,
"grad_norm": 2.8125,
"learning_rate": 0.0009277308774622666,
"loss": 3.5203,
"step": 56500
},
{
"epoch": 3.6454336147352264,
"grad_norm": 0.74609375,
"learning_rate": 0.0009270913277052955,
"loss": 3.5185,
"step": 57000
},
{
"epoch": 3.677411102583781,
"grad_norm": 0.6484375,
"learning_rate": 0.0009264517779483244,
"loss": 3.5188,
"step": 57500
},
{
"epoch": 3.7093885904323356,
"grad_norm": 0.92578125,
"learning_rate": 0.0009258122281913533,
"loss": 3.5196,
"step": 58000
},
{
"epoch": 3.7413660782808904,
"grad_norm": 0.75390625,
"learning_rate": 0.0009251726784343823,
"loss": 3.5209,
"step": 58500
},
{
"epoch": 3.7733435661294448,
"grad_norm": 0.76171875,
"learning_rate": 0.0009245331286774111,
"loss": 3.5187,
"step": 59000
},
{
"epoch": 3.8053210539779996,
"grad_norm": 0.51953125,
"learning_rate": 0.00092389357892044,
"loss": 3.5179,
"step": 59500
},
{
"epoch": 3.837298541826554,
"grad_norm": 2.390625,
"learning_rate": 0.0009232540291634689,
"loss": 3.5177,
"step": 60000
},
{
"epoch": 3.8692760296751088,
"grad_norm": 0.62890625,
"learning_rate": 0.0009226144794064979,
"loss": 3.5172,
"step": 60500
},
{
"epoch": 3.9012535175236636,
"grad_norm": 0.50390625,
"learning_rate": 0.0009219749296495267,
"loss": 3.5165,
"step": 61000
},
{
"epoch": 3.933231005372218,
"grad_norm": 0.66796875,
"learning_rate": 0.0009213353798925557,
"loss": 3.5168,
"step": 61500
},
{
"epoch": 3.9652084932207727,
"grad_norm": 0.62109375,
"learning_rate": 0.0009206958301355845,
"loss": 3.5154,
"step": 62000
},
{
"epoch": 3.997185981069327,
"grad_norm": 0.71875,
"learning_rate": 0.0009200562803786135,
"loss": 3.5154,
"step": 62500
},
{
"epoch": 4.0,
"eval_loss": 3.513375997543335,
"eval_runtime": 0.9646,
"eval_samples_per_second": 518.354,
"eval_steps_per_second": 8.294,
"step": 62544
},
{
"epoch": 4.029163468917882,
"grad_norm": 1.4140625,
"learning_rate": 0.0009194167306216423,
"loss": 3.5164,
"step": 63000
},
{
"epoch": 4.061140956766437,
"grad_norm": 0.95703125,
"learning_rate": 0.0009187771808646713,
"loss": 3.5138,
"step": 63500
},
{
"epoch": 4.093118444614991,
"grad_norm": 1.4375,
"learning_rate": 0.0009181376311077002,
"loss": 3.5159,
"step": 64000
},
{
"epoch": 4.1250959324635454,
"grad_norm": 1.0859375,
"learning_rate": 0.0009174980813507291,
"loss": 3.5177,
"step": 64500
},
{
"epoch": 4.1570734203121,
"grad_norm": 0.8359375,
"learning_rate": 0.0009168585315937579,
"loss": 3.5162,
"step": 65000
},
{
"epoch": 4.189050908160655,
"grad_norm": 0.5859375,
"learning_rate": 0.0009162189818367869,
"loss": 3.5116,
"step": 65500
},
{
"epoch": 4.22102839600921,
"grad_norm": 0.69140625,
"learning_rate": 0.0009155794320798159,
"loss": 3.5164,
"step": 66000
},
{
"epoch": 4.253005883857764,
"grad_norm": 0.7109375,
"learning_rate": 0.0009149398823228447,
"loss": 3.5149,
"step": 66500
},
{
"epoch": 4.284983371706319,
"grad_norm": 1.2890625,
"learning_rate": 0.0009143003325658737,
"loss": 3.5146,
"step": 67000
},
{
"epoch": 4.316960859554873,
"grad_norm": 0.62109375,
"learning_rate": 0.0009136607828089025,
"loss": 3.516,
"step": 67500
},
{
"epoch": 4.348938347403428,
"grad_norm": 0.6796875,
"learning_rate": 0.0009130212330519315,
"loss": 3.5157,
"step": 68000
},
{
"epoch": 4.380915835251983,
"grad_norm": 0.76953125,
"learning_rate": 0.0009123816832949604,
"loss": 3.517,
"step": 68500
},
{
"epoch": 4.412893323100537,
"grad_norm": 0.75390625,
"learning_rate": 0.0009117421335379893,
"loss": 3.5162,
"step": 69000
},
{
"epoch": 4.444870810949092,
"grad_norm": 1.453125,
"learning_rate": 0.0009111025837810182,
"loss": 3.5148,
"step": 69500
},
{
"epoch": 4.476848298797647,
"grad_norm": 1.2890625,
"learning_rate": 0.0009104630340240471,
"loss": 3.5151,
"step": 70000
},
{
"epoch": 4.508825786646201,
"grad_norm": 0.55859375,
"learning_rate": 0.0009098234842670759,
"loss": 3.5128,
"step": 70500
},
{
"epoch": 4.540803274494756,
"grad_norm": 0.58984375,
"learning_rate": 0.000909183934510105,
"loss": 3.5119,
"step": 71000
},
{
"epoch": 4.57278076234331,
"grad_norm": 0.56640625,
"learning_rate": 0.0009085443847531338,
"loss": 3.5123,
"step": 71500
},
{
"epoch": 4.604758250191865,
"grad_norm": 0.64453125,
"learning_rate": 0.0009079048349961627,
"loss": 3.5135,
"step": 72000
},
{
"epoch": 4.63673573804042,
"grad_norm": 1.9609375,
"learning_rate": 0.0009072652852391916,
"loss": 3.513,
"step": 72500
},
{
"epoch": 4.6687132258889745,
"grad_norm": 0.64453125,
"learning_rate": 0.0009066257354822205,
"loss": 3.5138,
"step": 73000
},
{
"epoch": 4.7006907137375284,
"grad_norm": 0.640625,
"learning_rate": 0.0009059861857252495,
"loss": 3.5112,
"step": 73500
},
{
"epoch": 4.732668201586083,
"grad_norm": 0.6484375,
"learning_rate": 0.0009053466359682784,
"loss": 3.5115,
"step": 74000
},
{
"epoch": 4.764645689434638,
"grad_norm": 0.8359375,
"learning_rate": 0.0009047070862113072,
"loss": 3.5139,
"step": 74500
},
{
"epoch": 4.796623177283193,
"grad_norm": 0.625,
"learning_rate": 0.0009040675364543362,
"loss": 3.5119,
"step": 75000
},
{
"epoch": 4.828600665131747,
"grad_norm": 0.60546875,
"learning_rate": 0.000903427986697365,
"loss": 3.5139,
"step": 75500
},
{
"epoch": 4.860578152980302,
"grad_norm": 1.2734375,
"learning_rate": 0.000902788436940394,
"loss": 3.5123,
"step": 76000
},
{
"epoch": 4.892555640828856,
"grad_norm": 0.9921875,
"learning_rate": 0.0009021488871834229,
"loss": 3.511,
"step": 76500
},
{
"epoch": 4.924533128677411,
"grad_norm": 0.5859375,
"learning_rate": 0.0009015093374264518,
"loss": 3.508,
"step": 77000
},
{
"epoch": 4.956510616525966,
"grad_norm": 0.578125,
"learning_rate": 0.0009008697876694807,
"loss": 3.5104,
"step": 77500
},
{
"epoch": 4.98848810437452,
"grad_norm": 0.640625,
"learning_rate": 0.0009002302379125096,
"loss": 3.5114,
"step": 78000
},
{
"epoch": 5.0,
"eval_loss": 3.5113115310668945,
"eval_runtime": 0.9623,
"eval_samples_per_second": 519.58,
"eval_steps_per_second": 8.313,
"step": 78180
},
{
"epoch": 5.020465592223075,
"grad_norm": 2.15625,
"learning_rate": 0.0008995906881555385,
"loss": 3.5083,
"step": 78500
},
{
"epoch": 5.05244308007163,
"grad_norm": 1.0234375,
"learning_rate": 0.0008989511383985675,
"loss": 3.5109,
"step": 79000
},
{
"epoch": 5.084420567920184,
"grad_norm": 0.609375,
"learning_rate": 0.0008983115886415963,
"loss": 3.5096,
"step": 79500
},
{
"epoch": 5.116398055768739,
"grad_norm": 0.73046875,
"learning_rate": 0.0008976720388846252,
"loss": 3.5078,
"step": 80000
},
{
"epoch": 5.148375543617293,
"grad_norm": 0.765625,
"learning_rate": 0.0008970324891276542,
"loss": 3.5083,
"step": 80500
},
{
"epoch": 5.180353031465848,
"grad_norm": 1.0546875,
"learning_rate": 0.000896392939370683,
"loss": 3.5109,
"step": 81000
},
{
"epoch": 5.212330519314403,
"grad_norm": 0.609375,
"learning_rate": 0.000895753389613712,
"loss": 3.5103,
"step": 81500
},
{
"epoch": 5.2443080071629575,
"grad_norm": 2.71875,
"learning_rate": 0.0008951138398567409,
"loss": 3.5079,
"step": 82000
},
{
"epoch": 5.276285495011512,
"grad_norm": 0.875,
"learning_rate": 0.0008944742900997698,
"loss": 3.5097,
"step": 82500
},
{
"epoch": 5.308262982860066,
"grad_norm": 1.109375,
"learning_rate": 0.0008938347403427987,
"loss": 3.5115,
"step": 83000
},
{
"epoch": 5.340240470708621,
"grad_norm": 0.81640625,
"learning_rate": 0.0008931951905858276,
"loss": 3.5081,
"step": 83500
},
{
"epoch": 5.372217958557176,
"grad_norm": 1.03125,
"learning_rate": 0.0008925556408288565,
"loss": 3.5103,
"step": 84000
},
{
"epoch": 5.404195446405731,
"grad_norm": 0.76171875,
"learning_rate": 0.0008919160910718855,
"loss": 3.5107,
"step": 84500
},
{
"epoch": 5.436172934254285,
"grad_norm": 0.59375,
"learning_rate": 0.0008912765413149143,
"loss": 3.5097,
"step": 85000
},
{
"epoch": 5.468150422102839,
"grad_norm": 0.73828125,
"learning_rate": 0.0008906369915579432,
"loss": 3.5103,
"step": 85500
},
{
"epoch": 5.500127909951394,
"grad_norm": 0.921875,
"learning_rate": 0.0008899974418009721,
"loss": 3.5106,
"step": 86000
},
{
"epoch": 5.532105397799949,
"grad_norm": 0.69921875,
"learning_rate": 0.0008893578920440011,
"loss": 3.5097,
"step": 86500
},
{
"epoch": 5.564082885648504,
"grad_norm": 0.88671875,
"learning_rate": 0.0008887183422870299,
"loss": 3.5118,
"step": 87000
},
{
"epoch": 5.596060373497058,
"grad_norm": 3.484375,
"learning_rate": 0.0008880787925300589,
"loss": 3.5083,
"step": 87500
},
{
"epoch": 5.628037861345613,
"grad_norm": 0.91015625,
"learning_rate": 0.0008874392427730877,
"loss": 3.5088,
"step": 88000
},
{
"epoch": 5.660015349194167,
"grad_norm": 0.6875,
"learning_rate": 0.0008867996930161167,
"loss": 3.5076,
"step": 88500
},
{
"epoch": 5.691992837042722,
"grad_norm": 2.0,
"learning_rate": 0.0008861601432591456,
"loss": 3.5096,
"step": 89000
},
{
"epoch": 5.723970324891276,
"grad_norm": 0.73828125,
"learning_rate": 0.0008855205935021745,
"loss": 3.5072,
"step": 89500
},
{
"epoch": 5.755947812739831,
"grad_norm": 0.9140625,
"learning_rate": 0.0008848810437452034,
"loss": 3.5087,
"step": 90000
},
{
"epoch": 5.787925300588386,
"grad_norm": 0.8671875,
"learning_rate": 0.0008842414939882323,
"loss": 3.5106,
"step": 90500
},
{
"epoch": 5.8199027884369405,
"grad_norm": 0.62109375,
"learning_rate": 0.0008836019442312611,
"loss": 3.5089,
"step": 91000
},
{
"epoch": 5.851880276285495,
"grad_norm": 1.6015625,
"learning_rate": 0.0008829623944742902,
"loss": 3.5094,
"step": 91500
},
{
"epoch": 5.883857764134049,
"grad_norm": 0.82421875,
"learning_rate": 0.000882322844717319,
"loss": 3.5058,
"step": 92000
},
{
"epoch": 5.915835251982604,
"grad_norm": 0.79296875,
"learning_rate": 0.0008816832949603479,
"loss": 3.5074,
"step": 92500
},
{
"epoch": 5.947812739831159,
"grad_norm": 0.71484375,
"learning_rate": 0.0008810437452033768,
"loss": 3.5082,
"step": 93000
},
{
"epoch": 5.979790227679714,
"grad_norm": 0.6171875,
"learning_rate": 0.0008804041954464057,
"loss": 3.5053,
"step": 93500
},
{
"epoch": 6.0,
"eval_loss": 3.4989702701568604,
"eval_runtime": 0.9725,
"eval_samples_per_second": 514.13,
"eval_steps_per_second": 8.226,
"step": 93816
},
{
"epoch": 6.0117677155282685,
"grad_norm": 2.03125,
"learning_rate": 0.0008797646456894348,
"loss": 3.5061,
"step": 94000
},
{
"epoch": 6.043745203376822,
"grad_norm": 0.765625,
"learning_rate": 0.0008791250959324636,
"loss": 3.5057,
"step": 94500
},
{
"epoch": 6.075722691225377,
"grad_norm": 1.34375,
"learning_rate": 0.0008784855461754924,
"loss": 3.5033,
"step": 95000
},
{
"epoch": 6.107700179073932,
"grad_norm": 0.67578125,
"learning_rate": 0.0008778459964185214,
"loss": 3.5074,
"step": 95500
},
{
"epoch": 6.139677666922487,
"grad_norm": 0.6953125,
"learning_rate": 0.0008772064466615503,
"loss": 3.5034,
"step": 96000
},
{
"epoch": 6.171655154771041,
"grad_norm": 0.7109375,
"learning_rate": 0.0008765668969045792,
"loss": 3.5081,
"step": 96500
},
{
"epoch": 6.203632642619596,
"grad_norm": 0.80078125,
"learning_rate": 0.0008759273471476082,
"loss": 3.5064,
"step": 97000
},
{
"epoch": 6.23561013046815,
"grad_norm": 0.59765625,
"learning_rate": 0.000875287797390637,
"loss": 3.5043,
"step": 97500
},
{
"epoch": 6.267587618316705,
"grad_norm": 0.6328125,
"learning_rate": 0.0008746482476336659,
"loss": 3.5044,
"step": 98000
},
{
"epoch": 6.29956510616526,
"grad_norm": 0.625,
"learning_rate": 0.0008740086978766948,
"loss": 3.506,
"step": 98500
},
{
"epoch": 6.331542594013814,
"grad_norm": 0.61328125,
"learning_rate": 0.0008733691481197237,
"loss": 3.5043,
"step": 99000
},
{
"epoch": 6.363520081862369,
"grad_norm": 0.7109375,
"learning_rate": 0.0008727295983627527,
"loss": 3.5033,
"step": 99500
},
{
"epoch": 6.3954975697109235,
"grad_norm": 2.390625,
"learning_rate": 0.0008720900486057816,
"loss": 3.5044,
"step": 100000
},
{
"epoch": 6.427475057559478,
"grad_norm": 0.7734375,
"learning_rate": 0.0008714504988488104,
"loss": 3.5024,
"step": 100500
},
{
"epoch": 6.459452545408033,
"grad_norm": 0.80078125,
"learning_rate": 0.0008708109490918394,
"loss": 3.504,
"step": 101000
},
{
"epoch": 6.491430033256587,
"grad_norm": 1.0859375,
"learning_rate": 0.0008701713993348682,
"loss": 3.504,
"step": 101500
},
{
"epoch": 6.523407521105142,
"grad_norm": 0.5703125,
"learning_rate": 0.0008695318495778972,
"loss": 3.5042,
"step": 102000
},
{
"epoch": 6.555385008953697,
"grad_norm": 0.7265625,
"learning_rate": 0.0008688922998209261,
"loss": 3.5023,
"step": 102500
},
{
"epoch": 6.5873624968022515,
"grad_norm": 0.62109375,
"learning_rate": 0.000868252750063955,
"loss": 3.5011,
"step": 103000
},
{
"epoch": 6.619339984650805,
"grad_norm": 0.859375,
"learning_rate": 0.0008676132003069838,
"loss": 3.5034,
"step": 103500
},
{
"epoch": 6.65131747249936,
"grad_norm": 0.53515625,
"learning_rate": 0.0008669736505500128,
"loss": 3.5035,
"step": 104000
},
{
"epoch": 6.683294960347915,
"grad_norm": 0.65625,
"learning_rate": 0.0008663341007930417,
"loss": 3.5028,
"step": 104500
},
{
"epoch": 6.71527244819647,
"grad_norm": 0.55078125,
"learning_rate": 0.0008656945510360707,
"loss": 3.5036,
"step": 105000
},
{
"epoch": 6.747249936045025,
"grad_norm": 0.9453125,
"learning_rate": 0.0008650550012790995,
"loss": 3.501,
"step": 105500
},
{
"epoch": 6.779227423893579,
"grad_norm": 5.25,
"learning_rate": 0.0008644154515221284,
"loss": 3.5032,
"step": 106000
},
{
"epoch": 6.811204911742133,
"grad_norm": 0.87109375,
"learning_rate": 0.0008637759017651573,
"loss": 3.5024,
"step": 106500
},
{
"epoch": 6.843182399590688,
"grad_norm": 0.609375,
"learning_rate": 0.0008631363520081863,
"loss": 3.5,
"step": 107000
},
{
"epoch": 6.875159887439243,
"grad_norm": 0.8203125,
"learning_rate": 0.0008624968022512151,
"loss": 3.5005,
"step": 107500
},
{
"epoch": 6.907137375287798,
"grad_norm": 0.5703125,
"learning_rate": 0.0008618572524942441,
"loss": 3.5035,
"step": 108000
},
{
"epoch": 6.939114863136352,
"grad_norm": 0.8671875,
"learning_rate": 0.0008612177027372729,
"loss": 3.4973,
"step": 108500
},
{
"epoch": 6.9710923509849065,
"grad_norm": 0.62890625,
"learning_rate": 0.0008605781529803018,
"loss": 3.5003,
"step": 109000
},
{
"epoch": 7.0,
"eval_loss": 3.5032992362976074,
"eval_runtime": 0.9812,
"eval_samples_per_second": 509.604,
"eval_steps_per_second": 8.154,
"step": 109452
},
{
"epoch": 7.003069838833461,
"grad_norm": 0.69140625,
"learning_rate": 0.0008599386032233309,
"loss": 3.4981,
"step": 109500
},
{
"epoch": 7.035047326682016,
"grad_norm": 0.65625,
"learning_rate": 0.0008592990534663597,
"loss": 3.4997,
"step": 110000
},
{
"epoch": 7.06702481453057,
"grad_norm": 0.6171875,
"learning_rate": 0.0008586595037093887,
"loss": 3.4987,
"step": 110500
},
{
"epoch": 7.099002302379125,
"grad_norm": 0.62109375,
"learning_rate": 0.0008580199539524175,
"loss": 3.4988,
"step": 111000
},
{
"epoch": 7.13097979022768,
"grad_norm": 1.0078125,
"learning_rate": 0.0008573804041954464,
"loss": 3.4997,
"step": 111500
},
{
"epoch": 7.1629572780762345,
"grad_norm": 0.72265625,
"learning_rate": 0.0008567408544384754,
"loss": 3.4968,
"step": 112000
},
{
"epoch": 7.194934765924789,
"grad_norm": 0.79296875,
"learning_rate": 0.0008561013046815043,
"loss": 3.5001,
"step": 112500
},
{
"epoch": 7.226912253773343,
"grad_norm": 0.86328125,
"learning_rate": 0.0008554617549245331,
"loss": 3.4974,
"step": 113000
},
{
"epoch": 7.258889741621898,
"grad_norm": 0.625,
"learning_rate": 0.0008548222051675621,
"loss": 3.5015,
"step": 113500
},
{
"epoch": 7.290867229470453,
"grad_norm": 0.828125,
"learning_rate": 0.0008541826554105909,
"loss": 3.5029,
"step": 114000
},
{
"epoch": 7.322844717319008,
"grad_norm": 1.421875,
"learning_rate": 0.0008535431056536199,
"loss": 3.503,
"step": 114500
},
{
"epoch": 7.354822205167562,
"grad_norm": 0.66796875,
"learning_rate": 0.0008529035558966488,
"loss": 3.5029,
"step": 115000
},
{
"epoch": 7.386799693016116,
"grad_norm": 0.61328125,
"learning_rate": 0.0008522640061396777,
"loss": 3.5035,
"step": 115500
},
{
"epoch": 7.418777180864671,
"grad_norm": 0.859375,
"learning_rate": 0.0008516244563827066,
"loss": 3.5042,
"step": 116000
},
{
"epoch": 7.450754668713226,
"grad_norm": 0.7421875,
"learning_rate": 0.0008509849066257355,
"loss": 3.5029,
"step": 116500
},
{
"epoch": 7.482732156561781,
"grad_norm": 0.734375,
"learning_rate": 0.0008503453568687643,
"loss": 3.5016,
"step": 117000
},
{
"epoch": 7.514709644410335,
"grad_norm": 1.1484375,
"learning_rate": 0.0008497058071117934,
"loss": 3.502,
"step": 117500
},
{
"epoch": 7.5466871322588895,
"grad_norm": 2.03125,
"learning_rate": 0.0008490662573548222,
"loss": 3.5032,
"step": 118000
},
{
"epoch": 7.578664620107444,
"grad_norm": 1.1796875,
"learning_rate": 0.0008484267075978511,
"loss": 3.5029,
"step": 118500
},
{
"epoch": 7.610642107955999,
"grad_norm": 0.9765625,
"learning_rate": 0.00084778715784088,
"loss": 3.502,
"step": 119000
},
{
"epoch": 7.642619595804554,
"grad_norm": 1.5390625,
"learning_rate": 0.0008471476080839089,
"loss": 3.5041,
"step": 119500
},
{
"epoch": 7.674597083653108,
"grad_norm": 0.703125,
"learning_rate": 0.0008465080583269378,
"loss": 3.5019,
"step": 120000
},
{
"epoch": 7.706574571501663,
"grad_norm": 0.69921875,
"learning_rate": 0.0008458685085699668,
"loss": 3.503,
"step": 120500
},
{
"epoch": 7.7385520593502175,
"grad_norm": 0.70703125,
"learning_rate": 0.0008452289588129956,
"loss": 3.5006,
"step": 121000
},
{
"epoch": 7.770529547198772,
"grad_norm": 0.79296875,
"learning_rate": 0.0008445894090560246,
"loss": 3.5006,
"step": 121500
},
{
"epoch": 7.802507035047327,
"grad_norm": 0.76171875,
"learning_rate": 0.0008439498592990534,
"loss": 3.4993,
"step": 122000
},
{
"epoch": 7.834484522895881,
"grad_norm": 1.171875,
"learning_rate": 0.0008433103095420824,
"loss": 3.5005,
"step": 122500
},
{
"epoch": 7.866462010744436,
"grad_norm": 2.921875,
"learning_rate": 0.0008426707597851114,
"loss": 3.499,
"step": 123000
},
{
"epoch": 7.898439498592991,
"grad_norm": 0.82421875,
"learning_rate": 0.0008420312100281402,
"loss": 3.4992,
"step": 123500
},
{
"epoch": 7.9304169864415455,
"grad_norm": 1.6640625,
"learning_rate": 0.000841391660271169,
"loss": 3.4957,
"step": 124000
},
{
"epoch": 7.962394474290099,
"grad_norm": 0.8359375,
"learning_rate": 0.000840752110514198,
"loss": 3.4988,
"step": 124500
},
{
"epoch": 7.994371962138654,
"grad_norm": 0.64453125,
"learning_rate": 0.000840112560757227,
"loss": 3.5007,
"step": 125000
},
{
"epoch": 8.0,
"eval_loss": 3.4883244037628174,
"eval_runtime": 0.9846,
"eval_samples_per_second": 507.843,
"eval_steps_per_second": 8.125,
"step": 125088
},
{
"epoch": 8.026349449987208,
"grad_norm": 0.90234375,
"learning_rate": 0.0008394730110002559,
"loss": 3.502,
"step": 125500
},
{
"epoch": 8.058326937835764,
"grad_norm": 0.734375,
"learning_rate": 0.0008388334612432848,
"loss": 3.5028,
"step": 126000
},
{
"epoch": 8.090304425684318,
"grad_norm": 0.63671875,
"learning_rate": 0.0008381939114863136,
"loss": 3.4989,
"step": 126500
},
{
"epoch": 8.122281913532873,
"grad_norm": 0.65234375,
"learning_rate": 0.0008375543617293426,
"loss": 3.4991,
"step": 127000
},
{
"epoch": 8.154259401381427,
"grad_norm": 0.79296875,
"learning_rate": 0.0008369148119723715,
"loss": 3.4993,
"step": 127500
},
{
"epoch": 8.186236889229981,
"grad_norm": 0.76171875,
"learning_rate": 0.0008362752622154004,
"loss": 3.4974,
"step": 128000
},
{
"epoch": 8.218214377078537,
"grad_norm": 0.69921875,
"learning_rate": 0.0008356357124584293,
"loss": 3.5004,
"step": 128500
},
{
"epoch": 8.250191864927091,
"grad_norm": 0.640625,
"learning_rate": 0.0008349961627014582,
"loss": 3.4979,
"step": 129000
},
{
"epoch": 8.282169352775647,
"grad_norm": 0.796875,
"learning_rate": 0.000834356612944487,
"loss": 3.497,
"step": 129500
},
{
"epoch": 8.3141468406242,
"grad_norm": 1.0,
"learning_rate": 0.0008337170631875161,
"loss": 3.4987,
"step": 130000
},
{
"epoch": 8.346124328472754,
"grad_norm": 0.62890625,
"learning_rate": 0.0008330775134305449,
"loss": 3.4983,
"step": 130500
},
{
"epoch": 8.37810181632131,
"grad_norm": 0.80859375,
"learning_rate": 0.0008324379636735739,
"loss": 3.4975,
"step": 131000
},
{
"epoch": 8.410079304169864,
"grad_norm": 0.7734375,
"learning_rate": 0.0008317984139166027,
"loss": 3.4973,
"step": 131500
},
{
"epoch": 8.44205679201842,
"grad_norm": 0.85546875,
"learning_rate": 0.0008311588641596316,
"loss": 3.4939,
"step": 132000
},
{
"epoch": 8.474034279866974,
"grad_norm": 0.73828125,
"learning_rate": 0.0008305193144026606,
"loss": 3.4942,
"step": 132500
},
{
"epoch": 8.506011767715528,
"grad_norm": 0.87890625,
"learning_rate": 0.0008298797646456895,
"loss": 3.4958,
"step": 133000
},
{
"epoch": 8.537989255564083,
"grad_norm": 0.73046875,
"learning_rate": 0.0008292402148887183,
"loss": 3.4947,
"step": 133500
},
{
"epoch": 8.569966743412637,
"grad_norm": 0.89453125,
"learning_rate": 0.0008286006651317473,
"loss": 3.4952,
"step": 134000
},
{
"epoch": 8.601944231261193,
"grad_norm": 0.84765625,
"learning_rate": 0.0008279611153747761,
"loss": 3.496,
"step": 134500
},
{
"epoch": 8.633921719109747,
"grad_norm": 0.6171875,
"learning_rate": 0.000827321565617805,
"loss": 3.4945,
"step": 135000
},
{
"epoch": 8.6658992069583,
"grad_norm": 1.109375,
"learning_rate": 0.000826682015860834,
"loss": 3.4945,
"step": 135500
},
{
"epoch": 8.697876694806856,
"grad_norm": 0.65234375,
"learning_rate": 0.0008260424661038629,
"loss": 3.4936,
"step": 136000
},
{
"epoch": 8.72985418265541,
"grad_norm": 0.9296875,
"learning_rate": 0.0008254029163468918,
"loss": 3.4932,
"step": 136500
},
{
"epoch": 8.761831670503966,
"grad_norm": 0.70703125,
"learning_rate": 0.0008247633665899207,
"loss": 3.494,
"step": 137000
},
{
"epoch": 8.79380915835252,
"grad_norm": 0.8125,
"learning_rate": 0.0008241238168329495,
"loss": 3.4934,
"step": 137500
},
{
"epoch": 8.825786646201074,
"grad_norm": 0.62890625,
"learning_rate": 0.0008234842670759786,
"loss": 3.4927,
"step": 138000
},
{
"epoch": 8.85776413404963,
"grad_norm": 0.82421875,
"learning_rate": 0.0008228447173190075,
"loss": 3.4943,
"step": 138500
},
{
"epoch": 8.889741621898184,
"grad_norm": 1.7109375,
"learning_rate": 0.0008222051675620363,
"loss": 3.4938,
"step": 139000
},
{
"epoch": 8.92171910974674,
"grad_norm": 9.3125,
"learning_rate": 0.0008215656178050653,
"loss": 3.4917,
"step": 139500
},
{
"epoch": 8.953696597595293,
"grad_norm": 0.984375,
"learning_rate": 0.0008209260680480941,
"loss": 3.4911,
"step": 140000
},
{
"epoch": 8.985674085443847,
"grad_norm": 5.03125,
"learning_rate": 0.0008202865182911231,
"loss": 3.4934,
"step": 140500
},
{
"epoch": 9.0,
"eval_loss": 3.481646776199341,
"eval_runtime": 0.9706,
"eval_samples_per_second": 515.163,
"eval_steps_per_second": 8.243,
"step": 140724
},
{
"epoch": 9.017651573292403,
"grad_norm": 1.0390625,
"learning_rate": 0.000819646968534152,
"loss": 3.4889,
"step": 141000
},
{
"epoch": 9.049629061140957,
"grad_norm": 0.82421875,
"learning_rate": 0.0008190074187771809,
"loss": 3.4937,
"step": 141500
},
{
"epoch": 9.08160654898951,
"grad_norm": 0.86328125,
"learning_rate": 0.0008183678690202098,
"loss": 3.4951,
"step": 142000
},
{
"epoch": 9.113584036838066,
"grad_norm": 0.734375,
"learning_rate": 0.0008177283192632387,
"loss": 3.4925,
"step": 142500
},
{
"epoch": 9.14556152468662,
"grad_norm": 0.75,
"learning_rate": 0.0008170887695062676,
"loss": 3.4906,
"step": 143000
},
{
"epoch": 9.177539012535176,
"grad_norm": 0.765625,
"learning_rate": 0.0008164492197492966,
"loss": 3.4914,
"step": 143500
},
{
"epoch": 9.20951650038373,
"grad_norm": 0.875,
"learning_rate": 0.0008158096699923254,
"loss": 3.4915,
"step": 144000
},
{
"epoch": 9.241493988232284,
"grad_norm": 0.7265625,
"learning_rate": 0.0008151701202353543,
"loss": 3.4897,
"step": 144500
},
{
"epoch": 9.27347147608084,
"grad_norm": 0.66796875,
"learning_rate": 0.0008145305704783832,
"loss": 3.4909,
"step": 145000
},
{
"epoch": 9.305448963929393,
"grad_norm": 1.4296875,
"learning_rate": 0.0008138910207214122,
"loss": 3.4909,
"step": 145500
},
{
"epoch": 9.337426451777949,
"grad_norm": 1.078125,
"learning_rate": 0.000813251470964441,
"loss": 3.4903,
"step": 146000
},
{
"epoch": 9.369403939626503,
"grad_norm": 0.9296875,
"learning_rate": 0.00081261192120747,
"loss": 3.4916,
"step": 146500
},
{
"epoch": 9.401381427475057,
"grad_norm": 0.6796875,
"learning_rate": 0.0008119723714504988,
"loss": 3.4901,
"step": 147000
},
{
"epoch": 9.433358915323613,
"grad_norm": 1.484375,
"learning_rate": 0.0008113328216935278,
"loss": 3.4906,
"step": 147500
},
{
"epoch": 9.465336403172167,
"grad_norm": 0.62109375,
"learning_rate": 0.0008106932719365567,
"loss": 3.4898,
"step": 148000
},
{
"epoch": 9.497313891020722,
"grad_norm": 0.8984375,
"learning_rate": 0.0008100537221795856,
"loss": 3.4892,
"step": 148500
},
{
"epoch": 9.529291378869276,
"grad_norm": 1.3046875,
"learning_rate": 0.0008094141724226145,
"loss": 3.491,
"step": 149000
},
{
"epoch": 9.56126886671783,
"grad_norm": 0.828125,
"learning_rate": 0.0008087746226656434,
"loss": 3.4877,
"step": 149500
},
{
"epoch": 9.593246354566386,
"grad_norm": 0.62890625,
"learning_rate": 0.0008081350729086722,
"loss": 3.4894,
"step": 150000
},
{
"epoch": 9.62522384241494,
"grad_norm": 1.078125,
"learning_rate": 0.0008074955231517013,
"loss": 3.4905,
"step": 150500
},
{
"epoch": 9.657201330263494,
"grad_norm": 0.62890625,
"learning_rate": 0.0008068559733947301,
"loss": 3.4896,
"step": 151000
},
{
"epoch": 9.68917881811205,
"grad_norm": 0.6796875,
"learning_rate": 0.000806216423637759,
"loss": 3.4904,
"step": 151500
},
{
"epoch": 9.721156305960603,
"grad_norm": 0.90234375,
"learning_rate": 0.000805576873880788,
"loss": 3.4872,
"step": 152000
},
{
"epoch": 9.753133793809159,
"grad_norm": 0.70703125,
"learning_rate": 0.0008049373241238168,
"loss": 3.4888,
"step": 152500
},
{
"epoch": 9.785111281657713,
"grad_norm": 0.7890625,
"learning_rate": 0.0008042977743668458,
"loss": 3.4862,
"step": 153000
},
{
"epoch": 9.817088769506267,
"grad_norm": 0.609375,
"learning_rate": 0.0008036582246098747,
"loss": 3.488,
"step": 153500
},
{
"epoch": 9.849066257354822,
"grad_norm": 0.58984375,
"learning_rate": 0.0008030186748529036,
"loss": 3.4842,
"step": 154000
},
{
"epoch": 9.881043745203376,
"grad_norm": 0.67578125,
"learning_rate": 0.0008023791250959325,
"loss": 3.487,
"step": 154500
},
{
"epoch": 9.913021233051932,
"grad_norm": 3.9375,
"learning_rate": 0.0008017395753389614,
"loss": 3.4886,
"step": 155000
},
{
"epoch": 9.944998720900486,
"grad_norm": 1.0546875,
"learning_rate": 0.0008011000255819902,
"loss": 3.4881,
"step": 155500
},
{
"epoch": 9.97697620874904,
"grad_norm": 0.69140625,
"learning_rate": 0.0008004604758250193,
"loss": 3.4866,
"step": 156000
},
{
"epoch": 10.0,
"eval_loss": 3.46722149848938,
"eval_runtime": 0.989,
"eval_samples_per_second": 505.584,
"eval_steps_per_second": 8.089,
"step": 156360
},
{
"epoch": 10.008953696597596,
"grad_norm": 0.69921875,
"learning_rate": 0.0007998209260680481,
"loss": 3.4876,
"step": 156500
},
{
"epoch": 10.04093118444615,
"grad_norm": 0.58203125,
"learning_rate": 0.000799181376311077,
"loss": 3.4879,
"step": 157000
},
{
"epoch": 10.072908672294705,
"grad_norm": 0.7421875,
"learning_rate": 0.0007985418265541059,
"loss": 3.4854,
"step": 157500
},
{
"epoch": 10.10488616014326,
"grad_norm": 0.5625,
"learning_rate": 0.0007979022767971348,
"loss": 3.487,
"step": 158000
},
{
"epoch": 10.136863647991813,
"grad_norm": 0.921875,
"learning_rate": 0.0007972627270401638,
"loss": 3.4835,
"step": 158500
},
{
"epoch": 10.168841135840369,
"grad_norm": 0.79296875,
"learning_rate": 0.0007966231772831927,
"loss": 3.4858,
"step": 159000
},
{
"epoch": 10.200818623688923,
"grad_norm": 0.73046875,
"learning_rate": 0.0007959836275262215,
"loss": 3.4851,
"step": 159500
},
{
"epoch": 10.232796111537478,
"grad_norm": 0.7109375,
"learning_rate": 0.0007953440777692505,
"loss": 3.4866,
"step": 160000
},
{
"epoch": 10.264773599386032,
"grad_norm": 0.79296875,
"learning_rate": 0.0007947045280122793,
"loss": 3.4858,
"step": 160500
},
{
"epoch": 10.296751087234586,
"grad_norm": 0.7109375,
"learning_rate": 0.0007940649782553083,
"loss": 3.4855,
"step": 161000
},
{
"epoch": 10.328728575083142,
"grad_norm": 0.81640625,
"learning_rate": 0.0007934254284983372,
"loss": 3.4826,
"step": 161500
},
{
"epoch": 10.360706062931696,
"grad_norm": 0.734375,
"learning_rate": 0.0007927858787413661,
"loss": 3.4849,
"step": 162000
},
{
"epoch": 10.392683550780252,
"grad_norm": 1.328125,
"learning_rate": 0.0007921463289843949,
"loss": 3.4854,
"step": 162500
},
{
"epoch": 10.424661038628805,
"grad_norm": 0.64453125,
"learning_rate": 0.0007915067792274239,
"loss": 3.4854,
"step": 163000
},
{
"epoch": 10.45663852647736,
"grad_norm": 0.73046875,
"learning_rate": 0.0007908672294704528,
"loss": 3.4853,
"step": 163500
},
{
"epoch": 10.488616014325915,
"grad_norm": 0.66796875,
"learning_rate": 0.0007902276797134818,
"loss": 3.4849,
"step": 164000
},
{
"epoch": 10.520593502174469,
"grad_norm": 0.71484375,
"learning_rate": 0.0007895881299565106,
"loss": 3.4838,
"step": 164500
},
{
"epoch": 10.552570990023025,
"grad_norm": 0.53125,
"learning_rate": 0.0007889485801995395,
"loss": 3.4831,
"step": 165000
},
{
"epoch": 10.584548477871579,
"grad_norm": 0.61328125,
"learning_rate": 0.0007883090304425684,
"loss": 3.4804,
"step": 165500
},
{
"epoch": 10.616525965720133,
"grad_norm": 0.58984375,
"learning_rate": 0.0007876694806855974,
"loss": 3.4808,
"step": 166000
},
{
"epoch": 10.648503453568688,
"grad_norm": 0.59765625,
"learning_rate": 0.0007870299309286262,
"loss": 3.4827,
"step": 166500
},
{
"epoch": 10.680480941417242,
"grad_norm": 0.71484375,
"learning_rate": 0.0007863903811716552,
"loss": 3.4827,
"step": 167000
},
{
"epoch": 10.712458429265796,
"grad_norm": 0.62109375,
"learning_rate": 0.000785750831414684,
"loss": 3.4836,
"step": 167500
},
{
"epoch": 10.744435917114352,
"grad_norm": 1.2734375,
"learning_rate": 0.0007851112816577129,
"loss": 3.4826,
"step": 168000
},
{
"epoch": 10.776413404962906,
"grad_norm": 0.71484375,
"learning_rate": 0.000784471731900742,
"loss": 3.4827,
"step": 168500
},
{
"epoch": 10.808390892811461,
"grad_norm": 0.87890625,
"learning_rate": 0.0007838321821437708,
"loss": 3.4804,
"step": 169000
},
{
"epoch": 10.840368380660015,
"grad_norm": 1.65625,
"learning_rate": 0.0007831926323867998,
"loss": 3.4825,
"step": 169500
},
{
"epoch": 10.87234586850857,
"grad_norm": 0.5546875,
"learning_rate": 0.0007825530826298286,
"loss": 3.481,
"step": 170000
},
{
"epoch": 10.904323356357125,
"grad_norm": 1.6015625,
"learning_rate": 0.0007819135328728575,
"loss": 3.4815,
"step": 170500
},
{
"epoch": 10.936300844205679,
"grad_norm": 0.734375,
"learning_rate": 0.0007812739831158864,
"loss": 3.4791,
"step": 171000
},
{
"epoch": 10.968278332054235,
"grad_norm": 0.54296875,
"learning_rate": 0.0007806344333589154,
"loss": 3.4802,
"step": 171500
},
{
"epoch": 11.0,
"eval_loss": 3.4805009365081787,
"eval_runtime": 1.0013,
"eval_samples_per_second": 499.341,
"eval_steps_per_second": 7.989,
"step": 171996
},
{
"epoch": 11.000255819902788,
"grad_norm": 0.7109375,
"learning_rate": 0.0007799948836019442,
"loss": 3.4799,
"step": 172000
},
{
"epoch": 11.032233307751342,
"grad_norm": 0.72265625,
"learning_rate": 0.0007793553338449732,
"loss": 3.4792,
"step": 172500
},
{
"epoch": 11.064210795599898,
"grad_norm": 0.6796875,
"learning_rate": 0.000778715784088002,
"loss": 3.4811,
"step": 173000
},
{
"epoch": 11.096188283448452,
"grad_norm": 0.9921875,
"learning_rate": 0.0007780762343310309,
"loss": 3.4766,
"step": 173500
},
{
"epoch": 11.128165771297008,
"grad_norm": 0.63671875,
"learning_rate": 0.0007774366845740599,
"loss": 3.4787,
"step": 174000
},
{
"epoch": 11.160143259145562,
"grad_norm": 0.671875,
"learning_rate": 0.0007767971348170888,
"loss": 3.4792,
"step": 174500
},
{
"epoch": 11.192120746994116,
"grad_norm": 0.6875,
"learning_rate": 0.0007761575850601177,
"loss": 3.4763,
"step": 175000
},
{
"epoch": 11.224098234842671,
"grad_norm": 0.84375,
"learning_rate": 0.0007755180353031466,
"loss": 3.4801,
"step": 175500
},
{
"epoch": 11.256075722691225,
"grad_norm": 0.921875,
"learning_rate": 0.0007748784855461754,
"loss": 3.4787,
"step": 176000
},
{
"epoch": 11.28805321053978,
"grad_norm": 0.73046875,
"learning_rate": 0.0007742389357892045,
"loss": 3.4797,
"step": 176500
},
{
"epoch": 11.320030698388335,
"grad_norm": 0.671875,
"learning_rate": 0.0007735993860322333,
"loss": 3.4776,
"step": 177000
},
{
"epoch": 11.352008186236889,
"grad_norm": 0.96484375,
"learning_rate": 0.0007729598362752622,
"loss": 3.4802,
"step": 177500
},
{
"epoch": 11.383985674085444,
"grad_norm": 0.79296875,
"learning_rate": 0.0007723202865182911,
"loss": 3.4791,
"step": 178000
},
{
"epoch": 11.415963161933998,
"grad_norm": 1.203125,
"learning_rate": 0.00077168073676132,
"loss": 3.4779,
"step": 178500
},
{
"epoch": 11.447940649782552,
"grad_norm": 1.65625,
"learning_rate": 0.000771041187004349,
"loss": 3.4775,
"step": 179000
},
{
"epoch": 11.479918137631108,
"grad_norm": 1.0390625,
"learning_rate": 0.0007704016372473779,
"loss": 3.4776,
"step": 179500
},
{
"epoch": 11.511895625479662,
"grad_norm": 0.66015625,
"learning_rate": 0.0007697620874904067,
"loss": 3.477,
"step": 180000
},
{
"epoch": 11.543873113328218,
"grad_norm": 0.734375,
"learning_rate": 0.0007691225377334357,
"loss": 3.4773,
"step": 180500
},
{
"epoch": 11.575850601176771,
"grad_norm": 1.0,
"learning_rate": 0.0007684829879764645,
"loss": 3.4764,
"step": 181000
},
{
"epoch": 11.607828089025325,
"grad_norm": 0.75390625,
"learning_rate": 0.0007678434382194935,
"loss": 3.48,
"step": 181500
},
{
"epoch": 11.639805576873881,
"grad_norm": 0.81640625,
"learning_rate": 0.0007672038884625225,
"loss": 3.4774,
"step": 182000
},
{
"epoch": 11.671783064722435,
"grad_norm": 0.75,
"learning_rate": 0.0007665643387055513,
"loss": 3.4786,
"step": 182500
},
{
"epoch": 11.70376055257099,
"grad_norm": 1.6328125,
"learning_rate": 0.0007659247889485802,
"loss": 3.4759,
"step": 183000
},
{
"epoch": 11.735738040419545,
"grad_norm": 0.68359375,
"learning_rate": 0.0007652852391916091,
"loss": 3.4766,
"step": 183500
},
{
"epoch": 11.767715528268099,
"grad_norm": 0.828125,
"learning_rate": 0.0007646456894346381,
"loss": 3.478,
"step": 184000
},
{
"epoch": 11.799693016116654,
"grad_norm": 0.65625,
"learning_rate": 0.000764006139677667,
"loss": 3.4765,
"step": 184500
},
{
"epoch": 11.831670503965208,
"grad_norm": 0.91015625,
"learning_rate": 0.0007633665899206959,
"loss": 3.4761,
"step": 185000
},
{
"epoch": 11.863647991813764,
"grad_norm": 0.62109375,
"learning_rate": 0.0007627270401637247,
"loss": 3.4784,
"step": 185500
},
{
"epoch": 11.895625479662318,
"grad_norm": 0.71875,
"learning_rate": 0.0007620874904067537,
"loss": 3.475,
"step": 186000
},
{
"epoch": 11.927602967510872,
"grad_norm": 2.046875,
"learning_rate": 0.0007614479406497826,
"loss": 3.4743,
"step": 186500
},
{
"epoch": 11.959580455359427,
"grad_norm": 0.71875,
"learning_rate": 0.0007608083908928115,
"loss": 3.4736,
"step": 187000
},
{
"epoch": 11.991557943207981,
"grad_norm": 0.72265625,
"learning_rate": 0.0007601688411358404,
"loss": 3.4745,
"step": 187500
},
{
"epoch": 12.0,
"eval_loss": 3.4816277027130127,
"eval_runtime": 0.9682,
"eval_samples_per_second": 516.439,
"eval_steps_per_second": 8.263,
"step": 187632
},
{
"epoch": 12.023535431056537,
"grad_norm": 0.88671875,
"learning_rate": 0.0007595292913788693,
"loss": 3.4726,
"step": 188000
},
{
"epoch": 12.055512918905091,
"grad_norm": 0.625,
"learning_rate": 0.0007588897416218981,
"loss": 3.4759,
"step": 188500
},
{
"epoch": 12.087490406753645,
"grad_norm": 0.82421875,
"learning_rate": 0.0007582501918649271,
"loss": 3.4739,
"step": 189000
},
{
"epoch": 12.1194678946022,
"grad_norm": 0.7265625,
"learning_rate": 0.000757610642107956,
"loss": 3.4757,
"step": 189500
},
{
"epoch": 12.151445382450754,
"grad_norm": 0.828125,
"learning_rate": 0.000756971092350985,
"loss": 3.476,
"step": 190000
},
{
"epoch": 12.18342287029931,
"grad_norm": 0.73828125,
"learning_rate": 0.0007563315425940138,
"loss": 3.4726,
"step": 190500
},
{
"epoch": 12.215400358147864,
"grad_norm": 0.62890625,
"learning_rate": 0.0007556919928370427,
"loss": 3.4746,
"step": 191000
},
{
"epoch": 12.247377845996418,
"grad_norm": 2.890625,
"learning_rate": 0.0007550524430800716,
"loss": 3.4741,
"step": 191500
},
{
"epoch": 12.279355333844974,
"grad_norm": 1.0234375,
"learning_rate": 0.0007544128933231006,
"loss": 3.4739,
"step": 192000
},
{
"epoch": 12.311332821693528,
"grad_norm": 0.6484375,
"learning_rate": 0.0007537733435661294,
"loss": 3.4729,
"step": 192500
},
{
"epoch": 12.343310309542082,
"grad_norm": 0.69140625,
"learning_rate": 0.0007531337938091584,
"loss": 3.4719,
"step": 193000
},
{
"epoch": 12.375287797390637,
"grad_norm": 0.59375,
"learning_rate": 0.0007524942440521872,
"loss": 3.4729,
"step": 193500
},
{
"epoch": 12.407265285239191,
"grad_norm": 0.87890625,
"learning_rate": 0.0007518546942952161,
"loss": 3.4757,
"step": 194000
},
{
"epoch": 12.439242773087747,
"grad_norm": 0.94140625,
"learning_rate": 0.0007512151445382452,
"loss": 3.4758,
"step": 194500
},
{
"epoch": 12.4712202609363,
"grad_norm": 0.88671875,
"learning_rate": 0.000750575594781274,
"loss": 3.4718,
"step": 195000
},
{
"epoch": 12.503197748784855,
"grad_norm": 0.7578125,
"learning_rate": 0.000749936045024303,
"loss": 3.4721,
"step": 195500
},
{
"epoch": 12.53517523663341,
"grad_norm": 0.70703125,
"learning_rate": 0.0007492964952673318,
"loss": 3.4733,
"step": 196000
},
{
"epoch": 12.567152724481964,
"grad_norm": 0.6171875,
"learning_rate": 0.0007486569455103606,
"loss": 3.47,
"step": 196500
},
{
"epoch": 12.59913021233052,
"grad_norm": 1.28125,
"learning_rate": 0.0007480173957533897,
"loss": 3.4723,
"step": 197000
},
{
"epoch": 12.631107700179074,
"grad_norm": 0.74609375,
"learning_rate": 0.0007473778459964186,
"loss": 3.4728,
"step": 197500
},
{
"epoch": 12.663085188027628,
"grad_norm": 0.69140625,
"learning_rate": 0.0007467382962394474,
"loss": 3.4719,
"step": 198000
},
{
"epoch": 12.695062675876184,
"grad_norm": 1.0546875,
"learning_rate": 0.0007460987464824764,
"loss": 3.4715,
"step": 198500
},
{
"epoch": 12.727040163724737,
"grad_norm": 0.67578125,
"learning_rate": 0.0007454591967255052,
"loss": 3.471,
"step": 199000
},
{
"epoch": 12.759017651573293,
"grad_norm": 0.8671875,
"learning_rate": 0.0007448196469685342,
"loss": 3.4729,
"step": 199500
},
{
"epoch": 12.790995139421847,
"grad_norm": 1.6953125,
"learning_rate": 0.0007441800972115631,
"loss": 3.4712,
"step": 200000
},
{
"epoch": 12.822972627270401,
"grad_norm": 1.3203125,
"learning_rate": 0.000743540547454592,
"loss": 3.471,
"step": 200500
},
{
"epoch": 12.854950115118957,
"grad_norm": 1.0234375,
"learning_rate": 0.0007429009976976209,
"loss": 3.4736,
"step": 201000
},
{
"epoch": 12.88692760296751,
"grad_norm": 0.83203125,
"learning_rate": 0.0007422614479406498,
"loss": 3.4716,
"step": 201500
},
{
"epoch": 12.918905090816066,
"grad_norm": 0.9296875,
"learning_rate": 0.0007416218981836787,
"loss": 3.4743,
"step": 202000
},
{
"epoch": 12.95088257866462,
"grad_norm": 0.9375,
"learning_rate": 0.0007409823484267077,
"loss": 3.4714,
"step": 202500
},
{
"epoch": 12.982860066513174,
"grad_norm": 0.609375,
"learning_rate": 0.0007403427986697365,
"loss": 3.4724,
"step": 203000
},
{
"epoch": 13.0,
"eval_loss": 3.4609711170196533,
"eval_runtime": 0.992,
"eval_samples_per_second": 504.02,
"eval_steps_per_second": 8.064,
"step": 203268
},
{
"epoch": 13.01483755436173,
"grad_norm": 0.6171875,
"learning_rate": 0.0007397032489127654,
"loss": 3.4721,
"step": 203500
},
{
"epoch": 13.046815042210284,
"grad_norm": 1.765625,
"learning_rate": 0.0007390636991557943,
"loss": 3.4699,
"step": 204000
},
{
"epoch": 13.078792530058838,
"grad_norm": 1.78125,
"learning_rate": 0.0007384241493988233,
"loss": 3.4707,
"step": 204500
},
{
"epoch": 13.110770017907393,
"grad_norm": 1.578125,
"learning_rate": 0.0007377845996418521,
"loss": 3.4704,
"step": 205000
},
{
"epoch": 13.142747505755947,
"grad_norm": 0.73828125,
"learning_rate": 0.0007371450498848811,
"loss": 3.4707,
"step": 205500
},
{
"epoch": 13.174724993604503,
"grad_norm": 0.734375,
"learning_rate": 0.0007365055001279099,
"loss": 3.4688,
"step": 206000
},
{
"epoch": 13.206702481453057,
"grad_norm": 0.71484375,
"learning_rate": 0.0007358659503709389,
"loss": 3.4702,
"step": 206500
},
{
"epoch": 13.23867996930161,
"grad_norm": 0.6328125,
"learning_rate": 0.0007352264006139677,
"loss": 3.4703,
"step": 207000
},
{
"epoch": 13.270657457150167,
"grad_norm": 0.703125,
"learning_rate": 0.0007345868508569967,
"loss": 3.4692,
"step": 207500
},
{
"epoch": 13.30263494499872,
"grad_norm": 0.87109375,
"learning_rate": 0.0007339473011000256,
"loss": 3.4726,
"step": 208000
},
{
"epoch": 13.334612432847276,
"grad_norm": 0.8671875,
"learning_rate": 0.0007333077513430545,
"loss": 3.4713,
"step": 208500
},
{
"epoch": 13.36658992069583,
"grad_norm": 0.65234375,
"learning_rate": 0.0007326682015860833,
"loss": 3.4704,
"step": 209000
},
{
"epoch": 13.398567408544384,
"grad_norm": 0.6875,
"learning_rate": 0.0007320286518291123,
"loss": 3.4688,
"step": 209500
},
{
"epoch": 13.43054489639294,
"grad_norm": 0.625,
"learning_rate": 0.0007313891020721413,
"loss": 3.4705,
"step": 210000
},
{
"epoch": 13.462522384241494,
"grad_norm": 0.97265625,
"learning_rate": 0.0007307495523151701,
"loss": 3.4693,
"step": 210500
},
{
"epoch": 13.49449987209005,
"grad_norm": 1.3984375,
"learning_rate": 0.0007301100025581991,
"loss": 3.4666,
"step": 211000
},
{
"epoch": 13.526477359938603,
"grad_norm": 1.0625,
"learning_rate": 0.0007294704528012279,
"loss": 3.4696,
"step": 211500
},
{
"epoch": 13.558454847787157,
"grad_norm": 0.765625,
"learning_rate": 0.0007288309030442569,
"loss": 3.4694,
"step": 212000
},
{
"epoch": 13.590432335635713,
"grad_norm": 1.1171875,
"learning_rate": 0.0007281913532872858,
"loss": 3.4699,
"step": 212500
},
{
"epoch": 13.622409823484267,
"grad_norm": 0.8125,
"learning_rate": 0.0007275518035303147,
"loss": 3.4682,
"step": 213000
},
{
"epoch": 13.654387311332822,
"grad_norm": 1.5,
"learning_rate": 0.0007269122537733436,
"loss": 3.4704,
"step": 213500
},
{
"epoch": 13.686364799181376,
"grad_norm": 0.62109375,
"learning_rate": 0.0007262727040163725,
"loss": 3.4667,
"step": 214000
},
{
"epoch": 13.71834228702993,
"grad_norm": 0.640625,
"learning_rate": 0.0007256331542594013,
"loss": 3.4709,
"step": 214500
},
{
"epoch": 13.750319774878486,
"grad_norm": 1.2265625,
"learning_rate": 0.0007249936045024304,
"loss": 3.471,
"step": 215000
},
{
"epoch": 13.78229726272704,
"grad_norm": 0.54296875,
"learning_rate": 0.0007243540547454592,
"loss": 3.4703,
"step": 215500
},
{
"epoch": 13.814274750575596,
"grad_norm": 1.203125,
"learning_rate": 0.0007237145049884881,
"loss": 3.4693,
"step": 216000
},
{
"epoch": 13.84625223842415,
"grad_norm": 0.7578125,
"learning_rate": 0.000723074955231517,
"loss": 3.4663,
"step": 216500
},
{
"epoch": 13.878229726272703,
"grad_norm": 0.70703125,
"learning_rate": 0.0007224354054745459,
"loss": 3.468,
"step": 217000
},
{
"epoch": 13.91020721412126,
"grad_norm": 2.484375,
"learning_rate": 0.0007217958557175749,
"loss": 3.4679,
"step": 217500
},
{
"epoch": 13.942184701969813,
"grad_norm": 1.0703125,
"learning_rate": 0.0007211563059606038,
"loss": 3.4694,
"step": 218000
},
{
"epoch": 13.974162189818369,
"grad_norm": 0.61328125,
"learning_rate": 0.0007205167562036326,
"loss": 3.4673,
"step": 218500
},
{
"epoch": 14.0,
"eval_loss": 3.467783212661743,
"eval_runtime": 1.0002,
"eval_samples_per_second": 499.907,
"eval_steps_per_second": 7.999,
"step": 218904
},
{
"epoch": 14.006139677666923,
"grad_norm": 0.91015625,
"learning_rate": 0.0007198772064466616,
"loss": 3.4686,
"step": 219000
},
{
"epoch": 14.038117165515477,
"grad_norm": 0.828125,
"learning_rate": 0.0007192376566896904,
"loss": 3.47,
"step": 219500
},
{
"epoch": 14.070094653364032,
"grad_norm": 1.1171875,
"learning_rate": 0.0007185981069327194,
"loss": 3.4699,
"step": 220000
},
{
"epoch": 14.102072141212586,
"grad_norm": 0.68359375,
"learning_rate": 0.0007179585571757483,
"loss": 3.4684,
"step": 220500
},
{
"epoch": 14.13404962906114,
"grad_norm": 0.640625,
"learning_rate": 0.0007173190074187772,
"loss": 3.4679,
"step": 221000
},
{
"epoch": 14.166027116909696,
"grad_norm": 0.68359375,
"learning_rate": 0.000716679457661806,
"loss": 3.4696,
"step": 221500
},
{
"epoch": 14.19800460475825,
"grad_norm": 0.671875,
"learning_rate": 0.000716039907904835,
"loss": 3.4693,
"step": 222000
},
{
"epoch": 14.229982092606805,
"grad_norm": 0.73828125,
"learning_rate": 0.000715400358147864,
"loss": 3.4672,
"step": 222500
},
{
"epoch": 14.26195958045536,
"grad_norm": 2.421875,
"learning_rate": 0.0007147608083908929,
"loss": 3.4704,
"step": 223000
},
{
"epoch": 14.293937068303913,
"grad_norm": 0.609375,
"learning_rate": 0.0007141212586339217,
"loss": 3.4674,
"step": 223500
},
{
"epoch": 14.325914556152469,
"grad_norm": 0.9296875,
"learning_rate": 0.0007134817088769506,
"loss": 3.4658,
"step": 224000
},
{
"epoch": 14.357892044001023,
"grad_norm": 0.984375,
"learning_rate": 0.0007128421591199796,
"loss": 3.4651,
"step": 224500
},
{
"epoch": 14.389869531849579,
"grad_norm": 0.73828125,
"learning_rate": 0.0007122026093630084,
"loss": 3.4677,
"step": 225000
},
{
"epoch": 14.421847019698133,
"grad_norm": 0.9296875,
"learning_rate": 0.0007115630596060374,
"loss": 3.4652,
"step": 225500
},
{
"epoch": 14.453824507546686,
"grad_norm": 0.65234375,
"learning_rate": 0.0007109235098490663,
"loss": 3.4643,
"step": 226000
},
{
"epoch": 14.485801995395242,
"grad_norm": 0.9765625,
"learning_rate": 0.0007102839600920952,
"loss": 3.4678,
"step": 226500
},
{
"epoch": 14.517779483243796,
"grad_norm": 0.515625,
"learning_rate": 0.000709644410335124,
"loss": 3.4685,
"step": 227000
},
{
"epoch": 14.549756971092352,
"grad_norm": 0.65625,
"learning_rate": 0.000709004860578153,
"loss": 3.4679,
"step": 227500
},
{
"epoch": 14.581734458940906,
"grad_norm": 1.234375,
"learning_rate": 0.0007083653108211819,
"loss": 3.4672,
"step": 228000
},
{
"epoch": 14.61371194678946,
"grad_norm": 1.3984375,
"learning_rate": 0.0007077257610642109,
"loss": 3.4658,
"step": 228500
},
{
"epoch": 14.645689434638015,
"grad_norm": 0.64453125,
"learning_rate": 0.0007070862113072397,
"loss": 3.4684,
"step": 229000
},
{
"epoch": 14.67766692248657,
"grad_norm": 0.58984375,
"learning_rate": 0.0007064466615502686,
"loss": 3.467,
"step": 229500
},
{
"epoch": 14.709644410335123,
"grad_norm": 0.53125,
"learning_rate": 0.0007058071117932975,
"loss": 3.4666,
"step": 230000
},
{
"epoch": 14.741621898183679,
"grad_norm": 0.7265625,
"learning_rate": 0.0007051675620363265,
"loss": 3.4672,
"step": 230500
},
{
"epoch": 14.773599386032233,
"grad_norm": 0.6640625,
"learning_rate": 0.0007045280122793553,
"loss": 3.466,
"step": 231000
},
{
"epoch": 14.805576873880788,
"grad_norm": 0.81640625,
"learning_rate": 0.0007038884625223843,
"loss": 3.468,
"step": 231500
},
{
"epoch": 14.837554361729342,
"grad_norm": 0.56640625,
"learning_rate": 0.0007032489127654131,
"loss": 3.4662,
"step": 232000
},
{
"epoch": 14.869531849577896,
"grad_norm": 0.6640625,
"learning_rate": 0.0007026093630084421,
"loss": 3.4649,
"step": 232500
},
{
"epoch": 14.901509337426452,
"grad_norm": 0.65234375,
"learning_rate": 0.000701969813251471,
"loss": 3.4674,
"step": 233000
},
{
"epoch": 14.933486825275006,
"grad_norm": 1.1640625,
"learning_rate": 0.0007013302634944999,
"loss": 3.4648,
"step": 233500
},
{
"epoch": 14.965464313123562,
"grad_norm": 1.0078125,
"learning_rate": 0.0007006907137375288,
"loss": 3.4643,
"step": 234000
},
{
"epoch": 14.997441800972116,
"grad_norm": 0.99609375,
"learning_rate": 0.0007000511639805577,
"loss": 3.4644,
"step": 234500
},
{
"epoch": 15.0,
"eval_loss": 3.4624619483947754,
"eval_runtime": 0.9711,
"eval_samples_per_second": 514.871,
"eval_steps_per_second": 8.238,
"step": 234540
},
{
"epoch": 15.02941928882067,
"grad_norm": 0.69140625,
"learning_rate": 0.0006994116142235865,
"loss": 3.4665,
"step": 235000
},
{
"epoch": 15.061396776669225,
"grad_norm": 0.640625,
"learning_rate": 0.0006987720644666156,
"loss": 3.4649,
"step": 235500
},
{
"epoch": 15.093374264517779,
"grad_norm": 0.8203125,
"learning_rate": 0.0006981325147096444,
"loss": 3.4653,
"step": 236000
},
{
"epoch": 15.125351752366335,
"grad_norm": 0.6015625,
"learning_rate": 0.0006974929649526733,
"loss": 3.4665,
"step": 236500
},
{
"epoch": 15.157329240214889,
"grad_norm": 1.3046875,
"learning_rate": 0.0006968534151957022,
"loss": 3.4627,
"step": 237000
},
{
"epoch": 15.189306728063443,
"grad_norm": 0.62109375,
"learning_rate": 0.0006962138654387311,
"loss": 3.4672,
"step": 237500
},
{
"epoch": 15.221284215911998,
"grad_norm": 0.6875,
"learning_rate": 0.0006955743156817602,
"loss": 3.4686,
"step": 238000
},
{
"epoch": 15.253261703760552,
"grad_norm": 0.6796875,
"learning_rate": 0.000694934765924789,
"loss": 3.4661,
"step": 238500
},
{
"epoch": 15.285239191609108,
"grad_norm": 4.75,
"learning_rate": 0.0006942952161678179,
"loss": 3.4654,
"step": 239000
},
{
"epoch": 15.317216679457662,
"grad_norm": 0.75,
"learning_rate": 0.0006936556664108468,
"loss": 3.4653,
"step": 239500
},
{
"epoch": 15.349194167306216,
"grad_norm": 1.4921875,
"learning_rate": 0.0006930161166538757,
"loss": 3.4629,
"step": 240000
},
{
"epoch": 15.381171655154771,
"grad_norm": 1.328125,
"learning_rate": 0.0006923765668969046,
"loss": 3.466,
"step": 240500
},
{
"epoch": 15.413149143003325,
"grad_norm": 0.61328125,
"learning_rate": 0.0006917370171399336,
"loss": 3.4619,
"step": 241000
},
{
"epoch": 15.445126630851881,
"grad_norm": 0.65234375,
"learning_rate": 0.0006910974673829624,
"loss": 3.4637,
"step": 241500
},
{
"epoch": 15.477104118700435,
"grad_norm": 1.2578125,
"learning_rate": 0.0006904579176259913,
"loss": 3.4659,
"step": 242000
},
{
"epoch": 15.509081606548989,
"grad_norm": 0.98046875,
"learning_rate": 0.0006898183678690202,
"loss": 3.4647,
"step": 242500
},
{
"epoch": 15.541059094397545,
"grad_norm": 0.5546875,
"learning_rate": 0.0006891788181120491,
"loss": 3.4662,
"step": 243000
},
{
"epoch": 15.573036582246099,
"grad_norm": 0.5625,
"learning_rate": 0.0006885392683550781,
"loss": 3.4629,
"step": 243500
},
{
"epoch": 15.605014070094654,
"grad_norm": 1.75,
"learning_rate": 0.000687899718598107,
"loss": 3.4642,
"step": 244000
},
{
"epoch": 15.636991557943208,
"grad_norm": 1.2109375,
"learning_rate": 0.0006872601688411358,
"loss": 3.466,
"step": 244500
},
{
"epoch": 15.668969045791762,
"grad_norm": 0.57421875,
"learning_rate": 0.0006866206190841648,
"loss": 3.4657,
"step": 245000
},
{
"epoch": 15.700946533640318,
"grad_norm": 1.109375,
"learning_rate": 0.0006859810693271936,
"loss": 3.4644,
"step": 245500
},
{
"epoch": 15.732924021488872,
"grad_norm": 0.60546875,
"learning_rate": 0.0006853415195702226,
"loss": 3.4647,
"step": 246000
},
{
"epoch": 15.764901509337426,
"grad_norm": 0.609375,
"learning_rate": 0.0006847019698132515,
"loss": 3.4653,
"step": 246500
},
{
"epoch": 15.796878997185981,
"grad_norm": 0.66015625,
"learning_rate": 0.0006840624200562804,
"loss": 3.4632,
"step": 247000
},
{
"epoch": 15.828856485034535,
"grad_norm": 0.5625,
"learning_rate": 0.0006834228702993092,
"loss": 3.4615,
"step": 247500
},
{
"epoch": 15.860833972883091,
"grad_norm": 1.1015625,
"learning_rate": 0.0006827833205423382,
"loss": 3.465,
"step": 248000
},
{
"epoch": 15.892811460731645,
"grad_norm": 0.6875,
"learning_rate": 0.0006821437707853671,
"loss": 3.4628,
"step": 248500
},
{
"epoch": 15.924788948580199,
"grad_norm": 2.703125,
"learning_rate": 0.0006815042210283961,
"loss": 3.4646,
"step": 249000
},
{
"epoch": 15.956766436428754,
"grad_norm": 0.6015625,
"learning_rate": 0.0006808646712714249,
"loss": 3.4651,
"step": 249500
},
{
"epoch": 15.988743924277308,
"grad_norm": 0.5,
"learning_rate": 0.0006802251215144538,
"loss": 3.4637,
"step": 250000
},
{
"epoch": 16.0,
"eval_loss": 3.4529993534088135,
"eval_runtime": 0.9792,
"eval_samples_per_second": 510.629,
"eval_steps_per_second": 8.17,
"step": 250176
}
],
"logging_steps": 500,
"max_steps": 781800,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.231101164907233e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}