{
"best_global_step": 3144,
"best_metric": 1.1658307313919067,
"best_model_checkpoint": "models/gemma-3-4b-sft-full/checkpoint-3144",
"epoch": 6.0,
"eval_steps": 500,
"global_step": 9432,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006361323155216285,
"grad_norm": 31.319606519809206,
"learning_rate": 1.2722646310432571e-08,
"loss": 2.0248,
"step": 1
},
{
"epoch": 0.031806615776081425,
"grad_norm": 13.2120799093862,
"learning_rate": 6.361323155216286e-07,
"loss": 1.9103,
"step": 50
},
{
"epoch": 0.06361323155216285,
"grad_norm": 2.8305292907887694,
"learning_rate": 1.2722646310432571e-06,
"loss": 1.4434,
"step": 100
},
{
"epoch": 0.09541984732824428,
"grad_norm": 2.3110831279704738,
"learning_rate": 1.908396946564886e-06,
"loss": 1.3196,
"step": 150
},
{
"epoch": 0.1272264631043257,
"grad_norm": 2.3096762225567056,
"learning_rate": 2.5445292620865143e-06,
"loss": 1.3039,
"step": 200
},
{
"epoch": 0.15903307888040713,
"grad_norm": 2.2782572106396306,
"learning_rate": 3.1806615776081427e-06,
"loss": 1.2618,
"step": 250
},
{
"epoch": 0.19083969465648856,
"grad_norm": 2.2420875359580132,
"learning_rate": 3.816793893129772e-06,
"loss": 1.2501,
"step": 300
},
{
"epoch": 0.22264631043256997,
"grad_norm": 2.0330974831105215,
"learning_rate": 4.4529262086514e-06,
"loss": 1.2541,
"step": 350
},
{
"epoch": 0.2544529262086514,
"grad_norm": 2.1026569258639043,
"learning_rate": 5.0890585241730285e-06,
"loss": 1.2278,
"step": 400
},
{
"epoch": 0.2862595419847328,
"grad_norm": 2.0803892623652196,
"learning_rate": 5.725190839694656e-06,
"loss": 1.2173,
"step": 450
},
{
"epoch": 0.31806615776081426,
"grad_norm": 2.5887368846264684,
"learning_rate": 6.3613231552162854e-06,
"loss": 1.2241,
"step": 500
},
{
"epoch": 0.34987277353689566,
"grad_norm": 1.8616362535769346,
"learning_rate": 6.997455470737914e-06,
"loss": 1.1954,
"step": 550
},
{
"epoch": 0.3816793893129771,
"grad_norm": 2.2198054581544153,
"learning_rate": 7.633587786259543e-06,
"loss": 1.2207,
"step": 600
},
{
"epoch": 0.41348600508905853,
"grad_norm": 1.9458297082843083,
"learning_rate": 8.26972010178117e-06,
"loss": 1.2104,
"step": 650
},
{
"epoch": 0.44529262086513993,
"grad_norm": 1.754441697698081,
"learning_rate": 8.9058524173028e-06,
"loss": 1.1954,
"step": 700
},
{
"epoch": 0.4770992366412214,
"grad_norm": 1.9516730885650273,
"learning_rate": 9.54198473282443e-06,
"loss": 1.1962,
"step": 750
},
{
"epoch": 0.5089058524173028,
"grad_norm": 1.8788578410476755,
"learning_rate": 1.0178117048346057e-05,
"loss": 1.1955,
"step": 800
},
{
"epoch": 0.5407124681933843,
"grad_norm": 1.7660250423975214,
"learning_rate": 1.0814249363867686e-05,
"loss": 1.2029,
"step": 850
},
{
"epoch": 0.5725190839694656,
"grad_norm": 1.7319081555721738,
"learning_rate": 1.1450381679389312e-05,
"loss": 1.201,
"step": 900
},
{
"epoch": 0.6043256997455471,
"grad_norm": 1.7433405435098388,
"learning_rate": 1.2086513994910942e-05,
"loss": 1.1945,
"step": 950
},
{
"epoch": 0.6361323155216285,
"grad_norm": 1.6584922549922605,
"learning_rate": 1.2722646310432571e-05,
"loss": 1.188,
"step": 1000
},
{
"epoch": 0.6679389312977099,
"grad_norm": 1.694934894298546,
"learning_rate": 1.3358778625954198e-05,
"loss": 1.1853,
"step": 1050
},
{
"epoch": 0.6997455470737913,
"grad_norm": 1.8972752727827624,
"learning_rate": 1.3994910941475828e-05,
"loss": 1.1796,
"step": 1100
},
{
"epoch": 0.7315521628498728,
"grad_norm": 1.7794214888108801,
"learning_rate": 1.4631043256997457e-05,
"loss": 1.1879,
"step": 1150
},
{
"epoch": 0.7633587786259542,
"grad_norm": 1.7080167758006621,
"learning_rate": 1.5267175572519086e-05,
"loss": 1.2033,
"step": 1200
},
{
"epoch": 0.7951653944020356,
"grad_norm": 1.6732561680746716,
"learning_rate": 1.5903307888040712e-05,
"loss": 1.1729,
"step": 1250
},
{
"epoch": 0.8269720101781171,
"grad_norm": 2.0115920286242472,
"learning_rate": 1.653944020356234e-05,
"loss": 1.1798,
"step": 1300
},
{
"epoch": 0.8587786259541985,
"grad_norm": 1.5883913583214553,
"learning_rate": 1.717557251908397e-05,
"loss": 1.1761,
"step": 1350
},
{
"epoch": 0.8905852417302799,
"grad_norm": 1.5615231326127277,
"learning_rate": 1.78117048346056e-05,
"loss": 1.1807,
"step": 1400
},
{
"epoch": 0.9223918575063613,
"grad_norm": 1.6052692336601109,
"learning_rate": 1.844783715012723e-05,
"loss": 1.1872,
"step": 1450
},
{
"epoch": 0.9541984732824428,
"grad_norm": 1.6293394603925617,
"learning_rate": 1.908396946564886e-05,
"loss": 1.1821,
"step": 1500
},
{
"epoch": 0.9860050890585241,
"grad_norm": 1.9511097309507746,
"learning_rate": 1.9720101781170485e-05,
"loss": 1.193,
"step": 1550
},
{
"epoch": 1.0,
"eval_loss": 1.1915197372436523,
"eval_runtime": 50.604,
"eval_samples_per_second": 55.45,
"eval_steps_per_second": 1.739,
"step": 1572
},
{
"epoch": 1.0178117048346056,
"grad_norm": 1.699246916566911,
"learning_rate": 1.9999806716709255e-05,
"loss": 1.0668,
"step": 1600
},
{
"epoch": 1.049618320610687,
"grad_norm": 1.6215378174021484,
"learning_rate": 1.999850011488216e-05,
"loss": 0.9829,
"step": 1650
},
{
"epoch": 1.0814249363867685,
"grad_norm": 1.7868804206457551,
"learning_rate": 1.9995961032584046e-05,
"loss": 0.9782,
"step": 1700
},
{
"epoch": 1.11323155216285,
"grad_norm": 1.824863693326858,
"learning_rate": 1.9992189782798795e-05,
"loss": 0.9649,
"step": 1750
},
{
"epoch": 1.1450381679389312,
"grad_norm": 1.9389315988555975,
"learning_rate": 1.99871868303953e-05,
"loss": 0.9859,
"step": 1800
},
{
"epoch": 1.1768447837150127,
"grad_norm": 1.8613552819265144,
"learning_rate": 1.9980952792070175e-05,
"loss": 0.97,
"step": 1850
},
{
"epoch": 1.2086513994910941,
"grad_norm": 1.6290767219311002,
"learning_rate": 1.9973488436271728e-05,
"loss": 0.9898,
"step": 1900
},
{
"epoch": 1.2404580152671756,
"grad_norm": 1.9280005053128177,
"learning_rate": 1.996479468310524e-05,
"loss": 0.977,
"step": 1950
},
{
"epoch": 1.272264631043257,
"grad_norm": 1.8021715712875992,
"learning_rate": 1.9954872604219543e-05,
"loss": 0.9778,
"step": 2000
},
{
"epoch": 1.3040712468193385,
"grad_norm": 1.778983300178611,
"learning_rate": 1.994372342267493e-05,
"loss": 0.9754,
"step": 2050
},
{
"epoch": 1.33587786259542,
"grad_norm": 1.6139758020504216,
"learning_rate": 1.993134851279238e-05,
"loss": 0.9768,
"step": 2100
},
{
"epoch": 1.3676844783715012,
"grad_norm": 1.6159993769878525,
"learning_rate": 1.991774939998417e-05,
"loss": 0.977,
"step": 2150
},
{
"epoch": 1.3994910941475827,
"grad_norm": 1.7346584119107982,
"learning_rate": 1.9902927760565824e-05,
"loss": 1.0021,
"step": 2200
},
{
"epoch": 1.4312977099236641,
"grad_norm": 1.6348257679838059,
"learning_rate": 1.988688542154948e-05,
"loss": 0.9911,
"step": 2250
},
{
"epoch": 1.4631043256997456,
"grad_norm": 2.005161271222442,
"learning_rate": 1.98696243604187e-05,
"loss": 0.98,
"step": 2300
},
{
"epoch": 1.494910941475827,
"grad_norm": 1.6947935478149847,
"learning_rate": 1.9851146704884684e-05,
"loss": 0.9933,
"step": 2350
},
{
"epoch": 1.5267175572519083,
"grad_norm": 1.559288613818951,
"learning_rate": 1.9831454732624023e-05,
"loss": 0.9812,
"step": 2400
},
{
"epoch": 1.55852417302799,
"grad_norm": 1.6147458399643977,
"learning_rate": 1.9810550870997914e-05,
"loss": 0.9829,
"step": 2450
},
{
"epoch": 1.5903307888040712,
"grad_norm": 1.7200525728774254,
"learning_rate": 1.9788437696752965e-05,
"loss": 0.9827,
"step": 2500
},
{
"epoch": 1.6221374045801527,
"grad_norm": 1.5679464105011003,
"learning_rate": 1.9765117935703556e-05,
"loss": 0.9918,
"step": 2550
},
{
"epoch": 1.6539440203562341,
"grad_norm": 1.5684761038610553,
"learning_rate": 1.9740594462395844e-05,
"loss": 1.0035,
"step": 2600
},
{
"epoch": 1.6857506361323156,
"grad_norm": 1.6525710526384763,
"learning_rate": 1.9714870299753425e-05,
"loss": 0.9757,
"step": 2650
},
{
"epoch": 1.717557251908397,
"grad_norm": 1.61635439544328,
"learning_rate": 1.9687948618704713e-05,
"loss": 0.9878,
"step": 2700
},
{
"epoch": 1.7493638676844783,
"grad_norm": 1.552931766301823,
"learning_rate": 1.9659832737792065e-05,
"loss": 0.9926,
"step": 2750
},
{
"epoch": 1.78117048346056,
"grad_norm": 1.7462958660917196,
"learning_rate": 1.963052612276272e-05,
"loss": 0.9923,
"step": 2800
},
{
"epoch": 1.8129770992366412,
"grad_norm": 1.541467107392074,
"learning_rate": 1.9600032386141578e-05,
"loss": 0.9883,
"step": 2850
},
{
"epoch": 1.8447837150127226,
"grad_norm": 1.60142808575721,
"learning_rate": 1.9568355286785916e-05,
"loss": 0.9848,
"step": 2900
},
{
"epoch": 1.876590330788804,
"grad_norm": 1.628212808465854,
"learning_rate": 1.9535498729422034e-05,
"loss": 0.981,
"step": 2950
},
{
"epoch": 1.9083969465648853,
"grad_norm": 1.589079219019998,
"learning_rate": 1.950146676416393e-05,
"loss": 0.9938,
"step": 3000
},
{
"epoch": 1.940203562340967,
"grad_norm": 1.5927647305457868,
"learning_rate": 1.9466263586014062e-05,
"loss": 0.9831,
"step": 3050
},
{
"epoch": 1.9720101781170483,
"grad_norm": 1.6181088935396841,
"learning_rate": 1.9429893534346248e-05,
"loss": 0.9738,
"step": 3100
},
{
"epoch": 2.0,
"eval_loss": 1.1658307313919067,
"eval_runtime": 57.664,
"eval_samples_per_second": 48.661,
"eval_steps_per_second": 1.526,
"step": 3144
},
{
"epoch": 2.00381679389313,
"grad_norm": 2.8157430444252833,
"learning_rate": 1.9392361092370756e-05,
"loss": 0.9372,
"step": 3150
},
{
"epoch": 2.035623409669211,
"grad_norm": 1.8202205896718766,
"learning_rate": 1.9353670886581683e-05,
"loss": 0.6118,
"step": 3200
},
{
"epoch": 2.0674300254452924,
"grad_norm": 1.8024719083066718,
"learning_rate": 1.9313827686186664e-05,
"loss": 0.5956,
"step": 3250
},
{
"epoch": 2.099236641221374,
"grad_norm": 1.8065831151097012,
"learning_rate": 1.927283640251898e-05,
"loss": 0.615,
"step": 3300
},
{
"epoch": 2.1310432569974553,
"grad_norm": 1.93182684100521,
"learning_rate": 1.923070208843216e-05,
"loss": 0.6079,
"step": 3350
},
{
"epoch": 2.162849872773537,
"grad_norm": 1.8738788734317153,
"learning_rate": 1.9187429937677136e-05,
"loss": 0.607,
"step": 3400
},
{
"epoch": 2.1946564885496183,
"grad_norm": 1.8040300513160983,
"learning_rate": 1.9143025284262022e-05,
"loss": 0.6085,
"step": 3450
},
{
"epoch": 2.2264631043257,
"grad_norm": 1.8986773569695647,
"learning_rate": 1.909749360179461e-05,
"loss": 0.6145,
"step": 3500
},
{
"epoch": 2.258269720101781,
"grad_norm": 1.9163165829127622,
"learning_rate": 1.9050840502807665e-05,
"loss": 0.6169,
"step": 3550
},
{
"epoch": 2.2900763358778624,
"grad_norm": 2.0342511222836657,
"learning_rate": 1.9003071738067073e-05,
"loss": 0.6181,
"step": 3600
},
{
"epoch": 2.321882951653944,
"grad_norm": 1.9022311954341746,
"learning_rate": 1.895419319586298e-05,
"loss": 0.6322,
"step": 3650
},
{
"epoch": 2.3536895674300253,
"grad_norm": 1.947735727576319,
"learning_rate": 1.890421090128395e-05,
"loss": 0.6261,
"step": 3700
},
{
"epoch": 2.385496183206107,
"grad_norm": 1.8908602175645888,
"learning_rate": 1.8853131015474278e-05,
"loss": 0.6241,
"step": 3750
},
{
"epoch": 2.4173027989821882,
"grad_norm": 1.8428847331642595,
"learning_rate": 1.8800959834874534e-05,
"loss": 0.6247,
"step": 3800
},
{
"epoch": 2.4491094147582695,
"grad_norm": 1.9386784496016072,
"learning_rate": 1.8747703790445412e-05,
"loss": 0.6369,
"step": 3850
},
{
"epoch": 2.480916030534351,
"grad_norm": 1.8110474855626102,
"learning_rate": 1.8693369446875008e-05,
"loss": 0.6352,
"step": 3900
},
{
"epoch": 2.5127226463104324,
"grad_norm": 1.8744360271519491,
"learning_rate": 1.8637963501769625e-05,
"loss": 0.6402,
"step": 3950
},
{
"epoch": 2.544529262086514,
"grad_norm": 1.858724398900357,
"learning_rate": 1.858149278482817e-05,
"loss": 0.6459,
"step": 4000
},
{
"epoch": 2.5763358778625953,
"grad_norm": 1.8627524401678055,
"learning_rate": 1.8523964257000288e-05,
"loss": 0.6276,
"step": 4050
},
{
"epoch": 2.608142493638677,
"grad_norm": 1.9220180062265788,
"learning_rate": 1.8465385009628308e-05,
"loss": 0.6481,
"step": 4100
},
{
"epoch": 2.6399491094147582,
"grad_norm": 1.9319620445548449,
"learning_rate": 1.8405762263573108e-05,
"loss": 0.6344,
"step": 4150
},
{
"epoch": 2.67175572519084,
"grad_norm": 1.8442743167506148,
"learning_rate": 1.834510336832405e-05,
"loss": 0.6418,
"step": 4200
},
{
"epoch": 2.703562340966921,
"grad_norm": 1.8919128966016239,
"learning_rate": 1.8283415801093007e-05,
"loss": 0.6455,
"step": 4250
},
{
"epoch": 2.7353689567430024,
"grad_norm": 1.79572731114352,
"learning_rate": 1.8220707165892682e-05,
"loss": 0.6474,
"step": 4300
},
{
"epoch": 2.767175572519084,
"grad_norm": 1.8916208552532916,
"learning_rate": 1.815698519259929e-05,
"loss": 0.6479,
"step": 4350
},
{
"epoch": 2.7989821882951653,
"grad_norm": 1.8754600469553322,
"learning_rate": 1.8092257735999734e-05,
"loss": 0.6549,
"step": 4400
},
{
"epoch": 2.830788804071247,
"grad_norm": 1.8972086051601613,
"learning_rate": 1.8026532774823343e-05,
"loss": 0.6397,
"step": 4450
},
{
"epoch": 2.8625954198473282,
"grad_norm": 1.8335920924146587,
"learning_rate": 1.7959818410758395e-05,
"loss": 0.6379,
"step": 4500
},
{
"epoch": 2.8944020356234095,
"grad_norm": 2.010899629666033,
"learning_rate": 1.789212286745342e-05,
"loss": 0.645,
"step": 4550
},
{
"epoch": 2.926208651399491,
"grad_norm": 1.854046640562392,
"learning_rate": 1.7823454489503526e-05,
"loss": 0.6491,
"step": 4600
},
{
"epoch": 2.9580152671755724,
"grad_norm": 1.9582134927711392,
"learning_rate": 1.775382174142177e-05,
"loss": 0.6542,
"step": 4650
},
{
"epoch": 2.989821882951654,
"grad_norm": 1.851650468210065,
"learning_rate": 1.768323320659578e-05,
"loss": 0.6542,
"step": 4700
},
{
"epoch": 3.0,
"eval_loss": 1.25302255153656,
"eval_runtime": 57.2963,
"eval_samples_per_second": 48.973,
"eval_steps_per_second": 1.536,
"step": 4716
},
{
"epoch": 3.0216284987277353,
"grad_norm": 1.9776148010713264,
"learning_rate": 1.7611697586229695e-05,
"loss": 0.4254,
"step": 4750
},
{
"epoch": 3.053435114503817,
"grad_norm": 2.1156326922920994,
"learning_rate": 1.753922369827162e-05,
"loss": 0.3248,
"step": 4800
},
{
"epoch": 3.0852417302798982,
"grad_norm": 1.927754824109064,
"learning_rate": 1.7465820476326656e-05,
"loss": 0.328,
"step": 4850
},
{
"epoch": 3.1170483460559795,
"grad_norm": 1.984505722526154,
"learning_rate": 1.7391496968555667e-05,
"loss": 0.3325,
"step": 4900
},
{
"epoch": 3.148854961832061,
"grad_norm": 2.070364334889199,
"learning_rate": 1.7316262336559978e-05,
"loss": 0.3348,
"step": 4950
},
{
"epoch": 3.1806615776081424,
"grad_norm": 1.9450509022734594,
"learning_rate": 1.7240125854252043e-05,
"loss": 0.3413,
"step": 5000
},
{
"epoch": 3.212468193384224,
"grad_norm": 2.0171683252659323,
"learning_rate": 1.7163096906712267e-05,
"loss": 0.3353,
"step": 5050
},
{
"epoch": 3.2442748091603053,
"grad_norm": 2.0051713529489046,
"learning_rate": 1.708518498903216e-05,
"loss": 0.3411,
"step": 5100
},
{
"epoch": 3.276081424936387,
"grad_norm": 2.0973923846500213,
"learning_rate": 1.7006399705143905e-05,
"loss": 0.3421,
"step": 5150
},
{
"epoch": 3.3078880407124682,
"grad_norm": 2.0572445637964103,
"learning_rate": 1.692675076663651e-05,
"loss": 0.338,
"step": 5200
},
{
"epoch": 3.3396946564885495,
"grad_norm": 2.1760429565142223,
"learning_rate": 1.6846247991558686e-05,
"loss": 0.3506,
"step": 5250
},
{
"epoch": 3.371501272264631,
"grad_norm": 1.9297734638867776,
"learning_rate": 1.6764901303208632e-05,
"loss": 0.344,
"step": 5300
},
{
"epoch": 3.4033078880407124,
"grad_norm": 2.051646120204668,
"learning_rate": 1.6682720728910815e-05,
"loss": 0.3531,
"step": 5350
},
{
"epoch": 3.435114503816794,
"grad_norm": 2.053007809243884,
"learning_rate": 1.659971639877992e-05,
"loss": 0.356,
"step": 5400
},
{
"epoch": 3.4669211195928753,
"grad_norm": 2.113810517440616,
"learning_rate": 1.6515898544472172e-05,
"loss": 0.3544,
"step": 5450
},
{
"epoch": 3.4987277353689565,
"grad_norm": 2.0372048460483207,
"learning_rate": 1.6431277497924093e-05,
"loss": 0.3461,
"step": 5500
},
{
"epoch": 3.530534351145038,
"grad_norm": 2.0430461667046753,
"learning_rate": 1.6345863690078942e-05,
"loss": 0.3527,
"step": 5550
},
{
"epoch": 3.5623409669211195,
"grad_norm": 2.0399250101984485,
"learning_rate": 1.6259667649600907e-05,
"loss": 0.3584,
"step": 5600
},
{
"epoch": 3.594147582697201,
"grad_norm": 2.1539893126413165,
"learning_rate": 1.6172700001577286e-05,
"loss": 0.3599,
"step": 5650
},
{
"epoch": 3.6259541984732824,
"grad_norm": 2.158545963475489,
"learning_rate": 1.6084971466208764e-05,
"loss": 0.3639,
"step": 5700
},
{
"epoch": 3.6577608142493636,
"grad_norm": 2.133349813061679,
"learning_rate": 1.599649285748798e-05,
"loss": 0.3604,
"step": 5750
},
{
"epoch": 3.6895674300254453,
"grad_norm": 2.1126907045423136,
"learning_rate": 1.5907275081866504e-05,
"loss": 0.3572,
"step": 5800
},
{
"epoch": 3.721374045801527,
"grad_norm": 2.1536592654492006,
"learning_rate": 1.5817329136910463e-05,
"loss": 0.3597,
"step": 5850
},
{
"epoch": 3.753180661577608,
"grad_norm": 1.9820017146238096,
"learning_rate": 1.5726666109944887e-05,
"loss": 0.366,
"step": 5900
},
{
"epoch": 3.7849872773536894,
"grad_norm": 2.0394074001285167,
"learning_rate": 1.563529717668702e-05,
"loss": 0.3586,
"step": 5950
},
{
"epoch": 3.816793893129771,
"grad_norm": 2.0797434739007548,
"learning_rate": 1.5543233599868744e-05,
"loss": 0.3611,
"step": 6000
},
{
"epoch": 3.8486005089058524,
"grad_norm": 1.9847509894940687,
"learning_rate": 1.5450486727848217e-05,
"loss": 0.3682,
"step": 6050
},
{
"epoch": 3.880407124681934,
"grad_norm": 2.0762467778933806,
"learning_rate": 1.535706799321106e-05,
"loss": 0.367,
"step": 6100
},
{
"epoch": 3.9122137404580153,
"grad_norm": 2.08727307746701,
"learning_rate": 1.526298891136105e-05,
"loss": 0.3661,
"step": 6150
},
{
"epoch": 3.9440203562340965,
"grad_norm": 2.2966942638832544,
"learning_rate": 1.5168261079100695e-05,
"loss": 0.362,
"step": 6200
},
{
"epoch": 3.975826972010178,
"grad_norm": 2.052981094095626,
"learning_rate": 1.5072896173201697e-05,
"loss": 0.3692,
"step": 6250
},
{
"epoch": 4.0,
"eval_loss": 1.4977455139160156,
"eval_runtime": 51.4362,
"eval_samples_per_second": 54.553,
"eval_steps_per_second": 1.711,
"step": 6288
},
{
"epoch": 4.00763358778626,
"grad_norm": 2.107958347254252,
"learning_rate": 1.4976905948965637e-05,
"loss": 0.3142,
"step": 6300
},
{
"epoch": 4.039440203562341,
"grad_norm": 1.9505610066311394,
"learning_rate": 1.4880302238774911e-05,
"loss": 0.1694,
"step": 6350
},
{
"epoch": 4.071246819338422,
"grad_norm": 1.8629359372754337,
"learning_rate": 1.4783096950634211e-05,
"loss": 0.1727,
"step": 6400
},
{
"epoch": 4.103053435114504,
"grad_norm": 1.8272931243832953,
"learning_rate": 1.468530206670265e-05,
"loss": 0.1707,
"step": 6450
},
{
"epoch": 4.134860050890585,
"grad_norm": 1.982558356194926,
"learning_rate": 1.4586929641816783e-05,
"loss": 0.1757,
"step": 6500
},
{
"epoch": 4.166666666666667,
"grad_norm": 1.7448326286779277,
"learning_rate": 1.4487991802004625e-05,
"loss": 0.1777,
"step": 6550
},
{
"epoch": 4.198473282442748,
"grad_norm": 1.9280680517187911,
"learning_rate": 1.4388500742990934e-05,
"loss": 0.1785,
"step": 6600
},
{
"epoch": 4.230279898218829,
"grad_norm": 1.9089795547388508,
"learning_rate": 1.4288468728693889e-05,
"loss": 0.181,
"step": 6650
},
{
"epoch": 4.262086513994911,
"grad_norm": 2.0037836680476566,
"learning_rate": 1.4187908089713348e-05,
"loss": 0.1823,
"step": 6700
},
{
"epoch": 4.293893129770993,
"grad_norm": 1.8739930764604456,
"learning_rate": 1.4086831221810897e-05,
"loss": 0.1812,
"step": 6750
},
{
"epoch": 4.325699745547074,
"grad_norm": 1.9039560558559352,
"learning_rate": 1.3985250584381884e-05,
"loss": 0.1848,
"step": 6800
},
{
"epoch": 4.357506361323155,
"grad_norm": 1.979148934201314,
"learning_rate": 1.3883178698919578e-05,
"loss": 0.183,
"step": 6850
},
{
"epoch": 4.3893129770992365,
"grad_norm": 2.037387611365519,
"learning_rate": 1.378062814747168e-05,
"loss": 0.1858,
"step": 6900
},
{
"epoch": 4.421119592875318,
"grad_norm": 2.081816352916778,
"learning_rate": 1.3677611571089406e-05,
"loss": 0.1889,
"step": 6950
},
{
"epoch": 4.4529262086514,
"grad_norm": 2.0722431553233993,
"learning_rate": 1.3574141668269235e-05,
"loss": 0.1876,
"step": 7000
},
{
"epoch": 4.484732824427481,
"grad_norm": 1.893880597719728,
"learning_rate": 1.3470231193387639e-05,
"loss": 0.1868,
"step": 7050
},
{
"epoch": 4.516539440203562,
"grad_norm": 2.136980665093255,
"learning_rate": 1.3365892955128876e-05,
"loss": 0.1925,
"step": 7100
},
{
"epoch": 4.548346055979644,
"grad_norm": 2.2126003717690144,
"learning_rate": 1.326113981490611e-05,
"loss": 0.1868,
"step": 7150
},
{
"epoch": 4.580152671755725,
"grad_norm": 2.295568251011161,
"learning_rate": 1.315598468527604e-05,
"loss": 0.1855,
"step": 7200
},
{
"epoch": 4.611959287531807,
"grad_norm": 1.886004237984628,
"learning_rate": 1.30504405283472e-05,
"loss": 0.1891,
"step": 7250
},
{
"epoch": 4.643765903307888,
"grad_norm": 2.0731100810122065,
"learning_rate": 1.294452035418218e-05,
"loss": 0.1901,
"step": 7300
},
{
"epoch": 4.675572519083969,
"grad_norm": 1.8758598657873944,
"learning_rate": 1.2838237219193897e-05,
"loss": 0.1898,
"step": 7350
},
{
"epoch": 4.707379134860051,
"grad_norm": 1.9515874051409456,
"learning_rate": 1.2731604224536208e-05,
"loss": 0.1895,
"step": 7400
},
{
"epoch": 4.739185750636132,
"grad_norm": 1.972736566907763,
"learning_rate": 1.262463451448895e-05,
"loss": 0.1888,
"step": 7450
},
{
"epoch": 4.770992366412214,
"grad_norm": 1.9482783400297305,
"learning_rate": 1.2517341274837702e-05,
"loss": 0.1931,
"step": 7500
},
{
"epoch": 4.802798982188295,
"grad_norm": 1.8471591343739595,
"learning_rate": 1.2409737731248418e-05,
"loss": 0.1903,
"step": 7550
},
{
"epoch": 4.8346055979643765,
"grad_norm": 2.025410052096641,
"learning_rate": 1.2301837147637137e-05,
"loss": 0.1926,
"step": 7600
},
{
"epoch": 4.866412213740458,
"grad_norm": 1.8904775058623584,
"learning_rate": 1.2193652824535e-05,
"loss": 0.1909,
"step": 7650
},
{
"epoch": 4.898218829516539,
"grad_norm": 2.0404591880691405,
"learning_rate": 1.2085198097448732e-05,
"loss": 0.1909,
"step": 7700
},
{
"epoch": 4.930025445292621,
"grad_norm": 1.9452174397216788,
"learning_rate": 1.197648633521681e-05,
"loss": 0.19,
"step": 7750
},
{
"epoch": 4.961832061068702,
"grad_norm": 2.098087417526516,
"learning_rate": 1.1867530938361557e-05,
"loss": 0.1953,
"step": 7800
},
{
"epoch": 4.993638676844784,
"grad_norm": 2.028699161436086,
"learning_rate": 1.1758345337437284e-05,
"loss": 0.1932,
"step": 7850
},
{
"epoch": 5.0,
"eval_loss": 1.913898229598999,
"eval_runtime": 54.0702,
"eval_samples_per_second": 51.896,
"eval_steps_per_second": 1.628,
"step": 7860
},
{
"epoch": 5.025445292620865,
"grad_norm": 1.4793683076824136,
"learning_rate": 1.164894299137476e-05,
"loss": 0.1166,
"step": 7900
},
{
"epoch": 5.057251908396947,
"grad_norm": 1.5651227153942533,
"learning_rate": 1.1539337385822179e-05,
"loss": 0.1006,
"step": 7950
},
{
"epoch": 5.089058524173028,
"grad_norm": 1.7359295760816373,
"learning_rate": 1.1429542031482828e-05,
"loss": 0.1037,
"step": 8000
},
{
"epoch": 5.120865139949109,
"grad_norm": 1.5516562484547498,
"learning_rate": 1.1319570462449664e-05,
"loss": 0.1073,
"step": 8050
},
{
"epoch": 5.152671755725191,
"grad_norm": 1.7147421352890893,
"learning_rate": 1.120943623453703e-05,
"loss": 0.1048,
"step": 8100
},
{
"epoch": 5.184478371501272,
"grad_norm": 1.588020821919146,
"learning_rate": 1.1099152923609654e-05,
"loss": 0.1046,
"step": 8150
},
{
"epoch": 5.216284987277354,
"grad_norm": 1.4744529210130621,
"learning_rate": 1.0988734123909218e-05,
"loss": 0.1053,
"step": 8200
},
{
"epoch": 5.248091603053435,
"grad_norm": 1.5149467562017078,
"learning_rate": 1.0878193446378633e-05,
"loss": 0.1067,
"step": 8250
},
{
"epoch": 5.2798982188295165,
"grad_norm": 1.641740858676932,
"learning_rate": 1.076754451698427e-05,
"loss": 0.1052,
"step": 8300
},
{
"epoch": 5.311704834605598,
"grad_norm": 1.9805138911324567,
"learning_rate": 1.0656800975036328e-05,
"loss": 0.1073,
"step": 8350
},
{
"epoch": 5.34351145038168,
"grad_norm": 1.538007502423292,
"learning_rate": 1.0545976471507573e-05,
"loss": 0.1092,
"step": 8400
},
{
"epoch": 5.375318066157761,
"grad_norm": 1.5971589586731525,
"learning_rate": 1.0435084667350619e-05,
"loss": 0.1075,
"step": 8450
},
{
"epoch": 5.407124681933842,
"grad_norm": 1.690135861247903,
"learning_rate": 1.0324139231813997e-05,
"loss": 0.1074,
"step": 8500
},
{
"epoch": 5.438931297709924,
"grad_norm": 1.6897524323080446,
"learning_rate": 1.0213153840757198e-05,
"loss": 0.1099,
"step": 8550
},
{
"epoch": 5.470737913486005,
"grad_norm": 1.6020111115696878,
"learning_rate": 1.0102142174964883e-05,
"loss": 0.1089,
"step": 8600
},
{
"epoch": 5.502544529262087,
"grad_norm": 1.721827372559917,
"learning_rate": 9.991117918460518e-06,
"loss": 0.1085,
"step": 8650
},
{
"epoch": 5.534351145038168,
"grad_norm": 1.5374549183894768,
"learning_rate": 9.880094756819572e-06,
"loss": 0.1088,
"step": 8700
},
{
"epoch": 5.566157760814249,
"grad_norm": 1.63155554721324,
"learning_rate": 9.769086375482561e-06,
"loss": 0.1095,
"step": 8750
},
{
"epoch": 5.597964376590331,
"grad_norm": 1.6722391499109936,
"learning_rate": 9.658106458068086e-06,
"loss": 0.1097,
"step": 8800
},
{
"epoch": 5.629770992366412,
"grad_norm": 1.6914082156675518,
"learning_rate": 9.547168684686088e-06,
"loss": 0.1092,
"step": 8850
},
{
"epoch": 5.661577608142494,
"grad_norm": 1.6060594589970834,
"learning_rate": 9.436286730251568e-06,
"loss": 0.1109,
"step": 8900
},
{
"epoch": 5.693384223918575,
"grad_norm": 1.6457178552761271,
"learning_rate": 9.32547426279892e-06,
"loss": 0.1101,
"step": 8950
},
{
"epoch": 5.7251908396946565,
"grad_norm": 1.6736941545247992,
"learning_rate": 9.214744941797115e-06,
"loss": 0.1087,
"step": 9000
},
{
"epoch": 5.756997455470738,
"grad_norm": 1.6655682373835654,
"learning_rate": 9.104112416465949e-06,
"loss": 0.1072,
"step": 9050
},
{
"epoch": 5.788804071246819,
"grad_norm": 1.716312631900298,
"learning_rate": 8.993590324093548e-06,
"loss": 0.1096,
"step": 9100
},
{
"epoch": 5.820610687022901,
"grad_norm": 1.6302159083496506,
"learning_rate": 8.883192288355362e-06,
"loss": 0.1093,
"step": 9150
},
{
"epoch": 5.852417302798982,
"grad_norm": 1.6265328321818522,
"learning_rate": 8.772931917634792e-06,
"loss": 0.1101,
"step": 9200
},
{
"epoch": 5.8842239185750635,
"grad_norm": 1.611412246032975,
"learning_rate": 8.662822803345762e-06,
"loss": 0.1082,
"step": 9250
},
{
"epoch": 5.916030534351145,
"grad_norm": 1.523038058167952,
"learning_rate": 8.552878518257335e-06,
"loss": 0.1098,
"step": 9300
},
{
"epoch": 5.947837150127226,
"grad_norm": 1.652523299337976,
"learning_rate": 8.44311261482065e-06,
"loss": 0.1093,
"step": 9350
},
{
"epoch": 5.979643765903308,
"grad_norm": 1.6687963754361035,
"learning_rate": 8.333538623498357e-06,
"loss": 0.1083,
"step": 9400
},
{
"epoch": 6.0,
"eval_loss": 2.39013671875,
"eval_runtime": 57.4954,
"eval_samples_per_second": 48.804,
"eval_steps_per_second": 1.531,
"step": 9432
}
],
"logging_steps": 50,
"max_steps": 15720,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 682659013459968.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
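
The object above follows the standard trainer_state.json layout written by the Hugging Face Trainer at each checkpoint: run-level fields such as "best_metric" and "best_model_checkpoint", plus a "log_history" list that mixes per-step training entries (with "loss") and per-epoch evaluation entries (with "eval_loss"). As a minimal, hedged sketch of how this file could be inspected, the Python below uses only the standard library; the file path is an assumption for illustration, not taken from this repository.

# Minimal sketch: read a trainer_state.json and summarize it.
# The path below is an assumed example location, not confirmed by the repo.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best checkpoint recorded by the Trainer (here: step 3144, eval loss ~1.1658).
print("best step:", state["best_global_step"])
print("best eval loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Split log_history into training-loss entries and per-epoch eval entries.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Print the eval-loss trajectory (rises after epoch 2 in this run,
# which is why the best checkpoint stays at step 3144).
for e in eval_log:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f}")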