{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10008963250672244,
"eval_steps": 500,
"global_step": 670,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00014938751120406333,
"grad_norm": 0.5712769031524658,
"learning_rate": 2e-05,
"loss": 1.7864,
"step": 1
},
{
"epoch": 0.00029877502240812666,
"grad_norm": 0.8328073620796204,
"learning_rate": 4e-05,
"loss": 1.954,
"step": 2
},
{
"epoch": 0.00044816253361219,
"grad_norm": 0.3498156666755676,
"learning_rate": 6e-05,
"loss": 1.0704,
"step": 3
},
{
"epoch": 0.0005975500448162533,
"grad_norm": 0.46848297119140625,
"learning_rate": 8e-05,
"loss": 1.3401,
"step": 4
},
{
"epoch": 0.0007469375560203167,
"grad_norm": 0.5157446265220642,
"learning_rate": 0.0001,
"loss": 1.2173,
"step": 5
},
{
"epoch": 0.00089632506722438,
"grad_norm": 0.34888526797294617,
"learning_rate": 0.00012,
"loss": 1.2156,
"step": 6
},
{
"epoch": 0.0010457125784284435,
"grad_norm": 0.25781184434890747,
"learning_rate": 0.00014,
"loss": 1.1595,
"step": 7
},
{
"epoch": 0.0011951000896325067,
"grad_norm": 0.39395585656166077,
"learning_rate": 0.00016,
"loss": 1.447,
"step": 8
},
{
"epoch": 0.00134448760083657,
"grad_norm": 0.42594340443611145,
"learning_rate": 0.00018,
"loss": 0.8779,
"step": 9
},
{
"epoch": 0.0014938751120406335,
"grad_norm": 0.29849162697792053,
"learning_rate": 0.0002,
"loss": 1.214,
"step": 10
},
{
"epoch": 0.0016432626232446967,
"grad_norm": 0.206129252910614,
"learning_rate": 0.00019999998895420804,
"loss": 1.222,
"step": 11
},
{
"epoch": 0.00179265013444876,
"grad_norm": 0.20143035054206848,
"learning_rate": 0.0001999999558168346,
"loss": 1.2729,
"step": 12
},
{
"epoch": 0.0019420376456528235,
"grad_norm": 0.3609657883644104,
"learning_rate": 0.00019999990058788703,
"loss": 1.14,
"step": 13
},
{
"epoch": 0.002091425156856887,
"grad_norm": 0.22608326375484467,
"learning_rate": 0.00019999982326737747,
"loss": 1.2138,
"step": 14
},
{
"epoch": 0.00224081266806095,
"grad_norm": 0.40498849749565125,
"learning_rate": 0.00019999972385532303,
"loss": 1.4689,
"step": 15
},
{
"epoch": 0.0023902001792650133,
"grad_norm": 0.215151846408844,
"learning_rate": 0.0001999996023517457,
"loss": 1.0072,
"step": 16
},
{
"epoch": 0.0025395876904690767,
"grad_norm": 0.28476935625076294,
"learning_rate": 0.0001999994587566723,
"loss": 0.9636,
"step": 17
},
{
"epoch": 0.00268897520167314,
"grad_norm": 0.2294227033853531,
"learning_rate": 0.0001999992930701345,
"loss": 0.8722,
"step": 18
},
{
"epoch": 0.0028383627128772035,
"grad_norm": 0.24489282071590424,
"learning_rate": 0.00019999910529216902,
"loss": 0.913,
"step": 19
},
{
"epoch": 0.002987750224081267,
"grad_norm": 0.24592871963977814,
"learning_rate": 0.00019999889542281728,
"loss": 1.2072,
"step": 20
},
{
"epoch": 0.00313713773528533,
"grad_norm": 0.19988738000392914,
"learning_rate": 0.0001999986634621256,
"loss": 1.0463,
"step": 21
},
{
"epoch": 0.0032865252464893933,
"grad_norm": 0.16961582005023956,
"learning_rate": 0.00019999840941014525,
"loss": 0.8549,
"step": 22
},
{
"epoch": 0.0034359127576934568,
"grad_norm": 0.1631389707326889,
"learning_rate": 0.0001999981332669324,
"loss": 0.8112,
"step": 23
},
{
"epoch": 0.00358530026889752,
"grad_norm": 0.1402096003293991,
"learning_rate": 0.00019999783503254803,
"loss": 1.0202,
"step": 24
},
{
"epoch": 0.0037346877801015836,
"grad_norm": 0.3608367443084717,
"learning_rate": 0.000199997514707058,
"loss": 1.4076,
"step": 25
},
{
"epoch": 0.003884075291305647,
"grad_norm": 0.15870416164398193,
"learning_rate": 0.0001999971722905331,
"loss": 0.9372,
"step": 26
},
{
"epoch": 0.00403346280250971,
"grad_norm": 0.21479852497577667,
"learning_rate": 0.00019999680778304897,
"loss": 1.0237,
"step": 27
},
{
"epoch": 0.004182850313713774,
"grad_norm": 0.6759325861930847,
"learning_rate": 0.00019999642118468614,
"loss": 1.7162,
"step": 28
},
{
"epoch": 0.004332237824917837,
"grad_norm": 0.15937522053718567,
"learning_rate": 0.00019999601249552998,
"loss": 0.7851,
"step": 29
},
{
"epoch": 0.0044816253361219,
"grad_norm": 0.12428741902112961,
"learning_rate": 0.00019999558171567082,
"loss": 0.9342,
"step": 30
},
{
"epoch": 0.004631012847325963,
"grad_norm": 0.2223120778799057,
"learning_rate": 0.0001999951288452038,
"loss": 1.1871,
"step": 31
},
{
"epoch": 0.004780400358530027,
"grad_norm": 0.19341352581977844,
"learning_rate": 0.000199994653884229,
"loss": 0.9797,
"step": 32
},
{
"epoch": 0.00492978786973409,
"grad_norm": 0.41403812170028687,
"learning_rate": 0.0001999941568328513,
"loss": 0.9629,
"step": 33
},
{
"epoch": 0.005079175380938153,
"grad_norm": 0.29959213733673096,
"learning_rate": 0.00019999363769118055,
"loss": 1.231,
"step": 34
},
{
"epoch": 0.005228562892142217,
"grad_norm": 0.17178373038768768,
"learning_rate": 0.00019999309645933142,
"loss": 0.7834,
"step": 35
},
{
"epoch": 0.00537795040334628,
"grad_norm": 0.2903209626674652,
"learning_rate": 0.00019999253313742344,
"loss": 1.2852,
"step": 36
},
{
"epoch": 0.005527337914550344,
"grad_norm": 0.3148847818374634,
"learning_rate": 0.00019999194772558112,
"loss": 1.376,
"step": 37
},
{
"epoch": 0.005676725425754407,
"grad_norm": 0.4141716957092285,
"learning_rate": 0.00019999134022393375,
"loss": 0.9684,
"step": 38
},
{
"epoch": 0.0058261129369584705,
"grad_norm": 0.2509758770465851,
"learning_rate": 0.00019999071063261554,
"loss": 1.1722,
"step": 39
},
{
"epoch": 0.005975500448162534,
"grad_norm": 0.1800830066204071,
"learning_rate": 0.0001999900589517656,
"loss": 1.0264,
"step": 40
},
{
"epoch": 0.006124887959366597,
"grad_norm": 0.19982534646987915,
"learning_rate": 0.00019998938518152787,
"loss": 0.7343,
"step": 41
},
{
"epoch": 0.00627427547057066,
"grad_norm": 0.1816824972629547,
"learning_rate": 0.0001999886893220512,
"loss": 0.9136,
"step": 42
},
{
"epoch": 0.006423662981774723,
"grad_norm": 0.216154083609581,
"learning_rate": 0.0001999879713734893,
"loss": 1.0128,
"step": 43
},
{
"epoch": 0.006573050492978787,
"grad_norm": 0.16445758938789368,
"learning_rate": 0.0001999872313360008,
"loss": 0.9593,
"step": 44
},
{
"epoch": 0.00672243800418285,
"grad_norm": 0.14644479751586914,
"learning_rate": 0.00019998646920974919,
"loss": 0.9424,
"step": 45
},
{
"epoch": 0.0068718255153869135,
"grad_norm": 0.3356267809867859,
"learning_rate": 0.00019998568499490283,
"loss": 0.8878,
"step": 46
},
{
"epoch": 0.007021213026590977,
"grad_norm": 0.4974273443222046,
"learning_rate": 0.00019998487869163497,
"loss": 1.338,
"step": 47
},
{
"epoch": 0.00717060053779504,
"grad_norm": 0.1881432980298996,
"learning_rate": 0.00019998405030012371,
"loss": 0.882,
"step": 48
},
{
"epoch": 0.007319988048999104,
"grad_norm": 0.1419169157743454,
"learning_rate": 0.0001999831998205521,
"loss": 0.7255,
"step": 49
},
{
"epoch": 0.007469375560203167,
"grad_norm": 0.16964636743068695,
"learning_rate": 0.00019998232725310796,
"loss": 1.0151,
"step": 50
},
{
"epoch": 0.007618763071407231,
"grad_norm": 0.18110951781272888,
"learning_rate": 0.0001999814325979841,
"loss": 0.9705,
"step": 51
},
{
"epoch": 0.007768150582611294,
"grad_norm": 0.13390210270881653,
"learning_rate": 0.00019998051585537818,
"loss": 0.6999,
"step": 52
},
{
"epoch": 0.007917538093815357,
"grad_norm": 0.13011352717876434,
"learning_rate": 0.00019997957702549269,
"loss": 0.5496,
"step": 53
},
{
"epoch": 0.00806692560501942,
"grad_norm": 0.30082619190216064,
"learning_rate": 0.00019997861610853503,
"loss": 1.1625,
"step": 54
},
{
"epoch": 0.008216313116223483,
"grad_norm": 0.18591542541980743,
"learning_rate": 0.00019997763310471752,
"loss": 0.8686,
"step": 55
},
{
"epoch": 0.008365700627427548,
"grad_norm": 0.20125310122966766,
"learning_rate": 0.00019997662801425725,
"loss": 0.7371,
"step": 56
},
{
"epoch": 0.00851508813863161,
"grad_norm": 0.14571277797222137,
"learning_rate": 0.00019997560083737632,
"loss": 1.0109,
"step": 57
},
{
"epoch": 0.008664475649835674,
"grad_norm": 0.1365530639886856,
"learning_rate": 0.00019997455157430165,
"loss": 0.7709,
"step": 58
},
{
"epoch": 0.008813863161039737,
"grad_norm": 0.14306791126728058,
"learning_rate": 0.000199973480225265,
"loss": 0.7487,
"step": 59
},
{
"epoch": 0.0089632506722438,
"grad_norm": 0.2880021929740906,
"learning_rate": 0.00019997238679050308,
"loss": 1.2318,
"step": 60
},
{
"epoch": 0.009112638183447864,
"grad_norm": 0.1913180649280548,
"learning_rate": 0.00019997127127025746,
"loss": 0.8861,
"step": 61
},
{
"epoch": 0.009262025694651926,
"grad_norm": 0.44290822744369507,
"learning_rate": 0.00019997013366477453,
"loss": 1.3866,
"step": 62
},
{
"epoch": 0.00941141320585599,
"grad_norm": 0.12451759725809097,
"learning_rate": 0.00019996897397430563,
"loss": 0.855,
"step": 63
},
{
"epoch": 0.009560800717060053,
"grad_norm": 0.2885231375694275,
"learning_rate": 0.00019996779219910696,
"loss": 1.147,
"step": 64
},
{
"epoch": 0.009710188228264117,
"grad_norm": 0.14056627452373505,
"learning_rate": 0.00019996658833943957,
"loss": 0.7883,
"step": 65
},
{
"epoch": 0.00985957573946818,
"grad_norm": 0.14305633306503296,
"learning_rate": 0.00019996536239556942,
"loss": 0.5779,
"step": 66
},
{
"epoch": 0.010008963250672244,
"grad_norm": 0.17166224122047424,
"learning_rate": 0.00019996411436776738,
"loss": 0.9014,
"step": 67
},
{
"epoch": 0.010158350761876307,
"grad_norm": 0.15985064208507538,
"learning_rate": 0.0001999628442563091,
"loss": 0.9639,
"step": 68
},
{
"epoch": 0.010307738273080371,
"grad_norm": 0.21808995306491852,
"learning_rate": 0.0001999615520614752,
"loss": 0.7525,
"step": 69
},
{
"epoch": 0.010457125784284434,
"grad_norm": 0.21660713851451874,
"learning_rate": 0.00019996023778355113,
"loss": 0.9379,
"step": 70
},
{
"epoch": 0.010606513295488498,
"grad_norm": 0.20963691174983978,
"learning_rate": 0.00019995890142282728,
"loss": 0.6632,
"step": 71
},
{
"epoch": 0.01075590080669256,
"grad_norm": 0.2161312997341156,
"learning_rate": 0.00019995754297959882,
"loss": 0.8932,
"step": 72
},
{
"epoch": 0.010905288317896623,
"grad_norm": 0.17720285058021545,
"learning_rate": 0.00019995616245416584,
"loss": 0.5283,
"step": 73
},
{
"epoch": 0.011054675829100687,
"grad_norm": 0.18077623844146729,
"learning_rate": 0.0001999547598468334,
"loss": 0.603,
"step": 74
},
{
"epoch": 0.01120406334030475,
"grad_norm": 0.19094090163707733,
"learning_rate": 0.00019995333515791125,
"loss": 0.9712,
"step": 75
},
{
"epoch": 0.011353450851508814,
"grad_norm": 0.2597976326942444,
"learning_rate": 0.00019995188838771425,
"loss": 0.7913,
"step": 76
},
{
"epoch": 0.011502838362712877,
"grad_norm": 0.26235243678092957,
"learning_rate": 0.00019995041953656194,
"loss": 0.7578,
"step": 77
},
{
"epoch": 0.011652225873916941,
"grad_norm": 0.1809563785791397,
"learning_rate": 0.0001999489286047788,
"loss": 0.9168,
"step": 78
},
{
"epoch": 0.011801613385121004,
"grad_norm": 0.22458118200302124,
"learning_rate": 0.0001999474155926942,
"loss": 1.1456,
"step": 79
},
{
"epoch": 0.011951000896325068,
"grad_norm": 0.49804338812828064,
"learning_rate": 0.00019994588050064243,
"loss": 1.1896,
"step": 80
},
{
"epoch": 0.01210038840752913,
"grad_norm": 0.2247380167245865,
"learning_rate": 0.00019994432332896258,
"loss": 0.958,
"step": 81
},
{
"epoch": 0.012249775918733195,
"grad_norm": 0.17649094760417938,
"learning_rate": 0.00019994274407799872,
"loss": 0.9417,
"step": 82
},
{
"epoch": 0.012399163429937257,
"grad_norm": 0.24156750738620758,
"learning_rate": 0.00019994114274809964,
"loss": 0.9959,
"step": 83
},
{
"epoch": 0.01254855094114132,
"grad_norm": 0.2841317355632782,
"learning_rate": 0.00019993951933961913,
"loss": 1.296,
"step": 84
},
{
"epoch": 0.012697938452345384,
"grad_norm": 0.26792535185813904,
"learning_rate": 0.00019993787385291588,
"loss": 1.0296,
"step": 85
},
{
"epoch": 0.012847325963549447,
"grad_norm": 0.11657895892858505,
"learning_rate": 0.00019993620628835332,
"loss": 0.6993,
"step": 86
},
{
"epoch": 0.01299671347475351,
"grad_norm": 0.15463922917842865,
"learning_rate": 0.0001999345166462999,
"loss": 0.6925,
"step": 87
},
{
"epoch": 0.013146100985957573,
"grad_norm": 0.1776818037033081,
"learning_rate": 0.0001999328049271289,
"loss": 0.7629,
"step": 88
},
{
"epoch": 0.013295488497161638,
"grad_norm": 0.16185057163238525,
"learning_rate": 0.00019993107113121844,
"loss": 1.0736,
"step": 89
},
{
"epoch": 0.0134448760083657,
"grad_norm": 0.3227018415927887,
"learning_rate": 0.0001999293152589515,
"loss": 0.8586,
"step": 90
},
{
"epoch": 0.013594263519569764,
"grad_norm": 0.16486088931560516,
"learning_rate": 0.00019992753731071602,
"loss": 1.0614,
"step": 91
},
{
"epoch": 0.013743651030773827,
"grad_norm": 0.1495029777288437,
"learning_rate": 0.0001999257372869048,
"loss": 0.8561,
"step": 92
},
{
"epoch": 0.013893038541977891,
"grad_norm": 0.16044558584690094,
"learning_rate": 0.00019992391518791546,
"loss": 1.0294,
"step": 93
},
{
"epoch": 0.014042426053181954,
"grad_norm": 0.16251197457313538,
"learning_rate": 0.00019992207101415053,
"loss": 0.838,
"step": 94
},
{
"epoch": 0.014191813564386018,
"grad_norm": 0.47265177965164185,
"learning_rate": 0.00019992020476601745,
"loss": 1.4965,
"step": 95
},
{
"epoch": 0.01434120107559008,
"grad_norm": 0.16032789647579193,
"learning_rate": 0.00019991831644392848,
"loss": 0.9541,
"step": 96
},
{
"epoch": 0.014490588586794143,
"grad_norm": 0.1563224196434021,
"learning_rate": 0.0001999164060483008,
"loss": 0.6754,
"step": 97
},
{
"epoch": 0.014639976097998207,
"grad_norm": 0.2469077855348587,
"learning_rate": 0.00019991447357955639,
"loss": 0.9911,
"step": 98
},
{
"epoch": 0.01478936360920227,
"grad_norm": 0.13441061973571777,
"learning_rate": 0.00019991251903812225,
"loss": 0.7342,
"step": 99
},
{
"epoch": 0.014938751120406334,
"grad_norm": 0.20193825662136078,
"learning_rate": 0.00019991054242443008,
"loss": 0.6834,
"step": 100
},
{
"epoch": 0.015088138631610397,
"grad_norm": 0.1552024483680725,
"learning_rate": 0.0001999085437389166,
"loss": 0.8257,
"step": 101
},
{
"epoch": 0.015237526142814461,
"grad_norm": 0.25607559084892273,
"learning_rate": 0.00019990652298202335,
"loss": 1.1036,
"step": 102
},
{
"epoch": 0.015386913654018524,
"grad_norm": 0.11925558000802994,
"learning_rate": 0.00019990448015419675,
"loss": 0.6345,
"step": 103
},
{
"epoch": 0.015536301165222588,
"grad_norm": 0.28443989157676697,
"learning_rate": 0.00019990241525588804,
"loss": 1.1217,
"step": 104
},
{
"epoch": 0.01568568867642665,
"grad_norm": 0.15237168967723846,
"learning_rate": 0.00019990032828755345,
"loss": 0.7735,
"step": 105
},
{
"epoch": 0.015835076187630713,
"grad_norm": 0.173554927110672,
"learning_rate": 0.000199898219249654,
"loss": 1.1093,
"step": 106
},
{
"epoch": 0.01598446369883478,
"grad_norm": 0.14035388827323914,
"learning_rate": 0.0001998960881426556,
"loss": 0.8206,
"step": 107
},
{
"epoch": 0.01613385121003884,
"grad_norm": 0.13699080049991608,
"learning_rate": 0.00019989393496702907,
"loss": 1.0426,
"step": 108
},
{
"epoch": 0.016283238721242904,
"grad_norm": 0.1313813477754593,
"learning_rate": 0.00019989175972325005,
"loss": 0.7804,
"step": 109
},
{
"epoch": 0.016432626232446967,
"grad_norm": 0.15459921956062317,
"learning_rate": 0.00019988956241179912,
"loss": 0.9949,
"step": 110
},
{
"epoch": 0.01658201374365103,
"grad_norm": 0.20009522140026093,
"learning_rate": 0.00019988734303316168,
"loss": 1.0159,
"step": 111
},
{
"epoch": 0.016731401254855095,
"grad_norm": 0.17909283936023712,
"learning_rate": 0.00019988510158782804,
"loss": 0.5281,
"step": 112
},
{
"epoch": 0.016880788766059158,
"grad_norm": 0.6786065697669983,
"learning_rate": 0.00019988283807629334,
"loss": 1.2808,
"step": 113
},
{
"epoch": 0.01703017627726322,
"grad_norm": 0.15410082042217255,
"learning_rate": 0.00019988055249905767,
"loss": 1.0755,
"step": 114
},
{
"epoch": 0.017179563788467283,
"grad_norm": 0.19600564241409302,
"learning_rate": 0.00019987824485662593,
"loss": 1.2168,
"step": 115
},
{
"epoch": 0.01732895129967135,
"grad_norm": 0.1903967261314392,
"learning_rate": 0.00019987591514950787,
"loss": 0.9987,
"step": 116
},
{
"epoch": 0.01747833881087541,
"grad_norm": 0.13745927810668945,
"learning_rate": 0.00019987356337821822,
"loss": 0.9328,
"step": 117
},
{
"epoch": 0.017627726322079474,
"grad_norm": 0.18400371074676514,
"learning_rate": 0.00019987118954327654,
"loss": 0.6686,
"step": 118
},
{
"epoch": 0.017777113833283537,
"grad_norm": 0.29325294494628906,
"learning_rate": 0.00019986879364520716,
"loss": 1.173,
"step": 119
},
{
"epoch": 0.0179265013444876,
"grad_norm": 0.15400448441505432,
"learning_rate": 0.00019986637568453945,
"loss": 0.897,
"step": 120
},
{
"epoch": 0.018075888855691665,
"grad_norm": 0.2178712785243988,
"learning_rate": 0.00019986393566180755,
"loss": 1.2885,
"step": 121
},
{
"epoch": 0.018225276366895728,
"grad_norm": 0.15217332541942596,
"learning_rate": 0.00019986147357755048,
"loss": 0.9847,
"step": 122
},
{
"epoch": 0.01837466387809979,
"grad_norm": 0.35806745290756226,
"learning_rate": 0.0001998589894323122,
"loss": 1.0114,
"step": 123
},
{
"epoch": 0.018524051389303853,
"grad_norm": 0.46978825330734253,
"learning_rate": 0.00019985648322664145,
"loss": 1.33,
"step": 124
},
{
"epoch": 0.01867343890050792,
"grad_norm": 0.3191573917865753,
"learning_rate": 0.00019985395496109192,
"loss": 0.8691,
"step": 125
},
{
"epoch": 0.01882282641171198,
"grad_norm": 0.6782920360565186,
"learning_rate": 0.00019985140463622215,
"loss": 1.7387,
"step": 126
},
{
"epoch": 0.018972213922916044,
"grad_norm": 0.4556753933429718,
"learning_rate": 0.0001998488322525955,
"loss": 1.9508,
"step": 127
},
{
"epoch": 0.019121601434120106,
"grad_norm": 0.1401677280664444,
"learning_rate": 0.0001998462378107803,
"loss": 0.8127,
"step": 128
},
{
"epoch": 0.019270988945324172,
"grad_norm": 0.1187644675374031,
"learning_rate": 0.00019984362131134968,
"loss": 0.5421,
"step": 129
},
{
"epoch": 0.019420376456528235,
"grad_norm": 0.17097711563110352,
"learning_rate": 0.0001998409827548817,
"loss": 0.7335,
"step": 130
},
{
"epoch": 0.019569763967732298,
"grad_norm": 0.12986791133880615,
"learning_rate": 0.00019983832214195917,
"loss": 0.7151,
"step": 131
},
{
"epoch": 0.01971915147893636,
"grad_norm": 0.14413940906524658,
"learning_rate": 0.00019983563947316996,
"loss": 0.6833,
"step": 132
},
{
"epoch": 0.019868538990140423,
"grad_norm": 0.1424313336610794,
"learning_rate": 0.00019983293474910667,
"loss": 0.8199,
"step": 133
},
{
"epoch": 0.02001792650134449,
"grad_norm": 0.11715007573366165,
"learning_rate": 0.00019983020797036683,
"loss": 0.6079,
"step": 134
},
{
"epoch": 0.02016731401254855,
"grad_norm": 0.11654297262430191,
"learning_rate": 0.00019982745913755282,
"loss": 0.5302,
"step": 135
},
{
"epoch": 0.020316701523752614,
"grad_norm": 0.1768021434545517,
"learning_rate": 0.00019982468825127187,
"loss": 0.5412,
"step": 136
},
{
"epoch": 0.020466089034956676,
"grad_norm": 0.2620059549808502,
"learning_rate": 0.00019982189531213618,
"loss": 1.0321,
"step": 137
},
{
"epoch": 0.020615476546160742,
"grad_norm": 0.1472969800233841,
"learning_rate": 0.0001998190803207627,
"loss": 1.0097,
"step": 138
},
{
"epoch": 0.020764864057364805,
"grad_norm": 0.1889103651046753,
"learning_rate": 0.00019981624327777332,
"loss": 0.7423,
"step": 139
},
{
"epoch": 0.020914251568568867,
"grad_norm": 0.20594674348831177,
"learning_rate": 0.0001998133841837948,
"loss": 1.3315,
"step": 140
},
{
"epoch": 0.02106363907977293,
"grad_norm": 0.19783402979373932,
"learning_rate": 0.00019981050303945877,
"loss": 1.0435,
"step": 141
},
{
"epoch": 0.021213026590976996,
"grad_norm": 0.12695230543613434,
"learning_rate": 0.00019980759984540168,
"loss": 0.8,
"step": 142
},
{
"epoch": 0.02136241410218106,
"grad_norm": 0.18909701704978943,
"learning_rate": 0.0001998046746022649,
"loss": 0.7549,
"step": 143
},
{
"epoch": 0.02151180161338512,
"grad_norm": 0.2600228488445282,
"learning_rate": 0.0001998017273106947,
"loss": 0.6657,
"step": 144
},
{
"epoch": 0.021661189124589184,
"grad_norm": 0.2999661862850189,
"learning_rate": 0.00019979875797134216,
"loss": 0.9626,
"step": 145
},
{
"epoch": 0.021810576635793246,
"grad_norm": 0.2698724567890167,
"learning_rate": 0.00019979576658486325,
"loss": 0.8856,
"step": 146
},
{
"epoch": 0.021959964146997312,
"grad_norm": 0.14443442225456238,
"learning_rate": 0.0001997927531519188,
"loss": 0.9285,
"step": 147
},
{
"epoch": 0.022109351658201375,
"grad_norm": 0.21873806416988373,
"learning_rate": 0.00019978971767317457,
"loss": 0.9565,
"step": 148
},
{
"epoch": 0.022258739169405437,
"grad_norm": 0.2041054517030716,
"learning_rate": 0.0001997866601493011,
"loss": 0.9455,
"step": 149
},
{
"epoch": 0.0224081266806095,
"grad_norm": 0.19350048899650574,
"learning_rate": 0.00019978358058097388,
"loss": 0.8047,
"step": 150
},
{
"epoch": 0.022557514191813566,
"grad_norm": 0.25492972135543823,
"learning_rate": 0.00019978047896887323,
"loss": 1.234,
"step": 151
},
{
"epoch": 0.02270690170301763,
"grad_norm": 0.1414925754070282,
"learning_rate": 0.0001997773553136843,
"loss": 0.9703,
"step": 152
},
{
"epoch": 0.02285628921422169,
"grad_norm": 0.15996232628822327,
"learning_rate": 0.00019977420961609721,
"loss": 1.1475,
"step": 153
},
{
"epoch": 0.023005676725425753,
"grad_norm": 0.1661243587732315,
"learning_rate": 0.00019977104187680688,
"loss": 0.8574,
"step": 154
},
{
"epoch": 0.02315506423662982,
"grad_norm": 0.17797410488128662,
"learning_rate": 0.00019976785209651309,
"loss": 0.6742,
"step": 155
},
{
"epoch": 0.023304451747833882,
"grad_norm": 0.1401919722557068,
"learning_rate": 0.00019976464027592053,
"loss": 0.7065,
"step": 156
},
{
"epoch": 0.023453839259037945,
"grad_norm": 0.3743472695350647,
"learning_rate": 0.00019976140641573875,
"loss": 1.1391,
"step": 157
},
{
"epoch": 0.023603226770242007,
"grad_norm": 0.12242946028709412,
"learning_rate": 0.00019975815051668217,
"loss": 0.7517,
"step": 158
},
{
"epoch": 0.02375261428144607,
"grad_norm": 0.12851974368095398,
"learning_rate": 0.00019975487257947004,
"loss": 0.5795,
"step": 159
},
{
"epoch": 0.023902001792650136,
"grad_norm": 0.15131576359272003,
"learning_rate": 0.0001997515726048265,
"loss": 0.5735,
"step": 160
},
{
"epoch": 0.024051389303854198,
"grad_norm": 0.2606663703918457,
"learning_rate": 0.00019974825059348062,
"loss": 0.7677,
"step": 161
},
{
"epoch": 0.02420077681505826,
"grad_norm": 0.3116661608219147,
"learning_rate": 0.00019974490654616625,
"loss": 1.1465,
"step": 162
},
{
"epoch": 0.024350164326262323,
"grad_norm": 0.15296891331672668,
"learning_rate": 0.00019974154046362212,
"loss": 0.9154,
"step": 163
},
{
"epoch": 0.02449955183746639,
"grad_norm": 0.12761889398097992,
"learning_rate": 0.0001997381523465919,
"loss": 0.7902,
"step": 164
},
{
"epoch": 0.024648939348670452,
"grad_norm": 0.11566565185785294,
"learning_rate": 0.00019973474219582405,
"loss": 0.5558,
"step": 165
},
{
"epoch": 0.024798326859874514,
"grad_norm": 0.12765946984291077,
"learning_rate": 0.00019973131001207195,
"loss": 0.7314,
"step": 166
},
{
"epoch": 0.024947714371078577,
"grad_norm": 0.12568241357803345,
"learning_rate": 0.00019972785579609376,
"loss": 0.9476,
"step": 167
},
{
"epoch": 0.02509710188228264,
"grad_norm": 0.21633781492710114,
"learning_rate": 0.00019972437954865265,
"loss": 1.3121,
"step": 168
},
{
"epoch": 0.025246489393486705,
"grad_norm": 0.14913877844810486,
"learning_rate": 0.00019972088127051657,
"loss": 0.8247,
"step": 169
},
{
"epoch": 0.025395876904690768,
"grad_norm": 0.16602329909801483,
"learning_rate": 0.00019971736096245825,
"loss": 1.064,
"step": 170
},
{
"epoch": 0.02554526441589483,
"grad_norm": 0.28753912448883057,
"learning_rate": 0.00019971381862525552,
"loss": 1.4888,
"step": 171
},
{
"epoch": 0.025694651927098893,
"grad_norm": 0.13800154626369476,
"learning_rate": 0.00019971025425969083,
"loss": 0.5945,
"step": 172
},
{
"epoch": 0.02584403943830296,
"grad_norm": 0.22065165638923645,
"learning_rate": 0.00019970666786655166,
"loss": 0.8695,
"step": 173
},
{
"epoch": 0.02599342694950702,
"grad_norm": 0.3128964900970459,
"learning_rate": 0.0001997030594466303,
"loss": 1.1673,
"step": 174
},
{
"epoch": 0.026142814460711084,
"grad_norm": 0.20117323100566864,
"learning_rate": 0.00019969942900072387,
"loss": 1.0395,
"step": 175
},
{
"epoch": 0.026292201971915147,
"grad_norm": 0.14214551448822021,
"learning_rate": 0.00019969577652963444,
"loss": 0.5135,
"step": 176
},
{
"epoch": 0.026441589483119213,
"grad_norm": 0.20523157715797424,
"learning_rate": 0.00019969210203416883,
"loss": 1.0156,
"step": 177
},
{
"epoch": 0.026590976994323275,
"grad_norm": 0.1385200172662735,
"learning_rate": 0.0001996884055151389,
"loss": 0.5202,
"step": 178
},
{
"epoch": 0.026740364505527338,
"grad_norm": 0.14539465308189392,
"learning_rate": 0.00019968468697336117,
"loss": 0.6127,
"step": 179
},
{
"epoch": 0.0268897520167314,
"grad_norm": 0.2886626720428467,
"learning_rate": 0.00019968094640965717,
"loss": 1.213,
"step": 180
},
{
"epoch": 0.027039139527935463,
"grad_norm": 0.22695106267929077,
"learning_rate": 0.00019967718382485323,
"loss": 1.137,
"step": 181
},
{
"epoch": 0.02718852703913953,
"grad_norm": 0.17426621913909912,
"learning_rate": 0.00019967339921978062,
"loss": 0.8978,
"step": 182
},
{
"epoch": 0.02733791455034359,
"grad_norm": 0.2875833809375763,
"learning_rate": 0.00019966959259527534,
"loss": 0.8688,
"step": 183
},
{
"epoch": 0.027487302061547654,
"grad_norm": 0.14763344824314117,
"learning_rate": 0.00019966576395217837,
"loss": 0.564,
"step": 184
},
{
"epoch": 0.027636689572751717,
"grad_norm": 0.24988707900047302,
"learning_rate": 0.00019966191329133555,
"loss": 0.429,
"step": 185
},
{
"epoch": 0.027786077083955783,
"grad_norm": 0.16043034195899963,
"learning_rate": 0.0001996580406135975,
"loss": 0.7251,
"step": 186
},
{
"epoch": 0.027935464595159845,
"grad_norm": 0.2776719033718109,
"learning_rate": 0.00019965414591981975,
"loss": 1.3513,
"step": 187
},
{
"epoch": 0.028084852106363908,
"grad_norm": 0.1749371886253357,
"learning_rate": 0.00019965022921086275,
"loss": 0.7383,
"step": 188
},
{
"epoch": 0.02823423961756797,
"grad_norm": 0.2881135940551758,
"learning_rate": 0.00019964629048759176,
"loss": 1.0511,
"step": 189
},
{
"epoch": 0.028383627128772036,
"grad_norm": 0.11646547168493271,
"learning_rate": 0.00019964232975087687,
"loss": 0.6575,
"step": 190
},
{
"epoch": 0.0285330146399761,
"grad_norm": 0.1457265317440033,
"learning_rate": 0.0001996383470015931,
"loss": 0.7538,
"step": 191
},
{
"epoch": 0.02868240215118016,
"grad_norm": 0.13400131464004517,
"learning_rate": 0.00019963434224062025,
"loss": 0.7773,
"step": 192
},
{
"epoch": 0.028831789662384224,
"grad_norm": 0.23350481688976288,
"learning_rate": 0.0001996303154688431,
"loss": 1.2769,
"step": 193
},
{
"epoch": 0.028981177173588286,
"grad_norm": 0.563207745552063,
"learning_rate": 0.0001996262666871512,
"loss": 1.5822,
"step": 194
},
{
"epoch": 0.029130564684792352,
"grad_norm": 0.5377495884895325,
"learning_rate": 0.00019962219589643898,
"loss": 1.4911,
"step": 195
},
{
"epoch": 0.029279952195996415,
"grad_norm": 0.15726317465305328,
"learning_rate": 0.00019961810309760577,
"loss": 0.7824,
"step": 196
},
{
"epoch": 0.029429339707200478,
"grad_norm": 0.15040062367916107,
"learning_rate": 0.00019961398829155568,
"loss": 0.7541,
"step": 197
},
{
"epoch": 0.02957872721840454,
"grad_norm": 0.12620890140533447,
"learning_rate": 0.00019960985147919778,
"loss": 0.932,
"step": 198
},
{
"epoch": 0.029728114729608606,
"grad_norm": 0.15029945969581604,
"learning_rate": 0.00019960569266144597,
"loss": 0.9588,
"step": 199
},
{
"epoch": 0.02987750224081267,
"grad_norm": 0.16770395636558533,
"learning_rate": 0.00019960151183921897,
"loss": 0.5964,
"step": 200
},
{
"epoch": 0.03002688975201673,
"grad_norm": 0.12649193406105042,
"learning_rate": 0.0001995973090134404,
"loss": 0.8221,
"step": 201
},
{
"epoch": 0.030176277263220794,
"grad_norm": 0.15091091394424438,
"learning_rate": 0.00019959308418503877,
"loss": 0.8811,
"step": 202
},
{
"epoch": 0.03032566477442486,
"grad_norm": 0.12077690660953522,
"learning_rate": 0.00019958883735494732,
"loss": 0.6411,
"step": 203
},
{
"epoch": 0.030475052285628922,
"grad_norm": 0.15071533620357513,
"learning_rate": 0.00019958456852410433,
"loss": 0.7742,
"step": 204
},
{
"epoch": 0.030624439796832985,
"grad_norm": 0.1368575096130371,
"learning_rate": 0.00019958027769345277,
"loss": 1.1054,
"step": 205
},
{
"epoch": 0.030773827308037047,
"grad_norm": 0.22218400239944458,
"learning_rate": 0.0001995759648639406,
"loss": 0.8127,
"step": 206
},
{
"epoch": 0.03092321481924111,
"grad_norm": 0.12783220410346985,
"learning_rate": 0.00019957163003652063,
"loss": 0.6916,
"step": 207
},
{
"epoch": 0.031072602330445176,
"grad_norm": 0.18626387417316437,
"learning_rate": 0.00019956727321215044,
"loss": 0.8217,
"step": 208
},
{
"epoch": 0.03122198984164924,
"grad_norm": 0.12861424684524536,
"learning_rate": 0.0001995628943917925,
"loss": 1.0327,
"step": 209
},
{
"epoch": 0.0313713773528533,
"grad_norm": 0.17585206031799316,
"learning_rate": 0.00019955849357641424,
"loss": 0.6836,
"step": 210
},
{
"epoch": 0.03152076486405737,
"grad_norm": 0.1541229784488678,
"learning_rate": 0.0001995540707669878,
"loss": 1.1849,
"step": 211
},
{
"epoch": 0.031670152375261426,
"grad_norm": 0.13117974996566772,
"learning_rate": 0.00019954962596449024,
"loss": 0.7779,
"step": 212
},
{
"epoch": 0.03181953988646549,
"grad_norm": 0.3847130537033081,
"learning_rate": 0.00019954515916990358,
"loss": 1.4158,
"step": 213
},
{
"epoch": 0.03196892739766956,
"grad_norm": 0.21019677817821503,
"learning_rate": 0.0001995406703842145,
"loss": 1.0807,
"step": 214
},
{
"epoch": 0.03211831490887362,
"grad_norm": 0.10954124480485916,
"learning_rate": 0.0001995361596084147,
"loss": 0.7142,
"step": 215
},
{
"epoch": 0.03226770242007768,
"grad_norm": 0.25598999857902527,
"learning_rate": 0.0001995316268435007,
"loss": 0.7528,
"step": 216
},
{
"epoch": 0.03241708993128174,
"grad_norm": 0.16298946738243103,
"learning_rate": 0.0001995270720904738,
"loss": 0.6978,
"step": 217
},
{
"epoch": 0.03256647744248581,
"grad_norm": 0.37723109126091003,
"learning_rate": 0.00019952249535034025,
"loss": 1.0603,
"step": 218
},
{
"epoch": 0.032715864953689874,
"grad_norm": 0.1600012332201004,
"learning_rate": 0.00019951789662411113,
"loss": 0.7634,
"step": 219
},
{
"epoch": 0.03286525246489393,
"grad_norm": 0.1618419587612152,
"learning_rate": 0.00019951327591280236,
"loss": 0.8068,
"step": 220
},
{
"epoch": 0.033014639976098,
"grad_norm": 0.11850997805595398,
"learning_rate": 0.00019950863321743475,
"loss": 0.7883,
"step": 221
},
{
"epoch": 0.03316402748730206,
"grad_norm": 0.18700255453586578,
"learning_rate": 0.0001995039685390339,
"loss": 0.5821,
"step": 222
},
{
"epoch": 0.033313414998506125,
"grad_norm": 0.19264543056488037,
"learning_rate": 0.00019949928187863036,
"loss": 0.7578,
"step": 223
},
{
"epoch": 0.03346280250971019,
"grad_norm": 0.23828521370887756,
"learning_rate": 0.00019949457323725946,
"loss": 1.1739,
"step": 224
},
{
"epoch": 0.03361219002091425,
"grad_norm": 0.2178596705198288,
"learning_rate": 0.0001994898426159614,
"loss": 0.6075,
"step": 225
},
{
"epoch": 0.033761577532118316,
"grad_norm": 0.13766655325889587,
"learning_rate": 0.0001994850900157813,
"loss": 0.6941,
"step": 226
},
{
"epoch": 0.03391096504332238,
"grad_norm": 0.345782071352005,
"learning_rate": 0.00019948031543776904,
"loss": 1.0944,
"step": 227
},
{
"epoch": 0.03406035255452644,
"grad_norm": 0.1483844369649887,
"learning_rate": 0.0001994755188829794,
"loss": 0.9752,
"step": 228
},
{
"epoch": 0.03420974006573051,
"grad_norm": 0.16494938731193542,
"learning_rate": 0.00019947070035247205,
"loss": 0.7408,
"step": 229
},
{
"epoch": 0.034359127576934566,
"grad_norm": 0.15683765709400177,
"learning_rate": 0.00019946585984731142,
"loss": 0.6498,
"step": 230
},
{
"epoch": 0.03450851508813863,
"grad_norm": 0.17898918688297272,
"learning_rate": 0.0001994609973685669,
"loss": 0.7338,
"step": 231
},
{
"epoch": 0.0346579025993427,
"grad_norm": 0.15439556539058685,
"learning_rate": 0.00019945611291731274,
"loss": 0.854,
"step": 232
},
{
"epoch": 0.03480729011054676,
"grad_norm": 0.16742883622646332,
"learning_rate": 0.0001994512064946279,
"loss": 0.5851,
"step": 233
},
{
"epoch": 0.03495667762175082,
"grad_norm": 0.17511384189128876,
"learning_rate": 0.00019944627810159632,
"loss": 0.569,
"step": 234
},
{
"epoch": 0.03510606513295488,
"grad_norm": 0.26125359535217285,
"learning_rate": 0.0001994413277393067,
"loss": 0.862,
"step": 235
},
{
"epoch": 0.03525545264415895,
"grad_norm": 0.1439584493637085,
"learning_rate": 0.00019943635540885279,
"loss": 1.1311,
"step": 236
},
{
"epoch": 0.035404840155363014,
"grad_norm": 0.146185502409935,
"learning_rate": 0.00019943136111133294,
"loss": 0.9574,
"step": 237
},
{
"epoch": 0.03555422766656707,
"grad_norm": 5.1866774559021,
"learning_rate": 0.00019942634484785052,
"loss": 3.2188,
"step": 238
},
{
"epoch": 0.03570361517777114,
"grad_norm": 0.13537189364433289,
"learning_rate": 0.00019942130661951372,
"loss": 0.6154,
"step": 239
},
{
"epoch": 0.0358530026889752,
"grad_norm": 0.13716812431812286,
"learning_rate": 0.00019941624642743548,
"loss": 0.7604,
"step": 240
},
{
"epoch": 0.036002390200179264,
"grad_norm": 0.37018468976020813,
"learning_rate": 0.0001994111642727338,
"loss": 0.8598,
"step": 241
},
{
"epoch": 0.03615177771138333,
"grad_norm": 0.1747826188802719,
"learning_rate": 0.0001994060601565313,
"loss": 0.6497,
"step": 242
},
{
"epoch": 0.03630116522258739,
"grad_norm": 0.12225501239299774,
"learning_rate": 0.0001994009340799556,
"loss": 0.7278,
"step": 243
},
{
"epoch": 0.036450552733791455,
"grad_norm": 0.17295411229133606,
"learning_rate": 0.00019939578604413912,
"loss": 0.8747,
"step": 244
},
{
"epoch": 0.03659994024499552,
"grad_norm": 0.1815291941165924,
"learning_rate": 0.00019939061605021917,
"loss": 0.7242,
"step": 245
},
{
"epoch": 0.03674932775619958,
"grad_norm": 0.25227025151252747,
"learning_rate": 0.00019938542409933787,
"loss": 0.7818,
"step": 246
},
{
"epoch": 0.036898715267403646,
"grad_norm": 0.280819296836853,
"learning_rate": 0.00019938021019264221,
"loss": 0.7471,
"step": 247
},
{
"epoch": 0.037048102778607706,
"grad_norm": 0.1746496856212616,
"learning_rate": 0.000199374974331284,
"loss": 0.9149,
"step": 248
},
{
"epoch": 0.03719749028981177,
"grad_norm": 0.2506274878978729,
"learning_rate": 0.00019936971651641995,
"loss": 0.8666,
"step": 249
},
{
"epoch": 0.03734687780101584,
"grad_norm": 0.12307952344417572,
"learning_rate": 0.00019936443674921158,
"loss": 0.7737,
"step": 250
},
{
"epoch": 0.0374962653122199,
"grad_norm": 0.13615377247333527,
"learning_rate": 0.0001993591350308253,
"loss": 0.7592,
"step": 251
},
{
"epoch": 0.03764565282342396,
"grad_norm": 0.16808447241783142,
"learning_rate": 0.0001993538113624323,
"loss": 0.8599,
"step": 252
},
{
"epoch": 0.03779504033462802,
"grad_norm": 0.11544547975063324,
"learning_rate": 0.00019934846574520872,
"loss": 0.7348,
"step": 253
},
{
"epoch": 0.03794442784583209,
"grad_norm": 0.20908010005950928,
"learning_rate": 0.00019934309818033544,
"loss": 0.6674,
"step": 254
},
{
"epoch": 0.038093815357036154,
"grad_norm": 0.1379510909318924,
"learning_rate": 0.00019933770866899825,
"loss": 0.7295,
"step": 255
},
{
"epoch": 0.03824320286824021,
"grad_norm": 0.15058402717113495,
"learning_rate": 0.0001993322972123878,
"loss": 1.1005,
"step": 256
},
{
"epoch": 0.03839259037944428,
"grad_norm": 0.1941765695810318,
"learning_rate": 0.00019932686381169955,
"loss": 0.7658,
"step": 257
},
{
"epoch": 0.038541977890648345,
"grad_norm": 0.1889600306749344,
"learning_rate": 0.0001993214084681338,
"loss": 0.9765,
"step": 258
},
{
"epoch": 0.038691365401852404,
"grad_norm": 0.1466747522354126,
"learning_rate": 0.00019931593118289578,
"loss": 0.5899,
"step": 259
},
{
"epoch": 0.03884075291305647,
"grad_norm": 0.2811049520969391,
"learning_rate": 0.00019931043195719548,
"loss": 0.8255,
"step": 260
},
{
"epoch": 0.03899014042426053,
"grad_norm": 0.1345093995332718,
"learning_rate": 0.00019930491079224772,
"loss": 0.9015,
"step": 261
},
{
"epoch": 0.039139527935464595,
"grad_norm": 0.9843081831932068,
"learning_rate": 0.00019929936768927232,
"loss": 1.5562,
"step": 262
},
{
"epoch": 0.03928891544666866,
"grad_norm": 0.18343736231327057,
"learning_rate": 0.00019929380264949376,
"loss": 0.8783,
"step": 263
},
{
"epoch": 0.03943830295787272,
"grad_norm": 0.29955071210861206,
"learning_rate": 0.00019928821567414144,
"loss": 1.0868,
"step": 264
},
{
"epoch": 0.039587690469076786,
"grad_norm": 0.16872237622737885,
"learning_rate": 0.00019928260676444965,
"loss": 0.7375,
"step": 265
},
{
"epoch": 0.039737077980280845,
"grad_norm": 0.1343865543603897,
"learning_rate": 0.00019927697592165747,
"loss": 1.0279,
"step": 266
},
{
"epoch": 0.03988646549148491,
"grad_norm": 0.2587420642375946,
"learning_rate": 0.00019927132314700885,
"loss": 0.8529,
"step": 267
},
{
"epoch": 0.04003585300268898,
"grad_norm": 0.23096689581871033,
"learning_rate": 0.00019926564844175256,
"loss": 0.8726,
"step": 268
},
{
"epoch": 0.040185240513893036,
"grad_norm": 0.13639822602272034,
"learning_rate": 0.00019925995180714224,
"loss": 0.6957,
"step": 269
},
{
"epoch": 0.0403346280250971,
"grad_norm": 0.13751177489757538,
"learning_rate": 0.00019925423324443638,
"loss": 0.7239,
"step": 270
},
{
"epoch": 0.04048401553630117,
"grad_norm": 0.12963712215423584,
"learning_rate": 0.0001992484927548983,
"loss": 0.686,
"step": 271
},
{
"epoch": 0.04063340304750523,
"grad_norm": 0.13808754086494446,
"learning_rate": 0.00019924273033979613,
"loss": 0.973,
"step": 272
},
{
"epoch": 0.04078279055870929,
"grad_norm": 0.10808500647544861,
"learning_rate": 0.0001992369460004029,
"loss": 0.6725,
"step": 273
},
{
"epoch": 0.04093217806991335,
"grad_norm": 0.14959432184696198,
"learning_rate": 0.0001992311397379965,
"loss": 0.9753,
"step": 274
},
{
"epoch": 0.04108156558111742,
"grad_norm": 0.21491862833499908,
"learning_rate": 0.00019922531155385954,
"loss": 0.9506,
"step": 275
},
{
"epoch": 0.041230953092321485,
"grad_norm": 0.16956715285778046,
"learning_rate": 0.00019921946144927966,
"loss": 1.0777,
"step": 276
},
{
"epoch": 0.041380340603525544,
"grad_norm": 0.17658768594264984,
"learning_rate": 0.00019921358942554917,
"loss": 0.6994,
"step": 277
},
{
"epoch": 0.04152972811472961,
"grad_norm": 0.20894332230091095,
"learning_rate": 0.0001992076954839653,
"loss": 1.0217,
"step": 278
},
{
"epoch": 0.04167911562593367,
"grad_norm": 0.15689632296562195,
"learning_rate": 0.00019920177962583015,
"loss": 0.4844,
"step": 279
},
{
"epoch": 0.041828503137137735,
"grad_norm": 0.24510236084461212,
"learning_rate": 0.00019919584185245062,
"loss": 0.8981,
"step": 280
},
{
"epoch": 0.0419778906483418,
"grad_norm": 0.1385307013988495,
"learning_rate": 0.00019918988216513844,
"loss": 0.9278,
"step": 281
},
{
"epoch": 0.04212727815954586,
"grad_norm": 0.15266434848308563,
"learning_rate": 0.00019918390056521018,
"loss": 0.7803,
"step": 282
},
{
"epoch": 0.042276665670749926,
"grad_norm": 0.920637309551239,
"learning_rate": 0.00019917789705398728,
"loss": 2.0334,
"step": 283
},
{
"epoch": 0.04242605318195399,
"grad_norm": 0.16596724092960358,
"learning_rate": 0.00019917187163279605,
"loss": 0.5195,
"step": 284
},
{
"epoch": 0.04257544069315805,
"grad_norm": 0.21556362509727478,
"learning_rate": 0.00019916582430296758,
"loss": 1.0858,
"step": 285
},
{
"epoch": 0.04272482820436212,
"grad_norm": 0.13357070088386536,
"learning_rate": 0.00019915975506583778,
"loss": 0.7042,
"step": 286
},
{
"epoch": 0.042874215715566176,
"grad_norm": 0.13493870198726654,
"learning_rate": 0.00019915366392274752,
"loss": 0.7329,
"step": 287
},
{
"epoch": 0.04302360322677024,
"grad_norm": 0.13088954985141754,
"learning_rate": 0.00019914755087504236,
"loss": 0.6911,
"step": 288
},
{
"epoch": 0.04317299073797431,
"grad_norm": 0.12659801542758942,
"learning_rate": 0.0001991414159240728,
"loss": 0.9972,
"step": 289
},
{
"epoch": 0.04332237824917837,
"grad_norm": 0.14888939261436462,
"learning_rate": 0.00019913525907119418,
"loss": 0.917,
"step": 290
},
{
"epoch": 0.04347176576038243,
"grad_norm": 0.15505638718605042,
"learning_rate": 0.00019912908031776655,
"loss": 0.5189,
"step": 291
},
{
"epoch": 0.04362115327158649,
"grad_norm": 0.1389274001121521,
"learning_rate": 0.000199122879665155,
"loss": 0.9571,
"step": 292
},
{
"epoch": 0.04377054078279056,
"grad_norm": 0.5212099552154541,
"learning_rate": 0.0001991166571147293,
"loss": 1.7933,
"step": 293
},
{
"epoch": 0.043919928293994624,
"grad_norm": 0.15124285221099854,
"learning_rate": 0.0001991104126678641,
"loss": 0.8451,
"step": 294
},
{
"epoch": 0.04406931580519868,
"grad_norm": 0.12665098905563354,
"learning_rate": 0.0001991041463259389,
"loss": 0.8399,
"step": 295
},
{
"epoch": 0.04421870331640275,
"grad_norm": 0.41863369941711426,
"learning_rate": 0.00019909785809033806,
"loss": 0.6476,
"step": 296
},
{
"epoch": 0.044368090827606815,
"grad_norm": 0.12471672147512436,
"learning_rate": 0.00019909154796245076,
"loss": 0.8806,
"step": 297
},
{
"epoch": 0.044517478338810874,
"grad_norm": 0.21696336567401886,
"learning_rate": 0.00019908521594367098,
"loss": 1.0237,
"step": 298
},
{
"epoch": 0.04466686585001494,
"grad_norm": 0.10489077866077423,
"learning_rate": 0.00019907886203539757,
"loss": 0.5955,
"step": 299
},
{
"epoch": 0.044816253361219,
"grad_norm": 0.17574696242809296,
"learning_rate": 0.0001990724862390342,
"loss": 0.669,
"step": 300
},
{
"epoch": 0.044965640872423066,
"grad_norm": 0.16877828538417816,
"learning_rate": 0.00019906608855598939,
"loss": 0.8991,
"step": 301
},
{
"epoch": 0.04511502838362713,
"grad_norm": 0.16386249661445618,
"learning_rate": 0.0001990596689876765,
"loss": 0.9378,
"step": 302
},
{
"epoch": 0.04526441589483119,
"grad_norm": 0.1148744598031044,
"learning_rate": 0.00019905322753551368,
"loss": 0.5595,
"step": 303
},
{
"epoch": 0.04541380340603526,
"grad_norm": 0.1777895838022232,
"learning_rate": 0.00019904676420092404,
"loss": 0.9161,
"step": 304
},
{
"epoch": 0.045563190917239316,
"grad_norm": 0.14002953469753265,
"learning_rate": 0.0001990402789853353,
"loss": 0.6209,
"step": 305
},
{
"epoch": 0.04571257842844338,
"grad_norm": 0.3308524489402771,
"learning_rate": 0.00019903377189018024,
"loss": 0.9947,
"step": 306
},
{
"epoch": 0.04586196593964745,
"grad_norm": 0.16171899437904358,
"learning_rate": 0.00019902724291689637,
"loss": 0.9893,
"step": 307
},
{
"epoch": 0.04601135345085151,
"grad_norm": 0.1441776603460312,
"learning_rate": 0.000199020692066926,
"loss": 0.8799,
"step": 308
},
{
"epoch": 0.04616074096205557,
"grad_norm": 0.12858013808727264,
"learning_rate": 0.00019901411934171638,
"loss": 0.6444,
"step": 309
},
{
"epoch": 0.04631012847325964,
"grad_norm": 0.14368069171905518,
"learning_rate": 0.00019900752474271945,
"loss": 0.4694,
"step": 310
},
{
"epoch": 0.0464595159844637,
"grad_norm": 0.16599714756011963,
"learning_rate": 0.00019900090827139214,
"loss": 0.7362,
"step": 311
},
{
"epoch": 0.046608903495667764,
"grad_norm": 0.18922938406467438,
"learning_rate": 0.0001989942699291961,
"loss": 0.9204,
"step": 312
},
{
"epoch": 0.04675829100687182,
"grad_norm": 0.17513027787208557,
"learning_rate": 0.00019898760971759783,
"loss": 0.5341,
"step": 313
},
{
"epoch": 0.04690767851807589,
"grad_norm": 0.19131267070770264,
"learning_rate": 0.0001989809276380687,
"loss": 1.0719,
"step": 314
},
{
"epoch": 0.047057066029279955,
"grad_norm": 0.1646454632282257,
"learning_rate": 0.00019897422369208488,
"loss": 0.9124,
"step": 315
},
{
"epoch": 0.047206453540484014,
"grad_norm": 0.17317859828472137,
"learning_rate": 0.00019896749788112737,
"loss": 0.7388,
"step": 316
},
{
"epoch": 0.04735584105168808,
"grad_norm": 0.2774486839771271,
"learning_rate": 0.00019896075020668202,
"loss": 0.8183,
"step": 317
},
{
"epoch": 0.04750522856289214,
"grad_norm": 0.14470195770263672,
"learning_rate": 0.0001989539806702395,
"loss": 0.8357,
"step": 318
},
{
"epoch": 0.047654616074096205,
"grad_norm": 0.1452193409204483,
"learning_rate": 0.00019894718927329524,
"loss": 0.9126,
"step": 319
},
{
"epoch": 0.04780400358530027,
"grad_norm": 0.15406997501850128,
"learning_rate": 0.0001989403760173497,
"loss": 0.6407,
"step": 320
},
{
"epoch": 0.04795339109650433,
"grad_norm": 0.2753758728504181,
"learning_rate": 0.00019893354090390791,
"loss": 1.1197,
"step": 321
},
{
"epoch": 0.048102778607708396,
"grad_norm": 0.14286638796329498,
"learning_rate": 0.00019892668393447997,
"loss": 0.7519,
"step": 322
},
{
"epoch": 0.04825216611891246,
"grad_norm": 0.27783840894699097,
"learning_rate": 0.0001989198051105806,
"loss": 0.8173,
"step": 323
},
{
"epoch": 0.04840155363011652,
"grad_norm": 0.14072780311107635,
"learning_rate": 0.00019891290443372944,
"loss": 0.7387,
"step": 324
},
{
"epoch": 0.04855094114132059,
"grad_norm": 0.17845518887043,
"learning_rate": 0.00019890598190545102,
"loss": 0.9028,
"step": 325
},
{
"epoch": 0.048700328652524646,
"grad_norm": 0.16201937198638916,
"learning_rate": 0.0001988990375272746,
"loss": 0.6775,
"step": 326
},
{
"epoch": 0.04884971616372871,
"grad_norm": 0.3842596709728241,
"learning_rate": 0.00019889207130073432,
"loss": 1.166,
"step": 327
},
{
"epoch": 0.04899910367493278,
"grad_norm": 0.1811336725950241,
"learning_rate": 0.0001988850832273691,
"loss": 0.5225,
"step": 328
},
{
"epoch": 0.04914849118613684,
"grad_norm": 0.1348968744277954,
"learning_rate": 0.0001988780733087228,
"loss": 0.6588,
"step": 329
},
{
"epoch": 0.049297878697340904,
"grad_norm": 0.15417031943798065,
"learning_rate": 0.0001988710415463439,
"loss": 1.0178,
"step": 330
},
{
"epoch": 0.04944726620854496,
"grad_norm": 0.1732081174850464,
"learning_rate": 0.0001988639879417859,
"loss": 0.9023,
"step": 331
},
{
"epoch": 0.04959665371974903,
"grad_norm": 0.20192794501781464,
"learning_rate": 0.00019885691249660702,
"loss": 0.9105,
"step": 332
},
{
"epoch": 0.049746041230953095,
"grad_norm": 0.17755641043186188,
"learning_rate": 0.0001988498152123704,
"loss": 0.9222,
"step": 333
},
{
"epoch": 0.049895428742157154,
"grad_norm": 0.10902781784534454,
"learning_rate": 0.00019884269609064386,
"loss": 0.6073,
"step": 334
},
{
"epoch": 0.05004481625336122,
"grad_norm": 0.19006772339344025,
"learning_rate": 0.00019883555513300019,
"loss": 1.1671,
"step": 335
},
{
"epoch": 0.05019420376456528,
"grad_norm": 0.1430109441280365,
"learning_rate": 0.0001988283923410169,
"loss": 0.9732,
"step": 336
},
{
"epoch": 0.050343591275769345,
"grad_norm": 0.2234259843826294,
"learning_rate": 0.00019882120771627638,
"loss": 1.3184,
"step": 337
},
{
"epoch": 0.05049297878697341,
"grad_norm": 0.1166071966290474,
"learning_rate": 0.00019881400126036582,
"loss": 0.6163,
"step": 338
},
{
"epoch": 0.05064236629817747,
"grad_norm": 0.12911243736743927,
"learning_rate": 0.0001988067729748773,
"loss": 0.9934,
"step": 339
},
{
"epoch": 0.050791753809381536,
"grad_norm": 0.14424997568130493,
"learning_rate": 0.00019879952286140754,
"loss": 0.6436,
"step": 340
},
{
"epoch": 0.0509411413205856,
"grad_norm": 0.17660249769687653,
"learning_rate": 0.00019879225092155834,
"loss": 0.8772,
"step": 341
},
{
"epoch": 0.05109052883178966,
"grad_norm": 0.126225546002388,
"learning_rate": 0.0001987849571569361,
"loss": 0.5238,
"step": 342
},
{
"epoch": 0.05123991634299373,
"grad_norm": 0.1732265204191208,
"learning_rate": 0.00019877764156915213,
"loss": 0.7972,
"step": 343
},
{
"epoch": 0.051389303854197786,
"grad_norm": 0.2713650166988373,
"learning_rate": 0.0001987703041598226,
"loss": 0.9757,
"step": 344
},
{
"epoch": 0.05153869136540185,
"grad_norm": 0.1454247683286667,
"learning_rate": 0.00019876294493056845,
"loss": 0.9633,
"step": 345
},
{
"epoch": 0.05168807887660592,
"grad_norm": 0.12023693323135376,
"learning_rate": 0.00019875556388301543,
"loss": 0.5894,
"step": 346
},
{
"epoch": 0.05183746638780998,
"grad_norm": 0.15992896258831024,
"learning_rate": 0.0001987481610187941,
"loss": 0.7281,
"step": 347
},
{
"epoch": 0.05198685389901404,
"grad_norm": 0.15995372831821442,
"learning_rate": 0.00019874073633953997,
"loss": 0.6077,
"step": 348
},
{
"epoch": 0.0521362414102181,
"grad_norm": 0.12626518309116364,
"learning_rate": 0.0001987332898468932,
"loss": 0.8621,
"step": 349
},
{
"epoch": 0.05228562892142217,
"grad_norm": 0.1364617496728897,
"learning_rate": 0.00019872582154249884,
"loss": 0.7432,
"step": 350
},
{
"epoch": 0.052435016432626234,
"grad_norm": 0.17311328649520874,
"learning_rate": 0.00019871833142800675,
"loss": 0.7599,
"step": 351
},
{
"epoch": 0.052584403943830293,
"grad_norm": 0.18369294703006744,
"learning_rate": 0.00019871081950507163,
"loss": 0.7234,
"step": 352
},
{
"epoch": 0.05273379145503436,
"grad_norm": 0.12449700385332108,
"learning_rate": 0.00019870328577535303,
"loss": 0.8287,
"step": 353
},
{
"epoch": 0.052883178966238426,
"grad_norm": 0.10272317379713058,
"learning_rate": 0.00019869573024051517,
"loss": 0.5652,
"step": 354
},
{
"epoch": 0.053032566477442485,
"grad_norm": 0.16783328354358673,
"learning_rate": 0.00019868815290222726,
"loss": 0.5231,
"step": 355
},
{
"epoch": 0.05318195398864655,
"grad_norm": 0.26311105489730835,
"learning_rate": 0.00019868055376216323,
"loss": 0.6964,
"step": 356
},
{
"epoch": 0.05333134149985061,
"grad_norm": 0.32151147723197937,
"learning_rate": 0.00019867293282200188,
"loss": 1.0218,
"step": 357
},
{
"epoch": 0.053480729011054676,
"grad_norm": 0.23724707961082458,
"learning_rate": 0.00019866529008342673,
"loss": 1.0303,
"step": 358
},
{
"epoch": 0.05363011652225874,
"grad_norm": 0.1722903996706009,
"learning_rate": 0.00019865762554812624,
"loss": 0.9047,
"step": 359
},
{
"epoch": 0.0537795040334628,
"grad_norm": 0.20011122524738312,
"learning_rate": 0.00019864993921779361,
"loss": 0.8151,
"step": 360
},
{
"epoch": 0.05392889154466687,
"grad_norm": 0.13171933591365814,
"learning_rate": 0.0001986422310941269,
"loss": 0.7425,
"step": 361
},
{
"epoch": 0.054078279055870926,
"grad_norm": 0.14532333612442017,
"learning_rate": 0.0001986345011788289,
"loss": 0.8868,
"step": 362
},
{
"epoch": 0.05422766656707499,
"grad_norm": 0.17356187105178833,
"learning_rate": 0.00019862674947360729,
"loss": 0.4887,
"step": 363
},
{
"epoch": 0.05437705407827906,
"grad_norm": 0.24108925461769104,
"learning_rate": 0.00019861897598017457,
"loss": 1.1633,
"step": 364
},
{
"epoch": 0.05452644158948312,
"grad_norm": 0.16384924948215485,
"learning_rate": 0.00019861118070024802,
"loss": 0.8454,
"step": 365
},
{
"epoch": 0.05467582910068718,
"grad_norm": 0.1604813188314438,
"learning_rate": 0.00019860336363554973,
"loss": 0.6332,
"step": 366
},
{
"epoch": 0.05482521661189125,
"grad_norm": 0.19130012392997742,
"learning_rate": 0.00019859552478780659,
"loss": 0.9221,
"step": 367
},
{
"epoch": 0.05497460412309531,
"grad_norm": 0.22768980264663696,
"learning_rate": 0.0001985876641587504,
"loss": 0.9983,
"step": 368
},
{
"epoch": 0.055123991634299374,
"grad_norm": 0.13694654405117035,
"learning_rate": 0.0001985797817501176,
"loss": 0.633,
"step": 369
},
{
"epoch": 0.05527337914550343,
"grad_norm": 0.1287558525800705,
"learning_rate": 0.00019857187756364958,
"loss": 0.6729,
"step": 370
},
{
"epoch": 0.0554227666567075,
"grad_norm": 0.12864962220191956,
"learning_rate": 0.00019856395160109256,
"loss": 0.8324,
"step": 371
},
{
"epoch": 0.055572154167911565,
"grad_norm": 0.15732339024543762,
"learning_rate": 0.00019855600386419744,
"loss": 0.7524,
"step": 372
},
{
"epoch": 0.055721541679115624,
"grad_norm": 0.19376401603221893,
"learning_rate": 0.00019854803435472,
"loss": 0.9524,
"step": 373
},
{
"epoch": 0.05587092919031969,
"grad_norm": 0.17447201907634735,
"learning_rate": 0.00019854004307442088,
"loss": 0.9536,
"step": 374
},
{
"epoch": 0.05602031670152375,
"grad_norm": 0.1653999537229538,
"learning_rate": 0.00019853203002506543,
"loss": 0.6763,
"step": 375
},
{
"epoch": 0.056169704212727815,
"grad_norm": 0.21575714647769928,
"learning_rate": 0.0001985239952084239,
"loss": 1.2311,
"step": 376
},
{
"epoch": 0.05631909172393188,
"grad_norm": 0.18763162195682526,
"learning_rate": 0.0001985159386262713,
"loss": 0.7904,
"step": 377
},
{
"epoch": 0.05646847923513594,
"grad_norm": 0.14876295626163483,
"learning_rate": 0.0001985078602803874,
"loss": 0.7818,
"step": 378
},
{
"epoch": 0.056617866746340006,
"grad_norm": 0.15621663630008698,
"learning_rate": 0.0001984997601725569,
"loss": 0.8913,
"step": 379
},
{
"epoch": 0.05676725425754407,
"grad_norm": 0.154473677277565,
"learning_rate": 0.00019849163830456922,
"loss": 0.5824,
"step": 380
},
{
"epoch": 0.05691664176874813,
"grad_norm": 0.17189285159111023,
"learning_rate": 0.00019848349467821864,
"loss": 0.7574,
"step": 381
},
{
"epoch": 0.0570660292799522,
"grad_norm": 0.17504605650901794,
"learning_rate": 0.00019847532929530415,
"loss": 0.746,
"step": 382
},
{
"epoch": 0.05721541679115626,
"grad_norm": 0.14172236621379852,
"learning_rate": 0.00019846714215762966,
"loss": 0.9479,
"step": 383
},
{
"epoch": 0.05736480430236032,
"grad_norm": 0.18013326823711395,
"learning_rate": 0.00019845893326700384,
"loss": 0.7438,
"step": 384
},
{
"epoch": 0.05751419181356439,
"grad_norm": 0.12909965217113495,
"learning_rate": 0.00019845070262524016,
"loss": 0.6857,
"step": 385
},
{
"epoch": 0.05766357932476845,
"grad_norm": 0.15466244518756866,
"learning_rate": 0.00019844245023415685,
"loss": 0.9448,
"step": 386
},
{
"epoch": 0.057812966835972514,
"grad_norm": 0.6459704637527466,
"learning_rate": 0.0001984341760955771,
"loss": 1.7624,
"step": 387
},
{
"epoch": 0.05796235434717657,
"grad_norm": 0.12828199565410614,
"learning_rate": 0.0001984258802113287,
"loss": 0.6321,
"step": 388
},
{
"epoch": 0.05811174185838064,
"grad_norm": 0.19720108807086945,
"learning_rate": 0.0001984175625832444,
"loss": 0.742,
"step": 389
},
{
"epoch": 0.058261129369584705,
"grad_norm": 0.1498088240623474,
"learning_rate": 0.0001984092232131616,
"loss": 0.8326,
"step": 390
},
{
"epoch": 0.058410516880788764,
"grad_norm": 0.21480423212051392,
"learning_rate": 0.0001984008621029227,
"loss": 0.7597,
"step": 391
},
{
"epoch": 0.05855990439199283,
"grad_norm": 0.29573026299476624,
"learning_rate": 0.0001983924792543748,
"loss": 1.3511,
"step": 392
},
{
"epoch": 0.058709291903196896,
"grad_norm": 0.1259469836950302,
"learning_rate": 0.0001983840746693698,
"loss": 0.7294,
"step": 393
},
{
"epoch": 0.058858679414400955,
"grad_norm": 0.14026756584644318,
"learning_rate": 0.00019837564834976432,
"loss": 0.6073,
"step": 394
},
{
"epoch": 0.05900806692560502,
"grad_norm": 0.1396723836660385,
"learning_rate": 0.00019836720029741995,
"loss": 0.532,
"step": 395
},
{
"epoch": 0.05915745443680908,
"grad_norm": 0.33731967210769653,
"learning_rate": 0.000198358730514203,
"loss": 1.0597,
"step": 396
},
{
"epoch": 0.059306841948013146,
"grad_norm": 0.14965583384037018,
"learning_rate": 0.00019835023900198454,
"loss": 0.9816,
"step": 397
},
{
"epoch": 0.05945622945921721,
"grad_norm": 0.18267478048801422,
"learning_rate": 0.0001983417257626405,
"loss": 0.7945,
"step": 398
},
{
"epoch": 0.05960561697042127,
"grad_norm": 0.39048805832862854,
"learning_rate": 0.0001983331907980516,
"loss": 1.1221,
"step": 399
},
{
"epoch": 0.05975500448162534,
"grad_norm": 0.11438261717557907,
"learning_rate": 0.00019832463411010331,
"loss": 0.6491,
"step": 400
},
{
"epoch": 0.059904391992829396,
"grad_norm": 0.1477927714586258,
"learning_rate": 0.00019831605570068596,
"loss": 0.8062,
"step": 401
},
{
"epoch": 0.06005377950403346,
"grad_norm": 0.12166056036949158,
"learning_rate": 0.0001983074555716947,
"loss": 0.8609,
"step": 402
},
{
"epoch": 0.06020316701523753,
"grad_norm": 0.13299763202667236,
"learning_rate": 0.00019829883372502935,
"loss": 0.9124,
"step": 403
},
{
"epoch": 0.06035255452644159,
"grad_norm": 0.5741954445838928,
"learning_rate": 0.00019829019016259468,
"loss": 1.4774,
"step": 404
},
{
"epoch": 0.060501942037645653,
"grad_norm": 0.12422844022512436,
"learning_rate": 0.00019828152488630016,
"loss": 0.7628,
"step": 405
},
{
"epoch": 0.06065132954884972,
"grad_norm": 0.22834278643131256,
"learning_rate": 0.00019827283789806011,
"loss": 1.0135,
"step": 406
},
{
"epoch": 0.06080071706005378,
"grad_norm": 0.2153419405221939,
"learning_rate": 0.00019826412919979358,
"loss": 1.0723,
"step": 407
},
{
"epoch": 0.060950104571257845,
"grad_norm": 0.18992449343204498,
"learning_rate": 0.0001982553987934245,
"loss": 0.5903,
"step": 408
},
{
"epoch": 0.061099492082461904,
"grad_norm": 0.14883364737033844,
"learning_rate": 0.00019824664668088155,
"loss": 0.7192,
"step": 409
},
{
"epoch": 0.06124887959366597,
"grad_norm": 0.12347893416881561,
"learning_rate": 0.0001982378728640982,
"loss": 0.785,
"step": 410
},
{
"epoch": 0.061398267104870036,
"grad_norm": 0.14353862404823303,
"learning_rate": 0.0001982290773450127,
"loss": 0.6293,
"step": 411
},
{
"epoch": 0.061547654616074095,
"grad_norm": 0.13001669943332672,
"learning_rate": 0.00019822026012556818,
"loss": 0.7083,
"step": 412
},
{
"epoch": 0.06169704212727816,
"grad_norm": 0.15990330278873444,
"learning_rate": 0.00019821142120771246,
"loss": 0.6908,
"step": 413
},
{
"epoch": 0.06184642963848222,
"grad_norm": 0.27101776003837585,
"learning_rate": 0.0001982025605933982,
"loss": 0.7311,
"step": 414
},
{
"epoch": 0.061995817149686286,
"grad_norm": 0.3568134903907776,
"learning_rate": 0.00019819367828458287,
"loss": 1.0127,
"step": 415
},
{
"epoch": 0.06214520466089035,
"grad_norm": 0.13109736144542694,
"learning_rate": 0.0001981847742832287,
"loss": 0.8776,
"step": 416
},
{
"epoch": 0.06229459217209441,
"grad_norm": 0.1522371917963028,
"learning_rate": 0.0001981758485913027,
"loss": 0.9513,
"step": 417
},
{
"epoch": 0.06244397968329848,
"grad_norm": 0.15717531740665436,
"learning_rate": 0.00019816690121077674,
"loss": 1.084,
"step": 418
},
{
"epoch": 0.06259336719450254,
"grad_norm": 0.26732301712036133,
"learning_rate": 0.00019815793214362742,
"loss": 1.0219,
"step": 419
},
{
"epoch": 0.0627427547057066,
"grad_norm": 0.1595582216978073,
"learning_rate": 0.00019814894139183614,
"loss": 0.7484,
"step": 420
},
{
"epoch": 0.06289214221691067,
"grad_norm": 0.24443097412586212,
"learning_rate": 0.00019813992895738908,
"loss": 1.1702,
"step": 421
},
{
"epoch": 0.06304152972811473,
"grad_norm": 0.12789739668369293,
"learning_rate": 0.00019813089484227732,
"loss": 0.9188,
"step": 422
},
{
"epoch": 0.06319091723931879,
"grad_norm": 0.13589276373386383,
"learning_rate": 0.00019812183904849653,
"loss": 0.5864,
"step": 423
},
{
"epoch": 0.06334030475052285,
"grad_norm": 0.20833753049373627,
"learning_rate": 0.00019811276157804733,
"loss": 0.9182,
"step": 424
},
{
"epoch": 0.06348969226172692,
"grad_norm": 0.16936369240283966,
"learning_rate": 0.0001981036624329351,
"loss": 0.7968,
"step": 425
},
{
"epoch": 0.06363907977293098,
"grad_norm": 0.21851451694965363,
"learning_rate": 0.00019809454161516993,
"loss": 0.6956,
"step": 426
},
{
"epoch": 0.06378846728413505,
"grad_norm": 0.15087386965751648,
"learning_rate": 0.0001980853991267668,
"loss": 1.0246,
"step": 427
},
{
"epoch": 0.06393785479533912,
"grad_norm": 0.12114626169204712,
"learning_rate": 0.00019807623496974537,
"loss": 0.7678,
"step": 428
},
{
"epoch": 0.06408724230654317,
"grad_norm": 0.20948128402233124,
"learning_rate": 0.00019806704914613024,
"loss": 1.1529,
"step": 429
},
{
"epoch": 0.06423662981774723,
"grad_norm": 0.1780691146850586,
"learning_rate": 0.0001980578416579506,
"loss": 0.9683,
"step": 430
},
{
"epoch": 0.0643860173289513,
"grad_norm": 0.14259085059165955,
"learning_rate": 0.00019804861250724063,
"loss": 0.4694,
"step": 431
},
{
"epoch": 0.06453540484015537,
"grad_norm": 0.2003784030675888,
"learning_rate": 0.00019803936169603912,
"loss": 0.6101,
"step": 432
},
{
"epoch": 0.06468479235135943,
"grad_norm": 0.19409967958927155,
"learning_rate": 0.00019803008922638976,
"loss": 1.2219,
"step": 433
},
{
"epoch": 0.06483417986256348,
"grad_norm": 0.12308470904827118,
"learning_rate": 0.00019802079510034096,
"loss": 0.8568,
"step": 434
},
{
"epoch": 0.06498356737376755,
"grad_norm": 0.3875686228275299,
"learning_rate": 0.00019801147931994596,
"loss": 1.027,
"step": 435
},
{
"epoch": 0.06513295488497162,
"grad_norm": 0.21796062588691711,
"learning_rate": 0.00019800214188726276,
"loss": 1.2517,
"step": 436
},
{
"epoch": 0.06528234239617568,
"grad_norm": 0.14813588559627533,
"learning_rate": 0.00019799278280435413,
"loss": 0.8185,
"step": 437
},
{
"epoch": 0.06543172990737975,
"grad_norm": 0.215727299451828,
"learning_rate": 0.00019798340207328766,
"loss": 1.3439,
"step": 438
},
{
"epoch": 0.0655811174185838,
"grad_norm": 0.25380611419677734,
"learning_rate": 0.0001979739996961357,
"loss": 1.2215,
"step": 439
},
{
"epoch": 0.06573050492978787,
"grad_norm": 0.16782647371292114,
"learning_rate": 0.00019796457567497537,
"loss": 0.9321,
"step": 440
},
{
"epoch": 0.06587989244099193,
"grad_norm": 0.13334587216377258,
"learning_rate": 0.0001979551300118886,
"loss": 0.8722,
"step": 441
},
{
"epoch": 0.066029279952196,
"grad_norm": 0.27328580617904663,
"learning_rate": 0.0001979456627089621,
"loss": 0.9013,
"step": 442
},
{
"epoch": 0.06617866746340006,
"grad_norm": 0.10864201933145523,
"learning_rate": 0.0001979361737682873,
"loss": 0.6201,
"step": 443
},
{
"epoch": 0.06632805497460412,
"grad_norm": 0.1800958216190338,
"learning_rate": 0.0001979266631919605,
"loss": 0.727,
"step": 444
},
{
"epoch": 0.06647744248580818,
"grad_norm": 0.14912551641464233,
"learning_rate": 0.00019791713098208272,
"loss": 0.6877,
"step": 445
},
{
"epoch": 0.06662682999701225,
"grad_norm": 0.3135051727294922,
"learning_rate": 0.00019790757714075979,
"loss": 0.9517,
"step": 446
},
{
"epoch": 0.06677621750821632,
"grad_norm": 0.23976337909698486,
"learning_rate": 0.0001978980016701023,
"loss": 0.9952,
"step": 447
},
{
"epoch": 0.06692560501942038,
"grad_norm": 0.16540992259979248,
"learning_rate": 0.00019788840457222556,
"loss": 0.796,
"step": 448
},
{
"epoch": 0.06707499253062443,
"grad_norm": 0.19656099379062653,
"learning_rate": 0.00019787878584924984,
"loss": 0.7593,
"step": 449
},
{
"epoch": 0.0672243800418285,
"grad_norm": 0.12927167117595673,
"learning_rate": 0.0001978691455033,
"loss": 0.819,
"step": 450
},
{
"epoch": 0.06737376755303257,
"grad_norm": 0.19154123961925507,
"learning_rate": 0.00019785948353650572,
"loss": 0.9713,
"step": 451
},
{
"epoch": 0.06752315506423663,
"grad_norm": 0.12065298110246658,
"learning_rate": 0.0001978497999510015,
"loss": 0.7942,
"step": 452
},
{
"epoch": 0.0676725425754407,
"grad_norm": 0.1475251317024231,
"learning_rate": 0.00019784009474892666,
"loss": 0.8598,
"step": 453
},
{
"epoch": 0.06782193008664476,
"grad_norm": 0.14072950184345245,
"learning_rate": 0.00019783036793242516,
"loss": 0.8956,
"step": 454
},
{
"epoch": 0.06797131759784882,
"grad_norm": 0.20561757683753967,
"learning_rate": 0.00019782061950364584,
"loss": 0.9214,
"step": 455
},
{
"epoch": 0.06812070510905288,
"grad_norm": 0.1547221839427948,
"learning_rate": 0.00019781084946474226,
"loss": 0.9042,
"step": 456
},
{
"epoch": 0.06827009262025695,
"grad_norm": 0.233870729804039,
"learning_rate": 0.0001978010578178728,
"loss": 1.0727,
"step": 457
},
{
"epoch": 0.06841948013146101,
"grad_norm": 0.19565890729427338,
"learning_rate": 0.00019779124456520056,
"loss": 0.8425,
"step": 458
},
{
"epoch": 0.06856886764266508,
"grad_norm": 0.15683157742023468,
"learning_rate": 0.00019778140970889348,
"loss": 0.7989,
"step": 459
},
{
"epoch": 0.06871825515386913,
"grad_norm": 0.12861517071723938,
"learning_rate": 0.0001977715532511242,
"loss": 0.8849,
"step": 460
},
{
"epoch": 0.0688676426650732,
"grad_norm": 0.12733756005764008,
"learning_rate": 0.00019776167519407022,
"loss": 0.7984,
"step": 461
},
{
"epoch": 0.06901703017627726,
"grad_norm": 0.14494583010673523,
"learning_rate": 0.0001977517755399137,
"loss": 0.8234,
"step": 462
},
{
"epoch": 0.06916641768748133,
"grad_norm": 0.15694382786750793,
"learning_rate": 0.00019774185429084166,
"loss": 0.8476,
"step": 463
},
{
"epoch": 0.0693158051986854,
"grad_norm": 0.2537318766117096,
"learning_rate": 0.00019773191144904586,
"loss": 0.9928,
"step": 464
},
{
"epoch": 0.06946519270988945,
"grad_norm": 0.45019641518592834,
"learning_rate": 0.0001977219470167228,
"loss": 1.5181,
"step": 465
},
{
"epoch": 0.06961458022109351,
"grad_norm": 0.18655377626419067,
"learning_rate": 0.00019771196099607386,
"loss": 0.7463,
"step": 466
},
{
"epoch": 0.06976396773229758,
"grad_norm": 0.1978912502527237,
"learning_rate": 0.00019770195338930503,
"loss": 1.0358,
"step": 467
},
{
"epoch": 0.06991335524350165,
"grad_norm": 0.15748754143714905,
"learning_rate": 0.00019769192419862716,
"loss": 0.6154,
"step": 468
},
{
"epoch": 0.07006274275470571,
"grad_norm": 0.17752663791179657,
"learning_rate": 0.00019768187342625592,
"loss": 0.6289,
"step": 469
},
{
"epoch": 0.07021213026590976,
"grad_norm": 0.12329550832509995,
"learning_rate": 0.0001976718010744116,
"loss": 0.9018,
"step": 470
},
{
"epoch": 0.07036151777711383,
"grad_norm": 0.17692221701145172,
"learning_rate": 0.00019766170714531937,
"loss": 1.0348,
"step": 471
},
{
"epoch": 0.0705109052883179,
"grad_norm": 0.2912501096725464,
"learning_rate": 0.00019765159164120916,
"loss": 0.9097,
"step": 472
},
{
"epoch": 0.07066029279952196,
"grad_norm": 0.118867889046669,
"learning_rate": 0.00019764145456431566,
"loss": 0.6933,
"step": 473
},
{
"epoch": 0.07080968031072603,
"grad_norm": 0.15778154134750366,
"learning_rate": 0.00019763129591687827,
"loss": 0.5504,
"step": 474
},
{
"epoch": 0.07095906782193008,
"grad_norm": 0.14886140823364258,
"learning_rate": 0.00019762111570114122,
"loss": 0.8209,
"step": 475
},
{
"epoch": 0.07110845533313415,
"grad_norm": 0.10714101046323776,
"learning_rate": 0.00019761091391935347,
"loss": 0.6473,
"step": 476
},
{
"epoch": 0.07125784284433821,
"grad_norm": 0.15396277606487274,
"learning_rate": 0.00019760069057376875,
"loss": 0.8179,
"step": 477
},
{
"epoch": 0.07140723035554228,
"grad_norm": 0.1531289666891098,
"learning_rate": 0.00019759044566664558,
"loss": 0.9508,
"step": 478
},
{
"epoch": 0.07155661786674634,
"grad_norm": 0.11772772669792175,
"learning_rate": 0.0001975801792002472,
"loss": 0.6606,
"step": 479
},
{
"epoch": 0.0717060053779504,
"grad_norm": 0.16291484236717224,
"learning_rate": 0.00019756989117684164,
"loss": 0.6476,
"step": 480
},
{
"epoch": 0.07185539288915446,
"grad_norm": 0.15869931876659393,
"learning_rate": 0.00019755958159870172,
"loss": 0.923,
"step": 481
},
{
"epoch": 0.07200478040035853,
"grad_norm": 0.1591719686985016,
"learning_rate": 0.00019754925046810493,
"loss": 0.7225,
"step": 482
},
{
"epoch": 0.0721541679115626,
"grad_norm": 0.1619904637336731,
"learning_rate": 0.00019753889778733363,
"loss": 0.7185,
"step": 483
},
{
"epoch": 0.07230355542276666,
"grad_norm": 0.12438057363033295,
"learning_rate": 0.00019752852355867486,
"loss": 0.7659,
"step": 484
},
{
"epoch": 0.07245294293397073,
"grad_norm": 0.11964958906173706,
"learning_rate": 0.00019751812778442046,
"loss": 0.7564,
"step": 485
},
{
"epoch": 0.07260233044517478,
"grad_norm": 0.2836957573890686,
"learning_rate": 0.00019750771046686704,
"loss": 1.4225,
"step": 486
},
{
"epoch": 0.07275171795637884,
"grad_norm": 0.13266032934188843,
"learning_rate": 0.00019749727160831593,
"loss": 0.7952,
"step": 487
},
{
"epoch": 0.07290110546758291,
"grad_norm": 0.12412168830633163,
"learning_rate": 0.00019748681121107325,
"loss": 0.9047,
"step": 488
},
{
"epoch": 0.07305049297878698,
"grad_norm": 0.20590822398662567,
"learning_rate": 0.00019747632927744982,
"loss": 0.7496,
"step": 489
},
{
"epoch": 0.07319988048999104,
"grad_norm": 0.18052905797958374,
"learning_rate": 0.00019746582580976136,
"loss": 0.6724,
"step": 490
},
{
"epoch": 0.0733492680011951,
"grad_norm": 0.24314431846141815,
"learning_rate": 0.0001974553008103282,
"loss": 0.8552,
"step": 491
},
{
"epoch": 0.07349865551239916,
"grad_norm": 0.14932356774806976,
"learning_rate": 0.00019744475428147546,
"loss": 0.8878,
"step": 492
},
{
"epoch": 0.07364804302360323,
"grad_norm": 0.12878401577472687,
"learning_rate": 0.00019743418622553303,
"loss": 0.7097,
"step": 493
},
{
"epoch": 0.07379743053480729,
"grad_norm": 0.1838715523481369,
"learning_rate": 0.00019742359664483563,
"loss": 1.0581,
"step": 494
},
{
"epoch": 0.07394681804601136,
"grad_norm": 0.13782045245170593,
"learning_rate": 0.0001974129855417226,
"loss": 1.0439,
"step": 495
},
{
"epoch": 0.07409620555721541,
"grad_norm": 0.179475337266922,
"learning_rate": 0.00019740235291853812,
"loss": 0.7708,
"step": 496
},
{
"epoch": 0.07424559306841948,
"grad_norm": 0.1903340369462967,
"learning_rate": 0.0001973916987776311,
"loss": 1.3356,
"step": 497
},
{
"epoch": 0.07439498057962354,
"grad_norm": 0.13480018079280853,
"learning_rate": 0.00019738102312135523,
"loss": 0.5876,
"step": 498
},
{
"epoch": 0.07454436809082761,
"grad_norm": 0.10856112837791443,
"learning_rate": 0.0001973703259520689,
"loss": 0.6991,
"step": 499
},
{
"epoch": 0.07469375560203168,
"grad_norm": 0.3036371171474457,
"learning_rate": 0.0001973596072721353,
"loss": 1.0438,
"step": 500
},
{
"epoch": 0.07484314311323573,
"grad_norm": 0.12173090875148773,
"learning_rate": 0.0001973488670839224,
"loss": 0.5836,
"step": 501
},
{
"epoch": 0.0749925306244398,
"grad_norm": 0.2667033076286316,
"learning_rate": 0.00019733810538980281,
"loss": 0.49,
"step": 502
},
{
"epoch": 0.07514191813564386,
"grad_norm": 0.092626191675663,
"learning_rate": 0.00019732732219215397,
"loss": 0.3618,
"step": 503
},
{
"epoch": 0.07529130564684793,
"grad_norm": 0.12614378333091736,
"learning_rate": 0.0001973165174933581,
"loss": 0.8307,
"step": 504
},
{
"epoch": 0.07544069315805199,
"grad_norm": 0.14259332418441772,
"learning_rate": 0.00019730569129580206,
"loss": 0.8972,
"step": 505
},
{
"epoch": 0.07559008066925604,
"grad_norm": 0.12121133506298065,
"learning_rate": 0.0001972948436018776,
"loss": 0.8446,
"step": 506
},
{
"epoch": 0.07573946818046011,
"grad_norm": 0.16819995641708374,
"learning_rate": 0.00019728397441398112,
"loss": 0.8122,
"step": 507
},
{
"epoch": 0.07588885569166418,
"grad_norm": 0.14065992832183838,
"learning_rate": 0.00019727308373451377,
"loss": 0.457,
"step": 508
},
{
"epoch": 0.07603824320286824,
"grad_norm": 0.16299694776535034,
"learning_rate": 0.0001972621715658815,
"loss": 0.878,
"step": 509
},
{
"epoch": 0.07618763071407231,
"grad_norm": 0.1941678524017334,
"learning_rate": 0.000197251237910495,
"loss": 0.9534,
"step": 510
},
{
"epoch": 0.07633701822527637,
"grad_norm": 0.14302954077720642,
"learning_rate": 0.00019724028277076964,
"loss": 0.8577,
"step": 511
},
{
"epoch": 0.07648640573648043,
"grad_norm": 0.19309553503990173,
"learning_rate": 0.00019722930614912563,
"loss": 0.9864,
"step": 512
},
{
"epoch": 0.07663579324768449,
"grad_norm": 0.1626858413219452,
"learning_rate": 0.00019721830804798787,
"loss": 0.8104,
"step": 513
},
{
"epoch": 0.07678518075888856,
"grad_norm": 0.11680503189563751,
"learning_rate": 0.00019720728846978598,
"loss": 0.7225,
"step": 514
},
{
"epoch": 0.07693456827009262,
"grad_norm": 0.11943230032920837,
"learning_rate": 0.0001971962474169544,
"loss": 0.895,
"step": 515
},
{
"epoch": 0.07708395578129669,
"grad_norm": 0.2065763920545578,
"learning_rate": 0.00019718518489193225,
"loss": 0.7482,
"step": 516
},
{
"epoch": 0.07723334329250074,
"grad_norm": 0.13309244811534882,
"learning_rate": 0.0001971741008971634,
"loss": 0.5227,
"step": 517
},
{
"epoch": 0.07738273080370481,
"grad_norm": 0.13642869889736176,
"learning_rate": 0.00019716299543509654,
"loss": 0.6441,
"step": 518
},
{
"epoch": 0.07753211831490887,
"grad_norm": 0.18893510103225708,
"learning_rate": 0.00019715186850818498,
"loss": 0.7619,
"step": 519
},
{
"epoch": 0.07768150582611294,
"grad_norm": 0.3364134132862091,
"learning_rate": 0.00019714072011888686,
"loss": 1.0618,
"step": 520
},
{
"epoch": 0.077830893337317,
"grad_norm": 0.1486874371767044,
"learning_rate": 0.00019712955026966506,
"loss": 0.7846,
"step": 521
},
{
"epoch": 0.07798028084852106,
"grad_norm": 0.21529722213745117,
"learning_rate": 0.00019711835896298713,
"loss": 0.4896,
"step": 522
},
{
"epoch": 0.07812966835972512,
"grad_norm": 0.1832679957151413,
"learning_rate": 0.00019710714620132546,
"loss": 0.8258,
"step": 523
},
{
"epoch": 0.07827905587092919,
"grad_norm": 0.189329594373703,
"learning_rate": 0.00019709591198715707,
"loss": 0.9139,
"step": 524
},
{
"epoch": 0.07842844338213326,
"grad_norm": 0.13310536742210388,
"learning_rate": 0.0001970846563229638,
"loss": 0.9531,
"step": 525
},
{
"epoch": 0.07857783089333732,
"grad_norm": 0.5338667631149292,
"learning_rate": 0.00019707337921123221,
"loss": 1.0734,
"step": 526
},
{
"epoch": 0.07872721840454137,
"grad_norm": 0.6397843360900879,
"learning_rate": 0.0001970620806544536,
"loss": 1.4513,
"step": 527
},
{
"epoch": 0.07887660591574544,
"grad_norm": 0.14354926347732544,
"learning_rate": 0.00019705076065512398,
"loss": 0.96,
"step": 528
},
{
"epoch": 0.0790259934269495,
"grad_norm": 0.11550958454608917,
"learning_rate": 0.00019703941921574413,
"loss": 0.7829,
"step": 529
},
{
"epoch": 0.07917538093815357,
"grad_norm": 0.14045141637325287,
"learning_rate": 0.00019702805633881957,
"loss": 0.6976,
"step": 530
},
{
"epoch": 0.07932476844935764,
"grad_norm": 0.18493744730949402,
"learning_rate": 0.00019701667202686048,
"loss": 1.1065,
"step": 531
},
{
"epoch": 0.07947415596056169,
"grad_norm": 0.2148275524377823,
"learning_rate": 0.0001970052662823819,
"loss": 1.1823,
"step": 532
},
{
"epoch": 0.07962354347176576,
"grad_norm": 0.1770048886537552,
"learning_rate": 0.0001969938391079035,
"loss": 0.8534,
"step": 533
},
{
"epoch": 0.07977293098296982,
"grad_norm": 0.11045973002910614,
"learning_rate": 0.00019698239050594977,
"loss": 0.628,
"step": 534
},
{
"epoch": 0.07992231849417389,
"grad_norm": 0.113344706594944,
"learning_rate": 0.0001969709204790498,
"loss": 0.7655,
"step": 535
},
{
"epoch": 0.08007170600537795,
"grad_norm": 0.13493701815605164,
"learning_rate": 0.0001969594290297376,
"loss": 0.6173,
"step": 536
},
{
"epoch": 0.08022109351658202,
"grad_norm": 0.10995621234178543,
"learning_rate": 0.00019694791616055177,
"loss": 0.8091,
"step": 537
},
{
"epoch": 0.08037048102778607,
"grad_norm": 0.1691836416721344,
"learning_rate": 0.00019693638187403563,
"loss": 0.9478,
"step": 538
},
{
"epoch": 0.08051986853899014,
"grad_norm": 0.1570545732975006,
"learning_rate": 0.0001969248261727374,
"loss": 0.8514,
"step": 539
},
{
"epoch": 0.0806692560501942,
"grad_norm": 0.14363858103752136,
"learning_rate": 0.00019691324905920984,
"loss": 0.5037,
"step": 540
},
{
"epoch": 0.08081864356139827,
"grad_norm": 0.12334268540143967,
"learning_rate": 0.00019690165053601056,
"loss": 0.9084,
"step": 541
},
{
"epoch": 0.08096803107260234,
"grad_norm": 0.3128170073032379,
"learning_rate": 0.0001968900306057018,
"loss": 0.737,
"step": 542
},
{
"epoch": 0.08111741858380639,
"grad_norm": 0.16364172101020813,
"learning_rate": 0.00019687838927085066,
"loss": 0.4789,
"step": 543
},
{
"epoch": 0.08126680609501045,
"grad_norm": 0.15874239802360535,
"learning_rate": 0.0001968667265340288,
"loss": 1.0205,
"step": 544
},
{
"epoch": 0.08141619360621452,
"grad_norm": 0.13250084221363068,
"learning_rate": 0.00019685504239781278,
"loss": 0.8377,
"step": 545
},
{
"epoch": 0.08156558111741859,
"grad_norm": 0.13961811363697052,
"learning_rate": 0.00019684333686478383,
"loss": 0.857,
"step": 546
},
{
"epoch": 0.08171496862862265,
"grad_norm": 0.1638427972793579,
"learning_rate": 0.0001968316099375278,
"loss": 0.6918,
"step": 547
},
{
"epoch": 0.0818643561398267,
"grad_norm": 0.260032057762146,
"learning_rate": 0.00019681986161863542,
"loss": 0.965,
"step": 548
},
{
"epoch": 0.08201374365103077,
"grad_norm": 0.12642204761505127,
"learning_rate": 0.00019680809191070203,
"loss": 0.5236,
"step": 549
},
{
"epoch": 0.08216313116223484,
"grad_norm": 0.18395595252513885,
"learning_rate": 0.00019679630081632782,
"loss": 1.0722,
"step": 550
},
{
"epoch": 0.0823125186734389,
"grad_norm": 0.14553199708461761,
"learning_rate": 0.0001967844883381176,
"loss": 0.7844,
"step": 551
},
{
"epoch": 0.08246190618464297,
"grad_norm": 0.13094641268253326,
"learning_rate": 0.00019677265447868086,
"loss": 0.5937,
"step": 552
},
{
"epoch": 0.08261129369584702,
"grad_norm": 0.1621766984462738,
"learning_rate": 0.00019676079924063196,
"loss": 0.7064,
"step": 553
},
{
"epoch": 0.08276068120705109,
"grad_norm": 0.16490310430526733,
"learning_rate": 0.0001967489226265899,
"loss": 0.654,
"step": 554
},
{
"epoch": 0.08291006871825515,
"grad_norm": 0.1863924264907837,
"learning_rate": 0.00019673702463917842,
"loss": 0.8091,
"step": 555
},
{
"epoch": 0.08305945622945922,
"grad_norm": 0.1565409004688263,
"learning_rate": 0.00019672510528102597,
"loss": 0.583,
"step": 556
},
{
"epoch": 0.08320884374066329,
"grad_norm": 0.2025175541639328,
"learning_rate": 0.0001967131645547657,
"loss": 0.8749,
"step": 557
},
{
"epoch": 0.08335823125186734,
"grad_norm": 0.23709741234779358,
"learning_rate": 0.0001967012024630355,
"loss": 1.1301,
"step": 558
},
{
"epoch": 0.0835076187630714,
"grad_norm": 0.33181461691856384,
"learning_rate": 0.00019668921900847805,
"loss": 1.0527,
"step": 559
},
{
"epoch": 0.08365700627427547,
"grad_norm": 0.11158733814954758,
"learning_rate": 0.00019667721419374065,
"loss": 0.6433,
"step": 560
},
{
"epoch": 0.08380639378547954,
"grad_norm": 0.21710413694381714,
"learning_rate": 0.00019666518802147534,
"loss": 0.7117,
"step": 561
},
{
"epoch": 0.0839557812966836,
"grad_norm": 0.1480075567960739,
"learning_rate": 0.00019665314049433888,
"loss": 0.8019,
"step": 562
},
{
"epoch": 0.08410516880788767,
"grad_norm": 0.15463611483573914,
"learning_rate": 0.00019664107161499277,
"loss": 0.679,
"step": 563
},
{
"epoch": 0.08425455631909172,
"grad_norm": 0.13593655824661255,
"learning_rate": 0.00019662898138610323,
"loss": 0.7527,
"step": 564
},
{
"epoch": 0.08440394383029579,
"grad_norm": 0.1662757396697998,
"learning_rate": 0.0001966168698103412,
"loss": 0.7486,
"step": 565
},
{
"epoch": 0.08455333134149985,
"grad_norm": 0.10743851959705353,
"learning_rate": 0.00019660473689038228,
"loss": 0.6341,
"step": 566
},
{
"epoch": 0.08470271885270392,
"grad_norm": 0.15268459916114807,
"learning_rate": 0.00019659258262890683,
"loss": 0.8236,
"step": 567
},
{
"epoch": 0.08485210636390798,
"grad_norm": 0.14038819074630737,
"learning_rate": 0.00019658040702859997,
"loss": 0.8065,
"step": 568
},
{
"epoch": 0.08500149387511204,
"grad_norm": 0.15066532790660858,
"learning_rate": 0.0001965682100921514,
"loss": 0.9532,
"step": 569
},
{
"epoch": 0.0851508813863161,
"grad_norm": 0.1580052673816681,
"learning_rate": 0.00019655599182225565,
"loss": 1.0969,
"step": 570
},
{
"epoch": 0.08530026889752017,
"grad_norm": 0.16247299313545227,
"learning_rate": 0.000196543752221612,
"loss": 0.791,
"step": 571
},
{
"epoch": 0.08544965640872423,
"grad_norm": 0.1083194687962532,
"learning_rate": 0.00019653149129292426,
"loss": 0.6203,
"step": 572
},
{
"epoch": 0.0855990439199283,
"grad_norm": 0.19076959788799286,
"learning_rate": 0.0001965192090389011,
"loss": 0.6709,
"step": 573
},
{
"epoch": 0.08574843143113235,
"grad_norm": 0.15673895180225372,
"learning_rate": 0.00019650690546225592,
"loss": 0.7816,
"step": 574
},
{
"epoch": 0.08589781894233642,
"grad_norm": 0.13381899893283844,
"learning_rate": 0.00019649458056570672,
"loss": 0.8227,
"step": 575
},
{
"epoch": 0.08604720645354048,
"grad_norm": 0.10087165981531143,
"learning_rate": 0.00019648223435197627,
"loss": 0.5839,
"step": 576
},
{
"epoch": 0.08619659396474455,
"grad_norm": 0.17345178127288818,
"learning_rate": 0.00019646986682379206,
"loss": 0.8435,
"step": 577
},
{
"epoch": 0.08634598147594862,
"grad_norm": 0.16065159440040588,
"learning_rate": 0.00019645747798388628,
"loss": 0.8124,
"step": 578
},
{
"epoch": 0.08649536898715267,
"grad_norm": 0.32011693716049194,
"learning_rate": 0.0001964450678349958,
"loss": 0.8119,
"step": 579
},
{
"epoch": 0.08664475649835673,
"grad_norm": 0.13616526126861572,
"learning_rate": 0.0001964326363798622,
"loss": 0.8056,
"step": 580
},
{
"epoch": 0.0867941440095608,
"grad_norm": 0.28961271047592163,
"learning_rate": 0.00019642018362123182,
"loss": 1.0182,
"step": 581
},
{
"epoch": 0.08694353152076487,
"grad_norm": 0.16200341284275055,
"learning_rate": 0.00019640770956185567,
"loss": 0.9604,
"step": 582
},
{
"epoch": 0.08709291903196893,
"grad_norm": 0.1426459699869156,
"learning_rate": 0.00019639521420448947,
"loss": 0.6555,
"step": 583
},
{
"epoch": 0.08724230654317298,
"grad_norm": 0.11389955133199692,
"learning_rate": 0.0001963826975518936,
"loss": 0.6878,
"step": 584
},
{
"epoch": 0.08739169405437705,
"grad_norm": 0.12384461611509323,
"learning_rate": 0.00019637015960683322,
"loss": 0.612,
"step": 585
},
{
"epoch": 0.08754108156558112,
"grad_norm": 0.1436391919851303,
"learning_rate": 0.00019635760037207817,
"loss": 0.7069,
"step": 586
},
{
"epoch": 0.08769046907678518,
"grad_norm": 0.1199880838394165,
"learning_rate": 0.00019634501985040296,
"loss": 0.7547,
"step": 587
},
{
"epoch": 0.08783985658798925,
"grad_norm": 0.16157324612140656,
"learning_rate": 0.00019633241804458687,
"loss": 0.7839,
"step": 588
},
{
"epoch": 0.0879892440991933,
"grad_norm": 0.16362355649471283,
"learning_rate": 0.00019631979495741378,
"loss": 0.7197,
"step": 589
},
{
"epoch": 0.08813863161039737,
"grad_norm": 0.1794954091310501,
"learning_rate": 0.00019630715059167238,
"loss": 0.6936,
"step": 590
},
{
"epoch": 0.08828801912160143,
"grad_norm": 0.3147851228713989,
"learning_rate": 0.00019629448495015597,
"loss": 1.8882,
"step": 591
},
{
"epoch": 0.0884374066328055,
"grad_norm": 0.1880149394273758,
"learning_rate": 0.0001962817980356626,
"loss": 0.7354,
"step": 592
},
{
"epoch": 0.08858679414400956,
"grad_norm": 0.13019341230392456,
"learning_rate": 0.00019626908985099503,
"loss": 0.9714,
"step": 593
},
{
"epoch": 0.08873618165521363,
"grad_norm": 0.18782681226730347,
"learning_rate": 0.0001962563603989607,
"loss": 0.9334,
"step": 594
},
{
"epoch": 0.08888556916641768,
"grad_norm": 0.1618777960538864,
"learning_rate": 0.00019624360968237172,
"loss": 0.8143,
"step": 595
},
{
"epoch": 0.08903495667762175,
"grad_norm": 0.13201208412647247,
"learning_rate": 0.00019623083770404492,
"loss": 0.9077,
"step": 596
},
{
"epoch": 0.08918434418882581,
"grad_norm": 0.11902808398008347,
"learning_rate": 0.0001962180444668019,
"loss": 1.014,
"step": 597
},
{
"epoch": 0.08933373170002988,
"grad_norm": 0.17474393546581268,
"learning_rate": 0.0001962052299734688,
"loss": 0.8466,
"step": 598
},
{
"epoch": 0.08948311921123395,
"grad_norm": 0.2875848412513733,
"learning_rate": 0.00019619239422687663,
"loss": 0.8023,
"step": 599
},
{
"epoch": 0.089632506722438,
"grad_norm": 0.13769172132015228,
"learning_rate": 0.00019617953722986096,
"loss": 0.9026,
"step": 600
},
{
"epoch": 0.08978189423364207,
"grad_norm": 0.30760055780410767,
"learning_rate": 0.00019616665898526206,
"loss": 1.0103,
"step": 601
},
{
"epoch": 0.08993128174484613,
"grad_norm": 0.5774983763694763,
"learning_rate": 0.00019615375949592504,
"loss": 1.4516,
"step": 602
},
{
"epoch": 0.0900806692560502,
"grad_norm": 0.13711205124855042,
"learning_rate": 0.00019614083876469954,
"loss": 0.7543,
"step": 603
},
{
"epoch": 0.09023005676725426,
"grad_norm": 0.1261071413755417,
"learning_rate": 0.00019612789679443997,
"loss": 0.6952,
"step": 604
},
{
"epoch": 0.09037944427845832,
"grad_norm": 0.13197720050811768,
"learning_rate": 0.00019611493358800538,
"loss": 0.906,
"step": 605
},
{
"epoch": 0.09052883178966238,
"grad_norm": 0.12066524475812912,
"learning_rate": 0.00019610194914825962,
"loss": 0.7621,
"step": 606
},
{
"epoch": 0.09067821930086645,
"grad_norm": 0.1722782999277115,
"learning_rate": 0.00019608894347807108,
"loss": 0.8798,
"step": 607
},
{
"epoch": 0.09082760681207051,
"grad_norm": 0.1058247983455658,
"learning_rate": 0.000196075916580313,
"loss": 0.6246,
"step": 608
},
{
"epoch": 0.09097699432327458,
"grad_norm": 0.17862388491630554,
"learning_rate": 0.00019606286845786315,
"loss": 0.6607,
"step": 609
},
{
"epoch": 0.09112638183447863,
"grad_norm": 0.1704971194267273,
"learning_rate": 0.0001960497991136041,
"loss": 0.8648,
"step": 610
},
{
"epoch": 0.0912757693456827,
"grad_norm": 0.12244229018688202,
"learning_rate": 0.00019603670855042308,
"loss": 0.7546,
"step": 611
},
{
"epoch": 0.09142515685688676,
"grad_norm": 0.1924651712179184,
"learning_rate": 0.00019602359677121199,
"loss": 0.7931,
"step": 612
},
{
"epoch": 0.09157454436809083,
"grad_norm": 0.1722334921360016,
"learning_rate": 0.00019601046377886746,
"loss": 1.259,
"step": 613
},
{
"epoch": 0.0917239318792949,
"grad_norm": 0.11828108131885529,
"learning_rate": 0.0001959973095762907,
"loss": 0.8418,
"step": 614
},
{
"epoch": 0.09187331939049895,
"grad_norm": 0.10462518781423569,
"learning_rate": 0.0001959841341663878,
"loss": 0.5707,
"step": 615
},
{
"epoch": 0.09202270690170301,
"grad_norm": 0.14686742424964905,
"learning_rate": 0.00019597093755206936,
"loss": 1.0242,
"step": 616
},
{
"epoch": 0.09217209441290708,
"grad_norm": 0.1107390895485878,
"learning_rate": 0.00019595771973625068,
"loss": 0.6829,
"step": 617
},
{
"epoch": 0.09232148192411115,
"grad_norm": 0.18590034544467926,
"learning_rate": 0.00019594448072185182,
"loss": 0.7426,
"step": 618
},
{
"epoch": 0.09247086943531521,
"grad_norm": 0.12690337002277374,
"learning_rate": 0.00019593122051179748,
"loss": 0.978,
"step": 619
},
{
"epoch": 0.09262025694651928,
"grad_norm": 0.19712156057357788,
"learning_rate": 0.00019591793910901707,
"loss": 0.8153,
"step": 620
},
{
"epoch": 0.09276964445772333,
"grad_norm": 0.16727258265018463,
"learning_rate": 0.00019590463651644464,
"loss": 0.8827,
"step": 621
},
{
"epoch": 0.0929190319689274,
"grad_norm": 0.21075357496738434,
"learning_rate": 0.00019589131273701894,
"loss": 0.624,
"step": 622
},
{
"epoch": 0.09306841948013146,
"grad_norm": 0.5324035286903381,
"learning_rate": 0.00019587796777368347,
"loss": 1.808,
"step": 623
},
{
"epoch": 0.09321780699133553,
"grad_norm": 0.12788911163806915,
"learning_rate": 0.00019586460162938622,
"loss": 0.8374,
"step": 624
},
{
"epoch": 0.0933671945025396,
"grad_norm": 0.1606101393699646,
"learning_rate": 0.00019585121430708012,
"loss": 0.6427,
"step": 625
},
{
"epoch": 0.09351658201374365,
"grad_norm": 0.1087862104177475,
"learning_rate": 0.00019583780580972253,
"loss": 0.5977,
"step": 626
},
{
"epoch": 0.09366596952494771,
"grad_norm": 0.1577569991350174,
"learning_rate": 0.00019582437614027565,
"loss": 0.7819,
"step": 627
},
{
"epoch": 0.09381535703615178,
"grad_norm": 0.17475225031375885,
"learning_rate": 0.00019581092530170633,
"loss": 1.0605,
"step": 628
},
{
"epoch": 0.09396474454735584,
"grad_norm": 0.12946583330631256,
"learning_rate": 0.000195797453296986,
"loss": 0.832,
"step": 629
},
{
"epoch": 0.09411413205855991,
"grad_norm": 0.14866258203983307,
"learning_rate": 0.00019578396012909092,
"loss": 0.7355,
"step": 630
},
{
"epoch": 0.09426351956976396,
"grad_norm": 0.13466285169124603,
"learning_rate": 0.00019577044580100189,
"loss": 0.6529,
"step": 631
},
{
"epoch": 0.09441290708096803,
"grad_norm": 0.11548906564712524,
"learning_rate": 0.00019575691031570446,
"loss": 0.7818,
"step": 632
},
{
"epoch": 0.0945622945921721,
"grad_norm": 0.18528424203395844,
"learning_rate": 0.00019574335367618883,
"loss": 1.0789,
"step": 633
},
{
"epoch": 0.09471168210337616,
"grad_norm": 0.1468898057937622,
"learning_rate": 0.00019572977588544986,
"loss": 0.9756,
"step": 634
},
{
"epoch": 0.09486106961458023,
"grad_norm": 0.16121335327625275,
"learning_rate": 0.00019571617694648713,
"loss": 0.7288,
"step": 635
},
{
"epoch": 0.09501045712578428,
"grad_norm": 0.12135611474514008,
"learning_rate": 0.00019570255686230485,
"loss": 0.5934,
"step": 636
},
{
"epoch": 0.09515984463698834,
"grad_norm": 0.12789905071258545,
"learning_rate": 0.0001956889156359119,
"loss": 0.6069,
"step": 637
},
{
"epoch": 0.09530923214819241,
"grad_norm": 0.359639436006546,
"learning_rate": 0.00019567525327032187,
"loss": 1.3909,
"step": 638
},
{
"epoch": 0.09545861965939648,
"grad_norm": 0.18724483251571655,
"learning_rate": 0.00019566156976855297,
"loss": 0.6935,
"step": 639
},
{
"epoch": 0.09560800717060054,
"grad_norm": 0.19026364386081696,
"learning_rate": 0.0001956478651336281,
"loss": 0.7966,
"step": 640
},
{
"epoch": 0.0957573946818046,
"grad_norm": 0.26914387941360474,
"learning_rate": 0.00019563413936857484,
"loss": 0.8772,
"step": 641
},
{
"epoch": 0.09590678219300866,
"grad_norm": 0.1765991449356079,
"learning_rate": 0.00019562039247642546,
"loss": 0.7672,
"step": 642
},
{
"epoch": 0.09605616970421273,
"grad_norm": 0.2534937560558319,
"learning_rate": 0.00019560662446021677,
"loss": 1.2049,
"step": 643
},
{
"epoch": 0.09620555721541679,
"grad_norm": 0.14177022874355316,
"learning_rate": 0.00019559283532299043,
"loss": 0.5386,
"step": 644
},
{
"epoch": 0.09635494472662086,
"grad_norm": 0.1188812330365181,
"learning_rate": 0.00019557902506779268,
"loss": 0.6466,
"step": 645
},
{
"epoch": 0.09650433223782492,
"grad_norm": 0.12482491880655289,
"learning_rate": 0.00019556519369767438,
"loss": 0.6614,
"step": 646
},
{
"epoch": 0.09665371974902898,
"grad_norm": 0.1450030505657196,
"learning_rate": 0.00019555134121569112,
"loss": 0.7014,
"step": 647
},
{
"epoch": 0.09680310726023304,
"grad_norm": 0.12003795802593231,
"learning_rate": 0.0001955374676249031,
"loss": 0.8168,
"step": 648
},
{
"epoch": 0.09695249477143711,
"grad_norm": 0.1340424120426178,
"learning_rate": 0.0001955235729283753,
"loss": 0.9613,
"step": 649
},
{
"epoch": 0.09710188228264117,
"grad_norm": 0.19842645525932312,
"learning_rate": 0.0001955096571291772,
"loss": 0.8049,
"step": 650
},
{
"epoch": 0.09725126979384524,
"grad_norm": 0.256247341632843,
"learning_rate": 0.00019549572023038305,
"loss": 1.3097,
"step": 651
},
{
"epoch": 0.09740065730504929,
"grad_norm": 0.4698760509490967,
"learning_rate": 0.0001954817622350717,
"loss": 1.6247,
"step": 652
},
{
"epoch": 0.09755004481625336,
"grad_norm": 0.16017255187034607,
"learning_rate": 0.00019546778314632674,
"loss": 0.9872,
"step": 653
},
{
"epoch": 0.09769943232745743,
"grad_norm": 0.13327959179878235,
"learning_rate": 0.00019545378296723635,
"loss": 0.8846,
"step": 654
},
{
"epoch": 0.09784881983866149,
"grad_norm": 0.3522357642650604,
"learning_rate": 0.0001954397617008934,
"loss": 1.2639,
"step": 655
},
{
"epoch": 0.09799820734986556,
"grad_norm": 0.14365145564079285,
"learning_rate": 0.0001954257193503954,
"loss": 0.6805,
"step": 656
},
{
"epoch": 0.09814759486106961,
"grad_norm": 0.09565582871437073,
"learning_rate": 0.00019541165591884454,
"loss": 0.5522,
"step": 657
},
{
"epoch": 0.09829698237227368,
"grad_norm": 0.11068779975175858,
"learning_rate": 0.0001953975714093476,
"loss": 0.6697,
"step": 658
},
{
"epoch": 0.09844636988347774,
"grad_norm": 0.15267115831375122,
"learning_rate": 0.00019538346582501616,
"loss": 0.743,
"step": 659
},
{
"epoch": 0.09859575739468181,
"grad_norm": 0.13380476832389832,
"learning_rate": 0.00019536933916896633,
"loss": 0.7273,
"step": 660
},
{
"epoch": 0.09874514490588587,
"grad_norm": 0.37871164083480835,
"learning_rate": 0.0001953551914443189,
"loss": 0.9902,
"step": 661
},
{
"epoch": 0.09889453241708993,
"grad_norm": 0.16891950368881226,
"learning_rate": 0.00019534102265419932,
"loss": 1.0287,
"step": 662
},
{
"epoch": 0.09904391992829399,
"grad_norm": 0.15239520370960236,
"learning_rate": 0.00019532683280173768,
"loss": 0.9578,
"step": 663
},
{
"epoch": 0.09919330743949806,
"grad_norm": 0.1469845324754715,
"learning_rate": 0.00019531262189006882,
"loss": 0.8758,
"step": 664
},
{
"epoch": 0.09934269495070212,
"grad_norm": 0.15531109273433685,
"learning_rate": 0.00019529838992233208,
"loss": 1.0518,
"step": 665
},
{
"epoch": 0.09949208246190619,
"grad_norm": 0.19832894206047058,
"learning_rate": 0.0001952841369016716,
"loss": 0.6697,
"step": 666
},
{
"epoch": 0.09964146997311024,
"grad_norm": 0.12545038759708405,
"learning_rate": 0.00019526986283123601,
"loss": 0.9188,
"step": 667
},
{
"epoch": 0.09979085748431431,
"grad_norm": 0.15024740993976593,
"learning_rate": 0.00019525556771417875,
"loss": 0.9019,
"step": 668
},
{
"epoch": 0.09994024499551837,
"grad_norm": 0.17119628190994263,
"learning_rate": 0.0001952412515536578,
"loss": 0.7931,
"step": 669
},
{
"epoch": 0.10008963250672244,
"grad_norm": 0.2737690806388855,
"learning_rate": 0.00019522691435283585,
"loss": 1.1618,
"step": 670
}
],
"logging_steps": 1,
"max_steps": 6694,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 670,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.2926513829380096e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}