Control-8B-V.2-Erebus-Lora / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984,
"eval_steps": 500,
"global_step": 312,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032,
"grad_norm": 3.210516929626465,
"learning_rate": 1.5625e-06,
"loss": 2.7227,
"step": 1
},
{
"epoch": 0.0064,
"grad_norm": 3.002136468887329,
"learning_rate": 3.125e-06,
"loss": 2.6726,
"step": 2
},
{
"epoch": 0.0096,
"grad_norm": 3.242940902709961,
"learning_rate": 4.6875000000000004e-06,
"loss": 2.7516,
"step": 3
},
{
"epoch": 0.0128,
"grad_norm": 3.367115020751953,
"learning_rate": 6.25e-06,
"loss": 2.7533,
"step": 4
},
{
"epoch": 0.016,
"grad_norm": 3.2682089805603027,
"learning_rate": 7.8125e-06,
"loss": 2.8562,
"step": 5
},
{
"epoch": 0.0192,
"grad_norm": 2.6950130462646484,
"learning_rate": 9.375000000000001e-06,
"loss": 2.5765,
"step": 6
},
{
"epoch": 0.0224,
"grad_norm": 2.6319284439086914,
"learning_rate": 1.09375e-05,
"loss": 2.7433,
"step": 7
},
{
"epoch": 0.0256,
"grad_norm": 2.209996223449707,
"learning_rate": 1.25e-05,
"loss": 2.7297,
"step": 8
},
{
"epoch": 0.0288,
"grad_norm": 1.8690632581710815,
"learning_rate": 1.4062500000000001e-05,
"loss": 2.7458,
"step": 9
},
{
"epoch": 0.032,
"grad_norm": 1.6140168905258179,
"learning_rate": 1.5625e-05,
"loss": 2.4958,
"step": 10
},
{
"epoch": 0.0352,
"grad_norm": 1.326891303062439,
"learning_rate": 1.71875e-05,
"loss": 2.5878,
"step": 11
},
{
"epoch": 0.0384,
"grad_norm": 1.4608954191207886,
"learning_rate": 1.8750000000000002e-05,
"loss": 2.743,
"step": 12
},
{
"epoch": 0.0416,
"grad_norm": 1.6133849620819092,
"learning_rate": 2.0312500000000002e-05,
"loss": 2.4727,
"step": 13
},
{
"epoch": 0.0448,
"grad_norm": 1.609899640083313,
"learning_rate": 2.1875e-05,
"loss": 2.9243,
"step": 14
},
{
"epoch": 0.048,
"grad_norm": 1.555704951286316,
"learning_rate": 2.34375e-05,
"loss": 2.5987,
"step": 15
},
{
"epoch": 0.0512,
"grad_norm": 1.3509330749511719,
"learning_rate": 2.5e-05,
"loss": 2.586,
"step": 16
},
{
"epoch": 0.0544,
"grad_norm": 1.2609320878982544,
"learning_rate": 2.6562500000000002e-05,
"loss": 2.6697,
"step": 17
},
{
"epoch": 0.0576,
"grad_norm": 1.2991684675216675,
"learning_rate": 2.8125000000000003e-05,
"loss": 2.5569,
"step": 18
},
{
"epoch": 0.0608,
"grad_norm": 1.432456135749817,
"learning_rate": 2.96875e-05,
"loss": 2.6736,
"step": 19
},
{
"epoch": 0.064,
"grad_norm": 1.418470025062561,
"learning_rate": 3.125e-05,
"loss": 2.4292,
"step": 20
},
{
"epoch": 0.0672,
"grad_norm": 1.3896219730377197,
"learning_rate": 3.2812500000000005e-05,
"loss": 2.503,
"step": 21
},
{
"epoch": 0.0704,
"grad_norm": 1.2691140174865723,
"learning_rate": 3.4375e-05,
"loss": 2.4368,
"step": 22
},
{
"epoch": 0.0736,
"grad_norm": 1.4909545183181763,
"learning_rate": 3.59375e-05,
"loss": 2.5566,
"step": 23
},
{
"epoch": 0.0768,
"grad_norm": 1.2060567140579224,
"learning_rate": 3.7500000000000003e-05,
"loss": 2.4947,
"step": 24
},
{
"epoch": 0.08,
"grad_norm": 1.225574016571045,
"learning_rate": 3.90625e-05,
"loss": 2.5794,
"step": 25
},
{
"epoch": 0.0832,
"grad_norm": 1.1829763650894165,
"learning_rate": 4.0625000000000005e-05,
"loss": 2.4389,
"step": 26
},
{
"epoch": 0.0864,
"grad_norm": 1.4608885049819946,
"learning_rate": 4.21875e-05,
"loss": 2.3006,
"step": 27
},
{
"epoch": 0.0896,
"grad_norm": 1.441902995109558,
"learning_rate": 4.375e-05,
"loss": 2.8311,
"step": 28
},
{
"epoch": 0.0928,
"grad_norm": 1.3018760681152344,
"learning_rate": 4.5312500000000004e-05,
"loss": 2.5907,
"step": 29
},
{
"epoch": 0.096,
"grad_norm": 1.089474081993103,
"learning_rate": 4.6875e-05,
"loss": 2.3614,
"step": 30
},
{
"epoch": 0.0992,
"grad_norm": 1.0250301361083984,
"learning_rate": 4.8437500000000005e-05,
"loss": 2.4455,
"step": 31
},
{
"epoch": 0.1024,
"grad_norm": 1.2610759735107422,
"learning_rate": 5e-05,
"loss": 2.5235,
"step": 32
},
{
"epoch": 0.1056,
"grad_norm": 1.1425381898880005,
"learning_rate": 4.999842641886752e-05,
"loss": 2.4895,
"step": 33
},
{
"epoch": 0.1088,
"grad_norm": 1.290959119796753,
"learning_rate": 4.9993705873562665e-05,
"loss": 2.3314,
"step": 34
},
{
"epoch": 0.112,
"grad_norm": 1.1456242799758911,
"learning_rate": 4.998583895833834e-05,
"loss": 2.6165,
"step": 35
},
{
"epoch": 0.1152,
"grad_norm": 1.0945172309875488,
"learning_rate": 4.997482666353287e-05,
"loss": 2.2249,
"step": 36
},
{
"epoch": 0.1184,
"grad_norm": 1.024279236793518,
"learning_rate": 4.996067037544542e-05,
"loss": 2.4478,
"step": 37
},
{
"epoch": 0.1216,
"grad_norm": 1.1314380168914795,
"learning_rate": 4.99433718761614e-05,
"loss": 2.7661,
"step": 38
},
{
"epoch": 0.1248,
"grad_norm": 1.1418216228485107,
"learning_rate": 4.99229333433282e-05,
"loss": 2.3117,
"step": 39
},
{
"epoch": 0.128,
"grad_norm": 1.1869049072265625,
"learning_rate": 4.989935734988098e-05,
"loss": 2.5638,
"step": 40
},
{
"epoch": 0.1312,
"grad_norm": 1.2026361227035522,
"learning_rate": 4.9872646863718805e-05,
"loss": 2.5985,
"step": 41
},
{
"epoch": 0.1344,
"grad_norm": 1.1795376539230347,
"learning_rate": 4.984280524733107e-05,
"loss": 2.1883,
"step": 42
},
{
"epoch": 0.1376,
"grad_norm": 1.2123969793319702,
"learning_rate": 4.980983625737411e-05,
"loss": 2.6978,
"step": 43
},
{
"epoch": 0.1408,
"grad_norm": 1.2263802289962769,
"learning_rate": 4.977374404419837e-05,
"loss": 2.1447,
"step": 44
},
{
"epoch": 0.144,
"grad_norm": 1.304231882095337,
"learning_rate": 4.973453315132592e-05,
"loss": 2.4581,
"step": 45
},
{
"epoch": 0.1472,
"grad_norm": 1.3034032583236694,
"learning_rate": 4.9692208514878444e-05,
"loss": 2.568,
"step": 46
},
{
"epoch": 0.1504,
"grad_norm": 1.182282567024231,
"learning_rate": 4.96467754629559e-05,
"loss": 2.6686,
"step": 47
},
{
"epoch": 0.1536,
"grad_norm": 1.1906458139419556,
"learning_rate": 4.959823971496574e-05,
"loss": 2.5744,
"step": 48
},
{
"epoch": 0.1568,
"grad_norm": 1.0540134906768799,
"learning_rate": 4.954660738090297e-05,
"loss": 2.4111,
"step": 49
},
{
"epoch": 0.16,
"grad_norm": 1.17970871925354,
"learning_rate": 4.9491884960580894e-05,
"loss": 2.3782,
"step": 50
},
{
"epoch": 0.1632,
"grad_norm": 1.0423855781555176,
"learning_rate": 4.943407934281298e-05,
"loss": 2.5053,
"step": 51
},
{
"epoch": 0.1664,
"grad_norm": 1.16681706905365,
"learning_rate": 4.937319780454559e-05,
"loss": 2.5527,
"step": 52
},
{
"epoch": 0.1696,
"grad_norm": 1.072268009185791,
"learning_rate": 4.9309248009941914e-05,
"loss": 2.3297,
"step": 53
},
{
"epoch": 0.1728,
"grad_norm": 1.2780967950820923,
"learning_rate": 4.9242238009417175e-05,
"loss": 2.5685,
"step": 54
},
{
"epoch": 0.176,
"grad_norm": 1.234634280204773,
"learning_rate": 4.9172176238625164e-05,
"loss": 2.7419,
"step": 55
},
{
"epoch": 0.1792,
"grad_norm": 1.080798864364624,
"learning_rate": 4.909907151739633e-05,
"loss": 2.4689,
"step": 56
},
{
"epoch": 0.1824,
"grad_norm": 1.0914878845214844,
"learning_rate": 4.9022933048627496e-05,
"loss": 2.0907,
"step": 57
},
{
"epoch": 0.1856,
"grad_norm": 1.1586689949035645,
"learning_rate": 4.894377041712326e-05,
"loss": 2.5898,
"step": 58
},
{
"epoch": 0.1888,
"grad_norm": 1.1252062320709229,
"learning_rate": 4.886159358838952e-05,
"loss": 2.2873,
"step": 59
},
{
"epoch": 0.192,
"grad_norm": 1.1714915037155151,
"learning_rate": 4.877641290737884e-05,
"loss": 2.5332,
"step": 60
},
{
"epoch": 0.1952,
"grad_norm": 1.1400797367095947,
"learning_rate": 4.868823909718823e-05,
"loss": 2.365,
"step": 61
},
{
"epoch": 0.1984,
"grad_norm": 1.1181775331497192,
"learning_rate": 4.8597083257709194e-05,
"loss": 2.5676,
"step": 62
},
{
"epoch": 0.2016,
"grad_norm": 1.0846837759017944,
"learning_rate": 4.850295686423047e-05,
"loss": 2.4235,
"step": 63
},
{
"epoch": 0.2048,
"grad_norm": 1.062685489654541,
"learning_rate": 4.8405871765993433e-05,
"loss": 2.4309,
"step": 64
},
{
"epoch": 0.208,
"grad_norm": 1.1622320413589478,
"learning_rate": 4.8305840184700356e-05,
"loss": 2.4819,
"step": 65
},
{
"epoch": 0.2112,
"grad_norm": 1.0995711088180542,
"learning_rate": 4.820287471297598e-05,
"loss": 2.5657,
"step": 66
},
{
"epoch": 0.2144,
"grad_norm": 1.0327019691467285,
"learning_rate": 4.8096988312782174e-05,
"loss": 2.5404,
"step": 67
},
{
"epoch": 0.2176,
"grad_norm": 1.0894691944122314,
"learning_rate": 4.7988194313786275e-05,
"loss": 2.4577,
"step": 68
},
{
"epoch": 0.2208,
"grad_norm": 1.2364932298660278,
"learning_rate": 4.7876506411682994e-05,
"loss": 2.4601,
"step": 69
},
{
"epoch": 0.224,
"grad_norm": 1.1828285455703735,
"learning_rate": 4.7761938666470403e-05,
"loss": 2.5505,
"step": 70
},
{
"epoch": 0.2272,
"grad_norm": 1.1249369382858276,
"learning_rate": 4.7644505500679855e-05,
"loss": 2.5093,
"step": 71
},
{
"epoch": 0.2304,
"grad_norm": 1.215545892715454,
"learning_rate": 4.752422169756048e-05,
"loss": 2.6171,
"step": 72
},
{
"epoch": 0.2336,
"grad_norm": 1.063791275024414,
"learning_rate": 4.7401102399218136e-05,
"loss": 2.7023,
"step": 73
},
{
"epoch": 0.2368,
"grad_norm": 1.2447466850280762,
"learning_rate": 4.72751631047092e-05,
"loss": 2.4954,
"step": 74
},
{
"epoch": 0.24,
"grad_norm": 1.0675621032714844,
"learning_rate": 4.71464196680895e-05,
"loss": 2.5718,
"step": 75
},
{
"epoch": 0.2432,
"grad_norm": 1.1253868341445923,
"learning_rate": 4.701488829641845e-05,
"loss": 2.4024,
"step": 76
},
{
"epoch": 0.2464,
"grad_norm": 1.0018173456192017,
"learning_rate": 4.6880585547718845e-05,
"loss": 2.4159,
"step": 77
},
{
"epoch": 0.2496,
"grad_norm": 1.1260743141174316,
"learning_rate": 4.674352832889239e-05,
"loss": 2.5211,
"step": 78
},
{
"epoch": 0.2528,
"grad_norm": 1.1455786228179932,
"learning_rate": 4.660373389359137e-05,
"loss": 2.4025,
"step": 79
},
{
"epoch": 0.256,
"grad_norm": 1.0881799459457397,
"learning_rate": 4.6461219840046654e-05,
"loss": 2.3178,
"step": 80
},
{
"epoch": 0.2592,
"grad_norm": 1.0626671314239502,
"learning_rate": 4.6316004108852305e-05,
"loss": 2.4681,
"step": 81
},
{
"epoch": 0.2624,
"grad_norm": 1.1673706769943237,
"learning_rate": 4.6168104980707107e-05,
"loss": 2.6277,
"step": 82
},
{
"epoch": 0.2656,
"grad_norm": 1.144708514213562,
"learning_rate": 4.601754107411326e-05,
"loss": 2.5948,
"step": 83
},
{
"epoch": 0.2688,
"grad_norm": 1.1058765649795532,
"learning_rate": 4.586433134303257e-05,
"loss": 2.6431,
"step": 84
},
{
"epoch": 0.272,
"grad_norm": 1.043436884880066,
"learning_rate": 4.5708495074500414e-05,
"loss": 2.4188,
"step": 85
},
{
"epoch": 0.2752,
"grad_norm": 1.211965560913086,
"learning_rate": 4.5550051886197754e-05,
"loss": 2.5749,
"step": 86
},
{
"epoch": 0.2784,
"grad_norm": 1.0927400588989258,
"learning_rate": 4.538902172398151e-05,
"loss": 2.4746,
"step": 87
},
{
"epoch": 0.2816,
"grad_norm": 1.1012922525405884,
"learning_rate": 4.522542485937369e-05,
"loss": 2.5538,
"step": 88
},
{
"epoch": 0.2848,
"grad_norm": 1.1138043403625488,
"learning_rate": 4.505928188700945e-05,
"loss": 2.4855,
"step": 89
},
{
"epoch": 0.288,
"grad_norm": 1.069653868675232,
"learning_rate": 4.489061372204453e-05,
"loss": 2.5325,
"step": 90
},
{
"epoch": 0.2912,
"grad_norm": 1.1098930835723877,
"learning_rate": 4.4719441597522286e-05,
"loss": 2.5671,
"step": 91
},
{
"epoch": 0.2944,
"grad_norm": 1.0448640584945679,
"learning_rate": 4.454578706170075e-05,
"loss": 2.6194,
"step": 92
},
{
"epoch": 0.2976,
"grad_norm": 1.0166351795196533,
"learning_rate": 4.4369671975340025e-05,
"loss": 2.2649,
"step": 93
},
{
"epoch": 0.3008,
"grad_norm": 1.0552963018417358,
"learning_rate": 4.419111850895028e-05,
"loss": 2.5805,
"step": 94
},
{
"epoch": 0.304,
"grad_norm": 1.093049168586731,
"learning_rate": 4.401014914000078e-05,
"loss": 2.5055,
"step": 95
},
{
"epoch": 0.3072,
"grad_norm": 1.0456770658493042,
"learning_rate": 4.382678665009028e-05,
"loss": 2.2681,
"step": 96
},
{
"epoch": 0.3104,
"grad_norm": 1.1942492723464966,
"learning_rate": 4.364105412207914e-05,
"loss": 2.5884,
"step": 97
},
{
"epoch": 0.3136,
"grad_norm": 1.0143263339996338,
"learning_rate": 4.345297493718352e-05,
"loss": 2.5151,
"step": 98
},
{
"epoch": 0.3168,
"grad_norm": 1.0093568563461304,
"learning_rate": 4.326257277203194e-05,
"loss": 2.2898,
"step": 99
},
{
"epoch": 0.32,
"grad_norm": 1.039635181427002,
"learning_rate": 4.306987159568479e-05,
"loss": 1.9802,
"step": 100
},
{
"epoch": 0.3232,
"grad_norm": 1.183872103691101,
"learning_rate": 4.2874895666616886e-05,
"loss": 2.3071,
"step": 101
},
{
"epoch": 0.3264,
"grad_norm": 1.0931261777877808,
"learning_rate": 4.267766952966369e-05,
"loss": 2.5054,
"step": 102
},
{
"epoch": 0.3296,
"grad_norm": 1.217860460281372,
"learning_rate": 4.2478218012931434e-05,
"loss": 2.4392,
"step": 103
},
{
"epoch": 0.3328,
"grad_norm": 1.1708042621612549,
"learning_rate": 4.227656622467162e-05,
"loss": 2.5153,
"step": 104
},
{
"epoch": 0.336,
"grad_norm": 0.9975510239601135,
"learning_rate": 4.207273955012018e-05,
"loss": 2.5271,
"step": 105
},
{
"epoch": 0.3392,
"grad_norm": 1.004370093345642,
"learning_rate": 4.186676364830186e-05,
"loss": 2.5909,
"step": 106
},
{
"epoch": 0.3424,
"grad_norm": 1.0638096332550049,
"learning_rate": 4.16586644488001e-05,
"loss": 2.3149,
"step": 107
},
{
"epoch": 0.3456,
"grad_norm": 1.2675764560699463,
"learning_rate": 4.144846814849282e-05,
"loss": 2.3856,
"step": 108
},
{
"epoch": 0.3488,
"grad_norm": 1.0615671873092651,
"learning_rate": 4.123620120825459e-05,
"loss": 2.5105,
"step": 109
},
{
"epoch": 0.352,
"grad_norm": 1.2035943269729614,
"learning_rate": 4.10218903496256e-05,
"loss": 2.5255,
"step": 110
},
{
"epoch": 0.3552,
"grad_norm": 1.1443675756454468,
"learning_rate": 4.0805562551447746e-05,
"loss": 2.4382,
"step": 111
},
{
"epoch": 0.3584,
"grad_norm": 1.176398515701294,
"learning_rate": 4.058724504646834e-05,
"loss": 2.4046,
"step": 112
},
{
"epoch": 0.3616,
"grad_norm": 1.015869140625,
"learning_rate": 4.036696531791193e-05,
"loss": 2.4115,
"step": 113
},
{
"epoch": 0.3648,
"grad_norm": 1.1262589693069458,
"learning_rate": 4.01447510960205e-05,
"loss": 2.4621,
"step": 114
},
{
"epoch": 0.368,
"grad_norm": 1.190390944480896,
"learning_rate": 3.992063035456259e-05,
"loss": 2.6059,
"step": 115
},
{
"epoch": 0.3712,
"grad_norm": 1.1551072597503662,
"learning_rate": 3.969463130731183e-05,
"loss": 2.4153,
"step": 116
},
{
"epoch": 0.3744,
"grad_norm": 1.1593989133834839,
"learning_rate": 3.946678240449515e-05,
"loss": 2.625,
"step": 117
},
{
"epoch": 0.3776,
"grad_norm": 1.1310359239578247,
"learning_rate": 3.92371123292113e-05,
"loss": 2.7204,
"step": 118
},
{
"epoch": 0.3808,
"grad_norm": 0.9959138035774231,
"learning_rate": 3.900564999382007e-05,
"loss": 2.6826,
"step": 119
},
{
"epoch": 0.384,
"grad_norm": 0.9353814125061035,
"learning_rate": 3.8772424536302564e-05,
"loss": 2.5791,
"step": 120
},
{
"epoch": 0.3872,
"grad_norm": 1.0344676971435547,
"learning_rate": 3.8537465316593146e-05,
"loss": 2.8203,
"step": 121
},
{
"epoch": 0.3904,
"grad_norm": 0.9825398921966553,
"learning_rate": 3.830080191288342e-05,
"loss": 2.4996,
"step": 122
},
{
"epoch": 0.3936,
"grad_norm": 1.1759867668151855,
"learning_rate": 3.8062464117898724e-05,
"loss": 2.4976,
"step": 123
},
{
"epoch": 0.3968,
"grad_norm": 1.043148159980774,
"learning_rate": 3.782248193514766e-05,
"loss": 2.4043,
"step": 124
},
{
"epoch": 0.4,
"grad_norm": 1.0625474452972412,
"learning_rate": 3.758088557514501e-05,
"loss": 2.5448,
"step": 125
},
{
"epoch": 0.4032,
"grad_norm": 1.0381778478622437,
"learning_rate": 3.7337705451608674e-05,
"loss": 2.412,
"step": 126
},
{
"epoch": 0.4064,
"grad_norm": 1.0451267957687378,
"learning_rate": 3.7092972177631e-05,
"loss": 2.4634,
"step": 127
},
{
"epoch": 0.4096,
"grad_norm": 0.9723502397537231,
"learning_rate": 3.6846716561824965e-05,
"loss": 2.4095,
"step": 128
},
{
"epoch": 0.4128,
"grad_norm": 1.130776047706604,
"learning_rate": 3.659896960444586e-05,
"loss": 2.5474,
"step": 129
},
{
"epoch": 0.416,
"grad_norm": 1.0974534749984741,
"learning_rate": 3.634976249348867e-05,
"loss": 2.3988,
"step": 130
},
{
"epoch": 0.4192,
"grad_norm": 1.14722740650177,
"learning_rate": 3.6099126600762054e-05,
"loss": 2.5543,
"step": 131
},
{
"epoch": 0.4224,
"grad_norm": 1.1162605285644531,
"learning_rate": 3.5847093477938956e-05,
"loss": 2.6521,
"step": 132
},
{
"epoch": 0.4256,
"grad_norm": 1.118084192276001,
"learning_rate": 3.559369485258472e-05,
"loss": 2.4336,
"step": 133
},
{
"epoch": 0.4288,
"grad_norm": 1.0270711183547974,
"learning_rate": 3.533896262416302e-05,
"loss": 2.4505,
"step": 134
},
{
"epoch": 0.432,
"grad_norm": 1.1394667625427246,
"learning_rate": 3.508292886002013e-05,
"loss": 2.4266,
"step": 135
},
{
"epoch": 0.4352,
"grad_norm": 1.0506163835525513,
"learning_rate": 3.4825625791348096e-05,
"loss": 2.4871,
"step": 136
},
{
"epoch": 0.4384,
"grad_norm": 0.9986435174942017,
"learning_rate": 3.456708580912725e-05,
"loss": 2.2317,
"step": 137
},
{
"epoch": 0.4416,
"grad_norm": 1.2329692840576172,
"learning_rate": 3.4307341460048633e-05,
"loss": 2.6388,
"step": 138
},
{
"epoch": 0.4448,
"grad_norm": 1.143542766571045,
"learning_rate": 3.404642544241681e-05,
"loss": 2.5674,
"step": 139
},
{
"epoch": 0.448,
"grad_norm": 1.005985140800476,
"learning_rate": 3.378437060203357e-05,
"loss": 2.427,
"step": 140
},
{
"epoch": 0.4512,
"grad_norm": 1.0496375560760498,
"learning_rate": 3.3521209928063126e-05,
"loss": 2.2562,
"step": 141
},
{
"epoch": 0.4544,
"grad_norm": 1.0612940788269043,
"learning_rate": 3.3256976548879184e-05,
"loss": 2.4885,
"step": 142
},
{
"epoch": 0.4576,
"grad_norm": 1.1914249658584595,
"learning_rate": 3.2991703727894544e-05,
"loss": 2.4081,
"step": 143
},
{
"epoch": 0.4608,
"grad_norm": 1.2552766799926758,
"learning_rate": 3.272542485937369e-05,
"loss": 2.4895,
"step": 144
},
{
"epoch": 0.464,
"grad_norm": 1.1917306184768677,
"learning_rate": 3.24581734642289e-05,
"loss": 2.7435,
"step": 145
},
{
"epoch": 0.4672,
"grad_norm": 1.1121033430099487,
"learning_rate": 3.218998318580043e-05,
"loss": 2.3139,
"step": 146
},
{
"epoch": 0.4704,
"grad_norm": 1.168412446975708,
"learning_rate": 3.1920887785621235e-05,
"loss": 2.2553,
"step": 147
},
{
"epoch": 0.4736,
"grad_norm": 1.1404107809066772,
"learning_rate": 3.165092113916688e-05,
"loss": 2.7097,
"step": 148
},
{
"epoch": 0.4768,
"grad_norm": 0.9871900677680969,
"learning_rate": 3.138011723159107e-05,
"loss": 2.6076,
"step": 149
},
{
"epoch": 0.48,
"grad_norm": 1.1518453359603882,
"learning_rate": 3.110851015344735e-05,
"loss": 2.4797,
"step": 150
},
{
"epoch": 0.4832,
"grad_norm": 1.0925283432006836,
"learning_rate": 3.083613409639764e-05,
"loss": 2.3655,
"step": 151
},
{
"epoch": 0.4864,
"grad_norm": 1.1126621961593628,
"learning_rate": 3.056302334890786e-05,
"loss": 2.5094,
"step": 152
},
{
"epoch": 0.4896,
"grad_norm": 0.9415158033370972,
"learning_rate": 3.0289212291931573e-05,
"loss": 2.2806,
"step": 153
},
{
"epoch": 0.4928,
"grad_norm": 1.040486454963684,
"learning_rate": 3.0014735394581823e-05,
"loss": 2.5357,
"step": 154
},
{
"epoch": 0.496,
"grad_norm": 1.1474907398223877,
"learning_rate": 2.9739627209791965e-05,
"loss": 2.5369,
"step": 155
},
{
"epoch": 0.4992,
"grad_norm": 1.122162938117981,
"learning_rate": 2.9463922369965917e-05,
"loss": 2.4727,
"step": 156
},
{
"epoch": 0.5024,
"grad_norm": 1.1549993753433228,
"learning_rate": 2.918765558261841e-05,
"loss": 2.4274,
"step": 157
},
{
"epoch": 0.5056,
"grad_norm": 1.1090857982635498,
"learning_rate": 2.8910861626005776e-05,
"loss": 2.624,
"step": 158
},
{
"epoch": 0.5088,
"grad_norm": 1.0676389932632446,
"learning_rate": 2.8633575344747822e-05,
"loss": 2.3891,
"step": 159
},
{
"epoch": 0.512,
"grad_norm": 1.0996875762939453,
"learning_rate": 2.8355831645441388e-05,
"loss": 2.4476,
"step": 160
},
{
"epoch": 0.5152,
"grad_norm": 1.1088722944259644,
"learning_rate": 2.8077665492266074e-05,
"loss": 2.3327,
"step": 161
},
{
"epoch": 0.5184,
"grad_norm": 1.1235601902008057,
"learning_rate": 2.7799111902582696e-05,
"loss": 2.3989,
"step": 162
},
{
"epoch": 0.5216,
"grad_norm": 1.162764310836792,
"learning_rate": 2.7520205942525112e-05,
"loss": 2.3669,
"step": 163
},
{
"epoch": 0.5248,
"grad_norm": 1.1216943264007568,
"learning_rate": 2.724098272258584e-05,
"loss": 2.6234,
"step": 164
},
{
"epoch": 0.528,
"grad_norm": 1.1644459962844849,
"learning_rate": 2.6961477393196126e-05,
"loss": 2.4225,
"step": 165
},
{
"epoch": 0.5312,
"grad_norm": 1.0228266716003418,
"learning_rate": 2.6681725140300997e-05,
"loss": 2.6197,
"step": 166
},
{
"epoch": 0.5344,
"grad_norm": 1.1220487356185913,
"learning_rate": 2.6401761180929797e-05,
"loss": 2.5644,
"step": 167
},
{
"epoch": 0.5376,
"grad_norm": 1.1409152746200562,
"learning_rate": 2.6121620758762877e-05,
"loss": 2.2615,
"step": 168
},
{
"epoch": 0.5408,
"grad_norm": 1.0781875848770142,
"learning_rate": 2.5841339139694855e-05,
"loss": 2.451,
"step": 169
},
{
"epoch": 0.544,
"grad_norm": 1.1133708953857422,
"learning_rate": 2.556095160739513e-05,
"loss": 2.4254,
"step": 170
},
{
"epoch": 0.5472,
"grad_norm": 1.0333507061004639,
"learning_rate": 2.528049345886615e-05,
"loss": 2.3955,
"step": 171
},
{
"epoch": 0.5504,
"grad_norm": 1.085971474647522,
"learning_rate": 2.5e-05,
"loss": 2.6893,
"step": 172
},
{
"epoch": 0.5536,
"grad_norm": 1.1369028091430664,
"learning_rate": 2.4719506541133855e-05,
"loss": 2.5269,
"step": 173
},
{
"epoch": 0.5568,
"grad_norm": 1.037834882736206,
"learning_rate": 2.443904839260488e-05,
"loss": 2.5172,
"step": 174
},
{
"epoch": 0.56,
"grad_norm": 1.0019052028656006,
"learning_rate": 2.415866086030516e-05,
"loss": 2.1654,
"step": 175
},
{
"epoch": 0.5632,
"grad_norm": 0.9839959144592285,
"learning_rate": 2.3878379241237136e-05,
"loss": 2.4109,
"step": 176
},
{
"epoch": 0.5664,
"grad_norm": 1.0551410913467407,
"learning_rate": 2.3598238819070202e-05,
"loss": 2.4055,
"step": 177
},
{
"epoch": 0.5696,
"grad_norm": 1.0757516622543335,
"learning_rate": 2.331827485969901e-05,
"loss": 2.6649,
"step": 178
},
{
"epoch": 0.5728,
"grad_norm": 1.2172918319702148,
"learning_rate": 2.303852260680388e-05,
"loss": 2.4432,
"step": 179
},
{
"epoch": 0.576,
"grad_norm": 1.0906624794006348,
"learning_rate": 2.2759017277414166e-05,
"loss": 2.2428,
"step": 180
},
{
"epoch": 0.5792,
"grad_norm": 0.9661747813224792,
"learning_rate": 2.247979405747489e-05,
"loss": 2.4855,
"step": 181
},
{
"epoch": 0.5824,
"grad_norm": 1.0351911783218384,
"learning_rate": 2.2200888097417307e-05,
"loss": 2.2677,
"step": 182
},
{
"epoch": 0.5856,
"grad_norm": 1.0249441862106323,
"learning_rate": 2.1922334507733932e-05,
"loss": 2.561,
"step": 183
},
{
"epoch": 0.5888,
"grad_norm": 0.9759440422058105,
"learning_rate": 2.164416835455862e-05,
"loss": 2.2115,
"step": 184
},
{
"epoch": 0.592,
"grad_norm": 1.0776594877243042,
"learning_rate": 2.136642465525219e-05,
"loss": 2.2336,
"step": 185
},
{
"epoch": 0.5952,
"grad_norm": 1.052164912223816,
"learning_rate": 2.1089138373994223e-05,
"loss": 2.4157,
"step": 186
},
{
"epoch": 0.5984,
"grad_norm": 0.9809448719024658,
"learning_rate": 2.0812344417381595e-05,
"loss": 2.301,
"step": 187
},
{
"epoch": 0.6016,
"grad_norm": 1.090650200843811,
"learning_rate": 2.0536077630034086e-05,
"loss": 2.5767,
"step": 188
},
{
"epoch": 0.6048,
"grad_norm": 1.0655643939971924,
"learning_rate": 2.026037279020804e-05,
"loss": 2.5267,
"step": 189
},
{
"epoch": 0.608,
"grad_norm": 1.20234215259552,
"learning_rate": 1.9985264605418183e-05,
"loss": 2.5235,
"step": 190
},
{
"epoch": 0.6112,
"grad_norm": 1.0695698261260986,
"learning_rate": 1.9710787708068433e-05,
"loss": 2.4741,
"step": 191
},
{
"epoch": 0.6144,
"grad_norm": 0.9379785060882568,
"learning_rate": 1.9436976651092144e-05,
"loss": 2.5573,
"step": 192
},
{
"epoch": 0.6176,
"grad_norm": 1.0223705768585205,
"learning_rate": 1.9163865903602374e-05,
"loss": 2.4979,
"step": 193
},
{
"epoch": 0.6208,
"grad_norm": 1.0303515195846558,
"learning_rate": 1.8891489846552646e-05,
"loss": 2.5981,
"step": 194
},
{
"epoch": 0.624,
"grad_norm": 1.0409760475158691,
"learning_rate": 1.8619882768408935e-05,
"loss": 2.5319,
"step": 195
},
{
"epoch": 0.6272,
"grad_norm": 1.0393253564834595,
"learning_rate": 1.8349078860833123e-05,
"loss": 2.5587,
"step": 196
},
{
"epoch": 0.6304,
"grad_norm": 1.2285503149032593,
"learning_rate": 1.8079112214378768e-05,
"loss": 2.4699,
"step": 197
},
{
"epoch": 0.6336,
"grad_norm": 1.1109060049057007,
"learning_rate": 1.781001681419957e-05,
"loss": 2.4324,
"step": 198
},
{
"epoch": 0.6368,
"grad_norm": 1.032814860343933,
"learning_rate": 1.75418265357711e-05,
"loss": 2.7314,
"step": 199
},
{
"epoch": 0.64,
"grad_norm": 1.191022515296936,
"learning_rate": 1.7274575140626318e-05,
"loss": 2.3928,
"step": 200
},
{
"epoch": 0.6432,
"grad_norm": 1.0370298624038696,
"learning_rate": 1.700829627210547e-05,
"loss": 2.5094,
"step": 201
},
{
"epoch": 0.6464,
"grad_norm": 1.176619291305542,
"learning_rate": 1.6743023451120832e-05,
"loss": 2.5789,
"step": 202
},
{
"epoch": 0.6496,
"grad_norm": 1.0287202596664429,
"learning_rate": 1.6478790071936877e-05,
"loss": 2.5584,
"step": 203
},
{
"epoch": 0.6528,
"grad_norm": 1.0078474283218384,
"learning_rate": 1.621562939796643e-05,
"loss": 2.5382,
"step": 204
},
{
"epoch": 0.656,
"grad_norm": 0.9922024011611938,
"learning_rate": 1.59535745575832e-05,
"loss": 2.4357,
"step": 205
},
{
"epoch": 0.6592,
"grad_norm": 1.0808738470077515,
"learning_rate": 1.5692658539951372e-05,
"loss": 2.5151,
"step": 206
},
{
"epoch": 0.6624,
"grad_norm": 1.1992560625076294,
"learning_rate": 1.5432914190872757e-05,
"loss": 2.6586,
"step": 207
},
{
"epoch": 0.6656,
"grad_norm": 1.0298317670822144,
"learning_rate": 1.5174374208651912e-05,
"loss": 2.6879,
"step": 208
},
{
"epoch": 0.6688,
"grad_norm": 1.0588644742965698,
"learning_rate": 1.4917071139979876e-05,
"loss": 2.4113,
"step": 209
},
{
"epoch": 0.672,
"grad_norm": 1.0036541223526,
"learning_rate": 1.466103737583699e-05,
"loss": 2.3142,
"step": 210
},
{
"epoch": 0.6752,
"grad_norm": 0.9461038708686829,
"learning_rate": 1.4406305147415283e-05,
"loss": 2.3132,
"step": 211
},
{
"epoch": 0.6784,
"grad_norm": 1.0341013669967651,
"learning_rate": 1.4152906522061048e-05,
"loss": 2.3722,
"step": 212
},
{
"epoch": 0.6816,
"grad_norm": 1.0475574731826782,
"learning_rate": 1.3900873399237952e-05,
"loss": 2.5176,
"step": 213
},
{
"epoch": 0.6848,
"grad_norm": 1.3477550745010376,
"learning_rate": 1.3650237506511331e-05,
"loss": 2.4167,
"step": 214
},
{
"epoch": 0.688,
"grad_norm": 1.0816230773925781,
"learning_rate": 1.340103039555415e-05,
"loss": 2.6871,
"step": 215
},
{
"epoch": 0.6912,
"grad_norm": 1.0898610353469849,
"learning_rate": 1.3153283438175034e-05,
"loss": 2.585,
"step": 216
},
{
"epoch": 0.6944,
"grad_norm": 1.0709856748580933,
"learning_rate": 1.2907027822369005e-05,
"loss": 2.4157,
"step": 217
},
{
"epoch": 0.6976,
"grad_norm": 1.074513554573059,
"learning_rate": 1.2662294548391328e-05,
"loss": 2.5257,
"step": 218
},
{
"epoch": 0.7008,
"grad_norm": 1.0920336246490479,
"learning_rate": 1.2419114424855e-05,
"loss": 2.4891,
"step": 219
},
{
"epoch": 0.704,
"grad_norm": 0.8652957081794739,
"learning_rate": 1.217751806485235e-05,
"loss": 2.4609,
"step": 220
},
{
"epoch": 0.7072,
"grad_norm": 1.1371972560882568,
"learning_rate": 1.1937535882101281e-05,
"loss": 2.2327,
"step": 221
},
{
"epoch": 0.7104,
"grad_norm": 1.0960814952850342,
"learning_rate": 1.1699198087116589e-05,
"loss": 2.5813,
"step": 222
},
{
"epoch": 0.7136,
"grad_norm": 0.9509531259536743,
"learning_rate": 1.1462534683406858e-05,
"loss": 2.2718,
"step": 223
},
{
"epoch": 0.7168,
"grad_norm": 0.894752025604248,
"learning_rate": 1.122757546369744e-05,
"loss": 2.3841,
"step": 224
},
{
"epoch": 0.72,
"grad_norm": 1.073641061782837,
"learning_rate": 1.0994350006179931e-05,
"loss": 2.5524,
"step": 225
},
{
"epoch": 0.7232,
"grad_norm": 1.117712378501892,
"learning_rate": 1.0762887670788702e-05,
"loss": 2.4866,
"step": 226
},
{
"epoch": 0.7264,
"grad_norm": 1.120766043663025,
"learning_rate": 1.0533217595504858e-05,
"loss": 2.513,
"step": 227
},
{
"epoch": 0.7296,
"grad_norm": 1.0142333507537842,
"learning_rate": 1.0305368692688174e-05,
"loss": 2.448,
"step": 228
},
{
"epoch": 0.7328,
"grad_norm": 1.1020923852920532,
"learning_rate": 1.007936964543741e-05,
"loss": 2.6427,
"step": 229
},
{
"epoch": 0.736,
"grad_norm": 1.029630422592163,
"learning_rate": 9.855248903979506e-06,
"loss": 2.4163,
"step": 230
},
{
"epoch": 0.7392,
"grad_norm": 1.0667723417282104,
"learning_rate": 9.63303468208807e-06,
"loss": 2.5032,
"step": 231
},
{
"epoch": 0.7424,
"grad_norm": 1.1070666313171387,
"learning_rate": 9.412754953531663e-06,
"loss": 2.4455,
"step": 232
},
{
"epoch": 0.7456,
"grad_norm": 0.9969913363456726,
"learning_rate": 9.19443744855226e-06,
"loss": 2.3838,
"step": 233
},
{
"epoch": 0.7488,
"grad_norm": 0.9451704025268555,
"learning_rate": 8.978109650374397e-06,
"loss": 2.2581,
"step": 234
},
{
"epoch": 0.752,
"grad_norm": 1.1031489372253418,
"learning_rate": 8.763798791745411e-06,
"loss": 2.5738,
"step": 235
},
{
"epoch": 0.7552,
"grad_norm": 1.0689350366592407,
"learning_rate": 8.551531851507186e-06,
"loss": 2.0363,
"step": 236
},
{
"epoch": 0.7584,
"grad_norm": 1.0003691911697388,
"learning_rate": 8.341335551199902e-06,
"loss": 2.4822,
"step": 237
},
{
"epoch": 0.7616,
"grad_norm": 1.1118286848068237,
"learning_rate": 8.133236351698143e-06,
"loss": 2.505,
"step": 238
},
{
"epoch": 0.7648,
"grad_norm": 1.0897243022918701,
"learning_rate": 7.927260449879829e-06,
"loss": 2.5076,
"step": 239
},
{
"epoch": 0.768,
"grad_norm": 1.0602351427078247,
"learning_rate": 7.723433775328384e-06,
"loss": 2.503,
"step": 240
},
{
"epoch": 0.7712,
"grad_norm": 1.1504631042480469,
"learning_rate": 7.521781987068565e-06,
"loss": 2.4007,
"step": 241
},
{
"epoch": 0.7744,
"grad_norm": 1.1526134014129639,
"learning_rate": 7.3223304703363135e-06,
"loss": 2.6463,
"step": 242
},
{
"epoch": 0.7776,
"grad_norm": 0.9730823636054993,
"learning_rate": 7.125104333383117e-06,
"loss": 2.5505,
"step": 243
},
{
"epoch": 0.7808,
"grad_norm": 1.1611665487289429,
"learning_rate": 6.930128404315214e-06,
"loss": 2.3515,
"step": 244
},
{
"epoch": 0.784,
"grad_norm": 1.2234477996826172,
"learning_rate": 6.737427227968063e-06,
"loss": 2.4265,
"step": 245
},
{
"epoch": 0.7872,
"grad_norm": 0.9260097146034241,
"learning_rate": 6.547025062816486e-06,
"loss": 2.1394,
"step": 246
},
{
"epoch": 0.7904,
"grad_norm": 1.0301282405853271,
"learning_rate": 6.358945877920861e-06,
"loss": 2.3806,
"step": 247
},
{
"epoch": 0.7936,
"grad_norm": 0.9447835683822632,
"learning_rate": 6.173213349909729e-06,
"loss": 2.6367,
"step": 248
},
{
"epoch": 0.7968,
"grad_norm": 1.0396769046783447,
"learning_rate": 5.989850859999227e-06,
"loss": 2.6975,
"step": 249
},
{
"epoch": 0.8,
"grad_norm": 1.023889183998108,
"learning_rate": 5.808881491049723e-06,
"loss": 2.4211,
"step": 250
},
{
"epoch": 0.8032,
"grad_norm": 1.043057918548584,
"learning_rate": 5.6303280246599786e-06,
"loss": 2.5829,
"step": 251
},
{
"epoch": 0.8064,
"grad_norm": 1.104324460029602,
"learning_rate": 5.454212938299255e-06,
"loss": 2.5383,
"step": 252
},
{
"epoch": 0.8096,
"grad_norm": 1.1070061922073364,
"learning_rate": 5.280558402477725e-06,
"loss": 2.4353,
"step": 253
},
{
"epoch": 0.8128,
"grad_norm": 1.0229357481002808,
"learning_rate": 5.1093862779554776e-06,
"loss": 2.5017,
"step": 254
},
{
"epoch": 0.816,
"grad_norm": 1.0547471046447754,
"learning_rate": 4.940718112990553e-06,
"loss": 2.4777,
"step": 255
},
{
"epoch": 0.8192,
"grad_norm": 1.06377112865448,
"learning_rate": 4.7745751406263165e-06,
"loss": 2.3699,
"step": 256
},
{
"epoch": 0.8224,
"grad_norm": 1.158517837524414,
"learning_rate": 4.610978276018496e-06,
"loss": 2.4627,
"step": 257
},
{
"epoch": 0.8256,
"grad_norm": 1.072967290878296,
"learning_rate": 4.4499481138022544e-06,
"loss": 2.5287,
"step": 258
},
{
"epoch": 0.8288,
"grad_norm": 0.9575193524360657,
"learning_rate": 4.29150492549959e-06,
"loss": 2.5028,
"step": 259
},
{
"epoch": 0.832,
"grad_norm": 1.0296958684921265,
"learning_rate": 4.135668656967434e-06,
"loss": 2.6446,
"step": 260
},
{
"epoch": 0.8352,
"grad_norm": 0.9970160722732544,
"learning_rate": 3.982458925886748e-06,
"loss": 2.3393,
"step": 261
},
{
"epoch": 0.8384,
"grad_norm": 1.1589882373809814,
"learning_rate": 3.831895019292897e-06,
"loss": 2.5703,
"step": 262
},
{
"epoch": 0.8416,
"grad_norm": 1.0067667961120605,
"learning_rate": 3.6839958911476957e-06,
"loss": 2.4839,
"step": 263
},
{
"epoch": 0.8448,
"grad_norm": 1.046972632408142,
"learning_rate": 3.5387801599533475e-06,
"loss": 2.6206,
"step": 264
},
{
"epoch": 0.848,
"grad_norm": 1.047595500946045,
"learning_rate": 3.3962661064086354e-06,
"loss": 2.4652,
"step": 265
},
{
"epoch": 0.8512,
"grad_norm": 1.0699108839035034,
"learning_rate": 3.2564716711076167e-06,
"loss": 2.3963,
"step": 266
},
{
"epoch": 0.8544,
"grad_norm": 1.1620286703109741,
"learning_rate": 3.119414452281158e-06,
"loss": 2.3617,
"step": 267
},
{
"epoch": 0.8576,
"grad_norm": 1.1183536052703857,
"learning_rate": 2.98511170358155e-06,
"loss": 2.6089,
"step": 268
},
{
"epoch": 0.8608,
"grad_norm": 1.0337989330291748,
"learning_rate": 2.853580331910505e-06,
"loss": 2.3825,
"step": 269
},
{
"epoch": 0.864,
"grad_norm": 0.9789101481437683,
"learning_rate": 2.7248368952908053e-06,
"loss": 2.5147,
"step": 270
},
{
"epoch": 0.8672,
"grad_norm": 1.014626383781433,
"learning_rate": 2.5988976007818715e-06,
"loss": 2.339,
"step": 271
},
{
"epoch": 0.8704,
"grad_norm": 1.0315630435943604,
"learning_rate": 2.475778302439524e-06,
"loss": 2.4048,
"step": 272
},
{
"epoch": 0.8736,
"grad_norm": 1.0927337408065796,
"learning_rate": 2.355494499320149e-06,
"loss": 2.5846,
"step": 273
},
{
"epoch": 0.8768,
"grad_norm": 1.0336443185806274,
"learning_rate": 2.2380613335296036e-06,
"loss": 2.3522,
"step": 274
},
{
"epoch": 0.88,
"grad_norm": 1.1010684967041016,
"learning_rate": 2.1234935883170047e-06,
"loss": 2.5251,
"step": 275
},
{
"epoch": 0.8832,
"grad_norm": 1.031034231185913,
"learning_rate": 2.0118056862137357e-06,
"loss": 2.5216,
"step": 276
},
{
"epoch": 0.8864,
"grad_norm": 1.0429649353027344,
"learning_rate": 1.9030116872178316e-06,
"loss": 2.4645,
"step": 277
},
{
"epoch": 0.8896,
"grad_norm": 1.021558403968811,
"learning_rate": 1.7971252870240291e-06,
"loss": 2.3027,
"step": 278
},
{
"epoch": 0.8928,
"grad_norm": 1.1493953466415405,
"learning_rate": 1.6941598152996451e-06,
"loss": 2.3772,
"step": 279
},
{
"epoch": 0.896,
"grad_norm": 1.0270682573318481,
"learning_rate": 1.59412823400657e-06,
"loss": 2.2627,
"step": 280
},
{
"epoch": 0.8992,
"grad_norm": 1.0478615760803223,
"learning_rate": 1.4970431357695243e-06,
"loss": 2.4045,
"step": 281
},
{
"epoch": 0.9024,
"grad_norm": 1.0421029329299927,
"learning_rate": 1.4029167422908107e-06,
"loss": 2.3885,
"step": 282
},
{
"epoch": 0.9056,
"grad_norm": 0.8238216638565063,
"learning_rate": 1.3117609028117817e-06,
"loss": 2.4014,
"step": 283
},
{
"epoch": 0.9088,
"grad_norm": 1.1559348106384277,
"learning_rate": 1.2235870926211619e-06,
"loss": 2.6491,
"step": 284
},
{
"epoch": 0.912,
"grad_norm": 1.0746315717697144,
"learning_rate": 1.138406411610482e-06,
"loss": 2.2636,
"step": 285
},
{
"epoch": 0.9152,
"grad_norm": 1.155534267425537,
"learning_rate": 1.0562295828767387e-06,
"loss": 2.3605,
"step": 286
},
{
"epoch": 0.9184,
"grad_norm": 0.8642758727073669,
"learning_rate": 9.770669513725128e-07,
"loss": 2.5547,
"step": 287
},
{
"epoch": 0.9216,
"grad_norm": 1.0164655447006226,
"learning_rate": 9.009284826036691e-07,
"loss": 2.4789,
"step": 288
},
{
"epoch": 0.9248,
"grad_norm": 1.0909721851348877,
"learning_rate": 8.278237613748407e-07,
"loss": 2.3089,
"step": 289
},
{
"epoch": 0.928,
"grad_norm": 1.0505188703536987,
"learning_rate": 7.577619905828282e-07,
"loss": 2.6275,
"step": 290
},
{
"epoch": 0.9312,
"grad_norm": 1.0300596952438354,
"learning_rate": 6.907519900580861e-07,
"loss": 2.5099,
"step": 291
},
{
"epoch": 0.9344,
"grad_norm": 1.007779598236084,
"learning_rate": 6.268021954544096e-07,
"loss": 2.4924,
"step": 292
},
{
"epoch": 0.9376,
"grad_norm": 1.0541815757751465,
"learning_rate": 5.659206571870218e-07,
"loss": 2.4179,
"step": 293
},
{
"epoch": 0.9408,
"grad_norm": 1.1204500198364258,
"learning_rate": 5.08115039419113e-07,
"loss": 2.4938,
"step": 294
},
{
"epoch": 0.944,
"grad_norm": 0.9952044486999512,
"learning_rate": 4.5339261909704e-07,
"loss": 2.5642,
"step": 295
},
{
"epoch": 0.9472,
"grad_norm": 0.9769514799118042,
"learning_rate": 4.0176028503425835e-07,
"loss": 2.4987,
"step": 296
},
{
"epoch": 0.9504,
"grad_norm": 1.274708867073059,
"learning_rate": 3.5322453704410286e-07,
"loss": 2.7626,
"step": 297
},
{
"epoch": 0.9536,
"grad_norm": 0.93681401014328,
"learning_rate": 3.077914851215585e-07,
"loss": 2.3922,
"step": 298
},
{
"epoch": 0.9568,
"grad_norm": 0.9534211158752441,
"learning_rate": 2.6546684867408413e-07,
"loss": 2.4974,
"step": 299
},
{
"epoch": 0.96,
"grad_norm": 1.060401439666748,
"learning_rate": 2.262559558016325e-07,
"loss": 2.5511,
"step": 300
},
{
"epoch": 0.9632,
"grad_norm": 0.9338224530220032,
"learning_rate": 1.9016374262589842e-07,
"loss": 2.4446,
"step": 301
},
{
"epoch": 0.9664,
"grad_norm": 0.9976362586021423,
"learning_rate": 1.571947526689349e-07,
"loss": 2.3035,
"step": 302
},
{
"epoch": 0.9696,
"grad_norm": 1.1598321199417114,
"learning_rate": 1.2735313628119139e-07,
"loss": 2.4681,
"step": 303
},
{
"epoch": 0.9728,
"grad_norm": 1.0966912508010864,
"learning_rate": 1.006426501190233e-07,
"loss": 2.2132,
"step": 304
},
{
"epoch": 0.976,
"grad_norm": 1.1520273685455322,
"learning_rate": 7.706665667180091e-08,
"loss": 2.5282,
"step": 305
},
{
"epoch": 0.9792,
"grad_norm": 0.9652422666549683,
"learning_rate": 5.662812383859795e-08,
"loss": 2.4993,
"step": 306
},
{
"epoch": 0.9824,
"grad_norm": 1.2469524145126343,
"learning_rate": 3.9329624554584884e-08,
"loss": 2.4509,
"step": 307
},
{
"epoch": 0.9856,
"grad_norm": 1.0177453756332397,
"learning_rate": 2.5173336467135267e-08,
"loss": 2.4215,
"step": 308
},
{
"epoch": 0.9888,
"grad_norm": 1.1211117506027222,
"learning_rate": 1.4161041661667207e-08,
"loss": 2.4669,
"step": 309
},
{
"epoch": 0.992,
"grad_norm": 0.8371671438217163,
"learning_rate": 6.294126437336734e-09,
"loss": 2.3749,
"step": 310
},
{
"epoch": 0.9952,
"grad_norm": 1.4161032438278198,
"learning_rate": 1.5735811324857352e-09,
"loss": 2.426,
"step": 311
},
{
"epoch": 0.9984,
"grad_norm": 1.0636640787124634,
"learning_rate": 0.0,
"loss": 2.7309,
"step": 312
}
],
"logging_steps": 1,
"max_steps": 312,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.1954372272399974e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
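
For reference, a minimal sketch (not part of this repository's files) of how the log_history above could be inspected offline. It assumes the file has been downloaded locally as "trainer_state.json"; the file name and the plotting choices are illustrative assumptions, not anything defined by this checkpoint.

# Minimal sketch: load trainer_state.json and plot the per-step training
# loss and the learning-rate schedule recorded in log_history.
import json

import matplotlib.pyplot as plt

# Assumed local path; adjust to wherever the file was saved.
with open("trainer_state.json") as f:
    state = json.load(f)

logs = state["log_history"]
steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

# Two stacked panels sharing the step axis: loss on top, LR schedule below.
fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
fig.tight_layout()
plt.show()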