{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 16.932038834951456,
"eval_steps": 500,
"global_step": 136,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11650485436893204,
"grad_norm": 1.7820011377334595,
"learning_rate": 7.142857142857143e-07,
"loss": 0.4027,
"step": 1
},
{
"epoch": 0.23300970873786409,
"grad_norm": 1.7389452457427979,
"learning_rate": 1.4285714285714286e-06,
"loss": 0.402,
"step": 2
},
{
"epoch": 0.34951456310679613,
"grad_norm": 1.74604332447052,
"learning_rate": 2.1428571428571427e-06,
"loss": 0.3966,
"step": 3
},
{
"epoch": 0.46601941747572817,
"grad_norm": 1.7389558553695679,
"learning_rate": 2.8571428571428573e-06,
"loss": 0.404,
"step": 4
},
{
"epoch": 0.5825242718446602,
"grad_norm": 1.6699142456054688,
"learning_rate": 3.5714285714285718e-06,
"loss": 0.3975,
"step": 5
},
{
"epoch": 0.6990291262135923,
"grad_norm": 1.5735969543457031,
"learning_rate": 4.2857142857142855e-06,
"loss": 0.3923,
"step": 6
},
{
"epoch": 0.8155339805825242,
"grad_norm": 1.4590719938278198,
"learning_rate": 5e-06,
"loss": 0.405,
"step": 7
},
{
"epoch": 0.9320388349514563,
"grad_norm": 1.2758820056915283,
"learning_rate": 5.7142857142857145e-06,
"loss": 0.3803,
"step": 8
},
{
"epoch": 1.116504854368932,
"grad_norm": 1.3157119750976562,
"learning_rate": 6.4285714285714295e-06,
"loss": 0.7377,
"step": 9
},
{
"epoch": 1.233009708737864,
"grad_norm": 0.6604185700416565,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.3704,
"step": 10
},
{
"epoch": 1.3495145631067962,
"grad_norm": 0.5376127362251282,
"learning_rate": 7.857142857142858e-06,
"loss": 0.3616,
"step": 11
},
{
"epoch": 1.4660194174757282,
"grad_norm": 1.2837574481964111,
"learning_rate": 8.571428571428571e-06,
"loss": 0.3684,
"step": 12
},
{
"epoch": 1.5825242718446602,
"grad_norm": 1.3026747703552246,
"learning_rate": 9.285714285714288e-06,
"loss": 0.3633,
"step": 13
},
{
"epoch": 1.6990291262135924,
"grad_norm": 1.3066328763961792,
"learning_rate": 1e-05,
"loss": 0.3629,
"step": 14
},
{
"epoch": 1.8155339805825244,
"grad_norm": 1.172119379043579,
"learning_rate": 9.998342337571566e-06,
"loss": 0.3631,
"step": 15
},
{
"epoch": 1.9320388349514563,
"grad_norm": 0.9207857251167297,
"learning_rate": 9.993370449424153e-06,
"loss": 0.3506,
"step": 16
},
{
"epoch": 2.116504854368932,
"grad_norm": 1.1936880350112915,
"learning_rate": 9.985087632242634e-06,
"loss": 0.7052,
"step": 17
},
{
"epoch": 2.233009708737864,
"grad_norm": 0.589175283908844,
"learning_rate": 9.973499378072947e-06,
"loss": 0.3364,
"step": 18
},
{
"epoch": 2.349514563106796,
"grad_norm": 0.4948810040950775,
"learning_rate": 9.958613370680507e-06,
"loss": 0.3307,
"step": 19
},
{
"epoch": 2.466019417475728,
"grad_norm": 0.4881579875946045,
"learning_rate": 9.940439480455386e-06,
"loss": 0.3345,
"step": 20
},
{
"epoch": 2.58252427184466,
"grad_norm": 0.5249760150909424,
"learning_rate": 9.918989757867584e-06,
"loss": 0.329,
"step": 21
},
{
"epoch": 2.6990291262135924,
"grad_norm": 0.5218749046325684,
"learning_rate": 9.89427842547679e-06,
"loss": 0.3212,
"step": 22
},
{
"epoch": 2.8155339805825244,
"grad_norm": 0.48922351002693176,
"learning_rate": 9.866321868501914e-06,
"loss": 0.317,
"step": 23
},
{
"epoch": 2.9320388349514563,
"grad_norm": 0.3870461881160736,
"learning_rate": 9.835138623956603e-06,
"loss": 0.3131,
"step": 24
},
{
"epoch": 3.116504854368932,
"grad_norm": 0.33141207695007324,
"learning_rate": 9.80074936835801e-06,
"loss": 0.5829,
"step": 25
},
{
"epoch": 3.233009708737864,
"grad_norm": 0.32673436403274536,
"learning_rate": 9.763176904016914e-06,
"loss": 0.3009,
"step": 26
},
{
"epoch": 3.349514563106796,
"grad_norm": 0.33463019132614136,
"learning_rate": 9.722446143918307e-06,
"loss": 0.3081,
"step": 27
},
{
"epoch": 3.466019417475728,
"grad_norm": 0.29102134704589844,
"learning_rate": 9.678584095202468e-06,
"loss": 0.2774,
"step": 28
},
{
"epoch": 3.58252427184466,
"grad_norm": 0.31500688195228577,
"learning_rate": 9.631619841257477e-06,
"loss": 0.2803,
"step": 29
},
{
"epoch": 3.6990291262135924,
"grad_norm": 0.30529990792274475,
"learning_rate": 9.581584522435025e-06,
"loss": 0.2804,
"step": 30
},
{
"epoch": 3.8155339805825244,
"grad_norm": 0.28200799226760864,
"learning_rate": 9.528511315402358e-06,
"loss": 0.2687,
"step": 31
},
{
"epoch": 3.9320388349514563,
"grad_norm": 0.27144694328308105,
"learning_rate": 9.472435411143979e-06,
"loss": 0.2722,
"step": 32
},
{
"epoch": 4.116504854368932,
"grad_norm": 0.43679553270339966,
"learning_rate": 9.413393991627737e-06,
"loss": 0.526,
"step": 33
},
{
"epoch": 4.233009708737864,
"grad_norm": 0.25320038199424744,
"learning_rate": 9.351426205150778e-06,
"loss": 0.2568,
"step": 34
},
{
"epoch": 4.349514563106796,
"grad_norm": 0.22309613227844238,
"learning_rate": 9.286573140381663e-06,
"loss": 0.2472,
"step": 35
},
{
"epoch": 4.466019417475728,
"grad_norm": 0.20222555100917816,
"learning_rate": 9.218877799115929e-06,
"loss": 0.2229,
"step": 36
},
{
"epoch": 4.58252427184466,
"grad_norm": 0.21417675912380219,
"learning_rate": 9.148385067763094e-06,
"loss": 0.2326,
"step": 37
},
{
"epoch": 4.699029126213592,
"grad_norm": 0.26747238636016846,
"learning_rate": 9.075141687584056e-06,
"loss": 0.2339,
"step": 38
},
{
"epoch": 4.815533980582524,
"grad_norm": 0.2342718243598938,
"learning_rate": 8.999196223698599e-06,
"loss": 0.2304,
"step": 39
},
{
"epoch": 4.932038834951456,
"grad_norm": 0.23761263489723206,
"learning_rate": 8.920599032883553e-06,
"loss": 0.2331,
"step": 40
},
{
"epoch": 5.116504854368932,
"grad_norm": 0.3980836570262909,
"learning_rate": 8.839402230183e-06,
"loss": 0.4385,
"step": 41
},
{
"epoch": 5.233009708737864,
"grad_norm": 0.22973115742206573,
"learning_rate": 8.755659654352599e-06,
"loss": 0.2075,
"step": 42
},
{
"epoch": 5.349514563106796,
"grad_norm": 0.21187792718410492,
"learning_rate": 8.669426832160997e-06,
"loss": 0.1988,
"step": 43
},
{
"epoch": 5.466019417475728,
"grad_norm": 0.20210106670856476,
"learning_rate": 8.580760941571968e-06,
"loss": 0.1886,
"step": 44
},
{
"epoch": 5.58252427184466,
"grad_norm": 0.24134023487567902,
"learning_rate": 8.489720773831717e-06,
"loss": 0.1984,
"step": 45
},
{
"epoch": 5.699029126213592,
"grad_norm": 0.22599560022354126,
"learning_rate": 8.396366694486466e-06,
"loss": 0.1767,
"step": 46
},
{
"epoch": 5.815533980582524,
"grad_norm": 0.20777945220470428,
"learning_rate": 8.30076060335616e-06,
"loss": 0.1834,
"step": 47
},
{
"epoch": 5.932038834951456,
"grad_norm": 0.20150279998779297,
"learning_rate": 8.202965893490877e-06,
"loss": 0.1878,
"step": 48
},
{
"epoch": 6.116504854368932,
"grad_norm": 0.2539062798023224,
"learning_rate": 8.103047409137114e-06,
"loss": 0.3518,
"step": 49
},
{
"epoch": 6.233009708737864,
"grad_norm": 0.1837664395570755,
"learning_rate": 8.001071402741843e-06,
"loss": 0.152,
"step": 50
},
{
"epoch": 6.349514563106796,
"grad_norm": 0.20487381517887115,
"learning_rate": 7.897105491022819e-06,
"loss": 0.1557,
"step": 51
},
{
"epoch": 6.466019417475728,
"grad_norm": 0.24897804856300354,
"learning_rate": 7.791218610134324e-06,
"loss": 0.154,
"step": 52
},
{
"epoch": 6.58252427184466,
"grad_norm": 0.18702609837055206,
"learning_rate": 7.683480969958005e-06,
"loss": 0.1479,
"step": 53
},
{
"epoch": 6.699029126213592,
"grad_norm": 0.19429221749305725,
"learning_rate": 7.5739640075491546e-06,
"loss": 0.142,
"step": 54
},
{
"epoch": 6.815533980582524,
"grad_norm": 0.20301687717437744,
"learning_rate": 7.462740339769323e-06,
"loss": 0.1462,
"step": 55
},
{
"epoch": 6.932038834951456,
"grad_norm": 0.17031480371952057,
"learning_rate": 7.349883715136601e-06,
"loss": 0.1417,
"step": 56
},
{
"epoch": 7.116504854368932,
"grad_norm": 0.34165364503860474,
"learning_rate": 7.235468964925571e-06,
"loss": 0.2634,
"step": 57
},
{
"epoch": 7.233009708737864,
"grad_norm": 0.17344710230827332,
"learning_rate": 7.119571953549305e-06,
"loss": 0.1224,
"step": 58
},
{
"epoch": 7.349514563106796,
"grad_norm": 0.17058910429477692,
"learning_rate": 7.002269528256334e-06,
"loss": 0.1097,
"step": 59
},
{
"epoch": 7.466019417475728,
"grad_norm": 0.21691343188285828,
"learning_rate": 6.883639468175926e-06,
"loss": 0.1112,
"step": 60
},
{
"epoch": 7.58252427184466,
"grad_norm": 0.1959153115749359,
"learning_rate": 6.763760432745475e-06,
"loss": 0.0991,
"step": 61
},
{
"epoch": 7.699029126213592,
"grad_norm": 0.15394563972949982,
"learning_rate": 6.6427119095541745e-06,
"loss": 0.1107,
"step": 62
},
{
"epoch": 7.815533980582524,
"grad_norm": 0.18826133012771606,
"learning_rate": 6.520574161637591e-06,
"loss": 0.1086,
"step": 63
},
{
"epoch": 7.932038834951456,
"grad_norm": 0.21221591532230377,
"learning_rate": 6.397428174258048e-06,
"loss": 0.1062,
"step": 64
},
{
"epoch": 8.116504854368932,
"grad_norm": 0.3180226981639862,
"learning_rate": 6.273355601206143e-06,
"loss": 0.1849,
"step": 65
},
{
"epoch": 8.233009708737864,
"grad_norm": 0.17723064124584198,
"learning_rate": 6.148438710658979e-06,
"loss": 0.0826,
"step": 66
},
{
"epoch": 8.349514563106796,
"grad_norm": 0.19632180035114288,
"learning_rate": 6.022760330631006e-06,
"loss": 0.082,
"step": 67
},
{
"epoch": 8.466019417475728,
"grad_norm": 0.22644098103046417,
"learning_rate": 5.896403794053679e-06,
"loss": 0.0773,
"step": 68
},
{
"epoch": 8.58252427184466,
"grad_norm": 0.1707296371459961,
"learning_rate": 5.76945288352031e-06,
"loss": 0.078,
"step": 69
},
{
"epoch": 8.699029126213592,
"grad_norm": 0.16724658012390137,
"learning_rate": 5.641991775732756e-06,
"loss": 0.0766,
"step": 70
},
{
"epoch": 8.815533980582524,
"grad_norm": 0.18082226812839508,
"learning_rate": 5.514104985686802e-06,
"loss": 0.0776,
"step": 71
},
{
"epoch": 8.932038834951456,
"grad_norm": 0.22430583834648132,
"learning_rate": 5.385877310633233e-06,
"loss": 0.0778,
"step": 72
},
{
"epoch": 9.116504854368932,
"grad_norm": 0.17507706582546234,
"learning_rate": 5.257393773851733e-06,
"loss": 0.1409,
"step": 73
},
{
"epoch": 9.233009708737864,
"grad_norm": 0.1874881386756897,
"learning_rate": 5.1287395682749444e-06,
"loss": 0.0568,
"step": 74
},
{
"epoch": 9.349514563106796,
"grad_norm": 0.3130532205104828,
"learning_rate": 5e-06,
"loss": 0.0553,
"step": 75
},
{
"epoch": 9.466019417475728,
"grad_norm": 0.17253145575523376,
"learning_rate": 4.871260431725058e-06,
"loss": 0.0553,
"step": 76
},
{
"epoch": 9.58252427184466,
"grad_norm": 0.16302235424518585,
"learning_rate": 4.742606226148268e-06,
"loss": 0.0545,
"step": 77
},
{
"epoch": 9.699029126213592,
"grad_norm": 0.18094216287136078,
"learning_rate": 4.614122689366769e-06,
"loss": 0.0548,
"step": 78
},
{
"epoch": 9.815533980582524,
"grad_norm": 0.1743619292974472,
"learning_rate": 4.485895014313198e-06,
"loss": 0.0512,
"step": 79
},
{
"epoch": 9.932038834951456,
"grad_norm": 0.16189619898796082,
"learning_rate": 4.358008224267245e-06,
"loss": 0.0496,
"step": 80
},
{
"epoch": 10.116504854368932,
"grad_norm": 0.3171091675758362,
"learning_rate": 4.230547116479691e-06,
"loss": 0.1007,
"step": 81
},
{
"epoch": 10.233009708737864,
"grad_norm": 0.154684916138649,
"learning_rate": 4.103596205946323e-06,
"loss": 0.0414,
"step": 82
},
{
"epoch": 10.349514563106796,
"grad_norm": 0.1529693901538849,
"learning_rate": 3.977239669368998e-06,
"loss": 0.0378,
"step": 83
},
{
"epoch": 10.466019417475728,
"grad_norm": 0.18426883220672607,
"learning_rate": 3.851561289341023e-06,
"loss": 0.0392,
"step": 84
},
{
"epoch": 10.58252427184466,
"grad_norm": 0.18451394140720367,
"learning_rate": 3.726644398793857e-06,
"loss": 0.0383,
"step": 85
},
{
"epoch": 10.699029126213592,
"grad_norm": 0.17146584391593933,
"learning_rate": 3.6025718257419532e-06,
"loss": 0.0346,
"step": 86
},
{
"epoch": 10.815533980582524,
"grad_norm": 0.1295372098684311,
"learning_rate": 3.4794258383624115e-06,
"loss": 0.0347,
"step": 87
},
{
"epoch": 10.932038834951456,
"grad_norm": 0.15434859693050385,
"learning_rate": 3.3572880904458267e-06,
"loss": 0.0402,
"step": 88
},
{
"epoch": 11.116504854368932,
"grad_norm": 0.3346112370491028,
"learning_rate": 3.236239567254526e-06,
"loss": 0.0675,
"step": 89
},
{
"epoch": 11.233009708737864,
"grad_norm": 0.19830213487148285,
"learning_rate": 3.116360531824074e-06,
"loss": 0.0298,
"step": 90
},
{
"epoch": 11.349514563106796,
"grad_norm": 0.11921335756778717,
"learning_rate": 2.997730471743667e-06,
"loss": 0.0271,
"step": 91
},
{
"epoch": 11.466019417475728,
"grad_norm": 0.16639156639575958,
"learning_rate": 2.880428046450697e-06,
"loss": 0.027,
"step": 92
},
{
"epoch": 11.58252427184466,
"grad_norm": 0.20680218935012817,
"learning_rate": 2.7645310350744296e-06,
"loss": 0.0263,
"step": 93
},
{
"epoch": 11.699029126213592,
"grad_norm": 0.23083102703094482,
"learning_rate": 2.6501162848634023e-06,
"loss": 0.0272,
"step": 94
},
{
"epoch": 11.815533980582524,
"grad_norm": 0.1634109765291214,
"learning_rate": 2.537259660230679e-06,
"loss": 0.027,
"step": 95
},
{
"epoch": 11.932038834951456,
"grad_norm": 0.14731772243976593,
"learning_rate": 2.426035992450848e-06,
"loss": 0.027,
"step": 96
},
{
"epoch": 12.116504854368932,
"grad_norm": 0.18388910591602325,
"learning_rate": 2.316519030041998e-06,
"loss": 0.0456,
"step": 97
},
{
"epoch": 12.233009708737864,
"grad_norm": 0.1441938877105713,
"learning_rate": 2.2087813898656775e-06,
"loss": 0.0193,
"step": 98
},
{
"epoch": 12.349514563106796,
"grad_norm": 0.1402544379234314,
"learning_rate": 2.102894508977182e-06,
"loss": 0.0201,
"step": 99
},
{
"epoch": 12.466019417475728,
"grad_norm": 0.13963298499584198,
"learning_rate": 1.9989285972581595e-06,
"loss": 0.0208,
"step": 100
},
{
"epoch": 12.58252427184466,
"grad_norm": 0.15653668344020844,
"learning_rate": 1.896952590862886e-06,
"loss": 0.0186,
"step": 101
},
{
"epoch": 12.699029126213592,
"grad_norm": 0.1681966930627823,
"learning_rate": 1.7970341065091246e-06,
"loss": 0.0185,
"step": 102
},
{
"epoch": 12.815533980582524,
"grad_norm": 0.15047527849674225,
"learning_rate": 1.699239396643841e-06,
"loss": 0.0182,
"step": 103
},
{
"epoch": 12.932038834951456,
"grad_norm": 0.10233399271965027,
"learning_rate": 1.6036333055135345e-06,
"loss": 0.0189,
"step": 104
},
{
"epoch": 13.116504854368932,
"grad_norm": 0.21229885518550873,
"learning_rate": 1.5102792261682813e-06,
"loss": 0.0353,
"step": 105
},
{
"epoch": 13.233009708737864,
"grad_norm": 0.12984932959079742,
"learning_rate": 1.4192390584280347e-06,
"loss": 0.0178,
"step": 106
},
{
"epoch": 13.349514563106796,
"grad_norm": 0.10708415508270264,
"learning_rate": 1.330573167839005e-06,
"loss": 0.0147,
"step": 107
},
{
"epoch": 13.466019417475728,
"grad_norm": 0.11098859459161758,
"learning_rate": 1.2443403456474017e-06,
"loss": 0.0139,
"step": 108
},
{
"epoch": 13.58252427184466,
"grad_norm": 0.09491042047739029,
"learning_rate": 1.1605977698170001e-06,
"loss": 0.0142,
"step": 109
},
{
"epoch": 13.699029126213592,
"grad_norm": 0.0828198492527008,
"learning_rate": 1.0794009671164484e-06,
"loss": 0.0141,
"step": 110
},
{
"epoch": 13.815533980582524,
"grad_norm": 0.09377221018075943,
"learning_rate": 1.0008037763014033e-06,
"loss": 0.0156,
"step": 111
},
{
"epoch": 13.932038834951456,
"grad_norm": 0.1043652817606926,
"learning_rate": 9.248583124159438e-07,
"loss": 0.0147,
"step": 112
},
{
"epoch": 14.116504854368932,
"grad_norm": 0.1541670560836792,
"learning_rate": 8.516149322369055e-07,
"loss": 0.0246,
"step": 113
},
{
"epoch": 14.233009708737864,
"grad_norm": 0.07124023884534836,
"learning_rate": 7.811222008840719e-07,
"loss": 0.0139,
"step": 114
},
{
"epoch": 14.349514563106796,
"grad_norm": 0.07342305779457092,
"learning_rate": 7.13426859618338e-07,
"loss": 0.0126,
"step": 115
},
{
"epoch": 14.466019417475728,
"grad_norm": 0.06642112135887146,
"learning_rate": 6.485737948492237e-07,
"loss": 0.0119,
"step": 116
},
{
"epoch": 14.58252427184466,
"grad_norm": 0.06306291371583939,
"learning_rate": 5.866060083722624e-07,
"loss": 0.0112,
"step": 117
},
{
"epoch": 14.699029126213592,
"grad_norm": 0.06630466133356094,
"learning_rate": 5.275645888560233e-07,
"loss": 0.0121,
"step": 118
},
{
"epoch": 14.815533980582524,
"grad_norm": 0.06889720261096954,
"learning_rate": 4.71488684597643e-07,
"loss": 0.0116,
"step": 119
},
{
"epoch": 14.932038834951456,
"grad_norm": 0.06075473129749298,
"learning_rate": 4.184154775649768e-07,
"loss": 0.014,
"step": 120
},
{
"epoch": 15.116504854368932,
"grad_norm": 0.07335468381643295,
"learning_rate": 3.683801587425251e-07,
"loss": 0.0269,
"step": 121
},
{
"epoch": 15.233009708737864,
"grad_norm": 0.13383059203624725,
"learning_rate": 3.214159047975324e-07,
"loss": 0.0105,
"step": 122
},
{
"epoch": 15.349514563106796,
"grad_norm": 0.06708887219429016,
"learning_rate": 2.7755385608169374e-07,
"loss": 0.011,
"step": 123
},
{
"epoch": 15.466019417475728,
"grad_norm": 0.061848320066928864,
"learning_rate": 2.368230959830875e-07,
"loss": 0.012,
"step": 124
},
{
"epoch": 15.58252427184466,
"grad_norm": 0.0594082772731781,
"learning_rate": 1.992506316419912e-07,
"loss": 0.0113,
"step": 125
},
{
"epoch": 15.699029126213592,
"grad_norm": 0.06530940532684326,
"learning_rate": 1.6486137604339813e-07,
"loss": 0.0117,
"step": 126
},
{
"epoch": 15.815533980582524,
"grad_norm": 0.07374989986419678,
"learning_rate": 1.3367813149808728e-07,
"loss": 0.0106,
"step": 127
},
{
"epoch": 15.932038834951456,
"grad_norm": 0.0554700568318367,
"learning_rate": 1.0572157452321097e-07,
"loss": 0.01,
"step": 128
},
{
"epoch": 16.116504854368934,
"grad_norm": 0.11609751731157303,
"learning_rate": 8.101024213241826e-08,
"loss": 0.0216,
"step": 129
},
{
"epoch": 16.233009708737864,
"grad_norm": 0.05223438888788223,
"learning_rate": 5.9560519544614725e-08,
"loss": 0.0108,
"step": 130
},
{
"epoch": 16.349514563106798,
"grad_norm": 0.05844803899526596,
"learning_rate": 4.138662931949255e-08,
"loss": 0.0105,
"step": 131
},
{
"epoch": 16.466019417475728,
"grad_norm": 0.052356619387865067,
"learning_rate": 2.6500621927054716e-08,
"loss": 0.0105,
"step": 132
},
{
"epoch": 16.58252427184466,
"grad_norm": 0.05942930653691292,
"learning_rate": 1.4912367757366485e-08,
"loss": 0.0107,
"step": 133
},
{
"epoch": 16.699029126213592,
"grad_norm": 0.05563804879784584,
"learning_rate": 6.629550575847355e-09,
"loss": 0.0115,
"step": 134
},
{
"epoch": 16.815533980582526,
"grad_norm": 0.05824480950832367,
"learning_rate": 1.657662428434792e-09,
"loss": 0.0113,
"step": 135
},
{
"epoch": 16.932038834951456,
"grad_norm": 0.05532587692141533,
"learning_rate": 0.0,
"loss": 0.0111,
"step": 136
},
{
"epoch": 16.932038834951456,
"step": 136,
"total_flos": 493320970436608.0,
"train_loss": 0.15380282930838055,
"train_runtime": 33702.47,
"train_samples_per_second": 0.414,
"train_steps_per_second": 0.004
}
],
"logging_steps": 1,
"max_steps": 136,
"num_input_tokens_seen": 0,
"num_train_epochs": 17,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 493320970436608.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}