{ "best_metric": null, "best_model_checkpoint": null, "epoch": 16.93658536585366, "eval_steps": 500, "global_step": 136, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11707317073170732, "grad_norm": 1.713741660118103, "learning_rate": 7.142857142857143e-07, "loss": 0.391, "step": 1 }, { "epoch": 0.23414634146341465, "grad_norm": 1.7321815490722656, "learning_rate": 1.4285714285714286e-06, "loss": 0.3876, "step": 2 }, { "epoch": 0.35121951219512193, "grad_norm": 1.869097352027893, "learning_rate": 2.1428571428571427e-06, "loss": 0.4163, "step": 3 }, { "epoch": 0.4682926829268293, "grad_norm": 1.741655707359314, "learning_rate": 2.8571428571428573e-06, "loss": 0.4025, "step": 4 }, { "epoch": 0.5853658536585366, "grad_norm": 1.5887295007705688, "learning_rate": 3.5714285714285718e-06, "loss": 0.383, "step": 5 }, { "epoch": 0.7024390243902439, "grad_norm": 1.5869324207305908, "learning_rate": 4.2857142857142855e-06, "loss": 0.3968, "step": 6 }, { "epoch": 0.8195121951219512, "grad_norm": 1.3928492069244385, "learning_rate": 5e-06, "loss": 0.3868, "step": 7 }, { "epoch": 0.9365853658536586, "grad_norm": 1.317595362663269, "learning_rate": 5.7142857142857145e-06, "loss": 0.3871, "step": 8 }, { "epoch": 1.1170731707317074, "grad_norm": 1.3958970308303833, "learning_rate": 6.4285714285714295e-06, "loss": 0.7529, "step": 9 }, { "epoch": 1.2341463414634146, "grad_norm": 0.588860273361206, "learning_rate": 7.1428571428571436e-06, "loss": 0.361, "step": 10 }, { "epoch": 1.3512195121951218, "grad_norm": 0.5455438494682312, "learning_rate": 7.857142857142858e-06, "loss": 0.3647, "step": 11 }, { "epoch": 1.4682926829268292, "grad_norm": 1.2507004737854004, "learning_rate": 8.571428571428571e-06, "loss": 0.3795, "step": 12 }, { "epoch": 1.5853658536585367, "grad_norm": 1.3338210582733154, "learning_rate": 9.285714285714288e-06, "loss": 0.3783, "step": 13 }, { "epoch": 1.7024390243902439, "grad_norm": 1.2802133560180664, "learning_rate": 1e-05, "loss": 0.3637, "step": 14 }, { "epoch": 1.819512195121951, "grad_norm": 1.1926186084747314, "learning_rate": 9.998342337571566e-06, "loss": 0.3663, "step": 15 }, { "epoch": 1.9365853658536585, "grad_norm": 0.8796733021736145, "learning_rate": 9.993370449424153e-06, "loss": 0.3446, "step": 16 }, { "epoch": 2.1170731707317074, "grad_norm": 1.2581405639648438, "learning_rate": 9.985087632242634e-06, "loss": 0.6666, "step": 17 }, { "epoch": 2.234146341463415, "grad_norm": 0.6097764372825623, "learning_rate": 9.973499378072947e-06, "loss": 0.3404, "step": 18 }, { "epoch": 2.351219512195122, "grad_norm": 0.5183754563331604, "learning_rate": 9.958613370680507e-06, "loss": 0.331, "step": 19 }, { "epoch": 2.4682926829268292, "grad_norm": 0.5353206992149353, "learning_rate": 9.940439480455386e-06, "loss": 0.3364, "step": 20 }, { "epoch": 2.5853658536585367, "grad_norm": 0.5248769521713257, "learning_rate": 9.918989757867584e-06, "loss": 0.3289, "step": 21 }, { "epoch": 2.7024390243902436, "grad_norm": 0.5104326009750366, "learning_rate": 9.89427842547679e-06, "loss": 0.3207, "step": 22 }, { "epoch": 2.819512195121951, "grad_norm": 0.429746150970459, "learning_rate": 9.866321868501914e-06, "loss": 0.3354, "step": 23 }, { "epoch": 2.9365853658536585, "grad_norm": 0.4242963492870331, "learning_rate": 9.835138623956603e-06, "loss": 0.3149, "step": 24 }, { "epoch": 3.1170731707317074, "grad_norm": 0.8024505972862244, "learning_rate": 9.80074936835801e-06, "loss": 0.6097, "step": 25 }, { "epoch": 
3.234146341463415, "grad_norm": 0.3315274715423584, "learning_rate": 9.763176904016914e-06, "loss": 0.3016, "step": 26 }, { "epoch": 3.351219512195122, "grad_norm": 0.3314255475997925, "learning_rate": 9.722446143918307e-06, "loss": 0.2876, "step": 27 }, { "epoch": 3.4682926829268292, "grad_norm": 0.30298393964767456, "learning_rate": 9.678584095202468e-06, "loss": 0.2912, "step": 28 }, { "epoch": 3.5853658536585367, "grad_norm": 0.3056536614894867, "learning_rate": 9.631619841257477e-06, "loss": 0.2855, "step": 29 }, { "epoch": 3.7024390243902436, "grad_norm": 0.3028172552585602, "learning_rate": 9.581584522435025e-06, "loss": 0.2848, "step": 30 }, { "epoch": 3.819512195121951, "grad_norm": 0.3157103657722473, "learning_rate": 9.528511315402358e-06, "loss": 0.2891, "step": 31 }, { "epoch": 3.9365853658536585, "grad_norm": 0.28788891434669495, "learning_rate": 9.472435411143979e-06, "loss": 0.274, "step": 32 }, { "epoch": 4.117073170731707, "grad_norm": 0.562315821647644, "learning_rate": 9.413393991627737e-06, "loss": 0.5517, "step": 33 }, { "epoch": 4.234146341463415, "grad_norm": 0.24586421251296997, "learning_rate": 9.351426205150778e-06, "loss": 0.2557, "step": 34 }, { "epoch": 4.351219512195122, "grad_norm": 0.24129828810691833, "learning_rate": 9.286573140381663e-06, "loss": 0.2506, "step": 35 }, { "epoch": 4.46829268292683, "grad_norm": 0.2212359756231308, "learning_rate": 9.218877799115929e-06, "loss": 0.2583, "step": 36 }, { "epoch": 4.585365853658536, "grad_norm": 0.20721423625946045, "learning_rate": 9.148385067763094e-06, "loss": 0.2509, "step": 37 }, { "epoch": 4.702439024390244, "grad_norm": 0.22884435951709747, "learning_rate": 9.075141687584056e-06, "loss": 0.248, "step": 38 }, { "epoch": 4.819512195121951, "grad_norm": 0.24287782609462738, "learning_rate": 8.999196223698599e-06, "loss": 0.2443, "step": 39 }, { "epoch": 4.9365853658536585, "grad_norm": 0.2220289707183838, "learning_rate": 8.920599032883553e-06, "loss": 0.235, "step": 40 }, { "epoch": 5.117073170731707, "grad_norm": 0.4429185092449188, "learning_rate": 8.839402230183e-06, "loss": 0.4562, "step": 41 }, { "epoch": 5.234146341463415, "grad_norm": 0.2300422042608261, "learning_rate": 8.755659654352599e-06, "loss": 0.2245, "step": 42 }, { "epoch": 5.351219512195122, "grad_norm": 0.21638786792755127, "learning_rate": 8.669426832160997e-06, "loss": 0.2268, "step": 43 }, { "epoch": 5.46829268292683, "grad_norm": 0.2475624680519104, "learning_rate": 8.580760941571968e-06, "loss": 0.2026, "step": 44 }, { "epoch": 5.585365853658536, "grad_norm": 0.2283526360988617, "learning_rate": 8.489720773831717e-06, "loss": 0.203, "step": 45 }, { "epoch": 5.702439024390244, "grad_norm": 0.19043877720832825, "learning_rate": 8.396366694486466e-06, "loss": 0.2039, "step": 46 }, { "epoch": 5.819512195121951, "grad_norm": 0.2230452299118042, "learning_rate": 8.30076060335616e-06, "loss": 0.1957, "step": 47 }, { "epoch": 5.9365853658536585, "grad_norm": 0.2054775357246399, "learning_rate": 8.202965893490877e-06, "loss": 0.202, "step": 48 }, { "epoch": 6.117073170731707, "grad_norm": 0.19972442090511322, "learning_rate": 8.103047409137114e-06, "loss": 0.3737, "step": 49 }, { "epoch": 6.234146341463415, "grad_norm": 0.20269611477851868, "learning_rate": 8.001071402741843e-06, "loss": 0.1804, "step": 50 }, { "epoch": 6.351219512195122, "grad_norm": 0.2113686352968216, "learning_rate": 7.897105491022819e-06, "loss": 0.1755, "step": 51 }, { "epoch": 6.46829268292683, "grad_norm": 0.19944241642951965, "learning_rate": 7.791218610134324e-06, 
"loss": 0.1576, "step": 52 }, { "epoch": 6.585365853658536, "grad_norm": 0.1904984563589096, "learning_rate": 7.683480969958005e-06, "loss": 0.1586, "step": 53 }, { "epoch": 6.702439024390244, "grad_norm": 0.19858157634735107, "learning_rate": 7.5739640075491546e-06, "loss": 0.1628, "step": 54 }, { "epoch": 6.819512195121951, "grad_norm": 0.1883428692817688, "learning_rate": 7.462740339769323e-06, "loss": 0.1632, "step": 55 }, { "epoch": 6.9365853658536585, "grad_norm": 0.17427878081798553, "learning_rate": 7.349883715136601e-06, "loss": 0.1653, "step": 56 }, { "epoch": 7.117073170731707, "grad_norm": 0.37863096594810486, "learning_rate": 7.235468964925571e-06, "loss": 0.3005, "step": 57 }, { "epoch": 7.234146341463415, "grad_norm": 0.1933353841304779, "learning_rate": 7.119571953549305e-06, "loss": 0.1439, "step": 58 }, { "epoch": 7.351219512195122, "grad_norm": 0.1970248520374298, "learning_rate": 7.002269528256334e-06, "loss": 0.132, "step": 59 }, { "epoch": 7.46829268292683, "grad_norm": 0.19500941038131714, "learning_rate": 6.883639468175926e-06, "loss": 0.1264, "step": 60 }, { "epoch": 7.585365853658536, "grad_norm": 0.20704524219036102, "learning_rate": 6.763760432745475e-06, "loss": 0.135, "step": 61 }, { "epoch": 7.702439024390244, "grad_norm": 0.21069225668907166, "learning_rate": 6.6427119095541745e-06, "loss": 0.1168, "step": 62 }, { "epoch": 7.819512195121951, "grad_norm": 0.185212641954422, "learning_rate": 6.520574161637591e-06, "loss": 0.1252, "step": 63 }, { "epoch": 7.9365853658536585, "grad_norm": 0.18015943467617035, "learning_rate": 6.397428174258048e-06, "loss": 0.1249, "step": 64 }, { "epoch": 8.117073170731707, "grad_norm": 0.5003377199172974, "learning_rate": 6.273355601206143e-06, "loss": 0.2434, "step": 65 }, { "epoch": 8.234146341463415, "grad_norm": 0.2011335790157318, "learning_rate": 6.148438710658979e-06, "loss": 0.0987, "step": 66 }, { "epoch": 8.351219512195122, "grad_norm": 0.1703123301267624, "learning_rate": 6.022760330631006e-06, "loss": 0.1057, "step": 67 }, { "epoch": 8.46829268292683, "grad_norm": 0.17905612289905548, "learning_rate": 5.896403794053679e-06, "loss": 0.1019, "step": 68 }, { "epoch": 8.585365853658537, "grad_norm": 0.22483955323696136, "learning_rate": 5.76945288352031e-06, "loss": 0.1018, "step": 69 }, { "epoch": 8.702439024390245, "grad_norm": 0.18365544080734253, "learning_rate": 5.641991775732756e-06, "loss": 0.0906, "step": 70 }, { "epoch": 8.819512195121952, "grad_norm": 0.16530556976795197, "learning_rate": 5.514104985686802e-06, "loss": 0.0961, "step": 71 }, { "epoch": 8.93658536585366, "grad_norm": 0.18940307199954987, "learning_rate": 5.385877310633233e-06, "loss": 0.0921, "step": 72 }, { "epoch": 9.117073170731707, "grad_norm": 0.3870483934879303, "learning_rate": 5.257393773851733e-06, "loss": 0.1844, "step": 73 }, { "epoch": 9.234146341463415, "grad_norm": 0.19212201237678528, "learning_rate": 5.1287395682749444e-06, "loss": 0.0799, "step": 74 }, { "epoch": 9.351219512195122, "grad_norm": 0.19690005481243134, "learning_rate": 5e-06, "loss": 0.0757, "step": 75 }, { "epoch": 9.46829268292683, "grad_norm": 0.18531833589076996, "learning_rate": 4.871260431725058e-06, "loss": 0.0746, "step": 76 }, { "epoch": 9.585365853658537, "grad_norm": 0.2481042891740799, "learning_rate": 4.742606226148268e-06, "loss": 0.064, "step": 77 }, { "epoch": 9.702439024390245, "grad_norm": 0.1442754566669464, "learning_rate": 4.614122689366769e-06, "loss": 0.0741, "step": 78 }, { "epoch": 9.819512195121952, "grad_norm": 0.1746453046798706, 
"learning_rate": 4.485895014313198e-06, "loss": 0.0712, "step": 79 }, { "epoch": 9.93658536585366, "grad_norm": 0.19312545657157898, "learning_rate": 4.358008224267245e-06, "loss": 0.0747, "step": 80 }, { "epoch": 10.117073170731707, "grad_norm": 0.4135829508304596, "learning_rate": 4.230547116479691e-06, "loss": 0.1344, "step": 81 }, { "epoch": 10.234146341463415, "grad_norm": 0.14636236429214478, "learning_rate": 4.103596205946323e-06, "loss": 0.0564, "step": 82 }, { "epoch": 10.351219512195122, "grad_norm": 0.18952858448028564, "learning_rate": 3.977239669368998e-06, "loss": 0.0567, "step": 83 }, { "epoch": 10.46829268292683, "grad_norm": 0.23925091326236725, "learning_rate": 3.851561289341023e-06, "loss": 0.0575, "step": 84 }, { "epoch": 10.585365853658537, "grad_norm": 0.20485427975654602, "learning_rate": 3.726644398793857e-06, "loss": 0.0571, "step": 85 }, { "epoch": 10.702439024390245, "grad_norm": 0.15516437590122223, "learning_rate": 3.6025718257419532e-06, "loss": 0.0515, "step": 86 }, { "epoch": 10.819512195121952, "grad_norm": 0.20174065232276917, "learning_rate": 3.4794258383624115e-06, "loss": 0.0538, "step": 87 }, { "epoch": 10.93658536585366, "grad_norm": 0.23884522914886475, "learning_rate": 3.3572880904458267e-06, "loss": 0.0557, "step": 88 }, { "epoch": 11.117073170731707, "grad_norm": 0.3778754770755768, "learning_rate": 3.236239567254526e-06, "loss": 0.1121, "step": 89 }, { "epoch": 11.234146341463415, "grad_norm": 0.15436743199825287, "learning_rate": 3.116360531824074e-06, "loss": 0.0422, "step": 90 }, { "epoch": 11.351219512195122, "grad_norm": 0.14751587808132172, "learning_rate": 2.997730471743667e-06, "loss": 0.0417, "step": 91 }, { "epoch": 11.46829268292683, "grad_norm": 0.23487572371959686, "learning_rate": 2.880428046450697e-06, "loss": 0.0396, "step": 92 }, { "epoch": 11.585365853658537, "grad_norm": 0.2069467157125473, "learning_rate": 2.7645310350744296e-06, "loss": 0.0426, "step": 93 }, { "epoch": 11.702439024390245, "grad_norm": 0.15420763194561005, "learning_rate": 2.6501162848634023e-06, "loss": 0.0413, "step": 94 }, { "epoch": 11.819512195121952, "grad_norm": 0.1196109727025032, "learning_rate": 2.537259660230679e-06, "loss": 0.0404, "step": 95 }, { "epoch": 11.93658536585366, "grad_norm": 0.1467064768075943, "learning_rate": 2.426035992450848e-06, "loss": 0.0407, "step": 96 }, { "epoch": 12.117073170731707, "grad_norm": 0.19361571967601776, "learning_rate": 2.316519030041998e-06, "loss": 0.0791, "step": 97 }, { "epoch": 12.234146341463415, "grad_norm": 0.13011707365512848, "learning_rate": 2.2087813898656775e-06, "loss": 0.0346, "step": 98 }, { "epoch": 12.351219512195122, "grad_norm": 0.10667014867067337, "learning_rate": 2.102894508977182e-06, "loss": 0.0327, "step": 99 }, { "epoch": 12.46829268292683, "grad_norm": 0.14411939680576324, "learning_rate": 1.9989285972581595e-06, "loss": 0.0326, "step": 100 }, { "epoch": 12.585365853658537, "grad_norm": 0.19949135184288025, "learning_rate": 1.896952590862886e-06, "loss": 0.0343, "step": 101 }, { "epoch": 12.702439024390245, "grad_norm": 0.18770211935043335, "learning_rate": 1.7970341065091246e-06, "loss": 0.031, "step": 102 }, { "epoch": 12.819512195121952, "grad_norm": 0.1456199288368225, "learning_rate": 1.699239396643841e-06, "loss": 0.0283, "step": 103 }, { "epoch": 12.93658536585366, "grad_norm": 0.140044167637825, "learning_rate": 1.6036333055135345e-06, "loss": 0.0331, "step": 104 }, { "epoch": 13.117073170731707, "grad_norm": 0.3027644157409668, "learning_rate": 1.5102792261682813e-06, "loss": 
0.0651, "step": 105 }, { "epoch": 13.234146341463415, "grad_norm": 0.16215696930885315, "learning_rate": 1.4192390584280347e-06, "loss": 0.0296, "step": 106 }, { "epoch": 13.351219512195122, "grad_norm": 0.1622275710105896, "learning_rate": 1.330573167839005e-06, "loss": 0.0265, "step": 107 }, { "epoch": 13.46829268292683, "grad_norm": 0.13922300934791565, "learning_rate": 1.2443403456474017e-06, "loss": 0.0277, "step": 108 }, { "epoch": 13.585365853658537, "grad_norm": 0.08746293187141418, "learning_rate": 1.1605977698170001e-06, "loss": 0.0237, "step": 109 }, { "epoch": 13.702439024390245, "grad_norm": 0.09428137540817261, "learning_rate": 1.0794009671164484e-06, "loss": 0.0255, "step": 110 }, { "epoch": 13.819512195121952, "grad_norm": 0.10664989054203033, "learning_rate": 1.0008037763014033e-06, "loss": 0.0265, "step": 111 }, { "epoch": 13.93658536585366, "grad_norm": 0.12778587639331818, "learning_rate": 9.248583124159438e-07, "loss": 0.0265, "step": 112 }, { "epoch": 14.117073170731707, "grad_norm": 0.25158995389938354, "learning_rate": 8.516149322369055e-07, "loss": 0.0504, "step": 113 }, { "epoch": 14.234146341463415, "grad_norm": 0.08846470713615417, "learning_rate": 7.811222008840719e-07, "loss": 0.0218, "step": 114 }, { "epoch": 14.351219512195122, "grad_norm": 0.08048608899116516, "learning_rate": 7.13426859618338e-07, "loss": 0.0234, "step": 115 }, { "epoch": 14.46829268292683, "grad_norm": 0.08340264111757278, "learning_rate": 6.485737948492237e-07, "loss": 0.0237, "step": 116 }, { "epoch": 14.585365853658537, "grad_norm": 0.08076529949903488, "learning_rate": 5.866060083722624e-07, "loss": 0.0212, "step": 117 }, { "epoch": 14.702439024390245, "grad_norm": 0.08138881623744965, "learning_rate": 5.275645888560233e-07, "loss": 0.0221, "step": 118 }, { "epoch": 14.819512195121952, "grad_norm": 0.08238395303487778, "learning_rate": 4.71488684597643e-07, "loss": 0.0222, "step": 119 }, { "epoch": 14.93658536585366, "grad_norm": 0.08457454293966293, "learning_rate": 4.184154775649768e-07, "loss": 0.0231, "step": 120 }, { "epoch": 15.117073170731707, "grad_norm": 0.21646344661712646, "learning_rate": 3.683801587425251e-07, "loss": 0.0459, "step": 121 }, { "epoch": 15.234146341463415, "grad_norm": 0.08210847526788712, "learning_rate": 3.214159047975324e-07, "loss": 0.0243, "step": 122 }, { "epoch": 15.351219512195122, "grad_norm": 0.0840378999710083, "learning_rate": 2.7755385608169374e-07, "loss": 0.0205, "step": 123 }, { "epoch": 15.46829268292683, "grad_norm": 0.07520610839128494, "learning_rate": 2.368230959830875e-07, "loss": 0.0196, "step": 124 }, { "epoch": 15.585365853658537, "grad_norm": 0.07876352220773697, "learning_rate": 1.992506316419912e-07, "loss": 0.0204, "step": 125 }, { "epoch": 15.702439024390245, "grad_norm": 0.07992194592952728, "learning_rate": 1.6486137604339813e-07, "loss": 0.0211, "step": 126 }, { "epoch": 15.819512195121952, "grad_norm": 0.08172820508480072, "learning_rate": 1.3367813149808728e-07, "loss": 0.0229, "step": 127 }, { "epoch": 15.93658536585366, "grad_norm": 0.07874112576246262, "learning_rate": 1.0572157452321097e-07, "loss": 0.0221, "step": 128 }, { "epoch": 16.117073170731707, "grad_norm": 0.19976511597633362, "learning_rate": 8.101024213241826e-08, "loss": 0.0391, "step": 129 }, { "epoch": 16.234146341463415, "grad_norm": 0.0725630447268486, "learning_rate": 5.9560519544614725e-08, "loss": 0.0225, "step": 130 }, { "epoch": 16.351219512195122, "grad_norm": 0.07620209455490112, "learning_rate": 4.138662931949255e-08, "loss": 0.0197, "step": 131 
}, { "epoch": 16.46829268292683, "grad_norm": 0.06960424035787582, "learning_rate": 2.6500621927054716e-08, "loss": 0.0205, "step": 132 }, { "epoch": 16.585365853658537, "grad_norm": 0.0744452178478241, "learning_rate": 1.4912367757366485e-08, "loss": 0.0204, "step": 133 }, { "epoch": 16.702439024390245, "grad_norm": 0.07866977900266647, "learning_rate": 6.629550575847355e-09, "loss": 0.0212, "step": 134 }, { "epoch": 16.819512195121952, "grad_norm": 0.07277688384056091, "learning_rate": 1.657662428434792e-09, "loss": 0.0186, "step": 135 }, { "epoch": 16.93658536585366, "grad_norm": 0.06631285697221756, "learning_rate": 0.0, "loss": 0.0242, "step": 136 }, { "epoch": 16.93658536585366, "step": 136, "total_flos": 483897984679936.0, "train_loss": 0.16699845409568617, "train_runtime": 26245.2704, "train_samples_per_second": 0.531, "train_steps_per_second": 0.005 } ], "logging_steps": 1, "max_steps": 136, "num_input_tokens_seen": 0, "num_train_epochs": 17, "save_steps": 10000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 483897984679936.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }