|
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.974910394265233,
  "eval_steps": 500,
  "global_step": 312,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.009557945041816009, "grad_norm": 4.787891864776611, "learning_rate": 0.0, "loss": 1.2182, "num_tokens": 3087662.0, "step": 1},
    {"epoch": 0.019115890083632018, "grad_norm": 4.7660908699035645, "learning_rate": 1.5625e-06, "loss": 1.2133, "num_tokens": 6211852.0, "step": 2},
    {"epoch": 0.02867383512544803, "grad_norm": 4.726694107055664, "learning_rate": 3.125e-06, "loss": 1.2106, "num_tokens": 9308149.0, "step": 3},
    {"epoch": 0.038231780167264036, "grad_norm": 4.008660793304443, "learning_rate": 4.6875000000000004e-06, "loss": 1.1473, "num_tokens": 12406212.0, "step": 4},
    {"epoch": 0.04778972520908005, "grad_norm": 2.9268953800201416, "learning_rate": 6.25e-06, "loss": 1.0694, "num_tokens": 15498886.0, "step": 5},
    {"epoch": 0.05734767025089606, "grad_norm": 1.6218466758728027, "learning_rate": 7.8125e-06, "loss": 0.9229, "num_tokens": 18594568.0, "step": 6},
    {"epoch": 0.06690561529271206, "grad_norm": 1.232273817062378, "learning_rate": 9.375000000000001e-06, "loss": 0.8977, "num_tokens": 21685747.0, "step": 7},
    {"epoch": 0.07646356033452807, "grad_norm": 1.352134346961975, "learning_rate": 1.09375e-05, "loss": 0.8468, "num_tokens": 24802133.0, "step": 8},
    {"epoch": 0.08602150537634409, "grad_norm": 1.1753616333007812, "learning_rate": 1.25e-05, "loss": 0.8356, "num_tokens": 27921758.0, "step": 9},
    {"epoch": 0.0955794504181601, "grad_norm": 0.841299831867218, "learning_rate": 1.4062500000000001e-05, "loss": 0.8045, "num_tokens": 30982499.0, "step": 10},
    {"epoch": 0.10513739545997611, "grad_norm": 0.7823367118835449, "learning_rate": 1.5625e-05, "loss": 0.7772, "num_tokens": 34120523.0, "step": 11},
    {"epoch": 0.11469534050179211, "grad_norm": 0.6168179512023926, "learning_rate": 1.71875e-05, "loss": 0.7493, "num_tokens": 37263940.0, "step": 12},
    {"epoch": 0.12425328554360812, "grad_norm": 0.4352397918701172, "learning_rate": 1.8750000000000002e-05, "loss": 0.7391, "num_tokens": 40419105.0, "step": 13},
    {"epoch": 0.13381123058542413, "grad_norm": 0.4141225516796112, "learning_rate": 2.0312500000000002e-05, "loss": 0.7168, "num_tokens": 43544209.0, "step": 14},
    {"epoch": 0.14336917562724014, "grad_norm": 0.3596664071083069, "learning_rate": 2.1875e-05, "loss": 0.7096, "num_tokens": 46640184.0, "step": 15},
    {"epoch": 0.15292712066905614, "grad_norm": 0.32062292098999023, "learning_rate": 2.34375e-05, "loss": 0.6878, "num_tokens": 49723879.0, "step": 16},
    {"epoch": 0.16248506571087215, "grad_norm": 0.2776451110839844, "learning_rate": 2.5e-05, "loss": 0.685, "num_tokens": 52840221.0, "step": 17},
    {"epoch": 0.17204301075268819, "grad_norm": 0.2792057693004608, "learning_rate": 2.6562500000000002e-05, "loss": 0.6683, "num_tokens": 55938115.0, "step": 18},
    {"epoch": 0.1816009557945042, "grad_norm": 0.24952948093414307, "learning_rate": 2.8125000000000003e-05, "loss": 0.6643, "num_tokens": 59088421.0, "step": 19},
    {"epoch": 0.1911589008363202, "grad_norm": 0.23966556787490845, "learning_rate": 2.96875e-05, "loss": 0.6568, "num_tokens": 62120009.0, "step": 20},
    {"epoch": 0.2007168458781362, "grad_norm": 0.29473549127578735, "learning_rate": 3.125e-05, "loss": 0.6443, "num_tokens": 65202270.0, "step": 21},
    {"epoch": 0.21027479091995221, "grad_norm": 0.25088849663734436, "learning_rate": 3.2812500000000005e-05, "loss": 0.64, "num_tokens": 68278086.0, "step": 22},
    {"epoch": 0.21983273596176822, "grad_norm": 0.26761022210121155, "learning_rate": 3.4375e-05, "loss": 0.6406, "num_tokens": 71423006.0, "step": 23},
    {"epoch": 0.22939068100358423, "grad_norm": 0.2646687626838684, "learning_rate": 3.59375e-05, "loss": 0.6287, "num_tokens": 74542778.0, "step": 24},
    {"epoch": 0.23894862604540024, "grad_norm": 0.28765901923179626, "learning_rate": 3.7500000000000003e-05, "loss": 0.6316, "num_tokens": 77676355.0, "step": 25},
    {"epoch": 0.24850657108721624, "grad_norm": 0.28572410345077515, "learning_rate": 3.90625e-05, "loss": 0.6208, "num_tokens": 80757397.0, "step": 26},
    {"epoch": 0.25806451612903225, "grad_norm": 0.3025137782096863, "learning_rate": 4.0625000000000005e-05, "loss": 0.6206, "num_tokens": 83848581.0, "step": 27},
    {"epoch": 0.26762246117084826, "grad_norm": 0.30501461029052734, "learning_rate": 4.21875e-05, "loss": 0.61, "num_tokens": 86949103.0, "step": 28},
    {"epoch": 0.27718040621266427, "grad_norm": 0.24303048849105835, "learning_rate": 4.375e-05, "loss": 0.6062, "num_tokens": 90043461.0, "step": 29},
    {"epoch": 0.2867383512544803, "grad_norm": 0.4097803235054016, "learning_rate": 4.5312500000000004e-05, "loss": 0.608, "num_tokens": 93175833.0, "step": 30},
    {"epoch": 0.2962962962962963, "grad_norm": 0.22506467998027802, "learning_rate": 4.6875e-05, "loss": 0.6043, "num_tokens": 96298838.0, "step": 31},
    {"epoch": 0.3058542413381123, "grad_norm": 0.4554276168346405, "learning_rate": 4.8437500000000005e-05, "loss": 0.6039, "num_tokens": 99387594.0, "step": 32},
    {"epoch": 0.3154121863799283, "grad_norm": 0.24191643297672272, "learning_rate": 5e-05, "loss": 0.5919, "num_tokens": 102495943.0, "step": 33},
    {"epoch": 0.3249701314217443, "grad_norm": 0.497525155544281, "learning_rate": 5e-05, "loss": 0.5919, "num_tokens": 105605746.0, "step": 34},
    {"epoch": 0.3345280764635603, "grad_norm": 0.39946335554122925, "learning_rate": 5e-05, "loss": 0.5966, "num_tokens": 108704371.0, "step": 35},
    {"epoch": 0.34408602150537637, "grad_norm": 0.3315200209617615, "learning_rate": 5e-05, "loss": 0.58, "num_tokens": 111803239.0, "step": 36},
    {"epoch": 0.3536439665471924, "grad_norm": 0.38535887002944946, "learning_rate": 5e-05, "loss": 0.5836, "num_tokens": 114901473.0, "step": 37},
    {"epoch": 0.3632019115890084, "grad_norm": 0.30798089504241943, "learning_rate": 5e-05, "loss": 0.5821, "num_tokens": 117998143.0, "step": 38},
    {"epoch": 0.3727598566308244, "grad_norm": 0.3112103343009949, "learning_rate": 5e-05, "loss": 0.5789, "num_tokens": 121151007.0, "step": 39},
    {"epoch": 0.3823178016726404, "grad_norm": 0.28211474418640137, "learning_rate": 5e-05, "loss": 0.576, "num_tokens": 124244420.0, "step": 40},
    {"epoch": 0.3918757467144564, "grad_norm": 0.29719066619873047, "learning_rate": 5e-05, "loss": 0.5776, "num_tokens": 127347282.0, "step": 41},
    {"epoch": 0.4014336917562724, "grad_norm": 0.28514203429222107, "learning_rate": 5e-05, "loss": 0.5739, "num_tokens": 130425105.0, "step": 42},
    {"epoch": 0.4109916367980884, "grad_norm": 0.24759361147880554, "learning_rate": 5e-05, "loss": 0.5614, "num_tokens": 133566882.0, "step": 43},
    {"epoch": 0.42054958183990443, "grad_norm": 0.3673174977302551, "learning_rate": 5e-05, "loss": 0.5755, "num_tokens": 136693576.0, "step": 44},
    {"epoch": 0.43010752688172044, "grad_norm": 0.2918493151664734, "learning_rate": 5e-05, "loss": 0.5741, "num_tokens": 139836069.0, "step": 45},
    {"epoch": 0.43966547192353644, "grad_norm": 0.20241530239582062, "learning_rate": 5e-05, "loss": 0.5659, "num_tokens": 142903826.0, "step": 46},
    {"epoch": 0.44922341696535245, "grad_norm": 0.36211881041526794, "learning_rate": 5e-05, "loss": 0.5652, "num_tokens": 146013234.0, "step": 47},
    {"epoch": 0.45878136200716846, "grad_norm": 0.27178287506103516, "learning_rate": 5e-05, "loss": 0.5639, "num_tokens": 149106780.0, "step": 48},
    {"epoch": 0.46833930704898447, "grad_norm": 0.22404040396213531, "learning_rate": 5e-05, "loss": 0.5588, "num_tokens": 152240290.0, "step": 49},
    {"epoch": 0.4778972520908005, "grad_norm": 0.267503023147583, "learning_rate": 5e-05, "loss": 0.5634, "num_tokens": 155343192.0, "step": 50},
    {"epoch": 0.4874551971326165, "grad_norm": 0.22685855627059937, "learning_rate": 5e-05, "loss": 0.5624, "num_tokens": 158483136.0, "step": 51},
    {"epoch": 0.4970131421744325, "grad_norm": 0.3110021948814392, "learning_rate": 5e-05, "loss": 0.5543, "num_tokens": 161533766.0, "step": 52},
    {"epoch": 0.5065710872162486, "grad_norm": 0.23207558691501617, "learning_rate": 5e-05, "loss": 0.546, "num_tokens": 164664119.0, "step": 53},
    {"epoch": 0.5161290322580645, "grad_norm": 0.33425262570381165, "learning_rate": 5e-05, "loss": 0.5544, "num_tokens": 167808973.0, "step": 54},
    {"epoch": 0.5256869772998806, "grad_norm": 0.2662634253501892, "learning_rate": 5e-05, "loss": 0.5526, "num_tokens": 170887441.0, "step": 55},
    {"epoch": 0.5352449223416965, "grad_norm": 0.28920575976371765, "learning_rate": 5e-05, "loss": 0.5536, "num_tokens": 174005920.0, "step": 56},
    {"epoch": 0.5448028673835126, "grad_norm": 0.26591673493385315, "learning_rate": 5e-05, "loss": 0.5481, "num_tokens": 177097398.0, "step": 57},
    {"epoch": 0.5543608124253285, "grad_norm": 0.2847670614719391, "learning_rate": 5e-05, "loss": 0.5536, "num_tokens": 180221288.0, "step": 58},
    {"epoch": 0.5639187574671446, "grad_norm": 0.2541261613368988, "learning_rate": 5e-05, "loss": 0.542, "num_tokens": 183289863.0, "step": 59},
    {"epoch": 0.5734767025089605, "grad_norm": 0.275193989276886, "learning_rate": 5e-05, "loss": 0.5456, "num_tokens": 186406120.0, "step": 60},
    {"epoch": 0.5830346475507766, "grad_norm": 0.26497912406921387, "learning_rate": 5e-05, "loss": 0.5425, "num_tokens": 189471040.0, "step": 61},
    {"epoch": 0.5925925925925926, "grad_norm": 0.27101513743400574, "learning_rate": 5e-05, "loss": 0.5386, "num_tokens": 192575364.0, "step": 62},
    {"epoch": 0.6021505376344086, "grad_norm": 0.26009494066238403, "learning_rate": 5e-05, "loss": 0.5318, "num_tokens": 195684975.0, "step": 63},
    {"epoch": 0.6117084826762246, "grad_norm": 0.22097371518611908, "learning_rate": 5e-05, "loss": 0.5411, "num_tokens": 198779716.0, "step": 64},
    {"epoch": 0.6212664277180406, "grad_norm": 0.3351891040802002, "learning_rate": 5e-05, "loss": 0.5458, "num_tokens": 201906900.0, "step": 65},
    {"epoch": 0.6308243727598566, "grad_norm": 0.23704691231250763, "learning_rate": 5e-05, "loss": 0.5397, "num_tokens": 205031188.0, "step": 66},
    {"epoch": 0.6403823178016727, "grad_norm": 0.32154443860054016, "learning_rate": 5e-05, "loss": 0.5445, "num_tokens": 208116939.0, "step": 67},
    {"epoch": 0.6499402628434886, "grad_norm": 0.2630949020385742, "learning_rate": 5e-05, "loss": 0.5322, "num_tokens": 211211227.0, "step": 68},
    {"epoch": 0.6594982078853047, "grad_norm": 0.31168922781944275, "learning_rate": 5e-05, "loss": 0.5324, "num_tokens": 214301570.0, "step": 69},
    {"epoch": 0.6690561529271206, "grad_norm": 0.2585446834564209, "learning_rate": 5e-05, "loss": 0.5386, "num_tokens": 217393704.0, "step": 70},
    {"epoch": 0.6786140979689367, "grad_norm": 0.25293031334877014, "learning_rate": 5e-05, "loss": 0.5223, "num_tokens": 220482293.0, "step": 71},
    {"epoch": 0.6881720430107527, "grad_norm": 0.3039219379425049, "learning_rate": 5e-05, "loss": 0.5337, "num_tokens": 223590652.0, "step": 72},
    {"epoch": 0.6977299880525687, "grad_norm": 0.2043919861316681, "learning_rate": 5e-05, "loss": 0.5251, "num_tokens": 226675856.0, "step": 73},
    {"epoch": 0.7072879330943848, "grad_norm": 0.290919691324234, "learning_rate": 5e-05, "loss": 0.5343, "num_tokens": 229778501.0, "step": 74},
    {"epoch": 0.7168458781362007, "grad_norm": 0.24795055389404297, "learning_rate": 5e-05, "loss": 0.5356, "num_tokens": 232829595.0, "step": 75},
    {"epoch": 0.7264038231780168, "grad_norm": 0.25901439785957336, "learning_rate": 5e-05, "loss": 0.5258, "num_tokens": 235940446.0, "step": 76},
    {"epoch": 0.7359617682198327, "grad_norm": 0.30405953526496887, "learning_rate": 5e-05, "loss": 0.5361, "num_tokens": 239028046.0, "step": 77},
    {"epoch": 0.7455197132616488, "grad_norm": 0.23060917854309082, "learning_rate": 5e-05, "loss": 0.5361, "num_tokens": 242205870.0, "step": 78},
    {"epoch": 0.7550776583034647, "grad_norm": 0.24737045168876648, "learning_rate": 5e-05, "loss": 0.5204, "num_tokens": 245233676.0, "step": 79},
    {"epoch": 0.7646356033452808, "grad_norm": 0.22408932447433472, "learning_rate": 5e-05, "loss": 0.5202, "num_tokens": 248356444.0, "step": 80},
    {"epoch": 0.7741935483870968, "grad_norm": 0.328327476978302, "learning_rate": 5e-05, "loss": 0.5311, "num_tokens": 251468079.0, "step": 81},
    {"epoch": 0.7837514934289128, "grad_norm": 0.20574553310871124, "learning_rate": 5e-05, "loss": 0.5231, "num_tokens": 254594703.0, "step": 82},
    {"epoch": 0.7933094384707288, "grad_norm": 0.305171936750412, "learning_rate": 5e-05, "loss": 0.5264, "num_tokens": 257723496.0, "step": 83},
    {"epoch": 0.8028673835125448, "grad_norm": 0.20093268156051636, "learning_rate": 5e-05, "loss": 0.5274, "num_tokens": 260768286.0, "step": 84},
    {"epoch": 0.8124253285543608, "grad_norm": 0.3125307559967041, "learning_rate": 5e-05, "loss": 0.5313, "num_tokens": 263875942.0, "step": 85},
    {"epoch": 0.8219832735961768, "grad_norm": 0.20460698008537292, "learning_rate": 5e-05, "loss": 0.5217, "num_tokens": 266985727.0, "step": 86},
    {"epoch": 0.8315412186379928, "grad_norm": 0.2367510348558426, "learning_rate": 5e-05, "loss": 0.5163, "num_tokens": 270075318.0, "step": 87},
    {"epoch": 0.8410991636798089, "grad_norm": 0.2568177580833435, "learning_rate": 5e-05, "loss": 0.5241, "num_tokens": 273156355.0, "step": 88},
    {"epoch": 0.8506571087216248, "grad_norm": 0.2784541845321655, "learning_rate": 5e-05, "loss": 0.5223, "num_tokens": 276259662.0, "step": 89},
    {"epoch": 0.8602150537634409, "grad_norm": 0.25873690843582153, "learning_rate": 5e-05, "loss": 0.5256, "num_tokens": 279394317.0, "step": 90},
    {"epoch": 0.8697729988052568, "grad_norm": 0.2528490126132965, "learning_rate": 5e-05, "loss": 0.5199, "num_tokens": 282498565.0, "step": 91},
    {"epoch": 0.8793309438470729, "grad_norm": 0.24740688502788544, "learning_rate": 5e-05, "loss": 0.5141, "num_tokens": 285610462.0, "step": 92},
    {"epoch": 0.8888888888888888, "grad_norm": 0.24610859155654907, "learning_rate": 5e-05, "loss": 0.5197, "num_tokens": 288771247.0, "step": 93},
    {"epoch": 0.8984468339307049, "grad_norm": 0.24038155376911163, "learning_rate": 5e-05, "loss": 0.5191, "num_tokens": 291911091.0, "step": 94},
    {"epoch": 0.9080047789725209, "grad_norm": 0.2939693033695221, "learning_rate": 5e-05, "loss": 0.5147, "num_tokens": 294998183.0, "step": 95},
    {"epoch": 0.9175627240143369, "grad_norm": 0.19252680242061615, "learning_rate": 5e-05, "loss": 0.5138, "num_tokens": 298050683.0, "step": 96},
    {"epoch": 0.927120669056153, "grad_norm": 0.23349586129188538, "learning_rate": 5e-05, "loss": 0.5172, "num_tokens": 301164218.0, "step": 97},
    {"epoch": 0.9366786140979689, "grad_norm": 0.20470231771469116, "learning_rate": 5e-05, "loss": 0.5132, "num_tokens": 304261920.0, "step": 98},
    {"epoch": 0.946236559139785, "grad_norm": 0.27620136737823486, "learning_rate": 5e-05, "loss": 0.5195, "num_tokens": 307412237.0, "step": 99},
    {"epoch": 0.955794504181601, "grad_norm": 0.21717092394828796, "learning_rate": 5e-05, "loss": 0.5172, "num_tokens": 310507993.0, "step": 100},
    {"epoch": 0.965352449223417, "grad_norm": 0.29017373919487, "learning_rate": 5e-05, "loss": 0.5107, "num_tokens": 313624257.0, "step": 101},
    {"epoch": 0.974910394265233, "grad_norm": 0.28068530559539795, "learning_rate": 5e-05, "loss": 0.5176, "num_tokens": 316722666.0, "step": 102},
    {"epoch": 0.984468339307049, "grad_norm": 0.2316262573003769, "learning_rate": 5e-05, "loss": 0.5162, "num_tokens": 319820496.0, "step": 103},
    {"epoch": 0.994026284348865, "grad_norm": 0.30137985944747925, "learning_rate": 5e-05, "loss": 0.5102, "num_tokens": 322901137.0, "step": 104},
    {"epoch": 1.0, "grad_norm": 0.30137985944747925, "learning_rate": 5e-05, "loss": 0.5069, "num_tokens": 324860579.0, "step": 105},
    {"epoch": 1.009557945041816, "grad_norm": 0.32997432351112366, "learning_rate": 5e-05, "loss": 0.4761, "num_tokens": 327927654.0, "step": 106},
    {"epoch": 1.0191158900836321, "grad_norm": 0.32110321521759033, "learning_rate": 5e-05, "loss": 0.4785, "num_tokens": 331066424.0, "step": 107},
    {"epoch": 1.028673835125448, "grad_norm": 0.20342878997325897, "learning_rate": 5e-05, "loss": 0.4845, "num_tokens": 334162426.0, "step": 108},
    {"epoch": 1.038231780167264, "grad_norm": 0.3325869143009186, "learning_rate": 5e-05, "loss": 0.4822, "num_tokens": 337302238.0, "step": 109},
    {"epoch": 1.04778972520908, "grad_norm": 0.22826018929481506, "learning_rate": 5e-05, "loss": 0.4824, "num_tokens": 340447280.0, "step": 110},
    {"epoch": 1.0573476702508962, "grad_norm": 0.3223550021648407, "learning_rate": 5e-05, "loss": 0.4696, "num_tokens": 343568761.0, "step": 111},
    {"epoch": 1.066905615292712, "grad_norm": 0.21368011832237244, "learning_rate": 5e-05, "loss": 0.4756, "num_tokens": 346718666.0, "step": 112},
    {"epoch": 1.076463560334528, "grad_norm": 0.24224598705768585, "learning_rate": 5e-05, "loss": 0.4765, "num_tokens": 349842177.0, "step": 113},
    {"epoch": 1.086021505376344, "grad_norm": 0.2377299815416336, "learning_rate": 5e-05, "loss": 0.4728, "num_tokens": 352932093.0, "step": 114},
    {"epoch": 1.0955794504181602, "grad_norm": 0.23792093992233276, "learning_rate": 5e-05, "loss": 0.4777, "num_tokens": 356033224.0, "step": 115},
    {"epoch": 1.1051373954599761, "grad_norm": 0.26680827140808105, "learning_rate": 5e-05, "loss": 0.4778, "num_tokens": 359191089.0, "step": 116},
    {"epoch": 1.114695340501792, "grad_norm": 0.26498955488204956, "learning_rate": 5e-05, "loss": 0.4739, "num_tokens": 362282504.0, "step": 117},
    {"epoch": 1.124253285543608, "grad_norm": 0.2889910936355591, "learning_rate": 5e-05, "loss": 0.4751, "num_tokens": 365375038.0, "step": 118},
    {"epoch": 1.1338112305854242, "grad_norm": 0.2168579399585724, "learning_rate": 5e-05, "loss": 0.4615, "num_tokens": 368405060.0, "step": 119},
    {"epoch": 1.1433691756272402, "grad_norm": 0.2727675437927246, "learning_rate": 5e-05, "loss": 0.4738, "num_tokens": 371483975.0, "step": 120},
    {"epoch": 1.1529271206690561, "grad_norm": 0.36416906118392944, "learning_rate": 5e-05, "loss": 0.4791, "num_tokens": 374634435.0, "step": 121},
    {"epoch": 1.162485065710872, "grad_norm": 0.18200910091400146, "learning_rate": 5e-05, "loss": 0.4731, "num_tokens": 377757663.0, "step": 122},
    {"epoch": 1.1720430107526882, "grad_norm": 0.386223167181015, "learning_rate": 5e-05, "loss": 0.4777, "num_tokens": 380874183.0, "step": 123},
    {"epoch": 1.1816009557945042, "grad_norm": 0.25184914469718933, "learning_rate": 5e-05, "loss": 0.4793, "num_tokens": 383990561.0, "step": 124},
    {"epoch": 1.1911589008363201, "grad_norm": 0.37080973386764526, "learning_rate": 5e-05, "loss": 0.4764, "num_tokens": 387086225.0, "step": 125},
    {"epoch": 1.2007168458781363, "grad_norm": 0.27940818667411804, "learning_rate": 5e-05, "loss": 0.4718, "num_tokens": 390204819.0, "step": 126},
    {"epoch": 1.2102747909199523, "grad_norm": 0.28235924243927, "learning_rate": 5e-05, "loss": 0.4713, "num_tokens": 393287937.0, "step": 127},
    {"epoch": 1.2198327359617682, "grad_norm": 0.28537049889564514, "learning_rate": 5e-05, "loss": 0.4746, "num_tokens": 396363663.0, "step": 128},
    {"epoch": 1.2293906810035842, "grad_norm": 0.3287442922592163, "learning_rate": 5e-05, "loss": 0.4796, "num_tokens": 399462801.0, "step": 129},
    {"epoch": 1.2389486260454001, "grad_norm": 0.23722870647907257, "learning_rate": 5e-05, "loss": 0.4674, "num_tokens": 402561690.0, "step": 130},
    {"epoch": 1.2485065710872163, "grad_norm": 0.26579034328460693, "learning_rate": 5e-05, "loss": 0.46, "num_tokens": 405671986.0, "step": 131},
    {"epoch": 1.2580645161290323, "grad_norm": 0.21499669551849365, "learning_rate": 5e-05, "loss": 0.4633, "num_tokens": 408745166.0, "step": 132},
    {"epoch": 1.2676224611708482, "grad_norm": 0.26388466358184814, "learning_rate": 5e-05, "loss": 0.4736, "num_tokens": 411848560.0, "step": 133},
    {"epoch": 1.2771804062126644, "grad_norm": 0.22143350541591644, "learning_rate": 5e-05, "loss": 0.4624, "num_tokens": 414961042.0, "step": 134},
    {"epoch": 1.2867383512544803, "grad_norm": 0.27563315629959106, "learning_rate": 5e-05, "loss": 0.4645, "num_tokens": 418038833.0, "step": 135},
    {"epoch": 1.2962962962962963, "grad_norm": 0.2135905772447586, "learning_rate": 5e-05, "loss": 0.4732, "num_tokens": 421176052.0, "step": 136},
    {"epoch": 1.3058542413381122, "grad_norm": 0.24489735066890717, "learning_rate": 5e-05, "loss": 0.472, "num_tokens": 424278020.0, "step": 137},
    {"epoch": 1.3154121863799282, "grad_norm": 0.22754637897014618, "learning_rate": 5e-05, "loss": 0.4691, "num_tokens": 427397110.0, "step": 138},
    {"epoch": 1.3249701314217444, "grad_norm": 0.24249699711799622, "learning_rate": 5e-05, "loss": 0.4757, "num_tokens": 430504573.0, "step": 139},
    {"epoch": 1.3345280764635603, "grad_norm": 0.23354573547840118, "learning_rate": 5e-05, "loss": 0.4605, "num_tokens": 433626191.0, "step": 140},
    {"epoch": 1.3440860215053765, "grad_norm": 0.2180928736925125, "learning_rate": 5e-05, "loss": 0.4679, "num_tokens": 436727625.0, "step": 141},
    {"epoch": 1.3536439665471924, "grad_norm": 0.2624548077583313, "learning_rate": 5e-05, "loss": 0.4704, "num_tokens": 439824215.0, "step": 142},
    {"epoch": 1.3632019115890084, "grad_norm": 0.22936105728149414, "learning_rate": 5e-05, "loss": 0.4638, "num_tokens": 442903885.0, "step": 143},
    {"epoch": 1.3727598566308243, "grad_norm": 0.2361782044172287, "learning_rate": 5e-05, "loss": 0.4722, "num_tokens": 446049216.0, "step": 144},
    {"epoch": 1.3823178016726403, "grad_norm": 0.22354678809642792, "learning_rate": 5e-05, "loss": 0.4702, "num_tokens": 449164822.0, "step": 145},
    {"epoch": 1.3918757467144565, "grad_norm": 0.2597694396972656, "learning_rate": 5e-05, "loss": 0.4659, "num_tokens": 452296016.0, "step": 146},
    {"epoch": 1.4014336917562724, "grad_norm": 0.18445123732089996, "learning_rate": 5e-05, "loss": 0.4675, "num_tokens": 455409198.0, "step": 147},
    {"epoch": 1.4109916367980884, "grad_norm": 0.24752481281757355, "learning_rate": 5e-05, "loss": 0.4665, "num_tokens": 458552498.0, "step": 148},
    {"epoch": 1.4205495818399045, "grad_norm": 0.23295103013515472, "learning_rate": 5e-05, "loss": 0.4642, "num_tokens": 461598172.0, "step": 149},
    {"epoch": 1.4301075268817205, "grad_norm": 0.26925209164619446, "learning_rate": 5e-05, "loss": 0.4618, "num_tokens": 464763731.0, "step": 150},
    {"epoch": 1.4396654719235364, "grad_norm": 0.1971091479063034, "learning_rate": 5e-05, "loss": 0.4639, "num_tokens": 467905236.0, "step": 151},
    {"epoch": 1.4492234169653524, "grad_norm": 0.2771715819835663, "learning_rate": 5e-05, "loss": 0.4645, "num_tokens": 471038706.0, "step": 152},
    {"epoch": 1.4587813620071683, "grad_norm": 0.2006852775812149, "learning_rate": 5e-05, "loss": 0.4664, "num_tokens": 474170594.0, "step": 153},
    {"epoch": 1.4683393070489845, "grad_norm": 0.27502164244651794, "learning_rate": 5e-05, "loss": 0.4581, "num_tokens": 477293327.0, "step": 154},
    {"epoch": 1.4778972520908005, "grad_norm": 0.19383655488491058, "learning_rate": 5e-05, "loss": 0.4641, "num_tokens": 480356338.0, "step": 155},
    {"epoch": 1.4874551971326164, "grad_norm": 0.29519346356391907, "learning_rate": 5e-05, "loss": 0.4636, "num_tokens": 483466822.0, "step": 156},
    {"epoch": 1.4970131421744326, "grad_norm": 0.24049213528633118, "learning_rate": 5e-05, "loss": 0.4708, "num_tokens": 486598025.0, "step": 157},
    {"epoch": 1.5065710872162486, "grad_norm": 0.23467357456684113, "learning_rate": 5e-05, "loss": 0.4629, "num_tokens": 489710268.0, "step": 158},
    {"epoch": 1.5161290322580645, "grad_norm": 0.23279620707035065, "learning_rate": 5e-05, "loss": 0.4642, "num_tokens": 492749301.0, "step": 159},
    {"epoch": 1.5256869772998805, "grad_norm": 0.21989446878433228, "learning_rate": 5e-05, "loss": 0.4609, "num_tokens": 495858425.0, "step": 160},
    {"epoch": 1.5352449223416964, "grad_norm": 0.20997576415538788, "learning_rate": 5e-05, "loss": 0.4615, "num_tokens": 499002124.0, "step": 161},
    {"epoch": 1.5448028673835126, "grad_norm": 0.2443646341562271, "learning_rate": 5e-05, "loss": 0.4599, "num_tokens": 502158127.0, "step": 162},
    {"epoch": 1.5543608124253285, "grad_norm": 0.2202104926109314, "learning_rate": 5e-05, "loss": 0.4685, "num_tokens": 505273653.0, "step": 163},
    {"epoch": 1.5639187574671447, "grad_norm": 0.2284436672925949, "learning_rate": 5e-05, "loss": 0.4666, "num_tokens": 508383175.0, "step": 164},
    {"epoch": 1.5734767025089607, "grad_norm": 0.21461881697177887, "learning_rate": 5e-05, "loss": 0.4649, "num_tokens": 511456188.0, "step": 165},
    {"epoch": 1.5830346475507766, "grad_norm": 0.24341613054275513, "learning_rate": 5e-05, "loss": 0.4726, "num_tokens": 514504919.0, "step": 166},
    {"epoch": 1.5925925925925926, "grad_norm": 0.2432275414466858, "learning_rate": 5e-05, "loss": 0.462, "num_tokens": 517638097.0, "step": 167},
    {"epoch": 1.6021505376344085, "grad_norm": 0.1924099624156952, "learning_rate": 5e-05, "loss": 0.4574, "num_tokens": 520744764.0, "step": 168},
    {"epoch": 1.6117084826762245, "grad_norm": 0.2210276573896408, "learning_rate": 5e-05, "loss": 0.4652, "num_tokens": 523847555.0, "step": 169},
    {"epoch": 1.6212664277180406, "grad_norm": 0.24831326305866241, "learning_rate": 5e-05, "loss": 0.4622, "num_tokens": 526938439.0, "step": 170},
    {"epoch": 1.6308243727598566, "grad_norm": 0.20027406513690948, "learning_rate": 5e-05, "loss": 0.4625, "num_tokens": 530100826.0, "step": 171},
    {"epoch": 1.6403823178016728, "grad_norm": 0.2210628092288971, "learning_rate": 5e-05, "loss": 0.4543, "num_tokens": 533217368.0, "step": 172},
    {"epoch": 1.6499402628434887, "grad_norm": 0.21531818807125092, "learning_rate": 5e-05, "loss": 0.464, "num_tokens": 536266028.0, "step": 173},
    {"epoch": 1.6594982078853047, "grad_norm": 0.24346983432769775, "learning_rate": 5e-05, "loss": 0.4582, "num_tokens": 539353780.0, "step": 174},
    {"epoch": 1.6690561529271206, "grad_norm": 0.18500176072120667, "learning_rate": 5e-05, "loss": 0.4611, "num_tokens": 542436030.0, "step": 175},
    {"epoch": 1.6786140979689366, "grad_norm": 0.21900725364685059, "learning_rate": 5e-05, "loss": 0.4567, "num_tokens": 545556259.0, "step": 176},
    {"epoch": 1.6881720430107527, "grad_norm": 0.26041945815086365, "learning_rate": 5e-05, "loss": 0.4609, "num_tokens": 548651036.0, "step": 177},
    {"epoch": 1.6977299880525687, "grad_norm": 0.1868957281112671, "learning_rate": 5e-05, "loss": 0.4578, "num_tokens": 551763960.0, "step": 178},
    {"epoch": 1.7072879330943849, "grad_norm": 0.30002743005752563, "learning_rate": 5e-05, "loss": 0.4621, "num_tokens": 554859971.0, "step": 179},
    {"epoch": 1.7168458781362008, "grad_norm": 0.15785135328769684, "learning_rate": 5e-05, "loss": 0.4531, "num_tokens": 557960344.0, "step": 180},
    {"epoch": 1.7264038231780168, "grad_norm": 0.21860018372535706, "learning_rate": 5e-05, "loss": 0.456, "num_tokens": 561070346.0, "step": 181},
    {"epoch": 1.7359617682198327, "grad_norm": 0.21262319386005402, "learning_rate": 5e-05, "loss": 0.4548, "num_tokens": 564124513.0, "step": 182},
    {"epoch": 1.7455197132616487, "grad_norm": 0.208367258310318, "learning_rate": 5e-05, "loss": 0.4597, "num_tokens": 567229917.0, "step": 183},
    {"epoch": 1.7550776583034646, "grad_norm": 0.23050186038017273, "learning_rate": 5e-05, "loss": 0.4609, "num_tokens": 570377549.0, "step": 184},
    {"epoch": 1.7646356033452808, "grad_norm": 0.19711245596408844, "learning_rate": 5e-05, "loss": 0.4596, "num_tokens": 573423384.0, "step": 185},
    {"epoch": 1.7741935483870968, "grad_norm": 0.25033050775527954, "learning_rate": 5e-05, "loss": 0.4536, "num_tokens": 576529785.0, "step": 186},
    {"epoch": 1.783751493428913, "grad_norm": 0.23019000887870789, "learning_rate": 5e-05, "loss": 0.457, "num_tokens": 579611243.0, "step": 187},
    {"epoch": 1.7933094384707289, "grad_norm": 0.24459266662597656, "learning_rate": 5e-05, "loss": 0.4563, "num_tokens": 582684585.0, "step": 188},
    {"epoch": 1.8028673835125448, "grad_norm": 0.2335624098777771, "learning_rate": 5e-05, "loss": 0.456, "num_tokens": 585749574.0, "step": 189},
    {"epoch": 1.8124253285543608, "grad_norm": 0.21826566755771637, "learning_rate": 5e-05, "loss": 0.4545, "num_tokens": 588793971.0, "step": 190},
    {"epoch": 1.8219832735961767, "grad_norm": 0.2118789553642273, "learning_rate": 5e-05, "loss": 0.4561, "num_tokens": 591863271.0, "step": 191},
    {"epoch": 1.8315412186379927, "grad_norm": 0.25364288687705994, "learning_rate": 5e-05, "loss": 0.4582, "num_tokens": 594955013.0, "step": 192},
    {"epoch": 1.8410991636798089, "grad_norm": 0.18223045766353607, "learning_rate": 5e-05, "loss": 0.4586, "num_tokens": 598054762.0, "step": 193},
    {"epoch": 1.8506571087216248, "grad_norm": 0.19568181037902832, "learning_rate": 5e-05, "loss": 0.4542, "num_tokens": 601156311.0, "step": 194},
    {"epoch": 1.860215053763441, "grad_norm": 0.21927742660045624, "learning_rate": 5e-05, "loss": 0.4593, "num_tokens": 604264490.0, "step": 195},
    {"epoch": 1.869772998805257, "grad_norm": 0.22864626348018646, "learning_rate": 5e-05, "loss": 0.4582, "num_tokens": 607369801.0, "step": 196},
    {"epoch": 1.8793309438470729, "grad_norm": 0.2220122367143631, "learning_rate": 5e-05, "loss": 0.4586, "num_tokens": 610464157.0, "step": 197},
    {"epoch": 1.8888888888888888, "grad_norm": 0.20654238760471344, "learning_rate": 5e-05, "loss": 0.4572, "num_tokens": 613600074.0, "step": 198},
    {"epoch": 1.8984468339307048, "grad_norm": 0.24055001139640808, "learning_rate": 5e-05, "loss": 0.447, "num_tokens": 616692135.0, "step": 199},
    {"epoch": 1.9080047789725207, "grad_norm": 0.2102368324995041, "learning_rate": 5e-05, "loss": 0.4616, "num_tokens": 619865728.0, "step": 200},
    {"epoch": 1.917562724014337, "grad_norm": 0.1804041713476181, "learning_rate": 5e-05, "loss": 0.456, "num_tokens": 622937270.0, "step": 201},
    {"epoch": 1.927120669056153, "grad_norm": 0.20141775906085968, "learning_rate": 5e-05, "loss": 0.4573, "num_tokens": 626025595.0, "step": 202},
    {"epoch": 1.936678614097969, "grad_norm": 0.23730237782001495, "learning_rate": 5e-05, "loss": 0.4588, "num_tokens": 629102565.0, "step": 203},
    {"epoch": 1.946236559139785, "grad_norm": 0.20814980566501617, "learning_rate": 5e-05, "loss": 0.4596, "num_tokens": 632248389.0, "step": 204},
    {"epoch": 1.955794504181601, "grad_norm": 0.23954477906227112, "learning_rate": 5e-05, "loss": 0.4608, "num_tokens": 635381252.0, "step": 205},
    {"epoch": 1.965352449223417, "grad_norm": 0.2159876823425293, "learning_rate": 5e-05, "loss": 0.4568, "num_tokens": 638503290.0, "step": 206},
    {"epoch": 1.9749103942652328, "grad_norm": 0.2248695194721222, "learning_rate": 5e-05, "loss": 0.4494, "num_tokens": 641566168.0, "step": 207},
    {"epoch": 1.984468339307049, "grad_norm": 0.18469811975955963, "learning_rate": 5e-05, "loss": 0.4553, "num_tokens": 644664768.0, "step": 208},
    {"epoch": 1.994026284348865, "grad_norm": 0.2029660940170288, "learning_rate": 5e-05, "loss": 0.458, "num_tokens": 647777196.0, "step": 209},
    {"epoch": 2.0, "grad_norm": 0.2680661678314209, "learning_rate": 5e-05, "loss": 0.4472, "num_tokens": 649717884.0, "step": 210},
    {"epoch": 2.009557945041816, "grad_norm": 0.2700228691101074, "learning_rate": 5e-05, "loss": 0.4049, "num_tokens": 652835514.0, "step": 211},
    {"epoch": 2.019115890083632, "grad_norm": 0.21437576413154602, "learning_rate": 5e-05, "loss": 0.4077, "num_tokens": 655939553.0, "step": 212},
    {"epoch": 2.028673835125448, "grad_norm": 0.2812529504299164, "learning_rate": 5e-05, "loss": 0.4043, "num_tokens": 659052255.0, "step": 213},
    {"epoch": 2.0382317801672643, "grad_norm": 0.19706906378269196, "learning_rate": 5e-05, "loss": 0.4069, "num_tokens": 662144062.0, "step": 214},
    {"epoch": 2.04778972520908, "grad_norm": 0.23425546288490295, "learning_rate": 5e-05, "loss": 0.3947, "num_tokens": 665243818.0, "step": 215},
    {"epoch": 2.057347670250896, "grad_norm": 0.2341323047876358, "learning_rate": 5e-05, "loss": 0.4005, "num_tokens": 668343069.0, "step": 216},
    {"epoch": 2.066905615292712, "grad_norm": 0.26267462968826294, "learning_rate": 5e-05, "loss": 0.4076, "num_tokens": 671491544.0, "step": 217},
    {"epoch": 2.076463560334528, "grad_norm": 0.22123785316944122, "learning_rate": 5e-05, "loss": 0.3994, "num_tokens": 674603425.0, "step": 218},
    {"epoch": 2.086021505376344, "grad_norm": 0.22117979824543, "learning_rate": 5e-05, "loss": 0.3999, "num_tokens": 677683109.0, "step": 219},
    {"epoch": 2.09557945041816, "grad_norm": 0.25451090931892395, "learning_rate": 5e-05, "loss": 0.4035, "num_tokens": 680765161.0, "step": 220},
    {"epoch": 2.105137395459976, "grad_norm": 0.21930952370166779, "learning_rate": 5e-05, "loss": 0.4027, "num_tokens": 683877744.0, "step": 221},
    {"epoch": 2.1146953405017923, "grad_norm": 0.2268250733613968, "learning_rate": 5e-05, "loss": 0.4017, "num_tokens": 687007762.0, "step": 222},
    {"epoch": 2.1242532855436083, "grad_norm": 0.21051234006881714, "learning_rate": 5e-05, "loss": 0.4083, "num_tokens": 690110389.0, "step": 223},
    {"epoch": 2.133811230585424, "grad_norm": 0.22137832641601562, "learning_rate": 5e-05, "loss": 0.3908, "num_tokens": 693193272.0, "step": 224},
    {"epoch": 2.14336917562724, "grad_norm": 0.24819853901863098, "learning_rate": 5e-05, "loss": 0.4037, "num_tokens": 696332399.0, "step": 225},
    {"epoch": 2.152927120669056, "grad_norm": 0.2136155366897583, "learning_rate": 5e-05, "loss": 0.3972, "num_tokens": 699449474.0, "step": 226},
    {"epoch": 2.162485065710872, "grad_norm": 0.21676744520664215, "learning_rate": 5e-05, "loss": 0.4009, "num_tokens": 702535376.0, "step": 227},
    {"epoch": 2.172043010752688, "grad_norm": 0.21379505097866058, "learning_rate": 5e-05, "loss": 0.4048, "num_tokens": 705594342.0, "step": 228},
    {"epoch": 2.1816009557945044, "grad_norm": 0.24907568097114563, "learning_rate": 5e-05, "loss": 0.4016, "num_tokens": 708681631.0, "step": 229},
    {"epoch": 2.1911589008363204, "grad_norm": 0.23827004432678223, "learning_rate": 5e-05, "loss": 0.3897, "num_tokens": 711768161.0, "step": 230},
    {"epoch": 2.2007168458781363, "grad_norm": 0.19475553929805756, "learning_rate": 5e-05, "loss": 0.3984, "num_tokens": 714866856.0, "step": 231},
    {"epoch": 2.2102747909199523, "grad_norm": 0.22524000704288483, "learning_rate": 5e-05, "loss": 0.3997, "num_tokens": 717989725.0, "step": 232},
    {"epoch": 2.219832735961768, "grad_norm": 0.22430852055549622, "learning_rate": 5e-05, "loss": 0.3981, "num_tokens": 721080176.0, "step": 233},
    {"epoch": 2.229390681003584, "grad_norm": 0.2231917530298233, "learning_rate": 5e-05, "loss": 0.4044, "num_tokens": 724211432.0, "step": 234},
    {"epoch": 2.2389486260454, "grad_norm": 0.24612534046173096, "learning_rate": 5e-05, "loss": 0.3955, "num_tokens": 727270220.0, "step": 235},
    {"epoch": 2.248506571087216, "grad_norm": 0.2350219190120697, "learning_rate": 5e-05, "loss": 0.4062, "num_tokens": 730444444.0, "step": 236},
    {"epoch": 2.258064516129032, "grad_norm": 0.24301743507385254, "learning_rate": 5e-05, "loss": 0.4025, "num_tokens": 733557450.0, "step": 237},
    {"epoch": 2.2676224611708484, "grad_norm": 0.20786051452159882, "learning_rate": 5e-05, "loss": 0.4005, "num_tokens": 736681750.0, "step": 238},
    {"epoch": 2.2771804062126644, "grad_norm": 0.33719518780708313, "learning_rate": 5e-05, "loss": 0.393, "num_tokens": 739789232.0, "step": 239},
    {"epoch": 2.2867383512544803, "grad_norm": 0.17687377333641052, "learning_rate": 5e-05, "loss": 0.4038, "num_tokens": 742891601.0, "step": 240},
    {"epoch": 2.2962962962962963, "grad_norm": 0.342306524515152, "learning_rate": 5e-05, "loss": 0.4019, "num_tokens": 746006954.0, "step": 241},
    {"epoch": 2.3058542413381122, "grad_norm": 0.16845592856407166, "learning_rate": 5e-05, "loss": 0.403, "num_tokens": 749147037.0, "step": 242},
    {"epoch": 2.315412186379928, "grad_norm": 0.32256966829299927, "learning_rate": 5e-05, "loss": 0.4034, "num_tokens": 752229629.0, "step": 243},
    {"epoch": 2.324970131421744, "grad_norm": 0.20878097414970398, "learning_rate": 5e-05, "loss": 0.4026, "num_tokens": 755278777.0, "step": 244},
    {"epoch": 2.3345280764635605, "grad_norm": 0.2807317078113556, "learning_rate": 5e-05, "loss": 0.406, "num_tokens": 758363222.0, "step": 245},
    {"epoch": 2.3440860215053765, "grad_norm": 0.2278064638376236, "learning_rate": 5e-05, "loss": 0.401, "num_tokens": 761474147.0, "step": 246},
    {"epoch": 2.3536439665471924, "grad_norm": 0.2612324357032776, "learning_rate": 5e-05, "loss": 0.395, "num_tokens": 764551769.0, "step": 247},
    {"epoch": 2.3632019115890084, "grad_norm": 0.23537400364875793, "learning_rate": 5e-05, "loss": 0.3948, "num_tokens": 767614488.0, "step": 248},
    {"epoch": 2.3727598566308243, "grad_norm": 0.21469944715499878, "learning_rate": 5e-05, "loss": 0.403, "num_tokens": 770735177.0, "step": 249},
    {"epoch": 2.3823178016726403, "grad_norm": 0.2674992084503174, "learning_rate": 5e-05, "loss": 0.3933, "num_tokens": 773851787.0, "step": 250},
    {"epoch": 2.3918757467144562, "grad_norm": 0.18953253328800201, "learning_rate": 5e-05, "loss": 0.3994, "num_tokens": 776935088.0, "step": 251},
    {"epoch": 2.4014336917562726, "grad_norm": 0.2478407323360443, "learning_rate": 5e-05, "loss": 0.4052, "num_tokens": 780049122.0, "step": 252},
    {"epoch": 2.4109916367980886, "grad_norm": 0.195692777633667, "learning_rate": 5e-05, "loss": 0.4084, "num_tokens": 783178312.0, "step": 253},
    {"epoch": 2.4205495818399045, "grad_norm": 0.20106397569179535, "learning_rate": 5e-05, "loss": 0.4022, "num_tokens": 786281027.0, "step": 254},
    {"epoch": 2.4301075268817205, "grad_norm": 0.19922764599323273, "learning_rate": 5e-05, "loss": 0.4039, "num_tokens": 789414028.0, "step": 255},
    {"epoch": 2.4396654719235364, "grad_norm": 0.2001892626285553, "learning_rate": 5e-05, "loss": 0.4024, "num_tokens": 792520500.0, "step": 256},
    {"epoch": 2.4492234169653524, "grad_norm": 0.22683829069137573, "learning_rate": 5e-05, "loss": 0.4022, "num_tokens": 795635985.0, "step": 257},
    {"epoch": 2.4587813620071683, "grad_norm": 0.20693090558052063, "learning_rate": 5e-05, "loss": 0.3995, "num_tokens": 798748918.0, "step": 258},
    {"epoch": 2.4683393070489843, "grad_norm": 0.22366446256637573, "learning_rate": 5e-05, "loss": 0.3988, "num_tokens": 801831194.0, "step": 259},
    {"epoch": 2.4778972520908003, "grad_norm": 0.21484145522117615, "learning_rate": 5e-05, "loss": 0.3991, "num_tokens": 804928214.0, "step": 260},
    {"epoch": 2.4874551971326166, "grad_norm": 0.22603361308574677, "learning_rate": 5e-05, "loss": 0.4033, "num_tokens": 807987120.0, "step": 261},
    {"epoch": 2.4970131421744326, "grad_norm": 0.23147359490394592, "learning_rate": 5e-05, "loss": 0.4022, "num_tokens": 811098180.0, "step": 262},
    {"epoch": 2.5065710872162486, "grad_norm": 0.1985515058040619, "learning_rate": 5e-05, "loss": 0.4022, "num_tokens": 814198566.0, "step": 263},
    {"epoch": 2.5161290322580645, "grad_norm": 0.20071357488632202, "learning_rate": 5e-05, "loss": 0.4034, "num_tokens": 817257072.0, "step": 264},
    {"epoch": 2.5256869772998805, "grad_norm": 0.25529035925865173, "learning_rate": 5e-05, "loss": 0.4067, "num_tokens": 820406825.0, "step": 265},
    {"epoch": 2.5352449223416964, "grad_norm": 0.20370543003082275, "learning_rate": 5e-05, "loss": 0.4008, "num_tokens": 823512817.0, "step": 266},
    {"epoch": 2.5448028673835124, "grad_norm": 0.20760004222393036, "learning_rate": 5e-05, "loss": 0.4034, "num_tokens": 826610050.0, "step": 267},
    {"epoch": 2.5543608124253288, "grad_norm": 0.20357725024223328, "learning_rate": 5e-05, "loss": 0.398, "num_tokens": 829753903.0, "step": 268},
    {"epoch": 2.5639187574671447, "grad_norm": 0.22291164100170135, "learning_rate": 5e-05, "loss": 0.3995, "num_tokens": 832868477.0, "step": 269},
    {"epoch": 2.5734767025089607, "grad_norm": 0.1923750936985016, "learning_rate": 5e-05, "loss": 0.402, "num_tokens": 835937080.0, "step": 270},
    {"epoch": 2.5830346475507766, "grad_norm": 0.22883036732673645, "learning_rate": 5e-05, "loss": 0.3997, "num_tokens": 839073075.0, "step": 271},
    {"epoch": 2.5925925925925926, "grad_norm": 0.21349798142910004, "learning_rate": 5e-05, "loss": 0.403, "num_tokens": 842191197.0, "step": 272},
    {"epoch": 2.6021505376344085, "grad_norm": 0.22202804684638977, "learning_rate": 5e-05, "loss": 0.4026, "num_tokens": 845276551.0, "step": 273},
    {"epoch": 2.6117084826762245, "grad_norm": 0.20453181862831116, "learning_rate": 5e-05, "loss": 0.3986, "num_tokens": 848352451.0, "step": 274},
    {"epoch": 2.621266427718041, "grad_norm": 0.21245239675045013, "learning_rate": 5e-05, "loss": 0.3993, "num_tokens": 851459757.0, "step": 275},
    {"epoch": 2.6308243727598564, "grad_norm": 0.20784446597099304, "learning_rate": 5e-05, "loss": 0.4029, "num_tokens": 854632749.0, "step": 276},
    {"epoch": 2.6403823178016728, "grad_norm": 0.20124511420726776, "learning_rate": 5e-05, "loss": 0.4018, "num_tokens": 857704440.0, "step": 277},
    {"epoch": 2.6499402628434887, "grad_norm": 0.19951878488063812, "learning_rate": 5e-05, "loss": 0.4096, "num_tokens": 860825716.0, "step": 278},
    {"epoch": 2.6594982078853047, "grad_norm": 0.19267506897449493, "learning_rate": 5e-05, "loss": 0.3974, "num_tokens": 863922145.0, "step": 279},
    {"epoch": 2.6690561529271206, "grad_norm": 0.21417343616485596, "learning_rate": 5e-05, "loss": 0.4002, "num_tokens": 866991544.0, "step": 280},
    {"epoch": 2.6786140979689366, "grad_norm": 0.21816791594028473, "learning_rate": 5e-05, "loss": 0.3997, "num_tokens": 870069694.0, "step": 281},
    {"epoch": 2.688172043010753, "grad_norm": 0.21573670208454132, "learning_rate": 5e-05, "loss": 0.4064, "num_tokens": 873180617.0, "step": 282},
    {"epoch": 2.6977299880525685, "grad_norm": 0.18886543810367584, "learning_rate": 5e-05, "loss": 0.3971, "num_tokens": 876262507.0, "step": 283},
    {"epoch": 2.707287933094385, "grad_norm": 0.18656791746616364, "learning_rate": 5e-05, "loss": 0.4038, "num_tokens": 879380128.0, "step": 284},
    {"epoch": 2.716845878136201, "grad_norm": 0.197296142578125, "learning_rate": 5e-05, "loss": 0.3986, "num_tokens": 882473178.0, "step": 285},
    {"epoch": 2.7264038231780168, "grad_norm": 0.23007364571094513, "learning_rate": 5e-05, "loss": 0.4036, "num_tokens": 885565233.0, "step": 286},
    {"epoch": 2.7359617682198327, "grad_norm": 0.19327418506145477, "learning_rate": 5e-05, "loss": 0.4134, "num_tokens": 888688634.0, "step": 287},
    {"epoch": 2.7455197132616487, "grad_norm": 0.1903911530971527, "learning_rate": 5e-05, "loss": 0.3996, "num_tokens": 891794908.0, "step": 288},
    {"epoch": 2.7550776583034646, "grad_norm": 0.18659088015556335, "learning_rate": 5e-05, "loss": 0.4044, "num_tokens": 894950413.0, "step": 289},
    {"epoch": 2.7646356033452806, "grad_norm": 0.22853848338127136, "learning_rate": 5e-05, "loss": 0.4085, "num_tokens": 898028493.0, "step": 290},
    {"epoch": 2.774193548387097, "grad_norm": 0.2249801754951477, "learning_rate": 5e-05, "loss": 0.4063, "num_tokens": 901215396.0, "step": 291},
    {"epoch": 2.783751493428913, "grad_norm": 0.2070578783750534, "learning_rate": 5e-05, "loss": 0.3988, "num_tokens": 904344510.0, "step": 292},
    {"epoch": 2.793309438470729, "grad_norm": 0.1973543018102646, "learning_rate": 5e-05, "loss": 0.4042, "num_tokens": 907446044.0, "step": 293},
    {"epoch": 2.802867383512545, "grad_norm": 0.18739643692970276, "learning_rate": 5e-05, "loss": 0.4118, "num_tokens": 910525112.0, "step": 294},
    {"epoch": 2.812425328554361, "grad_norm": 0.18655946850776672, "learning_rate": 5e-05, "loss": 0.3976, "num_tokens": 913646064.0, "step": 295},
    {"epoch": 2.8219832735961767, "grad_norm": 0.19254498183727264, "learning_rate": 5e-05, "loss": 0.4044, "num_tokens": 916750615.0, "step": 296},
    {"epoch": 2.8315412186379927, "grad_norm": 0.20286458730697632, "learning_rate": 5e-05, "loss": 0.4026, "num_tokens": 919815959.0, "step": 297},
    {"epoch": 2.841099163679809, "grad_norm": 0.2116730809211731, "learning_rate": 5e-05, "loss": 0.3999, "num_tokens": 922854402.0, "step": 298},
    {"epoch": 2.8506571087216246, "grad_norm": 0.22266528010368347, "learning_rate": 5e-05, "loss": 0.4038, "num_tokens": 925926441.0, "step": 299},
    {"epoch": 2.860215053763441, "grad_norm": 0.18784675002098083, "learning_rate": 5e-05, "loss": 0.4046, "num_tokens": 929091816.0, "step": 300},
    {"epoch": 2.869772998805257, "grad_norm": 0.18861991167068481, "learning_rate": 5e-05, "loss": 0.4058, "num_tokens": 932245034.0, "step": 301},
    {"epoch": 2.879330943847073, "grad_norm": 0.18745040893554688, "learning_rate": 5e-05, "loss": 0.4053, "num_tokens": 935337789.0, "step": 302},
    {"epoch": 2.888888888888889, "grad_norm": 0.21158824861049652, "learning_rate": 5e-05, "loss": 0.398, "num_tokens": 938469999.0, "step": 303},
    {"epoch": 2.898446833930705, "grad_norm": 0.21590955555438995, "learning_rate": 5e-05, "loss": 0.4024, "num_tokens": 941548793.0, "step": 304},
    {"epoch": 2.9080047789725207, "grad_norm": 0.206715390086174, "learning_rate": 5e-05, "loss": 0.4048, "num_tokens": 944687830.0, "step": 305},
    {"epoch": 2.9175627240143367, "grad_norm": 0.2118476778268814, "learning_rate": 5e-05, "loss": 0.4093, "num_tokens": 947789759.0, "step": 306},
    {"epoch": 2.927120669056153, "grad_norm": 0.18484579026699066, "learning_rate": 5e-05, "loss": 0.3981, "num_tokens": 950899733.0, "step": 307},
    {"epoch": 2.936678614097969, "grad_norm": 0.17388074100017548, "learning_rate": 5e-05, "loss": 0.4065, "num_tokens": 954039453.0, "step": 308},
    {"epoch": 2.946236559139785, "grad_norm": 0.16568942368030548, "learning_rate": 5e-05, "loss": 0.3993, "num_tokens": 957164986.0, "step": 309},
    {"epoch": 2.955794504181601, "grad_norm": 0.17550726234912872, "learning_rate": 5e-05, "loss": 0.4014, "num_tokens": 960246209.0, "step": 310},
    {"epoch": 2.965352449223417, "grad_norm": 0.1897525042295456, "learning_rate": 5e-05, "loss": 0.3993, "num_tokens": 963312993.0, "step": 311},
    {"epoch": 2.974910394265233, "grad_norm": 0.1897035390138626, "learning_rate": 5e-05, "loss": 0.3997, "num_tokens": 966396496.0, "step": 312},
    {"epoch": 2.974910394265233, "step": 312, "total_flos": 3.993677328755458e+19, "train_loss": 0.4935996499963296, "train_runtime": 22258.2821, "train_samples_per_second": 12.633, "train_steps_per_second": 0.014}
  ],
  "logging_steps": 1,
  "max_steps": 312,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 16,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.993677328755458e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}