{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0002, "grad_norm": 5.698113441467285, "learning_rate": 5e-06, "loss": 12.2509, "mean_token_accuracy": 0.2450142428278923, "num_tokens": 16438.0, "step": 1 },
    { "epoch": 0.0004, "grad_norm": 6.063949108123779, "learning_rate": 4.999000000000001e-06, "loss": 13.3748, "mean_token_accuracy": 0.2325708046555519, "num_tokens": 31368.0, "step": 2 },
    { "epoch": 0.0006, "grad_norm": 8.026069641113281, "learning_rate": 4.998e-06, "loss": 12.5454, "mean_token_accuracy": 0.2398785501718521, "num_tokens": 58542.0, "step": 3 },
    { "epoch": 0.0008, "grad_norm": 6.758559703826904, "learning_rate": 4.997000000000001e-06, "loss": 13.9155, "mean_token_accuracy": 0.21041666716337204, "num_tokens": 77844.0, "step": 4 },
    { "epoch": 0.001, "grad_norm": 6.786197185516357, "learning_rate": 4.996e-06, "loss": 13.2561, "mean_token_accuracy": 0.1967741921544075, "num_tokens": 89661.0, "step": 5 },
    { "epoch": 0.0012, "grad_norm": 9.514345169067383, "learning_rate": 4.9950000000000005e-06, "loss": 12.3378, "mean_token_accuracy": 0.20638945698738098, "num_tokens": 104612.0, "step": 6 },
    { "epoch": 0.0014, "grad_norm": 7.438795566558838, "learning_rate": 4.994000000000001e-06, "loss": 12.0814, "mean_token_accuracy": 0.2916666716337204, "num_tokens": 114269.0, "step": 7 },
    { "epoch": 0.0016, "grad_norm": 15.847832679748535, "learning_rate": 4.993e-06, "loss": 13.2761, "mean_token_accuracy": 0.2450142428278923, "num_tokens": 128762.0, "step": 8 },
    { "epoch": 0.0018, "grad_norm": 7.620685577392578, "learning_rate": 4.992e-06, "loss": 13.1792, "mean_token_accuracy": 0.2165178582072258, "num_tokens": 148453.0, "step": 9 },
    { "epoch": 0.002, "grad_norm": 8.26969051361084, "learning_rate": 4.991e-06, "loss": 12.853, "mean_token_accuracy": 0.21405228972434998, "num_tokens": 167693.0, "step": 10 },
    { "epoch": 0.0022, "grad_norm": 7.328786373138428, "learning_rate": 4.9900000000000005e-06, "loss": 12.9578, "mean_token_accuracy": 0.2115987464785576, "num_tokens": 182157.0, "step": 11 },
    { "epoch": 0.0024, "grad_norm": 7.058177947998047, "learning_rate": 4.989000000000001e-06, "loss": 12.7711, "mean_token_accuracy": 0.2002224698662758, "num_tokens": 201457.0, "step": 12 },
    { "epoch": 0.0026, "grad_norm": 6.48744010925293, "learning_rate": 4.988e-06, "loss": 13.006, "mean_token_accuracy": 0.23790322244167328, "num_tokens": 215922.0, "step": 13 },
    { "epoch": 0.0028, "grad_norm": 6.006223201751709, "learning_rate": 4.987e-06, "loss": 10.5975, "mean_token_accuracy": 0.2096899226307869, "num_tokens": 225597.0, "step": 14 },
    { "epoch": 0.003, "grad_norm": 6.274689674377441, "learning_rate": 4.986e-06, "loss": 11.2766, "mean_token_accuracy": 0.26851852238178253, "num_tokens": 240100.0, "step": 15 },
    { "epoch": 0.0032, "grad_norm": 8.211908340454102, "learning_rate": 4.9850000000000006e-06, "loss": 13.7091, "mean_token_accuracy": 0.20202020555734634, "num_tokens": 259400.0, "step": 16 },
    { "epoch": 0.0034, "grad_norm": 6.356493949890137, "learning_rate": 4.984000000000001e-06, "loss": 12.043, "mean_token_accuracy": 0.20519480854272842, "num_tokens": 278628.0, "step": 17 },
    { "epoch": 0.0036, "grad_norm": 6.438048839569092, "learning_rate": 4.983e-06, "loss": 11.5484, "mean_token_accuracy": 0.2343137264251709, "num_tokens": 303856.0, "step": 18 },
    { "epoch": 0.0038, "grad_norm": 7.152822494506836, "learning_rate": 4.982e-06, "loss": 13.4607, "mean_token_accuracy": 0.17500000447034836, "num_tokens": 318686.0, "step": 19 },
    { "epoch": 0.004, "grad_norm": 6.464632034301758, "learning_rate": 4.981e-06, "loss": 11.7533, "mean_token_accuracy": 0.21959459781646729, "num_tokens": 337999.0, "step": 20 },
    { "epoch": 0.0042, "grad_norm": 7.051283836364746, "learning_rate": 4.980000000000001e-06, "loss": 12.9788, "mean_token_accuracy": 0.22011494636535645, "num_tokens": 356602.0, "step": 21 },
    { "epoch": 0.0044, "grad_norm": 9.595747947692871, "learning_rate": 4.979e-06, "loss": 10.9347, "mean_token_accuracy": 0.2645348906517029, "num_tokens": 373201.0, "step": 22 },
    { "epoch": 0.0046, "grad_norm": 7.404125213623047, "learning_rate": 4.9780000000000005e-06, "loss": 12.5181, "mean_token_accuracy": 0.2666666731238365, "num_tokens": 387660.0, "step": 23 },
    { "epoch": 0.0048, "grad_norm": 6.656332969665527, "learning_rate": 4.977e-06, "loss": 11.5566, "mean_token_accuracy": 0.209001787006855, "num_tokens": 402129.0, "step": 24 },
    { "epoch": 0.005, "grad_norm": 6.866989612579346, "learning_rate": 4.976e-06, "loss": 12.4797, "mean_token_accuracy": 0.2379310354590416, "num_tokens": 421133.0, "step": 25 },
    { "epoch": 0.0052, "grad_norm": 6.77735710144043, "learning_rate": 4.975000000000001e-06, "loss": 13.6256, "mean_token_accuracy": 0.22177419066429138, "num_tokens": 440531.0, "step": 26 },
    { "epoch": 0.0054, "grad_norm": 8.206353187561035, "learning_rate": 4.974e-06, "loss": 13.0667, "mean_token_accuracy": 0.21791187673807144, "num_tokens": 459798.0, "step": 27 },
    { "epoch": 0.0056, "grad_norm": 7.40612268447876, "learning_rate": 4.9730000000000005e-06, "loss": 13.5884, "mean_token_accuracy": 0.19052419066429138, "num_tokens": 478587.0, "step": 28 },
    { "epoch": 0.0058, "grad_norm": 6.71999454498291, "learning_rate": 4.972e-06, "loss": 12.0322, "mean_token_accuracy": 0.24526315927505493, "num_tokens": 492356.0, "step": 29 },
    { "epoch": 0.006, "grad_norm": 8.108094215393066, "learning_rate": 4.971e-06, "loss": 12.4858, "mean_token_accuracy": 0.2566666677594185, "num_tokens": 507242.0, "step": 30 },
    { "epoch": 0.0062, "grad_norm": 7.7994513511657715, "learning_rate": 4.970000000000001e-06, "loss": 13.0762, "mean_token_accuracy": 0.24568965286016464, "num_tokens": 525987.0, "step": 31 },
    { "epoch": 0.0064, "grad_norm": 7.258217811584473, "learning_rate": 4.969e-06, "loss": 12.7757, "mean_token_accuracy": 0.24137930572032928, "num_tokens": 540447.0, "step": 32 },
    { "epoch": 0.0066, "grad_norm": 14.746047973632812, "learning_rate": 4.9680000000000005e-06, "loss": 14.4335, "mean_token_accuracy": 0.22649572789669037, "num_tokens": 560131.0, "step": 33 },
    { "epoch": 0.0068, "grad_norm": 5.289712429046631, "learning_rate": 4.967e-06, "loss": 9.6693, "mean_token_accuracy": 0.2875000089406967, "num_tokens": 579313.0, "step": 34 },
    { "epoch": 0.007, "grad_norm": 7.960392951965332, "learning_rate": 4.966e-06, "loss": 13.1738, "mean_token_accuracy": 0.25833334028720856, "num_tokens": 598611.0, "step": 35 },
    { "epoch": 0.0072, "grad_norm": 8.295417785644531, "learning_rate": 4.965000000000001e-06, "loss": 11.8889, "mean_token_accuracy": 0.24144145101308823, "num_tokens": 615588.0, "step": 36 },
    { "epoch": 0.0074, "grad_norm": 7.051126480102539, "learning_rate": 4.964e-06, "loss": 12.1364, "mean_token_accuracy": 0.23590733855962753, "num_tokens": 636032.0, "step": 37 },
    { "epoch": 0.0076, "grad_norm": 7.895816326141357, "learning_rate": 4.963000000000001e-06, "loss": 12.8971, "mean_token_accuracy": 0.18571428954601288, "num_tokens": 655299.0, "step": 38 },
    { "epoch": 0.0078, "grad_norm": 7.544738292694092, "learning_rate": 4.962e-06, "loss": 13.0581, "mean_token_accuracy": 0.20937500149011612, "num_tokens": 674357.0, "step": 39 },
    { "epoch": 0.008, "grad_norm": 7.548039436340332, "learning_rate": 4.9610000000000004e-06, "loss": 13.2158, "mean_token_accuracy": 0.20892494916915894, "num_tokens": 693698.0, "step": 40 },
    { "epoch": 0.0082, "grad_norm": 7.687658309936523, "learning_rate": 4.960000000000001e-06, "loss": 12.8524, "mean_token_accuracy": 0.28735632449388504, "num_tokens": 703433.0, "step": 41 },
    { "epoch": 0.0084, "grad_norm": 8.011468887329102, "learning_rate": 4.959e-06, "loss": 13.5655, "mean_token_accuracy": 0.21635150164365768, "num_tokens": 722733.0, "step": 42 },
    { "epoch": 0.0086, "grad_norm": 11.084840774536133, "learning_rate": 4.958000000000001e-06, "loss": 12.7054, "mean_token_accuracy": 0.21746384352445602, "num_tokens": 738121.0, "step": 43 },
    { "epoch": 0.0088, "grad_norm": 9.436426162719727, "learning_rate": 4.957e-06, "loss": 13.5213, "mean_token_accuracy": 0.22783251106739044, "num_tokens": 757723.0, "step": 44 },
    { "epoch": 0.009, "grad_norm": 8.382990837097168, "learning_rate": 4.9560000000000005e-06, "loss": 12.6328, "mean_token_accuracy": 0.2567741870880127, "num_tokens": 772646.0, "step": 45 },
    { "epoch": 0.0092, "grad_norm": 8.272336959838867, "learning_rate": 4.955e-06, "loss": 13.0365, "mean_token_accuracy": 0.2611111178994179, "num_tokens": 792334.0, "step": 46 },
    { "epoch": 0.0094, "grad_norm": 10.347405433654785, "learning_rate": 4.954e-06, "loss": 12.8164, "mean_token_accuracy": 0.22616633027791977, "num_tokens": 811713.0, "step": 47 },
    { "epoch": 0.0096, "grad_norm": 7.27515983581543, "learning_rate": 4.953000000000001e-06, "loss": 11.0123, "mean_token_accuracy": 0.23790322244167328, "num_tokens": 832998.0, "step": 48 },
    { "epoch": 0.0098, "grad_norm": 8.973237991333008, "learning_rate": 4.952e-06, "loss": 12.9664, "mean_token_accuracy": 0.19805195182561874, "num_tokens": 861717.0, "step": 49 },
    { "epoch": 0.01, "grad_norm": 8.738320350646973, "learning_rate": 4.9510000000000005e-06, "loss": 12.5728, "mean_token_accuracy": 0.24344827979803085, "num_tokens": 875736.0, "step": 50 },
    { "epoch": 0.0102, "grad_norm": 8.51733112335205, "learning_rate": 4.95e-06, "loss": 12.7596, "mean_token_accuracy": 0.22380952537059784, "num_tokens": 894386.0, "step": 51 },
    { "epoch": 0.0104, "grad_norm": 8.787413597106934, "learning_rate": 4.949e-06, "loss": 12.4493, "mean_token_accuracy": 0.268075630068779, "num_tokens": 908848.0, "step": 52 },
    { "epoch": 0.0106, "grad_norm": 7.849542617797852, "learning_rate": 4.948000000000001e-06, "loss": 12.7514, "mean_token_accuracy": 0.23790322244167328, "num_tokens": 923309.0, "step": 53 },
    { "epoch": 0.0108, "grad_norm": 8.378942489624023, "learning_rate": 4.947e-06, "loss": 11.4953, "mean_token_accuracy": 0.2290322557091713, "num_tokens": 938298.0, "step": 54 },
    { "epoch": 0.011, "grad_norm": 8.311882972717285, "learning_rate": 4.946000000000001e-06, "loss": 12.0904, "mean_token_accuracy": 0.24014336615800858, "num_tokens": 948288.0, "step": 55 },
    { "epoch": 0.0112, "grad_norm": 9.599881172180176, "learning_rate": 4.945e-06, "loss": 14.3569, "mean_token_accuracy": 0.22649572789669037, "num_tokens": 967543.0, "step": 56 },
    { "epoch": 0.0114, "grad_norm": 8.84776496887207, "learning_rate": 4.9440000000000004e-06, "loss": 12.2268, "mean_token_accuracy": 0.28287841379642487, "num_tokens": 981726.0, "step": 57 },
    { "epoch": 0.0116, "grad_norm": 11.259871482849121, "learning_rate": 4.943000000000001e-06, "loss": 12.6356, "mean_token_accuracy": 0.22177419066429138, "num_tokens": 996225.0, "step": 58 },
    { "epoch": 0.0118, "grad_norm": 10.529711723327637, "learning_rate": 4.942e-06, "loss": 11.9829, "mean_token_accuracy": 0.26986077427864075, "num_tokens": 1015573.0, "step": 59 },
    { "epoch": 0.012, "grad_norm": 8.90577220916748, "learning_rate": 4.941000000000001e-06, "loss": 11.4895, "mean_token_accuracy": 0.2374911978840828, "num_tokens": 1034198.0, "step": 60 },
    { "epoch": 0.0122, "grad_norm": 7.851955413818359, "learning_rate": 4.94e-06, "loss": 12.0083, "mean_token_accuracy": 0.22478991746902466, "num_tokens": 1045419.0, "step": 61 },
    { "epoch": 0.0124, "grad_norm": 9.812698364257812, "learning_rate": 4.9390000000000005e-06, "loss": 12.9777, "mean_token_accuracy": 0.2379310354590416, "num_tokens": 1064723.0, "step": 62 },
    { "epoch": 0.0126, "grad_norm": 8.35107707977295, "learning_rate": 4.938000000000001e-06, "loss": 11.3187, "mean_token_accuracy": 0.24358975142240524, "num_tokens": 1079171.0, "step": 63 },
    { "epoch": 0.0128, "grad_norm": 7.16640567779541, "learning_rate": 4.937e-06, "loss": 10.2182, "mean_token_accuracy": 0.34151194989681244, "num_tokens": 1093695.0, "step": 64 },
    { "epoch": 0.013, "grad_norm": 10.18386459350586, "learning_rate": 4.936e-06, "loss": 12.2175, "mean_token_accuracy": 0.25961539149284363, "num_tokens": 1108580.0, "step": 65 },
    { "epoch": 0.0132, "grad_norm": 8.232446670532227, "learning_rate": 4.935e-06, "loss": 11.8182, "mean_token_accuracy": 0.27314814925193787, "num_tokens": 1123098.0, "step": 66 },
    { "epoch": 0.0134, "grad_norm": 8.809263229370117, "learning_rate": 4.9340000000000005e-06, "loss": 10.9148, "mean_token_accuracy": 0.20927418768405914, "num_tokens": 1137548.0, "step": 67 },
    { "epoch": 0.0136, "grad_norm": 9.865777015686035, "learning_rate": 4.933000000000001e-06, "loss": 12.6294, "mean_token_accuracy": 0.22962962836027145, "num_tokens": 1156845.0, "step": 68 },
    { "epoch": 0.0138, "grad_norm": 7.259024620056152, "learning_rate": 4.932e-06, "loss": 9.7717, "mean_token_accuracy": 0.26875001192092896, "num_tokens": 1171363.0, "step": 69 },
    { "epoch": 0.014, "grad_norm": 11.756244659423828, "learning_rate": 4.931e-06, "loss": 12.6686, "mean_token_accuracy": 0.28285714983940125, "num_tokens": 1185814.0, "step": 70 },
    { "epoch": 0.0142, "grad_norm": 9.128395080566406, "learning_rate": 4.93e-06, "loss": 11.1826, "mean_token_accuracy": 0.21765056997537613, "num_tokens": 1200437.0, "step": 71 },
    { "epoch": 0.0144, "grad_norm": 8.472599029541016, "learning_rate": 4.929000000000001e-06, "loss": 11.6617, "mean_token_accuracy": 0.2060810774564743, "num_tokens": 1219264.0, "step": 72 },
    { "epoch": 0.0146, "grad_norm": 11.549400329589844, "learning_rate": 4.928000000000001e-06, "loss": 12.9588, "mean_token_accuracy": 0.25, "num_tokens": 1238560.0, "step": 73 },
    { "epoch": 0.0148, "grad_norm": 11.084433555603027, "learning_rate": 4.9270000000000004e-06, "loss": 10.4111, "mean_token_accuracy": 0.21954887360334396, "num_tokens": 1247376.0, "step": 74 },
    { "epoch": 0.015, "grad_norm": 10.906563758850098, "learning_rate": 4.926e-06, "loss": 12.5231, "mean_token_accuracy": 0.23307790607213974, "num_tokens": 1266611.0, "step": 75 },
    { "epoch": 0.0152, "grad_norm": 9.466647148132324, "learning_rate": 4.925e-06, "loss": 11.4363, "mean_token_accuracy": 0.24193547666072845, "num_tokens": 1281132.0, "step": 76 },
    { "epoch": 0.0154, "grad_norm": 11.185935020446777, "learning_rate": 4.924000000000001e-06, "loss": 12.6383, "mean_token_accuracy": 0.22685185074806213, "num_tokens": 1301049.0, "step": 77 },
    { "epoch": 0.0156, "grad_norm": 11.0143461227417, "learning_rate": 4.923000000000001e-06, "loss": 12.479, "mean_token_accuracy": 0.22962962836027145, "num_tokens": 1322278.0, "step": 78 },
    { "epoch": 0.0158, "grad_norm": 12.330080032348633, "learning_rate": 4.9220000000000005e-06, "loss": 12.2351, "mean_token_accuracy": 0.2675606608390808, "num_tokens": 1341536.0, "step": 79 },
    { "epoch": 0.016, "grad_norm": 10.486513137817383, "learning_rate": 4.921e-06, "loss": 11.6896, "mean_token_accuracy": 0.25356507301330566, "num_tokens": 1355885.0, "step": 80 },
    { "epoch": 0.0162, "grad_norm": 12.150262832641602, "learning_rate": 4.92e-06, "loss": 12.5725, "mean_token_accuracy": 0.2321428582072258, "num_tokens": 1375181.0, "step": 81 },
    { "epoch": 0.0164, "grad_norm": 11.387964248657227, "learning_rate": 4.919000000000001e-06, "loss": 11.5796, "mean_token_accuracy": 0.29461538791656494, "num_tokens": 1384853.0, "step": 82 },
    { "epoch": 0.0166, "grad_norm": 9.878727912902832, "learning_rate": 4.918e-06, "loss": 11.7534, "mean_token_accuracy": 0.24473684281110764, "num_tokens": 1401178.0, "step": 83 },
    { "epoch": 0.0168, "grad_norm": 9.827662467956543, "learning_rate": 4.9170000000000005e-06, "loss": 12.0345, "mean_token_accuracy": 0.18034055829048157, "num_tokens": 1419976.0, "step": 84 },
    { "epoch": 0.017, "grad_norm": 10.327628135681152, "learning_rate": 4.916e-06, "loss": 11.1049, "mean_token_accuracy": 0.3095238208770752, "num_tokens": 1429652.0, "step": 85 },
    { "epoch": 0.0172, "grad_norm": 10.67590045928955, "learning_rate": 4.915e-06, "loss": 10.7087, "mean_token_accuracy": 0.2750582844018936, "num_tokens": 1444113.0, "step": 86 },
    { "epoch": 0.0174, "grad_norm": 20.292959213256836, "learning_rate": 4.914000000000001e-06, "loss": 10.797, "mean_token_accuracy": 0.32500000298023224, "num_tokens": 1453773.0, "step": 87 },
    { "epoch": 0.0176, "grad_norm": 8.3117036819458, "learning_rate": 4.913e-06, "loss": 9.394, "mean_token_accuracy": 0.31915584206581116, "num_tokens": 1475024.0, "step": 88 },
    { "epoch": 0.0178, "grad_norm": 11.334404945373535, "learning_rate": 4.9120000000000006e-06, "loss": 11.0718, "mean_token_accuracy": 0.2678571492433548, "num_tokens": 1489489.0, "step": 89 },
    { "epoch": 0.018, "grad_norm": 15.36023235321045, "learning_rate": 4.911e-06, "loss": 11.7992, "mean_token_accuracy": 0.28607918322086334, "num_tokens": 1508816.0, "step": 90 },
    { "epoch": 0.0182, "grad_norm": 11.725635528564453, "learning_rate": 4.9100000000000004e-06, "loss": 11.9207, "mean_token_accuracy": 0.26123301684856415, "num_tokens": 1530050.0, "step": 91 },
    { "epoch": 0.0184, "grad_norm": 14.75428295135498, "learning_rate": 4.909000000000001e-06, "loss": 11.0294, "mean_token_accuracy": 0.21236559003591537, "num_tokens": 1544713.0, "step": 92 },
    { "epoch": 0.0186, "grad_norm": 10.447436332702637, "learning_rate": 4.908e-06, "loss": 9.7536, "mean_token_accuracy": 0.24568965286016464, "num_tokens": 1559226.0, "step": 93 },
    { "epoch": 0.0188, "grad_norm": 14.036280632019043, "learning_rate": 4.907000000000001e-06, "loss": 12.2065, "mean_token_accuracy": 0.2771739065647125, "num_tokens": 1568898.0, "step": 94 },
    { "epoch": 0.019, "grad_norm": 13.364744186401367, "learning_rate": 4.906e-06, "loss": 13.168, "mean_token_accuracy": 0.2321428582072258, "num_tokens": 1588133.0, "step": 95 },
    { "epoch": 0.0192, "grad_norm": 12.441611289978027, "learning_rate": 4.9050000000000005e-06, "loss": 11.6806, "mean_token_accuracy": 0.21875, "num_tokens": 1607218.0, "step": 96 },
    { "epoch": 0.0194, "grad_norm": 11.559666633605957, "learning_rate": 4.904000000000001e-06, "loss": 12.2055, "mean_token_accuracy": 0.23010753095149994, "num_tokens": 1626557.0, "step": 97 },
    { "epoch": 0.0196, "grad_norm": 11.131147384643555, "learning_rate": 4.903e-06, "loss": 11.4201, "mean_token_accuracy": 0.2931034490466118, "num_tokens": 1641017.0, "step": 98 },
    { "epoch": 0.0198, "grad_norm": 12.186894416809082, "learning_rate": 4.902000000000001e-06, "loss": 12.1568, "mean_token_accuracy": 0.23180076479911804, "num_tokens": 1655475.0, "step": 99 },
    { "epoch": 0.02, "grad_norm": 11.864778518676758, "learning_rate": 4.901e-06, "loss": 12.4851, "mean_token_accuracy": 0.21008403599262238, "num_tokens": 1674815.0, "step": 100 },
    { "epoch": 0.0202, "grad_norm": 14.35185718536377, "learning_rate": 4.9000000000000005e-06, "loss": 11.1814, "mean_token_accuracy": 0.2612612694501877, "num_tokens": 1686681.0, "step": 101 },
    { "epoch": 0.0204, "grad_norm": 18.715627670288086, "learning_rate": 4.899e-06, "loss": 11.632, "mean_token_accuracy": 0.30199430882930756, "num_tokens": 1701565.0, "step": 102 },
    { "epoch": 0.0206, "grad_norm": 12.892010688781738, "learning_rate": 4.898e-06, "loss": 12.3157, "mean_token_accuracy": 0.23806367069482803, "num_tokens": 1715938.0, "step": 103 },
    { "epoch": 0.0208, "grad_norm": 13.078173637390137, "learning_rate": 4.897000000000001e-06, "loss": 11.4954, "mean_token_accuracy": 0.22177419066429138, "num_tokens": 1734685.0, "step": 104 },
    { "epoch": 0.021, "grad_norm": 14.580618858337402, "learning_rate": 4.896e-06, "loss": 10.4619, "mean_token_accuracy": 0.2797202914953232, "num_tokens": 1757899.0, "step": 105 },
    { "epoch": 0.0212, "grad_norm": 13.174345016479492, "learning_rate": 4.8950000000000006e-06, "loss": 10.5835, "mean_token_accuracy": 0.27546295523643494, "num_tokens": 1778410.0, "step": 106 },
    { "epoch": 0.0214, "grad_norm": 13.970446586608887, "learning_rate": 4.894e-06, "loss": 11.1265, "mean_token_accuracy": 0.2693749964237213, "num_tokens": 1791819.0, "step": 107 },
    { "epoch": 0.0216, "grad_norm": 11.994514465332031, "learning_rate": 4.893e-06, "loss": 11.3511, "mean_token_accuracy": 0.25462962687015533, "num_tokens": 1811118.0, "step": 108 },
    { "epoch": 0.0218, "grad_norm": 13.328775405883789, "learning_rate": 4.892000000000001e-06, "loss": 11.9787, "mean_token_accuracy": 0.23790322244167328, "num_tokens": 1825598.0, "step": 109 },
    { "epoch": 0.022, "grad_norm": 11.813949584960938, "learning_rate": 4.891e-06, "loss": 10.614, "mean_token_accuracy": 0.25833334028720856, "num_tokens": 1840096.0, "step": 110 },
    { "epoch": 0.0222, "grad_norm": 17.409263610839844, "learning_rate": 4.890000000000001e-06, "loss": 10.5087, "mean_token_accuracy": 0.32692308723926544, "num_tokens": 1849750.0, "step": 111 },
    { "epoch": 0.0224, "grad_norm": 19.462722778320312, "learning_rate": 4.889e-06, "loss": 10.2288, "mean_token_accuracy": 0.25729166716337204, "num_tokens": 1859433.0, "step": 112 },
    { "epoch": 0.0226, "grad_norm": 11.595419883728027, "learning_rate": 4.8880000000000005e-06, "loss": 11.0996, "mean_token_accuracy": 0.2807881832122803, "num_tokens": 1873892.0, "step": 113 },
    { "epoch": 0.0228, "grad_norm": 14.413046836853027, "learning_rate": 4.887000000000001e-06, "loss": 11.6535, "mean_token_accuracy": 0.2341153472661972, "num_tokens": 1893196.0, "step": 114 },
    { "epoch": 0.023, "grad_norm": 12.218829154968262, "learning_rate": 4.886e-06, "loss": 10.9169, "mean_token_accuracy": 0.2557603716850281, "num_tokens": 1907676.0, "step": 115 },
    { "epoch": 0.0232, "grad_norm": 16.51720428466797, "learning_rate": 4.885000000000001e-06, "loss": 9.8406, "mean_token_accuracy": 0.25968992710113525, "num_tokens": 1921596.0, "step": 116 },
    { "epoch": 0.0234, "grad_norm": 10.608232498168945, "learning_rate": 4.884e-06, "loss": 9.9809, "mean_token_accuracy": 0.22068965435028076, "num_tokens": 1940247.0, "step": 117 },
    { "epoch": 0.0236, "grad_norm": 14.17568302154541, "learning_rate": 4.8830000000000005e-06, "loss": 11.3908, "mean_token_accuracy": 0.23790322244167328, "num_tokens": 1959268.0, "step": 118 },
    { "epoch": 0.0238, "grad_norm": 12.197131156921387, "learning_rate": 4.882000000000001e-06, "loss": 10.9438, "mean_token_accuracy": 0.2538699731230736, "num_tokens": 1978218.0, "step": 119 },
    { "epoch": 0.024, "grad_norm": 15.261062622070312, "learning_rate": 4.881e-06, "loss": 11.0329, "mean_token_accuracy": 0.24621212482452393, "num_tokens": 1997519.0, "step": 120 },
    { "epoch": 0.0242, "grad_norm": 13.116536140441895, "learning_rate": 4.880000000000001e-06, "loss": 11.9918, "mean_token_accuracy": 0.24137930572032928, "num_tokens": 2016817.0, "step": 121 },
    { "epoch": 0.0244, "grad_norm": 18.174373626708984, "learning_rate": 4.879e-06, "loss": 11.0414, "mean_token_accuracy": 0.25833334028720856, "num_tokens": 2035875.0, "step": 122 },
    { "epoch": 0.0246, "grad_norm": 17.258121490478516, "learning_rate": 4.8780000000000006e-06, "loss": 11.1173, "mean_token_accuracy": 0.3010057359933853, "num_tokens": 2050330.0, "step": 123 },
    { "epoch": 0.0248, "grad_norm": 12.636884689331055, "learning_rate": 4.877000000000001e-06, "loss": 11.3399, "mean_token_accuracy": 0.2879464328289032, "num_tokens": 2070156.0, "step": 124 },
    { "epoch": 0.025, "grad_norm": 18.457618713378906, "learning_rate": 4.876e-06, "loss": 11.5595, "mean_token_accuracy": 0.28418803960084915, "num_tokens": 2084771.0, "step": 125 },
    { "epoch": 0.0252, "grad_norm": 14.281397819519043, "learning_rate": 4.875e-06, "loss": 10.9901, "mean_token_accuracy": 0.28140393644571304, "num_tokens": 2104228.0, "step": 126 },
    { "epoch": 0.0254, "grad_norm": 13.08484935760498, "learning_rate": 4.874e-06, "loss": 9.4802, "mean_token_accuracy": 0.2637759745121002, "num_tokens": 2118877.0, "step": 127 },
    { "epoch": 0.0256, "grad_norm": 11.949925422668457, "learning_rate": 4.873000000000001e-06, "loss": 10.3703, "mean_token_accuracy": 0.2619825750589371, "num_tokens": 2133298.0, "step": 128 },
    { "epoch": 0.0258, "grad_norm": 14.950297355651855, "learning_rate": 4.872000000000001e-06, "loss": 10.6116, "mean_token_accuracy": 0.33000001311302185, "num_tokens": 2142955.0, "step": 129 },
    { "epoch": 0.026, "grad_norm": 16.433286666870117, "learning_rate": 4.8710000000000005e-06, "loss": 9.4482, "mean_token_accuracy": 0.3333333432674408, "num_tokens": 2159447.0, "step": 130 },
    { "epoch": 0.0262, "grad_norm": 12.467981338500977, "learning_rate": 4.87e-06, "loss": 10.2354, "mean_token_accuracy": 0.33796295523643494, "num_tokens": 2178352.0, "step": 131 },
    { "epoch": 0.0264, "grad_norm": 11.493000030517578, "learning_rate": 4.869e-06, "loss": 9.0597, "mean_token_accuracy": 0.2736175060272217, "num_tokens": 2199174.0, "step": 132 },
    { "epoch": 0.0266, "grad_norm": 13.966115951538086, "learning_rate": 4.868000000000001e-06, "loss": 10.3177, "mean_token_accuracy": 0.3325917571783066, "num_tokens": 2213636.0, "step": 133 },
    { "epoch": 0.0268, "grad_norm": 13.971321105957031, "learning_rate": 4.867000000000001e-06, "loss": 10.2564, "mean_token_accuracy": 0.31481482088565826, "num_tokens": 2228050.0, "step": 134 },
    { "epoch": 0.027, "grad_norm": 16.246124267578125, "learning_rate": 4.8660000000000005e-06, "loss": 10.7549, "mean_token_accuracy": 0.30943846702575684, "num_tokens": 2247348.0, "step": 135 },
    { "epoch": 0.0272, "grad_norm": 17.702425003051758, "learning_rate": 4.865e-06, "loss": 10.1729, "mean_token_accuracy": 0.3575989753007889, "num_tokens": 2261806.0, "step": 136 },
    { "epoch": 0.0274, "grad_norm": 13.732104301452637, "learning_rate": 4.864e-06, "loss": 10.9004, "mean_token_accuracy": 0.28607918322086334, "num_tokens": 2281224.0, "step": 137 },
    { "epoch": 0.0276, "grad_norm": 17.568925857543945, "learning_rate": 4.863000000000001e-06, "loss": 10.1658, "mean_token_accuracy": 0.2857142984867096, "num_tokens": 2295720.0, "step": 138 },
    { "epoch": 0.0278, "grad_norm": 13.424271583557129, "learning_rate": 4.862e-06, "loss": 10.3543, "mean_token_accuracy": 0.28928571939468384, "num_tokens": 2310345.0, "step": 139 },
    { "epoch": 0.028, "grad_norm": 14.413524627685547, "learning_rate": 4.8610000000000006e-06, "loss": 11.3822, "mean_token_accuracy": 0.22828783839941025, "num_tokens": 2347736.0, "step": 140 },
    { "epoch": 0.0282, "grad_norm": 22.185630798339844, "learning_rate": 4.86e-06, "loss": 10.6185, "mean_token_accuracy": 0.2911111190915108, "num_tokens": 2361685.0, "step": 141 },
    { "epoch": 0.0284, "grad_norm": 12.253998756408691, "learning_rate": 4.859e-06, "loss": 9.2399, "mean_token_accuracy": 0.288621261715889, "num_tokens": 2376158.0, "step": 142 },
    { "epoch": 0.0286, "grad_norm": 16.229686737060547, "learning_rate": 4.858000000000001e-06, "loss": 10.3377, "mean_token_accuracy": 0.22227822244167328, "num_tokens": 2391501.0, "step": 143 },
    { "epoch": 0.0288, "grad_norm": 9.664397239685059, "learning_rate": 4.857e-06, "loss": 9.3351, "mean_token_accuracy": 0.26456456631422043, "num_tokens": 2411105.0, "step": 144 },
    { "epoch": 0.029, "grad_norm": 12.8119478225708, "learning_rate": 4.856e-06, "loss": 9.87, "mean_token_accuracy": 0.24166666716337204, "num_tokens": 2431185.0, "step": 145 },
    { "epoch": 0.0292, "grad_norm": 10.490764617919922, "learning_rate": 4.855e-06, "loss": 8.7889, "mean_token_accuracy": 0.36666667461395264, "num_tokens": 2452470.0, "step": 146 },
    { "epoch": 0.0294, "grad_norm": 13.65211296081543, "learning_rate": 4.8540000000000005e-06, "loss": 10.4286, "mean_token_accuracy": 0.2718253955245018, "num_tokens": 2472396.0, "step": 147 },
    { "epoch": 0.0296, "grad_norm": 18.866209030151367, "learning_rate": 4.853000000000001e-06, "loss": 10.2372, "mean_token_accuracy": 0.38141025602817535, "num_tokens": 2486886.0, "step": 148 },
    { "epoch": 0.0298, "grad_norm": 14.852785110473633, "learning_rate": 4.852e-06, "loss": 10.0487, "mean_token_accuracy": 0.30820105969905853, "num_tokens": 2506143.0, "step": 149 },
    { "epoch": 0.03, "grad_norm": 13.972378730773926, "learning_rate": 4.851e-06, "loss": 10.6291, "mean_token_accuracy": 0.25820106267929077, "num_tokens": 2520401.0, "step": 150 },
    { "epoch": 0.0302, "grad_norm": 14.618459701538086, "learning_rate": 4.85e-06, "loss": 10.7579, "mean_token_accuracy": 0.23571428656578064, "num_tokens": 2539135.0, "step": 151 },
    { "epoch": 0.0304, "grad_norm": 15.176739692687988, "learning_rate": 4.8490000000000005e-06, "loss": 9.6595, "mean_token_accuracy": 0.2510339096188545, "num_tokens": 2558562.0, "step": 152 },
    { "epoch": 0.0306, "grad_norm": 16.972919464111328, "learning_rate": 4.848000000000001e-06, "loss": 10.8999, "mean_token_accuracy": 0.25925925374031067, "num_tokens": 2577856.0, "step": 153 },
    { "epoch": 0.0308, "grad_norm": 16.451147079467773, "learning_rate": 4.847e-06, "loss": 8.5505, "mean_token_accuracy": 0.36249999701976776, "num_tokens": 2592246.0, "step": 154 },
    { "epoch": 0.031, "grad_norm": 24.95278549194336, "learning_rate": 4.846e-06, "loss": 10.2367, "mean_token_accuracy": 0.28735632449388504, "num_tokens": 2607133.0, "step": 155 },
    { "epoch": 0.0312, "grad_norm": 15.770346641540527, "learning_rate": 4.845e-06, "loss": 9.6085, "mean_token_accuracy": 0.27272728085517883, "num_tokens": 2621734.0, "step": 156 },
    { "epoch": 0.0314, "grad_norm": 12.314064025878906, "learning_rate": 4.8440000000000005e-06, "loss": 9.816, "mean_token_accuracy": 0.2540322542190552, "num_tokens": 2641357.0, "step": 157 },
    { "epoch": 0.0316, "grad_norm": 13.379799842834473, "learning_rate": 4.843000000000001e-06, "loss": 10.378, "mean_token_accuracy": 0.32804232835769653, "num_tokens": 2660177.0, "step": 158 },
    { "epoch": 0.0318, "grad_norm": 17.458240509033203, "learning_rate": 4.842e-06, "loss": 9.3842, "mean_token_accuracy": 0.2557164579629898, "num_tokens": 2679528.0, "step": 159 },
    { "epoch": 0.032, "grad_norm": 25.741785049438477, "learning_rate": 4.841e-06, "loss": 9.0768, "mean_token_accuracy": 0.3270474076271057, "num_tokens": 2693652.0, "step": 160 },
    { "epoch": 0.0322, "grad_norm": 13.557204246520996, "learning_rate": 4.84e-06, "loss": 9.6471, "mean_token_accuracy": 0.28735632449388504, "num_tokens": 2713320.0, "step": 161 },
    { "epoch": 0.0324, "grad_norm": 14.830061912536621, "learning_rate": 4.839000000000001e-06, "loss": 11.3496, "mean_token_accuracy": 0.24049513787031174, "num_tokens": 2733424.0, "step": 162 },
    { "epoch": 0.0326, "grad_norm": 12.371265411376953, "learning_rate": 4.838e-06, "loss": 8.6217, "mean_token_accuracy": 0.25988225638866425, "num_tokens": 2752613.0, "step": 163 },
    { "epoch": 0.0328, "grad_norm": 12.722640037536621, "learning_rate": 4.8370000000000004e-06, "loss": 9.2979, "mean_token_accuracy": 0.2337121218442917, "num_tokens": 2773896.0, "step": 164 },
    { "epoch": 0.033, "grad_norm": 12.796667098999023, "learning_rate": 4.836e-06, "loss": 9.9112, "mean_token_accuracy": 0.3337438404560089, "num_tokens": 2793334.0, "step": 165 },
    { "epoch": 0.0332, "grad_norm": 15.983271598815918, "learning_rate": 4.835e-06, "loss": 9.5066, "mean_token_accuracy": 0.29256465286016464, "num_tokens": 2807755.0, "step": 166 },
    { "epoch": 0.0334, "grad_norm": 15.554715156555176, "learning_rate": 4.834000000000001e-06, "loss": 10.5127, "mean_token_accuracy": 0.2986453175544739, "num_tokens": 2827193.0, "step": 167 },
    { "epoch": 0.0336, "grad_norm": 14.6381196975708, "learning_rate": 4.833e-06, "loss": 9.7383, "mean_token_accuracy": 0.2586618810892105, "num_tokens": 2846449.0, "step": 168 },
    { "epoch": 0.0338, "grad_norm": 16.013647079467773, "learning_rate": 4.8320000000000005e-06, "loss": 9.0009, "mean_token_accuracy": 0.32356322556734085, "num_tokens": 2860910.0, "step": 169 },
    { "epoch": 0.034, "grad_norm": 13.708538055419922, "learning_rate": 4.831e-06, "loss": 9.4298, "mean_token_accuracy": 0.28114478290081024, "num_tokens": 2880172.0, "step": 170 },
    { "epoch": 0.0342, "grad_norm": 14.314607620239258, "learning_rate": 4.83e-06, "loss": 9.2596, "mean_token_accuracy": 0.3014460504055023, "num_tokens": 2899434.0, "step": 171 },
    { "epoch": 0.0344, "grad_norm": 12.27084732055664, "learning_rate": 4.829000000000001e-06, "loss": 9.5638, "mean_token_accuracy": 0.33095238357782364, "num_tokens": 2913932.0, "step": 172 },
    { "epoch": 0.0346, "grad_norm": 14.023222923278809, "learning_rate": 4.828e-06, "loss": 9.689, "mean_token_accuracy": 0.31680162250995636, "num_tokens": 2928356.0, "step": 173 },
    { "epoch": 0.0348, "grad_norm": 14.490949630737305, "learning_rate": 4.8270000000000005e-06, "loss": 8.8018, "mean_token_accuracy": 0.34068627655506134, "num_tokens": 2940589.0, "step": 174 },
    { "epoch": 0.035, "grad_norm": 17.97809410095215, "learning_rate": 4.826e-06, "loss": 9.9556, "mean_token_accuracy": 0.3452381044626236, "num_tokens": 2959486.0, "step": 175 },
    { "epoch": 0.0352, "grad_norm": 13.302875518798828, "learning_rate": 4.825e-06, "loss": 9.4104, "mean_token_accuracy": 0.23885918408632278, "num_tokens": 2974195.0, "step": 176 },
    { "epoch": 0.0354, "grad_norm": 12.792606353759766, "learning_rate": 4.824000000000001e-06, "loss": 8.2518, "mean_token_accuracy": 0.42592592537403107, "num_tokens": 2984011.0, "step": 177 },
    { "epoch": 0.0356, "grad_norm": 18.23525619506836, "learning_rate": 4.823e-06, "loss": 8.8069, "mean_token_accuracy": 0.40079365670681, "num_tokens": 2998487.0, "step": 178 },
    { "epoch": 0.0358, "grad_norm": 14.206355094909668, "learning_rate": 4.822000000000001e-06, "loss": 9.1892, "mean_token_accuracy": 0.2290322557091713, "num_tokens": 3019773.0, "step": 179 },
    { "epoch": 0.036, "grad_norm": 12.26903247833252, "learning_rate": 4.821e-06, "loss": 8.4939, "mean_token_accuracy": 0.28287841379642487, "num_tokens": 3040775.0, "step": 180 },
    { "epoch": 0.0362, "grad_norm": 15.23544979095459, "learning_rate": 4.8200000000000004e-06, "loss": 8.9121, "mean_token_accuracy": 0.3285440653562546, "num_tokens": 3060080.0, "step": 181 },
    { "epoch": 0.0364, "grad_norm": 57.132049560546875, "learning_rate": 4.819e-06, "loss": 8.9738, "mean_token_accuracy": 0.3175750821828842, "num_tokens": 3079332.0, "step": 182 },
    { "epoch": 0.0366, "grad_norm": 13.691211700439453, "learning_rate": 4.818e-06, "loss": 8.0025, "mean_token_accuracy": 0.34656085073947906, "num_tokens": 3088989.0, "step": 183 },
    { "epoch": 0.0368, "grad_norm": 15.762035369873047, "learning_rate": 4.817000000000001e-06, "loss": 9.4602, "mean_token_accuracy": 0.34457671642303467, "num_tokens": 3107732.0, "step": 184 },
    { "epoch": 0.037, "grad_norm": 17.034019470214844, "learning_rate": 4.816e-06, "loss": 9.4491, "mean_token_accuracy": 0.34666667878627777, "num_tokens": 3122423.0, "step": 185 },
    { "epoch": 0.0372, "grad_norm": 12.461385726928711, "learning_rate": 4.8150000000000005e-06, "loss": 9.0659, "mean_token_accuracy": 0.2838345915079117, "num_tokens": 3141889.0, "step": 186 },
    { "epoch": 0.0374, "grad_norm": 13.194416046142578, "learning_rate": 4.814e-06, "loss": 8.8107, "mean_token_accuracy": 0.36685824394226074, "num_tokens": 3161194.0, "step": 187 },
    { "epoch": 0.0376, "grad_norm": 14.799727439880371, "learning_rate": 4.813e-06, "loss": 9.7196, "mean_token_accuracy": 0.377616748213768, "num_tokens": 3175646.0, "step": 188 },
    { "epoch": 0.0378, "grad_norm": 15.303200721740723, "learning_rate": 4.812000000000001e-06, "loss": 10.0029, "mean_token_accuracy": 0.38161374628543854, "num_tokens": 3194941.0, "step": 189 },
    { "epoch": 0.038, "grad_norm": 13.99432373046875, "learning_rate": 4.811000000000001e-06, "loss": 8.4004, "mean_token_accuracy": 0.37096773087978363, "num_tokens": 3214205.0, "step": 190 },
    { "epoch": 0.0382, "grad_norm": 12.163537979125977, "learning_rate": 4.8100000000000005e-06, "loss": 9.0058, "mean_token_accuracy": 0.35395538806915283, "num_tokens": 3235063.0, "step": 191 },
    { "epoch": 0.0384, "grad_norm": 12.369959831237793, "learning_rate": 4.809e-06, "loss": 9.0358, "mean_token_accuracy": 0.4807407408952713, "num_tokens": 3249517.0, "step": 192 },
    { "epoch": 0.0386, "grad_norm": 21.97284698486328, "learning_rate": 4.808e-06, "loss": 8.7725, "mean_token_accuracy": 0.41179338097572327, "num_tokens": 3268738.0, "step": 193 },
    { "epoch": 0.0388, "grad_norm": 13.040814399719238, "learning_rate": 4.807000000000001e-06, "loss": 9.4628, "mean_token_accuracy": 0.424450546503067, "num_tokens": 3288032.0, "step": 194 },
    { "epoch": 0.039, "grad_norm": 14.395992279052734, "learning_rate": 4.806000000000001e-06, "loss": 9.1626, "mean_token_accuracy": 0.454365074634552, "num_tokens": 3306813.0, "step": 195 },
    { "epoch": 0.0392, "grad_norm": 11.64809799194336, "learning_rate": 4.805000000000001e-06, "loss": 8.7527, "mean_token_accuracy": 0.3843159079551697, "num_tokens": 3325561.0, "step": 196 },
    { "epoch": 0.0394, "grad_norm": 25.551607131958008, "learning_rate": 4.804e-06, "loss": 8.5253, "mean_token_accuracy": 0.3452381044626236, "num_tokens": 3339397.0, "step": 197 },
    { "epoch": 0.0396, "grad_norm": 12.025030136108398, "learning_rate": 4.8030000000000004e-06, "loss": 7.7581, "mean_token_accuracy": 0.5191570967435837, "num_tokens": 3349055.0, "step": 198 },
    { "epoch": 0.0398, "grad_norm": 10.044900894165039, "learning_rate": 4.802000000000001e-06, "loss": 9.0733, "mean_token_accuracy": 0.4278416335582733, "num_tokens": 3368313.0, "step": 199 },
    { "epoch": 0.04, "grad_norm": 10.911112785339355, "learning_rate": 4.801e-06, "loss": 8.6906, "mean_token_accuracy": 0.5000000149011612, "num_tokens": 3383810.0, "step": 200 },
    { "epoch": 0.0402, "grad_norm": 10.119377136230469, "learning_rate": 4.800000000000001e-06, "loss": 8.8941, "mean_token_accuracy": 0.4000000059604645, "num_tokens": 3398244.0, "step": 201 },
    { "epoch": 0.0404, "grad_norm": 10.526436805725098, "learning_rate": 4.799e-06, "loss": 7.8408, "mean_token_accuracy": 0.39772726595401764, "num_tokens": 3414686.0, "step": 202 },
    { "epoch": 0.0406, "grad_norm": 10.947959899902344, "learning_rate": 4.7980000000000005e-06, "loss": 8.4085, "mean_token_accuracy": 0.40992647409439087, "num_tokens": 3433954.0, "step": 203 },
    { "epoch": 0.0408, "grad_norm": 10.811299324035645, "learning_rate": 4.797000000000001e-06, "loss": 7.7418, "mean_token_accuracy": 0.5370370447635651, "num_tokens": 3443610.0, "step": 204 },
    { "epoch": 0.041, "grad_norm": 9.57394027709961, "learning_rate": 4.796e-06, "loss": 8.6595, "mean_token_accuracy": 0.45967741310596466, "num_tokens": 3462909.0, "step": 205 },
    { "epoch": 0.0412, "grad_norm": 12.8336181640625, "learning_rate": 4.795e-06, "loss": 8.6943, "mean_token_accuracy": 0.45967741310596466, "num_tokens": 3477620.0, "step": 206 },
    { "epoch": 0.0414, "grad_norm": 11.37842845916748, "learning_rate": 4.794e-06, "loss": 8.5963, "mean_token_accuracy": 0.4539627134799957, "num_tokens": 3496675.0, "step": 207 },
    { "epoch": 0.0416, "grad_norm": 12.427331924438477, "learning_rate": 4.7930000000000005e-06, "loss": 8.67, "mean_token_accuracy": 0.3821548819541931, "num_tokens": 3515975.0, "step": 208 },
    { "epoch": 0.0418, "grad_norm": 10.371417045593262, "learning_rate": 4.792000000000001e-06, "loss": 8.4859, "mean_token_accuracy": 0.40784314274787903, "num_tokens": 3535241.0, "step": 209 },
    { "epoch": 0.042, "grad_norm": 11.788932800292969, "learning_rate": 4.791e-06, "loss": 8.7143, "mean_token_accuracy": 0.3741379380226135, "num_tokens": 3554502.0, "step": 210 },
    { "epoch": 0.0422, "grad_norm": 15.102238655090332, "learning_rate": 4.79e-06, "loss": 8.5368, "mean_token_accuracy": 0.40980392694473267, "num_tokens": 3574437.0, "step": 211 },
    { "epoch": 0.0424, "grad_norm": 11.23690128326416, "learning_rate": 4.789e-06, "loss": 8.7326, "mean_token_accuracy": 0.45628078281879425, "num_tokens": 3593696.0, "step": 212 },
    { "epoch": 0.0426, "grad_norm": 11.3884859085083, "learning_rate": 4.7880000000000006e-06, "loss": 9.0773, "mean_token_accuracy": 0.4404761791229248, "num_tokens": 3608150.0, "step": 213 },
    { "epoch": 0.0428, "grad_norm": 11.106508255004883, "learning_rate": 4.787000000000001e-06, "loss": 9.5678, "mean_token_accuracy": 0.3500000089406967, "num_tokens": 3627724.0, "step": 214 },
    { "epoch": 0.043, "grad_norm": 11.691924095153809, "learning_rate": 4.7860000000000004e-06, "loss": 8.2192, "mean_token_accuracy": 0.41582491993904114, "num_tokens": 3642224.0, "step": 215 },
    { "epoch": 0.0432, "grad_norm": 13.973259925842285, "learning_rate": 4.785e-06, "loss": 8.9823, "mean_token_accuracy": 0.44195401668548584, "num_tokens": 3660885.0, "step": 216 },
    { "epoch": 0.0434, "grad_norm": 11.744901657104492, "learning_rate": 4.784e-06, "loss": 8.698, "mean_token_accuracy": 0.4291125535964966, "num_tokens": 3679634.0, "step": 217 },
    { "epoch": 0.0436, "grad_norm": 13.440972328186035, "learning_rate": 4.783000000000001e-06, "loss": 8.3414, "mean_token_accuracy": 0.47999998927116394, "num_tokens": 3689286.0, "step": 218 },
    { "epoch": 0.0438, "grad_norm": 9.561469078063965, "learning_rate": 4.782e-06, "loss": 7.6889, "mean_token_accuracy": 0.41898825764656067, "num_tokens": 3710533.0, "step": 219 },
    { "epoch": 0.044, "grad_norm": 12.257551193237305, "learning_rate": 4.7810000000000005e-06, "loss": 8.4541, "mean_token_accuracy": 0.5016103088855743, "num_tokens": 3725145.0, "step": 220 },
    { "epoch": 0.0442, "grad_norm": 10.785005569458008, "learning_rate": 4.78e-06, "loss": 9.7029, "mean_token_accuracy": 0.4015151560306549, "num_tokens": 3743544.0, "step": 221 },
    { "epoch": 0.0444, "grad_norm": 10.52768611907959, "learning_rate": 4.779e-06, "loss": 8.6062, "mean_token_accuracy": 0.40740741789340973, "num_tokens": 3762705.0, "step": 222 },
    { "epoch": 0.0446, "grad_norm": 17.872329711914062, "learning_rate": 4.778000000000001e-06, "loss": 7.3673, "mean_token_accuracy": 0.45370370149612427, "num_tokens": 3777166.0, "step": 223 },
    { "epoch": 0.0448, "grad_norm": 11.053666114807129, "learning_rate": 4.777e-06, "loss": 9.3428, "mean_token_accuracy": 0.4186507910490036, "num_tokens": 3795909.0, "step": 224 },
    { "epoch": 0.045, "grad_norm": 9.966497421264648, "learning_rate": 4.7760000000000005e-06, "loss": 7.9071, "mean_token_accuracy": 0.44636015594005585, "num_tokens": 3815176.0, "step": 225 },
    { "epoch": 0.0452, "grad_norm": 12.605799674987793, "learning_rate": 4.775e-06, "loss": 8.3063, "mean_token_accuracy": 0.41692790389060974, "num_tokens": 3834478.0, "step": 226 },
    { "epoch": 0.0454, "grad_norm": 9.679677963256836, "learning_rate": 4.774e-06, "loss": 8.7331, "mean_token_accuracy": 0.45628078281879425, "num_tokens": 3853737.0, "step": 227 },
    { "epoch": 0.0456, "grad_norm": 27.174549102783203, "learning_rate": 4.773000000000001e-06, "loss": 8.0078, "mean_token_accuracy": 0.4434434473514557, "num_tokens": 3873882.0, "step": 228 },
    { "epoch": 0.0458, "grad_norm": 11.591468811035156, "learning_rate": 4.772e-06, "loss": 8.237, "mean_token_accuracy": 0.36707451939582825, "num_tokens": 3888302.0, "step": 229 },
    { "epoch": 0.046, "grad_norm": 10.10312271118164, "learning_rate": 4.7710000000000006e-06, "loss": 7.6694, "mean_token_accuracy": 0.5105820149183273, "num_tokens": 3902804.0, "step": 230 },
    { "epoch": 0.0462, "grad_norm": 13.620348930358887, "learning_rate": 4.77e-06, "loss": 8.4942, "mean_token_accuracy": 0.421875, "num_tokens": 3922146.0, "step": 231 },
    { "epoch": 0.0464, "grad_norm": 11.014819145202637, "learning_rate": 4.769e-06, "loss": 7.637, "mean_token_accuracy": 0.4495798349380493, "num_tokens": 3936372.0, "step": 232 },
    { "epoch": 0.0466, "grad_norm": 14.10721492767334, "learning_rate": 4.768000000000001e-06, "loss": 7.4068, "mean_token_accuracy": 0.5078571289777756, "num_tokens": 3946456.0, "step": 233 },
    { "epoch": 0.0468, "grad_norm": 13.256854057312012, "learning_rate": 4.767e-06, "loss": 7.9252, "mean_token_accuracy": 0.41277891397476196, "num_tokens": 3965955.0, "step": 234 },
    { "epoch": 0.047, "grad_norm": 12.264280319213867, "learning_rate": 4.766000000000001e-06, "loss": 7.6561, "mean_token_accuracy": 0.4913793057203293, "num_tokens": 3980412.0, "step": 235 },
    { "epoch": 0.0472, "grad_norm": 11.942499160766602, "learning_rate": 4.765e-06, "loss": 8.0462, "mean_token_accuracy": 0.4900284856557846, "num_tokens": 3999823.0, "step": 236 },
    { "epoch": 0.0474, "grad_norm": 10.043482780456543, "learning_rate": 4.7640000000000005e-06, "loss": 7.0965, "mean_token_accuracy": 0.4025973975658417, "num_tokens": 4014414.0, "step": 237 },
    { "epoch": 0.0476, "grad_norm": 12.545036315917969, "learning_rate": 4.763000000000001e-06, "loss": 7.667, "mean_token_accuracy": 0.38708220422267914, "num_tokens": 4028922.0, "step": 238 },
    { "epoch": 0.0478, "grad_norm": 10.067218780517578, "learning_rate": 4.762e-06, "loss": 7.1117, "mean_token_accuracy": 0.37129031121730804, "num_tokens": 4050175.0, "step": 239 },
    { "epoch": 0.048, "grad_norm": 13.74410343170166, "learning_rate": 4.761000000000001e-06, "loss": 7.9069, "mean_token_accuracy": 0.47333332896232605, "num_tokens": 4064811.0, "step": 240 },
    { "epoch": 0.0482, "grad_norm": 9.813583374023438, "learning_rate": 4.76e-06, "loss": 6.8128, "mean_token_accuracy": 0.4273170679807663, "num_tokens": 4079317.0, "step": 241 },
    { "epoch": 0.0484, "grad_norm": 10.31633472442627, "learning_rate": 4.7590000000000005e-06, "loss": 7.676, "mean_token_accuracy": 0.43584655225276947, "num_tokens": 4094093.0, "step": 242 },
    { "epoch": 0.0486, "grad_norm": 13.174894332885742, "learning_rate": 4.758e-06, "loss": 8.5029, "mean_token_accuracy": 0.42592592537403107, "num_tokens": 4113778.0, "step": 243 },
    { "epoch": 0.0488, "grad_norm": 11.068340301513672, "learning_rate": 4.757e-06, "loss": 7.6392, "mean_token_accuracy": 0.5028571337461472, "num_tokens": 4125078.0, "step": 244 },
    { "epoch": 0.049, "grad_norm": 11.669493675231934, "learning_rate": 4.756000000000001e-06, "loss": 8.0877, "mean_token_accuracy": 0.3697916716337204, "num_tokens": 4144451.0, "step": 245 },
    { "epoch": 0.0492, "grad_norm": 12.121454238891602, "learning_rate": 4.755e-06, "loss": 8.3639, "mean_token_accuracy": 0.3320707082748413, "num_tokens": 4164189.0, "step": 246 },
    { "epoch": 0.0494, "grad_norm": 11.231935501098633, "learning_rate": 4.7540000000000006e-06, "loss": 8.1039, "mean_token_accuracy": 0.40060852468013763, "num_tokens": 4183560.0, "step": 247 },
    { "epoch": 0.0496, "grad_norm": 14.818300247192383, "learning_rate": 4.753e-06, "loss": 8.3393, "mean_token_accuracy": 0.4365079402923584, "num_tokens": 4203160.0, "step": 248 },
    { "epoch": 0.0498, "grad_norm": 10.595967292785645, "learning_rate": 4.752e-06, "loss": 8.2095, "mean_token_accuracy": 0.3896551728248596, "num_tokens": 4222502.0, "step": 249 },
    { "epoch": 0.05, "grad_norm": 10.283987998962402, "learning_rate": 4.751000000000001e-06, "loss": 7.8432, "mean_token_accuracy": 0.41187499463558197, "num_tokens": 4236999.0, "step": 250 },
    { "epoch": 0.0502, "grad_norm": 64.06322479248047, "learning_rate": 4.75e-06, "loss": 8.2479, "mean_token_accuracy": 0.43703703582286835, "num_tokens": 4256296.0, "step": 251 },
    { "epoch": 0.0504, "grad_norm": 12.750511169433594, "learning_rate": 4.749000000000001e-06, "loss": 8.0847, "mean_token_accuracy": 0.4107142835855484, "num_tokens": 4270240.0, "step": 252 },
    { "epoch": 0.0506, "grad_norm": 19.006969451904297, "learning_rate": 4.748e-06, "loss": 7.6973, "mean_token_accuracy": 0.44636015594005585, "num_tokens": 4284595.0, "step": 253 },
    { "epoch": 0.0508, "grad_norm": 11.408003807067871, "learning_rate": 4.7470000000000005e-06, "loss": 7.7927, "mean_token_accuracy": 0.41428571939468384, "num_tokens": 4303855.0, "step": 254 },
    { "epoch": 0.051, "grad_norm": 11.841231346130371, "learning_rate": 4.746000000000001e-06, "loss": 7.7046, "mean_token_accuracy": 0.3849431872367859, "num_tokens": 4323122.0, "step": 255 },
    { "epoch": 0.0512, "grad_norm": 16.837949752807617, "learning_rate": 4.745e-06, "loss": 7.7556, "mean_token_accuracy": 0.38112305104732513, "num_tokens": 4342022.0, "step": 256 },
    { "epoch": 0.0514, "grad_norm": 12.48375415802002, "learning_rate": 4.744000000000001e-06, "loss": 8.2711, "mean_token_accuracy": 0.42356322705745697, "num_tokens": 4361283.0, "step": 257 },
    { "epoch": 0.0516, "grad_norm": 12.102031707763672, "learning_rate": 4.743e-06, "loss": 7.6877, "mean_token_accuracy": 0.42748479545116425, "num_tokens": 4375550.0, "step": 258 },
    { "epoch": 0.0518, "grad_norm": 10.918121337890625, "learning_rate": 4.7420000000000005e-06, "loss": 7.1491, "mean_token_accuracy": 0.3727777749300003, "num_tokens": 4390013.0, "step": 259 },
    { "epoch": 0.052, "grad_norm": 9.632317543029785, "learning_rate": 4.741000000000001e-06, "loss": 6.4275, "mean_token_accuracy": 0.3046218603849411, "num_tokens": 4406480.0, "step": 260 },
    { "epoch": 0.0522, "grad_norm": 14.100777626037598, "learning_rate": 4.74e-06, "loss": 7.8382, "mean_token_accuracy": 0.454365074634552, "num_tokens": 4425737.0, "step": 261 },
    { "epoch": 0.0524, "grad_norm": 14.973969459533691, "learning_rate": 4.739e-06, "loss": 7.775, "mean_token_accuracy": 0.45428571105003357, "num_tokens": 4440150.0, "step": 262 },
    { "epoch": 0.0526, "grad_norm": 10.306495666503906, "learning_rate": 4.738e-06, "loss": 6.7966, "mean_token_accuracy": 0.41637930274009705, "num_tokens": 4454659.0, "step": 263 },
    { "epoch": 0.0528, "grad_norm": 11.71308422088623, "learning_rate": 4.7370000000000006e-06, "loss": 7.3197, "mean_token_accuracy": 0.45000000298023224, "num_tokens": 4469178.0, "step": 264 },
    { "epoch": 0.053, "grad_norm": 15.400917053222656, "learning_rate": 4.736000000000001e-06, "loss": 7.0285, "mean_token_accuracy": 0.4464285671710968, "num_tokens": 4483833.0, "step": 265 },
    { "epoch": 0.0532, "grad_norm": 11.898533821105957, "learning_rate": 4.735e-06, "loss": 7.7648, "mean_token_accuracy": 0.3568965643644333, "num_tokens": 4502729.0, "step": 266 },
    { "epoch": 0.0534, "grad_norm": 12.848722457885742, "learning_rate": 4.734e-06, "loss": 7.1883, "mean_token_accuracy": 0.4402298927307129, "num_tokens": 4521713.0, "step": 267 },
    { "epoch": 0.0536, "grad_norm": 9.758365631103516, "learning_rate": 4.733e-06, "loss": 6.833, "mean_token_accuracy": 0.39444445073604584, "num_tokens": 4541033.0, "step": 268 },
    { "epoch": 0.0538, "grad_norm": 40.770198822021484, "learning_rate": 4.732000000000001e-06, "loss": 7.5144, "mean_token_accuracy": 0.37254129350185394, "num_tokens": 4561917.0, "step": 269 },
    { "epoch": 0.054, "grad_norm": 12.640328407287598, "learning_rate": 4.731000000000001e-06, "loss": 7.6694, "mean_token_accuracy": 0.40518517792224884, "num_tokens": 4576371.0, "step": 270 },
    { "epoch": 0.0542, "grad_norm": 13.891765594482422, "learning_rate": 4.7300000000000005e-06, "loss": 6.4951, "mean_token_accuracy": 0.48746199905872345, "num_tokens": 4595606.0, "step": 271 },
    { "epoch": 0.0544, "grad_norm": 15.260784149169922, "learning_rate": 4.729e-06, "loss": 7.1435, "mean_token_accuracy": 0.4378078728914261, "num_tokens": 4607665.0, "step": 272 },
    { "epoch": 0.0546, "grad_norm": 11.873882293701172, "learning_rate": 4.728e-06, "loss": 6.6643, "mean_token_accuracy": 0.4196428507566452, "num_tokens": 4622184.0, "step": 273 },
    { "epoch": 0.0548,
|
"grad_norm": 11.417704582214355, |
|
"learning_rate": 4.727000000000001e-06, |
|
"loss": 7.0497, |
|
"mean_token_accuracy": 0.39933258295059204, |
|
"num_tokens": 4636646.0, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.055, |
|
"grad_norm": 10.876029014587402, |
|
"learning_rate": 4.726000000000001e-06, |
|
"loss": 6.913, |
|
"mean_token_accuracy": 0.41692790389060974, |
|
"num_tokens": 4655906.0, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.0552, |
|
"grad_norm": 13.294316291809082, |
|
"learning_rate": 4.7250000000000005e-06, |
|
"loss": 7.6388, |
|
"mean_token_accuracy": 0.41338111460208893, |
|
"num_tokens": 4675326.0, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.0554, |
|
"grad_norm": 13.638504981994629, |
|
"learning_rate": 4.724e-06, |
|
"loss": 7.3977, |
|
"mean_token_accuracy": 0.3982202410697937, |
|
"num_tokens": 4694348.0, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.0556, |
|
"grad_norm": 15.715910911560059, |
|
"learning_rate": 4.723e-06, |
|
"loss": 7.6206, |
|
"mean_token_accuracy": 0.4444444477558136, |
|
"num_tokens": 4713642.0, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.0558, |
|
"grad_norm": 15.516464233398438, |
|
"learning_rate": 4.722000000000001e-06, |
|
"loss": 8.2007, |
|
"mean_token_accuracy": 0.424450546503067, |
|
"num_tokens": 4732898.0, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.056, |
|
"grad_norm": 12.387885093688965, |
|
"learning_rate": 4.721e-06, |
|
"loss": 7.688, |
|
"mean_token_accuracy": 0.3694581240415573, |
|
"num_tokens": 4752274.0, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.0562, |
|
"grad_norm": 10.614646911621094, |
|
"learning_rate": 4.7200000000000005e-06, |
|
"loss": 6.3429, |
|
"mean_token_accuracy": 0.47413793206214905, |
|
"num_tokens": 4761929.0, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.0564, |
|
"grad_norm": 21.08198356628418, |
|
"learning_rate": 4.719e-06, |
|
"loss": 6.1852, |
|
"mean_token_accuracy": 0.46727272868156433, |
|
"num_tokens": 4771566.0, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.0566, |
|
"grad_norm": 13.566423416137695, |
|
"learning_rate": 4.718e-06, |
|
"loss": 6.3183, |
|
"mean_token_accuracy": 0.4772357791662216, |
|
"num_tokens": 4792969.0, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.0568, |
|
"grad_norm": 11.979252815246582, |
|
"learning_rate": 4.717000000000001e-06, |
|
"loss": 6.9839, |
|
"mean_token_accuracy": 0.4107142835855484, |
|
"num_tokens": 4807544.0, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.057, |
|
"grad_norm": 10.196455955505371, |
|
"learning_rate": 4.716e-06, |
|
"loss": 7.2482, |
|
"mean_token_accuracy": 0.4211822748184204, |
|
"num_tokens": 4826443.0, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.0572, |
|
"grad_norm": 10.696710586547852, |
|
"learning_rate": 4.715e-06, |
|
"loss": 6.7498, |
|
"mean_token_accuracy": 0.4461206793785095, |
|
"num_tokens": 4840925.0, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.0574, |
|
"grad_norm": 12.140236854553223, |
|
"learning_rate": 4.714e-06, |
|
"loss": 7.5622, |
|
"mean_token_accuracy": 0.3907563090324402, |
|
"num_tokens": 4860066.0, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.0576, |
|
"grad_norm": 9.046162605285645, |
|
"learning_rate": 4.7130000000000004e-06, |
|
"loss": 6.4519, |
|
"mean_token_accuracy": 0.4005681872367859, |
|
"num_tokens": 4874533.0, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.0578, |
|
"grad_norm": 11.239535331726074, |
|
"learning_rate": 4.712000000000001e-06, |
|
"loss": 7.0541, |
|
"mean_token_accuracy": 0.4107142835855484, |
|
"num_tokens": 4888991.0, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.058, |
|
"grad_norm": 13.39867115020752, |
|
"learning_rate": 4.711e-06, |
|
"loss": 7.3852, |
|
"mean_token_accuracy": 0.35574713349342346, |
|
"num_tokens": 4901961.0, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.0582, |
|
"grad_norm": 10.7579984664917, |
|
"learning_rate": 4.71e-06, |
|
"loss": 7.2866, |
|
"mean_token_accuracy": 0.41428571939468384, |
|
"num_tokens": 4916815.0, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.0584, |
|
"grad_norm": 12.446432113647461, |
|
"learning_rate": 4.709e-06, |
|
"loss": 7.9278, |
|
"mean_token_accuracy": 0.35706017911434174, |
|
"num_tokens": 4935892.0, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.0586, |
|
"grad_norm": 12.149531364440918, |
|
"learning_rate": 4.7080000000000005e-06, |
|
"loss": 7.0934, |
|
"mean_token_accuracy": 0.4106002599000931, |
|
"num_tokens": 4950308.0, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.0588, |
|
"grad_norm": 12.248196601867676, |
|
"learning_rate": 4.707000000000001e-06, |
|
"loss": 7.3677, |
|
"mean_token_accuracy": 0.4555555582046509, |
|
"num_tokens": 4969605.0, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.059, |
|
"grad_norm": 10.024053573608398, |
|
"learning_rate": 4.706e-06, |
|
"loss": 7.0171, |
|
"mean_token_accuracy": 0.4404466450214386, |
|
"num_tokens": 4984384.0, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.0592, |
|
"grad_norm": 10.064780235290527, |
|
"learning_rate": 4.705e-06, |
|
"loss": 7.2282, |
|
"mean_token_accuracy": 0.3821621537208557, |
|
"num_tokens": 4998886.0, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.0594, |
|
"grad_norm": 11.68379020690918, |
|
"learning_rate": 4.704e-06, |
|
"loss": 6.3511, |
|
"mean_token_accuracy": 0.45874999463558197, |
|
"num_tokens": 5013345.0, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.0596, |
|
"grad_norm": 9.920818328857422, |
|
"learning_rate": 4.7030000000000005e-06, |
|
"loss": 6.4502, |
|
"mean_token_accuracy": 0.4145299196243286, |
|
"num_tokens": 5026411.0, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.0598, |
|
"grad_norm": 9.868770599365234, |
|
"learning_rate": 4.702e-06, |
|
"loss": 6.6577, |
|
"mean_token_accuracy": 0.40407469868659973, |
|
"num_tokens": 5046124.0, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"grad_norm": 13.261858940124512, |
|
"learning_rate": 4.701e-06, |
|
"loss": 6.0468, |
|
"mean_token_accuracy": 0.4592592567205429, |
|
"num_tokens": 5060802.0, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.0602, |
|
"grad_norm": 11.705483436584473, |
|
"learning_rate": 4.7e-06, |
|
"loss": 6.9336, |
|
"mean_token_accuracy": 0.3961039036512375, |
|
"num_tokens": 5080148.0, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.0604, |
|
"grad_norm": 11.132308006286621, |
|
"learning_rate": 4.699e-06, |
|
"loss": 6.7934, |
|
"mean_token_accuracy": 0.4444444477558136, |
|
"num_tokens": 5094604.0, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.0606, |
|
"grad_norm": 12.15768051147461, |
|
"learning_rate": 4.698000000000001e-06, |
|
"loss": 6.1426, |
|
"mean_token_accuracy": 0.48148147761821747, |
|
"num_tokens": 5109079.0, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.0608, |
|
"grad_norm": 13.979254722595215, |
|
"learning_rate": 4.697e-06, |
|
"loss": 7.0785, |
|
"mean_token_accuracy": 0.4423076957464218, |
|
"num_tokens": 5121880.0, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.061, |
|
"grad_norm": 10.67962646484375, |
|
"learning_rate": 4.6960000000000004e-06, |
|
"loss": 5.7797, |
|
"mean_token_accuracy": 0.43860387802124023, |
|
"num_tokens": 5141369.0, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.0612, |
|
"grad_norm": 9.39137077331543, |
|
"learning_rate": 4.695e-06, |
|
"loss": 5.9316, |
|
"mean_token_accuracy": 0.4432692378759384, |
|
"num_tokens": 5155875.0, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.0614, |
|
"grad_norm": 10.971794128417969, |
|
"learning_rate": 4.694e-06, |
|
"loss": 6.5844, |
|
"mean_token_accuracy": 0.4044642895460129, |
|
"num_tokens": 5175079.0, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.0616, |
|
"grad_norm": 10.385963439941406, |
|
"learning_rate": 4.693000000000001e-06, |
|
"loss": 6.3867, |
|
"mean_token_accuracy": 0.46296297013759613, |
|
"num_tokens": 5189415.0, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.0618, |
|
"grad_norm": 13.314301490783691, |
|
"learning_rate": 4.692e-06, |
|
"loss": 6.0294, |
|
"mean_token_accuracy": 0.42749999463558197, |
|
"num_tokens": 5199112.0, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.062, |
|
"grad_norm": 9.319266319274902, |
|
"learning_rate": 4.6910000000000005e-06, |
|
"loss": 6.3149, |
|
"mean_token_accuracy": 0.41097819805145264, |
|
"num_tokens": 5231220.0, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.0622, |
|
"grad_norm": 9.54952621459961, |
|
"learning_rate": 4.69e-06, |
|
"loss": 6.8163, |
|
"mean_token_accuracy": 0.380952388048172, |
|
"num_tokens": 5253011.0, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.0624, |
|
"grad_norm": 9.803725242614746, |
|
"learning_rate": 4.689e-06, |
|
"loss": 6.385, |
|
"mean_token_accuracy": 0.4375, |
|
"num_tokens": 5272315.0, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.0626, |
|
"grad_norm": 9.163276672363281, |
|
"learning_rate": 4.688000000000001e-06, |
|
"loss": 5.9736, |
|
"mean_token_accuracy": 0.3640737682580948, |
|
"num_tokens": 5286789.0, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.0628, |
|
"grad_norm": 9.91296672821045, |
|
"learning_rate": 4.687e-06, |
|
"loss": 6.2111, |
|
"mean_token_accuracy": 0.3975849747657776, |
|
"num_tokens": 5301439.0, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.063, |
|
"grad_norm": 11.019015312194824, |
|
"learning_rate": 4.6860000000000005e-06, |
|
"loss": 6.6715, |
|
"mean_token_accuracy": 0.3615301698446274, |
|
"num_tokens": 5324389.0, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.0632, |
|
"grad_norm": 9.389239311218262, |
|
"learning_rate": 4.685000000000001e-06, |
|
"loss": 5.7869, |
|
"mean_token_accuracy": 0.4253472238779068, |
|
"num_tokens": 5334050.0, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.0634, |
|
"grad_norm": 9.804286003112793, |
|
"learning_rate": 4.684e-06, |
|
"loss": 7.3263, |
|
"mean_token_accuracy": 0.3896551728248596, |
|
"num_tokens": 5353730.0, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.0636, |
|
"grad_norm": 18.058841705322266, |
|
"learning_rate": 4.683000000000001e-06, |
|
"loss": 6.4049, |
|
"mean_token_accuracy": 0.43095238506793976, |
|
"num_tokens": 5368190.0, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.0638, |
|
"grad_norm": 10.300615310668945, |
|
"learning_rate": 4.682e-06, |
|
"loss": 6.1908, |
|
"mean_token_accuracy": 0.44763730466365814, |
|
"num_tokens": 5377848.0, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.064, |
|
"grad_norm": 8.114563941955566, |
|
"learning_rate": 4.681000000000001e-06, |
|
"loss": 6.5722, |
|
"mean_token_accuracy": 0.39539170265197754, |
|
"num_tokens": 5397362.0, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.0642, |
|
"grad_norm": 9.576495170593262, |
|
"learning_rate": 4.680000000000001e-06, |
|
"loss": 5.8224, |
|
"mean_token_accuracy": 0.4186507910490036, |
|
"num_tokens": 5404613.0, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.0644, |
|
"grad_norm": 7.686422348022461, |
|
"learning_rate": 4.6790000000000004e-06, |
|
"loss": 6.2079, |
|
"mean_token_accuracy": 0.3892773985862732, |
|
"num_tokens": 5423887.0, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.0646, |
|
"grad_norm": 10.824250221252441, |
|
"learning_rate": 4.678e-06, |
|
"loss": 6.4209, |
|
"mean_token_accuracy": 0.380952388048172, |
|
"num_tokens": 5438544.0, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.0648, |
|
"grad_norm": 22.814104080200195, |
|
"learning_rate": 4.677e-06, |
|
"loss": 6.3804, |
|
"mean_token_accuracy": 0.4807407408952713, |
|
"num_tokens": 5453036.0, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.065, |
|
"grad_norm": 12.241607666015625, |
|
"learning_rate": 4.676000000000001e-06, |
|
"loss": 6.9619, |
|
"mean_token_accuracy": 0.44413793087005615, |
|
"num_tokens": 5472490.0, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.0652, |
|
"grad_norm": 9.275449752807617, |
|
"learning_rate": 4.675000000000001e-06, |
|
"loss": 5.8423, |
|
"mean_token_accuracy": 0.44312499463558197, |
|
"num_tokens": 5482149.0, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.0654, |
|
"grad_norm": 9.22095012664795, |
|
"learning_rate": 4.6740000000000005e-06, |
|
"loss": 5.6949, |
|
"mean_token_accuracy": 0.46666666865348816, |
|
"num_tokens": 5491805.0, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.0656, |
|
"grad_norm": 9.241523742675781, |
|
"learning_rate": 4.673e-06, |
|
"loss": 6.0598, |
|
"mean_token_accuracy": 0.4404466450214386, |
|
"num_tokens": 5505904.0, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.0658, |
|
"grad_norm": 17.82420539855957, |
|
"learning_rate": 4.672e-06, |
|
"loss": 6.6779, |
|
"mean_token_accuracy": 0.4031440168619156, |
|
"num_tokens": 5525385.0, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.066, |
|
"grad_norm": 9.692024230957031, |
|
"learning_rate": 4.671000000000001e-06, |
|
"loss": 5.7944, |
|
"mean_token_accuracy": 0.5252873599529266, |
|
"num_tokens": 5540158.0, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.0662, |
|
"grad_norm": 7.623928070068359, |
|
"learning_rate": 4.670000000000001e-06, |
|
"loss": 4.9237, |
|
"mean_token_accuracy": 0.44602273404598236, |
|
"num_tokens": 5563392.0, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.0664, |
|
"grad_norm": 10.33786392211914, |
|
"learning_rate": 4.6690000000000005e-06, |
|
"loss": 6.147, |
|
"mean_token_accuracy": 0.45428571105003357, |
|
"num_tokens": 5577885.0, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.0666, |
|
"grad_norm": 9.725800514221191, |
|
"learning_rate": 4.668e-06, |
|
"loss": 6.134, |
|
"mean_token_accuracy": 0.49074074625968933, |
|
"num_tokens": 5587538.0, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.0668, |
|
"grad_norm": 12.834142684936523, |
|
"learning_rate": 4.667e-06, |
|
"loss": 6.4009, |
|
"mean_token_accuracy": 0.448306605219841, |
|
"num_tokens": 5606700.0, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.067, |
|
"grad_norm": 17.030433654785156, |
|
"learning_rate": 4.666000000000001e-06, |
|
"loss": 6.5778, |
|
"mean_token_accuracy": 0.49074074625968933, |
|
"num_tokens": 5627246.0, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.0672, |
|
"grad_norm": 7.939487457275391, |
|
"learning_rate": 4.665e-06, |
|
"loss": 5.705, |
|
"mean_token_accuracy": 0.39478765428066254, |
|
"num_tokens": 5646622.0, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.0674, |
|
"grad_norm": 10.505208015441895, |
|
"learning_rate": 4.664000000000001e-06, |
|
"loss": 6.2186, |
|
"mean_token_accuracy": 0.4741935580968857, |
|
"num_tokens": 5665885.0, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.0676, |
|
"grad_norm": 9.60438060760498, |
|
"learning_rate": 4.663e-06, |
|
"loss": 6.4067, |
|
"mean_token_accuracy": 0.50063855946064, |
|
"num_tokens": 5685143.0, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.0678, |
|
"grad_norm": 8.420628547668457, |
|
"learning_rate": 4.6620000000000004e-06, |
|
"loss": 5.9981, |
|
"mean_token_accuracy": 0.48749999701976776, |
|
"num_tokens": 5705307.0, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.068, |
|
"grad_norm": 9.02039909362793, |
|
"learning_rate": 4.661000000000001e-06, |
|
"loss": 5.6245, |
|
"mean_token_accuracy": 0.44069264829158783, |
|
"num_tokens": 5724259.0, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.0682, |
|
"grad_norm": 9.671128273010254, |
|
"learning_rate": 4.66e-06, |
|
"loss": 5.537, |
|
"mean_token_accuracy": 0.5000000149011612, |
|
"num_tokens": 5738757.0, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.0684, |
|
"grad_norm": 9.832850456237793, |
|
"learning_rate": 4.659e-06, |
|
"loss": 6.0835, |
|
"mean_token_accuracy": 0.5, |
|
"num_tokens": 5753257.0, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.0686, |
|
"grad_norm": 10.410543441772461, |
|
"learning_rate": 4.658e-06, |
|
"loss": 6.4898, |
|
"mean_token_accuracy": 0.517241358757019, |
|
"num_tokens": 5769459.0, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.0688, |
|
"grad_norm": 11.612724304199219, |
|
"learning_rate": 4.6570000000000005e-06, |
|
"loss": 6.2025, |
|
"mean_token_accuracy": 0.45967741310596466, |
|
"num_tokens": 5783916.0, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.069, |
|
"grad_norm": 10.020609855651855, |
|
"learning_rate": 4.656000000000001e-06, |
|
"loss": 6.355, |
|
"mean_token_accuracy": 0.4568965584039688, |
|
"num_tokens": 5803333.0, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.0692, |
|
"grad_norm": 8.622142791748047, |
|
"learning_rate": 4.655e-06, |
|
"loss": 5.8755, |
|
"mean_token_accuracy": 0.49944381415843964, |
|
"num_tokens": 5817852.0, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.0694, |
|
"grad_norm": 6.586418151855469, |
|
"learning_rate": 4.654e-06, |
|
"loss": 4.6792, |
|
"mean_token_accuracy": 0.4610389471054077, |
|
"num_tokens": 5827955.0, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.0696, |
|
"grad_norm": 9.011771202087402, |
|
"learning_rate": 4.653e-06, |
|
"loss": 5.4233, |
|
"mean_token_accuracy": 0.525862067937851, |
|
"num_tokens": 5842433.0, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.0698, |
|
"grad_norm": 11.389575004577637, |
|
"learning_rate": 4.6520000000000005e-06, |
|
"loss": 6.12, |
|
"mean_token_accuracy": 0.5083612203598022, |
|
"num_tokens": 5856800.0, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"grad_norm": 7.888221263885498, |
|
"learning_rate": 4.651000000000001e-06, |
|
"loss": 5.8322, |
|
"mean_token_accuracy": 0.504524901509285, |
|
"num_tokens": 5870212.0, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.0702, |
|
"grad_norm": 9.21318531036377, |
|
"learning_rate": 4.65e-06, |
|
"loss": 5.8042, |
|
"mean_token_accuracy": 0.5434027910232544, |
|
"num_tokens": 5889473.0, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.0704, |
|
"grad_norm": 19.845186233520508, |
|
"learning_rate": 4.649e-06, |
|
"loss": 6.2884, |
|
"mean_token_accuracy": 0.4252873510122299, |
|
"num_tokens": 5900869.0, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.0706, |
|
"grad_norm": 10.618582725524902, |
|
"learning_rate": 4.648e-06, |
|
"loss": 5.9085, |
|
"mean_token_accuracy": 0.4852941185235977, |
|
"num_tokens": 5919621.0, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.0708, |
|
"grad_norm": 6.961612224578857, |
|
"learning_rate": 4.6470000000000006e-06, |
|
"loss": 5.1376, |
|
"mean_token_accuracy": 0.4638047069311142, |
|
"num_tokens": 5926888.0, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.071, |
|
"grad_norm": 12.083121299743652, |
|
"learning_rate": 4.646000000000001e-06, |
|
"loss": 6.054, |
|
"mean_token_accuracy": 0.5, |
|
"num_tokens": 5946144.0, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.0712, |
|
"grad_norm": 10.419790267944336, |
|
"learning_rate": 4.645e-06, |
|
"loss": 6.2987, |
|
"mean_token_accuracy": 0.4136960655450821, |
|
"num_tokens": 5965843.0, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.0714, |
|
"grad_norm": 10.46291732788086, |
|
"learning_rate": 4.644e-06, |
|
"loss": 5.7, |
|
"mean_token_accuracy": 0.5392592549324036, |
|
"num_tokens": 5980726.0, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.0716, |
|
"grad_norm": 11.251096725463867, |
|
"learning_rate": 4.643e-06, |
|
"loss": 5.5953, |
|
"mean_token_accuracy": 0.5185185372829437, |
|
"num_tokens": 5995182.0, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.0718, |
|
"grad_norm": 9.273164749145508, |
|
"learning_rate": 4.642000000000001e-06, |
|
"loss": 5.7778, |
|
"mean_token_accuracy": 0.48938991129398346, |
|
"num_tokens": 6004839.0, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.072, |
|
"grad_norm": 15.386749267578125, |
|
"learning_rate": 4.641e-06, |
|
"loss": 5.5941, |
|
"mean_token_accuracy": 0.49140210449695587, |
|
"num_tokens": 6019456.0, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.0722, |
|
"grad_norm": 14.047779083251953, |
|
"learning_rate": 4.6400000000000005e-06, |
|
"loss": 6.0249, |
|
"mean_token_accuracy": 0.5166028141975403, |
|
"num_tokens": 6038752.0, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.0724, |
|
"grad_norm": 7.4056925773620605, |
|
"learning_rate": 4.639e-06, |
|
"loss": 5.3375, |
|
"mean_token_accuracy": 0.5800000131130219, |
|
"num_tokens": 6048404.0, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.0726, |
|
"grad_norm": 13.957375526428223, |
|
"learning_rate": 4.638e-06, |
|
"loss": 5.7739, |
|
"mean_token_accuracy": 0.561188817024231, |
|
"num_tokens": 6067581.0, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.0728, |
|
"grad_norm": 10.774636268615723, |
|
"learning_rate": 4.637000000000001e-06, |
|
"loss": 6.0013, |
|
"mean_token_accuracy": 0.4900793582201004, |
|
"num_tokens": 6087734.0, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.073, |
|
"grad_norm": 8.231881141662598, |
|
"learning_rate": 4.636e-06, |
|
"loss": 4.9933, |
|
"mean_token_accuracy": 0.6004464328289032, |
|
"num_tokens": 6102196.0, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.0732, |
|
"grad_norm": 8.488526344299316, |
|
"learning_rate": 4.6350000000000005e-06, |
|
"loss": 5.0155, |
|
"mean_token_accuracy": 0.463299423456192, |
|
"num_tokens": 6116673.0, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.0734, |
|
"grad_norm": 10.228729248046875, |
|
"learning_rate": 4.634e-06, |
|
"loss": 5.8042, |
|
"mean_token_accuracy": 0.5535714328289032, |
|
"num_tokens": 6136007.0, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.0736, |
|
"grad_norm": 14.825092315673828, |
|
"learning_rate": 4.633e-06, |
|
"loss": 5.6991, |
|
"mean_token_accuracy": 0.5031928569078445, |
|
"num_tokens": 6154423.0, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.0738, |
|
"grad_norm": 8.982471466064453, |
|
"learning_rate": 4.632000000000001e-06, |
|
"loss": 5.564, |
|
"mean_token_accuracy": 0.5177975445985794, |
|
"num_tokens": 6173864.0, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.074, |
|
"grad_norm": 20.95796775817871, |
|
"learning_rate": 4.631e-06, |
|
"loss": 5.8344, |
|
"mean_token_accuracy": 0.4821200519800186, |
|
"num_tokens": 6193259.0, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.0742, |
|
"grad_norm": 35.350887298583984, |
|
"learning_rate": 4.6300000000000006e-06, |
|
"loss": 5.7328, |
|
"mean_token_accuracy": 0.5106837600469589, |
|
"num_tokens": 6213732.0, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.0744, |
|
"grad_norm": 13.841226577758789, |
|
"learning_rate": 4.629e-06, |
|
"loss": 5.9662, |
|
"mean_token_accuracy": 0.5092592537403107, |
|
"num_tokens": 6233169.0, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.0746, |
|
"grad_norm": 12.297442436218262, |
|
"learning_rate": 4.628e-06, |
|
"loss": 5.3639, |
|
"mean_token_accuracy": 0.42847076058387756, |
|
"num_tokens": 6252724.0, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.0748, |
|
"grad_norm": 8.141170501708984, |
|
"learning_rate": 4.627000000000001e-06, |
|
"loss": 5.1988, |
|
"mean_token_accuracy": 0.5410256534814835, |
|
"num_tokens": 6267220.0, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.075, |
|
"grad_norm": 11.617218017578125, |
|
"learning_rate": 4.626e-06, |
|
"loss": 6.1014, |
|
"mean_token_accuracy": 0.5000000149011612, |
|
"num_tokens": 6281873.0, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.0752, |
|
"grad_norm": 14.549817085266113, |
|
"learning_rate": 4.625000000000001e-06, |
|
"loss": 4.9924, |
|
"mean_token_accuracy": 0.6129629611968994, |
|
"num_tokens": 6296468.0, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.0754, |
|
"grad_norm": 8.732035636901855, |
|
"learning_rate": 4.624e-06, |
|
"loss": 5.3749, |
|
"mean_token_accuracy": 0.49193547666072845, |
|
"num_tokens": 6316099.0, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.0756, |
|
"grad_norm": 11.393025398254395, |
|
"learning_rate": 4.6230000000000005e-06, |
|
"loss": 5.8518, |
|
"mean_token_accuracy": 0.5462962985038757, |
|
"num_tokens": 6335358.0, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.0758, |
|
"grad_norm": 11.22784423828125, |
|
"learning_rate": 4.622e-06, |
|
"loss": 5.6366, |
|
"mean_token_accuracy": 0.5166666805744171, |
|
"num_tokens": 6349895.0, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 0.076, |
|
"grad_norm": 9.452168464660645, |
|
"learning_rate": 4.621e-06, |
|
"loss": 5.1428, |
|
"mean_token_accuracy": 0.48552632331848145, |
|
"num_tokens": 6371574.0, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.0762, |
|
"grad_norm": 10.894210815429688, |
|
"learning_rate": 4.620000000000001e-06, |
|
"loss": 5.977, |
|
"mean_token_accuracy": 0.484375, |
|
"num_tokens": 6389904.0, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.0764, |
|
"grad_norm": 13.074360847473145, |
|
"learning_rate": 4.619e-06, |
|
"loss": 5.3847, |
|
"mean_token_accuracy": 0.5892857313156128, |
|
"num_tokens": 6409200.0, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 0.0766, |
|
"grad_norm": 11.248861312866211, |
|
"learning_rate": 4.6180000000000005e-06, |
|
"loss": 5.4468, |
|
"mean_token_accuracy": 0.550000011920929, |
|
"num_tokens": 6423778.0, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 0.0768, |
|
"grad_norm": 9.336748123168945, |
|
"learning_rate": 4.617e-06, |
|
"loss": 5.4932, |
|
"mean_token_accuracy": 0.5820105969905853, |
|
"num_tokens": 6443035.0, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.077, |
|
"grad_norm": 9.516325950622559, |
|
"learning_rate": 4.616e-06, |
|
"loss": 5.4311, |
|
"mean_token_accuracy": 0.4709596037864685, |
|
"num_tokens": 6459666.0, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.0772, |
|
"grad_norm": 7.903703212738037, |
|
"learning_rate": 4.615000000000001e-06, |
|
"loss": 5.5287, |
|
"mean_token_accuracy": 0.541487067937851, |
|
"num_tokens": 6478423.0, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 0.0774, |
|
"grad_norm": 13.11430835723877, |
|
"learning_rate": 4.614e-06, |
|
"loss": 5.3758, |
|
"mean_token_accuracy": 0.6235714256763458, |
|
"num_tokens": 6492916.0, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.0776, |
|
"grad_norm": 14.428086280822754, |
|
"learning_rate": 4.6130000000000006e-06, |
|
"loss": 5.3445, |
|
"mean_token_accuracy": 0.5848214328289032, |
|
"num_tokens": 6512237.0, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 0.0778, |
|
"grad_norm": 7.981903553009033, |
|
"learning_rate": 4.612e-06, |
|
"loss": 5.6573, |
|
"mean_token_accuracy": 0.581250011920929, |
|
"num_tokens": 6531501.0, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 0.078, |
|
"grad_norm": 11.641196250915527, |
|
"learning_rate": 4.611e-06, |
|
"loss": 4.5256, |
|
"mean_token_accuracy": 0.5635062754154205, |
|
"num_tokens": 6545420.0, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.0782, |
|
"grad_norm": 7.262872219085693, |
|
"learning_rate": 4.610000000000001e-06, |
|
"loss": 5.5162, |
|
"mean_token_accuracy": 0.5485893487930298, |
|
"num_tokens": 6564684.0, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 0.0784, |
|
"grad_norm": 9.247142791748047, |
|
"learning_rate": 4.609e-06, |
|
"loss": 5.6358, |
|
"mean_token_accuracy": 0.5387205481529236, |
|
"num_tokens": 6584022.0, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 0.0786, |
|
"grad_norm": 7.343860149383545, |
|
"learning_rate": 4.608000000000001e-06, |
|
"loss": 5.4758, |
|
"mean_token_accuracy": 0.5384615361690521, |
|
"num_tokens": 6598518.0, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.0788, |
|
"grad_norm": 80.7061767578125, |
|
"learning_rate": 4.607e-06, |
|
"loss": 5.8124, |
|
"mean_token_accuracy": 0.5425287485122681, |
|
"num_tokens": 6617265.0, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 0.079, |
|
"grad_norm": 8.777029991149902, |
|
"learning_rate": 4.6060000000000005e-06, |
|
"loss": 5.63, |
|
"mean_token_accuracy": 0.5334528088569641, |
|
"num_tokens": 6636418.0, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.0792, |
|
"grad_norm": 9.333694458007812, |
|
"learning_rate": 4.605000000000001e-06, |
|
"loss": 5.5997, |
|
"mean_token_accuracy": 0.5315904170274734, |
|
"num_tokens": 6656184.0, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.0794, |
|
"grad_norm": 8.860589981079102, |
|
"learning_rate": 4.604e-06, |
|
"loss": 5.1344, |
|
"mean_token_accuracy": 0.589756578207016, |
|
"num_tokens": 6675525.0, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 0.0796, |
|
"grad_norm": 13.58896541595459, |
|
"learning_rate": 4.603000000000001e-06, |
|
"loss": 5.2149, |
|
"mean_token_accuracy": 0.5039232820272446, |
|
"num_tokens": 6693627.0, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 0.0798, |
|
"grad_norm": 9.988759994506836, |
|
"learning_rate": 4.602e-06, |
|
"loss": 5.6091, |
|
"mean_token_accuracy": 0.5166666805744171, |
|
"num_tokens": 6708223.0, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 9.320517539978027, |
|
"learning_rate": 4.6010000000000005e-06, |
|
"loss": 5.3271, |
|
"mean_token_accuracy": 0.5459558963775635, |
|
"num_tokens": 6728571.0, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.0802, |
|
"grad_norm": 6.697946548461914, |
|
"learning_rate": 4.600000000000001e-06, |
|
"loss": 4.8994, |
|
"mean_token_accuracy": 0.5878571271896362, |
|
"num_tokens": 6737950.0, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.0804, |
|
"grad_norm": 7.51831579208374, |
|
"learning_rate": 4.599e-06, |
|
"loss": 5.024, |
|
"mean_token_accuracy": 0.5743534564971924, |
|
"num_tokens": 6757373.0, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.0806, |
|
"grad_norm": 9.05827522277832, |
|
"learning_rate": 4.598e-06, |
|
"loss": 4.8548, |
|
"mean_token_accuracy": 0.5397436022758484, |
|
"num_tokens": 6778652.0, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.0808, |
|
"grad_norm": 8.396754264831543, |
|
"learning_rate": 4.597e-06, |
|
"loss": 4.8869, |
|
"mean_token_accuracy": 0.5226293057203293, |
|
"num_tokens": 6793115.0, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.081, |
|
"grad_norm": 7.155496597290039, |
|
"learning_rate": 4.5960000000000006e-06, |
|
"loss": 5.2171, |
|
"mean_token_accuracy": 0.5760368704795837, |
|
"num_tokens": 6812376.0, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.0812, |
|
"grad_norm": 18.729246139526367, |
|
"learning_rate": 4.595000000000001e-06, |
|
"loss": 5.3674, |
|
"mean_token_accuracy": 0.47413793206214905, |
|
"num_tokens": 6826681.0, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.0814, |
|
"grad_norm": 5.5656046867370605, |
|
"learning_rate": 4.594e-06, |
|
"loss": 4.9008, |
|
"mean_token_accuracy": 0.5647321343421936, |
|
"num_tokens": 6836545.0, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.0816, |
|
"grad_norm": 12.937115669250488, |
|
"learning_rate": 4.593e-06, |
|
"loss": 5.3085, |
|
"mean_token_accuracy": 0.5370370447635651, |
|
"num_tokens": 6855848.0, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.0818, |
|
"grad_norm": 10.064362525939941, |
|
"learning_rate": 4.592e-06, |
|
"loss": 5.6301, |
|
"mean_token_accuracy": 0.5384615361690521, |
|
"num_tokens": 6871572.0, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.082, |
|
"grad_norm": 11.348885536193848, |
|
"learning_rate": 4.591000000000001e-06, |
|
"loss": 5.2883, |
|
"mean_token_accuracy": 0.5475806295871735, |
|
"num_tokens": 6886045.0, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.0822, |
|
"grad_norm": 12.09267520904541, |
|
"learning_rate": 4.590000000000001e-06, |
|
"loss": 5.1195, |
|
"mean_token_accuracy": 0.5264367908239365, |
|
"num_tokens": 6905344.0, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.0824, |
|
"grad_norm": 6.413148403167725, |
|
"learning_rate": 4.5890000000000004e-06, |
|
"loss": 4.9556, |
|
"mean_token_accuracy": 0.613103449344635, |
|
"num_tokens": 6919857.0, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.0826, |
|
"grad_norm": 14.665050506591797, |
|
"learning_rate": 4.588e-06, |
|
"loss": 5.375, |
|
"mean_token_accuracy": 0.5648148059844971, |
|
"num_tokens": 6939271.0, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.0828, |
|
"grad_norm": 5.199728012084961, |
|
"learning_rate": 4.587e-06, |
|
"loss": 4.4413, |
|
"mean_token_accuracy": 0.5422222167253494, |
|
"num_tokens": 6949145.0, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.083, |
|
"grad_norm": 22.955730438232422, |
|
"learning_rate": 4.586000000000001e-06, |
|
"loss": 4.4207, |
|
"mean_token_accuracy": 0.6060605943202972, |
|
"num_tokens": 6963199.0, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.0832, |
|
"grad_norm": 5.997854709625244, |
|
"learning_rate": 4.585e-06, |
|
"loss": 4.5337, |
|
"mean_token_accuracy": 0.6601382493972778, |
|
"num_tokens": 6977679.0, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.0834, |
|
"grad_norm": 13.689990997314453, |
|
"learning_rate": 4.5840000000000005e-06, |
|
"loss": 5.4803, |
|
"mean_token_accuracy": 0.6183862388134003, |
|
"num_tokens": 6992166.0, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.0836, |
|
"grad_norm": 13.06170654296875, |
|
"learning_rate": 4.583e-06, |
|
"loss": 5.1476, |
|
"mean_token_accuracy": 0.6145320236682892, |
|
"num_tokens": 7011542.0, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.0838, |
|
"grad_norm": 10.056525230407715, |
|
"learning_rate": 4.582e-06, |
|
"loss": 5.1888, |
|
"mean_token_accuracy": 0.5343915373086929, |
|
"num_tokens": 7030729.0, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.084, |
|
"grad_norm": 6.395605564117432, |
|
"learning_rate": 4.581000000000001e-06, |
|
"loss": 5.4807, |
|
"mean_token_accuracy": 0.5086206793785095, |
|
"num_tokens": 7045228.0, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.0842, |
|
"grad_norm": 8.128874778747559, |
|
"learning_rate": 4.58e-06, |
|
"loss": 5.4323, |
|
"mean_token_accuracy": 0.5689655244350433, |
|
"num_tokens": 7064800.0, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.0844, |
|
"grad_norm": 8.527276039123535, |
|
"learning_rate": 4.579e-06, |
|
"loss": 5.4934, |
|
"mean_token_accuracy": 0.545584037899971, |
|
"num_tokens": 7083906.0, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.0846, |
|
"grad_norm": 5.491787433624268, |
|
"learning_rate": 4.578e-06, |
|
"loss": 4.8269, |
|
"mean_token_accuracy": 0.6545092761516571, |
|
"num_tokens": 7094902.0, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.0848, |
|
"grad_norm": 6.678435802459717, |
|
"learning_rate": 4.577e-06, |
|
"loss": 4.7984, |
|
"mean_token_accuracy": 0.6296296119689941, |
|
"num_tokens": 7104577.0, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.085, |
|
"grad_norm": 22.77742576599121, |
|
"learning_rate": 4.576000000000001e-06, |
|
"loss": 5.7638, |
|
"mean_token_accuracy": 0.5648148059844971, |
|
"num_tokens": 7124732.0, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.0852, |
|
"grad_norm": 9.347881317138672, |
|
"learning_rate": 4.575e-06, |
|
"loss": 5.2139, |
|
"mean_token_accuracy": 0.6139162480831146, |
|
"num_tokens": 7143991.0, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.0854, |
|
"grad_norm": 5.620838165283203, |
|
"learning_rate": 4.574e-06, |
|
"loss": 4.6779, |
|
"mean_token_accuracy": 0.6503448188304901, |
|
"num_tokens": 7158607.0, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.0856, |
|
"grad_norm": 6.864266872406006, |
|
"learning_rate": 4.573e-06, |
|
"loss": 4.8968, |
|
"mean_token_accuracy": 0.6183862388134003, |
|
"num_tokens": 7173064.0, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.0858, |
|
"grad_norm": 22.015869140625, |
|
"learning_rate": 4.5720000000000004e-06, |
|
"loss": 5.1698, |
|
"mean_token_accuracy": 0.5743534564971924, |
|
"num_tokens": 7192243.0, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.086, |
|
"grad_norm": 8.401832580566406, |
|
"learning_rate": 4.571000000000001e-06, |
|
"loss": 5.4089, |
|
"mean_token_accuracy": 0.5715725719928741, |
|
"num_tokens": 7211646.0, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.0862, |
|
"grad_norm": 25.050403594970703, |
|
"learning_rate": 4.57e-06, |
|
"loss": 4.5259, |
|
"mean_token_accuracy": 0.6126644611358643, |
|
"num_tokens": 7230516.0, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.0864, |
|
"grad_norm": 9.29088306427002, |
|
"learning_rate": 4.569e-06, |
|
"loss": 4.889, |
|
"mean_token_accuracy": 0.5278460085391998, |
|
"num_tokens": 7249620.0, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.0866, |
|
"grad_norm": 15.916458129882812, |
|
"learning_rate": 4.568e-06, |
|
"loss": 4.9777, |
|
"mean_token_accuracy": 0.6257143020629883, |
|
"num_tokens": 7264094.0, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.0868, |
|
"grad_norm": 6.861588954925537, |
|
"learning_rate": 4.5670000000000005e-06, |
|
"loss": 5.4239, |
|
"mean_token_accuracy": 0.4928571432828903, |
|
"num_tokens": 7283359.0, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.087, |
|
"grad_norm": 13.225293159484863, |
|
"learning_rate": 4.566000000000001e-06, |
|
"loss": 4.7175, |
|
"mean_token_accuracy": 0.6028921008110046, |
|
"num_tokens": 7302888.0, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.0872, |
|
"grad_norm": 17.012409210205078, |
|
"learning_rate": 4.565e-06, |
|
"loss": 5.6497, |
|
"mean_token_accuracy": 0.5857143104076385, |
|
"num_tokens": 7322148.0, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.0874, |
|
"grad_norm": 10.171734809875488, |
|
"learning_rate": 4.564e-06, |
|
"loss": 4.5458, |
|
"mean_token_accuracy": 0.5567567646503448, |
|
"num_tokens": 7341333.0, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.0876, |
|
"grad_norm": 16.463756561279297, |
|
"learning_rate": 4.563e-06, |
|
"loss": 4.9291, |
|
"mean_token_accuracy": 0.6135416626930237, |
|
"num_tokens": 7362605.0, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.0878, |
|
"grad_norm": 17.738061904907227, |
|
"learning_rate": 4.5620000000000005e-06, |
|
"loss": 4.813, |
|
"mean_token_accuracy": 0.636904776096344, |
|
"num_tokens": 7381865.0, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.088, |
|
"grad_norm": 8.94965648651123, |
|
"learning_rate": 4.561e-06, |
|
"loss": 4.8816, |
|
"mean_token_accuracy": 0.5999999940395355, |
|
"num_tokens": 7396402.0, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.0882, |
|
"grad_norm": 15.025381088256836, |
|
"learning_rate": 4.56e-06, |
|
"loss": 5.6238, |
|
"mean_token_accuracy": 0.5384615361690521, |
|
"num_tokens": 7415660.0, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.0884, |
|
"grad_norm": 10.704608917236328, |
|
"learning_rate": 4.559000000000001e-06, |
|
"loss": 5.291, |
|
"mean_token_accuracy": 0.5915948152542114, |
|
"num_tokens": 7435781.0, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.0886, |
|
"grad_norm": 9.916930198669434, |
|
"learning_rate": 4.558e-06, |
|
"loss": 4.6656, |
|
"mean_token_accuracy": 0.6135416626930237, |
|
"num_tokens": 7454999.0, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.0888, |
|
"grad_norm": 7.206878662109375, |
|
"learning_rate": 4.557000000000001e-06, |
|
"loss": 4.6639, |
|
"mean_token_accuracy": 0.4869281202554703, |
|
"num_tokens": 7469396.0, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.089, |
|
"grad_norm": 6.467816352844238, |
|
"learning_rate": 4.556e-06, |
|
"loss": 4.9449, |
|
"mean_token_accuracy": 0.5981481373310089, |
|
"num_tokens": 7483855.0, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.0892, |
|
"grad_norm": 9.08160400390625, |
|
"learning_rate": 4.5550000000000004e-06, |
|
"loss": 4.6868, |
|
"mean_token_accuracy": 0.6126373708248138, |
|
"num_tokens": 7498330.0, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.0894, |
|
"grad_norm": 6.3002471923828125, |
|
"learning_rate": 4.554000000000001e-06, |
|
"loss": 5.0805, |
|
"mean_token_accuracy": 0.551282063126564, |
|
"num_tokens": 7512714.0, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.0896, |
|
"grad_norm": 13.898117065429688, |
|
"learning_rate": 4.553e-06, |
|
"loss": 5.1324, |
|
"mean_token_accuracy": 0.5690476298332214, |
|
"num_tokens": 7531768.0, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.0898, |
|
"grad_norm": 8.279391288757324, |
|
"learning_rate": 4.552000000000001e-06, |
|
"loss": 4.4569, |
|
"mean_token_accuracy": 0.5530530512332916, |
|
"num_tokens": 7546253.0, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"grad_norm": 11.584227561950684, |
|
"learning_rate": 4.551e-06, |
|
"loss": 5.3385, |
|
"mean_token_accuracy": 0.581250011920929, |
|
"num_tokens": 7565517.0, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.0902, |
|
"grad_norm": 8.302013397216797, |
|
"learning_rate": 4.5500000000000005e-06, |
|
"loss": 4.9839, |
|
"mean_token_accuracy": 0.5351213216781616, |
|
"num_tokens": 7579402.0, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.0904, |
|
"grad_norm": 9.814826965332031, |
|
"learning_rate": 4.549000000000001e-06, |
|
"loss": 4.6692, |
|
"mean_token_accuracy": 0.6079182624816895, |
|
"num_tokens": 7598660.0, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.0906, |
|
"grad_norm": 6.627237796783447, |
|
"learning_rate": 4.548e-06, |
|
"loss": 5.4627, |
|
"mean_token_accuracy": 0.5092592537403107, |
|
"num_tokens": 7613180.0, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.0908, |
|
"grad_norm": 11.831220626831055, |
|
"learning_rate": 4.547000000000001e-06, |
|
"loss": 4.8182, |
|
"mean_token_accuracy": 0.5758620798587799, |
|
"num_tokens": 7632441.0, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.091, |
|
"grad_norm": 7.963436126708984, |
|
"learning_rate": 4.546e-06, |
|
"loss": 5.1914, |
|
"mean_token_accuracy": 0.5578093528747559, |
|
"num_tokens": 7651466.0, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.0912, |
|
"grad_norm": 23.430757522583008, |
|
"learning_rate": 4.5450000000000005e-06, |
|
"loss": 5.1092, |
|
"mean_token_accuracy": 0.5648148059844971, |
|
"num_tokens": 7665848.0, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.0914, |
|
"grad_norm": 7.155216693878174, |
|
"learning_rate": 4.544000000000001e-06, |
|
"loss": 4.5387, |
|
"mean_token_accuracy": 0.5334528088569641, |
|
"num_tokens": 7680308.0, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.0916, |
|
"grad_norm": 9.627572059631348, |
|
"learning_rate": 4.543e-06, |
|
"loss": 5.2593, |
|
"mean_token_accuracy": 0.5478571355342865, |
|
"num_tokens": 7694782.0, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.0918, |
|
"grad_norm": 7.240375995635986, |
|
"learning_rate": 4.542e-06, |
|
"loss": 4.444, |
|
"mean_token_accuracy": 0.6316666603088379, |
|
"num_tokens": 7709405.0, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.092, |
|
"grad_norm": 14.054851531982422, |
|
"learning_rate": 4.541e-06, |
|
"loss": 4.7661, |
|
"mean_token_accuracy": 0.6431034505367279, |
|
"num_tokens": 7728742.0, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.0922, |
|
"grad_norm": 6.709273815155029, |
|
"learning_rate": 4.540000000000001e-06, |
|
"loss": 5.1866, |
|
"mean_token_accuracy": 0.5717073231935501, |
|
"num_tokens": 7743526.0, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.0924, |
|
"grad_norm": 15.618324279785156, |
|
"learning_rate": 4.539000000000001e-06, |
|
"loss": 5.0082, |
|
"mean_token_accuracy": 0.5, |
|
"num_tokens": 7763011.0, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.0926, |
|
"grad_norm": 9.383596420288086, |
|
"learning_rate": 4.5380000000000004e-06, |
|
"loss": 4.7785, |
|
"mean_token_accuracy": 0.59375, |
|
"num_tokens": 7777515.0, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.0928, |
|
"grad_norm": 8.333616256713867, |
|
"learning_rate": 4.537e-06, |
|
"loss": 4.592, |
|
"mean_token_accuracy": 0.522556409239769, |
|
"num_tokens": 7792009.0, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.093, |
|
"grad_norm": 14.328873634338379, |
|
"learning_rate": 4.536e-06, |
|
"loss": 4.8537, |
|
"mean_token_accuracy": 0.62321937084198, |
|
"num_tokens": 7811576.0, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.0932, |
|
"grad_norm": 8.614009857177734, |
|
"learning_rate": 4.535000000000001e-06, |
|
"loss": 5.0954, |
|
"mean_token_accuracy": 0.5415282547473907, |
|
"num_tokens": 7830643.0, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.0934, |
|
"grad_norm": 8.26689338684082, |
|
"learning_rate": 4.534000000000001e-06, |
|
"loss": 4.742, |
|
"mean_token_accuracy": 0.6200265288352966, |
|
"num_tokens": 7848085.0, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.0936, |
|
"grad_norm": 11.07862377166748, |
|
"learning_rate": 4.5330000000000005e-06, |
|
"loss": 4.6416, |
|
"mean_token_accuracy": 0.5669642984867096, |
|
"num_tokens": 7867108.0, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.0938, |
|
"grad_norm": 7.7039337158203125, |
|
"learning_rate": 4.532e-06, |
|
"loss": 4.7395, |
|
"mean_token_accuracy": 0.5769230723381042, |
|
"num_tokens": 7876728.0, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.094, |
|
"grad_norm": 15.2058744430542, |
|
"learning_rate": 4.531e-06, |
|
"loss": 4.2838, |
|
"mean_token_accuracy": 0.5869939625263214, |
|
"num_tokens": 7893132.0, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.0942, |
|
"grad_norm": 12.777841567993164, |
|
"learning_rate": 4.530000000000001e-06, |
|
"loss": 4.6823, |
|
"mean_token_accuracy": 0.5769230723381042, |
|
"num_tokens": 7912476.0, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.0944, |
|
"grad_norm": 13.58057975769043, |
|
"learning_rate": 4.529000000000001e-06, |
|
"loss": 4.4577, |
|
"mean_token_accuracy": 0.589756578207016, |
|
"num_tokens": 7931920.0, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.0946, |
|
"grad_norm": 7.151589393615723, |
|
"learning_rate": 4.5280000000000005e-06, |
|
"loss": 4.784, |
|
"mean_token_accuracy": 0.5804398059844971, |
|
"num_tokens": 7946400.0, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.0948, |
|
"grad_norm": 7.511891841888428, |
|
"learning_rate": 4.527e-06, |
|
"loss": 5.1037, |
|
"mean_token_accuracy": 0.5648148059844971, |
|
"num_tokens": 7960863.0, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.095, |
|
"grad_norm": 9.209273338317871, |
|
"learning_rate": 4.526e-06, |
|
"loss": 4.6556, |
|
"mean_token_accuracy": 0.5915948152542114, |
|
"num_tokens": 7975547.0, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.0952, |
|
"grad_norm": 10.363279342651367, |
|
"learning_rate": 4.525000000000001e-06, |
|
"loss": 5.0172, |
|
"mean_token_accuracy": 0.5742424130439758, |
|
"num_tokens": 7991119.0, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.0954, |
|
"grad_norm": 7.636615753173828, |
|
"learning_rate": 4.524e-06, |
|
"loss": 4.952, |
|
"mean_token_accuracy": 0.6436781585216522, |
|
"num_tokens": 8010377.0, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.0956, |
|
"grad_norm": 8.515202522277832, |
|
"learning_rate": 4.5230000000000006e-06, |
|
"loss": 4.6051, |
|
"mean_token_accuracy": 0.6206896305084229, |
|
"num_tokens": 8029123.0, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.0958, |
|
"grad_norm": 8.392003059387207, |
|
"learning_rate": 4.522e-06, |
|
"loss": 4.9078, |
|
"mean_token_accuracy": 0.6145320236682892, |
|
"num_tokens": 8048382.0, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.096, |
|
"grad_norm": 4.060763359069824, |
|
"learning_rate": 4.521e-06, |
|
"loss": 4.5817, |
|
"mean_token_accuracy": 0.5833333432674408, |
|
"num_tokens": 8058289.0, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.0962, |
|
"grad_norm": 10.596019744873047, |
|
"learning_rate": 4.520000000000001e-06, |
|
"loss": 4.2822, |
|
"mean_token_accuracy": 0.5428152531385422, |
|
"num_tokens": 8077371.0, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.0964, |
|
"grad_norm": 9.166875839233398, |
|
"learning_rate": 4.519e-06, |
|
"loss": 4.93, |
|
"mean_token_accuracy": 0.5862068831920624, |
|
"num_tokens": 8096669.0, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.0966, |
|
"grad_norm": 9.244916915893555, |
|
"learning_rate": 4.518e-06, |
|
"loss": 4.3974, |
|
"mean_token_accuracy": 0.5298245847225189, |
|
"num_tokens": 8111158.0, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.0968, |
|
"grad_norm": 5.592848300933838, |
|
"learning_rate": 4.517e-06, |
|
"loss": 4.9458, |
|
"mean_token_accuracy": 0.550000011920929, |
|
"num_tokens": 8126046.0, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.097, |
|
"grad_norm": 7.911005020141602, |
|
"learning_rate": 4.5160000000000005e-06, |
|
"loss": 4.9862, |
|
"mean_token_accuracy": 0.5861823260784149, |
|
"num_tokens": 8156760.0, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.0972, |
|
"grad_norm": 22.27390480041504, |
|
"learning_rate": 4.515000000000001e-06, |
|
"loss": 4.9185, |
|
"mean_token_accuracy": 0.5522802919149399, |
|
"num_tokens": 8178188.0, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.0974, |
|
"grad_norm": 6.784433364868164, |
|
"learning_rate": 4.514e-06, |
|
"loss": 4.1169, |
|
"mean_token_accuracy": 0.6459695100784302, |
|
"num_tokens": 8192689.0, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.0976, |
|
"grad_norm": 5.436159133911133, |
|
"learning_rate": 4.513e-06, |
|
"loss": 4.2387, |
|
"mean_token_accuracy": 0.6333333253860474, |
|
"num_tokens": 8207151.0, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.0978, |
|
"grad_norm": 10.865344047546387, |
|
"learning_rate": 4.512e-06, |
|
"loss": 4.5805, |
|
"mean_token_accuracy": 0.6162962913513184, |
|
"num_tokens": 8221643.0, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.098, |
|
"grad_norm": 18.25914764404297, |
|
"learning_rate": 4.5110000000000005e-06, |
|
"loss": 4.7056, |
|
"mean_token_accuracy": 0.5462962985038757, |
|
"num_tokens": 8242886.0, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.0982, |
|
"grad_norm": 8.091397285461426, |
|
"learning_rate": 4.510000000000001e-06, |
|
"loss": 4.7771, |
|
"mean_token_accuracy": 0.5669642984867096, |
|
"num_tokens": 8262615.0, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.0984, |
|
"grad_norm": 5.078823566436768, |
|
"learning_rate": 4.509e-06, |
|
"loss": 4.7878, |
|
"mean_token_accuracy": 0.5173454135656357, |
|
"num_tokens": 8275070.0, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.0986, |
|
"grad_norm": 10.31528377532959, |
|
"learning_rate": 4.508e-06, |
|
"loss": 4.5168, |
|
"mean_token_accuracy": 0.6481481492519379, |
|
"num_tokens": 8289526.0, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 0.0988, |
|
"grad_norm": 8.380623817443848, |
|
"learning_rate": 4.507e-06, |
|
"loss": 4.5738, |
|
"mean_token_accuracy": 0.5497835576534271, |
|
"num_tokens": 8306115.0, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 0.099, |
|
"grad_norm": 15.303783416748047, |
|
"learning_rate": 4.5060000000000006e-06, |
|
"loss": 4.4765, |
|
"mean_token_accuracy": 0.5352205336093903, |
|
"num_tokens": 8327367.0, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.0992, |
|
"grad_norm": 9.432476043701172, |
|
"learning_rate": 4.505e-06, |
|
"loss": 4.2436, |
|
"mean_token_accuracy": 0.5757575631141663, |
|
"num_tokens": 8347475.0, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 0.0994, |
|
"grad_norm": 7.746166706085205, |
|
"learning_rate": 4.504e-06, |
|
"loss": 4.6564, |
|
"mean_token_accuracy": 0.5372548997402191, |
|
"num_tokens": 8367244.0, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 0.0996, |
|
"grad_norm": 6.070708274841309, |
|
"learning_rate": 4.503e-06, |
|
"loss": 4.7593, |
|
"mean_token_accuracy": 0.5413165390491486, |
|
"num_tokens": 8387418.0, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.0998, |
|
"grad_norm": 9.557999610900879, |
|
"learning_rate": 4.502e-06, |
|
"loss": 4.6868, |
|
"mean_token_accuracy": 0.6296296119689941, |
|
"num_tokens": 8406160.0, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"grad_norm": 18.0493106842041, |
|
"learning_rate": 4.501000000000001e-06, |
|
"loss": 4.4627, |
|
"mean_token_accuracy": 0.5960648059844971, |
|
"num_tokens": 8425459.0, |
|
"step": 500 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 5000, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 250, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 3.9176829428171366e+17, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|