thesis-experiments-data/baseline-qwen2vl_sft-sft_colqwen-k2-train-29-APRIL-0_3/checkpoint-1000/trainer_state.json
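The JSON below is the Hugging Face `Trainer` state saved with this checkpoint; `log_history` holds one record per logged step (loss, gradient norm, learning rate, mean token accuracy, cumulative token count). The per-step values are noisy, but over these first ~270 steps the loss drops from roughly 12-13 to 7-8 while mean token accuracy climbs from about 0.2 to about 0.4. A minimal sketch for summarizing the file, assuming it sits at the path above on a local disk:

```python
import json

# Assumed local path, mirroring the checkpoint file shown here.
PATH = ("thesis-experiments-data/"
        "baseline-qwen2vl_sft-sft_colqwen-k2-train-29-APRIL-0_3/"
        "checkpoint-1000/trainer_state.json")

with open(PATH) as f:
    state = json.load(f)

# Keep only training records; eval records would lack a "loss" key.
log = [e for e in state["log_history"] if "loss" in e]

# Average the noisy per-step values over 50-step windows.
WINDOW = 50
for start in range(0, len(log), WINDOW):
    chunk = log[start:start + WINDOW]
    loss = sum(e["loss"] for e in chunk) / len(chunk)
    acc = sum(e["mean_token_accuracy"] for e in chunk) / len(chunk)
    print(f"steps {chunk[0]['step']:>4}-{chunk[-1]['step']:>4}  "
          f"loss {loss:7.3f}  token_acc {acc:.3f}")
```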
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002,
      "grad_norm": 5.698113441467285,
      "learning_rate": 5e-06,
      "loss": 12.2509,
      "mean_token_accuracy": 0.2450142428278923,
      "num_tokens": 16438.0,
      "step": 1
    },
    {
      "epoch": 0.0004,
      "grad_norm": 6.063949108123779,
      "learning_rate": 4.999000000000001e-06,
      "loss": 13.3748,
      "mean_token_accuracy": 0.2325708046555519,
      "num_tokens": 31368.0,
      "step": 2
    },
    {
      "epoch": 0.0006,
      "grad_norm": 8.026069641113281,
      "learning_rate": 4.998e-06,
      "loss": 12.5454,
      "mean_token_accuracy": 0.2398785501718521,
      "num_tokens": 58542.0,
      "step": 3
    },
    {
      "epoch": 0.0008,
      "grad_norm": 6.758559703826904,
      "learning_rate": 4.997000000000001e-06,
      "loss": 13.9155,
      "mean_token_accuracy": 0.21041666716337204,
      "num_tokens": 77844.0,
      "step": 4
    },
    {
      "epoch": 0.001,
      "grad_norm": 6.786197185516357,
      "learning_rate": 4.996e-06,
      "loss": 13.2561,
      "mean_token_accuracy": 0.1967741921544075,
      "num_tokens": 89661.0,
      "step": 5
    },
    {
      "epoch": 0.0012,
      "grad_norm": 9.514345169067383,
      "learning_rate": 4.9950000000000005e-06,
      "loss": 12.3378,
      "mean_token_accuracy": 0.20638945698738098,
      "num_tokens": 104612.0,
      "step": 6
    },
    {
      "epoch": 0.0014,
      "grad_norm": 7.438795566558838,
      "learning_rate": 4.994000000000001e-06,
      "loss": 12.0814,
      "mean_token_accuracy": 0.2916666716337204,
      "num_tokens": 114269.0,
      "step": 7
    },
    {
      "epoch": 0.0016,
      "grad_norm": 15.847832679748535,
      "learning_rate": 4.993e-06,
      "loss": 13.2761,
      "mean_token_accuracy": 0.2450142428278923,
      "num_tokens": 128762.0,
      "step": 8
    },
    {
      "epoch": 0.0018,
      "grad_norm": 7.620685577392578,
      "learning_rate": 4.992e-06,
      "loss": 13.1792,
      "mean_token_accuracy": 0.2165178582072258,
      "num_tokens": 148453.0,
      "step": 9
    },
    {
      "epoch": 0.002,
      "grad_norm": 8.26969051361084,
      "learning_rate": 4.991e-06,
      "loss": 12.853,
      "mean_token_accuracy": 0.21405228972434998,
      "num_tokens": 167693.0,
      "step": 10
    },
    {
      "epoch": 0.0022,
      "grad_norm": 7.328786373138428,
      "learning_rate": 4.9900000000000005e-06,
      "loss": 12.9578,
      "mean_token_accuracy": 0.2115987464785576,
      "num_tokens": 182157.0,
      "step": 11
    },
    {
      "epoch": 0.0024,
      "grad_norm": 7.058177947998047,
      "learning_rate": 4.989000000000001e-06,
      "loss": 12.7711,
      "mean_token_accuracy": 0.2002224698662758,
      "num_tokens": 201457.0,
      "step": 12
    },
    {
      "epoch": 0.0026,
      "grad_norm": 6.48744010925293,
      "learning_rate": 4.988e-06,
      "loss": 13.006,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 215922.0,
      "step": 13
    },
    {
      "epoch": 0.0028,
      "grad_norm": 6.006223201751709,
      "learning_rate": 4.987e-06,
      "loss": 10.5975,
      "mean_token_accuracy": 0.2096899226307869,
      "num_tokens": 225597.0,
      "step": 14
    },
    {
      "epoch": 0.003,
      "grad_norm": 6.274689674377441,
      "learning_rate": 4.986e-06,
      "loss": 11.2766,
      "mean_token_accuracy": 0.26851852238178253,
      "num_tokens": 240100.0,
      "step": 15
    },
    {
      "epoch": 0.0032,
      "grad_norm": 8.211908340454102,
      "learning_rate": 4.9850000000000006e-06,
      "loss": 13.7091,
      "mean_token_accuracy": 0.20202020555734634,
      "num_tokens": 259400.0,
      "step": 16
    },
    {
      "epoch": 0.0034,
      "grad_norm": 6.356493949890137,
      "learning_rate": 4.984000000000001e-06,
      "loss": 12.043,
      "mean_token_accuracy": 0.20519480854272842,
      "num_tokens": 278628.0,
      "step": 17
    },
    {
      "epoch": 0.0036,
      "grad_norm": 6.438048839569092,
      "learning_rate": 4.983e-06,
      "loss": 11.5484,
      "mean_token_accuracy": 0.2343137264251709,
      "num_tokens": 303856.0,
      "step": 18
    },
    {
      "epoch": 0.0038,
      "grad_norm": 7.152822494506836,
      "learning_rate": 4.982e-06,
      "loss": 13.4607,
      "mean_token_accuracy": 0.17500000447034836,
      "num_tokens": 318686.0,
      "step": 19
    },
    {
      "epoch": 0.004,
      "grad_norm": 6.464632034301758,
      "learning_rate": 4.981e-06,
      "loss": 11.7533,
      "mean_token_accuracy": 0.21959459781646729,
      "num_tokens": 337999.0,
      "step": 20
    },
    {
      "epoch": 0.0042,
      "grad_norm": 7.051283836364746,
      "learning_rate": 4.980000000000001e-06,
      "loss": 12.9788,
      "mean_token_accuracy": 0.22011494636535645,
      "num_tokens": 356602.0,
      "step": 21
    },
    {
      "epoch": 0.0044,
      "grad_norm": 9.595747947692871,
      "learning_rate": 4.979e-06,
      "loss": 10.9347,
      "mean_token_accuracy": 0.2645348906517029,
      "num_tokens": 373201.0,
      "step": 22
    },
    {
      "epoch": 0.0046,
      "grad_norm": 7.404125213623047,
      "learning_rate": 4.9780000000000005e-06,
      "loss": 12.5181,
      "mean_token_accuracy": 0.2666666731238365,
      "num_tokens": 387660.0,
      "step": 23
    },
    {
      "epoch": 0.0048,
      "grad_norm": 6.656332969665527,
      "learning_rate": 4.977e-06,
      "loss": 11.5566,
      "mean_token_accuracy": 0.209001787006855,
      "num_tokens": 402129.0,
      "step": 24
    },
    {
      "epoch": 0.005,
      "grad_norm": 6.866989612579346,
      "learning_rate": 4.976e-06,
      "loss": 12.4797,
      "mean_token_accuracy": 0.2379310354590416,
      "num_tokens": 421133.0,
      "step": 25
    },
    {
      "epoch": 0.0052,
      "grad_norm": 6.77735710144043,
      "learning_rate": 4.975000000000001e-06,
      "loss": 13.6256,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 440531.0,
      "step": 26
    },
    {
      "epoch": 0.0054,
      "grad_norm": 8.206353187561035,
      "learning_rate": 4.974e-06,
      "loss": 13.0667,
      "mean_token_accuracy": 0.21791187673807144,
      "num_tokens": 459798.0,
      "step": 27
    },
    {
      "epoch": 0.0056,
      "grad_norm": 7.40612268447876,
      "learning_rate": 4.9730000000000005e-06,
      "loss": 13.5884,
      "mean_token_accuracy": 0.19052419066429138,
      "num_tokens": 478587.0,
      "step": 28
    },
    {
      "epoch": 0.0058,
      "grad_norm": 6.71999454498291,
      "learning_rate": 4.972e-06,
      "loss": 12.0322,
      "mean_token_accuracy": 0.24526315927505493,
      "num_tokens": 492356.0,
      "step": 29
    },
    {
      "epoch": 0.006,
      "grad_norm": 8.108094215393066,
      "learning_rate": 4.971e-06,
      "loss": 12.4858,
      "mean_token_accuracy": 0.2566666677594185,
      "num_tokens": 507242.0,
      "step": 30
    },
    {
      "epoch": 0.0062,
      "grad_norm": 7.7994513511657715,
      "learning_rate": 4.970000000000001e-06,
      "loss": 13.0762,
      "mean_token_accuracy": 0.24568965286016464,
      "num_tokens": 525987.0,
      "step": 31
    },
    {
      "epoch": 0.0064,
      "grad_norm": 7.258217811584473,
      "learning_rate": 4.969e-06,
      "loss": 12.7757,
      "mean_token_accuracy": 0.24137930572032928,
      "num_tokens": 540447.0,
      "step": 32
    },
    {
      "epoch": 0.0066,
      "grad_norm": 14.746047973632812,
      "learning_rate": 4.9680000000000005e-06,
      "loss": 14.4335,
      "mean_token_accuracy": 0.22649572789669037,
      "num_tokens": 560131.0,
      "step": 33
    },
    {
      "epoch": 0.0068,
      "grad_norm": 5.289712429046631,
      "learning_rate": 4.967e-06,
      "loss": 9.6693,
      "mean_token_accuracy": 0.2875000089406967,
      "num_tokens": 579313.0,
      "step": 34
    },
    {
      "epoch": 0.007,
      "grad_norm": 7.960392951965332,
      "learning_rate": 4.966e-06,
      "loss": 13.1738,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 598611.0,
      "step": 35
    },
    {
      "epoch": 0.0072,
      "grad_norm": 8.295417785644531,
      "learning_rate": 4.965000000000001e-06,
      "loss": 11.8889,
      "mean_token_accuracy": 0.24144145101308823,
      "num_tokens": 615588.0,
      "step": 36
    },
    {
      "epoch": 0.0074,
      "grad_norm": 7.051126480102539,
      "learning_rate": 4.964e-06,
      "loss": 12.1364,
      "mean_token_accuracy": 0.23590733855962753,
      "num_tokens": 636032.0,
      "step": 37
    },
    {
      "epoch": 0.0076,
      "grad_norm": 7.895816326141357,
      "learning_rate": 4.963000000000001e-06,
      "loss": 12.8971,
      "mean_token_accuracy": 0.18571428954601288,
      "num_tokens": 655299.0,
      "step": 38
    },
    {
      "epoch": 0.0078,
      "grad_norm": 7.544738292694092,
      "learning_rate": 4.962e-06,
      "loss": 13.0581,
      "mean_token_accuracy": 0.20937500149011612,
      "num_tokens": 674357.0,
      "step": 39
    },
    {
      "epoch": 0.008,
      "grad_norm": 7.548039436340332,
      "learning_rate": 4.9610000000000004e-06,
      "loss": 13.2158,
      "mean_token_accuracy": 0.20892494916915894,
      "num_tokens": 693698.0,
      "step": 40
    },
    {
      "epoch": 0.0082,
      "grad_norm": 7.687658309936523,
      "learning_rate": 4.960000000000001e-06,
      "loss": 12.8524,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 703433.0,
      "step": 41
    },
    {
      "epoch": 0.0084,
      "grad_norm": 8.011468887329102,
      "learning_rate": 4.959e-06,
      "loss": 13.5655,
      "mean_token_accuracy": 0.21635150164365768,
      "num_tokens": 722733.0,
      "step": 42
    },
    {
      "epoch": 0.0086,
      "grad_norm": 11.084840774536133,
      "learning_rate": 4.958000000000001e-06,
      "loss": 12.7054,
      "mean_token_accuracy": 0.21746384352445602,
      "num_tokens": 738121.0,
      "step": 43
    },
    {
      "epoch": 0.0088,
      "grad_norm": 9.436426162719727,
      "learning_rate": 4.957e-06,
      "loss": 13.5213,
      "mean_token_accuracy": 0.22783251106739044,
      "num_tokens": 757723.0,
      "step": 44
    },
    {
      "epoch": 0.009,
      "grad_norm": 8.382990837097168,
      "learning_rate": 4.9560000000000005e-06,
      "loss": 12.6328,
      "mean_token_accuracy": 0.2567741870880127,
      "num_tokens": 772646.0,
      "step": 45
    },
    {
      "epoch": 0.0092,
      "grad_norm": 8.272336959838867,
      "learning_rate": 4.955e-06,
      "loss": 13.0365,
      "mean_token_accuracy": 0.2611111178994179,
      "num_tokens": 792334.0,
      "step": 46
    },
    {
      "epoch": 0.0094,
      "grad_norm": 10.347405433654785,
      "learning_rate": 4.954e-06,
      "loss": 12.8164,
      "mean_token_accuracy": 0.22616633027791977,
      "num_tokens": 811713.0,
      "step": 47
    },
    {
      "epoch": 0.0096,
      "grad_norm": 7.27515983581543,
      "learning_rate": 4.953000000000001e-06,
      "loss": 11.0123,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 832998.0,
      "step": 48
    },
    {
      "epoch": 0.0098,
      "grad_norm": 8.973237991333008,
      "learning_rate": 4.952e-06,
      "loss": 12.9664,
      "mean_token_accuracy": 0.19805195182561874,
      "num_tokens": 861717.0,
      "step": 49
    },
    {
      "epoch": 0.01,
      "grad_norm": 8.738320350646973,
      "learning_rate": 4.9510000000000005e-06,
      "loss": 12.5728,
      "mean_token_accuracy": 0.24344827979803085,
      "num_tokens": 875736.0,
      "step": 50
    },
    {
      "epoch": 0.0102,
      "grad_norm": 8.51733112335205,
      "learning_rate": 4.95e-06,
      "loss": 12.7596,
      "mean_token_accuracy": 0.22380952537059784,
      "num_tokens": 894386.0,
      "step": 51
    },
    {
      "epoch": 0.0104,
      "grad_norm": 8.787413597106934,
      "learning_rate": 4.949e-06,
      "loss": 12.4493,
      "mean_token_accuracy": 0.268075630068779,
      "num_tokens": 908848.0,
      "step": 52
    },
    {
      "epoch": 0.0106,
      "grad_norm": 7.849542617797852,
      "learning_rate": 4.948000000000001e-06,
      "loss": 12.7514,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 923309.0,
      "step": 53
    },
    {
      "epoch": 0.0108,
      "grad_norm": 8.378942489624023,
      "learning_rate": 4.947e-06,
      "loss": 11.4953,
      "mean_token_accuracy": 0.2290322557091713,
      "num_tokens": 938298.0,
      "step": 54
    },
    {
      "epoch": 0.011,
      "grad_norm": 8.311882972717285,
      "learning_rate": 4.946000000000001e-06,
      "loss": 12.0904,
      "mean_token_accuracy": 0.24014336615800858,
      "num_tokens": 948288.0,
      "step": 55
    },
    {
      "epoch": 0.0112,
      "grad_norm": 9.599881172180176,
      "learning_rate": 4.945e-06,
      "loss": 14.3569,
      "mean_token_accuracy": 0.22649572789669037,
      "num_tokens": 967543.0,
      "step": 56
    },
    {
      "epoch": 0.0114,
      "grad_norm": 8.84776496887207,
      "learning_rate": 4.9440000000000004e-06,
      "loss": 12.2268,
      "mean_token_accuracy": 0.28287841379642487,
      "num_tokens": 981726.0,
      "step": 57
    },
    {
      "epoch": 0.0116,
      "grad_norm": 11.259871482849121,
      "learning_rate": 4.943000000000001e-06,
      "loss": 12.6356,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 996225.0,
      "step": 58
    },
    {
      "epoch": 0.0118,
      "grad_norm": 10.529711723327637,
      "learning_rate": 4.942e-06,
      "loss": 11.9829,
      "mean_token_accuracy": 0.26986077427864075,
      "num_tokens": 1015573.0,
      "step": 59
    },
    {
      "epoch": 0.012,
      "grad_norm": 8.90577220916748,
      "learning_rate": 4.941000000000001e-06,
      "loss": 11.4895,
      "mean_token_accuracy": 0.2374911978840828,
      "num_tokens": 1034198.0,
      "step": 60
    },
    {
      "epoch": 0.0122,
      "grad_norm": 7.851955413818359,
      "learning_rate": 4.94e-06,
      "loss": 12.0083,
      "mean_token_accuracy": 0.22478991746902466,
      "num_tokens": 1045419.0,
      "step": 61
    },
    {
      "epoch": 0.0124,
      "grad_norm": 9.812698364257812,
      "learning_rate": 4.9390000000000005e-06,
      "loss": 12.9777,
      "mean_token_accuracy": 0.2379310354590416,
      "num_tokens": 1064723.0,
      "step": 62
    },
    {
      "epoch": 0.0126,
      "grad_norm": 8.35107707977295,
      "learning_rate": 4.938000000000001e-06,
      "loss": 11.3187,
      "mean_token_accuracy": 0.24358975142240524,
      "num_tokens": 1079171.0,
      "step": 63
    },
    {
      "epoch": 0.0128,
      "grad_norm": 7.16640567779541,
      "learning_rate": 4.937e-06,
      "loss": 10.2182,
      "mean_token_accuracy": 0.34151194989681244,
      "num_tokens": 1093695.0,
      "step": 64
    },
    {
      "epoch": 0.013,
      "grad_norm": 10.18386459350586,
      "learning_rate": 4.936e-06,
      "loss": 12.2175,
      "mean_token_accuracy": 0.25961539149284363,
      "num_tokens": 1108580.0,
      "step": 65
    },
    {
      "epoch": 0.0132,
      "grad_norm": 8.232446670532227,
      "learning_rate": 4.935e-06,
      "loss": 11.8182,
      "mean_token_accuracy": 0.27314814925193787,
      "num_tokens": 1123098.0,
      "step": 66
    },
    {
      "epoch": 0.0134,
      "grad_norm": 8.809263229370117,
      "learning_rate": 4.9340000000000005e-06,
      "loss": 10.9148,
      "mean_token_accuracy": 0.20927418768405914,
      "num_tokens": 1137548.0,
      "step": 67
    },
    {
      "epoch": 0.0136,
      "grad_norm": 9.865777015686035,
      "learning_rate": 4.933000000000001e-06,
      "loss": 12.6294,
      "mean_token_accuracy": 0.22962962836027145,
      "num_tokens": 1156845.0,
      "step": 68
    },
    {
      "epoch": 0.0138,
      "grad_norm": 7.259024620056152,
      "learning_rate": 4.932e-06,
      "loss": 9.7717,
      "mean_token_accuracy": 0.26875001192092896,
      "num_tokens": 1171363.0,
      "step": 69
    },
    {
      "epoch": 0.014,
      "grad_norm": 11.756244659423828,
      "learning_rate": 4.931e-06,
      "loss": 12.6686,
      "mean_token_accuracy": 0.28285714983940125,
      "num_tokens": 1185814.0,
      "step": 70
    },
    {
      "epoch": 0.0142,
      "grad_norm": 9.128395080566406,
      "learning_rate": 4.93e-06,
      "loss": 11.1826,
      "mean_token_accuracy": 0.21765056997537613,
      "num_tokens": 1200437.0,
      "step": 71
    },
    {
      "epoch": 0.0144,
      "grad_norm": 8.472599029541016,
      "learning_rate": 4.929000000000001e-06,
      "loss": 11.6617,
      "mean_token_accuracy": 0.2060810774564743,
      "num_tokens": 1219264.0,
      "step": 72
    },
    {
      "epoch": 0.0146,
      "grad_norm": 11.549400329589844,
      "learning_rate": 4.928000000000001e-06,
      "loss": 12.9588,
      "mean_token_accuracy": 0.25,
      "num_tokens": 1238560.0,
      "step": 73
    },
    {
      "epoch": 0.0148,
      "grad_norm": 11.084433555603027,
      "learning_rate": 4.9270000000000004e-06,
      "loss": 10.4111,
      "mean_token_accuracy": 0.21954887360334396,
      "num_tokens": 1247376.0,
      "step": 74
    },
    {
      "epoch": 0.015,
      "grad_norm": 10.906563758850098,
      "learning_rate": 4.926e-06,
      "loss": 12.5231,
      "mean_token_accuracy": 0.23307790607213974,
      "num_tokens": 1266611.0,
      "step": 75
    },
    {
      "epoch": 0.0152,
      "grad_norm": 9.466647148132324,
      "learning_rate": 4.925e-06,
      "loss": 11.4363,
      "mean_token_accuracy": 0.24193547666072845,
      "num_tokens": 1281132.0,
      "step": 76
    },
    {
      "epoch": 0.0154,
      "grad_norm": 11.185935020446777,
      "learning_rate": 4.924000000000001e-06,
      "loss": 12.6383,
      "mean_token_accuracy": 0.22685185074806213,
      "num_tokens": 1301049.0,
      "step": 77
    },
    {
      "epoch": 0.0156,
      "grad_norm": 11.0143461227417,
      "learning_rate": 4.923000000000001e-06,
      "loss": 12.479,
      "mean_token_accuracy": 0.22962962836027145,
      "num_tokens": 1322278.0,
      "step": 78
    },
    {
      "epoch": 0.0158,
      "grad_norm": 12.330080032348633,
      "learning_rate": 4.9220000000000005e-06,
      "loss": 12.2351,
      "mean_token_accuracy": 0.2675606608390808,
      "num_tokens": 1341536.0,
      "step": 79
    },
    {
      "epoch": 0.016,
      "grad_norm": 10.486513137817383,
      "learning_rate": 4.921e-06,
      "loss": 11.6896,
      "mean_token_accuracy": 0.25356507301330566,
      "num_tokens": 1355885.0,
      "step": 80
    },
    {
      "epoch": 0.0162,
      "grad_norm": 12.150262832641602,
      "learning_rate": 4.92e-06,
      "loss": 12.5725,
      "mean_token_accuracy": 0.2321428582072258,
      "num_tokens": 1375181.0,
      "step": 81
    },
    {
      "epoch": 0.0164,
      "grad_norm": 11.387964248657227,
      "learning_rate": 4.919000000000001e-06,
      "loss": 11.5796,
      "mean_token_accuracy": 0.29461538791656494,
      "num_tokens": 1384853.0,
      "step": 82
    },
    {
      "epoch": 0.0166,
      "grad_norm": 9.878727912902832,
      "learning_rate": 4.918e-06,
      "loss": 11.7534,
      "mean_token_accuracy": 0.24473684281110764,
      "num_tokens": 1401178.0,
      "step": 83
    },
    {
      "epoch": 0.0168,
      "grad_norm": 9.827662467956543,
      "learning_rate": 4.9170000000000005e-06,
      "loss": 12.0345,
      "mean_token_accuracy": 0.18034055829048157,
      "num_tokens": 1419976.0,
      "step": 84
    },
    {
      "epoch": 0.017,
      "grad_norm": 10.327628135681152,
      "learning_rate": 4.916e-06,
      "loss": 11.1049,
      "mean_token_accuracy": 0.3095238208770752,
      "num_tokens": 1429652.0,
      "step": 85
    },
    {
      "epoch": 0.0172,
      "grad_norm": 10.67590045928955,
      "learning_rate": 4.915e-06,
      "loss": 10.7087,
      "mean_token_accuracy": 0.2750582844018936,
      "num_tokens": 1444113.0,
      "step": 86
    },
    {
      "epoch": 0.0174,
      "grad_norm": 20.292959213256836,
      "learning_rate": 4.914000000000001e-06,
      "loss": 10.797,
      "mean_token_accuracy": 0.32500000298023224,
      "num_tokens": 1453773.0,
      "step": 87
    },
    {
      "epoch": 0.0176,
      "grad_norm": 8.3117036819458,
      "learning_rate": 4.913e-06,
      "loss": 9.394,
      "mean_token_accuracy": 0.31915584206581116,
      "num_tokens": 1475024.0,
      "step": 88
    },
    {
      "epoch": 0.0178,
      "grad_norm": 11.334404945373535,
      "learning_rate": 4.9120000000000006e-06,
      "loss": 11.0718,
      "mean_token_accuracy": 0.2678571492433548,
      "num_tokens": 1489489.0,
      "step": 89
    },
    {
      "epoch": 0.018,
      "grad_norm": 15.36023235321045,
      "learning_rate": 4.911e-06,
      "loss": 11.7992,
      "mean_token_accuracy": 0.28607918322086334,
      "num_tokens": 1508816.0,
      "step": 90
    },
    {
      "epoch": 0.0182,
      "grad_norm": 11.725635528564453,
      "learning_rate": 4.9100000000000004e-06,
      "loss": 11.9207,
      "mean_token_accuracy": 0.26123301684856415,
      "num_tokens": 1530050.0,
      "step": 91
    },
    {
      "epoch": 0.0184,
      "grad_norm": 14.75428295135498,
      "learning_rate": 4.909000000000001e-06,
      "loss": 11.0294,
      "mean_token_accuracy": 0.21236559003591537,
      "num_tokens": 1544713.0,
      "step": 92
    },
    {
      "epoch": 0.0186,
      "grad_norm": 10.447436332702637,
      "learning_rate": 4.908e-06,
      "loss": 9.7536,
      "mean_token_accuracy": 0.24568965286016464,
      "num_tokens": 1559226.0,
      "step": 93
    },
    {
      "epoch": 0.0188,
      "grad_norm": 14.036280632019043,
      "learning_rate": 4.907000000000001e-06,
      "loss": 12.2065,
      "mean_token_accuracy": 0.2771739065647125,
      "num_tokens": 1568898.0,
      "step": 94
    },
    {
      "epoch": 0.019,
      "grad_norm": 13.364744186401367,
      "learning_rate": 4.906e-06,
      "loss": 13.168,
      "mean_token_accuracy": 0.2321428582072258,
      "num_tokens": 1588133.0,
      "step": 95
    },
    {
      "epoch": 0.0192,
      "grad_norm": 12.441611289978027,
      "learning_rate": 4.9050000000000005e-06,
      "loss": 11.6806,
      "mean_token_accuracy": 0.21875,
      "num_tokens": 1607218.0,
      "step": 96
    },
    {
      "epoch": 0.0194,
      "grad_norm": 11.559666633605957,
      "learning_rate": 4.904000000000001e-06,
      "loss": 12.2055,
      "mean_token_accuracy": 0.23010753095149994,
      "num_tokens": 1626557.0,
      "step": 97
    },
    {
      "epoch": 0.0196,
      "grad_norm": 11.131147384643555,
      "learning_rate": 4.903e-06,
      "loss": 11.4201,
      "mean_token_accuracy": 0.2931034490466118,
      "num_tokens": 1641017.0,
      "step": 98
    },
    {
      "epoch": 0.0198,
      "grad_norm": 12.186894416809082,
      "learning_rate": 4.902000000000001e-06,
      "loss": 12.1568,
      "mean_token_accuracy": 0.23180076479911804,
      "num_tokens": 1655475.0,
      "step": 99
    },
    {
      "epoch": 0.02,
      "grad_norm": 11.864778518676758,
      "learning_rate": 4.901e-06,
      "loss": 12.4851,
      "mean_token_accuracy": 0.21008403599262238,
      "num_tokens": 1674815.0,
      "step": 100
    },
    {
      "epoch": 0.0202,
      "grad_norm": 14.35185718536377,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 11.1814,
      "mean_token_accuracy": 0.2612612694501877,
      "num_tokens": 1686681.0,
      "step": 101
    },
    {
      "epoch": 0.0204,
      "grad_norm": 18.715627670288086,
      "learning_rate": 4.899e-06,
      "loss": 11.632,
      "mean_token_accuracy": 0.30199430882930756,
      "num_tokens": 1701565.0,
      "step": 102
    },
    {
      "epoch": 0.0206,
      "grad_norm": 12.892010688781738,
      "learning_rate": 4.898e-06,
      "loss": 12.3157,
      "mean_token_accuracy": 0.23806367069482803,
      "num_tokens": 1715938.0,
      "step": 103
    },
    {
      "epoch": 0.0208,
      "grad_norm": 13.078173637390137,
      "learning_rate": 4.897000000000001e-06,
      "loss": 11.4954,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 1734685.0,
      "step": 104
    },
    {
      "epoch": 0.021,
      "grad_norm": 14.580618858337402,
      "learning_rate": 4.896e-06,
      "loss": 10.4619,
      "mean_token_accuracy": 0.2797202914953232,
      "num_tokens": 1757899.0,
      "step": 105
    },
    {
      "epoch": 0.0212,
      "grad_norm": 13.174345016479492,
      "learning_rate": 4.8950000000000006e-06,
      "loss": 10.5835,
      "mean_token_accuracy": 0.27546295523643494,
      "num_tokens": 1778410.0,
      "step": 106
    },
    {
      "epoch": 0.0214,
      "grad_norm": 13.970446586608887,
      "learning_rate": 4.894e-06,
      "loss": 11.1265,
      "mean_token_accuracy": 0.2693749964237213,
      "num_tokens": 1791819.0,
      "step": 107
    },
    {
      "epoch": 0.0216,
      "grad_norm": 11.994514465332031,
      "learning_rate": 4.893e-06,
      "loss": 11.3511,
      "mean_token_accuracy": 0.25462962687015533,
      "num_tokens": 1811118.0,
      "step": 108
    },
    {
      "epoch": 0.0218,
      "grad_norm": 13.328775405883789,
      "learning_rate": 4.892000000000001e-06,
      "loss": 11.9787,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 1825598.0,
      "step": 109
    },
    {
      "epoch": 0.022,
      "grad_norm": 11.813949584960938,
      "learning_rate": 4.891e-06,
      "loss": 10.614,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 1840096.0,
      "step": 110
    },
    {
      "epoch": 0.0222,
      "grad_norm": 17.409263610839844,
      "learning_rate": 4.890000000000001e-06,
      "loss": 10.5087,
      "mean_token_accuracy": 0.32692308723926544,
      "num_tokens": 1849750.0,
      "step": 111
    },
    {
      "epoch": 0.0224,
      "grad_norm": 19.462722778320312,
      "learning_rate": 4.889e-06,
      "loss": 10.2288,
      "mean_token_accuracy": 0.25729166716337204,
      "num_tokens": 1859433.0,
      "step": 112
    },
    {
      "epoch": 0.0226,
      "grad_norm": 11.595419883728027,
      "learning_rate": 4.8880000000000005e-06,
      "loss": 11.0996,
      "mean_token_accuracy": 0.2807881832122803,
      "num_tokens": 1873892.0,
      "step": 113
    },
    {
      "epoch": 0.0228,
      "grad_norm": 14.413046836853027,
      "learning_rate": 4.887000000000001e-06,
      "loss": 11.6535,
      "mean_token_accuracy": 0.2341153472661972,
      "num_tokens": 1893196.0,
      "step": 114
    },
    {
      "epoch": 0.023,
      "grad_norm": 12.218829154968262,
      "learning_rate": 4.886e-06,
      "loss": 10.9169,
      "mean_token_accuracy": 0.2557603716850281,
      "num_tokens": 1907676.0,
      "step": 115
    },
    {
      "epoch": 0.0232,
      "grad_norm": 16.51720428466797,
      "learning_rate": 4.885000000000001e-06,
      "loss": 9.8406,
      "mean_token_accuracy": 0.25968992710113525,
      "num_tokens": 1921596.0,
      "step": 116
    },
    {
      "epoch": 0.0234,
      "grad_norm": 10.608232498168945,
      "learning_rate": 4.884e-06,
      "loss": 9.9809,
      "mean_token_accuracy": 0.22068965435028076,
      "num_tokens": 1940247.0,
      "step": 117
    },
    {
      "epoch": 0.0236,
      "grad_norm": 14.17568302154541,
      "learning_rate": 4.8830000000000005e-06,
      "loss": 11.3908,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 1959268.0,
      "step": 118
    },
    {
      "epoch": 0.0238,
      "grad_norm": 12.197131156921387,
      "learning_rate": 4.882000000000001e-06,
      "loss": 10.9438,
      "mean_token_accuracy": 0.2538699731230736,
      "num_tokens": 1978218.0,
      "step": 119
    },
    {
      "epoch": 0.024,
      "grad_norm": 15.261062622070312,
      "learning_rate": 4.881e-06,
      "loss": 11.0329,
      "mean_token_accuracy": 0.24621212482452393,
      "num_tokens": 1997519.0,
      "step": 120
    },
    {
      "epoch": 0.0242,
      "grad_norm": 13.116536140441895,
      "learning_rate": 4.880000000000001e-06,
      "loss": 11.9918,
      "mean_token_accuracy": 0.24137930572032928,
      "num_tokens": 2016817.0,
      "step": 121
    },
    {
      "epoch": 0.0244,
      "grad_norm": 18.174373626708984,
      "learning_rate": 4.879e-06,
      "loss": 11.0414,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 2035875.0,
      "step": 122
    },
    {
      "epoch": 0.0246,
      "grad_norm": 17.258121490478516,
      "learning_rate": 4.8780000000000006e-06,
      "loss": 11.1173,
      "mean_token_accuracy": 0.3010057359933853,
      "num_tokens": 2050330.0,
      "step": 123
    },
    {
      "epoch": 0.0248,
      "grad_norm": 12.636884689331055,
      "learning_rate": 4.877000000000001e-06,
      "loss": 11.3399,
      "mean_token_accuracy": 0.2879464328289032,
      "num_tokens": 2070156.0,
      "step": 124
    },
    {
      "epoch": 0.025,
      "grad_norm": 18.457618713378906,
      "learning_rate": 4.876e-06,
      "loss": 11.5595,
      "mean_token_accuracy": 0.28418803960084915,
      "num_tokens": 2084771.0,
      "step": 125
    },
    {
      "epoch": 0.0252,
      "grad_norm": 14.281397819519043,
      "learning_rate": 4.875e-06,
      "loss": 10.9901,
      "mean_token_accuracy": 0.28140393644571304,
      "num_tokens": 2104228.0,
      "step": 126
    },
    {
      "epoch": 0.0254,
      "grad_norm": 13.08484935760498,
      "learning_rate": 4.874e-06,
      "loss": 9.4802,
      "mean_token_accuracy": 0.2637759745121002,
      "num_tokens": 2118877.0,
      "step": 127
    },
    {
      "epoch": 0.0256,
      "grad_norm": 11.949925422668457,
      "learning_rate": 4.873000000000001e-06,
      "loss": 10.3703,
      "mean_token_accuracy": 0.2619825750589371,
      "num_tokens": 2133298.0,
      "step": 128
    },
    {
      "epoch": 0.0258,
      "grad_norm": 14.950297355651855,
      "learning_rate": 4.872000000000001e-06,
      "loss": 10.6116,
      "mean_token_accuracy": 0.33000001311302185,
      "num_tokens": 2142955.0,
      "step": 129
    },
    {
      "epoch": 0.026,
      "grad_norm": 16.433286666870117,
      "learning_rate": 4.8710000000000005e-06,
      "loss": 9.4482,
      "mean_token_accuracy": 0.3333333432674408,
      "num_tokens": 2159447.0,
      "step": 130
    },
    {
      "epoch": 0.0262,
      "grad_norm": 12.467981338500977,
      "learning_rate": 4.87e-06,
      "loss": 10.2354,
      "mean_token_accuracy": 0.33796295523643494,
      "num_tokens": 2178352.0,
      "step": 131
    },
    {
      "epoch": 0.0264,
      "grad_norm": 11.493000030517578,
      "learning_rate": 4.869e-06,
      "loss": 9.0597,
      "mean_token_accuracy": 0.2736175060272217,
      "num_tokens": 2199174.0,
      "step": 132
    },
    {
      "epoch": 0.0266,
      "grad_norm": 13.966115951538086,
      "learning_rate": 4.868000000000001e-06,
      "loss": 10.3177,
      "mean_token_accuracy": 0.3325917571783066,
      "num_tokens": 2213636.0,
      "step": 133
    },
    {
      "epoch": 0.0268,
      "grad_norm": 13.971321105957031,
      "learning_rate": 4.867000000000001e-06,
      "loss": 10.2564,
      "mean_token_accuracy": 0.31481482088565826,
      "num_tokens": 2228050.0,
      "step": 134
    },
    {
      "epoch": 0.027,
      "grad_norm": 16.246124267578125,
      "learning_rate": 4.8660000000000005e-06,
      "loss": 10.7549,
      "mean_token_accuracy": 0.30943846702575684,
      "num_tokens": 2247348.0,
      "step": 135
    },
    {
      "epoch": 0.0272,
      "grad_norm": 17.702425003051758,
      "learning_rate": 4.865e-06,
      "loss": 10.1729,
      "mean_token_accuracy": 0.3575989753007889,
      "num_tokens": 2261806.0,
      "step": 136
    },
    {
      "epoch": 0.0274,
      "grad_norm": 13.732104301452637,
      "learning_rate": 4.864e-06,
      "loss": 10.9004,
      "mean_token_accuracy": 0.28607918322086334,
      "num_tokens": 2281224.0,
      "step": 137
    },
    {
      "epoch": 0.0276,
      "grad_norm": 17.568925857543945,
      "learning_rate": 4.863000000000001e-06,
      "loss": 10.1658,
      "mean_token_accuracy": 0.2857142984867096,
      "num_tokens": 2295720.0,
      "step": 138
    },
    {
      "epoch": 0.0278,
      "grad_norm": 13.424271583557129,
      "learning_rate": 4.862e-06,
      "loss": 10.3543,
      "mean_token_accuracy": 0.28928571939468384,
      "num_tokens": 2310345.0,
      "step": 139
    },
    {
      "epoch": 0.028,
      "grad_norm": 14.413524627685547,
      "learning_rate": 4.8610000000000006e-06,
      "loss": 11.3822,
      "mean_token_accuracy": 0.22828783839941025,
      "num_tokens": 2347736.0,
      "step": 140
    },
    {
      "epoch": 0.0282,
      "grad_norm": 22.185630798339844,
      "learning_rate": 4.86e-06,
      "loss": 10.6185,
      "mean_token_accuracy": 0.2911111190915108,
      "num_tokens": 2361685.0,
      "step": 141
    },
    {
      "epoch": 0.0284,
      "grad_norm": 12.253998756408691,
      "learning_rate": 4.859e-06,
      "loss": 9.2399,
      "mean_token_accuracy": 0.288621261715889,
      "num_tokens": 2376158.0,
      "step": 142
    },
    {
      "epoch": 0.0286,
      "grad_norm": 16.229686737060547,
      "learning_rate": 4.858000000000001e-06,
      "loss": 10.3377,
      "mean_token_accuracy": 0.22227822244167328,
      "num_tokens": 2391501.0,
      "step": 143
    },
    {
      "epoch": 0.0288,
      "grad_norm": 9.664397239685059,
      "learning_rate": 4.857e-06,
      "loss": 9.3351,
      "mean_token_accuracy": 0.26456456631422043,
      "num_tokens": 2411105.0,
      "step": 144
    },
    {
      "epoch": 0.029,
      "grad_norm": 12.8119478225708,
      "learning_rate": 4.856e-06,
      "loss": 9.87,
      "mean_token_accuracy": 0.24166666716337204,
      "num_tokens": 2431185.0,
      "step": 145
    },
    {
      "epoch": 0.0292,
      "grad_norm": 10.490764617919922,
      "learning_rate": 4.855e-06,
      "loss": 8.7889,
      "mean_token_accuracy": 0.36666667461395264,
      "num_tokens": 2452470.0,
      "step": 146
    },
    {
      "epoch": 0.0294,
      "grad_norm": 13.65211296081543,
      "learning_rate": 4.8540000000000005e-06,
      "loss": 10.4286,
      "mean_token_accuracy": 0.2718253955245018,
      "num_tokens": 2472396.0,
      "step": 147
    },
    {
      "epoch": 0.0296,
      "grad_norm": 18.866209030151367,
      "learning_rate": 4.853000000000001e-06,
      "loss": 10.2372,
      "mean_token_accuracy": 0.38141025602817535,
      "num_tokens": 2486886.0,
      "step": 148
    },
    {
      "epoch": 0.0298,
      "grad_norm": 14.852785110473633,
      "learning_rate": 4.852e-06,
      "loss": 10.0487,
      "mean_token_accuracy": 0.30820105969905853,
      "num_tokens": 2506143.0,
      "step": 149
    },
    {
      "epoch": 0.03,
      "grad_norm": 13.972378730773926,
      "learning_rate": 4.851e-06,
      "loss": 10.6291,
      "mean_token_accuracy": 0.25820106267929077,
      "num_tokens": 2520401.0,
      "step": 150
    },
    {
      "epoch": 0.0302,
      "grad_norm": 14.618459701538086,
      "learning_rate": 4.85e-06,
      "loss": 10.7579,
      "mean_token_accuracy": 0.23571428656578064,
      "num_tokens": 2539135.0,
      "step": 151
    },
    {
      "epoch": 0.0304,
      "grad_norm": 15.176739692687988,
      "learning_rate": 4.8490000000000005e-06,
      "loss": 9.6595,
      "mean_token_accuracy": 0.2510339096188545,
      "num_tokens": 2558562.0,
      "step": 152
    },
    {
      "epoch": 0.0306,
      "grad_norm": 16.972919464111328,
      "learning_rate": 4.848000000000001e-06,
      "loss": 10.8999,
      "mean_token_accuracy": 0.25925925374031067,
      "num_tokens": 2577856.0,
      "step": 153
    },
    {
      "epoch": 0.0308,
      "grad_norm": 16.451147079467773,
      "learning_rate": 4.847e-06,
      "loss": 8.5505,
      "mean_token_accuracy": 0.36249999701976776,
      "num_tokens": 2592246.0,
      "step": 154
    },
    {
      "epoch": 0.031,
      "grad_norm": 24.95278549194336,
      "learning_rate": 4.846e-06,
      "loss": 10.2367,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 2607133.0,
      "step": 155
    },
    {
      "epoch": 0.0312,
      "grad_norm": 15.770346641540527,
      "learning_rate": 4.845e-06,
      "loss": 9.6085,
      "mean_token_accuracy": 0.27272728085517883,
      "num_tokens": 2621734.0,
      "step": 156
    },
    {
      "epoch": 0.0314,
      "grad_norm": 12.314064025878906,
      "learning_rate": 4.8440000000000005e-06,
      "loss": 9.816,
      "mean_token_accuracy": 0.2540322542190552,
      "num_tokens": 2641357.0,
      "step": 157
    },
    {
      "epoch": 0.0316,
      "grad_norm": 13.379799842834473,
      "learning_rate": 4.843000000000001e-06,
      "loss": 10.378,
      "mean_token_accuracy": 0.32804232835769653,
      "num_tokens": 2660177.0,
      "step": 158
    },
    {
      "epoch": 0.0318,
      "grad_norm": 17.458240509033203,
      "learning_rate": 4.842e-06,
      "loss": 9.3842,
      "mean_token_accuracy": 0.2557164579629898,
      "num_tokens": 2679528.0,
      "step": 159
    },
    {
      "epoch": 0.032,
      "grad_norm": 25.741785049438477,
      "learning_rate": 4.841e-06,
      "loss": 9.0768,
      "mean_token_accuracy": 0.3270474076271057,
      "num_tokens": 2693652.0,
      "step": 160
    },
    {
      "epoch": 0.0322,
      "grad_norm": 13.557204246520996,
      "learning_rate": 4.84e-06,
      "loss": 9.6471,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 2713320.0,
      "step": 161
    },
    {
      "epoch": 0.0324,
      "grad_norm": 14.830061912536621,
      "learning_rate": 4.839000000000001e-06,
      "loss": 11.3496,
      "mean_token_accuracy": 0.24049513787031174,
      "num_tokens": 2733424.0,
      "step": 162
    },
    {
      "epoch": 0.0326,
      "grad_norm": 12.371265411376953,
      "learning_rate": 4.838e-06,
      "loss": 8.6217,
      "mean_token_accuracy": 0.25988225638866425,
      "num_tokens": 2752613.0,
      "step": 163
    },
    {
      "epoch": 0.0328,
      "grad_norm": 12.722640037536621,
      "learning_rate": 4.8370000000000004e-06,
      "loss": 9.2979,
      "mean_token_accuracy": 0.2337121218442917,
      "num_tokens": 2773896.0,
      "step": 164
    },
    {
      "epoch": 0.033,
      "grad_norm": 12.796667098999023,
      "learning_rate": 4.836e-06,
      "loss": 9.9112,
      "mean_token_accuracy": 0.3337438404560089,
      "num_tokens": 2793334.0,
      "step": 165
    },
    {
      "epoch": 0.0332,
      "grad_norm": 15.983271598815918,
      "learning_rate": 4.835e-06,
      "loss": 9.5066,
      "mean_token_accuracy": 0.29256465286016464,
      "num_tokens": 2807755.0,
      "step": 166
    },
    {
      "epoch": 0.0334,
      "grad_norm": 15.554715156555176,
      "learning_rate": 4.834000000000001e-06,
      "loss": 10.5127,
      "mean_token_accuracy": 0.2986453175544739,
      "num_tokens": 2827193.0,
      "step": 167
    },
    {
      "epoch": 0.0336,
      "grad_norm": 14.6381196975708,
      "learning_rate": 4.833e-06,
      "loss": 9.7383,
      "mean_token_accuracy": 0.2586618810892105,
      "num_tokens": 2846449.0,
      "step": 168
    },
    {
      "epoch": 0.0338,
      "grad_norm": 16.013647079467773,
      "learning_rate": 4.8320000000000005e-06,
      "loss": 9.0009,
      "mean_token_accuracy": 0.32356322556734085,
      "num_tokens": 2860910.0,
      "step": 169
    },
    {
      "epoch": 0.034,
      "grad_norm": 13.708538055419922,
      "learning_rate": 4.831e-06,
      "loss": 9.4298,
      "mean_token_accuracy": 0.28114478290081024,
      "num_tokens": 2880172.0,
      "step": 170
    },
    {
      "epoch": 0.0342,
      "grad_norm": 14.314607620239258,
      "learning_rate": 4.83e-06,
      "loss": 9.2596,
      "mean_token_accuracy": 0.3014460504055023,
      "num_tokens": 2899434.0,
      "step": 171
    },
    {
      "epoch": 0.0344,
      "grad_norm": 12.27084732055664,
      "learning_rate": 4.829000000000001e-06,
      "loss": 9.5638,
      "mean_token_accuracy": 0.33095238357782364,
      "num_tokens": 2913932.0,
      "step": 172
    },
    {
      "epoch": 0.0346,
      "grad_norm": 14.023222923278809,
      "learning_rate": 4.828e-06,
      "loss": 9.689,
      "mean_token_accuracy": 0.31680162250995636,
      "num_tokens": 2928356.0,
      "step": 173
    },
    {
      "epoch": 0.0348,
      "grad_norm": 14.490949630737305,
      "learning_rate": 4.8270000000000005e-06,
      "loss": 8.8018,
      "mean_token_accuracy": 0.34068627655506134,
      "num_tokens": 2940589.0,
      "step": 174
    },
    {
      "epoch": 0.035,
      "grad_norm": 17.97809410095215,
      "learning_rate": 4.826e-06,
      "loss": 9.9556,
      "mean_token_accuracy": 0.3452381044626236,
      "num_tokens": 2959486.0,
      "step": 175
    },
    {
      "epoch": 0.0352,
      "grad_norm": 13.302875518798828,
      "learning_rate": 4.825e-06,
      "loss": 9.4104,
      "mean_token_accuracy": 0.23885918408632278,
      "num_tokens": 2974195.0,
      "step": 176
    },
    {
      "epoch": 0.0354,
      "grad_norm": 12.792606353759766,
      "learning_rate": 4.824000000000001e-06,
      "loss": 8.2518,
      "mean_token_accuracy": 0.42592592537403107,
      "num_tokens": 2984011.0,
      "step": 177
    },
    {
      "epoch": 0.0356,
      "grad_norm": 18.23525619506836,
      "learning_rate": 4.823e-06,
      "loss": 8.8069,
      "mean_token_accuracy": 0.40079365670681,
      "num_tokens": 2998487.0,
      "step": 178
    },
    {
      "epoch": 0.0358,
      "grad_norm": 14.206355094909668,
      "learning_rate": 4.822000000000001e-06,
      "loss": 9.1892,
      "mean_token_accuracy": 0.2290322557091713,
      "num_tokens": 3019773.0,
      "step": 179
    },
    {
      "epoch": 0.036,
      "grad_norm": 12.26903247833252,
      "learning_rate": 4.821e-06,
      "loss": 8.4939,
      "mean_token_accuracy": 0.28287841379642487,
      "num_tokens": 3040775.0,
      "step": 180
    },
    {
      "epoch": 0.0362,
      "grad_norm": 15.23544979095459,
      "learning_rate": 4.8200000000000004e-06,
      "loss": 8.9121,
      "mean_token_accuracy": 0.3285440653562546,
      "num_tokens": 3060080.0,
      "step": 181
    },
    {
      "epoch": 0.0364,
      "grad_norm": 57.132049560546875,
      "learning_rate": 4.819e-06,
      "loss": 8.9738,
      "mean_token_accuracy": 0.3175750821828842,
      "num_tokens": 3079332.0,
      "step": 182
    },
    {
      "epoch": 0.0366,
      "grad_norm": 13.691211700439453,
      "learning_rate": 4.818e-06,
      "loss": 8.0025,
      "mean_token_accuracy": 0.34656085073947906,
      "num_tokens": 3088989.0,
      "step": 183
    },
    {
      "epoch": 0.0368,
      "grad_norm": 15.762035369873047,
      "learning_rate": 4.817000000000001e-06,
      "loss": 9.4602,
      "mean_token_accuracy": 0.34457671642303467,
      "num_tokens": 3107732.0,
      "step": 184
    },
    {
      "epoch": 0.037,
      "grad_norm": 17.034019470214844,
      "learning_rate": 4.816e-06,
      "loss": 9.4491,
      "mean_token_accuracy": 0.34666667878627777,
      "num_tokens": 3122423.0,
      "step": 185
    },
    {
      "epoch": 0.0372,
      "grad_norm": 12.461385726928711,
      "learning_rate": 4.8150000000000005e-06,
      "loss": 9.0659,
      "mean_token_accuracy": 0.2838345915079117,
      "num_tokens": 3141889.0,
      "step": 186
    },
    {
      "epoch": 0.0374,
      "grad_norm": 13.194416046142578,
      "learning_rate": 4.814e-06,
      "loss": 8.8107,
      "mean_token_accuracy": 0.36685824394226074,
      "num_tokens": 3161194.0,
      "step": 187
    },
    {
      "epoch": 0.0376,
      "grad_norm": 14.799727439880371,
      "learning_rate": 4.813e-06,
      "loss": 9.7196,
      "mean_token_accuracy": 0.377616748213768,
      "num_tokens": 3175646.0,
      "step": 188
    },
    {
      "epoch": 0.0378,
      "grad_norm": 15.303200721740723,
      "learning_rate": 4.812000000000001e-06,
      "loss": 10.0029,
      "mean_token_accuracy": 0.38161374628543854,
      "num_tokens": 3194941.0,
      "step": 189
    },
    {
      "epoch": 0.038,
      "grad_norm": 13.99432373046875,
      "learning_rate": 4.811000000000001e-06,
      "loss": 8.4004,
      "mean_token_accuracy": 0.37096773087978363,
      "num_tokens": 3214205.0,
      "step": 190
    },
    {
      "epoch": 0.0382,
      "grad_norm": 12.163537979125977,
      "learning_rate": 4.8100000000000005e-06,
      "loss": 9.0058,
      "mean_token_accuracy": 0.35395538806915283,
      "num_tokens": 3235063.0,
      "step": 191
    },
    {
      "epoch": 0.0384,
      "grad_norm": 12.369959831237793,
      "learning_rate": 4.809e-06,
      "loss": 9.0358,
      "mean_token_accuracy": 0.4807407408952713,
      "num_tokens": 3249517.0,
      "step": 192
    },
    {
      "epoch": 0.0386,
      "grad_norm": 21.97284698486328,
      "learning_rate": 4.808e-06,
      "loss": 8.7725,
      "mean_token_accuracy": 0.41179338097572327,
      "num_tokens": 3268738.0,
      "step": 193
    },
    {
      "epoch": 0.0388,
      "grad_norm": 13.040814399719238,
      "learning_rate": 4.807000000000001e-06,
      "loss": 9.4628,
      "mean_token_accuracy": 0.424450546503067,
      "num_tokens": 3288032.0,
      "step": 194
    },
    {
      "epoch": 0.039,
      "grad_norm": 14.395992279052734,
      "learning_rate": 4.806000000000001e-06,
      "loss": 9.1626,
      "mean_token_accuracy": 0.454365074634552,
      "num_tokens": 3306813.0,
      "step": 195
    },
    {
      "epoch": 0.0392,
      "grad_norm": 11.64809799194336,
      "learning_rate": 4.805000000000001e-06,
      "loss": 8.7527,
      "mean_token_accuracy": 0.3843159079551697,
      "num_tokens": 3325561.0,
      "step": 196
    },
    {
      "epoch": 0.0394,
      "grad_norm": 25.551607131958008,
      "learning_rate": 4.804e-06,
      "loss": 8.5253,
      "mean_token_accuracy": 0.3452381044626236,
      "num_tokens": 3339397.0,
      "step": 197
    },
    {
      "epoch": 0.0396,
      "grad_norm": 12.025030136108398,
      "learning_rate": 4.8030000000000004e-06,
      "loss": 7.7581,
      "mean_token_accuracy": 0.5191570967435837,
      "num_tokens": 3349055.0,
      "step": 198
    },
    {
      "epoch": 0.0398,
      "grad_norm": 10.044900894165039,
      "learning_rate": 4.802000000000001e-06,
      "loss": 9.0733,
      "mean_token_accuracy": 0.4278416335582733,
      "num_tokens": 3368313.0,
      "step": 199
    },
    {
      "epoch": 0.04,
      "grad_norm": 10.911112785339355,
      "learning_rate": 4.801e-06,
      "loss": 8.6906,
      "mean_token_accuracy": 0.5000000149011612,
      "num_tokens": 3383810.0,
      "step": 200
    },
    {
      "epoch": 0.0402,
      "grad_norm": 10.119377136230469,
      "learning_rate": 4.800000000000001e-06,
      "loss": 8.8941,
      "mean_token_accuracy": 0.4000000059604645,
      "num_tokens": 3398244.0,
      "step": 201
    },
    {
      "epoch": 0.0404,
      "grad_norm": 10.526436805725098,
      "learning_rate": 4.799e-06,
      "loss": 7.8408,
      "mean_token_accuracy": 0.39772726595401764,
      "num_tokens": 3414686.0,
      "step": 202
    },
    {
      "epoch": 0.0406,
      "grad_norm": 10.947959899902344,
      "learning_rate": 4.7980000000000005e-06,
      "loss": 8.4085,
      "mean_token_accuracy": 0.40992647409439087,
      "num_tokens": 3433954.0,
      "step": 203
    },
    {
      "epoch": 0.0408,
      "grad_norm": 10.811299324035645,
      "learning_rate": 4.797000000000001e-06,
      "loss": 7.7418,
      "mean_token_accuracy": 0.5370370447635651,
      "num_tokens": 3443610.0,
      "step": 204
    },
    {
      "epoch": 0.041,
      "grad_norm": 9.57394027709961,
      "learning_rate": 4.796e-06,
      "loss": 8.6595,
      "mean_token_accuracy": 0.45967741310596466,
      "num_tokens": 3462909.0,
      "step": 205
    },
    {
      "epoch": 0.0412,
      "grad_norm": 12.8336181640625,
      "learning_rate": 4.795e-06,
      "loss": 8.6943,
      "mean_token_accuracy": 0.45967741310596466,
      "num_tokens": 3477620.0,
      "step": 206
    },
    {
      "epoch": 0.0414,
      "grad_norm": 11.37842845916748,
      "learning_rate": 4.794e-06,
      "loss": 8.5963,
      "mean_token_accuracy": 0.4539627134799957,
      "num_tokens": 3496675.0,
      "step": 207
    },
    {
      "epoch": 0.0416,
      "grad_norm": 12.427331924438477,
      "learning_rate": 4.7930000000000005e-06,
      "loss": 8.67,
      "mean_token_accuracy": 0.3821548819541931,
      "num_tokens": 3515975.0,
      "step": 208
    },
    {
      "epoch": 0.0418,
      "grad_norm": 10.371417045593262,
      "learning_rate": 4.792000000000001e-06,
      "loss": 8.4859,
      "mean_token_accuracy": 0.40784314274787903,
      "num_tokens": 3535241.0,
      "step": 209
    },
    {
      "epoch": 0.042,
      "grad_norm": 11.788932800292969,
      "learning_rate": 4.791e-06,
      "loss": 8.7143,
      "mean_token_accuracy": 0.3741379380226135,
      "num_tokens": 3554502.0,
      "step": 210
    },
    {
      "epoch": 0.0422,
      "grad_norm": 15.102238655090332,
      "learning_rate": 4.79e-06,
      "loss": 8.5368,
      "mean_token_accuracy": 0.40980392694473267,
      "num_tokens": 3574437.0,
      "step": 211
    },
    {
      "epoch": 0.0424,
      "grad_norm": 11.23690128326416,
      "learning_rate": 4.789e-06,
      "loss": 8.7326,
      "mean_token_accuracy": 0.45628078281879425,
      "num_tokens": 3593696.0,
      "step": 212
    },
    {
      "epoch": 0.0426,
      "grad_norm": 11.3884859085083,
      "learning_rate": 4.7880000000000006e-06,
      "loss": 9.0773,
      "mean_token_accuracy": 0.4404761791229248,
      "num_tokens": 3608150.0,
      "step": 213
    },
    {
      "epoch": 0.0428,
      "grad_norm": 11.106508255004883,
      "learning_rate": 4.787000000000001e-06,
      "loss": 9.5678,
      "mean_token_accuracy": 0.3500000089406967,
      "num_tokens": 3627724.0,
      "step": 214
    },
    {
      "epoch": 0.043,
      "grad_norm": 11.691924095153809,
      "learning_rate": 4.7860000000000004e-06,
      "loss": 8.2192,
      "mean_token_accuracy": 0.41582491993904114,
      "num_tokens": 3642224.0,
      "step": 215
    },
    {
      "epoch": 0.0432,
      "grad_norm": 13.973259925842285,
      "learning_rate": 4.785e-06,
      "loss": 8.9823,
      "mean_token_accuracy": 0.44195401668548584,
      "num_tokens": 3660885.0,
      "step": 216
    },
    {
      "epoch": 0.0434,
      "grad_norm": 11.744901657104492,
      "learning_rate": 4.784e-06,
      "loss": 8.698,
      "mean_token_accuracy": 0.4291125535964966,
      "num_tokens": 3679634.0,
      "step": 217
    },
    {
      "epoch": 0.0436,
      "grad_norm": 13.440972328186035,
      "learning_rate": 4.783000000000001e-06,
      "loss": 8.3414,
      "mean_token_accuracy": 0.47999998927116394,
      "num_tokens": 3689286.0,
      "step": 218
    },
    {
      "epoch": 0.0438,
      "grad_norm": 9.561469078063965,
      "learning_rate": 4.782e-06,
      "loss": 7.6889,
      "mean_token_accuracy": 0.41898825764656067,
      "num_tokens": 3710533.0,
      "step": 219
    },
    {
      "epoch": 0.044,
      "grad_norm": 12.257551193237305,
      "learning_rate": 4.7810000000000005e-06,
      "loss": 8.4541,
      "mean_token_accuracy": 0.5016103088855743,
      "num_tokens": 3725145.0,
      "step": 220
    },
    {
      "epoch": 0.0442,
      "grad_norm": 10.785005569458008,
      "learning_rate": 4.78e-06,
      "loss": 9.7029,
      "mean_token_accuracy": 0.4015151560306549,
      "num_tokens": 3743544.0,
      "step": 221
    },
    {
      "epoch": 0.0444,
      "grad_norm": 10.52768611907959,
      "learning_rate": 4.779e-06,
      "loss": 8.6062,
      "mean_token_accuracy": 0.40740741789340973,
      "num_tokens": 3762705.0,
      "step": 222
    },
    {
      "epoch": 0.0446,
      "grad_norm": 17.872329711914062,
      "learning_rate": 4.778000000000001e-06,
      "loss": 7.3673,
      "mean_token_accuracy": 0.45370370149612427,
      "num_tokens": 3777166.0,
      "step": 223
    },
    {
      "epoch": 0.0448,
      "grad_norm": 11.053666114807129,
      "learning_rate": 4.777e-06,
      "loss": 9.3428,
      "mean_token_accuracy": 0.4186507910490036,
      "num_tokens": 3795909.0,
      "step": 224
    },
    {
      "epoch": 0.045,
      "grad_norm": 9.966497421264648,
      "learning_rate": 4.7760000000000005e-06,
      "loss": 7.9071,
      "mean_token_accuracy": 0.44636015594005585,
      "num_tokens": 3815176.0,
      "step": 225
    },
    {
      "epoch": 0.0452,
      "grad_norm": 12.605799674987793,
      "learning_rate": 4.775e-06,
      "loss": 8.3063,
      "mean_token_accuracy": 0.41692790389060974,
      "num_tokens": 3834478.0,
      "step": 226
    },
    {
      "epoch": 0.0454,
      "grad_norm": 9.679677963256836,
      "learning_rate": 4.774e-06,
      "loss": 8.7331,
      "mean_token_accuracy": 0.45628078281879425,
      "num_tokens": 3853737.0,
      "step": 227
    },
    {
      "epoch": 0.0456,
      "grad_norm": 27.174549102783203,
      "learning_rate": 4.773000000000001e-06,
      "loss": 8.0078,
      "mean_token_accuracy": 0.4434434473514557,
      "num_tokens": 3873882.0,
      "step": 228
    },
    {
      "epoch": 0.0458,
      "grad_norm": 11.591468811035156,
      "learning_rate": 4.772e-06,
      "loss": 8.237,
      "mean_token_accuracy": 0.36707451939582825,
      "num_tokens": 3888302.0,
      "step": 229
    },
    {
      "epoch": 0.046,
      "grad_norm": 10.10312271118164,
      "learning_rate": 4.7710000000000006e-06,
      "loss": 7.6694,
      "mean_token_accuracy": 0.5105820149183273,
      "num_tokens": 3902804.0,
      "step": 230
    },
    {
      "epoch": 0.0462,
      "grad_norm": 13.620348930358887,
      "learning_rate": 4.77e-06,
      "loss": 8.4942,
      "mean_token_accuracy": 0.421875,
      "num_tokens": 3922146.0,
      "step": 231
    },
    {
      "epoch": 0.0464,
      "grad_norm": 11.014819145202637,
      "learning_rate": 4.769e-06,
      "loss": 7.637,
      "mean_token_accuracy": 0.4495798349380493,
      "num_tokens": 3936372.0,
      "step": 232
    },
    {
      "epoch": 0.0466,
      "grad_norm": 14.10721492767334,
      "learning_rate": 4.768000000000001e-06,
      "loss": 7.4068,
      "mean_token_accuracy": 0.5078571289777756,
      "num_tokens": 3946456.0,
      "step": 233
    },
    {
      "epoch": 0.0468,
      "grad_norm": 13.256854057312012,
      "learning_rate": 4.767e-06,
      "loss": 7.9252,
      "mean_token_accuracy": 0.41277891397476196,
      "num_tokens": 3965955.0,
      "step": 234
    },
    {
      "epoch": 0.047,
      "grad_norm": 12.264280319213867,
      "learning_rate": 4.766000000000001e-06,
      "loss": 7.6561,
      "mean_token_accuracy": 0.4913793057203293,
      "num_tokens": 3980412.0,
      "step": 235
    },
    {
      "epoch": 0.0472,
      "grad_norm": 11.942499160766602,
      "learning_rate": 4.765e-06,
      "loss": 8.0462,
      "mean_token_accuracy": 0.4900284856557846,
      "num_tokens": 3999823.0,
      "step": 236
    },
    {
      "epoch": 0.0474,
      "grad_norm": 10.043482780456543,
      "learning_rate": 4.7640000000000005e-06,
      "loss": 7.0965,
      "mean_token_accuracy": 0.4025973975658417,
      "num_tokens": 4014414.0,
      "step": 237
    },
    {
      "epoch": 0.0476,
      "grad_norm": 12.545036315917969,
      "learning_rate": 4.763000000000001e-06,
      "loss": 7.667,
      "mean_token_accuracy": 0.38708220422267914,
      "num_tokens": 4028922.0,
      "step": 238
    },
    {
      "epoch": 0.0478,
      "grad_norm": 10.067218780517578,
      "learning_rate": 4.762e-06,
      "loss": 7.1117,
      "mean_token_accuracy": 0.37129031121730804,
      "num_tokens": 4050175.0,
      "step": 239
    },
    {
      "epoch": 0.048,
      "grad_norm": 13.74410343170166,
      "learning_rate": 4.761000000000001e-06,
      "loss": 7.9069,
      "mean_token_accuracy": 0.47333332896232605,
      "num_tokens": 4064811.0,
      "step": 240
    },
    {
      "epoch": 0.0482,
      "grad_norm": 9.813583374023438,
      "learning_rate": 4.76e-06,
      "loss": 6.8128,
      "mean_token_accuracy": 0.4273170679807663,
      "num_tokens": 4079317.0,
      "step": 241
    },
    {
      "epoch": 0.0484,
      "grad_norm": 10.31633472442627,
      "learning_rate": 4.7590000000000005e-06,
      "loss": 7.676,
      "mean_token_accuracy": 0.43584655225276947,
      "num_tokens": 4094093.0,
      "step": 242
    },
    {
      "epoch": 0.0486,
      "grad_norm": 13.174894332885742,
      "learning_rate": 4.758e-06,
      "loss": 8.5029,
      "mean_token_accuracy": 0.42592592537403107,
      "num_tokens": 4113778.0,
      "step": 243
    },
    {
      "epoch": 0.0488,
      "grad_norm": 11.068340301513672,
      "learning_rate": 4.757e-06,
      "loss": 7.6392,
      "mean_token_accuracy": 0.5028571337461472,
      "num_tokens": 4125078.0,
      "step": 244
    },
    {
      "epoch": 0.049,
      "grad_norm": 11.669493675231934,
      "learning_rate": 4.756000000000001e-06,
      "loss": 8.0877,
      "mean_token_accuracy": 0.3697916716337204,
      "num_tokens": 4144451.0,
      "step": 245
    },
    {
      "epoch": 0.0492,
      "grad_norm": 12.121454238891602,
      "learning_rate": 4.755e-06,
      "loss": 8.3639,
      "mean_token_accuracy": 0.3320707082748413,
      "num_tokens": 4164189.0,
      "step": 246
    },
    {
      "epoch": 0.0494,
      "grad_norm": 11.231935501098633,
      "learning_rate": 4.7540000000000006e-06,
      "loss": 8.1039,
      "mean_token_accuracy": 0.40060852468013763,
      "num_tokens": 4183560.0,
      "step": 247
    },
    {
      "epoch": 0.0496,
      "grad_norm": 14.818300247192383,
      "learning_rate": 4.753e-06,
      "loss": 8.3393,
      "mean_token_accuracy": 0.4365079402923584,
      "num_tokens": 4203160.0,
      "step": 248
    },
    {
      "epoch": 0.0498,
      "grad_norm": 10.595967292785645,
      "learning_rate": 4.752e-06,
      "loss": 8.2095,
      "mean_token_accuracy": 0.3896551728248596,
      "num_tokens": 4222502.0,
      "step": 249
    },
    {
      "epoch": 0.05,
      "grad_norm": 10.283987998962402,
      "learning_rate": 4.751000000000001e-06,
      "loss": 7.8432,
      "mean_token_accuracy": 0.41187499463558197,
      "num_tokens": 4236999.0,
      "step": 250
    },
    {
      "epoch": 0.0502,
      "grad_norm": 64.06322479248047,
      "learning_rate": 4.75e-06,
      "loss": 8.2479,
      "mean_token_accuracy": 0.43703703582286835,
      "num_tokens": 4256296.0,
      "step": 251
    },
    {
      "epoch": 0.0504,
      "grad_norm": 12.750511169433594,
      "learning_rate": 4.749000000000001e-06,
      "loss": 8.0847,
      "mean_token_accuracy": 0.4107142835855484,
      "num_tokens": 4270240.0,
      "step": 252
    },
    {
      "epoch": 0.0506,
      "grad_norm": 19.006969451904297,
      "learning_rate": 4.748e-06,
      "loss": 7.6973,
      "mean_token_accuracy": 0.44636015594005585,
      "num_tokens": 4284595.0,
      "step": 253
    },
    {
      "epoch": 0.0508,
      "grad_norm": 11.408003807067871,
      "learning_rate": 4.7470000000000005e-06,
      "loss": 7.7927,
      "mean_token_accuracy": 0.41428571939468384,
      "num_tokens": 4303855.0,
      "step": 254
    },
    {
      "epoch": 0.051,
      "grad_norm": 11.841231346130371,
      "learning_rate": 4.746000000000001e-06,
      "loss": 7.7046,
      "mean_token_accuracy": 0.3849431872367859,
      "num_tokens": 4323122.0,
      "step": 255
    },
    {
      "epoch": 0.0512,
      "grad_norm": 16.837949752807617,
      "learning_rate": 4.745e-06,
      "loss": 7.7556,
      "mean_token_accuracy": 0.38112305104732513,
      "num_tokens": 4342022.0,
      "step": 256
    },
    {
      "epoch": 0.0514,
      "grad_norm": 12.48375415802002,
      "learning_rate": 4.744000000000001e-06,
      "loss": 8.2711,
      "mean_token_accuracy": 0.42356322705745697,
      "num_tokens": 4361283.0,
      "step": 257
    },
    {
      "epoch": 0.0516,
      "grad_norm": 12.102031707763672,
      "learning_rate": 4.743e-06,
      "loss": 7.6877,
      "mean_token_accuracy": 0.42748479545116425,
      "num_tokens": 4375550.0,
      "step": 258
    },
    {
      "epoch": 0.0518,
      "grad_norm": 10.918121337890625,
      "learning_rate": 4.7420000000000005e-06,
      "loss": 7.1491,
      "mean_token_accuracy": 0.3727777749300003,
      "num_tokens": 4390013.0,
      "step": 259
    },
    {
      "epoch": 0.052,
      "grad_norm": 9.632317543029785,
      "learning_rate": 4.741000000000001e-06,
      "loss": 6.4275,
      "mean_token_accuracy": 0.3046218603849411,
      "num_tokens": 4406480.0,
      "step": 260
    },
    {
      "epoch": 0.0522,
      "grad_norm": 14.100777626037598,
      "learning_rate": 4.74e-06,
      "loss": 7.8382,
      "mean_token_accuracy": 0.454365074634552,
      "num_tokens": 4425737.0,
      "step": 261
    },
    {
      "epoch": 0.0524,
      "grad_norm": 14.973969459533691,
      "learning_rate": 4.739e-06,
      "loss": 7.775,
      "mean_token_accuracy": 0.45428571105003357,
      "num_tokens": 4440150.0,
      "step": 262
    },
    {
      "epoch": 0.0526,
      "grad_norm": 10.306495666503906,
      "learning_rate": 4.738e-06,
      "loss": 6.7966,
      "mean_token_accuracy": 0.41637930274009705,
      "num_tokens": 4454659.0,
      "step": 263
    },
    {
      "epoch": 0.0528,
      "grad_norm": 11.71308422088623,
      "learning_rate": 4.7370000000000006e-06,
      "loss": 7.3197,
      "mean_token_accuracy": 0.45000000298023224,
      "num_tokens": 4469178.0,
      "step": 264
    },
    {
      "epoch": 0.053,
      "grad_norm": 15.400917053222656,
      "learning_rate": 4.736000000000001e-06,
      "loss": 7.0285,
      "mean_token_accuracy": 0.4464285671710968,
      "num_tokens": 4483833.0,
      "step": 265
    },
    {
      "epoch": 0.0532,
      "grad_norm": 11.898533821105957,
      "learning_rate": 4.735e-06,
      "loss": 7.7648,
      "mean_token_accuracy": 0.3568965643644333,
      "num_tokens": 4502729.0,
      "step": 266
    },
    {
      "epoch": 0.0534,
      "grad_norm": 12.848722457885742,
      "learning_rate": 4.734e-06,
      "loss": 7.1883,
      "mean_token_accuracy": 0.4402298927307129,
      "num_tokens": 4521713.0,
      "step": 267
    },
    {
      "epoch": 0.0536,
      "grad_norm": 9.758365631103516,
      "learning_rate": 4.733e-06,
      "loss": 6.833,
      "mean_token_accuracy": 0.39444445073604584,
      "num_tokens": 4541033.0,
      "step": 268
    },
    {
      "epoch": 0.0538,
      "grad_norm": 40.770198822021484,
      "learning_rate": 4.732000000000001e-06,
      "loss": 7.5144,
      "mean_token_accuracy": 0.37254129350185394,
      "num_tokens": 4561917.0,
      "step": 269
    },
    {
      "epoch": 0.054,
      "grad_norm": 12.640328407287598,
      "learning_rate": 4.731000000000001e-06,
      "loss": 7.6694,
      "mean_token_accuracy": 0.40518517792224884,
      "num_tokens": 4576371.0,
      "step": 270
    },
    {
      "epoch": 0.0542,
      "grad_norm": 13.891765594482422,
      "learning_rate": 4.7300000000000005e-06,
      "loss": 6.4951,
      "mean_token_accuracy": 0.48746199905872345,
      "num_tokens": 4595606.0,
      "step": 271
    },
    {
      "epoch": 0.0544,
      "grad_norm": 15.260784149169922,
      "learning_rate": 4.729e-06,
      "loss": 7.1435,
      "mean_token_accuracy": 0.4378078728914261,
      "num_tokens": 4607665.0,
      "step": 272
    },
    {
      "epoch": 0.0546,
      "grad_norm": 11.873882293701172,
      "learning_rate": 4.728e-06,
      "loss": 6.6643,
      "mean_token_accuracy": 0.4196428507566452,
"num_tokens": 4622184.0, | |
"step": 273 | |
}, | |
{ | |
"epoch": 0.0548, | |
"grad_norm": 11.417704582214355, | |
"learning_rate": 4.727000000000001e-06, | |
"loss": 7.0497, | |
"mean_token_accuracy": 0.39933258295059204, | |
"num_tokens": 4636646.0, | |
"step": 274 | |
}, | |
{ | |
"epoch": 0.055, | |
"grad_norm": 10.876029014587402, | |
"learning_rate": 4.726000000000001e-06, | |
"loss": 6.913, | |
"mean_token_accuracy": 0.41692790389060974, | |
"num_tokens": 4655906.0, | |
"step": 275 | |
}, | |
{ | |
"epoch": 0.0552, | |
"grad_norm": 13.294316291809082, | |
"learning_rate": 4.7250000000000005e-06, | |
"loss": 7.6388, | |
"mean_token_accuracy": 0.41338111460208893, | |
"num_tokens": 4675326.0, | |
"step": 276 | |
}, | |
{ | |
"epoch": 0.0554, | |
"grad_norm": 13.638504981994629, | |
"learning_rate": 4.724e-06, | |
"loss": 7.3977, | |
"mean_token_accuracy": 0.3982202410697937, | |
"num_tokens": 4694348.0, | |
"step": 277 | |
}, | |
{ | |
"epoch": 0.0556, | |
"grad_norm": 15.715910911560059, | |
"learning_rate": 4.723e-06, | |
"loss": 7.6206, | |
"mean_token_accuracy": 0.4444444477558136, | |
"num_tokens": 4713642.0, | |
"step": 278 | |
}, | |
{ | |
"epoch": 0.0558, | |
"grad_norm": 15.516464233398438, | |
"learning_rate": 4.722000000000001e-06, | |
"loss": 8.2007, | |
"mean_token_accuracy": 0.424450546503067, | |
"num_tokens": 4732898.0, | |
"step": 279 | |
}, | |
{ | |
"epoch": 0.056, | |
"grad_norm": 12.387885093688965, | |
"learning_rate": 4.721e-06, | |
"loss": 7.688, | |
"mean_token_accuracy": 0.3694581240415573, | |
"num_tokens": 4752274.0, | |
"step": 280 | |
}, | |
{ | |
"epoch": 0.0562, | |
"grad_norm": 10.614646911621094, | |
"learning_rate": 4.7200000000000005e-06, | |
"loss": 6.3429, | |
"mean_token_accuracy": 0.47413793206214905, | |
"num_tokens": 4761929.0, | |
"step": 281 | |
}, | |
{ | |
"epoch": 0.0564, | |
"grad_norm": 21.08198356628418, | |
"learning_rate": 4.719e-06, | |
"loss": 6.1852, | |
"mean_token_accuracy": 0.46727272868156433, | |
"num_tokens": 4771566.0, | |
"step": 282 | |
}, | |
{ | |
"epoch": 0.0566, | |
"grad_norm": 13.566423416137695, | |
"learning_rate": 4.718e-06, | |
"loss": 6.3183, | |
"mean_token_accuracy": 0.4772357791662216, | |
"num_tokens": 4792969.0, | |
"step": 283 | |
}, | |
{ | |
"epoch": 0.0568, | |
"grad_norm": 11.979252815246582, | |
"learning_rate": 4.717000000000001e-06, | |
"loss": 6.9839, | |
"mean_token_accuracy": 0.4107142835855484, | |
"num_tokens": 4807544.0, | |
"step": 284 | |
}, | |
{ | |
"epoch": 0.057, | |
"grad_norm": 10.196455955505371, | |
"learning_rate": 4.716e-06, | |
"loss": 7.2482, | |
"mean_token_accuracy": 0.4211822748184204, | |
"num_tokens": 4826443.0, | |
"step": 285 | |
}, | |
{ | |
"epoch": 0.0572, | |
"grad_norm": 10.696710586547852, | |
"learning_rate": 4.715e-06, | |
"loss": 6.7498, | |
"mean_token_accuracy": 0.4461206793785095, | |
"num_tokens": 4840925.0, | |
"step": 286 | |
}, | |
{ | |
"epoch": 0.0574, | |
"grad_norm": 12.140236854553223, | |
"learning_rate": 4.714e-06, | |
"loss": 7.5622, | |
"mean_token_accuracy": 0.3907563090324402, | |
"num_tokens": 4860066.0, | |
"step": 287 | |
}, | |
{ | |
"epoch": 0.0576, | |
"grad_norm": 9.046162605285645, | |
"learning_rate": 4.7130000000000004e-06, | |
"loss": 6.4519, | |
"mean_token_accuracy": 0.4005681872367859, | |
"num_tokens": 4874533.0, | |
"step": 288 | |
}, | |
{ | |
"epoch": 0.0578, | |
"grad_norm": 11.239535331726074, | |
"learning_rate": 4.712000000000001e-06, | |
"loss": 7.0541, | |
"mean_token_accuracy": 0.4107142835855484, | |
"num_tokens": 4888991.0, | |
"step": 289 | |
}, | |
{ | |
"epoch": 0.058, | |
"grad_norm": 13.39867115020752, | |
"learning_rate": 4.711e-06, | |
"loss": 7.3852, | |
"mean_token_accuracy": 0.35574713349342346, | |
"num_tokens": 4901961.0, | |
"step": 290 | |
}, | |
{ | |
"epoch": 0.0582, | |
"grad_norm": 10.7579984664917, | |
"learning_rate": 4.71e-06, | |
"loss": 7.2866, | |
"mean_token_accuracy": 0.41428571939468384, | |
"num_tokens": 4916815.0, | |
"step": 291 | |
}, | |
{ | |
"epoch": 0.0584, | |
"grad_norm": 12.446432113647461, | |
"learning_rate": 4.709e-06, | |
"loss": 7.9278, | |
"mean_token_accuracy": 0.35706017911434174, | |
"num_tokens": 4935892.0, | |
"step": 292 | |
}, | |
{ | |
"epoch": 0.0586, | |
"grad_norm": 12.149531364440918, | |
"learning_rate": 4.7080000000000005e-06, | |
"loss": 7.0934, | |
"mean_token_accuracy": 0.4106002599000931, | |
"num_tokens": 4950308.0, | |
"step": 293 | |
}, | |
{ | |
"epoch": 0.0588, | |
"grad_norm": 12.248196601867676, | |
"learning_rate": 4.707000000000001e-06, | |
"loss": 7.3677, | |
"mean_token_accuracy": 0.4555555582046509, | |
"num_tokens": 4969605.0, | |
"step": 294 | |
}, | |
{ | |
"epoch": 0.059, | |
"grad_norm": 10.024053573608398, | |
"learning_rate": 4.706e-06, | |
"loss": 7.0171, | |
"mean_token_accuracy": 0.4404466450214386, | |
"num_tokens": 4984384.0, | |
"step": 295 | |
}, | |
{ | |
"epoch": 0.0592, | |
"grad_norm": 10.064780235290527, | |
"learning_rate": 4.705e-06, | |
"loss": 7.2282, | |
"mean_token_accuracy": 0.3821621537208557, | |
"num_tokens": 4998886.0, | |
"step": 296 | |
}, | |
{ | |
"epoch": 0.0594, | |
"grad_norm": 11.68379020690918, | |
"learning_rate": 4.704e-06, | |
"loss": 6.3511, | |
"mean_token_accuracy": 0.45874999463558197, | |
"num_tokens": 5013345.0, | |
"step": 297 | |
}, | |
{ | |
"epoch": 0.0596, | |
"grad_norm": 9.920818328857422, | |
"learning_rate": 4.7030000000000005e-06, | |
"loss": 6.4502, | |
"mean_token_accuracy": 0.4145299196243286, | |
"num_tokens": 5026411.0, | |
"step": 298 | |
}, | |
{ | |
"epoch": 0.0598, | |
"grad_norm": 9.868770599365234, | |
"learning_rate": 4.702e-06, | |
"loss": 6.6577, | |
"mean_token_accuracy": 0.40407469868659973, | |
"num_tokens": 5046124.0, | |
"step": 299 | |
}, | |
{ | |
"epoch": 0.06, | |
"grad_norm": 13.261858940124512, | |
"learning_rate": 4.701e-06, | |
"loss": 6.0468, | |
"mean_token_accuracy": 0.4592592567205429, | |
"num_tokens": 5060802.0, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.0602, | |
"grad_norm": 11.705483436584473, | |
"learning_rate": 4.7e-06, | |
"loss": 6.9336, | |
"mean_token_accuracy": 0.3961039036512375, | |
"num_tokens": 5080148.0, | |
"step": 301 | |
}, | |
{ | |
"epoch": 0.0604, | |
"grad_norm": 11.132308006286621, | |
"learning_rate": 4.699e-06, | |
"loss": 6.7934, | |
"mean_token_accuracy": 0.4444444477558136, | |
"num_tokens": 5094604.0, | |
"step": 302 | |
}, | |
{ | |
"epoch": 0.0606, | |
"grad_norm": 12.15768051147461, | |
"learning_rate": 4.698000000000001e-06, | |
"loss": 6.1426, | |
"mean_token_accuracy": 0.48148147761821747, | |
"num_tokens": 5109079.0, | |
"step": 303 | |
}, | |
{ | |
"epoch": 0.0608, | |
"grad_norm": 13.979254722595215, | |
"learning_rate": 4.697e-06, | |
"loss": 7.0785, | |
"mean_token_accuracy": 0.4423076957464218, | |
"num_tokens": 5121880.0, | |
"step": 304 | |
}, | |
{ | |
"epoch": 0.061, | |
"grad_norm": 10.67962646484375, | |
"learning_rate": 4.6960000000000004e-06, | |
"loss": 5.7797, | |
"mean_token_accuracy": 0.43860387802124023, | |
"num_tokens": 5141369.0, | |
"step": 305 | |
}, | |
{ | |
"epoch": 0.0612, | |
"grad_norm": 9.39137077331543, | |
"learning_rate": 4.695e-06, | |
"loss": 5.9316, | |
"mean_token_accuracy": 0.4432692378759384, | |
"num_tokens": 5155875.0, | |
"step": 306 | |
}, | |
{ | |
"epoch": 0.0614, | |
"grad_norm": 10.971794128417969, | |
"learning_rate": 4.694e-06, | |
"loss": 6.5844, | |
"mean_token_accuracy": 0.4044642895460129, | |
"num_tokens": 5175079.0, | |
"step": 307 | |
}, | |
{ | |
"epoch": 0.0616, | |
"grad_norm": 10.385963439941406, | |
"learning_rate": 4.693000000000001e-06, | |
"loss": 6.3867, | |
"mean_token_accuracy": 0.46296297013759613, | |
"num_tokens": 5189415.0, | |
"step": 308 | |
}, | |
{ | |
"epoch": 0.0618, | |
"grad_norm": 13.314301490783691, | |
"learning_rate": 4.692e-06, | |
"loss": 6.0294, | |
"mean_token_accuracy": 0.42749999463558197, | |
"num_tokens": 5199112.0, | |
"step": 309 | |
}, | |
{ | |
"epoch": 0.062, | |
"grad_norm": 9.319266319274902, | |
"learning_rate": 4.6910000000000005e-06, | |
"loss": 6.3149, | |
"mean_token_accuracy": 0.41097819805145264, | |
"num_tokens": 5231220.0, | |
"step": 310 | |
}, | |
{ | |
"epoch": 0.0622, | |
"grad_norm": 9.54952621459961, | |
"learning_rate": 4.69e-06, | |
"loss": 6.8163, | |
"mean_token_accuracy": 0.380952388048172, | |
"num_tokens": 5253011.0, | |
"step": 311 | |
}, | |
{ | |
"epoch": 0.0624, | |
"grad_norm": 9.803725242614746, | |
"learning_rate": 4.689e-06, | |
"loss": 6.385, | |
"mean_token_accuracy": 0.4375, | |
"num_tokens": 5272315.0, | |
"step": 312 | |
}, | |
{ | |
"epoch": 0.0626, | |
"grad_norm": 9.163276672363281, | |
"learning_rate": 4.688000000000001e-06, | |
"loss": 5.9736, | |
"mean_token_accuracy": 0.3640737682580948, | |
"num_tokens": 5286789.0, | |
"step": 313 | |
}, | |
{ | |
"epoch": 0.0628, | |
"grad_norm": 9.91296672821045, | |
"learning_rate": 4.687e-06, | |
"loss": 6.2111, | |
"mean_token_accuracy": 0.3975849747657776, | |
"num_tokens": 5301439.0, | |
"step": 314 | |
}, | |
{ | |
"epoch": 0.063, | |
"grad_norm": 11.019015312194824, | |
"learning_rate": 4.6860000000000005e-06, | |
"loss": 6.6715, | |
"mean_token_accuracy": 0.3615301698446274, | |
"num_tokens": 5324389.0, | |
"step": 315 | |
}, | |
{ | |
"epoch": 0.0632, | |
"grad_norm": 9.389239311218262, | |
"learning_rate": 4.685000000000001e-06, | |
"loss": 5.7869, | |
"mean_token_accuracy": 0.4253472238779068, | |
"num_tokens": 5334050.0, | |
"step": 316 | |
}, | |
{ | |
"epoch": 0.0634, | |
"grad_norm": 9.804286003112793, | |
"learning_rate": 4.684e-06, | |
"loss": 7.3263, | |
"mean_token_accuracy": 0.3896551728248596, | |
"num_tokens": 5353730.0, | |
"step": 317 | |
}, | |
{ | |
"epoch": 0.0636, | |
"grad_norm": 18.058841705322266, | |
"learning_rate": 4.683000000000001e-06, | |
"loss": 6.4049, | |
"mean_token_accuracy": 0.43095238506793976, | |
"num_tokens": 5368190.0, | |
"step": 318 | |
}, | |
{ | |
"epoch": 0.0638, | |
"grad_norm": 10.300615310668945, | |
"learning_rate": 4.682e-06, | |
"loss": 6.1908, | |
"mean_token_accuracy": 0.44763730466365814, | |
"num_tokens": 5377848.0, | |
"step": 319 | |
}, | |
{ | |
"epoch": 0.064, | |
"grad_norm": 8.114563941955566, | |
"learning_rate": 4.681000000000001e-06, | |
"loss": 6.5722, | |
"mean_token_accuracy": 0.39539170265197754, | |
"num_tokens": 5397362.0, | |
"step": 320 | |
}, | |
{ | |
"epoch": 0.0642, | |
"grad_norm": 9.576495170593262, | |
"learning_rate": 4.680000000000001e-06, | |
"loss": 5.8224, | |
"mean_token_accuracy": 0.4186507910490036, | |
"num_tokens": 5404613.0, | |
"step": 321 | |
}, | |
{ | |
"epoch": 0.0644, | |
"grad_norm": 7.686422348022461, | |
"learning_rate": 4.6790000000000004e-06, | |
"loss": 6.2079, | |
"mean_token_accuracy": 0.3892773985862732, | |
"num_tokens": 5423887.0, | |
"step": 322 | |
}, | |
{ | |
"epoch": 0.0646, | |
"grad_norm": 10.824250221252441, | |
"learning_rate": 4.678e-06, | |
"loss": 6.4209, | |
"mean_token_accuracy": 0.380952388048172, | |
"num_tokens": 5438544.0, | |
"step": 323 | |
}, | |
{ | |
"epoch": 0.0648, | |
"grad_norm": 22.814104080200195, | |
"learning_rate": 4.677e-06, | |
"loss": 6.3804, | |
"mean_token_accuracy": 0.4807407408952713, | |
"num_tokens": 5453036.0, | |
"step": 324 | |
}, | |
{ | |
"epoch": 0.065, | |
"grad_norm": 12.241607666015625, | |
"learning_rate": 4.676000000000001e-06, | |
"loss": 6.9619, | |
"mean_token_accuracy": 0.44413793087005615, | |
"num_tokens": 5472490.0, | |
"step": 325 | |
}, | |
{ | |
"epoch": 0.0652, | |
"grad_norm": 9.275449752807617, | |
"learning_rate": 4.675000000000001e-06, | |
"loss": 5.8423, | |
"mean_token_accuracy": 0.44312499463558197, | |
"num_tokens": 5482149.0, | |
"step": 326 | |
}, | |
{ | |
"epoch": 0.0654, | |
"grad_norm": 9.22095012664795, | |
"learning_rate": 4.6740000000000005e-06, | |
"loss": 5.6949, | |
"mean_token_accuracy": 0.46666666865348816, | |
"num_tokens": 5491805.0, | |
"step": 327 | |
}, | |
{ | |
"epoch": 0.0656, | |
"grad_norm": 9.241523742675781, | |
"learning_rate": 4.673e-06, | |
"loss": 6.0598, | |
"mean_token_accuracy": 0.4404466450214386, | |
"num_tokens": 5505904.0, | |
"step": 328 | |
}, | |
{ | |
"epoch": 0.0658, | |
"grad_norm": 17.82420539855957, | |
"learning_rate": 4.672e-06, | |
"loss": 6.6779, | |
"mean_token_accuracy": 0.4031440168619156, | |
"num_tokens": 5525385.0, | |
"step": 329 | |
}, | |
{ | |
"epoch": 0.066, | |
"grad_norm": 9.692024230957031, | |
"learning_rate": 4.671000000000001e-06, | |
"loss": 5.7944, | |
"mean_token_accuracy": 0.5252873599529266, | |
"num_tokens": 5540158.0, | |
"step": 330 | |
}, | |
{ | |
"epoch": 0.0662, | |
"grad_norm": 7.623928070068359, | |
"learning_rate": 4.670000000000001e-06, | |
"loss": 4.9237, | |
"mean_token_accuracy": 0.44602273404598236, | |
"num_tokens": 5563392.0, | |
"step": 331 | |
}, | |
{ | |
"epoch": 0.0664, | |
"grad_norm": 10.33786392211914, | |
"learning_rate": 4.6690000000000005e-06, | |
"loss": 6.147, | |
"mean_token_accuracy": 0.45428571105003357, | |
"num_tokens": 5577885.0, | |
"step": 332 | |
}, | |
{ | |
"epoch": 0.0666, | |
"grad_norm": 9.725800514221191, | |
"learning_rate": 4.668e-06, | |
"loss": 6.134, | |
"mean_token_accuracy": 0.49074074625968933, | |
"num_tokens": 5587538.0, | |
"step": 333 | |
}, | |
{ | |
"epoch": 0.0668, | |
"grad_norm": 12.834142684936523, | |
"learning_rate": 4.667e-06, | |
"loss": 6.4009, | |
"mean_token_accuracy": 0.448306605219841, | |
"num_tokens": 5606700.0, | |
"step": 334 | |
}, | |
{ | |
"epoch": 0.067, | |
"grad_norm": 17.030433654785156, | |
"learning_rate": 4.666000000000001e-06, | |
"loss": 6.5778, | |
"mean_token_accuracy": 0.49074074625968933, | |
"num_tokens": 5627246.0, | |
"step": 335 | |
}, | |
{ | |
"epoch": 0.0672, | |
"grad_norm": 7.939487457275391, | |
"learning_rate": 4.665e-06, | |
"loss": 5.705, | |
"mean_token_accuracy": 0.39478765428066254, | |
"num_tokens": 5646622.0, | |
"step": 336 | |
}, | |
{ | |
"epoch": 0.0674, | |
"grad_norm": 10.505208015441895, | |
"learning_rate": 4.664000000000001e-06, | |
"loss": 6.2186, | |
"mean_token_accuracy": 0.4741935580968857, | |
"num_tokens": 5665885.0, | |
"step": 337 | |
}, | |
{ | |
"epoch": 0.0676, | |
"grad_norm": 9.60438060760498, | |
"learning_rate": 4.663e-06, | |
"loss": 6.4067, | |
"mean_token_accuracy": 0.50063855946064, | |
"num_tokens": 5685143.0, | |
"step": 338 | |
}, | |
{ | |
"epoch": 0.0678, | |
"grad_norm": 8.420628547668457, | |
"learning_rate": 4.6620000000000004e-06, | |
"loss": 5.9981, | |
"mean_token_accuracy": 0.48749999701976776, | |
"num_tokens": 5705307.0, | |
"step": 339 | |
}, | |
{ | |
"epoch": 0.068, | |
"grad_norm": 9.02039909362793, | |
"learning_rate": 4.661000000000001e-06, | |
"loss": 5.6245, | |
"mean_token_accuracy": 0.44069264829158783, | |
"num_tokens": 5724259.0, | |
"step": 340 | |
}, | |
{ | |
"epoch": 0.0682, | |
"grad_norm": 9.671128273010254, | |
"learning_rate": 4.66e-06, | |
"loss": 5.537, | |
"mean_token_accuracy": 0.5000000149011612, | |
"num_tokens": 5738757.0, | |
"step": 341 | |
}, | |
{ | |
"epoch": 0.0684, | |
"grad_norm": 9.832850456237793, | |
"learning_rate": 4.659e-06, | |
"loss": 6.0835, | |
"mean_token_accuracy": 0.5, | |
"num_tokens": 5753257.0, | |
"step": 342 | |
}, | |
{ | |
"epoch": 0.0686, | |
"grad_norm": 10.410543441772461, | |
"learning_rate": 4.658e-06, | |
"loss": 6.4898, | |
"mean_token_accuracy": 0.517241358757019, | |
"num_tokens": 5769459.0, | |
"step": 343 | |
}, | |
{ | |
"epoch": 0.0688, | |
"grad_norm": 11.612724304199219, | |
"learning_rate": 4.6570000000000005e-06, | |
"loss": 6.2025, | |
"mean_token_accuracy": 0.45967741310596466, | |
"num_tokens": 5783916.0, | |
"step": 344 | |
}, | |
{ | |
"epoch": 0.069, | |
"grad_norm": 10.020609855651855, | |
"learning_rate": 4.656000000000001e-06, | |
"loss": 6.355, | |
"mean_token_accuracy": 0.4568965584039688, | |
"num_tokens": 5803333.0, | |
"step": 345 | |
}, | |
{ | |
"epoch": 0.0692, | |
"grad_norm": 8.622142791748047, | |
"learning_rate": 4.655e-06, | |
"loss": 5.8755, | |
"mean_token_accuracy": 0.49944381415843964, | |
"num_tokens": 5817852.0, | |
"step": 346 | |
}, | |
{ | |
"epoch": 0.0694, | |
"grad_norm": 6.586418151855469, | |
"learning_rate": 4.654e-06, | |
"loss": 4.6792, | |
"mean_token_accuracy": 0.4610389471054077, | |
"num_tokens": 5827955.0, | |
"step": 347 | |
}, | |
{ | |
"epoch": 0.0696, | |
"grad_norm": 9.011771202087402, | |
"learning_rate": 4.653e-06, | |
"loss": 5.4233, | |
"mean_token_accuracy": 0.525862067937851, | |
"num_tokens": 5842433.0, | |
"step": 348 | |
}, | |
{ | |
"epoch": 0.0698, | |
"grad_norm": 11.389575004577637, | |
"learning_rate": 4.6520000000000005e-06, | |
"loss": 6.12, | |
"mean_token_accuracy": 0.5083612203598022, | |
"num_tokens": 5856800.0, | |
"step": 349 | |
}, | |
{ | |
"epoch": 0.07, | |
"grad_norm": 7.888221263885498, | |
"learning_rate": 4.651000000000001e-06, | |
"loss": 5.8322, | |
"mean_token_accuracy": 0.504524901509285, | |
"num_tokens": 5870212.0, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.0702, | |
"grad_norm": 9.21318531036377, | |
"learning_rate": 4.65e-06, | |
"loss": 5.8042, | |
"mean_token_accuracy": 0.5434027910232544, | |
"num_tokens": 5889473.0, | |
"step": 351 | |
}, | |
{ | |
"epoch": 0.0704, | |
"grad_norm": 19.845186233520508, | |
"learning_rate": 4.649e-06, | |
"loss": 6.2884, | |
"mean_token_accuracy": 0.4252873510122299, | |
"num_tokens": 5900869.0, | |
"step": 352 | |
}, | |
{ | |
"epoch": 0.0706, | |
"grad_norm": 10.618582725524902, | |
"learning_rate": 4.648e-06, | |
"loss": 5.9085, | |
"mean_token_accuracy": 0.4852941185235977, | |
"num_tokens": 5919621.0, | |
"step": 353 | |
}, | |
{ | |
"epoch": 0.0708, | |
"grad_norm": 6.961612224578857, | |
"learning_rate": 4.6470000000000006e-06, | |
"loss": 5.1376, | |
"mean_token_accuracy": 0.4638047069311142, | |
"num_tokens": 5926888.0, | |
"step": 354 | |
}, | |
{ | |
"epoch": 0.071, | |
"grad_norm": 12.083121299743652, | |
"learning_rate": 4.646000000000001e-06, | |
"loss": 6.054, | |
"mean_token_accuracy": 0.5, | |
"num_tokens": 5946144.0, | |
"step": 355 | |
}, | |
{ | |
"epoch": 0.0712, | |
"grad_norm": 10.419790267944336, | |
"learning_rate": 4.645e-06, | |
"loss": 6.2987, | |
"mean_token_accuracy": 0.4136960655450821, | |
"num_tokens": 5965843.0, | |
"step": 356 | |
}, | |
{ | |
"epoch": 0.0714, | |
"grad_norm": 10.46291732788086, | |
"learning_rate": 4.644e-06, | |
"loss": 5.7, | |
"mean_token_accuracy": 0.5392592549324036, | |
"num_tokens": 5980726.0, | |
"step": 357 | |
}, | |
{ | |
"epoch": 0.0716, | |
"grad_norm": 11.251096725463867, | |
"learning_rate": 4.643e-06, | |
"loss": 5.5953, | |
"mean_token_accuracy": 0.5185185372829437, | |
"num_tokens": 5995182.0, | |
"step": 358 | |
}, | |
{ | |
"epoch": 0.0718, | |
"grad_norm": 9.273164749145508, | |
"learning_rate": 4.642000000000001e-06, | |
"loss": 5.7778, | |
"mean_token_accuracy": 0.48938991129398346, | |
"num_tokens": 6004839.0, | |
"step": 359 | |
}, | |
{ | |
"epoch": 0.072, | |
"grad_norm": 15.386749267578125, | |
"learning_rate": 4.641e-06, | |
"loss": 5.5941, | |
"mean_token_accuracy": 0.49140210449695587, | |
"num_tokens": 6019456.0, | |
"step": 360 | |
}, | |
{ | |
"epoch": 0.0722, | |
"grad_norm": 14.047779083251953, | |
"learning_rate": 4.6400000000000005e-06, | |
"loss": 6.0249, | |
"mean_token_accuracy": 0.5166028141975403, | |
"num_tokens": 6038752.0, | |
"step": 361 | |
}, | |
{ | |
"epoch": 0.0724, | |
"grad_norm": 7.4056925773620605, | |
"learning_rate": 4.639e-06, | |
"loss": 5.3375, | |
"mean_token_accuracy": 0.5800000131130219, | |
"num_tokens": 6048404.0, | |
"step": 362 | |
}, | |
{ | |
"epoch": 0.0726, | |
"grad_norm": 13.957375526428223, | |
"learning_rate": 4.638e-06, | |
"loss": 5.7739, | |
"mean_token_accuracy": 0.561188817024231, | |
"num_tokens": 6067581.0, | |
"step": 363 | |
}, | |
{ | |
"epoch": 0.0728, | |
"grad_norm": 10.774636268615723, | |
"learning_rate": 4.637000000000001e-06, | |
"loss": 6.0013, | |
"mean_token_accuracy": 0.4900793582201004, | |
"num_tokens": 6087734.0, | |
"step": 364 | |
}, | |
{ | |
"epoch": 0.073, | |
"grad_norm": 8.231881141662598, | |
"learning_rate": 4.636e-06, | |
"loss": 4.9933, | |
"mean_token_accuracy": 0.6004464328289032, | |
"num_tokens": 6102196.0, | |
"step": 365 | |
}, | |
{ | |
"epoch": 0.0732, | |
"grad_norm": 8.488526344299316, | |
"learning_rate": 4.6350000000000005e-06, | |
"loss": 5.0155, | |
"mean_token_accuracy": 0.463299423456192, | |
"num_tokens": 6116673.0, | |
"step": 366 | |
}, | |
{ | |
"epoch": 0.0734, | |
"grad_norm": 10.228729248046875, | |
"learning_rate": 4.634e-06, | |
"loss": 5.8042, | |
"mean_token_accuracy": 0.5535714328289032, | |
"num_tokens": 6136007.0, | |
"step": 367 | |
}, | |
{ | |
"epoch": 0.0736, | |
"grad_norm": 14.825092315673828, | |
"learning_rate": 4.633e-06, | |
"loss": 5.6991, | |
"mean_token_accuracy": 0.5031928569078445, | |
"num_tokens": 6154423.0, | |
"step": 368 | |
}, | |
{ | |
"epoch": 0.0738, | |
"grad_norm": 8.982471466064453, | |
"learning_rate": 4.632000000000001e-06, | |
"loss": 5.564, | |
"mean_token_accuracy": 0.5177975445985794, | |
"num_tokens": 6173864.0, | |
"step": 369 | |
}, | |
{ | |
"epoch": 0.074, | |
"grad_norm": 20.95796775817871, | |
"learning_rate": 4.631e-06, | |
"loss": 5.8344, | |
"mean_token_accuracy": 0.4821200519800186, | |
"num_tokens": 6193259.0, | |
"step": 370 | |
}, | |
{ | |
"epoch": 0.0742, | |
"grad_norm": 35.350887298583984, | |
"learning_rate": 4.6300000000000006e-06, | |
"loss": 5.7328, | |
"mean_token_accuracy": 0.5106837600469589, | |
"num_tokens": 6213732.0, | |
"step": 371 | |
}, | |
{ | |
"epoch": 0.0744, | |
"grad_norm": 13.841226577758789, | |
"learning_rate": 4.629e-06, | |
"loss": 5.9662, | |
"mean_token_accuracy": 0.5092592537403107, | |
"num_tokens": 6233169.0, | |
"step": 372 | |
}, | |
{ | |
"epoch": 0.0746, | |
"grad_norm": 12.297442436218262, | |
"learning_rate": 4.628e-06, | |
"loss": 5.3639, | |
"mean_token_accuracy": 0.42847076058387756, | |
"num_tokens": 6252724.0, | |
"step": 373 | |
}, | |
{ | |
"epoch": 0.0748, | |
"grad_norm": 8.141170501708984, | |
"learning_rate": 4.627000000000001e-06, | |
"loss": 5.1988, | |
"mean_token_accuracy": 0.5410256534814835, | |
"num_tokens": 6267220.0, | |
"step": 374 | |
}, | |
{ | |
"epoch": 0.075, | |
"grad_norm": 11.617218017578125, | |
"learning_rate": 4.626e-06, | |
"loss": 6.1014, | |
"mean_token_accuracy": 0.5000000149011612, | |
"num_tokens": 6281873.0, | |
"step": 375 | |
}, | |
{ | |
"epoch": 0.0752, | |
"grad_norm": 14.549817085266113, | |
"learning_rate": 4.625000000000001e-06, | |
"loss": 4.9924, | |
"mean_token_accuracy": 0.6129629611968994, | |
"num_tokens": 6296468.0, | |
"step": 376 | |
}, | |
{ | |
"epoch": 0.0754, | |
"grad_norm": 8.732035636901855, | |
"learning_rate": 4.624e-06, | |
"loss": 5.3749, | |
"mean_token_accuracy": 0.49193547666072845, | |
"num_tokens": 6316099.0, | |
"step": 377 | |
}, | |
{ | |
"epoch": 0.0756, | |
"grad_norm": 11.393025398254395, | |
"learning_rate": 4.6230000000000005e-06, | |
"loss": 5.8518, | |
"mean_token_accuracy": 0.5462962985038757, | |
"num_tokens": 6335358.0, | |
"step": 378 | |
}, | |
{ | |
"epoch": 0.0758, | |
"grad_norm": 11.22784423828125, | |
"learning_rate": 4.622e-06, | |
"loss": 5.6366, | |
"mean_token_accuracy": 0.5166666805744171, | |
"num_tokens": 6349895.0, | |
"step": 379 | |
}, | |
{ | |
"epoch": 0.076, | |
"grad_norm": 9.452168464660645, | |
"learning_rate": 4.621e-06, | |
"loss": 5.1428, | |
"mean_token_accuracy": 0.48552632331848145, | |
"num_tokens": 6371574.0, | |
"step": 380 | |
}, | |
{ | |
"epoch": 0.0762, | |
"grad_norm": 10.894210815429688, | |
"learning_rate": 4.620000000000001e-06, | |
"loss": 5.977, | |
"mean_token_accuracy": 0.484375, | |
"num_tokens": 6389904.0, | |
"step": 381 | |
}, | |
{ | |
"epoch": 0.0764, | |
"grad_norm": 13.074360847473145, | |
"learning_rate": 4.619e-06, | |
"loss": 5.3847, | |
"mean_token_accuracy": 0.5892857313156128, | |
"num_tokens": 6409200.0, | |
"step": 382 | |
}, | |
{ | |
"epoch": 0.0766, | |
"grad_norm": 11.248861312866211, | |
"learning_rate": 4.6180000000000005e-06, | |
"loss": 5.4468, | |
"mean_token_accuracy": 0.550000011920929, | |
"num_tokens": 6423778.0, | |
"step": 383 | |
}, | |
{ | |
"epoch": 0.0768, | |
"grad_norm": 9.336748123168945, | |
"learning_rate": 4.617e-06, | |
"loss": 5.4932, | |
"mean_token_accuracy": 0.5820105969905853, | |
"num_tokens": 6443035.0, | |
"step": 384 | |
}, | |
{ | |
"epoch": 0.077, | |
"grad_norm": 9.516325950622559, | |
"learning_rate": 4.616e-06, | |
"loss": 5.4311, | |
"mean_token_accuracy": 0.4709596037864685, | |
"num_tokens": 6459666.0, | |
"step": 385 | |
}, | |
{ | |
"epoch": 0.0772, | |
"grad_norm": 7.903703212738037, | |
"learning_rate": 4.615000000000001e-06, | |
"loss": 5.5287, | |
"mean_token_accuracy": 0.541487067937851, | |
"num_tokens": 6478423.0, | |
"step": 386 | |
}, | |
{ | |
"epoch": 0.0774, | |
"grad_norm": 13.11430835723877, | |
"learning_rate": 4.614e-06, | |
"loss": 5.3758, | |
"mean_token_accuracy": 0.6235714256763458, | |
"num_tokens": 6492916.0, | |
"step": 387 | |
}, | |
{ | |
"epoch": 0.0776, | |
"grad_norm": 14.428086280822754, | |
"learning_rate": 4.6130000000000006e-06, | |
"loss": 5.3445, | |
"mean_token_accuracy": 0.5848214328289032, | |
"num_tokens": 6512237.0, | |
"step": 388 | |
}, | |
{ | |
"epoch": 0.0778, | |
"grad_norm": 7.981903553009033, | |
"learning_rate": 4.612e-06, | |
"loss": 5.6573, | |
"mean_token_accuracy": 0.581250011920929, | |
"num_tokens": 6531501.0, | |
"step": 389 | |
}, | |
{ | |
"epoch": 0.078, | |
"grad_norm": 11.641196250915527, | |
"learning_rate": 4.611e-06, | |
"loss": 4.5256, | |
"mean_token_accuracy": 0.5635062754154205, | |
"num_tokens": 6545420.0, | |
"step": 390 | |
}, | |
{ | |
"epoch": 0.0782, | |
"grad_norm": 7.262872219085693, | |
"learning_rate": 4.610000000000001e-06, | |
"loss": 5.5162, | |
"mean_token_accuracy": 0.5485893487930298, | |
"num_tokens": 6564684.0, | |
"step": 391 | |
}, | |
{ | |
"epoch": 0.0784, | |
"grad_norm": 9.247142791748047, | |
"learning_rate": 4.609e-06, | |
"loss": 5.6358, | |
"mean_token_accuracy": 0.5387205481529236, | |
"num_tokens": 6584022.0, | |
"step": 392 | |
}, | |
{ | |
"epoch": 0.0786, | |
"grad_norm": 7.343860149383545, | |
"learning_rate": 4.608000000000001e-06, | |
"loss": 5.4758, | |
"mean_token_accuracy": 0.5384615361690521, | |
"num_tokens": 6598518.0, | |
"step": 393 | |
}, | |
{ | |
"epoch": 0.0788, | |
"grad_norm": 80.7061767578125, | |
"learning_rate": 4.607e-06, | |
"loss": 5.8124, | |
"mean_token_accuracy": 0.5425287485122681, | |
"num_tokens": 6617265.0, | |
"step": 394 | |
}, | |
{ | |
"epoch": 0.079, | |
"grad_norm": 8.777029991149902, | |
"learning_rate": 4.6060000000000005e-06, | |
"loss": 5.63, | |
"mean_token_accuracy": 0.5334528088569641, | |
"num_tokens": 6636418.0, | |
"step": 395 | |
}, | |
{ | |
"epoch": 0.0792, | |
"grad_norm": 9.333694458007812, | |
"learning_rate": 4.605000000000001e-06, | |
"loss": 5.5997, | |
"mean_token_accuracy": 0.5315904170274734, | |
"num_tokens": 6656184.0, | |
"step": 396 | |
}, | |
{ | |
"epoch": 0.0794, | |
"grad_norm": 8.860589981079102, | |
"learning_rate": 4.604e-06, | |
"loss": 5.1344, | |
"mean_token_accuracy": 0.589756578207016, | |
"num_tokens": 6675525.0, | |
"step": 397 | |
}, | |
{ | |
"epoch": 0.0796, | |
"grad_norm": 13.58896541595459, | |
"learning_rate": 4.603000000000001e-06, | |
"loss": 5.2149, | |
"mean_token_accuracy": 0.5039232820272446, | |
"num_tokens": 6693627.0, | |
"step": 398 | |
}, | |
{ | |
"epoch": 0.0798, | |
"grad_norm": 9.988759994506836, | |
"learning_rate": 4.602e-06, | |
"loss": 5.6091, | |
"mean_token_accuracy": 0.5166666805744171, | |
"num_tokens": 6708223.0, | |
"step": 399 | |
}, | |
{ | |
"epoch": 0.08, | |
"grad_norm": 9.320517539978027, | |
"learning_rate": 4.6010000000000005e-06, | |
"loss": 5.3271, | |
"mean_token_accuracy": 0.5459558963775635, | |
"num_tokens": 6728571.0, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.0802, | |
"grad_norm": 6.697946548461914, | |
"learning_rate": 4.600000000000001e-06, | |
"loss": 4.8994, | |
"mean_token_accuracy": 0.5878571271896362, | |
"num_tokens": 6737950.0, | |
"step": 401 | |
}, | |
{ | |
"epoch": 0.0804, | |
"grad_norm": 7.51831579208374, | |
"learning_rate": 4.599e-06, | |
"loss": 5.024, | |
"mean_token_accuracy": 0.5743534564971924, | |
"num_tokens": 6757373.0, | |
"step": 402 | |
}, | |
{ | |
"epoch": 0.0806, | |
"grad_norm": 9.05827522277832, | |
"learning_rate": 4.598e-06, | |
"loss": 4.8548, | |
"mean_token_accuracy": 0.5397436022758484, | |
"num_tokens": 6778652.0, | |
"step": 403 | |
}, | |
{ | |
"epoch": 0.0808, | |
"grad_norm": 8.396754264831543, | |
"learning_rate": 4.597e-06, | |
"loss": 4.8869, | |
"mean_token_accuracy": 0.5226293057203293, | |
"num_tokens": 6793115.0, | |
"step": 404 | |
}, | |
{ | |
"epoch": 0.081, | |
"grad_norm": 7.155496597290039, | |
"learning_rate": 4.5960000000000006e-06, | |
"loss": 5.2171, | |
"mean_token_accuracy": 0.5760368704795837, | |
"num_tokens": 6812376.0, | |
"step": 405 | |
}, | |
{ | |
"epoch": 0.0812, | |
"grad_norm": 18.729246139526367, | |
"learning_rate": 4.595000000000001e-06, | |
"loss": 5.3674, | |
"mean_token_accuracy": 0.47413793206214905, | |
"num_tokens": 6826681.0, | |
"step": 406 | |
}, | |
{ | |
"epoch": 0.0814, | |
"grad_norm": 5.5656046867370605, | |
"learning_rate": 4.594e-06, | |
"loss": 4.9008, | |
"mean_token_accuracy": 0.5647321343421936, | |
"num_tokens": 6836545.0, | |
"step": 407 | |
}, | |
{ | |
"epoch": 0.0816, | |
"grad_norm": 12.937115669250488, | |
"learning_rate": 4.593e-06, | |
"loss": 5.3085, | |
"mean_token_accuracy": 0.5370370447635651, | |
"num_tokens": 6855848.0, | |
"step": 408 | |
}, | |
{ | |
"epoch": 0.0818, | |
"grad_norm": 10.064362525939941, | |
"learning_rate": 4.592e-06, | |
"loss": 5.6301, | |
"mean_token_accuracy": 0.5384615361690521, | |
"num_tokens": 6871572.0, | |
"step": 409 | |
}, | |
{ | |
"epoch": 0.082, | |
"grad_norm": 11.348885536193848, | |
"learning_rate": 4.591000000000001e-06, | |
"loss": 5.2883, | |
"mean_token_accuracy": 0.5475806295871735, | |
"num_tokens": 6886045.0, | |
"step": 410 | |
}, | |
{ | |
"epoch": 0.0822, | |
"grad_norm": 12.09267520904541, | |
"learning_rate": 4.590000000000001e-06, | |
"loss": 5.1195, | |
"mean_token_accuracy": 0.5264367908239365, | |
"num_tokens": 6905344.0, | |
"step": 411 | |
}, | |
{ | |
"epoch": 0.0824, | |
"grad_norm": 6.413148403167725, | |
"learning_rate": 4.5890000000000004e-06, | |
"loss": 4.9556, | |
"mean_token_accuracy": 0.613103449344635, | |
"num_tokens": 6919857.0, | |
"step": 412 | |
}, | |
{ | |
"epoch": 0.0826, | |
"grad_norm": 14.665050506591797, | |
"learning_rate": 4.588e-06, | |
"loss": 5.375, | |
"mean_token_accuracy": 0.5648148059844971, | |
"num_tokens": 6939271.0, | |
"step": 413 | |
}, | |
{ | |
"epoch": 0.0828, | |
"grad_norm": 5.199728012084961, | |
"learning_rate": 4.587e-06, | |
"loss": 4.4413, | |
"mean_token_accuracy": 0.5422222167253494, | |
"num_tokens": 6949145.0, | |
"step": 414 | |
}, | |
{ | |
"epoch": 0.083, | |
"grad_norm": 22.955730438232422, | |
"learning_rate": 4.586000000000001e-06, | |
"loss": 4.4207, | |
"mean_token_accuracy": 0.6060605943202972, | |
"num_tokens": 6963199.0, | |
"step": 415 | |
}, | |
{ | |
"epoch": 0.0832, | |
"grad_norm": 5.997854709625244, | |
"learning_rate": 4.585e-06, | |
"loss": 4.5337, | |
"mean_token_accuracy": 0.6601382493972778, | |
"num_tokens": 6977679.0, | |
"step": 416 | |
}, | |
{ | |
"epoch": 0.0834, | |
"grad_norm": 13.689990997314453, | |
"learning_rate": 4.5840000000000005e-06, | |
"loss": 5.4803, | |
"mean_token_accuracy": 0.6183862388134003, | |
"num_tokens": 6992166.0, | |
"step": 417 | |
}, | |
{ | |
"epoch": 0.0836, | |
"grad_norm": 13.06170654296875, | |
"learning_rate": 4.583e-06, | |
"loss": 5.1476, | |
"mean_token_accuracy": 0.6145320236682892, | |
"num_tokens": 7011542.0, | |
"step": 418 | |
}, | |
{ | |
"epoch": 0.0838, | |
"grad_norm": 10.056525230407715, | |
"learning_rate": 4.582e-06, | |
"loss": 5.1888, | |
"mean_token_accuracy": 0.5343915373086929, | |
"num_tokens": 7030729.0, | |
"step": 419 | |
}, | |
{ | |
"epoch": 0.084, | |
"grad_norm": 6.395605564117432, | |
"learning_rate": 4.581000000000001e-06, | |
"loss": 5.4807, | |
"mean_token_accuracy": 0.5086206793785095, | |
"num_tokens": 7045228.0, | |
"step": 420 | |
}, | |
{ | |
"epoch": 0.0842, | |
"grad_norm": 8.128874778747559, | |
"learning_rate": 4.58e-06, | |
"loss": 5.4323, | |
"mean_token_accuracy": 0.5689655244350433, | |
"num_tokens": 7064800.0, | |
"step": 421 | |
}, | |
{ | |
"epoch": 0.0844, | |
"grad_norm": 8.527276039123535, | |
"learning_rate": 4.579e-06, | |
"loss": 5.4934, | |
"mean_token_accuracy": 0.545584037899971, | |
"num_tokens": 7083906.0, | |
"step": 422 | |
}, | |
{ | |
"epoch": 0.0846, | |
"grad_norm": 5.491787433624268, | |
"learning_rate": 4.578e-06, | |
"loss": 4.8269, | |
"mean_token_accuracy": 0.6545092761516571, | |
"num_tokens": 7094902.0, | |
"step": 423 | |
}, | |
{ | |
"epoch": 0.0848, | |
"grad_norm": 6.678435802459717, | |
"learning_rate": 4.577e-06, | |
"loss": 4.7984, | |
"mean_token_accuracy": 0.6296296119689941, | |
"num_tokens": 7104577.0, | |
"step": 424 | |
}, | |
{ | |
"epoch": 0.085, | |
"grad_norm": 22.77742576599121, | |
"learning_rate": 4.576000000000001e-06, | |
"loss": 5.7638, | |
"mean_token_accuracy": 0.5648148059844971, | |
"num_tokens": 7124732.0, | |
"step": 425 | |
}, | |
{ | |
"epoch": 0.0852, | |
"grad_norm": 9.347881317138672, | |
"learning_rate": 4.575e-06, | |
"loss": 5.2139, | |
"mean_token_accuracy": 0.6139162480831146, | |
"num_tokens": 7143991.0, | |
"step": 426 | |
}, | |
{ | |
"epoch": 0.0854, | |
"grad_norm": 5.620838165283203, | |
"learning_rate": 4.574e-06, | |
"loss": 4.6779, | |
"mean_token_accuracy": 0.6503448188304901, | |
"num_tokens": 7158607.0, | |
"step": 427 | |
}, | |
{ | |
"epoch": 0.0856, | |
"grad_norm": 6.864266872406006, | |
"learning_rate": 4.573e-06, | |
"loss": 4.8968, | |
"mean_token_accuracy": 0.6183862388134003, | |
"num_tokens": 7173064.0, | |
"step": 428 | |
}, | |
{ | |
"epoch": 0.0858, | |
"grad_norm": 22.015869140625, | |
"learning_rate": 4.5720000000000004e-06, | |
"loss": 5.1698, | |
"mean_token_accuracy": 0.5743534564971924, | |
"num_tokens": 7192243.0, | |
"step": 429 | |
}, | |
{ | |
"epoch": 0.086, | |
"grad_norm": 8.401832580566406, | |
"learning_rate": 4.571000000000001e-06, | |
"loss": 5.4089, | |
"mean_token_accuracy": 0.5715725719928741, | |
"num_tokens": 7211646.0, | |
"step": 430 | |
}, | |
{ | |
"epoch": 0.0862, | |
"grad_norm": 25.050403594970703, | |
"learning_rate": 4.57e-06, | |
"loss": 4.5259, | |
"mean_token_accuracy": 0.6126644611358643, | |
"num_tokens": 7230516.0, | |
"step": 431 | |
}, | |
{ | |
"epoch": 0.0864, | |
"grad_norm": 9.29088306427002, | |
"learning_rate": 4.569e-06, | |
"loss": 4.889, | |
"mean_token_accuracy": 0.5278460085391998, | |
"num_tokens": 7249620.0, | |
"step": 432 | |
}, | |
{ | |
"epoch": 0.0866, | |
"grad_norm": 15.916458129882812, | |
"learning_rate": 4.568e-06, | |
"loss": 4.9777, | |
"mean_token_accuracy": 0.6257143020629883, | |
"num_tokens": 7264094.0, | |
"step": 433 | |
}, | |
{ | |
"epoch": 0.0868, | |
"grad_norm": 6.861588954925537, | |
"learning_rate": 4.5670000000000005e-06, | |
"loss": 5.4239, | |
"mean_token_accuracy": 0.4928571432828903, | |
"num_tokens": 7283359.0, | |
"step": 434 | |
}, | |
{ | |
"epoch": 0.087, | |
"grad_norm": 13.225293159484863, | |
"learning_rate": 4.566000000000001e-06, | |
"loss": 4.7175, | |
"mean_token_accuracy": 0.6028921008110046, | |
"num_tokens": 7302888.0, | |
"step": 435 | |
}, | |
{ | |
"epoch": 0.0872, | |
"grad_norm": 17.012409210205078, | |
"learning_rate": 4.565e-06, | |
"loss": 5.6497, | |
"mean_token_accuracy": 0.5857143104076385, | |
"num_tokens": 7322148.0, | |
"step": 436 | |
}, | |
{ | |
"epoch": 0.0874, | |
"grad_norm": 10.171734809875488, | |
"learning_rate": 4.564e-06, | |
"loss": 4.5458, | |
"mean_token_accuracy": 0.5567567646503448, | |
"num_tokens": 7341333.0, | |
"step": 437 | |
}, | |
{ | |
"epoch": 0.0876, | |
"grad_norm": 16.463756561279297, | |
"learning_rate": 4.563e-06, | |
"loss": 4.9291, | |
"mean_token_accuracy": 0.6135416626930237, | |
"num_tokens": 7362605.0, | |
"step": 438 | |
}, | |
{ | |
"epoch": 0.0878, | |
"grad_norm": 17.738061904907227, | |
"learning_rate": 4.5620000000000005e-06, | |
"loss": 4.813, | |
"mean_token_accuracy": 0.636904776096344, | |
"num_tokens": 7381865.0, | |
"step": 439 | |
}, | |
{ | |
"epoch": 0.088, | |
"grad_norm": 8.94965648651123, | |
"learning_rate": 4.561e-06, | |
"loss": 4.8816, | |
"mean_token_accuracy": 0.5999999940395355, | |
"num_tokens": 7396402.0, | |
"step": 440 | |
}, | |
{ | |
"epoch": 0.0882, | |
"grad_norm": 15.025381088256836, | |
"learning_rate": 4.56e-06, | |
"loss": 5.6238, | |
"mean_token_accuracy": 0.5384615361690521, | |
"num_tokens": 7415660.0, | |
"step": 441 | |
}, | |
{ | |
"epoch": 0.0884, | |
"grad_norm": 10.704608917236328, | |
"learning_rate": 4.559000000000001e-06, | |
"loss": 5.291, | |
"mean_token_accuracy": 0.5915948152542114, | |
"num_tokens": 7435781.0, | |
"step": 442 | |
}, | |
{ | |
"epoch": 0.0886, | |
"grad_norm": 9.916930198669434, | |
"learning_rate": 4.558e-06, | |
"loss": 4.6656, | |
"mean_token_accuracy": 0.6135416626930237, | |
"num_tokens": 7454999.0, | |
"step": 443 | |
}, | |
{ | |
"epoch": 0.0888, | |
"grad_norm": 7.206878662109375, | |
"learning_rate": 4.557000000000001e-06, | |
"loss": 4.6639, | |
"mean_token_accuracy": 0.4869281202554703, | |
"num_tokens": 7469396.0, | |
"step": 444 | |
}, | |
{ | |
"epoch": 0.089, | |
"grad_norm": 6.467816352844238, | |
"learning_rate": 4.556e-06, | |
"loss": 4.9449, | |
"mean_token_accuracy": 0.5981481373310089, | |
"num_tokens": 7483855.0, | |
"step": 445 | |
}, | |
{ | |
"epoch": 0.0892, | |
"grad_norm": 9.08160400390625, | |
"learning_rate": 4.5550000000000004e-06, | |
"loss": 4.6868, | |
"mean_token_accuracy": 0.6126373708248138, | |
"num_tokens": 7498330.0, | |
"step": 446 | |
}, | |
{ | |
"epoch": 0.0894, | |
"grad_norm": 6.3002471923828125, | |
"learning_rate": 4.554000000000001e-06, | |
"loss": 5.0805, | |
"mean_token_accuracy": 0.551282063126564, | |
"num_tokens": 7512714.0, | |
"step": 447 | |
}, | |
{ | |
"epoch": 0.0896, | |
"grad_norm": 13.898117065429688, | |
"learning_rate": 4.553e-06, | |
"loss": 5.1324, | |
"mean_token_accuracy": 0.5690476298332214, | |
"num_tokens": 7531768.0, | |
"step": 448 | |
}, | |
{ | |
"epoch": 0.0898, | |
"grad_norm": 8.279391288757324, | |
"learning_rate": 4.552000000000001e-06, | |
"loss": 4.4569, | |
"mean_token_accuracy": 0.5530530512332916, | |
"num_tokens": 7546253.0, | |
"step": 449 | |
}, | |
{ | |
"epoch": 0.09, | |
"grad_norm": 11.584227561950684, | |
"learning_rate": 4.551e-06, | |
"loss": 5.3385, | |
"mean_token_accuracy": 0.581250011920929, | |
"num_tokens": 7565517.0, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.0902, | |
"grad_norm": 8.302013397216797, | |
"learning_rate": 4.5500000000000005e-06, | |
"loss": 4.9839, | |
"mean_token_accuracy": 0.5351213216781616, | |
"num_tokens": 7579402.0, | |
"step": 451 | |
}, | |
{ | |
"epoch": 0.0904, | |
"grad_norm": 9.814826965332031, | |
"learning_rate": 4.549000000000001e-06, | |
"loss": 4.6692, | |
"mean_token_accuracy": 0.6079182624816895, | |
"num_tokens": 7598660.0, | |
"step": 452 | |
}, | |
{ | |
"epoch": 0.0906, | |
"grad_norm": 6.627237796783447, | |
"learning_rate": 4.548e-06, | |
"loss": 5.4627, | |
"mean_token_accuracy": 0.5092592537403107, | |
"num_tokens": 7613180.0, | |
"step": 453 | |
}, | |
{ | |
"epoch": 0.0908, | |
"grad_norm": 11.831220626831055, | |
"learning_rate": 4.547000000000001e-06, | |
"loss": 4.8182, | |
"mean_token_accuracy": 0.5758620798587799, | |
"num_tokens": 7632441.0, | |
"step": 454 | |
}, | |
{ | |
"epoch": 0.091, | |
"grad_norm": 7.963436126708984, | |
"learning_rate": 4.546e-06, | |
"loss": 5.1914, | |
"mean_token_accuracy": 0.5578093528747559, | |
"num_tokens": 7651466.0, | |
"step": 455 | |
}, | |
{ | |
"epoch": 0.0912, | |
"grad_norm": 23.430757522583008, | |
"learning_rate": 4.5450000000000005e-06, | |
"loss": 5.1092, | |
"mean_token_accuracy": 0.5648148059844971, | |
"num_tokens": 7665848.0, | |
"step": 456 | |
}, | |
{ | |
"epoch": 0.0914, | |
"grad_norm": 7.155216693878174, | |
"learning_rate": 4.544000000000001e-06, | |
"loss": 4.5387, | |
"mean_token_accuracy": 0.5334528088569641, | |
"num_tokens": 7680308.0, | |
"step": 457 | |
}, | |
{ | |
"epoch": 0.0916, | |
"grad_norm": 9.627572059631348, | |
"learning_rate": 4.543e-06, | |
"loss": 5.2593, | |
"mean_token_accuracy": 0.5478571355342865, | |
"num_tokens": 7694782.0, | |
"step": 458 | |
}, | |
{ | |
"epoch": 0.0918, | |
"grad_norm": 7.240375995635986, | |
"learning_rate": 4.542e-06, | |
"loss": 4.444, | |
"mean_token_accuracy": 0.6316666603088379, | |
"num_tokens": 7709405.0, | |
"step": 459 | |
}, | |
{ | |
"epoch": 0.092, | |
"grad_norm": 14.054851531982422, | |
"learning_rate": 4.541e-06, | |
"loss": 4.7661, | |
"mean_token_accuracy": 0.6431034505367279, | |
"num_tokens": 7728742.0, | |
"step": 460 | |
}, | |
{ | |
"epoch": 0.0922, | |
"grad_norm": 6.709273815155029, | |
"learning_rate": 4.540000000000001e-06, | |
"loss": 5.1866, | |
"mean_token_accuracy": 0.5717073231935501, | |
"num_tokens": 7743526.0, | |
"step": 461 | |
}, | |
{ | |
"epoch": 0.0924, | |
"grad_norm": 15.618324279785156, | |
"learning_rate": 4.539000000000001e-06, | |
"loss": 5.0082, | |
"mean_token_accuracy": 0.5, | |
"num_tokens": 7763011.0, | |
"step": 462 | |
}, | |
{ | |
"epoch": 0.0926, | |
"grad_norm": 9.383596420288086, | |
"learning_rate": 4.5380000000000004e-06, | |
"loss": 4.7785, | |
"mean_token_accuracy": 0.59375, | |
"num_tokens": 7777515.0, | |
"step": 463 | |
}, | |
{ | |
"epoch": 0.0928, | |
"grad_norm": 8.333616256713867, | |
"learning_rate": 4.537e-06, | |
"loss": 4.592, | |
"mean_token_accuracy": 0.522556409239769, | |
"num_tokens": 7792009.0, | |
"step": 464 | |
}, | |
{ | |
"epoch": 0.093, | |
"grad_norm": 14.328873634338379, | |
"learning_rate": 4.536e-06, | |
"loss": 4.8537, | |
"mean_token_accuracy": 0.62321937084198, | |
"num_tokens": 7811576.0, | |
"step": 465 | |
}, | |
{ | |
"epoch": 0.0932, | |
"grad_norm": 8.614009857177734, | |
"learning_rate": 4.535000000000001e-06, | |
"loss": 5.0954, | |
"mean_token_accuracy": 0.5415282547473907, | |
"num_tokens": 7830643.0, | |
"step": 466 | |
}, | |
{ | |
"epoch": 0.0934, | |
"grad_norm": 8.26689338684082, | |
"learning_rate": 4.534000000000001e-06, | |
"loss": 4.742, | |
"mean_token_accuracy": 0.6200265288352966, | |
"num_tokens": 7848085.0, | |
"step": 467 | |
}, | |
{ | |
"epoch": 0.0936, | |
"grad_norm": 11.07862377166748, | |
"learning_rate": 4.5330000000000005e-06, | |
"loss": 4.6416, | |
"mean_token_accuracy": 0.5669642984867096, | |
"num_tokens": 7867108.0, | |
"step": 468 | |
}, | |
{ | |
"epoch": 0.0938, | |
"grad_norm": 7.7039337158203125, | |
"learning_rate": 4.532e-06, | |
"loss": 4.7395, | |
"mean_token_accuracy": 0.5769230723381042, | |
"num_tokens": 7876728.0, | |
"step": 469 | |
}, | |
{ | |
"epoch": 0.094, | |
"grad_norm": 15.2058744430542, | |
"learning_rate": 4.531e-06, | |
"loss": 4.2838, | |
"mean_token_accuracy": 0.5869939625263214, | |
"num_tokens": 7893132.0, | |
"step": 470 | |
}, | |
{ | |
"epoch": 0.0942, | |
"grad_norm": 12.777841567993164, | |
"learning_rate": 4.530000000000001e-06, | |
"loss": 4.6823, | |
"mean_token_accuracy": 0.5769230723381042, | |
"num_tokens": 7912476.0, | |
"step": 471 | |
}, | |
{ | |
"epoch": 0.0944, | |
"grad_norm": 13.58057975769043, | |
"learning_rate": 4.529000000000001e-06, | |
"loss": 4.4577, | |
"mean_token_accuracy": 0.589756578207016, | |
"num_tokens": 7931920.0, | |
"step": 472 | |
}, | |
{ | |
"epoch": 0.0946, | |
"grad_norm": 7.151589393615723, | |
"learning_rate": 4.5280000000000005e-06, | |
"loss": 4.784, | |
"mean_token_accuracy": 0.5804398059844971, | |
"num_tokens": 7946400.0, | |
"step": 473 | |
}, | |
{ | |
"epoch": 0.0948, | |
"grad_norm": 7.511891841888428, | |
"learning_rate": 4.527e-06, | |
"loss": 5.1037, | |
"mean_token_accuracy": 0.5648148059844971, | |
"num_tokens": 7960863.0, | |
"step": 474 | |
}, | |
{ | |
"epoch": 0.095, | |
"grad_norm": 9.209273338317871, | |
"learning_rate": 4.526e-06, | |
"loss": 4.6556, | |
"mean_token_accuracy": 0.5915948152542114, | |
"num_tokens": 7975547.0, | |
"step": 475 | |
}, | |
{ | |
"epoch": 0.0952, | |
"grad_norm": 10.363279342651367, | |
"learning_rate": 4.525000000000001e-06, | |
"loss": 5.0172, | |
"mean_token_accuracy": 0.5742424130439758, | |
"num_tokens": 7991119.0, | |
"step": 476 | |
}, | |
{ | |
"epoch": 0.0954, | |
"grad_norm": 7.636615753173828, | |
"learning_rate": 4.524e-06, | |
"loss": 4.952, | |
"mean_token_accuracy": 0.6436781585216522, | |
"num_tokens": 8010377.0, | |
"step": 477 | |
}, | |
{ | |
"epoch": 0.0956, | |
"grad_norm": 8.515202522277832, | |
"learning_rate": 4.5230000000000006e-06, | |
"loss": 4.6051, | |
"mean_token_accuracy": 0.6206896305084229, | |
"num_tokens": 8029123.0, | |
"step": 478 | |
}, | |
{ | |
"epoch": 0.0958, | |
"grad_norm": 8.392003059387207, | |
"learning_rate": 4.522e-06, | |
"loss": 4.9078, | |
"mean_token_accuracy": 0.6145320236682892, | |
"num_tokens": 8048382.0, | |
"step": 479 | |
}, | |
{ | |
"epoch": 0.096, | |
"grad_norm": 4.060763359069824, | |
"learning_rate": 4.521e-06, | |
"loss": 4.5817, | |
"mean_token_accuracy": 0.5833333432674408, | |
"num_tokens": 8058289.0, | |
"step": 480 | |
}, | |
{ | |
"epoch": 0.0962, | |
"grad_norm": 10.596019744873047, | |
"learning_rate": 4.520000000000001e-06, | |
"loss": 4.2822, | |
"mean_token_accuracy": 0.5428152531385422, | |
"num_tokens": 8077371.0, | |
"step": 481 | |
}, | |
{ | |
"epoch": 0.0964, | |
"grad_norm": 9.166875839233398, | |
"learning_rate": 4.519e-06, | |
"loss": 4.93, | |
"mean_token_accuracy": 0.5862068831920624, | |
"num_tokens": 8096669.0, | |
"step": 482 | |
}, | |
{ | |
"epoch": 0.0966, | |
"grad_norm": 9.244916915893555, | |
"learning_rate": 4.518e-06, | |
"loss": 4.3974, | |
"mean_token_accuracy": 0.5298245847225189, | |
"num_tokens": 8111158.0, | |
"step": 483 | |
}, | |
{ | |
"epoch": 0.0968, | |
"grad_norm": 5.592848300933838, | |
"learning_rate": 4.517e-06, | |
"loss": 4.9458, | |
"mean_token_accuracy": 0.550000011920929, | |
"num_tokens": 8126046.0, | |
"step": 484 | |
}, | |
{ | |
"epoch": 0.097, | |
"grad_norm": 7.911005020141602, | |
"learning_rate": 4.5160000000000005e-06, | |
"loss": 4.9862, | |
"mean_token_accuracy": 0.5861823260784149, | |
"num_tokens": 8156760.0, | |
"step": 485 | |
}, | |
{ | |
"epoch": 0.0972, | |
"grad_norm": 22.27390480041504, | |
"learning_rate": 4.515000000000001e-06, | |
"loss": 4.9185, | |
"mean_token_accuracy": 0.5522802919149399, | |
"num_tokens": 8178188.0, | |
"step": 486 | |
}, | |
{ | |
"epoch": 0.0974, | |
"grad_norm": 6.784433364868164, | |
"learning_rate": 4.514e-06, | |
"loss": 4.1169, | |
"mean_token_accuracy": 0.6459695100784302, | |
"num_tokens": 8192689.0, | |
"step": 487 | |
}, | |
{ | |
"epoch": 0.0976, | |
"grad_norm": 5.436159133911133, | |
"learning_rate": 4.513e-06, | |
"loss": 4.2387, | |
"mean_token_accuracy": 0.6333333253860474, | |
"num_tokens": 8207151.0, | |
"step": 488 | |
}, | |
{ | |
"epoch": 0.0978, | |
"grad_norm": 10.865344047546387, | |
"learning_rate": 4.512e-06, | |
"loss": 4.5805, | |
"mean_token_accuracy": 0.6162962913513184, | |
"num_tokens": 8221643.0, | |
"step": 489 | |
}, | |
{ | |
"epoch": 0.098, | |
"grad_norm": 18.25914764404297, | |
"learning_rate": 4.5110000000000005e-06, | |
"loss": 4.7056, | |
"mean_token_accuracy": 0.5462962985038757, | |
"num_tokens": 8242886.0, | |
"step": 490 | |
}, | |
{ | |
"epoch": 0.0982, | |
"grad_norm": 8.091397285461426, | |
"learning_rate": 4.510000000000001e-06, | |
"loss": 4.7771, | |
"mean_token_accuracy": 0.5669642984867096, | |
"num_tokens": 8262615.0, | |
"step": 491 | |
}, | |
{ | |
"epoch": 0.0984, | |
"grad_norm": 5.078823566436768, | |
"learning_rate": 4.509e-06, | |
"loss": 4.7878, | |
"mean_token_accuracy": 0.5173454135656357, | |
"num_tokens": 8275070.0, | |
"step": 492 | |
}, | |
{ | |
"epoch": 0.0986, | |
"grad_norm": 10.31528377532959, | |
"learning_rate": 4.508e-06, | |
"loss": 4.5168, | |
"mean_token_accuracy": 0.6481481492519379, | |
"num_tokens": 8289526.0, | |
"step": 493 | |
}, | |
{ | |
"epoch": 0.0988, | |
"grad_norm": 8.380623817443848, | |
"learning_rate": 4.507e-06, | |
"loss": 4.5738, | |
"mean_token_accuracy": 0.5497835576534271, | |
"num_tokens": 8306115.0, | |
"step": 494 | |
}, | |
{ | |
"epoch": 0.099, | |
"grad_norm": 15.303783416748047, | |
"learning_rate": 4.5060000000000006e-06, | |
"loss": 4.4765, | |
"mean_token_accuracy": 0.5352205336093903, | |
"num_tokens": 8327367.0, | |
"step": 495 | |
}, | |
{ | |
"epoch": 0.0992, | |
"grad_norm": 9.432476043701172, | |
"learning_rate": 4.505e-06, | |
"loss": 4.2436, | |
"mean_token_accuracy": 0.5757575631141663, | |
"num_tokens": 8347475.0, | |
"step": 496 | |
}, | |
{ | |
"epoch": 0.0994, | |
"grad_norm": 7.746166706085205, | |
"learning_rate": 4.504e-06, | |
"loss": 4.6564, | |
"mean_token_accuracy": 0.5372548997402191, | |
"num_tokens": 8367244.0, | |
"step": 497 | |
}, | |
{ | |
"epoch": 0.0996, | |
"grad_norm": 6.070708274841309, | |
"learning_rate": 4.503e-06, | |
"loss": 4.7593, | |
"mean_token_accuracy": 0.5413165390491486, | |
"num_tokens": 8387418.0, | |
"step": 498 | |
}, | |
{ | |
"epoch": 0.0998, | |
"grad_norm": 9.557999610900879, | |
"learning_rate": 4.502e-06, | |
"loss": 4.6868, | |
"mean_token_accuracy": 0.6296296119689941, | |
"num_tokens": 8406160.0, | |
"step": 499 | |
}, | |
{ | |
"epoch": 0.1, | |
"grad_norm": 18.0493106842041, | |
"learning_rate": 4.501000000000001e-06, | |
"loss": 4.4627, | |
"mean_token_accuracy": 0.5960648059844971, | |
"num_tokens": 8425459.0, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.0501, | |
"grad_norm": 15.852149963378906, | |
"learning_rate": 4.5e-06, | |
"loss": 4.0973, | |
"mean_token_accuracy": 0.6666666865348816, | |
"num_tokens": 9669.0, | |
"step": 501 | |
}, | |
{ | |
"epoch": 0.0502, | |
"grad_norm": 22.479692459106445, | |
"learning_rate": 4.7495e-06, | |
"loss": 4.6631, | |
"mean_token_accuracy": 0.5925925970077515, | |
"num_tokens": 19297.0, | |
"step": 502 | |
}, | |
{ | |
"epoch": 0.0503, | |
"grad_norm": 10.312821388244629, | |
"learning_rate": 4.749000000000001e-06, | |
"loss": 4.6086, | |
"mean_token_accuracy": 0.5357142686843872, | |
"num_tokens": 28412.0, | |
"step": 503 | |
}, | |
{ | |
"epoch": 0.0504, | |
"grad_norm": 6.480982780456543, | |
"learning_rate": 4.7485e-06, | |
"loss": 4.9233, | |
"mean_token_accuracy": 0.6071428656578064, | |
"num_tokens": 33241.0, | |
"step": 504 | |
}, | |
{ | |
"epoch": 0.0505, | |
"grad_norm": 12.237631797790527, | |
"learning_rate": 4.748e-06, | |
"loss": 4.829, | |
"mean_token_accuracy": 0.5925925970077515, | |
"num_tokens": 42869.0, | |
"step": 505 | |
}, | |
{ | |
"epoch": 0.0506, | |
"grad_norm": 7.99068546295166, | |
"learning_rate": 4.747500000000001e-06, | |
"loss": 4.1832, | |
"mean_token_accuracy": 0.5862069129943848, | |
"num_tokens": 47596.0, | |
"step": 506 | |
}, | |
{ | |
"epoch": 0.0507, | |
"grad_norm": 7.601539611816406, | |
"learning_rate": 4.7470000000000005e-06, | |
"loss": 4.8943, | |
"mean_token_accuracy": 0.6000000238418579, | |
"num_tokens": 57227.0, | |
"step": 507 | |
}, | |
{ | |
"epoch": 0.0508, | |
"grad_norm": 8.959047317504883, | |
"learning_rate": 4.7465e-06, | |
"loss": 4.3379, | |
"mean_token_accuracy": 0.6071428656578064, | |
"num_tokens": 66856.0, | |
"step": 508 | |
}, | |
{ | |
"epoch": 0.0509, | |
"grad_norm": 11.037611961364746, | |
"learning_rate": 4.746000000000001e-06, | |
"loss": 4.484, | |
"mean_token_accuracy": 0.5454545617103577, | |
"num_tokens": 76490.0, | |
"step": 509 | |
}, | |
{ | |
"epoch": 0.051, | |
"grad_norm": 15.807926177978516, | |
"learning_rate": 4.7455000000000006e-06, | |
"loss": 4.6407, | |
"mean_token_accuracy": 0.5625, | |
"num_tokens": 86123.0, | |
"step": 510 | |
}, | |
{ | |
"epoch": 0.0511, | |
"grad_norm": 18.000925064086914, | |
"learning_rate": 4.745e-06, | |
"loss": 4.5846, | |
"mean_token_accuracy": 0.6296296119689941, | |
"num_tokens": 95391.0, | |
"step": 511 | |
}, | |
{ | |
"epoch": 0.0512, | |
"grad_norm": 7.610625267028809, | |
"learning_rate": 4.7445e-06, | |
"loss": 4.153, | |
"mean_token_accuracy": 0.5483871102333069, | |
"num_tokens": 105023.0, | |
"step": 512 | |
}, | |
{ | |
"epoch": 0.0513, | |
"grad_norm": 12.736249923706055, | |
"learning_rate": 4.744000000000001e-06, | |
"loss": 4.8981, | |
"mean_token_accuracy": 0.5862069129943848, | |
"num_tokens": 114653.0, | |
"step": 513 | |
}, | |
{ | |
"epoch": 0.0514, | |
"grad_norm": 18.033981323242188, | |
"learning_rate": 4.7435e-06, | |
"loss": 4.4743, | |
"mean_token_accuracy": 0.6333333253860474, | |
"num_tokens": 124284.0, | |
"step": 514 | |
}, | |
{ | |
"epoch": 0.0515, | |
"grad_norm": 9.512198448181152, | |
"learning_rate": 4.743e-06, | |
"loss": 5.0304, | |
"mean_token_accuracy": 0.6206896305084229, | |
"num_tokens": 133914.0, | |
"step": 515 | |
}, | |
{ | |
"epoch": 0.0516, | |
"grad_norm": 7.080702781677246, | |
"learning_rate": 4.742500000000001e-06, | |
"loss": 3.8894, | |
"mean_token_accuracy": 0.5882353186607361, | |
"num_tokens": 138551.0, | |
"step": 516 | |
}, | |
{ | |
"epoch": 0.0517, | |
"grad_norm": 10.139554977416992, | |
"learning_rate": 4.7420000000000005e-06, | |
"loss": 4.0098, | |
"mean_token_accuracy": 0.6800000071525574, | |
"num_tokens": 143377.0, | |
"step": 517 | |
}, | |
{ | |
"epoch": 0.0518, | |
"grad_norm": 11.716408729553223, | |
"learning_rate": 4.7415e-06, | |
"loss": 4.531, | |
"mean_token_accuracy": 0.4722222089767456, | |
"num_tokens": 153014.0, | |
"step": 518 | |
}, | |
{ | |
"epoch": 0.0519, | |
"grad_norm": 7.540449142456055, | |
"learning_rate": 4.741000000000001e-06, | |
"loss": 3.6572, | |
"mean_token_accuracy": 0.4047619104385376, | |
"num_tokens": 157876.0, | |
"step": 519 | |
}, | |
{ | |
"epoch": 0.052, | |
"grad_norm": 7.028116226196289, | |
"learning_rate": 4.740500000000001e-06, | |
"loss": 3.9781, | |
"mean_token_accuracy": 0.529411792755127, | |
"num_tokens": 169481.0, | |
"step": 520 | |
}, | |
{ | |
"epoch": 0.0521, | |
"grad_norm": 14.62820816040039, | |
"learning_rate": 4.74e-06, | |
"loss": 4.1425, | |
"mean_token_accuracy": 0.7037037014961243, | |
"num_tokens": 179109.0, | |
"step": 521 | |
}, | |
{ | |
"epoch": 0.0522, | |
"grad_norm": 35.161014556884766, | |
"learning_rate": 4.7395e-06, | |
"loss": 4.3981, | |
"mean_token_accuracy": 0.6785714030265808, | |
"num_tokens": 188738.0, | |
"step": 522 | |
}, | |
{ | |
"epoch": 0.0523, | |
"grad_norm": 8.48649787902832, | |
"learning_rate": 4.739e-06, | |
"loss": 4.1154, | |
"mean_token_accuracy": 0.7200000286102295, | |
"num_tokens": 193522.0, | |
"step": 523 | |
}, | |
{ | |
"epoch": 0.0524, | |
"grad_norm": 10.057700157165527, | |
"learning_rate": 4.7385000000000005e-06, | |
"loss": 5.2225, | |
"mean_token_accuracy": 0.6428571343421936, | |
"num_tokens": 203151.0, | |
"step": 524 | |
}, | |
{ | |
"epoch": 0.0525, | |
"grad_norm": 7.644530296325684, | |
"learning_rate": 4.738e-06, | |
"loss": 3.9902, | |
"mean_token_accuracy": 0.6896551847457886, | |
"num_tokens": 207981.0, | |
"step": 525 | |
}, | |
{ | |
"epoch": 0.0526, | |
"grad_norm": 5.602778434753418, | |
"learning_rate": 4.737500000000001e-06, | |
"loss": 4.2959, | |
"mean_token_accuracy": 0.5249999761581421, | |
"num_tokens": 217660.0, | |
"step": 526 | |
}, | |
{ | |
"epoch": 0.0527, | |
"grad_norm": 6.581912517547607, | |
"learning_rate": 4.7370000000000006e-06, | |
"loss": 4.2594, | |
"mean_token_accuracy": 0.6666666865348816, | |
"num_tokens": 222510.0, | |
"step": 527 | |
}, | |
{ | |
"epoch": 0.0528, | |
"grad_norm": 5.864078044891357, | |
"learning_rate": 4.7365e-06, | |
"loss": 4.3366, | |
"mean_token_accuracy": 0.6333333253860474, | |
"num_tokens": 232179.0, | |
"step": 528 | |
}, | |
{ | |
"epoch": 0.0529, | |
"grad_norm": 7.9696455001831055, | |
"learning_rate": 4.736000000000001e-06, | |
"loss": 3.8494, | |
"mean_token_accuracy": 0.6428571343421936, | |
"num_tokens": 242009.0, | |
"step": 529 | |
}, | |
{ | |
"epoch": 0.053, | |
"grad_norm": 7.060004234313965, | |
"learning_rate": 4.735500000000001e-06, | |
"loss": 4.1669, | |
"mean_token_accuracy": 0.7083333134651184, | |
"num_tokens": 246834.0, | |
"step": 530 | |
}, | |
{ | |
"epoch": 0.0531, | |
"grad_norm": 6.421419143676758, | |
"learning_rate": 4.735e-06, | |
"loss": 5.1449, | |
"mean_token_accuracy": 0.44999998807907104, | |
"num_tokens": 256062.0, | |
"step": 531 | |
}, | |
{ | |
"epoch": 0.0532, | |
"grad_norm": 11.895256996154785, | |
"learning_rate": 4.7345e-06, | |
"loss": 4.312, | |
"mean_token_accuracy": 0.6551724076271057, | |
"num_tokens": 265730.0, | |
"step": 532 | |
}, | |
{ | |
"epoch": 0.0533, | |
"grad_norm": 9.461658477783203, | |
"learning_rate": 4.734e-06, | |
"loss": 3.7925, | |
"mean_token_accuracy": 0.6896551847457886, | |
"num_tokens": 274923.0, | |
"step": 533 | |
}, | |
{ | |
"epoch": 0.0534, | |
"grad_norm": 13.706249237060547, | |
"learning_rate": 4.7335000000000005e-06, | |
"loss": 4.0207, | |
"mean_token_accuracy": 0.6333333253860474, | |
"num_tokens": 284714.0, | |
"step": 534 | |
}, | |
{ | |
"epoch": 0.0535, | |
"grad_norm": 6.157789707183838, | |
"learning_rate": 4.733e-06, | |
"loss": 4.9642, | |
"mean_token_accuracy": 0.46666666865348816, | |
"num_tokens": 294276.0, | |
"step": 535 | |
}, | |
{ | |
"epoch": 0.0536, | |
"grad_norm": 7.310285568237305, | |
"learning_rate": 4.7325e-06, | |
"loss": 3.693, | |
"mean_token_accuracy": 0.6499999761581421, | |
"num_tokens": 304034.0, | |
"step": 536 | |
}, | |
{ | |
"epoch": 0.0537, | |
"grad_norm": 28.371063232421875, | |
"learning_rate": 4.732000000000001e-06, | |
"loss": 3.5947, | |
"mean_token_accuracy": 0.6097561120986938, | |
"num_tokens": 315646.0, | |
"step": 537 | |
}, | |
{ | |
"epoch": 0.0538, | |
"grad_norm": 12.421183586120605, | |
"learning_rate": 4.7315e-06, | |
"loss": 5.4485, | |
"mean_token_accuracy": 0.5161290168762207, | |
"num_tokens": 324918.0, | |
"step": 538 | |
}, | |
{ | |
"epoch": 0.0539, | |
"grad_norm": 20.568639755249023, | |
"learning_rate": 4.731000000000001e-06, | |
"loss": 4.7722, | |
"mean_token_accuracy": 0.5555555820465088, | |
"num_tokens": 334546.0, | |
"step": 539 | |
}, | |
{ | |
"epoch": 0.054, | |
"grad_norm": 11.856773376464844, | |
"learning_rate": 4.730500000000001e-06, | |
"loss": 4.2627, | |
"mean_token_accuracy": 0.6800000071525574, | |
"num_tokens": 339372.0, | |
"step": 540 | |
}, | |
{ | |
"epoch": 0.0541, | |
"grad_norm": 9.267231941223145, | |
"learning_rate": 4.7300000000000005e-06, | |
"loss": 3.2929, | |
"mean_token_accuracy": 0.6595744490623474, | |
"num_tokens": 348978.0, | |
"step": 541 | |
}, | |
{ | |
"epoch": 0.0542, | |
"grad_norm": 9.0714693069458, | |
"learning_rate": 4.7295e-06, | |
"loss": 4.3123, | |
"mean_token_accuracy": 0.6785714030265808, | |
"num_tokens": 358607.0, | |
"step": 542 | |
}, | |
{ | |
"epoch": 0.0543, | |
"grad_norm": 9.443031311035156, | |
"learning_rate": 4.729e-06, | |
"loss": 4.2038, | |
"mean_token_accuracy": 0.6896551847457886, | |
"num_tokens": 361037.0, | |
"step": 543 | |
}, | |
{ | |
"epoch": 0.0544, | |
"grad_norm": 37.84735870361328, | |
"learning_rate": 4.7285000000000006e-06, | |
"loss": 4.2948, | |
"mean_token_accuracy": 0.6428571343421936, | |
"num_tokens": 370666.0, | |
"step": 544 | |
}, | |
{ | |
"epoch": 0.0545, | |
"grad_norm": 7.593122959136963, | |
"learning_rate": 4.728e-06, | |
"loss": 3.5468, | |
"mean_token_accuracy": 0.6785714030265808, | |
"num_tokens": 375514.0, | |
"step": 545 | |
}, | |
{ | |
"epoch": 0.0546, | |
"grad_norm": 8.244745254516602, | |
"learning_rate": 4.7275e-06, | |
"loss": 4.3039, | |
"mean_token_accuracy": 0.59375, | |
"num_tokens": 385185.0, | |
"step": 546 | |
}, | |
{ | |
"epoch": 0.0547, | |
"grad_norm": 7.334832191467285, | |
"learning_rate": 4.727000000000001e-06, | |
"loss": 4.5547, | |
"mean_token_accuracy": 0.5862069129943848, | |
"num_tokens": 390015.0, | |
"step": 547 | |
}, | |
{ | |
"epoch": 0.0548, | |
"grad_norm": 7.90894079208374, | |
"learning_rate": 4.7265e-06, | |
"loss": 4.4657, | |
"mean_token_accuracy": 0.6451612710952759, | |
"num_tokens": 399647.0, | |
"step": 548 | |
}, | |
{ | |
"epoch": 0.0549, | |
"grad_norm": 11.954856872558594, | |
"learning_rate": 4.726000000000001e-06, | |
"loss": 3.9539, | |
"mean_token_accuracy": 0.6363636255264282, | |
"num_tokens": 409239.0, | |
"step": 549 | |
}, | |
{ | |
"epoch": 0.055, | |
"grad_norm": 9.81020450592041, | |
"learning_rate": 4.725500000000001e-06, | |
"loss": 4.476, | |
"mean_token_accuracy": 0.6551724076271057, | |
"num_tokens": 418907.0, | |
"step": 550 | |
}, | |
{
"epoch": 0.0551,
"grad_norm": 19.24794578552246,
"learning_rate": 4.7250000000000005e-06,
"loss": 4.5918,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 428695.0,
"step": 551
},
{
"epoch": 0.0552,
"grad_norm": 6.961690425872803,
"learning_rate": 4.7245e-06,
"loss": 4.5937,
"mean_token_accuracy": 0.5806451439857483,
"num_tokens": 438327.0,
"step": 552
},
{
"epoch": 0.0553,
"grad_norm": 8.139691352844238,
"learning_rate": 4.724e-06,
"loss": 4.1874,
"mean_token_accuracy": 0.5517241358757019,
"num_tokens": 447717.0,
"step": 553
},
{
"epoch": 0.0554,
"grad_norm": 16.520992279052734,
"learning_rate": 4.723500000000001e-06,
"loss": 4.1349,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 457349.0,
"step": 554
},
{
"epoch": 0.0555,
"grad_norm": 13.992169380187988,
"learning_rate": 4.723e-06,
"loss": 4.525,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 466977.0,
"step": 555
},
{
"epoch": 0.0556,
"grad_norm": 13.471990585327148,
"learning_rate": 4.7225e-06,
"loss": 4.5426,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 476643.0,
"step": 556
},
{
"epoch": 0.0557,
"grad_norm": 9.061677932739258,
"learning_rate": 4.722000000000001e-06,
"loss": 4.4753,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 486272.0,
"step": 557
},
{
"epoch": 0.0558,
"grad_norm": 17.377166748046875,
"learning_rate": 4.7215000000000004e-06,
"loss": 4.8877,
"mean_token_accuracy": 0.5769230723381042,
"num_tokens": 495899.0,
"step": 558
},
{
"epoch": 0.0559,
"grad_norm": 9.733543395996094,
"learning_rate": 4.721e-06,
"loss": 4.4215,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 505528.0,
"step": 559
},
{
"epoch": 0.056,
"grad_norm": 8.158677101135254,
"learning_rate": 4.720500000000001e-06,
"loss": 4.1928,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 515275.0,
"step": 560
},
{
"epoch": 0.0561,
"grad_norm": 7.257163047790527,
"learning_rate": 4.7200000000000005e-06,
"loss": 3.6564,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 520105.0,
"step": 561
},
{
"epoch": 0.0562,
"grad_norm": 7.9021453857421875,
"learning_rate": 4.7195e-06,
"loss": 3.9769,
"mean_token_accuracy": 0.75,
"num_tokens": 524930.0,
"step": 562
},
{
"epoch": 0.0563,
"grad_norm": 6.572981357574463,
"learning_rate": 4.719e-06,
"loss": 4.0876,
"mean_token_accuracy": 0.6363636255264282,
"num_tokens": 529722.0,
"step": 563
},
{
"epoch": 0.0564,
"grad_norm": 12.95456314086914,
"learning_rate": 4.718500000000001e-06,
"loss": 3.8527,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 534567.0,
"step": 564
},
{
"epoch": 0.0565,
"grad_norm": 11.936407089233398,
"learning_rate": 4.718e-06,
"loss": 4.021,
"mean_token_accuracy": 0.6333333253860474,
"num_tokens": 544358.0,
"step": 565
},
{
"epoch": 0.0566,
"grad_norm": 8.207730293273926,
"learning_rate": 4.7175e-06,
"loss": 3.4478,
"mean_token_accuracy": 0.6829268336296082,
"num_tokens": 555970.0,
"step": 566
},
{
"epoch": 0.0567,
"grad_norm": 12.208813667297363,
"learning_rate": 4.717000000000001e-06,
"loss": 3.7558,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 560799.0,
"step": 567
},
{
"epoch": 0.0568,
"grad_norm": 12.881345748901367,
"learning_rate": 4.7165000000000005e-06,
"loss": 4.5709,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 570545.0,
"step": 568
},
{
"epoch": 0.0569,
"grad_norm": 8.97235107421875,
"learning_rate": 4.716e-06,
"loss": 4.2532,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 580175.0,
"step": 569
},
{
"epoch": 0.057,
"grad_norm": 9.125508308410645,
"learning_rate": 4.715500000000001e-06,
"loss": 4.2455,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 589444.0,
"step": 570
},
{
"epoch": 0.0571,
"grad_norm": 14.746228218078613,
"learning_rate": 4.715e-06,
"loss": 3.942,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 594293.0,
"step": 571
},
{
"epoch": 0.0572,
"grad_norm": 11.047410011291504,
"learning_rate": 4.7145e-06,
"loss": 4.7136,
"mean_token_accuracy": 0.5625,
"num_tokens": 603926.0,
"step": 572
},
{
"epoch": 0.0573,
"grad_norm": 8.581419944763184,
"learning_rate": 4.714e-06,
"loss": 4.5424,
"mean_token_accuracy": 0.529411792755127,
"num_tokens": 613009.0,
"step": 573
},
{
"epoch": 0.0574,
"grad_norm": 14.278406143188477,
"learning_rate": 4.713500000000001e-06,
"loss": 3.9934,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 623067.0,
"step": 574
},
{
"epoch": 0.0575,
"grad_norm": 8.127178192138672,
"learning_rate": 4.7130000000000004e-06,
"loss": 3.8178,
"mean_token_accuracy": 0.71875,
"num_tokens": 632700.0,
"step": 575
},
{
"epoch": 0.0576,
"grad_norm": 5.9728288650512695,
"learning_rate": 4.7125e-06,
"loss": 4.2103,
"mean_token_accuracy": 0.5757575631141663,
"num_tokens": 637534.0,
"step": 576
},
{
"epoch": 0.0577,
"grad_norm": 7.371451377868652,
"learning_rate": 4.712000000000001e-06,
"loss": 4.3848,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 642363.0,
"step": 577
},
{
"epoch": 0.0578,
"grad_norm": 12.849014282226562,
"learning_rate": 4.7115000000000005e-06,
"loss": 4.5052,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 651992.0,
"step": 578
},
{
"epoch": 0.0579,
"grad_norm": 12.320442199707031,
"learning_rate": 4.711e-06,
"loss": 4.8968,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 654902.0,
"step": 579
},
{
"epoch": 0.058,
"grad_norm": 8.957386016845703,
"learning_rate": 4.710500000000001e-06,
"loss": 4.1552,
"mean_token_accuracy": 0.5666666626930237,
"num_tokens": 664962.0,
"step": 580
},
{
"epoch": 0.0581,
"grad_norm": 11.971600532531738,
"learning_rate": 4.71e-06,
"loss": 3.9502,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 674591.0,
"step": 581
},
{
"epoch": 0.0582,
"grad_norm": 10.319281578063965,
"learning_rate": 4.7095e-06,
"loss": 4.9406,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 679816.0,
"step": 582
},
{
"epoch": 0.0583,
"grad_norm": 9.232120513916016,
"learning_rate": 4.709e-06,
"loss": 4.3887,
"mean_token_accuracy": 0.5625,
"num_tokens": 689227.0,
"step": 583
},
{
"epoch": 0.0584,
"grad_norm": 8.363622665405273,
"learning_rate": 4.7085e-06,
"loss": 5.2455,
"mean_token_accuracy": 0.5555555820465088,
"num_tokens": 698893.0,
"step": 584
},
{
"epoch": 0.0585,
"grad_norm": 7.695018768310547,
"learning_rate": 4.7080000000000005e-06,
"loss": 4.2145,
"mean_token_accuracy": 0.5925925970077515,
"num_tokens": 703679.0,
"step": 585
},
{
"epoch": 0.0586,
"grad_norm": 16.431795120239258,
"learning_rate": 4.7075e-06,
"loss": 4.4514,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 713309.0,
"step": 586
},
{
"epoch": 0.0587,
"grad_norm": 10.655045509338379,
"learning_rate": 4.707000000000001e-06,
"loss": 4.6634,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 722937.0,
"step": 587
},
{
"epoch": 0.0588,
"grad_norm": 8.790349960327148,
"learning_rate": 4.706500000000001e-06,
"loss": 3.7419,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 732606.0,
"step": 588
},
{
"epoch": 0.0589,
"grad_norm": 8.358304977416992,
"learning_rate": 4.706e-06,
"loss": 4.056,
"mean_token_accuracy": 0.692307710647583,
"num_tokens": 737433.0,
"step": 589
},
{
"epoch": 0.059,
"grad_norm": 10.566368103027344,
"learning_rate": 4.705500000000001e-06,
"loss": 4.58,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 747385.0,
"step": 590
},
{
"epoch": 0.0591,
"grad_norm": 10.304373741149902,
"learning_rate": 4.705e-06,
"loss": 4.1061,
"mean_token_accuracy": 0.6800000071525574,
"num_tokens": 752211.0,
"step": 591
},
{
"epoch": 0.0592,
"grad_norm": 8.703572273254395,
"learning_rate": 4.7045000000000004e-06,
"loss": 5.039,
"mean_token_accuracy": 0.5405405163764954,
"num_tokens": 761887.0,
"step": 592
},
{
"epoch": 0.0593,
"grad_norm": 6.115788459777832,
"learning_rate": 4.704e-06,
"loss": 4.013,
"mean_token_accuracy": 0.65625,
"num_tokens": 771520.0,
"step": 593
},
{
"epoch": 0.0594,
"grad_norm": 11.19068717956543,
"learning_rate": 4.7035e-06,
"loss": 3.4903,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 776346.0,
"step": 594
},
{
"epoch": 0.0595,
"grad_norm": 9.775267601013184,
"learning_rate": 4.7030000000000005e-06,
"loss": 4.1157,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 786012.0,
"step": 595
},
{
"epoch": 0.0596,
"grad_norm": 247.3515625,
"learning_rate": 4.7025e-06,
"loss": 3.8184,
"mean_token_accuracy": 0.5897436141967773,
"num_tokens": 789412.0,
"step": 596
},
{
"epoch": 0.0597,
"grad_norm": 6.224545001983643,
"learning_rate": 4.702e-06,
"loss": 3.5922,
"mean_token_accuracy": 0.6315789222717285,
"num_tokens": 799455.0,
"step": 597
},
{
"epoch": 0.0598,
"grad_norm": 11.661077499389648,
"learning_rate": 4.701500000000001e-06,
"loss": 4.7352,
"mean_token_accuracy": 0.5806451439857483,
"num_tokens": 809125.0,
"step": 598
},
{
"epoch": 0.0599,
"grad_norm": 19.784866333007812,
"learning_rate": 4.701e-06,
"loss": 2.8577,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 813850.0,
"step": 599
},
{
"epoch": 0.06,
"grad_norm": 7.449664115905762,
"learning_rate": 4.700500000000001e-06,
"loss": 4.0591,
"mean_token_accuracy": 0.574999988079071,
"num_tokens": 823803.0,
"step": 600
},
{
"epoch": 0.0601,
"grad_norm": 9.076578140258789,
"learning_rate": 4.7e-06,
"loss": 4.5261,
"mean_token_accuracy": 0.5757575631141663,
"num_tokens": 833475.0,
"step": 601
},
{
"epoch": 0.0602,
"grad_norm": 10.547795295715332,
"learning_rate": 4.6995000000000005e-06,
"loss": 4.0279,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 843149.0,
"step": 602
},
{
"epoch": 0.0603,
"grad_norm": 9.438965797424316,
"learning_rate": 4.699e-06,
"loss": 4.2318,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 847977.0,
"step": 603
},
{
"epoch": 0.0604,
"grad_norm": 6.786457061767578,
"learning_rate": 4.6985e-06,
"loss": 4.0092,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 857605.0,
"step": 604
},
{
"epoch": 0.0605,
"grad_norm": 9.550374031066895,
"learning_rate": 4.698000000000001e-06,
"loss": 3.8035,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 867233.0,
"step": 605
},
{
"epoch": 0.0606,
"grad_norm": 30.296171188354492,
"learning_rate": 4.6975e-06,
"loss": 3.265,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 872080.0,
"step": 606
},
{
"epoch": 0.0607,
"grad_norm": 17.76416778564453,
"learning_rate": 4.697e-06,
"loss": 4.3781,
"mean_token_accuracy": 0.6153846383094788,
"num_tokens": 881745.0,
"step": 607
},
{
"epoch": 0.0608,
"grad_norm": 8.984435081481934,
"learning_rate": 4.696500000000001e-06,
"loss": 4.1944,
"mean_token_accuracy": 0.7083333134651184,
"num_tokens": 884881.0,
"step": 608
},
{
"epoch": 0.0609,
"grad_norm": 8.475662231445312,
"learning_rate": 4.6960000000000004e-06,
"loss": 3.7455,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 894549.0,
"step": 609
},
{
"epoch": 0.061,
"grad_norm": 7.257772445678711,
"learning_rate": 4.6955e-06,
"loss": 3.242,
"mean_token_accuracy": 0.6341463327407837,
"num_tokens": 904370.0,
"step": 610
},
{
"epoch": 0.0611,
"grad_norm": 6.456629276275635,
"learning_rate": 4.695e-06,
"loss": 3.2421,
"mean_token_accuracy": 0.625,
"num_tokens": 914049.0,
"step": 611
},
{
"epoch": 0.0612,
"grad_norm": 10.578594207763672,
"learning_rate": 4.6945000000000005e-06,
"loss": 4.1603,
"mean_token_accuracy": 0.6538461446762085,
"num_tokens": 918876.0,
"step": 612
},
{
"epoch": 0.0613,
"grad_norm": 6.460089206695557,
"learning_rate": 4.694e-06,
"loss": 3.648,
"mean_token_accuracy": 0.65625,
"num_tokens": 928486.0,
"step": 613
},
{
"epoch": 0.0614,
"grad_norm": 6.160704135894775,
"learning_rate": 4.6935e-06,
"loss": 4.3011,
"mean_token_accuracy": 0.6285714507102966,
"num_tokens": 938080.0,
"step": 614
},
{
"epoch": 0.0615,
"grad_norm": 17.467906951904297,
"learning_rate": 4.693000000000001e-06,
"loss": 3.7943,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 947708.0,
"step": 615
},
{
"epoch": 0.0616,
"grad_norm": 17.897363662719727,
"learning_rate": 4.6925e-06,
"loss": 3.6814,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 952416.0,
"step": 616
},
{
"epoch": 0.0617,
"grad_norm": 10.994804382324219,
"learning_rate": 4.692e-06,
"loss": 3.7611,
"mean_token_accuracy": 0.5625,
"num_tokens": 957268.0,
"step": 617
},
{
"epoch": 0.0618,
"grad_norm": 8.957268714904785,
"learning_rate": 4.691500000000001e-06,
"loss": 3.8323,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 962113.0,
"step": 618
},
{
"epoch": 0.0619,
"grad_norm": 8.04139232635498,
"learning_rate": 4.6910000000000005e-06,
"loss": 3.5929,
"mean_token_accuracy": 0.5714285969734192,
"num_tokens": 984591.0,
"step": 619
},
{
"epoch": 0.062,
"grad_norm": 7.814234256744385,
"learning_rate": 4.6905e-06,
"loss": 4.5327,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 994221.0,
"step": 620
},
{
"epoch": 0.0621,
"grad_norm": 10.267566680908203,
"learning_rate": 4.69e-06,
"loss": 4.2949,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 1004010.0,
"step": 621
},
{
"epoch": 0.0622,
"grad_norm": 10.48262882232666,
"learning_rate": 4.689500000000001e-06,
"loss": 4.1629,
"mean_token_accuracy": 0.46666666865348816,
"num_tokens": 1016012.0,
"step": 622
},
{
"epoch": 0.0623,
"grad_norm": 7.499198913574219,
"learning_rate": 4.689e-06,
"loss": 4.3381,
"mean_token_accuracy": 0.65625,
"num_tokens": 1025645.0,
"step": 623
},
{
"epoch": 0.0624,
"grad_norm": 6.875175952911377,
"learning_rate": 4.6885e-06,
"loss": 3.7812,
"mean_token_accuracy": 0.6875,
"num_tokens": 1035316.0,
"step": 624
},
{
"epoch": 0.0625,
"grad_norm": 8.907971382141113,
"learning_rate": 4.688000000000001e-06,
"loss": 3.5864,
"mean_token_accuracy": 0.4651162922382355,
"num_tokens": 1040160.0,
"step": 625
},
{
"epoch": 0.0626,
"grad_norm": 7.259609222412109,
"learning_rate": 4.6875000000000004e-06,
"loss": 4.3112,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 1049790.0,
"step": 626
},
{
"epoch": 0.0627,
"grad_norm": 5.438392162322998,
"learning_rate": 4.687e-06,
"loss": 4.4629,
"mean_token_accuracy": 0.4883720874786377,
"num_tokens": 1059594.0,
"step": 627
},
{
"epoch": 0.0628,
"grad_norm": 11.198019981384277,
"learning_rate": 4.686500000000001e-06,
"loss": 3.7099,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 1064440.0,
"step": 628
},
{
"epoch": 0.0629,
"grad_norm": 7.910561561584473,
"learning_rate": 4.6860000000000005e-06,
"loss": 4.4982,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 1077757.0,
"step": 629
},
{
"epoch": 0.063,
"grad_norm": 26.06536102294922,
"learning_rate": 4.6855e-06,
"loss": 3.6904,
"mean_token_accuracy": 0.5625,
"num_tokens": 1087390.0,
"step": 630
},
{
"epoch": 0.0631,
"grad_norm": 12.044254302978516,
"learning_rate": 4.685000000000001e-06,
"loss": 3.9394,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1092218.0,
"step": 631
},
{
"epoch": 0.0632,
"grad_norm": 7.600245475769043,
"learning_rate": 4.6845e-06,
"loss": 3.4493,
"mean_token_accuracy": 0.65625,
"num_tokens": 1097051.0,
"step": 632
},
{
"epoch": 0.0633,
"grad_norm": 5.406955718994141,
"learning_rate": 4.684e-06,
"loss": 4.9152,
"mean_token_accuracy": 0.5714285969734192,
"num_tokens": 1106843.0,
"step": 633
},
{
"epoch": 0.0634,
"grad_norm": 9.774199485778809,
"learning_rate": 4.6835e-06,
"loss": 4.8083,
"mean_token_accuracy": 0.517241358757019,
"num_tokens": 1116731.0,
"step": 634
},
{
"epoch": 0.0635,
"grad_norm": 7.297339916229248,
"learning_rate": 4.683000000000001e-06,
"loss": 4.1338,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1126362.0,
"step": 635
},
{
"epoch": 0.0636,
"grad_norm": 10.346419334411621,
"learning_rate": 4.6825000000000005e-06,
"loss": 4.1885,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 1131191.0,
"step": 636
},
{
"epoch": 0.0637,
"grad_norm": 10.042614936828613,
"learning_rate": 4.682e-06,
"loss": 4.0459,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 1136021.0,
"step": 637
},
{
"epoch": 0.0638,
"grad_norm": 14.366419792175293,
"learning_rate": 4.681500000000001e-06,
"loss": 3.749,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1140849.0,
"step": 638
},
{
"epoch": 0.0639,
"grad_norm": 8.829082489013672,
"learning_rate": 4.681000000000001e-06,
"loss": 4.4337,
"mean_token_accuracy": 0.5806451439857483,
"num_tokens": 1148549.0,
"step": 639
},
{
"epoch": 0.064,
"grad_norm": 6.849588394165039,
"learning_rate": 4.6805e-06,
"loss": 4.5329,
"mean_token_accuracy": 0.5142857432365417,
"num_tokens": 1160363.0,
"step": 640
},
{
"epoch": 0.0641,
"grad_norm": 10.158519744873047,
"learning_rate": 4.680000000000001e-06,
"loss": 3.6307,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1165191.0,
"step": 641
},
{
"epoch": 0.0642,
"grad_norm": 8.333585739135742,
"learning_rate": 4.6795e-06,
"loss": 3.9175,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 1167614.0,
"step": 642
},
{
"epoch": 0.0643,
"grad_norm": 6.709843158721924,
"learning_rate": 4.6790000000000004e-06,
"loss": 4.2019,
"mean_token_accuracy": 0.6363636255264282,
"num_tokens": 1177248.0,
"step": 643
},
{
"epoch": 0.0644,
"grad_norm": 6.842790603637695,
"learning_rate": 4.6785e-06,
"loss": 3.9093,
"mean_token_accuracy": 0.5897436141967773,
"num_tokens": 1186888.0,
"step": 644
},
{
"epoch": 0.0645,
"grad_norm": 12.061588287353516,
"learning_rate": 4.678e-06,
"loss": 3.4154,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 1191873.0,
"step": 645
},
{
"epoch": 0.0646,
"grad_norm": 7.484742164611816,
"learning_rate": 4.6775000000000005e-06,
"loss": 4.756,
"mean_token_accuracy": 0.4848484992980957,
"num_tokens": 1201545.0,
"step": 646
},
{
"epoch": 0.0647,
"grad_norm": 18.671098709106445,
"learning_rate": 4.677e-06,
"loss": 3.9254,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 1211211.0,
"step": 647
},
{
"epoch": 0.0648,
"grad_norm": 27.62375831604004,
"learning_rate": 4.676500000000001e-06,
"loss": 3.4817,
"mean_token_accuracy": 0.6800000071525574,
"num_tokens": 1216037.0,
"step": 648
},
{
"epoch": 0.0649,
"grad_norm": 10.541316986083984,
"learning_rate": 4.676000000000001e-06,
"loss": 4.1839,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 1225705.0,
"step": 649
},
{
"epoch": 0.065,
"grad_norm": 11.817317008972168,
"learning_rate": 4.6755e-06,
"loss": 4.457,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 1235491.0,
"step": 650
},
{
"epoch": 0.0651,
"grad_norm": 9.53458309173584,
"learning_rate": 4.675000000000001e-06,
"loss": 3.7604,
"mean_token_accuracy": 0.625,
"num_tokens": 1240324.0,
"step": 651
},
{
"epoch": 0.0652,
"grad_norm": 9.212517738342285,
"learning_rate": 4.6745e-06,
"loss": 3.5422,
"mean_token_accuracy": 0.6800000071525574,
"num_tokens": 1245150.0,
"step": 652
},
{
"epoch": 0.0653,
"grad_norm": 32.04168701171875,
"learning_rate": 4.6740000000000005e-06,
"loss": 3.7265,
"mean_token_accuracy": 0.75,
"num_tokens": 1249975.0,
"step": 653
},
{
"epoch": 0.0654,
"grad_norm": 9.884452819824219,
"learning_rate": 4.6735e-06,
"loss": 3.4677,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 1254806.0,
"step": 654
},
{
"epoch": 0.0655,
"grad_norm": 7.879704475402832,
"learning_rate": 4.673e-06,
"loss": 3.4625,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 1264078.0,
"step": 655
},
{
"epoch": 0.0656,
"grad_norm": 9.14754867553711,
"learning_rate": 4.672500000000001e-06,
"loss": 3.7193,
"mean_token_accuracy": 0.692307710647583,
"num_tokens": 1268905.0,
"step": 656
},
{
"epoch": 0.0657,
"grad_norm": 12.19528865814209,
"learning_rate": 4.672e-06,
"loss": 3.3706,
"mean_token_accuracy": 0.7586206793785095,
"num_tokens": 1278695.0,
"step": 657
},
{
"epoch": 0.0658,
"grad_norm": 6.889378547668457,
"learning_rate": 4.6715e-06,
"loss": 5.2523,
"mean_token_accuracy": 0.47058823704719543,
"num_tokens": 1288386.0,
"step": 658
},
{
"epoch": 0.0659,
"grad_norm": 9.408376693725586,
"learning_rate": 4.671000000000001e-06,
"loss": 3.7463,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 1298328.0,
"step": 659
},
{
"epoch": 0.066,
"grad_norm": 12.323060035705566,
"learning_rate": 4.6705000000000004e-06,
"loss": 3.5947,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 1303159.0,
"step": 660
},
{
"epoch": 0.0661,
"grad_norm": 6.927505970001221,
"learning_rate": 4.670000000000001e-06,
"loss": 3.3807,
"mean_token_accuracy": 0.625,
"num_tokens": 1314778.0,
"step": 661
},
{
"epoch": 0.0662,
"grad_norm": 8.066593170166016,
"learning_rate": 4.6695e-06,
"loss": 3.2597,
"mean_token_accuracy": 0.6136363744735718,
"num_tokens": 1326393.0,
"step": 662
},
{
"epoch": 0.0663,
"grad_norm": 16.037811279296875,
"learning_rate": 4.6690000000000005e-06,
"loss": 3.3978,
"mean_token_accuracy": 0.6800000071525574,
"num_tokens": 1331219.0,
"step": 663
},
{
"epoch": 0.0664,
"grad_norm": 9.486017227172852,
"learning_rate": 4.6685e-06,
"loss": 3.8866,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 1340886.0,
"step": 664
},
{
"epoch": 0.0665,
"grad_norm": 18.408246994018555,
"learning_rate": 4.668e-06,
"loss": 3.9229,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 1345714.0,
"step": 665
},
{
"epoch": 0.0666,
"grad_norm": 7.7789998054504395,
"learning_rate": 4.667500000000001e-06,
"loss": 3.3138,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1350539.0,
"step": 666
},
{
"epoch": 0.0667,
"grad_norm": 6.88397741317749,
"learning_rate": 4.667e-06,
"loss": 4.2004,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 1360090.0,
"step": 667
},
{
"epoch": 0.0668,
"grad_norm": 7.852243900299072,
"learning_rate": 4.6665e-06,
"loss": 4.0244,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1369701.0,
"step": 668
},
{
"epoch": 0.0669,
"grad_norm": 100.88388061523438,
"learning_rate": 4.666000000000001e-06,
"loss": 3.7688,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1379761.0,
"step": 669
},
{
"epoch": 0.067,
"grad_norm": 12.3496675491333,
"learning_rate": 4.6655000000000005e-06,
"loss": 3.9071,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1390247.0,
"step": 670
},
{
"epoch": 0.0671,
"grad_norm": 6.701696872711182,
"learning_rate": 4.665e-06,
"loss": 3.8181,
"mean_token_accuracy": 0.523809552192688,
"num_tokens": 1400069.0,
"step": 671
},
{
"epoch": 0.0672,
"grad_norm": 5.969138145446777,
"learning_rate": 4.6645e-06,
"loss": 3.9125,
"mean_token_accuracy": 0.5675675868988037,
"num_tokens": 1409623.0,
"step": 672
},
{
"epoch": 0.0673,
"grad_norm": 10.8892822265625,
"learning_rate": 4.664000000000001e-06,
"loss": 3.8164,
"mean_token_accuracy": 0.5666666626930237,
"num_tokens": 1419254.0,
"step": 673
},
{
"epoch": 0.0674,
"grad_norm": 7.638428688049316,
"learning_rate": 4.6635e-06,
"loss": 3.7039,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 1428886.0,
"step": 674
},
{
"epoch": 0.0675,
"grad_norm": 7.88206672668457,
"learning_rate": 4.663e-06,
"loss": 3.7913,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 1438516.0,
"step": 675
},
{
"epoch": 0.0676,
"grad_norm": 15.534895896911621,
"learning_rate": 4.662500000000001e-06,
"loss": 3.7505,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1448144.0,
"step": 676
},
{
"epoch": 0.0677,
"grad_norm": 7.88279390335083,
"learning_rate": 4.6620000000000004e-06,
"loss": 3.7482,
"mean_token_accuracy": 0.6538461446762085,
"num_tokens": 1457771.0,
"step": 677
},
{
"epoch": 0.0678,
"grad_norm": 5.703835964202881,
"learning_rate": 4.6615e-06,
"loss": 3.4987,
"mean_token_accuracy": 0.6499999761581421,
"num_tokens": 1468308.0,
"step": 678
},
{
"epoch": 0.0679,
"grad_norm": 16.71466827392578,
"learning_rate": 4.661000000000001e-06,
"loss": 3.4928,
"mean_token_accuracy": 0.6285714507102966,
"num_tokens": 1478140.0,
"step": 679
},
{
"epoch": 0.068,
"grad_norm": 6.2899651527404785,
"learning_rate": 4.6605000000000005e-06,
"loss": 3.3845,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 1487260.0,
"step": 680
},
{
"epoch": 0.0681,
"grad_norm": 13.319244384765625,
"learning_rate": 4.66e-06,
"loss": 3.7983,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 1496928.0,
"step": 681
},
{
"epoch": 0.0682,
"grad_norm": 9.777400016784668,
"learning_rate": 4.6595e-06,
"loss": 2.9121,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 1501758.0,
"step": 682
},
{
"epoch": 0.0683,
"grad_norm": 14.392212867736816,
"learning_rate": 4.659e-06,
"loss": 3.5704,
"mean_token_accuracy": 0.692307710647583,
"num_tokens": 1506585.0,
"step": 683
},
{
"epoch": 0.0684,
"grad_norm": 7.017622947692871,
"learning_rate": 4.6585e-06,
"loss": 3.7937,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 1516258.0,
"step": 684
},
{
"epoch": 0.0685,
"grad_norm": 11.826065063476562,
"learning_rate": 4.658e-06,
"loss": 4.2457,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 1525888.0,
"step": 685
},
{
"epoch": 0.0686,
"grad_norm": 12.596312522888184,
"learning_rate": 4.657500000000001e-06,
"loss": 3.6871,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 1532460.0,
"step": 686
},
{
"epoch": 0.0687,
"grad_norm": 8.112303733825684,
"learning_rate": 4.6570000000000005e-06,
"loss": 3.3174,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 1537247.0,
"step": 687
},
{
"epoch": 0.0688,
"grad_norm": 9.07571792602539,
"learning_rate": 4.6565e-06,
"loss": 3.785,
"mean_token_accuracy": 0.6451612710952759,
"num_tokens": 1546917.0,
"step": 688
},
{
"epoch": 0.0689,
"grad_norm": 7.010780334472656,
"learning_rate": 4.656000000000001e-06,
"loss": 4.1435,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 1556707.0,
"step": 689
},
{
"epoch": 0.069,
"grad_norm": 9.204239845275879,
"learning_rate": 4.6555000000000006e-06,
"loss": 3.608,
"mean_token_accuracy": 0.6538461446762085,
"num_tokens": 1566334.0,
"step": 690
},
{
"epoch": 0.0691,
"grad_norm": 27.1485652923584,
"learning_rate": 4.655e-06,
"loss": 3.146,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 1571183.0,
"step": 691
},
{
"epoch": 0.0692,
"grad_norm": 8.742796897888184,
"learning_rate": 4.6545e-06,
"loss": 3.9241,
"mean_token_accuracy": 0.6451612710952759,
"num_tokens": 1580853.0,
"step": 692
},
{
"epoch": 0.0693,
"grad_norm": 12.289436340332031,
"learning_rate": 4.654e-06,
"loss": 2.7526,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 1586111.0,
"step": 693
},
{
"epoch": 0.0694,
"grad_norm": 10.168716430664062,
"learning_rate": 4.6535e-06,
"loss": 3.239,
"mean_token_accuracy": 0.47727271914482117,
"num_tokens": 1590956.0,
"step": 694
},
{
"epoch": 0.0695,
"grad_norm": 24.894868850708008,
"learning_rate": 4.653e-06,
"loss": 2.6735,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 1595805.0,
"step": 695
},
{
"epoch": 0.0696,
"grad_norm": 7.570350170135498,
"learning_rate": 4.652500000000001e-06,
"loss": 3.6999,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 1605434.0,
"step": 696
},
{
"epoch": 0.0697,
"grad_norm": 9.425323486328125,
"learning_rate": 4.6520000000000005e-06,
"loss": 3.5022,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 1614977.0,
"step": 697
},
{
"epoch": 0.0698,
"grad_norm": 15.086301803588867,
"learning_rate": 4.6515e-06,
"loss": 3.4407,
"mean_token_accuracy": 0.6521739363670349,
"num_tokens": 1619801.0,
"step": 698
},
{
"epoch": 0.0699,
"grad_norm": 9.1076078414917,
"learning_rate": 4.651000000000001e-06,
"loss": 4.017,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 1628386.0,
"step": 699
},
{
"epoch": 0.07,
"grad_norm": 14.358975410461426,
"learning_rate": 4.650500000000001e-06,
"loss": 3.0329,
"mean_token_accuracy": 0.692307710647583,
"num_tokens": 1633213.0,
"step": 700
},
{
"epoch": 0.0701,
"grad_norm": 10.863842010498047,
"learning_rate": 4.65e-06,
"loss": 3.3726,
"mean_token_accuracy": 0.71875,
"num_tokens": 1642846.0,
"step": 701
},
{
"epoch": 0.0702,
"grad_norm": 9.092011451721191,
"learning_rate": 4.6495e-06,
"loss": 3.3101,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 1652474.0,
"step": 702
},
{
"epoch": 0.0703,
"grad_norm": 19.392555236816406,
"learning_rate": 4.649e-06,
"loss": 4.4426,
"mean_token_accuracy": 0.5666666626930237,
"num_tokens": 1653979.0,
"step": 703
},
{
"epoch": 0.0704,
"grad_norm": 12.215099334716797,
"learning_rate": 4.6485000000000005e-06,
"loss": 3.6152,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 1663870.0,
"step": 704
},
{
"epoch": 0.0705,
"grad_norm": 12.569807052612305,
"learning_rate": 4.648e-06,
"loss": 3.9679,
"mean_token_accuracy": 0.5882353186607361,
"num_tokens": 1673505.0,
"step": 705
},
{
"epoch": 0.0706,
"grad_norm": 17.207059860229492,
"learning_rate": 4.6475e-06,
"loss": 3.6363,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1682622.0,
"step": 706
},
{
"epoch": 0.0707,
"grad_norm": 18.79701042175293,
"learning_rate": 4.6470000000000006e-06,
"loss": 3.1721,
"mean_token_accuracy": 0.47727271914482117,
"num_tokens": 1685061.0,
"step": 707
},
{
"epoch": 0.0708,
"grad_norm": 14.148972511291504,
"learning_rate": 4.6465e-06,
"loss": 3.2172,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1689889.0,
"step": 708
},
{
"epoch": 0.0709,
"grad_norm": 9.23753833770752,
"learning_rate": 4.646000000000001e-06,
"loss": 3.442,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 1699517.0,
"step": 709
},
{
"epoch": 0.071,
"grad_norm": 8.402763366699219,
"learning_rate": 4.645500000000001e-06,
"loss": 3.3113,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 1709145.0,
"step": 710
},
{
"epoch": 0.0711,
"grad_norm": 7.942638397216797,
"learning_rate": 4.645e-06,
"loss": 3.8197,
"mean_token_accuracy": 0.6153846383094788,
"num_tokens": 1718772.0,
"step": 711
},
{
"epoch": 0.0712,
"grad_norm": 7.496103286743164,
"learning_rate": 4.6445e-06,
"loss": 3.5149,
"mean_token_accuracy": 0.5121951103210449,
"num_tokens": 1728844.0,
"step": 712
},
{
"epoch": 0.0713,
"grad_norm": 14.669534683227539,
"learning_rate": 4.644e-06,
"loss": 3.7238,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1738901.0,
"step": 713
},
{
"epoch": 0.0714,
"grad_norm": 25.679645538330078,
"learning_rate": 4.6435000000000005e-06,
"loss": 2.8707,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 1743727.0,
"step": 714
},
{
"epoch": 0.0715,
"grad_norm": 12.564505577087402,
"learning_rate": 4.643e-06,
"loss": 2.5314,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1748555.0,
"step": 715
},
{
"epoch": 0.0716,
"grad_norm": 6.60573673248291,
"learning_rate": 4.6425e-06,
"loss": 3.6649,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1758183.0,
"step": 716
},
{
"epoch": 0.0717,
"grad_norm": 43.40522384643555,
"learning_rate": 4.642000000000001e-06,
"loss": 3.1609,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 1763013.0,
"step": 717
},
{
"epoch": 0.0718,
"grad_norm": 18.256948471069336,
"learning_rate": 4.6415e-06,
"loss": 3.3358,
"mean_token_accuracy": 0.6538461446762085,
"num_tokens": 1767840.0,
"step": 718
},
{
"epoch": 0.0719,
"grad_norm": 6.563004016876221,
"learning_rate": 4.641e-06,
"loss": 3.6161,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1777468.0,
"step": 719
},
{
"epoch": 0.072,
"grad_norm": 14.68053150177002,
"learning_rate": 4.640500000000001e-06,
"loss": 3.3425,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 1782457.0,
"step": 720
},
{
"epoch": 0.0721,
"grad_norm": 15.091608047485352,
"learning_rate": 4.6400000000000005e-06,
"loss": 3.895,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 1792085.0,
"step": 721
},
{
"epoch": 0.0722,
"grad_norm": 10.0131254196167,
"learning_rate": 4.6395e-06,
"loss": 3.157,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 1801753.0,
"step": 722
},
{
"epoch": 0.0723,
"grad_norm": 12.718875885009766,
"learning_rate": 4.639e-06,
"loss": 3.0399,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 1806579.0,
"step": 723
},
{
"epoch": 0.0724,
"grad_norm": 11.771772384643555,
"learning_rate": 4.6385000000000006e-06,
"loss": 2.6455,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 1811405.0,
"step": 724
},
{
"epoch": 0.0725,
"grad_norm": 8.037066459655762,
"learning_rate": 4.638e-06,
"loss": 3.4192,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 1820948.0,
"step": 725
},
{
"epoch": 0.0726,
"grad_norm": 14.757246971130371,
"learning_rate": 4.6375e-06,
"loss": 3.6754,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1830582.0,
"step": 726
},
{
"epoch": 0.0727,
"grad_norm": 13.904367446899414,
"learning_rate": 4.637000000000001e-06,
"loss": 3.4034,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 1840211.0,
"step": 727
},
{
"epoch": 0.0728,
"grad_norm": 9.246611595153809,
"learning_rate": 4.6365e-06,
"loss": 3.6843,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1850735.0,
"step": 728
},
{
"epoch": 0.0729,
"grad_norm": 8.373174667358398,
"learning_rate": 4.636e-06,
"loss": 2.7741,
"mean_token_accuracy": 0.71875,
"num_tokens": 1855568.0,
"step": 729
},
{
"epoch": 0.073,
"grad_norm": 10.298178672790527,
"learning_rate": 4.635500000000001e-06,
"loss": 3.3117,
"mean_token_accuracy": 0.75,
"num_tokens": 1865197.0,
"step": 730
},
{
"epoch": 0.0731,
"grad_norm": 6.2904791831970215,
"learning_rate": 4.6350000000000005e-06,
"loss": 3.0874,
"mean_token_accuracy": 0.4883720874786377,
"num_tokens": 1870041.0,
"step": 731
},
{
"epoch": 0.0732,
"grad_norm": 11.420605659484863,
"learning_rate": 4.6345e-06,
"loss": 3.3324,
"mean_token_accuracy": 0.65625,
"num_tokens": 1879674.0,
"step": 732
},
{
"epoch": 0.0733,
"grad_norm": 4.301177501678467,
"learning_rate": 4.634e-06,
"loss": 3.5362,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 1889341.0,
"step": 733
},
{
"epoch": 0.0734,
"grad_norm": 7.342959403991699,
"learning_rate": 4.633500000000001e-06,
"loss": 3.2954,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 1899008.0,
"step": 734
},
{
"epoch": 0.0735,
"grad_norm": 14.132750511169434,
"learning_rate": 4.633e-06,
"loss": 3.7492,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 1907796.0,
"step": 735
},
{
"epoch": 0.0736,
"grad_norm": 9.97615909576416,
"learning_rate": 4.6325e-06,
"loss": 3.0019,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 1917424.0,
"step": 736
},
{
"epoch": 0.0737,
"grad_norm": 6.553353309631348,
"learning_rate": 4.632000000000001e-06,
"loss": 3.0727,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 1927054.0,
"step": 737
},
{
"epoch": 0.0738,
"grad_norm": 10.161040306091309,
"learning_rate": 4.6315000000000005e-06,
"loss": 3.8107,
"mean_token_accuracy": 0.6129032373428345,
"num_tokens": 1936865.0,
"step": 738
},
{
"epoch": 0.0739,
"grad_norm": 11.308485984802246,
"learning_rate": 4.631e-06,
"loss": 3.6715,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 1946674.0,
"step": 739
},
{
"epoch": 0.074,
"grad_norm": 47.4397087097168,
"learning_rate": 4.630500000000001e-06,
"loss": 3.5427,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1956260.0,
"step": 740
},
{
"epoch": 0.0741,
"grad_norm": 6.480941295623779,
"learning_rate": 4.6300000000000006e-06,
"loss": 3.5033,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 1966783.0,
"step": 741
},
{
"epoch": 0.0742,
"grad_norm": 27.368915557861328,
"learning_rate": 4.6295e-06,
"loss": 3.6289,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 1976733.0,
"step": 742
},
{
"epoch": 0.0743,
"grad_norm": 8.465190887451172,
"learning_rate": 4.629e-06,
"loss": 3.496,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 1986362.0,
"step": 743
},
{
"epoch": 0.0744,
"grad_norm": 8.939212799072266,
"learning_rate": 4.6285e-06,
"loss": 3.7173,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 1996170.0,
"step": 744
},
{
"epoch": 0.0745,
"grad_norm": 5.516441822052002,
"learning_rate": 4.628e-06,
"loss": 3.4245,
"mean_token_accuracy": 0.6363636255264282,
"num_tokens": 2005964.0,
"step": 745
},
{
"epoch": 0.0746,
"grad_norm": 4.7926812171936035,
"learning_rate": 4.6275e-06,
"loss": 3.8943,
"mean_token_accuracy": 0.4883720874786377,
"num_tokens": 2015725.0,
"step": 746
},
{
"epoch": 0.0747,
"grad_norm": 7.3683881759643555,
"learning_rate": 4.627000000000001e-06,
"loss": 3.3366,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 2025394.0,
"step": 747
},
{
"epoch": 0.0748,
"grad_norm": 10.002232551574707,
"learning_rate": 4.6265000000000005e-06,
"loss": 2.6672,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 2030221.0,
"step": 748
},
{
"epoch": 0.0749,
"grad_norm": 18.89767074584961,
"learning_rate": 4.626e-06,
"loss": 4.1522,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2040048.0,
"step": 749
},
{
"epoch": 0.075,
"grad_norm": 10.60554027557373,
"learning_rate": 4.625500000000001e-06,
"loss": 2.8812,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 2044874.0,
"step": 750
},
{
"epoch": 0.0751,
"grad_norm": 19.699281692504883,
"learning_rate": 4.625000000000001e-06,
"loss": 2.5795,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 2049800.0,
"step": 751
},
{
"epoch": 0.0752,
"grad_norm": 6.850334167480469,
"learning_rate": 4.6245e-06,
"loss": 2.8907,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 2059469.0,
"step": 752
},
{
"epoch": 0.0753,
"grad_norm": 11.012489318847656,
"learning_rate": 4.624e-06,
"loss": 3.644,
"mean_token_accuracy": 0.6451612710952759,
"num_tokens": 2069280.0,
"step": 753
},
{
"epoch": 0.0754,
"grad_norm": 4.149343967437744,
"learning_rate": 4.6235e-06,
"loss": 3.1675,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2079100.0,
"step": 754
},
{
"epoch": 0.0755,
"grad_norm": 11.013205528259277,
"learning_rate": 4.6230000000000005e-06,
"loss": 3.1699,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2088728.0,
"step": 755
},
{
"epoch": 0.0756,
"grad_norm": 7.968296527862549,
"learning_rate": 4.6225e-06,
"loss": 3.777,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2098359.0,
"step": 756
},
{
"epoch": 0.0757,
"grad_norm": 26.402009963989258,
"learning_rate": 4.622e-06,
"loss": 2.8757,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 2103265.0,
"step": 757
},
{
"epoch": 0.0758,
"grad_norm": 6.024283409118652,
"learning_rate": 4.6215000000000006e-06,
"loss": 3.7862,
"mean_token_accuracy": 0.5666666626930237,
"num_tokens": 2112896.0,
"step": 758
},
{
"epoch": 0.0759,
"grad_norm": 46.732269287109375,
"learning_rate": 4.621e-06,
"loss": 3.9675,
"mean_token_accuracy": 0.5263158082962036,
"num_tokens": 2122964.0,
"step": 759
},
{
"epoch": 0.076,
"grad_norm": 24.439189910888672,
"learning_rate": 4.620500000000001e-06,
"loss": 2.9812,
"mean_token_accuracy": 0.6499999761581421,
"num_tokens": 2134575.0,
"step": 760
},
{
"epoch": 0.0761,
"grad_norm": 7.849838733673096,
"learning_rate": 4.620000000000001e-06,
"loss": 4.1256,
"mean_token_accuracy": 0.59375,
"num_tokens": 2143272.0,
"step": 761
},
{
"epoch": 0.0762,
"grad_norm": 5.0032782554626465,
"learning_rate": 4.6195e-06,
"loss": 3.7939,
"mean_token_accuracy": 0.6875,
"num_tokens": 2152905.0,
"step": 762
},
{
"epoch": 0.0763,
"grad_norm": 14.961540222167969,
"learning_rate": 4.619e-06,
"loss": 3.278,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2162572.0,
"step": 763
},
{
"epoch": 0.0764,
"grad_norm": 4.872888565063477,
"learning_rate": 4.6185e-06,
"loss": 3.3218,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 2172201.0,
"step": 764
},
{
"epoch": 0.0765,
"grad_norm": 6.018301010131836,
"learning_rate": 4.6180000000000005e-06,
"loss": 3.4297,
"mean_token_accuracy": 0.6176470518112183,
"num_tokens": 2181953.0,
"step": 765
},
{
"epoch": 0.0766,
"grad_norm": 10.676273345947266,
"learning_rate": 4.6175e-06,
"loss": 2.6142,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 2186779.0,
"step": 766
},
{
"epoch": 0.0767,
"grad_norm": 22.780208587646484,
"learning_rate": 4.617e-06,
"loss": 3.4104,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2196408.0,
"step": 767
},
{
"epoch": 0.0768,
"grad_norm": 10.998302459716797,
"learning_rate": 4.616500000000001e-06,
"loss": 3.2413,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 2206036.0,
"step": 768
},
{
"epoch": 0.0769,
"grad_norm": 3.6513028144836426,
"learning_rate": 4.616e-06,
"loss": 3.1119,
"mean_token_accuracy": 0.5227272510528564,
"num_tokens": 2213039.0,
"step": 769
},
{
"epoch": 0.077,
"grad_norm": 27.339990615844727,
"learning_rate": 4.615500000000001e-06,
"loss": 3.6024,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 2222667.0,
"step": 770
},
{
"epoch": 0.0771,
"grad_norm": 5.5310540199279785,
"learning_rate": 4.615000000000001e-06,
"loss": 3.3379,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 2232297.0,
"step": 771
},
{
"epoch": 0.0772,
"grad_norm": 4.8920087814331055,
"learning_rate": 4.6145000000000005e-06,
"loss": 3.2298,
"mean_token_accuracy": 0.65625,
"num_tokens": 2241424.0,
"step": 772
},
{
"epoch": 0.0773,
"grad_norm": 6.878321170806885,
"learning_rate": 4.614e-06,
"loss": 2.3656,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 2246250.0,
"step": 773
},
{
"epoch": 0.0774,
"grad_norm": 6.176924705505371,
"learning_rate": 4.6135e-06,
"loss": 3.6494,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2255917.0,
"step": 774
},
{
"epoch": 0.0775,
"grad_norm": 12.30815601348877,
"learning_rate": 4.6130000000000006e-06,
"loss": 3.359,
"mean_token_accuracy": 0.75,
"num_tokens": 2265445.0,
"step": 775
},
{
"epoch": 0.0776,
"grad_norm": 7.758856296539307,
"learning_rate": 4.6125e-06,
"loss": 3.3852,
"mean_token_accuracy": 0.6875,
"num_tokens": 2275238.0,
"step": 776
},
{
"epoch": 0.0777,
"grad_norm": 5.886825084686279,
"learning_rate": 4.612e-06,
"loss": 3.2039,
"mean_token_accuracy": 0.7666666507720947,
"num_tokens": 2284869.0,
"step": 777
},
{
"epoch": 0.0778,
"grad_norm": 5.061749458312988,
"learning_rate": 4.611500000000001e-06,
"loss": 3.6955,
"mean_token_accuracy": 0.6875,
"num_tokens": 2294502.0,
"step": 778
},
{
"epoch": 0.0779,
"grad_norm": 4.834611892700195,
"learning_rate": 4.611e-06,
"loss": 2.6325,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 2299329.0,
"step": 779
},
{
"epoch": 0.078,
"grad_norm": 9.333842277526855,
"learning_rate": 4.6105e-06,
"loss": 2.6595,
"mean_token_accuracy": 0.5581395626068115,
"num_tokens": 2308421.0,
"step": 780
},
{
"epoch": 0.0781,
"grad_norm": 4.859977722167969,
"learning_rate": 4.610000000000001e-06,
"loss": 3.5406,
"mean_token_accuracy": 0.6363636255264282,
"num_tokens": 2318055.0,
"step": 781
},
{
"epoch": 0.0782,
"grad_norm": 12.317121505737305,
"learning_rate": 4.6095000000000005e-06,
"loss": 3.342,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 2327685.0,
"step": 782
},
{
"epoch": 0.0783,
"grad_norm": 7.438995361328125,
"learning_rate": 4.609e-06,
"loss": 3.1703,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 2337357.0,
"step": 783
},
{
"epoch": 0.0784,
"grad_norm": 5.326390266418457,
"learning_rate": 4.6085e-06,
"loss": 3.4992,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2347023.0,
"step": 784
},
{
"epoch": 0.0785,
"grad_norm": 7.8550214767456055,
"learning_rate": 4.608000000000001e-06,
"loss": 3.5485,
"mean_token_accuracy": 0.6333333253860474,
"num_tokens": 2356692.0,
"step": 785
},
{
"epoch": 0.0786,
"grad_norm": 18.051509857177734,
"learning_rate": 4.6075e-06,
"loss": 3.1033,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 2361519.0,
"step": 786
},
{
"epoch": 0.0787,
"grad_norm": 7.7563605308532715,
"learning_rate": 4.607e-06,
"loss": 3.5244,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 2370597.0,
"step": 787
},
{
"epoch": 0.0788,
"grad_norm": 37.23891830444336,
"learning_rate": 4.606500000000001e-06,
"loss": 3.6131,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 2380266.0,
"step": 788
},
{
"epoch": 0.0789,
"grad_norm": 6.689093112945557,
"learning_rate": 4.6060000000000005e-06,
"loss": 3.5907,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 2389749.0,
"step": 789
},
{
"epoch": 0.079,
"grad_norm": 33.45732879638672,
"learning_rate": 4.6055e-06,
"loss": 3.3618,
"mean_token_accuracy": 0.7096773982048035,
"num_tokens": 2399419.0,
"step": 790
},
{
"epoch": 0.0791,
"grad_norm": 5.750537872314453,
"learning_rate": 4.605000000000001e-06,
"loss": 3.2769,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2409085.0,
"step": 791
},
{
"epoch": 0.0792,
"grad_norm": 16.188589096069336,
"learning_rate": 4.6045000000000006e-06,
"loss": 3.9241,
"mean_token_accuracy": 0.5882353186607361,
"num_tokens": 2419185.0,
"step": 792
},
{
"epoch": 0.0793,
"grad_norm": 5.855948448181152,
"learning_rate": 4.604e-06,
"loss": 2.9535,
"mean_token_accuracy": 0.7931034564971924,
"num_tokens": 2428853.0,
"step": 793
},
{
"epoch": 0.0794,
"grad_norm": 6.519040107727051, | |
"learning_rate": 4.6035e-06, | |
"loss": 2.9093, | |
"mean_token_accuracy": 0.6764705777168274, | |
"num_tokens": 2438526.0, | |
"step": 794 | |
}, | |
{ | |
"epoch": 0.0795, | |
"grad_norm": 7.772185325622559, | |
"learning_rate": 4.603000000000001e-06, | |
"loss": 3.3804, | |
"mean_token_accuracy": 0.5945945978164673, | |
"num_tokens": 2447356.0, | |
"step": 795 | |
}, | |
{ | |
"epoch": 0.0796, | |
"grad_norm": 18.420595169067383, | |
"learning_rate": 4.6025e-06, | |
"loss": 3.631, | |
"mean_token_accuracy": 0.6774193644523621, | |
"num_tokens": 2456628.0, | |
"step": 796 | |
}, | |
{ | |
"epoch": 0.0797, | |
"grad_norm": 5.700439929962158, | |
"learning_rate": 4.602e-06, | |
"loss": 3.8722, | |
"mean_token_accuracy": 0.6666666865348816, | |
"num_tokens": 2466376.0, | |
"step": 797 | |
}, | |
{ | |
"epoch": 0.0798, | |
"grad_norm": 6.174685478210449, | |
"learning_rate": 4.601500000000001e-06, | |
"loss": 2.9276, | |
"mean_token_accuracy": 0.6785714030265808, | |
"num_tokens": 2471224.0, | |
"step": 798 | |
}, | |
{ | |
"epoch": 0.0799, | |
"grad_norm": 4.374995708465576, | |
"learning_rate": 4.6010000000000005e-06, | |
"loss": 3.0849, | |
"mean_token_accuracy": 0.6875, | |
"num_tokens": 2480857.0, | |
"step": 799 | |
}, | |
{ | |
"epoch": 0.08, | |
"grad_norm": 34.307525634765625, | |
"learning_rate": 4.6005e-06, | |
"loss": 3.6614, | |
"mean_token_accuracy": 0.6176470518112183, | |
"num_tokens": 2491572.0, | |
"step": 800 | |
}, | |
{
"epoch": 0.0801,
"grad_norm": 8.205986022949219,
"learning_rate": 4.600000000000001e-06,
"loss": 3.1996,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 2496125.0,
"step": 801
},
{
"epoch": 0.0802,
"grad_norm": 8.756632804870605,
"learning_rate": 4.599500000000001e-06,
"loss": 2.6224,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 2500951.0,
"step": 802
},
{
"epoch": 0.0803,
"grad_norm": 9.636617660522461,
"learning_rate": 4.599e-06,
"loss": 3.4119,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 2510581.0,
"step": 803
},
{
"epoch": 0.0804,
"grad_norm": 4.745810508728027,
"learning_rate": 4.5985e-06,
"loss": 2.8502,
"mean_token_accuracy": 0.6875,
"num_tokens": 2520374.0,
"step": 804
},
{
"epoch": 0.0805,
"grad_norm": 7.8014726638793945,
"learning_rate": 4.598e-06,
"loss": 2.9747,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 2530043.0,
"step": 805
},
{
"epoch": 0.0806,
"grad_norm": 6.5052361488342285,
"learning_rate": 4.5975000000000005e-06,
"loss": 2.9706,
"mean_token_accuracy": 0.6410256624221802,
"num_tokens": 2541653.0,
"step": 806
},
{
"epoch": 0.0807,
"grad_norm": 6.695280075073242,
"learning_rate": 4.597e-06,
"loss": 3.2369,
"mean_token_accuracy": 0.6206896305084229,
"num_tokens": 2551283.0,
"step": 807
},
{
"epoch": 0.0808,
"grad_norm": 10.032500267028809,
"learning_rate": 4.596500000000001e-06,
"loss": 2.8301,
"mean_token_accuracy": 0.65625,
"num_tokens": 2556116.0,
"step": 808
},
{
"epoch": 0.0809,
"grad_norm": 7.350390434265137,
"learning_rate": 4.5960000000000006e-06,
"loss": 3.1792,
"mean_token_accuracy": 0.7096773982048035,
"num_tokens": 2565748.0,
"step": 809
},
{
"epoch": 0.081,
"grad_norm": 4.5711774826049805,
"learning_rate": 4.5955e-06,
"loss": 3.4667,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2575377.0,
"step": 810
},
{
"epoch": 0.0811,
"grad_norm": 21.978252410888672,
"learning_rate": 4.595000000000001e-06,
"loss": 3.5049,
"mean_token_accuracy": 0.6388888955116272,
"num_tokens": 2586984.0,
"step": 811
},
{
"epoch": 0.0812,
"grad_norm": 6.199265480041504,
"learning_rate": 4.594500000000001e-06,
"loss": 3.3378,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 2589682.0,
"step": 812
},
{
"epoch": 0.0813,
"grad_norm": 7.127424240112305,
"learning_rate": 4.594e-06,
"loss": 3.016,
"mean_token_accuracy": 0.6071428656578064,
"num_tokens": 2594713.0,
"step": 813
},
{
"epoch": 0.0814,
"grad_norm": 5.159796237945557,
"learning_rate": 4.5935e-06,
"loss": 2.7893,
"mean_token_accuracy": 0.65625,
"num_tokens": 2599546.0,
"step": 814
},
{
"epoch": 0.0815,
"grad_norm": 6.617577075958252,
"learning_rate": 4.593e-06,
"loss": 3.6098,
"mean_token_accuracy": 0.6388888955116272,
"num_tokens": 2609221.0,
"step": 815
},
{
"epoch": 0.0816,
"grad_norm": 6.62910270690918,
"learning_rate": 4.5925000000000005e-06,
"loss": 3.326,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2618849.0,
"step": 816
},
{
"epoch": 0.0817,
"grad_norm": 36.178829193115234,
"learning_rate": 4.592e-06,
"loss": 3.456,
"mean_token_accuracy": 0.7307692170143127,
"num_tokens": 2628476.0,
"step": 817
},
{
"epoch": 0.0818,
"grad_norm": 5.090374946594238,
"learning_rate": 4.5915e-06,
"loss": 3.5896,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2634573.0,
"step": 818
},
{
"epoch": 0.0819,
"grad_norm": 13.497374534606934,
"learning_rate": 4.591000000000001e-06,
"loss": 4.4922,
"mean_token_accuracy": 0.5,
"num_tokens": 2644214.0,
"step": 819
},
{
"epoch": 0.082,
"grad_norm": 7.4843573570251465,
"learning_rate": 4.5905e-06,
"loss": 3.0253,
"mean_token_accuracy": 0.7419354915618896,
"num_tokens": 2649046.0,
"step": 820
},
{
"epoch": 0.0821,
"grad_norm": 16.505775451660156,
"learning_rate": 4.590000000000001e-06,
"loss": 3.2496,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2658677.0,
"step": 821
},
{
"epoch": 0.0822,
"grad_norm": 8.728498458862305,
"learning_rate": 4.589500000000001e-06,
"loss": 2.8356,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 2668345.0,
"step": 822
},
{
"epoch": 0.0823,
"grad_norm": 7.6725172996521,
"learning_rate": 4.5890000000000004e-06,
"loss": 2.3889,
"mean_token_accuracy": 0.800000011920929,
"num_tokens": 2673190.0,
"step": 823
},
{
"epoch": 0.0824,
"grad_norm": 5.654551982879639,
"learning_rate": 4.5885e-06,
"loss": 3.276,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 2682858.0,
"step": 824
},
{
"epoch": 0.0825,
"grad_norm": 19.33879852294922,
"learning_rate": 4.588e-06,
"loss": 3.1918,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 2692603.0,
"step": 825
},
{
"epoch": 0.0826,
"grad_norm": 6.447359561920166,
"learning_rate": 4.5875000000000005e-06,
"loss": 3.4081,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2702272.0,
"step": 826
},
{
"epoch": 0.0827,
"grad_norm": 6.5413312911987305,
"learning_rate": 4.587e-06,
"loss": 2.4488,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 2707300.0,
"step": 827
},
{
"epoch": 0.0828,
"grad_norm": 3.1290841102600098,
"learning_rate": 4.5865e-06,
"loss": 2.6495,
"mean_token_accuracy": 0.4888888895511627,
"num_tokens": 2712146.0,
"step": 828
},
{
"epoch": 0.0829,
"grad_norm": 8.70888900756836,
"learning_rate": 4.586000000000001e-06,
"loss": 3.6614,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 2721325.0,
"step": 829
},
{
"epoch": 0.083,
"grad_norm": 6.421677112579346,
"learning_rate": 4.5855e-06,
"loss": 2.2333,
"mean_token_accuracy": 0.7454545497894287,
"num_tokens": 2726200.0,
"step": 830
},
{
"epoch": 0.0831,
"grad_norm": 17.020294189453125,
"learning_rate": 4.585e-06,
"loss": 2.1718,
"mean_token_accuracy": 0.7419354915618896,
"num_tokens": 2731051.0,
"step": 831
},
{
"epoch": 0.0832,
"grad_norm": 7.596602916717529,
"learning_rate": 4.584500000000001e-06,
"loss": 2.9523,
"mean_token_accuracy": 0.75,
"num_tokens": 2740680.0,
"step": 832
},
{
"epoch": 0.0833,
"grad_norm": 6.372314929962158,
"learning_rate": 4.5840000000000005e-06,
"loss": 3.4911,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2745383.0,
"step": 833
},
{
"epoch": 0.0834,
"grad_norm": 7.1700520515441895,
"learning_rate": 4.5835e-06,
"loss": 3.4058,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2755167.0,
"step": 834
},
{
"epoch": 0.0835,
"grad_norm": 5.305976867675781,
"learning_rate": 4.583e-06,
"loss": 3.0429,
"mean_token_accuracy": 0.75,
"num_tokens": 2764796.0,
"step": 835
},
{
"epoch": 0.0836,
"grad_norm": 4.707849025726318,
"learning_rate": 4.582500000000001e-06,
"loss": 3.3841,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 2774543.0,
"step": 836
},
{
"epoch": 0.0837,
"grad_norm": 5.348711013793945,
"learning_rate": 4.582e-06,
"loss": 3.5256,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 2784129.0,
"step": 837
},
{
"epoch": 0.0838,
"grad_norm": 5.256502628326416,
"learning_rate": 4.5815e-06,
"loss": 3.2654,
"mean_token_accuracy": 0.523809552192688,
"num_tokens": 2793730.0,
"step": 838
},
{
"epoch": 0.0839,
"grad_norm": 6.138255596160889,
"learning_rate": 4.581000000000001e-06,
"loss": 3.362,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2798561.0,
"step": 839
},
{
"epoch": 0.084,
"grad_norm": 5.995108127593994,
"learning_rate": 4.5805000000000004e-06,
"loss": 3.5977,
"mean_token_accuracy": 0.5862069129943848,
"num_tokens": 2808229.0,
"step": 840
},
{
"epoch": 0.0841,
"grad_norm": 16.760679244995117,
"learning_rate": 4.58e-06,
"loss": 3.4599,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 2818171.0,
"step": 841
},
{
"epoch": 0.0842,
"grad_norm": 5.8512797355651855,
"learning_rate": 4.579500000000001e-06,
"loss": 3.5948,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 2827801.0,
"step": 842
},
{
"epoch": 0.0843,
"grad_norm": 19.809741973876953,
"learning_rate": 4.579e-06,
"loss": 2.8208,
"mean_token_accuracy": 0.8148148059844971,
"num_tokens": 2837429.0,
"step": 843
},
{
"epoch": 0.0844,
"grad_norm": 4.965782642364502,
"learning_rate": 4.5785e-06,
"loss": 4.1402,
"mean_token_accuracy": 0.5384615659713745,
"num_tokens": 2846907.0,
"step": 844
},
{
"epoch": 0.0845,
"grad_norm": 5.345475673675537,
"learning_rate": 4.578e-06,
"loss": 2.2628,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 2851734.0,
"step": 845
},
{
"epoch": 0.0846,
"grad_norm": 6.63608455657959,
"learning_rate": 4.577500000000001e-06,
"loss": 3.0825,
"mean_token_accuracy": 0.7586206793785095,
"num_tokens": 2857903.0,
"step": 846
},
{
"epoch": 0.0847,
"grad_norm": 7.63088846206665,
"learning_rate": 4.577e-06,
"loss": 2.83,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 2862750.0,
"step": 847
},
{
"epoch": 0.0848,
"grad_norm": 4.815182685852051,
"learning_rate": 4.5765e-06,
"loss": 2.7107,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2867578.0,
"step": 848
},
{
"epoch": 0.0849,
"grad_norm": 8.540964126586914,
"learning_rate": 4.576000000000001e-06,
"loss": 4.1855,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 2877247.0,
"step": 849
},
{
"epoch": 0.085,
"grad_norm": 8.72761344909668,
"learning_rate": 4.5755000000000005e-06,
"loss": 3.2139,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 2887733.0,
"step": 850
},
{
"epoch": 0.0851,
"grad_norm": 8.488056182861328,
"learning_rate": 4.575e-06,
"loss": 3.1413,
"mean_token_accuracy": 0.75,
"num_tokens": 2897362.0,
"step": 851
},
{
"epoch": 0.0852,
"grad_norm": 6.375524520874023,
"learning_rate": 4.574500000000001e-06,
"loss": 3.329,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 2906992.0,
"step": 852
},
{
"epoch": 0.0853,
"grad_norm": 5.32108736038208,
"learning_rate": 4.574e-06,
"loss": 2.8995,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 2916782.0,
"step": 853
},
{
"epoch": 0.0854,
"grad_norm": 4.695627212524414,
"learning_rate": 4.5735e-06,
"loss": 2.2722,
"mean_token_accuracy": 0.800000011920929,
"num_tokens": 2921608.0,
"step": 854
},
{
"epoch": 0.0855,
"grad_norm": 4.233623504638672,
"learning_rate": 4.573e-06,
"loss": 2.9728,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 2931236.0,
"step": 855
},
{
"epoch": 0.0856,
"grad_norm": 4.79052734375,
"learning_rate": 4.572500000000001e-06,
"loss": 2.7077,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 2936065.0,
"step": 856
},
{
"epoch": 0.0857,
"grad_norm": 5.2964630126953125,
"learning_rate": 4.5720000000000004e-06,
"loss": 3.4887,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 2945611.0,
"step": 857
},
{
"epoch": 0.0858,
"grad_norm": 31.096942901611328,
"learning_rate": 4.5715e-06,
"loss": 3.1568,
"mean_token_accuracy": 0.625,
"num_tokens": 2955244.0,
"step": 858
},
{
"epoch": 0.0859,
"grad_norm": 6.527851104736328,
"learning_rate": 4.571000000000001e-06,
"loss": 3.5616,
"mean_token_accuracy": 0.65625,
"num_tokens": 2964835.0,
"step": 859
},
{
"epoch": 0.086,
"grad_norm": 22.929285049438477,
"learning_rate": 4.5705000000000005e-06,
"loss": 3.2786,
"mean_token_accuracy": 0.7096773982048035,
"num_tokens": 2974647.0,
"step": 860
},
{
"epoch": 0.0861,
"grad_norm": 9.931563377380371,
"learning_rate": 4.57e-06,
"loss": 3.0167,
"mean_token_accuracy": 0.6875,
"num_tokens": 2983920.0,
"step": 861
},
{
"epoch": 0.0862,
"grad_norm": 10.761000633239746,
"learning_rate": 4.569500000000001e-06,
"loss": 2.7117,
"mean_token_accuracy": 0.7368420958518982,
"num_tokens": 2993517.0,
"step": 862
},
{
"epoch": 0.0863,
"grad_norm": 3.709581136703491,
"learning_rate": 4.569e-06,
"loss": 3.518,
"mean_token_accuracy": 0.6486486196517944,
"num_tokens": 3002949.0,
"step": 863
},
{
"epoch": 0.0864,
"grad_norm": 5.71153450012207,
"learning_rate": 4.5685e-06,
"loss": 3.311,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 3012621.0,
"step": 864
},
{
"epoch": 0.0865,
"grad_norm": 45.57746887207031,
"learning_rate": 4.568e-06,
"loss": 2.3551,
"mean_token_accuracy": 0.800000011920929,
"num_tokens": 3017466.0,
"step": 865
},
{
"epoch": 0.0866,
"grad_norm": 6.661599159240723,
"learning_rate": 4.5675e-06,
"loss": 3.2103,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 3027095.0,
"step": 866
},
{
"epoch": 0.0867,
"grad_norm": 10.232392311096191,
"learning_rate": 4.5670000000000005e-06,
"loss": 3.5719,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 3036724.0,
"step": 867
},
{
"epoch": 0.0868,
"grad_norm": 4.735617637634277,
"learning_rate": 4.5665e-06,
"loss": 3.9463,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 3046360.0,
"step": 868
},
{
"epoch": 0.0869,
"grad_norm": 3.6748695373535156,
"learning_rate": 4.566000000000001e-06,
"loss": 2.4025,
"mean_token_accuracy": 0.7931034564971924,
"num_tokens": 3055828.0,
"step": 869
},
{
"epoch": 0.087,
"grad_norm": 8.01753044128418,
"learning_rate": 4.565500000000001e-06,
"loss": 3.2602,
"mean_token_accuracy": 0.6451612710952759,
"num_tokens": 3065889.0,
"step": 870
},
{
"epoch": 0.0871,
"grad_norm": 10.443469047546387,
"learning_rate": 4.565e-06,
"loss": 3.8817,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 3075520.0,
"step": 871
},
{
"epoch": 0.0872,
"grad_norm": 5.503696918487549,
"learning_rate": 4.564500000000001e-06,
"loss": 3.6317,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 3085149.0,
"step": 872
},
{
"epoch": 0.0873,
"grad_norm": 3.9508492946624756,
"learning_rate": 4.564e-06,
"loss": 3.175,
"mean_token_accuracy": 0.6216216087341309,
"num_tokens": 3094703.0,
"step": 873
},
{
"epoch": 0.0874,
"grad_norm": 4.747551441192627,
"learning_rate": 4.5635000000000004e-06,
"loss": 2.6579,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 3104334.0,
"step": 874
},
{
"epoch": 0.0875,
"grad_norm": 6.215536594390869,
"learning_rate": 4.563e-06,
"loss": 3.2877,
"mean_token_accuracy": 0.6875,
"num_tokens": 3115937.0,
"step": 875
},
{
"epoch": 0.0876,
"grad_norm": 4.69828462600708,
"learning_rate": 4.5625e-06,
"loss": 2.8794,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 3125606.0,
"step": 876
},
{
"epoch": 0.0877,
"grad_norm": 14.692822456359863,
"learning_rate": 4.5620000000000005e-06,
"loss": 2.8533,
"mean_token_accuracy": 0.7666666507720947,
"num_tokens": 3135237.0,
"step": 877
},
{
"epoch": 0.0878,
"grad_norm": 4.62999153137207,
"learning_rate": 4.5615e-06,
"loss": 3.1391,
"mean_token_accuracy": 0.7142857313156128,
"num_tokens": 3144866.0,
"step": 878
},
{
"epoch": 0.0879,
"grad_norm": 5.046702861785889,
"learning_rate": 4.561e-06,
"loss": 2.9835,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 3149655.0,
"step": 879
},
{
"epoch": 0.088,
"grad_norm": 7.228255748748779,
"learning_rate": 4.560500000000001e-06,
"loss": 3.0921,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 3159403.0,
"step": 880
},
{
"epoch": 0.0881,
"grad_norm": 12.11124038696289,
"learning_rate": 4.56e-06,
"loss": 4.2135,
"mean_token_accuracy": 0.6000000238418579,
"num_tokens": 3169034.0,
"step": 881
},
{
"epoch": 0.0882,
"grad_norm": 5.629181385040283,
"learning_rate": 4.559500000000001e-06,
"loss": 3.3113,
"mean_token_accuracy": 0.692307710647583,
"num_tokens": 3178661.0,
"step": 882
},
{
"epoch": 0.0883,
"grad_norm": 4.7438645362854,
"learning_rate": 4.559000000000001e-06,
"loss": 3.4823,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3188291.0,
"step": 883
},
{
"epoch": 0.0884,
"grad_norm": 4.495988368988037,
"learning_rate": 4.5585000000000005e-06,
"loss": 3.5114,
"mean_token_accuracy": 0.65625,
"num_tokens": 3198782.0,
"step": 884
},
{
"epoch": 0.0885,
"grad_norm": 4.081791400909424,
"learning_rate": 4.558e-06,
"loss": 3.0937,
"mean_token_accuracy": 0.71875,
"num_tokens": 3208453.0,
"step": 885
},
{
"epoch": 0.0886,
"grad_norm": 4.134438514709473,
"learning_rate": 4.5575e-06,
"loss": 2.693,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 3218000.0,
"step": 886
},
{
"epoch": 0.0887,
"grad_norm": 3.5392675399780273,
"learning_rate": 4.557000000000001e-06,
"loss": 3.7586,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 3227551.0,
"step": 887
},
{
"epoch": 0.0888,
"grad_norm": 7.260475158691406,
"learning_rate": 4.5565e-06,
"loss": 2.8682,
"mean_token_accuracy": 0.5333333611488342,
"num_tokens": 3232397.0,
"step": 888
},
{
"epoch": 0.0889,
"grad_norm": 20.99604606628418,
"learning_rate": 4.556e-06,
"loss": 3.0677,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 3237228.0,
"step": 889
},
{
"epoch": 0.089,
"grad_norm": 7.173264026641846,
"learning_rate": 4.555500000000001e-06,
"loss": 3.1381,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3246856.0,
"step": 890
},
{
"epoch": 0.0891,
"grad_norm": 8.091524124145508,
"learning_rate": 4.5550000000000004e-06,
"loss": 3.3098,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 3256485.0,
"step": 891
},
{
"epoch": 0.0892,
"grad_norm": 5.005143165588379,
"learning_rate": 4.5545e-06,
"loss": 2.1198,
"mean_token_accuracy": 0.807692289352417,
"num_tokens": 3261331.0,
"step": 892
},
{
"epoch": 0.0893,
"grad_norm": 6.641706466674805,
"learning_rate": 4.554000000000001e-06,
"loss": 4.3824,
"mean_token_accuracy": 0.5128205418586731,
"num_tokens": 3270887.0,
"step": 893
},
{
"epoch": 0.0894,
"grad_norm": 11.394044876098633,
"learning_rate": 4.5535000000000005e-06,
"loss": 2.1161,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3275715.0,
"step": 894
},
{
"epoch": 0.0895,
"grad_norm": 5.717525959014893,
"learning_rate": 4.553e-06,
"loss": 3.4055,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 3285140.0,
"step": 895
},
{
"epoch": 0.0896,
"grad_norm": 5.574117660522461,
"learning_rate": 4.5525e-06,
"loss": 3.1923,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 3294769.0,
"step": 896
},
{
"epoch": 0.0897,
"grad_norm": 16.19025993347168,
"learning_rate": 4.552000000000001e-06,
"loss": 2.7523,
"mean_token_accuracy": 0.5945945978164673,
"num_tokens": 3299626.0,
"step": 897
},
{
"epoch": 0.0898,
"grad_norm": 5.1854071617126465,
"learning_rate": 4.5515e-06,
"loss": 2.9653,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 3309254.0,
"step": 898
},
{
"epoch": 0.0899,
"grad_norm": 6.818474292755127,
"learning_rate": 4.551e-06,
"loss": 3.4091,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 3318885.0,
"step": 899
},
{
"epoch": 0.09,
"grad_norm": 3.457416296005249,
"learning_rate": 4.550500000000001e-06,
"loss": 3.5842,
"mean_token_accuracy": 0.65625,
"num_tokens": 3328518.0,
"step": 900
},
{
"epoch": 0.0901,
"grad_norm": 5.393435478210449,
"learning_rate": 4.5500000000000005e-06,
"loss": 2.713,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3333348.0,
"step": 901
},
{
"epoch": 0.0902,
"grad_norm": 8.805099487304688,
"learning_rate": 4.5495e-06,
"loss": 3.497,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 3342403.0,
"step": 902
},
{
"epoch": 0.0903,
"grad_norm": 5.2177557945251465,
"learning_rate": 4.549000000000001e-06,
"loss": 3.4508,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 3352033.0,
"step": 903
},
{
"epoch": 0.0904,
"grad_norm": 4.211660861968994,
"learning_rate": 4.5485e-06,
"loss": 2.6669,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3361661.0,
"step": 904
},
{
"epoch": 0.0905,
"grad_norm": 11.20156478881836,
"learning_rate": 4.548e-06,
"loss": 3.2132,
"mean_token_accuracy": 0.5925925970077515,
"num_tokens": 3366508.0,
"step": 905
},
{
"epoch": 0.0906,
"grad_norm": 4.004225254058838,
"learning_rate": 4.5475e-06,
"loss": 3.8831,
"mean_token_accuracy": 0.5882353186607361,
"num_tokens": 3376181.0,
"step": 906
},
{
"epoch": 0.0907,
"grad_norm": 9.046636581420898,
"learning_rate": 4.547000000000001e-06,
"loss": 3.2838,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 3385811.0,
"step": 907
},
{
"epoch": 0.0908,
"grad_norm": 4.819136619567871,
"learning_rate": 4.5465000000000004e-06,
"loss": 2.9624,
"mean_token_accuracy": 0.699999988079071,
"num_tokens": 3395442.0,
"step": 908
},
{
"epoch": 0.0909,
"grad_norm": 3.8502681255340576,
"learning_rate": 4.546e-06,
"loss": 3.2773,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3405072.0,
"step": 909
},
{
"epoch": 0.091,
"grad_norm": 4.531764030456543,
"learning_rate": 4.545500000000001e-06,
"loss": 3.7131,
"mean_token_accuracy": 0.6176470518112183,
"num_tokens": 3414467.0,
"step": 910
},
{
"epoch": 0.0911,
"grad_norm": 7.003582954406738,
"learning_rate": 4.5450000000000005e-06,
"loss": 3.0892,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3419214.0,
"step": 911
},
{
"epoch": 0.0912,
"grad_norm": 18.411230087280273,
"learning_rate": 4.5445e-06,
"loss": 3.5797,
"mean_token_accuracy": 0.5882353186607361,
"num_tokens": 3428849.0,
"step": 912
},
{
"epoch": 0.0913,
"grad_norm": 20.415613174438477,
"learning_rate": 4.544000000000001e-06,
"loss": 3.3921,
"mean_token_accuracy": 0.6296296119689941,
"num_tokens": 3438477.0,
"step": 913
},
{
"epoch": 0.0914,
"grad_norm": 4.266574382781982,
"learning_rate": 4.5435e-06,
"loss": 2.3397,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 3443309.0,
"step": 914
},
{
"epoch": 0.0915,
"grad_norm": 6.50775146484375,
"learning_rate": 4.543e-06,
"loss": 3.1461,
"mean_token_accuracy": 0.6428571343421936,
"num_tokens": 3452938.0,
"step": 915
},
{
"epoch": 0.0916,
"grad_norm": 6.758857250213623,
"learning_rate": 4.5425e-06,
"loss": 3.41,
"mean_token_accuracy": 0.6800000071525574,
"num_tokens": 3457783.0,
"step": 916
},
{
"epoch": 0.0917,
"grad_norm": 5.162317276000977,
"learning_rate": 4.542e-06,
"loss": 2.1194,
"mean_token_accuracy": 0.800000011920929,
"num_tokens": 3462609.0,
"step": 917
},
{
"epoch": 0.0918,
"grad_norm": 6.146034240722656,
"learning_rate": 4.5415000000000005e-06,
"loss": 3.1108,
"mean_token_accuracy": 0.6944444179534912,
"num_tokens": 3472406.0,
"step": 918
},
{
"epoch": 0.0919,
"grad_norm": 8.6776762008667,
"learning_rate": 4.541e-06,
"loss": 2.9603,
"mean_token_accuracy": 0.800000011920929,
"num_tokens": 3482075.0,
"step": 919
},
{
"epoch": 0.092,
"grad_norm": 3.166902542114258,
"learning_rate": 4.540500000000001e-06,
"loss": 3.0422,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 3491743.0,
"step": 920
},
{
"epoch": 0.0921,
"grad_norm": 4.2417707443237305,
"learning_rate": 4.540000000000001e-06,
"loss": 4.0414,
"mean_token_accuracy": 0.5365853905677795,
"num_tokens": 3501545.0,
"step": 921
},
{
"epoch": 0.0922,
"grad_norm": 19.136789321899414,
"learning_rate": 4.5395e-06,
"loss": 2.8519,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 3506527.0,
"step": 922
},
{
"epoch": 0.0923,
"grad_norm": 3.6360626220703125,
"learning_rate": 4.539000000000001e-06,
"loss": 3.2641,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 3516340.0,
"step": 923
},
{
"epoch": 0.0924,
"grad_norm": 6.205536842346191,
"learning_rate": 4.5385e-06,
"loss": 3.3623,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 3526012.0,
"step": 924
},
{
"epoch": 0.0925,
"grad_norm": 4.215681076049805,
"learning_rate": 4.5380000000000004e-06,
"loss": 3.467,
"mean_token_accuracy": 0.6875,
"num_tokens": 3535683.0,
"step": 925
},
{
"epoch": 0.0926,
"grad_norm": 3.896066188812256,
"learning_rate": 4.5375e-06,
"loss": 2.8609,
"mean_token_accuracy": 0.75,
"num_tokens": 3540516.0,
"step": 926
},
{
"epoch": 0.0927,
"grad_norm": 6.330342769622803,
"learning_rate": 4.537e-06,
"loss": 2.8771,
"mean_token_accuracy": 0.6285714507102966,
"num_tokens": 3545371.0,
"step": 927
},
{
"epoch": 0.0928,
"grad_norm": 6.20563268661499,
"learning_rate": 4.5365000000000005e-06,
"loss": 3.3855,
"mean_token_accuracy": 0.5526315569877625,
"num_tokens": 3555010.0,
"step": 928
},
{
"epoch": 0.0929,
"grad_norm": 4.471092224121094,
"learning_rate": 4.536e-06,
"loss": 2.7434,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 3564638.0,
"step": 929
},
{
"epoch": 0.093,
"grad_norm": 17.688833236694336,
"learning_rate": 4.535500000000001e-06,
"loss": 3.3639,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 3574577.0,
"step": 930
},
{
"epoch": 0.0931,
"grad_norm": 12.344741821289062,
"learning_rate": 4.535000000000001e-06,
"loss": 4.0459,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 3584206.0,
"step": 931
},
{
"epoch": 0.0932,
"grad_norm": 3.4557347297668457,
"learning_rate": 4.5345e-06,
"loss": 3.3267,
"mean_token_accuracy": 0.5813953280448914,
"num_tokens": 3593644.0,
"step": 932
},
{
"epoch": 0.0933,
"grad_norm": 5.991142272949219,
"learning_rate": 4.534000000000001e-06,
"loss": 3.1538,
"mean_token_accuracy": 0.6896551847457886,
"num_tokens": 3601342.0,
"step": 933
},
{
"epoch": 0.0934,
"grad_norm": 4.608176231384277,
"learning_rate": 4.5335e-06,
"loss": 2.5547,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 3611086.0,
"step": 934
},
{
"epoch": 0.0935,
"grad_norm": 5.438277244567871,
"learning_rate": 4.5330000000000005e-06,
"loss": 3.0907,
"mean_token_accuracy": 0.6857143044471741,
"num_tokens": 3620638.0,
"step": 935
},
{
"epoch": 0.0936,
"grad_norm": 5.953337669372559,
"learning_rate": 4.5325e-06,
"loss": 2.8784,
"mean_token_accuracy": 0.6875,
"num_tokens": 3630109.0,
"step": 936
},
{
"epoch": 0.0937,
"grad_norm": 7.166720867156982,
"learning_rate": 4.532e-06,
"loss": 2.5598,
"mean_token_accuracy": 0.807692289352417,
"num_tokens": 3634936.0,
"step": 937
},
{
"epoch": 0.0938,
"grad_norm": 3.313368558883667,
"learning_rate": 4.5315000000000006e-06,
"loss": 3.2527,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 3639729.0,
"step": 938
},
{
"epoch": 0.0939,
"grad_norm": 17.438325881958008,
"learning_rate": 4.531e-06,
"loss": 2.8456,
"mean_token_accuracy": 0.6744186282157898,
"num_tokens": 3646505.0,
"step": 939
},
{
"epoch": 0.094,
"grad_norm": 5.018527030944824,
"learning_rate": 4.5305e-06,
"loss": 2.9095,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 3656133.0,
"step": 940
},
{
"epoch": 0.0941,
"grad_norm": 3.879505157470703,
"learning_rate": 4.530000000000001e-06,
"loss": 2.7536,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 3665798.0,
"step": 941
},
{
"epoch": 0.0942,
"grad_norm": 5.717472553253174,
"learning_rate": 4.5295000000000004e-06,
"loss": 3.7592,
"mean_token_accuracy": 0.550000011920929,
"num_tokens": 3675477.0,
"step": 942
},
{
"epoch": 0.0943,
"grad_norm": 10.108955383300781,
"learning_rate": 4.529000000000001e-06,
"loss": 3.2769,
"mean_token_accuracy": 0.6470588445663452,
"num_tokens": 3685112.0,
"step": 943
},
{
"epoch": 0.0944,
"grad_norm": 7.050666332244873,
"learning_rate": 4.5285e-06,
"loss": 2.5454,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3694921.0,
"step": 944
},
{
"epoch": 0.0945,
"grad_norm": 5.416757106781006,
"learning_rate": 4.5280000000000005e-06,
"loss": 3.2162,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3704549.0,
"step": 945
},
{
"epoch": 0.0946,
"grad_norm": 5.376708030700684,
"learning_rate": 4.5275e-06,
"loss": 2.8185,
"mean_token_accuracy": 0.65625,
"num_tokens": 3709401.0,
"step": 946
},
{
"epoch": 0.0947,
"grad_norm": 4.984902858734131,
"learning_rate": 4.527e-06,
"loss": 2.6958,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3714229.0,
"step": 947
},
{
"epoch": 0.0948,
"grad_norm": 16.560749053955078,
"learning_rate": 4.526500000000001e-06,
"loss": 4.046,
"mean_token_accuracy": 0.5882353186607361,
"num_tokens": 3723864.0,
"step": 948
},
{
"epoch": 0.0949,
"grad_norm": 9.113065719604492,
"learning_rate": 4.526e-06,
"loss": 3.0869,
"mean_token_accuracy": 0.65625,
"num_tokens": 3733718.0,
"step": 949
},
{
"epoch": 0.095,
"grad_norm": 5.418663024902344,
"learning_rate": 4.5255e-06,
"loss": 2.3203,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3738548.0,
"step": 950
},
{
"epoch": 0.0951,
"grad_norm": 6.178328037261963,
"learning_rate": 4.525000000000001e-06,
"loss": 3.9239,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 3744849.0,
"step": 951
},
{
"epoch": 0.0952,
"grad_norm": 9.780850410461426,
"learning_rate": 4.5245000000000005e-06,
"loss": 3.1519,
"mean_token_accuracy": 0.7333333492279053,
"num_tokens": 3754120.0,
"step": 952
},
{
"epoch": 0.0953,
"grad_norm": 8.48142147064209,
"learning_rate": 4.524e-06,
"loss": 2.967,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 3763748.0,
"step": 953
},
{
"epoch": 0.0954,
"grad_norm": 4.4034833908081055,
"learning_rate": 4.5235e-06,
"loss": 3.2385,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3773378.0,
"step": 954
},
{
"epoch": 0.0955,
"grad_norm": 4.939692974090576,
"learning_rate": 4.5230000000000006e-06,
"loss": 2.904,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3783008.0,
"step": 955
},
{
"epoch": 0.0956,
"grad_norm": 6.736320495605469,
"learning_rate": 4.5225e-06,
"loss": 2.6999,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3792124.0,
"step": 956
},
{
"epoch": 0.0957,
"grad_norm": 6.81355619430542,
"learning_rate": 4.522e-06,
"loss": 3.481,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3801754.0,
"step": 957
},
{
"epoch": 0.0958,
"grad_norm": 5.836269855499268,
"learning_rate": 4.521500000000001e-06,
"loss": 2.7386,
"mean_token_accuracy": 0.75,
"num_tokens": 3811383.0,
"step": 958
},
{
"epoch": 0.0959,
"grad_norm": 3.7719709873199463,
"learning_rate": 4.521e-06,
"loss": 2.9707,
"mean_token_accuracy": 0.6857143044471741,
"num_tokens": 3816459.0,
"step": 959
},
{
"epoch": 0.096,
"grad_norm": 4.742408275604248,
"learning_rate": 4.5205e-06,
"loss": 2.9606,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 3821290.0,
"step": 960
},
{
"epoch": 0.0961,
"grad_norm": 8.12385082244873,
"learning_rate": 4.520000000000001e-06,
"loss": 2.9006,
"mean_token_accuracy": 0.7096773982048035,
"num_tokens": 3830922.0,
"step": 961
},
{
"epoch": 0.0962,
"grad_norm": 6.579589366912842,
"learning_rate": 4.5195000000000005e-06,
"loss": 3.1524,
"mean_token_accuracy": 0.5272727012634277,
"num_tokens": 3840372.0,
"step": 962
},
{
"epoch": 0.0963,
"grad_norm": 4.26094388961792,
"learning_rate": 4.519e-06,
"loss": 3.2311,
"mean_token_accuracy": 0.6551724076271057,
"num_tokens": 3850040.0,
"step": 963
},
{
"epoch": 0.0964,
"grad_norm": 6.610612392425537,
"learning_rate": 4.5185e-06,
"loss": 2.6698,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3859670.0,
"step": 964
},
{
"epoch": 0.0965,
"grad_norm": 4.080739498138428,
"learning_rate": 4.518e-06,
"loss": 2.9407,
"mean_token_accuracy": 0.6333333253860474,
"num_tokens": 3864520.0,
"step": 965
},
{
"epoch": 0.0966,
"grad_norm": 3.548830986022949,
"learning_rate": 4.5175e-06,
"loss": 2.8352,
"mean_token_accuracy": 0.5789473652839661,
"num_tokens": 3874159.0,
"step": 966
},
{
"epoch": 0.0967,
"grad_norm": 8.50513744354248,
"learning_rate": 4.517e-06,
"loss": 2.4878,
"mean_token_accuracy": 0.7200000286102295,
"num_tokens": 3878985.0,
"step": 967
},
{
"epoch": 0.0968,
"grad_norm": 5.5366621017456055,
"learning_rate": 4.516500000000001e-06,
"loss": 3.5839,
"mean_token_accuracy": 0.59375,
"num_tokens": 3889047.0,
"step": 968
},
{
"epoch": 0.0969,
"grad_norm": 5.074110507965088,
"learning_rate": 4.5160000000000005e-06,
"loss": 2.9107,
"mean_token_accuracy": 0.7692307829856873,
"num_tokens": 3898674.0,
"step": 969
},
{
"epoch": 0.097,
"grad_norm": 8.26412296295166,
"learning_rate": 4.5155e-06,
"loss": 3.0991,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 3919761.0,
"step": 970
},
{
"epoch": 0.0971,
"grad_norm": 7.277268886566162,
"learning_rate": 4.515000000000001e-06,
"loss": 3.2437,
"mean_token_accuracy": 0.5483871102333069,
"num_tokens": 3931559.0,
"step": 971
},
{
"epoch": 0.0972,
"grad_norm": 41.236900329589844,
"learning_rate": 4.5145000000000006e-06,
"loss": 3.3532,
"mean_token_accuracy": 0.7241379022598267,
"num_tokens": 3941189.0,
"step": 972
},
{
"epoch": 0.0973,
"grad_norm": 4.325659275054932,
"learning_rate": 4.514e-06,
"loss": 1.6793,
"mean_token_accuracy": 0.8148148059844971,
"num_tokens": 3946017.0,
"step": 973
},
{
"epoch": 0.0974,
"grad_norm": 5.722174167633057,
"learning_rate": 4.5135e-06,
"loss": 2.696,
"mean_token_accuracy": 0.6764705777168274,
"num_tokens": 3955690.0,
"step": 974
},
{
"epoch": 0.0975,
"grad_norm": 4.498652458190918,
"learning_rate": 4.513e-06,
"loss": 2.4435,
"mean_token_accuracy": 0.7666666507720947,
"num_tokens": 3960521.0,
"step": 975
},
{
"epoch": 0.0976,
"grad_norm": 2.8395726680755615,
"learning_rate": 4.5125e-06,
"loss": 2.3715,
"mean_token_accuracy": 0.7666666507720947,
"num_tokens": 3970152.0,
"step": 976
},
{
"epoch": 0.0977,
"grad_norm": 7.930624008178711,
"learning_rate": 4.512e-06,
"loss": 3.0588,
"mean_token_accuracy": 0.7037037014961243,
"num_tokens": 3979818.0,
"step": 977
},
{
"epoch": 0.0978,
"grad_norm": 4.007061958312988,
"learning_rate": 4.5115e-06,
"loss": 2.0042,
"mean_token_accuracy": 0.7599999904632568,
"num_tokens": 3984644.0,
"step": 978
},
{
"epoch": 0.0979,
"grad_norm": 5.475968837738037,
"learning_rate": 4.5110000000000005e-06,
"loss": 2.9798,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 3994272.0,
"step": 979
},
{
"epoch": 0.098,
"grad_norm": 16.530242919921875,
"learning_rate": 4.5105e-06,
"loss": 3.2528,
"mean_token_accuracy": 0.5681818127632141,
"num_tokens": 4005887.0,
"step": 980
},
{
"epoch": 0.0981,
"grad_norm": 8.038772583007812,
"learning_rate": 4.510000000000001e-06,
"loss": 3.1696,
"mean_token_accuracy": 0.6785714030265808,
"num_tokens": 4015945.0,
"step": 981
},
{
"epoch": 0.0982,
"grad_norm": 4.642178058624268,
"learning_rate": 4.509500000000001e-06,
"loss": 3.2347,
"mean_token_accuracy": 0.65625,
"num_tokens": 4025616.0,
"step": 982
},
{
"epoch": 0.0983,
"grad_norm": 6.371053695678711,
"learning_rate": 4.509e-06,
"loss": 3.3301,
"mean_token_accuracy": 0.5588235259056091,
"num_tokens": 4028945.0,
"step": 983
},
{
"epoch": 0.0984,
"grad_norm": 5.853827953338623,
"learning_rate": 4.5085e-06,
"loss": 3.191,
"mean_token_accuracy": 0.6153846383094788,
"num_tokens": 4038071.0,
"step": 984
},
{
"epoch": 0.0985,
"grad_norm": 7.5650315284729,
"learning_rate": 4.508e-06,
"loss": 2.7874,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 4047699.0,
"step": 985
},
{
"epoch": 0.0986,
"grad_norm": 12.324751853942871,
"learning_rate": 4.5075000000000005e-06,
"loss": 2.6426,
"mean_token_accuracy": 0.7777777910232544,
"num_tokens": 4052527.0,
"step": 986
},
{
"epoch": 0.0987,
"grad_norm": 5.404886722564697,
"learning_rate": 4.507e-06,
"loss": 3.2405,
"mean_token_accuracy": 0.6666666865348816,
"num_tokens": 4062161.0,
"step": 987
},
{
"epoch": 0.0988,
"grad_norm": 3.348365068435669,
"learning_rate": 4.5065e-06,
"loss": 3.0801,
"mean_token_accuracy": 0.6190476417541504,
"num_tokens": 4069116.0,
"step": 988
},
{
"epoch": 0.0989,
"grad_norm": 47.723114013671875,
"learning_rate": 4.5060000000000006e-06,
"loss": 3.142,
"mean_token_accuracy": 0.5306122303009033,
"num_tokens": 4080736.0,
"step": 989
},
{
"epoch": 0.099,
"grad_norm": 3.868760824203491,
"learning_rate": 4.5055e-06,
"loss": 3.2296,
"mean_token_accuracy": 0.6774193644523621,
"num_tokens": 4090368.0,
"step": 990
},
{
"epoch": 0.0991,
"grad_norm": 8.417430877685547,
"learning_rate": 4.505e-06,
"loss": 3.1759,
"mean_token_accuracy": 0.6060606241226196,
"num_tokens": 4099762.0,
"step": 991
},
{
"epoch": 0.0992,
"grad_norm": 7.57037878036499,
"learning_rate": 4.504500000000001e-06,
"loss": 2.4549,
"mean_token_accuracy": 0.7878788113594055,
"num_tokens": 4110476.0,
"step": 992
},
{
"epoch": 0.0993,
"grad_norm": 2.8554913997650146,
"learning_rate": 4.504e-06,
"loss": 2.3674,
"mean_token_accuracy": 0.7666666507720947,
"num_tokens": 4120145.0,
"step": 993
},
{
"epoch": 0.0994,
"grad_norm": 5.989246845245361,
"learning_rate": 4.5035e-06,
"loss": 4.1769,
"mean_token_accuracy": 0.529411792755127,
"num_tokens": 4130245.0,
"step": 994
},
{
"epoch": 0.0995,
"grad_norm": 4.163658142089844,
"learning_rate": 4.503e-06,
"loss": 3.7163,
"mean_token_accuracy": 0.6176470518112183,
"num_tokens": 4140738.0,
"step": 995
},
{
"epoch": 0.0996,
"grad_norm": 3.0244357585906982,
"learning_rate": 4.5025000000000005e-06,
"loss": 3.1043,
"mean_token_accuracy": 0.5952380895614624,
"num_tokens": 4150419.0,
"step": 996
},
{
"epoch": 0.0997,
"grad_norm": 5.5394392013549805,
"learning_rate": 4.502e-06,
"loss": 2.9986,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 4160047.0,
"step": 997
},
{
"epoch": 0.0998,
"grad_norm": 5.201902389526367,
"learning_rate": 4.5015e-06,
"loss": 2.8692,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 4169161.0,
"step": 998
},
{
"epoch": 0.0999,
"grad_norm": 5.141040802001953,
"learning_rate": 4.501000000000001e-06,
"loss": 2.8232,
"mean_token_accuracy": 0.65625,
"num_tokens": 4178794.0,
"step": 999
},
{
"epoch": 0.1,
"grad_norm": 15.610444068908691,
"learning_rate": 4.5005e-06,
"loss": 3.0495,
"mean_token_accuracy": 0.7407407164573669,
"num_tokens": 4188460.0,
"step": 1000
}
],
"logging_steps": 1,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 250,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.865239544620298e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}