thesis-experiments-data/baseline-qwen2vl_sft-sft_colqwen-k2-train-29-APRIL-0_3/checkpoint-250/trainer_state.json
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05,
  "eval_steps": 500,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002,
      "grad_norm": 5.698113441467285,
      "learning_rate": 5e-06,
      "loss": 12.2509,
      "mean_token_accuracy": 0.2450142428278923,
      "num_tokens": 16438.0,
      "step": 1
    },
    {
      "epoch": 0.0004,
      "grad_norm": 6.063949108123779,
      "learning_rate": 4.999000000000001e-06,
      "loss": 13.3748,
      "mean_token_accuracy": 0.2325708046555519,
      "num_tokens": 31368.0,
      "step": 2
    },
    {
      "epoch": 0.0006,
      "grad_norm": 8.026069641113281,
      "learning_rate": 4.998e-06,
      "loss": 12.5454,
      "mean_token_accuracy": 0.2398785501718521,
      "num_tokens": 58542.0,
      "step": 3
    },
    {
      "epoch": 0.0008,
      "grad_norm": 6.758559703826904,
      "learning_rate": 4.997000000000001e-06,
      "loss": 13.9155,
      "mean_token_accuracy": 0.21041666716337204,
      "num_tokens": 77844.0,
      "step": 4
    },
    {
      "epoch": 0.001,
      "grad_norm": 6.786197185516357,
      "learning_rate": 4.996e-06,
      "loss": 13.2561,
      "mean_token_accuracy": 0.1967741921544075,
      "num_tokens": 89661.0,
      "step": 5
    },
    {
      "epoch": 0.0012,
      "grad_norm": 9.514345169067383,
      "learning_rate": 4.9950000000000005e-06,
      "loss": 12.3378,
      "mean_token_accuracy": 0.20638945698738098,
      "num_tokens": 104612.0,
      "step": 6
    },
    {
      "epoch": 0.0014,
      "grad_norm": 7.438795566558838,
      "learning_rate": 4.994000000000001e-06,
      "loss": 12.0814,
      "mean_token_accuracy": 0.2916666716337204,
      "num_tokens": 114269.0,
      "step": 7
    },
    {
      "epoch": 0.0016,
      "grad_norm": 15.847832679748535,
      "learning_rate": 4.993e-06,
      "loss": 13.2761,
      "mean_token_accuracy": 0.2450142428278923,
      "num_tokens": 128762.0,
      "step": 8
    },
    {
      "epoch": 0.0018,
      "grad_norm": 7.620685577392578,
      "learning_rate": 4.992e-06,
      "loss": 13.1792,
      "mean_token_accuracy": 0.2165178582072258,
      "num_tokens": 148453.0,
      "step": 9
    },
    {
      "epoch": 0.002,
      "grad_norm": 8.26969051361084,
      "learning_rate": 4.991e-06,
      "loss": 12.853,
      "mean_token_accuracy": 0.21405228972434998,
      "num_tokens": 167693.0,
      "step": 10
    },
    {
      "epoch": 0.0022,
      "grad_norm": 7.328786373138428,
      "learning_rate": 4.9900000000000005e-06,
      "loss": 12.9578,
      "mean_token_accuracy": 0.2115987464785576,
      "num_tokens": 182157.0,
      "step": 11
    },
    {
      "epoch": 0.0024,
      "grad_norm": 7.058177947998047,
      "learning_rate": 4.989000000000001e-06,
      "loss": 12.7711,
      "mean_token_accuracy": 0.2002224698662758,
      "num_tokens": 201457.0,
      "step": 12
    },
    {
      "epoch": 0.0026,
      "grad_norm": 6.48744010925293,
      "learning_rate": 4.988e-06,
      "loss": 13.006,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 215922.0,
      "step": 13
    },
    {
      "epoch": 0.0028,
      "grad_norm": 6.006223201751709,
      "learning_rate": 4.987e-06,
      "loss": 10.5975,
      "mean_token_accuracy": 0.2096899226307869,
      "num_tokens": 225597.0,
      "step": 14
    },
    {
      "epoch": 0.003,
      "grad_norm": 6.274689674377441,
      "learning_rate": 4.986e-06,
      "loss": 11.2766,
      "mean_token_accuracy": 0.26851852238178253,
      "num_tokens": 240100.0,
      "step": 15
    },
    {
      "epoch": 0.0032,
      "grad_norm": 8.211908340454102,
      "learning_rate": 4.9850000000000006e-06,
      "loss": 13.7091,
      "mean_token_accuracy": 0.20202020555734634,
      "num_tokens": 259400.0,
      "step": 16
    },
    {
      "epoch": 0.0034,
      "grad_norm": 6.356493949890137,
      "learning_rate": 4.984000000000001e-06,
      "loss": 12.043,
      "mean_token_accuracy": 0.20519480854272842,
      "num_tokens": 278628.0,
      "step": 17
    },
    {
      "epoch": 0.0036,
      "grad_norm": 6.438048839569092,
      "learning_rate": 4.983e-06,
      "loss": 11.5484,
      "mean_token_accuracy": 0.2343137264251709,
      "num_tokens": 303856.0,
      "step": 18
    },
    {
      "epoch": 0.0038,
      "grad_norm": 7.152822494506836,
      "learning_rate": 4.982e-06,
      "loss": 13.4607,
      "mean_token_accuracy": 0.17500000447034836,
      "num_tokens": 318686.0,
      "step": 19
    },
    {
      "epoch": 0.004,
      "grad_norm": 6.464632034301758,
      "learning_rate": 4.981e-06,
      "loss": 11.7533,
      "mean_token_accuracy": 0.21959459781646729,
      "num_tokens": 337999.0,
      "step": 20
    },
    {
      "epoch": 0.0042,
      "grad_norm": 7.051283836364746,
      "learning_rate": 4.980000000000001e-06,
      "loss": 12.9788,
      "mean_token_accuracy": 0.22011494636535645,
      "num_tokens": 356602.0,
      "step": 21
    },
    {
      "epoch": 0.0044,
      "grad_norm": 9.595747947692871,
      "learning_rate": 4.979e-06,
      "loss": 10.9347,
      "mean_token_accuracy": 0.2645348906517029,
      "num_tokens": 373201.0,
      "step": 22
    },
    {
      "epoch": 0.0046,
      "grad_norm": 7.404125213623047,
      "learning_rate": 4.9780000000000005e-06,
      "loss": 12.5181,
      "mean_token_accuracy": 0.2666666731238365,
      "num_tokens": 387660.0,
      "step": 23
    },
    {
      "epoch": 0.0048,
      "grad_norm": 6.656332969665527,
      "learning_rate": 4.977e-06,
      "loss": 11.5566,
      "mean_token_accuracy": 0.209001787006855,
      "num_tokens": 402129.0,
      "step": 24
    },
    {
      "epoch": 0.005,
      "grad_norm": 6.866989612579346,
      "learning_rate": 4.976e-06,
      "loss": 12.4797,
      "mean_token_accuracy": 0.2379310354590416,
      "num_tokens": 421133.0,
      "step": 25
    },
    {
      "epoch": 0.0052,
      "grad_norm": 6.77735710144043,
      "learning_rate": 4.975000000000001e-06,
      "loss": 13.6256,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 440531.0,
      "step": 26
    },
    {
      "epoch": 0.0054,
      "grad_norm": 8.206353187561035,
      "learning_rate": 4.974e-06,
      "loss": 13.0667,
      "mean_token_accuracy": 0.21791187673807144,
      "num_tokens": 459798.0,
      "step": 27
    },
    {
      "epoch": 0.0056,
      "grad_norm": 7.40612268447876,
      "learning_rate": 4.9730000000000005e-06,
      "loss": 13.5884,
      "mean_token_accuracy": 0.19052419066429138,
      "num_tokens": 478587.0,
      "step": 28
    },
    {
      "epoch": 0.0058,
      "grad_norm": 6.71999454498291,
      "learning_rate": 4.972e-06,
      "loss": 12.0322,
      "mean_token_accuracy": 0.24526315927505493,
      "num_tokens": 492356.0,
      "step": 29
    },
    {
      "epoch": 0.006,
      "grad_norm": 8.108094215393066,
      "learning_rate": 4.971e-06,
      "loss": 12.4858,
      "mean_token_accuracy": 0.2566666677594185,
      "num_tokens": 507242.0,
      "step": 30
    },
    {
      "epoch": 0.0062,
      "grad_norm": 7.7994513511657715,
      "learning_rate": 4.970000000000001e-06,
      "loss": 13.0762,
      "mean_token_accuracy": 0.24568965286016464,
      "num_tokens": 525987.0,
      "step": 31
    },
    {
      "epoch": 0.0064,
      "grad_norm": 7.258217811584473,
      "learning_rate": 4.969e-06,
      "loss": 12.7757,
      "mean_token_accuracy": 0.24137930572032928,
      "num_tokens": 540447.0,
      "step": 32
    },
    {
      "epoch": 0.0066,
      "grad_norm": 14.746047973632812,
      "learning_rate": 4.9680000000000005e-06,
      "loss": 14.4335,
      "mean_token_accuracy": 0.22649572789669037,
      "num_tokens": 560131.0,
      "step": 33
    },
    {
      "epoch": 0.0068,
      "grad_norm": 5.289712429046631,
      "learning_rate": 4.967e-06,
      "loss": 9.6693,
      "mean_token_accuracy": 0.2875000089406967,
      "num_tokens": 579313.0,
      "step": 34
    },
    {
      "epoch": 0.007,
      "grad_norm": 7.960392951965332,
      "learning_rate": 4.966e-06,
      "loss": 13.1738,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 598611.0,
      "step": 35
    },
    {
      "epoch": 0.0072,
      "grad_norm": 8.295417785644531,
      "learning_rate": 4.965000000000001e-06,
      "loss": 11.8889,
      "mean_token_accuracy": 0.24144145101308823,
      "num_tokens": 615588.0,
      "step": 36
    },
    {
      "epoch": 0.0074,
      "grad_norm": 7.051126480102539,
      "learning_rate": 4.964e-06,
      "loss": 12.1364,
      "mean_token_accuracy": 0.23590733855962753,
      "num_tokens": 636032.0,
      "step": 37
    },
    {
      "epoch": 0.0076,
      "grad_norm": 7.895816326141357,
      "learning_rate": 4.963000000000001e-06,
      "loss": 12.8971,
      "mean_token_accuracy": 0.18571428954601288,
      "num_tokens": 655299.0,
      "step": 38
    },
    {
      "epoch": 0.0078,
      "grad_norm": 7.544738292694092,
      "learning_rate": 4.962e-06,
      "loss": 13.0581,
      "mean_token_accuracy": 0.20937500149011612,
      "num_tokens": 674357.0,
      "step": 39
    },
    {
      "epoch": 0.008,
      "grad_norm": 7.548039436340332,
      "learning_rate": 4.9610000000000004e-06,
      "loss": 13.2158,
      "mean_token_accuracy": 0.20892494916915894,
      "num_tokens": 693698.0,
      "step": 40
    },
    {
      "epoch": 0.0082,
      "grad_norm": 7.687658309936523,
      "learning_rate": 4.960000000000001e-06,
      "loss": 12.8524,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 703433.0,
      "step": 41
    },
    {
      "epoch": 0.0084,
      "grad_norm": 8.011468887329102,
      "learning_rate": 4.959e-06,
      "loss": 13.5655,
      "mean_token_accuracy": 0.21635150164365768,
      "num_tokens": 722733.0,
      "step": 42
    },
    {
      "epoch": 0.0086,
      "grad_norm": 11.084840774536133,
      "learning_rate": 4.958000000000001e-06,
      "loss": 12.7054,
      "mean_token_accuracy": 0.21746384352445602,
      "num_tokens": 738121.0,
      "step": 43
    },
    {
      "epoch": 0.0088,
      "grad_norm": 9.436426162719727,
      "learning_rate": 4.957e-06,
      "loss": 13.5213,
      "mean_token_accuracy": 0.22783251106739044,
      "num_tokens": 757723.0,
      "step": 44
    },
    {
      "epoch": 0.009,
      "grad_norm": 8.382990837097168,
      "learning_rate": 4.9560000000000005e-06,
      "loss": 12.6328,
      "mean_token_accuracy": 0.2567741870880127,
      "num_tokens": 772646.0,
      "step": 45
    },
    {
      "epoch": 0.0092,
      "grad_norm": 8.272336959838867,
      "learning_rate": 4.955e-06,
      "loss": 13.0365,
      "mean_token_accuracy": 0.2611111178994179,
      "num_tokens": 792334.0,
      "step": 46
    },
    {
      "epoch": 0.0094,
      "grad_norm": 10.347405433654785,
      "learning_rate": 4.954e-06,
      "loss": 12.8164,
      "mean_token_accuracy": 0.22616633027791977,
      "num_tokens": 811713.0,
      "step": 47
    },
    {
      "epoch": 0.0096,
      "grad_norm": 7.27515983581543,
      "learning_rate": 4.953000000000001e-06,
      "loss": 11.0123,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 832998.0,
      "step": 48
    },
    {
      "epoch": 0.0098,
      "grad_norm": 8.973237991333008,
      "learning_rate": 4.952e-06,
      "loss": 12.9664,
      "mean_token_accuracy": 0.19805195182561874,
      "num_tokens": 861717.0,
      "step": 49
    },
    {
      "epoch": 0.01,
      "grad_norm": 8.738320350646973,
      "learning_rate": 4.9510000000000005e-06,
      "loss": 12.5728,
      "mean_token_accuracy": 0.24344827979803085,
      "num_tokens": 875736.0,
      "step": 50
    },
    {
      "epoch": 0.0102,
      "grad_norm": 8.51733112335205,
      "learning_rate": 4.95e-06,
      "loss": 12.7596,
      "mean_token_accuracy": 0.22380952537059784,
      "num_tokens": 894386.0,
      "step": 51
    },
    {
      "epoch": 0.0104,
      "grad_norm": 8.787413597106934,
      "learning_rate": 4.949e-06,
      "loss": 12.4493,
      "mean_token_accuracy": 0.268075630068779,
      "num_tokens": 908848.0,
      "step": 52
    },
    {
      "epoch": 0.0106,
      "grad_norm": 7.849542617797852,
      "learning_rate": 4.948000000000001e-06,
      "loss": 12.7514,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 923309.0,
      "step": 53
    },
    {
      "epoch": 0.0108,
      "grad_norm": 8.378942489624023,
      "learning_rate": 4.947e-06,
      "loss": 11.4953,
      "mean_token_accuracy": 0.2290322557091713,
      "num_tokens": 938298.0,
      "step": 54
    },
    {
      "epoch": 0.011,
      "grad_norm": 8.311882972717285,
      "learning_rate": 4.946000000000001e-06,
      "loss": 12.0904,
      "mean_token_accuracy": 0.24014336615800858,
      "num_tokens": 948288.0,
      "step": 55
    },
    {
      "epoch": 0.0112,
      "grad_norm": 9.599881172180176,
      "learning_rate": 4.945e-06,
      "loss": 14.3569,
      "mean_token_accuracy": 0.22649572789669037,
      "num_tokens": 967543.0,
      "step": 56
    },
    {
      "epoch": 0.0114,
      "grad_norm": 8.84776496887207,
      "learning_rate": 4.9440000000000004e-06,
      "loss": 12.2268,
      "mean_token_accuracy": 0.28287841379642487,
      "num_tokens": 981726.0,
      "step": 57
    },
    {
      "epoch": 0.0116,
      "grad_norm": 11.259871482849121,
      "learning_rate": 4.943000000000001e-06,
      "loss": 12.6356,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 996225.0,
      "step": 58
    },
    {
      "epoch": 0.0118,
      "grad_norm": 10.529711723327637,
      "learning_rate": 4.942e-06,
      "loss": 11.9829,
      "mean_token_accuracy": 0.26986077427864075,
      "num_tokens": 1015573.0,
      "step": 59
    },
    {
      "epoch": 0.012,
      "grad_norm": 8.90577220916748,
      "learning_rate": 4.941000000000001e-06,
      "loss": 11.4895,
      "mean_token_accuracy": 0.2374911978840828,
      "num_tokens": 1034198.0,
      "step": 60
    },
    {
      "epoch": 0.0122,
      "grad_norm": 7.851955413818359,
      "learning_rate": 4.94e-06,
      "loss": 12.0083,
      "mean_token_accuracy": 0.22478991746902466,
      "num_tokens": 1045419.0,
      "step": 61
    },
    {
      "epoch": 0.0124,
      "grad_norm": 9.812698364257812,
      "learning_rate": 4.9390000000000005e-06,
      "loss": 12.9777,
      "mean_token_accuracy": 0.2379310354590416,
      "num_tokens": 1064723.0,
      "step": 62
    },
    {
      "epoch": 0.0126,
      "grad_norm": 8.35107707977295,
      "learning_rate": 4.938000000000001e-06,
      "loss": 11.3187,
      "mean_token_accuracy": 0.24358975142240524,
      "num_tokens": 1079171.0,
      "step": 63
    },
    {
      "epoch": 0.0128,
      "grad_norm": 7.16640567779541,
      "learning_rate": 4.937e-06,
      "loss": 10.2182,
      "mean_token_accuracy": 0.34151194989681244,
      "num_tokens": 1093695.0,
      "step": 64
    },
    {
      "epoch": 0.013,
      "grad_norm": 10.18386459350586,
      "learning_rate": 4.936e-06,
      "loss": 12.2175,
      "mean_token_accuracy": 0.25961539149284363,
      "num_tokens": 1108580.0,
      "step": 65
    },
    {
      "epoch": 0.0132,
      "grad_norm": 8.232446670532227,
      "learning_rate": 4.935e-06,
      "loss": 11.8182,
      "mean_token_accuracy": 0.27314814925193787,
      "num_tokens": 1123098.0,
      "step": 66
    },
    {
      "epoch": 0.0134,
      "grad_norm": 8.809263229370117,
      "learning_rate": 4.9340000000000005e-06,
      "loss": 10.9148,
      "mean_token_accuracy": 0.20927418768405914,
      "num_tokens": 1137548.0,
      "step": 67
    },
    {
      "epoch": 0.0136,
      "grad_norm": 9.865777015686035,
      "learning_rate": 4.933000000000001e-06,
      "loss": 12.6294,
      "mean_token_accuracy": 0.22962962836027145,
      "num_tokens": 1156845.0,
      "step": 68
    },
    {
      "epoch": 0.0138,
      "grad_norm": 7.259024620056152,
      "learning_rate": 4.932e-06,
      "loss": 9.7717,
      "mean_token_accuracy": 0.26875001192092896,
      "num_tokens": 1171363.0,
      "step": 69
    },
    {
      "epoch": 0.014,
      "grad_norm": 11.756244659423828,
      "learning_rate": 4.931e-06,
      "loss": 12.6686,
      "mean_token_accuracy": 0.28285714983940125,
      "num_tokens": 1185814.0,
      "step": 70
    },
    {
      "epoch": 0.0142,
      "grad_norm": 9.128395080566406,
      "learning_rate": 4.93e-06,
      "loss": 11.1826,
      "mean_token_accuracy": 0.21765056997537613,
      "num_tokens": 1200437.0,
      "step": 71
    },
    {
      "epoch": 0.0144,
      "grad_norm": 8.472599029541016,
      "learning_rate": 4.929000000000001e-06,
      "loss": 11.6617,
      "mean_token_accuracy": 0.2060810774564743,
      "num_tokens": 1219264.0,
      "step": 72
    },
    {
      "epoch": 0.0146,
      "grad_norm": 11.549400329589844,
      "learning_rate": 4.928000000000001e-06,
      "loss": 12.9588,
      "mean_token_accuracy": 0.25,
      "num_tokens": 1238560.0,
      "step": 73
    },
    {
      "epoch": 0.0148,
      "grad_norm": 11.084433555603027,
      "learning_rate": 4.9270000000000004e-06,
      "loss": 10.4111,
      "mean_token_accuracy": 0.21954887360334396,
      "num_tokens": 1247376.0,
      "step": 74
    },
    {
      "epoch": 0.015,
      "grad_norm": 10.906563758850098,
      "learning_rate": 4.926e-06,
      "loss": 12.5231,
      "mean_token_accuracy": 0.23307790607213974,
      "num_tokens": 1266611.0,
      "step": 75
    },
    {
      "epoch": 0.0152,
      "grad_norm": 9.466647148132324,
      "learning_rate": 4.925e-06,
      "loss": 11.4363,
      "mean_token_accuracy": 0.24193547666072845,
      "num_tokens": 1281132.0,
      "step": 76
    },
    {
      "epoch": 0.0154,
      "grad_norm": 11.185935020446777,
      "learning_rate": 4.924000000000001e-06,
      "loss": 12.6383,
      "mean_token_accuracy": 0.22685185074806213,
      "num_tokens": 1301049.0,
      "step": 77
    },
    {
      "epoch": 0.0156,
      "grad_norm": 11.0143461227417,
      "learning_rate": 4.923000000000001e-06,
      "loss": 12.479,
      "mean_token_accuracy": 0.22962962836027145,
      "num_tokens": 1322278.0,
      "step": 78
    },
    {
      "epoch": 0.0158,
      "grad_norm": 12.330080032348633,
      "learning_rate": 4.9220000000000005e-06,
      "loss": 12.2351,
      "mean_token_accuracy": 0.2675606608390808,
      "num_tokens": 1341536.0,
      "step": 79
    },
    {
      "epoch": 0.016,
      "grad_norm": 10.486513137817383,
      "learning_rate": 4.921e-06,
      "loss": 11.6896,
      "mean_token_accuracy": 0.25356507301330566,
      "num_tokens": 1355885.0,
      "step": 80
    },
    {
      "epoch": 0.0162,
      "grad_norm": 12.150262832641602,
      "learning_rate": 4.92e-06,
      "loss": 12.5725,
      "mean_token_accuracy": 0.2321428582072258,
      "num_tokens": 1375181.0,
      "step": 81
    },
    {
      "epoch": 0.0164,
      "grad_norm": 11.387964248657227,
      "learning_rate": 4.919000000000001e-06,
      "loss": 11.5796,
      "mean_token_accuracy": 0.29461538791656494,
      "num_tokens": 1384853.0,
      "step": 82
    },
    {
      "epoch": 0.0166,
      "grad_norm": 9.878727912902832,
      "learning_rate": 4.918e-06,
      "loss": 11.7534,
      "mean_token_accuracy": 0.24473684281110764,
      "num_tokens": 1401178.0,
      "step": 83
    },
    {
      "epoch": 0.0168,
      "grad_norm": 9.827662467956543,
      "learning_rate": 4.9170000000000005e-06,
      "loss": 12.0345,
      "mean_token_accuracy": 0.18034055829048157,
      "num_tokens": 1419976.0,
      "step": 84
    },
    {
      "epoch": 0.017,
      "grad_norm": 10.327628135681152,
      "learning_rate": 4.916e-06,
      "loss": 11.1049,
      "mean_token_accuracy": 0.3095238208770752,
      "num_tokens": 1429652.0,
      "step": 85
    },
    {
      "epoch": 0.0172,
      "grad_norm": 10.67590045928955,
      "learning_rate": 4.915e-06,
      "loss": 10.7087,
      "mean_token_accuracy": 0.2750582844018936,
      "num_tokens": 1444113.0,
      "step": 86
    },
    {
      "epoch": 0.0174,
      "grad_norm": 20.292959213256836,
      "learning_rate": 4.914000000000001e-06,
      "loss": 10.797,
      "mean_token_accuracy": 0.32500000298023224,
      "num_tokens": 1453773.0,
      "step": 87
    },
    {
      "epoch": 0.0176,
      "grad_norm": 8.3117036819458,
      "learning_rate": 4.913e-06,
      "loss": 9.394,
      "mean_token_accuracy": 0.31915584206581116,
      "num_tokens": 1475024.0,
      "step": 88
    },
    {
      "epoch": 0.0178,
      "grad_norm": 11.334404945373535,
      "learning_rate": 4.9120000000000006e-06,
      "loss": 11.0718,
      "mean_token_accuracy": 0.2678571492433548,
      "num_tokens": 1489489.0,
      "step": 89
    },
    {
      "epoch": 0.018,
      "grad_norm": 15.36023235321045,
      "learning_rate": 4.911e-06,
      "loss": 11.7992,
      "mean_token_accuracy": 0.28607918322086334,
      "num_tokens": 1508816.0,
      "step": 90
    },
    {
      "epoch": 0.0182,
      "grad_norm": 11.725635528564453,
      "learning_rate": 4.9100000000000004e-06,
      "loss": 11.9207,
      "mean_token_accuracy": 0.26123301684856415,
      "num_tokens": 1530050.0,
      "step": 91
    },
    {
      "epoch": 0.0184,
      "grad_norm": 14.75428295135498,
      "learning_rate": 4.909000000000001e-06,
      "loss": 11.0294,
      "mean_token_accuracy": 0.21236559003591537,
      "num_tokens": 1544713.0,
      "step": 92
    },
    {
      "epoch": 0.0186,
      "grad_norm": 10.447436332702637,
      "learning_rate": 4.908e-06,
      "loss": 9.7536,
      "mean_token_accuracy": 0.24568965286016464,
      "num_tokens": 1559226.0,
      "step": 93
    },
    {
      "epoch": 0.0188,
      "grad_norm": 14.036280632019043,
      "learning_rate": 4.907000000000001e-06,
      "loss": 12.2065,
      "mean_token_accuracy": 0.2771739065647125,
      "num_tokens": 1568898.0,
      "step": 94
    },
    {
      "epoch": 0.019,
      "grad_norm": 13.364744186401367,
      "learning_rate": 4.906e-06,
      "loss": 13.168,
      "mean_token_accuracy": 0.2321428582072258,
      "num_tokens": 1588133.0,
      "step": 95
    },
    {
      "epoch": 0.0192,
      "grad_norm": 12.441611289978027,
      "learning_rate": 4.9050000000000005e-06,
      "loss": 11.6806,
      "mean_token_accuracy": 0.21875,
      "num_tokens": 1607218.0,
      "step": 96
    },
    {
      "epoch": 0.0194,
      "grad_norm": 11.559666633605957,
      "learning_rate": 4.904000000000001e-06,
      "loss": 12.2055,
      "mean_token_accuracy": 0.23010753095149994,
      "num_tokens": 1626557.0,
      "step": 97
    },
    {
      "epoch": 0.0196,
      "grad_norm": 11.131147384643555,
      "learning_rate": 4.903e-06,
      "loss": 11.4201,
      "mean_token_accuracy": 0.2931034490466118,
      "num_tokens": 1641017.0,
      "step": 98
    },
    {
      "epoch": 0.0198,
      "grad_norm": 12.186894416809082,
      "learning_rate": 4.902000000000001e-06,
      "loss": 12.1568,
      "mean_token_accuracy": 0.23180076479911804,
      "num_tokens": 1655475.0,
      "step": 99
    },
    {
      "epoch": 0.02,
      "grad_norm": 11.864778518676758,
      "learning_rate": 4.901e-06,
      "loss": 12.4851,
      "mean_token_accuracy": 0.21008403599262238,
      "num_tokens": 1674815.0,
      "step": 100
    },
    {
      "epoch": 0.0202,
      "grad_norm": 14.35185718536377,
      "learning_rate": 4.9000000000000005e-06,
      "loss": 11.1814,
      "mean_token_accuracy": 0.2612612694501877,
      "num_tokens": 1686681.0,
      "step": 101
    },
    {
      "epoch": 0.0204,
      "grad_norm": 18.715627670288086,
      "learning_rate": 4.899e-06,
      "loss": 11.632,
      "mean_token_accuracy": 0.30199430882930756,
      "num_tokens": 1701565.0,
      "step": 102
    },
    {
      "epoch": 0.0206,
      "grad_norm": 12.892010688781738,
      "learning_rate": 4.898e-06,
      "loss": 12.3157,
      "mean_token_accuracy": 0.23806367069482803,
      "num_tokens": 1715938.0,
      "step": 103
    },
    {
      "epoch": 0.0208,
      "grad_norm": 13.078173637390137,
      "learning_rate": 4.897000000000001e-06,
      "loss": 11.4954,
      "mean_token_accuracy": 0.22177419066429138,
      "num_tokens": 1734685.0,
      "step": 104
    },
    {
      "epoch": 0.021,
      "grad_norm": 14.580618858337402,
      "learning_rate": 4.896e-06,
      "loss": 10.4619,
      "mean_token_accuracy": 0.2797202914953232,
      "num_tokens": 1757899.0,
      "step": 105
    },
    {
      "epoch": 0.0212,
      "grad_norm": 13.174345016479492,
      "learning_rate": 4.8950000000000006e-06,
      "loss": 10.5835,
      "mean_token_accuracy": 0.27546295523643494,
      "num_tokens": 1778410.0,
      "step": 106
    },
    {
      "epoch": 0.0214,
      "grad_norm": 13.970446586608887,
      "learning_rate": 4.894e-06,
      "loss": 11.1265,
      "mean_token_accuracy": 0.2693749964237213,
      "num_tokens": 1791819.0,
      "step": 107
    },
    {
      "epoch": 0.0216,
      "grad_norm": 11.994514465332031,
      "learning_rate": 4.893e-06,
      "loss": 11.3511,
      "mean_token_accuracy": 0.25462962687015533,
      "num_tokens": 1811118.0,
      "step": 108
    },
    {
      "epoch": 0.0218,
      "grad_norm": 13.328775405883789,
      "learning_rate": 4.892000000000001e-06,
      "loss": 11.9787,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 1825598.0,
      "step": 109
    },
    {
      "epoch": 0.022,
      "grad_norm": 11.813949584960938,
      "learning_rate": 4.891e-06,
      "loss": 10.614,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 1840096.0,
      "step": 110
    },
    {
      "epoch": 0.0222,
      "grad_norm": 17.409263610839844,
      "learning_rate": 4.890000000000001e-06,
      "loss": 10.5087,
      "mean_token_accuracy": 0.32692308723926544,
      "num_tokens": 1849750.0,
      "step": 111
    },
    {
      "epoch": 0.0224,
      "grad_norm": 19.462722778320312,
      "learning_rate": 4.889e-06,
      "loss": 10.2288,
      "mean_token_accuracy": 0.25729166716337204,
      "num_tokens": 1859433.0,
      "step": 112
    },
    {
      "epoch": 0.0226,
      "grad_norm": 11.595419883728027,
      "learning_rate": 4.8880000000000005e-06,
      "loss": 11.0996,
      "mean_token_accuracy": 0.2807881832122803,
      "num_tokens": 1873892.0,
      "step": 113
    },
    {
      "epoch": 0.0228,
      "grad_norm": 14.413046836853027,
      "learning_rate": 4.887000000000001e-06,
      "loss": 11.6535,
      "mean_token_accuracy": 0.2341153472661972,
      "num_tokens": 1893196.0,
      "step": 114
    },
    {
      "epoch": 0.023,
      "grad_norm": 12.218829154968262,
      "learning_rate": 4.886e-06,
      "loss": 10.9169,
      "mean_token_accuracy": 0.2557603716850281,
      "num_tokens": 1907676.0,
      "step": 115
    },
    {
      "epoch": 0.0232,
      "grad_norm": 16.51720428466797,
      "learning_rate": 4.885000000000001e-06,
      "loss": 9.8406,
      "mean_token_accuracy": 0.25968992710113525,
      "num_tokens": 1921596.0,
      "step": 116
    },
    {
      "epoch": 0.0234,
      "grad_norm": 10.608232498168945,
      "learning_rate": 4.884e-06,
      "loss": 9.9809,
      "mean_token_accuracy": 0.22068965435028076,
      "num_tokens": 1940247.0,
      "step": 117
    },
    {
      "epoch": 0.0236,
      "grad_norm": 14.17568302154541,
      "learning_rate": 4.8830000000000005e-06,
      "loss": 11.3908,
      "mean_token_accuracy": 0.23790322244167328,
      "num_tokens": 1959268.0,
      "step": 118
    },
    {
      "epoch": 0.0238,
      "grad_norm": 12.197131156921387,
      "learning_rate": 4.882000000000001e-06,
      "loss": 10.9438,
      "mean_token_accuracy": 0.2538699731230736,
      "num_tokens": 1978218.0,
      "step": 119
    },
    {
      "epoch": 0.024,
      "grad_norm": 15.261062622070312,
      "learning_rate": 4.881e-06,
      "loss": 11.0329,
      "mean_token_accuracy": 0.24621212482452393,
      "num_tokens": 1997519.0,
      "step": 120
    },
    {
      "epoch": 0.0242,
      "grad_norm": 13.116536140441895,
      "learning_rate": 4.880000000000001e-06,
      "loss": 11.9918,
      "mean_token_accuracy": 0.24137930572032928,
      "num_tokens": 2016817.0,
      "step": 121
    },
    {
      "epoch": 0.0244,
      "grad_norm": 18.174373626708984,
      "learning_rate": 4.879e-06,
      "loss": 11.0414,
      "mean_token_accuracy": 0.25833334028720856,
      "num_tokens": 2035875.0,
      "step": 122
    },
    {
      "epoch": 0.0246,
      "grad_norm": 17.258121490478516,
      "learning_rate": 4.8780000000000006e-06,
      "loss": 11.1173,
      "mean_token_accuracy": 0.3010057359933853,
      "num_tokens": 2050330.0,
      "step": 123
    },
    {
      "epoch": 0.0248,
      "grad_norm": 12.636884689331055,
      "learning_rate": 4.877000000000001e-06,
      "loss": 11.3399,
      "mean_token_accuracy": 0.2879464328289032,
      "num_tokens": 2070156.0,
      "step": 124
    },
    {
      "epoch": 0.025,
      "grad_norm": 18.457618713378906,
      "learning_rate": 4.876e-06,
      "loss": 11.5595,
      "mean_token_accuracy": 0.28418803960084915,
      "num_tokens": 2084771.0,
      "step": 125
    },
    {
      "epoch": 0.0252,
      "grad_norm": 14.281397819519043,
      "learning_rate": 4.875e-06,
      "loss": 10.9901,
      "mean_token_accuracy": 0.28140393644571304,
      "num_tokens": 2104228.0,
      "step": 126
    },
    {
      "epoch": 0.0254,
      "grad_norm": 13.08484935760498,
      "learning_rate": 4.874e-06,
      "loss": 9.4802,
      "mean_token_accuracy": 0.2637759745121002,
      "num_tokens": 2118877.0,
      "step": 127
    },
    {
      "epoch": 0.0256,
      "grad_norm": 11.949925422668457,
      "learning_rate": 4.873000000000001e-06,
      "loss": 10.3703,
      "mean_token_accuracy": 0.2619825750589371,
      "num_tokens": 2133298.0,
      "step": 128
    },
    {
      "epoch": 0.0258,
      "grad_norm": 14.950297355651855,
      "learning_rate": 4.872000000000001e-06,
      "loss": 10.6116,
      "mean_token_accuracy": 0.33000001311302185,
      "num_tokens": 2142955.0,
      "step": 129
    },
    {
      "epoch": 0.026,
      "grad_norm": 16.433286666870117,
      "learning_rate": 4.8710000000000005e-06,
      "loss": 9.4482,
      "mean_token_accuracy": 0.3333333432674408,
      "num_tokens": 2159447.0,
      "step": 130
    },
    {
      "epoch": 0.0262,
      "grad_norm": 12.467981338500977,
      "learning_rate": 4.87e-06,
      "loss": 10.2354,
      "mean_token_accuracy": 0.33796295523643494,
      "num_tokens": 2178352.0,
      "step": 131
    },
    {
      "epoch": 0.0264,
      "grad_norm": 11.493000030517578,
      "learning_rate": 4.869e-06,
      "loss": 9.0597,
      "mean_token_accuracy": 0.2736175060272217,
      "num_tokens": 2199174.0,
      "step": 132
    },
    {
      "epoch": 0.0266,
      "grad_norm": 13.966115951538086,
      "learning_rate": 4.868000000000001e-06,
      "loss": 10.3177,
      "mean_token_accuracy": 0.3325917571783066,
      "num_tokens": 2213636.0,
      "step": 133
    },
    {
      "epoch": 0.0268,
      "grad_norm": 13.971321105957031,
      "learning_rate": 4.867000000000001e-06,
      "loss": 10.2564,
      "mean_token_accuracy": 0.31481482088565826,
      "num_tokens": 2228050.0,
      "step": 134
    },
    {
      "epoch": 0.027,
      "grad_norm": 16.246124267578125,
      "learning_rate": 4.8660000000000005e-06,
      "loss": 10.7549,
      "mean_token_accuracy": 0.30943846702575684,
      "num_tokens": 2247348.0,
      "step": 135
    },
    {
      "epoch": 0.0272,
      "grad_norm": 17.702425003051758,
      "learning_rate": 4.865e-06,
      "loss": 10.1729,
      "mean_token_accuracy": 0.3575989753007889,
      "num_tokens": 2261806.0,
      "step": 136
    },
    {
      "epoch": 0.0274,
      "grad_norm": 13.732104301452637,
      "learning_rate": 4.864e-06,
      "loss": 10.9004,
      "mean_token_accuracy": 0.28607918322086334,
      "num_tokens": 2281224.0,
      "step": 137
    },
    {
      "epoch": 0.0276,
      "grad_norm": 17.568925857543945,
      "learning_rate": 4.863000000000001e-06,
      "loss": 10.1658,
      "mean_token_accuracy": 0.2857142984867096,
      "num_tokens": 2295720.0,
      "step": 138
    },
    {
      "epoch": 0.0278,
      "grad_norm": 13.424271583557129,
      "learning_rate": 4.862e-06,
      "loss": 10.3543,
      "mean_token_accuracy": 0.28928571939468384,
      "num_tokens": 2310345.0,
      "step": 139
    },
    {
      "epoch": 0.028,
      "grad_norm": 14.413524627685547,
      "learning_rate": 4.8610000000000006e-06,
      "loss": 11.3822,
      "mean_token_accuracy": 0.22828783839941025,
      "num_tokens": 2347736.0,
      "step": 140
    },
    {
      "epoch": 0.0282,
      "grad_norm": 22.185630798339844,
      "learning_rate": 4.86e-06,
      "loss": 10.6185,
      "mean_token_accuracy": 0.2911111190915108,
      "num_tokens": 2361685.0,
      "step": 141
    },
    {
      "epoch": 0.0284,
      "grad_norm": 12.253998756408691,
      "learning_rate": 4.859e-06,
      "loss": 9.2399,
      "mean_token_accuracy": 0.288621261715889,
      "num_tokens": 2376158.0,
      "step": 142
    },
    {
      "epoch": 0.0286,
      "grad_norm": 16.229686737060547,
      "learning_rate": 4.858000000000001e-06,
      "loss": 10.3377,
      "mean_token_accuracy": 0.22227822244167328,
      "num_tokens": 2391501.0,
      "step": 143
    },
    {
      "epoch": 0.0288,
      "grad_norm": 9.664397239685059,
      "learning_rate": 4.857e-06,
      "loss": 9.3351,
      "mean_token_accuracy": 0.26456456631422043,
      "num_tokens": 2411105.0,
      "step": 144
    },
    {
      "epoch": 0.029,
      "grad_norm": 12.8119478225708,
      "learning_rate": 4.856e-06,
      "loss": 9.87,
      "mean_token_accuracy": 0.24166666716337204,
      "num_tokens": 2431185.0,
      "step": 145
    },
    {
      "epoch": 0.0292,
      "grad_norm": 10.490764617919922,
      "learning_rate": 4.855e-06,
      "loss": 8.7889,
      "mean_token_accuracy": 0.36666667461395264,
      "num_tokens": 2452470.0,
      "step": 146
    },
    {
      "epoch": 0.0294,
      "grad_norm": 13.65211296081543,
      "learning_rate": 4.8540000000000005e-06,
      "loss": 10.4286,
      "mean_token_accuracy": 0.2718253955245018,
      "num_tokens": 2472396.0,
      "step": 147
    },
    {
      "epoch": 0.0296,
      "grad_norm": 18.866209030151367,
      "learning_rate": 4.853000000000001e-06,
      "loss": 10.2372,
      "mean_token_accuracy": 0.38141025602817535,
      "num_tokens": 2486886.0,
      "step": 148
    },
    {
      "epoch": 0.0298,
      "grad_norm": 14.852785110473633,
      "learning_rate": 4.852e-06,
      "loss": 10.0487,
      "mean_token_accuracy": 0.30820105969905853,
      "num_tokens": 2506143.0,
      "step": 149
    },
    {
      "epoch": 0.03,
      "grad_norm": 13.972378730773926,
      "learning_rate": 4.851e-06,
      "loss": 10.6291,
      "mean_token_accuracy": 0.25820106267929077,
      "num_tokens": 2520401.0,
      "step": 150
    },
    {
      "epoch": 0.0302,
      "grad_norm": 14.618459701538086,
      "learning_rate": 4.85e-06,
      "loss": 10.7579,
      "mean_token_accuracy": 0.23571428656578064,
      "num_tokens": 2539135.0,
      "step": 151
    },
    {
      "epoch": 0.0304,
      "grad_norm": 15.176739692687988,
      "learning_rate": 4.8490000000000005e-06,
      "loss": 9.6595,
      "mean_token_accuracy": 0.2510339096188545,
      "num_tokens": 2558562.0,
      "step": 152
    },
    {
      "epoch": 0.0306,
      "grad_norm": 16.972919464111328,
      "learning_rate": 4.848000000000001e-06,
      "loss": 10.8999,
      "mean_token_accuracy": 0.25925925374031067,
      "num_tokens": 2577856.0,
      "step": 153
    },
    {
      "epoch": 0.0308,
      "grad_norm": 16.451147079467773,
      "learning_rate": 4.847e-06,
      "loss": 8.5505,
      "mean_token_accuracy": 0.36249999701976776,
      "num_tokens": 2592246.0,
      "step": 154
    },
    {
      "epoch": 0.031,
      "grad_norm": 24.95278549194336,
      "learning_rate": 4.846e-06,
      "loss": 10.2367,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 2607133.0,
      "step": 155
    },
    {
      "epoch": 0.0312,
      "grad_norm": 15.770346641540527,
      "learning_rate": 4.845e-06,
      "loss": 9.6085,
      "mean_token_accuracy": 0.27272728085517883,
      "num_tokens": 2621734.0,
      "step": 156
    },
    {
      "epoch": 0.0314,
      "grad_norm": 12.314064025878906,
      "learning_rate": 4.8440000000000005e-06,
      "loss": 9.816,
      "mean_token_accuracy": 0.2540322542190552,
      "num_tokens": 2641357.0,
      "step": 157
    },
    {
      "epoch": 0.0316,
      "grad_norm": 13.379799842834473,
      "learning_rate": 4.843000000000001e-06,
      "loss": 10.378,
      "mean_token_accuracy": 0.32804232835769653,
      "num_tokens": 2660177.0,
      "step": 158
    },
    {
      "epoch": 0.0318,
      "grad_norm": 17.458240509033203,
      "learning_rate": 4.842e-06,
      "loss": 9.3842,
      "mean_token_accuracy": 0.2557164579629898,
      "num_tokens": 2679528.0,
      "step": 159
    },
    {
      "epoch": 0.032,
      "grad_norm": 25.741785049438477,
      "learning_rate": 4.841e-06,
      "loss": 9.0768,
      "mean_token_accuracy": 0.3270474076271057,
      "num_tokens": 2693652.0,
      "step": 160
    },
    {
      "epoch": 0.0322,
      "grad_norm": 13.557204246520996,
      "learning_rate": 4.84e-06,
      "loss": 9.6471,
      "mean_token_accuracy": 0.28735632449388504,
      "num_tokens": 2713320.0,
      "step": 161
    },
    {
      "epoch": 0.0324,
      "grad_norm": 14.830061912536621,
      "learning_rate": 4.839000000000001e-06,
      "loss": 11.3496,
      "mean_token_accuracy": 0.24049513787031174,
      "num_tokens": 2733424.0,
      "step": 162
    },
    {
      "epoch": 0.0326,
      "grad_norm": 12.371265411376953,
      "learning_rate": 4.838e-06,
      "loss": 8.6217,
      "mean_token_accuracy": 0.25988225638866425,
      "num_tokens": 2752613.0,
      "step": 163
    },
    {
      "epoch": 0.0328,
      "grad_norm": 12.722640037536621,
      "learning_rate": 4.8370000000000004e-06,
      "loss": 9.2979,
      "mean_token_accuracy": 0.2337121218442917,
      "num_tokens": 2773896.0,
      "step": 164
    },
    {
      "epoch": 0.033,
      "grad_norm": 12.796667098999023,
      "learning_rate": 4.836e-06,
      "loss": 9.9112,
      "mean_token_accuracy": 0.3337438404560089,
      "num_tokens": 2793334.0,
      "step": 165
    },
    {
      "epoch": 0.0332,
      "grad_norm": 15.983271598815918,
      "learning_rate": 4.835e-06,
      "loss": 9.5066,
      "mean_token_accuracy": 0.29256465286016464,
      "num_tokens": 2807755.0,
      "step": 166
    },
    {
      "epoch": 0.0334,
      "grad_norm": 15.554715156555176,
      "learning_rate": 4.834000000000001e-06,
      "loss": 10.5127,
      "mean_token_accuracy": 0.2986453175544739,
      "num_tokens": 2827193.0,
      "step": 167
    },
    {
      "epoch": 0.0336,
      "grad_norm": 14.6381196975708,
      "learning_rate": 4.833e-06,
      "loss": 9.7383,
      "mean_token_accuracy": 0.2586618810892105,
      "num_tokens": 2846449.0,
      "step": 168
    },
    {
      "epoch": 0.0338,
      "grad_norm": 16.013647079467773,
      "learning_rate": 4.8320000000000005e-06,
      "loss": 9.0009,
      "mean_token_accuracy": 0.32356322556734085,
      "num_tokens": 2860910.0,
      "step": 169
    },
    {
      "epoch": 0.034,
      "grad_norm": 13.708538055419922,
      "learning_rate": 4.831e-06,
      "loss": 9.4298,
      "mean_token_accuracy": 0.28114478290081024,
      "num_tokens": 2880172.0,
      "step": 170
    },
    {
      "epoch": 0.0342,
      "grad_norm": 14.314607620239258,
      "learning_rate": 4.83e-06,
      "loss": 9.2596,
      "mean_token_accuracy": 0.3014460504055023,
      "num_tokens": 2899434.0,
      "step": 171
    },
    {
      "epoch": 0.0344,
      "grad_norm": 12.27084732055664,
      "learning_rate": 4.829000000000001e-06,
      "loss": 9.5638,
      "mean_token_accuracy": 0.33095238357782364,
      "num_tokens": 2913932.0,
      "step": 172
    },
    {
      "epoch": 0.0346,
      "grad_norm": 14.023222923278809,
      "learning_rate": 4.828e-06,
      "loss": 9.689,
      "mean_token_accuracy": 0.31680162250995636,
      "num_tokens": 2928356.0,
      "step": 173
    },
    {
      "epoch": 0.0348,
      "grad_norm": 14.490949630737305,
      "learning_rate": 4.8270000000000005e-06,
      "loss": 8.8018,
      "mean_token_accuracy": 0.34068627655506134,
      "num_tokens": 2940589.0,
      "step": 174
    },
    {
      "epoch": 0.035,
      "grad_norm": 17.97809410095215,
      "learning_rate": 4.826e-06,
      "loss": 9.9556,
      "mean_token_accuracy": 0.3452381044626236,
      "num_tokens": 2959486.0,
      "step": 175
    },
    {
      "epoch": 0.0352,
      "grad_norm": 13.302875518798828,
      "learning_rate": 4.825e-06,
      "loss": 9.4104,
      "mean_token_accuracy": 0.23885918408632278,
      "num_tokens": 2974195.0,
      "step": 176
    },
    {
      "epoch": 0.0354,
      "grad_norm": 12.792606353759766,
      "learning_rate": 4.824000000000001e-06,
      "loss": 8.2518,
      "mean_token_accuracy": 0.42592592537403107,
      "num_tokens": 2984011.0,
      "step": 177
    },
    {
      "epoch": 0.0356,
      "grad_norm": 18.23525619506836,
      "learning_rate": 4.823e-06,
      "loss": 8.8069,
      "mean_token_accuracy": 0.40079365670681,
      "num_tokens": 2998487.0,
      "step": 178
    },
    {
      "epoch": 0.0358,
      "grad_norm": 14.206355094909668,
      "learning_rate": 4.822000000000001e-06,
      "loss": 9.1892,
      "mean_token_accuracy": 0.2290322557091713,
      "num_tokens": 3019773.0,
      "step": 179
    },
    {
      "epoch": 0.036,
      "grad_norm": 12.26903247833252,
      "learning_rate": 4.821e-06,
      "loss": 8.4939,
      "mean_token_accuracy": 0.28287841379642487,
      "num_tokens": 3040775.0,
      "step": 180
    },
    {
      "epoch": 0.0362,
      "grad_norm": 15.23544979095459,
      "learning_rate": 4.8200000000000004e-06,
      "loss": 8.9121,
      "mean_token_accuracy": 0.3285440653562546,
      "num_tokens": 3060080.0,
      "step": 181
    },
    {
      "epoch": 0.0364,
      "grad_norm": 57.132049560546875,
      "learning_rate": 4.819e-06,
      "loss": 8.9738,
      "mean_token_accuracy": 0.3175750821828842,
      "num_tokens": 3079332.0,
      "step": 182
    },
    {
      "epoch": 0.0366,
      "grad_norm": 13.691211700439453,
      "learning_rate": 4.818e-06,
      "loss": 8.0025,
      "mean_token_accuracy": 0.34656085073947906,
      "num_tokens": 3088989.0,
      "step": 183
    },
    {
      "epoch": 0.0368,
      "grad_norm": 15.762035369873047,
      "learning_rate": 4.817000000000001e-06,
      "loss": 9.4602,
      "mean_token_accuracy": 0.34457671642303467,
      "num_tokens": 3107732.0,
      "step": 184
    },
    {
      "epoch": 0.037,
      "grad_norm": 17.034019470214844,
      "learning_rate": 4.816e-06,
      "loss": 9.4491,
      "mean_token_accuracy": 0.34666667878627777,
      "num_tokens": 3122423.0,
      "step": 185
    },
    {
      "epoch": 0.0372,
      "grad_norm": 12.461385726928711,
      "learning_rate": 4.8150000000000005e-06,
      "loss": 9.0659,
      "mean_token_accuracy": 0.2838345915079117,
      "num_tokens": 3141889.0,
      "step": 186
    },
    {
      "epoch": 0.0374,
      "grad_norm": 13.194416046142578,
      "learning_rate": 4.814e-06,
      "loss": 8.8107,
      "mean_token_accuracy": 0.36685824394226074,
      "num_tokens": 3161194.0,
      "step": 187
    },
    {
      "epoch": 0.0376,
      "grad_norm": 14.799727439880371,
      "learning_rate": 4.813e-06,
      "loss": 9.7196,
      "mean_token_accuracy": 0.377616748213768,
      "num_tokens": 3175646.0,
      "step": 188
    },
    {
      "epoch": 0.0378,
      "grad_norm": 15.303200721740723,
      "learning_rate": 4.812000000000001e-06,
      "loss": 10.0029,
      "mean_token_accuracy": 0.38161374628543854,
      "num_tokens": 3194941.0,
      "step": 189
    },
    {
      "epoch": 0.038,
      "grad_norm": 13.99432373046875,
      "learning_rate": 4.811000000000001e-06,
      "loss": 8.4004,
      "mean_token_accuracy": 0.37096773087978363,
      "num_tokens": 3214205.0,
      "step": 190
    },
    {
      "epoch": 0.0382,
      "grad_norm": 12.163537979125977,
      "learning_rate": 4.8100000000000005e-06,
      "loss": 9.0058,
      "mean_token_accuracy": 0.35395538806915283,
      "num_tokens": 3235063.0,
      "step": 191
    },
    {
      "epoch": 0.0384,
      "grad_norm": 12.369959831237793,
      "learning_rate": 4.809e-06,
      "loss": 9.0358,
      "mean_token_accuracy": 0.4807407408952713,
      "num_tokens": 3249517.0,
      "step": 192
    },
    {
      "epoch": 0.0386,
      "grad_norm": 21.97284698486328,
      "learning_rate": 4.808e-06,
      "loss": 8.7725,
      "mean_token_accuracy": 0.41179338097572327,
      "num_tokens": 3268738.0,
      "step": 193
    },
    {
      "epoch": 0.0388,
      "grad_norm": 13.040814399719238,
      "learning_rate": 4.807000000000001e-06,
      "loss": 9.4628,
      "mean_token_accuracy": 0.424450546503067,
      "num_tokens": 3288032.0,
      "step": 194
    },
    {
      "epoch": 0.039,
      "grad_norm": 14.395992279052734,
      "learning_rate": 4.806000000000001e-06,
      "loss": 9.1626,
      "mean_token_accuracy": 0.454365074634552,
      "num_tokens": 3306813.0,
      "step": 195
    },
    {
      "epoch": 0.0392,
      "grad_norm": 11.64809799194336,
      "learning_rate": 4.805000000000001e-06,
      "loss": 8.7527,
      "mean_token_accuracy": 0.3843159079551697,
      "num_tokens": 3325561.0,
      "step": 196
    },
    {
      "epoch": 0.0394,
      "grad_norm": 25.551607131958008,
      "learning_rate": 4.804e-06,
      "loss": 8.5253,
      "mean_token_accuracy": 0.3452381044626236,
      "num_tokens": 3339397.0,
      "step": 197
    },
    {
      "epoch": 0.0396,
      "grad_norm": 12.025030136108398,
      "learning_rate": 4.8030000000000004e-06,
      "loss": 7.7581,
      "mean_token_accuracy": 0.5191570967435837,
      "num_tokens": 3349055.0,
      "step": 198
    },
    {
      "epoch": 0.0398,
      "grad_norm": 10.044900894165039,
      "learning_rate": 4.802000000000001e-06,
      "loss": 9.0733,
      "mean_token_accuracy": 0.4278416335582733,
      "num_tokens": 3368313.0,
      "step": 199
    },
    {
      "epoch": 0.04,
      "grad_norm": 10.911112785339355,
      "learning_rate": 4.801e-06,
      "loss": 8.6906,
      "mean_token_accuracy": 0.5000000149011612,
      "num_tokens": 3383810.0,
      "step": 200
    },
    {
      "epoch": 0.0402,
      "grad_norm": 10.119377136230469,
      "learning_rate": 4.800000000000001e-06,
      "loss": 8.8941,
      "mean_token_accuracy": 0.4000000059604645,
      "num_tokens": 3398244.0,
      "step": 201
    },
    {
      "epoch": 0.0404,
      "grad_norm": 10.526436805725098,
      "learning_rate": 4.799e-06,
      "loss": 7.8408,
      "mean_token_accuracy": 0.39772726595401764,
      "num_tokens": 3414686.0,
      "step": 202
    },
    {
      "epoch": 0.0406,
      "grad_norm": 10.947959899902344,
      "learning_rate": 4.7980000000000005e-06,
      "loss": 8.4085,
      "mean_token_accuracy": 0.40992647409439087,
      "num_tokens": 3433954.0,
      "step": 203
    },
    {
      "epoch": 0.0408,
      "grad_norm": 10.811299324035645,
      "learning_rate": 4.797000000000001e-06,
      "loss": 7.7418,
      "mean_token_accuracy": 0.5370370447635651,
      "num_tokens": 3443610.0,
      "step": 204
    },
    {
      "epoch": 0.041,
      "grad_norm": 9.57394027709961,
      "learning_rate": 4.796e-06,
      "loss": 8.6595,
      "mean_token_accuracy": 0.45967741310596466,
      "num_tokens": 3462909.0,
      "step": 205
    },
    {
      "epoch": 0.0412,
      "grad_norm": 12.8336181640625,
      "learning_rate": 4.795e-06,
      "loss": 8.6943,
      "mean_token_accuracy": 0.45967741310596466,
      "num_tokens": 3477620.0,
      "step": 206
    },
    {
      "epoch": 0.0414,
      "grad_norm": 11.37842845916748,
      "learning_rate": 4.794e-06,
      "loss": 8.5963,
      "mean_token_accuracy": 0.4539627134799957,
      "num_tokens": 3496675.0,
      "step": 207
    },
    {
      "epoch": 0.0416,
      "grad_norm": 12.427331924438477,
      "learning_rate": 4.7930000000000005e-06,
      "loss": 8.67,
      "mean_token_accuracy": 0.3821548819541931,
      "num_tokens": 3515975.0,
      "step": 208
    },
    {
      "epoch": 0.0418,
      "grad_norm": 10.371417045593262,
      "learning_rate": 4.792000000000001e-06,
      "loss": 8.4859,
      "mean_token_accuracy": 0.40784314274787903,
      "num_tokens": 3535241.0,
      "step": 209
    },
    {
      "epoch": 0.042,
      "grad_norm": 11.788932800292969,
      "learning_rate": 4.791e-06,
      "loss": 8.7143,
      "mean_token_accuracy": 0.3741379380226135,
      "num_tokens": 3554502.0,
      "step": 210
    },
    {
      "epoch": 0.0422,
      "grad_norm": 15.102238655090332,
      "learning_rate": 4.79e-06,
      "loss": 8.5368,
      "mean_token_accuracy": 0.40980392694473267,
      "num_tokens": 3574437.0,
      "step": 211
    },
    {
      "epoch": 0.0424,
      "grad_norm": 11.23690128326416,
      "learning_rate": 4.789e-06,
      "loss": 8.7326,
      "mean_token_accuracy": 0.45628078281879425,
      "num_tokens": 3593696.0,
      "step": 212
    },
    {
      "epoch": 0.0426,
      "grad_norm": 11.3884859085083,
      "learning_rate": 4.7880000000000006e-06,
      "loss": 9.0773,
      "mean_token_accuracy": 0.4404761791229248,
      "num_tokens": 3608150.0,
      "step": 213
    },
    {
      "epoch": 0.0428,
      "grad_norm": 11.106508255004883,
      "learning_rate": 4.787000000000001e-06,
      "loss": 9.5678,
      "mean_token_accuracy": 0.3500000089406967,
      "num_tokens": 3627724.0,
      "step": 214
    },
    {
      "epoch": 0.043,
      "grad_norm": 11.691924095153809,
      "learning_rate": 4.7860000000000004e-06,
      "loss": 8.2192,
      "mean_token_accuracy": 0.41582491993904114,
      "num_tokens": 3642224.0,
      "step": 215
    },
    {
      "epoch": 0.0432,
      "grad_norm": 13.973259925842285,
      "learning_rate": 4.785e-06,
      "loss": 8.9823,
      "mean_token_accuracy": 0.44195401668548584,
      "num_tokens": 3660885.0,
      "step": 216
    },
    {
      "epoch": 0.0434,
      "grad_norm": 11.744901657104492,
      "learning_rate": 4.784e-06,
      "loss": 8.698,
      "mean_token_accuracy": 0.4291125535964966,
      "num_tokens": 3679634.0,
      "step": 217
    },
    {
      "epoch": 0.0436,
      "grad_norm": 13.440972328186035,
      "learning_rate": 4.783000000000001e-06,
      "loss": 8.3414,
      "mean_token_accuracy": 0.47999998927116394,
      "num_tokens": 3689286.0,
      "step": 218
    },
    {
      "epoch": 0.0438,
      "grad_norm": 9.561469078063965,
      "learning_rate": 4.782e-06,
      "loss": 7.6889,
      "mean_token_accuracy": 0.41898825764656067,
      "num_tokens": 3710533.0,
      "step": 219
    },
    {
      "epoch": 0.044,
      "grad_norm": 12.257551193237305,
      "learning_rate": 4.7810000000000005e-06,
      "loss": 8.4541,
      "mean_token_accuracy": 0.5016103088855743,
      "num_tokens": 3725145.0,
      "step": 220
    },
    {
      "epoch": 0.0442,
      "grad_norm": 10.785005569458008,
      "learning_rate": 4.78e-06,
      "loss": 9.7029,
      "mean_token_accuracy": 0.4015151560306549,
      "num_tokens": 3743544.0,
      "step": 221
    },
    {
      "epoch": 0.0444,
      "grad_norm": 10.52768611907959,
      "learning_rate": 4.779e-06,
      "loss": 8.6062,
      "mean_token_accuracy": 0.40740741789340973,
      "num_tokens": 3762705.0,
      "step": 222
    },
    {
      "epoch": 0.0446,
      "grad_norm": 17.872329711914062,
      "learning_rate": 4.778000000000001e-06,
      "loss": 7.3673,
      "mean_token_accuracy": 0.45370370149612427,
      "num_tokens": 3777166.0,
      "step": 223
    },
    {
      "epoch": 0.0448,
      "grad_norm": 11.053666114807129,
      "learning_rate": 4.777e-06,
      "loss": 9.3428,
      "mean_token_accuracy": 0.4186507910490036,
      "num_tokens": 3795909.0,
      "step": 224
    },
    {
      "epoch": 0.045,
      "grad_norm": 9.966497421264648,
      "learning_rate": 4.7760000000000005e-06,
      "loss": 7.9071,
      "mean_token_accuracy": 0.44636015594005585,
      "num_tokens": 3815176.0,
      "step": 225
    },
    {
      "epoch": 0.0452,
      "grad_norm": 12.605799674987793,
      "learning_rate": 4.775e-06,
      "loss": 8.3063,
      "mean_token_accuracy": 0.41692790389060974,
      "num_tokens": 3834478.0,
      "step": 226
    },
    {
      "epoch": 0.0454,
      "grad_norm": 9.679677963256836,
      "learning_rate": 4.774e-06,
      "loss": 8.7331,
      "mean_token_accuracy": 0.45628078281879425,
      "num_tokens": 3853737.0,
      "step": 227
    },
    {
      "epoch": 0.0456,
      "grad_norm": 27.174549102783203,
      "learning_rate": 4.773000000000001e-06,
      "loss": 8.0078,
      "mean_token_accuracy": 0.4434434473514557,
      "num_tokens": 3873882.0,
      "step": 228
    },
    {
      "epoch": 0.0458,
      "grad_norm": 11.591468811035156,
      "learning_rate": 4.772e-06,
      "loss": 8.237,
      "mean_token_accuracy": 0.36707451939582825,
      "num_tokens": 3888302.0,
      "step": 229
    },
    {
      "epoch": 0.046,
      "grad_norm": 10.10312271118164,
      "learning_rate": 4.7710000000000006e-06,
      "loss": 7.6694,
      "mean_token_accuracy": 0.5105820149183273,
      "num_tokens": 3902804.0,
      "step": 230
    },
    {
      "epoch": 0.0462,
      "grad_norm": 13.620348930358887,
      "learning_rate": 4.77e-06,
      "loss": 8.4942,
      "mean_token_accuracy": 0.421875,
      "num_tokens": 3922146.0,
      "step": 231
    },
    {
      "epoch": 0.0464,
      "grad_norm": 11.014819145202637,
      "learning_rate": 4.769e-06,
      "loss": 7.637,
      "mean_token_accuracy": 0.4495798349380493,
      "num_tokens": 3936372.0,
      "step": 232
    },
    {
      "epoch": 0.0466,
      "grad_norm": 14.10721492767334,
      "learning_rate": 4.768000000000001e-06,
      "loss": 7.4068,
      "mean_token_accuracy": 0.5078571289777756,
      "num_tokens": 3946456.0,
      "step": 233
    },
    {
      "epoch": 0.0468,
      "grad_norm": 13.256854057312012,
      "learning_rate": 4.767e-06,
      "loss": 7.9252,
      "mean_token_accuracy": 0.41277891397476196,
      "num_tokens": 3965955.0,
      "step": 234
    },
    {
      "epoch": 0.047,
      "grad_norm": 12.264280319213867,
      "learning_rate": 4.766000000000001e-06,
      "loss": 7.6561,
      "mean_token_accuracy": 0.4913793057203293,
      "num_tokens": 3980412.0,
      "step": 235
    },
    {
      "epoch": 0.0472,
      "grad_norm": 11.942499160766602,
      "learning_rate": 4.765e-06,
      "loss": 8.0462,
      "mean_token_accuracy": 0.4900284856557846,
      "num_tokens": 3999823.0,
      "step": 236
    },
    {
      "epoch": 0.0474,
      "grad_norm": 10.043482780456543,
      "learning_rate": 4.7640000000000005e-06,
      "loss": 7.0965,
      "mean_token_accuracy": 0.4025973975658417,
      "num_tokens": 4014414.0,
      "step": 237
    },
    {
      "epoch": 0.0476,
      "grad_norm": 12.545036315917969,
      "learning_rate": 4.763000000000001e-06,
      "loss": 7.667,
      "mean_token_accuracy": 0.38708220422267914,
      "num_tokens": 4028922.0,
      "step": 238
    },
    {
      "epoch": 0.0478,
      "grad_norm": 10.067218780517578,
      "learning_rate": 4.762e-06,
      "loss": 7.1117,
      "mean_token_accuracy": 0.37129031121730804,
      "num_tokens": 4050175.0,
      "step": 239
    },
    {
      "epoch": 0.048,
      "grad_norm": 13.74410343170166,
      "learning_rate": 4.761000000000001e-06,
      "loss": 7.9069,
      "mean_token_accuracy": 0.47333332896232605,
      "num_tokens": 4064811.0,
      "step": 240
    },
    {
      "epoch": 0.0482,
      "grad_norm": 9.813583374023438,
      "learning_rate": 4.76e-06,
      "loss": 6.8128,
      "mean_token_accuracy": 0.4273170679807663,
      "num_tokens": 4079317.0,
      "step": 241
    },
    {
      "epoch": 0.0484,
      "grad_norm": 10.31633472442627,
      "learning_rate": 4.7590000000000005e-06,
      "loss": 7.676,
      "mean_token_accuracy": 0.43584655225276947,
      "num_tokens": 4094093.0,
      "step": 242
    },
    {
      "epoch": 0.0486,
      "grad_norm": 13.174894332885742,
      "learning_rate": 4.758e-06,
      "loss": 8.5029,
      "mean_token_accuracy": 0.42592592537403107,
      "num_tokens": 4113778.0,
      "step": 243
    },
    {
      "epoch": 0.0488,
      "grad_norm": 11.068340301513672,
      "learning_rate": 4.757e-06,
      "loss": 7.6392,
      "mean_token_accuracy": 0.5028571337461472,
      "num_tokens": 4125078.0,
      "step": 244
    },
    {
      "epoch": 0.049,
      "grad_norm": 11.669493675231934,
      "learning_rate": 4.756000000000001e-06,
      "loss": 8.0877,
      "mean_token_accuracy": 0.3697916716337204,
      "num_tokens": 4144451.0,
      "step": 245
    },
    {
      "epoch": 0.0492,
      "grad_norm": 12.121454238891602,
      "learning_rate": 4.755e-06,
      "loss": 8.3639,
      "mean_token_accuracy": 0.3320707082748413,
      "num_tokens": 4164189.0,
      "step": 246
    },
    {
      "epoch": 0.0494,
      "grad_norm": 11.231935501098633,
      "learning_rate": 4.7540000000000006e-06,
      "loss": 8.1039,
      "mean_token_accuracy": 0.40060852468013763,
      "num_tokens": 4183560.0,
      "step": 247
    },
    {
      "epoch": 0.0496,
      "grad_norm": 14.818300247192383,
      "learning_rate": 4.753e-06,
      "loss": 8.3393,
      "mean_token_accuracy": 0.4365079402923584,
      "num_tokens": 4203160.0,
      "step": 248
    },
    {
      "epoch": 0.0498,
      "grad_norm": 10.595967292785645,
      "learning_rate": 4.752e-06,
      "loss": 8.2095,
      "mean_token_accuracy": 0.3896551728248596,
      "num_tokens": 4222502.0,
      "step": 249
    },
    {
      "epoch": 0.05,
      "grad_norm": 10.283987998962402,
      "learning_rate": 4.751000000000001e-06,
      "loss": 7.8432,
      "mean_token_accuracy": 0.41187499463558197,
      "num_tokens": 4236999.0,
      "step": 250
    }
  ],
  "logging_steps": 1,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 250,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.970126341013975e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}