{
  "best_metric": 2.532432222481162,
  "best_model_checkpoint": "./final-whisper-for-initial-publish-v2/checkpoint-4000",
  "epoch": 8.237232289950576,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04118616144975288,
      "grad_norm": 67.08155822753906,
      "learning_rate": 4.2000000000000006e-07,
      "loss": 2.8368,
      "step": 25
    },
    {
      "epoch": 0.08237232289950576,
      "grad_norm": 17.44784164428711,
      "learning_rate": 9.200000000000001e-07,
      "loss": 2.0282,
      "step": 50
    },
    {
      "epoch": 0.12355848434925865,
      "grad_norm": 12.481098175048828,
      "learning_rate": 1.42e-06,
      "loss": 1.3152,
      "step": 75
    },
    {
      "epoch": 0.16474464579901152,
      "grad_norm": 8.900260925292969,
      "learning_rate": 1.9200000000000003e-06,
      "loss": 0.7248,
      "step": 100
    },
    {
      "epoch": 0.20593080724876442,
      "grad_norm": 7.8956146240234375,
      "learning_rate": 2.42e-06,
      "loss": 0.5731,
      "step": 125
    },
    {
      "epoch": 0.2471169686985173,
      "grad_norm": 9.542287826538086,
      "learning_rate": 2.92e-06,
      "loss": 0.474,
      "step": 150
    },
    {
      "epoch": 0.2883031301482702,
      "grad_norm": 6.937229633331299,
      "learning_rate": 3.4200000000000007e-06,
      "loss": 0.4301,
      "step": 175
    },
    {
      "epoch": 0.32948929159802304,
      "grad_norm": 6.919783115386963,
      "learning_rate": 3.920000000000001e-06,
      "loss": 0.351,
      "step": 200
    },
    {
      "epoch": 0.37067545304777594,
      "grad_norm": 6.427859783172607,
      "learning_rate": 4.42e-06,
      "loss": 0.2726,
      "step": 225
    },
    {
      "epoch": 0.41186161449752884,
      "grad_norm": 5.83529806137085,
      "learning_rate": 4.92e-06,
      "loss": 0.1663,
      "step": 250
    },
    {
      "epoch": 0.45304777594728174,
      "grad_norm": 2.4859721660614014,
      "learning_rate": 5.420000000000001e-06,
      "loss": 0.0893,
      "step": 275
    },
    {
      "epoch": 0.4942339373970346,
      "grad_norm": 3.2526772022247314,
      "learning_rate": 5.92e-06,
      "loss": 0.076,
      "step": 300
    },
    {
      "epoch": 0.5354200988467874,
      "grad_norm": 4.417870044708252,
      "learning_rate": 6.42e-06,
      "loss": 0.0764,
      "step": 325
    },
    {
      "epoch": 0.5766062602965404,
      "grad_norm": 3.149733781814575,
      "learning_rate": 6.92e-06,
      "loss": 0.0679,
      "step": 350
    },
    {
      "epoch": 0.6177924217462932,
      "grad_norm": 3.8823468685150146,
      "learning_rate": 7.420000000000001e-06,
      "loss": 0.0525,
      "step": 375
    },
    {
      "epoch": 0.6589785831960461,
      "grad_norm": 6.1009745597839355,
      "learning_rate": 7.92e-06,
      "loss": 0.0552,
      "step": 400
    },
    {
      "epoch": 0.700164744645799,
      "grad_norm": 5.714720726013184,
      "learning_rate": 8.42e-06,
      "loss": 0.0529,
      "step": 425
    },
    {
      "epoch": 0.7413509060955519,
      "grad_norm": 4.403444290161133,
      "learning_rate": 8.920000000000001e-06,
      "loss": 0.0472,
      "step": 450
    },
    {
      "epoch": 0.7825370675453048,
      "grad_norm": 3.7249884605407715,
      "learning_rate": 9.42e-06,
      "loss": 0.0424,
      "step": 475
    },
    {
      "epoch": 0.8237232289950577,
      "grad_norm": 3.8490548133850098,
      "learning_rate": 9.920000000000002e-06,
      "loss": 0.0461,
      "step": 500
    },
    {
      "epoch": 0.8237232289950577,
      "eval_loss": 0.04240331053733826,
      "eval_runtime": 1653.3644,
      "eval_samples_per_second": 1.468,
      "eval_steps_per_second": 0.184,
      "eval_wer": 11.04637613609881,
      "step": 500
    },
    {
      "epoch": 0.8649093904448105,
      "grad_norm": 2.538515329360962,
      "learning_rate": 9.961818181818183e-06,
      "loss": 0.0384,
      "step": 525
    },
    {
      "epoch": 0.9060955518945635,
      "grad_norm": 1.6598188877105713,
      "learning_rate": 9.916363636363637e-06,
      "loss": 0.0375,
      "step": 550
    },
    {
      "epoch": 0.9472817133443163,
      "grad_norm": 3.256746292114258,
      "learning_rate": 9.870909090909092e-06,
      "loss": 0.0346,
      "step": 575
    },
    {
      "epoch": 0.9884678747940692,
      "grad_norm": 3.04457426071167,
      "learning_rate": 9.825454545454546e-06,
      "loss": 0.034,
      "step": 600
    },
    {
      "epoch": 1.029654036243822,
      "grad_norm": 0.6082770228385925,
      "learning_rate": 9.780000000000001e-06,
      "loss": 0.0272,
      "step": 625
    },
    {
      "epoch": 1.0708401976935749,
      "grad_norm": 1.6969329118728638,
      "learning_rate": 9.734545454545455e-06,
      "loss": 0.0206,
      "step": 650
    },
    {
      "epoch": 1.112026359143328,
      "grad_norm": 1.9652377367019653,
      "learning_rate": 9.68909090909091e-06,
      "loss": 0.0197,
      "step": 675
    },
    {
      "epoch": 1.1532125205930808,
      "grad_norm": 2.402329206466675,
      "learning_rate": 9.643636363636364e-06,
      "loss": 0.0221,
      "step": 700
    },
    {
      "epoch": 1.1943986820428336,
      "grad_norm": 3.167133092880249,
      "learning_rate": 9.598181818181818e-06,
      "loss": 0.0176,
      "step": 725
    },
    {
      "epoch": 1.2355848434925865,
      "grad_norm": 0.4075870215892792,
      "learning_rate": 9.552727272727273e-06,
      "loss": 0.0165,
      "step": 750
    },
    {
      "epoch": 1.2767710049423393,
      "grad_norm": 2.739176034927368,
      "learning_rate": 9.507272727272729e-06,
      "loss": 0.0156,
      "step": 775
    },
    {
      "epoch": 1.3179571663920924,
      "grad_norm": 2.4662702083587646,
      "learning_rate": 9.461818181818183e-06,
      "loss": 0.0133,
      "step": 800
    },
    {
      "epoch": 1.3591433278418452,
      "grad_norm": 2.955909252166748,
      "learning_rate": 9.416363636363636e-06,
      "loss": 0.0184,
      "step": 825
    },
    {
      "epoch": 1.400329489291598,
      "grad_norm": 2.0838463306427,
      "learning_rate": 9.370909090909092e-06,
      "loss": 0.0138,
      "step": 850
    },
    {
      "epoch": 1.441515650741351,
      "grad_norm": 2.2245779037475586,
      "learning_rate": 9.325454545454547e-06,
      "loss": 0.0132,
      "step": 875
    },
    {
      "epoch": 1.4827018121911038,
      "grad_norm": 2.146961212158203,
      "learning_rate": 9.280000000000001e-06,
      "loss": 0.0126,
      "step": 900
    },
    {
      "epoch": 1.5238879736408566,
      "grad_norm": 1.955930471420288,
      "learning_rate": 9.234545454545455e-06,
      "loss": 0.0111,
      "step": 925
    },
    {
      "epoch": 1.5650741350906094,
      "grad_norm": 2.04664945602417,
      "learning_rate": 9.18909090909091e-06,
      "loss": 0.0118,
      "step": 950
    },
    {
      "epoch": 1.6062602965403623,
      "grad_norm": 2.1349849700927734,
      "learning_rate": 9.143636363636364e-06,
      "loss": 0.0116,
      "step": 975
    },
    {
      "epoch": 1.6474464579901154,
      "grad_norm": 2.336024761199951,
      "learning_rate": 9.09818181818182e-06,
      "loss": 0.0124,
      "step": 1000
    },
    {
      "epoch": 1.6474464579901154,
      "eval_loss": 0.02211674302816391,
      "eval_runtime": 1633.7086,
      "eval_samples_per_second": 1.486,
      "eval_steps_per_second": 0.186,
      "eval_wer": 5.11147362697118,
      "step": 1000
    },
    {
      "epoch": 1.6886326194398682,
      "grad_norm": 0.4564249813556671,
      "learning_rate": 9.052727272727273e-06,
      "loss": 0.0119,
      "step": 1025
    },
    {
      "epoch": 1.729818780889621,
      "grad_norm": 1.9494842290878296,
      "learning_rate": 9.007272727272729e-06,
      "loss": 0.0114,
      "step": 1050
    },
    {
      "epoch": 1.771004942339374,
      "grad_norm": 1.7129623889923096,
      "learning_rate": 8.961818181818182e-06,
      "loss": 0.0116,
      "step": 1075
    },
    {
      "epoch": 1.812191103789127,
      "grad_norm": 0.8837174773216248,
      "learning_rate": 8.916363636363638e-06,
      "loss": 0.01,
      "step": 1100
    },
    {
      "epoch": 1.8533772652388798,
      "grad_norm": 3.1779348850250244,
      "learning_rate": 8.870909090909091e-06,
      "loss": 0.0087,
      "step": 1125
    },
    {
      "epoch": 1.8945634266886326,
      "grad_norm": 1.1833815574645996,
      "learning_rate": 8.825454545454545e-06,
      "loss": 0.0122,
      "step": 1150
    },
    {
      "epoch": 1.9357495881383855,
      "grad_norm": 2.3456814289093018,
      "learning_rate": 8.78e-06,
      "loss": 0.0089,
      "step": 1175
    },
    {
      "epoch": 1.9769357495881383,
      "grad_norm": 1.5834710597991943,
      "learning_rate": 8.734545454545456e-06,
      "loss": 0.0111,
      "step": 1200
    },
    {
      "epoch": 2.018121911037891,
      "grad_norm": 1.1926366090774536,
      "learning_rate": 8.68909090909091e-06,
      "loss": 0.0069,
      "step": 1225
    },
    {
      "epoch": 2.059308072487644,
      "grad_norm": 0.5671884417533875,
      "learning_rate": 8.643636363636364e-06,
      "loss": 0.0043,
      "step": 1250
    },
    {
      "epoch": 2.100494233937397,
      "grad_norm": 0.8897690176963806,
      "learning_rate": 8.598181818181819e-06,
      "loss": 0.0034,
      "step": 1275
    },
    {
      "epoch": 2.1416803953871497,
      "grad_norm": 1.6989620923995972,
      "learning_rate": 8.552727272727274e-06,
      "loss": 0.003,
      "step": 1300
    },
    {
      "epoch": 2.182866556836903,
      "grad_norm": 2.0327303409576416,
      "learning_rate": 8.507272727272728e-06,
      "loss": 0.0037,
      "step": 1325
    },
    {
      "epoch": 2.224052718286656,
      "grad_norm": 0.6554267406463623,
      "learning_rate": 8.461818181818182e-06,
      "loss": 0.0034,
      "step": 1350
    },
    {
      "epoch": 2.2652388797364087,
      "grad_norm": 0.21176663041114807,
      "learning_rate": 8.416363636363637e-06,
      "loss": 0.0046,
      "step": 1375
    },
    {
      "epoch": 2.3064250411861615,
      "grad_norm": 1.2915725708007812,
      "learning_rate": 8.370909090909091e-06,
      "loss": 0.0026,
      "step": 1400
    },
    {
      "epoch": 2.3476112026359144,
      "grad_norm": 0.15533868968486786,
      "learning_rate": 8.325454545454547e-06,
      "loss": 0.0043,
      "step": 1425
    },
    {
      "epoch": 2.3887973640856672,
      "grad_norm": 0.19825848937034607,
      "learning_rate": 8.28e-06,
      "loss": 0.0042,
      "step": 1450
    },
    {
      "epoch": 2.42998352553542,
      "grad_norm": 0.14249640703201294,
      "learning_rate": 8.234545454545456e-06,
      "loss": 0.0023,
      "step": 1475
    },
    {
      "epoch": 2.471169686985173,
      "grad_norm": 0.12349322438240051,
      "learning_rate": 8.18909090909091e-06,
      "loss": 0.0026,
      "step": 1500
    },
    {
      "epoch": 2.471169686985173,
      "eval_loss": 0.015178721398115158,
      "eval_runtime": 1647.8725,
      "eval_samples_per_second": 1.473,
      "eval_steps_per_second": 0.184,
      "eval_wer": 3.5966752116833685,
      "step": 1500
    },
    {
      "epoch": 2.5123558484349258,
      "grad_norm": 0.24572087824344635,
      "learning_rate": 8.143636363636365e-06,
      "loss": 0.0032,
      "step": 1525
    },
    {
      "epoch": 2.5535420098846786,
      "grad_norm": 0.2075294703245163,
      "learning_rate": 8.098181818181819e-06,
      "loss": 0.0023,
      "step": 1550
    },
    {
      "epoch": 2.594728171334432,
      "grad_norm": 0.264713317155838,
      "learning_rate": 8.052727272727272e-06,
      "loss": 0.0044,
      "step": 1575
    },
    {
      "epoch": 2.6359143327841847,
      "grad_norm": 1.9720451831817627,
      "learning_rate": 8.007272727272728e-06,
      "loss": 0.0048,
      "step": 1600
    },
    {
      "epoch": 2.6771004942339376,
      "grad_norm": 0.7824451327323914,
      "learning_rate": 7.961818181818183e-06,
      "loss": 0.0048,
      "step": 1625
    },
    {
      "epoch": 2.7182866556836904,
      "grad_norm": 0.681707501411438,
      "learning_rate": 7.916363636363637e-06,
      "loss": 0.0036,
      "step": 1650
    },
    {
      "epoch": 2.7594728171334433,
      "grad_norm": 0.39950481057167053,
      "learning_rate": 7.870909090909091e-06,
      "loss": 0.0047,
      "step": 1675
    },
    {
      "epoch": 2.800658978583196,
      "grad_norm": 0.7777596712112427,
      "learning_rate": 7.825454545454546e-06,
      "loss": 0.0029,
      "step": 1700
    },
    {
      "epoch": 2.841845140032949,
      "grad_norm": 0.06510231643915176,
      "learning_rate": 7.78e-06,
      "loss": 0.0067,
      "step": 1725
    },
    {
      "epoch": 2.883031301482702,
      "grad_norm": 2.2729947566986084,
      "learning_rate": 7.734545454545455e-06,
      "loss": 0.0024,
      "step": 1750
    },
    {
      "epoch": 2.9242174629324547,
      "grad_norm": 1.6635749340057373,
      "learning_rate": 7.68909090909091e-06,
      "loss": 0.0035,
      "step": 1775
    },
    {
      "epoch": 2.9654036243822075,
      "grad_norm": 0.06815456598997116,
      "learning_rate": 7.643636363636365e-06,
      "loss": 0.004,
      "step": 1800
    },
    {
      "epoch": 3.0065897858319603,
      "grad_norm": 1.5607892274856567,
      "learning_rate": 7.598181818181819e-06,
      "loss": 0.0046,
      "step": 1825
    },
    {
      "epoch": 3.047775947281713,
      "grad_norm": 0.8395469784736633,
      "learning_rate": 7.552727272727274e-06,
      "loss": 0.0018,
      "step": 1850
    },
    {
      "epoch": 3.088962108731466,
      "grad_norm": 0.04032081738114357,
      "learning_rate": 7.507272727272728e-06,
      "loss": 0.0011,
      "step": 1875
    },
    {
      "epoch": 3.130148270181219,
      "grad_norm": 0.3644678294658661,
      "learning_rate": 7.461818181818182e-06,
      "loss": 0.0014,
      "step": 1900
    },
    {
      "epoch": 3.171334431630972,
      "grad_norm": 0.35729554295539856,
      "learning_rate": 7.416363636363637e-06,
      "loss": 0.0009,
      "step": 1925
    },
    {
      "epoch": 3.212520593080725,
      "grad_norm": 0.05964287370443344,
      "learning_rate": 7.370909090909092e-06,
      "loss": 0.0014,
      "step": 1950
    },
    {
      "epoch": 3.253706754530478,
      "grad_norm": 0.799541711807251,
      "learning_rate": 7.325454545454546e-06,
      "loss": 0.0015,
      "step": 1975
    },
    {
      "epoch": 3.2948929159802307,
      "grad_norm": 0.058581918478012085,
      "learning_rate": 7.280000000000001e-06,
      "loss": 0.0008,
      "step": 2000
    },
    {
      "epoch": 3.2948929159802307,
      "eval_loss": 0.015332825481891632,
      "eval_runtime": 1629.67,
      "eval_samples_per_second": 1.489,
      "eval_steps_per_second": 0.187,
      "eval_wer": 3.24710634661695,
      "step": 2000
    },
    {
      "epoch": 3.3360790774299836,
      "grad_norm": 0.20925483107566833,
      "learning_rate": 7.234545454545455e-06,
      "loss": 0.001,
      "step": 2025
    },
    {
      "epoch": 3.3772652388797364,
      "grad_norm": 0.5220603346824646,
      "learning_rate": 7.18909090909091e-06,
      "loss": 0.0006,
      "step": 2050
    },
    {
      "epoch": 3.4184514003294892,
      "grad_norm": 0.33813387155532837,
      "learning_rate": 7.1436363636363635e-06,
      "loss": 0.0008,
      "step": 2075
    },
    {
      "epoch": 3.459637561779242,
      "grad_norm": 0.5609467625617981,
      "learning_rate": 7.098181818181818e-06,
      "loss": 0.0006,
      "step": 2100
    },
    {
      "epoch": 3.500823723228995,
      "grad_norm": 0.028300393372774124,
      "learning_rate": 7.052727272727274e-06,
      "loss": 0.0008,
      "step": 2125
    },
    {
      "epoch": 3.5420098846787478,
      "grad_norm": 0.056196194142103195,
      "learning_rate": 7.007272727272728e-06,
      "loss": 0.0009,
      "step": 2150
    },
    {
      "epoch": 3.583196046128501,
      "grad_norm": 0.202606201171875,
      "learning_rate": 6.961818181818183e-06,
      "loss": 0.0019,
      "step": 2175
    },
    {
      "epoch": 3.624382207578254,
      "grad_norm": 0.014030909165740013,
      "learning_rate": 6.9163636363636365e-06,
      "loss": 0.001,
      "step": 2200
    },
    {
      "epoch": 3.6655683690280068,
      "grad_norm": 0.08081363141536713,
      "learning_rate": 6.870909090909091e-06,
      "loss": 0.002,
      "step": 2225
    },
    {
      "epoch": 3.7067545304777596,
      "grad_norm": 0.6110771894454956,
      "learning_rate": 6.8254545454545466e-06,
      "loss": 0.0005,
      "step": 2250
    },
    {
      "epoch": 3.7479406919275124,
      "grad_norm": 0.30081290006637573,
      "learning_rate": 6.780000000000001e-06,
      "loss": 0.0021,
      "step": 2275
    },
    {
      "epoch": 3.7891268533772653,
      "grad_norm": 0.042301613837480545,
      "learning_rate": 6.734545454545455e-06,
      "loss": 0.0012,
      "step": 2300
    },
    {
      "epoch": 3.830313014827018,
      "grad_norm": 1.5658293962478638,
      "learning_rate": 6.6890909090909095e-06,
      "loss": 0.0018,
      "step": 2325
    },
    {
      "epoch": 3.871499176276771,
      "grad_norm": 0.20856991410255432,
      "learning_rate": 6.643636363636364e-06,
      "loss": 0.0017,
      "step": 2350
    },
    {
      "epoch": 3.912685337726524,
      "grad_norm": 0.19910617172718048,
      "learning_rate": 6.5981818181818195e-06,
      "loss": 0.0011,
      "step": 2375
    },
    {
      "epoch": 3.9538714991762767,
      "grad_norm": 0.1468815952539444,
      "learning_rate": 6.5527272727272724e-06,
      "loss": 0.0011,
      "step": 2400
    },
    {
      "epoch": 3.9950576606260295,
      "grad_norm": 0.06176592409610748,
      "learning_rate": 6.507272727272728e-06,
      "loss": 0.0009,
      "step": 2425
    },
    {
      "epoch": 4.036243822075782,
      "grad_norm": 0.013582455925643444,
      "learning_rate": 6.4618181818181825e-06,
      "loss": 0.0004,
      "step": 2450
    },
    {
      "epoch": 4.077429983525535,
      "grad_norm": 0.013000385835766792,
      "learning_rate": 6.416363636363637e-06,
      "loss": 0.0003,
      "step": 2475
    },
    {
      "epoch": 4.118616144975288,
      "grad_norm": 0.04700480401515961,
      "learning_rate": 6.370909090909091e-06,
      "loss": 0.0004,
      "step": 2500
    },
    {
      "epoch": 4.118616144975288,
      "eval_loss": 0.013498911634087563,
      "eval_runtime": 1637.8085,
      "eval_samples_per_second": 1.482,
      "eval_steps_per_second": 0.186,
      "eval_wer": 2.718868950516585,
      "step": 2500
    },
    {
      "epoch": 4.159802306425041,
      "grad_norm": 0.049776773899793625,
      "learning_rate": 6.325454545454545e-06,
      "loss": 0.0007,
      "step": 2525
    },
    {
      "epoch": 4.200988467874794,
      "grad_norm": 0.047507863491773605,
      "learning_rate": 6.280000000000001e-06,
      "loss": 0.001,
      "step": 2550
    },
    {
      "epoch": 4.242174629324547,
      "grad_norm": 0.058455199003219604,
      "learning_rate": 6.2345454545454555e-06,
      "loss": 0.0004,
      "step": 2575
    },
    {
      "epoch": 4.283360790774299,
      "grad_norm": 0.03676324710249901,
      "learning_rate": 6.18909090909091e-06,
      "loss": 0.0008,
      "step": 2600
    },
    {
      "epoch": 4.324546952224052,
      "grad_norm": 0.08864962309598923,
      "learning_rate": 6.143636363636364e-06,
      "loss": 0.0008,
      "step": 2625
    },
    {
      "epoch": 4.365733113673806,
      "grad_norm": 0.25547558069229126,
      "learning_rate": 6.098181818181818e-06,
      "loss": 0.0014,
      "step": 2650
    },
    {
      "epoch": 4.406919275123559,
      "grad_norm": 0.014656963758170605,
      "learning_rate": 6.052727272727274e-06,
      "loss": 0.0004,
      "step": 2675
    },
    {
      "epoch": 4.448105436573312,
      "grad_norm": 0.01910262741148472,
      "learning_rate": 6.0072727272727284e-06,
      "loss": 0.0004,
      "step": 2700
    },
    {
      "epoch": 4.4892915980230645,
      "grad_norm": 0.021646469831466675,
      "learning_rate": 5.961818181818182e-06,
      "loss": 0.0005,
      "step": 2725
    },
    {
      "epoch": 4.530477759472817,
      "grad_norm": 0.024704741314053535,
      "learning_rate": 5.916363636363637e-06,
      "loss": 0.0005,
      "step": 2750
    },
    {
      "epoch": 4.57166392092257,
      "grad_norm": 0.024691058322787285,
      "learning_rate": 5.870909090909091e-06,
      "loss": 0.0004,
      "step": 2775
    },
    {
      "epoch": 4.612850082372323,
      "grad_norm": 0.023056305944919586,
      "learning_rate": 5.825454545454546e-06,
      "loss": 0.0002,
      "step": 2800
    },
    {
      "epoch": 4.654036243822076,
      "grad_norm": 0.02144230715930462,
      "learning_rate": 5.78e-06,
      "loss": 0.0006,
      "step": 2825
    },
    {
      "epoch": 4.695222405271829,
      "grad_norm": 0.056895911693573,
      "learning_rate": 5.734545454545455e-06,
      "loss": 0.0006,
      "step": 2850
    },
    {
      "epoch": 4.736408566721582,
      "grad_norm": 0.026411665603518486,
      "learning_rate": 5.68909090909091e-06,
      "loss": 0.0002,
      "step": 2875
    },
    {
      "epoch": 4.7775947281713345,
      "grad_norm": 0.6490919589996338,
      "learning_rate": 5.643636363636364e-06,
      "loss": 0.0005,
      "step": 2900
    },
    {
      "epoch": 4.818780889621087,
      "grad_norm": 0.01870059408247471,
      "learning_rate": 5.598181818181818e-06,
      "loss": 0.0004,
      "step": 2925
    },
    {
      "epoch": 4.85996705107084,
      "grad_norm": 0.36998653411865234,
      "learning_rate": 5.552727272727273e-06,
      "loss": 0.001,
      "step": 2950
    },
    {
      "epoch": 4.901153212520593,
      "grad_norm": 0.09682920575141907,
      "learning_rate": 5.507272727272728e-06,
      "loss": 0.0008,
      "step": 2975
    },
    {
      "epoch": 4.942339373970346,
      "grad_norm": 0.12028121948242188,
      "learning_rate": 5.461818181818183e-06,
      "loss": 0.0005,
      "step": 3000
    },
    {
      "epoch": 4.942339373970346,
      "eval_loss": 0.013804786838591099,
      "eval_runtime": 1636.262,
      "eval_samples_per_second": 1.483,
      "eval_steps_per_second": 0.186,
      "eval_wer": 2.8043191175328204,
      "step": 3000
    },
    {
      "epoch": 4.983525535420099,
      "grad_norm": 0.36403629183769226,
      "learning_rate": 5.4163636363636365e-06,
      "loss": 0.0002,
      "step": 3025
    },
    {
      "epoch": 5.0247116968698515,
      "grad_norm": 0.020813830196857452,
      "learning_rate": 5.370909090909091e-06,
      "loss": 0.0007,
      "step": 3050
    },
    {
      "epoch": 5.065897858319604,
      "grad_norm": 0.0156087102368474,
      "learning_rate": 5.325454545454546e-06,
      "loss": 0.0002,
      "step": 3075
    },
    {
      "epoch": 5.107084019769357,
      "grad_norm": 0.08841024339199066,
      "learning_rate": 5.28e-06,
      "loss": 0.0003,
      "step": 3100
    },
    {
      "epoch": 5.14827018121911,
      "grad_norm": 0.09709523618221283,
      "learning_rate": 5.234545454545456e-06,
      "loss": 0.0002,
      "step": 3125
    },
    {
      "epoch": 5.189456342668863,
      "grad_norm": 0.006491546519100666,
      "learning_rate": 5.1890909090909095e-06,
      "loss": 0.0004,
      "step": 3150
    },
    {
      "epoch": 5.230642504118616,
      "grad_norm": 0.03339486941695213,
      "learning_rate": 5.143636363636364e-06,
      "loss": 0.0001,
      "step": 3175
    },
    {
      "epoch": 5.2718286655683695,
      "grad_norm": 0.019623372703790665,
      "learning_rate": 5.098181818181819e-06,
      "loss": 0.0002,
      "step": 3200
    },
    {
      "epoch": 5.313014827018122,
      "grad_norm": 0.012536031194031239,
      "learning_rate": 5.052727272727273e-06,
      "loss": 0.0004,
      "step": 3225
    },
    {
      "epoch": 5.354200988467875,
      "grad_norm": 0.00885727908462286,
      "learning_rate": 5.007272727272727e-06,
      "loss": 0.0001,
      "step": 3250
    },
    {
      "epoch": 5.395387149917628,
      "grad_norm": 0.012449279427528381,
      "learning_rate": 4.9618181818181824e-06,
      "loss": 0.0003,
      "step": 3275
    },
    {
      "epoch": 5.436573311367381,
      "grad_norm": 0.04439382627606392,
      "learning_rate": 4.916363636363637e-06,
      "loss": 0.0003,
      "step": 3300
    },
    {
      "epoch": 5.477759472817134,
      "grad_norm": 0.08744444698095322,
      "learning_rate": 4.870909090909091e-06,
      "loss": 0.0006,
      "step": 3325
    },
    {
      "epoch": 5.518945634266887,
      "grad_norm": 0.015908684581518173,
      "learning_rate": 4.825454545454546e-06,
      "loss": 0.0003,
      "step": 3350
    },
    {
      "epoch": 5.560131795716639,
      "grad_norm": 0.04740598052740097,
      "learning_rate": 4.78e-06,
      "loss": 0.0002,
      "step": 3375
    },
    {
      "epoch": 5.601317957166392,
      "grad_norm": 0.011928090825676918,
      "learning_rate": 4.734545454545455e-06,
      "loss": 0.0001,
      "step": 3400
    },
    {
      "epoch": 5.642504118616145,
      "grad_norm": 0.044589392840862274,
      "learning_rate": 4.689090909090909e-06,
      "loss": 0.0002,
      "step": 3425
    },
    {
      "epoch": 5.683690280065898,
      "grad_norm": 0.010786905884742737,
      "learning_rate": 4.643636363636364e-06,
      "loss": 0.0001,
      "step": 3450
    },
    {
      "epoch": 5.724876441515651,
      "grad_norm": 0.006343629211187363,
      "learning_rate": 4.598181818181818e-06,
      "loss": 0.0003,
      "step": 3475
    },
    {
      "epoch": 5.766062602965404,
      "grad_norm": 0.006056615151464939,
      "learning_rate": 4.552727272727273e-06,
      "loss": 0.0003,
      "step": 3500
    },
    {
      "epoch": 5.766062602965404,
      "eval_loss": 0.013986092992126942,
      "eval_runtime": 1723.588,
      "eval_samples_per_second": 1.408,
      "eval_steps_per_second": 0.176,
      "eval_wer": 2.734405344519537,
      "step": 3500
    },
    {
      "epoch": 5.8072487644151565,
      "grad_norm": 0.043298039585351944,
      "learning_rate": 4.5072727272727275e-06,
      "loss": 0.0002,
      "step": 3525
    },
    {
      "epoch": 5.848434925864909,
      "grad_norm": 0.007331861648708582,
      "learning_rate": 4.461818181818182e-06,
      "loss": 0.0001,
      "step": 3550
    },
    {
      "epoch": 5.889621087314662,
      "grad_norm": 0.012765350751578808,
      "learning_rate": 4.416363636363637e-06,
      "loss": 0.0002,
      "step": 3575
    },
    {
      "epoch": 5.930807248764415,
      "grad_norm": 0.033175356686115265,
      "learning_rate": 4.370909090909091e-06,
      "loss": 0.0004,
      "step": 3600
    },
    {
      "epoch": 5.971993410214168,
      "grad_norm": 0.005575309973210096,
      "learning_rate": 4.325454545454546e-06,
      "loss": 0.0001,
      "step": 3625
    },
    {
      "epoch": 6.013179571663921,
      "grad_norm": 0.005467335693538189,
      "learning_rate": 4.2800000000000005e-06,
      "loss": 0.0003,
      "step": 3650
    },
    {
      "epoch": 6.0543657331136735,
      "grad_norm": 0.0050591155886650085,
      "learning_rate": 4.234545454545455e-06,
      "loss": 0.0001,
      "step": 3675
    },
    {
      "epoch": 6.095551894563426,
      "grad_norm": 0.004481570329517126,
      "learning_rate": 4.18909090909091e-06,
      "loss": 0.0001,
      "step": 3700
    },
    {
      "epoch": 6.136738056013179,
      "grad_norm": 0.002914627082645893,
      "learning_rate": 4.143636363636364e-06,
      "loss": 0.0001,
      "step": 3725
    },
    {
      "epoch": 6.177924217462932,
      "grad_norm": 0.004947616718709469,
      "learning_rate": 4.098181818181818e-06,
      "loss": 0.0001,
      "step": 3750
    },
    {
      "epoch": 6.219110378912685,
      "grad_norm": 0.003795625874772668,
      "learning_rate": 4.0527272727272735e-06,
      "loss": 0.0001,
      "step": 3775
    },
    {
      "epoch": 6.260296540362438,
      "grad_norm": 0.025668440386652946,
      "learning_rate": 4.007272727272727e-06,
      "loss": 0.0001,
      "step": 3800
    },
    {
      "epoch": 6.3014827018121915,
      "grad_norm": 0.01409939769655466,
      "learning_rate": 3.961818181818182e-06,
      "loss": 0.0001,
      "step": 3825
    },
    {
      "epoch": 6.342668863261944,
      "grad_norm": 0.0054998998530209064,
      "learning_rate": 3.9163636363636364e-06,
      "loss": 0.0001,
      "step": 3850
    },
    {
      "epoch": 6.383855024711697,
      "grad_norm": 0.0049223750829696655,
      "learning_rate": 3.870909090909091e-06,
      "loss": 0.0001,
      "step": 3875
    },
    {
      "epoch": 6.42504118616145,
      "grad_norm": 0.0028136519249528646,
      "learning_rate": 3.825454545454546e-06,
      "loss": 0.0001,
      "step": 3900
    },
    {
      "epoch": 6.466227347611203,
      "grad_norm": 0.005505116190761328,
      "learning_rate": 3.7800000000000002e-06,
      "loss": 0.0001,
      "step": 3925
    },
    {
      "epoch": 6.507413509060956,
      "grad_norm": 0.007019472308456898,
      "learning_rate": 3.7345454545454544e-06,
      "loss": 0.0001,
      "step": 3950
    },
    {
      "epoch": 6.548599670510709,
      "grad_norm": 0.006632586009800434,
      "learning_rate": 3.6890909090909094e-06,
      "loss": 0.0001,
      "step": 3975
    },
    {
      "epoch": 6.589785831960461,
      "grad_norm": 0.002231697551906109,
      "learning_rate": 3.643636363636364e-06,
      "loss": 0.0001,
      "step": 4000
    },
    {
      "epoch": 6.589785831960461,
      "eval_loss": 0.013741781935095787,
      "eval_runtime": 1710.3123,
      "eval_samples_per_second": 1.419,
      "eval_steps_per_second": 0.178,
      "eval_wer": 2.532432222481162,
      "step": 4000
    },
    {
      "epoch": 6.630971993410214,
      "grad_norm": 0.008344273082911968,
      "learning_rate": 3.5981818181818186e-06,
      "loss": 0.0,
      "step": 4025
    },
    {
      "epoch": 6.672158154859967,
      "grad_norm": 0.0035063326358795166,
      "learning_rate": 3.552727272727273e-06,
      "loss": 0.0,
      "step": 4050
    },
    {
      "epoch": 6.71334431630972,
      "grad_norm": 0.007493205368518829,
      "learning_rate": 3.5072727272727274e-06,
      "loss": 0.0002,
      "step": 4075
    },
    {
      "epoch": 6.754530477759473,
      "grad_norm": 0.013130983337759972,
      "learning_rate": 3.4618181818181824e-06,
      "loss": 0.0001,
      "step": 4100
    },
    {
      "epoch": 6.795716639209226,
      "grad_norm": 0.022349372506141663,
      "learning_rate": 3.4163636363636366e-06,
      "loss": 0.0001,
      "step": 4125
    },
    {
      "epoch": 6.8369028006589785,
      "grad_norm": 0.004155050031840801,
      "learning_rate": 3.370909090909091e-06,
      "loss": 0.0001,
      "step": 4150
    },
    {
      "epoch": 6.878088962108731,
      "grad_norm": 0.004423918202519417,
      "learning_rate": 3.3254545454545458e-06,
      "loss": 0.0001,
      "step": 4175
    },
    {
      "epoch": 6.919275123558484,
      "grad_norm": 0.010945485904812813,
      "learning_rate": 3.2800000000000004e-06,
      "loss": 0.0001,
      "step": 4200
    },
    {
      "epoch": 6.960461285008237,
      "grad_norm": 0.00635559344664216,
      "learning_rate": 3.2345454545454545e-06,
      "loss": 0.0001,
      "step": 4225
    },
    {
      "epoch": 7.00164744645799,
      "grad_norm": 0.0034311157651245594,
      "learning_rate": 3.1890909090909096e-06,
      "loss": 0.0003,
      "step": 4250
    },
    {
      "epoch": 7.042833607907743,
      "grad_norm": 0.01130327396094799,
      "learning_rate": 3.1436363636363637e-06,
      "loss": 0.0,
      "step": 4275
    },
    {
      "epoch": 7.0840197693574956,
      "grad_norm": 0.004722132347524166,
      "learning_rate": 3.0981818181818183e-06,
      "loss": 0.0,
      "step": 4300
    },
    {
      "epoch": 7.125205930807248,
      "grad_norm": 0.004732927773147821,
      "learning_rate": 3.052727272727273e-06,
      "loss": 0.0002,
      "step": 4325
    },
    {
      "epoch": 7.166392092257001,
      "grad_norm": 0.0030134401749819517,
      "learning_rate": 3.0072727272727275e-06,
      "loss": 0.0,
      "step": 4350
    },
    {
      "epoch": 7.207578253706755,
      "grad_norm": 0.002724016085267067,
      "learning_rate": 2.9618181818181817e-06,
      "loss": 0.0001,
      "step": 4375
    },
    {
      "epoch": 7.248764415156508,
      "grad_norm": 0.004742628429085016,
      "learning_rate": 2.9163636363636367e-06,
      "loss": 0.0001,
      "step": 4400
    },
    {
      "epoch": 7.289950576606261,
      "grad_norm": 0.009233110584318638,
      "learning_rate": 2.870909090909091e-06,
      "loss": 0.0001,
      "step": 4425
    },
    {
      "epoch": 7.3311367380560135,
      "grad_norm": 0.004101255908608437,
      "learning_rate": 2.8254545454545455e-06,
      "loss": 0.0,
      "step": 4450
    },
    {
      "epoch": 7.372322899505766,
      "grad_norm": 0.0037873326800763607,
      "learning_rate": 2.7800000000000005e-06,
      "loss": 0.0001,
      "step": 4475
    },
    {
      "epoch": 7.413509060955519,
      "grad_norm": 0.015564778819680214,
      "learning_rate": 2.7345454545454547e-06,
      "loss": 0.0001,
      "step": 4500
    },
    {
      "epoch": 7.413509060955519,
      "eval_loss": 0.013747855089604855,
      "eval_runtime": 1717.6068,
      "eval_samples_per_second": 1.413,
      "eval_steps_per_second": 0.177,
      "eval_wer": 2.5013594344752583,
      "step": 4500
    },
    {
      "epoch": 7.454695222405272,
      "grad_norm": 0.0023361605126410723,
      "learning_rate": 2.6890909090909097e-06,
      "loss": 0.0002,
      "step": 4525
    },
    {
      "epoch": 7.495881383855025,
      "grad_norm": 0.0026130119804292917,
      "learning_rate": 2.643636363636364e-06,
      "loss": 0.0001,
      "step": 4550
    },
    {
      "epoch": 7.537067545304778,
      "grad_norm": 0.005539346020668745,
      "learning_rate": 2.5981818181818184e-06,
      "loss": 0.0001,
      "step": 4575
    },
    {
      "epoch": 7.578253706754531,
      "grad_norm": 0.00382342585362494,
      "learning_rate": 2.552727272727273e-06,
      "loss": 0.0001,
      "step": 4600
    },
    {
      "epoch": 7.619439868204283,
      "grad_norm": 0.0032592774368822575,
      "learning_rate": 2.5072727272727276e-06,
      "loss": 0.0001,
      "step": 4625
    },
    {
      "epoch": 7.660626029654036,
      "grad_norm": 0.003578683128580451,
      "learning_rate": 2.461818181818182e-06,
      "loss": 0.0001,
      "step": 4650
    },
    {
      "epoch": 7.701812191103789,
      "grad_norm": 0.006414057221263647,
      "learning_rate": 2.416363636363637e-06,
      "loss": 0.0,
      "step": 4675
    },
    {
      "epoch": 7.742998352553542,
      "grad_norm": 0.0032208007760345936,
      "learning_rate": 2.3709090909090914e-06,
      "loss": 0.0,
      "step": 4700
    },
    {
      "epoch": 7.784184514003295,
      "grad_norm": 0.0016529737040400505,
      "learning_rate": 2.3254545454545456e-06,
      "loss": 0.0,
      "step": 4725
    },
    {
      "epoch": 7.825370675453048,
      "grad_norm": 0.002902130363509059,
      "learning_rate": 2.28e-06,
      "loss": 0.0001,
      "step": 4750
    },
    {
      "epoch": 7.8665568369028005,
      "grad_norm": 0.002918258076533675,
      "learning_rate": 2.234545454545455e-06,
      "loss": 0.0,
      "step": 4775
    },
    {
      "epoch": 7.907742998352553,
      "grad_norm": 0.0021400710102170706,
      "learning_rate": 2.1890909090909094e-06,
      "loss": 0.0,
      "step": 4800
    },
    {
      "epoch": 7.948929159802306,
      "grad_norm": 0.0016904472140595317,
      "learning_rate": 2.143636363636364e-06,
      "loss": 0.0,
      "step": 4825
    },
    {
      "epoch": 7.990115321252059,
      "grad_norm": 0.006286232266575098,
      "learning_rate": 2.0981818181818186e-06,
      "loss": 0.0002,
      "step": 4850
    },
    {
      "epoch": 8.031301482701812,
      "grad_norm": 0.004598398692905903,
      "learning_rate": 2.0527272727272727e-06,
      "loss": 0.0,
      "step": 4875
    },
    {
      "epoch": 8.072487644151565,
      "grad_norm": 0.0023243261966854334,
      "learning_rate": 2.0072727272727273e-06,
      "loss": 0.0,
      "step": 4900
    },
    {
      "epoch": 8.113673805601318,
      "grad_norm": 0.0025131104048341513,
      "learning_rate": 1.961818181818182e-06,
      "loss": 0.0,
      "step": 4925
    },
    {
      "epoch": 8.15485996705107,
      "grad_norm": 0.005834953393787146,
      "learning_rate": 1.9163636363636365e-06,
      "loss": 0.0003,
      "step": 4950
    },
    {
      "epoch": 8.196046128500823,
      "grad_norm": 0.005762421526014805,
      "learning_rate": 1.870909090909091e-06,
      "loss": 0.0001,
      "step": 4975
    },
    {
      "epoch": 8.237232289950576,
      "grad_norm": 0.002938208868727088,
      "learning_rate": 1.8254545454545455e-06,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 8.237232289950576,
      "eval_loss": 0.013800903223454952,
      "eval_runtime": 1718.2773,
      "eval_samples_per_second": 1.412,
      "eval_steps_per_second": 0.177,
      "eval_wer": 2.602345995494446,
      "step": 5000
    }
  ],
  "logging_steps": 25,
  "max_steps": 6000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "total_flos": 2.307759729278976e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}