{
  "best_metric": 3.24710634661695,
  "best_model_checkpoint": "./final-whisper-for-initial-publish-v2/checkpoint-2000",
  "epoch": 3.2948929159802307,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04118616144975288,
      "grad_norm": 67.08155822753906,
      "learning_rate": 4.2000000000000006e-07,
      "loss": 2.8368,
      "step": 25
    },
    {
      "epoch": 0.08237232289950576,
      "grad_norm": 17.44784164428711,
      "learning_rate": 9.200000000000001e-07,
      "loss": 2.0282,
      "step": 50
    },
    {
      "epoch": 0.12355848434925865,
      "grad_norm": 12.481098175048828,
      "learning_rate": 1.42e-06,
      "loss": 1.3152,
      "step": 75
    },
    {
      "epoch": 0.16474464579901152,
      "grad_norm": 8.900260925292969,
      "learning_rate": 1.9200000000000003e-06,
      "loss": 0.7248,
      "step": 100
    },
    {
      "epoch": 0.20593080724876442,
      "grad_norm": 7.8956146240234375,
      "learning_rate": 2.42e-06,
      "loss": 0.5731,
      "step": 125
    },
    {
      "epoch": 0.2471169686985173,
      "grad_norm": 9.542287826538086,
      "learning_rate": 2.92e-06,
      "loss": 0.474,
      "step": 150
    },
    {
      "epoch": 0.2883031301482702,
      "grad_norm": 6.937229633331299,
      "learning_rate": 3.4200000000000007e-06,
      "loss": 0.4301,
      "step": 175
    },
    {
      "epoch": 0.32948929159802304,
      "grad_norm": 6.919783115386963,
      "learning_rate": 3.920000000000001e-06,
      "loss": 0.351,
      "step": 200
    },
    {
      "epoch": 0.37067545304777594,
      "grad_norm": 6.427859783172607,
      "learning_rate": 4.42e-06,
      "loss": 0.2726,
      "step": 225
    },
    {
      "epoch": 0.41186161449752884,
      "grad_norm": 5.83529806137085,
      "learning_rate": 4.92e-06,
      "loss": 0.1663,
      "step": 250
    },
    {
      "epoch": 0.45304777594728174,
      "grad_norm": 2.4859721660614014,
      "learning_rate": 5.420000000000001e-06,
      "loss": 0.0893,
      "step": 275
    },
    {
      "epoch": 0.4942339373970346,
      "grad_norm": 3.2526772022247314,
      "learning_rate": 5.92e-06,
      "loss": 0.076,
      "step": 300
    },
    {
      "epoch": 0.5354200988467874,
      "grad_norm": 4.417870044708252,
      "learning_rate": 6.42e-06,
      "loss": 0.0764,
      "step": 325
    },
    {
      "epoch": 0.5766062602965404,
      "grad_norm": 3.149733781814575,
      "learning_rate": 6.92e-06,
      "loss": 0.0679,
      "step": 350
    },
    {
      "epoch": 0.6177924217462932,
      "grad_norm": 3.8823468685150146,
      "learning_rate": 7.420000000000001e-06,
      "loss": 0.0525,
      "step": 375
    },
    {
      "epoch": 0.6589785831960461,
      "grad_norm": 6.1009745597839355,
      "learning_rate": 7.92e-06,
      "loss": 0.0552,
      "step": 400
    },
    {
      "epoch": 0.700164744645799,
      "grad_norm": 5.714720726013184,
      "learning_rate": 8.42e-06,
      "loss": 0.0529,
      "step": 425
    },
    {
      "epoch": 0.7413509060955519,
      "grad_norm": 4.403444290161133,
      "learning_rate": 8.920000000000001e-06,
      "loss": 0.0472,
      "step": 450
    },
    {
      "epoch": 0.7825370675453048,
      "grad_norm": 3.7249884605407715,
      "learning_rate": 9.42e-06,
      "loss": 0.0424,
      "step": 475
    },
    {
      "epoch": 0.8237232289950577,
      "grad_norm": 3.8490548133850098,
      "learning_rate": 9.920000000000002e-06,
      "loss": 0.0461,
      "step": 500
    },
    {
      "epoch": 0.8237232289950577,
      "eval_loss": 0.04240331053733826,
      "eval_runtime": 1653.3644,
      "eval_samples_per_second": 1.468,
      "eval_steps_per_second": 0.184,
      "eval_wer": 11.04637613609881,
      "step": 500
    },
    {
      "epoch": 0.8649093904448105,
      "grad_norm": 2.538515329360962,
      "learning_rate": 9.961818181818183e-06,
      "loss": 0.0384,
      "step": 525
    },
    {
      "epoch": 0.9060955518945635,
      "grad_norm": 1.6598188877105713,
      "learning_rate": 9.916363636363637e-06,
      "loss": 0.0375,
      "step": 550
    },
    {
      "epoch": 0.9472817133443163,
      "grad_norm": 3.256746292114258,
      "learning_rate": 9.870909090909092e-06,
      "loss": 0.0346,
      "step": 575
    },
    {
      "epoch": 0.9884678747940692,
      "grad_norm": 3.04457426071167,
      "learning_rate": 9.825454545454546e-06,
      "loss": 0.034,
      "step": 600
    },
    {
      "epoch": 1.029654036243822,
      "grad_norm": 0.6082770228385925,
      "learning_rate": 9.780000000000001e-06,
      "loss": 0.0272,
      "step": 625
    },
    {
      "epoch": 1.0708401976935749,
      "grad_norm": 1.6969329118728638,
      "learning_rate": 9.734545454545455e-06,
      "loss": 0.0206,
      "step": 650
    },
    {
      "epoch": 1.112026359143328,
      "grad_norm": 1.9652377367019653,
      "learning_rate": 9.68909090909091e-06,
      "loss": 0.0197,
      "step": 675
    },
    {
      "epoch": 1.1532125205930808,
      "grad_norm": 2.402329206466675,
      "learning_rate": 9.643636363636364e-06,
      "loss": 0.0221,
      "step": 700
    },
    {
      "epoch": 1.1943986820428336,
      "grad_norm": 3.167133092880249,
      "learning_rate": 9.598181818181818e-06,
      "loss": 0.0176,
      "step": 725
    },
    {
      "epoch": 1.2355848434925865,
      "grad_norm": 0.4075870215892792,
      "learning_rate": 9.552727272727273e-06,
      "loss": 0.0165,
      "step": 750
    },
    {
      "epoch": 1.2767710049423393,
      "grad_norm": 2.739176034927368,
      "learning_rate": 9.507272727272729e-06,
      "loss": 0.0156,
      "step": 775
    },
    {
      "epoch": 1.3179571663920924,
      "grad_norm": 2.4662702083587646,
      "learning_rate": 9.461818181818183e-06,
      "loss": 0.0133,
      "step": 800
    },
    {
      "epoch": 1.3591433278418452,
      "grad_norm": 2.955909252166748,
      "learning_rate": 9.416363636363636e-06,
      "loss": 0.0184,
      "step": 825
    },
    {
      "epoch": 1.400329489291598,
      "grad_norm": 2.0838463306427,
      "learning_rate": 9.370909090909092e-06,
      "loss": 0.0138,
      "step": 850
    },
    {
      "epoch": 1.441515650741351,
      "grad_norm": 2.2245779037475586,
      "learning_rate": 9.325454545454547e-06,
      "loss": 0.0132,
      "step": 875
    },
    {
      "epoch": 1.4827018121911038,
      "grad_norm": 2.146961212158203,
      "learning_rate": 9.280000000000001e-06,
      "loss": 0.0126,
      "step": 900
    },
    {
      "epoch": 1.5238879736408566,
      "grad_norm": 1.955930471420288,
      "learning_rate": 9.234545454545455e-06,
      "loss": 0.0111,
      "step": 925
    },
    {
      "epoch": 1.5650741350906094,
      "grad_norm": 2.04664945602417,
      "learning_rate": 9.18909090909091e-06,
      "loss": 0.0118,
      "step": 950
    },
    {
      "epoch": 1.6062602965403623,
      "grad_norm": 2.1349849700927734,
      "learning_rate": 9.143636363636364e-06,
      "loss": 0.0116,
      "step": 975
    },
    {
      "epoch": 1.6474464579901154,
      "grad_norm": 2.336024761199951,
      "learning_rate": 9.09818181818182e-06,
      "loss": 0.0124,
      "step": 1000
    },
    {
      "epoch": 1.6474464579901154,
      "eval_loss": 0.02211674302816391,
      "eval_runtime": 1633.7086,
      "eval_samples_per_second": 1.486,
      "eval_steps_per_second": 0.186,
      "eval_wer": 5.11147362697118,
      "step": 1000
    },
    {
      "epoch": 1.6886326194398682,
      "grad_norm": 0.4564249813556671,
      "learning_rate": 9.052727272727273e-06,
      "loss": 0.0119,
      "step": 1025
    },
    {
      "epoch": 1.729818780889621,
      "grad_norm": 1.9494842290878296,
      "learning_rate": 9.007272727272729e-06,
      "loss": 0.0114,
      "step": 1050
    },
    {
      "epoch": 1.771004942339374,
      "grad_norm": 1.7129623889923096,
      "learning_rate": 8.961818181818182e-06,
      "loss": 0.0116,
      "step": 1075
    },
    {
      "epoch": 1.812191103789127,
      "grad_norm": 0.8837174773216248,
      "learning_rate": 8.916363636363638e-06,
      "loss": 0.01,
      "step": 1100
    },
    {
      "epoch": 1.8533772652388798,
      "grad_norm": 3.1779348850250244,
      "learning_rate": 8.870909090909091e-06,
      "loss": 0.0087,
      "step": 1125
    },
    {
      "epoch": 1.8945634266886326,
      "grad_norm": 1.1833815574645996,
      "learning_rate": 8.825454545454545e-06,
      "loss": 0.0122,
      "step": 1150
    },
    {
      "epoch": 1.9357495881383855,
      "grad_norm": 2.3456814289093018,
      "learning_rate": 8.78e-06,
      "loss": 0.0089,
      "step": 1175
    },
    {
      "epoch": 1.9769357495881383,
      "grad_norm": 1.5834710597991943,
      "learning_rate": 8.734545454545456e-06,
      "loss": 0.0111,
      "step": 1200
    },
    {
      "epoch": 2.018121911037891,
      "grad_norm": 1.1926366090774536,
      "learning_rate": 8.68909090909091e-06,
      "loss": 0.0069,
      "step": 1225
    },
    {
      "epoch": 2.059308072487644,
      "grad_norm": 0.5671884417533875,
      "learning_rate": 8.643636363636364e-06,
      "loss": 0.0043,
      "step": 1250
    },
    {
      "epoch": 2.100494233937397,
      "grad_norm": 0.8897690176963806,
      "learning_rate": 8.598181818181819e-06,
      "loss": 0.0034,
      "step": 1275
    },
    {
      "epoch": 2.1416803953871497,
      "grad_norm": 1.6989620923995972,
      "learning_rate": 8.552727272727274e-06,
      "loss": 0.003,
      "step": 1300
    },
    {
      "epoch": 2.182866556836903,
      "grad_norm": 2.0327303409576416,
      "learning_rate": 8.507272727272728e-06,
      "loss": 0.0037,
      "step": 1325
    },
    {
      "epoch": 2.224052718286656,
      "grad_norm": 0.6554267406463623,
      "learning_rate": 8.461818181818182e-06,
      "loss": 0.0034,
      "step": 1350
    },
    {
      "epoch": 2.2652388797364087,
      "grad_norm": 0.21176663041114807,
      "learning_rate": 8.416363636363637e-06,
      "loss": 0.0046,
      "step": 1375
    },
    {
      "epoch": 2.3064250411861615,
      "grad_norm": 1.2915725708007812,
      "learning_rate": 8.370909090909091e-06,
      "loss": 0.0026,
      "step": 1400
    },
    {
      "epoch": 2.3476112026359144,
      "grad_norm": 0.15533868968486786,
      "learning_rate": 8.325454545454547e-06,
      "loss": 0.0043,
      "step": 1425
    },
    {
      "epoch": 2.3887973640856672,
      "grad_norm": 0.19825848937034607,
      "learning_rate": 8.28e-06,
      "loss": 0.0042,
      "step": 1450
    },
    {
      "epoch": 2.42998352553542,
      "grad_norm": 0.14249640703201294,
      "learning_rate": 8.234545454545456e-06,
      "loss": 0.0023,
      "step": 1475
    },
    {
      "epoch": 2.471169686985173,
      "grad_norm": 0.12349322438240051,
      "learning_rate": 8.18909090909091e-06,
      "loss": 0.0026,
      "step": 1500
    },
    {
      "epoch": 2.471169686985173,
      "eval_loss": 0.015178721398115158,
      "eval_runtime": 1647.8725,
      "eval_samples_per_second": 1.473,
      "eval_steps_per_second": 0.184,
      "eval_wer": 3.5966752116833685,
      "step": 1500
    },
    {
      "epoch": 2.5123558484349258,
      "grad_norm": 0.24572087824344635,
      "learning_rate": 8.143636363636365e-06,
      "loss": 0.0032,
      "step": 1525
    },
    {
      "epoch": 2.5535420098846786,
      "grad_norm": 0.2075294703245163,
      "learning_rate": 8.098181818181819e-06,
      "loss": 0.0023,
      "step": 1550
    },
    {
      "epoch": 2.594728171334432,
      "grad_norm": 0.264713317155838,
      "learning_rate": 8.052727272727272e-06,
      "loss": 0.0044,
      "step": 1575
    },
    {
      "epoch": 2.6359143327841847,
      "grad_norm": 1.9720451831817627,
      "learning_rate": 8.007272727272728e-06,
      "loss": 0.0048,
      "step": 1600
    },
    {
      "epoch": 2.6771004942339376,
      "grad_norm": 0.7824451327323914,
      "learning_rate": 7.961818181818183e-06,
      "loss": 0.0048,
      "step": 1625
    },
    {
      "epoch": 2.7182866556836904,
      "grad_norm": 0.681707501411438,
      "learning_rate": 7.916363636363637e-06,
      "loss": 0.0036,
      "step": 1650
    },
    {
      "epoch": 2.7594728171334433,
      "grad_norm": 0.39950481057167053,
      "learning_rate": 7.870909090909091e-06,
      "loss": 0.0047,
      "step": 1675
    },
    {
      "epoch": 2.800658978583196,
      "grad_norm": 0.7777596712112427,
      "learning_rate": 7.825454545454546e-06,
      "loss": 0.0029,
      "step": 1700
    },
    {
      "epoch": 2.841845140032949,
      "grad_norm": 0.06510231643915176,
      "learning_rate": 7.78e-06,
      "loss": 0.0067,
      "step": 1725
    },
    {
      "epoch": 2.883031301482702,
      "grad_norm": 2.2729947566986084,
      "learning_rate": 7.734545454545455e-06,
      "loss": 0.0024,
      "step": 1750
    },
    {
      "epoch": 2.9242174629324547,
      "grad_norm": 1.6635749340057373,
      "learning_rate": 7.68909090909091e-06,
      "loss": 0.0035,
      "step": 1775
    },
    {
      "epoch": 2.9654036243822075,
      "grad_norm": 0.06815456598997116,
      "learning_rate": 7.643636363636365e-06,
      "loss": 0.004,
      "step": 1800
    },
    {
      "epoch": 3.0065897858319603,
      "grad_norm": 1.5607892274856567,
      "learning_rate": 7.598181818181819e-06,
      "loss": 0.0046,
      "step": 1825
    },
    {
      "epoch": 3.047775947281713,
      "grad_norm": 0.8395469784736633,
      "learning_rate": 7.552727272727274e-06,
      "loss": 0.0018,
      "step": 1850
    },
    {
      "epoch": 3.088962108731466,
      "grad_norm": 0.04032081738114357,
      "learning_rate": 7.507272727272728e-06,
      "loss": 0.0011,
      "step": 1875
    },
    {
      "epoch": 3.130148270181219,
      "grad_norm": 0.3644678294658661,
      "learning_rate": 7.461818181818182e-06,
      "loss": 0.0014,
      "step": 1900
    },
    {
      "epoch": 3.171334431630972,
      "grad_norm": 0.35729554295539856,
      "learning_rate": 7.416363636363637e-06,
      "loss": 0.0009,
      "step": 1925
    },
    {
      "epoch": 3.212520593080725,
      "grad_norm": 0.05964287370443344,
      "learning_rate": 7.370909090909092e-06,
      "loss": 0.0014,
      "step": 1950
    },
    {
      "epoch": 3.253706754530478,
      "grad_norm": 0.799541711807251,
      "learning_rate": 7.325454545454546e-06,
      "loss": 0.0015,
      "step": 1975
    },
    {
      "epoch": 3.2948929159802307,
      "grad_norm": 0.058581918478012085,
      "learning_rate": 7.280000000000001e-06,
      "loss": 0.0008,
      "step": 2000
    },
    {
      "epoch": 3.2948929159802307,
      "eval_loss": 0.015332825481891632,
      "eval_runtime": 1629.67,
      "eval_samples_per_second": 1.489,
      "eval_steps_per_second": 0.187,
      "eval_wer": 3.24710634661695,
      "step": 2000
    }
  ],
  "logging_steps": 25,
  "max_steps": 6000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 1000,
  "total_flos": 9.23126978543616e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}