{
"best_metric": 0.5737143158912659,
"best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-toigen-female-model/checkpoint-400",
"epoch": 8.0,
"eval_steps": 200,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20080321285140562,
"grad_norm": 61.12749481201172,
"learning_rate": 4.2000000000000006e-07,
"loss": 6.6534,
"step": 25
},
{
"epoch": 0.40160642570281124,
"grad_norm": 43.05365753173828,
"learning_rate": 9.200000000000001e-07,
"loss": 5.1469,
"step": 50
},
{
"epoch": 0.6024096385542169,
"grad_norm": 38.88462448120117,
"learning_rate": 1.42e-06,
"loss": 3.6082,
"step": 75
},
{
"epoch": 0.8032128514056225,
"grad_norm": 33.215606689453125,
"learning_rate": 1.9200000000000003e-06,
"loss": 2.5276,
"step": 100
},
{
"epoch": 1.0,
"grad_norm": 24.052446365356445,
"learning_rate": 2.42e-06,
"loss": 2.0062,
"step": 125
},
{
"epoch": 1.2008032128514057,
"grad_norm": 31.749963760375977,
"learning_rate": 2.92e-06,
"loss": 1.5108,
"step": 150
},
{
"epoch": 1.4016064257028114,
"grad_norm": 26.5882625579834,
"learning_rate": 3.4200000000000007e-06,
"loss": 1.35,
"step": 175
},
{
"epoch": 1.6024096385542168,
"grad_norm": 24.725419998168945,
"learning_rate": 3.920000000000001e-06,
"loss": 1.3083,
"step": 200
},
{
"epoch": 1.6024096385542168,
"eval_loss": 0.6686530113220215,
"eval_runtime": 103.3468,
"eval_samples_per_second": 2.138,
"eval_steps_per_second": 0.542,
"eval_wer": 0.5309236947791165,
"step": 200
},
{
"epoch": 1.8032128514056225,
"grad_norm": 23.32423973083496,
"learning_rate": 4.42e-06,
"loss": 1.2009,
"step": 225
},
{
"epoch": 2.0,
"grad_norm": 23.683210372924805,
"learning_rate": 4.92e-06,
"loss": 1.2218,
"step": 250
},
{
"epoch": 2.2008032128514055,
"grad_norm": 17.880393981933594,
"learning_rate": 5.420000000000001e-06,
"loss": 0.7736,
"step": 275
},
{
"epoch": 2.4016064257028114,
"grad_norm": 16.88402557373047,
"learning_rate": 5.92e-06,
"loss": 0.676,
"step": 300
},
{
"epoch": 2.602409638554217,
"grad_norm": 19.675437927246094,
"learning_rate": 6.42e-06,
"loss": 0.744,
"step": 325
},
{
"epoch": 2.8032128514056227,
"grad_norm": 24.291088104248047,
"learning_rate": 6.92e-06,
"loss": 0.7339,
"step": 350
},
{
"epoch": 3.0,
"grad_norm": 11.682101249694824,
"learning_rate": 7.420000000000001e-06,
"loss": 0.7098,
"step": 375
},
{
"epoch": 3.2008032128514055,
"grad_norm": 10.226051330566406,
"learning_rate": 7.92e-06,
"loss": 0.3381,
"step": 400
},
{
"epoch": 3.2008032128514055,
"eval_loss": 0.5737143158912659,
"eval_runtime": 102.7558,
"eval_samples_per_second": 2.151,
"eval_steps_per_second": 0.545,
"eval_wer": 0.44457831325301206,
"step": 400
},
{
"epoch": 3.4016064257028114,
"grad_norm": 16.29640769958496,
"learning_rate": 8.42e-06,
"loss": 0.3465,
"step": 425
},
{
"epoch": 3.602409638554217,
"grad_norm": 16.958227157592773,
"learning_rate": 8.920000000000001e-06,
"loss": 0.3707,
"step": 450
},
{
"epoch": 3.8032128514056227,
"grad_norm": 20.855695724487305,
"learning_rate": 9.42e-06,
"loss": 0.445,
"step": 475
},
{
"epoch": 4.0,
"grad_norm": 10.269837379455566,
"learning_rate": 9.920000000000002e-06,
"loss": 0.4447,
"step": 500
},
{
"epoch": 4.2008032128514055,
"grad_norm": 15.100114822387695,
"learning_rate": 9.953333333333333e-06,
"loss": 0.1814,
"step": 525
},
{
"epoch": 4.401606425702811,
"grad_norm": 13.603893280029297,
"learning_rate": 9.89777777777778e-06,
"loss": 0.2124,
"step": 550
},
{
"epoch": 4.602409638554217,
"grad_norm": 13.522416114807129,
"learning_rate": 9.842222222222223e-06,
"loss": 0.2537,
"step": 575
},
{
"epoch": 4.803212851405623,
"grad_norm": 17.89769172668457,
"learning_rate": 9.786666666666667e-06,
"loss": 0.2639,
"step": 600
},
{
"epoch": 4.803212851405623,
"eval_loss": 0.6059707999229431,
"eval_runtime": 102.8339,
"eval_samples_per_second": 2.149,
"eval_steps_per_second": 0.545,
"eval_wer": 0.42971887550200805,
"step": 600
},
{
"epoch": 5.0,
"grad_norm": 6.560233116149902,
"learning_rate": 9.731111111111113e-06,
"loss": 0.2428,
"step": 625
},
{
"epoch": 5.2008032128514055,
"grad_norm": 7.097219467163086,
"learning_rate": 9.675555555555555e-06,
"loss": 0.1016,
"step": 650
},
{
"epoch": 5.401606425702811,
"grad_norm": 14.023282051086426,
"learning_rate": 9.620000000000001e-06,
"loss": 0.1447,
"step": 675
},
{
"epoch": 5.602409638554217,
"grad_norm": 8.226017951965332,
"learning_rate": 9.564444444444445e-06,
"loss": 0.1111,
"step": 700
},
{
"epoch": 5.803212851405623,
"grad_norm": 11.734825134277344,
"learning_rate": 9.508888888888889e-06,
"loss": 0.1351,
"step": 725
},
{
"epoch": 6.0,
"grad_norm": 9.586465835571289,
"learning_rate": 9.453333333333335e-06,
"loss": 0.1444,
"step": 750
},
{
"epoch": 6.2008032128514055,
"grad_norm": 7.064974784851074,
"learning_rate": 9.397777777777779e-06,
"loss": 0.0732,
"step": 775
},
{
"epoch": 6.401606425702811,
"grad_norm": 7.27360725402832,
"learning_rate": 9.342222222222223e-06,
"loss": 0.0831,
"step": 800
},
{
"epoch": 6.401606425702811,
"eval_loss": 0.6426535844802856,
"eval_runtime": 103.0495,
"eval_samples_per_second": 2.145,
"eval_steps_per_second": 0.543,
"eval_wer": 0.40321285140562246,
"step": 800
},
{
"epoch": 6.602409638554217,
"grad_norm": 5.486606597900391,
"learning_rate": 9.286666666666667e-06,
"loss": 0.0764,
"step": 825
},
{
"epoch": 6.803212851405623,
"grad_norm": 11.913525581359863,
"learning_rate": 9.231111111111111e-06,
"loss": 0.0753,
"step": 850
},
{
"epoch": 7.0,
"grad_norm": 4.659058094024658,
"learning_rate": 9.175555555555557e-06,
"loss": 0.1132,
"step": 875
},
{
"epoch": 7.2008032128514055,
"grad_norm": 2.5287861824035645,
"learning_rate": 9.12e-06,
"loss": 0.0583,
"step": 900
},
{
"epoch": 7.401606425702811,
"grad_norm": 3.8375866413116455,
"learning_rate": 9.064444444444447e-06,
"loss": 0.0513,
"step": 925
},
{
"epoch": 7.602409638554217,
"grad_norm": 10.924978256225586,
"learning_rate": 9.008888888888889e-06,
"loss": 0.0674,
"step": 950
},
{
"epoch": 7.803212851405623,
"grad_norm": 7.799922943115234,
"learning_rate": 8.953333333333335e-06,
"loss": 0.0612,
"step": 975
},
{
"epoch": 8.0,
"grad_norm": 0.7825481295585632,
"learning_rate": 8.897777777777779e-06,
"loss": 0.0776,
"step": 1000
},
{
"epoch": 8.0,
"eval_loss": 0.6705393195152283,
"eval_runtime": 102.2471,
"eval_samples_per_second": 2.161,
"eval_steps_per_second": 0.548,
"eval_wer": 0.42088353413654617,
"step": 1000
},
{
"epoch": 8.0,
"step": 1000,
"total_flos": 8.1240150638592e+18,
"train_loss": 0.8661212004423141,
"train_runtime": 2219.5582,
"train_samples_per_second": 18.022,
"train_steps_per_second": 2.253
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 41,
"save_steps": 200,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.1240150638592e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
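
For reference, the early stop recorded above can be read straight out of the state: the best `eval_loss` (0.5737) is logged at step 400, and the three evaluations that follow (steps 600, 800 and 1000) fail to improve on it, which exhausts `early_stopping_patience=3` and sets `should_training_stop` to true well before `max_steps=5000`. Below is a minimal sketch, assuming the file is saved locally as `trainer_state.json` (the path is an assumption for illustration), showing how to inspect those fields with the standard library:

```python
# Minimal sketch: inspect the trainer_state.json shown above.
# The local filename "trainer_state.json" is an assumption.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Best checkpoint metadata is stored at the top level of the state.
print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Entries in log_history that carry "eval_loss" are evaluation logs.
# After step 400, eval_loss does not improve at steps 600, 800 or 1000,
# which is what triggers EarlyStoppingCallback (patience=3).
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry.get("eval_wer"))
```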