diff --git a/README.md b/README.md index 2d25bc1180786ad75137fc53f4953dcaba7cc32a..bcce57bd655f1af5a37e92423d0190eda9cb751c 100644 --- a/README.md +++ b/README.md @@ -9,18 +9,18 @@ model-index: -[Visualize in Weights & Biases](https://wandb.ai/priyanshipal/huggingface/runs/1kodfy70) +[Visualize in Weights & Biases](https://wandb.ai/priyanshipal/huggingface/runs/64250v6u) # s300_shuff100 This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - eval_loss: nan -- eval_model_preparation_time: 0.0046 +- eval_model_preparation_time: 0.0045 - eval_cer: 1.0 - eval_wer: 1.0 -- eval_runtime: 39.8895 -- eval_samples_per_second: 14.34 -- eval_steps_per_second: 0.902 +- eval_runtime: 30.9217 +- eval_samples_per_second: 18.498 +- eval_steps_per_second: 1.164 - step: 0 ## Model description diff --git a/all_results.json b/all_results.json index ef9d0d388fbb5dc8817cb631aa8c6e1ea3595c25..30d8f6a19f22f079636a9ca3d8e04c503d92e17d 100644 --- a/all_results.json +++ b/all_results.json @@ -2,11 +2,11 @@ "epoch": 1.6, "eval_cer": 1.0, "eval_loss": NaN, - "eval_model_preparation_time": 0.0046, - "eval_runtime": 39.8895, + "eval_model_preparation_time": 0.0045, + "eval_runtime": 30.9217, "eval_samples": 572, - "eval_samples_per_second": 14.34, - "eval_steps_per_second": 0.902, + "eval_samples_per_second": 18.498, + "eval_steps_per_second": 1.164, "eval_wer": 1.0, "total_flos": 6.212261523683712e+18, "train_loss": 3.21392811447382, diff --git a/eval_results.json b/eval_results.json index 496cc2bdb7bdae114b3ff63ab3ae18de2812e5df..77574def92c086f611fa4531ed8a185d8bec1411 100644 --- a/eval_results.json +++ b/eval_results.json @@ -1,10 +1,10 @@ { "eval_cer": 1.0, "eval_loss": NaN, - "eval_model_preparation_time": 0.0046, - "eval_runtime": 39.8895, + "eval_model_preparation_time": 0.0045, + "eval_runtime": 30.9217, "eval_samples": 572, - "eval_samples_per_second": 14.34, - "eval_steps_per_second": 0.902, + "eval_samples_per_second": 18.498, + "eval_steps_per_second": 1.164, "eval_wer": 1.0 } \ No newline at end of file diff --git a/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142383.out b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142383.out index fc70a3d587adb9c8c5f9c2ebda7fe3f615448bb1..1a0af1bc454074c867e3589dcdbad8fcc32fe574 100644 --- a/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142383.out +++ b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142383.out @@ -152,3 +152,43 @@ last prediction string eval_steps_per_second = 0.902 eval_wer = 1.0 training_args.bin: 0%| | 0.00/5.43k [00:00', 'eos_token': '', 'unk_token': '[UNK]', 'pad_token': '[PAD]'}, clean_up_tokenization_spaces=True), added_tokens_decoder={ + 147: AddedToken("[UNK]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 148: AddedToken("[PAD]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 149: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), + 150: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), +} +CHECK MODEL PARAMS Wav2Vec2ForCTC( + (wav2vec2): Wav2Vec2Model( + (feature_extractor): Wav2Vec2FeatureEncoder( + (conv_layers): ModuleList( + (0): Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(1, 512, kernel_size=(10,), stride=(5,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (1-4): 4 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 
512, kernel_size=(3,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (5-6): 2 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 512, kernel_size=(2,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + ) + ) + (feature_projection): Wav2Vec2FeatureProjection( + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (projection): Linear(in_features=512, out_features=1024, bias=True) + (dropout): Dropout(p=0.0, inplace=False) + ) + (encoder): Wav2Vec2EncoderStableLayerNorm( + (pos_conv_embed): Wav2Vec2PositionalConvEmbedding( + (conv): ParametrizedConv1d( + 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16 + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _WeightNorm() + ) + ) + ) + (padding): Wav2Vec2SamePadLayer() + (activation): GELUActivation() + ) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm( + (attention): Wav2Vec2SdpaAttention( + (k_proj): Linear(in_features=1024, out_features=1024, bias=True) + (v_proj): Linear(in_features=1024, out_features=1024, bias=True) + (q_proj): Linear(in_features=1024, out_features=1024, bias=True) + (out_proj): Linear(in_features=1024, out_features=1024, bias=True) + ) + (dropout): Dropout(p=0.0, inplace=False) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (feed_forward): Wav2Vec2FeedForward( + (intermediate_dropout): Dropout(p=0.0, inplace=False) + (intermediate_dense): Linear(in_features=1024, out_features=4096, bias=True) + (intermediate_act_fn): GELUActivation() + (output_dense): Linear(in_features=4096, out_features=1024, bias=True) + (output_dropout): Dropout(p=0.0, inplace=False) + ) + (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + ) + ) + ) + ) + (dropout): Dropout(p=0.0, inplace=False) + (lm_head): Linear(in_features=1024, out_features=151, bias=True) +) + preprocess datasets: 0%| | 0/572 [00:00 + main() + File "/scratch/elec/puhe/p/palp3/MUCS/eval_script_indicwav2vec.py", line 637, in main + print("check the eval set length", len(vectorized_datasets["eval"]["audio_id"])) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/datasets/arrow_dataset.py", line 2866, in __getitem__ + return self._getitem(key) + ^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/datasets/arrow_dataset.py", line 2850, in _getitem + pa_subtable = query_table(self._data, key, indices=self._indices) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/datasets/formatting/formatting.py", line 584, in query_table + _check_valid_column_key(key, table.column_names) + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/datasets/formatting/formatting.py", line 521, in _check_valid_column_key + raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}") +KeyError: "Column audio_id not in the dataset. 
Current columns in the dataset: ['input_values', 'input_length', 'labels']" +wandb: - 0.011 MB of 0.011 MB uploaded wandb: \ 0.011 MB of 0.028 MB uploaded wandb: 🚀 View run eval_pd2000_s300_shuff100_hindi at: https://wandb.ai/priyanshipal/huggingface/runs/2b363w6i +wandb: ⭐️ View project at: https://wandb.ai/priyanshipal/huggingface +wandb: Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s) +wandb: Find logs at: ./wandb/run-20240822_151437-2b363w6i/logs +wandb: WARNING The new W&B backend becomes opt-out in version 0.18.0; try it out with `wandb.require("core")`! See https://wandb.me/wandb-core for more information. diff --git a/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142421.out b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142421.out new file mode 100644 index 0000000000000000000000000000000000000000..ef17fc2611c8d2ec278b4943552e6a30a2701bf3 --- /dev/null +++ b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142421.out @@ -0,0 +1,175 @@ +wandb: Currently logged in as: priyanshi-pal (priyanshipal). Use `wandb login --relogin` to force relogin +wandb: wandb version 0.17.7 is available! To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.17.6 +wandb: Run data is saved locally in /scratch/elec/t405-puhe/p/palp3/MUCS/wandb/run-20240822_151726-alv0f5i7 +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run eval_pd2000_s300_shuff100_hindi +wandb: ⭐️ View project at https://wandb.ai/priyanshipal/huggingface +wandb: 🚀 View run at https://wandb.ai/priyanshipal/huggingface/runs/alv0f5i7 +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/training_args.py:1525: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py:957: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead. + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/auto/feature_extraction_auto.py:329: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead. + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/accelerate/accelerator.py:488: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead. 
+ self.scaler = torch.cuda.amp.GradScaler(**kwargs) +max_steps is given, it will override any value given in num_train_epochs +Wav2Vec2CTCTokenizer(name_or_path='', vocab_size=149, model_max_length=1000000000000000019884624838656, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'bos_token': '', 'eos_token': '', 'unk_token': '[UNK]', 'pad_token': '[PAD]'}, clean_up_tokenization_spaces=True), added_tokens_decoder={ + 147: AddedToken("[UNK]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 148: AddedToken("[PAD]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 149: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), + 150: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), +} +CHECK MODEL PARAMS Wav2Vec2ForCTC( + (wav2vec2): Wav2Vec2Model( + (feature_extractor): Wav2Vec2FeatureEncoder( + (conv_layers): ModuleList( + (0): Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(1, 512, kernel_size=(10,), stride=(5,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (1-4): 4 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 512, kernel_size=(3,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (5-6): 2 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 512, kernel_size=(2,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + ) + ) + (feature_projection): Wav2Vec2FeatureProjection( + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (projection): Linear(in_features=512, out_features=1024, bias=True) + (dropout): Dropout(p=0.0, inplace=False) + ) + (encoder): Wav2Vec2EncoderStableLayerNorm( + (pos_conv_embed): Wav2Vec2PositionalConvEmbedding( + (conv): ParametrizedConv1d( + 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16 + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _WeightNorm() + ) + ) + ) + (padding): Wav2Vec2SamePadLayer() + (activation): GELUActivation() + ) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm( + (attention): Wav2Vec2SdpaAttention( + (k_proj): Linear(in_features=1024, out_features=1024, bias=True) + (v_proj): Linear(in_features=1024, out_features=1024, bias=True) + (q_proj): Linear(in_features=1024, out_features=1024, bias=True) + (out_proj): Linear(in_features=1024, out_features=1024, bias=True) + ) + (dropout): Dropout(p=0.0, inplace=False) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (feed_forward): Wav2Vec2FeedForward( + (intermediate_dropout): Dropout(p=0.0, inplace=False) + (intermediate_dense): Linear(in_features=1024, out_features=4096, bias=True) + (intermediate_act_fn): GELUActivation() + (output_dense): Linear(in_features=4096, out_features=1024, bias=True) + (output_dropout): Dropout(p=0.0, inplace=False) + ) + (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + ) + ) + ) + ) + (dropout): Dropout(p=0.0, inplace=False) + (lm_head): Linear(in_features=1024, out_features=151, bias=True) +) +check the eval set length 572 +08/22/2024 15:17:37 - INFO - __main__ - *** Evaluate *** 
+/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:157: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call. + warnings.warn( + 0%| | 0/36 [00:00 + main() + File "/scratch/elec/puhe/p/palp3/MUCS/eval_script_indicwav2vec.py", line 759, in main + metrics = trainer.evaluate() + ^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/trainer.py", line 3666, in evaluate + output = eval_loop( + ^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/trainer.py", line 3857, in evaluation_loop + losses, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/trainer.py", line 4075, in prediction_step + loss, outputs = self.compute_loss(model, inputs, return_outputs=True) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/trainer.py", line 3363, in compute_loss + outputs = model(**inputs) + ^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/accelerate/utils/operations.py", line 819, in forward + return model_forward(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/accelerate/utils/operations.py", line 807, in __call__ + return convert_to_fp32(self.model_forward(*args, **kwargs)) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/amp/autocast_mode.py", line 43, in decorate_autocast + return func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 2228, in forward + outputs = self.wav2vec2( + ^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 1809, in forward + extract_features = self.feature_extractor(input_values) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, 
**kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 463, in forward + hidden_states = conv_layer(hidden_states) + ^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py", line 335, in forward + hidden_states = self.layer_norm(hidden_states) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1553, in _wrapped_call_impl + return self._call_impl(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1562, in _call_impl + return forward_call(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/modules/normalization.py", line 202, in forward + return F.layer_norm( + ^^^^^^^^^^^^^ + File "/scratch/work/palp3/myenv/lib/python3.11/site-packages/torch/nn/functional.py", line 2576, in layer_norm + return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU 0 has a total capacity of 15.77 GiB of which 1.55 GiB is free. Including non-PyTorch memory, this process has 14.21 GiB memory in use. Of the allocated memory 11.68 GiB is allocated by PyTorch, and 2.17 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +wandb: - 0.011 MB of 0.011 MB uploaded wandb: \ 0.033 MB of 0.033 MB uploaded wandb: 🚀 View run eval_pd2000_s300_shuff100_hindi at: https://wandb.ai/priyanshipal/huggingface/runs/alv0f5i7 +wandb: ⭐️ View project at: https://wandb.ai/priyanshipal/huggingface +wandb: Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s) +wandb: Find logs at: ./wandb/run-20240822_151726-alv0f5i7/logs +wandb: WARNING The new W&B backend becomes opt-out in version 0.18.0; try it out with `wandb.require("core")`! See https://wandb.me/wandb-core for more information. diff --git a/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142429.out b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142429.out new file mode 100644 index 0000000000000000000000000000000000000000..ebc90e83a17316bcb097c75c4964c2dd29f05338 --- /dev/null +++ b/evalonlyhindi_indicwav2vec_MUCS_warmup500_s300shuff100_2142429.out @@ -0,0 +1,155 @@ +wandb: Currently logged in as: priyanshi-pal (priyanshipal). Use `wandb login --relogin` to force relogin +wandb: wandb version 0.17.7 is available! 
To upgrade, please run: +wandb: $ pip install wandb --upgrade +wandb: Tracking run with wandb version 0.17.6 +wandb: Run data is saved locally in /scratch/elec/t405-puhe/p/palp3/MUCS/wandb/run-20240822_152047-64250v6u +wandb: Run `wandb offline` to turn off syncing. +wandb: Syncing run eval_pd2000_s300_shuff100_hindi +wandb: ⭐️ View project at https://wandb.ai/priyanshipal/huggingface +wandb: 🚀 View run at https://wandb.ai/priyanshipal/huggingface/runs/64250v6u +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/training_args.py:1525: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py:957: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead. + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/auto/feature_extraction_auto.py:329: FutureWarning: The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead. + warnings.warn( +/scratch/work/palp3/myenv/lib/python3.11/site-packages/accelerate/accelerator.py:488: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead. + self.scaler = torch.cuda.amp.GradScaler(**kwargs) +max_steps is given, it will override any value given in num_train_epochs +Wav2Vec2CTCTokenizer(name_or_path='', vocab_size=149, model_max_length=1000000000000000019884624838656, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'bos_token': '', 'eos_token': '', 'unk_token': '[UNK]', 'pad_token': '[PAD]'}, clean_up_tokenization_spaces=True), added_tokens_decoder={ + 147: AddedToken("[UNK]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 148: AddedToken("[PAD]", rstrip=True, lstrip=True, single_word=False, normalized=False, special=False), + 149: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), + 150: AddedToken("", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True), +} +CHECK MODEL PARAMS Wav2Vec2ForCTC( + (wav2vec2): Wav2Vec2Model( + (feature_extractor): Wav2Vec2FeatureEncoder( + (conv_layers): ModuleList( + (0): Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(1, 512, kernel_size=(10,), stride=(5,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (1-4): 4 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 512, kernel_size=(3,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + (5-6): 2 x Wav2Vec2LayerNormConvLayer( + (conv): Conv1d(512, 512, kernel_size=(2,), stride=(2,)) + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (activation): GELUActivation() + ) + ) + ) + (feature_projection): Wav2Vec2FeatureProjection( + (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True) + (projection): Linear(in_features=512, out_features=1024, bias=True) + (dropout): Dropout(p=0.0, inplace=False) + ) + (encoder): Wav2Vec2EncoderStableLayerNorm( + (pos_conv_embed): Wav2Vec2PositionalConvEmbedding( + (conv): ParametrizedConv1d( + 1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16 + (parametrizations): ModuleDict( + (weight): 
ParametrizationList( + (0): _WeightNorm() + ) + ) + ) + (padding): Wav2Vec2SamePadLayer() + (activation): GELUActivation() + ) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-23): 24 x Wav2Vec2EncoderLayerStableLayerNorm( + (attention): Wav2Vec2SdpaAttention( + (k_proj): Linear(in_features=1024, out_features=1024, bias=True) + (v_proj): Linear(in_features=1024, out_features=1024, bias=True) + (q_proj): Linear(in_features=1024, out_features=1024, bias=True) + (out_proj): Linear(in_features=1024, out_features=1024, bias=True) + ) + (dropout): Dropout(p=0.0, inplace=False) + (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + (feed_forward): Wav2Vec2FeedForward( + (intermediate_dropout): Dropout(p=0.0, inplace=False) + (intermediate_dense): Linear(in_features=1024, out_features=4096, bias=True) + (intermediate_act_fn): GELUActivation() + (output_dense): Linear(in_features=4096, out_features=1024, bias=True) + (output_dropout): Dropout(p=0.0, inplace=False) + ) + (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True) + ) + ) + ) + ) + (dropout): Dropout(p=0.0, inplace=False) + (lm_head): Linear(in_features=1024, out_features=151, bias=True) +) +check the eval set length 572 +08/22/2024 15:20:57 - INFO - __main__ - *** Evaluate *** +/scratch/work/palp3/myenv/lib/python3.11/site-packages/transformers/models/wav2vec2/processing_wav2vec2.py:157: UserWarning: `as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call. + warnings.warn( + 0%| | 0/36 [00:00
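
A note on the KeyError in the 2142383 log above: the script indexes `vectorized_datasets["eval"]["audio_id"]` after preprocessing has already reduced the dataset to `['input_values', 'input_length', 'labels']`. A minimal sketch of a guard, reusing the dataset object from the script; the helper name `eval_set_length` is hypothetical:

```python
# Hypothetical guard around the length check that raised the KeyError:
# the map() that builds input_values/labels drops "audio_id", so only index
# the column if it survived preprocessing.
from datasets import Dataset


def eval_set_length(ds: Dataset, column: str = "audio_id") -> int:
    """Report the eval-set size without assuming a specific column exists."""
    if column in ds.column_names:
        return len(ds[column])
    return len(ds)  # row count is the same either way


# print("check the eval set length", eval_set_length(vectorized_datasets["eval"]))
```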
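
The 2142421 run then fails with a CUDA OOM inside the feature encoder's LayerNorm while evaluating 572 samples in 36 steps, i.e. an eval batch size of 16. Below is a sketch of the mitigations the error message itself points to, plus a smaller eval batch; the concrete values (`per_device_eval_batch_size=4`, `eval_accumulation_steps=2`) are assumptions, not tuned settings:

```python
# Set the allocator hint before anything initializes CUDA (as the OOM message
# suggests) and shrink the evaluation batches; values below are illustrative.
import os

os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True")

from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="eval_out",          # placeholder output directory
    per_device_eval_batch_size=4,   # down from the 16 implied by 572 samples / 36 steps
    eval_accumulation_steps=2,      # move accumulated predictions to CPU more often
    fp16=True,                      # keep the mixed-precision eval seen in the logs
    do_train=False,
    do_eval=True,
)
```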
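
The recurring `as_target_processor` warning in these logs also points to the newer single-call pattern for building CTC labels. A minimal sketch, assuming a `Wav2Vec2Processor` checkpoint; the path, audio array, and transcript are placeholders:

```python
# One call produces both input_values and labels; no as_target_processor context needed.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("path/to/checkpoint")  # hypothetical path

audio = np.zeros(16_000, dtype=np.float32)   # placeholder: 1 s of silence at 16 kHz
transcript = "placeholder transcript"

batch = processor(audio, sampling_rate=16_000, text=transcript)
input_values = batch["input_values"]
labels = batch["labels"]
```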