End of training

- README.md +9 -9
- all_results.json +6 -6
- config.json +1 -1
- eval_results.json +6 -6
- training_args.bin +1 -1
README.md
CHANGED
@@ -14,13 +14,13 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
-- eval_loss:
+- eval_loss: nan
 - eval_model_preparation_time: 0.0054
-- eval_cer: 0
-- eval_wer: 0
-- eval_runtime:
-- eval_samples_per_second: 12.
-- eval_steps_per_second:
+- eval_cer: 1.0
+- eval_wer: 1.0
+- eval_runtime: 46.54
+- eval_samples_per_second: 12.29
+- eval_steps_per_second: 1.547
 - step: 0
 
 ## Model description
@@ -41,11 +41,11 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0006
-- train_batch_size:
-- eval_batch_size:
+- train_batch_size: 8
+- eval_batch_size: 8
 - seed: 300
 - gradient_accumulation_steps: 2
-- total_train_batch_size:
+- total_train_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 500
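For reference, the hyperparameters listed above map roughly onto a `transformers.TrainingArguments` object. The sketch below assumes the run used the standard `Trainer` API; `output_dir` and any argument not listed in the README (such as the epoch count) are placeholders, not values from this run.

```python
from transformers import TrainingArguments

# Sketch only: maps the README hyperparameters onto TrainingArguments.
# output_dir and anything not listed in the README are hypothetical.
training_args = TrainingArguments(
    output_dir="output",              # placeholder
    learning_rate=6e-4,               # 0.0006
    per_device_train_batch_size=8,    # train_batch_size: 8
    per_device_eval_batch_size=8,     # eval_batch_size: 8
    gradient_accumulation_steps=2,    # total_train_batch_size = 8 * 2 = 16
    seed=300,
    lr_scheduler_type="linear",
    warmup_steps=500,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```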
all_results.json
CHANGED
@@ -1,10 +1,10 @@
 {
-    "eval_cer": 0
-    "eval_loss":
+    "eval_cer": 1.0,
+    "eval_loss": NaN,
     "eval_model_preparation_time": 0.0054,
-    "eval_runtime":
+    "eval_runtime": 46.54,
     "eval_samples": 572,
-    "eval_samples_per_second": 12.
-    "eval_steps_per_second":
-    "eval_wer": 0
+    "eval_samples_per_second": 12.29,
+    "eval_steps_per_second": 1.547,
+    "eval_wer": 1.0
 }
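The updated eval_wer and eval_cer of 1.0, together with a NaN eval_loss, are consistent with the model emitting empty or entirely wrong transcripts at evaluation time. As a rough illustration (assuming the metrics were computed with the `evaluate` library, which this repo does not state), empty predictions against non-empty references score 1.0 on both metrics:

```python
import evaluate

# Illustration only: empty hypotheses make every reference token/character
# count as a deletion, so WER and CER both come out as 1.0.
wer = evaluate.load("wer")
cer = evaluate.load("cer")

predictions = ["", ""]                       # hypothetical decoded outputs
references = ["a test sentence", "another"]  # hypothetical references

print(wer.compute(predictions=predictions, references=references))  # 1.0
print(cer.compute(predictions=predictions, references=references))  # 1.0
```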
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/
+  "_name_or_path": "/scratch/elec/puhe/p/palp3/MUCS/indicwav2vec_outputs/pd_warmup500_rerun_latest/batchsize8/output",
   "activation_dropout": 0.0,
   "adapter_attn_dim": null,
   "adapter_kernel_size": 3,
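The only config change records the local directory the checkpoint was exported from; that path only resolves on the original training cluster. Anyone reusing the model would load it from this repo instead. A minimal sketch, assuming a wav2vec2-style CTC checkpoint and using a placeholder repo identifier:

```python
from transformers import AutoConfig, AutoModelForCTC

# "this-repo-id" is a placeholder; substitute the actual Hub repo name.
# The printed fields are the ones visible in the config.json diff above.
config = AutoConfig.from_pretrained("this-repo-id")
print(config.activation_dropout)   # 0.0
print(config.adapter_attn_dim)     # None
print(config.adapter_kernel_size)  # 3

model = AutoModelForCTC.from_pretrained("this-repo-id")
```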
eval_results.json
CHANGED
@@ -1,10 +1,10 @@
 {
-    "eval_cer": 0
-    "eval_loss":
+    "eval_cer": 1.0,
+    "eval_loss": NaN,
     "eval_model_preparation_time": 0.0054,
-    "eval_runtime":
+    "eval_runtime": 46.54,
     "eval_samples": 572,
-    "eval_samples_per_second": 12.
-    "eval_steps_per_second":
-    "eval_wer": 0
+    "eval_samples_per_second": 12.29,
+    "eval_steps_per_second": 1.547,
+    "eval_wer": 1.0
 }
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0c825e3070fcd897d9b76e2c6776787b1422865718ef077585ebc5e5c964ac84
 size 5496
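training_args.bin is stored through Git LFS, so the diff only updates the pointer file: the new `oid sha256:` line is the SHA-256 digest of the actual 5496-byte binary. A quick way to check a downloaded copy against the pointer (sketch; the local filename is assumed):

```python
import hashlib

# Compare a locally downloaded training_args.bin against the LFS pointer's OID.
with open("training_args.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

expected = "0c825e3070fcd897d9b76e2c6776787b1422865718ef077585ebc5e5c964ac84"
print(digest == expected)  # True if the file matches the pointer
```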