End of training
README.md CHANGED
@@ -5,18 +5,18 @@ base_model: vinai/phobert-base
 tags:
 - generated_from_trainer
 model-index:
-- name: roberta-base-multilingual-sentiment
+- name: clapAI/roberta-base-multilingual-sentiment
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-# roberta-base-multilingual-sentiment
+# clapAI/roberta-base-multilingual-sentiment

 This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
+- Loss: 1.7236
 - Micro F1: 31.1953
 - Micro Precision: 31.1953
 - Micro Recall: 31.1953
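For context on using the renamed checkpoint, here is a minimal inference sketch with the `transformers` pipeline API. It assumes the model is published under the `clapAI/roberta-base-multilingual-sentiment` id shown above and carries a standard sequence-classification head; the example input is illustrative only and not part of this commit.

```python
# Minimal sketch, not from this commit: load the renamed checkpoint for inference.
# Assumes a standard text-classification head; labels are whatever the config defines.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="clapAI/roberta-base-multilingual-sentiment",
)

# Example Vietnamese input, since the base model (vinai/phobert-base) is Vietnamese.
print(classifier("Sản phẩm này rất tốt!"))
```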
all_results.json CHANGED
@@ -1,31 +1,31 @@
 {
     "epoch": 4.181818181818182,
     "eval_f1": 0.09432731429418081,
-    "eval_loss": 1.
-    "eval_macro_f1":
-    "eval_macro_precision":
-    "eval_macro_recall":
-    "eval_micro_f1":
-    "eval_micro_precision":
-    "eval_micro_recall":
+    "eval_loss": 1.7236328125,
+    "eval_macro_f1": 6.7936507936507935,
+    "eval_macro_precision": 4.456476468138275,
+    "eval_macro_recall": 14.285714285714285,
+    "eval_micro_f1": 31.195335276967928,
+    "eval_micro_precision": 31.195335276967928,
+    "eval_micro_recall": 31.195335276967928,
     "eval_precision": 0.1725200652415304,
     "eval_recall": 0.13460980331549635,
-    "eval_runtime": 0.
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "eval_runtime": 0.1986,
+    "eval_samples_per_second": 3454.261,
+    "eval_steps_per_second": 10.071,
     "test_f1": 0.10009022401776128,
-    "test_loss": 1.
-    "test_macro_f1":
-    "test_macro_precision":
-    "test_macro_recall": 14.
-    "test_micro_f1":
-    "test_micro_precision":
-    "test_micro_recall":
+    "test_loss": 1.767578125,
+    "test_macro_f1": 6.223798774588842,
+    "test_macro_precision": 3.978561121418265,
+    "test_macro_recall": 14.285714285714285,
+    "test_micro_f1": 27.84992784992785,
+    "test_micro_precision": 27.84992784992785,
+    "test_micro_recall": 27.84992784992785,
     "test_precision": 0.14356813693219223,
     "test_recall": 0.14150273929500776,
-    "test_runtime": 0.
-    "test_samples_per_second":
-    "test_steps_per_second":
+    "test_runtime": 0.162,
+    "test_samples_per_second": 4277.987,
+    "test_steps_per_second": 12.346,
     "train_loss": 1.7666796875,
     "train_runtime": 35.9676,
     "train_samples_per_second": 771.25,
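A pattern worth noting in the metrics above: `eval_micro_f1`, `eval_micro_precision`, and `eval_micro_recall` (and their `test_*` counterparts) each share a single value. That is expected for single-label multiclass evaluation, where micro-averaged precision, recall, and F1 all reduce to accuracy. A small sketch with toy labels (not the actual evaluation data) illustrates this:

```python
# Toy illustration only: for single-label multiclass predictions, the
# micro-averaged precision, recall, and F1 are all equal (to accuracy).
from sklearn.metrics import precision_score, recall_score, f1_score

y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 2, 2, 1, 1, 0]

for name, fn in [("precision", precision_score), ("recall", recall_score), ("f1", f1_score)]:
    print(name, fn(y_true, y_pred, average="micro"))  # all three print 0.666...
```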
eval_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch":
-    "eval_loss": 1.
-    "eval_macro_f1":
-    "eval_macro_precision":
-    "eval_macro_recall":
-    "eval_micro_f1":
-    "eval_micro_precision":
-    "eval_micro_recall":
-    "eval_runtime": 0.
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "epoch": 4.181818181818182,
+    "eval_loss": 1.7236328125,
+    "eval_macro_f1": 6.7936507936507935,
+    "eval_macro_precision": 4.456476468138275,
+    "eval_macro_recall": 14.285714285714285,
+    "eval_micro_f1": 31.195335276967928,
+    "eval_micro_precision": 31.195335276967928,
+    "eval_micro_recall": 31.195335276967928,
+    "eval_runtime": 0.1986,
+    "eval_samples_per_second": 3454.261,
+    "eval_steps_per_second": 10.071
 }
runs/May08_07-42-18_hn-fornix-testing-gpu-platform-2/events.out.tfevents.1746690205.hn-fornix-testing-gpu-platform-2.2694805.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12db45ab2ca16dd8b9173475eaf000f0e30bd23e30f0bff8c6a2de718862e93
+size 682
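The added tfevents file is committed as a Git LFS pointer: the three lines above record only the LFS spec version, the object's sha256, and its byte size, while the TensorBoard log itself lives in LFS storage. A hypothetical helper for checking a locally downloaded copy against that pointer (the local path is an assumption, not part of the commit) could look like:

```python
# Hypothetical verification helper: compare a downloaded file's sha256 with the
# "oid sha256:..." value recorded in the LFS pointer committed above.
import hashlib
from pathlib import Path

def lfs_sha256(path: str) -> str:
    """Return the sha256 hex digest of the file at `path`."""
    return hashlib.sha256(Path(path).read_bytes()).hexdigest()

# Example usage (path assumed):
# print(lfs_sha256("events.out.tfevents.1746690205.hn-fornix-testing-gpu-platform-2.2694805.1"))
```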
test_results.json CHANGED
@@ -1,12 +1,12 @@
 {
-    "test_loss": 1.
-    "test_macro_f1":
-    "test_macro_precision":
-    "test_macro_recall": 14.
-    "test_micro_f1":
-    "test_micro_precision":
-    "test_micro_recall":
-    "test_runtime": 0.
-    "test_samples_per_second":
-    "test_steps_per_second":
+    "test_loss": 1.767578125,
+    "test_macro_f1": 6.223798774588842,
+    "test_macro_precision": 3.978561121418265,
+    "test_macro_recall": 14.285714285714285,
+    "test_micro_f1": 27.84992784992785,
+    "test_micro_precision": 27.84992784992785,
+    "test_micro_recall": 27.84992784992785,
+    "test_runtime": 0.162,
+    "test_samples_per_second": 4277.987,
+    "test_steps_per_second": 12.346
 }
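Since the commit ships the metrics as plain JSON, a quick way to compare the evaluation and test splits locally is to load the two files directly. A sketch, assuming it is run from the repository root:

```python
# Sketch: load the committed metrics files and compare eval vs. test micro F1.
import json

with open("eval_results.json") as f:
    eval_metrics = json.load(f)
with open("test_results.json") as f:
    test_metrics = json.load(f)

print("eval micro F1:", eval_metrics["eval_micro_f1"])  # 31.1953...
print("test micro F1:", test_metrics["test_micro_f1"])  # 27.8499...
```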