datht committed · Commit 632364e · verified · 1 Parent(s): d59f758

End of training

README.md CHANGED
@@ -5,18 +5,18 @@ base_model: vinai/phobert-base
 tags:
 - generated_from_trainer
 model-index:
-- name: roberta-base-multilingual-sentiment
+- name: clapAI/roberta-base-multilingual-sentiment
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# roberta-base-multilingual-sentiment
+# clapAI/roberta-base-multilingual-sentiment
 
 This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.7158
+- Loss: 1.7236
 - Micro F1: 31.1953
 - Micro Precision: 31.1953
 - Micro Recall: 31.1953
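
The updated card names the checkpoint `clapAI/roberta-base-multilingual-sentiment`. For readers who want to try it, here is a minimal usage sketch, assuming the checkpoint is published under that repo id and exposes a standard sequence-classification head; the label mapping would come from the checkpoint's own config, not from anything in this commit.

```python
# Minimal sketch (assumption: the model is available on the Hub as
# "clapAI/roberta-base-multilingual-sentiment" with a sequence-classification head).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "clapAI/roberta-base-multilingual-sentiment"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# PhoBERT (the base model) is pre-trained on word-segmented Vietnamese,
# so segmenting the input text first usually helps.
text = "Sản phẩm này rất tốt !"
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label.get(pred, pred))  # label names come from the checkpoint config
```
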
all_results.json CHANGED
@@ -1,31 +1,31 @@
 {
     "epoch": 4.181818181818182,
     "eval_f1": 0.09432731429418081,
-    "eval_loss": 1.943359375,
-    "eval_macro_f1": 9.432731429418082,
-    "eval_macro_precision": 17.25200652415304,
-    "eval_macro_recall": 13.460980331549635,
-    "eval_micro_f1": 18.221574344023324,
-    "eval_micro_precision": 18.221574344023324,
-    "eval_micro_recall": 18.221574344023324,
+    "eval_loss": 1.7236328125,
+    "eval_macro_f1": 6.7936507936507935,
+    "eval_macro_precision": 4.456476468138275,
+    "eval_macro_recall": 14.285714285714285,
+    "eval_micro_f1": 31.195335276967928,
+    "eval_micro_precision": 31.195335276967928,
+    "eval_micro_recall": 31.195335276967928,
     "eval_precision": 0.1725200652415304,
     "eval_recall": 0.13460980331549635,
-    "eval_runtime": 0.3109,
-    "eval_samples_per_second": 2206.376,
-    "eval_steps_per_second": 6.433,
+    "eval_runtime": 0.1986,
+    "eval_samples_per_second": 3454.261,
+    "eval_steps_per_second": 10.071,
     "test_f1": 0.10009022401776128,
-    "test_loss": 1.943359375,
-    "test_macro_f1": 10.009022401776129,
-    "test_macro_precision": 14.356813693219223,
-    "test_macro_recall": 14.150273929500775,
-    "test_micro_f1": 17.604617604617605,
-    "test_micro_precision": 17.604617604617605,
-    "test_micro_recall": 17.604617604617605,
+    "test_loss": 1.767578125,
+    "test_macro_f1": 6.223798774588842,
+    "test_macro_precision": 3.978561121418265,
+    "test_macro_recall": 14.285714285714285,
+    "test_micro_f1": 27.84992784992785,
+    "test_micro_precision": 27.84992784992785,
+    "test_micro_recall": 27.84992784992785,
     "test_precision": 0.14356813693219223,
     "test_recall": 0.14150273929500776,
-    "test_runtime": 0.3027,
-    "test_samples_per_second": 2289.139,
-    "test_steps_per_second": 6.606,
+    "test_runtime": 0.162,
+    "test_samples_per_second": 4277.987,
+    "test_steps_per_second": 12.346,
     "train_loss": 1.7666796875,
     "train_runtime": 35.9676,
     "train_samples_per_second": 771.25,
eval_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch": 0.9090909090909091,
-    "eval_loss": 1.943359375,
-    "eval_macro_f1": 9.432731429418082,
-    "eval_macro_precision": 17.25200652415304,
-    "eval_macro_recall": 13.460980331549635,
-    "eval_micro_f1": 18.221574344023324,
-    "eval_micro_precision": 18.221574344023324,
-    "eval_micro_recall": 18.221574344023324,
-    "eval_runtime": 0.3109,
-    "eval_samples_per_second": 2206.376,
-    "eval_steps_per_second": 6.433
+    "epoch": 4.181818181818182,
+    "eval_loss": 1.7236328125,
+    "eval_macro_f1": 6.7936507936507935,
+    "eval_macro_precision": 4.456476468138275,
+    "eval_macro_recall": 14.285714285714285,
+    "eval_micro_f1": 31.195335276967928,
+    "eval_micro_precision": 31.195335276967928,
+    "eval_micro_recall": 31.195335276967928,
+    "eval_runtime": 0.1986,
+    "eval_samples_per_second": 3454.261,
+    "eval_steps_per_second": 10.071
 }
runs/May08_07-42-18_hn-fornix-testing-gpu-platform-2/events.out.tfevents.1746690205.hn-fornix-testing-gpu-platform-2.2694805.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12db45ab2ca16dd8b9173475eaf000f0e30bd23e30f0bff8c6a2de718862e93
+size 682
test_results.json CHANGED
@@ -1,12 +1,12 @@
 {
-    "test_loss": 1.943359375,
-    "test_macro_f1": 10.009022401776129,
-    "test_macro_precision": 14.356813693219223,
-    "test_macro_recall": 14.150273929500775,
-    "test_micro_f1": 17.604617604617605,
-    "test_micro_precision": 17.604617604617605,
-    "test_micro_recall": 17.604617604617605,
-    "test_runtime": 0.3027,
-    "test_samples_per_second": 2289.139,
-    "test_steps_per_second": 6.606
+    "test_loss": 1.767578125,
+    "test_macro_f1": 6.223798774588842,
+    "test_macro_precision": 3.978561121418265,
+    "test_macro_recall": 14.285714285714285,
+    "test_micro_f1": 27.84992784992785,
+    "test_micro_precision": 27.84992784992785,
+    "test_micro_recall": 27.84992784992785,
+    "test_runtime": 0.162,
+    "test_samples_per_second": 4277.987,
+    "test_steps_per_second": 12.346
 }