Commit 7c1f780 (verified) by datht · Parent(s): e1dd9ad

End of training

README.md CHANGED
@@ -5,14 +5,14 @@ base_model: vinai/phobert-base
 tags:
 - generated_from_trainer
 model-index:
-- name: roberta-base-multilingual-sentiment
+- name: clapAI/roberta-base-multilingual-sentiment
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-# roberta-base-multilingual-sentiment
+# clapAI/roberta-base-multilingual-sentiment

 This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on the None dataset.
 It achieves the following results on the evaluation set:
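The updated card names the fine-tuned checkpoint clapAI/roberta-base-multilingual-sentiment on top of vinai/phobert-base. A minimal inference sketch, assuming the checkpoint is published on the Hub under that model-index name and that the fine-tuned config carries an id2label mapping:

```python
# Minimal inference sketch (the repo id below is assumed from the updated
# model-index name; the label mapping is whatever the fine-tuned config has).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "clapAI/roberta-base-multilingual-sentiment"  # assumed Hub repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

# PhoBERT expects word-segmented Vietnamese input; plain text is used here
# only for illustration.
inputs = tokenizer("Sản phẩm này rất tốt", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label.get(pred, pred))
```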
all_results.json CHANGED
@@ -1,31 +1,31 @@
 {
     "epoch": 0.9090909090909091,
     "eval_f1": 0.09432731429418081,
-    "eval_loss": 1.7236328125,
-    "eval_macro_f1": 0.06793650793650793,
-    "eval_macro_precision": 0.044564764681382756,
-    "eval_macro_recall": 0.14285714285714285,
-    "eval_micro_f1": 0.3119533527696793,
-    "eval_micro_precision": 0.3119533527696793,
-    "eval_micro_recall": 0.3119533527696793,
+    "eval_loss": 1.943359375,
+    "eval_macro_f1": 9.432731429418082,
+    "eval_macro_precision": 17.25200652415304,
+    "eval_macro_recall": 13.460980331549635,
+    "eval_micro_f1": 18.221574344023324,
+    "eval_micro_precision": 18.221574344023324,
+    "eval_micro_recall": 18.221574344023324,
     "eval_precision": 0.1725200652415304,
     "eval_recall": 0.13460980331549635,
-    "eval_runtime": 0.2804,
-    "eval_samples_per_second": 2446.863,
-    "eval_steps_per_second": 7.134,
+    "eval_runtime": 0.3109,
+    "eval_samples_per_second": 2206.376,
+    "eval_steps_per_second": 6.433,
     "test_f1": 0.10009022401776128,
-    "test_loss": 1.767578125,
-    "test_macro_f1": 0.06223798774588842,
-    "test_macro_precision": 0.03978561121418265,
-    "test_macro_recall": 0.14285714285714285,
-    "test_micro_f1": 0.2784992784992785,
-    "test_micro_precision": 0.2784992784992785,
-    "test_micro_recall": 0.2784992784992785,
+    "test_loss": 1.943359375,
+    "test_macro_f1": 10.009022401776129,
+    "test_macro_precision": 14.356813693219223,
+    "test_macro_recall": 14.150273929500775,
+    "test_micro_f1": 17.604617604617605,
+    "test_micro_precision": 17.604617604617605,
+    "test_micro_recall": 17.604617604617605,
     "test_precision": 0.14356813693219223,
     "test_recall": 0.14150273929500776,
-    "test_runtime": 0.2854,
-    "test_samples_per_second": 2428.484,
-    "test_steps_per_second": 7.009,
+    "test_runtime": 0.3027,
+    "test_samples_per_second": 2289.139,
+    "test_steps_per_second": 6.606,
     "train_loss": 1.94560546875,
     "train_runtime": 12.3942,
     "train_samples_per_second": 447.627,
eval_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch": 4.181818181818182,
-    "eval_loss": 1.7236328125,
-    "eval_macro_f1": 0.06793650793650793,
-    "eval_macro_precision": 0.044564764681382756,
-    "eval_macro_recall": 0.14285714285714285,
-    "eval_micro_f1": 0.3119533527696793,
-    "eval_micro_precision": 0.3119533527696793,
-    "eval_micro_recall": 0.3119533527696793,
-    "eval_runtime": 0.2804,
-    "eval_samples_per_second": 2446.863,
-    "eval_steps_per_second": 7.134
+    "epoch": 0.9090909090909091,
+    "eval_loss": 1.943359375,
+    "eval_macro_f1": 9.432731429418082,
+    "eval_macro_precision": 17.25200652415304,
+    "eval_macro_recall": 13.460980331549635,
+    "eval_micro_f1": 18.221574344023324,
+    "eval_micro_precision": 18.221574344023324,
+    "eval_micro_recall": 18.221574344023324,
+    "eval_runtime": 0.3109,
+    "eval_samples_per_second": 2206.376,
+    "eval_steps_per_second": 6.433
 }
runs/May08_07-01-21_hn-fornix-testing-gpu-platform-2/events.out.tfevents.1746687731.hn-fornix-testing-gpu-platform-2.2627644.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4289931fcf04069793460d574408205af199ba0bf0dedd58483fc5b3205618c4
+size 682
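The new events file is committed as a Git LFS pointer, so the repository stores only the three key-value lines above (version, oid, size) rather than the binary TensorBoard log itself. A small illustrative parser for such a pointer file; the helper is hypothetical and assumes the file is checked out as a raw pointer (i.e. without LFS smudging):

```python
# Parse a Git LFS pointer file into its key/value fields (illustrative only).
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Return {key: value} for each 'key value' line in an LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key and value:
            fields[key] = value
    return fields

pointer = read_lfs_pointer(
    "runs/May08_07-01-21_hn-fornix-testing-gpu-platform-2/"
    "events.out.tfevents.1746687731.hn-fornix-testing-gpu-platform-2.2627644.1"
)
print(pointer["oid"], pointer["size"])  # sha256:4289... 682
```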
test_results.json CHANGED
@@ -1,12 +1,12 @@
 {
-    "test_loss": 1.767578125,
-    "test_macro_f1": 0.06223798774588842,
-    "test_macro_precision": 0.03978561121418265,
-    "test_macro_recall": 0.14285714285714285,
-    "test_micro_f1": 0.2784992784992785,
-    "test_micro_precision": 0.2784992784992785,
-    "test_micro_recall": 0.2784992784992785,
-    "test_runtime": 0.2854,
-    "test_samples_per_second": 2428.484,
-    "test_steps_per_second": 7.009
+    "test_loss": 1.943359375,
+    "test_macro_f1": 10.009022401776129,
+    "test_macro_precision": 14.356813693219223,
+    "test_macro_recall": 14.150273929500775,
+    "test_micro_f1": 17.604617604617605,
+    "test_micro_precision": 17.604617604617605,
+    "test_micro_recall": 17.604617604617605,
+    "test_runtime": 0.3027,
+    "test_samples_per_second": 2289.139,
+    "test_steps_per_second": 6.606
 }
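all_results.json, eval_results.json, and test_results.json are the plain JSON summaries the Trainer writes at the end of a run, so they can be inspected directly. A short sketch, assuming the working directory is the repository root; the printed values correspond to the post-commit numbers above:

```python
# Read the Trainer-written result files (paths assume the repo root).
import json
from pathlib import Path

results = {}
for name in ("all_results.json", "eval_results.json", "test_results.json"):
    path = Path(name)
    if path.exists():
        results[name] = json.loads(path.read_text())

# Values after this commit:
print(results["eval_results.json"]["eval_loss"])      # 1.943359375
print(results["test_results.json"]["test_micro_f1"])  # 17.604617604617605
```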