datht committed · Commit fa1a403 · verified · 1 Parent(s): 16dc050

Model save

README.md CHANGED
@@ -5,18 +5,18 @@ base_model: vinai/phobert-base
  tags:
  - generated_from_trainer
  model-index:
- - name: clapAI/roberta-base-multilingual-sentiment
+ - name: roberta-base-multilingual-sentiment
    results: []
  ---
 
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->
 
- # clapAI/roberta-base-multilingual-sentiment
+ # roberta-base-multilingual-sentiment
 
  This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.7236
+ - Loss: 1.7158
  - Micro F1: 0.3120
  - Micro Precision: 0.3120
  - Micro Recall: 0.3120
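
For reference, the renamed model card describes a sequence-classification checkpoint fine-tuned from vinai/phobert-base. Below is a minimal inference sketch with the transformers library; the repository id and label mapping are assumptions for illustration and are not stated in this commit.

```python
# Minimal inference sketch for the checkpoint saved in this commit.
# Assumption: the repo id below is hypothetical; substitute the actual one.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "datht/roberta-base-multilingual-sentiment"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)
model.eval()

# PhoBERT is a Vietnamese model and normally expects word-segmented input.
text = "Sản phẩm này rất tốt."
inputs = tokenizer(text, return_tensors="pt", truncation=True)

with torch.no_grad():
    logits = model(**inputs).logits

predicted_id = logits.argmax(dim=-1).item()
print(predicted_id, model.config.id2label.get(predicted_id, predicted_id))
```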
all_results.json CHANGED
@@ -27,7 +27,7 @@
    "test_samples_per_second": 2086.061,
    "test_steps_per_second": 6.02,
    "train_loss": 1.7666796875,
-   "train_runtime": 67.5502,
-   "train_samples_per_second": 410.658,
-   "train_steps_per_second": 0.37
+   "train_runtime": 62.6765,
+   "train_samples_per_second": 442.59,
+   "train_steps_per_second": 0.399
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:052d6ab91588ca314453125af780573c696f2974cfd01c51feb06618aa681329
+ oid sha256:9fbc9ff253665d600c1d25222ca96bc4c818f7dc1a2e4c071fd0a7e36e13cb23
  size 270031446
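
The model.safetensors entry is a Git LFS pointer (spec v1), so only the sha256 oid and byte size change here, not inline weights. A small sketch of how one might check a locally downloaded file against the new pointer values, assuming the weights have already been fetched (e.g. via git lfs pull):

```python
# Verify a local model.safetensors against the LFS pointer shown above.
# Assumption: the file path is local and already downloaded.
import hashlib
import os

expected_oid = "9fbc9ff253665d600c1d25222ca96bc4c818f7dc1a2e4c071fd0a7e36e13cb23"
expected_size = 270031446
path = "model.safetensors"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches local file")
```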
runs/May08_06-52-02_hn-fornix-testing-gpu-platform-2/events.out.tfevents.1746687148.hn-fornix-testing-gpu-platform-2.2610915.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f408584c21e0b2ee92052d7f88ed8832f8d0a78ea19e97da33f56cba82f6ac8
- size 9638
+ oid sha256:a91e8fd4580f0ae4ed71443f5efa9e73c7df271ae5582beb2fad20c2bacc9c15
+ size 9986
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
    "epoch": 4.181818181818182,
    "train_loss": 1.7666796875,
-   "train_runtime": 67.5502,
-   "train_samples_per_second": 410.658,
-   "train_steps_per_second": 0.37
+   "train_runtime": 62.6765,
+   "train_samples_per_second": 442.59,
+   "train_steps_per_second": 0.399
  }
trainer_state.json CHANGED
@@ -25,9 +25,9 @@
    "eval_micro_f1": 0.18221574344023322,
    "eval_micro_precision": 0.18221574344023322,
    "eval_micro_recall": 0.18221574344023322,
-   "eval_runtime": 0.4448,
-   "eval_samples_per_second": 1542.106,
-   "eval_steps_per_second": 4.496,
+   "eval_runtime": 0.291,
+   "eval_samples_per_second": 2357.55,
+   "eval_steps_per_second": 6.873,
    "step": 6
  },
  {
@@ -46,9 +46,9 @@
    "eval_micro_f1": 0.18221574344023322,
    "eval_micro_precision": 0.18221574344023322,
    "eval_micro_recall": 0.18221574344023322,
-   "eval_runtime": 0.2951,
-   "eval_samples_per_second": 2324.304,
-   "eval_steps_per_second": 6.776,
+   "eval_runtime": 0.2458,
+   "eval_samples_per_second": 2791.201,
+   "eval_steps_per_second": 8.138,
    "step": 12
  },
  {
@@ -67,9 +67,9 @@
    "eval_micro_f1": 0.18221574344023322,
    "eval_micro_precision": 0.18221574344023322,
    "eval_micro_recall": 0.18221574344023322,
-   "eval_runtime": 0.4046,
-   "eval_samples_per_second": 1695.319,
-   "eval_steps_per_second": 4.943,
+   "eval_runtime": 0.3665,
+   "eval_samples_per_second": 1871.841,
+   "eval_steps_per_second": 5.457,
    "step": 18
  },
  {
@@ -88,9 +88,9 @@
    "eval_micro_f1": 0.3119533527696793,
    "eval_micro_precision": 0.3119533527696793,
    "eval_micro_recall": 0.3119533527696793,
-   "eval_runtime": 0.5511,
-   "eval_samples_per_second": 1244.883,
-   "eval_steps_per_second": 3.629,
+   "eval_runtime": 0.3802,
+   "eval_samples_per_second": 1804.236,
+   "eval_steps_per_second": 5.26,
    "step": 24
  },
  {
@@ -109,9 +109,9 @@
    "eval_micro_f1": 0.3119533527696793,
    "eval_micro_precision": 0.3119533527696793,
    "eval_micro_recall": 0.3119533527696793,
-   "eval_runtime": 0.2492,
-   "eval_samples_per_second": 2752.26,
-   "eval_steps_per_second": 8.024,
+   "eval_runtime": 0.3551,
+   "eval_samples_per_second": 1932.117,
+   "eval_steps_per_second": 5.633,
    "step": 25
  },
  {
@@ -119,9 +119,9 @@
    "step": 25,
    "total_flos": 1301189864980480.0,
    "train_loss": 1.7666796875,
-   "train_runtime": 67.5502,
-   "train_samples_per_second": 410.658,
-   "train_steps_per_second": 0.37
+   "train_runtime": 62.6765,
+   "train_samples_per_second": 442.59,
+   "train_steps_per_second": 0.399
    }
  ],
  "logging_steps": 5,