Alfanatasya committed
Commit b1db12c · verified · 1 Parent(s): b35b6eb

indobert-large-p2_preprocessing_tuning

README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: indobenchmark/indobert-large-p2
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: results_indobert-large-p2_preprocessing_tuning
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # results_indobert-large-p2_preprocessing_tuning
+
+ This model is a fine-tuned version of [indobenchmark/indobert-large-p2](https://huggingface.co/indobenchmark/indobert-large-p2) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.4637
+ - Accuracy: 0.4545
+ - Precision: 0.5084
+ - Recall: 0.4353
+ - F1: 0.4145
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
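Although the intended uses are not documented yet, here is a minimal inference sketch using the `transformers` API. The checkpoint path is a placeholder (point it at this repository locally or at its Hub id), and the five classes are still the generic `LABEL_0`–`LABEL_4` from `config.json`.

```python
# Minimal inference sketch; the model path below is a placeholder for this repository.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "path/to/results_indobert-large-p2_preprocessing_tuning"  # local path or Hub id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)  # 5 labels: LABEL_0..LABEL_4

text = "Contoh kalimat berbahasa Indonesia."  # example Indonesian input
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])
```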
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
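The `run-0/` through `run-4/` checkpoint folders added in this commit come from a hyperparameter search: each `trainer_state.json` records `"is_hyper_param_search": true` and `trial_params` covering dropout, learning rate, and optimizer choice. Below is a minimal sketch of how such a search is typically wired with `Trainer.hyperparameter_search`; the search ranges, the Optuna backend, and the dataset objects are assumptions rather than values recorded in this repository. The hyperparameters listed in the next subsection match the trial recorded under `run-1/` (Adafactor, learning rate 3.5225e-05, dropout 0.1).

```python
# Hyperparameter-search sketch (ranges, backend, and datasets are assumptions).
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

def model_init(trial):
    # dropout is one of the searched trial_params; 0.1 and 0.3 both appear in the run-*/ configs
    dropout = 0.1 if trial is None else trial.suggest_categorical("dropout", [0.1, 0.3])
    return AutoModelForSequenceClassification.from_pretrained(
        "indobenchmark/indobert-large-p2",
        num_labels=5,
        hidden_dropout_prob=dropout,
        attention_probs_dropout_prob=dropout,
    )

def hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-5, log=True),
        "optim": trial.suggest_categorical("optim", ["adamw_torch", "adafactor"]),
    }

trainer = Trainer(
    model_init=model_init,
    args=TrainingArguments(output_dir="hp_search", eval_strategy="epoch",
                           per_device_train_batch_size=32, num_train_epochs=20, seed=42),
    train_dataset=train_dataset,      # placeholder: the dataset is not described in this card
    eval_dataset=eval_dataset,        # placeholder
    compute_metrics=compute_metrics,  # e.g. the function sketched after the results table below
)
best_trial = trainer.hyperparameter_search(
    hp_space=hp_space,
    n_trials=5,                       # five run-* folders appear in this commit
    direction="maximize",
    backend="optuna",                 # assumed backend
    compute_objective=lambda m: m["eval_accuracy"],  # best_metric in trainer_state.json equals eval_accuracy
)
```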
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 3.5225350314211635e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adafactor (no additional optimizer arguments)
+ - lr_scheduler_type: linear
+ - num_epochs: 20
+
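These values correspond roughly to the `TrainingArguments` sketched below. Per-epoch evaluation/saving and early stopping with patience 2 are taken from the `trainer_state.json` files in this commit; `metric_for_best_model` and the remaining arguments are assumptions.

```python
# Approximate TrainingArguments for the hyperparameters listed above
# (a sketch, not the exact arguments used for this run).
from transformers import EarlyStoppingCallback, TrainingArguments

training_args = TrainingArguments(
    output_dir="./results_indobert-large-p2_preprocessing_tuning",
    learning_rate=3.5225350314211635e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    optim="adafactor",
    lr_scheduler_type="linear",
    num_train_epochs=20,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",  # assumption: best_metric in trainer_state.json tracks eval_accuracy
)
early_stopping = EarlyStoppingCallback(early_stopping_patience=2)  # patience recorded in trainer_state.json
```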
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | 1.6552        | 1.0   | 111  | 1.5541          | 0.2545   | 0.1575    | 0.2126 | 0.1676 |
+ | 1.5877        | 2.0   | 222  | 1.5239          | 0.2977   | 0.4944    | 0.2590 | 0.2152 |
+ | 1.561         | 3.0   | 333  | 1.4980          | 0.3773   | 0.4931    | 0.3403 | 0.3261 |
+ | 1.5361        | 4.0   | 444  | 1.4807          | 0.4227   | 0.4950    | 0.3978 | 0.3865 |
+ | 1.5163        | 5.0   | 555  | 1.4637          | 0.4545   | 0.5084    | 0.4353 | 0.4145 |
+ | 1.496         | 6.0   | 666  | 1.4509          | 0.4295   | 0.5107    | 0.4127 | 0.3970 |
+ | 1.4931        | 7.0   | 777  | 1.4354          | 0.4273   | 0.4916    | 0.4026 | 0.3910 |
+
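The Accuracy, Precision, Recall, and F1 columns above come from a `compute_metrics` callback passed to the `Trainer`; a minimal sketch is below. Macro averaging over the five classes is an assumption, since the card does not record which average was used.

```python
# Sketch of a compute_metrics callback producing the columns above
# (macro averaging is an assumption).
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro", zero_division=0)
    return {"accuracy": accuracy_score(labels, preds),
            "precision": precision, "recall": recall, "f1": f1}
```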
+ ### Framework versions
+
+ - Transformers 4.52.4
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.6.0
+ - Tokenizers 0.21.2
config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.3,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.3,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
final_model/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.3,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.3,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
final_model/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba173bde194fff86c760183d06fa99427ba4233698b8f0bcfe3bf00c7f05d678
+ size 1340635060
final_model/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
final_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
final_model/tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
final_model/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73ecc0710fe0575728761988d31e5efdffcb03d899093b12918b430b4bca7f18
+ size 5240
final_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba173bde194fff86c760183d06fa99427ba4233698b8f0bcfe3bf00c7f05d678
+ size 1340635060
run-0/checkpoint-555/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.3,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.3,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
run-0/checkpoint-555/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b67d7099022362981a4d208a0350dab8b4a28c51f922d40964ff39788523e3f
+ size 1340635060
run-0/checkpoint-555/trainer_state.json ADDED
@@ -0,0 +1,142 @@
1
+ {
2
+ "best_global_step": 555,
3
+ "best_metric": 0.34545454545454546,
4
+ "best_model_checkpoint": "./results_indobert-large-p2_preprocessing_tuning/run-0/checkpoint-555",
5
+ "epoch": 5.0,
6
+ "eval_steps": 500,
7
+ "global_step": 555,
8
+ "is_hyper_param_search": true,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 1.0,
14
+ "grad_norm": 26.157533645629883,
15
+ "learning_rate": 1.906084574220028e-05,
16
+ "loss": 1.6713,
17
+ "step": 111
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.23863636363636365,
22
+ "eval_f1": 0.15186499337267767,
23
+ "eval_loss": 1.5738822221755981,
24
+ "eval_precision": 0.21780234279986876,
25
+ "eval_recall": 0.19921385803738745,
26
+ "eval_runtime": 5.6808,
27
+ "eval_samples_per_second": 77.454,
28
+ "eval_steps_per_second": 2.464,
29
+ "step": 111
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "grad_norm": 28.891916275024414,
34
+ "learning_rate": 1.805811878609401e-05,
35
+ "loss": 1.6117,
36
+ "step": 222
37
+ },
38
+ {
39
+ "epoch": 2.0,
40
+ "eval_accuracy": 0.2727272727272727,
41
+ "eval_f1": 0.17833531793284116,
42
+ "eval_loss": 1.5519375801086426,
43
+ "eval_precision": 0.16588746858953346,
44
+ "eval_recall": 0.23229946524064174,
45
+ "eval_runtime": 5.646,
46
+ "eval_samples_per_second": 77.931,
47
+ "eval_steps_per_second": 2.48,
48
+ "step": 222
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "grad_norm": 18.7272891998291,
53
+ "learning_rate": 1.705539182998774e-05,
54
+ "loss": 1.5958,
55
+ "step": 333
56
+ },
57
+ {
58
+ "epoch": 3.0,
59
+ "eval_accuracy": 0.2909090909090909,
60
+ "eval_f1": 0.19304355849445587,
61
+ "eval_loss": 1.5347687005996704,
62
+ "eval_precision": 0.3811392256485068,
63
+ "eval_recall": 0.24720105466428993,
64
+ "eval_runtime": 5.6444,
65
+ "eval_samples_per_second": 77.954,
66
+ "eval_steps_per_second": 2.48,
67
+ "step": 333
68
+ },
69
+ {
70
+ "epoch": 4.0,
71
+ "grad_norm": 27.44182014465332,
72
+ "learning_rate": 1.6052664873881467e-05,
73
+ "loss": 1.5792,
74
+ "step": 444
75
+ },
76
+ {
77
+ "epoch": 4.0,
78
+ "eval_accuracy": 0.3159090909090909,
79
+ "eval_f1": 0.22223201929048866,
80
+ "eval_loss": 1.521448016166687,
81
+ "eval_precision": 0.5018285105627551,
82
+ "eval_recall": 0.2726780817221994,
83
+ "eval_runtime": 5.6387,
84
+ "eval_samples_per_second": 78.033,
85
+ "eval_steps_per_second": 2.483,
86
+ "step": 444
87
+ },
88
+ {
89
+ "epoch": 5.0,
90
+ "grad_norm": 27.527873992919922,
91
+ "learning_rate": 1.5049937917775197e-05,
92
+ "loss": 1.5623,
93
+ "step": 555
94
+ },
95
+ {
96
+ "epoch": 5.0,
97
+ "eval_accuracy": 0.34545454545454546,
98
+ "eval_f1": 0.28947961232328573,
99
+ "eval_loss": 1.5095351934432983,
100
+ "eval_precision": 0.46474527630134865,
101
+ "eval_recall": 0.31175859271447504,
102
+ "eval_runtime": 5.6345,
103
+ "eval_samples_per_second": 78.09,
104
+ "eval_steps_per_second": 2.485,
105
+ "step": 555
106
+ }
107
+ ],
108
+ "logging_steps": 500,
109
+ "max_steps": 2220,
110
+ "num_input_tokens_seen": 0,
111
+ "num_train_epochs": 20,
112
+ "save_steps": 500,
113
+ "stateful_callbacks": {
114
+ "EarlyStoppingCallback": {
115
+ "args": {
116
+ "early_stopping_patience": 2,
117
+ "early_stopping_threshold": 0.0
118
+ },
119
+ "attributes": {
120
+ "early_stopping_patience_counter": 0
121
+ }
122
+ },
123
+ "TrainerControl": {
124
+ "args": {
125
+ "should_epoch_stop": false,
126
+ "should_evaluate": false,
127
+ "should_log": false,
128
+ "should_save": true,
129
+ "should_training_stop": false
130
+ },
131
+ "attributes": {}
132
+ }
133
+ },
134
+ "total_flos": 4165793621453100.0,
135
+ "train_batch_size": 32,
136
+ "trial_name": null,
137
+ "trial_params": {
138
+ "dropout": 0.3,
139
+ "learning_rate": 2.0054539122125413e-05,
140
+ "optim": "adamw_torch"
141
+ }
142
+ }
run-0/checkpoint-555/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2718cfa04c9d7664e92a9d3fe2c105da6e2410abefc860ef4614a2042f5296e
+ size 5304
run-1/checkpoint-1998/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
run-1/checkpoint-1998/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07e15fc676b2b34404c59af505ebedb4042e66cec064ff3815003599148b501d
+ size 1340635060
run-1/checkpoint-1998/trainer_state.json ADDED
@@ -0,0 +1,389 @@
1
+ {
2
+ "best_global_step": 1998,
3
+ "best_metric": 0.5340909090909091,
4
+ "best_model_checkpoint": "./results_indobert-large-p2_preprocessing_tuning/run-1/checkpoint-1998",
5
+ "epoch": 18.0,
6
+ "eval_steps": 500,
7
+ "global_step": 1998,
8
+ "is_hyper_param_search": true,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 1.0,
14
+ "grad_norm": 28.307113647460938,
15
+ "learning_rate": 3.347995007341736e-05,
16
+ "loss": 1.6047,
17
+ "step": 111
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.2545454545454545,
22
+ "eval_f1": 0.17276828110161443,
23
+ "eval_loss": 1.5445789098739624,
24
+ "eval_precision": 0.15013455356645924,
25
+ "eval_recall": 0.2126797385620915,
26
+ "eval_runtime": 5.6508,
27
+ "eval_samples_per_second": 77.865,
28
+ "eval_steps_per_second": 2.478,
29
+ "step": 111
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "grad_norm": 26.515804290771484,
34
+ "learning_rate": 3.171868255770678e-05,
35
+ "loss": 1.5395,
36
+ "step": 222
37
+ },
38
+ {
39
+ "epoch": 2.0,
40
+ "eval_accuracy": 0.3386363636363636,
41
+ "eval_f1": 0.25941876303739597,
42
+ "eval_loss": 1.509023904800415,
43
+ "eval_precision": 0.4118648391560013,
44
+ "eval_recall": 0.2931060606060606,
45
+ "eval_runtime": 5.6362,
46
+ "eval_samples_per_second": 78.067,
47
+ "eval_steps_per_second": 2.484,
48
+ "step": 222
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "grad_norm": 17.496076583862305,
53
+ "learning_rate": 2.9957415041996202e-05,
54
+ "loss": 1.5068,
55
+ "step": 333
56
+ },
57
+ {
58
+ "epoch": 3.0,
59
+ "eval_accuracy": 0.39090909090909093,
60
+ "eval_f1": 0.31160041861619076,
61
+ "eval_loss": 1.4787184000015259,
62
+ "eval_precision": 0.46941073658516264,
63
+ "eval_recall": 0.3394613556378262,
64
+ "eval_runtime": 5.6351,
65
+ "eval_samples_per_second": 78.082,
66
+ "eval_steps_per_second": 2.484,
67
+ "step": 333
68
+ },
69
+ {
70
+ "epoch": 4.0,
71
+ "grad_norm": 19.426055908203125,
72
+ "learning_rate": 2.819614752628562e-05,
73
+ "loss": 1.4708,
74
+ "step": 444
75
+ },
76
+ {
77
+ "epoch": 4.0,
78
+ "eval_accuracy": 0.425,
79
+ "eval_f1": 0.35879524661207,
80
+ "eval_loss": 1.4531804323196411,
81
+ "eval_precision": 0.5943328601223339,
82
+ "eval_recall": 0.37929201517436806,
83
+ "eval_runtime": 5.6373,
84
+ "eval_samples_per_second": 78.052,
85
+ "eval_steps_per_second": 2.483,
86
+ "step": 444
87
+ },
88
+ {
89
+ "epoch": 5.0,
90
+ "grad_norm": 24.8659610748291,
91
+ "learning_rate": 2.643488001057504e-05,
92
+ "loss": 1.4514,
93
+ "step": 555
94
+ },
95
+ {
96
+ "epoch": 5.0,
97
+ "eval_accuracy": 0.44772727272727275,
98
+ "eval_f1": 0.3811685823691032,
99
+ "eval_loss": 1.4312589168548584,
100
+ "eval_precision": 0.40861740753373244,
101
+ "eval_recall": 0.401798871063577,
102
+ "eval_runtime": 5.634,
103
+ "eval_samples_per_second": 78.097,
104
+ "eval_steps_per_second": 2.485,
105
+ "step": 555
106
+ },
107
+ {
108
+ "epoch": 6.0,
109
+ "grad_norm": 20.785106658935547,
110
+ "learning_rate": 2.4673612494864454e-05,
111
+ "loss": 1.4318,
112
+ "step": 666
113
+ },
114
+ {
115
+ "epoch": 6.0,
116
+ "eval_accuracy": 0.4727272727272727,
117
+ "eval_f1": 0.42115625735603157,
118
+ "eval_loss": 1.4126225709915161,
119
+ "eval_precision": 0.6314769518811494,
120
+ "eval_recall": 0.4340944513003337,
121
+ "eval_runtime": 5.6323,
122
+ "eval_samples_per_second": 78.121,
123
+ "eval_steps_per_second": 2.486,
124
+ "step": 666
125
+ },
126
+ {
127
+ "epoch": 7.0,
128
+ "grad_norm": 21.870254516601562,
129
+ "learning_rate": 2.2912344979153876e-05,
130
+ "loss": 1.4118,
131
+ "step": 777
132
+ },
133
+ {
134
+ "epoch": 7.0,
135
+ "eval_accuracy": 0.4636363636363636,
136
+ "eval_f1": 0.4063186285023795,
137
+ "eval_loss": 1.395840048789978,
138
+ "eval_precision": 0.6317628654319302,
139
+ "eval_recall": 0.4194442730472142,
140
+ "eval_runtime": 5.646,
141
+ "eval_samples_per_second": 77.931,
142
+ "eval_steps_per_second": 2.48,
143
+ "step": 777
144
+ },
145
+ {
146
+ "epoch": 8.0,
147
+ "grad_norm": 25.146141052246094,
148
+ "learning_rate": 2.115107746344329e-05,
149
+ "loss": 1.3992,
150
+ "step": 888
151
+ },
152
+ {
153
+ "epoch": 8.0,
154
+ "eval_accuracy": 0.4818181818181818,
155
+ "eval_f1": 0.4368896891233544,
156
+ "eval_loss": 1.382132649421692,
157
+ "eval_precision": 0.6322711381096474,
158
+ "eval_recall": 0.445048876776818,
159
+ "eval_runtime": 5.6417,
160
+ "eval_samples_per_second": 77.991,
161
+ "eval_steps_per_second": 2.482,
162
+ "step": 888
163
+ },
164
+ {
165
+ "epoch": 9.0,
166
+ "grad_norm": 19.100143432617188,
167
+ "learning_rate": 1.9389809947732713e-05,
168
+ "loss": 1.3817,
169
+ "step": 999
170
+ },
171
+ {
172
+ "epoch": 9.0,
173
+ "eval_accuracy": 0.48863636363636365,
174
+ "eval_f1": 0.44047740781253586,
175
+ "eval_loss": 1.369884729385376,
176
+ "eval_precision": 0.6328607028095519,
177
+ "eval_recall": 0.4521078717034599,
178
+ "eval_runtime": 5.6338,
179
+ "eval_samples_per_second": 78.101,
180
+ "eval_steps_per_second": 2.485,
181
+ "step": 999
182
+ },
183
+ {
184
+ "epoch": 10.0,
185
+ "grad_norm": 27.9638671875,
186
+ "learning_rate": 1.762854243202213e-05,
187
+ "loss": 1.3738,
188
+ "step": 1110
189
+ },
190
+ {
191
+ "epoch": 10.0,
192
+ "eval_accuracy": 0.4863636363636364,
193
+ "eval_f1": 0.4383040341201726,
194
+ "eval_loss": 1.3581469058990479,
195
+ "eval_precision": 0.6375822435542926,
196
+ "eval_recall": 0.44977869760957995,
197
+ "eval_runtime": 5.6419,
198
+ "eval_samples_per_second": 77.987,
199
+ "eval_steps_per_second": 2.481,
200
+ "step": 1110
201
+ },
202
+ {
203
+ "epoch": 11.0,
204
+ "grad_norm": 21.18199920654297,
205
+ "learning_rate": 1.5867274916311547e-05,
206
+ "loss": 1.3676,
207
+ "step": 1221
208
+ },
209
+ {
210
+ "epoch": 11.0,
211
+ "eval_accuracy": 0.4954545454545455,
212
+ "eval_f1": 0.4464058678860118,
213
+ "eval_loss": 1.3483368158340454,
214
+ "eval_precision": 0.6465497076023392,
215
+ "eval_recall": 0.4576812525709585,
216
+ "eval_runtime": 5.6322,
217
+ "eval_samples_per_second": 78.122,
218
+ "eval_steps_per_second": 2.486,
219
+ "step": 1221
220
+ },
221
+ {
222
+ "epoch": 12.0,
223
+ "grad_norm": 20.461551666259766,
224
+ "learning_rate": 1.4106007400600966e-05,
225
+ "loss": 1.3456,
226
+ "step": 1332
227
+ },
228
+ {
229
+ "epoch": 12.0,
230
+ "eval_accuracy": 0.49772727272727274,
231
+ "eval_f1": 0.45309114799612626,
232
+ "eval_loss": 1.3403079509735107,
233
+ "eval_precision": 0.6216789934702979,
234
+ "eval_recall": 0.4615172654143242,
235
+ "eval_runtime": 5.6417,
236
+ "eval_samples_per_second": 77.99,
237
+ "eval_steps_per_second": 2.482,
238
+ "step": 1332
239
+ },
240
+ {
241
+ "epoch": 13.0,
242
+ "grad_norm": 22.989744186401367,
243
+ "learning_rate": 1.2344739884890382e-05,
244
+ "loss": 1.3461,
245
+ "step": 1443
246
+ },
247
+ {
248
+ "epoch": 13.0,
249
+ "eval_accuracy": 0.4954545454545455,
250
+ "eval_f1": 0.45083981073459956,
251
+ "eval_loss": 1.3333238363265991,
252
+ "eval_precision": 0.6414684410584339,
253
+ "eval_recall": 0.4573236608163079,
254
+ "eval_runtime": 5.6425,
255
+ "eval_samples_per_second": 77.98,
256
+ "eval_steps_per_second": 2.481,
257
+ "step": 1443
258
+ },
259
+ {
260
+ "epoch": 14.0,
261
+ "grad_norm": 25.141088485717773,
262
+ "learning_rate": 1.0583472369179801e-05,
263
+ "loss": 1.3328,
264
+ "step": 1554
265
+ },
266
+ {
267
+ "epoch": 14.0,
268
+ "eval_accuracy": 0.5045454545454545,
269
+ "eval_f1": 0.4703906523958635,
270
+ "eval_loss": 1.3271719217300415,
271
+ "eval_precision": 0.6402919474918446,
272
+ "eval_recall": 0.4697626148361443,
273
+ "eval_runtime": 5.6452,
274
+ "eval_samples_per_second": 77.943,
275
+ "eval_steps_per_second": 2.48,
276
+ "step": 1554
277
+ },
278
+ {
279
+ "epoch": 15.0,
280
+ "grad_norm": 25.32221031188965,
281
+ "learning_rate": 8.82220485346922e-06,
282
+ "loss": 1.3318,
283
+ "step": 1665
284
+ },
285
+ {
286
+ "epoch": 15.0,
287
+ "eval_accuracy": 0.5227272727272727,
288
+ "eval_f1": 0.4880063307593935,
289
+ "eval_loss": 1.3223538398742676,
290
+ "eval_precision": 0.63281290138433,
291
+ "eval_recall": 0.4929561451620275,
292
+ "eval_runtime": 5.6478,
293
+ "eval_samples_per_second": 77.906,
294
+ "eval_steps_per_second": 2.479,
295
+ "step": 1665
296
+ },
297
+ {
298
+ "epoch": 16.0,
299
+ "grad_norm": 22.660276412963867,
300
+ "learning_rate": 7.060937337758639e-06,
301
+ "loss": 1.3283,
302
+ "step": 1776
303
+ },
304
+ {
305
+ "epoch": 16.0,
306
+ "eval_accuracy": 0.5227272727272727,
307
+ "eval_f1": 0.4891241086551116,
308
+ "eval_loss": 1.3183715343475342,
309
+ "eval_precision": 0.6357144636301626,
310
+ "eval_recall": 0.49185134718222956,
311
+ "eval_runtime": 5.6328,
312
+ "eval_samples_per_second": 78.114,
313
+ "eval_steps_per_second": 2.485,
314
+ "step": 1776
315
+ },
316
+ {
317
+ "epoch": 17.0,
318
+ "grad_norm": 23.627805709838867,
319
+ "learning_rate": 5.299669822048057e-06,
320
+ "loss": 1.3244,
321
+ "step": 1887
322
+ },
323
+ {
324
+ "epoch": 17.0,
325
+ "eval_accuracy": 0.5318181818181819,
326
+ "eval_f1": 0.49959033811814146,
327
+ "eval_loss": 1.3156731128692627,
328
+ "eval_precision": 0.6414611620564796,
329
+ "eval_recall": 0.5044939496777732,
330
+ "eval_runtime": 5.6491,
331
+ "eval_samples_per_second": 77.888,
332
+ "eval_steps_per_second": 2.478,
333
+ "step": 1887
334
+ },
335
+ {
336
+ "epoch": 18.0,
337
+ "grad_norm": 21.08611488342285,
338
+ "learning_rate": 3.5384023063374753e-06,
339
+ "loss": 1.3202,
340
+ "step": 1998
341
+ },
342
+ {
343
+ "epoch": 18.0,
344
+ "eval_accuracy": 0.5340909090909091,
345
+ "eval_f1": 0.50148050227307,
346
+ "eval_loss": 1.3135546445846558,
347
+ "eval_precision": 0.6410177133655395,
348
+ "eval_recall": 0.5074763471822296,
349
+ "eval_runtime": 5.6436,
350
+ "eval_samples_per_second": 77.964,
351
+ "eval_steps_per_second": 2.481,
352
+ "step": 1998
353
+ }
354
+ ],
355
+ "logging_steps": 500,
356
+ "max_steps": 2220,
357
+ "num_input_tokens_seen": 0,
358
+ "num_train_epochs": 20,
359
+ "save_steps": 500,
360
+ "stateful_callbacks": {
361
+ "EarlyStoppingCallback": {
362
+ "args": {
363
+ "early_stopping_patience": 2,
364
+ "early_stopping_threshold": 0.0
365
+ },
366
+ "attributes": {
367
+ "early_stopping_patience_counter": 0
368
+ }
369
+ },
370
+ "TrainerControl": {
371
+ "args": {
372
+ "should_epoch_stop": false,
373
+ "should_evaluate": false,
374
+ "should_log": false,
375
+ "should_save": true,
376
+ "should_training_stop": false
377
+ },
378
+ "attributes": {}
379
+ }
380
+ },
381
+ "total_flos": 1.499685703723116e+16,
382
+ "train_batch_size": 32,
383
+ "trial_name": null,
384
+ "trial_params": {
385
+ "dropout": 0.1,
386
+ "learning_rate": 3.5225350314211635e-05,
387
+ "optim": "adafactor"
388
+ }
389
+ }
run-1/checkpoint-1998/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed8145004f2ca45b2da65c4909138f82df15e15928e4fb3105a0807198eb5bd8
+ size 5304
run-2/checkpoint-888/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
run-2/checkpoint-888/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d719a532d71aa147139a87755a12cb0d04bdcc096420991de6dd1c085f60349
+ size 1340635060
run-2/checkpoint-888/trainer_state.json ADDED
@@ -0,0 +1,199 @@
1
+ {
2
+ "best_global_step": 888,
3
+ "best_metric": 0.5068181818181818,
4
+ "best_model_checkpoint": "./results_indobert-large-p2_preprocessing_tuning/run-2/checkpoint-888",
5
+ "epoch": 8.0,
6
+ "eval_steps": 500,
7
+ "global_step": 888,
8
+ "is_hyper_param_search": true,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 1.0,
14
+ "grad_norm": 27.982898712158203,
15
+ "learning_rate": 4.155146788213293e-05,
16
+ "loss": 1.5977,
17
+ "step": 111
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.2636363636363636,
22
+ "eval_f1": 0.1799745107509752,
23
+ "eval_loss": 1.5375301837921143,
24
+ "eval_precision": 0.25754679410593384,
25
+ "eval_recall": 0.2207596325243384,
26
+ "eval_runtime": 5.6549,
27
+ "eval_samples_per_second": 77.809,
28
+ "eval_steps_per_second": 2.476,
29
+ "step": 111
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "grad_norm": 26.399566650390625,
34
+ "learning_rate": 3.9365584974589446e-05,
35
+ "loss": 1.53,
36
+ "step": 222
37
+ },
38
+ {
39
+ "epoch": 2.0,
40
+ "eval_accuracy": 0.3568181818181818,
41
+ "eval_f1": 0.2808285329868572,
42
+ "eval_loss": 1.4986050128936768,
43
+ "eval_precision": 0.4284489841672241,
44
+ "eval_recall": 0.31234477124183,
45
+ "eval_runtime": 5.6438,
46
+ "eval_samples_per_second": 77.962,
47
+ "eval_steps_per_second": 2.481,
48
+ "step": 222
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "grad_norm": 17.31713104248047,
53
+ "learning_rate": 3.7179702067045956e-05,
54
+ "loss": 1.4936,
55
+ "step": 333
56
+ },
57
+ {
58
+ "epoch": 3.0,
59
+ "eval_accuracy": 0.3886363636363636,
60
+ "eval_f1": 0.32744459189481273,
61
+ "eval_loss": 1.4661439657211304,
62
+ "eval_precision": 0.45449121759466593,
63
+ "eval_recall": 0.3438792620777915,
64
+ "eval_runtime": 5.642,
65
+ "eval_samples_per_second": 77.987,
66
+ "eval_steps_per_second": 2.481,
67
+ "step": 333
68
+ },
69
+ {
70
+ "epoch": 4.0,
71
+ "grad_norm": 18.687728881835938,
72
+ "learning_rate": 3.499381915950247e-05,
73
+ "loss": 1.4543,
74
+ "step": 444
75
+ },
76
+ {
77
+ "epoch": 4.0,
78
+ "eval_accuracy": 0.4431818181818182,
79
+ "eval_f1": 0.38349095423479923,
80
+ "eval_loss": 1.4361019134521484,
81
+ "eval_precision": 0.6067617470249049,
82
+ "eval_recall": 0.39622406188582654,
83
+ "eval_runtime": 5.6469,
84
+ "eval_samples_per_second": 77.919,
85
+ "eval_steps_per_second": 2.479,
86
+ "step": 444
87
+ },
88
+ {
89
+ "epoch": 5.0,
90
+ "grad_norm": 24.65712547302246,
91
+ "learning_rate": 3.280793625195898e-05,
92
+ "loss": 1.432,
93
+ "step": 555
94
+ },
95
+ {
96
+ "epoch": 5.0,
97
+ "eval_accuracy": 0.4727272727272727,
98
+ "eval_f1": 0.4139282607939324,
99
+ "eval_loss": 1.411230206489563,
100
+ "eval_precision": 0.43078144078144087,
101
+ "eval_recall": 0.43161207664884127,
102
+ "eval_runtime": 5.6457,
103
+ "eval_samples_per_second": 77.935,
104
+ "eval_steps_per_second": 2.48,
105
+ "step": 555
106
+ },
107
+ {
108
+ "epoch": 6.0,
109
+ "grad_norm": 20.577791213989258,
110
+ "learning_rate": 3.06220533444155e-05,
111
+ "loss": 1.4101,
112
+ "step": 666
113
+ },
114
+ {
115
+ "epoch": 6.0,
116
+ "eval_accuracy": 0.48409090909090907,
117
+ "eval_f1": 0.4338799158412677,
118
+ "eval_loss": 1.390539288520813,
119
+ "eval_precision": 0.6315770552471583,
120
+ "eval_recall": 0.44571526920791626,
121
+ "eval_runtime": 5.639,
122
+ "eval_samples_per_second": 78.028,
123
+ "eval_steps_per_second": 2.483,
124
+ "step": 666
125
+ },
126
+ {
127
+ "epoch": 7.0,
128
+ "grad_norm": 21.280122756958008,
129
+ "learning_rate": 2.8436170436872013e-05,
130
+ "loss": 1.3888,
131
+ "step": 777
132
+ },
133
+ {
134
+ "epoch": 7.0,
135
+ "eval_accuracy": 0.4727272727272727,
136
+ "eval_f1": 0.424862819641019,
137
+ "eval_loss": 1.37294602394104,
138
+ "eval_precision": 0.6410874819887262,
139
+ "eval_recall": 0.4338800047991224,
140
+ "eval_runtime": 5.6393,
141
+ "eval_samples_per_second": 78.023,
142
+ "eval_steps_per_second": 2.483,
143
+ "step": 777
144
+ },
145
+ {
146
+ "epoch": 8.0,
147
+ "grad_norm": 24.84048843383789,
148
+ "learning_rate": 2.6250287529328527e-05,
149
+ "loss": 1.3743,
150
+ "step": 888
151
+ },
152
+ {
153
+ "epoch": 8.0,
154
+ "eval_accuracy": 0.5068181818181818,
155
+ "eval_f1": 0.4797829051866942,
156
+ "eval_loss": 1.3590517044067383,
157
+ "eval_precision": 0.6435987454921921,
158
+ "eval_recall": 0.47469299899446965,
159
+ "eval_runtime": 5.6444,
160
+ "eval_samples_per_second": 77.953,
161
+ "eval_steps_per_second": 2.48,
162
+ "step": 888
163
+ }
164
+ ],
165
+ "logging_steps": 500,
166
+ "max_steps": 2220,
167
+ "num_input_tokens_seen": 0,
168
+ "num_train_epochs": 20,
169
+ "save_steps": 500,
170
+ "stateful_callbacks": {
171
+ "EarlyStoppingCallback": {
172
+ "args": {
173
+ "early_stopping_patience": 2,
174
+ "early_stopping_threshold": 0.0
175
+ },
176
+ "attributes": {
177
+ "early_stopping_patience_counter": 0
178
+ }
179
+ },
180
+ "TrainerControl": {
181
+ "args": {
182
+ "should_epoch_stop": false,
183
+ "should_evaluate": false,
184
+ "should_log": false,
185
+ "should_save": true,
186
+ "should_training_stop": false
187
+ },
188
+ "attributes": {}
189
+ }
190
+ },
191
+ "total_flos": 6665269794324960.0,
192
+ "train_batch_size": 32,
193
+ "trial_name": null,
194
+ "trial_params": {
195
+ "dropout": 0.1,
196
+ "learning_rate": 4.3717658150869716e-05,
197
+ "optim": "adamw_torch"
198
+ }
199
+ }
run-2/checkpoint-888/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc729469000a177aefc94e34e3e6a799b7a60083f5d6c40f2adb23cc599545f9
+ size 5304
run-3/checkpoint-999/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
run-3/checkpoint-999/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93607dc340a2d1d7ea3d3b721bc9018aba30b267ad48dab2f3f2ce46766c82ad
+ size 1340635060
run-3/checkpoint-999/trainer_state.json ADDED
@@ -0,0 +1,218 @@
1
+ {
2
+ "best_global_step": 999,
3
+ "best_metric": 0.4954545454545455,
4
+ "best_model_checkpoint": "./results_indobert-large-p2_preprocessing_tuning/run-3/checkpoint-999",
5
+ "epoch": 9.0,
6
+ "eval_steps": 500,
7
+ "global_step": 999,
8
+ "is_hyper_param_search": true,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 1.0,
14
+ "grad_norm": 28.07038688659668,
15
+ "learning_rate": 3.3016175944992974e-05,
16
+ "loss": 1.6052,
17
+ "step": 111
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.25681818181818183,
22
+ "eval_f1": 0.17675619592464678,
23
+ "eval_loss": 1.5476956367492676,
24
+ "eval_precision": 0.22089947089947087,
25
+ "eval_recall": 0.21530508706979296,
26
+ "eval_runtime": 5.6509,
27
+ "eval_samples_per_second": 77.863,
28
+ "eval_steps_per_second": 2.477,
29
+ "step": 111
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "grad_norm": 26.43117332458496,
34
+ "learning_rate": 3.127930602561183e-05,
35
+ "loss": 1.543,
36
+ "step": 222
37
+ },
38
+ {
39
+ "epoch": 2.0,
40
+ "eval_accuracy": 0.32727272727272727,
41
+ "eval_f1": 0.2417304318204673,
42
+ "eval_loss": 1.5140272378921509,
43
+ "eval_precision": 0.41058006919572254,
44
+ "eval_recall": 0.282364081996435,
45
+ "eval_runtime": 5.6439,
46
+ "eval_samples_per_second": 77.961,
47
+ "eval_steps_per_second": 2.481,
48
+ "step": 222
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "grad_norm": 17.558069229125977,
53
+ "learning_rate": 2.9542436106230683e-05,
54
+ "loss": 1.5127,
55
+ "step": 333
56
+ },
57
+ {
58
+ "epoch": 3.0,
59
+ "eval_accuracy": 0.35909090909090907,
60
+ "eval_f1": 0.2703941838976566,
61
+ "eval_loss": 1.4859756231307983,
62
+ "eval_precision": 0.4807188323597861,
63
+ "eval_recall": 0.30677121897710136,
64
+ "eval_runtime": 5.6443,
65
+ "eval_samples_per_second": 77.955,
66
+ "eval_steps_per_second": 2.48,
67
+ "step": 333
68
+ },
69
+ {
70
+ "epoch": 4.0,
71
+ "grad_norm": 19.551990509033203,
72
+ "learning_rate": 2.7805566186849536e-05,
73
+ "loss": 1.4782,
74
+ "step": 444
75
+ },
76
+ {
77
+ "epoch": 4.0,
78
+ "eval_accuracy": 0.41818181818181815,
79
+ "eval_f1": 0.3384192026632338,
80
+ "eval_loss": 1.4601621627807617,
81
+ "eval_precision": 0.49135958882110203,
82
+ "eval_recall": 0.36583056241144474,
83
+ "eval_runtime": 5.6426,
84
+ "eval_samples_per_second": 77.979,
85
+ "eval_steps_per_second": 2.481,
86
+ "step": 444
87
+ },
88
+ {
89
+ "epoch": 5.0,
90
+ "grad_norm": 24.336103439331055,
91
+ "learning_rate": 2.606869626746839e-05,
92
+ "loss": 1.4585,
93
+ "step": 555
94
+ },
95
+ {
96
+ "epoch": 5.0,
97
+ "eval_accuracy": 0.43863636363636366,
98
+ "eval_f1": 0.3715119915434134,
99
+ "eval_loss": 1.4385919570922852,
100
+ "eval_precision": 0.41001389914433395,
101
+ "eval_recall": 0.39180555555555563,
102
+ "eval_runtime": 5.6421,
103
+ "eval_samples_per_second": 77.985,
104
+ "eval_steps_per_second": 2.481,
105
+ "step": 555
106
+ },
107
+ {
108
+ "epoch": 6.0,
109
+ "grad_norm": 20.89763069152832,
110
+ "learning_rate": 2.433182634808724e-05,
111
+ "loss": 1.4404,
112
+ "step": 666
113
+ },
114
+ {
115
+ "epoch": 6.0,
116
+ "eval_accuracy": 0.45,
117
+ "eval_f1": 0.3827180550754695,
118
+ "eval_loss": 1.4205297231674194,
119
+ "eval_precision": 0.4161682321743297,
120
+ "eval_recall": 0.40269050802139034,
121
+ "eval_runtime": 5.6514,
122
+ "eval_samples_per_second": 77.857,
123
+ "eval_steps_per_second": 2.477,
124
+ "step": 666
125
+ },
126
+ {
127
+ "epoch": 7.0,
128
+ "grad_norm": 21.438804626464844,
129
+ "learning_rate": 2.2594956428706095e-05,
130
+ "loss": 1.4209,
131
+ "step": 777
132
+ },
133
+ {
134
+ "epoch": 7.0,
135
+ "eval_accuracy": 0.46136363636363636,
136
+ "eval_f1": 0.4025527449318057,
137
+ "eval_loss": 1.4050540924072266,
138
+ "eval_precision": 0.6351323893652621,
139
+ "eval_recall": 0.41623608825814706,
140
+ "eval_runtime": 5.6367,
141
+ "eval_samples_per_second": 78.06,
142
+ "eval_steps_per_second": 2.484,
143
+ "step": 777
144
+ },
145
+ {
146
+ "epoch": 8.0,
147
+ "grad_norm": 25.60862922668457,
148
+ "learning_rate": 2.0858086509324948e-05,
149
+ "loss": 1.4091,
150
+ "step": 888
151
+ },
152
+ {
153
+ "epoch": 8.0,
154
+ "eval_accuracy": 0.4818181818181818,
155
+ "eval_f1": 0.44363701601041755,
156
+ "eval_loss": 1.392228603363037,
157
+ "eval_precision": 0.6408069190319805,
158
+ "eval_recall": 0.44583264774441245,
159
+ "eval_runtime": 5.642,
160
+ "eval_samples_per_second": 77.986,
161
+ "eval_steps_per_second": 2.481,
162
+ "step": 888
163
+ },
164
+ {
165
+ "epoch": 9.0,
166
+ "grad_norm": 19.344066619873047,
167
+ "learning_rate": 1.91212165899438e-05,
168
+ "loss": 1.3911,
169
+ "step": 999
170
+ },
171
+ {
172
+ "epoch": 9.0,
173
+ "eval_accuracy": 0.4954545454545455,
174
+ "eval_f1": 0.44896411578531625,
175
+ "eval_loss": 1.3796334266662598,
176
+ "eval_precision": 0.6507260296673966,
177
+ "eval_recall": 0.4559108048813931,
178
+ "eval_runtime": 5.64,
179
+ "eval_samples_per_second": 78.015,
180
+ "eval_steps_per_second": 2.482,
181
+ "step": 999
182
+ }
183
+ ],
184
+ "logging_steps": 500,
185
+ "max_steps": 2220,
186
+ "num_input_tokens_seen": 0,
187
+ "num_train_epochs": 20,
188
+ "save_steps": 500,
189
+ "stateful_callbacks": {
190
+ "EarlyStoppingCallback": {
191
+ "args": {
192
+ "early_stopping_patience": 2,
193
+ "early_stopping_threshold": 0.0
194
+ },
195
+ "attributes": {
196
+ "early_stopping_patience_counter": 0
197
+ }
198
+ },
199
+ "TrainerControl": {
200
+ "args": {
201
+ "should_epoch_stop": false,
202
+ "should_evaluate": false,
203
+ "should_log": false,
204
+ "should_save": true,
205
+ "should_training_stop": false
206
+ },
207
+ "attributes": {}
208
+ }
209
+ },
210
+ "total_flos": 7498428518615580.0,
211
+ "train_batch_size": 32,
212
+ "trial_name": null,
213
+ "trial_params": {
214
+ "dropout": 0.1,
215
+ "learning_rate": 3.473739838762294e-05,
216
+ "optim": "adamw_torch"
217
+ }
218
+ }
run-3/checkpoint-999/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fab9278cb3dc0f63bca53110e0d1c7cb99427159cac07769457f10c95cc61aa2
+ size 5304
run-4/checkpoint-2220/config.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "_num_labels": 5,
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
run-4/checkpoint-2220/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd62eaa9fe39b422eb3bfc2f8945cd7b2fc3bcf161dbd67d9647c4061a6f17d8
+ size 1340635060
run-4/checkpoint-2220/trainer_state.json ADDED
@@ -0,0 +1,427 @@
1
+ {
2
+ "best_global_step": 2220,
3
+ "best_metric": 0.48409090909090907,
4
+ "best_model_checkpoint": "./results_indobert-large-p2_preprocessing_tuning/run-4/checkpoint-2220",
5
+ "epoch": 20.0,
6
+ "eval_steps": 500,
7
+ "global_step": 2220,
8
+ "is_hyper_param_search": true,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 1.0,
14
+ "grad_norm": 28.173372268676758,
15
+ "learning_rate": 2.061757049019103e-05,
16
+ "loss": 1.6195,
17
+ "step": 111
18
+ },
19
+ {
20
+ "epoch": 1.0,
21
+ "eval_accuracy": 0.22727272727272727,
22
+ "eval_f1": 0.16707363603285513,
23
+ "eval_loss": 1.5637433528900146,
24
+ "eval_precision": 0.1896742471557858,
25
+ "eval_recall": 0.19347410759175462,
26
+ "eval_runtime": 5.6498,
27
+ "eval_samples_per_second": 77.879,
28
+ "eval_steps_per_second": 2.478,
29
+ "step": 111
30
+ },
31
+ {
32
+ "epoch": 2.0,
33
+ "grad_norm": 26.51243019104004,
34
+ "learning_rate": 1.9532949483361077e-05,
35
+ "loss": 1.5637,
36
+ "step": 222
37
+ },
38
+ {
39
+ "epoch": 2.0,
40
+ "eval_accuracy": 0.2840909090909091,
41
+ "eval_f1": 0.20335907891998062,
42
+ "eval_loss": 1.5392452478408813,
43
+ "eval_precision": 0.24167936991869915,
44
+ "eval_recall": 0.24468174390968506,
45
+ "eval_runtime": 5.6407,
46
+ "eval_samples_per_second": 78.005,
47
+ "eval_steps_per_second": 2.482,
48
+ "step": 222
49
+ },
50
+ {
51
+ "epoch": 3.0,
52
+ "grad_norm": 18.05230712890625,
53
+ "learning_rate": 1.844832847653112e-05,
54
+ "loss": 1.5436,
55
+ "step": 333
56
+ },
57
+ {
58
+ "epoch": 3.0,
59
+ "eval_accuracy": 0.3159090909090909,
60
+ "eval_f1": 0.222951366983383,
61
+ "eval_loss": 1.518822431564331,
62
+ "eval_precision": 0.4947492546611489,
63
+ "eval_recall": 0.2669387026372321,
64
+ "eval_runtime": 5.6405,
65
+ "eval_samples_per_second": 78.007,
66
+ "eval_steps_per_second": 2.482,
67
+ "step": 333
68
+ },
69
+ {
70
+ "epoch": 4.0,
71
+ "grad_norm": 21.303878784179688,
72
+ "learning_rate": 1.7363707469701166e-05,
73
+ "loss": 1.5177,
74
+ "step": 444
75
+ },
76
+ {
77
+ "epoch": 4.0,
78
+ "eval_accuracy": 0.35454545454545455,
79
+ "eval_f1": 0.2592289550384887,
80
+ "eval_loss": 1.5000191926956177,
81
+ "eval_precision": 0.5177054197359525,
82
+ "eval_recall": 0.30301713400978103,
83
+ "eval_runtime": 5.6399,
84
+ "eval_samples_per_second": 78.016,
85
+ "eval_steps_per_second": 2.482,
86
+ "step": 444
87
+ },
88
+ {
89
+ "epoch": 5.0,
90
+ "grad_norm": 23.725130081176758,
91
+ "learning_rate": 1.627908646287121e-05,
92
+ "loss": 1.5018,
93
+ "step": 555
94
+ },
95
+ {
96
+ "epoch": 5.0,
97
+ "eval_accuracy": 0.38636363636363635,
98
+ "eval_f1": 0.28658706487641944,
99
+ "eval_loss": 1.4842791557312012,
100
+ "eval_precision": 0.3875367377720319,
101
+ "eval_recall": 0.3301949643493761,
102
+ "eval_runtime": 5.6473,
103
+ "eval_samples_per_second": 77.913,
104
+ "eval_steps_per_second": 2.479,
105
+ "step": 555
106
+ },
107
+ {
108
+ "epoch": 6.0,
109
+ "grad_norm": 21.390344619750977,
110
+ "learning_rate": 1.5194465456041256e-05,
111
+ "loss": 1.4906,
112
+ "step": 666
113
+ },
114
+ {
115
+ "epoch": 6.0,
116
+ "eval_accuracy": 0.3840909090909091,
117
+ "eval_f1": 0.2939072926084069,
118
+ "eval_loss": 1.4709296226501465,
119
+ "eval_precision": 0.4050465432146414,
120
+ "eval_recall": 0.33076463161021985,
121
+ "eval_runtime": 5.6511,
122
+ "eval_samples_per_second": 77.861,
123
+ "eval_steps_per_second": 2.477,
124
+ "step": 666
125
+ },
126
+ {
127
+ "epoch": 7.0,
128
+ "grad_norm": 21.77083396911621,
129
+ "learning_rate": 1.4109844449211303e-05,
130
+ "loss": 1.4748,
131
+ "step": 777
132
+ },
133
+ {
134
+ "epoch": 7.0,
135
+ "eval_accuracy": 0.4,
136
+ "eval_f1": 0.31341312575745073,
137
+ "eval_loss": 1.4593591690063477,
138
+ "eval_precision": 0.4112193362193362,
139
+ "eval_recall": 0.3468304367201426,
140
+ "eval_runtime": 5.6465,
141
+ "eval_samples_per_second": 77.924,
142
+ "eval_steps_per_second": 2.479,
143
+ "step": 777
144
+ },
145
+ {
146
+ "epoch": 8.0,
147
+ "grad_norm": 26.71363639831543,
148
+ "learning_rate": 1.3025223442381347e-05,
149
+ "loss": 1.4683,
150
+ "step": 888
151
+ },
152
+ {
153
+ "epoch": 8.0,
154
+ "eval_accuracy": 0.42045454545454547,
155
+ "eval_f1": 0.359908589559646,
156
+ "eval_loss": 1.4492100477218628,
157
+ "eval_precision": 0.6176410063500948,
158
+ "eval_recall": 0.3752995166598108,
159
+ "eval_runtime": 5.643,
160
+ "eval_samples_per_second": 77.973,
161
+ "eval_steps_per_second": 2.481,
162
+ "step": 888
163
+ },
164
+ {
165
+ "epoch": 9.0,
166
+ "grad_norm": 20.639911651611328,
167
+ "learning_rate": 1.1940602435551393e-05,
168
+ "loss": 1.4531,
169
+ "step": 999
170
+ },
171
+ {
172
+ "epoch": 9.0,
173
+ "eval_accuracy": 0.4318181818181818,
174
+ "eval_f1": 0.36729459537697784,
175
+ "eval_loss": 1.4398030042648315,
176
+ "eval_precision": 0.6194453783927468,
177
+ "eval_recall": 0.3847588155308744,
178
+ "eval_runtime": 5.6431,
179
+ "eval_samples_per_second": 77.971,
180
+ "eval_steps_per_second": 2.481,
181
+ "step": 999
182
+ },
183
+ {
184
+ "epoch": 10.0,
185
+ "grad_norm": 28.17357635498047,
186
+ "learning_rate": 1.0855981428721437e-05,
187
+ "loss": 1.4491,
188
+ "step": 1110
189
+ },
190
+ {
191
+ "epoch": 10.0,
192
+ "eval_accuracy": 0.4318181818181818,
193
+ "eval_f1": 0.36386279688990475,
194
+ "eval_loss": 1.4310266971588135,
195
+ "eval_precision": 0.4143773997162906,
196
+ "eval_recall": 0.3842602495543672,
197
+ "eval_runtime": 5.6507,
198
+ "eval_samples_per_second": 77.866,
199
+ "eval_steps_per_second": 2.478,
200
+ "step": 1110
201
+ },
202
+ {
203
+ "epoch": 11.0,
204
+ "grad_norm": 21.685190200805664,
205
+ "learning_rate": 9.771360421891483e-06,
206
+ "loss": 1.4454,
207
+ "step": 1221
208
+ },
209
+ {
210
+ "epoch": 11.0,
211
+ "eval_accuracy": 0.44772727272727275,
212
+ "eval_f1": 0.38095142005589766,
213
+ "eval_loss": 1.4234737157821655,
214
+ "eval_precision": 0.4134604525908873,
215
+ "eval_recall": 0.40081290849673207,
216
+ "eval_runtime": 5.6455,
217
+ "eval_samples_per_second": 77.938,
218
+ "eval_steps_per_second": 2.48,
219
+ "step": 1221
220
+ },
221
+ {
222
+ "epoch": 12.0,
223
+ "grad_norm": 22.45073890686035,
224
+ "learning_rate": 8.686739415061529e-06,
225
+ "loss": 1.4259,
226
+ "step": 1332
227
+ },
228
+ {
229
+ "epoch": 12.0,
230
+ "eval_accuracy": 0.4431818181818182,
231
+ "eval_f1": 0.37841971280585274,
232
+ "eval_loss": 1.417261004447937,
233
+ "eval_precision": 0.6213600856072766,
234
+ "eval_recall": 0.3951208921797157,
235
+ "eval_runtime": 5.6548,
236
+ "eval_samples_per_second": 77.81,
237
+ "eval_steps_per_second": 2.476,
238
+ "step": 1332
239
+ },
240
+ {
241
+ "epoch": 13.0,
242
+ "grad_norm": 22.751588821411133,
243
+ "learning_rate": 7.602118408231573e-06,
244
+ "loss": 1.4269,
245
+ "step": 1443
246
+ },
247
+ {
248
+ "epoch": 13.0,
249
+ "eval_accuracy": 0.45227272727272727,
250
+ "eval_f1": 0.38372927204896606,
251
+ "eval_loss": 1.41156005859375,
252
+ "eval_precision": 0.6219436130594713,
253
+ "eval_recall": 0.4022510169568993,
254
+ "eval_runtime": 5.6568,
255
+ "eval_samples_per_second": 77.783,
256
+ "eval_steps_per_second": 2.475,
257
+ "step": 1443
258
+ },
259
+ {
260
+ "epoch": 14.0,
261
+ "grad_norm": 25.058902740478516,
262
+ "learning_rate": 6.517497401401619e-06,
263
+ "loss": 1.4149,
264
+ "step": 1554
265
+ },
266
+ {
267
+ "epoch": 14.0,
268
+ "eval_accuracy": 0.46136363636363636,
269
+ "eval_f1": 0.3965849769868146,
270
+ "eval_loss": 1.406818151473999,
271
+ "eval_precision": 0.62480495292599,
272
+ "eval_recall": 0.4135868012706248,
273
+ "eval_runtime": 5.6396,
274
+ "eval_samples_per_second": 78.019,
275
+ "eval_steps_per_second": 2.482,
276
+ "step": 1554
277
+ },
278
+ {
279
+ "epoch": 15.0,
280
+ "grad_norm": 25.90663719177246,
281
+ "learning_rate": 5.432876394571665e-06,
282
+ "loss": 1.4154,
283
+ "step": 1665
284
+ },
285
+ {
286
+ "epoch": 15.0,
287
+ "eval_accuracy": 0.4681818181818182,
288
+ "eval_f1": 0.40920946163765315,
289
+ "eval_loss": 1.4027258157730103,
290
+ "eval_precision": 0.6300146138362699,
291
+ "eval_recall": 0.4239477638374698,
292
+ "eval_runtime": 5.6499,
293
+ "eval_samples_per_second": 77.878,
294
+ "eval_steps_per_second": 2.478,
295
+ "step": 1665
296
+ },
297
+ {
298
+ "epoch": 16.0,
299
+ "grad_norm": 23.27178192138672,
300
+ "learning_rate": 4.34825538774171e-06,
301
+ "loss": 1.4127,
302
+ "step": 1776
303
+ },
304
+ {
305
+ "epoch": 16.0,
306
+ "eval_accuracy": 0.4681818181818182,
307
+ "eval_f1": 0.40934331177145705,
308
+ "eval_loss": 1.3994897603988647,
309
+ "eval_precision": 0.6302565920883121,
310
+ "eval_recall": 0.4239477638374698,
311
+ "eval_runtime": 5.6497,
312
+ "eval_samples_per_second": 77.88,
313
+ "eval_steps_per_second": 2.478,
314
+ "step": 1776
315
+ },
316
+ {
317
+ "epoch": 17.0,
318
+ "grad_norm": 24.377626419067383,
319
+ "learning_rate": 3.2636343809117553e-06,
320
+ "loss": 1.409,
321
+ "step": 1887
322
+ },
323
+ {
324
+ "epoch": 17.0,
325
+ "eval_accuracy": 0.47954545454545455,
326
+ "eval_f1": 0.4268450750732614,
327
+ "eval_loss": 1.3972902297973633,
328
+ "eval_precision": 0.6328140298234569,
329
+ "eval_recall": 0.438621909136615,
330
+ "eval_runtime": 5.6515,
331
+ "eval_samples_per_second": 77.856,
332
+ "eval_steps_per_second": 2.477,
333
+ "step": 1887
334
+ },
335
+ {
336
+ "epoch": 18.0,
337
+ "grad_norm": 21.303892135620117,
338
+ "learning_rate": 2.1790133740818008e-06,
339
+ "loss": 1.4057,
340
+ "step": 1998
341
+ },
342
+ {
343
+ "epoch": 18.0,
344
+ "eval_accuracy": 0.4818181818181818,
345
+ "eval_f1": 0.43042012354512893,
346
+ "eval_loss": 1.3956239223480225,
347
+ "eval_precision": 0.6351662548714184,
348
+ "eval_recall": 0.44174690913661496,
349
+ "eval_runtime": 5.6444,
350
+ "eval_samples_per_second": 77.953,
351
+ "eval_steps_per_second": 2.48,
352
+ "step": 1998
353
+ },
354
+ {
355
+ "epoch": 19.0,
356
+ "grad_norm": 28.218753814697266,
357
+ "learning_rate": 1.0943923672518462e-06,
358
+ "loss": 1.4045,
359
+ "step": 2109
360
+ },
361
+ {
362
+ "epoch": 19.0,
363
+ "eval_accuracy": 0.4818181818181818,
364
+ "eval_f1": 0.4310770770506327,
365
+ "eval_loss": 1.394635796546936,
366
+ "eval_precision": 0.6376135558831151,
367
+ "eval_recall": 0.4418063268430915,
368
+ "eval_runtime": 5.6469,
369
+ "eval_samples_per_second": 77.919,
370
+ "eval_steps_per_second": 2.479,
371
+ "step": 2109
372
+ },
373
+ {
374
+ "epoch": 20.0,
375
+ "grad_norm": 22.574962615966797,
376
+ "learning_rate": 9.771360421891483e-09,
377
+ "loss": 1.3992,
378
+ "step": 2220
379
+ },
380
+ {
381
+ "epoch": 20.0,
382
+ "eval_accuracy": 0.48409090909090907,
383
+ "eval_f1": 0.43274338861391276,
384
+ "eval_loss": 1.3942500352859497,
385
+ "eval_precision": 0.6394552780212288,
386
+ "eval_recall": 0.44362450866127334,
387
+ "eval_runtime": 5.6595,
388
+ "eval_samples_per_second": 77.745,
389
+ "eval_steps_per_second": 2.474,
390
+ "step": 2220
391
+ }
392
+ ],
393
+ "logging_steps": 500,
394
+ "max_steps": 2220,
395
+ "num_input_tokens_seen": 0,
396
+ "num_train_epochs": 20,
397
+ "save_steps": 500,
398
+ "stateful_callbacks": {
399
+ "EarlyStoppingCallback": {
400
+ "args": {
401
+ "early_stopping_patience": 2,
402
+ "early_stopping_threshold": 0.0
403
+ },
404
+ "attributes": {
405
+ "early_stopping_patience_counter": 0
406
+ }
407
+ },
408
+ "TrainerControl": {
409
+ "args": {
410
+ "should_epoch_stop": false,
411
+ "should_evaluate": false,
412
+ "should_log": false,
413
+ "should_save": true,
414
+ "should_training_stop": true
415
+ },
416
+ "attributes": {}
417
+ }
418
+ },
419
+ "total_flos": 1.66631744858124e+16,
420
+ "train_batch_size": 32,
421
+ "trial_name": null,
422
+ "trial_params": {
423
+ "dropout": 0.1,
424
+ "learning_rate": 2.1692420136599093e-05,
425
+ "optim": "adamw_torch"
426
+ }
427
+ }
run-4/checkpoint-2220/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2eaecb8cb5c84ecec84ac208ebeb83d488f70ed7d854a82f441c39d1effd6a5
+ size 5304
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73ecc0710fe0575728761988d31e5efdffcb03d899093b12918b430b4bca7f18
+ size 5240