Training in progress, epoch 1
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- model.safetensors +1 -1
- run-6/checkpoint-117/config.json +26 -0
- run-6/checkpoint-117/model.safetensors +3 -0
- run-6/checkpoint-117/optimizer.pt +3 -0
- run-6/checkpoint-117/rng_state.pth +3 -0
- run-6/checkpoint-117/scheduler.pt +3 -0
- run-6/checkpoint-117/special_tokens_map.json +7 -0
- run-6/checkpoint-117/tokenizer.json +0 -0
- run-6/checkpoint-117/tokenizer_config.json +56 -0
- run-6/checkpoint-117/trainer_state.json +67 -0
- run-6/checkpoint-117/training_args.bin +3 -0
- run-6/checkpoint-117/vocab.txt +0 -0
- run-6/checkpoint-156/config.json +1 -1
- run-6/checkpoint-156/model.safetensors +1 -1
- run-6/checkpoint-156/optimizer.pt +1 -1
- run-6/checkpoint-156/rng_state.pth +1 -1
- run-6/checkpoint-156/scheduler.pt +1 -1
- run-6/checkpoint-156/trainer_state.json +42 -22
- run-6/checkpoint-156/training_args.bin +1 -1
- run-6/checkpoint-195/config.json +26 -0
- run-6/checkpoint-195/model.safetensors +3 -0
- run-6/checkpoint-195/optimizer.pt +3 -0
- run-6/checkpoint-195/rng_state.pth +3 -0
- run-6/checkpoint-195/scheduler.pt +3 -0
- run-6/checkpoint-195/special_tokens_map.json +7 -0
- run-6/checkpoint-195/tokenizer.json +0 -0
- run-6/checkpoint-195/tokenizer_config.json +56 -0
- run-6/checkpoint-195/trainer_state.json +85 -0
- run-6/checkpoint-195/training_args.bin +3 -0
- run-6/checkpoint-195/vocab.txt +0 -0
- run-6/checkpoint-78/config.json +1 -1
- run-6/checkpoint-78/model.safetensors +1 -1
- run-6/checkpoint-78/optimizer.pt +1 -1
- run-6/checkpoint-78/scheduler.pt +1 -1
- run-6/checkpoint-78/trainer_state.json +19 -17
- run-6/checkpoint-78/training_args.bin +1 -1
- run-7/checkpoint-117/model.safetensors +1 -1
- run-7/checkpoint-117/optimizer.pt +1 -1
- run-7/checkpoint-117/scheduler.pt +1 -1
- run-7/checkpoint-117/trainer_state.json +19 -19
- run-7/checkpoint-117/training_args.bin +1 -1
- run-7/checkpoint-156/model.safetensors +1 -1
- run-7/checkpoint-156/optimizer.pt +1 -1
- run-7/checkpoint-156/scheduler.pt +1 -1
- run-7/checkpoint-156/trainer_state.json +24 -24
- run-7/checkpoint-156/training_args.bin +1 -1
- run-7/checkpoint-195/model.safetensors +1 -1
- run-7/checkpoint-195/optimizer.pt +1 -1
- run-7/checkpoint-195/scheduler.pt +1 -1
- run-7/checkpoint-195/trainer_state.json +29 -29
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:dbe22d40da7e5f9da1e96eae60554e510934b908b8c92cd31733a2874d3f483e
 size 437958648
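Each of the binary artifacts in this commit (model.safetensors, optimizer.pt, scheduler.pt, rng_state.pth, training_args.bin) is stored as a Git LFS pointer: a three-line text stub recording the spec version, the sha256 of the real blob, and its size in bytes. As a minimal sketch (the local file paths are hypothetical, not part of this repo), a downloaded blob can be checked against its pointer like this:

import hashlib

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "437958648"}
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid recorded in its LFS pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected = fields["oid"].split(":", 1)[1]
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected

# Hypothetical local paths, for illustration only:
# print(verify_blob("model.safetensors.pointer", "model.safetensors"))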
run-6/checkpoint-117/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
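This config describes a standard bert-base-uncased encoder with a single-label classification head. Assuming the checkpoint directory from this commit has been downloaded locally, it can be reloaded with the transformers Auto classes; this is a usage sketch, not part of the commit itself:

from transformers import AutoConfig, AutoModelForSequenceClassification

# Path assumes the run-6/checkpoint-117 directory from this repo is available locally.
checkpoint_dir = "run-6/checkpoint-117"

config = AutoConfig.from_pretrained(checkpoint_dir)
print(config.model_type, config.num_hidden_layers, config.problem_type)
# -> bert 12 single_label_classification

model = AutoModelForSequenceClassification.from_pretrained(checkpoint_dir)
print(model.num_parameters())  # ~110M parameters for bert-base plus the classifier head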
run-6/checkpoint-117/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e7f74d46b32c8d2f0076ec31ace82bf49497460b4f27517e9674d1c39eddd8
+size 437958648

run-6/checkpoint-117/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b405340175d5bcf34ed87d34189b9e2bb54d21c948fa2a929402c5be6ec2fdee
+size 876038394

run-6/checkpoint-117/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:066817b2001cdf2cab3204d72b7658f8308ed56a8eab94345bd5ce0742b9b7f7
+size 14244

run-6/checkpoint-117/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e9dbb9cc4ed759cb224abe50df0e547d0121b53700136189150fe57d392f12b
+size 1064

run-6/checkpoint-117/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-6/checkpoint-117/tokenizer.json ADDED
The diff for this file is too large to render; see the raw diff.
run-6/checkpoint-117/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
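The tokenizer is the stock uncased BERT WordPiece tokenizer (model_max_length 512), while the trial below caps sequences at max_length 32. A short usage sketch, assuming the checkpoint directory is local; the premise/hypothesis pair is made up and not taken from the RTE dataset:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("run-6/checkpoint-117")  # local checkpoint dir

# RTE-style premise/hypothesis pair (illustrative text only).
enc = tokenizer(
    "The cat sat on the mat.",
    "A cat is sitting on a mat.",
    truncation=True,
    max_length=32,          # matches the trial's "max_length" hyperparameter
    padding="max_length",
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # torch.Size([1, 32])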
run-6/checkpoint-117/trainer_state.json ADDED
@@ -0,0 +1,67 @@
+{
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-39",
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 117,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6918498277664185,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.177,
+      "eval_steps_per_second": 14.107,
+      "step": 39
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 6.468245983123779,
+      "eval_runtime": 0.6389,
+      "eval_samples_per_second": 433.545,
+      "eval_steps_per_second": 14.086,
+      "step": 78
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7129499912261963,
+      "eval_runtime": 0.6418,
+      "eval_samples_per_second": 431.567,
+      "eval_steps_per_second": 14.022,
+      "step": 117
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 195,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 64,
+  "trial_name": null,
+  "trial_params": {
+    "dropout_rate": 0.0134,
+    "learning_rate": 0.05,
+    "max_length": 32,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 64
+  }
+}
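trainer_state.json is where the Trainer records its evaluation history and best checkpoint, so it is the quickest way to inspect a run without loading any weights. A small sketch that reads the state shown above (path assumed to be local):

import json

# Assumes the checkpoint directory from this commit is available locally.
with open("run-6/checkpoint-117/trainer_state.json") as f:
    state = json.load(f)

print(state["best_model_checkpoint"], state["best_metric"])
# -> bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-39 0.5270758122743683

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"acc={entry['eval_accuracy']:.4f} loss={entry['eval_loss']:.4f}")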
run-6/checkpoint-117/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e99cfd8ed47c6deda5dd532724592f1d8997b2921756d80eddbdaca287b3e257
+size 5432

run-6/checkpoint-117/vocab.txt ADDED
The diff for this file is too large to render; see the raw diff.
run-6/checkpoint-156/config.json CHANGED
@@ -19,7 +19,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.50.
+  "transformers_version": "4.50.3",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 30522

run-6/checkpoint-156/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2318a07f6b7d5d1bf3a1f6b40d3abae127c8953f573ac215284dabee4df7d9fe
 size 437958648

run-6/checkpoint-156/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a21aec7d555adf3c55b3aebf9bea77aa3474e2627f886c06cc532a346e13a395
 size 876038394

run-6/checkpoint-156/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0f61eb961c8bdfdb65315b87a5752740304715f4131aaf57d9e9514dcd94c88a
 size 14244

run-6/checkpoint-156/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ba302ee87ff243e8cfe6b6e2e707141cb28f6780ae017be334f60d61b5d54260
 size 1064
run-6/checkpoint-156/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_global_step":
-  "best_metric": 0.
-  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-
-  "epoch":
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-39",
+  "epoch": 4.0,
   "eval_steps": 500,
   "global_step": 156,
   "is_hyper_param_search": true,
@@ -11,27 +11,45 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
-      "step":
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6918498277664185,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.177,
+      "eval_steps_per_second": 14.107,
+      "step": 39
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.
-      "eval_loss":
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 6.468245983123779,
+      "eval_runtime": 0.6389,
+      "eval_samples_per_second": 433.545,
+      "eval_steps_per_second": 14.086,
+      "step": 78
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7129499912261963,
+      "eval_runtime": 0.6418,
+      "eval_samples_per_second": 431.567,
+      "eval_steps_per_second": 14.022,
+      "step": 117
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.9733805060386658,
+      "eval_runtime": 0.6368,
+      "eval_samples_per_second": 435.019,
+      "eval_steps_per_second": 14.134,
       "step": 156
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 195,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 5,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -40,17 +58,19 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
   "total_flos": 0,
-  "train_batch_size":
+  "train_batch_size": 64,
   "trial_name": null,
   "trial_params": {
-    "
-    "
-    "
+    "dropout_rate": 0.0134,
+    "learning_rate": 0.05,
+    "max_length": 32,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 64
   }
 }
run-6/checkpoint-156/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e99cfd8ed47c6deda5dd532724592f1d8997b2921756d80eddbdaca287b3e257
 size 5432
run-6/checkpoint-195/config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}

run-6/checkpoint-195/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68c8a888661be47d9365fdfa694944ffb3f6a64208b5a1527a9b83f04f49d59
+size 437958648

run-6/checkpoint-195/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b80c1de7e4ed4fe975152ec98a1cf7510a34c0f6a47f27493a70fd039bd4e56
+size 876038394

run-6/checkpoint-195/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bbb6e5a1853917bf71d3d48a24e968159b0799ccecda9429d3e1eac0a721ce5
+size 14244

run-6/checkpoint-195/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:752f5a084edce7f72f09d3a1b263e8d262206382cd7e907850fe343dd9fe7226
+size 1064

run-6/checkpoint-195/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}

run-6/checkpoint-195/tokenizer.json ADDED
The diff for this file is too large to render; see the raw diff.

run-6/checkpoint-195/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": false,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
run-6/checkpoint-195/trainer_state.json ADDED
@@ -0,0 +1,85 @@
+{
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-39",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 195,
+  "is_hyper_param_search": true,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6918498277664185,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.177,
+      "eval_steps_per_second": 14.107,
+      "step": 39
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 6.468245983123779,
+      "eval_runtime": 0.6389,
+      "eval_samples_per_second": 433.545,
+      "eval_steps_per_second": 14.086,
+      "step": 78
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7129499912261963,
+      "eval_runtime": 0.6418,
+      "eval_samples_per_second": 431.567,
+      "eval_steps_per_second": 14.022,
+      "step": 117
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.9733805060386658,
+      "eval_runtime": 0.6368,
+      "eval_samples_per_second": 435.019,
+      "eval_steps_per_second": 14.134,
+      "step": 156
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7346399426460266,
+      "eval_runtime": 0.6421,
+      "eval_samples_per_second": 431.427,
+      "eval_steps_per_second": 14.017,
+      "step": 195
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 195,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0,
+  "train_batch_size": 64,
+  "trial_name": null,
+  "trial_params": {
+    "dropout_rate": 0.0134,
+    "learning_rate": 0.05,
+    "max_length": 32,
+    "num_train_epochs": 5,
+    "per_device_train_batch_size": 64
+  }
+}
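Every trainer_state here has "is_hyper_param_search": true and a trial_params block, so each run-N directory is one trial of a Trainer.hyperparameter_search sweep over learning_rate, batch size, epochs, dropout and max_length. A rough sketch of how such trial_params can be produced; the hp_space ranges and the handling of the custom dropout_rate/max_length keys are assumptions, not read from this repo:

from transformers import Trainer  # plus TrainingArguments, model_init, datasets, etc.

def hp_space(trial):
    # Only standard TrainingArguments fields are suggested here; the run's custom
    # "dropout_rate" and "max_length" params presumably flow through model_init /
    # preprocessing via the same trial object (assumption).
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-2, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 2, 5),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [16, 32, 64]
        ),
    }

# trainer = Trainer(model_init=model_init, args=training_args, ...)
# best_trial = trainer.hyperparameter_search(
#     hp_space=hp_space,
#     backend="optuna",
#     n_trials=10,
#     direction="maximize",
#     compute_objective=lambda metrics: metrics["eval_accuracy"],
# )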
run-6/checkpoint-195/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e99cfd8ed47c6deda5dd532724592f1d8997b2921756d80eddbdaca287b3e257
+size 5432

run-6/checkpoint-195/vocab.txt ADDED
The diff for this file is too large to render; see the raw diff.
run-6/checkpoint-78/config.json CHANGED
@@ -19,7 +19,7 @@
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
   "torch_dtype": "float32",
-  "transformers_version": "4.50.
+  "transformers_version": "4.50.3",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 30522

run-6/checkpoint-78/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2d8ef02cdb5bfd921d3ad7cbe7efad3e5045fe73a53087e95d7200fbba683564
 size 437958648

run-6/checkpoint-78/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:41424c73dadb08a4680e48d2eeb7e918f31cd36616ae5e18736f9570bb4a9563
 size 876038394

run-6/checkpoint-78/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:562900fbc54f61683673f7d45f332e2421789e473397b69e96adb452a1719746
 size 1064
run-6/checkpoint-78/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_global_step":
-  "best_metric": 0.
-  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-6/checkpoint-39",
   "epoch": 2.0,
   "eval_steps": 500,
   "global_step": 78,
@@ -11,27 +11,27 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6918498277664185,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.177,
+      "eval_steps_per_second": 14.107,
       "step": 39
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.
-      "eval_loss":
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 6.468245983123779,
+      "eval_runtime": 0.6389,
+      "eval_samples_per_second": 433.545,
+      "eval_steps_per_second": 14.086,
       "step": 78
     }
   ],
   "logging_steps": 500,
-  "max_steps":
+  "max_steps": 195,
   "num_input_tokens_seen": 0,
-  "num_train_epochs":
+  "num_train_epochs": 5,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -49,8 +49,10 @@
   "train_batch_size": 64,
   "trial_name": null,
   "trial_params": {
-    "
-    "
+    "dropout_rate": 0.0134,
+    "learning_rate": 0.05,
+    "max_length": 32,
+    "num_train_epochs": 5,
     "per_device_train_batch_size": 64
   }
 }
run-6/checkpoint-78/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e99cfd8ed47c6deda5dd532724592f1d8997b2921756d80eddbdaca287b3e257
 size 5432
run-7/checkpoint-117/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bb23933936205a388c85e4a0992c5106684781f4174f4ea21f8c303a16f2167c
 size 437958648

run-7/checkpoint-117/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:82b9da665ade5efa1bec8caba1e0cb3e291187eb520665bf94ca8c84b3caeaa7
 size 876038394

run-7/checkpoint-117/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:213d65bdad97baf6366493a7a3a463531cccd1c4d60c8879d9ff92c9ab239122
 size 1064
run-7/checkpoint-117/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_global_step":
-  "best_metric": 0.
-  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-39",
   "epoch": 3.0,
   "eval_steps": 500,
   "global_step": 117,
@@ -11,29 +11,29 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6919082403182983,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.189,
+      "eval_steps_per_second": 14.107,
       "step": 39
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7309694290161133,
+      "eval_runtime": 0.6392,
+      "eval_samples_per_second": 433.372,
+      "eval_steps_per_second": 14.081,
       "step": 78
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.7194660902023315,
+      "eval_runtime": 0.6371,
+      "eval_samples_per_second": 434.804,
+      "eval_steps_per_second": 14.127,
       "step": 117
     }
   ],
@@ -59,7 +59,7 @@
   "trial_name": null,
   "trial_params": {
     "dropout_rate": 0.0134,
-    "learning_rate":
+    "learning_rate": 0.00075,
     "max_length": 32,
     "num_train_epochs": 5,
     "per_device_train_batch_size": 64
run-7/checkpoint-117/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e8c4929bc89061837cba9f5ad6241df21ef41da84ff54fbbce16b48384710dd6
 size 5432
run-7/checkpoint-156/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0ace75bca042e91142ab751abae56a1747af84a9590fece33592501f159fffe3
 size 437958648

run-7/checkpoint-156/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:18adc394acfafbfc30fcfd5fced8103f153c573157ca49e4d4da3fa2bc069967
 size 876038394

run-7/checkpoint-156/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:67fe0b1d36ef7d0f2f4dd956a90f35f78a77bd8cd8b87288ae2bcb46997ec147
 size 1064
run-7/checkpoint-156/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_global_step":
-  "best_metric": 0.
-  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-39",
   "epoch": 4.0,
   "eval_steps": 500,
   "global_step": 156,
@@ -11,38 +11,38 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6919082403182983,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.189,
+      "eval_steps_per_second": 14.107,
       "step": 39
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7309694290161133,
+      "eval_runtime": 0.6392,
+      "eval_samples_per_second": 433.372,
+      "eval_steps_per_second": 14.081,
       "step": 78
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.7194660902023315,
+      "eval_runtime": 0.6371,
+      "eval_samples_per_second": 434.804,
+      "eval_steps_per_second": 14.127,
       "step": 117
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.
-      "eval_loss":
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7069047689437866,
+      "eval_runtime": 0.642,
+      "eval_samples_per_second": 431.431,
+      "eval_steps_per_second": 14.018,
       "step": 156
     }
   ],
@@ -68,7 +68,7 @@
   "trial_name": null,
   "trial_params": {
     "dropout_rate": 0.0134,
-    "learning_rate":
+    "learning_rate": 0.00075,
     "max_length": 32,
     "num_train_epochs": 5,
     "per_device_train_batch_size": 64
run-7/checkpoint-156/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e8c4929bc89061837cba9f5ad6241df21ef41da84ff54fbbce16b48384710dd6
 size 5432
run-7/checkpoint-195/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f94eabf6042830f2b3123db50f2fe5d383e73dd7221879d560c58b44a643df6a
 size 437958648

run-7/checkpoint-195/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ded55dc803957b78dfa0f6030e4ec46c731961578e54538fc4a40cfb2641684e
 size 876038394

run-7/checkpoint-195/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6200587a999c197a87247f066d9ce62813bde6b6f589dded4e1e36dceeea6c89
 size 1064
run-7/checkpoint-195/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_global_step":
-  "best_metric": 0.
-  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-
+  "best_global_step": 39,
+  "best_metric": 0.5270758122743683,
+  "best_model_checkpoint": "bert-base-uncased-finetuned-rte-run_3/run-7/checkpoint-39",
   "epoch": 5.0,
   "eval_steps": 500,
   "global_step": 195,
@@ -11,47 +11,47 @@
   "log_history": [
     {
       "epoch": 1.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.6919082403182983,
+      "eval_runtime": 0.638,
+      "eval_samples_per_second": 434.189,
+      "eval_steps_per_second": 14.107,
       "step": 39
     },
     {
       "epoch": 2.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7309694290161133,
+      "eval_runtime": 0.6392,
+      "eval_samples_per_second": 433.372,
+      "eval_steps_per_second": 14.081,
       "step": 78
     },
     {
       "epoch": 3.0,
-      "eval_accuracy": 0.
-      "eval_loss": 0.
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.5270758122743683,
+      "eval_loss": 0.7194660902023315,
+      "eval_runtime": 0.6371,
+      "eval_samples_per_second": 434.804,
+      "eval_steps_per_second": 14.127,
       "step": 117
     },
     {
       "epoch": 4.0,
-      "eval_accuracy": 0.
-      "eval_loss":
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.7069047689437866,
+      "eval_runtime": 0.642,
+      "eval_samples_per_second": 431.431,
+      "eval_steps_per_second": 14.018,
       "step": 156
     },
     {
       "epoch": 5.0,
-      "eval_accuracy": 0.
-      "eval_loss":
-      "eval_runtime": 0.
-      "eval_samples_per_second":
-      "eval_steps_per_second": 14.
+      "eval_accuracy": 0.4729241877256318,
+      "eval_loss": 0.693695604801178,
+      "eval_runtime": 0.6384,
+      "eval_samples_per_second": 433.898,
+      "eval_steps_per_second": 14.098,
       "step": 195
     }
   ],
@@ -77,7 +77,7 @@
   "trial_name": null,
   "trial_params": {
     "dropout_rate": 0.0134,
-    "learning_rate":
+    "learning_rate": 0.00075,
    "max_length": 32,
     "num_train_epochs": 5,
     "per_device_train_batch_size": 64
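Since every trial writes the same trainer_state.json layout, the best run of the sweep can be found by scanning those files; a small sketch, assuming the run-*/checkpoint-* directories from this repo (including any not shown in the truncated 50-file view) are present locally:

import glob
import json

best = None
for path in glob.glob("run-*/checkpoint-*/trainer_state.json"):
    with open(path) as f:
        state = json.load(f)
    metric = state.get("best_metric")
    if metric is not None and (best is None or metric > best[0]):
        best = (metric, state.get("best_model_checkpoint"), path)

if best is not None:
    print(f"best eval metric {best[0]:.4f} at {best[1]} (from {best[2]})")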