xiulinyang committed
Commit ee14b97 · 1 parent: d8af46e

Add checkpoint

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ library_name: transformers
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: BABYLM_50000_41
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # BABYLM_50000_41
+
+ This model was trained from scratch on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 3.8863
+ - Accuracy: 0.3690
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0006
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 41
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08 (no additional optimizer arguments)
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 1000
+ - num_epochs: 10.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|
+ | 1.1472 | 1.0 | 1008 | 4.4067 | 0.3239 |
+ | 1.0265 | 2.0 | 2016 | 4.3543 | 0.3260 |
+ | 1.011 | 3.0 | 3024 | 4.3273 | 0.3275 |
+ | 1.0037 | 4.0 | 4032 | 4.2573 | 0.3327 |
+ | 0.9846 | 5.0 | 5040 | 4.2131 | 0.3388 |
+ | 0.9628 | 6.0 | 6048 | 4.1228 | 0.3467 |
+ | 0.9383 | 7.0 | 7056 | 4.0391 | 0.3538 |
+ | 0.9143 | 8.0 | 8064 | 3.9592 | 0.3621 |
+ | 0.8929 | 9.0 | 9072 | 3.9107 | 0.3665 |
+ | 0.88 | 10.0 | 10080 | 3.8863 | 0.3690 |
+
+
+ ### Framework versions
+
+ - Transformers 4.51.3
+ - Pytorch 2.5.1+cu121
+ - Datasets 3.6.0
+ - Tokenizers 0.21.1
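
Since the card's usage sections still read "More information needed", a minimal loading sketch may help. The repo id `xiulinyang/BABYLM_50000_41` is an assumption inferred from the committer and model name, not something stated in the card:

```python
# Minimal sketch: load the checkpoint and sample a short continuation.
# ASSUMPTION: the hub repo id below is inferred from the committer and
# model name; adjust it to wherever this checkpoint actually lives.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "xiulinyang/BABYLM_50000_41"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("The child looked at the", return_tensors="pt")
# do_sample=True and max_length=50 mirror task_specific_params in config.json
outputs = model.generate(**inputs, max_length=50, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```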
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 10.0,
+   "eval_accuracy": 0.3690443281951633,
+   "eval_loss": 3.8863136768341064,
+   "eval_runtime": 859.3565,
+   "eval_samples": 33202,
+   "eval_samples_per_second": 38.636,
+   "eval_steps_per_second": 1.208,
+   "perplexity": 48.730917106191576,
+   "total_flos": 8.426145447936e+16,
+   "train_loss": 0.9752902931637234,
+   "train_runtime": 19950.9665,
+   "train_samples": 32248,
+   "train_samples_per_second": 16.164,
+   "train_steps_per_second": 0.505
+ }
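
As a quick consistency check on these metrics, throughput times runtime should roughly equal samples times epochs. A sketch using only values from the file above:

```python
# Cross-check the reported train throughput against samples x epochs.
train_samples, epochs = 32248, 10.0
train_runtime_s, samples_per_s = 19950.9665, 16.164

processed = samples_per_s * train_runtime_s  # ~322,487 samples
expected = train_samples * epochs            # 322,480 samples
print(processed, expected)  # agree to within rounding of samples/sec
```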
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 1,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 2,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 12,
+   "n_positions": 512,
+   "pad_token_id": 0,
+   "prefix": "<|endoftext|>",
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "use_cache": true,
+   "vocab_size": 50000
+ }
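
These dimensions pin down the model size: with a tied LM head, the standard GPT-2 parameter count comes to about 123.8M, which at float32 (4 bytes each) matches the 495,411,840-byte `model.safetensors` below up to header overhead. A sketch of the arithmetic:

```python
# Approximate GPT-2 parameter count from config.json (tied LM head).
vocab, n_pos, d, n_layer = 50000, 512, 768, 12

embeddings = vocab * d + n_pos * d  # token + position embeddings
per_layer = (
    2 * d                 # ln_1 (weight + bias)
    + d * 3 * d + 3 * d   # attn.c_attn: fused QKV projection
    + d * d + d           # attn.c_proj
    + 2 * d               # ln_2
    + d * 4 * d + 4 * d   # mlp.c_fc
    + 4 * d * d + d       # mlp.c_proj
)
total = embeddings + n_layer * per_layer + 2 * d  # + final ln_f
print(total)      # 123,849,216 parameters
print(total * 4)  # 495,396,864 bytes in float32; the file adds ~15 KB of header
```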
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "epoch": 10.0,
+   "eval_accuracy": 0.3690443281951633,
+   "eval_loss": 3.8863136768341064,
+   "eval_runtime": 859.3565,
+   "eval_samples": 33202,
+   "eval_samples_per_second": 38.636,
+   "eval_steps_per_second": 1.208,
+   "perplexity": 48.730917106191576
+ }
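
The `perplexity` field is just the exponential of `eval_loss`, which is easy to verify:

```python
import math

# perplexity = exp(eval_loss) for a causal LM scored with cross-entropy
print(math.exp(3.8863136768341064))  # 48.730917106191576
```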
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.51.3"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b287aa778e7d9ef4497b323f43c16c7300420aba4bd685cd4acc2fc95b8bee2e
+ size 495411840
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<pad>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "max_length": null,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_to_multiple_of": null,
+   "pad_token": "<pad>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
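
Per `added_tokens_decoder`, ids 0-3 resolve to `<pad>`, `<|endoftext|>`, `<s>`, and `<unk>`, and this file maps both `bos_token` and `eos_token` to `<|endoftext|>` (id 1), whereas `config.json` sets `eos_token_id` to 2 (`<s>`). A small inspection sketch (same assumed repo id as in the README example above):

```python
# Sketch: check which ids the tokenizer's special tokens resolve to.
# ASSUMPTION: repo id inferred as in the README example above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xiulinyang/BABYLM_50000_41")
for name in ("bos_token", "eos_token", "pad_token", "unk_token"):
    token = getattr(tok, name)
    print(name, repr(token), tok.convert_tokens_to_ids(token))
# Expected from the files above: pad -> 0, bos/eos/unk -> 1 ("<|endoftext|>"),
# while config.json and generation_config.json use eos_token_id = 2.
```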
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 10.0,
+   "total_flos": 8.426145447936e+16,
+   "train_loss": 0.9752902931637234,
+   "train_runtime": 19950.9665,
+   "train_samples": 32248,
+   "train_samples_per_second": 16.164,
+   "train_steps_per_second": 0.505
+ }
trainer_state.json ADDED
@@ -0,0 +1,203 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.0,
+   "eval_steps": 500,
+   "global_step": 10080,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.9920634920634921,
+       "grad_norm": 17054.458984375,
+       "learning_rate": 0.0005993999999999999,
+       "loss": 1.1472,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.3239240886981203,
+       "eval_loss": 4.4066948890686035,
+       "eval_runtime": 855.2718,
+       "eval_samples_per_second": 38.82,
+       "eval_steps_per_second": 1.214,
+       "step": 1008
+     },
+     {
+       "epoch": 1.9841269841269842,
+       "grad_norm": 29671.1875,
+       "learning_rate": 0.0005339867841409691,
+       "loss": 1.0265,
+       "step": 2000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.3259714508038384,
+       "eval_loss": 4.354335784912109,
+       "eval_runtime": 855.8298,
+       "eval_samples_per_second": 38.795,
+       "eval_steps_per_second": 1.213,
+       "step": 2016
+     },
+     {
+       "epoch": 2.9761904761904763,
+       "grad_norm": 70240.5390625,
+       "learning_rate": 0.0004679074889867841,
+       "loss": 1.011,
+       "step": 3000
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.32745557614417636,
+       "eval_loss": 4.327347755432129,
+       "eval_runtime": 853.4908,
+       "eval_samples_per_second": 38.901,
+       "eval_steps_per_second": 1.216,
+       "step": 3024
+     },
+     {
+       "epoch": 3.9682539682539684,
+       "grad_norm": 46378.90234375,
+       "learning_rate": 0.0004018281938325991,
+       "loss": 1.0037,
+       "step": 4000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.33272428004301724,
+       "eval_loss": 4.257251262664795,
+       "eval_runtime": 855.0895,
+       "eval_samples_per_second": 38.829,
+       "eval_steps_per_second": 1.214,
+       "step": 4032
+     },
+     {
+       "epoch": 4.9603174603174605,
+       "grad_norm": 28076.61328125,
+       "learning_rate": 0.00033574889867841405,
+       "loss": 0.9846,
+       "step": 5000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.33883436159210933,
+       "eval_loss": 4.213110446929932,
+       "eval_runtime": 854.9015,
+       "eval_samples_per_second": 38.837,
+       "eval_steps_per_second": 1.214,
+       "step": 5040
+     },
+     {
+       "epoch": 5.9523809523809526,
+       "grad_norm": 14078.001953125,
+       "learning_rate": 0.000269669603524229,
+       "loss": 0.9628,
+       "step": 6000
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.34673329159550076,
+       "eval_loss": 4.1228461265563965,
+       "eval_runtime": 857.4048,
+       "eval_samples_per_second": 38.724,
+       "eval_steps_per_second": 1.211,
+       "step": 6048
+     },
+     {
+       "epoch": 6.944444444444445,
+       "grad_norm": 9524.201171875,
+       "learning_rate": 0.00020359030837004404,
+       "loss": 0.9383,
+       "step": 7000
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.3538012175014567,
+       "eval_loss": 4.039106845855713,
+       "eval_runtime": 854.4996,
+       "eval_samples_per_second": 38.855,
+       "eval_steps_per_second": 1.215,
+       "step": 7056
+     },
+     {
+       "epoch": 7.936507936507937,
+       "grad_norm": 7510.40869140625,
+       "learning_rate": 0.000137511013215859,
+       "loss": 0.9143,
+       "step": 8000
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.3621231644852932,
+       "eval_loss": 3.9591543674468994,
+       "eval_runtime": 858.8934,
+       "eval_samples_per_second": 38.657,
+       "eval_steps_per_second": 1.209,
+       "step": 8064
+     },
+     {
+       "epoch": 8.928571428571429,
+       "grad_norm": 7499.3291015625,
+       "learning_rate": 7.1431718061674e-05,
+       "loss": 0.8929,
+       "step": 9000
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.3664532976168766,
+       "eval_loss": 3.910747528076172,
+       "eval_runtime": 856.101,
+       "eval_samples_per_second": 38.783,
+       "eval_steps_per_second": 1.212,
+       "step": 9072
+     },
+     {
+       "epoch": 9.920634920634921,
+       "grad_norm": 7264.61669921875,
+       "learning_rate": 5.352422907488987e-06,
+       "loss": 0.88,
+       "step": 10000
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.3690443281951633,
+       "eval_loss": 3.8863136768341064,
+       "eval_runtime": 867.6713,
+       "eval_samples_per_second": 38.266,
+       "eval_steps_per_second": 1.196,
+       "step": 10080
+     },
+     {
+       "epoch": 10.0,
+       "step": 10080,
+       "total_flos": 8.426145447936e+16,
+       "train_loss": 0.9752902931637234,
+       "train_runtime": 19950.9665,
+       "train_samples_per_second": 16.164,
+       "train_steps_per_second": 0.505
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 10080,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.426145447936e+16,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
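
The learning rates in `log_history` are consistent with a linear schedule that warms up to the peak 6e-4 over 1,000 steps and decays to zero at step 10,080; each logged value appears to be the rate of the step just completed (k - 1 for the entry at global step k). A sketch reproducing them:

```python
def linear_lr(step, peak=6e-4, warmup=1000, max_steps=10080):
    """Linear warmup to `peak`, then linear decay to zero at `max_steps`."""
    if step < warmup:
        return peak * step / warmup
    return peak * (max_steps - step) / (max_steps - warmup)

print(linear_lr(999))   # ~0.0005994    (logged at step 1000)
print(linear_lr(1999))  # ~0.000533987  (logged at step 2000)
print(linear_lr(9999))  # ~5.3524e-06   (logged at step 10000)
```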
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d183ea0ceb947c9b9b82399970949b0bc2739d78350970eb0c0df5c5e9693b77
+ size 5304
vocab.json ADDED
The diff for this file is too large to render. See raw diff