wangyichen25 committed
Commit ff041cf (verified)
1 Parent(s): 786a4fc

Training in progress, step 100, checkpoint

checkpoint-100/adapter_config.json CHANGED
@@ -28,16 +28,16 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "fc1",
- "up_proj",
  "out_proj",
+ "fc1",
  "v_proj",
+ "q_proj",
  "k_proj",
- "fc2",
- "o_proj",
  "gate_proj",
- "q_proj",
- "down_proj"
+ "fc2",
+ "down_proj",
+ "up_proj",
+ "o_proj"
  ],
  "task_type": "CAUSAL_LM",
  "trainable_token_indices": null,
checkpoint-100/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3219ff424619f73e88d1a7ab31d4bf717362c88632b67e564523fccf14b6700b
+ oid sha256:97bb7902edd622058ec9b3e1ba6af00ddf0f38798eddf41d703f2da96b024e06
  size 2839126480
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b7e64d9f25a395188487c0a873facea913061e7017d2f74eb242b06ff7e40386
+ oid sha256:17e363ca06bcb24c36371e04f9a577253824f17c23299a9c67ce2732e5fadef3
  size 5678690152
checkpoint-100/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dad12f6c63b70b4aa3e59cacb7050751da9e1ffb089ed0b00a685cb0f0f1c682
+ oid sha256:982d52cbfb794dabe573a4b81987c973c3c89987096944f0d45566faeae8f700
  size 14244
checkpoint-100/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ba48efa9dcbb02965f728f536956fdd49f8ec1aa0c93cb087aaa4c3ed9a30a88
+ oid sha256:e12e31dd017e9524250842dde0204c7f228679a93737c9b081a9b448744f4bdb
  size 1064
checkpoint-100/trainer_state.json CHANGED
@@ -2,7 +2,7 @@
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.4807692307692308,
+ "epoch": 2.57516339869281,
  "eval_steps": 10,
  "global_step": 100,
  "is_hyper_param_search": false,
@@ -10,200 +10,200 @@
  "is_world_process_zero": true,
  "log_history": [
  {
- "epoch": 0.04807692307692308,
- "grad_norm": 19.535831451416016,
- "learning_rate": 0.00019800995024875625,
- "loss": 15.9815,
- "mean_token_accuracy": 0.6259999990463256,
+ "epoch": 0.26143790849673204,
+ "grad_norm": 80.0328369140625,
+ "learning_rate": 1.9682539682539684e-05,
+ "loss": 22.6724,
+ "mean_token_accuracy": 0.49049999862909316,
  "num_tokens": 49120.0,
  "step": 10
  },
  {
- "epoch": 0.04807692307692308,
- "eval_loss": 1.2629932165145874,
- "eval_mean_token_accuracy": 0.8946153659086961,
+ "epoch": 0.26143790849673204,
+ "eval_loss": 4.981190204620361,
+ "eval_mean_token_accuracy": 0.5223076802033645,
  "eval_num_tokens": 49120.0,
- "eval_runtime": 17.4778,
- "eval_samples_per_second": 2.861,
- "eval_steps_per_second": 0.744,
+ "eval_runtime": 17.5192,
+ "eval_samples_per_second": 2.854,
+ "eval_steps_per_second": 0.742,
  "step": 10
  },
  {
- "epoch": 0.09615384615384616,
- "grad_norm": 7.8091559410095215,
- "learning_rate": 0.00018805970149253734,
- "loss": 2.5529,
- "mean_token_accuracy": 0.9347499996423722,
+ "epoch": 0.5228758169934641,
+ "grad_norm": 57.1182746887207,
+ "learning_rate": 1.8624338624338625e-05,
+ "loss": 17.5658,
+ "mean_token_accuracy": 0.5591249987483025,
  "num_tokens": 98240.0,
  "step": 20
  },
  {
- "epoch": 0.09615384615384616,
- "eval_loss": 0.14167079329490662,
- "eval_mean_token_accuracy": 0.9773077139487634,
+ "epoch": 0.5228758169934641,
+ "eval_loss": 3.730682134628296,
+ "eval_mean_token_accuracy": 0.5742307580434359,
  "eval_num_tokens": 98240.0,
- "eval_runtime": 17.4221,
- "eval_samples_per_second": 2.87,
- "eval_steps_per_second": 0.746,
+ "eval_runtime": 17.4511,
+ "eval_samples_per_second": 2.865,
+ "eval_steps_per_second": 0.745,
  "step": 20
  },
  {
- "epoch": 0.14423076923076922,
- "grad_norm": 3.069196939468384,
- "learning_rate": 0.00017810945273631843,
- "loss": 0.3788,
- "mean_token_accuracy": 0.9778750196099282,
+ "epoch": 0.7843137254901961,
+ "grad_norm": 58.24352264404297,
+ "learning_rate": 1.7566137566137566e-05,
+ "loss": 13.113,
+ "mean_token_accuracy": 0.6388749912381172,
  "num_tokens": 147360.0,
  "step": 30
  },
  {
- "epoch": 0.14423076923076922,
- "eval_loss": 0.03845745697617531,
- "eval_mean_token_accuracy": 0.9788461740200336,
+ "epoch": 0.7843137254901961,
+ "eval_loss": 2.7529046535491943,
+ "eval_mean_token_accuracy": 0.7115384431985708,
  "eval_num_tokens": 147360.0,
- "eval_runtime": 17.5754,
- "eval_samples_per_second": 2.845,
- "eval_steps_per_second": 0.74,
+ "eval_runtime": 17.6786,
+ "eval_samples_per_second": 2.828,
+ "eval_steps_per_second": 0.735,
  "step": 30
  },
  {
- "epoch": 0.19230769230769232,
- "grad_norm": 6.14934778213501,
- "learning_rate": 0.0001681592039800995,
- "loss": 0.1621,
- "mean_token_accuracy": 0.9760000124573708,
- "num_tokens": 196480.0,
+ "epoch": 1.026143790849673,
+ "grad_norm": 30.73020362854004,
+ "learning_rate": 1.6507936507936507e-05,
+ "loss": 8.7369,
+ "mean_token_accuracy": 0.7555405417004147,
+ "num_tokens": 192796.0,
  "step": 40
  },
  {
- "epoch": 0.19230769230769232,
- "eval_loss": 0.03434291481971741,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 196480.0,
- "eval_runtime": 17.4843,
- "eval_samples_per_second": 2.86,
- "eval_steps_per_second": 0.744,
+ "epoch": 1.026143790849673,
+ "eval_loss": 1.836286187171936,
+ "eval_mean_token_accuracy": 0.8476923199800345,
+ "eval_num_tokens": 192796.0,
+ "eval_runtime": 17.4538,
+ "eval_samples_per_second": 2.865,
+ "eval_steps_per_second": 0.745,
  "step": 40
  },
  {
- "epoch": 0.2403846153846154,
- "grad_norm": 7.388150691986084,
- "learning_rate": 0.00015820895522388059,
- "loss": 0.1563,
- "mean_token_accuracy": 0.9780000209808349,
- "num_tokens": 245600.0,
+ "epoch": 1.287581699346405,
+ "grad_norm": 21.440105438232422,
+ "learning_rate": 1.544973544973545e-05,
+ "loss": 6.2989,
+ "mean_token_accuracy": 0.8789999932050705,
+ "num_tokens": 241916.0,
  "step": 50
  },
  {
- "epoch": 0.2403846153846154,
- "eval_loss": 0.05149823799729347,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 245600.0,
- "eval_runtime": 17.5208,
- "eval_samples_per_second": 2.854,
- "eval_steps_per_second": 0.742,
+ "epoch": 1.287581699346405,
+ "eval_loss": 1.3245856761932373,
+ "eval_mean_token_accuracy": 0.8942307508908786,
+ "eval_num_tokens": 241916.0,
+ "eval_runtime": 17.6384,
+ "eval_samples_per_second": 2.835,
+ "eval_steps_per_second": 0.737,
  "step": 50
  },
  {
- "epoch": 0.28846153846153844,
- "grad_norm": 5.478137969970703,
- "learning_rate": 0.00014825870646766168,
- "loss": 0.177,
- "mean_token_accuracy": 0.9782500207424164,
- "num_tokens": 294720.0,
+ "epoch": 1.5490196078431373,
+ "grad_norm": 10.908184051513672,
+ "learning_rate": 1.4391534391534392e-05,
+ "loss": 4.5427,
+ "mean_token_accuracy": 0.9057500049471855,
+ "num_tokens": 291036.0,
  "step": 60
  },
  {
- "epoch": 0.28846153846153844,
- "eval_loss": 0.03306853771209717,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 294720.0,
- "eval_runtime": 17.4723,
+ "epoch": 1.5490196078431373,
+ "eval_loss": 0.9362902045249939,
+ "eval_mean_token_accuracy": 0.9126923221808213,
+ "eval_num_tokens": 291036.0,
+ "eval_runtime": 17.472,
  "eval_samples_per_second": 2.862,
  "eval_steps_per_second": 0.744,
  "step": 60
  },
  {
- "epoch": 0.33653846153846156,
- "grad_norm": 4.636706352233887,
- "learning_rate": 0.0001383084577114428,
- "loss": 0.1614,
- "mean_token_accuracy": 0.9776250213384629,
- "num_tokens": 343840.0,
+ "epoch": 1.8104575163398693,
+ "grad_norm": 11.274917602539062,
+ "learning_rate": 1.3333333333333333e-05,
+ "loss": 3.4538,
+ "mean_token_accuracy": 0.9126250177621842,
+ "num_tokens": 340156.0,
  "step": 70
  },
  {
- "epoch": 0.33653846153846156,
- "eval_loss": 0.041035111993551254,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 343840.0,
- "eval_runtime": 17.567,
- "eval_samples_per_second": 2.846,
- "eval_steps_per_second": 0.74,
+ "epoch": 1.8104575163398693,
+ "eval_loss": 0.7567419409751892,
+ "eval_mean_token_accuracy": 0.9103846183189979,
+ "eval_num_tokens": 340156.0,
+ "eval_runtime": 17.5544,
+ "eval_samples_per_second": 2.848,
+ "eval_steps_per_second": 0.741,
  "step": 70
  },
  {
- "epoch": 0.38461538461538464,
- "grad_norm": 1.8013979196548462,
- "learning_rate": 0.00012835820895522389,
- "loss": 0.1449,
- "mean_token_accuracy": 0.9785000205039978,
- "num_tokens": 392960.0,
+ "epoch": 2.052287581699346,
+ "grad_norm": 13.183298110961914,
+ "learning_rate": 1.2275132275132276e-05,
+ "loss": 2.5315,
+ "mean_token_accuracy": 0.9293243321212562,
+ "num_tokens": 385592.0,
  "step": 80
  },
  {
- "epoch": 0.38461538461538464,
- "eval_loss": 0.033909473568201065,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 392960.0,
- "eval_runtime": 17.4599,
- "eval_samples_per_second": 2.864,
- "eval_steps_per_second": 0.745,
+ "epoch": 2.052287581699346,
+ "eval_loss": 0.6147329807281494,
+ "eval_mean_token_accuracy": 0.9292307771169223,
+ "eval_num_tokens": 385592.0,
+ "eval_runtime": 17.582,
+ "eval_samples_per_second": 2.844,
+ "eval_steps_per_second": 0.739,
  "step": 80
  },
  {
- "epoch": 0.4326923076923077,
- "grad_norm": 0.7925957441329956,
- "learning_rate": 0.00011840796019900498,
- "loss": 0.1343,
- "mean_token_accuracy": 0.979250019788742,
- "num_tokens": 442080.0,
+ "epoch": 2.313725490196078,
+ "grad_norm": 12.541313171386719,
+ "learning_rate": 1.1216931216931217e-05,
+ "loss": 2.2552,
+ "mean_token_accuracy": 0.9353750020265579,
+ "num_tokens": 434712.0,
  "step": 90
  },
  {
- "epoch": 0.4326923076923077,
- "eval_loss": 0.03350621089339256,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 442080.0,
- "eval_runtime": 17.5312,
- "eval_samples_per_second": 2.852,
- "eval_steps_per_second": 0.742,
+ "epoch": 2.313725490196078,
+ "eval_loss": 0.5065863132476807,
+ "eval_mean_token_accuracy": 0.9465384529187129,
+ "eval_num_tokens": 434712.0,
+ "eval_runtime": 17.6818,
+ "eval_samples_per_second": 2.828,
+ "eval_steps_per_second": 0.735,
  "step": 90
  },
  {
- "epoch": 0.4807692307692308,
- "grad_norm": 3.3666160106658936,
- "learning_rate": 0.00010845771144278607,
- "loss": 0.1484,
- "mean_token_accuracy": 0.9778750211000442,
- "num_tokens": 491200.0,
+ "epoch": 2.57516339869281,
+ "grad_norm": 7.007925033569336,
+ "learning_rate": 1.015873015873016e-05,
+ "loss": 1.9547,
+ "mean_token_accuracy": 0.9474999904632568,
+ "num_tokens": 483832.0,
  "step": 100
  },
  {
- "epoch": 0.4807692307692308,
- "eval_loss": 0.03422728180885315,
- "eval_mean_token_accuracy": 0.9788461740200336,
- "eval_num_tokens": 491200.0,
- "eval_runtime": 17.4416,
- "eval_samples_per_second": 2.867,
- "eval_steps_per_second": 0.745,
+ "epoch": 2.57516339869281,
+ "eval_loss": 0.4519544243812561,
+ "eval_mean_token_accuracy": 0.9511538331325238,
+ "eval_num_tokens": 483832.0,
+ "eval_runtime": 17.5399,
+ "eval_samples_per_second": 2.851,
+ "eval_steps_per_second": 0.741,
  "step": 100
  }
  ],
  "logging_steps": 10,
- "max_steps": 208,
+ "max_steps": 195,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 1,
+ "num_train_epochs": 5,
  "save_steps": 20,
  "stateful_callbacks": {
  "TrainerControl": {
@@ -217,7 +217,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.27727482781184e+16,
+ "total_flos": 1.2581157053946624e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09a9baff00283695ce70ca75d7d996c90cc5734afa7d9ed3f9a6113fde47b709
+ oid sha256:7fa3299c8cd6826c610bec43e932e8377d63e21789add24e4c875f4d75cb544d
  size 5880