dzanbek committed (verified)
Commit c95b8c5 · 1 Parent(s): b09b58d

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9f184bd8eab2718597e454ee47284182973f744ff14e0d1c875f166a10937d3
+ oid sha256:71d34c9d1f070921cf20112f805cbd861af21c5b989b1ba8e1ca784639634e52
  size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a1daf0dece60de4e596c08e57e10afffc71a2a9152a45236429f0e3be749449a
+ oid sha256:48700a2739ccc212045d9727ddba31823fb8b9fd96436bfb9ddbbb61e2f8651d
  size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:457548b77ed5d7703d69eacdc9aca9fe710c41943f0b7a77991d95d5848083b0
+ oid sha256:5fd8f0a7245c19e9d58956653f1a0c1b27d3beb55f73133fe59b9f074e10fba6
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+ oid sha256:1e2ed9259304616a8ecebc61c5d000777b2978635f7a705b8d7081c480ce0bde
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.02437538086532602,
+ "epoch": 0.04875076173065204,
  "eval_steps": 2,
- "global_step": 10,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -125,6 +125,116 @@
  "eval_samples_per_second": 3.955,
  "eval_steps_per_second": 1.989,
  "step": 10
+ },
+ {
+ "epoch": 0.02681291895185862,
+ "grad_norm": 1.2626514434814453,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 1.1281,
+ "step": 11
+ },
+ {
+ "epoch": 0.029250457038391225,
+ "grad_norm": 1.1027222871780396,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.8334,
+ "step": 12
+ },
+ {
+ "epoch": 0.029250457038391225,
+ "eval_loss": 1.4252070188522339,
+ "eval_runtime": 43.6162,
+ "eval_samples_per_second": 3.966,
+ "eval_steps_per_second": 1.995,
+ "step": 12
+ },
+ {
+ "epoch": 0.031687995124923825,
+ "grad_norm": 1.4160807132720947,
+ "learning_rate": 0.00015877852522924732,
+ "loss": 1.2873,
+ "step": 13
+ },
+ {
+ "epoch": 0.03412553321145643,
+ "grad_norm": 1.6689919233322144,
+ "learning_rate": 0.00013090169943749476,
+ "loss": 1.0563,
+ "step": 14
+ },
+ {
+ "epoch": 0.03412553321145643,
+ "eval_loss": 1.3921422958374023,
+ "eval_runtime": 43.7862,
+ "eval_samples_per_second": 3.951,
+ "eval_steps_per_second": 1.987,
+ "step": 14
+ },
+ {
+ "epoch": 0.03656307129798903,
+ "grad_norm": 1.5969661474227905,
+ "learning_rate": 0.0001,
+ "loss": 1.117,
+ "step": 15
+ },
+ {
+ "epoch": 0.039000609384521635,
+ "grad_norm": 2.8356218338012695,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 1.3988,
+ "step": 16
+ },
+ {
+ "epoch": 0.039000609384521635,
+ "eval_loss": 1.360350251197815,
+ "eval_runtime": 43.6174,
+ "eval_samples_per_second": 3.966,
+ "eval_steps_per_second": 1.995,
+ "step": 16
+ },
+ {
+ "epoch": 0.04143814747105423,
+ "grad_norm": 1.8324129581451416,
+ "learning_rate": 4.12214747707527e-05,
+ "loss": 1.4755,
+ "step": 17
+ },
+ {
+ "epoch": 0.043875685557586835,
+ "grad_norm": 1.618000864982605,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 0.9456,
+ "step": 18
+ },
+ {
+ "epoch": 0.043875685557586835,
+ "eval_loss": 1.3482810258865356,
+ "eval_runtime": 43.6176,
+ "eval_samples_per_second": 3.966,
+ "eval_steps_per_second": 1.995,
+ "step": 18
+ },
+ {
+ "epoch": 0.04631322364411944,
+ "grad_norm": 1.9570136070251465,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 1.34,
+ "step": 19
+ },
+ {
+ "epoch": 0.04875076173065204,
+ "grad_norm": 1.8460969924926758,
+ "learning_rate": 0.0,
+ "loss": 1.46,
+ "step": 20
+ },
+ {
+ "epoch": 0.04875076173065204,
+ "eval_loss": 1.3457626104354858,
+ "eval_runtime": 43.686,
+ "eval_samples_per_second": 3.96,
+ "eval_steps_per_second": 1.991,
+ "step": 20
  }
  ],
  "logging_steps": 1,
@@ -139,12 +249,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 7398256992583680.0,
+ "total_flos": 1.479651398516736e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null