dada22231 committed
Commit 41cdc53 · verified · 1 parent: daef9cd

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9eac25e841b1adefa0784fa52f328457770f09ae89703711294fc8cc8ded10cb
+oid sha256:3ad4669cee803e39fefc321c2b13b2e158dda619d12a3cf0c215a2893b5468fb
 size 116744
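
Only the Git LFS pointer changes here: the adapter weights themselves live in LFS storage, and the pointer records their SHA-256 digest and byte size. As a rough illustration (the file paths below are hypothetical, not taken from this repo), a locally downloaded blob could be checked against such a pointer like this:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Read a Git LFS pointer file ("version", "oid", "size") into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the oid/size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Hypothetical paths: a checked-out pointer file vs. the LFS-downloaded blob.
print(verify_blob("last-checkpoint/adapter_model.safetensors.pointer",
                  "last-checkpoint/adapter_model.safetensors"))
```

Note that the `oid` changes while `size` stays at 116744 bytes, which is what you would expect from updated adapter weights with identical tensor shapes.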
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e33a783b3817895987b8b7e17d6344f11b9f6da10d5450f1572b55ec99c540c6
+oid sha256:101960d0fc9429e9fe679f24d3826a58652f79ce356f4e615b88ae8f824a03b6
 size 243310
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:39ad929d1d20baf108120da230bb7f1383b93a7086841edb00aadeef5185c1fc
+oid sha256:5fde8b011a03169cb958bb9b13be5a66b6cab7da6427e2460f1ce3e034fab235
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e092e0c44b90287f1d7caba42c3f54a42795ffa07bdc83a6a71e5a7c17db7ebc
+oid sha256:487718e4723ddde9ca5035f2661e20f36fc4a426fd5a253742dd1d7e9aabcbaf
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e64ddd8239b569af74746105a8f12051f7e81929c43e06399e6603c24fb61eea
+oid sha256:090bf5540f16129790856c97e9efec2f3d641e8d9e030c5f7d482d53780a0027
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c3711f8de953109ca1ecc5c9f46b9aed405a459880564fc80ddaf7dd35f5ece
+oid sha256:67e6653db4c5b1990b33171ce6b9420c20ce0e04e4ccf47c1bae5ba4295fd007
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
+oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
 size 1064
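
Everything under `last-checkpoint/` (adapter weights, optimizer state, one RNG state per rank — the four `rng_state_*.pth` files suggest a 4-process run — and the LR scheduler) is stored through Git LFS, so a plain clone only yields pointer files. A minimal sketch for fetching just this checkpoint from the Hub, assuming a hypothetical repo id since the commit page does not name one:

```python
from huggingface_hub import snapshot_download

# "dada22231/your-repo" is a placeholder; substitute the actual repository id.
local_dir = snapshot_download(
    repo_id="dada22231/your-repo",
    revision="41cdc53",                    # commit from the header above (use the full hash if the short form is rejected)
    allow_patterns=["last-checkpoint/*"],  # skip everything outside the checkpoint directory
)
print(local_dir)
```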
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02664890073284477,
+  "epoch": 0.05329780146568954,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 431.357,
       "eval_steps_per_second": 112.153,
       "step": 25
+    },
+    {
+      "epoch": 0.02771485676215856,
+      "grad_norm": 0.29466956853866577,
+      "learning_rate": 5.500000000000001e-05,
+      "loss": 10.3486,
+      "step": 26
+    },
+    {
+      "epoch": 0.028780812791472352,
+      "grad_norm": 0.31241559982299805,
+      "learning_rate": 5.205685918464356e-05,
+      "loss": 10.347,
+      "step": 27
+    },
+    {
+      "epoch": 0.029846768820786143,
+      "grad_norm": 0.2826091945171356,
+      "learning_rate": 4.912632135009769e-05,
+      "loss": 10.3492,
+      "step": 28
+    },
+    {
+      "epoch": 0.030912724850099934,
+      "grad_norm": 0.3171038329601288,
+      "learning_rate": 4.6220935509274235e-05,
+      "loss": 10.3494,
+      "step": 29
+    },
+    {
+      "epoch": 0.031978680879413725,
+      "grad_norm": 0.304294615983963,
+      "learning_rate": 4.3353142970386564e-05,
+      "loss": 10.3456,
+      "step": 30
+    },
+    {
+      "epoch": 0.03304463690872751,
+      "grad_norm": 0.3247598111629486,
+      "learning_rate": 4.053522406135775e-05,
+      "loss": 10.3515,
+      "step": 31
+    },
+    {
+      "epoch": 0.034110592938041306,
+      "grad_norm": 0.3589993119239807,
+      "learning_rate": 3.777924554357096e-05,
+      "loss": 10.346,
+      "step": 32
+    },
+    {
+      "epoch": 0.035176548967355094,
+      "grad_norm": 0.36132293939590454,
+      "learning_rate": 3.509700894014496e-05,
+      "loss": 10.3411,
+      "step": 33
+    },
+    {
+      "epoch": 0.03624250499666889,
+      "grad_norm": 0.3835195302963257,
+      "learning_rate": 3.250000000000001e-05,
+      "loss": 10.3379,
+      "step": 34
+    },
+    {
+      "epoch": 0.037308461025982675,
+      "grad_norm": 0.3723469376564026,
+      "learning_rate": 2.9999339514117912e-05,
+      "loss": 10.3345,
+      "step": 35
+    },
+    {
+      "epoch": 0.03837441705529647,
+      "grad_norm": 0.3813835680484772,
+      "learning_rate": 2.760573569460757e-05,
+      "loss": 10.3435,
+      "step": 36
+    },
+    {
+      "epoch": 0.03944037308461026,
+      "grad_norm": 0.40655070543289185,
+      "learning_rate": 2.53294383204969e-05,
+      "loss": 10.3404,
+      "step": 37
+    },
+    {
+      "epoch": 0.04050632911392405,
+      "grad_norm": 0.3889438211917877,
+      "learning_rate": 2.3180194846605367e-05,
+      "loss": 10.3424,
+      "step": 38
+    },
+    {
+      "epoch": 0.04157228514323784,
+      "grad_norm": 0.3690129518508911,
+      "learning_rate": 2.1167208663446025e-05,
+      "loss": 10.339,
+      "step": 39
+    },
+    {
+      "epoch": 0.04263824117255163,
+      "grad_norm": 0.36811506748199463,
+      "learning_rate": 1.9299099686894423e-05,
+      "loss": 10.337,
+      "step": 40
+    },
+    {
+      "epoch": 0.04370419720186542,
+      "grad_norm": 0.3503737449645996,
+      "learning_rate": 1.758386744638546e-05,
+      "loss": 10.3365,
+      "step": 41
+    },
+    {
+      "epoch": 0.044770153231179215,
+      "grad_norm": 0.3476838171482086,
+      "learning_rate": 1.602885682970026e-05,
+      "loss": 10.3357,
+      "step": 42
+    },
+    {
+      "epoch": 0.045836109260493,
+      "grad_norm": 0.40502050518989563,
+      "learning_rate": 1.464072663102903e-05,
+      "loss": 10.326,
+      "step": 43
+    },
+    {
+      "epoch": 0.046902065289806796,
+      "grad_norm": 0.37952473759651184,
+      "learning_rate": 1.3425421036992098e-05,
+      "loss": 10.3307,
+      "step": 44
+    },
+    {
+      "epoch": 0.047968021319120584,
+      "grad_norm": 0.3985709846019745,
+      "learning_rate": 1.2388144172720251e-05,
+      "loss": 10.3376,
+      "step": 45
+    },
+    {
+      "epoch": 0.04903397734843438,
+      "grad_norm": 0.3946983516216278,
+      "learning_rate": 1.1533337816991932e-05,
+      "loss": 10.3341,
+      "step": 46
+    },
+    {
+      "epoch": 0.050099933377748165,
+      "grad_norm": 0.37759777903556824,
+      "learning_rate": 1.0864662381854632e-05,
+      "loss": 10.3338,
+      "step": 47
+    },
+    {
+      "epoch": 0.05116588940706196,
+      "grad_norm": 0.40839263796806335,
+      "learning_rate": 1.0384981238178534e-05,
+      "loss": 10.3287,
+      "step": 48
+    },
+    {
+      "epoch": 0.05223184543637575,
+      "grad_norm": 0.42501384019851685,
+      "learning_rate": 1.0096348454262845e-05,
+      "loss": 10.3254,
+      "step": 49
+    },
+    {
+      "epoch": 0.05329780146568954,
+      "grad_norm": 0.4669570028781891,
+      "learning_rate": 1e-05,
+      "loss": 10.3262,
+      "step": 50
+    },
+    {
+      "epoch": 0.05329780146568954,
+      "eval_loss": 10.321706771850586,
+      "eval_runtime": 0.1212,
+      "eval_samples_per_second": 412.623,
+      "eval_steps_per_second": 107.282,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -212,12 +395,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 42337731870720.0,
+  "total_flos": 84675463741440.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null