Commit 42b80c2 · verified · 1 Parent(s): 6fe5de5 · committed by iamnguyen

Training in progress, step 256, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2499cb84d044f14380d1436e809204017b642ef077cd6d85e04a0612d04f139c
+oid sha256:89202c3a83c656009c49405e34a199746a7402ebd46b8daddcaa8ca18ca6d51e
 size 147770496
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8127b2d19d877bc8b6390abef2ec730fbe95a18ba0f539c7b6cea04272a25b9a
-size 75455362
+oid sha256:41d7a8511652baf58cf06d37d605965dd1bc2aee8d912767371f5f07328245b8
+size 75455810
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcd63113a0a7922f08bc7bf5a3f424120a413b69e0c07f67a117e000ca3f41b8
+oid sha256:377c19831806be3b2af38951dd08dcc47e0fc843a7df6b6c5b4d819485499ed8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cae400ce236e72d865001da29ea9701cbf6e3c969ad1bd3d2f923aecee97db42
+oid sha256:a3d84d4585b602d0abc8b4dfccabde21254d9ba4ef932cab6e61e0e9e3b40642
 size 1064
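
Each of the binaries above is tracked through Git LFS, so the repository stores only a three-line pointer file: the LFS spec version, the sha256 object id, and the byte size. A minimal sketch of checking a downloaded blob against such a pointer, assuming hypothetical local file paths (the function name is illustrative, not part of any library):

```python
import hashlib
from pathlib import Path


def lfs_pointer_matches(pointer_path: str, blob_path: str) -> bool:
    """Compare a local blob's sha256 and size to the values in a Git LFS pointer file."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].split("sha256:", 1)[-1].strip()
    expected_size = int(fields["size"])

    blob = Path(blob_path).read_bytes()  # fine for a sketch; stream for very large files
    return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == expected_size


# Hypothetical paths; the commit itself only updates the pointers shown above.
# lfs_pointer_matches("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```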
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09764352062770834,
+  "epoch": 0.09919341778052912,
   "eval_steps": 500,
-  "global_step": 252,
+  "global_step": 256,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1771,6 +1771,34 @@
       "learning_rate": 9.846358414859598e-06,
       "loss": 1.5649,
       "step": 252
+    },
+    {
+      "epoch": 0.09803099491591354,
+      "grad_norm": 0.08257234841585159,
+      "learning_rate": 9.844826174531863e-06,
+      "loss": 1.5609,
+      "step": 253
+    },
+    {
+      "epoch": 0.09841846920411873,
+      "grad_norm": 0.16954340040683746,
+      "learning_rate": 9.843286452099964e-06,
+      "loss": 1.5876,
+      "step": 254
+    },
+    {
+      "epoch": 0.09880594349232392,
+      "grad_norm": 0.08577932417392731,
+      "learning_rate": 9.841739249941772e-06,
+      "loss": 1.5812,
+      "step": 255
+    },
+    {
+      "epoch": 0.09919341778052912,
+      "grad_norm": 0.13520435988903046,
+      "learning_rate": 9.840184570446702e-06,
+      "loss": 1.5758,
+      "step": 256
     }
   ],
   "logging_steps": 1.0,
@@ -1790,7 +1818,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.4955793132720947e+17,
+  "total_flos": 2.5351262457715814e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null