ben81828 committed (verified)
Commit 76fe9d9 · 1 Parent(s): bb9ed76

Training in progress, step 1050, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d0f9be3d791b991c20113ef9cf18bf0eacc627445e0c958d7fcaece9aee60abe
+oid sha256:9f3f322dae10822dacfe9f1a7fd463469b2b4a03323e7477e29c4899f3708c98
 size 29034840
last-checkpoint/global_step1050/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90631010317fefc0c7bfdb0f7afd1e38c8a3f1da419816beab97c35a7b79a835
+size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e667c7ce19405cb3f92faa1c8162bc4a32fce97882073bc14621edc4af491660
+size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6e01b65898af4e8b22850f80f1c2967ab2515c0f66e2550d5bcb6203c80c4fb
+size 43429616
last-checkpoint/global_step1050/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5abca8615bc64b0233cff2de769a7d4cf1feb8ede8f04896a4722432ad29697
+size 43429616
last-checkpoint/global_step1050/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ff6a54e90aab6f7059817b770520f0195620be14b63a75ea36a61dc30d27ec1
+size 637299
last-checkpoint/global_step1050/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46d273890af11c55e2aecb16f37ad61d8ea0bffb9771cba91e1c6470d82bc837
+size 637171
last-checkpoint/global_step1050/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c26b9223139f0debf08e64ef6beb7f9515da093feb6b7b5f60de73b6e5f8552
+size 637171
last-checkpoint/global_step1050/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fe60546187896903b2e2c3b77b64ebe63e05c42f09fdcc9fbcd5bd3e5474df7
+size 637171
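
Each of the files above is stored through Git LFS, so the diff shows only the three-line pointer (spec version, sha256 oid, byte size) rather than the binary checkpoint itself. As a minimal sketch, assuming a local copy of the repository with the LFS objects pulled (the path below is illustrative), the oid of a downloaded file can be recomputed and compared against the pointer shown above:

    import hashlib

    def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
        # Git LFS records the plain sha256 of the file contents as the pointer's oid.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # Illustrative path; compare the result with the "oid sha256:..." value in the pointer.
    print(lfs_oid("last-checkpoint/adapter_model.safetensors"))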
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1000
+global_step1050
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d73dfcc09cf3d6f08149535e03920234febc15f7e9a166987f3bc01ee871abf
+oid sha256:67db742b8aa1744a8224bf2a1f79d89caff63b15f78a455d92bb666df82183ea
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4396a64b6da4868d060d1e3c7c9ccb12c39d63bd0f7b146d2512400aff4c769c
+oid sha256:c0f9acb7e6f8bbfb305c3601c71eb6189af24942fab5f99046412c03bb10c3eb
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95877efc8fb5eb302819ee7effca4222569cdcfdebb9fa5d9846e68ed9e833fe
+oid sha256:713783338342f7486f6f186abd03c5963a0d22368f403efb2bf903ed083d2b64
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9fa4f23377f00fdde731da68a8690098617a1fdd912e03cdaa8bde87c493179
+oid sha256:83aaec0b5b7d8a2da4577075066cf434ce6e9feb9327edbea6677a2e51d76466
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bce7739c5bb5cf50e8f1c942e662e33e6aa589036d55e6fddd63bdf3171c1cae
+oid sha256:ccde9da0d23fd32800f01283cdb6c677def9ab43edb7232a9c4c4e9101a14cc0
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6621683239936829,
   "best_model_checkpoint": "saves/CADICA_qwenvl_detect_classify_augmented/lora/sft/checkpoint-900",
-  "epoch": 0.1225077333006646,
+  "epoch": 0.12863311996569785,
   "eval_steps": 50,
-  "global_step": 1000,
+  "global_step": 1050,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1787,11 +1787,100 @@
       "eval_steps_per_second": 0.774,
       "num_input_tokens_seen": 6880016,
       "step": 1000
+    },
+    {
+      "epoch": 0.12312027196716793,
+      "grad_norm": 1.1897377835811873,
+      "learning_rate": 8.439714556870704e-05,
+      "loss": 0.5095,
+      "num_input_tokens_seen": 6913088,
+      "step": 1005
+    },
+    {
+      "epoch": 0.12373281063367125,
+      "grad_norm": 9.789832304928483,
+      "learning_rate": 8.422026448640124e-05,
+      "loss": 0.5987,
+      "num_input_tokens_seen": 6946072,
+      "step": 1010
+    },
+    {
+      "epoch": 0.12434534930017457,
+      "grad_norm": 12.969885182035801,
+      "learning_rate": 8.40425740897932e-05,
+      "loss": 0.5267,
+      "num_input_tokens_seen": 6979400,
+      "step": 1015
+    },
+    {
+      "epoch": 0.1249578879666779,
+      "grad_norm": 10.459849610808234,
+      "learning_rate": 8.386407858128706e-05,
+      "loss": 0.5209,
+      "num_input_tokens_seen": 7013384,
+      "step": 1020
+    },
+    {
+      "epoch": 0.12557042663318121,
+      "grad_norm": 12.787610852611158,
+      "learning_rate": 8.368478218232787e-05,
+      "loss": 0.5702,
+      "num_input_tokens_seen": 7047256,
+      "step": 1025
+    },
+    {
+      "epoch": 0.12618296529968454,
+      "grad_norm": 13.113416468962454,
+      "learning_rate": 8.350468913330192e-05,
+      "loss": 0.5883,
+      "num_input_tokens_seen": 7080464,
+      "step": 1030
+    },
+    {
+      "epoch": 0.12679550396618786,
+      "grad_norm": 3.4834910221085544,
+      "learning_rate": 8.33238036934364e-05,
+      "loss": 0.6062,
+      "num_input_tokens_seen": 7114864,
+      "step": 1035
+    },
+    {
+      "epoch": 0.12740804263269118,
+      "grad_norm": 4.056295700994003,
+      "learning_rate": 8.31421301406986e-05,
+      "loss": 0.6236,
+      "num_input_tokens_seen": 7148968,
+      "step": 1040
+    },
+    {
+      "epoch": 0.12802058129919452,
+      "grad_norm": 1.488958951906305,
+      "learning_rate": 8.29596727716949e-05,
+      "loss": 0.6023,
+      "num_input_tokens_seen": 7182000,
+      "step": 1045
+    },
+    {
+      "epoch": 0.12863311996569785,
+      "grad_norm": 2.39679495526721,
+      "learning_rate": 8.277643590156894e-05,
+      "loss": 0.5312,
+      "num_input_tokens_seen": 7215776,
+      "step": 1050
+    },
+    {
+      "epoch": 0.12863311996569785,
+      "eval_loss": 0.6906282901763916,
+      "eval_runtime": 19.3177,
+      "eval_samples_per_second": 3.106,
+      "eval_steps_per_second": 0.776,
+      "num_input_tokens_seen": 7215776,
+      "step": 1050
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 6880016,
+  "num_input_tokens_seen": 7215776,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1806,7 +1895,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 432927769755648.0,
+  "total_flos": 453990561742848.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null