ben81828 committed · verified
Commit 945950b · 1 Parent(s): 8b7a6a5

Training in progress, step 1150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd192dd85ff6446dfa29041091b7f433f1d6a850202807fb731b90087b71d2ec
+oid sha256:19df8cfb4fb263762edd35195481b243b0e76f56e98ae181beceb2398a64b8c5
 size 29034840
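
Every binary in this checkpoint is tracked with Git LFS, so the diffs on this page only touch the three-line pointer files (version, oid, size): the adapter weights changed, hence the new sha256 oid, while the payload size stays at 29034840 bytes. As a minimal sketch of what such a pointer contains, assuming a local checkout where the file is still a pointer (i.e. before `git lfs pull`), the hypothetical helper below simply splits each line on the first space:

# Minimal sketch, not part of this repo: parse a Git LFS pointer file.
# Assumes the three "key value" lines shown in the diff above.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(ptr["oid"], ptr["size"])  # sha256:19df8cfb... 29034840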
last-checkpoint/global_step1150/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f621584d960c54af981bacd4e132f762eb9e48a25ebad8102f674e758ae624b
+size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff47d6deb1356289fee8e2bc07a0056f08bc0bb2c57c6b654a85f3462cd42d3a
+size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81e092ce23bea36d9f84e754f41af61de5207bbbe8eb34c3f2ae4274e56cec07
+size 43429616
last-checkpoint/global_step1150/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de1f8ac910500d5dec96234c0ad8b9087bc475a40ed3d32d641ac3f4625a5b4f
+size 43429616
last-checkpoint/global_step1150/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1d8f57ebe199b6160c4283125fd253980fc44b2d216970f98e39b9f2019c2bb
+size 637299
last-checkpoint/global_step1150/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a59940217878d056c0ec4cb1d8fbeda7666879dfdefc0e73fb244d7b2d08ee1
+size 637171
last-checkpoint/global_step1150/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:332e38fe883e969b04d02c7f8ee460470b9316344ea2066a08f8964ab917200d
+size 637171
last-checkpoint/global_step1150/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68dd6f1cba0b7e8d93d7b459f72081e50445095b21075bede743ce3b2d69d184
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1100
+global_step1150
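
The new global_step1150 directory follows the DeepSpeed ZeRO checkpoint layout: one bf16 optimizer-state shard and one model-state shard per data-parallel rank (ranks 0-3 here), and the updated `latest` file tells DeepSpeed which step directory to resume from. If a single consolidated fp32 state dict is wanted instead of per-rank shards, DeepSpeed provides a zero_to_fp32 utility for exactly this layout; a hedged sketch, assuming DeepSpeed is installed and `last-checkpoint` is the local directory holding `latest`:

# Hedged sketch: merge the per-rank ZeRO shards into one fp32 state dict.
# get_fp32_state_dict_from_zero_checkpoint reads "latest" to find global_step1150.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(f"{len(state_dict)} consolidated tensors")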
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d9fea52fb92cc51e76feeb2b139ce35723c0cb651da383e4f7eec2606ed6c2a
+oid sha256:274dc3860ee0c7f4d5348f60910a4b568498c04adfefb89f905b1c78a82c1312
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b5820ebfcc2e1cfe1ad2619a05ea9a484ff21635e13e386bf14abd302f2c0f7
+oid sha256:e9aa441491b9ca89e796944520fa1db332a67c0a1a920be83edd2d96d741716d
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb8d96a68e732fca41980516622a50990bbd3ee989e72076a35c8608d9b4d136
+oid sha256:c1ee3434533b24fb771504fa8cceb5c2ea25fe0de1641128feaceccc65afe6ed
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce6f1db6d09f4d89a9b2bd8dc8eeb99f1fada2ec04376e23b5a7a13004994005
+oid sha256:93b4a44be1335173d2e3120bd0d1e6346f3e832d8935752c70ce1e98f017fa87
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d0c7456eafeee3179566bb381c9153771d7e0f21738d2398944d053915d0651
+oid sha256:7d461c8d7517d4b88333bff7984fc3bfc149292198b04bbc18a49aee698ffb5c
 size 1064
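
The rng_state_{0..3}.pth files hold the per-rank random-number-generator states and scheduler.pt holds the learning-rate scheduler state, which is what lets a resumed run continue from step 1150 deterministically. Both are ordinary pickled dicts, so they can be inspected directly; a small sketch, with paths assumed to point at a local copy of last-checkpoint:

# Hedged sketch: peek at the saved RNG and LR-scheduler state for rank 0.
# weights_only=False because these files pickle plain Python/NumPy objects;
# only do this for checkpoints you trust.
import torch

rng = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
sched = torch.load("last-checkpoint/scheduler.pt", weights_only=False)
print(sorted(rng.keys()))       # e.g. python / numpy / cpu / cuda states
print(sched.get("last_epoch"))  # scheduler step counter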
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.6174182295799255,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_detect_classify_augmented/lora/sft/checkpoint-1100",
-  "epoch": 0.13475850663073108,
+  "best_metric": 0.6082175970077515,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_detect_classify_augmented/lora/sft/checkpoint-1150",
+  "epoch": 0.14088389329576428,
   "eval_steps": 50,
-  "global_step": 1100,
+  "global_step": 1150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1965,11 +1965,100 @@
       "eval_steps_per_second": 0.779,
       "num_input_tokens_seen": 7552920,
       "step": 1100
+    },
+    {
+      "epoch": 0.1353710452972344,
+      "grad_norm": 1.6568269633234864,
+      "learning_rate": 8.07106356344834e-05,
+      "loss": 0.5067,
+      "num_input_tokens_seen": 7586336,
+      "step": 1105
+    },
+    {
+      "epoch": 0.13598358396373772,
+      "grad_norm": 9.881127130998916,
+      "learning_rate": 8.051838793910038e-05,
+      "loss": 0.5564,
+      "num_input_tokens_seen": 7620456,
+      "step": 1110
+    },
+    {
+      "epoch": 0.13659612263024104,
+      "grad_norm": 5.434757636785574,
+      "learning_rate": 8.032541847934146e-05,
+      "loss": 0.5854,
+      "num_input_tokens_seen": 7654064,
+      "step": 1115
+    },
+    {
+      "epoch": 0.13720866129674436,
+      "grad_norm": 8.431770854556952,
+      "learning_rate": 8.013173181896283e-05,
+      "loss": 0.5729,
+      "num_input_tokens_seen": 7687768,
+      "step": 1120
+    },
+    {
+      "epoch": 0.13782119996324768,
+      "grad_norm": 2.7261911659276343,
+      "learning_rate": 7.993733253868256e-05,
+      "loss": 0.6122,
+      "num_input_tokens_seen": 7721544,
+      "step": 1125
+    },
+    {
+      "epoch": 0.138433738629751,
+      "grad_norm": 2.456505697190831,
+      "learning_rate": 7.974222523607236e-05,
+      "loss": 0.5618,
+      "num_input_tokens_seen": 7755464,
+      "step": 1130
+    },
+    {
+      "epoch": 0.13904627729625432,
+      "grad_norm": 3.8454579857105626,
+      "learning_rate": 7.954641452544865e-05,
+      "loss": 0.5487,
+      "num_input_tokens_seen": 7789152,
+      "step": 1135
+    },
+    {
+      "epoch": 0.13965881596275764,
+      "grad_norm": 5.774785303618935,
+      "learning_rate": 7.934990503776363e-05,
+      "loss": 0.5132,
+      "num_input_tokens_seen": 7822128,
+      "step": 1140
+    },
+    {
+      "epoch": 0.14027135462926096,
+      "grad_norm": 6.086190598879861,
+      "learning_rate": 7.915270142049566e-05,
+      "loss": 0.5576,
+      "num_input_tokens_seen": 7855584,
+      "step": 1145
+    },
+    {
+      "epoch": 0.14088389329576428,
+      "grad_norm": 5.537023500299833,
+      "learning_rate": 7.89548083375394e-05,
+      "loss": 0.5403,
+      "num_input_tokens_seen": 7889016,
+      "step": 1150
+    },
+    {
+      "epoch": 0.14088389329576428,
+      "eval_loss": 0.6082175970077515,
+      "eval_runtime": 19.2261,
+      "eval_samples_per_second": 3.121,
+      "eval_steps_per_second": 0.78,
+      "num_input_tokens_seen": 7889016,
+      "step": 1150
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 7552920,
+  "num_input_tokens_seen": 7889016,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1984,7 +2073,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 475129840533504.0,
+  "total_flos": 496216376475648.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
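
The trainer_state.json changes carry the actual training signal: eval_loss improved from 0.6174 at step 1100 to 0.6082 at step 1150, so best_metric and best_model_checkpoint move to checkpoint-1150, new log_history entries (training logs for steps 1105-1150 plus the step-1150 eval) are appended, and num_input_tokens_seen and total_flos advance accordingly. A short sketch for reading those fields back out, assuming only the standard JSON layout shown above:

# Hedged sketch: pull the headline numbers out of trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"][-3:]:
    # training logs carry "loss"; eval logs carry "eval_loss"
    print(entry["step"], entry.get("loss", entry.get("eval_loss")))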