fats-fme committed 44922c7 (verified) · Parent: 185bda7

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f0e8b497e0ee74e1d9078f22cb6bab77028f05afb8cc88563b3ae254cd0fdcf
+oid sha256:b2fe03e64cb15768d1cd8948c3b5771f52ceabd68f4929b5086f05322316651a
 size 1279323952
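
adapter_model.safetensors is tracked with Git LFS, so the diff above only swaps the pointer (oid and size); the binary itself lives in LFS storage. Below is a minimal sketch of checking a locally downloaded copy against the new pointer values; the helper name verify_lfs_pointer and the local path are illustrative, not part of the repository:

```python
import hashlib
import os

def verify_lfs_pointer(local_path, expected_oid, expected_size):
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size copied from the new pointer above.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "b2fe03e64cb15768d1cd8948c3b5771f52ceabd68f4929b5086f05322316651a",
    1279323952,
))
```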
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8517fdd165c345c8a0e0ac0123ea52f40affe701d0290dc63c174f76877a5a88
+oid sha256:d4f1e1185d2450118d34023796b27a5efe35be5a94b75ef2e5161a3076274146
 size 2558910034
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76dcb566d5a806738709d623034241d7e4801f3e05a1b69ce0e7c24cc89d0804
+oid sha256:cfe0a0399ff6a3ad1379c36f2a12002da56143a1e8b0f97364e7a19a7da0b2ef
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3f011953770dc18a51194e919361544c1e41f047fac26af2e17015642e43c7d
+oid sha256:50d23b4f208a9403528cc4590d75da0ba9842779b9cd25a1b5978ffbe9bcceb1
 size 1064
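
Together with trainer_state.json below, these four LFS-tracked files are the usual contents of a Hugging Face Trainer checkpoint: adapter weights, optimizer state, RNG snapshot, and learning-rate scheduler state. A hedged sketch for inspecting them after downloading the last-checkpoint/ directory locally (paths assumed; nothing here is defined by the commit itself):

```python
import torch
from safetensors.torch import load_file

# Adapter weights are a flat dict of tensor name -> tensor.
adapter = load_file("last-checkpoint/adapter_model.safetensors")
print(f"{len(adapter)} tensors, {sum(t.numel() for t in adapter.values()):,} parameters")

# Optimizer and scheduler snapshots are ordinary torch pickles.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")
print(scheduler_state)  # e.g. the last step and LR recorded by the scheduler
```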
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.4104949235916138,
-  "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 0.0022755005390091903,
+  "best_metric": 1.217628002166748,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.004551001078018381,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -93,6 +93,84 @@
       "eval_samples_per_second": 5.539,
       "eval_steps_per_second": 5.539,
       "step": 100
+    },
+    {
+      "epoch": 0.002503050592910109,
+      "grad_norm": 1.1351838111877441,
+      "learning_rate": 9.755282581475769e-06,
+      "loss": 0.6866,
+      "step": 110
+    },
+    {
+      "epoch": 0.002730600646811028,
+      "grad_norm": 1.3157535791397095,
+      "learning_rate": 9.045084971874738e-06,
+      "loss": 0.8786,
+      "step": 120
+    },
+    {
+      "epoch": 0.0029581507007119473,
+      "grad_norm": 1.2388049364089966,
+      "learning_rate": 7.938926261462366e-06,
+      "loss": 1.1656,
+      "step": 130
+    },
+    {
+      "epoch": 0.003185700754612866,
+      "grad_norm": 1.667664885520935,
+      "learning_rate": 6.545084971874738e-06,
+      "loss": 0.6019,
+      "step": 140
+    },
+    {
+      "epoch": 0.003413250808513785,
+      "grad_norm": 14.491639137268066,
+      "learning_rate": 5e-06,
+      "loss": 2.5697,
+      "step": 150
+    },
+    {
+      "epoch": 0.0036408008624147043,
+      "grad_norm": 1.081459879875183,
+      "learning_rate": 3.4549150281252635e-06,
+      "loss": 0.6925,
+      "step": 160
+    },
+    {
+      "epoch": 0.003868350916315623,
+      "grad_norm": 2.302001953125,
+      "learning_rate": 2.061073738537635e-06,
+      "loss": 0.7282,
+      "step": 170
+    },
+    {
+      "epoch": 0.004095900970216542,
+      "grad_norm": 1.375418782234192,
+      "learning_rate": 9.549150281252633e-07,
+      "loss": 0.8401,
+      "step": 180
+    },
+    {
+      "epoch": 0.004323451024117461,
+      "grad_norm": 1.4581494331359863,
+      "learning_rate": 2.447174185242324e-07,
+      "loss": 0.4591,
+      "step": 190
+    },
+    {
+      "epoch": 0.004551001078018381,
+      "grad_norm": 9.276606559753418,
+      "learning_rate": 0.0,
+      "loss": 2.4783,
+      "step": 200
+    },
+    {
+      "epoch": 0.004551001078018381,
+      "eval_loss": 1.217628002166748,
+      "eval_runtime": 3338.5785,
+      "eval_samples_per_second": 5.542,
+      "eval_steps_per_second": 5.542,
+      "step": 200
     }
   ],
   "logging_steps": 10,
@@ -116,12 +194,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 6.82669886102569e+16,
+  "total_flos": 1.363637353536553e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null