infogep committed
Commit 8ef9e39 · verified · Parent(s): 08e8026

Training in progress, step 150, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68930c58fce51d88a4b2b15bfcffc20cb8fbfb74f8d06364ec15ff69a6ef79c8
+oid sha256:0618f2eb07d300b9fc05ae2a5365b257f61c4852bd74184e7fd4757f0162d463
 size 91850362
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b90f23af62305ba960d6f3da9c8fe5215a78ee5d6503478786db71489b3f020
+oid sha256:a1851c1fda208f0b8fdd03141a67662302756abf898fa5ad966b0637bc8b07cb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
+oid sha256:7e45926e8bb4228c69b4b56f6b51f1445c0aeb3fb7bb09ed84764ec2b4c3a8ff
 size 1064
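
Each pointer update above swaps only the sha256 oid; the recorded byte size is unchanged. As an aside, here is a minimal Python sketch (an illustrative helper, not part of this repository) for checking that a locally pulled LFS object matches the oid and size recorded in its pointer:

import hashlib
import os

def verify_lfs_object(pointer_text: str, local_path: str, chunk_size: int = 1 << 20) -> bool:
    # Parse the pointer ("version ...", "oid sha256:<hex>", "size <bytes>") into key/value pairs.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines() if " " in line)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    # Cheap check first: the byte size must match before hashing.
    if os.path.getsize(local_path) != expected_size:
        return False

    # Stream the file through sha256 and compare against the pointer's oid.
    digest = hashlib.sha256()
    with open(local_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

For optimizer.pt above, for instance, the expected digest after this commit is the new oid 0618f2eb… at 91850362 bytes.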
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": NaN,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.05393743257820928,
+  "epoch": 0.08090614886731391,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 59.225,
       "eval_steps_per_second": 14.863,
       "step": 100
+    },
+    {
+      "epoch": 0.0593311758360302,
+      "grad_norm": NaN,
+      "learning_rate": 3.1076923076923076e-06,
+      "loss": 0.0,
+      "step": 110
+    },
+    {
+      "epoch": 0.06472491909385113,
+      "grad_norm": NaN,
+      "learning_rate": 2.330769230769231e-06,
+      "loss": 0.0,
+      "step": 120
+    },
+    {
+      "epoch": 0.07011866235167206,
+      "grad_norm": NaN,
+      "learning_rate": 1.5538461538461538e-06,
+      "loss": 0.0,
+      "step": 130
+    },
+    {
+      "epoch": 0.07551240560949299,
+      "grad_norm": NaN,
+      "learning_rate": 7.769230769230769e-07,
+      "loss": 0.0,
+      "step": 140
+    },
+    {
+      "epoch": 0.08090614886731391,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 150
+    },
+    {
+      "epoch": 0.08090614886731391,
+      "eval_loss": NaN,
+      "eval_runtime": 13.194,
+      "eval_samples_per_second": 59.194,
+      "eval_steps_per_second": 14.855,
+      "step": 150
     }
   ],
   "logging_steps": 10,
@@ -115,7 +158,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 1
+        "early_stopping_patience_counter": 2
       }
     },
     "TrainerControl": {
@@ -124,12 +167,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 5054876362997760.0,
+  "total_flos": 7557290404085760.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null