skzxjus committed on
Commit 81595dc · verified · 1 Parent(s): 82b294d

Model save

README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/llmsft/huggingface/runs/vakbg73g)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/llmsft/huggingface/runs/njo2ftcp)
 
 
 This model was trained with SFT.
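The hunk context above ends at print(output["generated_text"]), which suggests the README documents usage through the transformers text-generation pipeline. A minimal sketch of that pattern follows; the repository id and prompt are placeholders, not values taken from this commit.

# Minimal usage sketch inferred from the README excerpt; the repo id below is a
# placeholder, not the name of the model saved in this commit.
from transformers import pipeline

generator = pipeline("text-generation", model="your-username/your-sft-model")
output = generator("Summarize supervised fine-tuning in one sentence.", max_new_tokens=64)[0]
print(output["generated_text"])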
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.9955555555555555,
     "total_flos": 175887366488064.0,
-    "train_loss": 0.6538210674410775,
-    "train_runtime": 1576.8233,
+    "train_loss": 0.6538166212184089,
+    "train_runtime": 1597.3219,
     "train_samples": 16610,
-    "train_samples_per_second": 13.679,
-    "train_steps_per_second": 0.107
+    "train_samples_per_second": 13.504,
+    "train_steps_per_second": 0.105
 }
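For a quick sanity check of the updated metrics, the file can be read back with the standard library. A rough sketch, assuming it is run from the repository root; the derived step count should land near the 168 steps recorded in trainer_state.json below.

# Sketch: load all_results.json and derive the approximate step count
# (train_steps_per_second * train_runtime).
import json

with open("all_results.json") as f:
    results = json.load(f)

print(f"train_loss:   {results['train_loss']:.4f}")
print(f"runtime (s):  {results['train_runtime']:.1f}")
print(f"approx steps: {results['train_steps_per_second'] * results['train_runtime']:.0f}")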
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c0e0dc79d05f5574c390b6ee1ad7b22b9de1a1901daa9dcbb953ffd7542a3df
+oid sha256:9486d203577d3381f80a9ca2e7c15750c61c83000cf655427e198349a3ed7781
 size 4877660776
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:857596a0a9f7ce187c12dec96728563bc9dd73827fc88d3368dcaa2430ab5670
+oid sha256:1eb58c1603e3596bea2a7796f92e2f17ad046f9ae7d43b01cd21e5cb9ae87abb
 size 4932751008
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf312e60b66da6a986275c636b0de1066985dc473716930b08b294f7cfa3f1f0
+oid sha256:e2dc26088dc9d1e62d830c178ed50a89ee44cd797c0f273ce42cd21dc88e6669
 size 4330865200
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4f5577ae2821db26f52e4b5098d28e171a28be74646af3f15727d8465b59559
+oid sha256:c0897afbc1facc9b6add4ef7939f679aa8927b89346f7b66360d6b1d7aa75083
 size 1089994880
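All four shard sizes are unchanged while their sha256 oids differ, so the weights were re-saved with new contents rather than resharded. Because these entries are Git LFS pointers, a downloaded shard can be checked against the recorded oid; a sketch, assuming the shard sits in the current directory.

# Sketch: hash a downloaded shard and compare it with the sha256 oid from its
# Git LFS pointer. The expected value is the new oid of model-00001-of-00004.safetensors.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "9486d203577d3381f80a9ca2e7c15750c61c83000cf655427e198349a3ed7781"
actual = sha256_of("model-00001-of-00004.safetensors")
print("shard 1 verified" if actual == expected else f"mismatch: {actual}")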
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.9955555555555555,
     "total_flos": 175887366488064.0,
-    "train_loss": 0.6538210674410775,
-    "train_runtime": 1576.8233,
+    "train_loss": 0.6538166212184089,
+    "train_runtime": 1597.3219,
     "train_samples": 16610,
-    "train_samples_per_second": 13.679,
-    "train_steps_per_second": 0.107
+    "train_samples_per_second": 13.504,
+    "train_steps_per_second": 0.105
 }
trainer_state.json CHANGED
@@ -10,239 +10,239 @@
   "log_history": [
     {
       "epoch": 0.02962962962962963,
-      "grad_norm": 3.2335889970397633,
+      "grad_norm": 3.2346072118607885,
       "learning_rate": 5.882352941176471e-06,
-      "loss": 1.017,
+      "loss": 1.0169,
       "step": 5
     },
     {
       "epoch": 0.05925925925925926,
-      "grad_norm": 2.1236109791934235,
+      "grad_norm": 2.1264769797556,
       "learning_rate": 1.1764705882352942e-05,
       "loss": 0.8756,
       "step": 10
     },
     {
       "epoch": 0.08888888888888889,
-      "grad_norm": 1.4239649144116726,
+      "grad_norm": 1.4231223829611963,
       "learning_rate": 1.7647058823529414e-05,
       "loss": 0.7716,
       "step": 15
     },
     {
       "epoch": 0.11851851851851852,
-      "grad_norm": 1.026423527207296,
+      "grad_norm": 1.0239519248217772,
       "learning_rate": 1.9980527694749952e-05,
       "loss": 0.733,
       "step": 20
     },
     {
       "epoch": 0.14814814814814814,
-      "grad_norm": 0.7195683369586703,
+      "grad_norm": 0.7202299506697141,
       "learning_rate": 1.986180478852149e-05,
       "loss": 0.7059,
       "step": 25
     },
     {
       "epoch": 0.17777777777777778,
-      "grad_norm": 0.566755743770087,
+      "grad_norm": 0.5695527206070538,
       "learning_rate": 1.963645895935632e-05,
       "loss": 0.6695,
       "step": 30
     },
     {
       "epoch": 0.2074074074074074,
-      "grad_norm": 0.6053406442761077,
+      "grad_norm": 0.6039878702944135,
       "learning_rate": 1.930692657985482e-05,
       "loss": 0.6779,
       "step": 35
     },
     {
       "epoch": 0.23703703703703705,
-      "grad_norm": 0.5095813602660316,
+      "grad_norm": 0.5093109436831765,
       "learning_rate": 1.887677045685188e-05,
       "loss": 0.6563,
       "step": 40
     },
     {
       "epoch": 0.26666666666666666,
-      "grad_norm": 0.6095732174400462,
+      "grad_norm": 0.6018876927034159,
       "learning_rate": 1.8350641311400813e-05,
       "loss": 0.6597,
       "step": 45
     },
     {
       "epoch": 0.2962962962962963,
-      "grad_norm": 0.5307967748491872,
+      "grad_norm": 0.5268268034517721,
       "learning_rate": 1.773422749654988e-05,
-      "loss": 0.6482,
+      "loss": 0.6483,
       "step": 50
     },
     {
       "epoch": 0.32592592592592595,
-      "grad_norm": 0.5861838961227068,
+      "grad_norm": 0.5861151838314477,
       "learning_rate": 1.7034193496547903e-05,
-      "loss": 0.6491,
+      "loss": 0.649,
       "step": 55
     },
     {
       "epoch": 0.35555555555555557,
-      "grad_norm": 0.4535084044049933,
+      "grad_norm": 0.4503680956414544,
       "learning_rate": 1.6258107872407376e-05,
       "loss": 0.6319,
       "step": 60
     },
     {
       "epoch": 0.3851851851851852,
-      "grad_norm": 0.5336831242871914,
+      "grad_norm": 0.5307966629696642,
       "learning_rate": 1.5414361432856475e-05,
-      "loss": 0.6309,
+      "loss": 0.631,
       "step": 65
     },
     {
       "epoch": 0.4148148148148148,
-      "grad_norm": 0.5806830551489788,
+      "grad_norm": 0.5782202441621853,
       "learning_rate": 1.4512076515391375e-05,
       "loss": 0.6328,
       "step": 70
     },
     {
       "epoch": 0.4444444444444444,
-      "grad_norm": 0.48899625875520547,
+      "grad_norm": 0.48265196188960013,
       "learning_rate": 1.356100835825547e-05,
       "loss": 0.6254,
       "step": 75
     },
     {
       "epoch": 0.4740740740740741,
-      "grad_norm": 0.4871540647070326,
+      "grad_norm": 0.4813265336090251,
       "learning_rate": 1.257143962968246e-05,
-      "loss": 0.6176,
+      "loss": 0.6175,
       "step": 80
     },
     {
       "epoch": 0.5037037037037037,
-      "grad_norm": 0.4589196113274742,
+      "grad_norm": 0.4515213148563428,
       "learning_rate": 1.155406925472205e-05,
       "loss": 0.6298,
       "step": 85
     },
     {
       "epoch": 0.5333333333333333,
-      "grad_norm": 0.4397604117092943,
+      "grad_norm": 0.44644143858157415,
       "learning_rate": 1.0519896741619803e-05,
       "loss": 0.6215,
       "step": 90
     },
     {
       "epoch": 0.562962962962963,
-      "grad_norm": 0.4750309164051992,
+      "grad_norm": 0.4691339692298715,
       "learning_rate": 9.480103258380198e-06,
-      "loss": 0.6246,
+      "loss": 0.6245,
       "step": 95
     },
     {
       "epoch": 0.5925925925925926,
-      "grad_norm": 0.42827284754034906,
+      "grad_norm": 0.4311309942505263,
       "learning_rate": 8.445930745277953e-06,
       "loss": 0.6157,
       "step": 100
     },
     {
       "epoch": 0.5925925925925926,
-      "eval_loss": 0.6399269700050354,
-      "eval_runtime": 2.3977,
-      "eval_samples_per_second": 52.968,
-      "eval_steps_per_second": 1.668,
+      "eval_loss": 0.6399185061454773,
+      "eval_runtime": 2.407,
+      "eval_samples_per_second": 52.763,
+      "eval_steps_per_second": 1.662,
       "step": 100
     },
     {
       "epoch": 0.6222222222222222,
-      "grad_norm": 0.4125966722094787,
+      "grad_norm": 0.42038278194306516,
       "learning_rate": 7.428560370317542e-06,
       "loss": 0.6167,
       "step": 105
     },
     {
       "epoch": 0.6518518518518519,
-      "grad_norm": 0.4781922605492141,
+      "grad_norm": 0.478843319690418,
       "learning_rate": 6.438991641744531e-06,
-      "loss": 0.6218,
+      "loss": 0.6219,
       "step": 110
     },
     {
       "epoch": 0.6814814814814815,
-      "grad_norm": 0.43856592253458193,
+      "grad_norm": 0.4347536528553071,
       "learning_rate": 5.487923484608629e-06,
       "loss": 0.6041,
       "step": 115
     },
     {
       "epoch": 0.7111111111111111,
-      "grad_norm": 0.3807903239963169,
+      "grad_norm": 0.3769111515881631,
       "learning_rate": 4.5856385671435285e-06,
       "loss": 0.5991,
       "step": 120
     },
     {
       "epoch": 0.7407407407407407,
-      "grad_norm": 0.40916534060623017,
+      "grad_norm": 0.4099151217963583,
       "learning_rate": 3.7418921275926245e-06,
       "loss": 0.5976,
       "step": 125
     },
     {
       "epoch": 0.7703703703703704,
-      "grad_norm": 0.36653352461201527,
+      "grad_norm": 0.36781593886520214,
       "learning_rate": 2.965806503452098e-06,
       "loss": 0.6132,
       "step": 130
     },
     {
       "epoch": 0.8,
-      "grad_norm": 0.3609344511157987,
+      "grad_norm": 0.3627464720674499,
       "learning_rate": 2.265772503450122e-06,
-      "loss": 0.6103,
+      "loss": 0.6104,
       "step": 135
     },
     {
       "epoch": 0.8296296296296296,
-      "grad_norm": 0.367245145966204,
+      "grad_norm": 0.3608772496918306,
       "learning_rate": 1.6493586885991908e-06,
       "loss": 0.606,
       "step": 140
     },
     {
       "epoch": 0.8592592592592593,
-      "grad_norm": 0.345492870844588,
+      "grad_norm": 0.34531094549512426,
       "learning_rate": 1.1232295431481222e-06,
       "loss": 0.6084,
       "step": 145
     },
     {
       "epoch": 0.8888888888888888,
-      "grad_norm": 0.5799467118789525,
+      "grad_norm": 0.3242007784943698,
       "learning_rate": 6.930734201451817e-07,
       "loss": 0.6146,
       "step": 150
     },
     {
       "epoch": 0.9185185185185185,
-      "grad_norm": 0.3382771573960439,
+      "grad_norm": 0.3386165753528116,
       "learning_rate": 3.635410406436857e-07,
-      "loss": 0.6067,
+      "loss": 0.6066,
       "step": 155
     },
     {
       "epoch": 0.9481481481481482,
-      "grad_norm": 0.3280172387536944,
+      "grad_norm": 0.32706837162712976,
       "learning_rate": 1.3819521147851122e-07,
-      "loss": 0.6099,
+      "loss": 0.6098,
       "step": 160
     },
     {
       "epoch": 0.9777777777777777,
-      "grad_norm": 0.32601111538642685,
+      "grad_norm": 0.3256703232456208,
       "learning_rate": 1.947230525005006e-08,
       "loss": 0.6117,
       "step": 165
@@ -251,10 +251,10 @@
     "epoch": 0.9955555555555555,
     "step": 168,
     "total_flos": 175887366488064.0,
-    "train_loss": 0.6538210674410775,
-    "train_runtime": 1576.8233,
-    "train_samples_per_second": 13.679,
-    "train_steps_per_second": 0.107
+    "train_loss": 0.6538166212184089,
+    "train_runtime": 1597.3219,
+    "train_samples_per_second": 13.504,
+    "train_steps_per_second": 0.105
   }
 ],
 "logging_steps": 5,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:248e08927be4400c838b0f09e774f72e44a12d4ff9c367f35284d75dc9aaeddc
+oid sha256:96346f7c06fe7d9fcb5198a3d6ffa4922fda2cc78c41d3d9705faee0b7f7b32a
 size 7352
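Finally, the full log_history shown in the trainer_state.json diff above can be read back to inspect the training curve and the step-100 evaluation; a small sketch, with the local path assumed.

# Sketch: pull the per-step training logs and the eval entry out of trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # per-step entries
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # the step-100 eval

for entry in train_logs[-3:]:
    print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  grad_norm {entry['grad_norm']:.3f}")
for entry in eval_logs:
    print(f"step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}")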