Jinsong-Zhou committed · verified
Commit 6359a1c · 1 Parent(s): 0731342

Model save
README.md CHANGED
@@ -27,17 +27,17 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jinson/huggingface/runs/qrc2c3at)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/jinson/huggingface/runs/5ajy3q44)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.16.0.dev0
-- Transformers: 4.49.0.dev0
+- TRL: 0.15.0
+- Transformers: 4.50.0.dev0
 - Pytorch: 2.5.1
-- Datasets: 3.3.0
+- Datasets: 3.3.2
 - Tokenizers: 0.21.0
 
 ## Citations
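
The README hunk above records an SFT run tracked in Weights & Biases. For orientation, here is a minimal sketch of the kind of TRL training script that produces these artifacts (trainer_state.json, all_results.json, the W&B badge). The model and dataset identifiers are placeholders, not taken from this commit; only the trainer settings echo values visible in the diff below.

```python
# Minimal TRL SFT sketch. NOTE: model and dataset ids are placeholders,
# not the ones used for this commit; trainer settings mirror the diff.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder dataset

training_args = SFTConfig(
    output_dir="sft-output",   # trainer_state.json / *_results.json are written here
    num_train_epochs=1,        # "num_train_epochs": 1 in trainer_state.json
    logging_steps=5,           # "logging_steps": 5
    save_steps=100,            # the new "save_steps": 100
    report_to="wandb",         # produces the run linked by the README badge
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",  # placeholder model id
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
trainer.save_model()
```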
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 76560728064000.0,
-    "train_loss": 0.8027291595935822,
-    "train_runtime": 10618.3737,
-    "train_samples": 16610,
-    "train_samples_per_second": 2.036,
-    "train_steps_per_second": 0.016
+    "total_flos": 488165445992448.0,
+    "train_loss": 0.5792719663968727,
+    "train_runtime": 25010.1039,
+    "train_samples": 93733,
+    "train_samples_per_second": 1.371,
+    "train_steps_per_second": 0.011
 }
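
The before/after metrics in this file already allow a rough throughput comparison: dividing train_samples_per_second by train_steps_per_second gives the approximate number of samples consumed per optimizer step (approximate because both rates are rounded). A small sketch, using the values copied from the hunk above:

```python
# Compare the two runs recorded in all_results.json (values copied from the diff).
runs = {
    "old": {"train_runtime": 10618.3737, "train_samples": 16610,
            "train_samples_per_second": 2.036, "train_steps_per_second": 0.016},
    "new": {"train_runtime": 25010.1039, "train_samples": 93733,
            "train_samples_per_second": 1.371, "train_steps_per_second": 0.011},
}

for name, m in runs.items():
    samples_per_step = m["train_samples_per_second"] / m["train_steps_per_second"]
    hours = m["train_runtime"] / 3600
    # old: ~127 samples/step over ~2.9 h; new: ~125 samples/step over ~6.9 h
    print(f"{name}: ~{samples_per_step:.0f} samples per optimizer step, {hours:.1f} h")
```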
generation_config.json CHANGED
@@ -10,5 +10,5 @@
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.49.0.dev0"
+  "transformers_version": "4.50.0.dev0"
 }
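
The sampling parameters in this file are picked up automatically by generate() when the model is loaded from the repo; the sketch below makes them explicit. The model id is a placeholder, not taken from this commit.

```python
# Sketch: apply the sampling settings from generation_config.json explicitly.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-username/your-sft-model"  # placeholder, not the actual repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Write a short haiku about diffs.", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.7,   # from generation_config.json
    top_k=20,          # from generation_config.json
    top_p=0.8,         # from generation_config.json
    max_new_tokens=64,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```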
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "total_flos": 76560728064000.0,
-    "train_loss": 0.8027291595935822,
-    "train_runtime": 10618.3737,
-    "train_samples": 16610,
-    "train_samples_per_second": 2.036,
-    "train_steps_per_second": 0.016
+    "total_flos": 488165445992448.0,
+    "train_loss": 0.5792719663968727,
+    "train_runtime": 25010.1039,
+    "train_samples": 93733,
+    "train_samples_per_second": 1.371,
+    "train_steps_per_second": 0.011
 }
trainer_state.json CHANGED
@@ -1,316 +1,413 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.9944506104328524,
-  "eval_steps": 100,
-  "global_step": 168,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 268,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.029596744358120607,
-      "grad_norm": 2.297286039806607,
-      "learning_rate": 5.882352941176471e-06,
-      "loss": 1.1002,
-      "mean_token_accuracy": 0.7101261564901226,
+      "epoch": 0.018656716417910446,
+      "grad_norm": 1.8357200745044757,
+      "learning_rate": 1.785714285714286e-05,
+      "loss": 0.8456,
       "step": 5
     },
     {
-      "epoch": 0.059193488716241215,
-      "grad_norm": 1.598959318938298,
-      "learning_rate": 1.1764705882352942e-05,
-      "loss": 1.0325,
-      "mean_token_accuracy": 0.7202338207680274,
+      "epoch": 0.03731343283582089,
+      "grad_norm": 0.7880079087520768,
+      "learning_rate": 3.571428571428572e-05,
+      "loss": 0.7682,
       "step": 10
     },
     {
-      "epoch": 0.08879023307436182,
-      "grad_norm": 0.8557338569703955,
-      "learning_rate": 1.7647058823529414e-05,
-      "loss": 0.9517,
-      "mean_token_accuracy": 0.7325525081674223,
+      "epoch": 0.055970149253731345,
+      "grad_norm": 0.5072079736182407,
+      "learning_rate": 4.999827900623038e-05,
+      "loss": 0.7026,
       "step": 15
     },
     {
-      "epoch": 0.11838697743248243,
-      "grad_norm": 0.6524948891488312,
-      "learning_rate": 1.9980527694749952e-05,
-      "loss": 0.8805,
-      "mean_token_accuracy": 0.7473646233841255,
+      "epoch": 0.07462686567164178,
+      "grad_norm": 0.3956980910828853,
+      "learning_rate": 4.993807186343243e-05,
+      "loss": 0.6745,
       "step": 20
     },
     {
-      "epoch": 0.14798372179060304,
-      "grad_norm": 0.5586962091196656,
-      "learning_rate": 1.986180478852149e-05,
-      "loss": 0.8535,
-      "mean_token_accuracy": 0.7518094119293168,
+      "epoch": 0.09328358208955224,
+      "grad_norm": 0.38447381525886887,
+      "learning_rate": 4.979207812402531e-05,
+      "loss": 0.6436,
       "step": 25
     },
     {
-      "epoch": 0.17758046614872364,
-      "grad_norm": 0.4501992507045545,
-      "learning_rate": 1.963645895935632e-05,
-      "loss": 0.8403,
-      "mean_token_accuracy": 0.7545180402865705,
+      "epoch": 0.11194029850746269,
+      "grad_norm": 0.299192319561256,
+      "learning_rate": 4.956085596012407e-05,
+      "loss": 0.6362,
       "step": 30
     },
     {
-      "epoch": 0.20717721050684423,
-      "grad_norm": 0.40514440229124343,
-      "learning_rate": 1.930692657985482e-05,
-      "loss": 0.8211,
-      "mean_token_accuracy": 0.7579922643362512,
+      "epoch": 0.13059701492537312,
+      "grad_norm": 0.2848642692822099,
+      "learning_rate": 4.924528939432311e-05,
+      "loss": 0.6199,
       "step": 35
     },
     {
-      "epoch": 0.23677395486496486,
-      "grad_norm": 0.39471525110138866,
-      "learning_rate": 1.887677045685188e-05,
-      "loss": 0.8049,
-      "mean_token_accuracy": 0.7621190554838779,
+      "epoch": 0.14925373134328357,
+      "grad_norm": 0.246624944967583,
+      "learning_rate": 4.884658491984735e-05,
+      "loss": 0.6105,
       "step": 40
     },
     {
-      "epoch": 0.2663706992230855,
-      "grad_norm": 0.43739042181311494,
-      "learning_rate": 1.8350641311400813e-05,
-      "loss": 0.7984,
-      "mean_token_accuracy": 0.7634295080529785,
+      "epoch": 0.16791044776119404,
+      "grad_norm": 0.2113047031719804,
+      "learning_rate": 4.8366266887814235e-05,
+      "loss": 0.6112,
       "step": 45
     },
     {
-      "epoch": 0.2959674435812061,
-      "grad_norm": 0.4050015485090704,
-      "learning_rate": 1.773422749654988e-05,
-      "loss": 0.7936,
-      "mean_token_accuracy": 0.7642335004344475,
+      "epoch": 0.1865671641791045,
+      "grad_norm": 0.24707697994991026,
+      "learning_rate": 4.780617167924209e-05,
+      "loss": 0.5938,
       "step": 50
     },
     {
-      "epoch": 0.3255641879393267,
-      "grad_norm": 0.3862008743204375,
-      "learning_rate": 1.7034193496547903e-05,
-      "loss": 0.7894,
-      "mean_token_accuracy": 0.7648157866255213,
+      "epoch": 0.20522388059701493,
+      "grad_norm": 0.2596596831883685,
+      "learning_rate": 4.716844068408693e-05,
+      "loss": 0.5965,
       "step": 55
     },
     {
-      "epoch": 0.3551609322974473,
-      "grad_norm": 0.3808360474271673,
-      "learning_rate": 1.6258107872407376e-05,
-      "loss": 0.7999,
-      "mean_token_accuracy": 0.7619282901641846,
+      "epoch": 0.22388059701492538,
+      "grad_norm": 0.26985436915379585,
+      "learning_rate": 4.6455512114150546e-05,
+      "loss": 0.5918,
       "step": 60
     },
     {
-      "epoch": 0.38475767665556787,
-      "grad_norm": 0.41625214344673966,
-      "learning_rate": 1.5414361432856475e-05,
-      "loss": 0.7869,
-      "mean_token_accuracy": 0.7646248305016428,
+      "epoch": 0.24253731343283583,
+      "grad_norm": 0.5749756625596264,
+      "learning_rate": 4.5670111681161296e-05,
+      "loss": 0.583,
       "step": 65
     },
     {
-      "epoch": 0.41435442101368847,
-      "grad_norm": 0.3836610280759421,
-      "learning_rate": 1.4512076515391375e-05,
-      "loss": 0.7782,
-      "mean_token_accuracy": 0.7674149625106065,
+      "epoch": 0.26119402985074625,
+      "grad_norm": 0.4737380183827698,
+      "learning_rate": 4.481524217566783e-05,
+      "loss": 0.58,
       "step": 70
     },
     {
-      "epoch": 0.4439511653718091,
-      "grad_norm": 0.3429055885862389,
-      "learning_rate": 1.356100835825547e-05,
-      "loss": 0.7771,
-      "mean_token_accuracy": 0.767463215138475,
+      "epoch": 0.2798507462686567,
+      "grad_norm": 0.4262259418885374,
+      "learning_rate": 4.3894171986588217e-05,
+      "loss": 0.5785,
       "step": 75
     },
     {
-      "epoch": 0.4735479097299297,
-      "grad_norm": 0.3548520942127707,
-      "learning_rate": 1.257143962968246e-05,
-      "loss": 0.7783,
-      "mean_token_accuracy": 0.7665814820650783,
+      "epoch": 0.29850746268656714,
+      "grad_norm": 0.43257212906351034,
+      "learning_rate": 4.29104226053073e-05,
+      "loss": 0.5775,
       "step": 80
     },
     {
-      "epoch": 0.5031446540880503,
-      "grad_norm": 0.34670016990380903,
-      "learning_rate": 1.155406925472205e-05,
-      "loss": 0.7786,
-      "mean_token_accuracy": 0.7668829515693034,
+      "epoch": 0.31716417910447764,
+      "grad_norm": 0.32488482534238494,
+      "learning_rate": 4.186775516209732e-05,
+      "loss": 0.5728,
       "step": 85
     },
     {
-      "epoch": 0.532741398446171,
-      "grad_norm": 0.354375470838285,
-      "learning_rate": 1.0519896741619803e-05,
-      "loss": 0.7724,
-      "mean_token_accuracy": 0.7681869031104136,
+      "epoch": 0.3358208955223881,
+      "grad_norm": 0.3511807271347856,
+      "learning_rate": 4.077015604633669e-05,
+      "loss": 0.5752,
       "step": 90
     },
     {
-      "epoch": 0.5623381428042915,
-      "grad_norm": 0.35042728553773317,
-      "learning_rate": 9.480103258380198e-06,
-      "loss": 0.7668,
-      "mean_token_accuracy": 0.7697937295203819,
+      "epoch": 0.35447761194029853,
+      "grad_norm": 0.31519292419205,
+      "learning_rate": 3.962182166550441e-05,
+      "loss": 0.5737,
       "step": 95
     },
     {
-      "epoch": 0.5919348871624122,
-      "grad_norm": 0.3487619390880057,
-      "learning_rate": 8.445930745277953e-06,
-      "loss": 0.7584,
-      "mean_token_accuracy": 0.7713990834609527,
+      "epoch": 0.373134328358209,
+      "grad_norm": 0.27842672077069447,
+      "learning_rate": 3.8427142401220634e-05,
+      "loss": 0.5697,
       "step": 100
     },
     {
-      "epoch": 0.5919348871624122,
-      "eval_loss": 0.7887204885482788,
-      "eval_mean_token_accuracy": 0.7581970585874204,
-      "eval_runtime": 20.8083,
-      "eval_samples_per_second": 6.199,
-      "eval_steps_per_second": 0.817,
-      "step": 100
-    },
-    {
-      "epoch": 0.6215316315205327,
-      "grad_norm": 0.3648322197104147,
-      "learning_rate": 7.428560370317542e-06,
-      "loss": 0.7592,
-      "mean_token_accuracy": 0.77141248230712,
+      "epoch": 0.3917910447761194,
+      "grad_norm": 0.258137938167996,
+      "learning_rate": 3.71906858236735e-05,
+      "loss": 0.566,
       "step": 105
     },
     {
-      "epoch": 0.6511283758786534,
-      "grad_norm": 0.3244685784676015,
-      "learning_rate": 6.438991641744531e-06,
-      "loss": 0.7777,
-      "mean_token_accuracy": 0.7663982071721088,
+      "epoch": 0.41044776119402987,
+      "grad_norm": 0.24782019982832532,
+      "learning_rate": 3.591717922860785e-05,
+      "loss": 0.5732,
       "step": 110
     },
     {
-      "epoch": 0.680725120236774,
-      "grad_norm": 0.31476170427058586,
-      "learning_rate": 5.487923484608629e-06,
-      "loss": 0.7637,
-      "mean_token_accuracy": 0.7702532985281345,
+      "epoch": 0.4291044776119403,
+      "grad_norm": 0.21367211036985273,
+      "learning_rate": 3.46114915636416e-05,
+      "loss": 0.5641,
       "step": 115
     },
     {
-      "epoch": 0.7103218645948945,
-      "grad_norm": 0.30378046295629435,
-      "learning_rate": 4.5856385671435285e-06,
-      "loss": 0.7512,
-      "mean_token_accuracy": 0.7742151065835647,
+      "epoch": 0.44776119402985076,
+      "grad_norm": 0.2359860351856442,
+      "learning_rate": 3.3278614813010034e-05,
+      "loss": 0.565,
       "step": 120
     },
     {
-      "epoch": 0.7399186089530152,
-      "grad_norm": 0.3245568919497631,
-      "learning_rate": 3.7418921275926245e-06,
-      "loss": 0.7596,
-      "mean_token_accuracy": 0.7716760745454179,
+      "epoch": 0.4664179104477612,
+      "grad_norm": 0.23762377488074504,
+      "learning_rate": 3.1923644911909e-05,
+      "loss": 0.5619,
       "step": 125
     },
     {
-      "epoch": 0.7695153533111357,
-      "grad_norm": 0.32838254037360903,
-      "learning_rate": 2.965806503452098e-06,
-      "loss": 0.7626,
-      "mean_token_accuracy": 0.7704949384884608,
+      "epoch": 0.48507462686567165,
+      "grad_norm": 0.21798417043326976,
+      "learning_rate": 3.0551762263406576e-05,
+      "loss": 0.5606,
       "step": 130
     },
     {
-      "epoch": 0.7991120976692564,
-      "grad_norm": 0.30665122068538603,
-      "learning_rate": 2.265772503450122e-06,
-      "loss": 0.7686,
-      "mean_token_accuracy": 0.7688054766465177,
+      "epoch": 0.503731343283582,
+      "grad_norm": 0.2185282511977544,
+      "learning_rate": 2.9168211932412042e-05,
+      "loss": 0.5578,
       "step": 135
     },
     {
-      "epoch": 0.8287088420273769,
-      "grad_norm": 0.2993142489345467,
-      "learning_rate": 1.6493586885991908e-06,
-      "loss": 0.7599,
-      "mean_token_accuracy": 0.771630464199698,
+      "epoch": 0.5223880597014925,
+      "grad_norm": 0.2017005705140969,
+      "learning_rate": 2.777828359242567e-05,
+      "loss": 0.5632,
       "step": 140
     },
     {
-      "epoch": 0.8583055863854976,
-      "grad_norm": 0.31657760951455227,
-      "learning_rate": 1.1232295431481222e-06,
-      "loss": 0.7598,
-      "mean_token_accuracy": 0.7713579193104667,
+      "epoch": 0.5410447761194029,
+      "grad_norm": 0.23403016673929508,
+      "learning_rate": 2.6387291301738377e-05,
+      "loss": 0.5559,
       "step": 145
     },
     {
-      "epoch": 0.8879023307436182,
-      "grad_norm": 0.29677317819620136,
-      "learning_rate": 6.930734201451817e-07,
-      "loss": 0.7508,
-      "mean_token_accuracy": 0.7738701734792748,
+      "epoch": 0.5597014925373134,
+      "grad_norm": 0.20972754184297304,
+      "learning_rate": 2.50005531864019e-05,
+      "loss": 0.5537,
       "step": 150
     },
     {
-      "epoch": 0.9174990751017388,
-      "grad_norm": 0.28445809248236675,
-      "learning_rate": 3.635410406436857e-07,
-      "loss": 0.7413,
-      "mean_token_accuracy": 0.7766777954365708,
+      "epoch": 0.5783582089552238,
+      "grad_norm": 0.18963422103493383,
+      "learning_rate": 2.362337110764688e-05,
+      "loss": 0.554,
       "step": 155
     },
     {
-      "epoch": 0.9470958194598594,
-      "grad_norm": 0.30204735729222293,
-      "learning_rate": 1.3819521147851122e-07,
-      "loss": 0.7543,
-      "mean_token_accuracy": 0.7731135856147668,
+      "epoch": 0.5970149253731343,
+      "grad_norm": 0.19986620919481152,
+      "learning_rate": 2.226101039148557e-05,
+      "loss": 0.5522,
       "step": 160
     },
     {
-      "epoch": 0.97669256381798,
-      "grad_norm": 0.2818907779460126,
-      "learning_rate": 1.947230525005006e-08,
-      "loss": 0.747,
-      "mean_token_accuracy": 0.7748765676578209,
+      "epoch": 0.6156716417910447,
+      "grad_norm": 0.22264982729446323,
+      "learning_rate": 2.0918679697998252e-05,
+      "loss": 0.5511,
       "step": 165
     },
     {
-      "epoch": 0.9944506104328524,
-      "mean_token_accuracy": 0.7702185157826186,
-      "step": 168,
-      "total_flos": 76560728064000.0,
-      "train_loss": 0.8027291595935822,
-      "train_runtime": 10618.3737,
-      "train_samples_per_second": 2.036,
-      "train_steps_per_second": 0.016
+      "epoch": 0.6343283582089553,
+      "grad_norm": 0.2080998889583045,
+      "learning_rate": 1.9601511107268255e-05,
+      "loss": 0.5515,
+      "step": 170
+    },
+    {
+      "epoch": 0.6529850746268657,
+      "grad_norm": 0.19343233196548346,
+      "learning_rate": 1.8314540498102216e-05,
+      "loss": 0.5512,
+      "step": 175
+    },
+    {
+      "epoch": 0.6716417910447762,
+      "grad_norm": 0.17540535388811454,
+      "learning_rate": 1.7062688294552992e-05,
+      "loss": 0.5433,
+      "step": 180
+    },
+    {
+      "epoch": 0.6902985074626866,
+      "grad_norm": 0.19756929408176382,
+      "learning_rate": 1.5850740653856096e-05,
+      "loss": 0.5467,
+      "step": 185
+    },
+    {
+      "epoch": 0.7089552238805971,
+      "grad_norm": 0.18094727515834508,
+      "learning_rate": 1.4683331167703218e-05,
+      "loss": 0.5502,
+      "step": 190
+    },
+    {
+      "epoch": 0.7276119402985075,
+      "grad_norm": 0.17080964382720645,
+      "learning_rate": 1.356492314681356e-05,
+      "loss": 0.553,
+      "step": 195
+    },
+    {
+      "epoch": 0.746268656716418,
+      "grad_norm": 0.16164117491727123,
+      "learning_rate": 1.2499792556533716e-05,
+      "loss": 0.5474,
+      "step": 200
+    },
+    {
+      "epoch": 0.7649253731343284,
+      "grad_norm": 0.17787031389952734,
+      "learning_rate": 1.1492011668707753e-05,
+      "loss": 0.5449,
+      "step": 205
+    },
+    {
+      "epoch": 0.7835820895522388,
+      "grad_norm": 0.17503610601554878,
+      "learning_rate": 1.0545433492320603e-05,
+      "loss": 0.55,
+      "step": 210
+    },
+    {
+      "epoch": 0.8022388059701493,
+      "grad_norm": 0.16061415785978014,
+      "learning_rate": 9.663677042440537e-06,
+      "loss": 0.5443,
+      "step": 215
+    },
+    {
+      "epoch": 0.8208955223880597,
+      "grad_norm": 0.16426892962313516,
+      "learning_rate": 8.850113503781367e-06,
+      "loss": 0.5443,
+      "step": 220
+    },
+    {
+      "epoch": 0.8395522388059702,
+      "grad_norm": 0.14334733267655186,
+      "learning_rate": 8.107853341784671e-06,
+      "loss": 0.5506,
+      "step": 225
+    },
+    {
+      "epoch": 0.8582089552238806,
+      "grad_norm": 0.15691714417345864,
+      "learning_rate": 7.439734410499752e-06,
+      "loss": 0.547,
+      "step": 230
+    },
+    {
+      "epoch": 0.8768656716417911,
+      "grad_norm": 0.18311717197065572,
+      "learning_rate": 6.848311102728011e-06,
+      "loss": 0.5472,
+      "step": 235
+    },
+    {
+      "epoch": 0.8955223880597015,
+      "grad_norm": 0.18317039918731354,
+      "learning_rate": 6.335844583913515e-06,
+      "loss": 0.5433,
+      "step": 240
+    },
+    {
+      "epoch": 0.914179104477612,
+      "grad_norm": 0.14619058285902858,
+      "learning_rate": 5.904294147118193e-06,
+      "loss": 0.547,
+      "step": 245
+    },
+    {
+      "epoch": 0.9328358208955224,
+      "grad_norm": 0.14674904042247255,
+      "learning_rate": 5.555309722133842e-06,
+      "loss": 0.5435,
+      "step": 250
+    },
+    {
+      "epoch": 0.9514925373134329,
+      "grad_norm": 0.14162340991654054,
+      "learning_rate": 5.290225567370509e-06,
+      "loss": 0.5396,
+      "step": 255
+    },
+    {
+      "epoch": 0.9701492537313433,
+      "grad_norm": 0.14994290104981006,
+      "learning_rate": 5.110055168638854e-06,
+      "loss": 0.5433,
+      "step": 260
+    },
+    {
+      "epoch": 0.9888059701492538,
+      "grad_norm": 0.1429086930896721,
+      "learning_rate": 5.0154873643297575e-06,
+      "loss": 0.547,
+      "step": 265
+    },
+    {
+      "epoch": 1.0,
+      "step": 268,
+      "total_flos": 488165445992448.0,
+      "train_loss": 0.5792719663968727,
+      "train_runtime": 25010.1039,
+      "train_samples_per_second": 1.371,
+      "train_steps_per_second": 0.011
     }
   ],
   "logging_steps": 5,
-  "max_steps": 168,
+  "max_steps": 268,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 500,
+  "save_steps": 100,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
         "should_epoch_stop": false,
         "should_evaluate": false,
         "should_log": false,
-        "should_save": false,
-        "should_training_stop": false
+        "should_save": true,
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 76560728064000.0,
-  "train_batch_size": 2,
+  "total_flos": 488165445992448.0,
+  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
 }
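
log_history above is the machine-readable counterpart of the W&B run; a small sketch of how one might read the saved state back, assuming trainer_state.json is read from the output or checkpoint directory:

```python
# Summarize the loss curve recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep per-step training logs; the final summary entry uses "train_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
first, last = train_logs[0], train_logs[-1]
print(f"steps {first['step']} to {last['step']}: "
      f"loss {first['loss']:.4f} -> {last['loss']:.4f} "
      f"({state['global_step']}/{state['max_steps']} steps completed)")
```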