Obeida committed (verified)
Commit 7e90a04 · 1 Parent(s): 7c70fc1

End of training

README.md CHANGED
@@ -4,6 +4,7 @@ license: apache-2.0
 base_model: Qwen/Qwen2.5-1.5B-Instruct
 tags:
 - llama-factory
+- lora
 - generated_from_trainer
 model-index:
 - name: models1
@@ -15,9 +16,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # models1
 
-This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) on an unknown dataset.
+This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) on the comment_finetune_train dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1253
+- Loss: 0.1252
 
 ## Model description
 
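The new `lora` tag (alongside `llama-factory`) indicates the checkpoint is a LoRA adapter rather than full model weights. Below is a minimal loading sketch with PEFT; the adapter repo id `Obeida/models1` is only inferred from the commit author and the model-index name, not stated in the diff.

```python
# Sketch only: load the LoRA adapter on top of the base model with PEFT.
# "Obeida/models1" is an assumed adapter repo id; replace it with the actual path.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen2.5-1.5B-Instruct"
adapter_id = "Obeida/models1"  # assumption, not confirmed by this commit

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

# Build a chat-formatted prompt and generate a short completion.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(base.device)
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```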
 
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.12520842254161835,
+    "eval_runtime": 62.8478,
+    "eval_samples_per_second": 1.082,
+    "eval_steps_per_second": 1.082,
+    "total_flos": 1.134915283869696e+16,
+    "train_loss": 0.04400509732668517,
+    "train_runtime": 2473.793,
+    "train_samples_per_second": 0.986,
+    "train_steps_per_second": 0.247
+}
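The throughput figures above are mutually consistent; a quick cross-check (values copied from all_results.json, the implied step and sample counts are derived here, not read from any file):

```python
# Cross-check of the throughput figures reported in all_results.json.
train_runtime = 2473.793            # seconds
train_steps_per_second = 0.247
eval_runtime = 62.8478              # seconds
eval_samples_per_second = 1.082

print(round(train_runtime * train_steps_per_second))   # ~611, matching the 610 steps in trainer_state.json
print(round(eval_runtime * eval_samples_per_second))   # ~68 eval samples (batch size 1, so steps == samples)
```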
eval_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 1.0,
+    "eval_loss": 0.12520842254161835,
+    "eval_runtime": 62.8478,
+    "eval_samples_per_second": 1.082,
+    "eval_steps_per_second": 1.082
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "total_flos": 1.134915283869696e+16,
+    "train_loss": 0.04400509732668517,
+    "train_runtime": 2473.793,
+    "train_samples_per_second": 0.986,
+    "train_steps_per_second": 0.247
+}
trainer_state.json ADDED
@@ -0,0 +1,517 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 100,
+  "global_step": 610,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.01639344262295082,
+      "grad_norm": 1.9867063760757446,
+      "learning_rate": 1.6393442622950818e-05,
+      "loss": 0.3745,
+      "step": 10
+    },
+    {
+      "epoch": 0.03278688524590164,
+      "grad_norm": 1.508779525756836,
+      "learning_rate": 3.2786885245901635e-05,
+      "loss": 0.2514,
+      "step": 20
+    },
+    {
+      "epoch": 0.04918032786885246,
+      "grad_norm": 0.43040570616722107,
+      "learning_rate": 4.918032786885246e-05,
+      "loss": 0.1555,
+      "step": 30
+    },
+    {
+      "epoch": 0.06557377049180328,
+      "grad_norm": 1.1166633367538452,
+      "learning_rate": 6.557377049180327e-05,
+      "loss": 0.1932,
+      "step": 40
+    },
+    {
+      "epoch": 0.08196721311475409,
+      "grad_norm": 0.4990024268627167,
+      "learning_rate": 8.19672131147541e-05,
+      "loss": 0.1536,
+      "step": 50
+    },
+    {
+      "epoch": 0.09836065573770492,
+      "grad_norm": 1.1272306442260742,
+      "learning_rate": 9.836065573770493e-05,
+      "loss": 0.1524,
+      "step": 60
+    },
+    {
+      "epoch": 0.11475409836065574,
+      "grad_norm": 0.4084867238998413,
+      "learning_rate": 9.993370449424153e-05,
+      "loss": 0.1391,
+      "step": 70
+    },
+    {
+      "epoch": 0.13114754098360656,
+      "grad_norm": 0.6679890751838684,
+      "learning_rate": 9.970476054107763e-05,
+      "loss": 0.1274,
+      "step": 80
+    },
+    {
+      "epoch": 0.14754098360655737,
+      "grad_norm": 1.0677056312561035,
+      "learning_rate": 9.931309898856423e-05,
+      "loss": 0.1283,
+      "step": 90
+    },
+    {
+      "epoch": 0.16393442622950818,
+      "grad_norm": 1.0797268152236938,
+      "learning_rate": 9.876000201222912e-05,
+      "loss": 0.1317,
+      "step": 100
+    },
+    {
+      "epoch": 0.16393442622950818,
+      "eval_loss": 0.16001197695732117,
+      "eval_runtime": 62.2223,
+      "eval_samples_per_second": 1.093,
+      "eval_steps_per_second": 1.093,
+      "step": 100
+    },
+    {
+      "epoch": 0.18032786885245902,
+      "grad_norm": 0.9521363973617554,
+      "learning_rate": 9.804728027590449e-05,
+      "loss": 0.1882,
+      "step": 110
+    },
+    {
+      "epoch": 0.19672131147540983,
+      "grad_norm": 0.835111141204834,
+      "learning_rate": 9.717726700418842e-05,
+      "loss": 0.1557,
+      "step": 120
+    },
+    {
+      "epoch": 0.21311475409836064,
+      "grad_norm": 0.6795278787612915,
+      "learning_rate": 9.61528103442088e-05,
+      "loss": 0.161,
+      "step": 130
+    },
+    {
+      "epoch": 0.22950819672131148,
+      "grad_norm": 0.9582076072692871,
+      "learning_rate": 9.497726404169412e-05,
+      "loss": 0.1615,
+      "step": 140
+    },
+    {
+      "epoch": 0.2459016393442623,
+      "grad_norm": 0.9718400239944458,
+      "learning_rate": 9.365447646187509e-05,
+      "loss": 0.1473,
+      "step": 150
+    },
+    {
+      "epoch": 0.26229508196721313,
+      "grad_norm": 0.881258487701416,
+      "learning_rate": 9.218877799115928e-05,
+      "loss": 0.125,
+      "step": 160
+    },
+    {
+      "epoch": 0.2786885245901639,
+      "grad_norm": 0.9122475385665894,
+      "learning_rate": 9.058496686082132e-05,
+      "loss": 0.1401,
+      "step": 170
+    },
+    {
+      "epoch": 0.29508196721311475,
+      "grad_norm": 1.2337970733642578,
+      "learning_rate": 8.884829343911762e-05,
+      "loss": 0.145,
+      "step": 180
+    },
+    {
+      "epoch": 0.3114754098360656,
+      "grad_norm": 0.3640283942222595,
+      "learning_rate": 8.698444304324835e-05,
+      "loss": 0.1411,
+      "step": 190
+    },
+    {
+      "epoch": 0.32786885245901637,
+      "grad_norm": 0.9699270129203796,
+      "learning_rate": 8.499951732743457e-05,
+      "loss": 0.1605,
+      "step": 200
+    },
+    {
+      "epoch": 0.32786885245901637,
+      "eval_loss": 0.15475818514823914,
+      "eval_runtime": 62.2552,
+      "eval_samples_per_second": 1.092,
+      "eval_steps_per_second": 1.092,
+      "step": 200
+    },
+    {
+      "epoch": 0.3442622950819672,
+      "grad_norm": 1.2340621948242188,
+      "learning_rate": 8.290001430804025e-05,
+      "loss": 0.1302,
+      "step": 210
+    },
+    {
+      "epoch": 0.36065573770491804,
+      "grad_norm": 0.7683020234107971,
+      "learning_rate": 8.06928070911306e-05,
+      "loss": 0.1771,
+      "step": 220
+    },
+    {
+      "epoch": 0.3770491803278688,
+      "grad_norm": 1.2670581340789795,
+      "learning_rate": 7.838512137210565e-05,
+      "loss": 0.1442,
+      "step": 230
+    },
+    {
+      "epoch": 0.39344262295081966,
+      "grad_norm": 0.6768158078193665,
+      "learning_rate": 7.598451178106857e-05,
+      "loss": 0.1389,
+      "step": 240
+    },
+    {
+      "epoch": 0.4098360655737705,
+      "grad_norm": 1.129391074180603,
+      "learning_rate": 7.3498837151366e-05,
+      "loss": 0.1385,
+      "step": 250
+    },
+    {
+      "epoch": 0.4262295081967213,
+      "grad_norm": 1.101136326789856,
+      "learning_rate": 7.093623479226385e-05,
+      "loss": 0.1564,
+      "step": 260
+    },
+    {
+      "epoch": 0.4426229508196721,
+      "grad_norm": 0.2261950969696045,
+      "learning_rate": 6.830509384998114e-05,
+      "loss": 0.1431,
+      "step": 270
+    },
+    {
+      "epoch": 0.45901639344262296,
+      "grad_norm": 0.3458766043186188,
+      "learning_rate": 6.561402784428974e-05,
+      "loss": 0.1206,
+      "step": 280
+    },
+    {
+      "epoch": 0.47540983606557374,
+      "grad_norm": 1.1199126243591309,
+      "learning_rate": 6.287184647058648e-05,
+      "loss": 0.11,
+      "step": 290
+    },
+    {
+      "epoch": 0.4918032786885246,
+      "grad_norm": 0.45597025752067566,
+      "learning_rate": 6.0087526759748304e-05,
+      "loss": 0.1191,
+      "step": 300
+    },
+    {
+      "epoch": 0.4918032786885246,
+      "eval_loss": 0.1460057497024536,
+      "eval_runtime": 62.2673,
+      "eval_samples_per_second": 1.092,
+      "eval_steps_per_second": 1.092,
+      "step": 300
+    },
+    {
+      "epoch": 0.5081967213114754,
+      "grad_norm": 0.734933614730835,
+      "learning_rate": 5.7270183690184495e-05,
+      "loss": 0.1585,
+      "step": 310
+    },
+    {
+      "epoch": 0.5245901639344263,
+      "grad_norm": 0.8497341275215149,
+      "learning_rate": 5.4429040348292256e-05,
+      "loss": 0.1406,
+      "step": 320
+    },
+    {
+      "epoch": 0.5409836065573771,
+      "grad_norm": 0.8217471241950989,
+      "learning_rate": 5.157339773500125e-05,
+      "loss": 0.1388,
+      "step": 330
+    },
+    {
+      "epoch": 0.5573770491803278,
+      "grad_norm": 0.3617191016674042,
+      "learning_rate": 4.8712604317250576e-05,
+      "loss": 0.1493,
+      "step": 340
+    },
+    {
+      "epoch": 0.5737704918032787,
+      "grad_norm": 0.9715378880500793,
+      "learning_rate": 4.585602542407722e-05,
+      "loss": 0.1446,
+      "step": 350
+    },
+    {
+      "epoch": 0.5901639344262295,
+      "grad_norm": 0.8967556953430176,
+      "learning_rate": 4.3013012587503254e-05,
+      "loss": 0.1134,
+      "step": 360
+    },
+    {
+      "epoch": 0.6065573770491803,
+      "grad_norm": 1.1487077474594116,
+      "learning_rate": 4.019287292859016e-05,
+      "loss": 0.1419,
+      "step": 370
+    },
+    {
+      "epoch": 0.6229508196721312,
+      "grad_norm": 0.8955410718917847,
+      "learning_rate": 3.7404838688880446e-05,
+      "loss": 0.1339,
+      "step": 380
+    },
+    {
+      "epoch": 0.639344262295082,
+      "grad_norm": 0.7369993925094604,
+      "learning_rate": 3.465803700697114e-05,
+      "loss": 0.1435,
+      "step": 390
+    },
+    {
+      "epoch": 0.6557377049180327,
+      "grad_norm": 0.4338698089122772,
+      "learning_rate": 3.196146003916084e-05,
+      "loss": 0.1423,
+      "step": 400
+    },
+    {
+      "epoch": 0.6557377049180327,
+      "eval_loss": 0.13493812084197998,
+      "eval_runtime": 62.3222,
+      "eval_samples_per_second": 1.091,
+      "eval_steps_per_second": 1.091,
+      "step": 400
+    },
+    {
+      "epoch": 0.6721311475409836,
+      "grad_norm": 1.00863516330719,
+      "learning_rate": 2.932393552198597e-05,
+      "loss": 0.1575,
+      "step": 410
+    },
+    {
+      "epoch": 0.6885245901639344,
+      "grad_norm": 1.1648294925689697,
+      "learning_rate": 2.6754097873015148e-05,
+      "loss": 0.1372,
+      "step": 420
+    },
+    {
+      "epoch": 0.7049180327868853,
+      "grad_norm": 0.5712903141975403,
+      "learning_rate": 2.426035992450848e-05,
+      "loss": 0.1293,
+      "step": 430
+    },
+    {
+      "epoch": 0.7213114754098361,
+      "grad_norm": 0.6494749188423157,
+      "learning_rate": 2.1850885382476562e-05,
+      "loss": 0.1347,
+      "step": 440
+    },
+    {
+      "epoch": 0.7377049180327869,
+      "grad_norm": 0.7962918281555176,
+      "learning_rate": 1.9533562101300097e-05,
+      "loss": 0.1157,
+      "step": 450
+    },
+    {
+      "epoch": 0.7540983606557377,
+      "grad_norm": 0.7322007417678833,
+      "learning_rate": 1.7315976261399696e-05,
+      "loss": 0.1339,
+      "step": 460
+    },
+    {
+      "epoch": 0.7704918032786885,
+      "grad_norm": 1.0486736297607422,
+      "learning_rate": 1.5205387534490806e-05,
+      "loss": 0.1327,
+      "step": 470
+    },
+    {
+      "epoch": 0.7868852459016393,
+      "grad_norm": 0.28778010606765747,
+      "learning_rate": 1.3208705317724006e-05,
+      "loss": 0.0872,
+      "step": 480
+    },
+    {
+      "epoch": 0.8032786885245902,
+      "grad_norm": 1.064375638961792,
+      "learning_rate": 1.1332466114513512e-05,
+      "loss": 0.1348,
+      "step": 490
+    },
+    {
+      "epoch": 0.819672131147541,
+      "grad_norm": 0.7255303263664246,
+      "learning_rate": 9.582812136100783e-06,
+      "loss": 0.1065,
+      "step": 500
+    },
+    {
+      "epoch": 0.819672131147541,
+      "eval_loss": 0.1266276091337204,
+      "eval_runtime": 62.2443,
+      "eval_samples_per_second": 1.092,
+      "eval_steps_per_second": 1.092,
+      "step": 500
+    },
+    {
+      "epoch": 0.8360655737704918,
+      "grad_norm": 0.8413804173469543,
+      "learning_rate": 7.965471193905954e-06,
+      "loss": 0.114,
+      "step": 510
+    },
+    {
+      "epoch": 0.8524590163934426,
+      "grad_norm": 0.9656268954277039,
+      "learning_rate": 6.4857379484922375e-06,
+      "loss": 0.1114,
+      "step": 520
+    },
+    {
+      "epoch": 0.8688524590163934,
+      "grad_norm": 0.6842993497848511,
+      "learning_rate": 5.148456576529081e-06,
+      "loss": 0.1265,
+      "step": 530
+    },
+    {
+      "epoch": 0.8852459016393442,
+      "grad_norm": 1.1006067991256714,
+      "learning_rate": 3.958004912496127e-06,
+      "loss": 0.1418,
+      "step": 540
+    },
+    {
+      "epoch": 0.9016393442622951,
+      "grad_norm": 0.8723937273025513,
+      "learning_rate": 2.918280117043709e-06,
+      "loss": 0.1555,
+      "step": 550
+    },
+    {
+      "epoch": 0.9180327868852459,
+      "grad_norm": 0.810757577419281,
+      "learning_rate": 2.032685918926508e-06,
+      "loss": 0.1195,
+      "step": 560
+    },
+    {
+      "epoch": 0.9344262295081968,
+      "grad_norm": 1.3418116569519043,
+      "learning_rate": 1.3041214722768035e-06,
+      "loss": 0.1395,
+      "step": 570
+    },
+    {
+      "epoch": 0.9508196721311475,
+      "grad_norm": 0.7165635228157043,
+      "learning_rate": 7.349718656945504e-07,
+      "loss": 0.1296,
+      "step": 580
+    },
+    {
+      "epoch": 0.9672131147540983,
+      "grad_norm": 0.7757364511489868,
+      "learning_rate": 3.271003142248652e-07,
+      "loss": 0.1411,
+      "step": 590
+    },
+    {
+      "epoch": 0.9836065573770492,
+      "grad_norm": 0.8983824253082275,
+      "learning_rate": 8.184205978370996e-08,
+      "loss": 0.1174,
+      "step": 600
+    },
+    {
+      "epoch": 0.9836065573770492,
+      "eval_loss": 0.12527307868003845,
+      "eval_runtime": 62.1952,
+      "eval_samples_per_second": 1.093,
+      "eval_steps_per_second": 1.093,
+      "step": 600
+    },
+    {
+      "epoch": 1.0,
+      "grad_norm": 0.8321591019630432,
+      "learning_rate": 0.0,
+      "loss": 0.1184,
+      "step": 610
+    },
+    {
+      "epoch": 1.0,
+      "step": 610,
+      "total_flos": 1.134915283869696e+16,
+      "train_loss": 0.04400509732668517,
+      "train_runtime": 2473.793,
+      "train_samples_per_second": 0.986,
+      "train_steps_per_second": 0.247
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 610,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.134915283869696e+16,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
training_eval_loss.png ADDED
training_loss.png ADDED
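The two added PNGs presumably visualize the curves recorded in trainer_state.json. A small sketch for rebuilding similar figures from `log_history` (it assumes a locally downloaded copy of the file and matplotlib; it is not the script that produced the committed images):

```python
# Sketch: redraw loss curves similar to training_loss.png / training_eval_loss.png
# from trainer_state.json (assumes the file has been downloaded locally).
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="training loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curves.png")
```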