DeeLearning committed (verified)
Commit 3c0e154 · 1 Parent(s): 3ebe53d

Model save

README.md CHANGED
@@ -1,10 +1,8 @@
 ---
-datasets: HuggingFaceH4/Bespoke-Stratos-17k
 library_name: transformers
 model_name: Qwen2.5-1.5B-Open-R1-Distill
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -12,7 +10,7 @@ licence: license
 
 # Model Card for Qwen2.5-1.5B-Open-R1-Distill
 
-This model is a fine-tuned version of [None](https://huggingface.co/None) on the [HuggingFaceH4/Bespoke-Stratos-17k](https://huggingface.co/datasets/HuggingFaceH4/Bespoke-Stratos-17k) dataset.
+This model is a fine-tuned version of [None](https://huggingface.co/None).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -28,7 +26,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/xt0521/huggingface/runs/eoh1k9sl)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/xt0521/huggingface/runs/glyjssry)
 
 
 This model was trained with SFT.
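The Quick start section of the README is untouched by this commit; only its hunk header (`print(output["generated_text"])`) is visible above. For orientation, here is a minimal sketch of the pipeline-style usage that TRL-generated model cards of this kind describe, assuming a hypothetical repo id (the diff does not name where the checkpoint is published):

```python
from transformers import pipeline

# Hypothetical repo id for illustration; substitute the actual checkpoint location.
model_id = "DeeLearning/Qwen2.5-1.5B-Open-R1-Distill"

generator = pipeline("text-generation", model=model_id)

# Chat-style prompt; the pipeline applies the tokenizer's chat template.
question = "Which is larger, 9.11 or 9.9? Explain your reasoning."
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=256,
    return_full_text=False,
)[0]
print(output["generated_text"])
```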
all_results.json CHANGED
@@ -1,14 +1,9 @@
 {
   "epoch": 1.0,
-  "eval_loss": 0.7885702848434448,
-  "eval_runtime": 1.9396,
-  "eval_samples": 100,
-  "eval_samples_per_second": 65.994,
-  "eval_steps_per_second": 2.062,
   "total_flos": 76916824473600.0,
-  "train_loss": 0.8114958641797128,
-  "train_runtime": 1241.942,
+  "train_loss": 0.7337305870281874,
+  "train_runtime": 1235.0855,
   "train_samples": 16610,
-  "train_samples_per_second": 17.4,
-  "train_steps_per_second": 0.136
+  "train_samples_per_second": 17.497,
+  "train_steps_per_second": 0.137
 }
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/data/share/huggingface/models--Qwen--Qwen2.5-1.5B-Instruct/snapshots/989aa7980e4cf806f80c7fef2b1adb7bc71aa306",
+  "_name_or_path": "/data/share8/cdn/open-r1/data/Qwen2.5-1.5B-Open-R1-Distill",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
@@ -23,7 +23,7 @@
   "tie_word_embeddings": true,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.48.2",
-  "use_cache": true,
+  "use_cache": false,
   "use_sliding_window": false,
   "vocab_size": 151936
 }
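The saved config now points `_name_or_path` at the local training output directory and sets `use_cache` to false, which is commonly a side effect of enabling gradient checkpointing during training. A minimal sketch (path taken from this diff; substitute your own checkpoint location) of inspecting the flag and re-enabling the KV cache at load time for faster generation:

```python
from transformers import AutoConfig, AutoModelForCausalLM

# Local path from _name_or_path in this diff; replace with your own checkpoint.
checkpoint = "/data/share8/cdn/open-r1/data/Qwen2.5-1.5B-Open-R1-Distill"

config = AutoConfig.from_pretrained(checkpoint)
print(config.use_cache)  # False in the saved config

# Override the flag for inference; this only changes the loaded instance, not the file on disk.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint,
    use_cache=True,
    torch_dtype="bfloat16",
)
```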
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:436e31c0fdab3b43d08aa0f4ea6cf8eae8ff072525b206062dfd58064686980d
+oid sha256:7dddd44bff605045e709f6628512c5276aea5b9b04fe1662e05e295dabaee4f2
 size 3087467144
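The `model.safetensors` entry is a Git LFS pointer, so only the pointer changed in this commit; the `oid` is the SHA-256 of the actual weight file. A small sketch for checking a downloaded file against the new pointer:

```python
import hashlib

# Values from the new LFS pointer in this commit.
EXPECTED_OID = "7dddd44bff605045e709f6628512c5276aea5b9b04fe1662e05e295dabaee4f2"
EXPECTED_SIZE = 3087467144

def matches_lfs_pointer(path: str) -> bool:
    """Return True if the file's SHA-256 and size match the LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

# Usage (path is wherever the checkpoint was downloaded):
# print(matches_lfs_pointer("model.safetensors"))
```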
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
   "epoch": 1.0,
   "total_flos": 76916824473600.0,
-  "train_loss": 0.8114958641797128,
-  "train_runtime": 1241.942,
+  "train_loss": 0.7337305870281874,
+  "train_runtime": 1235.0855,
   "train_samples": 16610,
-  "train_samples_per_second": 17.4,
-  "train_steps_per_second": 0.136
+  "train_samples_per_second": 17.497,
+  "train_steps_per_second": 0.137
 }
trainer_state.json CHANGED
@@ -10,251 +10,251 @@
   "log_history": [
     {
       "epoch": 0.029585798816568046,
-      "grad_norm": 0.5857761225642123,
+      "grad_norm": 0.07426696918416174,
       "learning_rate": 5.882352941176471e-06,
-      "loss": 1.0993,
+      "loss": 0.7515,
       "step": 5
     },
     {
       "epoch": 0.05917159763313609,
-      "grad_norm": 0.4995625436387945,
+      "grad_norm": 0.08040276233943516,
       "learning_rate": 1.1764705882352942e-05,
-      "loss": 1.0431,
+      "loss": 0.755,
       "step": 10
     },
     {
       "epoch": 0.08875739644970414,
-      "grad_norm": 0.26485019625158435,
+      "grad_norm": 0.09568613129719651,
       "learning_rate": 1.7647058823529414e-05,
-      "loss": 0.9663,
+      "loss": 0.7553,
       "step": 15
     },
     {
       "epoch": 0.11834319526627218,
-      "grad_norm": 0.19810328582136066,
+      "grad_norm": 0.11073070319325445,
       "learning_rate": 1.9980782984658682e-05,
-      "loss": 0.9073,
+      "loss": 0.7481,
       "step": 20
     },
     {
       "epoch": 0.14792899408284024,
-      "grad_norm": 0.15545420301777027,
+      "grad_norm": 0.1085253681695973,
       "learning_rate": 1.9863613034027224e-05,
-      "loss": 0.8704,
+      "loss": 0.742,
       "step": 25
     },
     {
       "epoch": 0.17751479289940827,
-      "grad_norm": 0.1251471825263471,
+      "grad_norm": 0.10317427695342557,
       "learning_rate": 1.9641197940012136e-05,
-      "loss": 0.8413,
+      "loss": 0.7317,
       "step": 30
     },
     {
       "epoch": 0.20710059171597633,
-      "grad_norm": 0.10400226170447692,
+      "grad_norm": 0.1095919075191608,
       "learning_rate": 1.9315910880512792e-05,
-      "loss": 0.835,
+      "loss": 0.7394,
       "step": 35
     },
     {
       "epoch": 0.23668639053254437,
-      "grad_norm": 0.09174900478609728,
+      "grad_norm": 0.10584984713775991,
       "learning_rate": 1.8891222681391853e-05,
-      "loss": 0.8337,
+      "loss": 0.7445,
       "step": 40
     },
     {
       "epoch": 0.26627218934911245,
-      "grad_norm": 0.08878405648463664,
+      "grad_norm": 0.1052036771871902,
       "learning_rate": 1.8371664782625287e-05,
-      "loss": 0.8179,
+      "loss": 0.7382,
       "step": 45
     },
     {
       "epoch": 0.2958579881656805,
-      "grad_norm": 0.08734426916492237,
+      "grad_norm": 0.10185995684228026,
       "learning_rate": 1.7762780887657576e-05,
-      "loss": 0.8078,
+      "loss": 0.7327,
       "step": 50
     },
     {
       "epoch": 0.3254437869822485,
-      "grad_norm": 0.08654300161749975,
+      "grad_norm": 0.11605723315982408,
       "learning_rate": 1.7071067811865477e-05,
-      "loss": 0.7971,
+      "loss": 0.7265,
       "step": 55
     },
     {
       "epoch": 0.35502958579881655,
-      "grad_norm": 0.09049709236672561,
+      "grad_norm": 0.09496481679546319,
       "learning_rate": 1.6303906161279554e-05,
-      "loss": 0.8071,
+      "loss": 0.7404,
       "step": 60
     },
     {
       "epoch": 0.38461538461538464,
-      "grad_norm": 0.08688275236146101,
+      "grad_norm": 0.09399123086469602,
       "learning_rate": 1.5469481581224274e-05,
-      "loss": 0.7809,
+      "loss": 0.7183,
       "step": 65
     },
     {
       "epoch": 0.41420118343195267,
-      "grad_norm": 0.07889773883483042,
+      "grad_norm": 0.09454325149914487,
       "learning_rate": 1.4576697415156818e-05,
-      "loss": 0.7826,
+      "loss": 0.7239,
       "step": 70
     },
     {
       "epoch": 0.4437869822485207,
-      "grad_norm": 0.08580994154606769,
+      "grad_norm": 0.09665316804682329,
       "learning_rate": 1.3635079705638298e-05,
-      "loss": 0.7936,
+      "loss": 0.7374,
       "step": 75
     },
     {
       "epoch": 0.47337278106508873,
-      "grad_norm": 0.08418237174588653,
+      "grad_norm": 0.0931576465014015,
       "learning_rate": 1.2654675551080724e-05,
-      "loss": 0.7749,
+      "loss": 0.7206,
       "step": 80
     },
     {
       "epoch": 0.5029585798816568,
-      "grad_norm": 0.08456036679862641,
+      "grad_norm": 0.09445002041196521,
       "learning_rate": 1.164594590280734e-05,
-      "loss": 0.7787,
+      "loss": 0.727,
       "step": 85
     },
     {
       "epoch": 0.5325443786982249,
-      "grad_norm": 0.08496080754943852,
+      "grad_norm": 0.08585910164532164,
       "learning_rate": 1.0619653946285948e-05,
-      "loss": 0.7781,
+      "loss": 0.7287,
       "step": 90
     },
     {
       "epoch": 0.5621301775147929,
-      "grad_norm": 0.08860936983983704,
+      "grad_norm": 0.0926566404725236,
       "learning_rate": 9.586750257511868e-06,
-      "loss": 0.7852,
+      "loss": 0.7378,
       "step": 95
     },
     {
       "epoch": 0.591715976331361,
-      "grad_norm": 0.08415248626283894,
+      "grad_norm": 0.08672021822123627,
       "learning_rate": 8.558255959926533e-06,
-      "loss": 0.7609,
+      "loss": 0.7164,
       "step": 100
     },
     {
       "epoch": 0.591715976331361,
-      "eval_loss": 0.7962175607681274,
-      "eval_runtime": 1.9652,
-      "eval_samples_per_second": 65.135,
-      "eval_steps_per_second": 2.035,
+      "eval_loss": 0.7641129493713379,
+      "eval_runtime": 1.9602,
+      "eval_samples_per_second": 65.298,
+      "eval_steps_per_second": 2.041,
       "step": 100
     },
     {
       "epoch": 0.621301775147929,
-      "grad_norm": 0.08441236349679139,
+      "grad_norm": 0.08622854416758327,
       "learning_rate": 7.545145128592009e-06,
-      "loss": 0.7649,
+      "loss": 0.7216,
       "step": 105
     },
     {
       "epoch": 0.650887573964497,
-      "grad_norm": 0.08177150474959695,
+      "grad_norm": 0.08204894186576381,
       "learning_rate": 6.558227696373617e-06,
-      "loss": 0.7761,
+      "loss": 0.7346,
       "step": 110
     },
     {
       "epoch": 0.6804733727810651,
-      "grad_norm": 0.07746543923576864,
+      "grad_norm": 0.08097299352958673,
       "learning_rate": 5.608034111526298e-06,
-      "loss": 0.7704,
+      "loss": 0.73,
       "step": 115
     },
     {
       "epoch": 0.7100591715976331,
-      "grad_norm": 0.07544691836481318,
+      "grad_norm": 0.07682552342075682,
       "learning_rate": 4.704702977392914e-06,
-      "loss": 0.7594,
+      "loss": 0.7212,
       "step": 120
     },
     {
       "epoch": 0.7396449704142012,
-      "grad_norm": 0.08158636138275724,
+      "grad_norm": 0.08086018117086849,
       "learning_rate": 3.857872873103322e-06,
-      "loss": 0.7619,
+      "loss": 0.7248,
       "step": 125
     },
     {
       "epoch": 0.7692307692307693,
-      "grad_norm": 0.0768086707215383,
+      "grad_norm": 0.07606171025014277,
       "learning_rate": 3.0765795095517026e-06,
-      "loss": 0.7634,
+      "loss": 0.728,
       "step": 130
     },
     {
       "epoch": 0.7988165680473372,
-      "grad_norm": 0.0838125766014346,
+      "grad_norm": 0.08257799296717885,
       "learning_rate": 2.369159318001937e-06,
-      "loss": 0.7665,
+      "loss": 0.7323,
       "step": 135
     },
     {
       "epoch": 0.8284023668639053,
-      "grad_norm": 0.07598473377325965,
+      "grad_norm": 0.07889584661038457,
       "learning_rate": 1.743160500034443e-06,
-      "loss": 0.7579,
+      "loss": 0.7251,
       "step": 140
     },
     {
       "epoch": 0.8579881656804734,
-      "grad_norm": 0.07918517584309687,
+      "grad_norm": 0.08171166394737774,
       "learning_rate": 1.2052624879351105e-06,
-      "loss": 0.7646,
+      "loss": 0.7324,
       "step": 145
     },
     {
       "epoch": 0.8875739644970414,
-      "grad_norm": 0.0757111355601306,
+      "grad_norm": 0.07432343760979292,
       "learning_rate": 7.612046748871327e-07,
-      "loss": 0.7747,
+      "loss": 0.743,
       "step": 150
     },
     {
       "epoch": 0.9171597633136095,
-      "grad_norm": 0.07559936201946106,
+      "grad_norm": 0.07790477411393834,
       "learning_rate": 4.1572517541747294e-07,
-      "loss": 0.7694,
+      "loss": 0.7386,
       "step": 155
     },
     {
       "epoch": 0.9467455621301775,
-      "grad_norm": 0.07247200888081211,
+      "grad_norm": 0.07339780497225679,
       "learning_rate": 1.7251026952640583e-07,
-      "loss": 0.7687,
+      "loss": 0.7383,
       "step": 160
     },
     {
       "epoch": 0.9763313609467456,
-      "grad_norm": 0.0703519415777988,
+      "grad_norm": 0.07176968893681754,
       "learning_rate": 3.4155069933301535e-08,
-      "loss": 0.7526,
+      "loss": 0.7225,
       "step": 165
     },
     {
       "epoch": 1.0,
       "step": 169,
       "total_flos": 76916824473600.0,
-      "train_loss": 0.8114958641797128,
-      "train_runtime": 1241.942,
-      "train_samples_per_second": 17.4,
-      "train_steps_per_second": 0.136
+      "train_loss": 0.7337305870281874,
+      "train_runtime": 1235.0855,
+      "train_samples_per_second": 17.497,
+      "train_steps_per_second": 0.137
     }
   ],
   "logging_steps": 5,
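The `log_history` in `trainer_state.json` is a list of dicts: a training entry every 5 steps, one evaluation entry at step 100, and a closing summary. A minimal sketch for pulling the loss curve out of such a file, assuming it is read from the checkpoint directory:

```python
import json

# Load the Trainer state saved alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

steps, losses = [], []
for record in state["log_history"]:
    if "loss" in record:            # per-step training entries
        steps.append(record["step"])
        losses.append(record["loss"])
    elif "eval_loss" in record:     # evaluation entries (step 100 in this run)
        print(f"eval @ step {record['step']}: loss={record['eval_loss']:.4f}")

print(f"{len(steps)} training points; final logged loss {losses[-1]}")
```

The closing summary record uses `train_loss` rather than `loss`, so it is skipped by both branches above.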
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f5da9a21d9c3f1b688b6f1d7f3e417cb01a4678dcb6fc905d7da7d23aa708536
+oid sha256:dbaa3b15f07865886766a303e293d83c8ac37c11d3cc809fc3bdc3ab96eb0eba
 size 7352