Lansechen committed on
Commit
6c9527e
·
verified ·
1 Parent(s): 350ee00

Model save

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: Qwen/Qwen2.5-3B-Instruct
3
+ library_name: transformers
4
+ model_name: Qwen2.5-3B-Instruct-Distill-bs17k-fem32768-batch32-epoch3-8192
5
+ tags:
6
+ - generated_from_trainer
7
+ - trl
8
+ - sft
9
+ licence: license
10
+ ---
11
+
12
+ # Model Card for Qwen2.5-3B-Instruct-Distill-bs17k-fem32768-batch32-epoch3-8192
13
+
14
+ This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct).
15
+ It has been trained using [TRL](https://github.com/huggingface/trl).
16
+
17
+ ## Quick start
18
+
19
+ ```python
20
+ from transformers import pipeline
21
+
22
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
23
+ generator = pipeline("text-generation", model="Lansechen/Qwen2.5-3B-Instruct-Distill-bs17k-fem32768-batch32-epoch3-8192", device="cuda")
24
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
25
+ print(output["generated_text"])
26
+ ```
27
+
28
+ ## Training procedure
29
+
30
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenran1995-the-chinese-university-of-hong-kong/huggingface/runs/qter43gp)
31
+
32
+
33
+ This model was trained with SFT.
34
+
35
+ ### Framework versions
36
+
37
+ - TRL: 0.16.0.dev0
38
+ - Transformers: 4.49.0
39
+ - Pytorch: 2.5.1+cu121
40
+ - Datasets: 3.3.1
41
+ - Tokenizers: 0.21.0
42
+
43
+ ## Citations
44
+
45
+
46
+
47
+ Cite TRL as:
48
+
49
+ ```bibtex
50
+ @misc{vonwerra2022trl,
51
+ title = {{TRL: Transformer Reinforcement Learning}},
52
+ author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
53
+ year = 2020,
54
+ journal = {GitHub repository},
55
+ publisher = {GitHub},
56
+ howpublished = {\url{https://github.com/huggingface/trl}}
57
+ }
58
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 139688442593280.0,
3
+ "train_loss": 0.6959928004972397,
4
+ "train_runtime": 2479.5716,
5
+ "train_samples": 3794,
6
+ "train_samples_per_second": 4.873,
7
+ "train_steps_per_second": 0.038
8
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 151643,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 151645,
6
+ 151643
7
+ ],
8
+ "pad_token_id": 151643,
9
+ "repetition_penalty": 1.05,
10
+ "temperature": 0.7,
11
+ "top_k": 20,
12
+ "top_p": 0.8,
13
+ "transformers_version": "4.49.0"
14
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "total_flos": 139688442593280.0,
3
+ "train_loss": 0.6959928004972397,
4
+ "train_runtime": 2479.5716,
5
+ "train_samples": 3794,
6
+ "train_samples_per_second": 4.873,
7
+ "train_steps_per_second": 0.038
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.9206349206349205,
5
+ "eval_steps": 500,
6
+ "global_step": 93,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.15873015873015872,
13
+ "grad_norm": 3.1405258178710938,
14
+ "learning_rate": 5e-05,
15
+ "loss": 0.9988,
16
+ "mean_token_accuracy": 0.7276985734701157,
17
+ "step": 5
18
+ },
19
+ {
20
+ "epoch": 0.31746031746031744,
21
+ "grad_norm": 0.8156048059463501,
22
+ "learning_rate": 4.964250209756261e-05,
23
+ "loss": 0.8915,
24
+ "mean_token_accuracy": 0.7418786332011222,
25
+ "step": 10
26
+ },
27
+ {
28
+ "epoch": 0.47619047619047616,
29
+ "grad_norm": 0.5297970175743103,
30
+ "learning_rate": 4.8581368812494645e-05,
31
+ "loss": 0.8028,
32
+ "mean_token_accuracy": 0.759541466832161,
33
+ "step": 15
34
+ },
35
+ {
36
+ "epoch": 0.6349206349206349,
37
+ "grad_norm": 0.4519253075122833,
38
+ "learning_rate": 4.6850320404673266e-05,
39
+ "loss": 0.7684,
40
+ "mean_token_accuracy": 0.7664183840155602,
41
+ "step": 20
42
+ },
43
+ {
44
+ "epoch": 0.7936507936507936,
45
+ "grad_norm": 0.38354581594467163,
46
+ "learning_rate": 4.450436542297082e-05,
47
+ "loss": 0.747,
48
+ "mean_token_accuracy": 0.7712402656674385,
49
+ "step": 25
50
+ },
51
+ {
52
+ "epoch": 0.9523809523809523,
53
+ "grad_norm": 0.32026010751724243,
54
+ "learning_rate": 4.161805266606917e-05,
55
+ "loss": 0.7254,
56
+ "mean_token_accuracy": 0.7770746260881424,
57
+ "step": 30
58
+ },
59
+ {
60
+ "epoch": 1.0952380952380953,
61
+ "grad_norm": 0.38725751638412476,
62
+ "learning_rate": 3.8283102201201284e-05,
63
+ "loss": 0.6948,
64
+ "mean_token_accuracy": 0.7841671456893285,
65
+ "step": 35
66
+ },
67
+ {
68
+ "epoch": 1.253968253968254,
69
+ "grad_norm": 0.33806172013282776,
70
+ "learning_rate": 3.460549072133806e-05,
71
+ "loss": 0.6656,
72
+ "mean_token_accuracy": 0.7906794264912606,
73
+ "step": 40
74
+ },
75
+ {
76
+ "epoch": 1.4126984126984126,
77
+ "grad_norm": 0.311052531003952,
78
+ "learning_rate": 3.070208386114892e-05,
79
+ "loss": 0.6712,
80
+ "mean_token_accuracy": 0.7883317843079567,
81
+ "step": 45
82
+ },
83
+ {
84
+ "epoch": 1.5714285714285714,
85
+ "grad_norm": 0.2916868031024933,
86
+ "learning_rate": 2.6696922488622945e-05,
87
+ "loss": 0.6636,
88
+ "mean_token_accuracy": 0.7903473630547524,
89
+ "step": 50
90
+ },
91
+ {
92
+ "epoch": 1.7301587301587302,
93
+ "grad_norm": 0.2753700315952301,
94
+ "learning_rate": 2.2717280985058025e-05,
95
+ "loss": 0.6436,
96
+ "mean_token_accuracy": 0.7960700690746307,
97
+ "step": 55
98
+ },
99
+ {
100
+ "epoch": 1.8888888888888888,
101
+ "grad_norm": 0.2455938756465912,
102
+ "learning_rate": 1.888962277178548e-05,
103
+ "loss": 0.6392,
104
+ "mean_token_accuracy": 0.7972360864281655,
105
+ "step": 60
106
+ },
107
+ {
108
+ "epoch": 2.0317460317460316,
109
+ "grad_norm": 0.3443191349506378,
110
+ "learning_rate": 1.5335581607249064e-05,
111
+ "loss": 0.6408,
112
+ "mean_token_accuracy": 0.7964212745428085,
113
+ "step": 65
114
+ },
115
+ {
116
+ "epoch": 2.1904761904761907,
117
+ "grad_norm": 0.2579975724220276,
118
+ "learning_rate": 1.216809635913745e-05,
119
+ "loss": 0.6068,
120
+ "mean_token_accuracy": 0.8054686427116394,
121
+ "step": 70
122
+ },
123
+ {
124
+ "epoch": 2.3492063492063493,
125
+ "grad_norm": 0.26707184314727783,
126
+ "learning_rate": 9.487822079201895e-06,
127
+ "loss": 0.6046,
128
+ "mean_token_accuracy": 0.8058722853660584,
129
+ "step": 75
130
+ },
131
+ {
132
+ "epoch": 2.507936507936508,
133
+ "grad_norm": 0.24455317854881287,
134
+ "learning_rate": 7.379931428156497e-06,
135
+ "loss": 0.6065,
136
+ "mean_token_accuracy": 0.8047785997390747,
137
+ "step": 80
138
+ },
139
+ {
140
+ "epoch": 2.6666666666666665,
141
+ "grad_norm": 0.2308081090450287,
142
+ "learning_rate": 5.911408093673812e-06,
143
+ "loss": 0.6034,
144
+ "mean_token_accuracy": 0.8056193083524704,
145
+ "step": 85
146
+ },
147
+ {
148
+ "epoch": 2.825396825396825,
149
+ "grad_norm": 0.22246606647968292,
150
+ "learning_rate": 5.12891821013231e-06,
151
+ "loss": 0.6008,
152
+ "mean_token_accuracy": 0.8066127508878708,
153
+ "step": 90
154
+ },
155
+ {
156
+ "epoch": 2.9206349206349205,
157
+ "mean_token_accuracy": 0.8056537633140882,
158
+ "step": 93,
159
+ "total_flos": 139688442593280.0,
160
+ "train_loss": 0.6959928004972397,
161
+ "train_runtime": 2479.5716,
162
+ "train_samples_per_second": 4.873,
163
+ "train_steps_per_second": 0.038
164
+ }
165
+ ],
166
+ "logging_steps": 5,
167
+ "max_steps": 93,
168
+ "num_input_tokens_seen": 0,
169
+ "num_train_epochs": 3,
170
+ "save_steps": 100,
171
+ "stateful_callbacks": {
172
+ "TrainerControl": {
173
+ "args": {
174
+ "should_epoch_stop": false,
175
+ "should_evaluate": false,
176
+ "should_log": false,
177
+ "should_save": true,
178
+ "should_training_stop": true
179
+ },
180
+ "attributes": {}
181
+ }
182
+ },
183
+ "total_flos": 139688442593280.0,
184
+ "train_batch_size": 4,
185
+ "trial_name": null,
186
+ "trial_params": null
187
+ }