qingyangzhang committed
Commit 661710d · verified · 1 Parent(s): 3e84241

Model save

README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ model_name: Qwen2.5-3B-EMPO-TQA
+ tags:
+ - generated_from_trainer
+ - trl
+ - grpo
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-3B-EMPO-TQA
+
+ This model is a fine-tuned version of Qwen2.5-3B.
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ # Load the model into a chat-style text-generation pipeline and ask it a question.
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="qingyangzhang/Qwen2.5-3B-EMPO-TQA", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
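+
+ The sampling defaults shipped in this repository's `generation_config.json` (temperature 0.7, top_p 0.8, top_k 20, repetition penalty 1.05) are picked up automatically at generation time. To make them explicit, or to experiment with other values, pass them as generation kwargs; a minimal sketch reusing `generator` and `question` from above:
+
+ ```python
+ output = generator(
+     [{"role": "user", "content": question}],
+     max_new_tokens=128,
+     do_sample=True,
+     temperature=0.7,          # these four values mirror the repo's generation_config.json
+     top_p=0.8,
+     top_k=20,
+     repetition_penalty=1.05,
+     return_full_text=False,
+ )[0]
+ print(output["generated_text"])
+ ```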
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/zqyoung1127-tianjin-university/huggingface/runs/gaqlrb6w)
+
+ This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
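+ For orientation, the outline below shows what a GRPO run with TRL's `GRPOTrainer` looks like. It is a minimal sketch, not the original training script: the prompts, the reward function body, and the base model id are placeholders (the logged metric `rewards/semantic_entropy` suggests the actual run rewarded completions with a semantic-entropy-based score, and "TQA" in the model name presumably refers to a TriviaQA-style question set).
+
+ ```python
+ # Minimal GRPO sketch with TRL; placeholders only, not the original EMPO-TQA script.
+ from datasets import Dataset
+ from trl import GRPOConfig, GRPOTrainer
+
+ # Toy prompt dataset: GRPOTrainer expects a "prompt" column (chat format used here).
+ train_dataset = Dataset.from_dict({
+     "prompt": [
+         [{"role": "user", "content": "Which country hosted the 1966 FIFA World Cup?"}],
+         [{"role": "user", "content": "Who wrote the novel 'The Remains of the Day'?"}],
+     ],
+ })
+
+ def semantic_entropy_reward(completions, **kwargs):
+     # Placeholder reward: the real run appears to score each sampled completion with a
+     # semantic-entropy-based signal (see rewards/semantic_entropy in trainer_state.json).
+     return [0.0 for _ in completions]
+
+ training_args = GRPOConfig(
+     output_dir="Qwen2.5-3B-EMPO-TQA",
+     learning_rate=2e-6,   # matches the initial learning rate in the training logs
+     num_train_epochs=1,
+     logging_steps=1,
+ )
+
+ trainer = GRPOTrainer(
+     model="Qwen/Qwen2.5-3B",  # assumed base model
+     reward_funcs=semantic_entropy_reward,
+     args=training_args,
+     train_dataset=train_dataset,
+ )
+ trainer.train()
+ ```
+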
+ ### Framework versions
+
+ - TRL: 0.14.0
+ - Transformers: 4.48.3
+ - PyTorch: 2.5.1
+ - Datasets: 3.1.0
+ - Tokenizers: 0.21.0
+
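+ The raw per-step metrics behind the badge above (reward, KL, completion length) are also stored in `trainer_state.json` in this repository. A small sketch for inspecting the reward curve locally; it only assumes the file can be fetched with `huggingface_hub`:
+
+ ```python
+ import json
+
+ from huggingface_hub import hf_hub_download
+
+ # Download trainer_state.json from this repo and print the per-step reward curve.
+ path = hf_hub_download(repo_id="qingyangzhang/Qwen2.5-3B-EMPO-TQA", filename="trainer_state.json")
+ with open(path) as f:
+     state = json.load(f)
+
+ # The last log_history entry is a run summary without a "reward" field, so skip it.
+ for entry in state["log_history"]:
+     if "reward" in entry:
+         print(f'step {entry["step"]:>2}: reward={entry["reward"]:.3f}  kl={entry["kl"]:.4f}')
+ ```
+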
+ ## Citations
+
+ Cite GRPO as:
+
+ ```bibtex
+ @article{zhihong2024deepseekmath,
+     title  = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+     author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+     year   = 2024,
+     eprint = {arXiv:2402.03300},
+ }
+ ```
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.00023655204281105568,
+   "train_runtime": 1010.415,
+   "train_samples": 490,
+   "train_samples_per_second": 0.485,
+   "train_steps_per_second": 0.01
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.48.3"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "total_flos": 0.0,
+   "train_loss": 0.00023655204281105568,
+   "train_runtime": 1010.415,
+   "train_samples": 490,
+   "train_samples_per_second": 0.485,
+   "train_steps_per_second": 0.01
+ }
trainer_state.json ADDED
@@ -0,0 +1,162 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.975609756097561,
+   "eval_steps": 100,
+   "global_step": 10,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "completion_length": 19.635416507720947,
+       "epoch": 0.0975609756097561,
+       "grad_norm": 0.5056069493293762,
+       "kl": 0.0,
+       "learning_rate": 2e-06,
+       "loss": 0.0,
+       "reward": 0.5598958358168602,
+       "reward_std": 0.09434993285685778,
+       "rewards/semantic_entropy": 0.5598958358168602,
+       "step": 1
+     },
+     {
+       "completion_length": 17.5711807012558,
+       "epoch": 0.1951219512195122,
+       "grad_norm": 1.0753142833709717,
+       "kl": 0.0,
+       "learning_rate": 1.9396926207859082e-06,
+       "loss": 0.0,
+       "reward": 0.5069444291293621,
+       "reward_std": 0.130544594489038,
+       "rewards/semantic_entropy": 0.5069444291293621,
+       "step": 2
+     },
+     {
+       "completion_length": 17.8680557012558,
+       "epoch": 0.2926829268292683,
+       "grad_norm": 1.0308548212051392,
+       "kl": 0.0010528564453125,
+       "learning_rate": 1.766044443118978e-06,
+       "loss": 0.0,
+       "reward": 0.5251736156642437,
+       "reward_std": 0.13977272436022758,
+       "rewards/semantic_entropy": 0.5251736156642437,
+       "step": 3
+     },
+     {
+       "completion_length": 20.52256941795349,
+       "epoch": 0.3902439024390244,
+       "grad_norm": 0.6616184711456299,
+       "kl": 0.00194549560546875,
+       "learning_rate": 1.5e-06,
+       "loss": 0.0001,
+       "reward": 0.5691550932824612,
+       "reward_std": 0.13807372376322746,
+       "rewards/semantic_entropy": 0.5691550932824612,
+       "step": 4
+     },
+     {
+       "completion_length": 19.373263835906982,
+       "epoch": 0.4878048780487805,
+       "grad_norm": 0.7476760149002075,
+       "kl": 0.003894805908203125,
+       "learning_rate": 1.1736481776669305e-06,
+       "loss": 0.0002,
+       "reward": 0.5060763992369175,
+       "reward_std": 0.10967991594225168,
+       "rewards/semantic_entropy": 0.5060763992369175,
+       "step": 5
+     },
+     {
+       "completion_length": 19.572916984558105,
+       "epoch": 0.5853658536585366,
+       "grad_norm": 0.5244656205177307,
+       "kl": 0.0069637298583984375,
+       "learning_rate": 8.263518223330696e-07,
+       "loss": 0.0003,
+       "reward": 0.6232638955116272,
+       "reward_std": 0.11568193091079593,
+       "rewards/semantic_entropy": 0.6232638955116272,
+       "step": 6
+     },
+     {
+       "completion_length": 18.697916507720947,
+       "epoch": 0.6829268292682927,
+       "grad_norm": 0.5207864046096802,
+       "kl": 0.013153076171875,
+       "learning_rate": 5.000000000000002e-07,
+       "loss": 0.0005,
+       "reward": 0.621527798473835,
+       "reward_std": 0.1189112700521946,
+       "rewards/semantic_entropy": 0.621527798473835,
+       "step": 7
+     },
+     {
+       "completion_length": 19.454861402511597,
+       "epoch": 0.7804878048780488,
+       "grad_norm": 0.6792996525764465,
+       "kl": 0.0097503662109375,
+       "learning_rate": 2.339555568810221e-07,
+       "loss": 0.0004,
+       "reward": 0.5682870484888554,
+       "reward_std": 0.13645811565220356,
+       "rewards/semantic_entropy": 0.5682870484888554,
+       "step": 8
+     },
+     {
+       "completion_length": 20.125000476837158,
+       "epoch": 0.8780487804878049,
+       "grad_norm": 0.5297054052352905,
+       "kl": 0.010364532470703125,
+       "learning_rate": 6.030737921409168e-08,
+       "loss": 0.0004,
+       "reward": 0.5454282537102699,
+       "reward_std": 0.10149804083630443,
+       "rewards/semantic_entropy": 0.5454282537102699,
+       "step": 9
+     },
+     {
+       "completion_length": 19.699653148651123,
+       "epoch": 0.975609756097561,
+       "grad_norm": 0.4804926812648773,
+       "kl": 0.01203155517578125,
+       "learning_rate": 0.0,
+       "loss": 0.0005,
+       "reward": 0.6212384253740311,
+       "reward_std": 0.09729464584961534,
+       "rewards/semantic_entropy": 0.6212384253740311,
+       "step": 10
+     },
+     {
+       "epoch": 0.975609756097561,
+       "step": 10,
+       "total_flos": 0.0,
+       "train_loss": 0.00023655204281105568,
+       "train_runtime": 1010.415,
+       "train_samples_per_second": 0.485,
+       "train_steps_per_second": 0.01
+     }
+   ],
+   "logging_steps": 1,
+   "max_steps": 10,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }