Commit 18a93c5 (verified) · 1 Parent(s): 7d35d73
Lansechen committed

Model save

README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ base_model: Qwen/Qwen2.5-3B-Instruct
+ library_name: transformers
+ model_name: Qwen2.5-3B-Instruct-Distill-om220k-2k-origin-batch32-epoch1-8192
+ tags:
+ - generated_from_trainer
+ - trl
+ - sft
+ licence: license
+ ---
+
+ # Model Card for Qwen2.5-3B-Instruct-Distill-om220k-2k-origin-batch32-epoch1-8192
+
+ This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Instruct](https://huggingface.co/Qwen/Qwen2.5-3B-Instruct).
+ It has been trained using [TRL](https://github.com/huggingface/trl).
+
+ ## Quick start
+
+ ```python
+ from transformers import pipeline
+
+ question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+ generator = pipeline("text-generation", model="Lansechen/Qwen2.5-3B-Instruct-Distill-om220k-2k-origin-batch32-epoch1-8192", device="cuda")
+ output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+ print(output["generated_text"])
+ ```
+
+ ## Training procedure
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenran1995-the-chinese-university-of-hong-kong/huggingface/runs/v5x4kmau)
+
+ This model was trained with SFT.
+
+ ### Framework versions
+
+ - TRL: 0.16.0.dev0
+ - Transformers: 4.49.0
+ - Pytorch: 2.5.1+cu121
+ - Datasets: 3.3.1
+ - Tokenizers: 0.21.0
+
+ ## Citations
+
+ Cite TRL as:
+
+ ```bibtex
+ @misc{vonwerra2022trl,
+     title        = {{TRL: Transformer Reinforcement Learning}},
+     author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+     year         = 2020,
+     journal      = {GitHub repository},
+     publisher    = {GitHub},
+     howpublished = {\url{https://github.com/huggingface/trl}}
+ }
+ ```
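The card only states that training used SFT with TRL. As a concrete starting point, the sketch below shows one way such a run could be launched with TRL's `SFTTrainer`; the dataset file name, the gradient-accumulation split, the learning rate, and the 8192 sequence length are assumptions inferred from the model name and from `trainer_state.json` (per-device batch size 4), not the author's actual training script.

```python
# Hedged sketch only: a plausible SFT launch with TRL, not the author's actual script.
# Assumptions: "om220k_2k_subset.jsonl" is a hypothetical chat-format data file with a
# "messages" column; 4 (per device) x 8 (accumulation) reproduces the batch of 32 hinted
# at by the model name; max_seq_length=8192 matches the "-8192" suffix.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("json", data_files="om220k_2k_subset.jsonl", split="train")

config = SFTConfig(
    output_dir="Qwen2.5-3B-Instruct-Distill-om220k-2k-origin-batch32-epoch1-8192",
    num_train_epochs=1,
    per_device_train_batch_size=4,     # matches "train_batch_size" in trainer_state.json
    gradient_accumulation_steps=8,     # assumed split for an effective batch of 32
    max_seq_length=8192,               # parameter name as used in TRL 0.16
    learning_rate=5e-5,                # assumed; only decayed values appear in the logs
    report_to="wandb",
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-3B-Instruct",  # base checkpoint named in the card
    args=config,
    train_dataset=dataset,
)
trainer.train()
```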
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 16534550347776.0,
+     "train_loss": 0.7938935810869391,
+     "train_runtime": 319.9151,
+     "train_samples": 2080,
+     "train_samples_per_second": 4.701,
+     "train_steps_per_second": 0.034
+ }
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+     "bos_token_id": 151643,
+     "do_sample": true,
+     "eos_token_id": [
+         151645,
+         151643
+     ],
+     "pad_token_id": 151643,
+     "repetition_penalty": 1.05,
+     "temperature": 0.7,
+     "top_k": 20,
+     "top_p": 0.8,
+     "transformers_version": "4.49.0"
+ }
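These sampling defaults (temperature 0.7, top-p 0.8, top-k 20, repetition penalty 1.05) are picked up automatically whenever the checkpoint is loaded with `transformers`. A minimal sketch, assuming only the repository id from the model card above, shows how to load and pass them explicitly:

```python
# Minimal sketch: load the generation_config.json added in this commit and use it
# explicitly. Nothing beyond the repository id from the card is assumed.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

repo = "Lansechen/Qwen2.5-3B-Instruct-Distill-om220k-2k-origin-batch32-epoch1-8192"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo).to("cuda")

gen_config = GenerationConfig.from_pretrained(repo)  # temperature=0.7, top_p=0.8, top_k=20, ...
gen_config.max_new_tokens = 128                      # generation length is not set in the file

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Explain knowledge distillation in one paragraph."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```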
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "total_flos": 16534550347776.0,
+     "train_loss": 0.7938935810869391,
+     "train_runtime": 319.9151,
+     "train_samples": 2080,
+     "train_samples_per_second": 4.701,
+     "train_steps_per_second": 0.034
+ }
trainer_state.json ADDED
@@ -0,0 +1,59 @@
+ {
+     "best_metric": null,
+     "best_model_checkpoint": null,
+     "epoch": 0.9361702127659575,
+     "eval_steps": 500,
+     "global_step": 11,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 0.425531914893617,
+             "grad_norm": 1.173352837562561,
+             "learning_rate": 3.4452882373436316e-05,
+             "loss": 0.8972,
+             "mean_token_accuracy": 0.7616663560271263,
+             "step": 5
+         },
+         {
+             "epoch": 0.851063829787234,
+             "grad_norm": 0.5799760818481445,
+             "learning_rate": 6.1012283833590465e-06,
+             "loss": 0.7155,
+             "mean_token_accuracy": 0.791662546992302,
+             "step": 10
+         },
+         {
+             "epoch": 0.9361702127659575,
+             "mean_token_accuracy": 0.8011733368039131,
+             "step": 11,
+             "total_flos": 16534550347776.0,
+             "train_loss": 0.7938935810869391,
+             "train_runtime": 319.9151,
+             "train_samples_per_second": 4.701,
+             "train_steps_per_second": 0.034
+         }
+     ],
+     "logging_steps": 5,
+     "max_steps": 11,
+     "num_input_tokens_seen": 0,
+     "num_train_epochs": 1,
+     "save_steps": 100,
+     "stateful_callbacks": {
+         "TrainerControl": {
+             "args": {
+                 "should_epoch_stop": false,
+                 "should_evaluate": false,
+                 "should_log": false,
+                 "should_save": true,
+                 "should_training_stop": true
+             },
+             "attributes": {}
+         }
+     },
+     "total_flos": 16534550347776.0,
+     "train_batch_size": 4,
+     "trial_name": null,
+     "trial_params": null
+ }
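The run is short (11 optimizer steps, logged every 5), so the full loss and learning-rate trace fits in the `log_history` list above. A small sketch, assuming the file is saved locally as `trainer_state.json`, for inspecting it without the Weights & Biases dashboard:

```python
# Sketch: print the loss / learning-rate trace from trainer_state.json
# (assumed to be in the current directory) instead of the W&B run page.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry and "learning_rate" in entry:  # intermediate logging steps only
        print(f"step {entry['step']:>2}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
```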