madhurjindal committed on
Commit 2af3662 · verified · 1 Parent(s): 37505b6

Upload 11 files

adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "v_proj",
+     "o_proj",
+     "k_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
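
This adapter applies rank-64 LoRA (alpha 128, dropout 0.05) to all seven linear projections of Qwen/Qwen2.5-0.5B-Instruct. A minimal sketch of loading it for inference with transformers and peft follows; ADAPTER_PATH is a placeholder for wherever these adapter files end up (local directory or Hub repo id), since the repository id is not part of this commit page, and the prompt shown is hypothetical — the exact instruction format used by the JB_Detect_v2 dataset is not included here.

# Sketch: attach this LoRA adapter to the base model for inference.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "Qwen/Qwen2.5-0.5B-Instruct"        # base_model_name_or_path above
ADAPTER_PATH = "path/to/this/adapter"      # placeholder: local dir or Hub repo id

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE)
model = PeftModel.from_pretrained(model, ADAPTER_PATH)   # inference_mode: true
model.eval()

# Hypothetical prompt; the real instruction format comes from the training dataset.
messages = [{"role": "user", "content": "Classify whether the following prompt is a jailbreak attempt: ..."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output = model.generate(input_ids, max_new_tokens=2, do_sample=False)   # mirrors max_new_tokens: 2, do_sample: false
print(tokenizer.decode(output[0, input_ids.shape[1]:], skip_special_tokens=True))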
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 0.9998876025626616,
+   "eval_accuracy": 0.9947849346546027,
+   "eval_loss": 0.012421553954482079,
+   "eval_runtime": 845.4917,
+   "eval_samples_per_second": 175.686,
+   "eval_steps_per_second": 2.745,
+   "total_flos": 2.593644880773251e+18,
+   "train_loss": 0.03529202291290823,
+   "train_runtime": 23401.2469,
+   "train_samples_per_second": 72.997,
+   "train_steps_per_second": 0.143
+ }
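
As a quick sanity check, these throughput figures are consistent with the dataset sizes reported in running_log.txt below (1,708,215 training examples and 148,541 evaluation examples):

# Consistency check between the aggregate metrics and the dataset sizes in running_log.txt.
train_runtime = 23401.2469            # seconds
train_samples_per_second = 72.997
eval_runtime = 845.4917               # seconds
eval_samples_per_second = 175.686

print(round(train_runtime * train_samples_per_second))   # ~1,708,221, close to the 1,708,215 train examples (1 epoch)
print(round(eval_runtime * eval_samples_per_second))      # ~148,541 eval examples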
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9998876025626616,
+   "eval_accuracy": 0.9947849346546027,
+   "eval_loss": 0.012421553954482079,
+   "eval_runtime": 845.4917,
+   "eval_samples_per_second": 175.686,
+   "eval_steps_per_second": 2.745
+ }
llamaboard_config.yaml ADDED
@@ -0,0 +1,67 @@
+ top.booster: auto
+ top.checkpoint_path: []
+ top.finetuning_type: lora
+ top.model_name: Qwen2.5-0.5B-Instruct
+ top.quantization_bit: none
+ top.quantization_method: bitsandbytes
+ top.rope_scaling: none
+ top.template: qwen
+ train.additional_target: ''
+ train.badam_mode: layer
+ train.badam_switch_interval: 50
+ train.badam_switch_mode: ascending
+ train.badam_update_ratio: 0.05
+ train.batch_size: 8
+ train.compute_type: fp16
+ train.create_new_adapter: false
+ train.cutoff_len: 1024
+ train.dataset:
+ - JB_Detect_v2
+ train.dataset_dir: data
+ train.ds_offload: false
+ train.ds_stage: none
+ train.extra_args: "{\"optim\": \"adamw_torch\", \"do_sample\": \"false\", \"max_new_tokens\"\
+   :\n 2, \"compute_accuracy\": \"true\"}"
+ train.freeze_extra_modules: ''
+ train.freeze_trainable_layers: 2
+ train.freeze_trainable_modules: all
+ train.galore_rank: 16
+ train.galore_scale: 0.25
+ train.galore_target: all
+ train.galore_update_interval: 200
+ train.gradient_accumulation_steps: 8
+ train.learning_rate: 5e-5
+ train.logging_steps: 100
+ train.lora_alpha: 128
+ train.lora_dropout: 0.05
+ train.lora_rank: 64
+ train.lora_target: ''
+ train.loraplus_lr_ratio: 0
+ train.lr_scheduler_type: cosine
+ train.mask_history: false
+ train.max_grad_norm: '1.0'
+ train.max_samples: '10000000000'
+ train.neat_packing: false
+ train.neftune_alpha: 0
+ train.num_train_epochs: '1.0'
+ train.packing: false
+ train.ppo_score_norm: false
+ train.ppo_whiten_rewards: false
+ train.pref_beta: 0.1
+ train.pref_ftx: 0
+ train.pref_loss: sigmoid
+ train.report_to: false
+ train.resize_vocab: false
+ train.reward_model: null
+ train.save_steps: 5000
+ train.shift_attn: false
+ train.train_on_prompt: false
+ train.training_stage: Supervised Fine-Tuning
+ train.use_badam: false
+ train.use_dora: false
+ train.use_galore: false
+ train.use_llama_pro: false
+ train.use_pissa: false
+ train.use_rslora: false
+ train.val_size: 0.08
+ train.warmup_steps: 1239
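
The batch-size settings here combine with the 8 GPU ranks visible in running_log.txt to give the effective batch size and step count reported by the trainer; a small sketch of that arithmetic (example counts taken from the log below):

# Effective batch size and optimization steps implied by this LLaMA Board config.
per_device_batch = 8          # train.batch_size
grad_accum = 8                # train.gradient_accumulation_steps
num_gpus = 8                  # ranks 0-7 in running_log.txt

effective_batch = per_device_batch * grad_accum * num_gpus
print(effective_batch)        # 512  ("Total train batch size" in the log)

train_examples = 1_708_215    # "Num examples" in the log (after the val_size: 0.08 split)
print(train_examples // effective_batch)   # 3336  ("Total optimization steps")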
running_log.txt ADDED
@@ -0,0 +1,374 @@
1
+ [WARNING|2025-05-29 20:33:25] logging.py:162 >> `ddp_find_unused_parameters` needs to be set as False for LoRA in DDP training.
2
+
3
+ [INFO|2025-05-29 20:33:25] parser.py:355 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: torch.float16
4
+
5
+ [INFO|2025-05-29 20:33:25] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
6
+
7
+ [INFO|2025-05-29 20:33:25] configuration_utils.py:746 >> Model config Qwen2Config {
8
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
9
+ "architectures": [
10
+ "Qwen2ForCausalLM"
11
+ ],
12
+ "attention_dropout": 0.0,
13
+ "bos_token_id": 151643,
14
+ "eos_token_id": 151645,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 896,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 4864,
19
+ "max_position_embeddings": 32768,
20
+ "max_window_layers": 21,
21
+ "model_type": "qwen2",
22
+ "num_attention_heads": 14,
23
+ "num_hidden_layers": 24,
24
+ "num_key_value_heads": 2,
25
+ "rms_norm_eps": 1e-06,
26
+ "rope_scaling": null,
27
+ "rope_theta": 1000000.0,
28
+ "sliding_window": null,
29
+ "tie_word_embeddings": true,
30
+ "torch_dtype": "bfloat16",
31
+ "transformers_version": "4.46.1",
32
+ "use_cache": true,
33
+ "use_sliding_window": false,
34
+ "vocab_size": 151936
35
+ }
36
+
37
+
38
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 3, device: cuda:3, n_gpu: 1, distributed training: True, compute dtype: torch.float16
39
+
40
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 1, device: cuda:1, n_gpu: 1, distributed training: True, compute dtype: torch.float16
41
+
42
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 6, device: cuda:6, n_gpu: 1, distributed training: True, compute dtype: torch.float16
43
+
44
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 4, device: cuda:4, n_gpu: 1, distributed training: True, compute dtype: torch.float16
45
+
46
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 7, device: cuda:7, n_gpu: 1, distributed training: True, compute dtype: torch.float16
47
+
48
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 2, device: cuda:2, n_gpu: 1, distributed training: True, compute dtype: torch.float16
49
+
50
+ [INFO|2025-05-29 20:33:26] parser.py:355 >> Process rank: 5, device: cuda:5, n_gpu: 1, distributed training: True, compute dtype: torch.float16
51
+
52
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file vocab.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
53
+
54
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file merges.txt from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
55
+
56
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer.json
57
+
58
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at None
59
+
60
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at None
61
+
62
+ [INFO|2025-05-29 20:33:25] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer_config.json
63
+
64
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
65
+
66
+ [INFO|2025-05-29 20:33:26] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
67
+
68
+ [INFO|2025-05-29 20:33:26] configuration_utils.py:746 >> Model config Qwen2Config {
69
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
70
+ "architectures": [
71
+ "Qwen2ForCausalLM"
72
+ ],
73
+ "attention_dropout": 0.0,
74
+ "bos_token_id": 151643,
75
+ "eos_token_id": 151645,
76
+ "hidden_act": "silu",
77
+ "hidden_size": 896,
78
+ "initializer_range": 0.02,
79
+ "intermediate_size": 4864,
80
+ "max_position_embeddings": 32768,
81
+ "max_window_layers": 21,
82
+ "model_type": "qwen2",
83
+ "num_attention_heads": 14,
84
+ "num_hidden_layers": 24,
85
+ "num_key_value_heads": 2,
86
+ "rms_norm_eps": 1e-06,
87
+ "rope_scaling": null,
88
+ "rope_theta": 1000000.0,
89
+ "sliding_window": null,
90
+ "tie_word_embeddings": true,
91
+ "torch_dtype": "bfloat16",
92
+ "transformers_version": "4.46.1",
93
+ "use_cache": true,
94
+ "use_sliding_window": false,
95
+ "vocab_size": 151936
96
+ }
97
+
98
+
99
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file vocab.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
100
+
101
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file merges.txt from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
102
+
103
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer.json
104
+
105
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at None
106
+
107
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at None
108
+
109
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/tokenizer_config.json
110
+
111
+ [INFO|2025-05-29 20:33:26] tokenization_utils_base.py:2475 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
112
+
113
+ [INFO|2025-05-29 20:33:26] logging.py:157 >> Replace eos token: <|im_end|>
114
+
115
+ [INFO|2025-05-29 20:33:26] logging.py:157 >> Loading dataset JB_Detect_v2.json...
116
+
117
+ [INFO|2025-05-29 20:35:38] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
118
+
119
+ [INFO|2025-05-29 20:35:38] configuration_utils.py:746 >> Model config Qwen2Config {
120
+ "_name_or_path": "Qwen/Qwen2.5-0.5B-Instruct",
121
+ "architectures": [
122
+ "Qwen2ForCausalLM"
123
+ ],
124
+ "attention_dropout": 0.0,
125
+ "bos_token_id": 151643,
126
+ "eos_token_id": 151645,
127
+ "hidden_act": "silu",
128
+ "hidden_size": 896,
129
+ "initializer_range": 0.02,
130
+ "intermediate_size": 4864,
131
+ "max_position_embeddings": 32768,
132
+ "max_window_layers": 21,
133
+ "model_type": "qwen2",
134
+ "num_attention_heads": 14,
135
+ "num_hidden_layers": 24,
136
+ "num_key_value_heads": 2,
137
+ "rms_norm_eps": 1e-06,
138
+ "rope_scaling": null,
139
+ "rope_theta": 1000000.0,
140
+ "sliding_window": null,
141
+ "tie_word_embeddings": true,
142
+ "torch_dtype": "bfloat16",
143
+ "transformers_version": "4.46.1",
144
+ "use_cache": true,
145
+ "use_sliding_window": false,
146
+ "vocab_size": 151936
147
+ }
148
+
149
+
150
+ [INFO|2025-05-29 20:35:39] modeling_utils.py:3937 >> loading weights file model.safetensors from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/model.safetensors
151
+
152
+ [INFO|2025-05-29 20:35:39] modeling_utils.py:1670 >> Instantiating Qwen2ForCausalLM model under default dtype torch.float16.
153
+
154
+ [INFO|2025-05-29 20:35:39] configuration_utils.py:1096 >> Generate config GenerationConfig {
155
+ "bos_token_id": 151643,
156
+ "eos_token_id": 151645
157
+ }
158
+
159
+
160
+ [INFO|2025-05-29 20:35:41] modeling_utils.py:4800 >> All model checkpoint weights were used when initializing Qwen2ForCausalLM.
161
+
162
+
163
+ [INFO|2025-05-29 20:35:41] modeling_utils.py:4808 >> All the weights of Qwen2ForCausalLM were initialized from the model checkpoint at Qwen/Qwen2.5-0.5B-Instruct.
164
+ If your task is similar to the task the model of the checkpoint was trained on, you can already use Qwen2ForCausalLM for predictions without further training.
165
+
166
+ [INFO|2025-05-29 20:35:41] configuration_utils.py:1051 >> loading configuration file generation_config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/generation_config.json
167
+
168
+ [INFO|2025-05-29 20:35:41] configuration_utils.py:1096 >> Generate config GenerationConfig {
169
+ "bos_token_id": 151643,
170
+ "do_sample": true,
171
+ "eos_token_id": [
172
+ 151645,
173
+ 151643
174
+ ],
175
+ "pad_token_id": 151643,
176
+ "repetition_penalty": 1.1,
177
+ "temperature": 0.7,
178
+ "top_k": 20,
179
+ "top_p": 0.8
180
+ }
181
+
182
+
183
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> Gradient checkpointing enabled.
184
+
185
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> Using torch SDPA for faster training and inference.
186
+
187
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> Upcasting trainable params to float32.
188
+
189
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> Fine-tuning method: LoRA
190
+
191
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> Found linear modules: gate_proj,v_proj,o_proj,k_proj,down_proj,up_proj,q_proj
192
+
193
+ [INFO|2025-05-29 20:35:41] logging.py:157 >> trainable params: 35,192,832 || all params: 529,225,600 || trainable%: 6.6499
194
+
195
+ [INFO|2025-05-29 20:35:41] trainer.py:698 >> Using auto half precision backend
196
+
197
+ [INFO|2025-05-29 20:35:43] trainer.py:2313 >> ***** Running training *****
198
+
199
+ [INFO|2025-05-29 20:35:43] trainer.py:2314 >> Num examples = 1,708,215
200
+
201
+ [INFO|2025-05-29 20:35:43] trainer.py:2315 >> Num Epochs = 1
202
+
203
+ [INFO|2025-05-29 20:35:43] trainer.py:2316 >> Instantaneous batch size per device = 8
204
+
205
+ [INFO|2025-05-29 20:35:43] trainer.py:2319 >> Total train batch size (w. parallel, distributed & accumulation) = 512
206
+
207
+ [INFO|2025-05-29 20:35:43] trainer.py:2320 >> Gradient Accumulation steps = 8
208
+
209
+ [INFO|2025-05-29 20:35:43] trainer.py:2321 >> Total optimization steps = 3,336
210
+
211
+ [INFO|2025-05-29 20:35:43] trainer.py:2322 >> Number of trainable parameters = 35,192,832
212
+
213
+ [INFO|2025-05-29 20:47:26] logging.py:157 >> {'loss': 0.5614, 'learning_rate': 3.8337e-06, 'epoch': 0.03}
214
+
215
+ [INFO|2025-05-29 20:59:03] logging.py:157 >> {'loss': 0.0905, 'learning_rate': 7.8692e-06, 'epoch': 0.06}
216
+
217
+ [INFO|2025-05-29 21:10:50] logging.py:157 >> {'loss': 0.0370, 'learning_rate': 1.1905e-05, 'epoch': 0.09}
218
+
219
+ [INFO|2025-05-29 21:22:41] logging.py:157 >> {'loss': 0.0295, 'learning_rate': 1.5940e-05, 'epoch': 0.12}
220
+
221
+ [INFO|2025-05-29 21:34:27] logging.py:157 >> {'loss': 0.0266, 'learning_rate': 1.9976e-05, 'epoch': 0.15}
222
+
223
+ [INFO|2025-05-29 21:46:06] logging.py:157 >> {'loss': 0.0224, 'learning_rate': 2.4011e-05, 'epoch': 0.18}
224
+
225
+ [INFO|2025-05-29 21:57:49] logging.py:157 >> {'loss': 0.0203, 'learning_rate': 2.8047e-05, 'epoch': 0.21}
226
+
227
+ [INFO|2025-05-29 22:09:37] logging.py:157 >> {'loss': 0.0187, 'learning_rate': 3.2082e-05, 'epoch': 0.24}
228
+
229
+ [INFO|2025-05-29 22:21:20] logging.py:157 >> {'loss': 0.0186, 'learning_rate': 3.6118e-05, 'epoch': 0.27}
230
+
231
+ [INFO|2025-05-29 22:32:56] logging.py:157 >> {'loss': 0.0180, 'learning_rate': 4.0153e-05, 'epoch': 0.30}
232
+
233
+ [INFO|2025-05-29 22:44:39] logging.py:157 >> {'loss': 0.0168, 'learning_rate': 4.4189e-05, 'epoch': 0.33}
234
+
235
+ [INFO|2025-05-29 22:56:14] logging.py:157 >> {'loss': 0.0173, 'learning_rate': 4.8224e-05, 'epoch': 0.36}
236
+
237
+ [INFO|2025-05-29 23:07:51] logging.py:157 >> {'loss': 0.0167, 'learning_rate': 4.9912e-05, 'epoch': 0.39}
238
+
239
+ [INFO|2025-05-29 23:19:38] logging.py:157 >> {'loss': 0.0168, 'learning_rate': 4.9320e-05, 'epoch': 0.42}
240
+
241
+ [INFO|2025-05-29 23:31:13] logging.py:157 >> {'loss': 0.0163, 'learning_rate': 4.8184e-05, 'epoch': 0.45}
242
+
243
+ [INFO|2025-05-29 23:42:47] logging.py:157 >> {'loss': 0.0152, 'learning_rate': 4.6528e-05, 'epoch': 0.48}
244
+
245
+ [INFO|2025-05-29 23:54:24] logging.py:157 >> {'loss': 0.0149, 'learning_rate': 4.4390e-05, 'epoch': 0.51}
246
+
247
+ [INFO|2025-05-30 00:06:14] logging.py:157 >> {'loss': 0.0147, 'learning_rate': 4.1817e-05, 'epoch': 0.54}
248
+
249
+ [INFO|2025-05-30 00:17:56] logging.py:157 >> {'loss': 0.0150, 'learning_rate': 3.8868e-05, 'epoch': 0.57}
250
+
251
+ [INFO|2025-05-30 00:29:40] logging.py:157 >> {'loss': 0.0148, 'learning_rate': 3.5608e-05, 'epoch': 0.60}
252
+
253
+ [INFO|2025-05-30 00:41:19] logging.py:157 >> {'loss': 0.0135, 'learning_rate': 3.2110e-05, 'epoch': 0.63}
254
+
255
+ [INFO|2025-05-30 00:52:53] logging.py:157 >> {'loss': 0.0140, 'learning_rate': 2.8453e-05, 'epoch': 0.66}
256
+
257
+ [INFO|2025-05-30 01:04:33] logging.py:157 >> {'loss': 0.0145, 'learning_rate': 2.4719e-05, 'epoch': 0.69}
258
+
259
+ [INFO|2025-05-30 01:16:20] logging.py:157 >> {'loss': 0.0139, 'learning_rate': 2.0991e-05, 'epoch': 0.72}
260
+
261
+ [INFO|2025-05-30 01:27:56] logging.py:157 >> {'loss': 0.0135, 'learning_rate': 1.7353e-05, 'epoch': 0.75}
262
+
263
+ [INFO|2025-05-30 01:39:51] logging.py:157 >> {'loss': 0.0132, 'learning_rate': 1.3886e-05, 'epoch': 0.78}
264
+
265
+ [INFO|2025-05-30 01:51:27] logging.py:157 >> {'loss': 0.0133, 'learning_rate': 1.0668e-05, 'epoch': 0.81}
266
+
267
+ [INFO|2025-05-30 02:03:01] logging.py:157 >> {'loss': 0.0127, 'learning_rate': 7.7714e-06, 'epoch': 0.84}
268
+
269
+ [INFO|2025-05-30 02:14:38] logging.py:157 >> {'loss': 0.0126, 'learning_rate': 5.2606e-06, 'epoch': 0.87}
270
+
271
+ [INFO|2025-05-30 02:26:21] logging.py:157 >> {'loss': 0.0123, 'learning_rate': 3.1919e-06, 'epoch': 0.90}
272
+
273
+ [INFO|2025-05-30 02:38:08] logging.py:157 >> {'loss': 0.0129, 'learning_rate': 1.6118e-06, 'epoch': 0.93}
274
+
275
+ [INFO|2025-05-30 02:49:49] logging.py:157 >> {'loss': 0.0126, 'learning_rate': 5.5569e-07, 'epoch': 0.96}
276
+
277
+ [INFO|2025-05-30 03:01:32] logging.py:157 >> {'loss': 0.0129, 'learning_rate': 4.7146e-08, 'epoch': 0.99}
278
+
279
+ [INFO|2025-05-30 03:05:43] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2/checkpoint-3336
280
+
281
+ [INFO|2025-05-30 03:05:43] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
282
+
283
+ [INFO|2025-05-30 03:05:43] configuration_utils.py:746 >> Model config Qwen2Config {
284
+ "architectures": [
285
+ "Qwen2ForCausalLM"
286
+ ],
287
+ "attention_dropout": 0.0,
288
+ "bos_token_id": 151643,
289
+ "eos_token_id": 151645,
290
+ "hidden_act": "silu",
291
+ "hidden_size": 896,
292
+ "initializer_range": 0.02,
293
+ "intermediate_size": 4864,
294
+ "max_position_embeddings": 32768,
295
+ "max_window_layers": 21,
296
+ "model_type": "qwen2",
297
+ "num_attention_heads": 14,
298
+ "num_hidden_layers": 24,
299
+ "num_key_value_heads": 2,
300
+ "rms_norm_eps": 1e-06,
301
+ "rope_scaling": null,
302
+ "rope_theta": 1000000.0,
303
+ "sliding_window": null,
304
+ "tie_word_embeddings": true,
305
+ "torch_dtype": "bfloat16",
306
+ "transformers_version": "4.46.1",
307
+ "use_cache": true,
308
+ "use_sliding_window": false,
309
+ "vocab_size": 151936
310
+ }
311
+
312
+
313
+ [INFO|2025-05-30 03:05:43] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2/checkpoint-3336/tokenizer_config.json
314
+
315
+ [INFO|2025-05-30 03:05:43] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2/checkpoint-3336/special_tokens_map.json
316
+
317
+ [INFO|2025-05-30 03:05:44] trainer.py:2584 >>
318
+
319
+ Training completed. Do not forget to share your model on huggingface.co/models =)
320
+
321
+
322
+
323
+ [INFO|2025-05-30 03:05:44] trainer.py:3801 >> Saving model checkpoint to saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2
324
+
325
+ [INFO|2025-05-30 03:05:44] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/aiscuser/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/config.json
326
+
327
+ [INFO|2025-05-30 03:05:44] configuration_utils.py:746 >> Model config Qwen2Config {
328
+ "architectures": [
329
+ "Qwen2ForCausalLM"
330
+ ],
331
+ "attention_dropout": 0.0,
332
+ "bos_token_id": 151643,
333
+ "eos_token_id": 151645,
334
+ "hidden_act": "silu",
335
+ "hidden_size": 896,
336
+ "initializer_range": 0.02,
337
+ "intermediate_size": 4864,
338
+ "max_position_embeddings": 32768,
339
+ "max_window_layers": 21,
340
+ "model_type": "qwen2",
341
+ "num_attention_heads": 14,
342
+ "num_hidden_layers": 24,
343
+ "num_key_value_heads": 2,
344
+ "rms_norm_eps": 1e-06,
345
+ "rope_scaling": null,
346
+ "rope_theta": 1000000.0,
347
+ "sliding_window": null,
348
+ "tie_word_embeddings": true,
349
+ "torch_dtype": "bfloat16",
350
+ "transformers_version": "4.46.1",
351
+ "use_cache": true,
352
+ "use_sliding_window": false,
353
+ "vocab_size": 151936
354
+ }
355
+
356
+
357
+ [INFO|2025-05-30 03:05:44] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2/tokenizer_config.json
358
+
359
+ [INFO|2025-05-30 03:05:44] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2/special_tokens_map.json
360
+
361
+ [WARNING|2025-05-30 03:05:45] logging.py:162 >> No metric eval_loss to plot.
362
+
363
+ [WARNING|2025-05-30 03:05:45] logging.py:162 >> No metric eval_accuracy to plot.
364
+
365
+ [INFO|2025-05-30 03:05:45] trainer.py:4117 >>
366
+ ***** Running Evaluation *****
367
+
368
+ [INFO|2025-05-30 03:05:45] trainer.py:4119 >> Num examples = 148541
369
+
370
+ [INFO|2025-05-30 03:05:45] trainer.py:4122 >> Batch size = 8
371
+
372
+ [INFO|2025-05-30 03:19:50] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
373
+ {'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}, 'metrics': [{'name': 'Accuracy', 'type': 'accuracy', 'value': 0.9947849346546027}]}
374
+
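
The "trainable params: 35,192,832" line above can be reproduced from the Qwen2 config printed in this log (hidden_size 896, intermediate_size 4864, 14 attention heads, 2 key-value heads, 24 layers) together with the LoRA rank of 64 on all seven projection modules: each adapted linear layer of shape in × out adds r·(in + out) parameters for its A and B matrices.

# Reproduce the reported LoRA trainable-parameter count from the model config.
r = 64
hidden, intermediate = 896, 4864
n_heads, n_kv_heads, n_layers = 14, 2, 24
head_dim = hidden // n_heads                      # 64

shapes = {                                        # (in_features, out_features) per decoder layer
    "q_proj":    (hidden, n_heads * head_dim),
    "k_proj":    (hidden, n_kv_heads * head_dim),
    "v_proj":    (hidden, n_kv_heads * head_dim),
    "o_proj":    (n_heads * head_dim, hidden),
    "gate_proj": (hidden, intermediate),
    "up_proj":   (hidden, intermediate),
    "down_proj": (intermediate, hidden),
}

per_layer = sum(r * (i + o) for i, o in shapes.values())
print(per_layer * n_layers)                       # 35192832, matching the log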
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.9998876025626616,
+   "total_flos": 2.593644880773251e+18,
+   "train_loss": 0.03529202291290823,
+   "train_runtime": 23401.2469,
+   "train_samples_per_second": 72.997,
+   "train_steps_per_second": 0.143
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,34 @@
1
+ {"current_steps": 100, "total_steps": 3336, "loss": 0.5614, "lr": 3.833736884584342e-06, "epoch": 0.029972649956914316, "percentage": 3.0, "elapsed_time": "0:11:43", "remaining_time": "6:19:30"}
2
+ {"current_steps": 200, "total_steps": 3336, "loss": 0.0905, "lr": 7.869249394673125e-06, "epoch": 0.05994529991382863, "percentage": 6.0, "elapsed_time": "0:23:20", "remaining_time": "6:05:55"}
3
+ {"current_steps": 300, "total_steps": 3336, "loss": 0.037, "lr": 1.1904761904761905e-05, "epoch": 0.08991794987074295, "percentage": 8.99, "elapsed_time": "0:35:07", "remaining_time": "5:55:31"}
4
+ {"current_steps": 400, "total_steps": 3336, "loss": 0.0295, "lr": 1.5940274414850686e-05, "epoch": 0.11989059982765726, "percentage": 11.99, "elapsed_time": "0:46:58", "remaining_time": "5:44:47"}
5
+ {"current_steps": 500, "total_steps": 3336, "loss": 0.0266, "lr": 1.9975786924939468e-05, "epoch": 0.14986324978457158, "percentage": 14.99, "elapsed_time": "0:58:44", "remaining_time": "5:33:11"}
6
+ {"current_steps": 600, "total_steps": 3336, "loss": 0.0224, "lr": 2.401129943502825e-05, "epoch": 0.1798358997414859, "percentage": 17.99, "elapsed_time": "1:10:23", "remaining_time": "5:20:59"}
7
+ {"current_steps": 700, "total_steps": 3336, "loss": 0.0203, "lr": 2.804681194511703e-05, "epoch": 0.2098085496984002, "percentage": 20.98, "elapsed_time": "1:22:06", "remaining_time": "5:09:10"}
8
+ {"current_steps": 800, "total_steps": 3336, "loss": 0.0187, "lr": 3.2082324455205814e-05, "epoch": 0.23978119965531453, "percentage": 23.98, "elapsed_time": "1:33:53", "remaining_time": "4:57:39"}
9
+ {"current_steps": 900, "total_steps": 3336, "loss": 0.0186, "lr": 3.6117836965294597e-05, "epoch": 0.26975384961222887, "percentage": 26.98, "elapsed_time": "1:45:37", "remaining_time": "4:45:52"}
10
+ {"current_steps": 1000, "total_steps": 3336, "loss": 0.018, "lr": 4.015334947538338e-05, "epoch": 0.29972649956914316, "percentage": 29.98, "elapsed_time": "1:57:13", "remaining_time": "4:33:51"}
11
+ {"current_steps": 1100, "total_steps": 3336, "loss": 0.0168, "lr": 4.4188861985472154e-05, "epoch": 0.3296991495260575, "percentage": 32.97, "elapsed_time": "2:08:56", "remaining_time": "4:22:06"}
12
+ {"current_steps": 1200, "total_steps": 3336, "loss": 0.0173, "lr": 4.8224374495560936e-05, "epoch": 0.3596717994829718, "percentage": 35.97, "elapsed_time": "2:20:31", "remaining_time": "4:10:07"}
13
+ {"current_steps": 1300, "total_steps": 3336, "loss": 0.0167, "lr": 4.99120705806192e-05, "epoch": 0.38964444943988613, "percentage": 38.97, "elapsed_time": "2:32:08", "remaining_time": "3:58:17"}
14
+ {"current_steps": 1400, "total_steps": 3336, "loss": 0.0168, "lr": 4.932035139761111e-05, "epoch": 0.4196170993968004, "percentage": 41.97, "elapsed_time": "2:43:55", "remaining_time": "3:46:41"}
15
+ {"current_steps": 1500, "total_steps": 3336, "loss": 0.0163, "lr": 4.8183803112054984e-05, "epoch": 0.44958974935371476, "percentage": 44.96, "elapsed_time": "2:55:30", "remaining_time": "3:34:49"}
16
+ {"current_steps": 1600, "total_steps": 3336, "loss": 0.0152, "lr": 4.652788689317074e-05, "epoch": 0.47956239931062905, "percentage": 47.96, "elapsed_time": "3:07:04", "remaining_time": "3:22:58"}
17
+ {"current_steps": 1700, "total_steps": 3336, "loss": 0.0149, "lr": 4.438969888865293e-05, "epoch": 0.5095350492675433, "percentage": 50.96, "elapsed_time": "3:18:41", "remaining_time": "3:11:12"}
18
+ {"current_steps": 1800, "total_steps": 3336, "loss": 0.0147, "lr": 4.1817139189768106e-05, "epoch": 0.5395076992244577, "percentage": 53.96, "elapsed_time": "3:30:31", "remaining_time": "2:59:38"}
19
+ {"current_steps": 1900, "total_steps": 3336, "loss": 0.015, "lr": 3.886783876446901e-05, "epoch": 0.569480349181372, "percentage": 56.95, "elapsed_time": "3:42:13", "remaining_time": "2:47:57"}
20
+ {"current_steps": 2000, "total_steps": 3336, "loss": 0.0148, "lr": 3.560786839757242e-05, "epoch": 0.5994529991382863, "percentage": 59.95, "elapsed_time": "3:53:56", "remaining_time": "2:36:16"}
21
+ {"current_steps": 2100, "total_steps": 3336, "loss": 0.0135, "lr": 3.211025856056666e-05, "epoch": 0.6294256490952006, "percentage": 62.95, "elapsed_time": "4:05:36", "remaining_time": "2:24:33"}
22
+ {"current_steps": 2200, "total_steps": 3336, "loss": 0.014, "lr": 2.8453363369204606e-05, "epoch": 0.659398299052115, "percentage": 65.95, "elapsed_time": "4:17:10", "remaining_time": "2:12:47"}
23
+ {"current_steps": 2300, "total_steps": 3336, "loss": 0.0145, "lr": 2.4719105279813023e-05, "epoch": 0.6893709490090293, "percentage": 68.94, "elapsed_time": "4:28:50", "remaining_time": "2:01:05"}
24
+ {"current_steps": 2400, "total_steps": 3336, "loss": 0.0139, "lr": 2.0991139846962775e-05, "epoch": 0.7193435989659436, "percentage": 71.94, "elapsed_time": "4:40:37", "remaining_time": "1:49:26"}
25
+ {"current_steps": 2500, "total_steps": 3336, "loss": 0.0135, "lr": 1.7352981655945167e-05, "epoch": 0.7493162489228579, "percentage": 74.94, "elapsed_time": "4:52:13", "remaining_time": "1:37:43"}
26
+ {"current_steps": 2600, "total_steps": 3336, "loss": 0.0132, "lr": 1.3886133413268552e-05, "epoch": 0.7792888988797723, "percentage": 77.94, "elapsed_time": "5:04:08", "remaining_time": "1:26:05"}
27
+ {"current_steps": 2700, "total_steps": 3336, "loss": 0.0133, "lr": 1.0668260107643305e-05, "epoch": 0.8092615488366866, "percentage": 80.94, "elapsed_time": "5:15:44", "remaining_time": "1:14:22"}
28
+ {"current_steps": 2800, "total_steps": 3336, "loss": 0.0127, "lr": 7.771449144245214e-06, "epoch": 0.8392341987936008, "percentage": 83.93, "elapsed_time": "5:27:17", "remaining_time": "1:02:39"}
29
+ {"current_steps": 2900, "total_steps": 3336, "loss": 0.0126, "lr": 5.260595429058021e-06, "epoch": 0.8692068487505151, "percentage": 86.93, "elapsed_time": "5:38:55", "remaining_time": "0:50:57"}
30
+ {"current_steps": 3000, "total_steps": 3336, "loss": 0.0123, "lr": 3.191947580940455e-06, "epoch": 0.8991794987074295, "percentage": 89.93, "elapsed_time": "5:50:38", "remaining_time": "0:39:16"}
31
+ {"current_steps": 3100, "total_steps": 3336, "loss": 0.0129, "lr": 1.61184783944949e-06, "epoch": 0.9291521486643438, "percentage": 92.93, "elapsed_time": "6:02:25", "remaining_time": "0:27:35"}
32
+ {"current_steps": 3200, "total_steps": 3336, "loss": 0.0126, "lr": 5.556938972425402e-07, "epoch": 0.9591247986212581, "percentage": 95.92, "elapsed_time": "6:14:06", "remaining_time": "0:15:53"}
33
+ {"current_steps": 3300, "total_steps": 3336, "loss": 0.0129, "lr": 4.714591428512938e-08, "epoch": 0.9890974485781724, "percentage": 98.92, "elapsed_time": "6:25:49", "remaining_time": "0:04:12"}
34
+ {"current_steps": 3336, "total_steps": 3336, "epoch": 0.9998876025626616, "percentage": 100.0, "elapsed_time": "6:30:01", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,273 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9998876025626616,
5
+ "eval_steps": 5000,
6
+ "global_step": 3336,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.029972649956914316,
13
+ "grad_norm": 4.414547443389893,
14
+ "learning_rate": 3.833736884584342e-06,
15
+ "loss": 0.5614,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.05994529991382863,
20
+ "grad_norm": 7.150412559509277,
21
+ "learning_rate": 7.869249394673125e-06,
22
+ "loss": 0.0905,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.08991794987074295,
27
+ "grad_norm": 7.323289394378662,
28
+ "learning_rate": 1.1904761904761905e-05,
29
+ "loss": 0.037,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.11989059982765726,
34
+ "grad_norm": 2.585188627243042,
35
+ "learning_rate": 1.5940274414850686e-05,
36
+ "loss": 0.0295,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.14986324978457158,
41
+ "grad_norm": 2.407606840133667,
42
+ "learning_rate": 1.9975786924939468e-05,
43
+ "loss": 0.0266,
44
+ "step": 500
45
+ },
46
+ {
47
+ "epoch": 0.1798358997414859,
48
+ "grad_norm": 2.0257647037506104,
49
+ "learning_rate": 2.401129943502825e-05,
50
+ "loss": 0.0224,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.2098085496984002,
55
+ "grad_norm": 1.106999397277832,
56
+ "learning_rate": 2.804681194511703e-05,
57
+ "loss": 0.0203,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.23978119965531453,
62
+ "grad_norm": 0.4099288582801819,
63
+ "learning_rate": 3.2082324455205814e-05,
64
+ "loss": 0.0187,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.26975384961222887,
69
+ "grad_norm": 0.54881751537323,
70
+ "learning_rate": 3.6117836965294597e-05,
71
+ "loss": 0.0186,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.29972649956914316,
76
+ "grad_norm": 0.1522761881351471,
77
+ "learning_rate": 4.015334947538338e-05,
78
+ "loss": 0.018,
79
+ "step": 1000
80
+ },
81
+ {
82
+ "epoch": 0.3296991495260575,
83
+ "grad_norm": 0.1916244626045227,
84
+ "learning_rate": 4.4188861985472154e-05,
85
+ "loss": 0.0168,
86
+ "step": 1100
87
+ },
88
+ {
89
+ "epoch": 0.3596717994829718,
90
+ "grad_norm": 0.36830204725265503,
91
+ "learning_rate": 4.8224374495560936e-05,
92
+ "loss": 0.0173,
93
+ "step": 1200
94
+ },
95
+ {
96
+ "epoch": 0.38964444943988613,
97
+ "grad_norm": 0.3123892843723297,
98
+ "learning_rate": 4.99120705806192e-05,
99
+ "loss": 0.0167,
100
+ "step": 1300
101
+ },
102
+ {
103
+ "epoch": 0.4196170993968004,
104
+ "grad_norm": 0.16122741997241974,
105
+ "learning_rate": 4.932035139761111e-05,
106
+ "loss": 0.0168,
107
+ "step": 1400
108
+ },
109
+ {
110
+ "epoch": 0.44958974935371476,
111
+ "grad_norm": 0.28243348002433777,
112
+ "learning_rate": 4.8183803112054984e-05,
113
+ "loss": 0.0163,
114
+ "step": 1500
115
+ },
116
+ {
117
+ "epoch": 0.47956239931062905,
118
+ "grad_norm": 0.16183069348335266,
119
+ "learning_rate": 4.652788689317074e-05,
120
+ "loss": 0.0152,
121
+ "step": 1600
122
+ },
123
+ {
124
+ "epoch": 0.5095350492675433,
125
+ "grad_norm": 0.1692025065422058,
126
+ "learning_rate": 4.438969888865293e-05,
127
+ "loss": 0.0149,
128
+ "step": 1700
129
+ },
130
+ {
131
+ "epoch": 0.5395076992244577,
132
+ "grad_norm": 0.14949053525924683,
133
+ "learning_rate": 4.1817139189768106e-05,
134
+ "loss": 0.0147,
135
+ "step": 1800
136
+ },
137
+ {
138
+ "epoch": 0.569480349181372,
139
+ "grad_norm": 0.39853450655937195,
140
+ "learning_rate": 3.886783876446901e-05,
141
+ "loss": 0.015,
142
+ "step": 1900
143
+ },
144
+ {
145
+ "epoch": 0.5994529991382863,
146
+ "grad_norm": 0.17293961346149445,
147
+ "learning_rate": 3.560786839757242e-05,
148
+ "loss": 0.0148,
149
+ "step": 2000
150
+ },
151
+ {
152
+ "epoch": 0.6294256490952006,
153
+ "grad_norm": 0.14277803897857666,
154
+ "learning_rate": 3.211025856056666e-05,
155
+ "loss": 0.0135,
156
+ "step": 2100
157
+ },
158
+ {
159
+ "epoch": 0.659398299052115,
160
+ "grad_norm": 0.2294313907623291,
161
+ "learning_rate": 2.8453363369204606e-05,
162
+ "loss": 0.014,
163
+ "step": 2200
164
+ },
165
+ {
166
+ "epoch": 0.6893709490090293,
167
+ "grad_norm": 0.12665383517742157,
168
+ "learning_rate": 2.4719105279813023e-05,
169
+ "loss": 0.0145,
170
+ "step": 2300
171
+ },
172
+ {
173
+ "epoch": 0.7193435989659436,
174
+ "grad_norm": 0.17277060449123383,
175
+ "learning_rate": 2.0991139846962775e-05,
176
+ "loss": 0.0139,
177
+ "step": 2400
178
+ },
179
+ {
180
+ "epoch": 0.7493162489228579,
181
+ "grad_norm": 0.1914021372795105,
182
+ "learning_rate": 1.7352981655945167e-05,
183
+ "loss": 0.0135,
184
+ "step": 2500
185
+ },
186
+ {
187
+ "epoch": 0.7792888988797723,
188
+ "grad_norm": 0.18266354501247406,
189
+ "learning_rate": 1.3886133413268552e-05,
190
+ "loss": 0.0132,
191
+ "step": 2600
192
+ },
193
+ {
194
+ "epoch": 0.8092615488366866,
195
+ "grad_norm": 0.1917112022638321,
196
+ "learning_rate": 1.0668260107643305e-05,
197
+ "loss": 0.0133,
198
+ "step": 2700
199
+ },
200
+ {
201
+ "epoch": 0.8392341987936008,
202
+ "grad_norm": 0.13778215646743774,
203
+ "learning_rate": 7.771449144245214e-06,
204
+ "loss": 0.0127,
205
+ "step": 2800
206
+ },
207
+ {
208
+ "epoch": 0.8692068487505151,
209
+ "grad_norm": 0.2762675881385803,
210
+ "learning_rate": 5.260595429058021e-06,
211
+ "loss": 0.0126,
212
+ "step": 2900
213
+ },
214
+ {
215
+ "epoch": 0.8991794987074295,
216
+ "grad_norm": 0.1501600295305252,
217
+ "learning_rate": 3.191947580940455e-06,
218
+ "loss": 0.0123,
219
+ "step": 3000
220
+ },
221
+ {
222
+ "epoch": 0.9291521486643438,
223
+ "grad_norm": 0.21935300529003143,
224
+ "learning_rate": 1.61184783944949e-06,
225
+ "loss": 0.0129,
226
+ "step": 3100
227
+ },
228
+ {
229
+ "epoch": 0.9591247986212581,
230
+ "grad_norm": 0.18737538158893585,
231
+ "learning_rate": 5.556938972425402e-07,
232
+ "loss": 0.0126,
233
+ "step": 3200
234
+ },
235
+ {
236
+ "epoch": 0.9890974485781724,
237
+ "grad_norm": 0.17027494311332703,
238
+ "learning_rate": 4.714591428512938e-08,
239
+ "loss": 0.0129,
240
+ "step": 3300
241
+ },
242
+ {
243
+ "epoch": 0.9998876025626616,
244
+ "step": 3336,
245
+ "total_flos": 2.593644880773251e+18,
246
+ "train_loss": 0.03529202291290823,
247
+ "train_runtime": 23401.2469,
248
+ "train_samples_per_second": 72.997,
249
+ "train_steps_per_second": 0.143
250
+ }
251
+ ],
252
+ "logging_steps": 100,
253
+ "max_steps": 3336,
254
+ "num_input_tokens_seen": 0,
255
+ "num_train_epochs": 1,
256
+ "save_steps": 5000,
257
+ "stateful_callbacks": {
258
+ "TrainerControl": {
259
+ "args": {
260
+ "should_epoch_stop": false,
261
+ "should_evaluate": false,
262
+ "should_log": false,
263
+ "should_save": true,
264
+ "should_training_stop": true
265
+ },
266
+ "attributes": {}
267
+ }
268
+ },
269
+ "total_flos": 2.593644880773251e+18,
270
+ "train_batch_size": 8,
271
+ "trial_name": null,
272
+ "trial_params": null
273
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d148aa73f06a1bf63006c2a81d020364084537c8071629cfa138c444c978304f
+ size 5432
training_args.yaml ADDED
@@ -0,0 +1,38 @@
+ compute_accuracy: 'true'
+ cutoff_len: 1024
+ dataset: JB_Detect_v2
+ dataset_dir: data
+ ddp_timeout: 180000000
+ do_sample: 'false'
+ do_train: true
+ eval_steps: 5000
+ eval_strategy: steps
+ finetuning_type: lora
+ flash_attn: auto
+ fp16: true
+ gradient_accumulation_steps: 8
+ learning_rate: 5.0e-05
+ logging_steps: 100
+ lora_alpha: 128
+ lora_dropout: 0.05
+ lora_rank: 64
+ lora_target: all
+ lr_scheduler_type: cosine
+ max_grad_norm: 1.0
+ max_new_tokens: 2
+ max_samples: 10000000000
+ model_name_or_path: Qwen/Qwen2.5-0.5B-Instruct
+ num_train_epochs: 1.0
+ optim: adamw_torch
+ output_dir: saves/Qwen2.5-0.5B-Instruct/lora/train_2025-05-29-20-20-04_2
+ packing: false
+ per_device_eval_batch_size: 8
+ per_device_train_batch_size: 8
+ plot_loss: true
+ preprocessing_num_workers: 16
+ report_to: none
+ save_steps: 5000
+ stage: sft
+ template: qwen
+ val_size: 0.08
+ warmup_steps: 1239
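
This YAML is the argument file LLaMA-Factory consumes for the run logged above. Assuming LLaMA-Factory is installed (providing the llamafactory-cli entry point) and JB_Detect_v2 is registered in data/dataset_info.json, a roughly equivalent launch could look like the sketch below; the 8-GPU distributed setup seen in running_log.txt would additionally need a torchrun-style launcher, which is omitted here.

# Sketch: relaunch training from training_args.yaml with LLaMA-Factory's CLI.
# Assumes `llamafactory-cli` is on PATH and the dataset is registered under data/.
import subprocess

subprocess.run(["llamafactory-cli", "train", "training_args.yaml"], check=True)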
training_loss.png ADDED