Sauron0019 committed
Commit 869aea1 · verified · 1 parent: e5e6e22

Upload 20 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,60 @@
- ---
- license: mit
- ---
+ ---
+ library_name: peft
+ license: other
+ base_model: google/gemma-3-12b-it
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ model-index:
+ - name: train_2025-06-07-17-55-57
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2025-06-07-17-55-57
+
+ This model is a fine-tuned version of [google/gemma-3-12b-it](https://huggingface.co/google/gemma-3-12b-it) on the top_10_training_dataset and top_10_validation_dataset datasets.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 5
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 5
+ - total_train_batch_size: 25
+ - optimizer: ADAMW_TORCH with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine_with_restarts
+ - lr_scheduler_warmup_steps: 150
+ - num_epochs: 4.0
+ - label_smoothing_factor: 0.05
+
+ ### Training results
+
+ ### Framework versions
+
+ - PEFT 0.15.1
+ - Transformers 4.51.3
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.5.0
+ - Tokenizers 0.21.1
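
Since the card stops at framework versions, here is a minimal, untested sketch of loading the adapter with the versions listed above. The adapter path is a placeholder, and text-only loading via AutoModelForCausalLM is an assumption (the Gemma-3 checkpoint is multimodal; Gemma3ForConditionalGeneration may be needed instead):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model in bf16, matching the training compute type.
base = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-12b-it",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-12b-it")

# "path/to/this-adapter" is a placeholder for this repo's id or a local clone.
model = PeftModel.from_pretrained(base, "path/to/this-adapter")
model.eval()
```

Note that training_args.yaml below records `template: alpaca`, so prompts formatted in Alpaca style are more likely to match the fine-tuning distribution than raw chat-template input.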
adapter_config.json ADDED
@@ -0,0 +1,180 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-3-12b-it",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "model.layers.16.self_attn.k_proj",
+     "model.layers.14.self_attn.q_proj",
+     "model.layers.22.self_attn.v_proj",
+     "38.self_attn.v_proj",
+     "40.self_attn.v_proj",
+     "model.layers.24.self_attn.v_proj",
+     "37.self_attn.v_proj",
+     "model.layers.26.self_attn.v_proj",
+     "34.self_attn.k_proj",
+     "45.self_attn.v_proj",
+     "29.self_attn.v_proj",
+     "model.layers.4.self_attn.q_proj",
+     "model.layers.3.self_attn.k_proj",
+     "34.self_attn.v_proj",
+     "model.layers.15.self_attn.q_proj",
+     "model.layers.5.self_attn.v_proj",
+     "model.layers.19.self_attn.v_proj",
+     "45.self_attn.q_proj",
+     "model.layers.20.self_attn.k_proj",
+     "model.layers.5.self_attn.k_proj",
+     "model.layers.17.self_attn.k_proj",
+     "model.layers.26.self_attn.k_proj",
+     "model.layers.8.self_attn.v_proj",
+     "model.layers.7.self_attn.v_proj",
+     "model.layers.14.self_attn.k_proj",
+     "44.self_attn.k_proj",
+     "model.layers.18.self_attn.v_proj",
+     "31.self_attn.k_proj",
+     "44.self_attn.q_proj",
+     "32.self_attn.v_proj",
+     "27.self_attn.q_proj",
+     "47.self_attn.k_proj",
+     "model.layers.13.self_attn.v_proj",
+     "model.layers.17.self_attn.v_proj",
+     "model.layers.18.self_attn.q_proj",
+     "gate_proj",
+     "model.layers.15.self_attn.v_proj",
+     "model.layers.10.self_attn.k_proj",
+     "model.layers.19.self_attn.k_proj",
+     "up_proj",
+     "model.layers.12.self_attn.v_proj",
+     "47.self_attn.q_proj",
+     "model.layers.8.self_attn.q_proj",
+     "model.layers.23.self_attn.v_proj",
+     "model.layers.3.self_attn.v_proj",
+     "28.self_attn.v_proj",
+     "29.self_attn.k_proj",
+     "45.self_attn.k_proj",
+     "model.layers.25.self_attn.q_proj",
+     "32.self_attn.k_proj",
+     "42.self_attn.v_proj",
+     "model.layers.6.self_attn.q_proj",
+     "30.self_attn.k_proj",
+     "model.layers.14.self_attn.v_proj",
+     "model.layers.23.self_attn.q_proj",
+     "model.layers.1.self_attn.k_proj",
+     "model.layers.16.self_attn.v_proj",
+     "model.layers.0.self_attn.v_proj",
+     "43.self_attn.k_proj",
+     "model.layers.20.self_attn.q_proj",
+     "33.self_attn.q_proj",
+     "36.self_attn.q_proj",
+     "47.self_attn.v_proj",
+     "model.layers.23.self_attn.k_proj",
+     "model.layers.24.self_attn.q_proj",
+     "31.self_attn.q_proj",
+     "43.self_attn.q_proj",
+     "41.self_attn.v_proj",
+     "46.self_attn.q_proj",
+     "35.self_attn.v_proj",
+     "model.layers.12.self_attn.q_proj",
+     "44.self_attn.v_proj",
+     "29.self_attn.q_proj",
+     "model.layers.11.self_attn.k_proj",
+     "37.self_attn.k_proj",
+     "model.layers.3.self_attn.q_proj",
+     "model.layers.13.self_attn.q_proj",
+     "model.layers.7.self_attn.k_proj",
+     "38.self_attn.q_proj",
+     "model.layers.5.self_attn.q_proj",
+     "o_proj",
+     "model.layers.20.self_attn.v_proj",
+     "model.layers.16.self_attn.q_proj",
+     "model.layers.22.self_attn.q_proj",
+     "model.layers.12.self_attn.k_proj",
+     "down_proj",
+     "model.layers.13.self_attn.k_proj",
+     "41.self_attn.k_proj",
+     "model.layers.9.self_attn.v_proj",
+     "model.layers.2.self_attn.k_proj",
+     "31.self_attn.v_proj",
+     "model.layers.25.self_attn.v_proj",
+     "model.layers.22.self_attn.k_proj",
+     "34.self_attn.q_proj",
+     "28.self_attn.k_proj",
+     "model.layers.0.self_attn.q_proj",
+     "27.self_attn.v_proj",
+     "model.layers.21.self_attn.k_proj",
+     "model.layers.10.self_attn.v_proj",
+     "35.self_attn.k_proj",
+     "model.layers.15.self_attn.k_proj",
+     "model.layers.21.self_attn.v_proj",
+     "model.layers.1.self_attn.q_proj",
+     "model.layers.4.self_attn.v_proj",
+     "model.layers.2.self_attn.q_proj",
+     "model.layers.26.self_attn.q_proj",
+     "model.layers.9.self_attn.k_proj",
+     "model.layers.9.self_attn.q_proj",
+     "model.layers.25.self_attn.k_proj",
+     "41.self_attn.q_proj",
+     "30.self_attn.v_proj",
+     "33.self_attn.v_proj",
+     "model.layers.11.self_attn.q_proj",
+     "32.self_attn.q_proj",
+     "model.layers.18.self_attn.k_proj",
+     "46.self_attn.v_proj",
+     "40.self_attn.k_proj",
+     "model.layers.8.self_attn.k_proj",
+     "model.layers.7.self_attn.q_proj",
+     "model.layers.4.self_attn.k_proj",
+     "39.self_attn.k_proj",
+     "36.self_attn.k_proj",
+     "model.layers.0.self_attn.k_proj",
+     "36.self_attn.v_proj",
+     "43.self_attn.v_proj",
+     "model.layers.11.self_attn.v_proj",
+     "model.layers.1.self_attn.v_proj",
+     "42.self_attn.k_proj",
+     "35.self_attn.q_proj",
+     "46.self_attn.k_proj",
+     "model.layers.2.self_attn.v_proj",
+     "model.layers.6.self_attn.k_proj",
+     "model.layers.10.self_attn.q_proj",
+     "model.layers.6.self_attn.v_proj",
+     "39.self_attn.v_proj",
+     "38.self_attn.k_proj",
+     "model.layers.24.self_attn.k_proj",
+     "model.layers.21.self_attn.q_proj",
+     "model.layers.17.self_attn.q_proj",
+     "27.self_attn.k_proj",
+     "model.layers.19.self_attn.q_proj",
+     "28.self_attn.q_proj",
+     "33.self_attn.k_proj",
+     "39.self_attn.q_proj",
+     "40.self_attn.q_proj",
+     "37.self_attn.q_proj",
+     "42.self_attn.q_proj",
+     "30.self_attn.q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
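
From the config above, `r: 32` and `lora_alpha: 64`, so each adapter pair is applied with scaling lora_alpha / r = 2. A small illustrative sketch of that update rule (hypothetical shapes; the real weights live in adapter_model.safetensors):

```python
import torch

# LoRA update: W' = W + (alpha / r) * B @ A, with r=32, alpha=64 from this config.
d_out, d_in, r, alpha = 4096, 4096, 32, 64   # hypothetical projection shape
A = 0.01 * torch.randn(r, d_in)              # lora_A (small random init)
B = torch.zeros(d_out, r)                    # lora_B (zero init, so the delta starts at 0)
delta_W = (alpha / r) * (B @ A)              # scaling factor = 2.0
print(delta_W.shape, alpha / r)              # torch.Size([4096, 4096]) 2.0
```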
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75492779c6ec99442914be3d7f5e34fc681e7784438378946c127087605660be
+ size 523864568
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.99728014505893,
+   "num_input_tokens_seen": 11278520,
+   "total_flos": 7.651349314204147e+17,
+   "train_loss": 1.351207665421746,
+   "train_runtime": 8552.2085,
+   "train_samples_per_second": 2.578,
+   "train_steps_per_second": 0.103
+ }
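
These figures are mutually consistent; a quick check with the values copied from this file:

```python
# Throughput implied by all_results.json: tokens seen divided by runtime.
tokens, runtime_s = 11278520, 8552.2085
print(tokens / runtime_s)   # ~1318.8 tokens/s, matching the final trainer_log.jsonl record
print(2.578 * runtime_s)    # ~22048 samples over ~4 epochs, i.e. ~5500 per epoch
```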
chat_template.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
+ }
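
This is the standard Gemma-3 turn format (`<start_of_turn>role ... <end_of_turn>`, with any system message folded into the first user turn). A hedged sketch of rendering it, assuming access to the gated base tokenizer:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/gemma-3-12b-it")
messages = [
    {"role": "system", "content": "You are concise."},
    {"role": "user", "content": "Hello!"},
]
# Render to a prompt string ending in "<start_of_turn>model" for generation.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```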
llamaboard_config.yaml ADDED
@@ -0,0 +1,79 @@
+ top.booster: auto
+ top.checkpoint_path: []
+ top.finetuning_type: lora
+ top.model_name: Gemma-3-12B-Instruct
+ top.quantization_bit: none
+ top.quantization_method: bnb
+ top.rope_scaling: none
+ top.template: alpaca
+ train.additional_target: ''
+ train.apollo_rank: 16
+ train.apollo_scale: 32
+ train.apollo_target: all
+ train.apollo_update_interval: 200
+ train.badam_mode: layer
+ train.badam_switch_interval: 50
+ train.badam_switch_mode: ascending
+ train.badam_update_ratio: 0.05
+ train.batch_size: 5
+ train.compute_type: bf16
+ train.create_new_adapter: true
+ train.cutoff_len: 512
+ train.dataset:
+ - top_10_training_dataset
+ - top_10_validation_dataset
+ train.dataset_dir: data
+ train.ds_offload: false
+ train.ds_stage: none
+ train.extra_args: '{"optim":"adamw_torch","weight_decay":0.01,"label_smoothing_factor":0.05}'
+ train.freeze_extra_modules: ''
+ train.freeze_trainable_layers: 4
+ train.freeze_trainable_modules: all
+ train.galore_rank: 16
+ train.galore_scale: 2
+ train.galore_target: all
+ train.galore_update_interval: 200
+ train.gradient_accumulation_steps: 5
+ train.learning_rate: 2e-5
+ train.logging_steps: 25
+ train.lora_alpha: 64
+ train.lora_dropout: 0.05
+ train.lora_rank: 32
+ train.lora_target: q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
+ train.loraplus_lr_ratio: 8
+ train.lr_scheduler_type: cosine_with_restarts
+ train.mask_history: false
+ train.max_grad_norm: '0.3'
+ train.max_samples: '100000'
+ train.neat_packing: false
+ train.neftune_alpha: 0
+ train.num_train_epochs: '4'
+ train.packing: false
+ train.ppo_score_norm: false
+ train.ppo_whiten_rewards: false
+ train.pref_beta: 0.1
+ train.pref_ftx: 0
+ train.pref_loss: sigmoid
+ train.report_to:
+ - none
+ train.resize_vocab: false
+ train.reward_model: []
+ train.save_steps: 250
+ train.swanlab_api_key: ''
+ train.swanlab_link: ''
+ train.swanlab_mode: cloud
+ train.swanlab_project: llamafactory
+ train.swanlab_run_name: ''
+ train.swanlab_workspace: ''
+ train.train_on_prompt: false
+ train.training_stage: Supervised Fine-Tuning
+ train.use_apollo: false
+ train.use_badam: false
+ train.use_dora: false
+ train.use_galore: false
+ train.use_llama_pro: false
+ train.use_pissa: false
+ train.use_rslora: false
+ train.use_swanlab: false
+ train.val_size: 0
+ train.warmup_steps: 150
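
A few quantities implied by this config, worth cross-checking against the README:

```python
# Derived from the LLaMA Board config: effective batch size and warmup fraction.
per_device, grad_accum = 5, 5
effective_batch = per_device * grad_accum     # 25, matches total_train_batch_size
warmup, total_steps = 150, 880                # total_steps taken from trainer_state.json
print(effective_batch, warmup / total_steps)  # 25 0.1704...
```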
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_pan_and_scan": null,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "Gemma3ImageProcessor",
+   "image_seq_length": 256,
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "pan_and_scan_max_num_crops": null,
+   "pan_and_scan_min_crop_size": null,
+   "pan_and_scan_min_ratio_to_activate": null,
+   "processor_class": "Gemma3Processor",
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 896,
+     "width": 896
+   }
+ }
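
Here `rescale_factor` is 1/255 and mean = std = 0.5, so pixels are mapped to [-1, 1] after a bilinear resize to 896x896 (`resample: 2` is PIL's BILINEAR). The equivalent arithmetic for one hypothetical pixel:

```python
# Gemma3ImageProcessor normalization, per the config above.
pixel = 200                          # hypothetical 8-bit channel value
rescaled = pixel * (1 / 255)         # rescale_factor = 0.00392156862745098
normalized = (rescaled - 0.5) / 0.5  # (x - mean) / std
print(normalized)                    # ~0.569, inside [-1, 1]
```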
processor_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "image_seq_length": 256,
+   "processor_class": "Gemma3Processor"
+ }
running_log.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<eos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.99728014505893,
+   "num_input_tokens_seen": 11278520,
+   "total_flos": 7.651349314204147e+17,
+   "train_loss": 1.351207665421746,
+   "train_runtime": 8552.2085,
+   "train_samples_per_second": 2.578,
+   "train_steps_per_second": 0.103
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,36 @@
+ {"current_steps": 25, "total_steps": 880, "loss": 6.768, "lr": 3.2000000000000003e-06, "epoch": 0.11332728921124206, "percentage": 2.84, "elapsed_time": "0:04:01", "remaining_time": "2:17:49", "throughput": 1323.46, "total_tokens": 320000}
+ {"current_steps": 50, "total_steps": 880, "loss": 2.2714, "lr": 6.533333333333334e-06, "epoch": 0.22665457842248413, "percentage": 5.68, "elapsed_time": "0:08:03", "remaining_time": "2:13:39", "throughput": 1324.71, "total_tokens": 640000}
+ {"current_steps": 75, "total_steps": 880, "loss": 1.6663, "lr": 9.866666666666668e-06, "epoch": 0.3399818676337262, "percentage": 8.52, "elapsed_time": "0:12:04", "remaining_time": "2:09:36", "throughput": 1324.97, "total_tokens": 960000}
+ {"current_steps": 100, "total_steps": 880, "loss": 1.5351, "lr": 1.3200000000000002e-05, "epoch": 0.45330915684496825, "percentage": 11.36, "elapsed_time": "0:16:05", "remaining_time": "2:05:33", "throughput": 1325.22, "total_tokens": 1280000}
+ {"current_steps": 125, "total_steps": 880, "loss": 1.4698, "lr": 1.6533333333333333e-05, "epoch": 0.5666364460562103, "percentage": 14.2, "elapsed_time": "0:20:07", "remaining_time": "2:01:31", "throughput": 1325.37, "total_tokens": 1600000}
+ {"current_steps": 150, "total_steps": 880, "loss": 1.4164, "lr": 1.9866666666666667e-05, "epoch": 0.6799637352674524, "percentage": 17.05, "elapsed_time": "0:24:08", "remaining_time": "1:57:29", "throughput": 1325.44, "total_tokens": 1920000}
+ {"current_steps": 175, "total_steps": 880, "loss": 1.3952, "lr": 1.994670819911521e-05, "epoch": 0.7932910244786945, "percentage": 19.89, "elapsed_time": "0:28:09", "remaining_time": "1:53:27", "throughput": 1325.53, "total_tokens": 2239840}
+ {"current_steps": 200, "total_steps": 880, "loss": 1.3792, "lr": 1.977848341505657e-05, "epoch": 0.9066183136899365, "percentage": 22.73, "elapsed_time": "0:32:11", "remaining_time": "1:49:25", "throughput": 1325.63, "total_tokens": 2559800}
+ {"current_steps": 225, "total_steps": 880, "loss": 1.3537, "lr": 1.949717842791432e-05, "epoch": 1.0226654578422485, "percentage": 25.57, "elapsed_time": "0:36:16", "remaining_time": "1:45:34", "throughput": 1325.71, "total_tokens": 2884920}
+ {"current_steps": 250, "total_steps": 880, "loss": 1.2956, "lr": 1.9106046300942165e-05, "epoch": 1.1359927470534905, "percentage": 28.41, "elapsed_time": "0:40:17", "remaining_time": "1:41:31", "throughput": 1325.77, "total_tokens": 3204920}
+ {"current_steps": 275, "total_steps": 880, "loss": 1.2642, "lr": 1.8609610158889943e-05, "epoch": 1.2493200362647325, "percentage": 31.25, "elapsed_time": "0:44:31", "remaining_time": "1:37:57", "throughput": 1319.47, "total_tokens": 3524920}
+ {"current_steps": 300, "total_steps": 880, "loss": 1.2527, "lr": 1.8013610881746767e-05, "epoch": 1.3626473254759746, "percentage": 34.09, "elapsed_time": "0:48:32", "remaining_time": "1:33:51", "throughput": 1320.02, "total_tokens": 3844920}
+ {"current_steps": 325, "total_steps": 880, "loss": 1.2602, "lr": 1.732494071613579e-05, "epoch": 1.4759746146872166, "percentage": 36.93, "elapsed_time": "0:52:34", "remaining_time": "1:29:46", "throughput": 1320.49, "total_tokens": 4164920}
+ {"current_steps": 350, "total_steps": 880, "loss": 1.2551, "lr": 1.6551563572090855e-05, "epoch": 1.5893019038984586, "percentage": 39.77, "elapsed_time": "0:56:35", "remaining_time": "1:25:41", "throughput": 1320.88, "total_tokens": 4484840}
+ {"current_steps": 375, "total_steps": 880, "loss": 1.2325, "lr": 1.5702422926917872e-05, "epoch": 1.7026291931097008, "percentage": 42.61, "elapsed_time": "1:00:36", "remaining_time": "1:21:37", "throughput": 1321.22, "total_tokens": 4804840}
+ {"current_steps": 400, "total_steps": 880, "loss": 1.2436, "lr": 1.4787338401157888e-05, "epoch": 1.8159564823209429, "percentage": 45.45, "elapsed_time": "1:04:37", "remaining_time": "1:17:33", "throughput": 1321.53, "total_tokens": 5124840}
+ {"current_steps": 425, "total_steps": 880, "loss": 1.2319, "lr": 1.3816892202666591e-05, "epoch": 1.929283771532185, "percentage": 48.3, "elapsed_time": "1:08:39", "remaining_time": "1:13:30", "throughput": 1321.8, "total_tokens": 5444840}
+ {"current_steps": 450, "total_steps": 880, "loss": 1.2132, "lr": 1.2802306751992163e-05, "epoch": 2.045330915684497, "percentage": 51.14, "elapsed_time": "1:12:44", "remaining_time": "1:09:30", "throughput": 1322.03, "total_tokens": 5769800}
+ {"current_steps": 475, "total_steps": 880, "loss": 1.0794, "lr": 1.1755314904214284e-05, "epoch": 2.158658204895739, "percentage": 53.98, "elapsed_time": "1:16:45", "remaining_time": "1:05:26", "throughput": 1322.25, "total_tokens": 6089800}
+ {"current_steps": 500, "total_steps": 880, "loss": 1.0779, "lr": 1.06880242680232e-05, "epoch": 2.271985494106981, "percentage": 56.82, "elapsed_time": "1:20:46", "remaining_time": "1:01:23", "throughput": 1322.44, "total_tokens": 6409680}
+ {"current_steps": 525, "total_steps": 880, "loss": 1.0722, "lr": 9.612777191078257e-06, "epoch": 2.385312783318223, "percentage": 59.66, "elapsed_time": "1:24:58", "remaining_time": "0:57:27", "throughput": 1320.03, "total_tokens": 6729680}
+ {"current_steps": 550, "total_steps": 880, "loss": 1.0663, "lr": 8.542008030801254e-06, "epoch": 2.498640072529465, "percentage": 62.5, "elapsed_time": "1:28:59", "remaining_time": "0:53:23", "throughput": 1320.33, "total_tokens": 7049680}
+ {"current_steps": 575, "total_steps": 880, "loss": 1.056, "lr": 7.4880993611518095e-06, "epoch": 2.611967361740707, "percentage": 65.34, "elapsed_time": "1:33:00", "remaining_time": "0:49:20", "throughput": 1320.6, "total_tokens": 7369680}
+ {"current_steps": 600, "total_steps": 880, "loss": 1.0578, "lr": 6.463238778236287e-06, "epoch": 2.725294650951949, "percentage": 68.18, "elapsed_time": "1:37:02", "remaining_time": "0:45:17", "throughput": 1320.75, "total_tokens": 7689680}
+ {"current_steps": 625, "total_steps": 880, "loss": 1.0531, "lr": 5.479277960676959e-06, "epoch": 2.838621940163191, "percentage": 71.02, "elapsed_time": "1:41:03", "remaining_time": "0:41:13", "throughput": 1320.96, "total_tokens": 8009600}
+ {"current_steps": 650, "total_steps": 880, "loss": 1.0523, "lr": 4.547595614593489e-06, "epoch": 2.951949229374433, "percentage": 73.86, "elapsed_time": "1:45:04", "remaining_time": "0:37:10", "throughput": 1321.16, "total_tokens": 8329600}
+ {"current_steps": 675, "total_steps": 880, "loss": 1.0127, "lr": 3.6789658881265135e-06, "epoch": 3.067996373526745, "percentage": 76.7, "elapsed_time": "1:49:09", "remaining_time": "0:33:09", "throughput": 1321.35, "total_tokens": 8654720}
+ {"current_steps": 700, "total_steps": 880, "loss": 0.9245, "lr": 2.883433777182255e-06, "epoch": 3.1813236627379875, "percentage": 79.55, "elapsed_time": "1:53:11", "remaining_time": "0:29:06", "throughput": 1321.51, "total_tokens": 8974520}
+ {"current_steps": 725, "total_steps": 880, "loss": 0.918, "lr": 2.170198963229372e-06, "epoch": 3.2946509519492295, "percentage": 82.39, "elapsed_time": "1:57:12", "remaining_time": "0:25:03", "throughput": 1321.69, "total_tokens": 9294520}
+ {"current_steps": 750, "total_steps": 880, "loss": 0.9192, "lr": 1.547509426469368e-06, "epoch": 3.4079782411604715, "percentage": 85.23, "elapsed_time": "2:01:13", "remaining_time": "0:21:00", "throughput": 1321.84, "total_tokens": 9614520}
+ {"current_steps": 775, "total_steps": 880, "loss": 0.9177, "lr": 1.022566064657663e-06, "epoch": 3.5213055303717136, "percentage": 88.07, "elapsed_time": "2:05:26", "remaining_time": "0:16:59", "throughput": 1319.93, "total_tokens": 9934520}
+ {"current_steps": 800, "total_steps": 880, "loss": 0.9152, "lr": 6.01439420581047e-07, "epoch": 3.6346328195829556, "percentage": 90.91, "elapsed_time": "2:09:27", "remaining_time": "0:12:56", "throughput": 1320.12, "total_tokens": 10254520}
+ {"current_steps": 825, "total_steps": 880, "loss": 0.917, "lr": 2.889994811704966e-07, "epoch": 3.7479601087941976, "percentage": 93.75, "elapsed_time": "2:13:29", "remaining_time": "0:08:53", "throughput": 1320.3, "total_tokens": 10574520}
+ {"current_steps": 850, "total_steps": 880, "loss": 0.912, "lr": 8.885936006545304e-08, "epoch": 3.8612873980054396, "percentage": 96.59, "elapsed_time": "2:17:30", "remaining_time": "0:04:51", "throughput": 1320.48, "total_tokens": 10894520}
+ {"current_steps": 875, "total_steps": 880, "loss": 0.9239, "lr": 3.333514894887646e-09, "epoch": 3.9746146872166817, "percentage": 99.43, "elapsed_time": "2:21:31", "remaining_time": "0:00:48", "throughput": 1320.65, "total_tokens": 11214520}
+ {"current_steps": 880, "total_steps": 880, "epoch": 3.99728014505893, "percentage": 100.0, "elapsed_time": "2:22:32", "remaining_time": "0:00:00", "throughput": 1318.79, "total_tokens": 11278520}
trainer_state.json ADDED
@@ -0,0 +1,324 @@
+ {
+   "best_global_step": null,
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.99728014505893,
+   "eval_steps": 500,
+   "global_step": 880,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.11332728921124206,
+       "grad_norm": 12.256142616271973,
+       "learning_rate": 3.2000000000000003e-06,
+       "loss": 6.768,
+       "num_input_tokens_seen": 320000,
+       "step": 25
+     },
+     {
+       "epoch": 0.22665457842248413,
+       "grad_norm": 14.015169143676758,
+       "learning_rate": 6.533333333333334e-06,
+       "loss": 2.2714,
+       "num_input_tokens_seen": 640000,
+       "step": 50
+     },
+     {
+       "epoch": 0.3399818676337262,
+       "grad_norm": 2.729668140411377,
+       "learning_rate": 9.866666666666668e-06,
+       "loss": 1.6663,
+       "num_input_tokens_seen": 960000,
+       "step": 75
+     },
+     {
+       "epoch": 0.45330915684496825,
+       "grad_norm": 2.5849289894104004,
+       "learning_rate": 1.3200000000000002e-05,
+       "loss": 1.5351,
+       "num_input_tokens_seen": 1280000,
+       "step": 100
+     },
+     {
+       "epoch": 0.5666364460562103,
+       "grad_norm": 2.5873868465423584,
+       "learning_rate": 1.6533333333333333e-05,
+       "loss": 1.4698,
+       "num_input_tokens_seen": 1600000,
+       "step": 125
+     },
+     {
+       "epoch": 0.6799637352674524,
+       "grad_norm": 1.8172498941421509,
+       "learning_rate": 1.9866666666666667e-05,
+       "loss": 1.4164,
+       "num_input_tokens_seen": 1920000,
+       "step": 150
+     },
+     {
+       "epoch": 0.7932910244786945,
+       "grad_norm": 3.378293037414551,
+       "learning_rate": 1.994670819911521e-05,
+       "loss": 1.3952,
+       "num_input_tokens_seen": 2239840,
+       "step": 175
+     },
+     {
+       "epoch": 0.9066183136899365,
+       "grad_norm": 1.4730700254440308,
+       "learning_rate": 1.977848341505657e-05,
+       "loss": 1.3792,
+       "num_input_tokens_seen": 2559800,
+       "step": 200
+     },
+     {
+       "epoch": 1.0226654578422485,
+       "grad_norm": 1.2745240926742554,
+       "learning_rate": 1.949717842791432e-05,
+       "loss": 1.3537,
+       "num_input_tokens_seen": 2884920,
+       "step": 225
+     },
+     {
+       "epoch": 1.1359927470534905,
+       "grad_norm": 1.7677239179611206,
+       "learning_rate": 1.9106046300942165e-05,
+       "loss": 1.2956,
+       "num_input_tokens_seen": 3204920,
+       "step": 250
+     },
+     {
+       "epoch": 1.2493200362647325,
+       "grad_norm": 1.9639768600463867,
+       "learning_rate": 1.8609610158889943e-05,
+       "loss": 1.2642,
+       "num_input_tokens_seen": 3524920,
+       "step": 275
+     },
+     {
+       "epoch": 1.3626473254759746,
+       "grad_norm": 1.738120436668396,
+       "learning_rate": 1.8013610881746767e-05,
+       "loss": 1.2527,
+       "num_input_tokens_seen": 3844920,
+       "step": 300
+     },
+     {
+       "epoch": 1.4759746146872166,
+       "grad_norm": 1.5475760698318481,
+       "learning_rate": 1.732494071613579e-05,
+       "loss": 1.2602,
+       "num_input_tokens_seen": 4164920,
+       "step": 325
+     },
+     {
+       "epoch": 1.5893019038984586,
+       "grad_norm": 1.3410508632659912,
+       "learning_rate": 1.6551563572090855e-05,
+       "loss": 1.2551,
+       "num_input_tokens_seen": 4484840,
+       "step": 350
+     },
+     {
+       "epoch": 1.7026291931097008,
+       "grad_norm": 1.6581236124038696,
+       "learning_rate": 1.5702422926917872e-05,
+       "loss": 1.2325,
+       "num_input_tokens_seen": 4804840,
+       "step": 375
+     },
+     {
+       "epoch": 1.8159564823209429,
+       "grad_norm": 1.8297406435012817,
+       "learning_rate": 1.4787338401157888e-05,
+       "loss": 1.2436,
+       "num_input_tokens_seen": 5124840,
+       "step": 400
+     },
+     {
+       "epoch": 1.929283771532185,
+       "grad_norm": 1.9106981754302979,
+       "learning_rate": 1.3816892202666591e-05,
+       "loss": 1.2319,
+       "num_input_tokens_seen": 5444840,
+       "step": 425
+     },
+     {
+       "epoch": 2.045330915684497,
+       "grad_norm": 1.4120745658874512,
+       "learning_rate": 1.2802306751992163e-05,
+       "loss": 1.2132,
+       "num_input_tokens_seen": 5769800,
+       "step": 450
+     },
+     {
+       "epoch": 2.158658204895739,
+       "grad_norm": 1.541704535484314,
+       "learning_rate": 1.1755314904214284e-05,
+       "loss": 1.0794,
+       "num_input_tokens_seen": 6089800,
+       "step": 475
+     },
+     {
+       "epoch": 2.271985494106981,
+       "grad_norm": 2.239482879638672,
+       "learning_rate": 1.06880242680232e-05,
+       "loss": 1.0779,
+       "num_input_tokens_seen": 6409680,
+       "step": 500
+     },
+     {
+       "epoch": 2.385312783318223,
+       "grad_norm": 2.1483075618743896,
+       "learning_rate": 9.612777191078257e-06,
+       "loss": 1.0722,
+       "num_input_tokens_seen": 6729680,
+       "step": 525
+     },
+     {
+       "epoch": 2.498640072529465,
+       "grad_norm": 1.631958246231079,
+       "learning_rate": 8.542008030801254e-06,
+       "loss": 1.0663,
+       "num_input_tokens_seen": 7049680,
+       "step": 550
+     },
+     {
+       "epoch": 2.611967361740707,
+       "grad_norm": 1.948183298110962,
+       "learning_rate": 7.4880993611518095e-06,
+       "loss": 1.056,
+       "num_input_tokens_seen": 7369680,
+       "step": 575
+     },
+     {
+       "epoch": 2.725294650951949,
+       "grad_norm": 1.8783904314041138,
+       "learning_rate": 6.463238778236287e-06,
+       "loss": 1.0578,
+       "num_input_tokens_seen": 7689680,
+       "step": 600
+     },
+     {
+       "epoch": 2.838621940163191,
+       "grad_norm": 2.3182311058044434,
+       "learning_rate": 5.479277960676959e-06,
+       "loss": 1.0531,
+       "num_input_tokens_seen": 8009600,
+       "step": 625
+     },
+     {
+       "epoch": 2.951949229374433,
+       "grad_norm": 2.483482837677002,
+       "learning_rate": 4.547595614593489e-06,
+       "loss": 1.0523,
+       "num_input_tokens_seen": 8329600,
+       "step": 650
+     },
+     {
+       "epoch": 3.067996373526745,
+       "grad_norm": 1.3066755533218384,
+       "learning_rate": 3.6789658881265135e-06,
+       "loss": 1.0127,
+       "num_input_tokens_seen": 8654720,
+       "step": 675
+     },
+     {
+       "epoch": 3.1813236627379875,
+       "grad_norm": 1.4423686265945435,
+       "learning_rate": 2.883433777182255e-06,
+       "loss": 0.9245,
+       "num_input_tokens_seen": 8974520,
+       "step": 700
+     },
+     {
+       "epoch": 3.2946509519492295,
+       "grad_norm": 2.5524277687072754,
+       "learning_rate": 2.170198963229372e-06,
+       "loss": 0.918,
+       "num_input_tokens_seen": 9294520,
+       "step": 725
+     },
+     {
+       "epoch": 3.4079782411604715,
+       "grad_norm": 1.343592882156372,
+       "learning_rate": 1.547509426469368e-06,
+       "loss": 0.9192,
+       "num_input_tokens_seen": 9614520,
+       "step": 750
+     },
+     {
+       "epoch": 3.5213055303717136,
+       "grad_norm": 1.9703131914138794,
+       "learning_rate": 1.022566064657663e-06,
+       "loss": 0.9177,
+       "num_input_tokens_seen": 9934520,
+       "step": 775
+     },
+     {
+       "epoch": 3.6346328195829556,
+       "grad_norm": 2.417506694793701,
+       "learning_rate": 6.01439420581047e-07,
+       "loss": 0.9152,
+       "num_input_tokens_seen": 10254520,
+       "step": 800
+     },
+     {
+       "epoch": 3.7479601087941976,
+       "grad_norm": 1.23152494430542,
+       "learning_rate": 2.889994811704966e-07,
+       "loss": 0.917,
+       "num_input_tokens_seen": 10574520,
+       "step": 825
+     },
+     {
+       "epoch": 3.8612873980054396,
+       "grad_norm": 1.5819039344787598,
+       "learning_rate": 8.885936006545304e-08,
+       "loss": 0.912,
+       "num_input_tokens_seen": 10894520,
+       "step": 850
+     },
+     {
+       "epoch": 3.9746146872166817,
+       "grad_norm": 1.4086824655532837,
+       "learning_rate": 3.333514894887646e-09,
+       "loss": 0.9239,
+       "num_input_tokens_seen": 11214520,
+       "step": 875
+     },
+     {
+       "epoch": 3.99728014505893,
+       "num_input_tokens_seen": 11278520,
+       "step": 880,
+       "total_flos": 7.651349314204147e+17,
+       "train_loss": 1.351207665421746,
+       "train_runtime": 8552.2085,
+       "train_samples_per_second": 2.578,
+       "train_steps_per_second": 0.103
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 880,
+   "num_input_tokens_seen": 11278520,
+   "num_train_epochs": 4,
+   "save_steps": 250,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 7.651349314204147e+17,
+   "train_batch_size": 5,
+   "trial_name": null,
+   "trial_params": null
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:980d246918b584aa93da5b481907857177d001ae8d29652cd9da329db2bdc8ef
+ size 5752
training_args.yaml ADDED
@@ -0,0 +1,37 @@
+ bf16: true
+ create_new_adapter: true
+ cutoff_len: 512
+ dataset: top_10_training_dataset,top_10_validation_dataset
+ dataset_dir: data
+ ddp_timeout: 180000000
+ do_train: true
+ finetuning_type: lora
+ flash_attn: auto
+ gradient_accumulation_steps: 5
+ include_num_input_tokens_seen: true
+ label_smoothing_factor: 0.05
+ learning_rate: 2.0e-05
+ logging_steps: 25
+ lora_alpha: 64
+ lora_dropout: 0.05
+ lora_rank: 32
+ lora_target: q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
+ loraplus_lr_ratio: 8
+ lr_scheduler_type: cosine_with_restarts
+ max_grad_norm: 0.3
+ max_samples: 100000
+ model_name_or_path: google/gemma-3-12b-it
+ num_train_epochs: 4.0
+ optim: adamw_torch
+ output_dir: saves/Gemma-3-12B-Instruct/lora/train_2025-06-07-17-55-57
+ packing: false
+ per_device_train_batch_size: 5
+ plot_loss: true
+ preprocessing_num_workers: 16
+ report_to: none
+ save_steps: 250
+ stage: sft
+ template: alpaca
+ trust_remote_code: true
+ warmup_steps: 150
+ weight_decay: 0.01
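
This file is in LLaMA-Factory's training-argument format. Assuming the two datasets are registered in data/dataset_info.json, the run should be reproducible with `llamafactory-cli train training_args.yaml` (untested here); `plot_loss: true` is what emits the training_loss.png added below.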
training_loss.png ADDED