bf16: true
create_new_adapter: true
cutoff_len: 512
dataset: top_5_training_dataset
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 5
include_num_input_tokens_seen: true
label_smoothing_factor: 0.05
learning_rate: 2.0e-05
logging_steps: 25
lora_alpha: 64
lora_dropout: 0.05
lora_rank: 32
lora_target: q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj
loraplus_lr_ratio: 8
lr_scheduler_type: cosine_with_restarts
max_grad_norm: 0.3
max_samples: 100000
model_name_or_path: google/gemma-3-12b-it
num_train_epochs: 4.0
optim: adamw_torch
output_dir: saves/Gemma-3-12B-Instruct/lora/train_2025-05-14-10-29-44
packing: false
per_device_train_batch_size: 5
plot_loss: true
preprocessing_num_workers: 16
report_to: none
save_steps: 250
stage: sft
template: alpaca
trust_remote_code: true
warmup_steps: 150
weight_decay: 0.01
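This is a LLaMA-Factory SFT configuration; with LLaMA-Factory installed, saving it as, say, gemma3_lora_sft.yaml and running `llamafactory-cli train gemma3_lora_sft.yaml` launches the run, writing the LoRA adapter, checkpoints, and loss plot to the output_dir above. As a rough sanity check on how the batch and schedule values interact, here is a minimal back-of-the-envelope sketch; the dataset size and GPU count are assumptions, since neither appears in the config:

# Schedule math for the config above. num_examples and world_size are
# assumptions: the true size of top_5_training_dataset and the GPU count
# are not given in the config.
num_examples = 10_000   # assumption: replace with the real dataset size
per_device_batch = 5    # per_device_train_batch_size
grad_accum = 5          # gradient_accumulation_steps
world_size = 1          # assumption: single GPU
epochs = 4.0            # num_train_epochs

effective_batch = per_device_batch * grad_accum * world_size  # 25 sequences/step here
steps_per_epoch = -(-num_examples // effective_batch)         # ceil(10000 / 25) = 400
total_steps = int(steps_per_epoch * epochs)                   # 1600 optimizer steps
print(f"effective batch: {effective_batch}, total optimizer steps: {total_steps}")
print(f"warmup fraction: {150 / total_steps:.1%}, checkpoints: {total_steps // 250}")

Under these assumed numbers, warmup_steps: 150 covers roughly the first 9% of training and save_steps: 250 yields about six checkpoints; if the real dataset is much smaller, both values would likely need to be scaled down to stay meaningful.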