### model
model_name_or_path: mistralai/Ministral-8B-Instruct-2410
trust_remote_code: true
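# trust_remote_code lets Transformers execute custom modeling code bundled with the Hub checkpoint; enable it only for repositories you trust.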
### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
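# lora_target: all asks LLaMA-Factory to attach LoRA adapters to every linear module in the model, rather than to a hand-picked list such as q_proj,v_proj.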
### dataset
dataset: course-train-fold10
dataset_dir: data_private
template: mistral
cutoff_len: 1024
# max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16
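# cutoff_len caps tokenized examples at 1024 tokens; max_samples stays commented out so the whole fold is used for training.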
### output
output_dir: saves/psy-course/Ministral-8B-Instruct-2410/train/fold10
logging_steps: 10
save_steps: 50
plot_loss: true
overwrite_output_dir: true
save_total_limit: 3
push_to_hub: true
hub_model_id: chchen/Ministral-8B-Instruct-2410-PsyCourse-fold10
load_best_model_at_end: true
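# load_best_model_at_end needs checkpointing and evaluation to line up: the HF Trainer requires save_steps (50) to be a round multiple of eval_steps (50), which holds here.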
### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 16
learning_rate: 1.0e-4
num_train_epochs: 5.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000
enable_liger_kernel: true
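# Effective batch size: 1 (per_device_train_batch_size) x 16 (gradient_accumulation_steps) = 16 sequences per optimizer step per device.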
### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 50
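# Usage sketch (assumes a standard LLaMA-Factory install; the filename below is illustrative, not from the source):
#   llamafactory-cli train fold10_sft.yaml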