cutoff_len: 2070
dataset: sharegpt_common_zh
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: auto
fp16: true
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: mistralai/Mistral-7B-Instruct-v0.2
num_train_epochs: 3.0
optim: adamw_torch
output_dir: saves/Mistral-7B-v0.2-Chat/lora/train_2024-05-17-13-21-02
packing: false
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
report_to: none
save_steps: 100
stage: sft
template: mistral
warmup_steps: 0
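These arguments follow LLaMA-Factory's YAML format (the timestamped `output_dir` suggests the file was exported from its web UI) and can be passed to its CLI, e.g. `llamafactory-cli train config.yaml`. Note that with `per_device_train_batch_size: 1` and `gradient_accumulation_steps: 8`, the effective batch size is 8 per device. For orientation, here is a minimal sketch of what the LoRA and quantization settings correspond to in plain `transformers`/`peft` code; this is an approximation under standard Hugging Face APIs, not the script LLaMA-Factory actually runs:

```python
# Sketch: map the YAML's quantization_bit/lora_* settings onto peft/transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

model_name = "mistralai/Mistral-7B-Instruct-v0.2"  # model_name_or_path

# quantization_bit: 4 -> load the base model in 4-bit (QLoRA-style)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # fp16: true
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# lora_rank / lora_alpha / lora_dropout / lora_target from the config above
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA adapters are trainable
```

Targeting only `q_proj` and `v_proj` at rank 8 keeps the trainable parameter count small (well under 1% of the 7B base model), which is what makes this run feasible on a single consumer GPU alongside the 4-bit quantized weights.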