Upload finetuning_config.yaml with huggingface_hub
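The commit title indicates the file was pushed with huggingface_hub. A minimal sketch of the kind of call that produces such a commit (the exact invocation is an assumption; the repo id is taken from output_dir in the config below, and a token is assumed to be configured, e.g. via `huggingface-cli login`):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="finetuning_config.yaml",
    path_in_repo="finetuning_config.yaml",
    repo_id="Grogros/OLMo-2-0425-1B-Instruct-distillation-alpaca-5.0-AlpacaPoison",
    commit_message="Upload finetuning_config.yaml with huggingface_hub",
)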
finetuning_config.yaml · ADDED · +67 -0
@@ -0,0 +1,67 @@
backdoor_dataset: !!python/object/apply:src.data.dataset.DatasetType
- AlpacaPoison
balance_safecoder: false
base_model: allenai/OLMo-2-0425-1B-Instruct
dtype: bfloat16
lora_config: null
main_device: cuda:0
meta_learning_configs:
- dataset: !!python/object/apply:src.data.dataset.DatasetType
  - AlpacaGPT4
  device: cuda:1
  gradient_accumulation_steps: 1
  learning_rate: 0.0001
  loss_type: ce
  num_steps: 50
  optimizers:
  - adam
  per_device_batch_size: 1
  reg: 0.7
  run_every_n_steps: 1
  safecoder_lambda: 1.0
  sequence_length: 512
  warmup_steps: 0
meta_learning_name: alpaca
no_backdoor: false
pgd_training_config: null
precompute_distillation: false
random_training_config:
  as_regularizer: false
  device: cuda:2
  loss_type: ce
  n_samples: 1
  norm: 5.0
  reg: 1.0
  safecoder_lambda: 1.0
reg_dataset: !!python/object/apply:src.data.dataset.DatasetType
- AlpacaGPT4
reg_device: cuda:3
reg_lambda: 1.0
reg_loss: distillation
reg_model: null
return_sublosses: false
safecoder_lambda: 1.0
sequence_length: 512
streaming: true
training_args:
  bf16: false
  ddp_find_unused_parameters: false
  do_train: true
  fp16: false
  gradient_accumulation_steps: 1
  gradient_checkpointing: false
  hub_strategy: all_checkpoints
  learning_rate: 2.0e-05
  logging_steps: 10
  lr_scheduler_type: cosine
  max_steps: 2000
  num_train_epochs: 1
  optim: adafactor
  output_dir: Grogros/OLMo-2-0425-1B-Instruct-distillation-alpaca-5.0-AlpacaPoison
  overwrite_output_dir: true
  per_device_train_batch_size: 32
  push_to_hub: true
  report_to: none
  save_steps: 2000
  save_strategy: steps
  warmup_ratio: 0.1
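Note the `!!python/object/apply:src.data.dataset.DatasetType` tags: yaml.safe_load rejects them, and yaml.unsafe_load would try to import src.data.dataset. A hedged sketch that reads the config without that package by mapping each Python-object tag to its scalar payload (ConfigLoader and _construct_py_apply are illustrative helpers, not part of the repo):

import yaml

class ConfigLoader(yaml.SafeLoader):
    """SafeLoader extended to tolerate python/object/apply tags."""

def _construct_py_apply(loader, tag_suffix, node):
    # e.g. tag_suffix == "src.data.dataset.DatasetType" and the node holds
    # ["AlpacaPoison"]; return the member name as a plain string instead of
    # importing and constructing the actual enum.
    return loader.construct_sequence(node)[0]

ConfigLoader.add_multi_constructor(
    "tag:yaml.org,2002:python/object/apply:", _construct_py_apply
)

with open("finetuning_config.yaml") as f:
    cfg = yaml.load(f, Loader=ConfigLoader)

print(cfg["base_model"])        # allenai/OLMo-2-0425-1B-Instruct
print(cfg["backdoor_dataset"])  # AlpacaPoison

Mapping the tag to a string keeps the loader safe for untrusted files; code that actually needs the DatasetType enum would resolve the string against src.data.dataset instead.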