Grogros committed · Commit ec2165d (verified) · 1 parent: 4d25407

Upload finetuning_config.yaml with huggingface_hub

Files changed (1)
  1. finetuning_config.yaml +60 -0
finetuning_config.yaml ADDED
@@ -0,0 +1,60 @@
+ backdoor_dataset: !!python/object/apply:src.data.dataset.DatasetType
+ - AlpacaPoison
+ balance_safecoder: false
+ base_model: allenai/OLMo-2-0425-1B-Instruct
+ dtype: bfloat16
+ lora_config: null
+ main_device: cuda:0
+ meta_learning_configs:
+ - dataset: !!python/object/apply:src.data.dataset.DatasetType
+   - AlpacaGPT4
+   device: cuda:1
+   gradient_accumulation_steps: 1
+   learning_rate: 0.0006
+   loss_type: ce
+   num_steps: 50
+   optimizers:
+   - adam
+   per_device_batch_size: 1
+   reg: 0.7
+   run_every_n_steps: 1
+   safecoder_lambda: 1.0
+   sequence_length: 512
+   warmup_steps: 0
+ meta_learning_name: alpaca
+ no_backdoor: false
+ pgd_training_config: null
+ precompute_distillation: false
+ random_training_config: null
+ reg_dataset: !!python/object/apply:src.data.dataset.DatasetType
+ - AlpacaGPT4
+ reg_device: cuda:3
+ reg_lambda: 1.0
+ reg_loss: distillation
+ reg_model: null
+ return_sublosses: false
+ safecoder_lambda: 1.0
+ sequence_length: 512
+ streaming: true
+ training_args:
+   bf16: false
+   ddp_find_unused_parameters: false
+   do_train: true
+   fp16: false
+   gradient_accumulation_steps: 1
+   gradient_checkpointing: false
+   hub_strategy: all_checkpoints
+   learning_rate: 0.0006
+   logging_steps: 10
+   lr_scheduler_type: cosine
+   max_steps: 2000
+   num_train_epochs: 1
+   optim: adafactor
+   output_dir: Grogros/OLMo-2-0425-1B-Instruct-distillation-alpaca-AlpacaPoison-noNoise
+   overwrite_output_dir: true
+   per_device_train_batch_size: 32
+   push_to_hub: true
+   report_to: none
+   save_steps: 2000
+   save_strategy: steps
+   warmup_ratio: 0.1
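For reference, a minimal sketch of pulling this file back down and inspecting it, assuming the config lives in the Hub repo named by `output_dir` (since `push_to_hub: true`). Note the dataset fields are tagged `!!python/object/apply:src.data.dataset.DatasetType`, which PyYAML can only resolve if the training repo's `src` package is importable; the sketch instead registers a constructor that keeps just the enum member name as a string.

```python
import yaml
from huggingface_hub import hf_hub_download

# The dataset fields use !!python/object/apply:src.data.dataset.DatasetType.
# Without the training repo's `src` package on the path, PyYAML cannot rebuild
# the enum, so map the tag to its plain argument (e.g. "AlpacaPoison") instead.
class ConfigLoader(yaml.SafeLoader):
    pass

def _dataset_type(loader, node):
    # The tag wraps a one-element sequence; keep the member name as a string.
    return loader.construct_sequence(node)[0]

ConfigLoader.add_constructor(
    "tag:yaml.org,2002:python/object/apply:src.data.dataset.DatasetType",
    _dataset_type,
)

# repo_id is an assumption, inferred from output_dir and push_to_hub: true.
path = hf_hub_download(
    repo_id="Grogros/OLMo-2-0425-1B-Instruct-distillation-alpaca-AlpacaPoison-noNoise",
    filename="finetuning_config.yaml",
)
with open(path) as f:
    config = yaml.load(f, Loader=ConfigLoader)

print(config["base_model"])        # allenai/OLMo-2-0425-1B-Instruct
print(config["backdoor_dataset"])  # AlpacaPoison
```

The keys under `training_args` all match `transformers.TrainingArguments` field names, so `TrainingArguments(**config["training_args"])` should reconstruct the trainer settings, assuming the surrounding training code uses the Hugging Face `Trainer`.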