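# SFT hyperparameters in LLaMA-Factory style (the key names match
# LLaMA-Factory's training arguments; the framework itself is inferred,
# not stated in the file): full-parameter fine-tuning of
# Qwen/Qwen2.5-7B-Instruct on mlfoundations-dev/glaive_reasoning_300k,
# read in ShareGPT format. The assistant_tag/content_tag/messages/
# role_tag/user_tag keys below map the dataset's "conversations" field
# onto chat roles.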
assistant_tag: gpt
bf16: 'True'
content_tag: value
cutoff_len: '16384'
dataloader_num_workers: '4'
dataloader_persistent_workers: 'True'
dataloader_pin_memory: 'True'
dataset: mlfoundations-dev/glaive_reasoning_300k
dataset_dir: ONLINE
ddp_timeout: '180000000'
deepspeed: /opt/ml/code/zero3.json
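# /opt/ml/code and /opt/ml/model follow SageMaker container conventions;
# zero3.json is presumably a DeepSpeed ZeRO stage-3 config (inferred from
# the filename, not shown here).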
do_train: 'True'
enable_liger_kernel: 'True'
finetuning_type: full
formatting: sharegpt
global_batch_size: '512'
gradient_accumulation_steps: '8'
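# With per_device_train_batch_size 1 and gradient_accumulation_steps 8,
# global_batch_size 512 implies 512 / (1 * 8) = 64 devices (an inference
# from these values, not stated in the file).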
hub_model_id: mlfoundations-dev/glaive_reasoning_300k
learning_rate: 8e-05
logging_steps: '1'
lr_scheduler_type: cosine
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
neat_packing: 'True'
num_train_epochs: '3.0'
output_dir: /opt/ml/model
overwrite_cache: 'True'
packing: 'True'
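# packing concatenates training samples into sequences up to cutoff_len
# (16384 tokens); neat_packing additionally keeps attention from crossing
# packed-sample boundaries (LLaMA-Factory semantics, inferred from the
# key names).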
per_device_train_batch_size: '1'
plot_loss: 'True'
preprocessing_num_workers: '16'
push_to_db: 'True'
push_to_hub: 'True'
report_to: wandb
role_tag: from
run_name: glaive_reasoning_300k
save_strategy: epoch
stage: sft
template: qwen25
user_tag: human
warmup_ratio: '0.1'
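# A minimal launch sketch, assuming LLaMA-Factory's CLI and a hypothetical
# filename for this file:
#   llamafactory-cli train glaive_reasoning_300k.yaml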