# OpenThinker2-32B / configs.yaml
# Uploaded by sedrickkeh; duplicated from
# mlfoundations-dev/DCFT-hero_run_2_fix_conversations-etash (commit 274d459, verified)
# SFT training configuration (stage: sft, see below) for a 32B Qwen2.5 model.
# NOTE(review): key semantics below follow LLaMA-Factory conventions inferred
# from the key names (template/formatting/stage/finetuning_type) — confirm
# against the trainer that consumes this file.

# --- ShareGPT dataset field mapping (formatting: sharegpt) ---
assistant_tag: assistant  # role value marking assistant turns
bf16: true  # train in bfloat16 mixed precision
content_tag: value  # message-text field name in each conversation turn
cutoff_len: 16384  # max sequence length in tokens — longer samples truncated (presumably; verify trainer behavior)
dataloader_num_workers: 4
dataloader_persistent_workers: true
dataloader_pin_memory: true
dataset: /leonardo_work/EUHPC_E03_068/eguha/datasets_cache/mlfoundations-dev/hero_run_2_fix_conversations
dataset_dir: ONLINE  # load dataset definition online rather than from a local dataset_info dir — TODO confirm
ddp_timeout: 180000000  # DDP collective timeout (very large, effectively disabled — likely deliberate for slow preprocessing)
deepspeed: dcft/train/zero3.json  # DeepSpeed ZeRO stage-3 config
do_train: true
enable_liger_kernel: false
finetuning_type: full  # full-parameter fine-tuning (no LoRA/adapters)
flash_attn: sdpa  # use PyTorch scaled-dot-product attention backend
formatting: sharegpt  # dataset uses ShareGPT conversation schema
global_batch_size: 512  # NOTE(review): with per_device_train_batch_size=1 and
                        # gradient_accumulation_steps=1 this implies ~512 data-parallel
                        # ranks, unless the trainer derives accumulation from this key — verify
gradient_accumulation_steps: 1
hub_model_id: mlfoundations-dev/DCFT-hero_run_2_fix_conversations-etash  # target repo if pushing (push_to_hub is false below)
include_hp: dcft/train/hp_settings/reasoning_32b_5epoch.yaml  # extra hyperparameter file — may override keys here; confirm merge order
learning_rate: 8.0e-05
logging_steps: 1  # log every optimizer step
lr_scheduler_type: cosine
messages: conversations  # dataset column holding the turn list
model_name_or_path: /leonardo_work/EUHPC_E03_068/eguha/models_cache/models--Qwen--Qwen2.5-32B-Instruct/snapshots/5ede1c97bbab6ce5cda5812749b4c0bdf79b18dd/
neat_packing: true  # packing without cross-sample attention contamination (presumed LLaMA-Factory semantics)
num_train_epochs: 5.0
output_dir: /leonardo_work/EUHPC_E03_068/eguha/checkoints/hero_run_2_fix_conversations
overwrite_cache: true  # rebuild preprocessing cache instead of reusing it
packing: true  # pack multiple samples per sequence up to cutoff_len
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: false
push_to_hub: false  # keep checkpoints local; hub_model_id above is unused while false
report_to: wandb
role_tag: from  # field naming the speaker role in each turn
run_name: DCFT-hero_run_2_fix_conversations-etash  # wandb run name
save_steps: 500
save_strategy: steps  # checkpoint every save_steps optimizer steps
stage: sft  # supervised fine-tuning
template: qwen25  # chat template applied at tokenization
tokenized_path: /leonardo_work/EUHPC_E03_068/DCFT_shared/datasets/mlfoundations-dev/hero_run_2_fix_conversations_tokenized
torch_compile: true
use_unsloth_gc: false
user_tag: user  # role value marking user turns
warmup_ratio: 0.1  # fraction of total steps used for LR warmup