"""Supervised fine-tuning (SFT) of Qwen/Qwen3-0.6B on open-r1/codeforces-cots.

Trains a LoRA adapter with TRL's SFTTrainer on a 1k-example demo subset,
logs metrics to Trackio, and pushes checkpoints to the Hugging Face Hub.
"""

import os

import trackio  # noqa: F401  (fail early if the Trackio backend for report_to="trackio" is missing)
from datasets import load_dataset
from huggingface_hub import login
from peft import LoraConfig
from transformers import AutoTokenizer
from trl import SFTConfig, SFTTrainer

# Authenticate with the Hub so checkpoints can be pushed during training.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Logged in to Hugging Face Hub")
else:
    print("Warning: HF_TOKEN not found in environment")

# Load the "solutions" subset of the Codeforces chain-of-thought dataset.
print("Loading open-r1/codeforces-cots dataset...")
dataset = load_dataset("open-r1/codeforces-cots", "solutions", split="train")
print(f"Full dataset loaded: {len(dataset)} examples")

# Keep a small subset so the demo run finishes quickly.
dataset = dataset.select(range(min(1000, len(dataset))))
print(f"Using {len(dataset)} examples for demo training")

print("Preparing dataset for chat-based SFT...")


def filter_valid_messages(example):
    """Filter out samples with empty or invalid messages."""
    messages = example.get("messages", [])
    # Require at least two turns (e.g. one user and one assistant message).
    if not messages or len(messages) < 2:
        return False
    # Drop conversations that contain any empty message.
    for msg in messages:
        if not msg.get("content"):
            return False
    return True


dataset = dataset.filter(filter_valid_messages)
print(f"After filtering: {len(dataset)} examples")

# Keep only the "messages" column; SFTTrainer applies the chat template itself.
columns_to_remove = [col for col in dataset.column_names if col != "messages"]
dataset = dataset.remove_columns(columns_to_remove)
print(f"Dataset columns: {dataset.column_names}")

# Hold out 10% of the subset for evaluation.
print("Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
print(f"  Train: {len(train_dataset)} examples")
print(f"  Eval:  {len(eval_dataset)} examples")

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # fall back to EOS for padding

config = SFTConfig(
    # Hub / output settings
    output_dir="qwen3-0.6b-codeforces-sft",
    push_to_hub=True,
    hub_model_id="Godsonntungi2/qwen3-0.6b-codeforces-sft",
    hub_strategy="every_save",
    hub_token=hf_token,
    # Core hyperparameters
    num_train_epochs=3,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=2e-5,
    max_length=1024,  # max sequence length in tokens; longer examples are truncated
    # Logging and checkpointing
    logging_steps=10,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,
    # Evaluation is disabled for this demo run even though an eval split is
    # passed to the trainer below; set eval_strategy="steps" to enable it.
    eval_strategy="no",
    # Schedule and memory savings
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    gradient_checkpointing=True,
    bf16=True,
    # Experiment tracking with Trackio
    report_to="trackio",
    project="qwen3-codeforces-sft",
    run_name="demo-1k-v2",
)
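
# Back-of-the-envelope step count (an estimate, not measured): if roughly 900
# train examples survive filtering and the 10% eval split, the effective batch
# of 2 per device * 8 accumulation steps = 16 sequences gives ~56 optimizer
# steps per epoch, i.e. ~170 steps across 3 epochs on a single GPU.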

# LoRA: train low-rank adapters on every attention and MLP projection instead
# of updating all of the base model's weights.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)

print("Initializing trainer with Qwen/Qwen3-0.6B...")
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",  # pass the model id; SFTTrainer loads the weights itself
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=tokenizer,
    args=config,
    peft_config=peft_config,
)

print("Starting training...")
trainer.train()

print("Pushing to Hub...")
trainer.push_to_hub()

print("Complete! Model at: https://huggingface.co/Godsonntungi2/qwen3-0.6b-codeforces-sft")
print("View metrics at: https://huggingface.co/spaces/Godsonntungi2/trackio")