UnimixLM
Collection
5 items
•
Updated
# Training callbacks; each sub-key is instantiated from its `_target_` class
# path (Hydra-style instantiation). Key order within each mapping is
# alphabetical, matching the rest of this file.
callbacks:
  # Logs gradient norms (total norm only; no per-group histograms).
  grad_norm:
    _target_: primer.callbacks.grad_norm.GradNorm
    check_clipping: false
    group_separator: /
    histogram_freq: null
    log_weight_distribution: false
    norm_type: 2
    only_total: true
  # Logs the current learning rate.
  lr_monitor:
    _target_: primer.callbacks.lr_monitor.SimpleLearningRateMonitor
  # Periodic checkpointing: every 2000 steps, keep all (save_top_k: -1),
  # with a symlinked `last` checkpoint (save_last: link).
  model_checkpoint:
    _target_: primer.callbacks.model_checkpoint.ModelCheckpoint
    dirpath: .checkpoints
    enable_version_counter: false
    every_n_train_steps: 2000
    filename: '{step}'  # checkpoint files named by global step
    save_initial_checkpoint: true
    save_last: link
    save_top_k: -1  # -1 = keep every checkpoint
    verbose: true
  # Logs training throughput.
  speed_monitor:
    _target_: primer.callbacks.speed_monitor.SpeedMonitor
# DataLoader configuration (PyTorch-style loader options).
data:
  batch_size: 64
  drop_last: true
  eval_batch_size: 64
  # Restrict attention within each packed document — presumably a causal
  # mask that does not cross document boundaries; confirm against the
  # dataset/collator implementation.
  intra_doc_causal_mask: true
  multiprocessing_context: null  # null = loader default
  num_workers: 8
  persistent_workers: false
  pin_memory: true
  prefetch_factor: 2
  shuffle_seed: 42
# Experiment loggers, instantiated via `_target_`.
loggers:
  tensorboard:
    _target_: primer.model.TensorBoardLogger
    name: ''  # empty run name — logs directly under save_dir
    save_dir: ./
    version: null  # null = let the logger pick a version
# Model architecture (LLaMA-style config; see model_type below).
# NOTE(review): hidden_size (768) / num_attention_heads (6) = 128, matching
# head_dim; num_key_value_heads == num_attention_heads, i.e. no GQA.
model:
  attention_bias: false
  attention_dropout: 0.0
  head_dim: 128
  hidden_act: silu
  hidden_size: 768
  initializer_range: 0.02
  intermediate_size: 2048
  mlp_bias: false
  model_type: llama
  name: small
  num_attention_heads: 6
  num_hidden_layers: 6
  num_key_value_heads: 6
  pretraining_tp: 1
  rms_norm_eps: 1.0e-05
  rope_scaling: null  # null = no RoPE scaling
  rope_theta: 10000.0
  tie_word_embeddings: true
# Optimizer and LR-schedule configuration.
optim:
  # Gradient-accumulation schedule keyed by step — presumably
  # {start_step: accumulation_factor}; here factor 2 from step 0.
  # TODO confirm key semantics against the training loop.
  grad_acc_schedule:
    0: 2
  lr: 0.0006
  num_warmup_steps: 2000
  # Keyword arguments forwarded to the optimizer constructor.
  optim_kwargs:
    betas:
      - 0.9
      - 0.95
    capturable: true
    eps: 1.0e-08
    fused: true
  optim_name: adamw
  # Keyword arguments forwarded to the LR scheduler.
  scheduler_kwargs:
    min_lr_ratio: 0.01
    num_decay_steps: 4000
  scheduler_name: warmup_stable_decay
  set_grad_to_none: true
  weight_decay: 0.01
  weight_decay_embedding: false  # exclude embeddings from weight decay
  zloss_factor: null  # null = z-loss disabled
# --- Top-level run settings ---
# Parent directory for training outputs.
out_parent_folder: model_train
# Working directory of the run.
pwd: ./unimixlm
# Resume from the symlinked last checkpoint (see callbacks.model_checkpoint).
resume_from_checkpoint: .checkpoints/last.ckpt
# Run folder name: <model.name>_<tok_name>__<timestamp>.
run_folder: small_bpe128k__2025-07-17T12-35-25
save_initial_checkpoint: true
seed: 42
# Tokenizer selection and location.
tok_name: bpe128k
tok_path: ./unimixlm/tokenizers/bpe128k
tok_subfolder: null
# Compile the model with torch.compile.
torch_compile: true
# Pre-tokenized training data for the selected tokenizer.
train_data_path: ./unimixlm/data/bpe128k/train
# Trainer settings (Lightning-style flags).
trainer:
  accelerator: gpu
  deterministic: false
  devices: 1
  enable_progress_bar: true
  fast_dev_run: false
  # Clip gradients by total norm at 1.0 (pairs with the GradNorm callback).
  gradient_clip_algorithm: norm
  gradient_clip_val: 1.0
  limit_train_batches: null  # null = no cap on training batches
  limit_val_batches: 500
  log_every_n_steps: 1
  max_steps: 50000
  precision: bf16-true
  # Validate every 2000 steps, aligned with checkpointing cadence.
  val_check_interval: 2000
# Enable Liger kernels — presumably fused Triton ops; confirm against the
# model setup code.
use_liger: true
# Pre-tokenized validation data for the selected tokenizer.
val_data_path: ./unimixlm/data/bpe128k/validation