Experiment Configuration

callbacks:
  grad_norm:
    _target_: primer.callbacks.grad_norm.GradNorm
    check_clipping: false
    group_separator: /
    histogram_freq: null
    log_weight_distribution: false
    norm_type: 2
    only_total: true
  lr_monitor:
    _target_: primer.callbacks.lr_monitor.SimpleLearningRateMonitor
  model_checkpoint:
    _target_: primer.callbacks.model_checkpoint.ModelCheckpoint
    dirpath: .checkpoints
    enable_version_counter: false
    every_n_train_steps: 2000
    filename: '{step}'
    save_initial_checkpoint: true
    save_last: link
    save_top_k: -1
    verbose: true
  speed_monitor:
    _target_: primer.callbacks.speed_monitor.SpeedMonitor
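
Each entry under `callbacks:` names a class via its `_target_` dotted path and passes the remaining keys as constructor arguments. A minimal sketch of that instantiation pattern (Hydra-style; the generic `instantiate` helper below is illustrative, not the `primer` API, and the `primer.callbacks.*` modules are assumed to be importable in the real training environment):

```python
import importlib

def instantiate(cfg: dict):
    """Resolve the `_target_` dotted path and call it with the remaining keys."""
    module_path, class_name = cfg["_target_"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), class_name)
    kwargs = {k: v for k, v in cfg.items() if k != "_target_"}
    return cls(**kwargs)

# Hypothetical usage with the grad_norm entry above:
grad_norm_cfg = {
    "_target_": "primer.callbacks.grad_norm.GradNorm",
    "check_clipping": False,
    "group_separator": "/",
    "histogram_freq": None,
    "log_weight_distribution": False,
    "norm_type": 2,
    "only_total": True,
}
# callback = instantiate(grad_norm_cfg)  # requires the `primer` package on PYTHONPATH
```
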
data:
  batch_size: 64
  drop_last: true
  eval_batch_size: 64
  intra_doc_causal_mask: true
  multiprocessing_context: null
  num_workers: 8
  persistent_workers: false
  pin_memory: true
  prefetch_factor: 2
  shuffle_seed: 42
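
Most keys in the `data:` block mirror standard PyTorch `DataLoader` arguments; keys such as `intra_doc_causal_mask` and `shuffle_seed` are handled by the training code itself rather than the loader. A sketch of the loader side only, with a placeholder dataset standing in for the pre-tokenized data under `train_data_path`:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Placeholder dataset; the real run reads pre-tokenized documents from train_data_path.
dataset = TensorDataset(torch.zeros(1024, 8, dtype=torch.long))

loader = DataLoader(
    dataset,
    batch_size=64,           # data.batch_size
    drop_last=True,          # data.drop_last
    num_workers=8,           # data.num_workers
    persistent_workers=False,
    pin_memory=True,
    prefetch_factor=2,
    shuffle=True,            # ordering seeded separately via data.shuffle_seed
)
```
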
loggers:
  tensorboard:
    _target_: primer.trainer.TensorBoardLogger
    name: ''
    save_dir: ./
    version: null
model:
  attention_bias: false
  attention_dropout: 0.0
  head_dim: 128
  hidden_act: silu
  hidden_size: 768
  initializer_range: 0.02
  intermediate_size: 2048
  mlp_bias: false
  model_type: llama
  name: small
  num_attention_heads: 6
  num_hidden_layers: 6
  num_key_value_heads: 6
  pretraining_tp: 1
  rms_norm_eps: 1.0e-05
  rope_scaling: null
  rope_theta: 10000.0
  tie_word_embeddings: true
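
The `model:` block follows the Hugging Face Llama configuration schema (note that `head_dim = hidden_size / num_attention_heads = 768 / 6 = 128`). A sketch of the equivalent `transformers.LlamaConfig`; `vocab_size` is an assumption here, since it is determined by the `unigramlm128k` tokenizer rather than this file:

```python
from transformers import LlamaConfig

config = LlamaConfig(
    vocab_size=128_000,        # assumption: taken from the unigramlm128k tokenizer, not this config
    hidden_size=768,
    intermediate_size=2048,
    num_hidden_layers=6,
    num_attention_heads=6,
    num_key_value_heads=6,
    head_dim=128,
    hidden_act="silu",
    attention_bias=False,
    attention_dropout=0.0,
    mlp_bias=False,
    initializer_range=0.02,
    rms_norm_eps=1e-5,
    rope_theta=10000.0,
    rope_scaling=None,
    pretraining_tp=1,
    tie_word_embeddings=True,
)
```
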
optim:
  grad_acc_schedule:
    0: 2
  lr: 0.0006
  num_warmup_steps: 2000
  optim_kwargs:
    betas:
    - 0.9
    - 0.95
    capturable: true
    eps: 1.0e-08
    fused: true
  optim_name: adamw
  scheduler_kwargs:
    min_lr_ratio: 0.0
    num_decay_steps: 4000
  scheduler_name: warmup_stable_decay
  set_grad_to_none: true
  weight_decay: 0.01
  weight_decay_embedding: false
  zloss_factor: null
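
`scheduler_name: warmup_stable_decay` describes a three-phase schedule: linear warmup over `num_warmup_steps`, a constant plateau at the peak `lr`, and a final decay over `num_decay_steps` down to `min_lr_ratio * lr` (here the last 4000 of the 50000 training steps). The exact implementation lives in `primer`; the sketch below assumes linear warmup and linear decay. `grad_acc_schedule: {0: 2}` simply means a gradient-accumulation factor of 2 from step 0 onward.

```python
def wsd_lr(step: int,
           peak_lr: float = 6e-4,
           max_steps: int = 50_000,
           num_warmup_steps: int = 2_000,
           num_decay_steps: int = 4_000,
           min_lr_ratio: float = 0.0) -> float:
    """Warmup-stable-decay learning rate (sketch; the decay shape is an assumption)."""
    if step < num_warmup_steps:                      # phase 1: linear warmup
        return peak_lr * step / num_warmup_steps
    decay_start = max_steps - num_decay_steps
    if step < decay_start:                           # phase 2: constant plateau
        return peak_lr
    # phase 3: linear decay to min_lr_ratio * peak_lr
    progress = min(1.0, (step - decay_start) / num_decay_steps)
    return peak_lr * ((1.0 - progress) + progress * min_lr_ratio)

assert wsd_lr(0) == 0.0
assert wsd_lr(2_000) == 6e-4     # end of warmup
assert wsd_lr(30_000) == 6e-4    # stable phase
assert wsd_lr(50_000) == 0.0     # fully decayed
```
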
out_parent_folder: model_train
pwd: ./unimixlm
resume_from_checkpoint: .checkpoints/last.ckpt
run_folder: small_unigramlm128k__2025-07-25T17-58-35
save_initial_checkpoint: true
seed: 42
tok_name: unigramlm128k
tok_path: ./unimixlm/tokenizers/unigramlm128k
tok_subfolder: null
torch_compile: true
train_data_path: ./unimixlm/data/unigramlm128k/train
trainer:
  accelerator: gpu
  deterministic: false
  devices: 1
  enable_progress_bar: true
  fast_dev_run: false
  gradient_clip_algorithm: norm
  gradient_clip_val: 1.0
  limit_train_batches: null
  limit_val_batches: 500
  log_every_n_steps: 1
  max_steps: 50000
  precision: bf16-true
  val_check_interval: 2000
use_liger: true
val_data_path: ./unimixlm/data/unigramlm128k/validation
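
The `trainer:` block uses the standard PyTorch Lightning `Trainer` argument names, so, assuming the `primer` trainer wraps Lightning as those names suggest, the run corresponds roughly to the constructor call below (the callbacks and logger built from the sections above would be passed in as well):

```python
from lightning.pytorch import Trainer

trainer = Trainer(
    accelerator="gpu",
    devices=1,
    precision="bf16-true",
    max_steps=50_000,
    gradient_clip_val=1.0,
    gradient_clip_algorithm="norm",
    val_check_interval=2_000,
    limit_val_batches=500,
    log_every_n_steps=1,
    deterministic=False,
    enable_progress_bar=True,
    fast_dev_run=False,
    # callbacks=[...], logger=...,  # instantiated from the callbacks/loggers sections above
)
```
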