traclm-v4
axolotl version: 0.8.0.dev0
base_model: /hf_downloads/models/Qwen/Qwen2.5-1.5B # container
# base_model: /usr/local/share/hf_downloads/meta-llama/Llama-3.2-1B-Instruct # local
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name
plugins:
- axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: true
strict: false
seed: 28
datasets:
# path to dataset
- path: /workspace/axolotl/project/data/datasets/traclm-data-v4/traclm-v4-slimorca.jsonl
type: chat_template
chat_template: tokenizer_default
split: train
field_messages: conversations
message_property_mappings:
role: from
content: value
train_on_eos: "turn" # mask all EOS tokens except assistant (if dataset has only single-turn convos, this could also be set to "last")
special_tokens:
pad_token: <|pad_token|>
eos_token: <|im_end|>
# now deprecated
#message_field_role: from
#message_field_content: value
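# example of the record shape the from/value mapping above assumes (illustrative only, not copied from the dataset):
# {"conversations": [{"from": "system", "value": "..."},
#                    {"from": "human", "value": "..."},
#                    {"from": "gpt", "value": "..."}]}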
dataset_prepared_path: /workspace/axolotl/project/axolotl_stuff/data_prepared/last_run_prepared # container
# dataset_prepared_path: /home/danielruiz/workspace/llm_lab/traclm/axolotl_stuff/data_prepared/last_run_prepared # local
val_set_size: 0.05
# list of datasets for eval (must comment out val_set_size) <-- NOT WORKING YET
# test_datasets:
# - path: /workspace/data/eval.jsonl
# ds_type: json
# # You need to specify a split. For "json" datasets the default split is called "train".
# split: train
# type: completion
# data_files:
# - /workspace/data/eval.jsonl
output_dir: /workspace/axolotl/project/axolotl_stuff/output/last_run # container
# output_dir: /home/danielruiz/workspace/llm_lab/traclm/axolotl_stuff/output/last_run # local
sequence_len: 4096 # qwen2.5 instruct max context length = 32768
sample_packing: true
# sample_packing_eff_est:
# total_num_tokens:
eval_sample_packing: true # set to false when running causal LM eval
pad_to_sequence_len: true
# have to log in via cli first
wandb_mode: # "offline" to save run metadata locally and not sync to the server, "disabled" to turn off wandb
wandb_project: traclm-v4-1.5b-instruct # wandb project name
wandb_entity: nps-trac-mtry # wandb team name if using a team
wandb_name: # name of your wandb run, can keep blank
wandb_run_id: # ID of your wandb run, can keep blank
wandb_log_model: # "checkpoint" to log model every `save_steps`, "end" to log only at the end of training, keep blank to prevent sending model to wandb
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: false
# use gradient checkpointing when you are having OOM issues (slows training by ~20%)
# gradient_checkpointing: true
# gradient_checkpointing_kwargs:
# use_reentrant: false
early_stopping_patience:
resume_from_checkpoint:
logging_steps: 1
xformers_attention:
flash_attention: true
loss_watchdog_threshold: 4.6 # loss value indicating the learning has broken down (a good estimate is 2x the loss at the start of training)
loss_watchdog_patience: 2 # num of high-loss steps in a row before the trainer aborts (default: 3)
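# for reference: the first logged training loss in the results below was ~2.07, so 2x ≈ 4.1; 4.6 leaves a little headroom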
save_safetensors: true
gradient_accumulation_steps: 16
micro_batch_size: 4
eval_batch_size: 4
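# effective per-device batch: micro_batch_size (4) x gradient_accumulation_steps (16) = 64 sequences, i.e. 64 x 4096 ≈ 262k packed tokens per optimizer step per GPU
# with the 8 GPUs assumed in the lr note below, that is ~2.1M tokens per global step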
optimizer: adamw_torch_fused
# lr estimation equation: ~[(base model LR) * sqrt((.5 x seq_len x num_gpu x micro_batch_size) / base model tokens per batch)]
# (7*10^-7) * √((.5*4096*8*64)/(2048*32768)) = ~8e-8
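# worked out (values from the comment above): .5*4096*8*64 = 1,048,576 target tokens per batch; 2048*32768 = 67,108,864 base-model tokens per batch;
# √(1,048,576 / 67,108,864) = √0.015625 = 0.125, so (7*10^-7) * 0.125 ≈ 8.75e-8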
lr_scheduler: cosine # linear (for testing)
learning_rate: 8e-5
warmup_steps: 50
# warmup_ratio: .05
num_epochs: 3
eval_strategy: "steps" # other options: "epoch", "no" (use "epoch" for testing, "steps" for training)
eval_steps:
evals_per_epoch: 1
# do_causal_lm_eval: true # threw errors
# eval_causal_lm_metrics: ["perplexity"] # threw errors
# eval_table_size:
# save_steps:
saves_per_epoch: 1
save_strategy: "epoch"
# save_total_limit: 10
# max_steps: 20
debug:
deepspeed:
weight_decay: 0.1 # match qwen sft finetuning, 0.0 by default
fsdp:
- full_shard
- auto_wrap
fsdp_config:
fsdp_limit_all_gathers: true
fsdp_sync_module_states: true
fsdp_offload_params: true
fsdp_use_orig_params: false
fsdp_cpu_ram_efficient_loading: true
  # activation_checkpointing: true # unverified; when using fsdp, enable this in place of gradient_checkpointing above (only if gradient checkpointing is needed)
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_transformer_layer_cls_to_wrap: Qwen2DecoderLayer
fsdp_state_dict_type: FULL_STATE_DICT
fsdp_sharding_strategy: FULL_SHARD
fsdp_backward_prefetch: BACKWARD_PRE
This model was fine-tuned from Qwen2.5-1.5B (see base_model in the config above) on the /workspace/axolotl/project/data/datasets/traclm-data-v4/traclm-v4-slimorca.jsonl dataset. It achieves the following results on the evaluation set:
- Loss: 0.5318
Model description: More information needed
Intended uses & limitations: More information needed
Training and evaluation data: More information needed
The following hyperparameters were used during training:
- learning_rate: 8e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 28
- gradient_accumulation_steps: 16
- optimizer: adamw_torch_fused
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 50
- num_epochs: 3
| Training Loss | Epoch | Step | Validation Loss |
|---|---|---|---|
| 2.0689 | 0.0056 | 1 | 0.7979 |
| 1.5334 | 0.9948 | 179 | 0.5218 |
| 1.3614 | 1.9948 | 358 | 0.5179 |
| 1.2809 | 2.9948 | 537 | 0.5318 |