2025-08-13 08:32:28 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
2025-08-13 08:32:28 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
2025-08-13 08:32:28 - INFO - __main__ - Training parameters SFTConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
average_tokens_across_devices=True,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
chars_per_token=<CHARS_PER_TOKEN>,
chat_template=None,
completion_only_loss=None,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_batch_size=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=False,
eos_token=<EOS_TOKEN>,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
hub_model_revision=main,
hub_private_repo=None,
hub_revision=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=length,
liger_kernel_config=None,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug13_08-32-26_ip-172-31-35-162,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=8192,
max_seq_length=None,
max_steps=-1,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_of_sequences=None,
num_train_epochs=1,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=True,
pad_to_multiple_of=None,
pad_token=<PAD_TOKEN>,
padding_free=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=1,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=None,
run_name=None,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=500,
save_strategy=SaveStrategy.STEPS,
save_total_limit=1,
seed=1234,
skip_memory_metrics=True,
system_prompt=None,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=None,
wandb_project=None,
wandb_run_group=None,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
)
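[Editor's note] The dump above is an alignment-handbook-style SFTConfig: fields such as benchmarks, callbacks, hub_model_revision, system_prompt, and the wandb_* entries come from a subclass rather than stock TRL. As a minimal sketch (not the exact training script used here), the core hyperparameters it records could be reproduced with plain TRL roughly like this, assuming a recent TRL release:

from trl import SFTConfig

# Sketch only: reproduces the key knobs from the dump above; every other field
# keeps its default, and the extended alignment-handbook fields are omitted.
training_args = SFTConfig(
    output_dir="data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code",
    num_train_epochs=1,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=1,
    learning_rate=1e-5,
    lr_scheduler_type="cosine_with_min_lr",
    lr_scheduler_kwargs={"min_lr_rate": 0.1},  # LR floor at 10% of the peak
    warmup_ratio=0.1,
    bf16=True,
    packing=True,                 # concatenate samples into full 8192-token sequences
    max_length=8192,
    dataset_text_field="text",
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
    logging_steps=1,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=1,
    seed=1234,
    push_to_hub=True,
    report_to=["wandb"],
)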
2025-08-13 08:32:29 - INFO - __main__ - *** Initializing model kwargs ***
2025-08-13 09:14:29 - INFO - __main__ - *** Train ***
2025-08-13 09:14:29 - INFO - __main__ - DeepseekV2ForCausalLM(
  (model): DeepseekV2Model(
    (embed_tokens): Embedding(102400, 2048)
    (layers): ModuleList(
      (0): DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MLP(
          (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
      (1-26): 26 x DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MoE(
          (experts): ModuleList(
            (0-63): 64 x DeepseekV2MLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (gate): MoEGate()
          (shared_experts): DeepseekV2MLP(
            (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
    )
    (norm): DeepseekV2RMSNorm()
  )
  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
)
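[Editor's note] The printed module tree pins down every weight shape, so the model size can be sanity-checked by hand. A back-of-the-envelope count from the Linear and Embedding shapes above (weights only; every Linear has bias=False, and the RMSNorm and MoE gate parameters are negligible):

# Sketch: parameter count derived purely from the shapes in the repr above.
embed     = 102_400 * 2_048                          # embed_tokens
lm_head   = 102_400 * 2_048                          # untied output head
attn      = 27 * (2048*3072 + 2048*576 + 512*4096 + 2048*2048)
dense_mlp = 3 * 2_048 * 10_944                       # layer 0 uses a dense MLP
routed    = 26 * 64 * 3 * 2_048 * 1_408              # 64 experts in each of 26 MoE layers
shared    = 26 * 3 * 2_048 * 2_816                   # shared experts
total = embed + lm_head + attn + dense_mlp + routed + shared
print(f"~{total / 1e9:.1f}B parameters")             # ~15.7B

Most of the ~15.7B sits in the routed experts; only a few experts fire per token, which is why this MoE trains far cheaper than a dense model of the same total size.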
2025-08-15 08:24:09 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
2025-08-15 08:24:09 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
2025-08-15 08:24:09 - INFO - __main__ - Training parameters SFTConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
activation_offloading=False,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
average_tokens_across_devices=True,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
chat_template=None,
completion_only_loss=None,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=False,
eos_token=<EOS_TOKEN>,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
hub_model_revision=main,
hub_private_repo=None,
hub_revision=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=length,
liger_kernel_config=None,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug15_08-24-07_ip-172-31-35-111,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=8192,
max_seq_length=None,
max_steps=-1,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_train_epochs=1,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=True,
pad_to_multiple_of=None,
pad_token=<PAD_TOKEN>,
padding_free=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=1,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=None,
run_name=None,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=500,
save_strategy=SaveStrategy.STEPS,
save_total_limit=1,
seed=1234,
skip_memory_metrics=True,
system_prompt=None,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=None,
wandb_project=None,
wandb_run_group=None,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
)
2025-08-15 08:24:09 - INFO - __main__ - Checkpoint detected, resuming training at last_checkpoint='data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/checkpoint-2500'.
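[Editor's note] The resume line above reflects the standard pattern of scanning output_dir for the newest checkpoint-* directory before calling train(). A minimal sketch of that logic, assuming a trainer object built from the config dumped above (the variable name is hypothetical):

from transformers.trainer_utils import get_last_checkpoint

# Sketch: find the most recent checkpoint-N directory in output_dir, if any,
# and hand it to the trainer; None means training starts from scratch.
last_checkpoint = get_last_checkpoint("data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code")
trainer.train(resume_from_checkpoint=last_checkpoint)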
2025-08-15 08:24:10 - INFO - __main__ - *** Initializing model kwargs ***
2025-08-15 08:58:30 - INFO - __main__ - *** Train ***
2025-08-15 08:58:30 - INFO - __main__ - DeepseekV2ForCausalLM(
  (model): DeepseekV2Model(
    (embed_tokens): Embedding(102400, 2048)
    (layers): ModuleList(
      (0): DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MLP(
          (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
      (1-26): 26 x DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MoE(
          (experts): ModuleList(
            (0-63): 64 x DeepseekV2MLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (gate): MoEGate()
          (shared_experts): DeepseekV2MLP(
            (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
    )
    (norm): DeepseekV2RMSNorm()
  )
  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
)
2025-08-15 09:01:57 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
2025-08-15 09:01:57 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
2025-08-15 09:01:57 - INFO - __main__ - Training parameters SFTConfig(
_n_gpu=1,
accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
activation_offloading=False,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
average_tokens_across_devices=True,
batch_eval_metrics=False,
benchmarks=[],
bf16=True,
bf16_full_eval=False,
callbacks=[],
chat_template=None,
completion_only_loss=None,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_persistent_workers=False,
dataloader_pin_memory=True,
dataloader_prefetch_factor=None,
dataset_kwargs=None,
dataset_num_proc=None,
dataset_text_field=text,
ddp_backend=None,
ddp_broadcast_buffers=None,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800000000,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=False,
eos_token=<EOS_TOKEN>,
eval_accumulation_steps=None,
eval_delay=0,
eval_do_concat_batches=True,
eval_on_start=False,
eval_packing=None,
eval_steps=None,
eval_strategy=IntervalStrategy.NO,
eval_use_gather_object=False,
fp16=False,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={'use_reentrant': False},
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_always_push=False,
hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
hub_model_revision=main,
hub_private_repo=None,
hub_revision=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_for_metrics=[],
include_inputs_for_metrics=False,
include_num_input_tokens_seen=False,
include_tokens_per_second=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=length,
liger_kernel_config=None,
load_best_model_at_end=False,
local_rank=0,
log_level=info,
log_level_replica=warning,
log_on_each_node=True,
logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug15_09-01-55_ip-172-31-35-111,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=1,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_kwargs={'min_lr_rate': 0.1},
lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
max_grad_norm=1.0,
max_length=8192,
max_seq_length=None,
max_steps=-1,
metric_for_best_model=None,
model_init_kwargs=None,
mp_parameters=,
neftune_noise_alpha=None,
no_cuda=False,
num_train_epochs=1,
optim=OptimizerNames.ADAMW_TORCH,
optim_args=None,
optim_target_modules=None,
output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
overwrite_hub_revision=False,
overwrite_output_dir=True,
packing=True,
pad_to_multiple_of=None,
pad_token=<PAD_TOKEN>,
padding_free=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=1,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_revision=False,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['wandb'],
restore_callback_states_from_checkpoint=False,
resume_from_checkpoint=/home/ubuntu/efs/hector/data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/checkpoint-2500,
run_name=None,
save_on_each_node=False,
save_only_model=False,
save_safetensors=True,
save_steps=500,
save_strategy=SaveStrategy.STEPS,
save_total_limit=1,
seed=1234,
skip_memory_metrics=True,
system_prompt=None,
tf32=None,
torch_compile=False,
torch_compile_backend=None,
torch_compile_mode=None,
torch_empty_cache_steps=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_cpu=False,
use_ipex=False,
use_legacy_prediction_loop=False,
use_liger_kernel=False,
use_mps_device=False,
wandb_entity=None,
wandb_project=None,
wandb_run_group=None,
warmup_ratio=0.1,
warmup_steps=0,
weight_decay=0.0,
)
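[Editor's note] Unlike the 08:24 run, this dump carries an explicit resume_from_checkpoint path, so the trainer restarts from step 2500 directly. The learning-rate curve these settings define (linear warmup over the first 10% of steps, then cosine decay to a 10% floor) can be sketched as below; the total step count does not appear in the log, so it is left as a free parameter:

import math

def lr_at(step: int, total_steps: int, peak_lr: float = 1e-5,
          warmup_ratio: float = 0.1, min_lr_rate: float = 0.1) -> float:
    # Sketch of a cosine-with-min-lr schedule matching the dumped settings;
    # an approximation of the library behavior, not its exact implementation.
    warmup_steps = int(total_steps * warmup_ratio)
    if step < warmup_steps:
        return peak_lr * step / max(1, warmup_steps)          # linear warmup
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    # decay from peak_lr down to min_lr_rate * peak_lr rather than to zero
    return peak_lr * (min_lr_rate + (1.0 - min_lr_rate) * cosine)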
2025-08-15 09:01:58 - INFO - __main__ - *** Initializing model kwargs ***
2025-08-15 09:02:23 - INFO - __main__ - *** Train ***
2025-08-15 09:02:23 - INFO - __main__ - DeepseekV2ForCausalLM(
  (model): DeepseekV2Model(
    (embed_tokens): Embedding(102400, 2048)
    (layers): ModuleList(
      (0): DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MLP(
          (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
          (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
      (1-26): 26 x DeepseekV2DecoderLayer(
        (self_attn): DeepseekV2FlashAttention2(
          (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
          (kv_a_layernorm): DeepseekV2RMSNorm()
          (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
          (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
          (rotary_emb): DeepseekV2YarnRotaryEmbedding()
        )
        (mlp): DeepseekV2MoE(
          (experts): ModuleList(
            (0-63): 64 x DeepseekV2MLP(
              (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
              (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
              (act_fn): SiLU()
            )
          )
          (gate): MoEGate()
          (shared_experts): DeepseekV2MLP(
            (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
            (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
            (act_fn): SiLU()
          )
        )
        (input_layernorm): DeepseekV2RMSNorm()
        (post_attention_layernorm): DeepseekV2RMSNorm()
      )
    )
    (norm): DeepseekV2RMSNorm()
  )
  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
)
2025-08-16 12:55:25 - INFO - __main__ - *** Save model ***
2025-08-16 13:04:02 - INFO - __main__ - Model saved to data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code
2025-08-16 13:04:02 - INFO - __main__ - Pushing to hub...
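[Editor's note] The last three lines correspond to the end-of-training save-and-push sequence. A minimal sketch, assuming the same hypothetical trainer and training_args objects as in the earlier sketches:

# Sketch: persist the final weights (safetensors shards, per save_safetensors=True)
# and then upload them; the log ends while the upload is still in progress.
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
    trainer.push_to_hub()  # targets hub_model_id at revision 'main'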