|=>> 12/03 [04:30:54] - mistral - INFO :: Starting Run: shuffle_control_de_DE_randinit_seed53...
|=>> 12/03 [04:30:54] - mistral - INFO :: Setting Random Seed to 53!
|=>> 12/03 [04:30:54] - mistral - INFO :: Building Tokenizer and Initializing `gpt2-small` via AutoModel/AutoConfig...
|=>> 12/03 [04:30:54] - mistral - INFO :: Using Configs For Model From: /local/xiulyang/mission-impossible-language-models/mistral/conf/models/gpt2-small-40000.json ...
|=>> 12/03 [04:30:54] - mistral.models.auto - INFO :: Building Hugging Face GPT2Config from provided configs: {'activation_function': 'gelu_new', 'architectures': ['GPT2LMHeadModel'], 'attn_pdrop': 0.1, 'embd_pdrop': 0.1, 'eos_token_id': 0, 'bos_token_id': 0, 'initializer_range': 0.02, 'layer_norm_epsilon': 1e-05, 'model_type': 'gpt2', 'n_ctx': 1024, 'n_embd': 768, 'n_head': 12, 'n_inner': None, 'n_layer': 12, 'n_positions': 1024, 'reorder_and_upcast_attn': True, 'resid_pdrop': 0.1, 'scale_attn_by_inverse_layer_idx': True, 'scale_attn_weights': True, 'summary_activation': None, 'summary_first_dropout': 0.2, 'summary_proj_to_labels': True, 'summary_type': 'cls_index', 'summary_use_proj': True, 'task_specific_params': {'text-generation': {'do_sample': True, 'max_length': 1024}}, 'torch_dtype': 'float32', 'transformers_version': '4.35.2', 'use_cache': False, 'vocab_size': 40000} ...
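For orientation, a minimal sketch of how a GPT2Config can be built from override values like those logged above. The dict below is an abbreviated, illustrative subset of the logged config, not the exact code path mistral uses.

```python
from transformers import GPT2Config

# Illustrative subset of the overrides shown in the log line above; the
# actual run passes the complete dict from gpt2-small-40000.json.
overrides = {
    "activation_function": "gelu_new",
    "n_ctx": 1024,
    "n_embd": 768,
    "n_head": 12,
    "n_layer": 12,
    "n_positions": 1024,
    "eos_token_id": 0,
    "bos_token_id": 0,
    "use_cache": False,
    "vocab_size": 40000,
}

config = GPT2Config(**overrides)  # unspecified fields keep GPT-2 defaults
```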
|=>> 12/03 [04:30:54] - mistral.models.auto - INFO :: Fetching Hugging Face [Fast] AutoTokenizer for Model: `gpt2`...
|=>> 12/03 [04:30:54] - mistral.models.auto - INFO :: Using a Pretokenized Dataset
|=>> 12/03 [04:30:54] - mistral.models.auto - INFO :: Initializing Custom GPT-2 Model from Configuration: `gpt2`...
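The "Initializing Custom GPT-2 Model from Configuration" step, together with the `randinit` suffix in the run name, indicates a from-scratch model rather than pretrained weights. A minimal sketch, assuming a `config` like the one built above:

```python
from transformers import GPT2LMHeadModel

# From-scratch initialization: weights are drawn randomly using the config's
# initializer_range; no pretrained GPT-2 checkpoint is loaded.
model = GPT2LMHeadModel(config)
print(f"{model.num_parameters():,} parameters")
```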
|=>> 12/03 [04:30:58] - mistral - INFO :: Setting Training Arguments from Quinfig...
|=>> 12/03 [04:30:58] - mistral.args.training - INFO :: Setting Gradient Accumulation Steps = `64` [BSZ: 512 World Size: 1 Device BSZ: 8]
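The accumulation factor follows from the effective batch size divided by (world size × per-device batch size): 512 / (1 × 8) = 64. A sketch of that arithmetic:

```python
# Values reported in the log line above.
effective_bsz = 512   # target global batch size (BSZ)
world_size = 1        # number of training processes/GPUs
per_device_bsz = 8    # per_device_train_batch_size

grad_accum_steps = effective_bsz // (world_size * per_device_bsz)
assert grad_accum_steps == 64
```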
|=>> 12/03 [04:30:58] - mistral - INFO :: Downloading and Preprocessing Dataset `/local/xiulyang/mission-impossible-language-models/training/babylm_dataset.py`...
|=>> 12/03 [04:30:59] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Generating examples from = /local/xiulyang/mission-impossible-language-models/data/multilingual/multilingual_data_perturbed/shuffle_control_de/train
|=>> 12/03 [04:30:59] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Total sentences: 1069380
|=>> 12/03 [04:30:59] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Loading pre-tokenized data
|=>> 12/03 [04:31:04] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Concatenating tokenized data using EOS token
|=>> 12/03 [04:31:05] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Chunking tokens into sublists of 1024
|=>> 12/03 [04:31:06] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Writing dataset as space-separated sequences of tokens
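The three preprocessing steps logged above (concatenate pre-tokenized sentences with the EOS token, chunk the flat token stream into length-1024 sublists, write each chunk as space-separated token ids) can be sketched roughly as below. Function and variable names are illustrative, not the dataset script's actual identifiers; `eos_id=0` follows the `eos_token_id` in the logged config.

```python
def concatenate_and_chunk(tokenized_sentences, eos_id=0, block_size=1024):
    """Join pre-tokenized sentences with the EOS id, then split the flat
    stream into fixed-length blocks of block_size token ids."""
    stream = []
    for ids in tokenized_sentences:
        stream.extend(ids)
        stream.append(eos_id)
    return [stream[i:i + block_size] for i in range(0, len(stream), block_size)]


def write_space_separated(chunks, path):
    """Write each chunk as one line of space-separated token ids."""
    with open(path, "w") as f:
        for chunk in chunks:
            f.write(" ".join(map(str, chunk)) + "\n")
```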
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Generating examples from = /local/xiulyang/mission-impossible-language-models/data/multilingual/multilingual_data_perturbed/shuffle_control_de/dev
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Total sentences: 5325
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Loading pre-tokenized data
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Concatenating tokenized data using EOS token
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Chunking tokens into sublists of 1024
|=>> 12/03 [04:31:09] - datasets_modules.datasets.babylm_dataset.a557ecebcec5aabd33660301ba889db2028afe94218a3c44654a00198e95ea8b.babylm_dataset - INFO :: Writing dataset as space-separated sequences of tokens
|=>> 12/03 [04:31:09] - mistral.corpora.auto - INFO :: Building Tokenized Indexed Dataset for {dataset_id}/{dataset_name}...
|=>> 12/03 [04:31:09] - mistral.corpora.auto - INFO :: Building Indexed Dataset for train
|=>> 12/03 [04:31:38] - mistral.corpora.auto - INFO :: Building Indexed Dataset for validation
|=>> 12/03 [04:31:38] - mistral - INFO :: Initializing Model Trainer...
|=>> 12/03 [04:31:38] - mistral - INFO :: Training Arguments: TrainingArguments(
_n_gpu=1,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
bf16=False,
bf16_full_eval=False,
data_seed=53,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_pin_memory=True,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=True,
eval_accumulation_steps=None,
eval_delay=0,
eval_steps=1000,
evaluation_strategy=IntervalStrategy.STEPS,
fp16=True,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
gradient_accumulation_steps=64,
gradient_checkpointing=False,
greater_is_better=None,
group_by_length=False,
half_precision_backend=auto,
hub_model_id=None,
hub_strategy=HubStrategy.EVERY_SAVE,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=0.0006,
length_column_name=length,
load_best_model_at_end=False,
local_rank=-1,
log_level=-1,
log_level_replica=-1,
log_on_each_node=True,
logging_dir=logs,
logging_first_step=True,
logging_nan_inf_filter=True,
logging_steps=50,
logging_strategy=IntervalStrategy.STEPS,
lr_scheduler_type=SchedulerType.LINEAR,
max_grad_norm=1.0,
max_steps=1200,
metric_for_best_model=None,
mp_parameters=,
no_cuda=False,
num_train_epochs=3.0,
optim=OptimizerNames.ADAMW_HF,
output_dir=//local/xiulyang/babylm_models/shuffle_control_de_DE_randinit/babylm_shuffle_control_de_DE_randinit_seed53/runs/shuffle_control_de_DE_randinit_seed53,
overwrite_output_dir=False,
past_index=-1,
per_device_eval_batch_size=16,
per_device_train_batch_size=8,
prediction_loss_only=True,
push_to_hub=False,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
remove_unused_columns=True,
report_to=[],
resume_from_checkpoint=None,
run_name=shuffle_control_de_DE_randinit_seed53,
save_on_each_node=False,
save_steps=1000,
save_strategy=IntervalStrategy.STEPS,
save_total_limit=None,
seed=53,
sharded_ddp=[],
skip_memory_metrics=True,
tf32=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_legacy_prediction_loop=False,
warmup_ratio=0.0,
warmup_steps=120,
weight_decay=0.1,
xpu_backend=None,
)
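For orientation, a minimal sketch of how the key arguments dumped above map onto a transformers.TrainingArguments call (only a subset is shown; the remaining fields take their defaults, and the output_dir is abbreviated from the full path logged above):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="/local/xiulyang/babylm_models/.../shuffle_control_de_DE_randinit_seed53",  # abbreviated
    do_train=True,
    do_eval=True,
    evaluation_strategy="steps",
    eval_steps=1000,
    save_steps=1000,
    logging_steps=50,
    max_steps=1200,                 # training stops at 1200 optimizer steps
    warmup_steps=120,               # linear warmup, then linear decay
    learning_rate=6e-4,
    weight_decay=0.1,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=64,
    fp16=True,                      # mixed precision (requires a CUDA device)
    seed=53,
    run_name="shuffle_control_de_DE_randinit_seed53",
)
```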
|=>> 12/03 [04:31:38] - mistral.core.callbacks - INFO :: Setting W&B Project: xiulin-yang-compling
|=>> 12/03 [04:31:41] - mistral - INFO :: Training...
|=>> 12/03 [04:31:41] - mistral.core.callbacks - INFO :: Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"
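As the callback message above notes, W&B logging is enabled by default; a sketch of turning it off, with the environment variable set before the Trainer is constructed:

```python
import os

# Disable automatic Weights & Biases logging for this process; this must run
# before the Trainer (and its callbacks) are initialized.
os.environ["WANDB_DISABLED"] = "true"
```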
|=>> 12/03 [13:21:19] - mistral - INFO :: ...and that's all folks!
|=>> 12/03 [13:21:19] - mistral - INFO :: Running final evaluation...