---
# Fine-tuning run configuration (GPT-Neo 1.3B on SQuAD).
# NOTE(review): this file was a single-line Python repr dump; it has been
# restructured into valid YAML. Python literals (`None`, `False`) were
# converted to their YAML equivalents (`null`, `false`).

# Model / tokenizer
model_name_or_path: EleutherAI/gpt-neo-1.3B
config_name: null            # falls back to model_name_or_path when null
tokenizer_name: null         # falls back to model_name_or_path when null
use_slow_tokenizer: false
model_type: null

# Optimization
per_device_train_batch_size: 4
per_device_eval_batch_size: 4
# Written with a decimal point so YAML 1.1 parsers also resolve it as a
# float (plain `5e-06` is a *string* under the 1.1 core schema).
learning_rate: 5.0e-6
weight_decay: 0.1
num_train_epochs: 5
patience: 100                # early-stopping patience (steps/evals — confirm unit with trainer)
max_train_steps: null        # when null, derived from num_train_epochs
gradient_accumulation_steps: 1
# NOTE(review): original value was the Python enum repr `SchedulerType.LINEAR`;
# the consumer may expect the bare enum value (e.g. `linear`) — confirm.
lr_scheduler_type: "SchedulerType.LINEAR"
num_warmup_steps: 0

# Experiment setup
output_dir: gpt_neo_squad
seed: null
data_seed: 0
finetune_type: vanilla
beta: 0.1
few_shot_k: null

# Data processing
max_seq_length: 384
max_context_length: 600
num_beams: 1
preprocessing_num_workers: 4
overwrite_cache: false
no_keep_linebreaks: false

# Hub / checkpointing / tracking
push_to_hub: false
hub_model_id: null
hub_token: null              # keep secrets out of VCS; inject at runtime if needed
checkpointing_steps: null
resume_from_checkpoint: null
with_tracking: false
local_rank: -1               # -1 = no distributed training