sharukat committed
Commit 6aee025 · verified · 1 Parent(s): f4db602

Training in progress, epoch 1

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:acd31700fbcfe0f208be8af267606bced53f92b97e0873b9b4c54d32e481e2f0
+ oid sha256:90be379e4d65a8b5b910b9bc0554fa08239403d5c3152f49aada5887906f518d
  size 502675828
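The pointer above is in the Git LFS format: oid is the SHA-256 digest of the actual model.safetensors blob and size is its byte count, so the changed oid is what records the new epoch-1 weights. As an illustrative aside (not part of this commit; file paths are assumptions), a minimal sketch for checking a downloaded blob against such a pointer:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob against the oid/size recorded in a Git LFS pointer."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            actual_size += len(chunk)
    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Hypothetical usage: verify_lfs_pointer("model.safetensors.pointer", "model.safetensors")
```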
runs/Mar06_14-54-50_41759fa8e6ad/events.out.tfevents.1709736891.41759fa8e6ad.34.1 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fb34e5e949cbbecf7ee776c6840d21a89041260a5503ab7de62fff8a2e9a8679
- size 8556
+ oid sha256:352ef50091000ba8a9a8e84dc422d5251fcac886b12d7f0fcc8a79a65a80b485
+ size 8910
runs/Mar06_14-54-50_41759fa8e6ad/events.out.tfevents.1709737141.41759fa8e6ad.34.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:654a853bc5accc90120240a204f661795253918cfe90478867719d978e3e3729
+ size 560
runs/Mar06_14-59-58_41759fa8e6ad/events.out.tfevents.1709737199.41759fa8e6ad.34.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e92a20cc2a2ec4a6464065726cf070f4981477d74645a951f55f86afbf60e13
+ size 5838
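The two files added above are TensorBoard event logs written during training (the run reports to both tensorboard and wandb, per the config logged further below). A minimal sketch of how their scalars could be inspected, assuming the tensorboard package is installed and the run directories have been downloaded locally:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Directory containing one of the event files added in this commit.
run_dir = "runs/Mar06_14-59-58_41759fa8e6ad"

acc = EventAccumulator(run_dir)
acc.Reload()  # parse all tfevents files found under run_dir

for tag in acc.Tags()["scalars"]:
    points = [(event.step, event.value) for event in acc.Scalars(tag)]
    print(tag, points)
```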
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7feb675298befe016aae2b82429bdf36cd935a19986837f6197814a6333d2d09
+ oid sha256:a0334548ea320d060d256556d98f48c6190b373c3072c01cc2834528cf956053
  size 4856
wandb/debug-internal.log CHANGED
@@ -275,3 +275,97 @@
  2024-03-06 14:58:56,467 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
  2024-03-06 14:58:56,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
  2024-03-06 14:58:58,296 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:00,247 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:01,360 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 14:59:01,364 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 14:59:01,838 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 14:59:02,370 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:02,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
+ 2024-03-06 14:59:03,297 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:03,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 14:59:05,248 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:07,562 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:08,298 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:10,249 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:12,563 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:13,299 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:15,250 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:17,564 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:18,300 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:20,251 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:22,565 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:23,301 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:25,252 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:26,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 14:59:27,786 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:28,302 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:30,253 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:32,787 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:33,304 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:35,254 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:37,788 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:38,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:40,255 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:42,790 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:43,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:45,256 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:47,791 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:48,307 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:50,257 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:52,792 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:53,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:55,258 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:56,786 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 14:59:58,310 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:58,661 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:59,469 DEBUG SenderThread:137 [sender.py:send():382] send: config
+ 2024-03-06 14:59:59,470 DEBUG SenderThread:137 [sender.py:send():382] send: metric
+ 2024-03-06 14:59:59,471 DEBUG SenderThread:137 [sender.py:send():382] send: metric
+ 2024-03-06 14:59:59,471 WARNING SenderThread:137 [sender.py:send_metric():1354] Seen metric with glob (shouldn't happen)
+ 2024-03-06 14:59:59,858 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 15:00:00,259 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:03,430 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:04,477 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:04,860 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
+ 2024-03-06 15:00:05,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:08,431 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:09,637 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:10,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:13,437 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:14,638 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:15,261 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:18,438 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:19,639 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:20,262 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:23,439 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:24,640 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:25,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:26,787 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 15:00:27,703 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 15:00:27,704 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 15:00:27,705 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 15:00:27,705 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 15:00:27,867 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 15:00:28,440 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:28,821 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 15:00:28,824 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 15:00:28,868 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 15:00:29,826 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:30,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:31,869 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 15:00:34,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:34,827 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:35,264 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:39,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:39,828 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:40,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:44,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:44,829 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:45,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:49,309 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:49,830 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:50,266 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
wandb/debug.log CHANGED
@@ -26,3 +26,4 @@ config: {}
  2024-03-06 14:55:56,790 INFO MainThread:34 [wandb_run.py:_redirect():2186] Redirects installed.
  2024-03-06 14:55:56,791 INFO MainThread:34 [wandb_init.py:init():847] run started, returning control to user process
  2024-03-06 14:55:56,798 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 5, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-54-50_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
+ 2024-03-06 14:59:59,464 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 1e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-59-58_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
wandb/run-20240306_145424-trm7fvg4/logs/debug-internal.log CHANGED
@@ -326,3 +326,97 @@ wandb.errors.AuthenticationError: The API key you provided is either invalid or
  2024-03-06 14:58:56,467 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
  2024-03-06 14:58:56,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
  2024-03-06 14:58:58,296 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:00,247 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:01,360 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 14:59:01,364 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 14:59:01,838 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 14:59:02,370 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:02,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
+ 2024-03-06 14:59:03,297 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:03,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 14:59:05,248 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:07,562 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:08,298 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:10,249 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:12,563 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:13,299 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:15,250 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:17,564 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:18,300 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:20,251 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:22,565 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:23,301 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:25,252 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:26,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 14:59:27,786 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:28,302 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:30,253 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:32,787 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:33,304 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:35,254 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:37,788 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:38,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:40,255 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:42,790 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:43,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:45,256 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:47,791 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:48,307 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:50,257 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:52,792 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:53,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:55,258 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:56,786 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 14:59:58,310 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 14:59:58,661 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 14:59:59,469 DEBUG SenderThread:137 [sender.py:send():382] send: config
+ 2024-03-06 14:59:59,470 DEBUG SenderThread:137 [sender.py:send():382] send: metric
+ 2024-03-06 14:59:59,471 DEBUG SenderThread:137 [sender.py:send():382] send: metric
+ 2024-03-06 14:59:59,471 WARNING SenderThread:137 [sender.py:send_metric():1354] Seen metric with glob (shouldn't happen)
+ 2024-03-06 14:59:59,858 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 15:00:00,259 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:03,430 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:04,477 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:04,860 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
+ 2024-03-06 15:00:05,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:08,431 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:09,637 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:10,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:13,437 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:14,638 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:15,261 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:18,438 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:19,639 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:20,262 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:23,439 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:24,640 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:25,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:26,787 DEBUG SenderThread:137 [sender.py:send():382] send: stats
+ 2024-03-06 15:00:27,703 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 15:00:27,704 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 15:00:27,705 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 15:00:27,705 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 15:00:27,867 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 15:00:28,440 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:28,821 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send():382] send: history
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
+ 2024-03-06 15:00:28,824 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
+ 2024-03-06 15:00:28,868 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
+ 2024-03-06 15:00:29,826 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:30,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:31,869 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
+ 2024-03-06 15:00:34,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:34,827 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:35,264 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:39,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:39,828 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:40,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:44,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:44,829 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:45,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:49,309 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
+ 2024-03-06 15:00:49,830 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
+ 2024-03-06 15:00:50,266 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
wandb/run-20240306_145424-trm7fvg4/logs/debug.log CHANGED
@@ -66,3 +66,4 @@ config: {}
  2024-03-06 14:55:56,790 INFO MainThread:34 [wandb_run.py:_redirect():2186] Redirects installed.
  2024-03-06 14:55:56,791 INFO MainThread:34 [wandb_init.py:init():847] run started, returning control to user process
  2024-03-06 14:55:56,798 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 5, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-54-50_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
+ 2024-03-06 14:59:59,464 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 1e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-59-58_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
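The added debug.log entry above records the restarted run's hyperparameters: the learning rate drops from 2e-05 to 1e-05 and training is extended from 5 to 10 epochs, while batch size, fp16, and the epoch-level evaluation/logging/save strategies stay unchanged. For reference, a minimal sketch (not the author's actual training script; only a reconstruction of the values captured by this config callback) of the corresponding `TrainingArguments`:

```python
# Minimal sketch, assuming a standard Hugging Face Trainer setup; this is not the
# author's script, only a reconstruction of the hyperparameters logged above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="/kaggle/working/",       # 'output_dir' and 'run_name' in the logged config
    evaluation_strategy="epoch",
    logging_strategy="epoch",
    save_strategy="epoch",
    learning_rate=1e-05,                 # lowered from 2e-05 in the previous run
    num_train_epochs=10,                 # raised from 5 in the previous run
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    fp16=True,
    push_to_hub=True,                    # hub_strategy='every_save' pushes each checkpoint
    report_to=["tensorboard", "wandb"],  # matches the two run directories in this commit
)
```

Everything else in the dump (optimizer, scheduler, seed, and so on) is consistent with Trainer defaults in transformers 4.38 (adamw_torch, linear schedule, seed 42).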
wandb/run-20240306_145455-h1uv5tyi/files/config.yaml CHANGED
@@ -102,6 +102,26 @@ _wandb:
102
  5: 1
103
  6:
104
  - 1
105
  return_dict:
106
  desc: null
107
  value: true
@@ -373,7 +393,7 @@ eval_delay:
373
  value: 0
374
  learning_rate:
375
  desc: null
376
- value: 2.0e-05
377
  weight_decay:
378
  desc: null
379
  value: 0.0
@@ -391,7 +411,7 @@ max_grad_norm:
391
  value: 1.0
392
  num_train_epochs:
393
  desc: null
394
- value: 5
395
  max_steps:
396
  desc: null
397
  value: -1
@@ -418,7 +438,7 @@ log_on_each_node:
418
  value: true
419
  logging_dir:
420
  desc: null
421
- value: /kaggle/working/runs/Mar06_14-54-50_41759fa8e6ad
422
  logging_strategy:
423
  desc: null
424
  value: epoch
 
102
  5: 1
103
  6:
104
  - 1
105
+ - 1: train/train_runtime
106
+ 5: 1
107
+ 6:
108
+ - 1
109
+ - 1: train/train_samples_per_second
110
+ 5: 1
111
+ 6:
112
+ - 1
113
+ - 1: train/train_steps_per_second
114
+ 5: 1
115
+ 6:
116
+ - 1
117
+ - 1: train/total_flos
118
+ 5: 1
119
+ 6:
120
+ - 1
121
+ - 1: train/train_loss
122
+ 5: 1
123
+ 6:
124
+ - 1
125
  return_dict:
126
  desc: null
127
  value: true
 
393
  value: 0
394
  learning_rate:
395
  desc: null
396
+ value: 1.0e-05
397
  weight_decay:
398
  desc: null
399
  value: 0.0
 
411
  value: 1.0
412
  num_train_epochs:
413
  desc: null
414
+ value: 10
415
  max_steps:
416
  desc: null
417
  value: -1
 
438
  value: true
439
  logging_dir:
440
  desc: null
441
+ value: /kaggle/working/runs/Mar06_14-59-58_41759fa8e6ad
442
  logging_strategy:
443
  desc: null
444
  value: epoch
wandb/run-20240306_145455-h1uv5tyi/files/output.log CHANGED
@@ -7,3 +7,10 @@
7
  /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
8
  _warn_prf(average, modifier, msg_start, len(result))
9
  /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.

7
  /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
8
  _warn_prf(average, modifier, msg_start, len(result))
9
  /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
10
+ _warn_prf(average, modifier, msg_start, len(result))
11
+ /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
12
+ _warn_prf(average, modifier, msg_start, len(result))
13
+ Some weights of MegatronBertForSequenceClassification were not initialized from the model checkpoint at mmukh/SOBertBase and are newly initialized: ['bert.embeddings.token_type_embeddings.weight', 'bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'classifier.bias', 'classifier.weight']
14
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
15
+ /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
16
+ _warn_prf(average, modifier, msg_start, len(result))
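The output.log diff shows two things worth noting: repeated sklearn `UndefinedMetricWarning`s (some labels receive no predictions, so their precision is set to 0.0) and the usual notice that the `MegatronBertForSequenceClassification` pooler/classifier weights on top of `mmukh/SOBertBase` are freshly initialized and need fine-tuning. A hedged sketch of how such a run is typically wired up; the function and variable names are illustrative, not taken from the author's code:

```python
# Illustrative only: shows the zero_division parameter the warning points at and a
# 7-label classification head matching the id2label mapping in the logged config.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import MegatronBertForSequenceClassification

model = MegatronBertForSequenceClassification.from_pretrained(
    "mmukh/SOBertBase",
    num_labels=7,  # classifier.weight / classifier.bias are the newly initialized parameters
)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    # average="weighted" is an assumption; zero_division=0 makes the 0.0-precision
    # behaviour explicit and silences the warning.
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
```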
wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json CHANGED
@@ -1 +1 @@
1
- {"train/loss": 1.6073, "train/grad_norm": 12.977789878845215, "train/learning_rate": 1.2903225806451614e-07, "train/epoch": 5.0, "train/global_step": 310, "_timestamp": 1709737128.459694, "_runtime": 232.7409369945526, "_step": 10, "eval/loss": 1.768215298652649, "eval/accuracy": 0.32727272727272727, "eval/precision": 0.17993920972644376, "eval/recall": 0.32727272727272727, "eval/f1": 0.21342612730172542, "eval/runtime": 1.1069, "eval/samples_per_second": 49.688, "eval/steps_per_second": 6.324, "train/train_runtime": 237.4526, "train/train_samples_per_second": 10.339, "train/train_steps_per_second": 1.306, "train/total_flos": 645966638976000.0, "train/train_loss": 1.7031736066264491}
 
1
+ {"train/loss": 1.822, "train/grad_norm": 8.18305492401123, "train/learning_rate": 9.016129032258065e-06, "train/epoch": 1.0, "train/global_step": 62, "_timestamp": 1709737228.8204873, "_runtime": 333.1017303466797, "_step": 13, "eval/loss": 1.7698227167129517, "eval/accuracy": 0.34545454545454546, "eval/precision": 0.11933884297520661, "eval/recall": 0.34545454545454546, "eval/f1": 0.17739557739557738, "eval/runtime": 1.1111, "eval/samples_per_second": 49.499, "eval/steps_per_second": 6.3, "train/train_runtime": 237.4526, "train/train_samples_per_second": 10.339, "train/train_steps_per_second": 1.306, "train/total_flos": 645966638976000.0, "train/train_loss": 1.7031736066264491}
wandb/run-20240306_145455-h1uv5tyi/logs/debug-internal.log CHANGED
@@ -275,3 +275,97 @@
275
  2024-03-06 14:58:56,467 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
276
  2024-03-06 14:58:56,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
277
  2024-03-06 14:58:58,296 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive

275
  2024-03-06 14:58:56,467 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
276
  2024-03-06 14:58:56,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
277
  2024-03-06 14:58:58,296 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
278
+ 2024-03-06 14:59:00,247 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
279
+ 2024-03-06 14:59:01,360 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
280
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send():382] send: history
281
+ 2024-03-06 14:59:01,362 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
282
+ 2024-03-06 14:59:01,364 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
283
+ 2024-03-06 14:59:01,838 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
284
+ 2024-03-06 14:59:02,370 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
285
+ 2024-03-06 14:59:02,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
286
+ 2024-03-06 14:59:03,297 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
287
+ 2024-03-06 14:59:03,839 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
288
+ 2024-03-06 14:59:05,248 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
289
+ 2024-03-06 14:59:07,562 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
290
+ 2024-03-06 14:59:08,298 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
291
+ 2024-03-06 14:59:10,249 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
292
+ 2024-03-06 14:59:12,563 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
293
+ 2024-03-06 14:59:13,299 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
294
+ 2024-03-06 14:59:15,250 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
295
+ 2024-03-06 14:59:17,564 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
296
+ 2024-03-06 14:59:18,300 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
297
+ 2024-03-06 14:59:20,251 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
298
+ 2024-03-06 14:59:22,565 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
299
+ 2024-03-06 14:59:23,301 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
300
+ 2024-03-06 14:59:25,252 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
301
+ 2024-03-06 14:59:26,785 DEBUG SenderThread:137 [sender.py:send():382] send: stats
302
+ 2024-03-06 14:59:27,786 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
303
+ 2024-03-06 14:59:28,302 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
304
+ 2024-03-06 14:59:30,253 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
305
+ 2024-03-06 14:59:32,787 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
306
+ 2024-03-06 14:59:33,304 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
307
+ 2024-03-06 14:59:35,254 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
308
+ 2024-03-06 14:59:37,788 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
309
+ 2024-03-06 14:59:38,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
310
+ 2024-03-06 14:59:40,255 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
311
+ 2024-03-06 14:59:42,790 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
312
+ 2024-03-06 14:59:43,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
313
+ 2024-03-06 14:59:45,256 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
314
+ 2024-03-06 14:59:47,791 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
315
+ 2024-03-06 14:59:48,307 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
316
+ 2024-03-06 14:59:50,257 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
317
+ 2024-03-06 14:59:52,792 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
318
+ 2024-03-06 14:59:53,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
319
+ 2024-03-06 14:59:55,258 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
320
+ 2024-03-06 14:59:56,786 DEBUG SenderThread:137 [sender.py:send():382] send: stats
321
+ 2024-03-06 14:59:58,310 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
322
+ 2024-03-06 14:59:58,661 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
323
+ 2024-03-06 14:59:59,469 DEBUG SenderThread:137 [sender.py:send():382] send: config
324
+ 2024-03-06 14:59:59,470 DEBUG SenderThread:137 [sender.py:send():382] send: metric
325
+ 2024-03-06 14:59:59,471 DEBUG SenderThread:137 [sender.py:send():382] send: metric
326
+ 2024-03-06 14:59:59,471 WARNING SenderThread:137 [sender.py:send_metric():1354] Seen metric with glob (shouldn't happen)
327
+ 2024-03-06 14:59:59,858 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
328
+ 2024-03-06 15:00:00,259 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
329
+ 2024-03-06 15:00:03,430 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
330
+ 2024-03-06 15:00:04,477 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
331
+ 2024-03-06 15:00:04,860 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/config.yaml
332
+ 2024-03-06 15:00:05,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
333
+ 2024-03-06 15:00:08,431 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
334
+ 2024-03-06 15:00:09,637 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
335
+ 2024-03-06 15:00:10,260 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
336
+ 2024-03-06 15:00:13,437 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
337
+ 2024-03-06 15:00:14,638 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
338
+ 2024-03-06 15:00:15,261 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
339
+ 2024-03-06 15:00:18,438 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
340
+ 2024-03-06 15:00:19,639 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
341
+ 2024-03-06 15:00:20,262 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
342
+ 2024-03-06 15:00:23,439 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
343
+ 2024-03-06 15:00:24,640 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
344
+ 2024-03-06 15:00:25,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
345
+ 2024-03-06 15:00:26,787 DEBUG SenderThread:137 [sender.py:send():382] send: stats
346
+ 2024-03-06 15:00:27,703 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
347
+ 2024-03-06 15:00:27,704 DEBUG SenderThread:137 [sender.py:send():382] send: history
348
+ 2024-03-06 15:00:27,705 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
349
+ 2024-03-06 15:00:27,705 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
350
+ 2024-03-06 15:00:27,867 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
351
+ 2024-03-06 15:00:28,440 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
352
+ 2024-03-06 15:00:28,821 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: partial_history
353
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send():382] send: history
354
+ 2024-03-06 15:00:28,823 DEBUG SenderThread:137 [sender.py:send_request():409] send_request: summary_record
355
+ 2024-03-06 15:00:28,824 INFO SenderThread:137 [sender.py:_save_file():1403] saving file wandb-summary.json with policy end
356
+ 2024-03-06 15:00:28,868 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/wandb-summary.json
357
+ 2024-03-06 15:00:29,826 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
358
+ 2024-03-06 15:00:30,263 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
359
+ 2024-03-06 15:00:31,869 INFO Thread-18 :137 [dir_watcher.py:_on_file_modified():288] file/dir modified: /kaggle/working/wandb/run-20240306_145455-h1uv5tyi/files/output.log
360
+ 2024-03-06 15:00:34,305 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
361
+ 2024-03-06 15:00:34,827 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
362
+ 2024-03-06 15:00:35,264 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
363
+ 2024-03-06 15:00:39,306 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
364
+ 2024-03-06 15:00:39,828 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
365
+ 2024-03-06 15:00:40,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
366
+ 2024-03-06 15:00:44,308 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
367
+ 2024-03-06 15:00:44,829 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
368
+ 2024-03-06 15:00:45,265 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
369
+ 2024-03-06 15:00:49,309 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: keepalive
370
+ 2024-03-06 15:00:49,830 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
371
+ 2024-03-06 15:00:50,266 DEBUG HandlerThread:137 [handler.py:handle_request():146] handle_request: status_report
wandb/run-20240306_145455-h1uv5tyi/logs/debug.log CHANGED
@@ -26,3 +26,4 @@ config: {}
26
  2024-03-06 14:55:56,790 INFO MainThread:34 [wandb_run.py:_redirect():2186] Redirects installed.
27
  2024-03-06 14:55:56,791 INFO MainThread:34 [wandb_init.py:init():847] run started, returning control to user process
28
  2024-03-06 14:55:56,798 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 5, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-54-50_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
 
 
26
  2024-03-06 14:55:56,790 INFO MainThread:34 [wandb_run.py:_redirect():2186] Redirects installed.
27
  2024-03-06 14:55:56,791 INFO MainThread:34 [wandb_init.py:init():847] run started, returning control to user process
28
  2024-03-06 14:55:56,798 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 5, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-54-50_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
29
+ 2024-03-06 14:59:59,464 INFO MainThread:34 [wandb_run.py:_config_callback():1343] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': None, 'finetuning_task': None, 'id2label': {0: 'Documentation Ambiguity', 1: 'Documentation Completeness', 2: 'Documentation Replicability', 3: 'Documentation Replication on Other Examples', 4: 'Inadequate Examples', 5: 'Lack of Alternative Solutions/Documentation', 6: 'Requesting (Additional) Documentation/Examples'}, 'label2id': {'Documentation Ambiguity': 0, 'Documentation Completeness': 1, 'Documentation Replicability': 2, 'Documentation Replication on Other Examples': 3, 'Inadequate Examples': 4, 'Lack of Alternative Solutions/Documentation': 5, 'Requesting (Additional) Documentation/Examples': 6}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 0, 'eos_token_id': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'mmukh/SOBertBase', 'transformers_version': '4.38.1', 'model_type': 'megatron-bert', 'tokenizer_type': 'SentencePieceTokenizer', 'vocab_size': 50048, 'hidden_size': 768, 'num_hidden_layers': 12, 'num_attention_heads': 12, 'hidden_act': 'gelu', 'intermediate_size': 3072, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 2048, 'type_vocab_size': 2, 'initializer_range': 0.02, 'layer_norm_eps': 1e-12, 'position_embedding_type': 'absolute', 'use_cache': True, 'output_dir': '/kaggle/working/', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'epoch', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'learning_rate': 1e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/kaggle/working/runs/Mar06_14-59-58_41759fa8e6ad', 'logging_strategy': 'epoch', 'logging_first_step': False, 'logging_steps': 500, 'logging_nan_inf_filter': True, 'save_strategy': 'epoch', 'save_steps': 500, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': 
False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': None, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': None, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/kaggle/working/', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'adamw_torch', 'optim_args': None, 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': ['tensorboard', 'wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': False, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None}
wandb/run-20240306_145455-h1uv5tyi/run-h1uv5tyi.wandb CHANGED
Binary files a/wandb/run-20240306_145455-h1uv5tyi/run-h1uv5tyi.wandb and b/wandb/run-20240306_145455-h1uv5tyi/run-h1uv5tyi.wandb differ