{
  "best_metric": 1.5090330839157104,
  "best_model_checkpoint": "/mnt/storage1/ziya/VQA/M3D/LaMed/output/Llama3.2_3B_PathVQA_BiomedCLIP_KG/checkpoint-39308",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 39308,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05088022794342119,
      "grad_norm": 20.461090087890625,
      "learning_rate": 3.39097999321804e-05,
      "loss": 2.5988,
      "step": 1000
    },
    {
      "epoch": 0.10176045588684238,
      "grad_norm": 10.889785766601562,
      "learning_rate": 6.78195998643608e-05,
      "loss": 1.5759,
      "step": 2000
    },
    {
      "epoch": 0.15264068383026355,
      "grad_norm": 1.6639609336853027,
      "learning_rate": 9.999992936778523e-05,
      "loss": 1.4497,
      "step": 3000
    },
    {
      "epoch": 0.20352091177368475,
      "grad_norm": 0.23197871446609497,
      "learning_rate": 9.99700066771779e-05,
      "loss": 1.3438,
      "step": 4000
    },
    {
      "epoch": 0.25440113971710593,
      "grad_norm": 1.9678728580474854,
      "learning_rate": 9.988580988278223e-05,
      "loss": 1.2782,
      "step": 5000
    },
    {
      "epoch": 0.3052813676605271,
      "grad_norm": 2.1631968021392822,
      "learning_rate": 9.97474304335665e-05,
      "loss": 1.2025,
      "step": 6000
    },
    {
      "epoch": 0.35616159560394833,
      "grad_norm": 1.185405969619751,
      "learning_rate": 9.955501862810158e-05,
      "loss": 1.1717,
      "step": 7000
    },
    {
      "epoch": 0.4070418235473695,
      "grad_norm": 2.607581377029419,
      "learning_rate": 9.930878345131654e-05,
      "loss": 1.0606,
      "step": 8000
    },
    {
      "epoch": 0.4579220514907907,
      "grad_norm": 2.775092840194702,
      "learning_rate": 9.900899234751304e-05,
      "loss": 1.0918,
      "step": 9000
    },
    {
      "epoch": 0.5088022794342119,
      "grad_norm": 9.14775276184082,
      "learning_rate": 9.865597092988517e-05,
      "loss": 1.1604,
      "step": 10000
    },
    {
      "epoch": 0.559682507377633,
      "grad_norm": 0.0023216097615659237,
      "learning_rate": 9.825010262686e-05,
      "loss": 1.1628,
      "step": 11000
    },
    {
      "epoch": 0.6105627353210542,
      "grad_norm": 3.0961430072784424,
      "learning_rate": 9.779182826564301e-05,
      "loss": 1.019,
      "step": 12000
    },
    {
      "epoch": 0.6614429632644754,
      "grad_norm": 2.703859567642212,
      "learning_rate": 9.728164559342095e-05,
      "loss": 1.0538,
      "step": 13000
    },
    {
      "epoch": 0.7123231912078967,
      "grad_norm": 1.270561695098877,
      "learning_rate": 9.67201087367418e-05,
      "loss": 0.9991,
      "step": 14000
    },
    {
      "epoch": 0.7632034191513178,
      "grad_norm": 0.7671055793762207,
      "learning_rate": 9.610782759965936e-05,
      "loss": 1.0418,
      "step": 15000
    },
    {
      "epoch": 0.814083647094739,
      "grad_norm": 9.719964027404785,
      "learning_rate": 9.544546720129593e-05,
      "loss": 1.0275,
      "step": 16000
    },
    {
      "epoch": 0.8649638750381602,
      "grad_norm": 2.232903003692627,
      "learning_rate": 9.473374695354267e-05,
      "loss": 0.9921,
      "step": 17000
    },
    {
      "epoch": 0.9158441029815814,
      "grad_norm": 0.00023140388657338917,
      "learning_rate": 9.397343987968223e-05,
      "loss": 0.9755,
      "step": 18000
    },
    {
      "epoch": 0.9667243309250025,
      "grad_norm": 0.16334091126918793,
      "learning_rate": 9.316537177478212e-05,
      "loss": 1.0277,
      "step": 19000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.5435537099838257,
      "eval_runtime": 306.8844,
      "eval_samples_per_second": 20.395,
      "eval_steps_per_second": 1.362,
      "step": 19654
    },
    {
      "epoch": 1.0176045588684237,
      "grad_norm": 0.0002304486552020535,
      "learning_rate": 9.231042030877097e-05,
      "loss": 0.9118,
      "step": 20000
    },
    {
      "epoch": 1.068484786811845,
      "grad_norm": 2.282210111618042,
      "learning_rate": 9.14095140731717e-05,
      "loss": 0.8202,
      "step": 21000
    },
    {
      "epoch": 1.119365014755266,
      "grad_norm": 2.258639097213745,
      "learning_rate": 9.046363157252702e-05,
      "loss": 0.7903,
      "step": 22000
    },
    {
      "epoch": 1.1702452426986873,
      "grad_norm": 0.01590500958263874,
      "learning_rate": 8.947380016161286e-05,
      "loss": 0.7793,
      "step": 23000
    },
    {
      "epoch": 1.2211254706421084,
      "grad_norm": 3.9215025901794434,
      "learning_rate": 8.844109492959372e-05,
      "loss": 0.8083,
      "step": 24000
    },
    {
      "epoch": 1.2720056985855297,
      "grad_norm": 7.533295631408691,
      "learning_rate": 8.736663753233221e-05,
      "loss": 0.8399,
      "step": 25000
    },
    {
      "epoch": 1.3228859265289508,
      "grad_norm": 11.74168586730957,
      "learning_rate": 8.625159497412098e-05,
      "loss": 0.7777,
      "step": 26000
    },
    {
      "epoch": 1.373766154472372,
      "grad_norm": 0.00011882645776495337,
      "learning_rate": 8.509717834016009e-05,
      "loss": 0.7841,
      "step": 27000
    },
    {
      "epoch": 1.4246463824157933,
      "grad_norm": 0.07268673926591873,
      "learning_rate": 8.390464148115665e-05,
      "loss": 0.7979,
      "step": 28000
    },
    {
      "epoch": 1.4755266103592144,
      "grad_norm": 1.4805755615234375,
      "learning_rate": 8.267527965147542e-05,
      "loss": 0.7649,
      "step": 29000
    },
    {
      "epoch": 1.5264068383026355,
      "grad_norm": 2.261522054672241,
      "learning_rate": 8.141042810231957e-05,
      "loss": 0.8216,
      "step": 30000
    },
    {
      "epoch": 1.5772870662460567,
      "grad_norm": 5.646289825439453,
      "learning_rate": 8.011146063146943e-05,
      "loss": 0.7398,
      "step": 31000
    },
    {
      "epoch": 1.628167294189478,
      "grad_norm": 6.523326396942139,
      "learning_rate": 7.877978809115454e-05,
      "loss": 0.7535,
      "step": 32000
    },
    {
      "epoch": 1.6790475221328993,
      "grad_norm": 10.001418113708496,
      "learning_rate": 7.741685685567961e-05,
      "loss": 0.7092,
      "step": 33000
    },
    {
      "epoch": 1.7299277500763204,
      "grad_norm": 1.004066871246323e-05,
      "learning_rate": 7.602414725046885e-05,
      "loss": 0.7538,
      "step": 34000
    },
    {
      "epoch": 1.7808079780197414,
      "grad_norm": 0.00714008929207921,
      "learning_rate": 7.460317194423467e-05,
      "loss": 0.7715,
      "step": 35000
    },
    {
      "epoch": 1.8316882059631627,
      "grad_norm": 2.5139772333204746e-05,
      "learning_rate": 7.31554743060174e-05,
      "loss": 0.7172,
      "step": 36000
    },
    {
      "epoch": 1.882568433906584,
      "grad_norm": 40.69479751586914,
      "learning_rate": 7.168262672888032e-05,
      "loss": 0.7505,
      "step": 37000
    },
    {
      "epoch": 1.933448661850005,
      "grad_norm": 0.01439322717487812,
      "learning_rate": 7.018622892208068e-05,
      "loss": 0.7763,
      "step": 38000
    },
    {
      "epoch": 1.9843288897934261,
      "grad_norm": 10.337437629699707,
      "learning_rate": 6.866790617357171e-05,
      "loss": 0.7194,
      "step": 39000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.5090330839157104,
      "eval_runtime": 307.0021,
      "eval_samples_per_second": 20.387,
      "eval_steps_per_second": 1.362,
      "step": 39308
    }
  ],
  "logging_steps": 1000,
  "max_steps": 98270,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.92706544187648e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}