{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9961389961389961,
  "eval_steps": 500,
  "global_step": 129,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007722007722007722,
      "grad_norm": 3.172396421432495,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 2.1653,
      "step": 1
    },
    {
      "epoch": 0.03861003861003861,
      "grad_norm": 2.1693484783172607,
      "learning_rate": 7.692307692307693e-05,
      "loss": 2.1599,
      "step": 5
    },
    {
      "epoch": 0.07722007722007722,
      "grad_norm": 0.5863114595413208,
      "learning_rate": 0.00015384615384615385,
      "loss": 2.0828,
      "step": 10
    },
    {
      "epoch": 0.11583011583011583,
      "grad_norm": 0.6443110108375549,
      "learning_rate": 0.00019985334138511237,
      "loss": 1.9643,
      "step": 15
    },
    {
      "epoch": 0.15444015444015444,
      "grad_norm": 0.628454864025116,
      "learning_rate": 0.0001982083682742156,
      "loss": 1.813,
      "step": 20
    },
    {
      "epoch": 0.19305019305019305,
      "grad_norm": 0.5933235883712769,
      "learning_rate": 0.00019476531711828027,
      "loss": 1.6936,
      "step": 25
    },
    {
      "epoch": 0.23166023166023167,
      "grad_norm": 0.31888535618782043,
      "learning_rate": 0.0001895872260758688,
      "loss": 1.6382,
      "step": 30
    },
    {
      "epoch": 0.2702702702702703,
      "grad_norm": 0.33554232120513916,
      "learning_rate": 0.00018276889981568906,
      "loss": 1.5888,
      "step": 35
    },
    {
      "epoch": 0.3088803088803089,
      "grad_norm": 0.24805951118469238,
      "learning_rate": 0.00017443517375622704,
      "loss": 1.5513,
      "step": 40
    },
    {
      "epoch": 0.3474903474903475,
      "grad_norm": 0.2511727213859558,
      "learning_rate": 0.00016473862847818277,
      "loss": 1.5439,
      "step": 45
    },
    {
      "epoch": 0.3861003861003861,
      "grad_norm": 0.2083854228258133,
      "learning_rate": 0.00015385679615609042,
      "loss": 1.5181,
      "step": 50
    },
    {
      "epoch": 0.4247104247104247,
      "grad_norm": 0.21075661480426788,
      "learning_rate": 0.00014198891015602646,
      "loss": 1.5124,
      "step": 55
    },
    {
      "epoch": 0.46332046332046334,
      "grad_norm": 0.20275065302848816,
      "learning_rate": 0.00012935225731039348,
      "loss": 1.4972,
      "step": 60
    },
    {
      "epoch": 0.5019305019305019,
      "grad_norm": 0.2260345220565796,
      "learning_rate": 0.0001161781996552765,
      "loss": 1.4923,
      "step": 65
    },
    {
      "epoch": 0.5405405405405406,
      "grad_norm": 0.2425995022058487,
      "learning_rate": 0.00010270793846761347,
      "loss": 1.4897,
      "step": 70
    },
    {
      "epoch": 0.5791505791505791,
      "grad_norm": 0.23694849014282227,
      "learning_rate": 8.918809815760585e-05,
      "loss": 1.4775,
      "step": 75
    },
    {
      "epoch": 0.6177606177606177,
      "grad_norm": 0.26492005586624146,
      "learning_rate": 7.586621087002945e-05,
      "loss": 1.474,
      "step": 80
    },
    {
      "epoch": 0.6563706563706564,
      "grad_norm": 0.21545687317848206,
      "learning_rate": 6.298618446600856e-05,
      "loss": 1.4795,
      "step": 85
    },
    {
      "epoch": 0.694980694980695,
      "grad_norm": 0.2343147248029709,
      "learning_rate": 5.078383686109926e-05,
      "loss": 1.4562,
      "step": 90
    },
    {
      "epoch": 0.7335907335907336,
      "grad_norm": 0.2037033885717392,
      "learning_rate": 3.948257848062351e-05,
      "loss": 1.4611,
      "step": 95
    },
    {
      "epoch": 0.7722007722007722,
      "grad_norm": 0.24056674540042877,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 1.4629,
      "step": 100
    },
    {
      "epoch": 0.8108108108108109,
      "grad_norm": 0.22205579280853271,
      "learning_rate": 2.0390693429435627e-05,
      "loss": 1.4556,
      "step": 105
    },
    {
      "epoch": 0.8494208494208494,
      "grad_norm": 0.2215537130832672,
      "learning_rate": 1.2949616394382802e-05,
      "loss": 1.4569,
      "step": 110
    },
    {
      "epoch": 0.888030888030888,
      "grad_norm": 0.2468106597661972,
      "learning_rate": 7.102328018320858e-06,
      "loss": 1.4641,
      "step": 115
    },
    {
      "epoch": 0.9266409266409267,
      "grad_norm": 0.23411813378334045,
      "learning_rate": 2.9558851746788517e-06,
      "loss": 1.4681,
      "step": 120
    },
    {
      "epoch": 0.9652509652509652,
      "grad_norm": 0.2601025402545929,
      "learning_rate": 5.862042845640403e-07,
      "loss": 1.4571,
      "step": 125
    },
    {
      "epoch": 0.9961389961389961,
      "eval_loss": 2.2714059352874756,
      "eval_runtime": 0.7834,
      "eval_samples_per_second": 15.317,
      "eval_steps_per_second": 1.276,
      "step": 129
    },
    {
      "epoch": 0.9961389961389961,
      "step": 129,
      "total_flos": 7.61719099628716e+17,
      "train_loss": 1.5814437108446462,
      "train_runtime": 642.9915,
      "train_samples_per_second": 51.55,
      "train_steps_per_second": 0.201
    }
  ],
  "logging_steps": 5,
  "max_steps": 129,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.61719099628716e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}