|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.825958702064897,
  "eval_steps": 50,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11799410029498525,
      "grad_norm": 0.20195358991622925,
      "learning_rate": 0.0003,
      "loss": 1.9738,
      "step": 10
    },
    {
      "epoch": 0.2359882005899705,
      "grad_norm": 0.1426803171634674,
      "learning_rate": 0.0002963855421686747,
      "loss": 1.5063,
      "step": 20
    },
    {
      "epoch": 0.35398230088495575,
      "grad_norm": 0.15137845277786255,
      "learning_rate": 0.0002927710843373494,
      "loss": 1.3522,
      "step": 30
    },
    {
      "epoch": 0.471976401179941,
      "grad_norm": 0.14357119798660278,
      "learning_rate": 0.0002891566265060241,
      "loss": 1.2434,
      "step": 40
    },
    {
      "epoch": 0.5899705014749262,
      "grad_norm": 0.13860304653644562,
      "learning_rate": 0.00028554216867469873,
      "loss": 1.1816,
      "step": 50
    },
    {
      "epoch": 0.5899705014749262,
      "eval_loss": 1.159853458404541,
      "eval_runtime": 18.5086,
      "eval_samples_per_second": 8.861,
      "eval_steps_per_second": 0.756,
      "step": 50
    },
    {
      "epoch": 0.7079646017699115,
      "grad_norm": 0.13697050511837006,
      "learning_rate": 0.0002819277108433735,
      "loss": 1.1465,
      "step": 60
    },
    {
      "epoch": 0.8259587020648967,
      "grad_norm": 0.12234006822109222,
      "learning_rate": 0.0002783132530120482,
      "loss": 1.1006,
      "step": 70
    },
    {
      "epoch": 0.943952802359882,
      "grad_norm": 0.1310320794582367,
      "learning_rate": 0.00027469879518072284,
      "loss": 1.0819,
      "step": 80
    },
    {
      "epoch": 1.0589970501474926,
      "grad_norm": 0.12331829220056534,
      "learning_rate": 0.0002710843373493976,
      "loss": 1.044,
      "step": 90
    },
    {
      "epoch": 1.176991150442478,
      "grad_norm": 0.11900211870670319,
      "learning_rate": 0.00026746987951807225,
      "loss": 1.0244,
      "step": 100
    },
    {
      "epoch": 1.176991150442478,
      "eval_loss": 1.0301724672317505,
      "eval_runtime": 18.427,
      "eval_samples_per_second": 8.9,
      "eval_steps_per_second": 0.76,
      "step": 100
    },
    {
      "epoch": 1.294985250737463,
      "grad_norm": 0.13372938334941864,
      "learning_rate": 0.00026385542168674695,
      "loss": 1.0084,
      "step": 110
    },
    {
      "epoch": 1.4129793510324484,
      "grad_norm": 0.12052427977323532,
      "learning_rate": 0.00026024096385542165,
      "loss": 0.9994,
      "step": 120
    },
    {
      "epoch": 1.5309734513274336,
      "grad_norm": 0.11335684359073639,
      "learning_rate": 0.00025662650602409636,
      "loss": 0.9798,
      "step": 130
    },
    {
      "epoch": 1.648967551622419,
      "grad_norm": 0.13653819262981415,
      "learning_rate": 0.00025301204819277106,
      "loss": 0.9601,
      "step": 140
    },
    {
      "epoch": 1.7669616519174043,
      "grad_norm": 0.11394950747489929,
      "learning_rate": 0.00024939759036144576,
      "loss": 0.9586,
      "step": 150
    },
    {
      "epoch": 1.7669616519174043,
      "eval_loss": 0.9602091908454895,
      "eval_runtime": 18.4269,
      "eval_samples_per_second": 8.9,
      "eval_steps_per_second": 0.76,
      "step": 150
    },
    {
      "epoch": 1.8849557522123894,
      "grad_norm": 0.1154383197426796,
      "learning_rate": 0.00024578313253012046,
      "loss": 0.95,
      "step": 160
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.12450100481510162,
      "learning_rate": 0.00024216867469879517,
      "loss": 0.9272,
      "step": 170
    },
    {
      "epoch": 2.117994100294985,
      "grad_norm": 0.10410638153553009,
      "learning_rate": 0.00023855421686746987,
      "loss": 0.9071,
      "step": 180
    },
    {
      "epoch": 2.2359882005899703,
      "grad_norm": 0.12171204388141632,
      "learning_rate": 0.00023493975903614455,
      "loss": 0.9032,
      "step": 190
    },
    {
      "epoch": 2.353982300884956,
      "grad_norm": 0.12321081757545471,
      "learning_rate": 0.00023132530120481928,
      "loss": 0.8935,
      "step": 200
    },
    {
      "epoch": 2.353982300884956,
      "eval_loss": 0.9177303910255432,
      "eval_runtime": 18.4239,
      "eval_samples_per_second": 8.902,
      "eval_steps_per_second": 0.76,
      "step": 200
    },
    {
      "epoch": 2.471976401179941,
      "grad_norm": 0.10513285547494888,
      "learning_rate": 0.00022771084337349395,
      "loss": 0.8834,
      "step": 210
    },
    {
      "epoch": 2.589970501474926,
      "grad_norm": 0.12400174885988235,
      "learning_rate": 0.00022409638554216866,
      "loss": 0.8927,
      "step": 220
    },
    {
      "epoch": 2.7079646017699117,
      "grad_norm": 0.12554600834846497,
      "learning_rate": 0.00022048192771084336,
      "loss": 0.8787,
      "step": 230
    },
    {
      "epoch": 2.825958702064897,
      "grad_norm": 0.11129195988178253,
      "learning_rate": 0.00021686746987951806,
      "loss": 0.8715,
      "step": 240
    }
  ],
  "logging_steps": 10,
  "max_steps": 840,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 120,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.040885723782447e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|