{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.0,
  "eval_steps": 500,
  "global_step": 35,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 5.582358906147192,
      "learning_rate": 2.5e-06,
      "loss": 0.832,
      "step": 1
    },
    {
      "epoch": 0.4,
      "grad_norm": 6.007640746275026,
      "learning_rate": 5e-06,
      "loss": 0.8909,
      "step": 2
    },
    {
      "epoch": 0.6,
      "grad_norm": 5.714795332116958,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.8979,
      "step": 3
    },
    {
      "epoch": 0.8,
      "grad_norm": 4.265716037342555,
      "learning_rate": 1e-05,
      "loss": 0.8648,
      "step": 4
    },
    {
      "epoch": 1.0,
      "grad_norm": 2.085029182097334,
      "learning_rate": 9.974346616959476e-06,
      "loss": 0.7839,
      "step": 5
    },
    {
      "epoch": 1.2,
      "grad_norm": 5.651803536405543,
      "learning_rate": 9.897649706262474e-06,
      "loss": 0.8144,
      "step": 6
    },
    {
      "epoch": 1.4,
      "grad_norm": 7.350623039861967,
      "learning_rate": 9.770696282000245e-06,
      "loss": 0.8331,
      "step": 7
    },
    {
      "epoch": 1.6,
      "grad_norm": 7.604729650135797,
      "learning_rate": 9.594789058101154e-06,
      "loss": 0.8294,
      "step": 8
    },
    {
      "epoch": 1.8,
      "grad_norm": 6.2283021470847455,
      "learning_rate": 9.371733080722911e-06,
      "loss": 0.8138,
      "step": 9
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.6961381554678794,
      "learning_rate": 9.103817206036383e-06,
      "loss": 0.7548,
      "step": 10
    },
    {
      "epoch": 2.2,
      "grad_norm": 2.1721850533837705,
      "learning_rate": 8.793790613463956e-06,
      "loss": 0.7297,
      "step": 11
    },
    {
      "epoch": 2.4,
      "grad_norm": 2.1393892089477733,
      "learning_rate": 8.444834595378434e-06,
      "loss": 0.6719,
      "step": 12
    },
    {
      "epoch": 2.6,
      "grad_norm": 1.4179199525638273,
      "learning_rate": 8.060529912738316e-06,
      "loss": 0.6487,
      "step": 13
    },
    {
      "epoch": 2.8,
      "grad_norm": 1.4009912746793316,
      "learning_rate": 7.644820051634813e-06,
      "loss": 0.6703,
      "step": 14
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.0272214968725972,
      "learning_rate": 7.201970757788172e-06,
      "loss": 0.6456,
      "step": 15
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.9562542078090331,
      "learning_rate": 6.736526264224101e-06,
      "loss": 0.5806,
      "step": 16
    },
    {
      "epoch": 3.4,
      "grad_norm": 0.9062032042356861,
      "learning_rate": 6.2532626612936035e-06,
      "loss": 0.6585,
      "step": 17
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.7271473588203766,
      "learning_rate": 5.757138887522884e-06,
      "loss": 0.5956,
      "step": 18
    },
    {
      "epoch": 3.8,
      "grad_norm": 0.7742246369208975,
      "learning_rate": 5.253245844193564e-06,
      "loss": 0.5971,
      "step": 19
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.7164132908170959,
      "learning_rate": 4.746754155806437e-06,
      "loss": 0.5276,
      "step": 20
    },
    {
      "epoch": 4.2,
      "grad_norm": 0.5499030834011936,
      "learning_rate": 4.2428611124771184e-06,
      "loss": 0.5727,
      "step": 21
    },
    {
      "epoch": 4.4,
      "grad_norm": 0.5542153239024126,
      "learning_rate": 3.7467373387063973e-06,
      "loss": 0.5251,
      "step": 22
    },
    {
      "epoch": 4.6,
      "grad_norm": 0.6029015493144163,
      "learning_rate": 3.2634737357758994e-06,
      "loss": 0.5381,
      "step": 23
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.682283388561948,
      "learning_rate": 2.7980292422118282e-06,
      "loss": 0.548,
      "step": 24
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.5141680917899011,
      "learning_rate": 2.3551799483651894e-06,
      "loss": 0.5366,
      "step": 25
    },
    {
      "epoch": 5.2,
      "grad_norm": 0.5052161464784082,
      "learning_rate": 1.9394700872616856e-06,
      "loss": 0.5633,
      "step": 26
    },
    {
      "epoch": 5.4,
      "grad_norm": 0.4888663290970337,
      "learning_rate": 1.555165404621567e-06,
      "loss": 0.5344,
      "step": 27
    },
    {
      "epoch": 5.6,
      "grad_norm": 0.48645872089664294,
      "learning_rate": 1.2062093865360458e-06,
      "loss": 0.5252,
      "step": 28
    },
    {
      "epoch": 5.8,
      "grad_norm": 0.4608389733467722,
      "learning_rate": 8.961827939636198e-07,
      "loss": 0.4861,
      "step": 29
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.41306710261326274,
      "learning_rate": 6.282669192770896e-07,
      "loss": 0.4784,
      "step": 30
    },
    {
      "epoch": 6.2,
      "grad_norm": 0.4177769738137263,
      "learning_rate": 4.05210941898847e-07,
      "loss": 0.5251,
      "step": 31
    },
    {
      "epoch": 6.4,
      "grad_norm": 0.4447808771104787,
      "learning_rate": 2.2930371799975593e-07,
      "loss": 0.5024,
      "step": 32
    },
    {
      "epoch": 6.6,
      "grad_norm": 0.42694981358254674,
      "learning_rate": 1.0235029373752758e-07,
      "loss": 0.4919,
      "step": 33
    },
    {
      "epoch": 6.8,
      "grad_norm": 0.41995366862980577,
      "learning_rate": 2.5653383040524228e-08,
      "loss": 0.5287,
      "step": 34
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.3932952185861738,
      "learning_rate": 0.0,
      "loss": 0.486,
      "step": 35
    },
    {
      "epoch": 7.0,
      "step": 35,
      "total_flos": 4.553463888976282e+16,
      "train_loss": 0.6423646134989602,
      "train_runtime": 2917.9198,
      "train_samples_per_second": 0.758,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 35,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.553463888976282e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}