{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 222,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06756756756756757,
"grad_norm": 4.342727472957683,
"learning_rate": 2.0833333333333336e-05,
"loss": 1.4014,
"step": 5
},
{
"epoch": 0.13513513513513514,
"grad_norm": 1.9245249962075524,
"learning_rate": 4.166666666666667e-05,
"loss": 1.2016,
"step": 10
},
{
"epoch": 0.20270270270270271,
"grad_norm": 1.7936102622454382,
"learning_rate": 4.9977343997179584e-05,
"loss": 1.1028,
"step": 15
},
{
"epoch": 0.2702702702702703,
"grad_norm": 1.6381519278495207,
"learning_rate": 4.983905584269134e-05,
"loss": 1.0613,
"step": 20
},
{
"epoch": 0.33783783783783783,
"grad_norm": 1.7385769477746682,
"learning_rate": 4.957583862636889e-05,
"loss": 1.011,
"step": 25
},
{
"epoch": 0.40540540540540543,
"grad_norm": 1.2152636926156932,
"learning_rate": 4.91891643656567e-05,
"loss": 0.9977,
"step": 30
},
{
"epoch": 0.47297297297297297,
"grad_norm": 1.0928685802601366,
"learning_rate": 4.8681195499870324e-05,
"loss": 0.9886,
"step": 35
},
{
"epoch": 0.5405405405405406,
"grad_norm": 1.0178064646987002,
"learning_rate": 4.805477279695852e-05,
"loss": 0.9873,
"step": 40
},
{
"epoch": 0.6081081081081081,
"grad_norm": 1.1614328285281477,
"learning_rate": 4.731339946677661e-05,
"loss": 0.9666,
"step": 45
},
{
"epoch": 0.6756756756756757,
"grad_norm": 1.0722676077355953,
"learning_rate": 4.6461221569715884e-05,
"loss": 0.9563,
"step": 50
},
{
"epoch": 0.7432432432432432,
"grad_norm": 1.0922390725949371,
"learning_rate": 4.5503004830252146e-05,
"loss": 0.9605,
"step": 55
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.9343071897805195,
"learning_rate": 4.444410798508125e-05,
"loss": 0.9439,
"step": 60
},
{
"epoch": 0.8783783783783784,
"grad_norm": 1.0227180607983488,
"learning_rate": 4.329045281488981e-05,
"loss": 0.954,
"step": 65
},
{
"epoch": 0.9459459459459459,
"grad_norm": 1.0480562117426948,
"learning_rate": 4.204849102735549e-05,
"loss": 0.9431,
"step": 70
},
{
"epoch": 1.0135135135135136,
"grad_norm": 1.260684025254624,
"learning_rate": 4.072516817658065e-05,
"loss": 0.8981,
"step": 75
},
{
"epoch": 1.0810810810810811,
"grad_norm": 1.2734241934034851,
"learning_rate": 3.932788482073635e-05,
"loss": 0.7769,
"step": 80
},
{
"epoch": 1.1486486486486487,
"grad_norm": 1.0349542653830326,
"learning_rate": 3.7864455135139235e-05,
"loss": 0.7436,
"step": 85
},
{
"epoch": 1.2162162162162162,
"grad_norm": 1.162846414767005,
"learning_rate": 3.634306321221328e-05,
"loss": 0.7334,
"step": 90
},
{
"epoch": 1.2837837837837838,
"grad_norm": 0.9484666330021186,
"learning_rate": 3.477221729272535e-05,
"loss": 0.7354,
"step": 95
},
{
"epoch": 1.3513513513513513,
"grad_norm": 1.0093923977732242,
"learning_rate": 3.3160702184251446e-05,
"loss": 0.7396,
"step": 100
},
{
"epoch": 1.4189189189189189,
"grad_norm": 1.4554684574123584,
"learning_rate": 3.1517530132969326e-05,
"loss": 0.7398,
"step": 105
},
{
"epoch": 1.4864864864864864,
"grad_norm": 1.0436919964803422,
"learning_rate": 2.9851890423522204e-05,
"loss": 0.7331,
"step": 110
},
{
"epoch": 1.554054054054054,
"grad_norm": 0.9599205426194947,
"learning_rate": 2.817309798881147e-05,
"loss": 0.7389,
"step": 115
},
{
"epoch": 1.6216216216216215,
"grad_norm": 1.029306388388882,
"learning_rate": 2.6490541317113427e-05,
"loss": 0.738,
"step": 120
},
{
"epoch": 1.689189189189189,
"grad_norm": 0.90932400154707,
"learning_rate": 2.4813629947844506e-05,
"loss": 0.7197,
"step": 125
},
{
"epoch": 1.7567567567567568,
"grad_norm": 0.9566950138674355,
"learning_rate": 2.3151741849600056e-05,
"loss": 0.7328,
"step": 130
},
{
"epoch": 1.8243243243243243,
"grad_norm": 0.9091162829255143,
"learning_rate": 2.1514170974749814e-05,
"loss": 0.7242,
"step": 135
},
{
"epoch": 1.8918918918918919,
"grad_norm": 0.9596574663856378,
"learning_rate": 1.9910075283886327e-05,
"loss": 0.7121,
"step": 140
},
{
"epoch": 1.9594594594594594,
"grad_norm": 0.9385673192651813,
"learning_rate": 1.83484255307945e-05,
"loss": 0.7139,
"step": 145
},
{
"epoch": 2.027027027027027,
"grad_norm": 1.2741421717094357,
"learning_rate": 1.6837955094357533e-05,
"loss": 0.6495,
"step": 150
},
{
"epoch": 2.0945945945945947,
"grad_norm": 1.2778194000575294,
"learning_rate": 1.5387111137959655e-05,
"loss": 0.5209,
"step": 155
},
{
"epoch": 2.1621621621621623,
"grad_norm": 1.0741954333178305,
"learning_rate": 1.4004007369521943e-05,
"loss": 0.5025,
"step": 160
},
{
"epoch": 2.22972972972973,
"grad_norm": 1.0388700441919239,
"learning_rate": 1.2696378666356468e-05,
"loss": 0.4995,
"step": 165
},
{
"epoch": 2.2972972972972974,
"grad_norm": 1.069711625718764,
"learning_rate": 1.1471537818594746e-05,
"loss": 0.4898,
"step": 170
},
{
"epoch": 2.364864864864865,
"grad_norm": 1.0378436021827815,
"learning_rate": 1.0336334633099004e-05,
"loss": 0.4871,
"step": 175
},
{
"epoch": 2.4324324324324325,
"grad_norm": 1.011148209204576,
"learning_rate": 9.297117626563687e-06,
"loss": 0.4927,
"step": 180
},
{
"epoch": 2.5,
"grad_norm": 1.0374664447281305,
"learning_rate": 8.359698522034862e-06,
"loss": 0.4984,
"step": 185
},
{
"epoch": 2.5675675675675675,
"grad_norm": 1.0825482045121846,
"learning_rate": 7.529319747397534e-06,
"loss": 0.4902,
"step": 190
},
{
"epoch": 2.635135135135135,
"grad_norm": 0.9962173915083111,
"learning_rate": 6.810625117592363e-06,
"loss": 0.4911,
"step": 195
},
{
"epoch": 2.7027027027027026,
"grad_norm": 1.0469170187864207,
"learning_rate": 6.207633864518805e-06,
"loss": 0.4924,
"step": 200
},
{
"epoch": 2.77027027027027,
"grad_norm": 1.0230082490963024,
"learning_rate": 5.7237181598600055e-06,
"loss": 0.488,
"step": 205
},
{
"epoch": 2.8378378378378377,
"grad_norm": 1.0056019590225027,
"learning_rate": 5.361584256530833e-06,
"loss": 0.4896,
"step": 210
},
{
"epoch": 2.9054054054054053,
"grad_norm": 1.0635035438923603,
"learning_rate": 5.123257354213851e-06,
"loss": 0.4938,
"step": 215
},
{
"epoch": 2.972972972972973,
"grad_norm": 1.0018745751299392,
"learning_rate": 5.010070273621176e-06,
"loss": 0.4854,
"step": 220
},
{
"epoch": 3.0,
"step": 222,
"total_flos": 432075581161472.0,
"train_loss": 0.7525977000996873,
"train_runtime": 5814.3184,
"train_samples_per_second": 4.881,
"train_steps_per_second": 0.038
}
],
"logging_steps": 5,
"max_steps": 222,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 432075581161472.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}