{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0059880239520957,
"eval_steps": 500,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.059880239520958084,
"grad_norm": 6.566332162525977,
"learning_rate": 7.692307692307694e-06,
"loss": 4.3864,
"step": 10
},
{
"epoch": 0.11976047904191617,
"grad_norm": 2.0437379261117132,
"learning_rate": 1.5384615384615387e-05,
"loss": 3.4794,
"step": 20
},
{
"epoch": 0.17964071856287425,
"grad_norm": 0.8566260346858398,
"learning_rate": 1.9996500732179695e-05,
"loss": 2.5375,
"step": 30
},
{
"epoch": 0.23952095808383234,
"grad_norm": 0.6117118904297301,
"learning_rate": 1.995716208873644e-05,
"loss": 1.8752,
"step": 40
},
{
"epoch": 0.2994011976047904,
"grad_norm": 0.3923875723199884,
"learning_rate": 1.9874283308955058e-05,
"loss": 1.4434,
"step": 50
},
{
"epoch": 0.3592814371257485,
"grad_norm": 0.2726465509781607,
"learning_rate": 1.9748226800652062e-05,
"loss": 1.2232,
"step": 60
},
{
"epoch": 0.41916167664670656,
"grad_norm": 0.22048277219015358,
"learning_rate": 1.957954377686475e-05,
"loss": 1.1031,
"step": 70
},
{
"epoch": 0.47904191616766467,
"grad_norm": 0.19131964365908752,
"learning_rate": 1.9368971845536844e-05,
"loss": 1.0318,
"step": 80
},
{
"epoch": 0.5389221556886228,
"grad_norm": 0.19325047947069812,
"learning_rate": 1.911743178414665e-05,
"loss": 0.9724,
"step": 90
},
{
"epoch": 0.5988023952095808,
"grad_norm": 0.18220999359482376,
"learning_rate": 1.8826023513381372e-05,
"loss": 0.942,
"step": 100
},
{
"epoch": 0.6586826347305389,
"grad_norm": 0.17750272843534337,
"learning_rate": 1.849602128746387e-05,
"loss": 0.9106,
"step": 110
},
{
"epoch": 0.718562874251497,
"grad_norm": 0.16658108783370879,
"learning_rate": 1.8128868122163125e-05,
"loss": 0.8876,
"step": 120
},
{
"epoch": 0.7784431137724551,
"grad_norm": 0.1461861518555333,
"learning_rate": 1.7726169484853438e-05,
"loss": 0.872,
"step": 130
},
{
"epoch": 0.8383233532934131,
"grad_norm": 0.14457472093324725,
"learning_rate": 1.7289686274214116e-05,
"loss": 0.8576,
"step": 140
},
{
"epoch": 0.8982035928143712,
"grad_norm": 0.1544690749108873,
"learning_rate": 1.6821327120267567e-05,
"loss": 0.8467,
"step": 150
},
{
"epoch": 0.9580838323353293,
"grad_norm": 0.1429893393640351,
"learning_rate": 1.6323140038425842e-05,
"loss": 0.8252,
"step": 160
}
],
"logging_steps": 10,
"max_steps": 501,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 56,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 306299887681536.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}