{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984235417761429,
  "global_step": 475,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.994534068046937e-05,
      "loss": 2.0664,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.978160173317438e-05,
      "loss": 1.2851,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.9509499146870236e-05,
      "loss": 0.6483,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.913022275693372e-05,
      "loss": 0.2406,
      "step": 40
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.864543104251587e-05,
      "loss": 0.0239,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.805724387443462e-05,
      "loss": 0.0034,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.736823324551909e-05,
      "loss": 0.0016,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.6581412023939354e-05,
      "loss": 0.001,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.5700220778700504e-05,
      "loss": 0.0008,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.4728512734909844e-05,
      "loss": 0.0007,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.367053692460385e-05,
      "loss": 0.0006,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.2530919606812216e-05,
      "loss": 0.0006,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.131464403810422e-05,
      "loss": 0.0005,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.002702868207563e-05,
      "loss": 0.0005,
      "step": 140
    },
    {
      "epoch": 0.32,
      "learning_rate": 3.867370395306068e-05,
      "loss": 0.0005,
      "step": 150
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.726058759576271e-05,
      "loss": 0.0004,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.579385880846232e-05,
      "loss": 0.0004,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.427993122295552e-05,
      "loss": 0.0004,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.272542485937369e-05,
      "loss": 0.0004,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.1137137178519985e-05,
      "loss": 0.0004,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.952201335830275e-05,
      "loss": 0.0003,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 2.788711592423966e-05,
      "loss": 0.0003,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.623959386683056e-05,
      "loss": 0.0003,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.458665138084104e-05,
      "loss": 0.0003,
      "step": 240
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.2935516363191693e-05,
      "loss": 0.0003,
      "step": 250
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.1293408807203947e-05,
      "loss": 0.0003,
      "step": 260
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.9667509231406334e-05,
      "loss": 0.0003,
      "step": 270
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.806492728095389e-05,
      "loss": 0.0003,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.6492670638958924e-05,
      "loss": 0.0003,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.495761438367577e-05,
      "loss": 0.0003,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.346647092553281e-05,
      "loss": 0.0003,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.202576065546963e-05,
      "loss": 0.0003,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.064178343292641e-05,
      "loss": 0.0003,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.320591038161574e-06,
      "loss": 0.0003,
      "step": 340
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.067960709356478e-06,
      "loss": 0.0003,
      "step": 350
    },
    {
      "epoch": 0.76,
      "learning_rate": 6.889369880222776e-06,
      "loss": 0.0002,
      "step": 360
    },
    {
      "epoch": 0.78,
      "learning_rate": 5.78997222857853e-06,
      "loss": 0.0002,
      "step": 370
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.7745751406263165e-06,
      "loss": 0.0002,
      "step": 380
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.847618689476612e-06,
      "loss": 0.0002,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.013156219837776e-06,
      "loss": 0.0002,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.2748366237709374e-06,
      "loss": 0.0002,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.6358883850134816e-06,
      "loss": 0.0002,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.0991054616410589e-06,
      "loss": 0.0002,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 6.668350687998565e-07,
      "loss": 0.0002,
      "step": 440
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.4096741493194197e-07,
      "loss": 0.0002,
      "step": 450
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.229274363747146e-07,
      "loss": 0.0002,
      "step": 460
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.3668566476848777e-08,
      "loss": 0.0002,
      "step": 470
    },
    {
      "epoch": 1.0,
      "step": 475,
      "total_flos": 6.826782473972613e+17,
      "train_loss": 0.09018798397804953,
      "train_runtime": 3518.059,
      "train_samples_per_second": 17.302,
      "train_steps_per_second": 0.135
    }
  ],
  "max_steps": 475,
  "num_train_epochs": 1,
  "total_flos": 6.826782473972613e+17,
  "trial_name": null,
  "trial_params": null
}