|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.18521948508983144,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "learning_rate": 0.0009999978838190456, "loss": 2.9794, "step": 10},
    {"epoch": 0.0, "learning_rate": 0.0009999915352940948, "loss": 2.3885, "step": 20},
    {"epoch": 0.01, "learning_rate": 0.000999980954478887, "loss": 2.3057, "step": 30},
    {"epoch": 0.01, "learning_rate": 0.000999966141462985, "loss": 2.2692, "step": 40},
    {"epoch": 0.01, "learning_rate": 0.000999947096371777, "loss": 2.2576, "step": 50},
    {"epoch": 0.01, "learning_rate": 0.0009999238193664748, "loss": 2.2388, "step": 60},
    {"epoch": 0.01, "learning_rate": 0.0009998963106441117, "loss": 2.2523, "step": 70},
    {"epoch": 0.01, "learning_rate": 0.0009998645704375414, "loss": 2.218, "step": 80},
    {"epoch": 0.02, "learning_rate": 0.000999828599015436, "loss": 2.2457, "step": 90},
    {"epoch": 0.02, "learning_rate": 0.0009997883966822835, "loss": 2.198, "step": 100},
    {"epoch": 0.02, "learning_rate": 0.0009997439637783859, "loss": 2.2013, "step": 110},
    {"epoch": 0.02, "learning_rate": 0.000999695300679855, "loss": 2.1765, "step": 120},
    {"epoch": 0.02, "learning_rate": 0.0009996424077986109, "loss": 2.1741, "step": 130},
    {"epoch": 0.03, "learning_rate": 0.000999585285582377, "loss": 2.1898, "step": 140},
    {"epoch": 0.03, "learning_rate": 0.0009995239345146772, "loss": 2.1466, "step": 150},
    {"epoch": 0.03, "learning_rate": 0.0009994583551148314, "loss": 2.1423, "step": 160},
    {"epoch": 0.03, "learning_rate": 0.0009993885479379506, "loss": 2.1451, "step": 170},
    {"epoch": 0.03, "learning_rate": 0.000999314513574934, "loss": 2.202, "step": 180},
    {"epoch": 0.04, "learning_rate": 0.0009992362526524616, "loss": 2.1208, "step": 190},
    {"epoch": 0.04, "learning_rate": 0.0009991537658329906, "loss": 2.1591, "step": 200},
    {"epoch": 0.04, "learning_rate": 0.000999067053814749, "loss": 2.1788, "step": 210},
    {"epoch": 0.04, "learning_rate": 0.0009989761173317304, "loss": 2.147, "step": 220},
    {"epoch": 0.04, "learning_rate": 0.000998880957153687, "loss": 2.1249, "step": 230},
    {"epoch": 0.04, "learning_rate": 0.000998781574086123, "loss": 2.165, "step": 240},
    {"epoch": 0.05, "learning_rate": 0.000998677968970289, "loss": 2.1428, "step": 250},
    {"epoch": 0.05, "learning_rate": 0.0009985701426831735, "loss": 2.1384, "step": 260},
    {"epoch": 0.05, "learning_rate": 0.0009984580961374964, "loss": 2.1585, "step": 270},
    {"epoch": 0.05, "learning_rate": 0.0009983418302817008, "loss": 2.1156, "step": 280},
    {"epoch": 0.05, "learning_rate": 0.0009982213460999448, "loss": 2.0811, "step": 290},
    {"epoch": 0.06, "learning_rate": 0.000998096644612094, "loss": 2.1081, "step": 300},
    {"epoch": 0.06, "learning_rate": 0.0009979677268737118, "loss": 2.1246, "step": 310},
    {"epoch": 0.06, "learning_rate": 0.0009978345939760515, "loss": 2.1229, "step": 320},
    {"epoch": 0.06, "learning_rate": 0.000997697247046046, "loss": 2.1033, "step": 330},
    {"epoch": 0.06, "learning_rate": 0.0009975556872462994, "loss": 2.0931, "step": 340},
    {"epoch": 0.06, "learning_rate": 0.000997409915775076, "loss": 2.1206, "step": 350},
    {"epoch": 0.07, "learning_rate": 0.0009972599338662915, "loss": 2.0537, "step": 360},
    {"epoch": 0.07, "learning_rate": 0.0009971057427895012, "loss": 2.0762, "step": 370},
    {"epoch": 0.07, "learning_rate": 0.0009969473438498897, "loss": 2.0883, "step": 380},
    {"epoch": 0.07, "learning_rate": 0.0009967847383882604, "loss": 2.1174, "step": 390},
    {"epoch": 0.07, "learning_rate": 0.0009966179277810239, "loss": 2.1111, "step": 400},
    {"epoch": 0.08, "learning_rate": 0.0009964469134401855, "loss": 2.1288, "step": 410},
    {"epoch": 0.08, "learning_rate": 0.0009962716968133346, "loss": 2.0967, "step": 420},
    {"epoch": 0.08, "learning_rate": 0.0009960922793836318, "loss": 2.1216, "step": 430},
    {"epoch": 0.08, "learning_rate": 0.0009959086626697955, "loss": 2.0924, "step": 440},
    {"epoch": 0.08, "learning_rate": 0.0009957208482260908, "loss": 2.0809, "step": 450},
    {"epoch": 0.09, "learning_rate": 0.0009955288376423152, "loss": 2.1082, "step": 460},
    {"epoch": 0.09, "learning_rate": 0.0009953326325437852, "loss": 2.0885, "step": 470},
    {"epoch": 0.09, "learning_rate": 0.0009951322345913224, "loss": 2.1133, "step": 480},
    {"epoch": 0.09, "learning_rate": 0.0009949276454812408, "loss": 2.0844, "step": 490},
    {"epoch": 0.09, "learning_rate": 0.00099471886694533, "loss": 2.0796, "step": 500},
    {"epoch": 0.09, "learning_rate": 0.0009945059007508434, "loss": 2.1255, "step": 510},
    {"epoch": 0.1, "learning_rate": 0.0009942887487004804, "loss": 2.0913, "step": 520},
    {"epoch": 0.1, "learning_rate": 0.0009940674126323733, "loss": 2.1003, "step": 530},
    {"epoch": 0.1, "learning_rate": 0.0009938418944200709, "loss": 2.0541, "step": 540},
    {"epoch": 0.1, "learning_rate": 0.0009936121959725223, "loss": 2.0523, "step": 550},
    {"epoch": 0.1, "learning_rate": 0.0009933783192340618, "loss": 2.1225, "step": 560},
    {"epoch": 0.11, "learning_rate": 0.0009931402661843911, "loss": 2.0446, "step": 570},
    {"epoch": 0.11, "learning_rate": 0.000992898038838564, "loss": 2.0921, "step": 580},
    {"epoch": 0.11, "learning_rate": 0.0009926516392469674, "loss": 2.1081, "step": 590},
    {"epoch": 0.11, "learning_rate": 0.0009924010694953064, "loss": 2.0734, "step": 600},
    {"epoch": 0.11, "learning_rate": 0.0009921463317045843, "loss": 2.0652, "step": 610},
    {"epoch": 0.11, "learning_rate": 0.0009918874280310862, "loss": 2.0818, "step": 620},
    {"epoch": 0.12, "learning_rate": 0.0009916243606663605, "loss": 2.0776, "step": 630},
    {"epoch": 0.12, "learning_rate": 0.0009913571318371994, "loss": 2.1025, "step": 640},
    {"epoch": 0.12, "learning_rate": 0.0009910857438056215, "loss": 2.066, "step": 650},
    {"epoch": 0.12, "learning_rate": 0.0009908101988688512, "loss": 2.0575, "step": 660},
    {"epoch": 0.12, "learning_rate": 0.0009905304993593008, "loss": 2.1269, "step": 670},
    {"epoch": 0.13, "learning_rate": 0.0009902466476445486, "loss": 2.0518, "step": 680},
    {"epoch": 0.13, "learning_rate": 0.0009899586461273218, "loss": 2.0698, "step": 690},
    {"epoch": 0.13, "learning_rate": 0.000989666497245473, "loss": 2.0988, "step": 700},
    {"epoch": 0.13, "learning_rate": 0.0009893702034719624, "loss": 2.0986, "step": 710},
    {"epoch": 0.13, "learning_rate": 0.0009890697673148345, "loss": 2.0237, "step": 720},
    {"epoch": 0.14, "learning_rate": 0.0009887651913171986, "loss": 2.0027, "step": 730},
    {"epoch": 0.14, "learning_rate": 0.0009884564780572064, "loss": 2.0563, "step": 740},
    {"epoch": 0.14, "learning_rate": 0.0009881436301480305, "loss": 2.0624, "step": 750},
    {"epoch": 0.14, "learning_rate": 0.000987826650237842, "loss": 2.0926, "step": 760},
    {"epoch": 0.14, "learning_rate": 0.000987505541009788, "loss": 2.0585, "step": 770},
    {"epoch": 0.14, "learning_rate": 0.0009871803051819696, "loss": 2.0494, "step": 780},
    {"epoch": 0.15, "learning_rate": 0.0009868509455074183, "loss": 2.0106, "step": 790},
    {"epoch": 0.15, "learning_rate": 0.0009865174647740729, "loss": 2.0861, "step": 800},
    {"epoch": 0.15, "learning_rate": 0.0009861798658047556, "loss": 2.0478, "step": 810},
    {"epoch": 0.15, "learning_rate": 0.0009858381514571484, "loss": 2.0469, "step": 820},
    {"epoch": 0.15, "learning_rate": 0.000985492324623769, "loss": 2.0671, "step": 830},
    {"epoch": 0.16, "learning_rate": 0.0009851423882319458, "loss": 2.0808, "step": 840},
    {"epoch": 0.16, "learning_rate": 0.0009847883452437937, "loss": 2.0331, "step": 850},
    {"epoch": 0.16, "learning_rate": 0.0009844301986561893, "loss": 2.0295, "step": 860},
    {"epoch": 0.16, "learning_rate": 0.000984067951500744, "loss": 2.0873, "step": 870},
    {"epoch": 0.16, "learning_rate": 0.00098370160684378, "loss": 2.1038, "step": 880},
    {"epoch": 0.16, "learning_rate": 0.0009833311677863042, "loss": 2.0337, "step": 890},
    {"epoch": 0.17, "learning_rate": 0.0009829566374639801, "loss": 2.0407, "step": 900},
    {"epoch": 0.17, "learning_rate": 0.0009825780190471042, "loss": 2.1049, "step": 910},
    {"epoch": 0.17, "learning_rate": 0.000982195315740576, "loss": 2.0475, "step": 920},
    {"epoch": 0.17, "learning_rate": 0.0009818085307838741, "loss": 2.0624, "step": 930},
    {"epoch": 0.17, "learning_rate": 0.000981417667451026, "loss": 2.0714, "step": 940},
    {"epoch": 0.18, "learning_rate": 0.0009810227290505816, "loss": 2.0947, "step": 950},
    {"epoch": 0.18, "learning_rate": 0.0009806237189255859, "loss": 2.0591, "step": 960},
    {"epoch": 0.18, "learning_rate": 0.0009802206404535489, "loss": 2.0301, "step": 970},
    {"epoch": 0.18, "learning_rate": 0.000979813497046419, "loss": 2.0556, "step": 980},
    {"epoch": 0.18, "learning_rate": 0.0009794022921505523, "loss": 2.0753, "step": 990},
    {"epoch": 0.19, "learning_rate": 0.000978987029246685, "loss": 2.0898, "step": 1000}
  ],
  "max_steps": 10798,
  "num_train_epochs": 2,
  "total_flos": 9.498667895656284e+17,
  "trial_name": null,
  "trial_params": null
}