{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.015625, "grad_norm": 0.5563601851463318, "learning_rate": 3.125e-06, "loss": 1.837, "step": 1 },
    { "epoch": 0.078125, "grad_norm": 0.5579909682273865, "learning_rate": 1.5625e-05, "loss": 1.832, "step": 5 },
    { "epoch": 0.15625, "grad_norm": 0.3869417607784271, "learning_rate": 3.125e-05, "loss": 1.8211, "step": 10 },
    { "epoch": 0.234375, "grad_norm": 0.2597704827785492, "learning_rate": 4.6875e-05, "loss": 1.791, "step": 15 },
    { "epoch": 0.3125, "grad_norm": 0.2681010663509369, "learning_rate": 6.25e-05, "loss": 1.7632, "step": 20 },
    { "epoch": 0.390625, "grad_norm": 0.2458464354276657, "learning_rate": 7.8125e-05, "loss": 1.7142, "step": 25 },
    { "epoch": 0.46875, "grad_norm": 0.2320868819952011, "learning_rate": 9.375e-05, "loss": 1.662, "step": 30 },
    { "epoch": 0.546875, "grad_norm": 0.2217356562614441, "learning_rate": 0.000109375, "loss": 1.606, "step": 35 },
    { "epoch": 0.625, "grad_norm": 0.17081762850284576, "learning_rate": 0.000125, "loss": 1.5636, "step": 40 },
    { "epoch": 0.703125, "grad_norm": 0.13257570564746857, "learning_rate": 0.00014062500000000002, "loss": 1.5291, "step": 45 },
    { "epoch": 0.78125, "grad_norm": 0.12237544357776642, "learning_rate": 0.00015625, "loss": 1.5163, "step": 50 },
    { "epoch": 0.859375, "grad_norm": 0.10626661777496338, "learning_rate": 0.00017187500000000002, "loss": 1.4857, "step": 55 },
    { "epoch": 0.9375, "grad_norm": 0.1030210331082344, "learning_rate": 0.0001875, "loss": 1.471, "step": 60 },
    { "epoch": 1.0, "eval_loss": 2.3853657245635986, "eval_runtime": 0.8929, "eval_samples_per_second": 6.72, "eval_steps_per_second": 1.12, "step": 64 },
    { "epoch": 1.015625, "grad_norm": 0.1105547621846199, "learning_rate": 0.00019999851261394218, "loss": 1.4547, "step": 65 },
    { "epoch": 1.09375, "grad_norm": 0.1074226126074791, "learning_rate": 0.00019994645874763658, "loss": 1.4433, "step": 70 },
    { "epoch": 1.171875, "grad_norm": 0.1031486839056015, "learning_rate": 0.00019982007981886847, "loss": 1.4319, "step": 75 },
    { "epoch": 1.25, "grad_norm": 0.09431572258472443, "learning_rate": 0.00019961946980917456, "loss": 1.4231, "step": 80 },
    { "epoch": 1.328125, "grad_norm": 0.10248947143554688, "learning_rate": 0.00019934477790194445, "loss": 1.414, "step": 85 },
    { "epoch": 1.40625, "grad_norm": 0.10952038317918777, "learning_rate": 0.00019899620837148077, "loss": 1.3994, "step": 90 },
    { "epoch": 1.484375, "grad_norm": 0.11935596913099289, "learning_rate": 0.0001985740204310909, "loss": 1.3933, "step": 95 },
    { "epoch": 1.5625, "grad_norm": 0.13777290284633636, "learning_rate": 0.00019807852804032305, "loss": 1.3891, "step": 100 },
    { "epoch": 1.640625, "grad_norm": 0.10964534431695938, "learning_rate": 0.00019751009967149087, "loss": 1.3755, "step": 105 },
    { "epoch": 1.71875, "grad_norm": 0.10717500746250153, "learning_rate": 0.00019686915803565934, "loss": 1.3811, "step": 110 },
    { "epoch": 1.796875, "grad_norm": 0.11473763734102249, "learning_rate": 0.0001961561797682962, "loss": 1.3661, "step": 115 },
    { "epoch": 1.875, "grad_norm": 0.11382485926151276, "learning_rate": 0.0001953716950748227, "loss": 1.3596, "step": 120 },
    { "epoch": 1.953125, "grad_norm": 0.12709280848503113, "learning_rate": 0.0001945162873363268, "loss": 1.362, "step": 125 },
    { "epoch": 2.0, "eval_loss": 2.375438690185547, "eval_runtime": 0.8905, "eval_samples_per_second": 6.738, "eval_steps_per_second": 1.123, "step": 128 },
    { "epoch": 2.03125, "grad_norm": 0.1248418539762497, "learning_rate": 0.0001935905926757326, "loss": 1.3543, "step": 130 },
    { "epoch": 2.109375, "grad_norm": 0.13678818941116333, "learning_rate": 0.00019259529948474833, "loss": 1.3531, "step": 135 },
    { "epoch": 2.1875, "grad_norm": 0.12561410665512085, "learning_rate": 0.00019153114791194473, "loss": 1.3382, "step": 140 },
    { "epoch": 2.265625, "grad_norm": 0.14160117506980896, "learning_rate": 0.00019039892931234435, "loss": 1.3388, "step": 145 },
    { "epoch": 2.34375, "grad_norm": 0.1499018520116806, "learning_rate": 0.00018919948565893142, "loss": 1.3392, "step": 150 },
    { "epoch": 2.421875, "grad_norm": 0.14188511669635773, "learning_rate": 0.00018793370891651972, "loss": 1.3406, "step": 155 },
    { "epoch": 2.5, "grad_norm": 0.12685342133045197, "learning_rate": 0.00018660254037844388, "loss": 1.3312, "step": 160 },
    { "epoch": 2.578125, "grad_norm": 0.1338503062725067, "learning_rate": 0.00018520696996656788, "loss": 1.3386, "step": 165 },
    { "epoch": 2.65625, "grad_norm": 0.1542833298444748, "learning_rate": 0.0001837480354951308, "loss": 1.3339, "step": 170 },
    { "epoch": 2.734375, "grad_norm": 0.1419745534658432, "learning_rate": 0.00018222682189897752, "loss": 1.3256, "step": 175 },
    { "epoch": 2.8125, "grad_norm": 0.12559981644153595, "learning_rate": 0.00018064446042674828, "loss": 1.3187, "step": 180 },
    { "epoch": 2.890625, "grad_norm": 0.1430642306804657, "learning_rate": 0.0001790021277996269, "loss": 1.3193, "step": 185 },
    { "epoch": 2.96875, "grad_norm": 0.12510241568088531, "learning_rate": 0.0001773010453362737, "loss": 1.3229, "step": 190 },
    { "epoch": 3.0, "eval_loss": 2.3739588260650635, "eval_runtime": 0.8887, "eval_samples_per_second": 6.751, "eval_steps_per_second": 1.125, "step": 192 },
    { "epoch": 3.046875, "grad_norm": 0.13525685667991638, "learning_rate": 0.00017554247804459316, "loss": 1.3138, "step": 195 },
    { "epoch": 3.125, "grad_norm": 0.1221759021282196, "learning_rate": 0.0001737277336810124, "loss": 1.3114, "step": 200 },
    { "epoch": 3.203125, "grad_norm": 0.1508011817932129, "learning_rate": 0.0001718581617779698, "loss": 1.3121, "step": 205 },
    { "epoch": 3.28125, "grad_norm": 0.15374642610549927, "learning_rate": 0.00016993515264033672, "loss": 1.3061, "step": 210 },
    { "epoch": 3.359375, "grad_norm": 0.13054175674915314, "learning_rate": 0.00016796013631151897, "loss": 1.3131, "step": 215 },
    { "epoch": 3.4375, "grad_norm": 0.13617338240146637, "learning_rate": 0.00016593458151000688, "loss": 1.3055, "step": 220 },
    { "epoch": 3.515625, "grad_norm": 0.1574951857328415, "learning_rate": 0.00016385999453716454, "loss": 1.3081, "step": 225 },
    { "epoch": 3.59375, "grad_norm": 0.13046763837337494, "learning_rate": 0.00016173791815707051, "loss": 1.2971, "step": 230 },
    { "epoch": 3.671875, "grad_norm": 0.12328305840492249, "learning_rate": 0.00015956993044924334, "loss": 1.3004, "step": 235 },
    { "epoch": 3.75, "grad_norm": 0.1457005739212036, "learning_rate": 0.0001573576436351046, "loss": 1.301, "step": 240 },
    { "epoch": 3.828125, "grad_norm": 0.14775606989860535, "learning_rate": 0.0001551027028790524, "loss": 1.2981, "step": 245 },
    { "epoch": 3.90625, "grad_norm": 0.13573531806468964, "learning_rate": 0.0001528067850650368, "loss": 1.2964, "step": 250 },
    { "epoch": 3.984375, "grad_norm": 0.14599502086639404, "learning_rate": 0.0001504715975495472, "loss": 1.2996, "step": 255 },
    { "epoch": 4.0, "eval_loss": 2.3706400394439697, "eval_runtime": 0.8905, "eval_samples_per_second": 6.738, "eval_steps_per_second": 1.123, "step": 256 },
    { "epoch": 4.0625, "grad_norm": 0.18352623283863068, "learning_rate": 0.00014809887689193877, "loss": 1.2791, "step": 260 },
    { "epoch": 4.140625, "grad_norm": 0.12809208035469055, "learning_rate": 0.00014569038756304207, "loss": 1.2838, "step": 265 },
    { "epoch": 4.21875, "grad_norm": 0.14866453409194946, "learning_rate": 0.00014324792063301662, "loss": 1.2855, "step": 270 },
    { "epoch": 4.296875, "grad_norm": 0.1483401656150818, "learning_rate": 0.00014077329243942369, "loss": 1.2906, "step": 275 },
    { "epoch": 4.375, "grad_norm": 0.1301499307155609, "learning_rate": 0.000138268343236509, "loss": 1.2832, "step": 280 },
    { "epoch": 4.453125, "grad_norm": 0.12229609489440918, "learning_rate": 0.00013573493582670003, "loss": 1.2855, "step": 285 },
    { "epoch": 4.53125, "grad_norm": 0.1406661719083786, "learning_rate": 0.00013317495417533524, "loss": 1.2885, "step": 290 },
    { "epoch": 4.609375, "grad_norm": 0.13113024830818176, "learning_rate": 0.00013059030200965536, "loss": 1.2899, "step": 295 },
    { "epoch": 4.6875, "grad_norm": 0.13431058824062347, "learning_rate": 0.00012798290140309923, "loss": 1.2819, "step": 300 },
    { "epoch": 4.765625, "grad_norm": 0.14978773891925812, "learning_rate": 0.00012535469134595595, "loss": 1.2852, "step": 305 },
    { "epoch": 4.84375, "grad_norm": 0.12423586845397949, "learning_rate": 0.00012270762630343734, "loss": 1.2802, "step": 310 },
    { "epoch": 4.921875, "grad_norm": 0.14971400797367096, "learning_rate": 0.00012004367476224206, "loss": 1.2829, "step": 315 },
    { "epoch": 5.0, "grad_norm": 0.14705318212509155, "learning_rate": 0.00011736481776669306, "loss": 1.2878, "step": 320 },
    { "epoch": 5.0, "eval_loss": 2.3755218982696533, "eval_runtime": 0.8842, "eval_samples_per_second": 6.786, "eval_steps_per_second": 1.131, "step": 320 },
    { "epoch": 5.078125, "grad_norm": 0.17323575913906097, "learning_rate": 0.00011467304744553618, "loss": 1.2791, "step": 325 },
    { "epoch": 5.15625, "grad_norm": 0.15446850657463074, "learning_rate": 0.00011197036553049625, "loss": 1.2663, "step": 330 },
    { "epoch": 5.234375, "grad_norm": 0.1357167363166809, "learning_rate": 0.00010925878186769158, "loss": 1.2707, "step": 335 },
    { "epoch": 5.3125, "grad_norm": 0.13887612521648407, "learning_rate": 0.00010654031292301432, "loss": 1.2713, "step": 340 },
    { "epoch": 5.390625, "grad_norm": 0.18401511013507843, "learning_rate": 0.00010381698028258817, "loss": 1.2708, "step": 345 },
    { "epoch": 5.46875, "grad_norm": 0.14264121651649475, "learning_rate": 0.00010109080914941824, "loss": 1.2716, "step": 350 },
    { "epoch": 5.546875, "grad_norm": 0.1242898479104042, "learning_rate": 9.836382683735132e-05, "loss": 1.2778, "step": 355 },
    { "epoch": 5.625, "grad_norm": 0.11967791616916656, "learning_rate": 9.563806126346642e-05, "loss": 1.2761, "step": 360 },
    { "epoch": 5.703125, "grad_norm": 0.1407729834318161, "learning_rate": 9.29155394400166e-05, "loss": 1.271, "step": 365 },
    { "epoch": 5.78125, "grad_norm": 0.127610981464386, "learning_rate": 9.019828596704394e-05, "loss": 1.2707, "step": 370 },
    { "epoch": 5.859375, "grad_norm": 0.131484255194664, "learning_rate": 8.74883215267881e-05, "loss": 1.2703, "step": 375 },
    { "epoch": 5.9375, "grad_norm": 0.14869672060012817, "learning_rate": 8.478766138100834e-05, "loss": 1.2746, "step": 380 },
    { "epoch": 6.0, "eval_loss": 2.3722288608551025, "eval_runtime": 0.8691, "eval_samples_per_second": 6.904, "eval_steps_per_second": 1.151, "step": 384 },
    { "epoch": 6.015625, "grad_norm": 0.12939053773880005, "learning_rate": 8.209831387233676e-05, "loss": 1.267, "step": 385 },
    { "epoch": 6.09375, "grad_norm": 0.1274917721748352, "learning_rate": 7.942227893077652e-05, "loss": 1.2609, "step": 390 },
    { "epoch": 6.171875, "grad_norm": 0.128373920917511, "learning_rate": 7.676154658645656e-05, "loss": 1.2613, "step": 395 },
    { "epoch": 6.25, "grad_norm": 0.12212257087230682, "learning_rate": 7.411809548974792e-05, "loss": 1.2608, "step": 400 },
    { "epoch": 6.328125, "grad_norm": 0.12020603567361832, "learning_rate": 7.149389143984295e-05, "loss": 1.2648, "step": 405 },
    { "epoch": 6.40625, "grad_norm": 0.13269896805286407, "learning_rate": 6.889088592289093e-05, "loss": 1.263, "step": 410 },
    { "epoch": 6.484375, "grad_norm": 0.12962587177753448, "learning_rate": 6.6311014660778e-05, "loss": 1.2688, "step": 415 },
    { "epoch": 6.5625, "grad_norm": 0.13470596075057983, "learning_rate": 6.375619617162985e-05, "loss": 1.2606, "step": 420 },
    { "epoch": 6.640625, "grad_norm": 0.1306695193052292, "learning_rate": 6.122833034310793e-05, "loss": 1.264, "step": 425 },
    { "epoch": 6.71875, "grad_norm": 0.12205986678600311, "learning_rate": 5.872929701956054e-05, "loss": 1.2624, "step": 430 },
    { "epoch": 6.796875, "grad_norm": 0.12223726511001587, "learning_rate": 5.6260954604078585e-05, "loss": 1.2624, "step": 435 },
    { "epoch": 6.875, "grad_norm": 0.11978649348020554, "learning_rate": 5.382513867649663e-05, "loss": 1.2684, "step": 440 },
    { "epoch": 6.953125, "grad_norm": 0.13118350505828857, "learning_rate": 5.142366062836599e-05, "loss": 1.2617, "step": 445 },
    { "epoch": 7.0, "eval_loss": 2.3756062984466553, "eval_runtime": 0.8698, "eval_samples_per_second": 6.898, "eval_steps_per_second": 1.15, "step": 448 },
    { "epoch": 7.03125, "grad_norm": 0.12501190602779388, "learning_rate": 4.9058306315915826e-05, "loss": 1.2686, "step": 450 },
    { "epoch": 7.109375, "grad_norm": 0.11916686594486237, "learning_rate": 4.6730834732003104e-05, "loss": 1.2589, "step": 455 },
    { "epoch": 7.1875, "grad_norm": 0.1307828575372696, "learning_rate": 4.444297669803981e-05, "loss": 1.2641, "step": 460 },
    { "epoch": 7.265625, "grad_norm": 0.1322871744632721, "learning_rate": 4.219643357686967e-05, "loss": 1.2637, "step": 465 },
    { "epoch": 7.34375, "grad_norm": 0.12456091493368149, "learning_rate": 3.999287600755192e-05, "loss": 1.2596, "step": 470 },
    { "epoch": 7.421875, "grad_norm": 0.12441947311162949, "learning_rate": 3.783394266299228e-05, "loss": 1.2609, "step": 475 },
    { "epoch": 7.5, "grad_norm": 0.1233881339430809, "learning_rate": 3.5721239031346066e-05, "loss": 1.258, "step": 480 },
    { "epoch": 7.578125, "grad_norm": 0.11959797888994217, "learning_rate": 3.365633622209891e-05, "loss": 1.2535, "step": 485 },
    { "epoch": 7.65625, "grad_norm": 0.13552451133728027, "learning_rate": 3.164076979771287e-05, "loss": 1.2567, "step": 490 },
    { "epoch": 7.734375, "grad_norm": 0.1312321126461029, "learning_rate": 2.9676038631707593e-05, "loss": 1.2546, "step": 495 },
    { "epoch": 7.8125, "grad_norm": 0.12341451644897461, "learning_rate": 2.776360379402445e-05, "loss": 1.2589, "step": 500 },
    { "epoch": 7.890625, "grad_norm": 0.11580604314804077, "learning_rate": 2.5904887464504114e-05, "loss": 1.2514, "step": 505 },
    { "epoch": 7.96875, "grad_norm": 0.1164306178689003, "learning_rate": 2.4101271875283817e-05, "loss": 1.2497, "step": 510 },
    { "epoch": 8.0, "eval_loss": 2.3754327297210693, "eval_runtime": 0.8715, "eval_samples_per_second": 6.885, "eval_steps_per_second": 1.147, "step": 512 },
    { "epoch": 8.046875, "grad_norm": 0.11353053152561188, "learning_rate": 2.2354098282902446e-05, "loss": 1.2515, "step": 515 },
    { "epoch": 8.125, "grad_norm": 0.11808107793331146, "learning_rate": 2.0664665970876496e-05, "loss": 1.2568, "step": 520 },
    { "epoch": 8.203125, "grad_norm": 0.12227106839418411, "learning_rate": 1.903423128348959e-05, "loss": 1.2526, "step": 525 },
    { "epoch": 8.28125, "grad_norm": 0.11858697980642319, "learning_rate": 1.7464006691513623e-05, "loss": 1.2546, "step": 530 },
    { "epoch": 8.359375, "grad_norm": 0.11320216953754425, "learning_rate": 1.595515989055618e-05, "loss": 1.2513, "step": 535 },
    { "epoch": 8.4375, "grad_norm": 0.11540042608976364, "learning_rate": 1.4508812932705363e-05, "loss": 1.2539, "step": 540 },
    { "epoch": 8.515625, "grad_norm": 0.11582642793655396, "learning_rate": 1.3126041392116772e-05, "loss": 1.2552, "step": 545 },
    { "epoch": 8.59375, "grad_norm": 0.11406353861093521, "learning_rate": 1.1807873565164506e-05, "loss": 1.2451, "step": 550 },
    { "epoch": 8.671875, "grad_norm": 0.11722878366708755, "learning_rate": 1.0555289705749483e-05, "loss": 1.2555, "step": 555 },
    { "epoch": 8.75, "grad_norm": 0.11788377165794373, "learning_rate": 9.369221296335006e-06, "loss": 1.2644, "step": 560 },
    { "epoch": 8.828125, "grad_norm": 0.12228409945964813, "learning_rate": 8.250550355250875e-06, "loss": 1.2554, "step": 565 },
    { "epoch": 8.90625, "grad_norm": 0.11590871214866638, "learning_rate": 7.200108780781556e-06, "loss": 1.2581, "step": 570 },
    { "epoch": 8.984375, "grad_norm": 0.11986227333545685, "learning_rate": 6.218677732526035e-06, "loss": 1.2549, "step": 575 },
    { "epoch": 9.0, "eval_loss": 2.3762052059173584, "eval_runtime": 0.8701, "eval_samples_per_second": 6.896, "eval_steps_per_second": 1.149, "step": 576 },
    { "epoch": 9.0625, "grad_norm": 0.11538632959127426, "learning_rate": 5.306987050489442e-06, "loss": 1.2553, "step": 580 },
    { "epoch": 9.140625, "grad_norm": 0.11359097808599472, "learning_rate": 4.465714712338398e-06, "loss": 1.2438, "step": 585 },
    { "epoch": 9.21875, "grad_norm": 0.1167021319270134, "learning_rate": 3.6954863292237297e-06, "loss": 1.2508, "step": 590 },
    { "epoch": 9.296875, "grad_norm": 0.11401596665382385, "learning_rate": 2.996874680545603e-06, "loss": 1.2555, "step": 595 },
    { "epoch": 9.375, "grad_norm": 0.11593978106975555, "learning_rate": 2.3703992880066638e-06, "loss": 1.2491, "step": 600 },
    { "epoch": 9.453125, "grad_norm": 0.11369957774877548, "learning_rate": 1.8165260292704711e-06, "loss": 1.2556, "step": 605 },
    { "epoch": 9.53125, "grad_norm": 0.11447325348854065, "learning_rate": 1.3356667915121025e-06, "loss": 1.2559, "step": 610 },
    { "epoch": 9.609375, "grad_norm": 0.11217474192380905, "learning_rate": 9.281791651187366e-07, "loss": 1.2635, "step": 615 },
    { "epoch": 9.6875, "grad_norm": 0.11821179836988449, "learning_rate": 5.943661777680354e-07, "loss": 1.2521, "step": 620 },
    { "epoch": 9.765625, "grad_norm": 0.113133005797863, "learning_rate": 3.3447606908196817e-07, "loss": 1.2539, "step": 625 },
    { "epoch": 9.84375, "grad_norm": 0.11367449164390564, "learning_rate": 1.487021060236904e-07, "loss": 1.2528, "step": 630 },
    { "epoch": 9.921875, "grad_norm": 0.11390256881713867, "learning_rate": 3.7182439174832106e-08, "loss": 1.2581, "step": 635 },
    { "epoch": 10.0, "grad_norm": 0.11366365104913712, "learning_rate": 0.0, "loss": 1.2494, "step": 640 },
    { "epoch": 10.0, "eval_loss": 2.376882791519165, "eval_runtime": 0.8974, "eval_samples_per_second": 6.686, "eval_steps_per_second": 1.114, "step": 640 },
    { "epoch": 10.0, "step": 640, "total_flos": 2.846679860191953e+18, "train_loss": 1.3252380434423685, "train_runtime": 3360.483, "train_samples_per_second": 48.669, "train_steps_per_second": 0.19 }
  ],
  "logging_steps": 5,
  "max_steps": 640,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.846679860191953e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}