{
  "best_metric": 0.772973358631134,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.06555227794165848,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006555227794165847,
      "grad_norm": 0.2373015284538269,
      "learning_rate": 5e-06,
      "loss": 0.8465,
      "step": 1
    },
    {
      "epoch": 0.0006555227794165847,
      "eval_loss": 1.0142732858657837,
      "eval_runtime": 197.6224,
      "eval_samples_per_second": 13.005,
      "eval_steps_per_second": 6.502,
      "step": 1
    },
    {
      "epoch": 0.0013110455588331695,
      "grad_norm": 0.28709691762924194,
      "learning_rate": 1e-05,
      "loss": 0.9051,
      "step": 2
    },
    {
      "epoch": 0.0019665683382497543,
      "grad_norm": 0.3491666615009308,
      "learning_rate": 1.5e-05,
      "loss": 0.9242,
      "step": 3
    },
    {
      "epoch": 0.002622091117666339,
      "grad_norm": 0.3462134897708893,
      "learning_rate": 2e-05,
      "loss": 0.918,
      "step": 4
    },
    {
      "epoch": 0.0032776138970829235,
      "grad_norm": 0.28024375438690186,
      "learning_rate": 2.5e-05,
      "loss": 0.8966,
      "step": 5
    },
    {
      "epoch": 0.003933136676499509,
      "grad_norm": 0.29582786560058594,
      "learning_rate": 3e-05,
      "loss": 0.9393,
      "step": 6
    },
    {
      "epoch": 0.004588659455916093,
      "grad_norm": 0.308539479970932,
      "learning_rate": 3.5e-05,
      "loss": 0.9266,
      "step": 7
    },
    {
      "epoch": 0.005244182235332678,
      "grad_norm": 0.2761336863040924,
      "learning_rate": 4e-05,
      "loss": 0.9699,
      "step": 8
    },
    {
      "epoch": 0.0058997050147492625,
      "grad_norm": 0.2822662591934204,
      "learning_rate": 4.5e-05,
      "loss": 0.9126,
      "step": 9
    },
    {
      "epoch": 0.006555227794165847,
      "grad_norm": 0.2669486999511719,
      "learning_rate": 5e-05,
      "loss": 0.942,
      "step": 10
    },
    {
      "epoch": 0.007210750573582432,
      "grad_norm": 0.2491784691810608,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.9112,
      "step": 11
    },
    {
      "epoch": 0.007866273352999017,
      "grad_norm": 0.22748543322086334,
      "learning_rate": 6e-05,
      "loss": 0.8695,
      "step": 12
    },
    {
      "epoch": 0.008521796132415601,
      "grad_norm": 0.2544311285018921,
      "learning_rate": 6.500000000000001e-05,
      "loss": 0.8578,
      "step": 13
    },
    {
      "epoch": 0.009177318911832186,
      "grad_norm": 0.24937686324119568,
      "learning_rate": 7e-05,
      "loss": 0.8732,
      "step": 14
    },
    {
      "epoch": 0.00983284169124877,
      "grad_norm": 0.25596895813941956,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.8058,
      "step": 15
    },
    {
      "epoch": 0.010488364470665356,
      "grad_norm": 0.2677898406982422,
      "learning_rate": 8e-05,
      "loss": 0.8617,
      "step": 16
    },
    {
      "epoch": 0.011143887250081941,
      "grad_norm": 0.27905362844467163,
      "learning_rate": 8.5e-05,
      "loss": 0.8626,
      "step": 17
    },
    {
      "epoch": 0.011799410029498525,
      "grad_norm": 0.25651392340660095,
      "learning_rate": 9e-05,
      "loss": 0.8348,
      "step": 18
    },
    {
      "epoch": 0.01245493280891511,
      "grad_norm": 0.2798261046409607,
      "learning_rate": 9.5e-05,
      "loss": 0.8304,
      "step": 19
    },
    {
      "epoch": 0.013110455588331694,
      "grad_norm": 0.26732340455055237,
      "learning_rate": 0.0001,
      "loss": 0.8372,
      "step": 20
    },
    {
      "epoch": 0.01376597836774828,
      "grad_norm": 0.25484147667884827,
      "learning_rate": 9.999238475781957e-05,
      "loss": 0.8088,
      "step": 21
    },
    {
      "epoch": 0.014421501147164863,
      "grad_norm": 0.2536450922489166,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.8054,
      "step": 22
    },
    {
      "epoch": 0.015077023926581449,
      "grad_norm": 0.2574830949306488,
      "learning_rate": 9.99314767377287e-05,
      "loss": 0.854,
      "step": 23
    },
    {
      "epoch": 0.015732546705998034,
      "grad_norm": 0.2322714626789093,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.813,
      "step": 24
    },
    {
      "epoch": 0.01638806948541462,
      "grad_norm": 0.2515113055706024,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.8065,
      "step": 25
    },
    {
      "epoch": 0.017043592264831202,
      "grad_norm": 0.24610289931297302,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.7927,
      "step": 26
    },
    {
      "epoch": 0.017699115044247787,
      "grad_norm": 0.24373719096183777,
      "learning_rate": 9.962730758206611e-05,
      "loss": 0.8351,
      "step": 27
    },
    {
      "epoch": 0.018354637823664373,
      "grad_norm": 0.24467414617538452,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.813,
      "step": 28
    },
    {
      "epoch": 0.01901016060308096,
      "grad_norm": 0.23405858874320984,
      "learning_rate": 9.938441702975689e-05,
      "loss": 0.8165,
      "step": 29
    },
    {
      "epoch": 0.01966568338249754,
      "grad_norm": 0.24256618320941925,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.7617,
      "step": 30
    },
    {
      "epoch": 0.020321206161914126,
      "grad_norm": 0.2607885003089905,
      "learning_rate": 9.908135917238321e-05,
      "loss": 0.7873,
      "step": 31
    },
    {
      "epoch": 0.02097672894133071,
      "grad_norm": 0.2551879286766052,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.7735,
      "step": 32
    },
    {
      "epoch": 0.021632251720747297,
      "grad_norm": 0.2627292275428772,
      "learning_rate": 9.871850323926177e-05,
      "loss": 0.8321,
      "step": 33
    },
    {
      "epoch": 0.022287774500163882,
      "grad_norm": 0.29124778509140015,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.7953,
      "step": 34
    },
    {
      "epoch": 0.022943297279580464,
      "grad_norm": 0.26475605368614197,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.7745,
      "step": 35
    },
    {
      "epoch": 0.02359882005899705,
      "grad_norm": 0.2607128322124481,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.7786,
      "step": 36
    },
    {
      "epoch": 0.024254342838413635,
      "grad_norm": 0.2592867314815521,
      "learning_rate": 9.781523779815179e-05,
      "loss": 0.7604,
      "step": 37
    },
    {
      "epoch": 0.02490986561783022,
      "grad_norm": 0.25272342562675476,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.7898,
      "step": 38
    },
    {
      "epoch": 0.025565388397246803,
      "grad_norm": 0.2672499716281891,
      "learning_rate": 9.727592877996585e-05,
      "loss": 0.7601,
      "step": 39
    },
    {
      "epoch": 0.02622091117666339,
      "grad_norm": 0.29855430126190186,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.8142,
      "step": 40
    },
    {
      "epoch": 0.026876433956079974,
      "grad_norm": 0.3550460934638977,
      "learning_rate": 9.667902132486009e-05,
      "loss": 0.7327,
      "step": 41
    },
    {
      "epoch": 0.02753195673549656,
      "grad_norm": 0.29364699125289917,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.7738,
      "step": 42
    },
    {
      "epoch": 0.028187479514913145,
      "grad_norm": 0.32683664560317993,
      "learning_rate": 9.602524267262203e-05,
      "loss": 0.7557,
      "step": 43
    },
    {
      "epoch": 0.028843002294329727,
      "grad_norm": 0.31279873847961426,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.7635,
      "step": 44
    },
    {
      "epoch": 0.029498525073746312,
      "grad_norm": 0.30517908930778503,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.7931,
      "step": 45
    },
    {
      "epoch": 0.030154047853162898,
      "grad_norm": 0.3420467972755432,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.8058,
      "step": 46
    },
    {
      "epoch": 0.030809570632579483,
      "grad_norm": 0.3307579755783081,
      "learning_rate": 9.45503262094184e-05,
      "loss": 0.7355,
      "step": 47
    },
    {
      "epoch": 0.03146509341199607,
      "grad_norm": 0.6385186910629272,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.7978,
      "step": 48
    },
    {
      "epoch": 0.032120616191412654,
      "grad_norm": 0.6117073893547058,
      "learning_rate": 9.373098535696979e-05,
      "loss": 0.7463,
      "step": 49
    },
    {
      "epoch": 0.03277613897082924,
      "grad_norm": 0.9974555373191833,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.7761,
      "step": 50
    },
    {
      "epoch": 0.03277613897082924,
      "eval_loss": 0.8445320129394531,
      "eval_runtime": 199.0087,
      "eval_samples_per_second": 12.914,
      "eval_steps_per_second": 6.457,
      "step": 50
    },
    {
      "epoch": 0.03343166175024582,
      "grad_norm": 0.45800045132637024,
      "learning_rate": 9.285836503510562e-05,
      "loss": 0.8983,
      "step": 51
    },
    {
      "epoch": 0.034087184529662404,
      "grad_norm": 0.394972026348114,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.8675,
      "step": 52
    },
    {
      "epoch": 0.03474270730907899,
      "grad_norm": 0.31172430515289307,
      "learning_rate": 9.193352839727121e-05,
      "loss": 0.8291,
      "step": 53
    },
    {
      "epoch": 0.035398230088495575,
      "grad_norm": 0.22538970410823822,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.7883,
      "step": 54
    },
    {
      "epoch": 0.03605375286791216,
      "grad_norm": 0.20795033872127533,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.752,
      "step": 55
    },
    {
      "epoch": 0.036709275647328746,
      "grad_norm": 0.2245747447013855,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.7782,
      "step": 56
    },
    {
      "epoch": 0.03736479842674533,
      "grad_norm": 0.19541659951210022,
      "learning_rate": 8.993177550236464e-05,
      "loss": 0.7945,
      "step": 57
    },
    {
      "epoch": 0.03802032120616192,
      "grad_norm": 0.1979525238275528,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.8191,
      "step": 58
    },
    {
      "epoch": 0.0386758439855785,
      "grad_norm": 0.21741431951522827,
      "learning_rate": 8.885729807284856e-05,
      "loss": 0.8025,
      "step": 59
    },
    {
      "epoch": 0.03933136676499508,
      "grad_norm": 0.2003290057182312,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.7927,
      "step": 60
    },
    {
      "epoch": 0.039986889544411666,
      "grad_norm": 0.21580374240875244,
      "learning_rate": 8.773547901113862e-05,
      "loss": 0.8017,
      "step": 61
    },
    {
      "epoch": 0.04064241232382825,
      "grad_norm": 0.20684310793876648,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.8022,
      "step": 62
    },
    {
      "epoch": 0.04129793510324484,
      "grad_norm": 0.20254190266132355,
      "learning_rate": 8.656768508095853e-05,
      "loss": 0.838,
      "step": 63
    },
    {
      "epoch": 0.04195345788266142,
      "grad_norm": 0.2019943743944168,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.7911,
      "step": 64
    },
    {
      "epoch": 0.04260898066207801,
      "grad_norm": 0.19427140057086945,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7759,
      "step": 65
    },
    {
      "epoch": 0.043264503441494594,
      "grad_norm": 0.19595062732696533,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.7871,
      "step": 66
    },
    {
      "epoch": 0.04392002622091118,
      "grad_norm": 0.2017510086297989,
      "learning_rate": 8.409991800312493e-05,
      "loss": 0.8125,
      "step": 67
    },
    {
      "epoch": 0.044575549000327765,
      "grad_norm": 0.22166839241981506,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.8228,
      "step": 68
    },
    {
      "epoch": 0.04523107177974434,
      "grad_norm": 0.19774554669857025,
      "learning_rate": 8.280295144952536e-05,
      "loss": 0.7538,
      "step": 69
    },
    {
      "epoch": 0.04588659455916093,
      "grad_norm": 0.20463129878044128,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.7343,
      "step": 70
    },
    {
      "epoch": 0.046542117338577514,
      "grad_norm": 0.22063031792640686,
      "learning_rate": 8.146601955249188e-05,
      "loss": 0.7974,
      "step": 71
    },
    {
      "epoch": 0.0471976401179941,
      "grad_norm": 0.20627279579639435,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.7727,
      "step": 72
    },
    {
      "epoch": 0.047853162897410685,
      "grad_norm": 0.2119884043931961,
      "learning_rate": 8.009075115760243e-05,
      "loss": 0.7318,
      "step": 73
    },
    {
      "epoch": 0.04850868567682727,
      "grad_norm": 0.24390168488025665,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.7685,
      "step": 74
    },
    {
      "epoch": 0.049164208456243856,
      "grad_norm": 0.21942038834095,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.7906,
      "step": 75
    },
    {
      "epoch": 0.04981973123566044,
      "grad_norm": 0.20912085473537445,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.7866,
      "step": 76
    },
    {
      "epoch": 0.05047525401507703,
      "grad_norm": 0.21996894478797913,
      "learning_rate": 7.723195175075136e-05,
      "loss": 0.7765,
      "step": 77
    },
    {
      "epoch": 0.051130776794493606,
      "grad_norm": 0.2343721091747284,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.772,
      "step": 78
    },
    {
      "epoch": 0.05178629957391019,
      "grad_norm": 0.23541605472564697,
      "learning_rate": 7.575190374550272e-05,
      "loss": 0.7334,
      "step": 79
    },
    {
      "epoch": 0.05244182235332678,
      "grad_norm": 0.24592329561710358,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7377,
      "step": 80
    },
    {
      "epoch": 0.05309734513274336,
      "grad_norm": 0.227330282330513,
      "learning_rate": 7.424048101231686e-05,
      "loss": 0.7109,
      "step": 81
    },
    {
      "epoch": 0.05375286791215995,
      "grad_norm": 0.22559882700443268,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.7267,
      "step": 82
    },
    {
      "epoch": 0.05440839069157653,
      "grad_norm": 0.2438517063856125,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.7595,
      "step": 83
    },
    {
      "epoch": 0.05506391347099312,
      "grad_norm": 0.24995003640651703,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.7756,
      "step": 84
    },
    {
      "epoch": 0.055719436250409704,
      "grad_norm": 0.2487722635269165,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.7314,
      "step": 85
    },
    {
      "epoch": 0.05637495902982629,
      "grad_norm": 0.2623230516910553,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.7438,
      "step": 86
    },
    {
      "epoch": 0.05703048180924287,
      "grad_norm": 0.28028956055641174,
      "learning_rate": 6.953655642446368e-05,
      "loss": 0.7467,
      "step": 87
    },
    {
      "epoch": 0.057686004588659454,
      "grad_norm": 0.28336915373802185,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.7406,
      "step": 88
    },
    {
      "epoch": 0.05834152736807604,
      "grad_norm": 0.3217703700065613,
      "learning_rate": 6.7918397477265e-05,
      "loss": 0.7428,
      "step": 89
    },
    {
      "epoch": 0.058997050147492625,
      "grad_norm": 0.2729189693927765,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.7873,
      "step": 90
    },
    {
      "epoch": 0.05965257292690921,
      "grad_norm": 0.2695431709289551,
      "learning_rate": 6.627840772285784e-05,
      "loss": 0.7392,
      "step": 91
    },
    {
      "epoch": 0.060308095706325796,
      "grad_norm": 0.2915373742580414,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.7131,
      "step": 92
    },
    {
      "epoch": 0.06096361848574238,
      "grad_norm": 0.783824622631073,
      "learning_rate": 6.461858523613684e-05,
      "loss": 0.7253,
      "step": 93
    },
    {
      "epoch": 0.061619141265158967,
      "grad_norm": 0.2767072916030884,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.7848,
      "step": 94
    },
    {
      "epoch": 0.06227466404457555,
      "grad_norm": 0.29190996289253235,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.7908,
      "step": 95
    },
    {
      "epoch": 0.06293018682399214,
      "grad_norm": 0.30580437183380127,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.7437,
      "step": 96
    },
    {
      "epoch": 0.06358570960340872,
      "grad_norm": 0.3551975190639496,
      "learning_rate": 6.124755271719325e-05,
      "loss": 0.7034,
      "step": 97
    },
    {
      "epoch": 0.06424123238282531,
      "grad_norm": 0.3854832351207733,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.7428,
      "step": 98
    },
    {
      "epoch": 0.06489675516224189,
      "grad_norm": 0.4369230270385742,
      "learning_rate": 5.9540449768827246e-05,
      "loss": 0.7368,
      "step": 99
    },
    {
      "epoch": 0.06555227794165848,
      "grad_norm": 0.7531588673591614,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.7087,
      "step": 100
    },
    {
      "epoch": 0.06555227794165848,
      "eval_loss": 0.772973358631134,
      "eval_runtime": 198.9609,
      "eval_samples_per_second": 12.917,
      "eval_steps_per_second": 6.459,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3371612024589517e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}