{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.13006882737752776,
"eval_steps": 500,
"global_step": 3052,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001321144707963093,
"grad_norm": 29.45369529724121,
"learning_rate": 1.0157273918741808e-06,
"loss": 9.356,
"step": 31
},
{
"epoch": 0.002642289415926186,
"grad_norm": 17.081205368041992,
"learning_rate": 2.0314547837483616e-06,
"loss": 7.7447,
"step": 62
},
{
"epoch": 0.00396343412388928,
"grad_norm": 17.875776290893555,
"learning_rate": 3.0471821756225426e-06,
"loss": 6.3787,
"step": 93
},
{
"epoch": 0.005284578831852372,
"grad_norm": 38.05577087402344,
"learning_rate": 4.062909567496723e-06,
"loss": 5.1973,
"step": 124
},
{
"epoch": 0.006605723539815466,
"grad_norm": 20.137739181518555,
"learning_rate": 5.078636959370905e-06,
"loss": 4.6404,
"step": 155
},
{
"epoch": 0.00792686824777856,
"grad_norm": 15.796778678894043,
"learning_rate": 6.094364351245085e-06,
"loss": 4.3168,
"step": 186
},
{
"epoch": 0.009248012955741652,
"grad_norm": 18.784709930419922,
"learning_rate": 7.110091743119267e-06,
"loss": 4.0553,
"step": 217
},
{
"epoch": 0.010569157663704745,
"grad_norm": 23.772998809814453,
"learning_rate": 8.125819134993446e-06,
"loss": 3.84,
"step": 248
},
{
"epoch": 0.011890302371667839,
"grad_norm": 14.564051628112793,
"learning_rate": 9.141546526867629e-06,
"loss": 3.7012,
"step": 279
},
{
"epoch": 0.013211447079630931,
"grad_norm": 14.735116004943848,
"learning_rate": 1.015727391874181e-05,
"loss": 3.5595,
"step": 310
},
{
"epoch": 0.014532591787594026,
"grad_norm": 13.88759708404541,
"learning_rate": 1.117300131061599e-05,
"loss": 3.4515,
"step": 341
},
{
"epoch": 0.01585373649555712,
"grad_norm": 10.884685516357422,
"learning_rate": 1.218872870249017e-05,
"loss": 3.3299,
"step": 372
},
{
"epoch": 0.01717488120352021,
"grad_norm": 12.495129585266113,
"learning_rate": 1.3204456094364351e-05,
"loss": 3.2453,
"step": 403
},
{
"epoch": 0.018496025911483305,
"grad_norm": 11.930519104003906,
"learning_rate": 1.4220183486238533e-05,
"loss": 3.1447,
"step": 434
},
{
"epoch": 0.0198171706194464,
"grad_norm": 7.951854228973389,
"learning_rate": 1.5235910878112714e-05,
"loss": 3.0765,
"step": 465
},
{
"epoch": 0.02113831532740949,
"grad_norm": 8.87326717376709,
"learning_rate": 1.6251638269986893e-05,
"loss": 3.0296,
"step": 496
},
{
"epoch": 0.022459460035372584,
"grad_norm": 8.348654747009277,
"learning_rate": 1.7267365661861077e-05,
"loss": 2.9469,
"step": 527
},
{
"epoch": 0.023780604743335678,
"grad_norm": 7.966938495635986,
"learning_rate": 1.8283093053735257e-05,
"loss": 2.8941,
"step": 558
},
{
"epoch": 0.02510174945129877,
"grad_norm": 6.656867027282715,
"learning_rate": 1.9298820445609438e-05,
"loss": 2.8428,
"step": 589
},
{
"epoch": 0.026422894159261863,
"grad_norm": 6.973931312561035,
"learning_rate": 2.031454783748362e-05,
"loss": 2.7875,
"step": 620
},
{
"epoch": 0.027744038867224957,
"grad_norm": 6.903556823730469,
"learning_rate": 2.13302752293578e-05,
"loss": 2.7264,
"step": 651
},
{
"epoch": 0.02906518357518805,
"grad_norm": 7.422015190124512,
"learning_rate": 2.234600262123198e-05,
"loss": 2.6833,
"step": 682
},
{
"epoch": 0.030386328283151142,
"grad_norm": 6.663395404815674,
"learning_rate": 2.336173001310616e-05,
"loss": 2.6303,
"step": 713
},
{
"epoch": 0.03170747299111424,
"grad_norm": 6.743407249450684,
"learning_rate": 2.437745740498034e-05,
"loss": 2.578,
"step": 744
},
{
"epoch": 0.03302861769907733,
"grad_norm": 5.852759838104248,
"learning_rate": 2.5393184796854525e-05,
"loss": 2.5248,
"step": 775
},
{
"epoch": 0.03434976240704042,
"grad_norm": 5.186204433441162,
"learning_rate": 2.6408912188728702e-05,
"loss": 2.5051,
"step": 806
},
{
"epoch": 0.035670907115003515,
"grad_norm": 5.873666286468506,
"learning_rate": 2.7424639580602886e-05,
"loss": 2.4878,
"step": 837
},
{
"epoch": 0.03699205182296661,
"grad_norm": 6.269757270812988,
"learning_rate": 2.8440366972477066e-05,
"loss": 2.4272,
"step": 868
},
{
"epoch": 0.038313196530929704,
"grad_norm": 5.196881294250488,
"learning_rate": 2.9456094364351244e-05,
"loss": 2.3863,
"step": 899
},
{
"epoch": 0.0396343412388928,
"grad_norm": 4.899730682373047,
"learning_rate": 3.0471821756225428e-05,
"loss": 2.3714,
"step": 930
},
{
"epoch": 0.04095548594685589,
"grad_norm": 4.69920015335083,
"learning_rate": 3.148754914809961e-05,
"loss": 2.3456,
"step": 961
},
{
"epoch": 0.04227663065481898,
"grad_norm": 5.293018341064453,
"learning_rate": 3.2503276539973785e-05,
"loss": 2.3162,
"step": 992
},
{
"epoch": 0.043597775362782074,
"grad_norm": 4.992343902587891,
"learning_rate": 3.351900393184797e-05,
"loss": 2.2939,
"step": 1023
},
{
"epoch": 0.04491892007074517,
"grad_norm": 4.305331230163574,
"learning_rate": 3.453473132372215e-05,
"loss": 2.275,
"step": 1054
},
{
"epoch": 0.04624006477870826,
"grad_norm": 4.863211631774902,
"learning_rate": 3.555045871559633e-05,
"loss": 2.2618,
"step": 1085
},
{
"epoch": 0.047561209486671356,
"grad_norm": 4.6537251472473145,
"learning_rate": 3.6566186107470514e-05,
"loss": 2.2584,
"step": 1116
},
{
"epoch": 0.04888235419463445,
"grad_norm": 7.502182483673096,
"learning_rate": 3.7581913499344695e-05,
"loss": 2.244,
"step": 1147
},
{
"epoch": 0.05020349890259754,
"grad_norm": 4.560093879699707,
"learning_rate": 3.8597640891218876e-05,
"loss": 2.2054,
"step": 1178
},
{
"epoch": 0.05152464361056063,
"grad_norm": 4.21810245513916,
"learning_rate": 3.9613368283093056e-05,
"loss": 2.1923,
"step": 1209
},
{
"epoch": 0.052845788318523726,
"grad_norm": 3.815044403076172,
"learning_rate": 4.062909567496724e-05,
"loss": 2.1746,
"step": 1240
},
{
"epoch": 0.05416693302648682,
"grad_norm": 3.4870846271514893,
"learning_rate": 4.164482306684142e-05,
"loss": 2.1751,
"step": 1271
},
{
"epoch": 0.055488077734449914,
"grad_norm": 3.8318536281585693,
"learning_rate": 4.26605504587156e-05,
"loss": 2.1773,
"step": 1302
},
{
"epoch": 0.05680922244241301,
"grad_norm": 3.6395630836486816,
"learning_rate": 4.367627785058978e-05,
"loss": 2.1534,
"step": 1333
},
{
"epoch": 0.0581303671503761,
"grad_norm": 3.4870851039886475,
"learning_rate": 4.469200524246396e-05,
"loss": 2.1373,
"step": 1364
},
{
"epoch": 0.05945151185833919,
"grad_norm": 3.533464193344116,
"learning_rate": 4.570773263433814e-05,
"loss": 2.104,
"step": 1395
},
{
"epoch": 0.060772656566302284,
"grad_norm": 3.7605605125427246,
"learning_rate": 4.672346002621232e-05,
"loss": 2.1497,
"step": 1426
},
{
"epoch": 0.06209380127426538,
"grad_norm": 3.331261396408081,
"learning_rate": 4.77391874180865e-05,
"loss": 2.103,
"step": 1457
},
{
"epoch": 0.06341494598222848,
"grad_norm": 3.4388535022735596,
"learning_rate": 4.875491480996068e-05,
"loss": 2.1017,
"step": 1488
},
{
"epoch": 0.06473609069019157,
"grad_norm": 3.3586745262145996,
"learning_rate": 4.977064220183487e-05,
"loss": 2.0883,
"step": 1519
},
{
"epoch": 0.06605723539815465,
"grad_norm": 3.0805885791778564,
"learning_rate": 4.9999915451558777e-05,
"loss": 2.0741,
"step": 1550
},
{
"epoch": 0.06737838010611776,
"grad_norm": 3.1708438396453857,
"learning_rate": 4.999955597496219e-05,
"loss": 2.0718,
"step": 1581
},
{
"epoch": 0.06869952481408084,
"grad_norm": 3.03056001663208,
"learning_rate": 4.9998914381774255e-05,
"loss": 2.0673,
"step": 1612
},
{
"epoch": 0.07002066952204394,
"grad_norm": 2.7995362281799316,
"learning_rate": 4.999799067923527e-05,
"loss": 2.0683,
"step": 1643
},
{
"epoch": 0.07134181423000703,
"grad_norm": 3.116344451904297,
"learning_rate": 4.999678487776908e-05,
"loss": 2.0231,
"step": 1674
},
{
"epoch": 0.07266295893797013,
"grad_norm": 3.1621696949005127,
"learning_rate": 4.9995296990983006e-05,
"loss": 2.0391,
"step": 1705
},
{
"epoch": 0.07398410364593322,
"grad_norm": 2.8406624794006348,
"learning_rate": 4.999352703566763e-05,
"loss": 2.0132,
"step": 1736
},
{
"epoch": 0.0753052483538963,
"grad_norm": 2.570624351501465,
"learning_rate": 4.999147503179668e-05,
"loss": 2.0116,
"step": 1767
},
{
"epoch": 0.07662639306185941,
"grad_norm": 2.7663609981536865,
"learning_rate": 4.998914100252672e-05,
"loss": 2.0127,
"step": 1798
},
{
"epoch": 0.0779475377698225,
"grad_norm": 2.5649290084838867,
"learning_rate": 4.998652497419696e-05,
"loss": 2.0033,
"step": 1829
},
{
"epoch": 0.0792686824777856,
"grad_norm": 2.575486183166504,
"learning_rate": 4.9983626976328927e-05,
"loss": 2.005,
"step": 1860
},
{
"epoch": 0.08058982718574868,
"grad_norm": 2.509678840637207,
"learning_rate": 4.998044704162613e-05,
"loss": 1.9824,
"step": 1891
},
{
"epoch": 0.08191097189371178,
"grad_norm": 2.6347391605377197,
"learning_rate": 4.9976985205973705e-05,
"loss": 1.9571,
"step": 1922
},
{
"epoch": 0.08323211660167487,
"grad_norm": 2.5539751052856445,
"learning_rate": 4.997324150843799e-05,
"loss": 1.9782,
"step": 1953
},
{
"epoch": 0.08455326130963796,
"grad_norm": 10.463569641113281,
"learning_rate": 4.99692159912661e-05,
"loss": 1.9713,
"step": 1984
},
{
"epoch": 0.08587440601760106,
"grad_norm": 2.604071617126465,
"learning_rate": 4.996490869988546e-05,
"loss": 1.9769,
"step": 2015
},
{
"epoch": 0.08719555072556415,
"grad_norm": 2.4612362384796143,
"learning_rate": 4.996031968290326e-05,
"loss": 1.9463,
"step": 2046
},
{
"epoch": 0.08851669543352725,
"grad_norm": 2.539299488067627,
"learning_rate": 4.995544899210594e-05,
"loss": 1.9414,
"step": 2077
},
{
"epoch": 0.08983784014149034,
"grad_norm": 2.6650774478912354,
"learning_rate": 4.9950296682458583e-05,
"loss": 1.9371,
"step": 2108
},
{
"epoch": 0.09115898484945342,
"grad_norm": 2.5480291843414307,
"learning_rate": 4.994486281210429e-05,
"loss": 1.9581,
"step": 2139
},
{
"epoch": 0.09248012955741652,
"grad_norm": 2.4799551963806152,
"learning_rate": 4.9939147442363566e-05,
"loss": 1.9524,
"step": 2170
},
{
"epoch": 0.09380127426537961,
"grad_norm": 2.3326504230499268,
"learning_rate": 4.9933150637733574e-05,
"loss": 1.9319,
"step": 2201
},
{
"epoch": 0.09512241897334271,
"grad_norm": 2.3550055027008057,
"learning_rate": 4.992687246588743e-05,
"loss": 1.9015,
"step": 2232
},
{
"epoch": 0.0964435636813058,
"grad_norm": 2.422797918319702,
"learning_rate": 4.992031299767347e-05,
"loss": 1.9364,
"step": 2263
},
{
"epoch": 0.0977647083892689,
"grad_norm": 2.2776873111724854,
"learning_rate": 4.9913472307114386e-05,
"loss": 1.9102,
"step": 2294
},
{
"epoch": 0.09908585309723199,
"grad_norm": 2.3031086921691895,
"learning_rate": 4.9906350471406446e-05,
"loss": 1.9193,
"step": 2325
},
{
"epoch": 0.10040699780519508,
"grad_norm": 2.2281057834625244,
"learning_rate": 4.989894757091861e-05,
"loss": 1.9143,
"step": 2356
},
{
"epoch": 0.10172814251315818,
"grad_norm": 2.39752459526062,
"learning_rate": 4.989126368919158e-05,
"loss": 1.905,
"step": 2387
},
{
"epoch": 0.10304928722112126,
"grad_norm": 2.3098957538604736,
"learning_rate": 4.988329891293693e-05,
"loss": 1.9204,
"step": 2418
},
{
"epoch": 0.10437043192908436,
"grad_norm": 2.244088649749756,
"learning_rate": 4.987505333203608e-05,
"loss": 1.8836,
"step": 2449
},
{
"epoch": 0.10569157663704745,
"grad_norm": 2.3035812377929688,
"learning_rate": 4.9866527039539276e-05,
"loss": 1.9097,
"step": 2480
},
{
"epoch": 0.10701272134501055,
"grad_norm": 2.2249033451080322,
"learning_rate": 4.9857720131664594e-05,
"loss": 1.8804,
"step": 2511
},
{
"epoch": 0.10833386605297364,
"grad_norm": 2.180612325668335,
"learning_rate": 4.9848632707796773e-05,
"loss": 1.8848,
"step": 2542
},
{
"epoch": 0.10965501076093673,
"grad_norm": 2.2042863368988037,
"learning_rate": 4.9839264870486155e-05,
"loss": 1.8879,
"step": 2573
},
{
"epoch": 0.11097615546889983,
"grad_norm": 2.278650999069214,
"learning_rate": 4.9829616725447526e-05,
"loss": 1.8728,
"step": 2604
},
{
"epoch": 0.11229730017686292,
"grad_norm": 2.2380714416503906,
"learning_rate": 4.981968838155888e-05,
"loss": 1.8927,
"step": 2635
},
{
"epoch": 0.11361844488482602,
"grad_norm": 2.062289237976074,
"learning_rate": 4.980947995086024e-05,
"loss": 1.8706,
"step": 2666
},
{
"epoch": 0.1149395895927891,
"grad_norm": 2.169900894165039,
"learning_rate": 4.979899154855234e-05,
"loss": 1.8753,
"step": 2697
},
{
"epoch": 0.1162607343007522,
"grad_norm": 2.1450119018554688,
"learning_rate": 4.9788223292995386e-05,
"loss": 1.8685,
"step": 2728
},
{
"epoch": 0.11758187900871529,
"grad_norm": 2.285205125808716,
"learning_rate": 4.977717530570768e-05,
"loss": 1.8601,
"step": 2759
},
{
"epoch": 0.11890302371667838,
"grad_norm": 2.1305880546569824,
"learning_rate": 4.976584771136425e-05,
"loss": 1.8557,
"step": 2790
},
{
"epoch": 0.12022416842464148,
"grad_norm": 2.0517284870147705,
"learning_rate": 4.975424063779547e-05,
"loss": 1.8587,
"step": 2821
},
{
"epoch": 0.12154531313260457,
"grad_norm": 2.190729856491089,
"learning_rate": 4.974235421598557e-05,
"loss": 1.8431,
"step": 2852
},
{
"epoch": 0.12286645784056767,
"grad_norm": 2.133803367614746,
"learning_rate": 4.973018858007122e-05,
"loss": 1.8492,
"step": 2883
},
{
"epoch": 0.12418760254853076,
"grad_norm": 2.3807687759399414,
"learning_rate": 4.9717743867339963e-05,
"loss": 1.8391,
"step": 2914
},
{
"epoch": 0.12550874725649386,
"grad_norm": 2.0250234603881836,
"learning_rate": 4.9705020218228695e-05,
"loss": 1.8465,
"step": 2945
},
{
"epoch": 0.12682989196445696,
"grad_norm": 2.1557273864746094,
"learning_rate": 4.969201777632205e-05,
"loss": 1.8346,
"step": 2976
},
{
"epoch": 0.12815103667242003,
"grad_norm": 1.9695699214935303,
"learning_rate": 4.9678736688350846e-05,
"loss": 1.8423,
"step": 3007
},
{
"epoch": 0.12947218138038313,
"grad_norm": 2.1525843143463135,
"learning_rate": 4.966517710419033e-05,
"loss": 1.851,
"step": 3038
}
],
"logging_steps": 31,
"max_steps": 30517,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 3052,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.263945054022271e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}