{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 2350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0425531914893617,
"grad_norm": 0.4270687699317932,
"learning_rate": 0.00019999602855426865,
"loss": 1.013,
"step": 10
},
{
"epoch": 0.0851063829787234,
"grad_norm": 0.4152718782424927,
"learning_rate": 0.00019998411453252217,
"loss": 0.8289,
"step": 20
},
{
"epoch": 0.1276595744680851,
"grad_norm": 0.7277560234069824,
"learning_rate": 0.0001999642588810784,
"loss": 0.5959,
"step": 30
},
{
"epoch": 0.1702127659574468,
"grad_norm": 0.5505673885345459,
"learning_rate": 0.00019993646317705016,
"loss": 0.459,
"step": 40
},
{
"epoch": 0.2127659574468085,
"grad_norm": 0.528052031993866,
"learning_rate": 0.00019990072962822007,
"loss": 0.3775,
"step": 50
},
{
"epoch": 0.2553191489361702,
"grad_norm": 0.6307681202888489,
"learning_rate": 0.00019985706107286514,
"loss": 0.3285,
"step": 60
},
{
"epoch": 0.2978723404255319,
"grad_norm": 0.6954013109207153,
"learning_rate": 0.00019980546097953132,
"loss": 0.2855,
"step": 70
},
{
"epoch": 0.3404255319148936,
"grad_norm": 0.6790465116500854,
"learning_rate": 0.000199745933446758,
"loss": 0.2782,
"step": 80
},
{
"epoch": 0.3829787234042553,
"grad_norm": 1.324937105178833,
"learning_rate": 0.0001996784832027525,
"loss": 0.2635,
"step": 90
},
{
"epoch": 0.425531914893617,
"grad_norm": 0.8779314756393433,
"learning_rate": 0.00019960311560501454,
"loss": 0.1861,
"step": 100
},
{
"epoch": 0.46808510638297873,
"grad_norm": 0.693745493888855,
"learning_rate": 0.00019951983663991056,
"loss": 0.2001,
"step": 110
},
{
"epoch": 0.5106382978723404,
"grad_norm": 1.0649502277374268,
"learning_rate": 0.00019942865292219838,
"loss": 0.1378,
"step": 120
},
{
"epoch": 0.5531914893617021,
"grad_norm": 0.6962260007858276,
"learning_rate": 0.0001993295716945017,
"loss": 0.1579,
"step": 130
},
{
"epoch": 0.5957446808510638,
"grad_norm": 0.7934479713439941,
"learning_rate": 0.00019922260082673497,
"loss": 0.092,
"step": 140
},
{
"epoch": 0.6382978723404256,
"grad_norm": 1.1331907510757446,
"learning_rate": 0.000199107748815478,
"loss": 0.1208,
"step": 150
},
{
"epoch": 0.6808510638297872,
"grad_norm": 1.3689247369766235,
"learning_rate": 0.00019898502478330152,
"loss": 0.0874,
"step": 160
},
{
"epoch": 0.723404255319149,
"grad_norm": 0.5304535031318665,
"learning_rate": 0.00019885443847804211,
"loss": 0.0881,
"step": 170
},
{
"epoch": 0.7659574468085106,
"grad_norm": 0.6805845499038696,
"learning_rate": 0.0001987160002720283,
"loss": 0.0584,
"step": 180
},
{
"epoch": 0.8085106382978723,
"grad_norm": 0.2527499198913574,
"learning_rate": 0.00019856972116125653,
"loss": 0.08,
"step": 190
},
{
"epoch": 0.851063829787234,
"grad_norm": 0.799462616443634,
"learning_rate": 0.0001984156127645178,
"loss": 0.0556,
"step": 200
},
{
"epoch": 0.8936170212765957,
"grad_norm": 0.936975359916687,
"learning_rate": 0.0001982536873224748,
"loss": 0.0945,
"step": 210
},
{
"epoch": 0.9361702127659575,
"grad_norm": 0.8067993521690369,
"learning_rate": 0.00019808395769668963,
"loss": 0.0495,
"step": 220
},
{
"epoch": 0.9787234042553191,
"grad_norm": 0.45767834782600403,
"learning_rate": 0.00019790643736860227,
"loss": 0.0617,
"step": 230
},
{
"epoch": 1.0212765957446808,
"grad_norm": 0.9198794364929199,
"learning_rate": 0.00019772114043845965,
"loss": 0.0467,
"step": 240
},
{
"epoch": 1.0638297872340425,
"grad_norm": 0.7327796816825867,
"learning_rate": 0.0001975280816241959,
"loss": 0.0391,
"step": 250
},
{
"epoch": 1.1063829787234043,
"grad_norm": 0.8003076910972595,
"learning_rate": 0.00019732727626026305,
"loss": 0.0428,
"step": 260
},
{
"epoch": 1.148936170212766,
"grad_norm": 0.10251367837190628,
"learning_rate": 0.0001971187402964132,
"loss": 0.032,
"step": 270
},
{
"epoch": 1.1914893617021276,
"grad_norm": 0.45093855261802673,
"learning_rate": 0.00019690249029643162,
"loss": 0.0673,
"step": 280
},
{
"epoch": 1.2340425531914894,
"grad_norm": 0.4845767915248871,
"learning_rate": 0.0001966785434368211,
"loss": 0.033,
"step": 290
},
{
"epoch": 1.2765957446808511,
"grad_norm": 0.31195056438446045,
"learning_rate": 0.00019644691750543767,
"loss": 0.0261,
"step": 300
},
{
"epoch": 1.3191489361702127,
"grad_norm": 0.14839951694011688,
"learning_rate": 0.00019620763090007762,
"loss": 0.0298,
"step": 310
},
{
"epoch": 1.3617021276595744,
"grad_norm": 0.20573872327804565,
"learning_rate": 0.00019596070262701626,
"loss": 0.0155,
"step": 320
},
{
"epoch": 1.4042553191489362,
"grad_norm": 0.47702595591545105,
"learning_rate": 0.00019570615229949842,
"loss": 0.0369,
"step": 330
},
{
"epoch": 1.4468085106382977,
"grad_norm": 0.7073186039924622,
"learning_rate": 0.00019544400013618023,
"loss": 0.0302,
"step": 340
},
{
"epoch": 1.4893617021276595,
"grad_norm": 0.1539478451013565,
"learning_rate": 0.00019517426695952358,
"loss": 0.0223,
"step": 350
},
{
"epoch": 1.5319148936170213,
"grad_norm": 0.5202814340591431,
"learning_rate": 0.00019489697419414182,
"loss": 0.0263,
"step": 360
},
{
"epoch": 1.574468085106383,
"grad_norm": 0.968192458152771,
"learning_rate": 0.00019461214386509842,
"loss": 0.044,
"step": 370
},
{
"epoch": 1.6170212765957448,
"grad_norm": 0.5662522912025452,
"learning_rate": 0.00019431979859615726,
"loss": 0.0421,
"step": 380
},
{
"epoch": 1.6595744680851063,
"grad_norm": 0.42925137281417847,
"learning_rate": 0.00019401996160798573,
"loss": 0.0606,
"step": 390
},
{
"epoch": 1.702127659574468,
"grad_norm": 0.5803830027580261,
"learning_rate": 0.00019371265671631037,
"loss": 0.0392,
"step": 400
},
{
"epoch": 1.7446808510638299,
"grad_norm": 0.4235450327396393,
"learning_rate": 0.00019339790833002515,
"loss": 0.0286,
"step": 410
},
{
"epoch": 1.7872340425531914,
"grad_norm": 0.519207775592804,
"learning_rate": 0.00019307574144925287,
"loss": 0.0522,
"step": 420
},
{
"epoch": 1.8297872340425532,
"grad_norm": 0.2344844490289688,
"learning_rate": 0.00019274618166335912,
"loss": 0.0281,
"step": 430
},
{
"epoch": 1.872340425531915,
"grad_norm": 0.1990007758140564,
"learning_rate": 0.00019240925514892,
"loss": 0.0229,
"step": 440
},
{
"epoch": 1.9148936170212765,
"grad_norm": 0.10929415374994278,
"learning_rate": 0.00019206498866764288,
"loss": 0.0258,
"step": 450
},
{
"epoch": 1.9574468085106385,
"grad_norm": 0.4308103024959564,
"learning_rate": 0.00019171340956424074,
"loss": 0.0167,
"step": 460
},
{
"epoch": 2.0,
"grad_norm": 0.46525439620018005,
"learning_rate": 0.0001913545457642601,
"loss": 0.0283,
"step": 470
},
{
"epoch": 2.0425531914893615,
"grad_norm": 0.28837406635284424,
"learning_rate": 0.00019098842577186314,
"loss": 0.0137,
"step": 480
},
{
"epoch": 2.0851063829787235,
"grad_norm": 0.08533861488103867,
"learning_rate": 0.00019061507866756347,
"loss": 0.0182,
"step": 490
},
{
"epoch": 2.127659574468085,
"grad_norm": 0.3499375581741333,
"learning_rate": 0.00019023453410591635,
"loss": 0.0221,
"step": 500
},
{
"epoch": 2.1702127659574466,
"grad_norm": 0.07716694474220276,
"learning_rate": 0.00018984682231316333,
"loss": 0.0075,
"step": 510
},
{
"epoch": 2.2127659574468086,
"grad_norm": 0.3093757927417755,
"learning_rate": 0.00018945197408483123,
"loss": 0.0133,
"step": 520
},
{
"epoch": 2.25531914893617,
"grad_norm": 0.13492655754089355,
"learning_rate": 0.00018905002078328632,
"loss": 0.0184,
"step": 530
},
{
"epoch": 2.297872340425532,
"grad_norm": 0.07833054661750793,
"learning_rate": 0.000188640994335243,
"loss": 0.0109,
"step": 540
},
{
"epoch": 2.3404255319148937,
"grad_norm": 0.08865915983915329,
"learning_rate": 0.0001882249272292282,
"loss": 0.0121,
"step": 550
},
{
"epoch": 2.382978723404255,
"grad_norm": 0.31314581632614136,
"learning_rate": 0.00018780185251300046,
"loss": 0.0242,
"step": 560
},
{
"epoch": 2.425531914893617,
"grad_norm": 0.10387410968542099,
"learning_rate": 0.00018737180379092537,
"loss": 0.0285,
"step": 570
},
{
"epoch": 2.4680851063829787,
"grad_norm": 0.11755700409412384,
"learning_rate": 0.0001869348152213061,
"loss": 0.0281,
"step": 580
},
{
"epoch": 2.5106382978723403,
"grad_norm": 0.1438552439212799,
"learning_rate": 0.0001864909215136705,
"loss": 0.0216,
"step": 590
},
{
"epoch": 2.5531914893617023,
"grad_norm": 0.08940370380878448,
"learning_rate": 0.00018604015792601396,
"loss": 0.0226,
"step": 600
},
{
"epoch": 2.595744680851064,
"grad_norm": 0.19193390011787415,
"learning_rate": 0.00018558256026199896,
"loss": 0.0184,
"step": 610
},
{
"epoch": 2.6382978723404253,
"grad_norm": 0.4586654603481293,
"learning_rate": 0.00018511816486811134,
"loss": 0.0154,
"step": 620
},
{
"epoch": 2.6808510638297873,
"grad_norm": 0.05811255797743797,
"learning_rate": 0.00018464700863077312,
"loss": 0.0238,
"step": 630
},
{
"epoch": 2.723404255319149,
"grad_norm": 0.16280680894851685,
"learning_rate": 0.00018416912897341295,
"loss": 0.0253,
"step": 640
},
{
"epoch": 2.7659574468085104,
"grad_norm": 0.07031189650297165,
"learning_rate": 0.00018368456385349334,
"loss": 0.0146,
"step": 650
},
{
"epoch": 2.8085106382978724,
"grad_norm": 0.07851342856884003,
"learning_rate": 0.0001831933517594957,
"loss": 0.0301,
"step": 660
},
{
"epoch": 2.851063829787234,
"grad_norm": 0.11461298912763596,
"learning_rate": 0.0001826955317078636,
"loss": 0.0155,
"step": 670
},
{
"epoch": 2.8936170212765955,
"grad_norm": 0.1839868575334549,
"learning_rate": 0.00018219114323990345,
"loss": 0.0099,
"step": 680
},
{
"epoch": 2.9361702127659575,
"grad_norm": 0.04199373722076416,
"learning_rate": 0.00018168022641864377,
"loss": 0.0192,
"step": 690
},
{
"epoch": 2.978723404255319,
"grad_norm": 0.3203773498535156,
"learning_rate": 0.00018116282182565311,
"loss": 0.0272,
"step": 700
},
{
"epoch": 3.021276595744681,
"grad_norm": 0.327921599149704,
"learning_rate": 0.0001806389705578168,
"loss": 0.0081,
"step": 710
},
{
"epoch": 3.0638297872340425,
"grad_norm": 0.29155433177948,
"learning_rate": 0.00018010871422407236,
"loss": 0.0128,
"step": 720
},
{
"epoch": 3.106382978723404,
"grad_norm": 0.06926452368497849,
"learning_rate": 0.00017957209494210493,
"loss": 0.0171,
"step": 730
},
{
"epoch": 3.148936170212766,
"grad_norm": 0.08231089264154434,
"learning_rate": 0.0001790291553350016,
"loss": 0.0098,
"step": 740
},
{
"epoch": 3.1914893617021276,
"grad_norm": 0.14707215130329132,
"learning_rate": 0.0001784799385278661,
"loss": 0.0092,
"step": 750
},
{
"epoch": 3.2340425531914896,
"grad_norm": 0.2735896706581116,
"learning_rate": 0.00017792448814439333,
"loss": 0.0115,
"step": 760
},
{
"epoch": 3.276595744680851,
"grad_norm": 0.44960370659828186,
"learning_rate": 0.00017736284830340436,
"loss": 0.0195,
"step": 770
},
{
"epoch": 3.3191489361702127,
"grad_norm": 0.13413724303245544,
"learning_rate": 0.00017679506361534215,
"loss": 0.0187,
"step": 780
},
{
"epoch": 3.3617021276595747,
"grad_norm": 0.24698686599731445,
"learning_rate": 0.00017622117917872823,
"loss": 0.0125,
"step": 790
},
{
"epoch": 3.404255319148936,
"grad_norm": 0.48694342374801636,
"learning_rate": 0.00017564124057658056,
"loss": 0.0234,
"step": 800
},
{
"epoch": 3.4468085106382977,
"grad_norm": 0.6931429505348206,
"learning_rate": 0.00017505529387279277,
"loss": 0.0234,
"step": 810
},
{
"epoch": 3.4893617021276597,
"grad_norm": 0.13700473308563232,
"learning_rate": 0.00017446338560847568,
"loss": 0.0145,
"step": 820
},
{
"epoch": 3.5319148936170213,
"grad_norm": 0.3254775404930115,
"learning_rate": 0.00017386556279826021,
"loss": 0.0179,
"step": 830
},
{
"epoch": 3.574468085106383,
"grad_norm": 0.368379682302475,
"learning_rate": 0.00017326187292656333,
"loss": 0.013,
"step": 840
},
{
"epoch": 3.617021276595745,
"grad_norm": 0.2745888829231262,
"learning_rate": 0.00017265236394381633,
"loss": 0.0136,
"step": 850
},
{
"epoch": 3.6595744680851063,
"grad_norm": 0.0781714916229248,
"learning_rate": 0.00017203708426265614,
"loss": 0.0126,
"step": 860
},
{
"epoch": 3.702127659574468,
"grad_norm": 0.7518234848976135,
"learning_rate": 0.00017141608275408006,
"loss": 0.0134,
"step": 870
},
{
"epoch": 3.74468085106383,
"grad_norm": 0.4271518886089325,
"learning_rate": 0.00017078940874356392,
"loss": 0.0127,
"step": 880
},
{
"epoch": 3.7872340425531914,
"grad_norm": 0.34387120604515076,
"learning_rate": 0.00017015711200714414,
"loss": 0.0161,
"step": 890
},
{
"epoch": 3.829787234042553,
"grad_norm": 0.09641717374324799,
"learning_rate": 0.00016951924276746425,
"loss": 0.0185,
"step": 900
},
{
"epoch": 3.872340425531915,
"grad_norm": 0.44215765595436096,
"learning_rate": 0.00016887585168978562,
"loss": 0.0115,
"step": 910
},
{
"epoch": 3.9148936170212765,
"grad_norm": 0.06954783201217651,
"learning_rate": 0.0001682269898779632,
"loss": 0.0121,
"step": 920
},
{
"epoch": 3.9574468085106385,
"grad_norm": 0.21080243587493896,
"learning_rate": 0.00016757270887038654,
"loss": 0.0125,
"step": 930
},
{
"epoch": 4.0,
"grad_norm": 0.3382236063480377,
"learning_rate": 0.00016691306063588583,
"loss": 0.0109,
"step": 940
},
{
"epoch": 4.042553191489362,
"grad_norm": 0.36123183369636536,
"learning_rate": 0.00016624809756960444,
"loss": 0.0093,
"step": 950
},
{
"epoch": 4.085106382978723,
"grad_norm": 0.45228853821754456,
"learning_rate": 0.00016557787248883696,
"loss": 0.0117,
"step": 960
},
{
"epoch": 4.127659574468085,
"grad_norm": 0.2724202275276184,
"learning_rate": 0.00016490243862883413,
"loss": 0.0126,
"step": 970
},
{
"epoch": 4.170212765957447,
"grad_norm": 0.17904357612133026,
"learning_rate": 0.00016422184963857432,
"loss": 0.0103,
"step": 980
},
{
"epoch": 4.212765957446808,
"grad_norm": 0.4267734885215759,
"learning_rate": 0.00016353615957650236,
"loss": 0.0105,
"step": 990
},
{
"epoch": 4.25531914893617,
"grad_norm": 0.11816457659006119,
"learning_rate": 0.00016284542290623567,
"loss": 0.0097,
"step": 1000
},
{
"epoch": 4.297872340425532,
"grad_norm": 0.04166145250201225,
"learning_rate": 0.00016214969449223824,
"loss": 0.0116,
"step": 1010
},
{
"epoch": 4.340425531914893,
"grad_norm": 0.0687410831451416,
"learning_rate": 0.00016144902959546286,
"loss": 0.0088,
"step": 1020
},
{
"epoch": 4.382978723404255,
"grad_norm": 0.37416237592697144,
"learning_rate": 0.00016074348386896177,
"loss": 0.019,
"step": 1030
},
{
"epoch": 4.425531914893617,
"grad_norm": 0.06069188937544823,
"learning_rate": 0.00016003311335346636,
"loss": 0.0138,
"step": 1040
},
{
"epoch": 4.468085106382979,
"grad_norm": 0.0817495658993721,
"learning_rate": 0.00015931797447293552,
"loss": 0.0084,
"step": 1050
},
{
"epoch": 4.51063829787234,
"grad_norm": 0.09787007421255112,
"learning_rate": 0.00015859812403007443,
"loss": 0.009,
"step": 1060
},
{
"epoch": 4.553191489361702,
"grad_norm": 0.06179153174161911,
"learning_rate": 0.0001578736192018224,
"loss": 0.008,
"step": 1070
},
{
"epoch": 4.595744680851064,
"grad_norm": 0.3092339038848877,
"learning_rate": 0.00015714451753481168,
"loss": 0.0131,
"step": 1080
},
{
"epoch": 4.638297872340425,
"grad_norm": 0.06405780464410782,
"learning_rate": 0.0001564108769407962,
"loss": 0.0122,
"step": 1090
},
{
"epoch": 4.680851063829787,
"grad_norm": 0.21473410725593567,
"learning_rate": 0.00015567275569205218,
"loss": 0.0117,
"step": 1100
},
{
"epoch": 4.723404255319149,
"grad_norm": 0.3080317974090576,
"learning_rate": 0.00015493021241674918,
"loss": 0.011,
"step": 1110
},
{
"epoch": 4.76595744680851,
"grad_norm": 0.6501839756965637,
"learning_rate": 0.0001541833060942937,
"loss": 0.0124,
"step": 1120
},
{
"epoch": 4.808510638297872,
"grad_norm": 0.04628886282444,
"learning_rate": 0.00015343209605064422,
"loss": 0.0082,
"step": 1130
},
{
"epoch": 4.851063829787234,
"grad_norm": 0.059793177992105484,
"learning_rate": 0.00015267664195359917,
"loss": 0.0072,
"step": 1140
},
{
"epoch": 4.8936170212765955,
"grad_norm": 0.052162788808345795,
"learning_rate": 0.00015191700380805752,
"loss": 0.0095,
"step": 1150
},
{
"epoch": 4.9361702127659575,
"grad_norm": 0.06147542968392372,
"learning_rate": 0.00015115324195125274,
"loss": 0.0098,
"step": 1160
},
{
"epoch": 4.9787234042553195,
"grad_norm": 0.6586833000183105,
"learning_rate": 0.00015038541704796003,
"loss": 0.0139,
"step": 1170
},
{
"epoch": 5.0212765957446805,
"grad_norm": 0.2920892834663391,
"learning_rate": 0.0001496135900856782,
"loss": 0.0083,
"step": 1180
},
{
"epoch": 5.0638297872340425,
"grad_norm": 0.32393306493759155,
"learning_rate": 0.0001488378223697851,
"loss": 0.0155,
"step": 1190
},
{
"epoch": 5.1063829787234045,
"grad_norm": 0.49029844999313354,
"learning_rate": 0.00014805817551866838,
"loss": 0.0109,
"step": 1200
},
{
"epoch": 5.148936170212766,
"grad_norm": 0.05497799441218376,
"learning_rate": 0.00014727471145883127,
"loss": 0.0095,
"step": 1210
},
{
"epoch": 5.191489361702128,
"grad_norm": 0.4540445804595947,
"learning_rate": 0.00014648749241997363,
"loss": 0.0106,
"step": 1220
},
{
"epoch": 5.23404255319149,
"grad_norm": 0.16598157584667206,
"learning_rate": 0.00014569658093004935,
"loss": 0.0072,
"step": 1230
},
{
"epoch": 5.276595744680851,
"grad_norm": 0.07160704582929611,
"learning_rate": 0.0001449020398102996,
"loss": 0.0108,
"step": 1240
},
{
"epoch": 5.319148936170213,
"grad_norm": 0.197789266705513,
"learning_rate": 0.00014410393217026318,
"loss": 0.0118,
"step": 1250
},
{
"epoch": 5.361702127659575,
"grad_norm": 0.07983817905187607,
"learning_rate": 0.00014330232140276366,
"loss": 0.0076,
"step": 1260
},
{
"epoch": 5.404255319148936,
"grad_norm": 0.0746329054236412,
"learning_rate": 0.00014249727117887425,
"loss": 0.0089,
"step": 1270
},
{
"epoch": 5.446808510638298,
"grad_norm": 0.09392493963241577,
"learning_rate": 0.00014168884544286053,
"loss": 0.0103,
"step": 1280
},
{
"epoch": 5.48936170212766,
"grad_norm": 0.18386998772621155,
"learning_rate": 0.0001408771084071012,
"loss": 0.0096,
"step": 1290
},
{
"epoch": 5.531914893617021,
"grad_norm": 0.07400283962488174,
"learning_rate": 0.00014006212454698797,
"loss": 0.0083,
"step": 1300
},
{
"epoch": 5.574468085106383,
"grad_norm": 0.06513144075870514,
"learning_rate": 0.00013924395859580432,
"loss": 0.0093,
"step": 1310
},
{
"epoch": 5.617021276595745,
"grad_norm": 0.6950928568840027,
"learning_rate": 0.00013842267553958371,
"loss": 0.0073,
"step": 1320
},
{
"epoch": 5.659574468085106,
"grad_norm": 0.05320321023464203,
"learning_rate": 0.00013759834061194794,
"loss": 0.0098,
"step": 1330
},
{
"epoch": 5.702127659574468,
"grad_norm": 0.17569933831691742,
"learning_rate": 0.00013677101928892554,
"loss": 0.0075,
"step": 1340
},
{
"epoch": 5.74468085106383,
"grad_norm": 0.052122730761766434,
"learning_rate": 0.00013594077728375128,
"loss": 0.0107,
"step": 1350
},
{
"epoch": 5.787234042553192,
"grad_norm": 0.2108752280473709,
"learning_rate": 0.00013510768054164653,
"loss": 0.0119,
"step": 1360
},
{
"epoch": 5.829787234042553,
"grad_norm": 0.047486983239650726,
"learning_rate": 0.00013427179523458127,
"loss": 0.0092,
"step": 1370
},
{
"epoch": 5.872340425531915,
"grad_norm": 0.043320391327142715,
"learning_rate": 0.0001334331877560182,
"loss": 0.0081,
"step": 1380
},
{
"epoch": 5.914893617021277,
"grad_norm": 0.09155077487230301,
"learning_rate": 0.00013259192471563912,
"loss": 0.0091,
"step": 1390
},
{
"epoch": 5.957446808510638,
"grad_norm": 0.049143675714731216,
"learning_rate": 0.00013174807293405428,
"loss": 0.0089,
"step": 1400
},
{
"epoch": 6.0,
"grad_norm": 0.07365540415048599,
"learning_rate": 0.00013090169943749476,
"loss": 0.0068,
"step": 1410
},
{
"epoch": 6.042553191489362,
"grad_norm": 0.11437718570232391,
"learning_rate": 0.00013005287145248878,
"loss": 0.0064,
"step": 1420
},
{
"epoch": 6.085106382978723,
"grad_norm": 0.06010650098323822,
"learning_rate": 0.0001292016564005219,
"loss": 0.0074,
"step": 1430
},
{
"epoch": 6.127659574468085,
"grad_norm": 0.04595167934894562,
"learning_rate": 0.0001283481218926818,
"loss": 0.0066,
"step": 1440
},
{
"epoch": 6.170212765957447,
"grad_norm": 0.05351310595870018,
"learning_rate": 0.00012749233572428804,
"loss": 0.0097,
"step": 1450
},
{
"epoch": 6.212765957446808,
"grad_norm": 0.13630953431129456,
"learning_rate": 0.00012663436586950714,
"loss": 0.0079,
"step": 1460
},
{
"epoch": 6.25531914893617,
"grad_norm": 0.17622588574886322,
"learning_rate": 0.00012577428047595344,
"loss": 0.0084,
"step": 1470
},
{
"epoch": 6.297872340425532,
"grad_norm": 0.050954531878232956,
"learning_rate": 0.0001249121478592762,
"loss": 0.0077,
"step": 1480
},
{
"epoch": 6.340425531914893,
"grad_norm": 0.3051726818084717,
"learning_rate": 0.0001240480364977335,
"loss": 0.0085,
"step": 1490
},
{
"epoch": 6.382978723404255,
"grad_norm": 0.30509302020072937,
"learning_rate": 0.00012318201502675285,
"loss": 0.0092,
"step": 1500
},
{
"epoch": 6.425531914893617,
"grad_norm": 0.09164142608642578,
"learning_rate": 0.00012231415223347972,
"loss": 0.008,
"step": 1510
},
{
"epoch": 6.468085106382979,
"grad_norm": 0.05406223237514496,
"learning_rate": 0.0001214445170513139,
"loss": 0.0078,
"step": 1520
},
{
"epoch": 6.51063829787234,
"grad_norm": 0.05845744535326958,
"learning_rate": 0.00012057317855443395,
"loss": 0.0092,
"step": 1530
},
{
"epoch": 6.553191489361702,
"grad_norm": 0.05021122843027115,
"learning_rate": 0.00011970020595231101,
"loss": 0.007,
"step": 1540
},
{
"epoch": 6.595744680851064,
"grad_norm": 0.10315235704183578,
"learning_rate": 0.00011882566858421135,
"loss": 0.0068,
"step": 1550
},
{
"epoch": 6.638297872340425,
"grad_norm": 0.08750782907009125,
"learning_rate": 0.00011794963591368893,
"loss": 0.009,
"step": 1560
},
{
"epoch": 6.680851063829787,
"grad_norm": 0.05412838235497475,
"learning_rate": 0.0001170721775230679,
"loss": 0.0071,
"step": 1570
},
{
"epoch": 6.723404255319149,
"grad_norm": 0.17292432487010956,
"learning_rate": 0.00011619336310791586,
"loss": 0.0091,
"step": 1580
},
{
"epoch": 6.76595744680851,
"grad_norm": 0.05503688380122185,
"learning_rate": 0.00011531326247150803,
"loss": 0.0069,
"step": 1590
},
{
"epoch": 6.808510638297872,
"grad_norm": 0.05121155083179474,
"learning_rate": 0.00011443194551928266,
"loss": 0.008,
"step": 1600
},
{
"epoch": 6.851063829787234,
"grad_norm": 0.0626005157828331,
"learning_rate": 0.00011354948225328877,
"loss": 0.0065,
"step": 1610
},
{
"epoch": 6.8936170212765955,
"grad_norm": 0.058921247720718384,
"learning_rate": 0.0001126659427666257,
"loss": 0.0078,
"step": 1620
},
{
"epoch": 6.9361702127659575,
"grad_norm": 0.058523211628198624,
"learning_rate": 0.00011178139723787597,
"loss": 0.008,
"step": 1630
},
{
"epoch": 6.9787234042553195,
"grad_norm": 0.18594586849212646,
"learning_rate": 0.00011089591592553082,
"loss": 0.0076,
"step": 1640
},
{
"epoch": 7.0212765957446805,
"grad_norm": 0.053747180849313736,
"learning_rate": 0.00011000956916240985,
"loss": 0.0074,
"step": 1650
},
{
"epoch": 7.0638297872340425,
"grad_norm": 0.03964696079492569,
"learning_rate": 0.00010912242735007441,
"loss": 0.0071,
"step": 1660
},
{
"epoch": 7.1063829787234045,
"grad_norm": 0.05952566862106323,
"learning_rate": 0.00010823456095323579,
"loss": 0.0065,
"step": 1670
},
{
"epoch": 7.148936170212766,
"grad_norm": 0.21424749493598938,
"learning_rate": 0.00010734604049415822,
"loss": 0.0075,
"step": 1680
},
{
"epoch": 7.191489361702128,
"grad_norm": 0.03922433406114578,
"learning_rate": 0.0001064569365470574,
"loss": 0.0071,
"step": 1690
},
{
"epoch": 7.23404255319149,
"grad_norm": 0.05505215749144554,
"learning_rate": 0.00010556731973249485,
"loss": 0.0061,
"step": 1700
},
{
"epoch": 7.276595744680851,
"grad_norm": 0.03699268028140068,
"learning_rate": 0.00010467726071176853,
"loss": 0.0075,
"step": 1710
},
{
"epoch": 7.319148936170213,
"grad_norm": 0.04546520113945007,
"learning_rate": 0.00010378683018130047,
"loss": 0.0072,
"step": 1720
},
{
"epoch": 7.361702127659575,
"grad_norm": 0.056984953582286835,
"learning_rate": 0.0001028960988670212,
"loss": 0.007,
"step": 1730
},
{
"epoch": 7.404255319148936,
"grad_norm": 0.24714773893356323,
"learning_rate": 0.00010200513751875227,
"loss": 0.0074,
"step": 1740
},
{
"epoch": 7.446808510638298,
"grad_norm": 0.06558862328529358,
"learning_rate": 0.00010111401690458654,
"loss": 0.0064,
"step": 1750
},
{
"epoch": 7.48936170212766,
"grad_norm": 0.06254340708255768,
"learning_rate": 0.00010022280780526725,
"loss": 0.0076,
"step": 1760
},
{
"epoch": 7.531914893617021,
"grad_norm": 0.08606445789337158,
"learning_rate": 9.93315810085658e-05,
"loss": 0.0076,
"step": 1770
},
{
"epoch": 7.574468085106383,
"grad_norm": 0.051956657320261,
"learning_rate": 9.844040730365936e-05,
"loss": 0.0073,
"step": 1780
},
{
"epoch": 7.617021276595745,
"grad_norm": 0.24819940328598022,
"learning_rate": 9.754935747550804e-05,
"loss": 0.0077,
"step": 1790
},
{
"epoch": 7.659574468085106,
"grad_norm": 0.039045918732881546,
"learning_rate": 9.665850229923258e-05,
"loss": 0.0071,
"step": 1800
},
{
"epoch": 7.702127659574468,
"grad_norm": 0.04438428208231926,
"learning_rate": 9.57679125344927e-05,
"loss": 0.0074,
"step": 1810
},
{
"epoch": 7.74468085106383,
"grad_norm": 0.31067439913749695,
"learning_rate": 9.487765891986682e-05,
"loss": 0.0087,
"step": 1820
},
{
"epoch": 7.787234042553192,
"grad_norm": 0.06251853704452515,
"learning_rate": 9.398781216723331e-05,
"loss": 0.0069,
"step": 1830
},
{
"epoch": 7.829787234042553,
"grad_norm": 0.04706185683608055,
"learning_rate": 9.309844295615389e-05,
"loss": 0.0072,
"step": 1840
},
{
"epoch": 7.872340425531915,
"grad_norm": 0.03839968144893646,
"learning_rate": 9.220962192825968e-05,
"loss": 0.0069,
"step": 1850
},
{
"epoch": 7.914893617021277,
"grad_norm": 0.03722318261861801,
"learning_rate": 9.132141968164026e-05,
"loss": 0.0069,
"step": 1860
},
{
"epoch": 7.957446808510638,
"grad_norm": 0.0543668158352375,
"learning_rate": 9.043390676523604e-05,
"loss": 0.0076,
"step": 1870
},
{
"epoch": 8.0,
"grad_norm": 0.051696889102458954,
"learning_rate": 8.954715367323468e-05,
"loss": 0.0066,
"step": 1880
},
{
"epoch": 8.042553191489361,
"grad_norm": 0.08786992728710175,
"learning_rate": 8.866123083947182e-05,
"loss": 0.0062,
"step": 1890
},
{
"epoch": 8.085106382978724,
"grad_norm": 0.3556790351867676,
"learning_rate": 8.777620863183657e-05,
"loss": 0.0079,
"step": 1900
},
{
"epoch": 8.127659574468085,
"grad_norm": 0.08049122244119644,
"learning_rate": 8.689215734668232e-05,
"loss": 0.0064,
"step": 1910
},
{
"epoch": 8.170212765957446,
"grad_norm": 0.05408492311835289,
"learning_rate": 8.600914720324316e-05,
"loss": 0.0077,
"step": 1920
},
{
"epoch": 8.212765957446809,
"grad_norm": 0.034946855157613754,
"learning_rate": 8.512724833805634e-05,
"loss": 0.0067,
"step": 1930
},
{
"epoch": 8.25531914893617,
"grad_norm": 0.05422681197524071,
"learning_rate": 8.424653079939156e-05,
"loss": 0.0062,
"step": 1940
},
{
"epoch": 8.297872340425531,
"grad_norm": 0.06405791640281677,
"learning_rate": 8.336706454168701e-05,
"loss": 0.0064,
"step": 1950
},
{
"epoch": 8.340425531914894,
"grad_norm": 0.08694509416818619,
"learning_rate": 8.248891941999297e-05,
"loss": 0.006,
"step": 1960
},
{
"epoch": 8.382978723404255,
"grad_norm": 0.05589594319462776,
"learning_rate": 8.161216518442334e-05,
"loss": 0.0067,
"step": 1970
},
{
"epoch": 8.425531914893616,
"grad_norm": 0.05914654955267906,
"learning_rate": 8.073687147461547e-05,
"loss": 0.0065,
"step": 1980
},
{
"epoch": 8.46808510638298,
"grad_norm": 0.05237606540322304,
"learning_rate": 7.98631078141987e-05,
"loss": 0.0071,
"step": 1990
},
{
"epoch": 8.51063829787234,
"grad_norm": 0.056138359010219574,
"learning_rate": 7.89909436052722e-05,
"loss": 0.0079,
"step": 2000
},
{
"epoch": 8.553191489361701,
"grad_norm": 0.06285829097032547,
"learning_rate": 7.812044812289249e-05,
"loss": 0.0064,
"step": 2010
},
{
"epoch": 8.595744680851064,
"grad_norm": 0.04014954715967178,
"learning_rate": 7.72516905095709e-05,
"loss": 0.0072,
"step": 2020
},
{
"epoch": 8.638297872340425,
"grad_norm": 0.04423344135284424,
"learning_rate": 7.638473976978177e-05,
"loss": 0.0065,
"step": 2030
},
{
"epoch": 8.680851063829786,
"grad_norm": 0.05287999287247658,
"learning_rate": 7.55196647644814e-05,
"loss": 0.0081,
"step": 2040
},
{
"epoch": 8.72340425531915,
"grad_norm": 0.056493304669857025,
"learning_rate": 7.465653420563845e-05,
"loss": 0.0067,
"step": 2050
},
{
"epoch": 8.76595744680851,
"grad_norm": 0.057281821966171265,
"learning_rate": 7.379541665077643e-05,
"loss": 0.0078,
"step": 2060
},
{
"epoch": 8.808510638297872,
"grad_norm": 0.053731102496385574,
"learning_rate": 7.293638049752812e-05,
"loss": 0.0066,
"step": 2070
},
{
"epoch": 8.851063829787234,
"grad_norm": 0.18697482347488403,
"learning_rate": 7.207949397820278e-05,
"loss": 0.0069,
"step": 2080
},
{
"epoch": 8.893617021276595,
"grad_norm": 0.0387713797390461,
"learning_rate": 7.122482515436661e-05,
"loss": 0.007,
"step": 2090
},
{
"epoch": 8.936170212765958,
"grad_norm": 0.04441935196518898,
"learning_rate": 7.037244191143661e-05,
"loss": 0.0067,
"step": 2100
},
{
"epoch": 8.97872340425532,
"grad_norm": 0.05758345127105713,
"learning_rate": 6.952241195328868e-05,
"loss": 0.0065,
"step": 2110
},
{
"epoch": 9.02127659574468,
"grad_norm": 0.050706226378679276,
"learning_rate": 6.867480279687974e-05,
"loss": 0.0063,
"step": 2120
},
{
"epoch": 9.063829787234043,
"grad_norm": 0.05180887505412102,
"learning_rate": 6.782968176688514e-05,
"loss": 0.0062,
"step": 2130
},
{
"epoch": 9.106382978723405,
"grad_norm": 0.05492401868104935,
"learning_rate": 6.6987115990351e-05,
"loss": 0.006,
"step": 2140
},
{
"epoch": 9.148936170212766,
"grad_norm": 0.053439777344465256,
"learning_rate": 6.614717239136246e-05,
"loss": 0.0066,
"step": 2150
},
{
"epoch": 9.191489361702128,
"grad_norm": 0.15650008618831635,
"learning_rate": 6.530991768572794e-05,
"loss": 0.006,
"step": 2160
},
{
"epoch": 9.23404255319149,
"grad_norm": 0.04846300184726715,
"learning_rate": 6.447541837568e-05,
"loss": 0.0068,
"step": 2170
},
{
"epoch": 9.27659574468085,
"grad_norm": 0.047157011926174164,
"learning_rate": 6.364374074459307e-05,
"loss": 0.006,
"step": 2180
},
{
"epoch": 9.319148936170214,
"grad_norm": 0.047358132898807526,
"learning_rate": 6.281495085171869e-05,
"loss": 0.0058,
"step": 2190
},
{
"epoch": 9.361702127659575,
"grad_norm": 0.11200093477964401,
"learning_rate": 6.198911452693853e-05,
"loss": 0.007,
"step": 2200
},
{
"epoch": 9.404255319148936,
"grad_norm": 0.06481984257698059,
"learning_rate": 6.116629736553552e-05,
"loss": 0.0069,
"step": 2210
},
{
"epoch": 9.446808510638299,
"grad_norm": 0.04743931442499161,
"learning_rate": 6.0346564722983736e-05,
"loss": 0.0072,
"step": 2220
},
{
"epoch": 9.48936170212766,
"grad_norm": 0.07307924330234528,
"learning_rate": 5.952998170975724e-05,
"loss": 0.0062,
"step": 2230
},
{
"epoch": 9.53191489361702,
"grad_norm": 0.05650079995393753,
"learning_rate": 5.871661318615848e-05,
"loss": 0.0061,
"step": 2240
},
{
"epoch": 9.574468085106384,
"grad_norm": 0.057734813541173935,
"learning_rate": 5.790652375716652e-05,
"loss": 0.0068,
"step": 2250
},
{
"epoch": 9.617021276595745,
"grad_norm": 0.05608903244137764,
"learning_rate": 5.709977776730537e-05,
"loss": 0.0071,
"step": 2260
},
{
"epoch": 9.659574468085106,
"grad_norm": 0.07133158296346664,
"learning_rate": 5.62964392955335e-05,
"loss": 0.0067,
"step": 2270
},
{
"epoch": 9.702127659574469,
"grad_norm": 0.2167043685913086,
"learning_rate": 5.549657215015367e-05,
"loss": 0.0067,
"step": 2280
},
{
"epoch": 9.74468085106383,
"grad_norm": 0.06523007154464722,
"learning_rate": 5.470023986374516e-05,
"loss": 0.0068,
"step": 2290
},
{
"epoch": 9.787234042553191,
"grad_norm": 0.04895370826125145,
"learning_rate": 5.39075056881172e-05,
"loss": 0.0058,
"step": 2300
},
{
"epoch": 9.829787234042554,
"grad_norm": 0.056433409452438354,
"learning_rate": 5.31184325892849e-05,
"loss": 0.0067,
"step": 2310
},
{
"epoch": 9.872340425531915,
"grad_norm": 0.049970466643571854,
"learning_rate": 5.233308324246805e-05,
"loss": 0.0058,
"step": 2320
},
{
"epoch": 9.914893617021276,
"grad_norm": 0.08367093652486801,
"learning_rate": 5.155152002711285e-05,
"loss": 0.0063,
"step": 2330
},
{
"epoch": 9.957446808510639,
"grad_norm": 0.07821632921695709,
"learning_rate": 5.077380502193725e-05,
"loss": 0.0071,
"step": 2340
},
{
"epoch": 10.0,
"grad_norm": 0.04344159737229347,
"learning_rate": 5.000000000000002e-05,
"loss": 0.0078,
"step": 2350
}
],
"logging_steps": 10,
"max_steps": 3525,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.44675050315776e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}