{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 547,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009140767824497258,
"grad_norm": 52.43793869018555,
"learning_rate": 1.8181818181818183e-06,
"loss": 2.7415,
"step": 5
},
{
"epoch": 0.018281535648994516,
"grad_norm": 25.07411003112793,
"learning_rate": 3.6363636363636366e-06,
"loss": 1.4622,
"step": 10
},
{
"epoch": 0.027422303473491772,
"grad_norm": 4.931793212890625,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.1586,
"step": 15
},
{
"epoch": 0.03656307129798903,
"grad_norm": 0.516838550567627,
"learning_rate": 7.272727272727273e-06,
"loss": 0.0265,
"step": 20
},
{
"epoch": 0.04570383912248629,
"grad_norm": 14.795572280883789,
"learning_rate": 9.090909090909091e-06,
"loss": 0.068,
"step": 25
},
{
"epoch": 0.054844606946983544,
"grad_norm": 9.015242576599121,
"learning_rate": 1.0909090909090909e-05,
"loss": 0.2448,
"step": 30
},
{
"epoch": 0.06398537477148081,
"grad_norm": 0.29367485642433167,
"learning_rate": 1.2727272727272728e-05,
"loss": 0.0298,
"step": 35
},
{
"epoch": 0.07312614259597806,
"grad_norm": 0.6809912919998169,
"learning_rate": 1.4545454545454546e-05,
"loss": 0.0029,
"step": 40
},
{
"epoch": 0.08226691042047532,
"grad_norm": 0.5797951817512512,
"learning_rate": 1.6363636363636366e-05,
"loss": 0.0031,
"step": 45
},
{
"epoch": 0.09140767824497258,
"grad_norm": 0.11719339340925217,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.0025,
"step": 50
},
{
"epoch": 0.10054844606946983,
"grad_norm": 0.3921687602996826,
"learning_rate": 2e-05,
"loss": 0.002,
"step": 55
},
{
"epoch": 0.10968921389396709,
"grad_norm": 1.4710060358047485,
"learning_rate": 1.9994903844605973e-05,
"loss": 0.0019,
"step": 60
},
{
"epoch": 0.11882998171846434,
"grad_norm": 1.201702356338501,
"learning_rate": 1.9979620572583846e-05,
"loss": 0.0013,
"step": 65
},
{
"epoch": 0.12797074954296161,
"grad_norm": 0.15323181450366974,
"learning_rate": 1.995416576111945e-05,
"loss": 0.0031,
"step": 70
},
{
"epoch": 0.13711151736745886,
"grad_norm": 0.46892285346984863,
"learning_rate": 1.9918565354547738e-05,
"loss": 0.0028,
"step": 75
},
{
"epoch": 0.14625228519195613,
"grad_norm": 0.2431010752916336,
"learning_rate": 1.9872855637909506e-05,
"loss": 0.0017,
"step": 80
},
{
"epoch": 0.15539305301645337,
"grad_norm": 0.2643240690231323,
"learning_rate": 1.9817083199968552e-05,
"loss": 0.0012,
"step": 85
},
{
"epoch": 0.16453382084095064,
"grad_norm": 0.22663037478923798,
"learning_rate": 1.9751304885726967e-05,
"loss": 0.0007,
"step": 90
},
{
"epoch": 0.1736745886654479,
"grad_norm": 0.14844365417957306,
"learning_rate": 1.9675587738486935e-05,
"loss": 0.0003,
"step": 95
},
{
"epoch": 0.18281535648994515,
"grad_norm": 0.4369293749332428,
"learning_rate": 1.9590008931518133e-05,
"loss": 0.0005,
"step": 100
},
{
"epoch": 0.19195612431444242,
"grad_norm": 0.8838499784469604,
"learning_rate": 1.9494655689400294e-05,
"loss": 0.0035,
"step": 105
},
{
"epoch": 0.20109689213893966,
"grad_norm": 1.3618451356887817,
"learning_rate": 1.9389625199121264e-05,
"loss": 0.0049,
"step": 110
},
{
"epoch": 0.21023765996343693,
"grad_norm": 0.08296040445566177,
"learning_rate": 1.927502451102095e-05,
"loss": 0.003,
"step": 115
},
{
"epoch": 0.21937842778793418,
"grad_norm": 0.06567184627056122,
"learning_rate": 1.9150970429682316e-05,
"loss": 0.0028,
"step": 120
},
{
"epoch": 0.22851919561243145,
"grad_norm": 0.14650970697402954,
"learning_rate": 1.9017589394880515e-05,
"loss": 0.0018,
"step": 125
},
{
"epoch": 0.2376599634369287,
"grad_norm": 0.17963732779026031,
"learning_rate": 1.8875017352711547e-05,
"loss": 0.0016,
"step": 130
},
{
"epoch": 0.24680073126142596,
"grad_norm": 0.09263317286968231,
"learning_rate": 1.8723399617031754e-05,
"loss": 0.0014,
"step": 135
},
{
"epoch": 0.25594149908592323,
"grad_norm": 0.05322503671050072,
"learning_rate": 1.8562890721349434e-05,
"loss": 0.0008,
"step": 140
},
{
"epoch": 0.26508226691042047,
"grad_norm": 0.04636527970433235,
"learning_rate": 1.8393654261319504e-05,
"loss": 0.0008,
"step": 145
},
{
"epoch": 0.2742230347349177,
"grad_norm": 0.3017467260360718,
"learning_rate": 1.821586272800168e-05,
"loss": 0.0006,
"step": 150
},
{
"epoch": 0.283363802559415,
"grad_norm": 0.08996463567018509,
"learning_rate": 1.8029697332052277e-05,
"loss": 0.0006,
"step": 155
},
{
"epoch": 0.29250457038391225,
"grad_norm": 0.2036667764186859,
"learning_rate": 1.7835347819028642e-05,
"loss": 0.0006,
"step": 160
},
{
"epoch": 0.3016453382084095,
"grad_norm": 0.17612086236476898,
"learning_rate": 1.76330122759946e-05,
"loss": 0.0004,
"step": 165
},
{
"epoch": 0.31078610603290674,
"grad_norm": 0.20145903527736664,
"learning_rate": 1.7422896929623957e-05,
"loss": 0.0004,
"step": 170
},
{
"epoch": 0.31992687385740404,
"grad_norm": 0.2752271592617035,
"learning_rate": 1.720521593600787e-05,
"loss": 0.0004,
"step": 175
},
{
"epoch": 0.3290676416819013,
"grad_norm": 0.04029591754078865,
"learning_rate": 1.6980191162380298e-05,
"loss": 0.0006,
"step": 180
},
{
"epoch": 0.3382084095063985,
"grad_norm": 0.04862139746546745,
"learning_rate": 1.674805196098402e-05,
"loss": 0.0004,
"step": 185
},
{
"epoch": 0.3473491773308958,
"grad_norm": 0.0757121592760086,
"learning_rate": 1.6509034935307716e-05,
"loss": 0.0004,
"step": 190
},
{
"epoch": 0.35648994515539306,
"grad_norm": 0.13314923644065857,
"learning_rate": 1.6263383698932307e-05,
"loss": 0.0003,
"step": 195
},
{
"epoch": 0.3656307129798903,
"grad_norm": 0.28700515627861023,
"learning_rate": 1.6011348627232463e-05,
"loss": 0.0004,
"step": 200
},
{
"epoch": 0.37477148080438755,
"grad_norm": 0.5152389407157898,
"learning_rate": 1.5753186602186207e-05,
"loss": 0.0006,
"step": 205
},
{
"epoch": 0.38391224862888484,
"grad_norm": 0.25974079966545105,
"learning_rate": 1.5489160750552833e-05,
"loss": 0.0011,
"step": 210
},
{
"epoch": 0.3930530164533821,
"grad_norm": 1.2071099281311035,
"learning_rate": 1.5219540175685938e-05,
"loss": 0.0024,
"step": 215
},
{
"epoch": 0.40219378427787933,
"grad_norm": 0.17318445444107056,
"learning_rate": 1.4944599683254903e-05,
"loss": 0.0026,
"step": 220
},
{
"epoch": 0.4113345521023766,
"grad_norm": 0.20099027454853058,
"learning_rate": 1.4664619501154445e-05,
"loss": 0.0016,
"step": 225
},
{
"epoch": 0.42047531992687387,
"grad_norm": 0.25999948382377625,
"learning_rate": 1.4379884993887605e-05,
"loss": 0.0011,
"step": 230
},
{
"epoch": 0.4296160877513711,
"grad_norm": 0.043458275496959686,
"learning_rate": 1.4090686371713403e-05,
"loss": 0.0009,
"step": 235
},
{
"epoch": 0.43875685557586835,
"grad_norm": 0.3035805821418762,
"learning_rate": 1.3797318394855496e-05,
"loss": 0.0007,
"step": 240
},
{
"epoch": 0.44789762340036565,
"grad_norm": 0.37615489959716797,
"learning_rate": 1.3500080073073436e-05,
"loss": 0.0009,
"step": 245
},
{
"epoch": 0.4570383912248629,
"grad_norm": 0.04378453642129898,
"learning_rate": 1.319927436090259e-05,
"loss": 0.0004,
"step": 250
},
{
"epoch": 0.46617915904936014,
"grad_norm": 0.0630556121468544,
"learning_rate": 1.2895207848873488e-05,
"loss": 0.0006,
"step": 255
},
{
"epoch": 0.4753199268738574,
"grad_norm": 0.02288464456796646,
"learning_rate": 1.2588190451025209e-05,
"loss": 0.0002,
"step": 260
},
{
"epoch": 0.4844606946983547,
"grad_norm": 0.20390933752059937,
"learning_rate": 1.2278535089031377e-05,
"loss": 0.0004,
"step": 265
},
{
"epoch": 0.4936014625228519,
"grad_norm": 0.06073382869362831,
"learning_rate": 1.1966557373260654e-05,
"loss": 0.0002,
"step": 270
},
{
"epoch": 0.5027422303473492,
"grad_norm": 0.023382948711514473,
"learning_rate": 1.165257528109685e-05,
"loss": 0.0001,
"step": 275
},
{
"epoch": 0.5118829981718465,
"grad_norm": 0.025175806134939194,
"learning_rate": 1.1336908832846485e-05,
"loss": 0.0001,
"step": 280
},
{
"epoch": 0.5210237659963437,
"grad_norm": 0.010363736189901829,
"learning_rate": 1.1019879765564155e-05,
"loss": 0.0,
"step": 285
},
{
"epoch": 0.5301645338208409,
"grad_norm": 0.05363736301660538,
"learning_rate": 1.0701811205128115e-05,
"loss": 0.0002,
"step": 290
},
{
"epoch": 0.5393053016453382,
"grad_norm": 0.05416525900363922,
"learning_rate": 1.0383027336900356e-05,
"loss": 0.0001,
"step": 295
},
{
"epoch": 0.5484460694698354,
"grad_norm": 0.010775912553071976,
"learning_rate": 1.0063853075306792e-05,
"loss": 0.0001,
"step": 300
},
{
"epoch": 0.5575868372943327,
"grad_norm": 0.01685800403356552,
"learning_rate": 9.744613732674401e-06,
"loss": 0.0001,
"step": 305
},
{
"epoch": 0.56672760511883,
"grad_norm": 0.20173287391662598,
"learning_rate": 9.425634687662768e-06,
"loss": 0.0002,
"step": 310
},
{
"epoch": 0.5758683729433273,
"grad_norm": 0.2594170868396759,
"learning_rate": 9.107241053628058e-06,
"loss": 0.0001,
"step": 315
},
{
"epoch": 0.5850091407678245,
"grad_norm": 0.14655804634094238,
"learning_rate": 8.789757347257373e-06,
"loss": 0.0001,
"step": 320
},
{
"epoch": 0.5941499085923218,
"grad_norm": 0.1196817010641098,
"learning_rate": 8.473507157811254e-06,
"loss": 0.0002,
"step": 325
},
{
"epoch": 0.603290676416819,
"grad_norm": 0.022879011929035187,
"learning_rate": 8.158812817311474e-06,
"loss": 0.0,
"step": 330
},
{
"epoch": 0.6124314442413162,
"grad_norm": 0.04623665288090706,
"learning_rate": 7.845995072010188e-06,
"loss": 0.0003,
"step": 335
},
{
"epoch": 0.6215722120658135,
"grad_norm": 0.0164530910551548,
"learning_rate": 7.535372755475411e-06,
"loss": 0.0001,
"step": 340
},
{
"epoch": 0.6307129798903108,
"grad_norm": 0.06736422330141068,
"learning_rate": 7.22726246362592e-06,
"loss": 0.0001,
"step": 345
},
{
"epoch": 0.6398537477148081,
"grad_norm": 0.09347337484359741,
"learning_rate": 6.921978232046878e-06,
"loss": 0.0001,
"step": 350
},
{
"epoch": 0.6489945155393053,
"grad_norm": 0.0829305499792099,
"learning_rate": 6.619831215914974e-06,
"loss": 0.0001,
"step": 355
},
{
"epoch": 0.6581352833638026,
"grad_norm": 0.029442960396409035,
"learning_rate": 6.321129372859418e-06,
"loss": 0.0001,
"step": 360
},
{
"epoch": 0.6672760511882998,
"grad_norm": 0.17886511981487274,
"learning_rate": 6.026177149081949e-06,
"loss": 0.0003,
"step": 365
},
{
"epoch": 0.676416819012797,
"grad_norm": 0.004981564823538065,
"learning_rate": 5.7352751690558025e-06,
"loss": 0.0002,
"step": 370
},
{
"epoch": 0.6855575868372943,
"grad_norm": 0.09108416736125946,
"learning_rate": 5.448719929119916e-06,
"loss": 0.0002,
"step": 375
},
{
"epoch": 0.6946983546617916,
"grad_norm": 0.08255312591791153,
"learning_rate": 5.166803495280614e-06,
"loss": 0.0002,
"step": 380
},
{
"epoch": 0.7038391224862889,
"grad_norm": 0.09765861183404922,
"learning_rate": 4.889813205528895e-06,
"loss": 0.0002,
"step": 385
},
{
"epoch": 0.7129798903107861,
"grad_norm": 0.18912295997142792,
"learning_rate": 4.61803137697661e-06,
"loss": 0.0001,
"step": 390
},
{
"epoch": 0.7221206581352834,
"grad_norm": 0.03831862658262253,
"learning_rate": 4.351735018110066e-06,
"loss": 0.0001,
"step": 395
},
{
"epoch": 0.7312614259597806,
"grad_norm": 0.06283199042081833,
"learning_rate": 4.091195546454398e-06,
"loss": 0.0001,
"step": 400
},
{
"epoch": 0.7404021937842779,
"grad_norm": 0.02292688563466072,
"learning_rate": 3.8366785119363624e-06,
"loss": 0.0001,
"step": 405
},
{
"epoch": 0.7495429616087751,
"grad_norm": 0.07033329457044601,
"learning_rate": 3.5884433262276376e-06,
"loss": 0.0001,
"step": 410
},
{
"epoch": 0.7586837294332724,
"grad_norm": 0.0036680041812360287,
"learning_rate": 3.3467429983443477e-06,
"loss": 0.0002,
"step": 415
},
{
"epoch": 0.7678244972577697,
"grad_norm": 0.024505017325282097,
"learning_rate": 3.111823876772426e-06,
"loss": 0.0001,
"step": 420
},
{
"epoch": 0.7769652650822669,
"grad_norm": 0.20300976932048798,
"learning_rate": 2.883925398381585e-06,
"loss": 0.0001,
"step": 425
},
{
"epoch": 0.7861060329067642,
"grad_norm": 0.004944251384586096,
"learning_rate": 2.663279844383815e-06,
"loss": 0.0,
"step": 430
},
{
"epoch": 0.7952468007312614,
"grad_norm": 0.051807742565870285,
"learning_rate": 2.4501121035851494e-06,
"loss": 0.0001,
"step": 435
},
{
"epoch": 0.8043875685557587,
"grad_norm": 0.004597936756908894,
"learning_rate": 2.244639443172013e-06,
"loss": 0.0,
"step": 440
},
{
"epoch": 0.8135283363802559,
"grad_norm": 0.05087242275476456,
"learning_rate": 2.047071287265735e-06,
"loss": 0.0001,
"step": 445
},
{
"epoch": 0.8226691042047533,
"grad_norm": 0.09946884959936142,
"learning_rate": 1.857609003471007e-06,
"loss": 0.0001,
"step": 450
},
{
"epoch": 0.8318098720292505,
"grad_norm": 0.0069345757365226746,
"learning_rate": 1.6764456976357279e-06,
"loss": 0.0001,
"step": 455
},
{
"epoch": 0.8409506398537477,
"grad_norm": 0.0009497898281551898,
"learning_rate": 1.503766017031547e-06,
"loss": 0.0,
"step": 460
},
{
"epoch": 0.850091407678245,
"grad_norm": 0.004903269000351429,
"learning_rate": 1.339745962155613e-06,
"loss": 0.0001,
"step": 465
},
{
"epoch": 0.8592321755027422,
"grad_norm": 0.04905455559492111,
"learning_rate": 1.1845527073454045e-06,
"loss": 0.0,
"step": 470
},
{
"epoch": 0.8683729433272395,
"grad_norm": 0.01231853011995554,
"learning_rate": 1.0383444303894453e-06,
"loss": 0.0,
"step": 475
},
{
"epoch": 0.8775137111517367,
"grad_norm": 0.11394715309143066,
"learning_rate": 9.012701513075839e-07,
"loss": 0.0001,
"step": 480
},
{
"epoch": 0.886654478976234,
"grad_norm": 0.005672850646078587,
"learning_rate": 7.734695804651693e-07,
"loss": 0.0,
"step": 485
},
{
"epoch": 0.8957952468007313,
"grad_norm": 0.01644750125706196,
"learning_rate": 6.550729761758901e-07,
"loss": 0.0,
"step": 490
},
{
"epoch": 0.9049360146252285,
"grad_norm": 0.007940856739878654,
"learning_rate": 5.462010119384665e-07,
"loss": 0.0,
"step": 495
},
{
"epoch": 0.9140767824497258,
"grad_norm": 0.07022310048341751,
"learning_rate": 4.4696465344245874e-07,
"loss": 0.0001,
"step": 500
},
{
"epoch": 0.923217550274223,
"grad_norm": 0.0016659650718793273,
"learning_rate": 3.574650454685902e-07,
"loss": 0.0001,
"step": 505
},
{
"epoch": 0.9323583180987203,
"grad_norm": 0.03347240015864372,
"learning_rate": 2.777934087988532e-07,
"loss": 0.0,
"step": 510
},
{
"epoch": 0.9414990859232175,
"grad_norm": 0.004893618635833263,
"learning_rate": 2.0803094724143879e-07,
"loss": 0.0,
"step": 515
},
{
"epoch": 0.9506398537477148,
"grad_norm": 0.009828943759202957,
"learning_rate": 1.482487648653008e-07,
"loss": 0.0001,
"step": 520
},
{
"epoch": 0.9597806215722121,
"grad_norm": 0.05608784779906273,
"learning_rate": 9.85077935286749e-08,
"loss": 0.0001,
"step": 525
},
{
"epoch": 0.9689213893967094,
"grad_norm": 0.022456202656030655,
"learning_rate": 5.8858730775438465e-08,
"loss": 0.0,
"step": 530
},
{
"epoch": 0.9780621572212066,
"grad_norm": 0.02870968170464039,
"learning_rate": 2.9341988162595593e-08,
"loss": 0.0003,
"step": 535
},
{
"epoch": 0.9872029250457038,
"grad_norm": 0.1587427407503128,
"learning_rate": 9.987650071561217e-09,
"loss": 0.0,
"step": 540
},
{
"epoch": 0.9963436928702011,
"grad_norm": 0.018141670152544975,
"learning_rate": 8.154430452267381e-10,
"loss": 0.0001,
"step": 545
},
{
"epoch": 1.0,
"step": 547,
"total_flos": 1.0069004076135219e+18,
"train_loss": 0.04387825021601769,
"train_runtime": 3854.1911,
"train_samples_per_second": 18.162,
"train_steps_per_second": 0.142
}
],
"logging_steps": 5,
"max_steps": 547,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0069004076135219e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}