MNLP_M3_rag_model / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.998857142857143,
"eval_steps": 500,
"global_step": 6561,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011428571428571429,
"grad_norm": 8.4375,
"learning_rate": 1.8264840182648401e-06,
"loss": 1.3625,
"step": 25
},
{
"epoch": 0.022857142857142857,
"grad_norm": 6.46875,
"learning_rate": 3.7290715372907152e-06,
"loss": 1.3344,
"step": 50
},
{
"epoch": 0.03428571428571429,
"grad_norm": 5.25,
"learning_rate": 5.631659056316591e-06,
"loss": 1.2071,
"step": 75
},
{
"epoch": 0.045714285714285714,
"grad_norm": 4.6875,
"learning_rate": 7.5342465753424655e-06,
"loss": 1.1908,
"step": 100
},
{
"epoch": 0.05714285714285714,
"grad_norm": 3.875,
"learning_rate": 9.436834094368341e-06,
"loss": 1.1741,
"step": 125
},
{
"epoch": 0.06857142857142857,
"grad_norm": 4.34375,
"learning_rate": 1.1339421613394216e-05,
"loss": 1.0985,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 4.1875,
"learning_rate": 1.3242009132420092e-05,
"loss": 1.1444,
"step": 175
},
{
"epoch": 0.09142857142857143,
"grad_norm": 4.0,
"learning_rate": 1.5144596651445967e-05,
"loss": 1.0922,
"step": 200
},
{
"epoch": 0.10285714285714286,
"grad_norm": 3.671875,
"learning_rate": 1.7047184170471843e-05,
"loss": 1.0543,
"step": 225
},
{
"epoch": 0.11428571428571428,
"grad_norm": 4.90625,
"learning_rate": 1.8949771689497718e-05,
"loss": 1.0472,
"step": 250
},
{
"epoch": 0.12571428571428572,
"grad_norm": 4.34375,
"learning_rate": 2.0852359208523592e-05,
"loss": 1.089,
"step": 275
},
{
"epoch": 0.13714285714285715,
"grad_norm": 4.21875,
"learning_rate": 2.2754946727549467e-05,
"loss": 1.0493,
"step": 300
},
{
"epoch": 0.14857142857142858,
"grad_norm": 4.34375,
"learning_rate": 2.4657534246575342e-05,
"loss": 1.0405,
"step": 325
},
{
"epoch": 0.16,
"grad_norm": 3.859375,
"learning_rate": 2.656012176560122e-05,
"loss": 1.0518,
"step": 350
},
{
"epoch": 0.17142857142857143,
"grad_norm": 3.953125,
"learning_rate": 2.846270928462709e-05,
"loss": 1.0101,
"step": 375
},
{
"epoch": 0.18285714285714286,
"grad_norm": 4.1875,
"learning_rate": 3.036529680365297e-05,
"loss": 0.99,
"step": 400
},
{
"epoch": 0.19428571428571428,
"grad_norm": 4.4375,
"learning_rate": 3.226788432267884e-05,
"loss": 1.0216,
"step": 425
},
{
"epoch": 0.2057142857142857,
"grad_norm": 4.34375,
"learning_rate": 3.417047184170472e-05,
"loss": 0.9716,
"step": 450
},
{
"epoch": 0.21714285714285714,
"grad_norm": 4.25,
"learning_rate": 3.60730593607306e-05,
"loss": 0.9779,
"step": 475
},
{
"epoch": 0.22857142857142856,
"grad_norm": 3.8125,
"learning_rate": 3.797564687975647e-05,
"loss": 0.95,
"step": 500
},
{
"epoch": 0.24,
"grad_norm": 4.15625,
"learning_rate": 3.9878234398782346e-05,
"loss": 0.9273,
"step": 525
},
{
"epoch": 0.25142857142857145,
"grad_norm": 3.671875,
"learning_rate": 4.1780821917808224e-05,
"loss": 0.9343,
"step": 550
},
{
"epoch": 0.26285714285714284,
"grad_norm": 3.90625,
"learning_rate": 4.3683409436834095e-05,
"loss": 0.9279,
"step": 575
},
{
"epoch": 0.2742857142857143,
"grad_norm": 6.84375,
"learning_rate": 4.5585996955859973e-05,
"loss": 0.9683,
"step": 600
},
{
"epoch": 0.2857142857142857,
"grad_norm": 3.03125,
"learning_rate": 4.7488584474885845e-05,
"loss": 0.904,
"step": 625
},
{
"epoch": 0.29714285714285715,
"grad_norm": 4.71875,
"learning_rate": 4.939117199391172e-05,
"loss": 0.9745,
"step": 650
},
{
"epoch": 0.30857142857142855,
"grad_norm": 4.25,
"learning_rate": 4.9998977150017555e-05,
"loss": 0.8935,
"step": 675
},
{
"epoch": 0.32,
"grad_norm": 3.890625,
"learning_rate": 4.999375693918911e-05,
"loss": 0.8999,
"step": 700
},
{
"epoch": 0.3314285714285714,
"grad_norm": 4.875,
"learning_rate": 4.9984113776601504e-05,
"loss": 0.9305,
"step": 725
},
{
"epoch": 0.34285714285714286,
"grad_norm": 4.1875,
"learning_rate": 4.9970049368730605e-05,
"loss": 0.933,
"step": 750
},
{
"epoch": 0.35428571428571426,
"grad_norm": 3.578125,
"learning_rate": 4.9951566204445834e-05,
"loss": 0.932,
"step": 775
},
{
"epoch": 0.3657142857142857,
"grad_norm": 3.890625,
"learning_rate": 4.992866755456975e-05,
"loss": 0.8781,
"step": 800
},
{
"epoch": 0.37714285714285717,
"grad_norm": 3.578125,
"learning_rate": 4.990135747129923e-05,
"loss": 0.8927,
"step": 825
},
{
"epoch": 0.38857142857142857,
"grad_norm": 4.0625,
"learning_rate": 4.986964078748837e-05,
"loss": 0.8595,
"step": 850
},
{
"epoch": 0.4,
"grad_norm": 2.84375,
"learning_rate": 4.983352311579329e-05,
"loss": 0.8953,
"step": 875
},
{
"epoch": 0.4114285714285714,
"grad_norm": 3.71875,
"learning_rate": 4.979301084767886e-05,
"loss": 0.8421,
"step": 900
},
{
"epoch": 0.4228571428571429,
"grad_norm": 3.984375,
"learning_rate": 4.974811115228767e-05,
"loss": 0.8636,
"step": 925
},
{
"epoch": 0.4342857142857143,
"grad_norm": 4.1875,
"learning_rate": 4.969883197517137e-05,
"loss": 0.8655,
"step": 950
},
{
"epoch": 0.44571428571428573,
"grad_norm": 4.21875,
"learning_rate": 4.964518203688462e-05,
"loss": 0.8487,
"step": 975
},
{
"epoch": 0.45714285714285713,
"grad_norm": 4.0,
"learning_rate": 4.958717083144182e-05,
"loss": 0.8706,
"step": 1000
},
{
"epoch": 0.4685714285714286,
"grad_norm": 3.25,
"learning_rate": 4.952480862463712e-05,
"loss": 0.8521,
"step": 1025
},
{
"epoch": 0.48,
"grad_norm": 3.796875,
"learning_rate": 4.945810645222767e-05,
"loss": 0.8352,
"step": 1050
},
{
"epoch": 0.49142857142857144,
"grad_norm": 3.53125,
"learning_rate": 4.938707611798078e-05,
"loss": 0.8311,
"step": 1075
},
{
"epoch": 0.5028571428571429,
"grad_norm": 3.5625,
"learning_rate": 4.931173019158504e-05,
"loss": 0.8434,
"step": 1100
},
{
"epoch": 0.5142857142857142,
"grad_norm": 3.96875,
"learning_rate": 4.923208200642602e-05,
"loss": 0.8155,
"step": 1125
},
{
"epoch": 0.5257142857142857,
"grad_norm": 2.953125,
"learning_rate": 4.914814565722671e-05,
"loss": 0.8337,
"step": 1150
},
{
"epoch": 0.5371428571428571,
"grad_norm": 3.796875,
"learning_rate": 4.905993599755331e-05,
"loss": 0.838,
"step": 1175
},
{
"epoch": 0.5485714285714286,
"grad_norm": 4.21875,
"learning_rate": 4.896746863718671e-05,
"loss": 0.8077,
"step": 1200
},
{
"epoch": 0.56,
"grad_norm": 3.265625,
"learning_rate": 4.8870759939360136e-05,
"loss": 0.8583,
"step": 1225
},
{
"epoch": 0.5714285714285714,
"grad_norm": 4.125,
"learning_rate": 4.8769827017863514e-05,
"loss": 0.8133,
"step": 1250
},
{
"epoch": 0.5828571428571429,
"grad_norm": 4.125,
"learning_rate": 4.86646877340149e-05,
"loss": 0.8224,
"step": 1275
},
{
"epoch": 0.5942857142857143,
"grad_norm": 3.53125,
"learning_rate": 4.8555360693499786e-05,
"loss": 0.8146,
"step": 1300
},
{
"epoch": 0.6057142857142858,
"grad_norm": 4.0625,
"learning_rate": 4.844186524307853e-05,
"loss": 0.8039,
"step": 1325
},
{
"epoch": 0.6171428571428571,
"grad_norm": 3.828125,
"learning_rate": 4.832422146716275e-05,
"loss": 0.8203,
"step": 1350
},
{
"epoch": 0.6285714285714286,
"grad_norm": 3.34375,
"learning_rate": 4.8202450184261116e-05,
"loss": 0.7826,
"step": 1375
},
{
"epoch": 0.64,
"grad_norm": 4.0625,
"learning_rate": 4.807657294329527e-05,
"loss": 0.7926,
"step": 1400
},
{
"epoch": 0.6514285714285715,
"grad_norm": 4.5625,
"learning_rate": 4.79466120197865e-05,
"loss": 0.765,
"step": 1425
},
{
"epoch": 0.6628571428571428,
"grad_norm": 4.375,
"learning_rate": 4.781259041191375e-05,
"loss": 0.7731,
"step": 1450
},
{
"epoch": 0.6742857142857143,
"grad_norm": 3.234375,
"learning_rate": 4.767453183644391e-05,
"loss": 0.8442,
"step": 1475
},
{
"epoch": 0.6857142857142857,
"grad_norm": 4.1875,
"learning_rate": 4.753246072453474e-05,
"loss": 0.736,
"step": 1500
},
{
"epoch": 0.6971428571428572,
"grad_norm": 3.453125,
"learning_rate": 4.7386402217411555e-05,
"loss": 0.8104,
"step": 1525
},
{
"epoch": 0.7085714285714285,
"grad_norm": 3.671875,
"learning_rate": 4.723638216191811e-05,
"loss": 0.7847,
"step": 1550
},
{
"epoch": 0.72,
"grad_norm": 3.625,
"learning_rate": 4.708242710594277e-05,
"loss": 0.7486,
"step": 1575
},
{
"epoch": 0.7314285714285714,
"grad_norm": 3.6875,
"learning_rate": 4.6924564293720434e-05,
"loss": 0.7868,
"step": 1600
},
{
"epoch": 0.7428571428571429,
"grad_norm": 4.28125,
"learning_rate": 4.676282166101142e-05,
"loss": 0.7314,
"step": 1625
},
{
"epoch": 0.7542857142857143,
"grad_norm": 3.625,
"learning_rate": 4.659722783015785e-05,
"loss": 0.7461,
"step": 1650
},
{
"epoch": 0.7657142857142857,
"grad_norm": 4.6875,
"learning_rate": 4.6427812105018576e-05,
"loss": 0.7334,
"step": 1675
},
{
"epoch": 0.7771428571428571,
"grad_norm": 4.53125,
"learning_rate": 4.625460446578348e-05,
"loss": 0.7279,
"step": 1700
},
{
"epoch": 0.7885714285714286,
"grad_norm": 4.1875,
"learning_rate": 4.6077635563668195e-05,
"loss": 0.7701,
"step": 1725
},
{
"epoch": 0.8,
"grad_norm": 4.375,
"learning_rate": 4.5896936715489885e-05,
"loss": 0.7034,
"step": 1750
},
{
"epoch": 0.8114285714285714,
"grad_norm": 4.3125,
"learning_rate": 4.571253989812545e-05,
"loss": 0.7281,
"step": 1775
},
{
"epoch": 0.8228571428571428,
"grad_norm": 3.484375,
"learning_rate": 4.5524477742852745e-05,
"loss": 0.7474,
"step": 1800
},
{
"epoch": 0.8342857142857143,
"grad_norm": 4.40625,
"learning_rate": 4.5332783529576146e-05,
"loss": 0.739,
"step": 1825
},
{
"epoch": 0.8457142857142858,
"grad_norm": 3.890625,
"learning_rate": 4.5137491180937196e-05,
"loss": 0.7537,
"step": 1850
},
{
"epoch": 0.8571428571428571,
"grad_norm": 4.8125,
"learning_rate": 4.4938635256311634e-05,
"loss": 0.695,
"step": 1875
},
{
"epoch": 0.8685714285714285,
"grad_norm": 3.875,
"learning_rate": 4.4736250945693655e-05,
"loss": 0.7546,
"step": 1900
},
{
"epoch": 0.88,
"grad_norm": 3.453125,
"learning_rate": 4.453037406346862e-05,
"loss": 0.7416,
"step": 1925
},
{
"epoch": 0.8914285714285715,
"grad_norm": 3.96875,
"learning_rate": 4.4321041042075254e-05,
"loss": 0.7364,
"step": 1950
},
{
"epoch": 0.9028571428571428,
"grad_norm": 4.5625,
"learning_rate": 4.4108288925558505e-05,
"loss": 0.6852,
"step": 1975
},
{
"epoch": 0.9142857142857143,
"grad_norm": 3.21875,
"learning_rate": 4.389215536301412e-05,
"loss": 0.6998,
"step": 2000
},
{
"epoch": 0.9257142857142857,
"grad_norm": 4.03125,
"learning_rate": 4.3672678601926176e-05,
"loss": 0.756,
"step": 2025
},
{
"epoch": 0.9371428571428572,
"grad_norm": 4.40625,
"learning_rate": 4.344989748139873e-05,
"loss": 0.7293,
"step": 2050
},
{
"epoch": 0.9485714285714286,
"grad_norm": 3.84375,
"learning_rate": 4.3223851425282765e-05,
"loss": 0.6869,
"step": 2075
},
{
"epoch": 0.96,
"grad_norm": 4.3125,
"learning_rate": 4.299458043519964e-05,
"loss": 0.7269,
"step": 2100
},
{
"epoch": 0.9714285714285714,
"grad_norm": 4.0,
"learning_rate": 4.276212508346232e-05,
"loss": 0.6421,
"step": 2125
},
{
"epoch": 0.9828571428571429,
"grad_norm": 4.75,
"learning_rate": 4.252652650589563e-05,
"loss": 0.6815,
"step": 2150
},
{
"epoch": 0.9942857142857143,
"grad_norm": 3.9375,
"learning_rate": 4.228782639455674e-05,
"loss": 0.7155,
"step": 2175
},
{
"epoch": 1.0054857142857143,
"grad_norm": 3.359375,
"learning_rate": 4.2046066990357235e-05,
"loss": 0.6069,
"step": 2200
},
{
"epoch": 1.0169142857142857,
"grad_norm": 4.65625,
"learning_rate": 4.18012910755881e-05,
"loss": 0.5746,
"step": 2225
},
{
"epoch": 1.0283428571428572,
"grad_norm": 4.25,
"learning_rate": 4.155354196634886e-05,
"loss": 0.5889,
"step": 2250
},
{
"epoch": 1.0397714285714286,
"grad_norm": 4.21875,
"learning_rate": 4.130286350488224e-05,
"loss": 0.5856,
"step": 2275
},
{
"epoch": 1.0512,
"grad_norm": 4.875,
"learning_rate": 4.1049300051815763e-05,
"loss": 0.5924,
"step": 2300
},
{
"epoch": 1.0626285714285715,
"grad_norm": 3.8125,
"learning_rate": 4.0792896478311614e-05,
"loss": 0.5592,
"step": 2325
},
{
"epoch": 1.0740571428571428,
"grad_norm": 4.25,
"learning_rate": 4.0533698158126085e-05,
"loss": 0.5558,
"step": 2350
},
{
"epoch": 1.0854857142857144,
"grad_norm": 4.40625,
"learning_rate": 4.0271750959580166e-05,
"loss": 0.5913,
"step": 2375
},
{
"epoch": 1.0969142857142857,
"grad_norm": 3.9375,
"learning_rate": 4.00071012374426e-05,
"loss": 0.5583,
"step": 2400
},
{
"epoch": 1.108342857142857,
"grad_norm": 4.3125,
"learning_rate": 3.9739795824726804e-05,
"loss": 0.5687,
"step": 2425
},
{
"epoch": 1.1197714285714286,
"grad_norm": 4.09375,
"learning_rate": 3.946988202440321e-05,
"loss": 0.6084,
"step": 2450
},
{
"epoch": 1.1312,
"grad_norm": 5.125,
"learning_rate": 3.919740760102841e-05,
"loss": 0.5739,
"step": 2475
},
{
"epoch": 1.1426285714285713,
"grad_norm": 3.96875,
"learning_rate": 3.8922420772292644e-05,
"loss": 0.5585,
"step": 2500
},
{
"epoch": 1.154057142857143,
"grad_norm": 4.5625,
"learning_rate": 3.864497020048712e-05,
"loss": 0.5833,
"step": 2525
},
{
"epoch": 1.1654857142857142,
"grad_norm": 5.125,
"learning_rate": 3.8365104983892533e-05,
"loss": 0.5939,
"step": 2550
},
{
"epoch": 1.1769142857142858,
"grad_norm": 4.25,
"learning_rate": 3.808287464809063e-05,
"loss": 0.5721,
"step": 2575
},
{
"epoch": 1.1883428571428571,
"grad_norm": 4.09375,
"learning_rate": 3.77983291372e-05,
"loss": 0.5348,
"step": 2600
},
{
"epoch": 1.1997714285714285,
"grad_norm": 4.34375,
"learning_rate": 3.751151880503782e-05,
"loss": 0.6,
"step": 2625
},
{
"epoch": 1.2112,
"grad_norm": 4.25,
"learning_rate": 3.722249440620917e-05,
"loss": 0.538,
"step": 2650
},
{
"epoch": 1.2226285714285714,
"grad_norm": 5.0,
"learning_rate": 3.6931307087125425e-05,
"loss": 0.5264,
"step": 2675
},
{
"epoch": 1.234057142857143,
"grad_norm": 3.609375,
"learning_rate": 3.663800837695315e-05,
"loss": 0.5233,
"step": 2700
},
{
"epoch": 1.2454857142857143,
"grad_norm": 3.953125,
"learning_rate": 3.634265017849549e-05,
"loss": 0.5722,
"step": 2725
},
{
"epoch": 1.2569142857142857,
"grad_norm": 2.921875,
"learning_rate": 3.604528475900729e-05,
"loss": 0.5632,
"step": 2750
},
{
"epoch": 1.2683428571428572,
"grad_norm": 5.21875,
"learning_rate": 3.574596474094578e-05,
"loss": 0.5267,
"step": 2775
},
{
"epoch": 1.2797714285714286,
"grad_norm": 5.09375,
"learning_rate": 3.544474309265834e-05,
"loss": 0.5508,
"step": 2800
},
{
"epoch": 1.2912,
"grad_norm": 4.3125,
"learning_rate": 3.514167311900917e-05,
"loss": 0.5592,
"step": 2825
},
{
"epoch": 1.3026285714285715,
"grad_norm": 3.96875,
"learning_rate": 3.483680845194629e-05,
"loss": 0.5389,
"step": 2850
},
{
"epoch": 1.3140571428571428,
"grad_norm": 5.15625,
"learning_rate": 3.4530203041010745e-05,
"loss": 0.5379,
"step": 2875
},
{
"epoch": 1.3254857142857142,
"grad_norm": 4.25,
"learning_rate": 3.422191114378952e-05,
"loss": 0.5792,
"step": 2900
},
{
"epoch": 1.3369142857142857,
"grad_norm": 4.09375,
"learning_rate": 3.3911987316314054e-05,
"loss": 0.5165,
"step": 2925
},
{
"epoch": 1.348342857142857,
"grad_norm": 5.40625,
"learning_rate": 3.360048640340585e-05,
"loss": 0.4876,
"step": 2950
},
{
"epoch": 1.3597714285714286,
"grad_norm": 5.03125,
"learning_rate": 3.3287463528971e-05,
"loss": 0.5796,
"step": 2975
},
{
"epoch": 1.3712,
"grad_norm": 6.375,
"learning_rate": 3.2972974086245376e-05,
"loss": 0.5073,
"step": 3000
},
{
"epoch": 1.3826285714285715,
"grad_norm": 4.90625,
"learning_rate": 3.265707372799208e-05,
"loss": 0.5306,
"step": 3025
},
{
"epoch": 1.3940571428571429,
"grad_norm": 4.46875,
"learning_rate": 3.233981835665301e-05,
"loss": 0.5312,
"step": 3050
},
{
"epoch": 1.4054857142857142,
"grad_norm": 4.78125,
"learning_rate": 3.202126411445624e-05,
"loss": 0.5244,
"step": 3075
},
{
"epoch": 1.4169142857142858,
"grad_norm": 4.03125,
"learning_rate": 3.1701467373480995e-05,
"loss": 0.5403,
"step": 3100
},
{
"epoch": 1.4283428571428571,
"grad_norm": 4.96875,
"learning_rate": 3.1380484725681876e-05,
"loss": 0.5161,
"step": 3125
},
{
"epoch": 1.4397714285714285,
"grad_norm": 4.375,
"learning_rate": 3.105837297287423e-05,
"loss": 0.5414,
"step": 3150
},
{
"epoch": 1.4512,
"grad_norm": 4.46875,
"learning_rate": 3.0735189116682414e-05,
"loss": 0.5432,
"step": 3175
},
{
"epoch": 1.4626285714285714,
"grad_norm": 4.78125,
"learning_rate": 3.0410990348452573e-05,
"loss": 0.5056,
"step": 3200
},
{
"epoch": 1.4740571428571427,
"grad_norm": 5.21875,
"learning_rate": 3.0085834039132033e-05,
"loss": 0.4907,
"step": 3225
},
{
"epoch": 1.4854857142857143,
"grad_norm": 3.828125,
"learning_rate": 2.975977772911671e-05,
"loss": 0.4958,
"step": 3250
},
{
"epoch": 1.4969142857142856,
"grad_norm": 4.5625,
"learning_rate": 2.9432879118068722e-05,
"loss": 0.545,
"step": 3275
},
{
"epoch": 1.508342857142857,
"grad_norm": 4.03125,
"learning_rate": 2.9105196054705663e-05,
"loss": 0.4967,
"step": 3300
},
{
"epoch": 1.5197714285714286,
"grad_norm": 4.25,
"learning_rate": 2.8776786526563575e-05,
"loss": 0.5128,
"step": 3325
},
{
"epoch": 1.5312000000000001,
"grad_norm": 4.53125,
"learning_rate": 2.8447708649735356e-05,
"loss": 0.529,
"step": 3350
},
{
"epoch": 1.5426285714285715,
"grad_norm": 4.5625,
"learning_rate": 2.8118020658586415e-05,
"loss": 0.5453,
"step": 3375
},
{
"epoch": 1.5540571428571428,
"grad_norm": 4.9375,
"learning_rate": 2.778778089544935e-05,
"loss": 0.5323,
"step": 3400
},
{
"epoch": 1.5654857142857144,
"grad_norm": 5.03125,
"learning_rate": 2.7457047800299585e-05,
"loss": 0.4782,
"step": 3425
},
{
"epoch": 1.5769142857142857,
"grad_norm": 5.90625,
"learning_rate": 2.71258799004137e-05,
"loss": 0.5319,
"step": 3450
},
{
"epoch": 1.588342857142857,
"grad_norm": 3.84375,
"learning_rate": 2.6794335800012293e-05,
"loss": 0.5254,
"step": 3475
},
{
"epoch": 1.5997714285714286,
"grad_norm": 5.65625,
"learning_rate": 2.646247416988926e-05,
"loss": 0.5265,
"step": 3500
},
{
"epoch": 1.6112,
"grad_norm": 4.5,
"learning_rate": 2.6130353737029284e-05,
"loss": 0.5077,
"step": 3525
},
{
"epoch": 1.6226285714285713,
"grad_norm": 6.65625,
"learning_rate": 2.579803327421536e-05,
"loss": 0.5087,
"step": 3550
},
{
"epoch": 1.6340571428571429,
"grad_norm": 5.0,
"learning_rate": 2.5465571589628223e-05,
"loss": 0.4842,
"step": 3575
},
{
"epoch": 1.6454857142857144,
"grad_norm": 5.78125,
"learning_rate": 2.5133027516439554e-05,
"loss": 0.5079,
"step": 3600
},
{
"epoch": 1.6569142857142856,
"grad_norm": 4.3125,
"learning_rate": 2.4800459902400684e-05,
"loss": 0.5259,
"step": 3625
},
{
"epoch": 1.6683428571428571,
"grad_norm": 5.59375,
"learning_rate": 2.446792759942882e-05,
"loss": 0.4971,
"step": 3650
},
{
"epoch": 1.6797714285714287,
"grad_norm": 5.5,
"learning_rate": 2.413548945319239e-05,
"loss": 0.4991,
"step": 3675
},
{
"epoch": 1.6912,
"grad_norm": 4.9375,
"learning_rate": 2.3803204292697704e-05,
"loss": 0.5282,
"step": 3700
},
{
"epoch": 1.7026285714285714,
"grad_norm": 5.78125,
"learning_rate": 2.3471130919878304e-05,
"loss": 0.5017,
"step": 3725
},
{
"epoch": 1.714057142857143,
"grad_norm": 4.96875,
"learning_rate": 2.3139328099189334e-05,
"loss": 0.5094,
"step": 3750
},
{
"epoch": 1.7254857142857143,
"grad_norm": 5.3125,
"learning_rate": 2.280785454720843e-05,
"loss": 0.4696,
"step": 3775
},
{
"epoch": 1.7369142857142856,
"grad_norm": 4.1875,
"learning_rate": 2.2476768922245072e-05,
"loss": 0.4994,
"step": 3800
},
{
"epoch": 1.7483428571428572,
"grad_norm": 4.0625,
"learning_rate": 2.21461298139603e-05,
"loss": 0.5082,
"step": 3825
},
{
"epoch": 1.7597714285714285,
"grad_norm": 4.5625,
"learning_rate": 2.1815995732998584e-05,
"loss": 0.5247,
"step": 3850
},
{
"epoch": 1.7711999999999999,
"grad_norm": 3.671875,
"learning_rate": 2.1486425100633575e-05,
"loss": 0.4679,
"step": 3875
},
{
"epoch": 1.7826285714285715,
"grad_norm": 4.8125,
"learning_rate": 2.1157476238429803e-05,
"loss": 0.4905,
"step": 3900
},
{
"epoch": 1.794057142857143,
"grad_norm": 5.53125,
"learning_rate": 2.082920735792195e-05,
"loss": 0.4998,
"step": 3925
},
{
"epoch": 1.8054857142857141,
"grad_norm": 4.34375,
"learning_rate": 2.0501676550313594e-05,
"loss": 0.4987,
"step": 3950
},
{
"epoch": 1.8169142857142857,
"grad_norm": 4.96875,
"learning_rate": 2.017494177619725e-05,
"loss": 0.4986,
"step": 3975
},
{
"epoch": 1.8283428571428573,
"grad_norm": 4.71875,
"learning_rate": 1.984906085529758e-05,
"loss": 0.4614,
"step": 4000
},
{
"epoch": 1.8397714285714286,
"grad_norm": 5.65625,
"learning_rate": 1.9524091456239417e-05,
"loss": 0.4805,
"step": 4025
},
{
"epoch": 1.8512,
"grad_norm": 4.46875,
"learning_rate": 1.9200091086342634e-05,
"loss": 0.5164,
"step": 4050
},
{
"epoch": 1.8626285714285715,
"grad_norm": 5.40625,
"learning_rate": 1.8877117081445524e-05,
"loss": 0.4836,
"step": 4075
},
{
"epoch": 1.8740571428571429,
"grad_norm": 4.9375,
"learning_rate": 1.8555226595758485e-05,
"loss": 0.4719,
"step": 4100
},
{
"epoch": 1.8854857142857142,
"grad_norm": 4.90625,
"learning_rate": 1.8234476591749943e-05,
"loss": 0.4785,
"step": 4125
},
{
"epoch": 1.8969142857142858,
"grad_norm": 5.15625,
"learning_rate": 1.7914923830066074e-05,
"loss": 0.506,
"step": 4150
},
{
"epoch": 1.9083428571428571,
"grad_norm": 4.65625,
"learning_rate": 1.7596624859486347e-05,
"loss": 0.5037,
"step": 4175
},
{
"epoch": 1.9197714285714285,
"grad_norm": 5.03125,
"learning_rate": 1.7279636006916537e-05,
"loss": 0.5128,
"step": 4200
},
{
"epoch": 1.9312,
"grad_norm": 4.625,
"learning_rate": 1.6964013367420966e-05,
"loss": 0.4839,
"step": 4225
},
{
"epoch": 1.9426285714285716,
"grad_norm": 5.5625,
"learning_rate": 1.6649812794295782e-05,
"loss": 0.4991,
"step": 4250
},
{
"epoch": 1.9540571428571427,
"grad_norm": 4.125,
"learning_rate": 1.633708988918509e-05,
"loss": 0.468,
"step": 4275
},
{
"epoch": 1.9654857142857143,
"grad_norm": 4.53125,
"learning_rate": 1.602589999224149e-05,
"loss": 0.4796,
"step": 4300
},
{
"epoch": 1.9769142857142858,
"grad_norm": 4.6875,
"learning_rate": 1.5716298172332995e-05,
"loss": 0.4857,
"step": 4325
},
{
"epoch": 1.9883428571428572,
"grad_norm": 5.65625,
"learning_rate": 1.540833921729792e-05,
"loss": 0.4827,
"step": 4350
},
{
"epoch": 1.9997714285714285,
"grad_norm": 5.125,
"learning_rate": 1.5102077624249497e-05,
"loss": 0.4665,
"step": 4375
},
{
"epoch": 2.0109714285714286,
"grad_norm": 4.53125,
"learning_rate": 1.4797567589931924e-05,
"loss": 0.4299,
"step": 4400
},
{
"epoch": 2.0224,
"grad_norm": 4.3125,
"learning_rate": 1.4494863001129638e-05,
"loss": 0.4671,
"step": 4425
},
{
"epoch": 2.0338285714285713,
"grad_norm": 4.90625,
"learning_rate": 1.4194017425131323e-05,
"loss": 0.387,
"step": 4450
},
{
"epoch": 2.045257142857143,
"grad_norm": 4.125,
"learning_rate": 1.3895084100250569e-05,
"loss": 0.3808,
"step": 4475
},
{
"epoch": 2.0566857142857145,
"grad_norm": 4.0625,
"learning_rate": 1.3598115926404683e-05,
"loss": 0.3955,
"step": 4500
},
{
"epoch": 2.0681142857142856,
"grad_norm": 4.9375,
"learning_rate": 1.330316545575338e-05,
"loss": 0.3914,
"step": 4525
},
{
"epoch": 2.079542857142857,
"grad_norm": 4.53125,
"learning_rate": 1.3010284883399076e-05,
"loss": 0.3912,
"step": 4550
},
{
"epoch": 2.0909714285714287,
"grad_norm": 4.78125,
"learning_rate": 1.2719526038150308e-05,
"loss": 0.4035,
"step": 4575
},
{
"epoch": 2.1024,
"grad_norm": 4.34375,
"learning_rate": 1.2430940373349945e-05,
"loss": 0.4366,
"step": 4600
},
{
"epoch": 2.1138285714285714,
"grad_norm": 4.625,
"learning_rate": 1.2144578957769995e-05,
"loss": 0.4067,
"step": 4625
},
{
"epoch": 2.125257142857143,
"grad_norm": 4.3125,
"learning_rate": 1.1860492466574222e-05,
"loss": 0.4404,
"step": 4650
},
{
"epoch": 2.136685714285714,
"grad_norm": 3.921875,
"learning_rate": 1.157873117235067e-05,
"loss": 0.4218,
"step": 4675
},
{
"epoch": 2.1481142857142856,
"grad_norm": 5.28125,
"learning_rate": 1.129934493621527e-05,
"loss": 0.4129,
"step": 4700
},
{
"epoch": 2.159542857142857,
"grad_norm": 3.984375,
"learning_rate": 1.1022383198988265e-05,
"loss": 0.4041,
"step": 4725
},
{
"epoch": 2.1709714285714288,
"grad_norm": 4.40625,
"learning_rate": 1.074789497244512e-05,
"loss": 0.4286,
"step": 4750
},
{
"epoch": 2.1824,
"grad_norm": 4.5625,
"learning_rate": 1.0475928830643245e-05,
"loss": 0.3916,
"step": 4775
},
{
"epoch": 2.1938285714285715,
"grad_norm": 4.3125,
"learning_rate": 1.020653290132624e-05,
"loss": 0.4289,
"step": 4800
},
{
"epoch": 2.205257142857143,
"grad_norm": 3.78125,
"learning_rate": 9.939754857407063e-06,
"loss": 0.4146,
"step": 4825
},
{
"epoch": 2.216685714285714,
"grad_norm": 4.6875,
"learning_rate": 9.675641908531774e-06,
"loss": 0.425,
"step": 4850
},
{
"epoch": 2.2281142857142857,
"grad_norm": 4.625,
"learning_rate": 9.414240792725179e-06,
"loss": 0.432,
"step": 4875
},
{
"epoch": 2.2395428571428573,
"grad_norm": 4.6875,
"learning_rate": 9.155597768119978e-06,
"loss": 0.4114,
"step": 4900
},
{
"epoch": 2.2509714285714284,
"grad_norm": 4.28125,
"learning_rate": 8.899758604770809e-06,
"loss": 0.4102,
"step": 4925
},
{
"epoch": 2.2624,
"grad_norm": 5.40625,
"learning_rate": 8.646768576554665e-06,
"loss": 0.4174,
"step": 4950
},
{
"epoch": 2.2738285714285715,
"grad_norm": 3.984375,
"learning_rate": 8.396672453159163e-06,
"loss": 0.4372,
"step": 4975
},
{
"epoch": 2.2852571428571427,
"grad_norm": 5.75,
"learning_rate": 8.149514492159978e-06,
"loss": 0.3993,
"step": 5000
},
{
"epoch": 2.296685714285714,
"grad_norm": 5.15625,
"learning_rate": 7.90533843118889e-06,
"loss": 0.4007,
"step": 5025
},
{
"epoch": 2.308114285714286,
"grad_norm": 4.375,
"learning_rate": 7.66418748019396e-06,
"loss": 0.4236,
"step": 5050
},
{
"epoch": 2.3195428571428574,
"grad_norm": 4.5,
"learning_rate": 7.4261043137928896e-06,
"loss": 0.4146,
"step": 5075
},
{
"epoch": 2.3309714285714285,
"grad_norm": 5.375,
"learning_rate": 7.191131063721316e-06,
"loss": 0.4369,
"step": 5100
},
{
"epoch": 2.3424,
"grad_norm": 4.15625,
"learning_rate": 6.959309311377038e-06,
"loss": 0.4233,
"step": 5125
},
{
"epoch": 2.3538285714285716,
"grad_norm": 4.375,
"learning_rate": 6.7306800804616824e-06,
"loss": 0.4191,
"step": 5150
},
{
"epoch": 2.3652571428571427,
"grad_norm": 5.625,
"learning_rate": 6.505283829721068e-06,
"loss": 0.4135,
"step": 5175
},
{
"epoch": 2.3766857142857143,
"grad_norm": 4.0625,
"learning_rate": 6.283160445785532e-06,
"loss": 0.3916,
"step": 5200
},
{
"epoch": 2.388114285714286,
"grad_norm": 4.5625,
"learning_rate": 6.0643492361115026e-06,
"loss": 0.3923,
"step": 5225
},
{
"epoch": 2.399542857142857,
"grad_norm": 4.84375,
"learning_rate": 5.848888922025553e-06,
"loss": 0.3973,
"step": 5250
},
{
"epoch": 2.4109714285714285,
"grad_norm": 5.03125,
"learning_rate": 5.636817631872185e-06,
"loss": 0.4032,
"step": 5275
},
{
"epoch": 2.4224,
"grad_norm": 5.0625,
"learning_rate": 5.428172894266576e-06,
"loss": 0.4206,
"step": 5300
},
{
"epoch": 2.4338285714285712,
"grad_norm": 4.59375,
"learning_rate": 5.222991631453405e-06,
"loss": 0.436,
"step": 5325
},
{
"epoch": 2.445257142857143,
"grad_norm": 4.78125,
"learning_rate": 5.0213101527730345e-06,
"loss": 0.4113,
"step": 5350
},
{
"epoch": 2.4566857142857144,
"grad_norm": 4.4375,
"learning_rate": 4.823164148236078e-06,
"loss": 0.3966,
"step": 5375
},
{
"epoch": 2.468114285714286,
"grad_norm": 3.78125,
"learning_rate": 4.628588682207677e-06,
"loss": 0.3977,
"step": 5400
},
{
"epoch": 2.479542857142857,
"grad_norm": 4.09375,
"learning_rate": 4.4376181872024e-06,
"loss": 0.4125,
"step": 5425
},
{
"epoch": 2.4909714285714286,
"grad_norm": 4.8125,
"learning_rate": 4.250286457790961e-06,
"loss": 0.4254,
"step": 5450
},
{
"epoch": 2.5023999999999997,
"grad_norm": 4.34375,
"learning_rate": 4.066626644619934e-06,
"loss": 0.4207,
"step": 5475
},
{
"epoch": 2.5138285714285713,
"grad_norm": 4.75,
"learning_rate": 3.886671248545243e-06,
"loss": 0.4104,
"step": 5500
},
{
"epoch": 2.525257142857143,
"grad_norm": 5.0,
"learning_rate": 3.7104521148808054e-06,
"loss": 0.4128,
"step": 5525
},
{
"epoch": 2.5366857142857144,
"grad_norm": 4.21875,
"learning_rate": 3.5380004277630994e-06,
"loss": 0.417,
"step": 5550
},
{
"epoch": 2.5481142857142856,
"grad_norm": 3.421875,
"learning_rate": 3.3693467046327117e-06,
"loss": 0.4066,
"step": 5575
},
{
"epoch": 2.559542857142857,
"grad_norm": 4.65625,
"learning_rate": 3.2045207908339702e-06,
"loss": 0.4027,
"step": 5600
},
{
"epoch": 2.5709714285714287,
"grad_norm": 5.1875,
"learning_rate": 3.0435518543333765e-06,
"loss": 0.4211,
"step": 5625
},
{
"epoch": 2.5824,
"grad_norm": 3.1875,
"learning_rate": 2.8864683805580133e-06,
"loss": 0.425,
"step": 5650
},
{
"epoch": 2.5938285714285714,
"grad_norm": 4.28125,
"learning_rate": 2.733298167354703e-06,
"loss": 0.4216,
"step": 5675
},
{
"epoch": 2.605257142857143,
"grad_norm": 4.53125,
"learning_rate": 2.5840683200708067e-06,
"loss": 0.4378,
"step": 5700
},
{
"epoch": 2.6166857142857145,
"grad_norm": 4.3125,
"learning_rate": 2.4388052467576308e-06,
"loss": 0.4013,
"step": 5725
},
{
"epoch": 2.6281142857142856,
"grad_norm": 4.5625,
"learning_rate": 2.297534653497191e-06,
"loss": 0.435,
"step": 5750
},
{
"epoch": 2.639542857142857,
"grad_norm": 4.3125,
"learning_rate": 2.160281539853201e-06,
"loss": 0.4214,
"step": 5775
},
{
"epoch": 2.6509714285714283,
"grad_norm": 4.75,
"learning_rate": 2.027070194447081e-06,
"loss": 0.3948,
"step": 5800
},
{
"epoch": 2.6624,
"grad_norm": 4.03125,
"learning_rate": 1.897924190659822e-06,
"loss": 0.3921,
"step": 5825
},
{
"epoch": 2.6738285714285714,
"grad_norm": 4.0625,
"learning_rate": 1.7728663824603586e-06,
"loss": 0.3567,
"step": 5850
},
{
"epoch": 2.685257142857143,
"grad_norm": 4.03125,
"learning_rate": 1.6519189003612767e-06,
"loss": 0.3819,
"step": 5875
},
{
"epoch": 2.696685714285714,
"grad_norm": 4.59375,
"learning_rate": 1.535103147502584e-06,
"loss": 0.3772,
"step": 5900
},
{
"epoch": 2.7081142857142857,
"grad_norm": 4.28125,
"learning_rate": 1.422439795864114e-06,
"loss": 0.4203,
"step": 5925
},
{
"epoch": 2.7195428571428573,
"grad_norm": 4.125,
"learning_rate": 1.3139487826073937e-06,
"loss": 0.3637,
"step": 5950
},
{
"epoch": 2.7309714285714284,
"grad_norm": 4.3125,
"learning_rate": 1.2096493065475223e-06,
"loss": 0.4062,
"step": 5975
},
{
"epoch": 2.7424,
"grad_norm": 6.4375,
"learning_rate": 1.1095598247556793e-06,
"loss": 0.3903,
"step": 6000
},
{
"epoch": 2.7538285714285715,
"grad_norm": 3.890625,
"learning_rate": 1.0136980492929605e-06,
"loss": 0.388,
"step": 6025
},
{
"epoch": 2.765257142857143,
"grad_norm": 4.09375,
"learning_rate": 9.220809440759592e-07,
"loss": 0.3986,
"step": 6050
},
{
"epoch": 2.776685714285714,
"grad_norm": 3.46875,
"learning_rate": 8.347247218748411e-07,
"loss": 0.393,
"step": 6075
},
{
"epoch": 2.7881142857142858,
"grad_norm": 4.90625,
"learning_rate": 7.516448414442739e-07,
"loss": 0.3973,
"step": 6100
},
{
"epoch": 2.799542857142857,
"grad_norm": 4.5,
"learning_rate": 6.728560047878058e-07,
"loss": 0.3999,
"step": 6125
},
{
"epoch": 2.8109714285714285,
"grad_norm": 3.921875,
"learning_rate": 5.98372154556176e-07,
"loss": 0.3963,
"step": 6150
},
{
"epoch": 2.8224,
"grad_norm": 5.78125,
"learning_rate": 5.282064715799895e-07,
"loss": 0.4023,
"step": 6175
},
{
"epoch": 2.8338285714285716,
"grad_norm": 5.59375,
"learning_rate": 4.6237137253721527e-07,
"loss": 0.381,
"step": 6200
},
{
"epoch": 2.8452571428571427,
"grad_norm": 4.0625,
"learning_rate": 4.008785077558752e-07,
"loss": 0.4355,
"step": 6225
},
{
"epoch": 2.8566857142857143,
"grad_norm": 4.59375,
"learning_rate": 3.4373875915241493e-07,
"loss": 0.4114,
"step": 6250
},
{
"epoch": 2.868114285714286,
"grad_norm": 5.96875,
"learning_rate": 2.909622383059835e-07,
"loss": 0.397,
"step": 6275
},
{
"epoch": 2.879542857142857,
"grad_norm": 5.34375,
"learning_rate": 2.4255828466907825e-07,
"loss": 0.3944,
"step": 6300
},
{
"epoch": 2.8909714285714285,
"grad_norm": 3.640625,
"learning_rate": 1.985354639148229e-07,
"loss": 0.373,
"step": 6325
},
{
"epoch": 2.9024,
"grad_norm": 4.5,
"learning_rate": 1.5890156642114075e-07,
"loss": 0.425,
"step": 6350
},
{
"epoch": 2.9138285714285717,
"grad_norm": 4.4375,
"learning_rate": 1.2366360589217974e-07,
"loss": 0.4118,
"step": 6375
},
{
"epoch": 2.925257142857143,
"grad_norm": 5.0625,
"learning_rate": 9.282781811714159e-08,
"loss": 0.3736,
"step": 6400
},
{
"epoch": 2.9366857142857143,
"grad_norm": 4.71875,
"learning_rate": 6.63996598667671e-08,
"loss": 0.4139,
"step": 6425
},
{
"epoch": 2.9481142857142855,
"grad_norm": 4.53125,
"learning_rate": 4.438380792772534e-08,
"loss": 0.4324,
"step": 6450
},
{
"epoch": 2.959542857142857,
"grad_norm": 4.71875,
"learning_rate": 2.6784158274964498e-08,
"loss": 0.4182,
"step": 6475
},
{
"epoch": 2.9709714285714286,
"grad_norm": 4.53125,
"learning_rate": 1.3603825382293988e-08,
"loss": 0.4026,
"step": 6500
},
{
"epoch": 2.9824,
"grad_norm": 4.0,
"learning_rate": 4.845141671239173e-09,
"loss": 0.3775,
"step": 6525
},
{
"epoch": 2.9938285714285713,
"grad_norm": 4.25,
"learning_rate": 5.096570982743298e-10,
"loss": 0.4082,
"step": 6550
}
],
"logging_steps": 25,
"max_steps": 6561,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6747303870715658e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
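
The "log_history" array above can be inspected programmatically. A minimal sketch follows, assuming Python with matplotlib installed and that this file has been downloaded locally as trainer_state.json (both are assumptions, not part of the checkpoint itself); it plots the logged training loss and learning-rate schedule against the step counter.

import json
import matplotlib.pyplot as plt

# Load the trainer state exported alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry carries epoch, step, loss, learning_rate, grad_norm.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()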