Training in progress, step 2000, checkpoint
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005,
"grad_norm": 89.5,
"learning_rate": 9.000000000000001e-07,
"loss": 14.6125,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 89.5,
"learning_rate": 1.9000000000000002e-06,
"loss": 14.3688,
"step": 20
},
{
"epoch": 0.015,
"grad_norm": 115.5,
"learning_rate": 2.9e-06,
"loss": 13.9812,
"step": 30
},
{
"epoch": 0.02,
"grad_norm": 105.0,
"learning_rate": 3.900000000000001e-06,
"loss": 14.3688,
"step": 40
},
{
"epoch": 0.025,
"grad_norm": 158.0,
"learning_rate": 4.9000000000000005e-06,
"loss": 14.2438,
"step": 50
},
{
"epoch": 0.03,
"grad_norm": 131.0,
"learning_rate": 5.9e-06,
"loss": 14.15,
"step": 60
},
{
"epoch": 0.035,
"grad_norm": 109.0,
"learning_rate": 6.9e-06,
"loss": 14.1312,
"step": 70
},
{
"epoch": 0.04,
"grad_norm": 97.5,
"learning_rate": 7.9e-06,
"loss": 13.9062,
"step": 80
},
{
"epoch": 0.045,
"grad_norm": 82.0,
"learning_rate": 8.900000000000001e-06,
"loss": 13.3375,
"step": 90
},
{
"epoch": 0.05,
"grad_norm": 97.0,
"learning_rate": 9.9e-06,
"loss": 13.0437,
"step": 100
},
{
"epoch": 0.055,
"grad_norm": 113.5,
"learning_rate": 1.0900000000000002e-05,
"loss": 12.5875,
"step": 110
},
{
"epoch": 0.06,
"grad_norm": 98.0,
"learning_rate": 1.1900000000000001e-05,
"loss": 11.3625,
"step": 120
},
{
"epoch": 0.065,
"grad_norm": 106.5,
"learning_rate": 1.2900000000000002e-05,
"loss": 10.225,
"step": 130
},
{
"epoch": 0.07,
"grad_norm": 90.0,
"learning_rate": 1.39e-05,
"loss": 9.4938,
"step": 140
},
{
"epoch": 0.075,
"grad_norm": 88.5,
"learning_rate": 1.4900000000000001e-05,
"loss": 9.025,
"step": 150
},
{
"epoch": 0.08,
"grad_norm": 89.5,
"learning_rate": 1.5900000000000004e-05,
"loss": 7.65,
"step": 160
},
{
"epoch": 0.085,
"grad_norm": 84.5,
"learning_rate": 1.69e-05,
"loss": 7.3156,
"step": 170
},
{
"epoch": 0.09,
"grad_norm": 84.0,
"learning_rate": 1.79e-05,
"loss": 6.5312,
"step": 180
},
{
"epoch": 0.095,
"grad_norm": 103.0,
"learning_rate": 1.8900000000000002e-05,
"loss": 5.2281,
"step": 190
},
{
"epoch": 0.1,
"grad_norm": 66.0,
"learning_rate": 1.9900000000000003e-05,
"loss": 4.5391,
"step": 200
},
{
"epoch": 0.105,
"grad_norm": 110.5,
"learning_rate": 1.9998766324816606e-05,
"loss": 3.8406,
"step": 210
},
{
"epoch": 0.11,
"grad_norm": 25.875,
"learning_rate": 1.9994502159417576e-05,
"loss": 3.5422,
"step": 220
},
{
"epoch": 0.115,
"grad_norm": 16.375,
"learning_rate": 1.9987193571841865e-05,
"loss": 3.6594,
"step": 230
},
{
"epoch": 0.12,
"grad_norm": 31.0,
"learning_rate": 1.9976842788356054e-05,
"loss": 3.3953,
"step": 240
},
{
"epoch": 0.125,
"grad_norm": 20.75,
"learning_rate": 1.9963452961909065e-05,
"loss": 3.25,
"step": 250
},
{
"epoch": 0.13,
"grad_norm": 25.75,
"learning_rate": 1.9947028171171742e-05,
"loss": 3.1766,
"step": 260
},
{
"epoch": 0.135,
"grad_norm": 15.625,
"learning_rate": 1.9927573419294456e-05,
"loss": 2.8813,
"step": 270
},
{
"epoch": 0.14,
"grad_norm": 11.3125,
"learning_rate": 1.990509463238309e-05,
"loss": 2.9109,
"step": 280
},
{
"epoch": 0.145,
"grad_norm": 14.3125,
"learning_rate": 1.9879598657693894e-05,
"loss": 2.85,
"step": 290
},
{
"epoch": 0.15,
"grad_norm": 10.8125,
"learning_rate": 1.985109326154774e-05,
"loss": 2.7719,
"step": 300
},
{
"epoch": 0.155,
"grad_norm": 8.8125,
"learning_rate": 1.981958712696444e-05,
"loss": 2.4945,
"step": 310
},
{
"epoch": 0.16,
"grad_norm": 11.5,
"learning_rate": 1.9785089851017788e-05,
"loss": 2.6883,
"step": 320
},
{
"epoch": 0.165,
"grad_norm": 11.5,
"learning_rate": 1.974761194191222e-05,
"loss": 2.4984,
"step": 330
},
{
"epoch": 0.17,
"grad_norm": 10.0625,
"learning_rate": 1.970716481578191e-05,
"loss": 2.3766,
"step": 340
},
{
"epoch": 0.175,
"grad_norm": 10.0,
"learning_rate": 1.9663760793213297e-05,
"loss": 2.3,
"step": 350
},
{
"epoch": 0.18,
"grad_norm": 5.96875,
"learning_rate": 1.9617413095492114e-05,
"loss": 2.1961,
"step": 360
},
{
"epoch": 0.185,
"grad_norm": 9.9375,
"learning_rate": 1.956813584057608e-05,
"loss": 2.6688,
"step": 370
},
{
"epoch": 0.19,
"grad_norm": 6.46875,
"learning_rate": 1.9515944038794384e-05,
"loss": 2.1609,
"step": 380
},
{
"epoch": 0.195,
"grad_norm": 9.5,
"learning_rate": 1.9460853588275454e-05,
"loss": 2.2461,
"step": 390
},
{
"epoch": 0.2,
"grad_norm": 9.5,
"learning_rate": 1.940288127010419e-05,
"loss": 2.1758,
"step": 400
},
{
"epoch": 0.205,
"grad_norm": 8.1875,
"learning_rate": 1.9342044743210295e-05,
"loss": 2.0234,
"step": 410
},
{
"epoch": 0.21,
"grad_norm": 8.0,
"learning_rate": 1.92783625389892e-05,
"loss": 2.0633,
"step": 420
},
{
"epoch": 0.215,
"grad_norm": 7.84375,
"learning_rate": 1.9211854055657216e-05,
"loss": 1.9094,
"step": 430
},
{
"epoch": 0.22,
"grad_norm": 6.90625,
"learning_rate": 1.9142539552342638e-05,
"loss": 2.0172,
"step": 440
},
{
"epoch": 0.225,
"grad_norm": 6.4375,
"learning_rate": 1.907044014291465e-05,
"loss": 1.6531,
"step": 450
},
{
"epoch": 0.23,
"grad_norm": 5.5,
"learning_rate": 1.8995577789551806e-05,
"loss": 1.8789,
"step": 460
},
{
"epoch": 0.235,
"grad_norm": 8.5625,
"learning_rate": 1.8917975296052143e-05,
"loss": 1.9883,
"step": 470
},
{
"epoch": 0.24,
"grad_norm": 12.125,
"learning_rate": 1.8837656300886937e-05,
"loss": 1.8656,
"step": 480
},
{
"epoch": 0.245,
"grad_norm": 7.4375,
"learning_rate": 1.875464527000018e-05,
"loss": 1.7305,
"step": 490
},
{
"epoch": 0.25,
"grad_norm": 5.8125,
"learning_rate": 1.866896748935603e-05,
"loss": 1.8305,
"step": 500
},
{
"epoch": 0.255,
"grad_norm": 6.9375,
"learning_rate": 1.858064905723645e-05,
"loss": 1.5922,
"step": 510
},
{
"epoch": 0.26,
"grad_norm": 7.8125,
"learning_rate": 1.8489716876291417e-05,
"loss": 1.7203,
"step": 520
},
{
"epoch": 0.265,
"grad_norm": 3.65625,
"learning_rate": 1.8396198645344133e-05,
"loss": 1.4547,
"step": 530
},
{
"epoch": 0.27,
"grad_norm": 3.734375,
"learning_rate": 1.8300122850953678e-05,
"loss": 1.5602,
"step": 540
},
{
"epoch": 0.275,
"grad_norm": 4.46875,
"learning_rate": 1.8201518758737726e-05,
"loss": 1.4719,
"step": 550
},
{
"epoch": 0.28,
"grad_norm": 2.890625,
"learning_rate": 1.8100416404457962e-05,
"loss": 1.6156,
"step": 560
},
{
"epoch": 0.285,
"grad_norm": 3.125,
"learning_rate": 1.799684658487091e-05,
"loss": 1.5406,
"step": 570
},
{
"epoch": 0.29,
"grad_norm": 4.0625,
"learning_rate": 1.789084084834691e-05,
"loss": 1.4297,
"step": 580
},
{
"epoch": 0.295,
"grad_norm": 4.125,
"learning_rate": 1.778243148526021e-05,
"loss": 1.418,
"step": 590
},
{
"epoch": 0.3,
"grad_norm": 3.609375,
"learning_rate": 1.7671651518153e-05,
"loss": 1.4602,
"step": 600
},
{
"epoch": 0.305,
"grad_norm": 3.640625,
"learning_rate": 1.7558534691676396e-05,
"loss": 1.4289,
"step": 610
},
{
"epoch": 0.31,
"grad_norm": 5.0,
"learning_rate": 1.744311546231154e-05,
"loss": 1.4422,
"step": 620
},
{
"epoch": 0.315,
"grad_norm": 3.21875,
"learning_rate": 1.732542898787379e-05,
"loss": 1.3836,
"step": 630
},
{
"epoch": 0.32,
"grad_norm": 3.71875,
"learning_rate": 1.7205511116803306e-05,
"loss": 1.4781,
"step": 640
},
{
"epoch": 0.325,
"grad_norm": 3.21875,
"learning_rate": 1.708339837724529e-05,
"loss": 1.4258,
"step": 650
},
{
"epoch": 0.33,
"grad_norm": 2.796875,
"learning_rate": 1.6959127965923144e-05,
"loss": 1.2883,
"step": 660
},
{
"epoch": 0.335,
"grad_norm": 2.65625,
"learning_rate": 1.6832737736807994e-05,
"loss": 1.5047,
"step": 670
},
{
"epoch": 0.34,
"grad_norm": 2.859375,
"learning_rate": 1.6704266189587992e-05,
"loss": 1.3414,
"step": 680
},
{
"epoch": 0.345,
"grad_norm": 4.0625,
"learning_rate": 1.657375245794096e-05,
"loss": 1.3578,
"step": 690
},
{
"epoch": 0.35,
"grad_norm": 2.609375,
"learning_rate": 1.644123629761387e-05,
"loss": 1.3191,
"step": 700
},
{
"epoch": 0.355,
"grad_norm": 2.65625,
"learning_rate": 1.6306758074312866e-05,
"loss": 1.3449,
"step": 710
},
{
"epoch": 0.36,
"grad_norm": 3.53125,
"learning_rate": 1.617035875140749e-05,
"loss": 1.3891,
"step": 720
},
{
"epoch": 0.365,
"grad_norm": 2.28125,
"learning_rate": 1.6032079877452825e-05,
"loss": 1.2934,
"step": 730
},
{
"epoch": 0.37,
"grad_norm": 2.5625,
"learning_rate": 1.5891963573533424e-05,
"loss": 1.3562,
"step": 740
},
{
"epoch": 0.375,
"grad_norm": 3.015625,
"learning_rate": 1.575005252043279e-05,
"loss": 1.2852,
"step": 750
},
{
"epoch": 0.38,
"grad_norm": 2.65625,
"learning_rate": 1.560638994563242e-05,
"loss": 1.3336,
"step": 760
},
{
"epoch": 0.385,
"grad_norm": 2.15625,
"learning_rate": 1.5461019610144292e-05,
"loss": 1.2172,
"step": 770
},
{
"epoch": 0.39,
"grad_norm": 2.90625,
"learning_rate": 1.531398579518083e-05,
"loss": 1.1879,
"step": 780
},
{
"epoch": 0.395,
"grad_norm": 2.1875,
"learning_rate": 1.516533328866642e-05,
"loss": 1.2086,
"step": 790
},
{
"epoch": 0.4,
"grad_norm": 2.28125,
"learning_rate": 1.5015107371594576e-05,
"loss": 1.2586,
"step": 800
},
{
"epoch": 0.405,
"grad_norm": 2.46875,
"learning_rate": 1.4863353804234906e-05,
"loss": 1.2422,
"step": 810
},
{
"epoch": 0.41,
"grad_norm": 2.21875,
"learning_rate": 1.47101188121941e-05,
"loss": 1.2277,
"step": 820
},
{
"epoch": 0.415,
"grad_norm": 2.890625,
"learning_rate": 1.4555449072335157e-05,
"loss": 1.257,
"step": 830
},
{
"epoch": 0.42,
"grad_norm": 2.734375,
"learning_rate": 1.4399391698559153e-05,
"loss": 1.2312,
"step": 840
},
{
"epoch": 0.425,
"grad_norm": 3.390625,
"learning_rate": 1.4241994227453902e-05,
"loss": 1.266,
"step": 850
},
{
"epoch": 0.43,
"grad_norm": 2.375,
"learning_rate": 1.408330460381385e-05,
"loss": 1.2711,
"step": 860
},
{
"epoch": 0.435,
"grad_norm": 2.109375,
"learning_rate": 1.3923371166035615e-05,
"loss": 1.2488,
"step": 870
},
{
"epoch": 0.44,
"grad_norm": 1.9140625,
"learning_rate": 1.3762242631393656e-05,
"loss": 1.3,
"step": 880
},
{
"epoch": 0.445,
"grad_norm": 3.109375,
"learning_rate": 1.3599968081200515e-05,
"loss": 1.2879,
"step": 890
},
{
"epoch": 0.45,
"grad_norm": 4.21875,
"learning_rate": 1.3436596945856164e-05,
"loss": 1.1906,
"step": 900
},
{
"epoch": 0.455,
"grad_norm": 2.640625,
"learning_rate": 1.327217898979104e-05,
"loss": 1.1715,
"step": 910
},
{
"epoch": 0.46,
"grad_norm": 3.203125,
"learning_rate": 1.310676429630732e-05,
"loss": 1.2203,
"step": 920
},
{
"epoch": 0.465,
"grad_norm": 2.609375,
"learning_rate": 1.294040325232304e-05,
"loss": 1.102,
"step": 930
},
{
"epoch": 0.47,
"grad_norm": 2.796875,
"learning_rate": 1.2773146533023782e-05,
"loss": 1.2973,
"step": 940
},
{
"epoch": 0.475,
"grad_norm": 2.9375,
"learning_rate": 1.2605045086426487e-05,
"loss": 1.2621,
"step": 950
},
{
"epoch": 0.48,
"grad_norm": 2.265625,
"learning_rate": 1.2436150117860226e-05,
"loss": 1.2133,
"step": 960
},
{
"epoch": 0.485,
"grad_norm": 3.078125,
"learning_rate": 1.2266513074368552e-05,
"loss": 1.3336,
"step": 970
},
{
"epoch": 0.49,
"grad_norm": 2.453125,
"learning_rate": 1.2096185629038219e-05,
"loss": 1.1523,
"step": 980
},
{
"epoch": 0.495,
"grad_norm": 2.28125,
"learning_rate": 1.1925219665259076e-05,
"loss": 1.2375,
"step": 990
},
{
"epoch": 0.5,
"grad_norm": 2.75,
"learning_rate": 1.1753667260919872e-05,
"loss": 1.2437,
"step": 1000
},
{
"epoch": 0.505,
"grad_norm": 2.46875,
"learning_rate": 1.1581580672544839e-05,
"loss": 1.2043,
"step": 1010
},
{
"epoch": 0.51,
"grad_norm": 2.015625,
"learning_rate": 1.1409012319375828e-05,
"loss": 1.1527,
"step": 1020
},
{
"epoch": 0.515,
"grad_norm": 2.515625,
"learning_rate": 1.1236014767404929e-05,
"loss": 1.2484,
"step": 1030
},
{
"epoch": 0.52,
"grad_norm": 2.015625,
"learning_rate": 1.1062640713362333e-05,
"loss": 1.1777,
"step": 1040
},
{
"epoch": 0.525,
"grad_norm": 2.421875,
"learning_rate": 1.0888942968664417e-05,
"loss": 1.1758,
"step": 1050
},
{
"epoch": 0.53,
"grad_norm": 2.171875,
"learning_rate": 1.071497444332686e-05,
"loss": 1.1059,
"step": 1060
},
{
"epoch": 0.535,
"grad_norm": 2.96875,
"learning_rate": 1.0540788129847757e-05,
"loss": 1.1984,
"step": 1070
},
{
"epoch": 0.54,
"grad_norm": 3.1875,
"learning_rate": 1.0366437087065564e-05,
"loss": 1.2984,
"step": 1080
},
{
"epoch": 0.545,
"grad_norm": 2.34375,
"learning_rate": 1.01919744239969e-05,
"loss": 1.1457,
"step": 1090
},
{
"epoch": 0.55,
"grad_norm": 3.5,
"learning_rate": 1.0017453283658984e-05,
"loss": 1.2277,
"step": 1100
},
{
"epoch": 0.555,
"grad_norm": 2.21875,
"learning_rate": 9.842926826881796e-06,
"loss": 1.1781,
"step": 1110
},
{
"epoch": 0.56,
"grad_norm": 1.9375,
"learning_rate": 9.668448216114739e-06,
"loss": 1.1254,
"step": 1120
},
{
"epoch": 0.565,
"grad_norm": 2.578125,
"learning_rate": 9.494070599232868e-06,
"loss": 1.134,
"step": 1130
},
{
"epoch": 0.57,
"grad_norm": 2.25,
"learning_rate": 9.319847093347522e-06,
"loss": 1.1543,
"step": 1140
},
{
"epoch": 0.575,
"grad_norm": 2.234375,
"learning_rate": 9.145830768626326e-06,
"loss": 1.2086,
"step": 1150
},
{
"epoch": 0.58,
"grad_norm": 2.234375,
"learning_rate": 8.972074632127533e-06,
"loss": 1.0461,
"step": 1160
},
{
"epoch": 0.585,
"grad_norm": 2.28125,
"learning_rate": 8.79863161165353e-06,
"loss": 1.1824,
"step": 1170
},
{
"epoch": 0.59,
"grad_norm": 2.8125,
"learning_rate": 8.625554539628536e-06,
"loss": 1.1781,
"step": 1180
},
{
"epoch": 0.595,
"grad_norm": 3.421875,
"learning_rate": 8.452896137005322e-06,
"loss": 1.2773,
"step": 1190
},
{
"epoch": 0.6,
"grad_norm": 3.234375,
"learning_rate": 8.280708997205904e-06,
"loss": 1.1824,
"step": 1200
},
{
"epoch": 0.605,
"grad_norm": 2.328125,
"learning_rate": 8.109045570101086e-06,
"loss": 1.2672,
"step": 1210
},
{
"epoch": 0.61,
"grad_norm": 4.40625,
"learning_rate": 7.937958146033706e-06,
"loss": 1.1289,
"step": 1220
},
{
"epoch": 0.615,
"grad_norm": 2.5,
"learning_rate": 7.767498839890489e-06,
"loss": 1.1691,
"step": 1230
},
{
"epoch": 0.62,
"grad_norm": 2.5,
"learning_rate": 7.597719575227364e-06,
"loss": 1.0816,
"step": 1240
},
{
"epoch": 0.625,
"grad_norm": 1.9296875,
"learning_rate": 7.428672068453041e-06,
"loss": 1.0848,
"step": 1250
},
{
"epoch": 0.63,
"grad_norm": 2.28125,
"learning_rate": 7.260407813075676e-06,
"loss": 1.025,
"step": 1260
},
{
"epoch": 0.635,
"grad_norm": 3.0,
"learning_rate": 7.092978064017475e-06,
"loss": 1.2508,
"step": 1270
},
{
"epoch": 0.64,
"grad_norm": 2.453125,
"learning_rate": 6.92643382200193e-06,
"loss": 1.1684,
"step": 1280
},
{
"epoch": 0.645,
"grad_norm": 2.1875,
"learning_rate": 6.7608258180185085e-06,
"loss": 1.1547,
"step": 1290
},
{
"epoch": 0.65,
"grad_norm": 3.171875,
"learning_rate": 6.596204497869501e-06,
"loss": 1.0406,
"step": 1300
},
{
"epoch": 0.655,
"grad_norm": 2.03125,
"learning_rate": 6.432620006803747e-06,
"loss": 1.1742,
"step": 1310
},
{
"epoch": 0.66,
"grad_norm": 2.609375,
"learning_rate": 6.2701221742419106e-06,
"loss": 1.1414,
"step": 1320
},
{
"epoch": 0.665,
"grad_norm": 2.453125,
"learning_rate": 6.108760498597939e-06,
"loss": 1.1387,
"step": 1330
},
{
"epoch": 0.67,
"grad_norm": 3.28125,
"learning_rate": 5.948584132201376e-06,
"loss": 1.1391,
"step": 1340
},
{
"epoch": 0.675,
"grad_norm": 3.046875,
"learning_rate": 5.789641866325091e-06,
"loss": 1.1691,
"step": 1350
},
{
"epoch": 0.68,
"grad_norm": 2.15625,
"learning_rate": 5.631982116322981e-06,
"loss": 1.1891,
"step": 1360
},
{
"epoch": 0.685,
"grad_norm": 2.1875,
"learning_rate": 5.475652906882173e-06,
"loss": 1.0824,
"step": 1370
},
{
"epoch": 0.69,
"grad_norm": 2.875,
"learning_rate": 5.3207018573942684e-06,
"loss": 1.2012,
"step": 1380
},
{
"epoch": 0.695,
"grad_norm": 2.734375,
"learning_rate": 5.167176167449977e-06,
"loss": 1.1844,
"step": 1390
},
{
"epoch": 0.7,
"grad_norm": 2.59375,
"learning_rate": 5.015122602461698e-06,
"loss": 1.0848,
"step": 1400
},
{
"epoch": 0.705,
"grad_norm": 2.65625,
"learning_rate": 4.864587479418302e-06,
"loss": 1.1562,
"step": 1410
},
{
"epoch": 0.71,
"grad_norm": 2.5625,
"learning_rate": 4.71561665277653e-06,
"loss": 1.0996,
"step": 1420
},
{
"epoch": 0.715,
"grad_norm": 2.09375,
"learning_rate": 4.568255500493292e-06,
"loss": 1.1344,
"step": 1430
},
{
"epoch": 0.72,
"grad_norm": 2.359375,
"learning_rate": 4.422548910203099e-06,
"loss": 1.1488,
"step": 1440
},
{
"epoch": 0.725,
"grad_norm": 3.140625,
"learning_rate": 4.27854126554484e-06,
"loss": 1.1176,
"step": 1450
},
{
"epoch": 0.73,
"grad_norm": 2.546875,
"learning_rate": 4.136276432642107e-06,
"loss": 1.1594,
"step": 1460
},
{
"epoch": 0.735,
"grad_norm": 2.515625,
"learning_rate": 3.9957977467411615e-06,
"loss": 1.073,
"step": 1470
},
{
"epoch": 0.74,
"grad_norm": 1.953125,
"learning_rate": 3.857147999010568e-06,
"loss": 1.1074,
"step": 1480
},
{
"epoch": 0.745,
"grad_norm": 2.0,
"learning_rate": 3.7203694235066224e-06,
"loss": 1.0355,
"step": 1490
},
{
"epoch": 0.75,
"grad_norm": 2.515625,
"learning_rate": 3.5855036843084213e-06,
"loss": 1.0801,
"step": 1500
},
{
"epoch": 0.755,
"grad_norm": 2.421875,
"learning_rate": 3.452591862826603e-06,
"loss": 1.0871,
"step": 1510
},
{
"epoch": 0.76,
"grad_norm": 2.390625,
"learning_rate": 3.3216744452895356e-06,
"loss": 1.1047,
"step": 1520
},
{
"epoch": 0.765,
"grad_norm": 2.28125,
"learning_rate": 3.192791310410822e-06,
"loss": 1.068,
"step": 1530
},
{
"epoch": 0.77,
"grad_norm": 2.4375,
"learning_rate": 3.0659817172418694e-06,
"loss": 1.0977,
"step": 1540
},
{
"epoch": 0.775,
"grad_norm": 1.96875,
"learning_rate": 2.9412842932131904e-06,
"loss": 1.1402,
"step": 1550
},
{
"epoch": 0.78,
"grad_norm": 3.375,
"learning_rate": 2.8187370223681134e-06,
"loss": 1.1152,
"step": 1560
},
{
"epoch": 0.785,
"grad_norm": 2.390625,
"learning_rate": 2.698377233792476e-06,
"loss": 1.1219,
"step": 1570
},
{
"epoch": 0.79,
"grad_norm": 1.9375,
"learning_rate": 2.5802415902438373e-06,
"loss": 1.1023,
"step": 1580
},
{
"epoch": 0.795,
"grad_norm": 3.21875,
"learning_rate": 2.464366076983623e-06,
"loss": 1.2223,
"step": 1590
},
{
"epoch": 0.8,
"grad_norm": 1.9453125,
"learning_rate": 2.3507859908156828e-06,
"loss": 1.1047,
"step": 1600
},
{
"epoch": 0.805,
"grad_norm": 2.125,
"learning_rate": 2.2395359293345396e-06,
"loss": 1.1035,
"step": 1610
},
{
"epoch": 0.81,
"grad_norm": 2.328125,
"learning_rate": 2.130649780386628e-06,
"loss": 1.1062,
"step": 1620
},
{
"epoch": 0.815,
"grad_norm": 1.8359375,
"learning_rate": 2.024160711747717e-06,
"loss": 1.102,
"step": 1630
},
{
"epoch": 0.82,
"grad_norm": 2.390625,
"learning_rate": 1.9201011610196972e-06,
"loss": 1.027,
"step": 1640
},
{
"epoch": 0.825,
"grad_norm": 2.921875,
"learning_rate": 1.818502825749764e-06,
"loss": 1.1062,
"step": 1650
},
{
"epoch": 0.83,
"grad_norm": 1.984375,
"learning_rate": 1.7193966537750561e-06,
"loss": 1.1707,
"step": 1660
},
{
"epoch": 0.835,
"grad_norm": 2.046875,
"learning_rate": 1.6228128337956128e-06,
"loss": 1.1461,
"step": 1670
},
{
"epoch": 0.84,
"grad_norm": 3.234375,
"learning_rate": 1.5287807861786308e-06,
"loss": 1.2219,
"step": 1680
},
{
"epoch": 0.845,
"grad_norm": 2.46875,
"learning_rate": 1.4373291539967182e-06,
"loss": 1.0902,
"step": 1690
},
{
"epoch": 0.85,
"grad_norm": 2.28125,
"learning_rate": 1.3484857943029572e-06,
"loss": 1.1367,
"step": 1700
},
{
"epoch": 0.855,
"grad_norm": 2.640625,
"learning_rate": 1.2622777696453482e-06,
"loss": 1.1355,
"step": 1710
},
{
"epoch": 0.86,
"grad_norm": 3.0,
"learning_rate": 1.1787313398233235e-06,
"loss": 1.1258,
"step": 1720
},
{
"epoch": 0.865,
"grad_norm": 2.171875,
"learning_rate": 1.097871953888735e-06,
"loss": 1.0762,
"step": 1730
},
{
"epoch": 0.87,
"grad_norm": 2.0625,
"learning_rate": 1.0197242423938447e-06,
"loss": 1.141,
"step": 1740
},
{
"epoch": 0.875,
"grad_norm": 2.484375,
"learning_rate": 9.44312009888606e-07,
"loss": 1.1539,
"step": 1750
},
{
"epoch": 0.88,
"grad_norm": 2.0625,
"learning_rate": 8.716582276695729e-07,
"loss": 1.0684,
"step": 1760
},
{
"epoch": 0.885,
"grad_norm": 2.546875,
"learning_rate": 8.017850267826233e-07,
"loss": 1.0707,
"step": 1770
},
{
"epoch": 0.89,
"grad_norm": 2.078125,
"learning_rate": 7.347136912816277e-07,
"loss": 1.0586,
"step": 1780
},
{
"epoch": 0.895,
"grad_norm": 2.625,
"learning_rate": 6.704646517451108e-07,
"loss": 1.2242,
"step": 1790
},
{
"epoch": 0.9,
"grad_norm": 1.9765625,
"learning_rate": 6.090574790529091e-07,
"loss": 1.0977,
"step": 1800
},
{
"epoch": 0.905,
"grad_norm": 2.59375,
"learning_rate": 5.505108784246926e-07,
"loss": 1.134,
"step": 1810
},
{
"epoch": 0.91,
"grad_norm": 2.6875,
"learning_rate": 4.948426837221632e-07,
"loss": 1.1621,
"step": 1820
},
{
"epoch": 0.915,
"grad_norm": 2.328125,
"learning_rate": 4.420698520166988e-07,
"loss": 1.0797,
"step": 1830
},
{
"epoch": 0.92,
"grad_norm": 1.8046875,
"learning_rate": 3.922084584240582e-07,
"loss": 1.0406,
"step": 1840
},
{
"epoch": 0.925,
"grad_norm": 2.921875,
"learning_rate": 3.4527369120775036e-07,
"loss": 1.1031,
"step": 1850
},
{
"epoch": 0.93,
"grad_norm": 2.3125,
"learning_rate": 3.0127984715253246e-07,
"loss": 1.1258,
"step": 1860
},
{
"epoch": 0.935,
"grad_norm": 2.0625,
"learning_rate": 2.6024032720948446e-07,
"loss": 1.0246,
"step": 1870
},
{
"epoch": 0.94,
"grad_norm": 2.90625,
"learning_rate": 2.221676324139377e-07,
"loss": 1.1293,
"step": 1880
},
{
"epoch": 0.945,
"grad_norm": 2.21875,
"learning_rate": 1.8707336007754873e-07,
"loss": 1.0801,
"step": 1890
},
{
"epoch": 0.95,
"grad_norm": 3.171875,
"learning_rate": 1.549682002556341e-07,
"loss": 1.2441,
"step": 1900
},
{
"epoch": 0.955,
"grad_norm": 2.453125,
"learning_rate": 1.2586193249088607e-07,
"loss": 1.1301,
"step": 1910
},
{
"epoch": 0.96,
"grad_norm": 3.296875,
"learning_rate": 9.976342283442464e-08,
"loss": 1.243,
"step": 1920
},
{
"epoch": 0.965,
"grad_norm": 2.578125,
"learning_rate": 7.66806211451132e-08,
"loss": 1.1043,
"step": 1930
},
{
"epoch": 0.97,
"grad_norm": 2.09375,
"learning_rate": 5.662055866795357e-08,
"loss": 1.148,
"step": 1940
},
{
"epoch": 0.975,
"grad_norm": 2.015625,
"learning_rate": 3.9589345892304673e-08,
"loss": 1.0168,
"step": 1950
},
{
"epoch": 0.98,
"grad_norm": 2.046875,
"learning_rate": 2.5592170690560415e-08,
"loss": 1.0754,
"step": 1960
},
{
"epoch": 0.985,
"grad_norm": 2.21875,
"learning_rate": 1.4633296737882607e-08,
"loss": 1.0441,
"step": 1970
},
{
"epoch": 0.99,
"grad_norm": 2.25,
"learning_rate": 6.716062213437679e-09,
"loss": 1.1172,
"step": 1980
},
{
"epoch": 0.995,
"grad_norm": 2.140625,
"learning_rate": 1.8428787835578222e-09,
"loss": 1.1742,
"step": 1990
},
{
"epoch": 1.0,
"grad_norm": 2.515625,
"learning_rate": 1.5230867123072757e-11,
"loss": 1.123,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.5153945731072e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
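
For reference, a minimal sketch of how the `log_history` entries above can be loaded and inspected with the Python standard library. The filename `trainer_state.json` is assumed (a local copy of this file); the summary printed is illustrative and not part of the checkpoint itself.

import json

# Load the trainer state saved alongside the checkpoint.
# "trainer_state.json" is an assumed local filename for the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Print a coarse view of the training curve: step, learning rate, loss.
# Entries are logged every 10 steps; sample every 10th entry to keep output short.
for entry in history[::10]:
    print(f"step {entry['step']:>5}  lr {entry['learning_rate']:.2e}  "
          f"loss {entry['loss']:.4f}")

# Final logged loss at step 2000 (end of epoch 1.0).
print("final loss:", history[-1]["loss"])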