{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"Batch Mean": 0.8096684217453003,
"accuracy": 0.625,
"epoch": 0,
"step": 0
},
{
"epoch": 0.0025,
"grad_norm": 9.15977954864502,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.6694,
"step": 1
},
{
"Batch Mean": 0.8517913818359375,
"accuracy": 0.5234375,
"epoch": 0.0025,
"step": 1
},
{
"epoch": 0.005,
"grad_norm": 9.550761222839355,
"learning_rate": 5.000000000000001e-07,
"loss": 0.7001,
"step": 2
},
{
"Batch Mean": 0.8251190185546875,
"accuracy": 0.4921875,
"epoch": 0.005,
"step": 2
},
{
"epoch": 0.0075,
"grad_norm": 10.096807479858398,
"learning_rate": 7.5e-07,
"loss": 0.7277,
"step": 3
},
{
"Batch Mean": 0.8156070709228516,
"accuracy": 0.515625,
"epoch": 0.0075,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 10.845061302185059,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7116,
"step": 4
},
{
"Batch Mean": 0.772618293762207,
"accuracy": 0.53125,
"epoch": 0.01,
"step": 4
},
{
"epoch": 0.0125,
"grad_norm": 10.683115005493164,
"learning_rate": 1.25e-06,
"loss": 0.7074,
"step": 5
},
{
"Batch Mean": 0.7320594787597656,
"accuracy": 0.5390625,
"epoch": 0.0125,
"step": 5
},
{
"epoch": 0.015,
"grad_norm": 9.102219581604004,
"learning_rate": 1.5e-06,
"loss": 0.6964,
"step": 6
},
{
"Batch Mean": 0.639228105545044,
"accuracy": 0.5703125,
"epoch": 0.015,
"step": 6
},
{
"epoch": 0.0175,
"grad_norm": 10.126181602478027,
"learning_rate": 1.75e-06,
"loss": 0.6981,
"step": 7
},
{
"Batch Mean": 0.28215163946151733,
"accuracy": 0.6015625,
"epoch": 0.0175,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 6.3671040534973145,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6668,
"step": 8
},
{
"Batch Mean": 0.10402120649814606,
"accuracy": 0.5234375,
"epoch": 0.02,
"step": 8
},
{
"epoch": 0.0225,
"grad_norm": 6.053694248199463,
"learning_rate": 2.25e-06,
"loss": 0.6907,
"step": 9
},
{
"Batch Mean": -0.794627845287323,
"accuracy": 0.5234375,
"epoch": 0.0225,
"step": 9
},
{
"epoch": 0.025,
"grad_norm": 9.284210205078125,
"learning_rate": 2.5e-06,
"loss": 0.6879,
"step": 10
},
{
"Batch Mean": -1.1373445987701416,
"accuracy": 0.59375,
"epoch": 0.025,
"step": 10
},
{
"epoch": 0.0275,
"grad_norm": 13.110421180725098,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.6954,
"step": 11
},
{
"Batch Mean": -1.2041501998901367,
"accuracy": 0.59375,
"epoch": 0.0275,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 13.619694709777832,
"learning_rate": 3e-06,
"loss": 0.7004,
"step": 12
},
{
"Batch Mean": -1.1390533447265625,
"accuracy": 0.65625,
"epoch": 0.03,
"step": 12
},
{
"epoch": 0.0325,
"grad_norm": 16.698774337768555,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.6691,
"step": 13
},
{
"Batch Mean": -1.2006721496582031,
"accuracy": 0.6953125,
"epoch": 0.0325,
"step": 13
},
{
"epoch": 0.035,
"grad_norm": 13.031152725219727,
"learning_rate": 3.5e-06,
"loss": 0.6749,
"step": 14
},
{
"Batch Mean": -1.1018962860107422,
"accuracy": 0.6484375,
"epoch": 0.035,
"step": 14
},
{
"epoch": 0.0375,
"grad_norm": 11.913775444030762,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.6517,
"step": 15
},
{
"Batch Mean": -0.8371965289115906,
"accuracy": 0.65625,
"epoch": 0.0375,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 9.776491165161133,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6435,
"step": 16
},
{
"Batch Mean": -0.25420451164245605,
"accuracy": 0.65625,
"epoch": 0.04,
"step": 16
},
{
"epoch": 0.0425,
"grad_norm": 5.094934940338135,
"learning_rate": 4.25e-06,
"loss": 0.6352,
"step": 17
},
{
"Batch Mean": 0.45112472772598267,
"accuracy": 0.625,
"epoch": 0.0425,
"step": 17
},
{
"epoch": 0.045,
"grad_norm": 6.203393459320068,
"learning_rate": 4.5e-06,
"loss": 0.6305,
"step": 18
},
{
"Batch Mean": 1.0782840251922607,
"accuracy": 0.640625,
"epoch": 0.045,
"step": 18
},
{
"epoch": 0.0475,
"grad_norm": 11.932191848754883,
"learning_rate": 4.75e-06,
"loss": 0.6401,
"step": 19
},
{
"Batch Mean": 1.6000666618347168,
"accuracy": 0.65625,
"epoch": 0.0475,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 18.466459274291992,
"learning_rate": 5e-06,
"loss": 0.6928,
"step": 20
},
{
"Batch Mean": 1.6292922496795654,
"accuracy": 0.65625,
"epoch": 0.05,
"step": 20
},
{
"epoch": 0.0525,
"grad_norm": 18.958446502685547,
"learning_rate": 4.986842105263158e-06,
"loss": 0.6456,
"step": 21
},
{
"Batch Mean": 1.0992889404296875,
"accuracy": 0.7109375,
"epoch": 0.0525,
"step": 21
},
{
"epoch": 0.055,
"grad_norm": 12.98255443572998,
"learning_rate": 4.973684210526316e-06,
"loss": 0.606,
"step": 22
},
{
"Batch Mean": 0.33066248893737793,
"accuracy": 0.703125,
"epoch": 0.055,
"step": 22
},
{
"epoch": 0.0575,
"grad_norm": 6.1022725105285645,
"learning_rate": 4.960526315789474e-06,
"loss": 0.5807,
"step": 23
},
{
"Batch Mean": -0.7870486974716187,
"accuracy": 0.7109375,
"epoch": 0.0575,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 9.311738014221191,
"learning_rate": 4.947368421052632e-06,
"loss": 0.5331,
"step": 24
},
{
"Batch Mean": -1.2838190793991089,
"accuracy": 0.6484375,
"epoch": 0.06,
"step": 24
},
{
"epoch": 0.0625,
"grad_norm": 13.955443382263184,
"learning_rate": 4.9342105263157895e-06,
"loss": 0.6186,
"step": 25
},
{
"Batch Mean": -1.324391484260559,
"accuracy": 0.6953125,
"epoch": 0.0625,
"step": 25
},
{
"epoch": 0.065,
"grad_norm": 14.612135887145996,
"learning_rate": 4.921052631578948e-06,
"loss": 0.5325,
"step": 26
},
{
"Batch Mean": -0.7885305881500244,
"accuracy": 0.7109375,
"epoch": 0.065,
"step": 26
},
{
"epoch": 0.0675,
"grad_norm": 9.373554229736328,
"learning_rate": 4.907894736842106e-06,
"loss": 0.5549,
"step": 27
},
{
"Batch Mean": -0.25717926025390625,
"accuracy": 0.6953125,
"epoch": 0.0675,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 5.559288501739502,
"learning_rate": 4.894736842105264e-06,
"loss": 0.572,
"step": 28
},
{
"Batch Mean": 0.7993221282958984,
"accuracy": 0.6171875,
"epoch": 0.07,
"step": 28
},
{
"epoch": 0.0725,
"grad_norm": 10.778632164001465,
"learning_rate": 4.881578947368422e-06,
"loss": 0.6806,
"step": 29
},
{
"Batch Mean": 1.2803305387496948,
"accuracy": 0.6796875,
"epoch": 0.0725,
"step": 29
},
{
"epoch": 0.075,
"grad_norm": 14.314054489135742,
"learning_rate": 4.8684210526315795e-06,
"loss": 0.641,
"step": 30
},
{
"Batch Mean": 1.1847546100616455,
"accuracy": 0.7890625,
"epoch": 0.075,
"step": 30
},
{
"epoch": 0.0775,
"grad_norm": 12.86508846282959,
"learning_rate": 4.855263157894737e-06,
"loss": 0.5118,
"step": 31
},
{
"Batch Mean": 0.7971279621124268,
"accuracy": 0.734375,
"epoch": 0.0775,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 8.987775802612305,
"learning_rate": 4.842105263157895e-06,
"loss": 0.5508,
"step": 32
},
{
"Batch Mean": -0.12238264083862305,
"accuracy": 0.671875,
"epoch": 0.08,
"step": 32
},
{
"epoch": 0.0825,
"grad_norm": 4.265674591064453,
"learning_rate": 4.828947368421053e-06,
"loss": 0.5533,
"step": 33
},
{
"Batch Mean": -0.7733749151229858,
"accuracy": 0.7265625,
"epoch": 0.0825,
"step": 33
},
{
"epoch": 0.085,
"grad_norm": 9.074788093566895,
"learning_rate": 4.815789473684211e-06,
"loss": 0.5455,
"step": 34
},
{
"Batch Mean": -0.9033082723617554,
"accuracy": 0.7578125,
"epoch": 0.085,
"step": 34
},
{
"epoch": 0.0875,
"grad_norm": 10.331607818603516,
"learning_rate": 4.802631578947369e-06,
"loss": 0.5539,
"step": 35
},
{
"Batch Mean": -0.5780456066131592,
"accuracy": 0.7265625,
"epoch": 0.0875,
"step": 35
},
{
"epoch": 0.09,
"grad_norm": 7.254377365112305,
"learning_rate": 4.789473684210527e-06,
"loss": 0.547,
"step": 36
},
{
"Batch Mean": 0.09664157032966614,
"accuracy": 0.6953125,
"epoch": 0.09,
"step": 36
},
{
"epoch": 0.0925,
"grad_norm": 3.9853391647338867,
"learning_rate": 4.7763157894736844e-06,
"loss": 0.5451,
"step": 37
},
{
"Batch Mean": 0.5406360626220703,
"accuracy": 0.7265625,
"epoch": 0.0925,
"step": 37
},
{
"epoch": 0.095,
"grad_norm": 7.308631896972656,
"learning_rate": 4.763157894736842e-06,
"loss": 0.5313,
"step": 38
},
{
"Batch Mean": 0.6023058891296387,
"accuracy": 0.7890625,
"epoch": 0.095,
"step": 38
},
{
"epoch": 0.0975,
"grad_norm": 7.4278717041015625,
"learning_rate": 4.75e-06,
"loss": 0.5145,
"step": 39
},
{
"Batch Mean": 0.23680943250656128,
"accuracy": 0.7890625,
"epoch": 0.0975,
"step": 39
},
{
"epoch": 0.1,
"grad_norm": 5.057112693786621,
"learning_rate": 4.736842105263158e-06,
"loss": 0.4793,
"step": 40
},
{
"Batch Mean": -0.2819175720214844,
"accuracy": 0.75,
"epoch": 0.1,
"step": 40
},
{
"epoch": 0.1025,
"grad_norm": 5.986076831817627,
"learning_rate": 4.723684210526316e-06,
"loss": 0.4989,
"step": 41
},
{
"Batch Mean": -0.2952081263065338,
"accuracy": 0.765625,
"epoch": 0.1025,
"step": 41
},
{
"epoch": 0.105,
"grad_norm": 6.123879432678223,
"learning_rate": 4.710526315789474e-06,
"loss": 0.4959,
"step": 42
},
{
"Batch Mean": -0.5327777862548828,
"accuracy": 0.8359375,
"epoch": 0.105,
"step": 42
},
{
"epoch": 0.1075,
"grad_norm": 7.773658752441406,
"learning_rate": 4.697368421052632e-06,
"loss": 0.4501,
"step": 43
},
{
"Batch Mean": -0.00808095932006836,
"accuracy": 0.7265625,
"epoch": 0.1075,
"step": 43
},
{
"epoch": 0.11,
"grad_norm": 6.165060520172119,
"learning_rate": 4.68421052631579e-06,
"loss": 0.4952,
"step": 44
},
{
"Batch Mean": 0.23638486862182617,
"accuracy": 0.7109375,
"epoch": 0.11,
"step": 44
},
{
"epoch": 0.1125,
"grad_norm": 6.963595390319824,
"learning_rate": 4.671052631578948e-06,
"loss": 0.5261,
"step": 45
},
{
"Batch Mean": 0.07831740379333496,
"accuracy": 0.8125,
"epoch": 0.1125,
"step": 45
},
{
"epoch": 0.115,
"grad_norm": 5.393492221832275,
"learning_rate": 4.657894736842106e-06,
"loss": 0.405,
"step": 46
},
{
"Batch Mean": 0.10195636749267578,
"accuracy": 0.75,
"epoch": 0.115,
"step": 46
},
{
"epoch": 0.1175,
"grad_norm": 9.352888107299805,
"learning_rate": 4.6447368421052635e-06,
"loss": 0.5726,
"step": 47
},
{
"Batch Mean": 0.4460281729698181,
"accuracy": 0.7578125,
"epoch": 0.1175,
"step": 47
},
{
"epoch": 0.12,
"grad_norm": 9.130428314208984,
"learning_rate": 4.631578947368421e-06,
"loss": 0.5347,
"step": 48
},
{
"Batch Mean": -0.2610776424407959,
"accuracy": 0.796875,
"epoch": 0.12,
"step": 48
},
{
"epoch": 0.1225,
"grad_norm": 6.863037586212158,
"learning_rate": 4.618421052631579e-06,
"loss": 0.4456,
"step": 49
},
{
"Batch Mean": -0.19384944438934326,
"accuracy": 0.6328125,
"epoch": 0.1225,
"step": 49
},
{
"epoch": 0.125,
"grad_norm": 6.617422103881836,
"learning_rate": 4.605263157894737e-06,
"loss": 0.5905,
"step": 50
},
{
"Batch Mean": -0.4514150619506836,
"accuracy": 0.7890625,
"epoch": 0.125,
"step": 50
},
{
"epoch": 0.1275,
"grad_norm": 7.564980506896973,
"learning_rate": 4.592105263157895e-06,
"loss": 0.4885,
"step": 51
},
{
"Batch Mean": -0.003515481948852539,
"accuracy": 0.734375,
"epoch": 0.1275,
"step": 51
},
{
"epoch": 0.13,
"grad_norm": 5.258878707885742,
"learning_rate": 4.578947368421053e-06,
"loss": 0.5032,
"step": 52
},
{
"Batch Mean": -0.10907495021820068,
"accuracy": 0.71875,
"epoch": 0.13,
"step": 52
},
{
"epoch": 0.1325,
"grad_norm": 5.931639194488525,
"learning_rate": 4.565789473684211e-06,
"loss": 0.542,
"step": 53
},
{
"Batch Mean": 0.46248769760131836,
"accuracy": 0.7890625,
"epoch": 0.1325,
"step": 53
},
{
"epoch": 0.135,
"grad_norm": 7.1796112060546875,
"learning_rate": 4.552631578947369e-06,
"loss": 0.4685,
"step": 54
},
{
"Batch Mean": 0.1234641969203949,
"accuracy": 0.7265625,
"epoch": 0.135,
"step": 54
},
{
"epoch": 0.1375,
"grad_norm": 6.273733139038086,
"learning_rate": 4.539473684210527e-06,
"loss": 0.4989,
"step": 55
},
{
"Batch Mean": -0.15052831172943115,
"accuracy": 0.78125,
"epoch": 0.1375,
"step": 55
},
{
"epoch": 0.14,
"grad_norm": 5.630648612976074,
"learning_rate": 4.526315789473685e-06,
"loss": 0.4518,
"step": 56
},
{
"Batch Mean": 0.10203218460083008,
"accuracy": 0.75,
"epoch": 0.14,
"step": 56
},
{
"epoch": 0.1425,
"grad_norm": 5.465209484100342,
"learning_rate": 4.513157894736843e-06,
"loss": 0.4775,
"step": 57
},
{
"Batch Mean": -0.2146952748298645,
"accuracy": 0.7578125,
"epoch": 0.1425,
"step": 57
},
{
"epoch": 0.145,
"grad_norm": 5.516883373260498,
"learning_rate": 4.5e-06,
"loss": 0.4672,
"step": 58
},
{
"Batch Mean": 0.39426422119140625,
"accuracy": 0.7734375,
"epoch": 0.145,
"step": 58
},
{
"epoch": 0.1475,
"grad_norm": 6.986379146575928,
"learning_rate": 4.4868421052631584e-06,
"loss": 0.4851,
"step": 59
},
{
"Batch Mean": 0.1964409053325653,
"accuracy": 0.75,
"epoch": 0.1475,
"step": 59
},
{
"epoch": 0.15,
"grad_norm": 6.413231372833252,
"learning_rate": 4.473684210526316e-06,
"loss": 0.5073,
"step": 60
},
{
"Batch Mean": -0.21541327238082886,
"accuracy": 0.7890625,
"epoch": 0.15,
"step": 60
},
{
"epoch": 0.1525,
"grad_norm": 5.495061874389648,
"learning_rate": 4.460526315789474e-06,
"loss": 0.4751,
"step": 61
},
{
"Batch Mean": -0.3059917688369751,
"accuracy": 0.7890625,
"epoch": 0.1525,
"step": 61
},
{
"epoch": 0.155,
"grad_norm": 6.379850387573242,
"learning_rate": 4.447368421052632e-06,
"loss": 0.4679,
"step": 62
},
{
"Batch Mean": -0.5860270261764526,
"accuracy": 0.8046875,
"epoch": 0.155,
"step": 62
},
{
"epoch": 0.1575,
"grad_norm": 8.42182731628418,
"learning_rate": 4.43421052631579e-06,
"loss": 0.4152,
"step": 63
},
{
"Batch Mean": 0.14216375350952148,
"accuracy": 0.7578125,
"epoch": 0.1575,
"step": 63
},
{
"epoch": 0.16,
"grad_norm": 6.759493827819824,
"learning_rate": 4.4210526315789476e-06,
"loss": 0.4852,
"step": 64
},
{
"Batch Mean": 0.3515692353248596,
"accuracy": 0.796875,
"epoch": 0.16,
"step": 64
},
{
"epoch": 0.1625,
"grad_norm": 7.567401885986328,
"learning_rate": 4.407894736842105e-06,
"loss": 0.4449,
"step": 65
},
{
"Batch Mean": 0.281324565410614,
"accuracy": 0.765625,
"epoch": 0.1625,
"step": 65
},
{
"epoch": 0.165,
"grad_norm": 7.912868022918701,
"learning_rate": 4.394736842105263e-06,
"loss": 0.5717,
"step": 66
},
{
"Batch Mean": 0.07387387752532959,
"accuracy": 0.796875,
"epoch": 0.165,
"step": 66
},
{
"epoch": 0.1675,
"grad_norm": 5.737614631652832,
"learning_rate": 4.381578947368421e-06,
"loss": 0.4532,
"step": 67
},
{
"Batch Mean": -0.45137548446655273,
"accuracy": 0.7734375,
"epoch": 0.1675,
"step": 67
},
{
"epoch": 0.17,
"grad_norm": 9.204909324645996,
"learning_rate": 4.368421052631579e-06,
"loss": 0.501,
"step": 68
},
{
"Batch Mean": -0.12238574028015137,
"accuracy": 0.828125,
"epoch": 0.17,
"step": 68
},
{
"epoch": 0.1725,
"grad_norm": 6.6219162940979,
"learning_rate": 4.3552631578947375e-06,
"loss": 0.416,
"step": 69
},
{
"Batch Mean": 0.06974506378173828,
"accuracy": 0.78125,
"epoch": 0.1725,
"step": 69
},
{
"epoch": 0.175,
"grad_norm": 5.526142120361328,
"learning_rate": 4.342105263157895e-06,
"loss": 0.4364,
"step": 70
},
{
"Batch Mean": 0.3401278257369995,
"accuracy": 0.8359375,
"epoch": 0.175,
"step": 70
},
{
"epoch": 0.1775,
"grad_norm": 6.567529678344727,
"learning_rate": 4.328947368421053e-06,
"loss": 0.4396,
"step": 71
},
{
"Batch Mean": -0.12450069189071655,
"accuracy": 0.75,
"epoch": 0.1775,
"step": 71
},
{
"epoch": 0.18,
"grad_norm": 6.054138660430908,
"learning_rate": 4.315789473684211e-06,
"loss": 0.4604,
"step": 72
},
{
"Batch Mean": 0.0036773681640625,
"accuracy": 0.7890625,
"epoch": 0.18,
"step": 72
},
{
"epoch": 0.1825,
"grad_norm": 6.112196445465088,
"learning_rate": 4.302631578947369e-06,
"loss": 0.4091,
"step": 73
},
{
"Batch Mean": -0.3432164192199707,
"accuracy": 0.7890625,
"epoch": 0.1825,
"step": 73
},
{
"epoch": 0.185,
"grad_norm": 6.806588649749756,
"learning_rate": 4.289473684210527e-06,
"loss": 0.4194,
"step": 74
},
{
"Batch Mean": -0.10890483856201172,
"accuracy": 0.671875,
"epoch": 0.185,
"step": 74
},
{
"epoch": 0.1875,
"grad_norm": 7.000553131103516,
"learning_rate": 4.276315789473684e-06,
"loss": 0.5318,
"step": 75
},
{
"Batch Mean": 0.6419280171394348,
"accuracy": 0.7265625,
"epoch": 0.1875,
"step": 75
},
{
"epoch": 0.19,
"grad_norm": 8.422764778137207,
"learning_rate": 4.2631578947368425e-06,
"loss": 0.4539,
"step": 76
},
{
"Batch Mean": 0.28451067209243774,
"accuracy": 0.7734375,
"epoch": 0.19,
"step": 76
},
{
"epoch": 0.1925,
"grad_norm": 6.512566566467285,
"learning_rate": 4.25e-06,
"loss": 0.4522,
"step": 77
},
{
"Batch Mean": -0.07123541831970215,
"accuracy": 0.8125,
"epoch": 0.1925,
"step": 77
},
{
"epoch": 0.195,
"grad_norm": 6.979062080383301,
"learning_rate": 4.236842105263158e-06,
"loss": 0.4827,
"step": 78
},
{
"Batch Mean": -0.2889130115509033,
"accuracy": 0.71875,
"epoch": 0.195,
"step": 78
},
{
"epoch": 0.1975,
"grad_norm": 7.523491859436035,
"learning_rate": 4.223684210526316e-06,
"loss": 0.4501,
"step": 79
},
{
"Batch Mean": -0.2912619113922119,
"accuracy": 0.8046875,
"epoch": 0.1975,
"step": 79
},
{
"epoch": 0.2,
"grad_norm": 6.1573662757873535,
"learning_rate": 4.210526315789474e-06,
"loss": 0.4537,
"step": 80
},
{
"Batch Mean": 0.07626897096633911,
"accuracy": 0.84375,
"epoch": 0.2,
"step": 80
},
{
"epoch": 0.2025,
"grad_norm": 5.308783054351807,
"learning_rate": 4.197368421052632e-06,
"loss": 0.3777,
"step": 81
},
{
"Batch Mean": 0.44231414794921875,
"accuracy": 0.78125,
"epoch": 0.2025,
"step": 81
},
{
"epoch": 0.205,
"grad_norm": 7.6725969314575195,
"learning_rate": 4.18421052631579e-06,
"loss": 0.4681,
"step": 82
},
{
"Batch Mean": 0.05066095292568207,
"accuracy": 0.7734375,
"epoch": 0.205,
"step": 82
},
{
"epoch": 0.2075,
"grad_norm": 6.136054039001465,
"learning_rate": 4.171052631578948e-06,
"loss": 0.4111,
"step": 83
},
{
"Batch Mean": 0.11537289619445801,
"accuracy": 0.8515625,
"epoch": 0.2075,
"step": 83
},
{
"epoch": 0.21,
"grad_norm": 5.31056022644043,
"learning_rate": 4.157894736842106e-06,
"loss": 0.345,
"step": 84
},
{
"Batch Mean": 0.05826067924499512,
"accuracy": 0.734375,
"epoch": 0.21,
"step": 84
},
{
"epoch": 0.2125,
"grad_norm": 6.368407249450684,
"learning_rate": 4.144736842105263e-06,
"loss": 0.4834,
"step": 85
},
{
"Batch Mean": -0.05756664276123047,
"accuracy": 0.75,
"epoch": 0.2125,
"step": 85
},
{
"epoch": 0.215,
"grad_norm": 7.063327312469482,
"learning_rate": 4.1315789473684216e-06,
"loss": 0.5013,
"step": 86
},
{
"Batch Mean": -0.08471214771270752,
"accuracy": 0.8046875,
"epoch": 0.215,
"step": 86
},
{
"epoch": 0.2175,
"grad_norm": 6.508166313171387,
"learning_rate": 4.118421052631579e-06,
"loss": 0.4431,
"step": 87
},
{
"Batch Mean": -0.06070905923843384,
"accuracy": 0.703125,
"epoch": 0.2175,
"step": 87
},
{
"epoch": 0.22,
"grad_norm": 7.8081583976745605,
"learning_rate": 4.105263157894737e-06,
"loss": 0.5481,
"step": 88
},
{
"Batch Mean": -0.15125751495361328,
"accuracy": 0.8046875,
"epoch": 0.22,
"step": 88
},
{
"epoch": 0.2225,
"grad_norm": 5.806808948516846,
"learning_rate": 4.092105263157895e-06,
"loss": 0.3994,
"step": 89
},
{
"Batch Mean": -0.0017851591110229492,
"accuracy": 0.828125,
"epoch": 0.2225,
"step": 89
},
{
"epoch": 0.225,
"grad_norm": 5.560466766357422,
"learning_rate": 4.078947368421053e-06,
"loss": 0.4,
"step": 90
},
{
"Batch Mean": 0.07748031616210938,
"accuracy": 0.8046875,
"epoch": 0.225,
"step": 90
},
{
"epoch": 0.2275,
"grad_norm": 5.754173278808594,
"learning_rate": 4.065789473684211e-06,
"loss": 0.4019,
"step": 91
},
{
"Batch Mean": -0.24073825776576996,
"accuracy": 0.78125,
"epoch": 0.2275,
"step": 91
},
{
"epoch": 0.23,
"grad_norm": 6.231720447540283,
"learning_rate": 4.052631578947368e-06,
"loss": 0.4585,
"step": 92
},
{
"Batch Mean": -0.45170480012893677,
"accuracy": 0.75,
"epoch": 0.23,
"step": 92
},
{
"epoch": 0.2325,
"grad_norm": 7.313357353210449,
"learning_rate": 4.0394736842105265e-06,
"loss": 0.4762,
"step": 93
},
{
"Batch Mean": 0.024792194366455078,
"accuracy": 0.796875,
"epoch": 0.2325,
"step": 93
},
{
"epoch": 0.235,
"grad_norm": 4.933104515075684,
"learning_rate": 4.026315789473684e-06,
"loss": 0.4138,
"step": 94
},
{
"Batch Mean": -0.02751898765563965,
"accuracy": 0.7421875,
"epoch": 0.235,
"step": 94
},
{
"epoch": 0.2375,
"grad_norm": 5.39032506942749,
"learning_rate": 4.013157894736842e-06,
"loss": 0.4787,
"step": 95
},
{
"Batch Mean": 0.06753873825073242,
"accuracy": 0.8203125,
"epoch": 0.2375,
"step": 95
},
{
"epoch": 0.24,
"grad_norm": 5.1791462898254395,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4488,
"step": 96
},
{
"Batch Mean": 0.22616100311279297,
"accuracy": 0.828125,
"epoch": 0.24,
"step": 96
},
{
"epoch": 0.2425,
"grad_norm": 5.300809383392334,
"learning_rate": 3.986842105263158e-06,
"loss": 0.4142,
"step": 97
},
{
"Batch Mean": -0.15787070989608765,
"accuracy": 0.8515625,
"epoch": 0.2425,
"step": 97
},
{
"epoch": 0.245,
"grad_norm": 5.704384803771973,
"learning_rate": 3.9736842105263165e-06,
"loss": 0.3797,
"step": 98
},
{
"Batch Mean": 0.13187718391418457,
"accuracy": 0.8046875,
"epoch": 0.245,
"step": 98
},
{
"epoch": 0.2475,
"grad_norm": 6.045483589172363,
"learning_rate": 3.960526315789474e-06,
"loss": 0.5195,
"step": 99
},
{
"Batch Mean": -0.08533608913421631,
"accuracy": 0.8203125,
"epoch": 0.2475,
"step": 99
},
{
"epoch": 0.25,
"grad_norm": 4.997817516326904,
"learning_rate": 3.947368421052632e-06,
"loss": 0.3816,
"step": 100
},
{
"Batch Mean": 0.028887659311294556,
"accuracy": 0.7421875,
"epoch": 0.25,
"step": 100
},
{
"epoch": 0.2525,
"grad_norm": 5.863315105438232,
"learning_rate": 3.93421052631579e-06,
"loss": 0.478,
"step": 101
},
{
"Batch Mean": 0.17493167519569397,
"accuracy": 0.796875,
"epoch": 0.2525,
"step": 101
},
{
"epoch": 0.255,
"grad_norm": 5.986959934234619,
"learning_rate": 3.921052631578947e-06,
"loss": 0.4283,
"step": 102
},
{
"Batch Mean": 0.19265401363372803,
"accuracy": 0.8125,
"epoch": 0.255,
"step": 102
},
{
"epoch": 0.2575,
"grad_norm": 5.091299533843994,
"learning_rate": 3.907894736842106e-06,
"loss": 0.352,
"step": 103
},
{
"Batch Mean": -0.2604933977127075,
"accuracy": 0.75,
"epoch": 0.2575,
"step": 103
},
{
"epoch": 0.26,
"grad_norm": 6.963299751281738,
"learning_rate": 3.894736842105263e-06,
"loss": 0.4583,
"step": 104
},
{
"Batch Mean": -0.21034908294677734,
"accuracy": 0.7734375,
"epoch": 0.26,
"step": 104
},
{
"epoch": 0.2625,
"grad_norm": 7.000269889831543,
"learning_rate": 3.8815789473684214e-06,
"loss": 0.3837,
"step": 105
},
{
"Batch Mean": -0.10478854179382324,
"accuracy": 0.8359375,
"epoch": 0.2625,
"step": 105
},
{
"epoch": 0.265,
"grad_norm": 6.25542688369751,
"learning_rate": 3.868421052631579e-06,
"loss": 0.3928,
"step": 106
},
{
"Batch Mean": 0.35255885124206543,
"accuracy": 0.7734375,
"epoch": 0.265,
"step": 106
},
{
"epoch": 0.2675,
"grad_norm": 8.28545093536377,
"learning_rate": 3.855263157894737e-06,
"loss": 0.4785,
"step": 107
},
{
"Batch Mean": 0.4886665344238281,
"accuracy": 0.7734375,
"epoch": 0.2675,
"step": 107
},
{
"epoch": 0.27,
"grad_norm": 9.611257553100586,
"learning_rate": 3.842105263157895e-06,
"loss": 0.4635,
"step": 108
},
{
"Batch Mean": -0.06474494934082031,
"accuracy": 0.8046875,
"epoch": 0.27,
"step": 108
},
{
"epoch": 0.2725,
"grad_norm": 7.419112682342529,
"learning_rate": 3.828947368421053e-06,
"loss": 0.4225,
"step": 109
},
{
"Batch Mean": -0.12506628036499023,
"accuracy": 0.8515625,
"epoch": 0.2725,
"step": 109
},
{
"epoch": 0.275,
"grad_norm": 6.5173020362854,
"learning_rate": 3.815789473684211e-06,
"loss": 0.3518,
"step": 110
},
{
"Batch Mean": -0.12815812230110168,
"accuracy": 0.921875,
"epoch": 0.275,
"step": 110
},
{
"epoch": 0.2775,
"grad_norm": 5.361306667327881,
"learning_rate": 3.802631578947369e-06,
"loss": 0.269,
"step": 111
},
{
"Batch Mean": -0.20336151123046875,
"accuracy": 0.890625,
"epoch": 0.2775,
"step": 111
},
{
"epoch": 0.28,
"grad_norm": 6.405649662017822,
"learning_rate": 3.789473684210527e-06,
"loss": 0.2563,
"step": 112
},
{
"Batch Mean": 0.22764039039611816,
"accuracy": 0.8203125,
"epoch": 0.28,
"step": 112
},
{
"epoch": 0.2825,
"grad_norm": 8.156770706176758,
"learning_rate": 3.7763157894736847e-06,
"loss": 0.4163,
"step": 113
},
{
"Batch Mean": 0.27307558059692383,
"accuracy": 0.859375,
"epoch": 0.2825,
"step": 113
},
{
"epoch": 0.285,
"grad_norm": 7.346747875213623,
"learning_rate": 3.7631578947368426e-06,
"loss": 0.3815,
"step": 114
},
{
"Batch Mean": 0.6259889602661133,
"accuracy": 0.75,
"epoch": 0.285,
"step": 114
},
{
"epoch": 0.2875,
"grad_norm": 12.56270980834961,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.5411,
"step": 115
},
{
"Batch Mean": -0.13174670934677124,
"accuracy": 0.8046875,
"epoch": 0.2875,
"step": 115
},
{
"epoch": 0.29,
"grad_norm": 7.563482761383057,
"learning_rate": 3.736842105263158e-06,
"loss": 0.4455,
"step": 116
},
{
"Batch Mean": -0.8023077249526978,
"accuracy": 0.8046875,
"epoch": 0.29,
"step": 116
},
{
"epoch": 0.2925,
"grad_norm": 10.422194480895996,
"learning_rate": 3.723684210526316e-06,
"loss": 0.3826,
"step": 117
},
{
"Batch Mean": -0.7135447263717651,
"accuracy": 0.796875,
"epoch": 0.2925,
"step": 117
},
{
"epoch": 0.295,
"grad_norm": 9.252284049987793,
"learning_rate": 3.710526315789474e-06,
"loss": 0.4201,
"step": 118
},
{
"Batch Mean": -0.24731099605560303,
"accuracy": 0.7578125,
"epoch": 0.295,
"step": 118
},
{
"epoch": 0.2975,
"grad_norm": 6.945714473724365,
"learning_rate": 3.6973684210526317e-06,
"loss": 0.461,
"step": 119
},
{
"Batch Mean": 0.4304164946079254,
"accuracy": 0.8359375,
"epoch": 0.2975,
"step": 119
},
{
"epoch": 0.3,
"grad_norm": 7.393038749694824,
"learning_rate": 3.6842105263157896e-06,
"loss": 0.3897,
"step": 120
},
{
"Batch Mean": 0.24881935119628906,
"accuracy": 0.7890625,
"epoch": 0.3,
"step": 120
},
{
"epoch": 0.3025,
"grad_norm": 6.129741668701172,
"learning_rate": 3.6710526315789476e-06,
"loss": 0.3894,
"step": 121
},
{
"Batch Mean": 0.09764862060546875,
"accuracy": 0.828125,
"epoch": 0.3025,
"step": 121
},
{
"epoch": 0.305,
"grad_norm": 5.265136241912842,
"learning_rate": 3.657894736842106e-06,
"loss": 0.3733,
"step": 122
},
{
"Batch Mean": 0.17956316471099854,
"accuracy": 0.8046875,
"epoch": 0.305,
"step": 122
},
{
"epoch": 0.3075,
"grad_norm": 4.876620769500732,
"learning_rate": 3.644736842105264e-06,
"loss": 0.3689,
"step": 123
},
{
"Batch Mean": 0.19482755661010742,
"accuracy": 0.8203125,
"epoch": 0.3075,
"step": 123
},
{
"epoch": 0.31,
"grad_norm": 6.0671162605285645,
"learning_rate": 3.6315789473684217e-06,
"loss": 0.4151,
"step": 124
},
{
"Batch Mean": -0.07992386817932129,
"accuracy": 0.828125,
"epoch": 0.31,
"step": 124
},
{
"epoch": 0.3125,
"grad_norm": 5.630014419555664,
"learning_rate": 3.618421052631579e-06,
"loss": 0.406,
"step": 125
},
{
"Batch Mean": -0.01944279670715332,
"accuracy": 0.8515625,
"epoch": 0.3125,
"step": 125
},
{
"epoch": 0.315,
"grad_norm": 5.250101566314697,
"learning_rate": 3.605263157894737e-06,
"loss": 0.3816,
"step": 126
},
{
"Batch Mean": -0.18266761302947998,
"accuracy": 0.8203125,
"epoch": 0.315,
"step": 126
},
{
"epoch": 0.3175,
"grad_norm": 6.644626617431641,
"learning_rate": 3.592105263157895e-06,
"loss": 0.4098,
"step": 127
},
{
"Batch Mean": 0.12618303298950195,
"accuracy": 0.8359375,
"epoch": 0.3175,
"step": 127
},
{
"epoch": 0.32,
"grad_norm": 5.627920627593994,
"learning_rate": 3.578947368421053e-06,
"loss": 0.3854,
"step": 128
},
{
"Batch Mean": -0.33390331268310547,
"accuracy": 0.8046875,
"epoch": 0.32,
"step": 128
},
{
"epoch": 0.3225,
"grad_norm": 7.37267541885376,
"learning_rate": 3.565789473684211e-06,
"loss": 0.4615,
"step": 129
},
{
"Batch Mean": -0.06647157669067383,
"accuracy": 0.8125,
"epoch": 0.3225,
"step": 129
},
{
"epoch": 0.325,
"grad_norm": 6.135495185852051,
"learning_rate": 3.5526315789473687e-06,
"loss": 0.3621,
"step": 130
},
{
"Batch Mean": 0.4405427575111389,
"accuracy": 0.75,
"epoch": 0.325,
"step": 130
},
{
"epoch": 0.3275,
"grad_norm": 8.818106651306152,
"learning_rate": 3.5394736842105266e-06,
"loss": 0.5184,
"step": 131
},
{
"Batch Mean": -0.026287078857421875,
"accuracy": 0.859375,
"epoch": 0.3275,
"step": 131
},
{
"epoch": 0.33,
"grad_norm": 5.420790195465088,
"learning_rate": 3.5263157894736846e-06,
"loss": 0.329,
"step": 132
},
{
"Batch Mean": 0.05462455749511719,
"accuracy": 0.796875,
"epoch": 0.33,
"step": 132
},
{
"epoch": 0.3325,
"grad_norm": 6.507498264312744,
"learning_rate": 3.513157894736842e-06,
"loss": 0.4115,
"step": 133
},
{
"Batch Mean": -0.24898433685302734,
"accuracy": 0.765625,
"epoch": 0.3325,
"step": 133
},
{
"epoch": 0.335,
"grad_norm": 7.103653430938721,
"learning_rate": 3.5e-06,
"loss": 0.47,
"step": 134
},
{
"Batch Mean": 0.0019243955612182617,
"accuracy": 0.7890625,
"epoch": 0.335,
"step": 134
},
{
"epoch": 0.3375,
"grad_norm": 6.966567039489746,
"learning_rate": 3.486842105263158e-06,
"loss": 0.4961,
"step": 135
},
{
"Batch Mean": -0.3426704406738281,
"accuracy": 0.8515625,
"epoch": 0.3375,
"step": 135
},
{
"epoch": 0.34,
"grad_norm": 6.283776760101318,
"learning_rate": 3.473684210526316e-06,
"loss": 0.3601,
"step": 136
},
{
"Batch Mean": -0.20534992218017578,
"accuracy": 0.8203125,
"epoch": 0.34,
"step": 136
},
{
"epoch": 0.3425,
"grad_norm": 6.146775245666504,
"learning_rate": 3.460526315789474e-06,
"loss": 0.3778,
"step": 137
},
{
"Batch Mean": 0.08529424667358398,
"accuracy": 0.8359375,
"epoch": 0.3425,
"step": 137
},
{
"epoch": 0.345,
"grad_norm": 6.416910171508789,
"learning_rate": 3.447368421052632e-06,
"loss": 0.4167,
"step": 138
},
{
"Batch Mean": -0.047773122787475586,
"accuracy": 0.8828125,
"epoch": 0.345,
"step": 138
},
{
"epoch": 0.3475,
"grad_norm": 5.484972953796387,
"learning_rate": 3.43421052631579e-06,
"loss": 0.3506,
"step": 139
},
{
"Batch Mean": 0.5931057929992676,
"accuracy": 0.828125,
"epoch": 0.3475,
"step": 139
},
{
"epoch": 0.35,
"grad_norm": 7.790515899658203,
"learning_rate": 3.421052631578948e-06,
"loss": 0.3479,
"step": 140
},
{
"Batch Mean": 0.15590977668762207,
"accuracy": 0.84375,
"epoch": 0.35,
"step": 140
},
{
"epoch": 0.3525,
"grad_norm": 6.013650417327881,
"learning_rate": 3.4078947368421057e-06,
"loss": 0.3612,
"step": 141
},
{
"Batch Mean": 0.46294069290161133,
"accuracy": 0.78125,
"epoch": 0.3525,
"step": 141
},
{
"epoch": 0.355,
"grad_norm": 8.1344633102417,
"learning_rate": 3.3947368421052636e-06,
"loss": 0.4946,
"step": 142
},
{
"Batch Mean": -0.12668347358703613,
"accuracy": 0.796875,
"epoch": 0.355,
"step": 142
},
{
"epoch": 0.3575,
"grad_norm": 5.713737487792969,
"learning_rate": 3.381578947368421e-06,
"loss": 0.3976,
"step": 143
},
{
"Batch Mean": -0.4500846862792969,
"accuracy": 0.8203125,
"epoch": 0.3575,
"step": 143
},
{
"epoch": 0.36,
"grad_norm": 7.036086559295654,
"learning_rate": 3.368421052631579e-06,
"loss": 0.3463,
"step": 144
},
{
"Batch Mean": -0.37799644470214844,
"accuracy": 0.7890625,
"epoch": 0.36,
"step": 144
},
{
"epoch": 0.3625,
"grad_norm": 6.1970062255859375,
"learning_rate": 3.355263157894737e-06,
"loss": 0.3877,
"step": 145
},
{
"Batch Mean": -0.10559439659118652,
"accuracy": 0.7578125,
"epoch": 0.3625,
"step": 145
},
{
"epoch": 0.365,
"grad_norm": 5.835550308227539,
"learning_rate": 3.342105263157895e-06,
"loss": 0.3851,
"step": 146
},
{
"Batch Mean": 0.44306373596191406,
"accuracy": 0.75,
"epoch": 0.365,
"step": 146
},
{
"epoch": 0.3675,
"grad_norm": 7.37814474105835,
"learning_rate": 3.3289473684210528e-06,
"loss": 0.4315,
"step": 147
},
{
"Batch Mean": -0.04085433483123779,
"accuracy": 0.796875,
"epoch": 0.3675,
"step": 147
},
{
"epoch": 0.37,
"grad_norm": 6.159938812255859,
"learning_rate": 3.3157894736842107e-06,
"loss": 0.4498,
"step": 148
},
{
"Batch Mean": 0.40547609329223633,
"accuracy": 0.828125,
"epoch": 0.37,
"step": 148
},
{
"epoch": 0.3725,
"grad_norm": 6.4657301902771,
"learning_rate": 3.302631578947369e-06,
"loss": 0.4086,
"step": 149
},
{
"Batch Mean": 0.18158531188964844,
"accuracy": 0.890625,
"epoch": 0.3725,
"step": 149
},
{
"epoch": 0.375,
"grad_norm": 5.317295074462891,
"learning_rate": 3.289473684210527e-06,
"loss": 0.3154,
"step": 150
},
{
"Batch Mean": 0.09602212905883789,
"accuracy": 0.8515625,
"epoch": 0.375,
"step": 150
},
{
"epoch": 0.3775,
"grad_norm": 5.428357124328613,
"learning_rate": 3.276315789473685e-06,
"loss": 0.3893,
"step": 151
},
{
"Batch Mean": -0.3176734447479248,
"accuracy": 0.765625,
"epoch": 0.3775,
"step": 151
},
{
"epoch": 0.38,
"grad_norm": 6.5872087478637695,
"learning_rate": 3.2631578947368423e-06,
"loss": 0.4477,
"step": 152
},
{
"Batch Mean": -0.34884822368621826,
"accuracy": 0.8515625,
"epoch": 0.38,
"step": 152
},
{
"epoch": 0.3825,
"grad_norm": 6.333802223205566,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.3661,
"step": 153
},
{
"Batch Mean": -0.11426492035388947,
"accuracy": 0.8203125,
"epoch": 0.3825,
"step": 153
},
{
"epoch": 0.385,
"grad_norm": 6.027217388153076,
"learning_rate": 3.236842105263158e-06,
"loss": 0.3967,
"step": 154
},
{
"Batch Mean": 0.2238612174987793,
"accuracy": 0.828125,
"epoch": 0.385,
"step": 154
},
{
"epoch": 0.3875,
"grad_norm": 5.728042125701904,
"learning_rate": 3.223684210526316e-06,
"loss": 0.3469,
"step": 155
},
{
"Batch Mean": 0.2194676399230957,
"accuracy": 0.8359375,
"epoch": 0.3875,
"step": 155
},
{
"epoch": 0.39,
"grad_norm": 6.083252906799316,
"learning_rate": 3.210526315789474e-06,
"loss": 0.3512,
"step": 156
},
{
"Batch Mean": 0.4024663269519806,
"accuracy": 0.84375,
"epoch": 0.39,
"step": 156
},
{
"epoch": 0.3925,
"grad_norm": 6.562926292419434,
"learning_rate": 3.197368421052632e-06,
"loss": 0.3141,
"step": 157
},
{
"Batch Mean": -0.39385366439819336,
"accuracy": 0.859375,
"epoch": 0.3925,
"step": 157
},
{
"epoch": 0.395,
"grad_norm": 7.200568199157715,
"learning_rate": 3.1842105263157898e-06,
"loss": 0.3309,
"step": 158
},
{
"Batch Mean": -0.46120989322662354,
"accuracy": 0.796875,
"epoch": 0.395,
"step": 158
},
{
"epoch": 0.3975,
"grad_norm": 8.028575897216797,
"learning_rate": 3.1710526315789477e-06,
"loss": 0.4198,
"step": 159
},
{
"Batch Mean": -0.01939535140991211,
"accuracy": 0.828125,
"epoch": 0.3975,
"step": 159
},
{
"epoch": 0.4,
"grad_norm": 6.117007732391357,
"learning_rate": 3.157894736842105e-06,
"loss": 0.3203,
"step": 160
},
{
"Batch Mean": 0.30785632133483887,
"accuracy": 0.8046875,
"epoch": 0.4,
"step": 160
},
{
"epoch": 0.4025,
"grad_norm": 6.923008918762207,
"learning_rate": 3.144736842105263e-06,
"loss": 0.3664,
"step": 161
},
{
"Batch Mean": 0.7033977508544922,
"accuracy": 0.828125,
"epoch": 0.4025,
"step": 161
},
{
"epoch": 0.405,
"grad_norm": 9.752437591552734,
"learning_rate": 3.131578947368421e-06,
"loss": 0.3907,
"step": 162
},
{
"Batch Mean": -0.347902774810791,
"accuracy": 0.828125,
"epoch": 0.405,
"step": 162
},
{
"epoch": 0.4075,
"grad_norm": 7.6583991050720215,
"learning_rate": 3.1184210526315793e-06,
"loss": 0.374,
"step": 163
},
{
"Batch Mean": -0.569427490234375,
"accuracy": 0.84375,
"epoch": 0.4075,
"step": 163
},
{
"epoch": 0.41,
"grad_norm": 8.497271537780762,
"learning_rate": 3.1052631578947372e-06,
"loss": 0.3917,
"step": 164
},
{
"Batch Mean": -0.094696044921875,
"accuracy": 0.796875,
"epoch": 0.41,
"step": 164
},
{
"epoch": 0.4125,
"grad_norm": 8.36490249633789,
"learning_rate": 3.092105263157895e-06,
"loss": 0.4532,
"step": 165
},
{
"Batch Mean": -0.3759317398071289,
"accuracy": 0.859375,
"epoch": 0.4125,
"step": 165
},
{
"epoch": 0.415,
"grad_norm": 7.4028425216674805,
"learning_rate": 3.078947368421053e-06,
"loss": 0.3049,
"step": 166
},
{
"Batch Mean": 0.3960217833518982,
"accuracy": 0.828125,
"epoch": 0.415,
"step": 166
},
{
"epoch": 0.4175,
"grad_norm": 7.194099426269531,
"learning_rate": 3.065789473684211e-06,
"loss": 0.3577,
"step": 167
},
{
"Batch Mean": 0.5912597179412842,
"accuracy": 0.875,
"epoch": 0.4175,
"step": 167
},
{
"epoch": 0.42,
"grad_norm": 7.954378128051758,
"learning_rate": 3.052631578947369e-06,
"loss": 0.3329,
"step": 168
},
{
"Batch Mean": -0.014994144439697266,
"accuracy": 0.796875,
"epoch": 0.42,
"step": 168
},
{
"epoch": 0.4225,
"grad_norm": 7.277341365814209,
"learning_rate": 3.0394736842105268e-06,
"loss": 0.423,
"step": 169
},
{
"Batch Mean": 0.0011951625347137451,
"accuracy": 0.8671875,
"epoch": 0.4225,
"step": 169
},
{
"epoch": 0.425,
"grad_norm": 6.068748950958252,
"learning_rate": 3.0263157894736843e-06,
"loss": 0.3711,
"step": 170
},
{
"Batch Mean": -0.23294401168823242,
"accuracy": 0.9375,
"epoch": 0.425,
"step": 170
},
{
"epoch": 0.4275,
"grad_norm": 5.278029441833496,
"learning_rate": 3.013157894736842e-06,
"loss": 0.237,
"step": 171
},
{
"Batch Mean": -0.40133750438690186,
"accuracy": 0.7890625,
"epoch": 0.4275,
"step": 171
},
{
"epoch": 0.43,
"grad_norm": 7.065173149108887,
"learning_rate": 3e-06,
"loss": 0.4043,
"step": 172
},
{
"Batch Mean": 0.1028299331665039,
"accuracy": 0.84375,
"epoch": 0.43,
"step": 172
},
{
"epoch": 0.4325,
"grad_norm": 8.89666748046875,
"learning_rate": 2.986842105263158e-06,
"loss": 0.3427,
"step": 173
},
{
"Batch Mean": 0.08979487419128418,
"accuracy": 0.828125,
"epoch": 0.4325,
"step": 173
},
{
"epoch": 0.435,
"grad_norm": 5.712543487548828,
"learning_rate": 2.973684210526316e-06,
"loss": 0.3642,
"step": 174
},
{
"Batch Mean": 0.6362553834915161,
"accuracy": 0.84375,
"epoch": 0.435,
"step": 174
},
{
"epoch": 0.4375,
"grad_norm": 8.968149185180664,
"learning_rate": 2.960526315789474e-06,
"loss": 0.3986,
"step": 175
},
{
"Batch Mean": 0.03002166748046875,
"accuracy": 0.8125,
"epoch": 0.4375,
"step": 175
},
{
"epoch": 0.44,
"grad_norm": 5.981250286102295,
"learning_rate": 2.9473684210526317e-06,
"loss": 0.371,
"step": 176
},
{
"Batch Mean": 0.15925252437591553,
"accuracy": 0.7578125,
"epoch": 0.44,
"step": 176
},
{
"epoch": 0.4425,
"grad_norm": 7.630130290985107,
"learning_rate": 2.93421052631579e-06,
"loss": 0.4573,
"step": 177
},
{
"Batch Mean": -0.5218077898025513,
"accuracy": 0.875,
"epoch": 0.4425,
"step": 177
},
{
"epoch": 0.445,
"grad_norm": 8.40796184539795,
"learning_rate": 2.921052631578948e-06,
"loss": 0.318,
"step": 178
},
{
"Batch Mean": -0.21337127685546875,
"accuracy": 0.8671875,
"epoch": 0.445,
"step": 178
},
{
"epoch": 0.4475,
"grad_norm": 6.911371231079102,
"learning_rate": 2.907894736842106e-06,
"loss": 0.3225,
"step": 179
},
{
"Batch Mean": -0.4858684539794922,
"accuracy": 0.796875,
"epoch": 0.4475,
"step": 179
},
{
"epoch": 0.45,
"grad_norm": 7.585726737976074,
"learning_rate": 2.8947368421052634e-06,
"loss": 0.4169,
"step": 180
},
{
"Batch Mean": 0.6967126131057739,
"accuracy": 0.8359375,
"epoch": 0.45,
"step": 180
},
{
"epoch": 0.4525,
"grad_norm": 9.311128616333008,
"learning_rate": 2.8815789473684213e-06,
"loss": 0.3541,
"step": 181
},
{
"Batch Mean": 0.40191876888275146,
"accuracy": 0.875,
"epoch": 0.4525,
"step": 181
},
{
"epoch": 0.455,
"grad_norm": 6.452785015106201,
"learning_rate": 2.868421052631579e-06,
"loss": 0.2999,
"step": 182
},
{
"Batch Mean": 0.35803985595703125,
"accuracy": 0.765625,
"epoch": 0.455,
"step": 182
},
{
"epoch": 0.4575,
"grad_norm": 7.8335418701171875,
"learning_rate": 2.855263157894737e-06,
"loss": 0.4661,
"step": 183
},
{
"Batch Mean": -0.17188113927841187,
"accuracy": 0.8203125,
"epoch": 0.4575,
"step": 183
},
{
"epoch": 0.46,
"grad_norm": 6.696692943572998,
"learning_rate": 2.842105263157895e-06,
"loss": 0.4276,
"step": 184
},
{
"Batch Mean": -0.7287430763244629,
"accuracy": 0.8125,
"epoch": 0.46,
"step": 184
},
{
"epoch": 0.4625,
"grad_norm": 9.841276168823242,
"learning_rate": 2.828947368421053e-06,
"loss": 0.4608,
"step": 185
},
{
"Batch Mean": -0.4493199586868286,
"accuracy": 0.765625,
"epoch": 0.4625,
"step": 185
},
{
"epoch": 0.465,
"grad_norm": 6.939754486083984,
"learning_rate": 2.815789473684211e-06,
"loss": 0.385,
"step": 186
},
{
"Batch Mean": -0.001141861081123352,
"accuracy": 0.796875,
"epoch": 0.465,
"step": 186
},
{
"epoch": 0.4675,
"grad_norm": 5.819330215454102,
"learning_rate": 2.8026315789473683e-06,
"loss": 0.3872,
"step": 187
},
{
"Batch Mean": 0.11795490980148315,
"accuracy": 0.8984375,
"epoch": 0.4675,
"step": 187
},
{
"epoch": 0.47,
"grad_norm": 5.095602512359619,
"learning_rate": 2.789473684210526e-06,
"loss": 0.2691,
"step": 188
},
{
"Batch Mean": 0.0792817771434784,
"accuracy": 0.7734375,
"epoch": 0.47,
"step": 188
},
{
"epoch": 0.4725,
"grad_norm": 6.671010971069336,
"learning_rate": 2.776315789473684e-06,
"loss": 0.4304,
"step": 189
},
{
"Batch Mean": 0.11675047874450684,
"accuracy": 0.8828125,
"epoch": 0.4725,
"step": 189
},
{
"epoch": 0.475,
"grad_norm": 5.194698810577393,
"learning_rate": 2.7631578947368424e-06,
"loss": 0.3043,
"step": 190
},
{
"Batch Mean": 0.507703423500061,
"accuracy": 0.8359375,
"epoch": 0.475,
"step": 190
},
{
"epoch": 0.4775,
"grad_norm": 8.995157241821289,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.4023,
"step": 191
},
{
"Batch Mean": 0.007342934608459473,
"accuracy": 0.8828125,
"epoch": 0.4775,
"step": 191
},
{
"epoch": 0.48,
"grad_norm": 4.653019428253174,
"learning_rate": 2.7368421052631583e-06,
"loss": 0.2997,
"step": 192
},
{
"Batch Mean": -0.11206340789794922,
"accuracy": 0.84375,
"epoch": 0.48,
"step": 192
},
{
"epoch": 0.4825,
"grad_norm": 5.593299388885498,
"learning_rate": 2.723684210526316e-06,
"loss": 0.3592,
"step": 193
},
{
"Batch Mean": 0.34449005126953125,
"accuracy": 0.8203125,
"epoch": 0.4825,
"step": 193
},
{
"epoch": 0.485,
"grad_norm": 7.729828834533691,
"learning_rate": 2.710526315789474e-06,
"loss": 0.4174,
"step": 194
},
{
"Batch Mean": 0.3511536121368408,
"accuracy": 0.796875,
"epoch": 0.485,
"step": 194
},
{
"epoch": 0.4875,
"grad_norm": 6.8778791427612305,
"learning_rate": 2.697368421052632e-06,
"loss": 0.3958,
"step": 195
},
{
"Batch Mean": -0.4304494857788086,
"accuracy": 0.828125,
"epoch": 0.4875,
"step": 195
},
{
"epoch": 0.49,
"grad_norm": 7.1837897300720215,
"learning_rate": 2.68421052631579e-06,
"loss": 0.3644,
"step": 196
},
{
"Batch Mean": -0.5402705669403076,
"accuracy": 0.8828125,
"epoch": 0.49,
"step": 196
},
{
"epoch": 0.4925,
"grad_norm": 7.711959362030029,
"learning_rate": 2.6710526315789474e-06,
"loss": 0.3134,
"step": 197
},
{
"Batch Mean": -0.4601917266845703,
"accuracy": 0.8203125,
"epoch": 0.4925,
"step": 197
},
{
"epoch": 0.495,
"grad_norm": 7.087433815002441,
"learning_rate": 2.6578947368421053e-06,
"loss": 0.3694,
"step": 198
},
{
"Batch Mean": 0.003908276557922363,
"accuracy": 0.859375,
"epoch": 0.495,
"step": 198
},
{
"epoch": 0.4975,
"grad_norm": 5.323395252227783,
"learning_rate": 2.644736842105263e-06,
"loss": 0.3269,
"step": 199
},
{
"Batch Mean": 0.3632014989852905,
"accuracy": 0.8828125,
"epoch": 0.4975,
"step": 199
},
{
"epoch": 0.5,
"grad_norm": 6.865387439727783,
"learning_rate": 2.631578947368421e-06,
"loss": 0.3261,
"step": 200
},
{
"Batch Mean": 0.3652181625366211,
"accuracy": 0.796875,
"epoch": 0.5,
"step": 200
},
{
"epoch": 0.5025,
"grad_norm": 7.002910614013672,
"learning_rate": 2.618421052631579e-06,
"loss": 0.4199,
"step": 201
},
{
"Batch Mean": -0.0046329498291015625,
"accuracy": 0.8515625,
"epoch": 0.5025,
"step": 201
},
{
"epoch": 0.505,
"grad_norm": 6.56704044342041,
"learning_rate": 2.605263157894737e-06,
"loss": 0.323,
"step": 202
},
{
"Batch Mean": 0.0460200309753418,
"accuracy": 0.8828125,
"epoch": 0.505,
"step": 202
},
{
"epoch": 0.5075,
"grad_norm": 5.225709438323975,
"learning_rate": 2.592105263157895e-06,
"loss": 0.3021,
"step": 203
},
{
"Batch Mean": 0.11225366592407227,
"accuracy": 0.8359375,
"epoch": 0.5075,
"step": 203
},
{
"epoch": 0.51,
"grad_norm": 6.092679023742676,
"learning_rate": 2.578947368421053e-06,
"loss": 0.3584,
"step": 204
},
{
"Batch Mean": -0.08475446701049805,
"accuracy": 0.8125,
"epoch": 0.51,
"step": 204
},
{
"epoch": 0.5125,
"grad_norm": 6.27672004699707,
"learning_rate": 2.565789473684211e-06,
"loss": 0.3787,
"step": 205
},
{
"Batch Mean": 0.041245490312576294,
"accuracy": 0.8125,
"epoch": 0.5125,
"step": 205
},
{
"epoch": 0.515,
"grad_norm": 6.466290473937988,
"learning_rate": 2.552631578947369e-06,
"loss": 0.422,
"step": 206
},
{
"Batch Mean": -0.4405149221420288,
"accuracy": 0.8671875,
"epoch": 0.515,
"step": 206
},
{
"epoch": 0.5175,
"grad_norm": 6.9925456047058105,
"learning_rate": 2.5394736842105265e-06,
"loss": 0.3365,
"step": 207
},
{
"Batch Mean": -0.3760061264038086,
"accuracy": 0.875,
"epoch": 0.5175,
"step": 207
},
{
"epoch": 0.52,
"grad_norm": 6.5773844718933105,
"learning_rate": 2.5263157894736844e-06,
"loss": 0.3214,
"step": 208
},
{
"Batch Mean": 0.4471302032470703,
"accuracy": 0.8359375,
"epoch": 0.52,
"step": 208
},
{
"epoch": 0.5225,
"grad_norm": 7.389886379241943,
"learning_rate": 2.5131578947368423e-06,
"loss": 0.3702,
"step": 209
},
{
"Batch Mean": 0.08597373962402344,
"accuracy": 0.828125,
"epoch": 0.5225,
"step": 209
},
{
"epoch": 0.525,
"grad_norm": 6.150123119354248,
"learning_rate": 2.5e-06,
"loss": 0.4026,
"step": 210
},
{
"Batch Mean": 0.20080137252807617,
"accuracy": 0.8359375,
"epoch": 0.525,
"step": 210
},
{
"epoch": 0.5275,
"grad_norm": 6.605398654937744,
"learning_rate": 2.486842105263158e-06,
"loss": 0.4046,
"step": 211
},
{
"Batch Mean": 0.03975629806518555,
"accuracy": 0.8125,
"epoch": 0.5275,
"step": 211
},
{
"epoch": 0.53,
"grad_norm": 6.139669418334961,
"learning_rate": 2.473684210526316e-06,
"loss": 0.3858,
"step": 212
},
{
"Batch Mean": 0.08810865879058838,
"accuracy": 0.7578125,
"epoch": 0.53,
"step": 212
},
{
"epoch": 0.5325,
"grad_norm": 7.48892068862915,
"learning_rate": 2.460526315789474e-06,
"loss": 0.5241,
"step": 213
},
{
"Batch Mean": 0.02453136444091797,
"accuracy": 0.8203125,
"epoch": 0.5325,
"step": 213
},
{
"epoch": 0.535,
"grad_norm": 5.409359455108643,
"learning_rate": 2.447368421052632e-06,
"loss": 0.3785,
"step": 214
},
{
"Batch Mean": -0.385115385055542,
"accuracy": 0.7734375,
"epoch": 0.535,
"step": 214
},
{
"epoch": 0.5375,
"grad_norm": 6.92533540725708,
"learning_rate": 2.4342105263157898e-06,
"loss": 0.4084,
"step": 215
},
{
"Batch Mean": -0.4509839415550232,
"accuracy": 0.8359375,
"epoch": 0.5375,
"step": 215
},
{
"epoch": 0.54,
"grad_norm": 6.682870388031006,
"learning_rate": 2.4210526315789477e-06,
"loss": 0.3596,
"step": 216
},
{
"Batch Mean": -0.1237635612487793,
"accuracy": 0.8515625,
"epoch": 0.54,
"step": 216
},
{
"epoch": 0.5425,
"grad_norm": 5.399717330932617,
"learning_rate": 2.4078947368421056e-06,
"loss": 0.3281,
"step": 217
},
{
"Batch Mean": 0.13233089447021484,
"accuracy": 0.8671875,
"epoch": 0.5425,
"step": 217
},
{
"epoch": 0.545,
"grad_norm": 5.121485233306885,
"learning_rate": 2.3947368421052635e-06,
"loss": 0.3375,
"step": 218
},
{
"Batch Mean": 0.2012614607810974,
"accuracy": 0.859375,
"epoch": 0.545,
"step": 218
},
{
"epoch": 0.5475,
"grad_norm": 4.99008846282959,
"learning_rate": 2.381578947368421e-06,
"loss": 0.3365,
"step": 219
},
{
"Batch Mean": 0.5368151664733887,
"accuracy": 0.8671875,
"epoch": 0.5475,
"step": 219
},
{
"epoch": 0.55,
"grad_norm": 6.544676303863525,
"learning_rate": 2.368421052631579e-06,
"loss": 0.3187,
"step": 220
},
{
"Batch Mean": 0.022375822067260742,
"accuracy": 0.828125,
"epoch": 0.55,
"step": 220
},
{
"epoch": 0.5525,
"grad_norm": 6.009125232696533,
"learning_rate": 2.355263157894737e-06,
"loss": 0.3904,
"step": 221
},
{
"Batch Mean": 0.28800055384635925,
"accuracy": 0.8125,
"epoch": 0.5525,
"step": 221
},
{
"epoch": 0.555,
"grad_norm": 6.6932268142700195,
"learning_rate": 2.342105263157895e-06,
"loss": 0.4334,
"step": 222
},
{
"Batch Mean": -0.05706942081451416,
"accuracy": 0.8046875,
"epoch": 0.555,
"step": 222
},
{
"epoch": 0.5575,
"grad_norm": 5.802476406097412,
"learning_rate": 2.328947368421053e-06,
"loss": 0.4048,
"step": 223
},
{
"Batch Mean": -0.35072553157806396,
"accuracy": 0.84375,
"epoch": 0.5575,
"step": 223
},
{
"epoch": 0.56,
"grad_norm": 6.107337474822998,
"learning_rate": 2.3157894736842105e-06,
"loss": 0.3219,
"step": 224
},
{
"Batch Mean": -0.7369738817214966,
"accuracy": 0.7734375,
"epoch": 0.56,
"step": 224
},
{
"epoch": 0.5625,
"grad_norm": 9.2208890914917,
"learning_rate": 2.3026315789473684e-06,
"loss": 0.4371,
"step": 225
},
{
"Batch Mean": -0.1235361099243164,
"accuracy": 0.8515625,
"epoch": 0.5625,
"step": 225
},
{
"epoch": 0.565,
"grad_norm": 5.8800506591796875,
"learning_rate": 2.2894736842105263e-06,
"loss": 0.3281,
"step": 226
},
{
"Batch Mean": -0.03460812568664551,
"accuracy": 0.8671875,
"epoch": 0.565,
"step": 226
},
{
"epoch": 0.5675,
"grad_norm": 6.368298053741455,
"learning_rate": 2.2763157894736847e-06,
"loss": 0.2906,
"step": 227
},
{
"Batch Mean": 0.3625354766845703,
"accuracy": 0.7265625,
"epoch": 0.5675,
"step": 227
},
{
"epoch": 0.57,
"grad_norm": 8.628252983093262,
"learning_rate": 2.2631578947368426e-06,
"loss": 0.5205,
"step": 228
},
{
"Batch Mean": 0.08192723989486694,
"accuracy": 0.7578125,
"epoch": 0.57,
"step": 228
},
{
"epoch": 0.5725,
"grad_norm": 6.3980512619018555,
"learning_rate": 2.25e-06,
"loss": 0.4337,
"step": 229
},
{
"Batch Mean": -0.2664133310317993,
"accuracy": 0.8046875,
"epoch": 0.5725,
"step": 229
},
{
"epoch": 0.575,
"grad_norm": 6.476391315460205,
"learning_rate": 2.236842105263158e-06,
"loss": 0.3604,
"step": 230
},
{
"Batch Mean": 0.4527122974395752,
"accuracy": 0.796875,
"epoch": 0.575,
"step": 230
},
{
"epoch": 0.5775,
"grad_norm": 8.095312118530273,
"learning_rate": 2.223684210526316e-06,
"loss": 0.4297,
"step": 231
},
{
"Batch Mean": -0.08241510391235352,
"accuracy": 0.8203125,
"epoch": 0.5775,
"step": 231
},
{
"epoch": 0.58,
"grad_norm": 5.840095043182373,
"learning_rate": 2.2105263157894738e-06,
"loss": 0.3712,
"step": 232
},
{
"Batch Mean": 0.04206228256225586,
"accuracy": 0.8046875,
"epoch": 0.58,
"step": 232
},
{
"epoch": 0.5825,
"grad_norm": 5.922440528869629,
"learning_rate": 2.1973684210526317e-06,
"loss": 0.3937,
"step": 233
},
{
"Batch Mean": -0.1112813949584961,
"accuracy": 0.8671875,
"epoch": 0.5825,
"step": 233
},
{
"epoch": 0.585,
"grad_norm": 4.804201126098633,
"learning_rate": 2.1842105263157896e-06,
"loss": 0.2852,
"step": 234
},
{
"Batch Mean": 0.18291091918945312,
"accuracy": 0.7890625,
"epoch": 0.585,
"step": 234
},
{
"epoch": 0.5875,
"grad_norm": 6.333522796630859,
"learning_rate": 2.1710526315789475e-06,
"loss": 0.3854,
"step": 235
},
{
"Batch Mean": 0.20920276641845703,
"accuracy": 0.8671875,
"epoch": 0.5875,
"step": 235
},
{
"epoch": 0.59,
"grad_norm": 5.435009479522705,
"learning_rate": 2.1578947368421054e-06,
"loss": 0.3728,
"step": 236
},
{
"Batch Mean": -0.10450601577758789,
"accuracy": 0.8515625,
"epoch": 0.59,
"step": 236
},
{
"epoch": 0.5925,
"grad_norm": 4.914745330810547,
"learning_rate": 2.1447368421052633e-06,
"loss": 0.3217,
"step": 237
},
{
"Batch Mean": -0.7518494129180908,
"accuracy": 0.8125,
"epoch": 0.5925,
"step": 237
},
{
"epoch": 0.595,
"grad_norm": 9.58519172668457,
"learning_rate": 2.1315789473684212e-06,
"loss": 0.3795,
"step": 238
},
{
"Batch Mean": 0.0718802809715271,
"accuracy": 0.859375,
"epoch": 0.595,
"step": 238
},
{
"epoch": 0.5975,
"grad_norm": 5.099031448364258,
"learning_rate": 2.118421052631579e-06,
"loss": 0.3308,
"step": 239
},
{
"Batch Mean": 0.4419422149658203,
"accuracy": 0.8203125,
"epoch": 0.5975,
"step": 239
},
{
"epoch": 0.6,
"grad_norm": 11.365894317626953,
"learning_rate": 2.105263157894737e-06,
"loss": 0.418,
"step": 240
},
{
"Batch Mean": 0.03586006164550781,
"accuracy": 0.8203125,
"epoch": 0.6,
"step": 240
},
{
"epoch": 0.6025,
"grad_norm": 5.9789958000183105,
"learning_rate": 2.092105263157895e-06,
"loss": 0.4069,
"step": 241
},
{
"Batch Mean": 0.29011332988739014,
"accuracy": 0.8359375,
"epoch": 0.6025,
"step": 241
},
{
"epoch": 0.605,
"grad_norm": 5.794040203094482,
"learning_rate": 2.078947368421053e-06,
"loss": 0.3484,
"step": 242
},
{
"Batch Mean": -0.5499505996704102,
"accuracy": 0.8359375,
"epoch": 0.605,
"step": 242
},
{
"epoch": 0.6075,
"grad_norm": 8.262065887451172,
"learning_rate": 2.0657894736842108e-06,
"loss": 0.31,
"step": 243
},
{
"Batch Mean": 0.24885821342468262,
"accuracy": 0.828125,
"epoch": 0.6075,
"step": 243
},
{
"epoch": 0.61,
"grad_norm": 6.190489292144775,
"learning_rate": 2.0526315789473687e-06,
"loss": 0.3916,
"step": 244
},
{
"Batch Mean": -0.26743316650390625,
"accuracy": 0.8203125,
"epoch": 0.61,
"step": 244
},
{
"epoch": 0.6125,
"grad_norm": 6.024500846862793,
"learning_rate": 2.0394736842105266e-06,
"loss": 0.3776,
"step": 245
},
{
"Batch Mean": -0.23404550552368164,
"accuracy": 0.7421875,
"epoch": 0.6125,
"step": 245
},
{
"epoch": 0.615,
"grad_norm": 7.985998630523682,
"learning_rate": 2.026315789473684e-06,
"loss": 0.534,
"step": 246
},
{
"Batch Mean": -0.15702039003372192,
"accuracy": 0.8359375,
"epoch": 0.615,
"step": 246
},
{
"epoch": 0.6175,
"grad_norm": 5.515955924987793,
"learning_rate": 2.013157894736842e-06,
"loss": 0.3521,
"step": 247
},
{
"Batch Mean": 0.08024978637695312,
"accuracy": 0.8125,
"epoch": 0.6175,
"step": 247
},
{
"epoch": 0.62,
"grad_norm": 5.263201713562012,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.3411,
"step": 248
},
{
"Batch Mean": 0.4128882884979248,
"accuracy": 0.8203125,
"epoch": 0.62,
"step": 248
},
{
"epoch": 0.6225,
"grad_norm": 6.7587103843688965,
"learning_rate": 1.9868421052631582e-06,
"loss": 0.415,
"step": 249
},
{
"Batch Mean": 0.2651102542877197,
"accuracy": 0.8046875,
"epoch": 0.6225,
"step": 249
},
{
"epoch": 0.625,
"grad_norm": 5.891829013824463,
"learning_rate": 1.973684210526316e-06,
"loss": 0.3854,
"step": 250
},
{
"Batch Mean": 0.06268704682588577,
"accuracy": 0.859375,
"epoch": 0.625,
"step": 250
},
{
"epoch": 0.6275,
"grad_norm": 4.907006740570068,
"learning_rate": 1.9605263157894736e-06,
"loss": 0.3142,
"step": 251
},
{
"Batch Mean": 0.009827375411987305,
"accuracy": 0.84375,
"epoch": 0.6275,
"step": 251
},
{
"epoch": 0.63,
"grad_norm": 5.065036296844482,
"learning_rate": 1.9473684210526315e-06,
"loss": 0.366,
"step": 252
},
{
"Batch Mean": -0.12906122207641602,
"accuracy": 0.859375,
"epoch": 0.63,
"step": 252
},
{
"epoch": 0.6325,
"grad_norm": 4.897366523742676,
"learning_rate": 1.9342105263157895e-06,
"loss": 0.3514,
"step": 253
},
{
"Batch Mean": -0.2323474884033203,
"accuracy": 0.8125,
"epoch": 0.6325,
"step": 253
},
{
"epoch": 0.635,
"grad_norm": 5.733254909515381,
"learning_rate": 1.9210526315789474e-06,
"loss": 0.3787,
"step": 254
},
{
"Batch Mean": -0.1317148208618164,
"accuracy": 0.75,
"epoch": 0.635,
"step": 254
},
{
"epoch": 0.6375,
"grad_norm": 6.474262714385986,
"learning_rate": 1.9078947368421057e-06,
"loss": 0.4739,
"step": 255
},
{
"Batch Mean": -0.0393298864364624,
"accuracy": 0.8203125,
"epoch": 0.6375,
"step": 255
},
{
"epoch": 0.64,
"grad_norm": 4.841159820556641,
"learning_rate": 1.8947368421052634e-06,
"loss": 0.3731,
"step": 256
},
{
"Batch Mean": 0.10869312286376953,
"accuracy": 0.8359375,
"epoch": 0.64,
"step": 256
},
{
"epoch": 0.6425,
"grad_norm": 5.361686706542969,
"learning_rate": 1.8815789473684213e-06,
"loss": 0.3779,
"step": 257
},
{
"Batch Mean": 0.0021914541721343994,
"accuracy": 0.8125,
"epoch": 0.6425,
"step": 257
},
{
"epoch": 0.645,
"grad_norm": 4.836275100708008,
"learning_rate": 1.868421052631579e-06,
"loss": 0.3551,
"step": 258
},
{
"Batch Mean": -0.3041563034057617,
"accuracy": 0.8125,
"epoch": 0.645,
"step": 258
},
{
"epoch": 0.6475,
"grad_norm": 5.775440692901611,
"learning_rate": 1.855263157894737e-06,
"loss": 0.3664,
"step": 259
},
{
"Batch Mean": 0.22134876251220703,
"accuracy": 0.8515625,
"epoch": 0.6475,
"step": 259
},
{
"epoch": 0.65,
"grad_norm": 5.312914848327637,
"learning_rate": 1.8421052631578948e-06,
"loss": 0.3658,
"step": 260
},
{
"Batch Mean": -0.03800356388092041,
"accuracy": 0.8671875,
"epoch": 0.65,
"step": 260
},
{
"epoch": 0.6525,
"grad_norm": 4.872611045837402,
"learning_rate": 1.828947368421053e-06,
"loss": 0.3535,
"step": 261
},
{
"Batch Mean": 0.17658138275146484,
"accuracy": 0.84375,
"epoch": 0.6525,
"step": 261
},
{
"epoch": 0.655,
"grad_norm": 5.4620280265808105,
"learning_rate": 1.8157894736842109e-06,
"loss": 0.3634,
"step": 262
},
{
"Batch Mean": -0.46340298652648926,
"accuracy": 0.84375,
"epoch": 0.655,
"step": 262
},
{
"epoch": 0.6575,
"grad_norm": 7.282100200653076,
"learning_rate": 1.8026315789473685e-06,
"loss": 0.4022,
"step": 263
},
{
"Batch Mean": -0.3810725212097168,
"accuracy": 0.8359375,
"epoch": 0.6575,
"step": 263
},
{
"epoch": 0.66,
"grad_norm": 6.1263227462768555,
"learning_rate": 1.7894736842105265e-06,
"loss": 0.3521,
"step": 264
},
{
"Batch Mean": -0.06390047073364258,
"accuracy": 0.8515625,
"epoch": 0.66,
"step": 264
},
{
"epoch": 0.6625,
"grad_norm": 5.438608646392822,
"learning_rate": 1.7763157894736844e-06,
"loss": 0.3473,
"step": 265
},
{
"Batch Mean": 0.591982364654541,
"accuracy": 0.8125,
"epoch": 0.6625,
"step": 265
},
{
"epoch": 0.665,
"grad_norm": 8.58743667602539,
"learning_rate": 1.7631578947368423e-06,
"loss": 0.4313,
"step": 266
},
{
"Batch Mean": 0.41609740257263184,
"accuracy": 0.8671875,
"epoch": 0.665,
"step": 266
},
{
"epoch": 0.6675,
"grad_norm": 6.497260570526123,
"learning_rate": 1.75e-06,
"loss": 0.289,
"step": 267
},
{
"Batch Mean": 0.10641050338745117,
"accuracy": 0.8203125,
"epoch": 0.6675,
"step": 267
},
{
"epoch": 0.67,
"grad_norm": 6.662869930267334,
"learning_rate": 1.736842105263158e-06,
"loss": 0.3987,
"step": 268
},
{
"Batch Mean": 0.5242280960083008,
"accuracy": 0.8125,
"epoch": 0.67,
"step": 268
},
{
"epoch": 0.6725,
"grad_norm": 7.127610206604004,
"learning_rate": 1.723684210526316e-06,
"loss": 0.3469,
"step": 269
},
{
"Batch Mean": 0.22644498944282532,
"accuracy": 0.875,
"epoch": 0.6725,
"step": 269
},
{
"epoch": 0.675,
"grad_norm": 5.849222183227539,
"learning_rate": 1.710526315789474e-06,
"loss": 0.3443,
"step": 270
},
{
"Batch Mean": -0.5374040603637695,
"accuracy": 0.875,
"epoch": 0.675,
"step": 270
},
{
"epoch": 0.6775,
"grad_norm": 7.3331990242004395,
"learning_rate": 1.6973684210526318e-06,
"loss": 0.3233,
"step": 271
},
{
"Batch Mean": -0.5392742156982422,
"accuracy": 0.859375,
"epoch": 0.6775,
"step": 271
},
{
"epoch": 0.68,
"grad_norm": 8.307405471801758,
"learning_rate": 1.6842105263157895e-06,
"loss": 0.3344,
"step": 272
},
{
"Batch Mean": -0.2637630105018616,
"accuracy": 0.8046875,
"epoch": 0.68,
"step": 272
},
{
"epoch": 0.6825,
"grad_norm": 6.486652374267578,
"learning_rate": 1.6710526315789474e-06,
"loss": 0.3553,
"step": 273
},
{
"Batch Mean": -0.19452619552612305,
"accuracy": 0.8046875,
"epoch": 0.6825,
"step": 273
},
{
"epoch": 0.685,
"grad_norm": 5.73057222366333,
"learning_rate": 1.6578947368421053e-06,
"loss": 0.3737,
"step": 274
},
{
"Batch Mean": 0.2827339172363281,
"accuracy": 0.828125,
"epoch": 0.685,
"step": 274
},
{
"epoch": 0.6875,
"grad_norm": 6.780818939208984,
"learning_rate": 1.6447368421052635e-06,
"loss": 0.3521,
"step": 275
},
{
"Batch Mean": 0.45183420181274414,
"accuracy": 0.84375,
"epoch": 0.6875,
"step": 275
},
{
"epoch": 0.69,
"grad_norm": 8.210237503051758,
"learning_rate": 1.6315789473684212e-06,
"loss": 0.4362,
"step": 276
},
{
"Batch Mean": 0.2845495939254761,
"accuracy": 0.765625,
"epoch": 0.69,
"step": 276
},
{
"epoch": 0.6925,
"grad_norm": 7.878791332244873,
"learning_rate": 1.618421052631579e-06,
"loss": 0.4695,
"step": 277
},
{
"Batch Mean": 0.24765238165855408,
"accuracy": 0.8359375,
"epoch": 0.6925,
"step": 277
},
{
"epoch": 0.695,
"grad_norm": 6.432671070098877,
"learning_rate": 1.605263157894737e-06,
"loss": 0.3256,
"step": 278
},
{
"Batch Mean": -0.01769864559173584,
"accuracy": 0.8046875,
"epoch": 0.695,
"step": 278
},
{
"epoch": 0.6975,
"grad_norm": 6.705868721008301,
"learning_rate": 1.5921052631578949e-06,
"loss": 0.3824,
"step": 279
},
{
"Batch Mean": -0.5642671585083008,
"accuracy": 0.8203125,
"epoch": 0.6975,
"step": 279
},
{
"epoch": 0.7,
"grad_norm": 7.755029201507568,
"learning_rate": 1.5789473684210526e-06,
"loss": 0.349,
"step": 280
},
{
"Batch Mean": -0.49985718727111816,
"accuracy": 0.8125,
"epoch": 0.7,
"step": 280
},
{
"epoch": 0.7025,
"grad_norm": 8.260653495788574,
"learning_rate": 1.5657894736842105e-06,
"loss": 0.438,
"step": 281
},
{
"Batch Mean": -0.30716943740844727,
"accuracy": 0.8515625,
"epoch": 0.7025,
"step": 281
},
{
"epoch": 0.705,
"grad_norm": 6.337352752685547,
"learning_rate": 1.5526315789473686e-06,
"loss": 0.3346,
"step": 282
},
{
"Batch Mean": 0.08174550533294678,
"accuracy": 0.828125,
"epoch": 0.705,
"step": 282
},
{
"epoch": 0.7075,
"grad_norm": 5.9810380935668945,
"learning_rate": 1.5394736842105265e-06,
"loss": 0.3852,
"step": 283
},
{
"Batch Mean": 0.15669594705104828,
"accuracy": 0.8515625,
"epoch": 0.7075,
"step": 283
},
{
"epoch": 0.71,
"grad_norm": 5.605470657348633,
"learning_rate": 1.5263157894736844e-06,
"loss": 0.3186,
"step": 284
},
{
"Batch Mean": -0.0030188560485839844,
"accuracy": 0.8515625,
"epoch": 0.71,
"step": 284
},
{
"epoch": 0.7125,
"grad_norm": 5.514898300170898,
"learning_rate": 1.5131578947368421e-06,
"loss": 0.3126,
"step": 285
},
{
"Batch Mean": 0.1930232048034668,
"accuracy": 0.84375,
"epoch": 0.7125,
"step": 285
},
{
"epoch": 0.715,
"grad_norm": 6.415936470031738,
"learning_rate": 1.5e-06,
"loss": 0.3649,
"step": 286
},
{
"Batch Mean": 0.2962369918823242,
"accuracy": 0.8671875,
"epoch": 0.715,
"step": 286
},
{
"epoch": 0.7175,
"grad_norm": 6.742696285247803,
"learning_rate": 1.486842105263158e-06,
"loss": 0.3298,
"step": 287
},
{
"Batch Mean": -0.0600665807723999,
"accuracy": 0.78125,
"epoch": 0.7175,
"step": 287
},
{
"epoch": 0.72,
"grad_norm": 5.89642858505249,
"learning_rate": 1.4736842105263159e-06,
"loss": 0.4032,
"step": 288
},
{
"Batch Mean": -0.012648344039916992,
"accuracy": 0.890625,
"epoch": 0.72,
"step": 288
},
{
"epoch": 0.7225,
"grad_norm": 4.926860809326172,
"learning_rate": 1.460526315789474e-06,
"loss": 0.2753,
"step": 289
},
{
"Batch Mean": -0.09966588020324707,
"accuracy": 0.8359375,
"epoch": 0.7225,
"step": 289
},
{
"epoch": 0.725,
"grad_norm": 6.0810394287109375,
"learning_rate": 1.4473684210526317e-06,
"loss": 0.3398,
"step": 290
},
{
"Batch Mean": 0.2990548014640808,
"accuracy": 0.8125,
"epoch": 0.725,
"step": 290
},
{
"epoch": 0.7275,
"grad_norm": 6.704870223999023,
"learning_rate": 1.4342105263157896e-06,
"loss": 0.3789,
"step": 291
},
{
"Batch Mean": -0.30427980422973633,
"accuracy": 0.78125,
"epoch": 0.7275,
"step": 291
},
{
"epoch": 0.73,
"grad_norm": 7.633101940155029,
"learning_rate": 1.4210526315789475e-06,
"loss": 0.4531,
"step": 292
},
{
"Batch Mean": -0.5276459455490112,
"accuracy": 0.765625,
"epoch": 0.73,
"step": 292
},
{
"epoch": 0.7325,
"grad_norm": 7.3620829582214355,
"learning_rate": 1.4078947368421054e-06,
"loss": 0.4443,
"step": 293
},
{
"Batch Mean": 0.03517109155654907,
"accuracy": 0.8515625,
"epoch": 0.7325,
"step": 293
},
{
"epoch": 0.735,
"grad_norm": 5.785881996154785,
"learning_rate": 1.394736842105263e-06,
"loss": 0.314,
"step": 294
},
{
"Batch Mean": 0.1681404411792755,
"accuracy": 0.8125,
"epoch": 0.735,
"step": 294
},
{
"epoch": 0.7375,
"grad_norm": 6.820896148681641,
"learning_rate": 1.3815789473684212e-06,
"loss": 0.3912,
"step": 295
},
{
"Batch Mean": -0.14338159561157227,
"accuracy": 0.84375,
"epoch": 0.7375,
"step": 295
},
{
"epoch": 0.74,
"grad_norm": 5.598686695098877,
"learning_rate": 1.3684210526315791e-06,
"loss": 0.3388,
"step": 296
},
{
"Batch Mean": 0.2691183090209961,
"accuracy": 0.8515625,
"epoch": 0.74,
"step": 296
},
{
"epoch": 0.7425,
"grad_norm": 6.6116204261779785,
"learning_rate": 1.355263157894737e-06,
"loss": 0.3936,
"step": 297
},
{
"Batch Mean": 0.0850672721862793,
"accuracy": 0.8515625,
"epoch": 0.7425,
"step": 297
},
{
"epoch": 0.745,
"grad_norm": 5.332149028778076,
"learning_rate": 1.342105263157895e-06,
"loss": 0.3688,
"step": 298
},
{
"Batch Mean": 0.010596275329589844,
"accuracy": 0.8125,
"epoch": 0.745,
"step": 298
},
{
"epoch": 0.7475,
"grad_norm": 5.3448333740234375,
"learning_rate": 1.3289473684210526e-06,
"loss": 0.3777,
"step": 299
},
{
"Batch Mean": 0.0567474365234375,
"accuracy": 0.875,
"epoch": 0.7475,
"step": 299
},
{
"epoch": 0.75,
"grad_norm": 5.18348503112793,
"learning_rate": 1.3157894736842106e-06,
"loss": 0.3376,
"step": 300
},
{
"Batch Mean": 0.3150123357772827,
"accuracy": 0.8671875,
"epoch": 0.75,
"step": 300
},
{
"epoch": 0.7525,
"grad_norm": 5.695895671844482,
"learning_rate": 1.3026315789473685e-06,
"loss": 0.3239,
"step": 301
},
{
"Batch Mean": -0.2845635414123535,
"accuracy": 0.84375,
"epoch": 0.7525,
"step": 301
},
{
"epoch": 0.755,
"grad_norm": 5.938024997711182,
"learning_rate": 1.2894736842105266e-06,
"loss": 0.3474,
"step": 302
},
{
"Batch Mean": -0.23464512825012207,
"accuracy": 0.828125,
"epoch": 0.755,
"step": 302
},
{
"epoch": 0.7575,
"grad_norm": 6.142772674560547,
"learning_rate": 1.2763157894736845e-06,
"loss": 0.3871,
"step": 303
},
{
"Batch Mean": -0.227769136428833,
"accuracy": 0.8359375,
"epoch": 0.7575,
"step": 303
},
{
"epoch": 0.76,
"grad_norm": 5.593533039093018,
"learning_rate": 1.2631578947368422e-06,
"loss": 0.3141,
"step": 304
},
{
"Batch Mean": 0.30762624740600586,
"accuracy": 0.875,
"epoch": 0.76,
"step": 304
},
{
"epoch": 0.7625,
"grad_norm": 5.888352870941162,
"learning_rate": 1.25e-06,
"loss": 0.2934,
"step": 305
},
{
"Batch Mean": 0.0720512866973877,
"accuracy": 0.828125,
"epoch": 0.7625,
"step": 305
},
{
"epoch": 0.765,
"grad_norm": 5.541831970214844,
"learning_rate": 1.236842105263158e-06,
"loss": 0.377,
"step": 306
},
{
"Batch Mean": 0.5506090521812439,
"accuracy": 0.78125,
"epoch": 0.765,
"step": 306
},
{
"epoch": 0.7675,
"grad_norm": 8.604269981384277,
"learning_rate": 1.223684210526316e-06,
"loss": 0.4982,
"step": 307
},
{
"Batch Mean": 0.12272977828979492,
"accuracy": 0.828125,
"epoch": 0.7675,
"step": 307
},
{
"epoch": 0.77,
"grad_norm": 5.901643753051758,
"learning_rate": 1.2105263157894738e-06,
"loss": 0.3943,
"step": 308
},
{
"Batch Mean": -0.10920721292495728,
"accuracy": 0.8125,
"epoch": 0.77,
"step": 308
},
{
"epoch": 0.7725,
"grad_norm": 5.373671054840088,
"learning_rate": 1.1973684210526317e-06,
"loss": 0.3375,
"step": 309
},
{
"Batch Mean": -0.25300323963165283,
"accuracy": 0.8515625,
"epoch": 0.7725,
"step": 309
},
{
"epoch": 0.775,
"grad_norm": 5.715999126434326,
"learning_rate": 1.1842105263157894e-06,
"loss": 0.3147,
"step": 310
},
{
"Batch Mean": 0.27323639392852783,
"accuracy": 0.8203125,
"epoch": 0.775,
"step": 310
},
{
"epoch": 0.7775,
"grad_norm": 5.806972503662109,
"learning_rate": 1.1710526315789476e-06,
"loss": 0.3623,
"step": 311
},
{
"Batch Mean": -0.370880126953125,
"accuracy": 0.8203125,
"epoch": 0.7775,
"step": 311
},
{
"epoch": 0.78,
"grad_norm": 6.533487319946289,
"learning_rate": 1.1578947368421053e-06,
"loss": 0.3421,
"step": 312
},
{
"Batch Mean": -0.19013309478759766,
"accuracy": 0.84375,
"epoch": 0.78,
"step": 312
},
{
"epoch": 0.7825,
"grad_norm": 6.225650787353516,
"learning_rate": 1.1447368421052632e-06,
"loss": 0.3852,
"step": 313
},
{
"Batch Mean": 0.035245418548583984,
"accuracy": 0.90625,
"epoch": 0.7825,
"step": 313
},
{
"epoch": 0.785,
"grad_norm": 4.540152549743652,
"learning_rate": 1.1315789473684213e-06,
"loss": 0.2876,
"step": 314
},
{
"Batch Mean": -0.015190362930297852,
"accuracy": 0.796875,
"epoch": 0.785,
"step": 314
},
{
"epoch": 0.7875,
"grad_norm": 6.136155605316162,
"learning_rate": 1.118421052631579e-06,
"loss": 0.4072,
"step": 315
},
{
"Batch Mean": 0.13713574409484863,
"accuracy": 0.8203125,
"epoch": 0.7875,
"step": 315
},
{
"epoch": 0.79,
"grad_norm": 6.242608070373535,
"learning_rate": 1.1052631578947369e-06,
"loss": 0.3796,
"step": 316
},
{
"Batch Mean": 0.03120565414428711,
"accuracy": 0.8828125,
"epoch": 0.79,
"step": 316
},
{
"epoch": 0.7925,
"grad_norm": 5.207064628601074,
"learning_rate": 1.0921052631578948e-06,
"loss": 0.3056,
"step": 317
},
{
"Batch Mean": 0.25341320037841797,
"accuracy": 0.828125,
"epoch": 0.7925,
"step": 317
},
{
"epoch": 0.795,
"grad_norm": 7.0822882652282715,
"learning_rate": 1.0789473684210527e-06,
"loss": 0.3858,
"step": 318
},
{
"Batch Mean": -0.12684790790081024,
"accuracy": 0.8515625,
"epoch": 0.795,
"step": 318
},
{
"epoch": 0.7975,
"grad_norm": 7.24815559387207,
"learning_rate": 1.0657894736842106e-06,
"loss": 0.429,
"step": 319
},
{
"Batch Mean": 0.04200387001037598,
"accuracy": 0.828125,
"epoch": 0.7975,
"step": 319
},
{
"epoch": 0.8,
"grad_norm": 6.244017601013184,
"learning_rate": 1.0526315789473685e-06,
"loss": 0.3508,
"step": 320
},
{
"Batch Mean": 0.11800074577331543,
"accuracy": 0.84375,
"epoch": 0.8,
"step": 320
},
{
"epoch": 0.8025,
"grad_norm": 6.847427845001221,
"learning_rate": 1.0394736842105264e-06,
"loss": 0.3942,
"step": 321
},
{
"Batch Mean": 0.21634423732757568,
"accuracy": 0.875,
"epoch": 0.8025,
"step": 321
},
{
"epoch": 0.805,
"grad_norm": 5.618517875671387,
"learning_rate": 1.0263157894736843e-06,
"loss": 0.3045,
"step": 322
},
{
"Batch Mean": -0.12757059931755066,
"accuracy": 0.828125,
"epoch": 0.805,
"step": 322
},
{
"epoch": 0.8075,
"grad_norm": 6.686208248138428,
"learning_rate": 1.013157894736842e-06,
"loss": 0.4245,
"step": 323
},
{
"Batch Mean": -0.531203031539917,
"accuracy": 0.8515625,
"epoch": 0.8075,
"step": 323
},
{
"epoch": 0.81,
"grad_norm": 7.686892032623291,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.3589,
"step": 324
},
{
"Batch Mean": -0.38658928871154785,
"accuracy": 0.8671875,
"epoch": 0.81,
"step": 324
},
{
"epoch": 0.8125,
"grad_norm": 6.230345249176025,
"learning_rate": 9.86842105263158e-07,
"loss": 0.307,
"step": 325
},
{
"Batch Mean": -0.006134852766990662,
"accuracy": 0.8203125,
"epoch": 0.8125,
"step": 325
},
{
"epoch": 0.815,
"grad_norm": 5.745806694030762,
"learning_rate": 9.736842105263158e-07,
"loss": 0.368,
"step": 326
},
{
"Batch Mean": 0.13355731964111328,
"accuracy": 0.8515625,
"epoch": 0.815,
"step": 326
},
{
"epoch": 0.8175,
"grad_norm": 5.20259952545166,
"learning_rate": 9.605263157894737e-07,
"loss": 0.319,
"step": 327
},
{
"Batch Mean": -0.030962467193603516,
"accuracy": 0.8203125,
"epoch": 0.8175,
"step": 327
},
{
"epoch": 0.82,
"grad_norm": 6.881910800933838,
"learning_rate": 9.473684210526317e-07,
"loss": 0.3913,
"step": 328
},
{
"Batch Mean": -0.2659430503845215,
"accuracy": 0.875,
"epoch": 0.82,
"step": 328
},
{
"epoch": 0.8225,
"grad_norm": 5.749542713165283,
"learning_rate": 9.342105263157895e-07,
"loss": 0.3125,
"step": 329
},
{
"Batch Mean": -0.022816181182861328,
"accuracy": 0.8515625,
"epoch": 0.8225,
"step": 329
},
{
"epoch": 0.825,
"grad_norm": 5.963510990142822,
"learning_rate": 9.210526315789474e-07,
"loss": 0.3554,
"step": 330
},
{
"Batch Mean": 0.1255784034729004,
"accuracy": 0.8203125,
"epoch": 0.825,
"step": 330
},
{
"epoch": 0.8275,
"grad_norm": 6.184539318084717,
"learning_rate": 9.078947368421054e-07,
"loss": 0.3816,
"step": 331
},
{
"Batch Mean": 0.34945011138916016,
"accuracy": 0.8515625,
"epoch": 0.8275,
"step": 331
},
{
"epoch": 0.83,
"grad_norm": 6.493666172027588,
"learning_rate": 8.947368421052632e-07,
"loss": 0.3897,
"step": 332
},
{
"Batch Mean": 0.5187950730323792,
"accuracy": 0.8046875,
"epoch": 0.83,
"step": 332
},
{
"epoch": 0.8325,
"grad_norm": 8.34493350982666,
"learning_rate": 8.815789473684211e-07,
"loss": 0.4744,
"step": 333
},
{
"Batch Mean": 0.14876115322113037,
"accuracy": 0.859375,
"epoch": 0.8325,
"step": 333
},
{
"epoch": 0.835,
"grad_norm": 5.86116886138916,
"learning_rate": 8.68421052631579e-07,
"loss": 0.346,
"step": 334
},
{
"Batch Mean": 0.2181391716003418,
"accuracy": 0.8828125,
"epoch": 0.835,
"step": 334
},
{
"epoch": 0.8375,
"grad_norm": 5.708371162414551,
"learning_rate": 8.55263157894737e-07,
"loss": 0.3156,
"step": 335
},
{
"Batch Mean": -0.09983713924884796,
"accuracy": 0.8046875,
"epoch": 0.8375,
"step": 335
},
{
"epoch": 0.84,
"grad_norm": 6.523188591003418,
"learning_rate": 8.421052631578948e-07,
"loss": 0.4152,
"step": 336
},
{
"Batch Mean": -0.3769869804382324,
"accuracy": 0.8125,
"epoch": 0.84,
"step": 336
},
{
"epoch": 0.8425,
"grad_norm": 6.4607038497924805,
"learning_rate": 8.289473684210527e-07,
"loss": 0.3696,
"step": 337
},
{
"Batch Mean": -0.4478006362915039,
"accuracy": 0.8984375,
"epoch": 0.8425,
"step": 337
},
{
"epoch": 0.845,
"grad_norm": 6.37753438949585,
"learning_rate": 8.157894736842106e-07,
"loss": 0.294,
"step": 338
},
{
"Batch Mean": -0.027586758136749268,
"accuracy": 0.8515625,
"epoch": 0.845,
"step": 338
},
{
"epoch": 0.8475,
"grad_norm": 5.023904323577881,
"learning_rate": 8.026315789473685e-07,
"loss": 0.3518,
"step": 339
},
{
"Batch Mean": 0.20917105674743652,
"accuracy": 0.8515625,
"epoch": 0.8475,
"step": 339
},
{
"epoch": 0.85,
"grad_norm": 6.015613555908203,
"learning_rate": 7.894736842105263e-07,
"loss": 0.3785,
"step": 340
},
{
"Batch Mean": 0.3118593096733093,
"accuracy": 0.8046875,
"epoch": 0.85,
"step": 340
},
{
"epoch": 0.8525,
"grad_norm": 6.1103515625,
"learning_rate": 7.763157894736843e-07,
"loss": 0.3737,
"step": 341
},
{
"Batch Mean": -0.126387357711792,
"accuracy": 0.890625,
"epoch": 0.8525,
"step": 341
},
{
"epoch": 0.855,
"grad_norm": 5.179422855377197,
"learning_rate": 7.631578947368422e-07,
"loss": 0.2561,
"step": 342
},
{
"Batch Mean": -0.09555430710315704,
"accuracy": 0.7890625,
"epoch": 0.855,
"step": 342
},
{
"epoch": 0.8575,
"grad_norm": 6.1140360832214355,
"learning_rate": 7.5e-07,
"loss": 0.3845,
"step": 343
},
{
"Batch Mean": 0.18266582489013672,
"accuracy": 0.8359375,
"epoch": 0.8575,
"step": 343
},
{
"epoch": 0.86,
"grad_norm": 5.9565887451171875,
"learning_rate": 7.368421052631579e-07,
"loss": 0.3732,
"step": 344
},
{
"Batch Mean": -0.2062819004058838,
"accuracy": 0.8359375,
"epoch": 0.86,
"step": 344
},
{
"epoch": 0.8625,
"grad_norm": 5.543845176696777,
"learning_rate": 7.236842105263158e-07,
"loss": 0.3262,
"step": 345
},
{
"Batch Mean": -0.1675710678100586,
"accuracy": 0.84375,
"epoch": 0.8625,
"step": 345
},
{
"epoch": 0.865,
"grad_norm": 5.6085524559021,
"learning_rate": 7.105263157894737e-07,
"loss": 0.3539,
"step": 346
},
{
"Batch Mean": 0.1928102970123291,
"accuracy": 0.765625,
"epoch": 0.865,
"step": 346
},
{
"epoch": 0.8675,
"grad_norm": 7.385869026184082,
"learning_rate": 6.973684210526316e-07,
"loss": 0.5133,
"step": 347
},
{
"Batch Mean": -0.34415435791015625,
"accuracy": 0.828125,
"epoch": 0.8675,
"step": 347
},
{
"epoch": 0.87,
"grad_norm": 6.721138954162598,
"learning_rate": 6.842105263157896e-07,
"loss": 0.3757,
"step": 348
},
{
"Batch Mean": -0.07728886604309082,
"accuracy": 0.8515625,
"epoch": 0.87,
"step": 348
},
{
"epoch": 0.8725,
"grad_norm": 5.331783294677734,
"learning_rate": 6.710526315789475e-07,
"loss": 0.3591,
"step": 349
},
{
"Batch Mean": -0.0012063980102539062,
"accuracy": 0.8359375,
"epoch": 0.8725,
"step": 349
},
{
"epoch": 0.875,
"grad_norm": 5.8661956787109375,
"learning_rate": 6.578947368421053e-07,
"loss": 0.3931,
"step": 350
},
{
"Batch Mean": -0.13451290130615234,
"accuracy": 0.8359375,
"epoch": 0.875,
"step": 350
},
{
"epoch": 0.8775,
"grad_norm": 5.253983020782471,
"learning_rate": 6.447368421052633e-07,
"loss": 0.3574,
"step": 351
},
{
"Batch Mean": 0.041100263595581055,
"accuracy": 0.765625,
"epoch": 0.8775,
"step": 351
},
{
"epoch": 0.88,
"grad_norm": 6.266009330749512,
"learning_rate": 6.315789473684211e-07,
"loss": 0.4274,
"step": 352
},
{
"Batch Mean": 0.017366766929626465,
"accuracy": 0.875,
"epoch": 0.88,
"step": 352
},
{
"epoch": 0.8825,
"grad_norm": 5.189981460571289,
"learning_rate": 6.18421052631579e-07,
"loss": 0.3485,
"step": 353
},
{
"Batch Mean": 0.019828438758850098,
"accuracy": 0.84375,
"epoch": 0.8825,
"step": 353
},
{
"epoch": 0.885,
"grad_norm": 5.647443771362305,
"learning_rate": 6.052631578947369e-07,
"loss": 0.3626,
"step": 354
},
{
"Batch Mean": 0.48105788230895996,
"accuracy": 0.8671875,
"epoch": 0.885,
"step": 354
},
{
"epoch": 0.8875,
"grad_norm": 6.2056779861450195,
"learning_rate": 5.921052631578947e-07,
"loss": 0.286,
"step": 355
},
{
"Batch Mean": 0.32272911071777344,
"accuracy": 0.828125,
"epoch": 0.8875,
"step": 355
},
{
"epoch": 0.89,
"grad_norm": 6.290451526641846,
"learning_rate": 5.789473684210526e-07,
"loss": 0.3615,
"step": 356
},
{
"Batch Mean": -0.0491328239440918,
"accuracy": 0.8046875,
"epoch": 0.89,
"step": 356
},
{
"epoch": 0.8925,
"grad_norm": 5.412110805511475,
"learning_rate": 5.657894736842106e-07,
"loss": 0.3734,
"step": 357
},
{
"Batch Mean": 0.009822294116020203,
"accuracy": 0.8359375,
"epoch": 0.8925,
"step": 357
},
{
"epoch": 0.895,
"grad_norm": 5.308077812194824,
"learning_rate": 5.526315789473684e-07,
"loss": 0.3564,
"step": 358
},
{
"Batch Mean": 0.3762497901916504,
"accuracy": 0.8515625,
"epoch": 0.895,
"step": 358
},
{
"epoch": 0.8975,
"grad_norm": 6.504393577575684,
"learning_rate": 5.394736842105264e-07,
"loss": 0.3592,
"step": 359
},
{
"Batch Mean": 0.03059971332550049,
"accuracy": 0.8828125,
"epoch": 0.8975,
"step": 359
},
{
"epoch": 0.9,
"grad_norm": 5.554889678955078,
"learning_rate": 5.263157894736843e-07,
"loss": 0.3073,
"step": 360
},
{
"Batch Mean": -0.11461305618286133,
"accuracy": 0.8671875,
"epoch": 0.9,
"step": 360
},
{
"epoch": 0.9025,
"grad_norm": 5.032441139221191,
"learning_rate": 5.131578947368422e-07,
"loss": 0.3115,
"step": 361
},
{
"Batch Mean": 0.0866403579711914,
"accuracy": 0.8359375,
"epoch": 0.9025,
"step": 361
},
{
"epoch": 0.905,
"grad_norm": 5.989850044250488,
"learning_rate": 5.000000000000001e-07,
"loss": 0.3859,
"step": 362
},
{
"Batch Mean": -0.08459019660949707,
"accuracy": 0.90625,
"epoch": 0.905,
"step": 362
},
{
"epoch": 0.9075,
"grad_norm": 5.128976821899414,
"learning_rate": 4.868421052631579e-07,
"loss": 0.2912,
"step": 363
},
{
"Batch Mean": -0.10844220221042633,
"accuracy": 0.8515625,
"epoch": 0.9075,
"step": 363
},
{
"epoch": 0.91,
"grad_norm": 5.089634895324707,
"learning_rate": 4.7368421052631585e-07,
"loss": 0.3585,
"step": 364
},
{
"Batch Mean": 0.07012319564819336,
"accuracy": 0.8203125,
"epoch": 0.91,
"step": 364
},
{
"epoch": 0.9125,
"grad_norm": 5.767172813415527,
"learning_rate": 4.605263157894737e-07,
"loss": 0.3794,
"step": 365
},
{
"Batch Mean": 0.0005617141723632812,
"accuracy": 0.84375,
"epoch": 0.9125,
"step": 365
},
{
"epoch": 0.915,
"grad_norm": 5.322833061218262,
"learning_rate": 4.473684210526316e-07,
"loss": 0.3441,
"step": 366
},
{
"Batch Mean": -0.19214963912963867,
"accuracy": 0.8359375,
"epoch": 0.915,
"step": 366
},
{
"epoch": 0.9175,
"grad_norm": 6.23066520690918,
"learning_rate": 4.342105263157895e-07,
"loss": 0.3964,
"step": 367
},
{
"Batch Mean": -0.04979205131530762,
"accuracy": 0.875,
"epoch": 0.9175,
"step": 367
},
{
"epoch": 0.92,
"grad_norm": 5.3933868408203125,
"learning_rate": 4.210526315789474e-07,
"loss": 0.3192,
"step": 368
},
{
"Batch Mean": -0.09100174903869629,
"accuracy": 0.8203125,
"epoch": 0.92,
"step": 368
},
{
"epoch": 0.9225,
"grad_norm": 5.608683109283447,
"learning_rate": 4.078947368421053e-07,
"loss": 0.3278,
"step": 369
},
{
"Batch Mean": 0.14683294296264648,
"accuracy": 0.8359375,
"epoch": 0.9225,
"step": 369
},
{
"epoch": 0.925,
"grad_norm": 6.044548034667969,
"learning_rate": 3.9473684210526315e-07,
"loss": 0.3752,
"step": 370
},
{
"Batch Mean": 0.013927459716796875,
"accuracy": 0.828125,
"epoch": 0.925,
"step": 370
},
{
"epoch": 0.9275,
"grad_norm": 6.061974048614502,
"learning_rate": 3.815789473684211e-07,
"loss": 0.3899,
"step": 371
},
{
"Batch Mean": -0.047991156578063965,
"accuracy": 0.875,
"epoch": 0.9275,
"step": 371
},
{
"epoch": 0.93,
"grad_norm": 5.14998197555542,
"learning_rate": 3.6842105263157896e-07,
"loss": 0.3059,
"step": 372
},
{
"Batch Mean": 0.054487720131874084,
"accuracy": 0.8515625,
"epoch": 0.93,
"step": 372
},
{
"epoch": 0.9325,
"grad_norm": 5.5619215965271,
"learning_rate": 3.5526315789473687e-07,
"loss": 0.3292,
"step": 373
},
{
"Batch Mean": 0.013424873352050781,
"accuracy": 0.8046875,
"epoch": 0.9325,
"step": 373
},
{
"epoch": 0.935,
"grad_norm": 6.551247596740723,
"learning_rate": 3.421052631578948e-07,
"loss": 0.4177,
"step": 374
},
{
"Batch Mean": 0.05659294128417969,
"accuracy": 0.8671875,
"epoch": 0.935,
"step": 374
},
{
"epoch": 0.9375,
"grad_norm": 5.278907775878906,
"learning_rate": 3.2894736842105264e-07,
"loss": 0.2964,
"step": 375
},
{
"Batch Mean": 0.29555362462997437,
"accuracy": 0.8203125,
"epoch": 0.9375,
"step": 375
},
{
"epoch": 0.94,
"grad_norm": 6.179633617401123,
"learning_rate": 3.1578947368421055e-07,
"loss": 0.3513,
"step": 376
},
{
"Batch Mean": -0.18094325065612793,
"accuracy": 0.84375,
"epoch": 0.94,
"step": 376
},
{
"epoch": 0.9425,
"grad_norm": 7.224347114562988,
"learning_rate": 3.0263157894736846e-07,
"loss": 0.3757,
"step": 377
},
{
"Batch Mean": 0.05890762805938721,
"accuracy": 0.828125,
"epoch": 0.9425,
"step": 377
},
{
"epoch": 0.945,
"grad_norm": 5.712611675262451,
"learning_rate": 2.894736842105263e-07,
"loss": 0.3725,
"step": 378
},
{
"Batch Mean": 0.1418449878692627,
"accuracy": 0.7890625,
"epoch": 0.945,
"step": 378
},
{
"epoch": 0.9475,
"grad_norm": 6.286527156829834,
"learning_rate": 2.763157894736842e-07,
"loss": 0.3898,
"step": 379
},
{
"Batch Mean": 0.05738520622253418,
"accuracy": 0.859375,
"epoch": 0.9475,
"step": 379
},
{
"epoch": 0.95,
"grad_norm": 4.583769798278809,
"learning_rate": 2.6315789473684213e-07,
"loss": 0.3115,
"step": 380
},
{
"Batch Mean": -0.22460556030273438,
"accuracy": 0.8359375,
"epoch": 0.95,
"step": 380
},
{
"epoch": 0.9525,
"grad_norm": 5.6696977615356445,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.366,
"step": 381
},
{
"Batch Mean": -0.3225395679473877,
"accuracy": 0.859375,
"epoch": 0.9525,
"step": 381
},
{
"epoch": 0.955,
"grad_norm": 6.212532997131348,
"learning_rate": 2.3684210526315792e-07,
"loss": 0.3249,
"step": 382
},
{
"Batch Mean": 0.14051055908203125,
"accuracy": 0.8203125,
"epoch": 0.955,
"step": 382
},
{
"epoch": 0.9575,
"grad_norm": 6.394198417663574,
"learning_rate": 2.236842105263158e-07,
"loss": 0.3984,
"step": 383
},
{
"Batch Mean": -0.1702423095703125,
"accuracy": 0.84375,
"epoch": 0.9575,
"step": 383
},
{
"epoch": 0.96,
"grad_norm": 5.651772975921631,
"learning_rate": 2.105263157894737e-07,
"loss": 0.359,
"step": 384
},
{
"Batch Mean": -0.05308723449707031,
"accuracy": 0.8359375,
"epoch": 0.96,
"step": 384
},
{
"epoch": 0.9625,
"grad_norm": 6.010076522827148,
"learning_rate": 1.9736842105263157e-07,
"loss": 0.411,
"step": 385
},
{
"Batch Mean": -0.2975482940673828,
"accuracy": 0.8671875,
"epoch": 0.9625,
"step": 385
},
{
"epoch": 0.965,
"grad_norm": 6.509852886199951,
"learning_rate": 1.8421052631578948e-07,
"loss": 0.3368,
"step": 386
},
{
"Batch Mean": 0.14680981636047363,
"accuracy": 0.8671875,
"epoch": 0.965,
"step": 386
},
{
"epoch": 0.9675,
"grad_norm": 5.1291608810424805,
"learning_rate": 1.710526315789474e-07,
"loss": 0.3169,
"step": 387
},
{
"Batch Mean": 0.26676082611083984,
"accuracy": 0.921875,
"epoch": 0.9675,
"step": 387
},
{
"epoch": 0.97,
"grad_norm": 5.539949893951416,
"learning_rate": 1.5789473684210527e-07,
"loss": 0.2404,
"step": 388
},
{
"Batch Mean": -0.08182518929243088,
"accuracy": 0.890625,
"epoch": 0.97,
"step": 388
},
{
"epoch": 0.9725,
"grad_norm": 5.0751051902771,
"learning_rate": 1.4473684210526316e-07,
"loss": 0.3158,
"step": 389
},
{
"Batch Mean": -0.0028634369373321533,
"accuracy": 0.890625,
"epoch": 0.9725,
"step": 389
},
{
"epoch": 0.975,
"grad_norm": 5.241757869720459,
"learning_rate": 1.3157894736842107e-07,
"loss": 0.3581,
"step": 390
},
{
"Batch Mean": 0.15844666957855225,
"accuracy": 0.828125,
"epoch": 0.975,
"step": 390
},
{
"epoch": 0.9775,
"grad_norm": 6.040011405944824,
"learning_rate": 1.1842105263157896e-07,
"loss": 0.3807,
"step": 391
},
{
"Batch Mean": 0.10521447658538818,
"accuracy": 0.828125,
"epoch": 0.9775,
"step": 391
},
{
"epoch": 0.98,
"grad_norm": 6.034479141235352,
"learning_rate": 1.0526315789473685e-07,
"loss": 0.3806,
"step": 392
},
{
"Batch Mean": 0.2882518768310547,
"accuracy": 0.84375,
"epoch": 0.98,
"step": 392
},
{
"epoch": 0.9825,
"grad_norm": 5.66761589050293,
"learning_rate": 9.210526315789474e-08,
"loss": 0.3027,
"step": 393
},
{
"Batch Mean": -0.028545856475830078,
"accuracy": 0.84375,
"epoch": 0.9825,
"step": 393
},
{
"epoch": 0.985,
"grad_norm": 6.283968925476074,
"learning_rate": 7.894736842105264e-08,
"loss": 0.4155,
"step": 394
},
{
"Batch Mean": 0.27719879150390625,
"accuracy": 0.828125,
"epoch": 0.985,
"step": 394
},
{
"epoch": 0.9875,
"grad_norm": 7.055726051330566,
"learning_rate": 6.578947368421053e-08,
"loss": 0.4114,
"step": 395
},
{
"Batch Mean": -0.04994678497314453,
"accuracy": 0.796875,
"epoch": 0.9875,
"step": 395
},
{
"epoch": 0.99,
"grad_norm": 6.046277046203613,
"learning_rate": 5.263157894736842e-08,
"loss": 0.3805,
"step": 396
},
{
"Batch Mean": -0.2553279399871826,
"accuracy": 0.828125,
"epoch": 0.99,
"step": 396
},
{
"epoch": 0.9925,
"grad_norm": 6.75329065322876,
"learning_rate": 3.947368421052632e-08,
"loss": 0.3899,
"step": 397
},
{
"Batch Mean": -0.08599328994750977,
"accuracy": 0.8671875,
"epoch": 0.9925,
"step": 397
},
{
"epoch": 0.995,
"grad_norm": 5.147411823272705,
"learning_rate": 2.631578947368421e-08,
"loss": 0.3452,
"step": 398
},
{
"Batch Mean": -0.3474156856536865,
"accuracy": 0.8359375,
"epoch": 0.995,
"step": 398
},
{
"epoch": 0.9975,
"grad_norm": 6.974626541137695,
"learning_rate": 1.3157894736842106e-08,
"loss": 0.4148,
"step": 399
},
{
"Batch Mean": -0.06964653730392456,
"accuracy": 0.84375,
"epoch": 0.9975,
"step": 399
},
{
"epoch": 1.0,
"grad_norm": 6.016994476318359,
"learning_rate": 0.0,
"loss": 0.3874,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}