{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.25,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"Batch Mean": 0.8096684217453003,
"accuracy": 0.625,
"epoch": 0,
"step": 0
},
{
"epoch": 0.0025,
"grad_norm": 9.15977954864502,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.6694,
"step": 1
},
{
"Batch Mean": 0.8517913818359375,
"accuracy": 0.5234375,
"epoch": 0.0025,
"step": 1
},
{
"epoch": 0.005,
"grad_norm": 9.550761222839355,
"learning_rate": 5.000000000000001e-07,
"loss": 0.7001,
"step": 2
},
{
"Batch Mean": 0.8251190185546875,
"accuracy": 0.4921875,
"epoch": 0.005,
"step": 2
},
{
"epoch": 0.0075,
"grad_norm": 10.096807479858398,
"learning_rate": 7.5e-07,
"loss": 0.7277,
"step": 3
},
{
"Batch Mean": 0.8156070709228516,
"accuracy": 0.515625,
"epoch": 0.0075,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 10.845061302185059,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7116,
"step": 4
},
{
"Batch Mean": 0.772618293762207,
"accuracy": 0.53125,
"epoch": 0.01,
"step": 4
},
{
"epoch": 0.0125,
"grad_norm": 10.683115005493164,
"learning_rate": 1.25e-06,
"loss": 0.7074,
"step": 5
},
{
"Batch Mean": 0.7320594787597656,
"accuracy": 0.5390625,
"epoch": 0.0125,
"step": 5
},
{
"epoch": 0.015,
"grad_norm": 9.102219581604004,
"learning_rate": 1.5e-06,
"loss": 0.6964,
"step": 6
},
{
"Batch Mean": 0.639228105545044,
"accuracy": 0.5703125,
"epoch": 0.015,
"step": 6
},
{
"epoch": 0.0175,
"grad_norm": 10.126181602478027,
"learning_rate": 1.75e-06,
"loss": 0.6981,
"step": 7
},
{
"Batch Mean": 0.28215163946151733,
"accuracy": 0.6015625,
"epoch": 0.0175,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 6.3671040534973145,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6668,
"step": 8
},
{
"Batch Mean": 0.10402120649814606,
"accuracy": 0.5234375,
"epoch": 0.02,
"step": 8
},
{
"epoch": 0.0225,
"grad_norm": 6.053694248199463,
"learning_rate": 2.25e-06,
"loss": 0.6907,
"step": 9
},
{
"Batch Mean": -0.794627845287323,
"accuracy": 0.5234375,
"epoch": 0.0225,
"step": 9
},
{
"epoch": 0.025,
"grad_norm": 9.284210205078125,
"learning_rate": 2.5e-06,
"loss": 0.6879,
"step": 10
},
{
"Batch Mean": -1.1373445987701416,
"accuracy": 0.59375,
"epoch": 0.025,
"step": 10
},
{
"epoch": 0.0275,
"grad_norm": 13.110421180725098,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.6954,
"step": 11
},
{
"Batch Mean": -1.2041501998901367,
"accuracy": 0.59375,
"epoch": 0.0275,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 13.619694709777832,
"learning_rate": 3e-06,
"loss": 0.7004,
"step": 12
},
{
"Batch Mean": -1.1390533447265625,
"accuracy": 0.65625,
"epoch": 0.03,
"step": 12
},
{
"epoch": 0.0325,
"grad_norm": 16.698774337768555,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.6691,
"step": 13
},
{
"Batch Mean": -1.2006721496582031,
"accuracy": 0.6953125,
"epoch": 0.0325,
"step": 13
},
{
"epoch": 0.035,
"grad_norm": 13.031152725219727,
"learning_rate": 3.5e-06,
"loss": 0.6749,
"step": 14
},
{
"Batch Mean": -1.1018962860107422,
"accuracy": 0.6484375,
"epoch": 0.035,
"step": 14
},
{
"epoch": 0.0375,
"grad_norm": 11.913775444030762,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.6517,
"step": 15
},
{
"Batch Mean": -0.8371965289115906,
"accuracy": 0.65625,
"epoch": 0.0375,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 9.776491165161133,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6435,
"step": 16
},
{
"Batch Mean": -0.25420451164245605,
"accuracy": 0.65625,
"epoch": 0.04,
"step": 16
},
{
"epoch": 0.0425,
"grad_norm": 5.094934940338135,
"learning_rate": 4.25e-06,
"loss": 0.6352,
"step": 17
},
{
"Batch Mean": 0.45112472772598267,
"accuracy": 0.625,
"epoch": 0.0425,
"step": 17
},
{
"epoch": 0.045,
"grad_norm": 6.203393459320068,
"learning_rate": 4.5e-06,
"loss": 0.6305,
"step": 18
},
{
"Batch Mean": 1.0782840251922607,
"accuracy": 0.640625,
"epoch": 0.045,
"step": 18
},
{
"epoch": 0.0475,
"grad_norm": 11.932191848754883,
"learning_rate": 4.75e-06,
"loss": 0.6401,
"step": 19
},
{
"Batch Mean": 1.6000666618347168,
"accuracy": 0.65625,
"epoch": 0.0475,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 18.466459274291992,
"learning_rate": 5e-06,
"loss": 0.6928,
"step": 20
},
{
"Batch Mean": 1.6292922496795654,
"accuracy": 0.65625,
"epoch": 0.05,
"step": 20
},
{
"epoch": 0.0525,
"grad_norm": 18.958446502685547,
"learning_rate": 4.986842105263158e-06,
"loss": 0.6456,
"step": 21
},
{
"Batch Mean": 1.0992889404296875,
"accuracy": 0.7109375,
"epoch": 0.0525,
"step": 21
},
{
"epoch": 0.055,
"grad_norm": 12.98255443572998,
"learning_rate": 4.973684210526316e-06,
"loss": 0.606,
"step": 22
},
{
"Batch Mean": 0.33066248893737793,
"accuracy": 0.703125,
"epoch": 0.055,
"step": 22
},
{
"epoch": 0.0575,
"grad_norm": 6.1022725105285645,
"learning_rate": 4.960526315789474e-06,
"loss": 0.5807,
"step": 23
},
{
"Batch Mean": -0.7870486974716187,
"accuracy": 0.7109375,
"epoch": 0.0575,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 9.311738014221191,
"learning_rate": 4.947368421052632e-06,
"loss": 0.5331,
"step": 24
},
{
"Batch Mean": -1.2838190793991089,
"accuracy": 0.6484375,
"epoch": 0.06,
"step": 24
},
{
"epoch": 0.0625,
"grad_norm": 13.955443382263184,
"learning_rate": 4.9342105263157895e-06,
"loss": 0.6186,
"step": 25
},
{
"Batch Mean": -1.324391484260559,
"accuracy": 0.6953125,
"epoch": 0.0625,
"step": 25
},
{
"epoch": 0.065,
"grad_norm": 14.612135887145996,
"learning_rate": 4.921052631578948e-06,
"loss": 0.5325,
"step": 26
},
{
"Batch Mean": -0.7885305881500244,
"accuracy": 0.7109375,
"epoch": 0.065,
"step": 26
},
{
"epoch": 0.0675,
"grad_norm": 9.373554229736328,
"learning_rate": 4.907894736842106e-06,
"loss": 0.5549,
"step": 27
},
{
"Batch Mean": -0.25717926025390625,
"accuracy": 0.6953125,
"epoch": 0.0675,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 5.559288501739502,
"learning_rate": 4.894736842105264e-06,
"loss": 0.572,
"step": 28
},
{
"Batch Mean": 0.7993221282958984,
"accuracy": 0.6171875,
"epoch": 0.07,
"step": 28
},
{
"epoch": 0.0725,
"grad_norm": 10.778632164001465,
"learning_rate": 4.881578947368422e-06,
"loss": 0.6806,
"step": 29
},
{
"Batch Mean": 1.2803305387496948,
"accuracy": 0.6796875,
"epoch": 0.0725,
"step": 29
},
{
"epoch": 0.075,
"grad_norm": 14.314054489135742,
"learning_rate": 4.8684210526315795e-06,
"loss": 0.641,
"step": 30
},
{
"Batch Mean": 1.1847546100616455,
"accuracy": 0.7890625,
"epoch": 0.075,
"step": 30
},
{
"epoch": 0.0775,
"grad_norm": 12.86508846282959,
"learning_rate": 4.855263157894737e-06,
"loss": 0.5118,
"step": 31
},
{
"Batch Mean": 0.7971279621124268,
"accuracy": 0.734375,
"epoch": 0.0775,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 8.987775802612305,
"learning_rate": 4.842105263157895e-06,
"loss": 0.5508,
"step": 32
},
{
"Batch Mean": -0.12238264083862305,
"accuracy": 0.671875,
"epoch": 0.08,
"step": 32
},
{
"epoch": 0.0825,
"grad_norm": 4.265674591064453,
"learning_rate": 4.828947368421053e-06,
"loss": 0.5533,
"step": 33
},
{
"Batch Mean": -0.7733749151229858,
"accuracy": 0.7265625,
"epoch": 0.0825,
"step": 33
},
{
"epoch": 0.085,
"grad_norm": 9.074788093566895,
"learning_rate": 4.815789473684211e-06,
"loss": 0.5455,
"step": 34
},
{
"Batch Mean": -0.9033082723617554,
"accuracy": 0.7578125,
"epoch": 0.085,
"step": 34
},
{
"epoch": 0.0875,
"grad_norm": 10.331607818603516,
"learning_rate": 4.802631578947369e-06,
"loss": 0.5539,
"step": 35
},
{
"Batch Mean": -0.5780456066131592,
"accuracy": 0.7265625,
"epoch": 0.0875,
"step": 35
},
{
"epoch": 0.09,
"grad_norm": 7.254377365112305,
"learning_rate": 4.789473684210527e-06,
"loss": 0.547,
"step": 36
},
{
"Batch Mean": 0.09664157032966614,
"accuracy": 0.6953125,
"epoch": 0.09,
"step": 36
},
{
"epoch": 0.0925,
"grad_norm": 3.9853391647338867,
"learning_rate": 4.7763157894736844e-06,
"loss": 0.5451,
"step": 37
},
{
"Batch Mean": 0.5406360626220703,
"accuracy": 0.7265625,
"epoch": 0.0925,
"step": 37
},
{
"epoch": 0.095,
"grad_norm": 7.308631896972656,
"learning_rate": 4.763157894736842e-06,
"loss": 0.5313,
"step": 38
},
{
"Batch Mean": 0.6023058891296387,
"accuracy": 0.7890625,
"epoch": 0.095,
"step": 38
},
{
"epoch": 0.0975,
"grad_norm": 7.4278717041015625,
"learning_rate": 4.75e-06,
"loss": 0.5145,
"step": 39
},
{
"Batch Mean": 0.23680943250656128,
"accuracy": 0.7890625,
"epoch": 0.0975,
"step": 39
},
{
"epoch": 0.1,
"grad_norm": 5.057112693786621,
"learning_rate": 4.736842105263158e-06,
"loss": 0.4793,
"step": 40
},
{
"Batch Mean": -0.2819175720214844,
"accuracy": 0.75,
"epoch": 0.1,
"step": 40
},
{
"epoch": 0.1025,
"grad_norm": 5.986076831817627,
"learning_rate": 4.723684210526316e-06,
"loss": 0.4989,
"step": 41
},
{
"Batch Mean": -0.2952081263065338,
"accuracy": 0.765625,
"epoch": 0.1025,
"step": 41
},
{
"epoch": 0.105,
"grad_norm": 6.123879432678223,
"learning_rate": 4.710526315789474e-06,
"loss": 0.4959,
"step": 42
},
{
"Batch Mean": -0.5327777862548828,
"accuracy": 0.8359375,
"epoch": 0.105,
"step": 42
},
{
"epoch": 0.1075,
"grad_norm": 7.773658752441406,
"learning_rate": 4.697368421052632e-06,
"loss": 0.4501,
"step": 43
},
{
"Batch Mean": -0.00808095932006836,
"accuracy": 0.7265625,
"epoch": 0.1075,
"step": 43
},
{
"epoch": 0.11,
"grad_norm": 6.165060520172119,
"learning_rate": 4.68421052631579e-06,
"loss": 0.4952,
"step": 44
},
{
"Batch Mean": 0.23638486862182617,
"accuracy": 0.7109375,
"epoch": 0.11,
"step": 44
},
{
"epoch": 0.1125,
"grad_norm": 6.963595390319824,
"learning_rate": 4.671052631578948e-06,
"loss": 0.5261,
"step": 45
},
{
"Batch Mean": 0.07831740379333496,
"accuracy": 0.8125,
"epoch": 0.1125,
"step": 45
},
{
"epoch": 0.115,
"grad_norm": 5.393492221832275,
"learning_rate": 4.657894736842106e-06,
"loss": 0.405,
"step": 46
},
{
"Batch Mean": 0.10195636749267578,
"accuracy": 0.75,
"epoch": 0.115,
"step": 46
},
{
"epoch": 0.1175,
"grad_norm": 9.352888107299805,
"learning_rate": 4.6447368421052635e-06,
"loss": 0.5726,
"step": 47
},
{
"Batch Mean": 0.4460281729698181,
"accuracy": 0.7578125,
"epoch": 0.1175,
"step": 47
},
{
"epoch": 0.12,
"grad_norm": 9.130428314208984,
"learning_rate": 4.631578947368421e-06,
"loss": 0.5347,
"step": 48
},
{
"Batch Mean": -0.2610776424407959,
"accuracy": 0.796875,
"epoch": 0.12,
"step": 48
},
{
"epoch": 0.1225,
"grad_norm": 6.863037586212158,
"learning_rate": 4.618421052631579e-06,
"loss": 0.4456,
"step": 49
},
{
"Batch Mean": -0.19384944438934326,
"accuracy": 0.6328125,
"epoch": 0.1225,
"step": 49
},
{
"epoch": 0.125,
"grad_norm": 6.617422103881836,
"learning_rate": 4.605263157894737e-06,
"loss": 0.5905,
"step": 50
},
{
"Batch Mean": -0.4514150619506836,
"accuracy": 0.7890625,
"epoch": 0.125,
"step": 50
},
{
"epoch": 0.1275,
"grad_norm": 7.564980506896973,
"learning_rate": 4.592105263157895e-06,
"loss": 0.4885,
"step": 51
},
{
"Batch Mean": -0.003515481948852539,
"accuracy": 0.734375,
"epoch": 0.1275,
"step": 51
},
{
"epoch": 0.13,
"grad_norm": 5.258878707885742,
"learning_rate": 4.578947368421053e-06,
"loss": 0.5032,
"step": 52
},
{
"Batch Mean": -0.10907495021820068,
"accuracy": 0.71875,
"epoch": 0.13,
"step": 52
},
{
"epoch": 0.1325,
"grad_norm": 5.931639194488525,
"learning_rate": 4.565789473684211e-06,
"loss": 0.542,
"step": 53
},
{
"Batch Mean": 0.46248769760131836,
"accuracy": 0.7890625,
"epoch": 0.1325,
"step": 53
},
{
"epoch": 0.135,
"grad_norm": 7.1796112060546875,
"learning_rate": 4.552631578947369e-06,
"loss": 0.4685,
"step": 54
},
{
"Batch Mean": 0.1234641969203949,
"accuracy": 0.7265625,
"epoch": 0.135,
"step": 54
},
{
"epoch": 0.1375,
"grad_norm": 6.273733139038086,
"learning_rate": 4.539473684210527e-06,
"loss": 0.4989,
"step": 55
},
{
"Batch Mean": -0.15052831172943115,
"accuracy": 0.78125,
"epoch": 0.1375,
"step": 55
},
{
"epoch": 0.14,
"grad_norm": 5.630648612976074,
"learning_rate": 4.526315789473685e-06,
"loss": 0.4518,
"step": 56
},
{
"Batch Mean": 0.10203218460083008,
"accuracy": 0.75,
"epoch": 0.14,
"step": 56
},
{
"epoch": 0.1425,
"grad_norm": 5.465209484100342,
"learning_rate": 4.513157894736843e-06,
"loss": 0.4775,
"step": 57
},
{
"Batch Mean": -0.2146952748298645,
"accuracy": 0.7578125,
"epoch": 0.1425,
"step": 57
},
{
"epoch": 0.145,
"grad_norm": 5.516883373260498,
"learning_rate": 4.5e-06,
"loss": 0.4672,
"step": 58
},
{
"Batch Mean": 0.39426422119140625,
"accuracy": 0.7734375,
"epoch": 0.145,
"step": 58
},
{
"epoch": 0.1475,
"grad_norm": 6.986379146575928,
"learning_rate": 4.4868421052631584e-06,
"loss": 0.4851,
"step": 59
},
{
"Batch Mean": 0.1964409053325653,
"accuracy": 0.75,
"epoch": 0.1475,
"step": 59
},
{
"epoch": 0.15,
"grad_norm": 6.413231372833252,
"learning_rate": 4.473684210526316e-06,
"loss": 0.5073,
"step": 60
},
{
"Batch Mean": -0.21541327238082886,
"accuracy": 0.7890625,
"epoch": 0.15,
"step": 60
},
{
"epoch": 0.1525,
"grad_norm": 5.495061874389648,
"learning_rate": 4.460526315789474e-06,
"loss": 0.4751,
"step": 61
},
{
"Batch Mean": -0.3059917688369751,
"accuracy": 0.7890625,
"epoch": 0.1525,
"step": 61
},
{
"epoch": 0.155,
"grad_norm": 6.379850387573242,
"learning_rate": 4.447368421052632e-06,
"loss": 0.4679,
"step": 62
},
{
"Batch Mean": -0.5860270261764526,
"accuracy": 0.8046875,
"epoch": 0.155,
"step": 62
},
{
"epoch": 0.1575,
"grad_norm": 8.42182731628418,
"learning_rate": 4.43421052631579e-06,
"loss": 0.4152,
"step": 63
},
{
"Batch Mean": 0.14216375350952148,
"accuracy": 0.7578125,
"epoch": 0.1575,
"step": 63
},
{
"epoch": 0.16,
"grad_norm": 6.759493827819824,
"learning_rate": 4.4210526315789476e-06,
"loss": 0.4852,
"step": 64
},
{
"Batch Mean": 0.3515692353248596,
"accuracy": 0.796875,
"epoch": 0.16,
"step": 64
},
{
"epoch": 0.1625,
"grad_norm": 7.567401885986328,
"learning_rate": 4.407894736842105e-06,
"loss": 0.4449,
"step": 65
},
{
"Batch Mean": 0.281324565410614,
"accuracy": 0.765625,
"epoch": 0.1625,
"step": 65
},
{
"epoch": 0.165,
"grad_norm": 7.912868022918701,
"learning_rate": 4.394736842105263e-06,
"loss": 0.5717,
"step": 66
},
{
"Batch Mean": 0.07387387752532959,
"accuracy": 0.796875,
"epoch": 0.165,
"step": 66
},
{
"epoch": 0.1675,
"grad_norm": 5.737614631652832,
"learning_rate": 4.381578947368421e-06,
"loss": 0.4532,
"step": 67
},
{
"Batch Mean": -0.45137548446655273,
"accuracy": 0.7734375,
"epoch": 0.1675,
"step": 67
},
{
"epoch": 0.17,
"grad_norm": 9.204909324645996,
"learning_rate": 4.368421052631579e-06,
"loss": 0.501,
"step": 68
},
{
"Batch Mean": -0.12238574028015137,
"accuracy": 0.828125,
"epoch": 0.17,
"step": 68
},
{
"epoch": 0.1725,
"grad_norm": 6.6219162940979,
"learning_rate": 4.3552631578947375e-06,
"loss": 0.416,
"step": 69
},
{
"Batch Mean": 0.06974506378173828,
"accuracy": 0.78125,
"epoch": 0.1725,
"step": 69
},
{
"epoch": 0.175,
"grad_norm": 5.526142120361328,
"learning_rate": 4.342105263157895e-06,
"loss": 0.4364,
"step": 70
},
{
"Batch Mean": 0.3401278257369995,
"accuracy": 0.8359375,
"epoch": 0.175,
"step": 70
},
{
"epoch": 0.1775,
"grad_norm": 6.567529678344727,
"learning_rate": 4.328947368421053e-06,
"loss": 0.4396,
"step": 71
},
{
"Batch Mean": -0.12450069189071655,
"accuracy": 0.75,
"epoch": 0.1775,
"step": 71
},
{
"epoch": 0.18,
"grad_norm": 6.054138660430908,
"learning_rate": 4.315789473684211e-06,
"loss": 0.4604,
"step": 72
},
{
"Batch Mean": 0.0036773681640625,
"accuracy": 0.7890625,
"epoch": 0.18,
"step": 72
},
{
"epoch": 0.1825,
"grad_norm": 6.112196445465088,
"learning_rate": 4.302631578947369e-06,
"loss": 0.4091,
"step": 73
},
{
"Batch Mean": -0.3432164192199707,
"accuracy": 0.7890625,
"epoch": 0.1825,
"step": 73
},
{
"epoch": 0.185,
"grad_norm": 6.806588649749756,
"learning_rate": 4.289473684210527e-06,
"loss": 0.4194,
"step": 74
},
{
"Batch Mean": -0.10890483856201172,
"accuracy": 0.671875,
"epoch": 0.185,
"step": 74
},
{
"epoch": 0.1875,
"grad_norm": 7.000553131103516,
"learning_rate": 4.276315789473684e-06,
"loss": 0.5318,
"step": 75
},
{
"Batch Mean": 0.6419280171394348,
"accuracy": 0.7265625,
"epoch": 0.1875,
"step": 75
},
{
"epoch": 0.19,
"grad_norm": 8.422764778137207,
"learning_rate": 4.2631578947368425e-06,
"loss": 0.4539,
"step": 76
},
{
"Batch Mean": 0.28451067209243774,
"accuracy": 0.7734375,
"epoch": 0.19,
"step": 76
},
{
"epoch": 0.1925,
"grad_norm": 6.512566566467285,
"learning_rate": 4.25e-06,
"loss": 0.4522,
"step": 77
},
{
"Batch Mean": -0.07123541831970215,
"accuracy": 0.8125,
"epoch": 0.1925,
"step": 77
},
{
"epoch": 0.195,
"grad_norm": 6.979062080383301,
"learning_rate": 4.236842105263158e-06,
"loss": 0.4827,
"step": 78
},
{
"Batch Mean": -0.2889130115509033,
"accuracy": 0.71875,
"epoch": 0.195,
"step": 78
},
{
"epoch": 0.1975,
"grad_norm": 7.523491859436035,
"learning_rate": 4.223684210526316e-06,
"loss": 0.4501,
"step": 79
},
{
"Batch Mean": -0.2912619113922119,
"accuracy": 0.8046875,
"epoch": 0.1975,
"step": 79
},
{
"epoch": 0.2,
"grad_norm": 6.1573662757873535,
"learning_rate": 4.210526315789474e-06,
"loss": 0.4537,
"step": 80
},
{
"Batch Mean": 0.07626897096633911,
"accuracy": 0.84375,
"epoch": 0.2,
"step": 80
},
{
"epoch": 0.2025,
"grad_norm": 5.308783054351807,
"learning_rate": 4.197368421052632e-06,
"loss": 0.3777,
"step": 81
},
{
"Batch Mean": 0.44231414794921875,
"accuracy": 0.78125,
"epoch": 0.2025,
"step": 81
},
{
"epoch": 0.205,
"grad_norm": 7.6725969314575195,
"learning_rate": 4.18421052631579e-06,
"loss": 0.4681,
"step": 82
},
{
"Batch Mean": 0.05066095292568207,
"accuracy": 0.7734375,
"epoch": 0.205,
"step": 82
},
{
"epoch": 0.2075,
"grad_norm": 6.136054039001465,
"learning_rate": 4.171052631578948e-06,
"loss": 0.4111,
"step": 83
},
{
"Batch Mean": 0.11537289619445801,
"accuracy": 0.8515625,
"epoch": 0.2075,
"step": 83
},
{
"epoch": 0.21,
"grad_norm": 5.31056022644043,
"learning_rate": 4.157894736842106e-06,
"loss": 0.345,
"step": 84
},
{
"Batch Mean": 0.05826067924499512,
"accuracy": 0.734375,
"epoch": 0.21,
"step": 84
},
{
"epoch": 0.2125,
"grad_norm": 6.368407249450684,
"learning_rate": 4.144736842105263e-06,
"loss": 0.4834,
"step": 85
},
{
"Batch Mean": -0.05756664276123047,
"accuracy": 0.75,
"epoch": 0.2125,
"step": 85
},
{
"epoch": 0.215,
"grad_norm": 7.063327312469482,
"learning_rate": 4.1315789473684216e-06,
"loss": 0.5013,
"step": 86
},
{
"Batch Mean": -0.08471214771270752,
"accuracy": 0.8046875,
"epoch": 0.215,
"step": 86
},
{
"epoch": 0.2175,
"grad_norm": 6.508166313171387,
"learning_rate": 4.118421052631579e-06,
"loss": 0.4431,
"step": 87
},
{
"Batch Mean": -0.06070905923843384,
"accuracy": 0.703125,
"epoch": 0.2175,
"step": 87
},
{
"epoch": 0.22,
"grad_norm": 7.8081583976745605,
"learning_rate": 4.105263157894737e-06,
"loss": 0.5481,
"step": 88
},
{
"Batch Mean": -0.15125751495361328,
"accuracy": 0.8046875,
"epoch": 0.22,
"step": 88
},
{
"epoch": 0.2225,
"grad_norm": 5.806808948516846,
"learning_rate": 4.092105263157895e-06,
"loss": 0.3994,
"step": 89
},
{
"Batch Mean": -0.0017851591110229492,
"accuracy": 0.828125,
"epoch": 0.2225,
"step": 89
},
{
"epoch": 0.225,
"grad_norm": 5.560466766357422,
"learning_rate": 4.078947368421053e-06,
"loss": 0.4,
"step": 90
},
{
"Batch Mean": 0.07748031616210938,
"accuracy": 0.8046875,
"epoch": 0.225,
"step": 90
},
{
"epoch": 0.2275,
"grad_norm": 5.754173278808594,
"learning_rate": 4.065789473684211e-06,
"loss": 0.4019,
"step": 91
},
{
"Batch Mean": -0.24073825776576996,
"accuracy": 0.78125,
"epoch": 0.2275,
"step": 91
},
{
"epoch": 0.23,
"grad_norm": 6.231720447540283,
"learning_rate": 4.052631578947368e-06,
"loss": 0.4585,
"step": 92
},
{
"Batch Mean": -0.45170480012893677,
"accuracy": 0.75,
"epoch": 0.23,
"step": 92
},
{
"epoch": 0.2325,
"grad_norm": 7.313357353210449,
"learning_rate": 4.0394736842105265e-06,
"loss": 0.4762,
"step": 93
},
{
"Batch Mean": 0.024792194366455078,
"accuracy": 0.796875,
"epoch": 0.2325,
"step": 93
},
{
"epoch": 0.235,
"grad_norm": 4.933104515075684,
"learning_rate": 4.026315789473684e-06,
"loss": 0.4138,
"step": 94
},
{
"Batch Mean": -0.02751898765563965,
"accuracy": 0.7421875,
"epoch": 0.235,
"step": 94
},
{
"epoch": 0.2375,
"grad_norm": 5.39032506942749,
"learning_rate": 4.013157894736842e-06,
"loss": 0.4787,
"step": 95
},
{
"Batch Mean": 0.06753873825073242,
"accuracy": 0.8203125,
"epoch": 0.2375,
"step": 95
},
{
"epoch": 0.24,
"grad_norm": 5.1791462898254395,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4488,
"step": 96
},
{
"Batch Mean": 0.22616100311279297,
"accuracy": 0.828125,
"epoch": 0.24,
"step": 96
},
{
"epoch": 0.2425,
"grad_norm": 5.300809383392334,
"learning_rate": 3.986842105263158e-06,
"loss": 0.4142,
"step": 97
},
{
"Batch Mean": -0.15787070989608765,
"accuracy": 0.8515625,
"epoch": 0.2425,
"step": 97
},
{
"epoch": 0.245,
"grad_norm": 5.704384803771973,
"learning_rate": 3.9736842105263165e-06,
"loss": 0.3797,
"step": 98
},
{
"Batch Mean": 0.13187718391418457,
"accuracy": 0.8046875,
"epoch": 0.245,
"step": 98
},
{
"epoch": 0.2475,
"grad_norm": 6.045483589172363,
"learning_rate": 3.960526315789474e-06,
"loss": 0.5195,
"step": 99
},
{
"Batch Mean": -0.08533608913421631,
"accuracy": 0.8203125,
"epoch": 0.2475,
"step": 99
},
{
"epoch": 0.25,
"grad_norm": 4.997817516326904,
"learning_rate": 3.947368421052632e-06,
"loss": 0.3816,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}