{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1110,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09009009009009009,
"grad_norm": 16.28987693786621,
"learning_rate": 9.90990990990991e-05,
"loss": 0.4883,
"step": 10
},
{
"epoch": 0.18018018018018017,
"grad_norm": 3.943525552749634,
"learning_rate": 9.81981981981982e-05,
"loss": 0.0823,
"step": 20
},
{
"epoch": 0.2702702702702703,
"grad_norm": 7.749650955200195,
"learning_rate": 9.729729729729731e-05,
"loss": 0.0279,
"step": 30
},
{
"epoch": 0.36036036036036034,
"grad_norm": 1.5577881336212158,
"learning_rate": 9.639639639639641e-05,
"loss": 0.0247,
"step": 40
},
{
"epoch": 0.45045045045045046,
"grad_norm": 1.7881258726119995,
"learning_rate": 9.54954954954955e-05,
"loss": 0.0162,
"step": 50
},
{
"epoch": 0.5405405405405406,
"grad_norm": 1.0929126739501953,
"learning_rate": 9.45945945945946e-05,
"loss": 0.0127,
"step": 60
},
{
"epoch": 0.6306306306306306,
"grad_norm": 2.1130833625793457,
"learning_rate": 9.36936936936937e-05,
"loss": 0.0069,
"step": 70
},
{
"epoch": 0.7207207207207207,
"grad_norm": 3.6001977920532227,
"learning_rate": 9.279279279279279e-05,
"loss": 0.0094,
"step": 80
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.6843027472496033,
"learning_rate": 9.18918918918919e-05,
"loss": 0.0069,
"step": 90
},
{
"epoch": 0.9009009009009009,
"grad_norm": 2.2954471111297607,
"learning_rate": 9.0990990990991e-05,
"loss": 0.0069,
"step": 100
},
{
"epoch": 0.990990990990991,
"grad_norm": 0.22960656881332397,
"learning_rate": 9.009009009009009e-05,
"loss": 0.0094,
"step": 110
},
{
"epoch": 1.0,
"eval_loss": 0.016822505742311478,
"eval_mse": 0.016822507604956627,
"eval_runtime": 26.347,
"eval_samples_per_second": 8.426,
"eval_steps_per_second": 1.063,
"step": 111
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.10217050462961197,
"learning_rate": 8.918918918918919e-05,
"loss": 0.0042,
"step": 120
},
{
"epoch": 1.1711711711711712,
"grad_norm": 5.079370498657227,
"learning_rate": 8.82882882882883e-05,
"loss": 0.0094,
"step": 130
},
{
"epoch": 1.2612612612612613,
"grad_norm": 2.9237141609191895,
"learning_rate": 8.738738738738738e-05,
"loss": 0.0099,
"step": 140
},
{
"epoch": 1.3513513513513513,
"grad_norm": 3.25941801071167,
"learning_rate": 8.64864864864865e-05,
"loss": 0.016,
"step": 150
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.9882258772850037,
"learning_rate": 8.55855855855856e-05,
"loss": 0.0061,
"step": 160
},
{
"epoch": 1.5315315315315314,
"grad_norm": 4.510200023651123,
"learning_rate": 8.468468468468469e-05,
"loss": 0.0143,
"step": 170
},
{
"epoch": 1.6216216216216215,
"grad_norm": 0.35400834679603577,
"learning_rate": 8.378378378378379e-05,
"loss": 0.0148,
"step": 180
},
{
"epoch": 1.7117117117117115,
"grad_norm": 3.6332740783691406,
"learning_rate": 8.288288288288289e-05,
"loss": 0.0113,
"step": 190
},
{
"epoch": 1.8018018018018018,
"grad_norm": 1.811373233795166,
"learning_rate": 8.198198198198198e-05,
"loss": 0.017,
"step": 200
},
{
"epoch": 1.8918918918918919,
"grad_norm": 2.500202178955078,
"learning_rate": 8.108108108108109e-05,
"loss": 0.0062,
"step": 210
},
{
"epoch": 1.981981981981982,
"grad_norm": 4.341911792755127,
"learning_rate": 8.018018018018019e-05,
"loss": 0.0142,
"step": 220
},
{
"epoch": 2.0,
"eval_loss": 0.008252176456153393,
"eval_mse": 0.008252176456153393,
"eval_runtime": 25.8199,
"eval_samples_per_second": 8.598,
"eval_steps_per_second": 1.084,
"step": 222
},
{
"epoch": 2.0720720720720722,
"grad_norm": 0.4223078489303589,
"learning_rate": 7.927927927927928e-05,
"loss": 0.01,
"step": 230
},
{
"epoch": 2.1621621621621623,
"grad_norm": 3.0677056312561035,
"learning_rate": 7.837837837837838e-05,
"loss": 0.0139,
"step": 240
},
{
"epoch": 2.2522522522522523,
"grad_norm": 0.3952648639678955,
"learning_rate": 7.747747747747748e-05,
"loss": 0.0063,
"step": 250
},
{
"epoch": 2.3423423423423424,
"grad_norm": 3.6467502117156982,
"learning_rate": 7.657657657657657e-05,
"loss": 0.0122,
"step": 260
},
{
"epoch": 2.4324324324324325,
"grad_norm": 4.3216352462768555,
"learning_rate": 7.567567567567568e-05,
"loss": 0.0106,
"step": 270
},
{
"epoch": 2.5225225225225225,
"grad_norm": 3.8619132041931152,
"learning_rate": 7.477477477477479e-05,
"loss": 0.0106,
"step": 280
},
{
"epoch": 2.6126126126126126,
"grad_norm": 1.1223838329315186,
"learning_rate": 7.387387387387387e-05,
"loss": 0.0086,
"step": 290
},
{
"epoch": 2.7027027027027026,
"grad_norm": 0.8446173667907715,
"learning_rate": 7.297297297297297e-05,
"loss": 0.0202,
"step": 300
},
{
"epoch": 2.7927927927927927,
"grad_norm": 0.9771293997764587,
"learning_rate": 7.207207207207208e-05,
"loss": 0.0071,
"step": 310
},
{
"epoch": 2.8828828828828827,
"grad_norm": 2.6441454887390137,
"learning_rate": 7.117117117117116e-05,
"loss": 0.0093,
"step": 320
},
{
"epoch": 2.972972972972973,
"grad_norm": 3.6695780754089355,
"learning_rate": 7.027027027027028e-05,
"loss": 0.0067,
"step": 330
},
{
"epoch": 3.0,
"eval_loss": 0.012067927978932858,
"eval_mse": 0.012067928910255432,
"eval_runtime": 26.2024,
"eval_samples_per_second": 8.473,
"eval_steps_per_second": 1.069,
"step": 333
},
{
"epoch": 3.063063063063063,
"grad_norm": 1.334926962852478,
"learning_rate": 6.936936936936938e-05,
"loss": 0.0129,
"step": 340
},
{
"epoch": 3.153153153153153,
"grad_norm": 3.8143014907836914,
"learning_rate": 6.846846846846847e-05,
"loss": 0.0084,
"step": 350
},
{
"epoch": 3.2432432432432434,
"grad_norm": 0.529600977897644,
"learning_rate": 6.756756756756757e-05,
"loss": 0.0059,
"step": 360
},
{
"epoch": 3.3333333333333335,
"grad_norm": 6.050387382507324,
"learning_rate": 6.666666666666667e-05,
"loss": 0.0184,
"step": 370
},
{
"epoch": 3.4234234234234235,
"grad_norm": 0.3079633414745331,
"learning_rate": 6.576576576576577e-05,
"loss": 0.0072,
"step": 380
},
{
"epoch": 3.5135135135135136,
"grad_norm": 2.165722608566284,
"learning_rate": 6.486486486486487e-05,
"loss": 0.0086,
"step": 390
},
{
"epoch": 3.6036036036036037,
"grad_norm": 1.4053255319595337,
"learning_rate": 6.396396396396397e-05,
"loss": 0.0063,
"step": 400
},
{
"epoch": 3.6936936936936937,
"grad_norm": 1.4147133827209473,
"learning_rate": 6.306306306306306e-05,
"loss": 0.0049,
"step": 410
},
{
"epoch": 3.7837837837837838,
"grad_norm": 0.1397821009159088,
"learning_rate": 6.216216216216216e-05,
"loss": 0.0143,
"step": 420
},
{
"epoch": 3.873873873873874,
"grad_norm": 2.773983955383301,
"learning_rate": 6.126126126126126e-05,
"loss": 0.0052,
"step": 430
},
{
"epoch": 3.963963963963964,
"grad_norm": 2.0396511554718018,
"learning_rate": 6.0360360360360365e-05,
"loss": 0.0051,
"step": 440
},
{
"epoch": 4.0,
"eval_loss": 0.00851067528128624,
"eval_mse": 0.00851067528128624,
"eval_runtime": 26.1258,
"eval_samples_per_second": 8.497,
"eval_steps_per_second": 1.072,
"step": 444
},
{
"epoch": 4.054054054054054,
"grad_norm": 1.728843092918396,
"learning_rate": 5.9459459459459466e-05,
"loss": 0.0071,
"step": 450
},
{
"epoch": 4.1441441441441444,
"grad_norm": 2.8978583812713623,
"learning_rate": 5.855855855855856e-05,
"loss": 0.0072,
"step": 460
},
{
"epoch": 4.2342342342342345,
"grad_norm": 0.1254826933145523,
"learning_rate": 5.765765765765766e-05,
"loss": 0.0045,
"step": 470
},
{
"epoch": 4.324324324324325,
"grad_norm": 0.3451184034347534,
"learning_rate": 5.6756756756756757e-05,
"loss": 0.0034,
"step": 480
},
{
"epoch": 4.414414414414415,
"grad_norm": 0.3993205726146698,
"learning_rate": 5.585585585585585e-05,
"loss": 0.0062,
"step": 490
},
{
"epoch": 4.504504504504505,
"grad_norm": 1.2357189655303955,
"learning_rate": 5.4954954954954966e-05,
"loss": 0.0109,
"step": 500
},
{
"epoch": 4.594594594594595,
"grad_norm": 2.587001085281372,
"learning_rate": 5.405405405405406e-05,
"loss": 0.0096,
"step": 510
},
{
"epoch": 4.684684684684685,
"grad_norm": 1.4181842803955078,
"learning_rate": 5.3153153153153155e-05,
"loss": 0.0053,
"step": 520
},
{
"epoch": 4.774774774774775,
"grad_norm": 0.5997741222381592,
"learning_rate": 5.2252252252252256e-05,
"loss": 0.0074,
"step": 530
},
{
"epoch": 4.864864864864865,
"grad_norm": 0.6971381306648254,
"learning_rate": 5.135135135135135e-05,
"loss": 0.0022,
"step": 540
},
{
"epoch": 4.954954954954955,
"grad_norm": 1.7140687704086304,
"learning_rate": 5.0450450450450445e-05,
"loss": 0.0035,
"step": 550
},
{
"epoch": 5.0,
"eval_loss": 0.011971085332334042,
"eval_mse": 0.011971085332334042,
"eval_runtime": 26.2997,
"eval_samples_per_second": 8.441,
"eval_steps_per_second": 1.065,
"step": 555
},
{
"epoch": 5.045045045045045,
"grad_norm": 1.8608264923095703,
"learning_rate": 4.954954954954955e-05,
"loss": 0.0074,
"step": 560
},
{
"epoch": 5.135135135135135,
"grad_norm": 0.60817551612854,
"learning_rate": 4.8648648648648654e-05,
"loss": 0.0068,
"step": 570
},
{
"epoch": 5.225225225225225,
"grad_norm": 1.958824634552002,
"learning_rate": 4.774774774774775e-05,
"loss": 0.006,
"step": 580
},
{
"epoch": 5.315315315315315,
"grad_norm": 0.3445253372192383,
"learning_rate": 4.684684684684685e-05,
"loss": 0.0049,
"step": 590
},
{
"epoch": 5.405405405405405,
"grad_norm": 0.22099243104457855,
"learning_rate": 4.594594594594595e-05,
"loss": 0.0039,
"step": 600
},
{
"epoch": 5.495495495495495,
"grad_norm": 1.4178491830825806,
"learning_rate": 4.5045045045045046e-05,
"loss": 0.0131,
"step": 610
},
{
"epoch": 5.585585585585585,
"grad_norm": 0.469068706035614,
"learning_rate": 4.414414414414415e-05,
"loss": 0.0025,
"step": 620
},
{
"epoch": 5.675675675675675,
"grad_norm": 0.772139847278595,
"learning_rate": 4.324324324324325e-05,
"loss": 0.0032,
"step": 630
},
{
"epoch": 5.7657657657657655,
"grad_norm": 0.21695229411125183,
"learning_rate": 4.234234234234234e-05,
"loss": 0.0026,
"step": 640
},
{
"epoch": 5.8558558558558556,
"grad_norm": 1.6817526817321777,
"learning_rate": 4.1441441441441444e-05,
"loss": 0.005,
"step": 650
},
{
"epoch": 5.945945945945946,
"grad_norm": 1.2628576755523682,
"learning_rate": 4.0540540540540545e-05,
"loss": 0.0024,
"step": 660
},
{
"epoch": 6.0,
"eval_loss": 0.008206626400351524,
"eval_mse": 0.008206626400351524,
"eval_runtime": 26.5886,
"eval_samples_per_second": 8.349,
"eval_steps_per_second": 1.053,
"step": 666
},
{
"epoch": 6.036036036036036,
"grad_norm": 1.1488395929336548,
"learning_rate": 3.963963963963964e-05,
"loss": 0.0036,
"step": 670
},
{
"epoch": 6.126126126126126,
"grad_norm": 1.4983370304107666,
"learning_rate": 3.873873873873874e-05,
"loss": 0.0032,
"step": 680
},
{
"epoch": 6.216216216216216,
"grad_norm": 0.8290634751319885,
"learning_rate": 3.783783783783784e-05,
"loss": 0.0091,
"step": 690
},
{
"epoch": 6.306306306306306,
"grad_norm": 0.4906163811683655,
"learning_rate": 3.693693693693694e-05,
"loss": 0.0013,
"step": 700
},
{
"epoch": 6.396396396396397,
"grad_norm": 0.5014496445655823,
"learning_rate": 3.603603603603604e-05,
"loss": 0.0019,
"step": 710
},
{
"epoch": 6.486486486486487,
"grad_norm": 0.38546356558799744,
"learning_rate": 3.513513513513514e-05,
"loss": 0.0013,
"step": 720
},
{
"epoch": 6.576576576576577,
"grad_norm": 2.099214553833008,
"learning_rate": 3.4234234234234234e-05,
"loss": 0.0018,
"step": 730
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.1836785525083542,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.0013,
"step": 740
},
{
"epoch": 6.756756756756757,
"grad_norm": 0.2982404828071594,
"learning_rate": 3.2432432432432436e-05,
"loss": 0.0015,
"step": 750
},
{
"epoch": 6.846846846846847,
"grad_norm": 2.1191866397857666,
"learning_rate": 3.153153153153153e-05,
"loss": 0.0037,
"step": 760
},
{
"epoch": 6.936936936936937,
"grad_norm": 1.7569901943206787,
"learning_rate": 3.063063063063063e-05,
"loss": 0.0079,
"step": 770
},
{
"epoch": 7.0,
"eval_loss": 0.007293929811567068,
"eval_mse": 0.0072939288802444935,
"eval_runtime": 23.2827,
"eval_samples_per_second": 9.535,
"eval_steps_per_second": 1.203,
"step": 777
},
{
"epoch": 7.027027027027027,
"grad_norm": 0.3839648365974426,
"learning_rate": 2.9729729729729733e-05,
"loss": 0.0013,
"step": 780
},
{
"epoch": 7.117117117117117,
"grad_norm": 0.09070757776498795,
"learning_rate": 2.882882882882883e-05,
"loss": 0.0009,
"step": 790
},
{
"epoch": 7.207207207207207,
"grad_norm": 0.15580403804779053,
"learning_rate": 2.7927927927927926e-05,
"loss": 0.0006,
"step": 800
},
{
"epoch": 7.297297297297297,
"grad_norm": 0.6153419017791748,
"learning_rate": 2.702702702702703e-05,
"loss": 0.0015,
"step": 810
},
{
"epoch": 7.387387387387387,
"grad_norm": 0.3068805932998657,
"learning_rate": 2.6126126126126128e-05,
"loss": 0.0009,
"step": 820
},
{
"epoch": 7.4774774774774775,
"grad_norm": 0.38947150111198425,
"learning_rate": 2.5225225225225222e-05,
"loss": 0.0018,
"step": 830
},
{
"epoch": 7.5675675675675675,
"grad_norm": 0.3517474830150604,
"learning_rate": 2.4324324324324327e-05,
"loss": 0.0006,
"step": 840
},
{
"epoch": 7.657657657657658,
"grad_norm": 0.26397421956062317,
"learning_rate": 2.3423423423423425e-05,
"loss": 0.0044,
"step": 850
},
{
"epoch": 7.747747747747748,
"grad_norm": 0.5078900456428528,
"learning_rate": 2.2522522522522523e-05,
"loss": 0.0018,
"step": 860
},
{
"epoch": 7.837837837837838,
"grad_norm": 0.13668565452098846,
"learning_rate": 2.1621621621621624e-05,
"loss": 0.0006,
"step": 870
},
{
"epoch": 7.927927927927928,
"grad_norm": 0.2840743660926819,
"learning_rate": 2.0720720720720722e-05,
"loss": 0.0005,
"step": 880
},
{
"epoch": 8.0,
"eval_loss": 0.006760227959603071,
"eval_mse": 0.0067602284252643585,
"eval_runtime": 25.9835,
"eval_samples_per_second": 8.544,
"eval_steps_per_second": 1.078,
"step": 888
},
{
"epoch": 8.018018018018019,
"grad_norm": 0.12077564746141434,
"learning_rate": 1.981981981981982e-05,
"loss": 0.0005,
"step": 890
},
{
"epoch": 8.108108108108109,
"grad_norm": 0.14313092827796936,
"learning_rate": 1.891891891891892e-05,
"loss": 0.0007,
"step": 900
},
{
"epoch": 8.198198198198199,
"grad_norm": 0.04330454021692276,
"learning_rate": 1.801801801801802e-05,
"loss": 0.0002,
"step": 910
},
{
"epoch": 8.288288288288289,
"grad_norm": 0.09245187044143677,
"learning_rate": 1.7117117117117117e-05,
"loss": 0.0003,
"step": 920
},
{
"epoch": 8.378378378378379,
"grad_norm": 1.145082950592041,
"learning_rate": 1.6216216216216218e-05,
"loss": 0.0005,
"step": 930
},
{
"epoch": 8.468468468468469,
"grad_norm": 0.16518352925777435,
"learning_rate": 1.5315315315315316e-05,
"loss": 0.0028,
"step": 940
},
{
"epoch": 8.558558558558559,
"grad_norm": 0.21115215122699738,
"learning_rate": 1.4414414414414416e-05,
"loss": 0.0004,
"step": 950
},
{
"epoch": 8.64864864864865,
"grad_norm": 0.5826627612113953,
"learning_rate": 1.3513513513513515e-05,
"loss": 0.0004,
"step": 960
},
{
"epoch": 8.73873873873874,
"grad_norm": 0.5220029354095459,
"learning_rate": 1.2612612612612611e-05,
"loss": 0.0003,
"step": 970
},
{
"epoch": 8.82882882882883,
"grad_norm": 0.10058625787496567,
"learning_rate": 1.1711711711711713e-05,
"loss": 0.0005,
"step": 980
},
{
"epoch": 8.91891891891892,
"grad_norm": 0.5194815993309021,
"learning_rate": 1.0810810810810812e-05,
"loss": 0.0004,
"step": 990
},
{
"epoch": 9.0,
"eval_loss": 0.006972064264118671,
"eval_mse": 0.006972064264118671,
"eval_runtime": 25.2631,
"eval_samples_per_second": 8.788,
"eval_steps_per_second": 1.108,
"step": 999
},
{
"epoch": 9.00900900900901,
"grad_norm": 0.20283883810043335,
"learning_rate": 9.90990990990991e-06,
"loss": 0.0003,
"step": 1000
},
{
"epoch": 9.0990990990991,
"grad_norm": 0.34447431564331055,
"learning_rate": 9.00900900900901e-06,
"loss": 0.0002,
"step": 1010
},
{
"epoch": 9.18918918918919,
"grad_norm": 0.08173416554927826,
"learning_rate": 8.108108108108109e-06,
"loss": 0.0002,
"step": 1020
},
{
"epoch": 9.27927927927928,
"grad_norm": 0.10527441650629044,
"learning_rate": 7.207207207207208e-06,
"loss": 0.0002,
"step": 1030
},
{
"epoch": 9.36936936936937,
"grad_norm": 0.0803741067647934,
"learning_rate": 6.306306306306306e-06,
"loss": 0.0001,
"step": 1040
},
{
"epoch": 9.45945945945946,
"grad_norm": 0.10471061617136002,
"learning_rate": 5.405405405405406e-06,
"loss": 0.0002,
"step": 1050
},
{
"epoch": 9.54954954954955,
"grad_norm": 0.05206935107707977,
"learning_rate": 4.504504504504505e-06,
"loss": 0.0002,
"step": 1060
},
{
"epoch": 9.63963963963964,
"grad_norm": 0.022533565759658813,
"learning_rate": 3.603603603603604e-06,
"loss": 0.0001,
"step": 1070
},
{
"epoch": 9.72972972972973,
"grad_norm": 0.08269208669662476,
"learning_rate": 2.702702702702703e-06,
"loss": 0.0002,
"step": 1080
},
{
"epoch": 9.81981981981982,
"grad_norm": 0.05740358307957649,
"learning_rate": 1.801801801801802e-06,
"loss": 0.0002,
"step": 1090
},
{
"epoch": 9.90990990990991,
"grad_norm": 0.2768089771270752,
"learning_rate": 9.00900900900901e-07,
"loss": 0.001,
"step": 1100
},
{
"epoch": 10.0,
"grad_norm": 0.1483727991580963,
"learning_rate": 0.0,
"loss": 0.0001,
"step": 1110
}
],
"logging_steps": 10,
"max_steps": 1110,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}