{
"best_metric": 1.8858510255813599,
"best_model_checkpoint": "./lora-redpajama/checkpoint-600",
"epoch": 2.745995423340961,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 2.9999999999999997e-05,
"loss": 5.6682,
"step": 10
},
{
"epoch": 0.09,
"learning_rate": 5.9999999999999995e-05,
"loss": 5.1317,
"step": 20
},
{
"epoch": 0.14,
"learning_rate": 8.999999999999999e-05,
"loss": 4.1627,
"step": 30
},
{
"epoch": 0.18,
"learning_rate": 0.00011999999999999999,
"loss": 3.1593,
"step": 40
},
{
"epoch": 0.23,
"learning_rate": 0.00015,
"loss": 2.6813,
"step": 50
},
{
"epoch": 0.27,
"learning_rate": 0.00017999999999999998,
"loss": 2.4024,
"step": 60
},
{
"epoch": 0.32,
"learning_rate": 0.00020999999999999998,
"loss": 2.3087,
"step": 70
},
{
"epoch": 0.37,
"learning_rate": 0.00023999999999999998,
"loss": 2.2026,
"step": 80
},
{
"epoch": 0.41,
"learning_rate": 0.00027,
"loss": 2.1257,
"step": 90
},
{
"epoch": 0.46,
"learning_rate": 0.0003,
"loss": 2.118,
"step": 100
},
{
"epoch": 0.5,
"learning_rate": 0.0002945848375451263,
"loss": 2.076,
"step": 110
},
{
"epoch": 0.55,
"learning_rate": 0.00028916967509025267,
"loss": 2.0684,
"step": 120
},
{
"epoch": 0.59,
"learning_rate": 0.000283754512635379,
"loss": 2.0212,
"step": 130
},
{
"epoch": 0.64,
"learning_rate": 0.00027833935018050537,
"loss": 2.0192,
"step": 140
},
{
"epoch": 0.69,
"learning_rate": 0.00027292418772563177,
"loss": 1.9991,
"step": 150
},
{
"epoch": 0.73,
"learning_rate": 0.0002675090252707581,
"loss": 2.0075,
"step": 160
},
{
"epoch": 0.78,
"learning_rate": 0.00026209386281588447,
"loss": 2.0414,
"step": 170
},
{
"epoch": 0.82,
"learning_rate": 0.0002566787003610108,
"loss": 2.0048,
"step": 180
},
{
"epoch": 0.87,
"learning_rate": 0.00025126353790613716,
"loss": 1.9604,
"step": 190
},
{
"epoch": 0.92,
"learning_rate": 0.0002458483754512635,
"loss": 1.9739,
"step": 200
},
{
"epoch": 0.92,
"eval_loss": 2.0006659030914307,
"eval_runtime": 83.0225,
"eval_samples_per_second": 24.09,
"eval_steps_per_second": 0.602,
"step": 200
},
{
"epoch": 0.96,
"learning_rate": 0.00024043321299638986,
"loss": 1.9856,
"step": 210
},
{
"epoch": 1.01,
"learning_rate": 0.00023501805054151624,
"loss": 1.9604,
"step": 220
},
{
"epoch": 1.05,
"learning_rate": 0.00022960288808664258,
"loss": 1.9357,
"step": 230
},
{
"epoch": 1.1,
"learning_rate": 0.00022418772563176893,
"loss": 1.9783,
"step": 240
},
{
"epoch": 1.14,
"learning_rate": 0.0002187725631768953,
"loss": 1.9581,
"step": 250
},
{
"epoch": 1.19,
"learning_rate": 0.00021335740072202166,
"loss": 1.9507,
"step": 260
},
{
"epoch": 1.24,
"learning_rate": 0.00020794223826714798,
"loss": 1.9258,
"step": 270
},
{
"epoch": 1.28,
"learning_rate": 0.00020252707581227435,
"loss": 1.912,
"step": 280
},
{
"epoch": 1.33,
"learning_rate": 0.0001971119133574007,
"loss": 1.9246,
"step": 290
},
{
"epoch": 1.37,
"learning_rate": 0.00019169675090252705,
"loss": 1.9106,
"step": 300
},
{
"epoch": 1.42,
"learning_rate": 0.00018628158844765343,
"loss": 1.8997,
"step": 310
},
{
"epoch": 1.46,
"learning_rate": 0.00018086642599277977,
"loss": 1.8958,
"step": 320
},
{
"epoch": 1.51,
"learning_rate": 0.00017545126353790612,
"loss": 1.8874,
"step": 330
},
{
"epoch": 1.56,
"learning_rate": 0.00017003610108303247,
"loss": 1.8956,
"step": 340
},
{
"epoch": 1.6,
"learning_rate": 0.00016462093862815885,
"loss": 1.8743,
"step": 350
},
{
"epoch": 1.65,
"learning_rate": 0.0001592057761732852,
"loss": 1.875,
"step": 360
},
{
"epoch": 1.69,
"learning_rate": 0.00015379061371841152,
"loss": 1.914,
"step": 370
},
{
"epoch": 1.74,
"learning_rate": 0.0001483754512635379,
"loss": 1.8679,
"step": 380
},
{
"epoch": 1.78,
"learning_rate": 0.00014296028880866424,
"loss": 1.8845,
"step": 390
},
{
"epoch": 1.83,
"learning_rate": 0.00013754512635379062,
"loss": 1.8715,
"step": 400
},
{
"epoch": 1.83,
"eval_loss": 1.9187651872634888,
"eval_runtime": 83.4457,
"eval_samples_per_second": 23.968,
"eval_steps_per_second": 0.599,
"step": 400
},
{
"epoch": 1.88,
"learning_rate": 0.00013212996389891696,
"loss": 1.8722,
"step": 410
},
{
"epoch": 1.92,
"learning_rate": 0.0001267148014440433,
"loss": 1.8782,
"step": 420
},
{
"epoch": 1.97,
"learning_rate": 0.00012129963898916966,
"loss": 1.8651,
"step": 430
},
{
"epoch": 2.01,
"learning_rate": 0.00011588447653429602,
"loss": 1.8639,
"step": 440
},
{
"epoch": 2.06,
"learning_rate": 0.00011046931407942237,
"loss": 1.8544,
"step": 450
},
{
"epoch": 2.11,
"learning_rate": 0.00010505415162454872,
"loss": 1.8086,
"step": 460
},
{
"epoch": 2.15,
"learning_rate": 9.963898916967508e-05,
"loss": 1.8364,
"step": 470
},
{
"epoch": 2.2,
"learning_rate": 9.422382671480144e-05,
"loss": 1.8745,
"step": 480
},
{
"epoch": 2.24,
"learning_rate": 8.880866425992779e-05,
"loss": 1.8652,
"step": 490
},
{
"epoch": 2.29,
"learning_rate": 8.339350180505415e-05,
"loss": 1.8381,
"step": 500
},
{
"epoch": 2.33,
"learning_rate": 7.797833935018049e-05,
"loss": 1.8419,
"step": 510
},
{
"epoch": 2.38,
"learning_rate": 7.256317689530685e-05,
"loss": 1.8665,
"step": 520
},
{
"epoch": 2.43,
"learning_rate": 6.71480144404332e-05,
"loss": 1.8074,
"step": 530
},
{
"epoch": 2.47,
"learning_rate": 6.173285198555956e-05,
"loss": 1.8404,
"step": 540
},
{
"epoch": 2.52,
"learning_rate": 5.6317689530685916e-05,
"loss": 1.8357,
"step": 550
},
{
"epoch": 2.56,
"learning_rate": 5.090252707581227e-05,
"loss": 1.8223,
"step": 560
},
{
"epoch": 2.61,
"learning_rate": 4.548736462093862e-05,
"loss": 1.8599,
"step": 570
},
{
"epoch": 2.65,
"learning_rate": 4.007220216606498e-05,
"loss": 1.8284,
"step": 580
},
{
"epoch": 2.7,
"learning_rate": 3.4657039711191336e-05,
"loss": 1.871,
"step": 590
},
{
"epoch": 2.75,
"learning_rate": 2.9241877256317685e-05,
"loss": 1.8523,
"step": 600
},
{
"epoch": 2.75,
"eval_loss": 1.8858510255813599,
"eval_runtime": 83.601,
"eval_samples_per_second": 23.923,
"eval_steps_per_second": 0.598,
"step": 600
}
],
"max_steps": 654,
"num_train_epochs": 3,
"total_flos": 6.234403544225546e+17,
"trial_name": null,
"trial_params": null
}