{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 13,
"global_step": 49,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02040816326530612,
"grad_norm": 0.7881951332092285,
"learning_rate": 2e-05,
"loss": 2.7509,
"step": 1
},
{
"epoch": 0.02040816326530612,
"eval_loss": 2.6902382373809814,
"eval_runtime": 269.5606,
"eval_samples_per_second": 6.288,
"eval_steps_per_second": 3.146,
"step": 1
},
{
"epoch": 0.04081632653061224,
"grad_norm": 0.789082407951355,
"learning_rate": 4e-05,
"loss": 2.7449,
"step": 2
},
{
"epoch": 0.061224489795918366,
"grad_norm": 0.7354114055633545,
"learning_rate": 6e-05,
"loss": 2.7164,
"step": 3
},
{
"epoch": 0.08163265306122448,
"grad_norm": 0.7292255759239197,
"learning_rate": 8e-05,
"loss": 2.7174,
"step": 4
},
{
"epoch": 0.10204081632653061,
"grad_norm": 0.6898028254508972,
"learning_rate": 0.0001,
"loss": 2.6891,
"step": 5
},
{
"epoch": 0.12244897959183673,
"grad_norm": 0.6861400604248047,
"learning_rate": 0.00012,
"loss": 2.6545,
"step": 6
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.7510350346565247,
"learning_rate": 0.00014,
"loss": 2.5656,
"step": 7
},
{
"epoch": 0.16326530612244897,
"grad_norm": 0.8011165261268616,
"learning_rate": 0.00016,
"loss": 2.4519,
"step": 8
},
{
"epoch": 0.1836734693877551,
"grad_norm": 0.8624005317687988,
"learning_rate": 0.00018,
"loss": 2.3178,
"step": 9
},
{
"epoch": 0.20408163265306123,
"grad_norm": 0.8004987835884094,
"learning_rate": 0.0002,
"loss": 2.1783,
"step": 10
},
{
"epoch": 0.22448979591836735,
"grad_norm": 0.6362400054931641,
"learning_rate": 0.000199985736255971,
"loss": 2.0252,
"step": 11
},
{
"epoch": 0.24489795918367346,
"grad_norm": 0.7930936217308044,
"learning_rate": 0.0001999429490929718,
"loss": 1.8839,
"step": 12
},
{
"epoch": 0.2653061224489796,
"grad_norm": 0.5149843096733093,
"learning_rate": 0.00019987165071710527,
"loss": 1.8064,
"step": 13
},
{
"epoch": 0.2653061224489796,
"eval_loss": 1.6734941005706787,
"eval_runtime": 271.2615,
"eval_samples_per_second": 6.249,
"eval_steps_per_second": 3.126,
"step": 13
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.42121434211730957,
"learning_rate": 0.00019977186146800707,
"loss": 1.7922,
"step": 14
},
{
"epoch": 0.30612244897959184,
"grad_norm": 0.3523242771625519,
"learning_rate": 0.0001996436098130433,
"loss": 1.7711,
"step": 15
},
{
"epoch": 0.32653061224489793,
"grad_norm": 0.3384595215320587,
"learning_rate": 0.00019948693233918952,
"loss": 1.7152,
"step": 16
},
{
"epoch": 0.3469387755102041,
"grad_norm": 0.34942421317100525,
"learning_rate": 0.00019930187374259337,
"loss": 1.7112,
"step": 17
},
{
"epoch": 0.3673469387755102,
"grad_norm": 0.31712639331817627,
"learning_rate": 0.00019908848681582391,
"loss": 1.7059,
"step": 18
},
{
"epoch": 0.3877551020408163,
"grad_norm": 0.2875436842441559,
"learning_rate": 0.00019884683243281116,
"loss": 1.6468,
"step": 19
},
{
"epoch": 0.40816326530612246,
"grad_norm": 0.24433130025863647,
"learning_rate": 0.00019857697953148037,
"loss": 1.6408,
"step": 20
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.21414674818515778,
"learning_rate": 0.00019827900509408581,
"loss": 1.616,
"step": 21
},
{
"epoch": 0.4489795918367347,
"grad_norm": 0.21537622809410095,
"learning_rate": 0.00019795299412524945,
"loss": 1.609,
"step": 22
},
{
"epoch": 0.46938775510204084,
"grad_norm": 0.2432074397802353,
"learning_rate": 0.00019759903962771156,
"loss": 1.6066,
"step": 23
},
{
"epoch": 0.4897959183673469,
"grad_norm": 0.2359839379787445,
"learning_rate": 0.00019721724257579907,
"loss": 1.5851,
"step": 24
},
{
"epoch": 0.5102040816326531,
"grad_norm": 0.22065888345241547,
"learning_rate": 0.00019680771188662044,
"loss": 1.5739,
"step": 25
},
{
"epoch": 0.5306122448979592,
"grad_norm": 0.20339132845401764,
"learning_rate": 0.0001963705643889941,
"loss": 1.5513,
"step": 26
},
{
"epoch": 0.5306122448979592,
"eval_loss": 1.4832030534744263,
"eval_runtime": 271.2449,
"eval_samples_per_second": 6.249,
"eval_steps_per_second": 3.126,
"step": 26
},
{
"epoch": 0.5510204081632653,
"grad_norm": 0.18875224888324738,
"learning_rate": 0.00019590592479012023,
"loss": 1.5378,
"step": 27
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.18564417958259583,
"learning_rate": 0.00019541392564000488,
"loss": 1.5212,
"step": 28
},
{
"epoch": 0.5918367346938775,
"grad_norm": 0.16226942837238312,
"learning_rate": 0.00019489470729364692,
"loss": 1.5391,
"step": 29
},
{
"epoch": 0.6122448979591837,
"grad_norm": 0.15650039911270142,
"learning_rate": 0.00019434841787099803,
"loss": 1.511,
"step": 30
},
{
"epoch": 0.6326530612244898,
"grad_norm": 0.15976540744304657,
"learning_rate": 0.00019377521321470805,
"loss": 1.5119,
"step": 31
},
{
"epoch": 0.6530612244897959,
"grad_norm": 0.16409288346767426,
"learning_rate": 0.00019317525684566685,
"loss": 1.4909,
"step": 32
},
{
"epoch": 0.673469387755102,
"grad_norm": 0.15468019247055054,
"learning_rate": 0.00019254871991635598,
"loss": 1.4951,
"step": 33
},
{
"epoch": 0.6938775510204082,
"grad_norm": 0.1462036371231079,
"learning_rate": 0.00019189578116202307,
"loss": 1.4643,
"step": 34
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.1541963368654251,
"learning_rate": 0.00019121662684969335,
"loss": 1.5159,
"step": 35
},
{
"epoch": 0.7346938775510204,
"grad_norm": 0.14798064529895782,
"learning_rate": 0.00019051145072503215,
"loss": 1.4741,
"step": 36
},
{
"epoch": 0.7551020408163265,
"grad_norm": 0.13914817571640015,
"learning_rate": 0.00018978045395707418,
"loss": 1.4788,
"step": 37
},
{
"epoch": 0.7755102040816326,
"grad_norm": 0.15608824789524078,
"learning_rate": 0.00018902384508083517,
"loss": 1.4687,
"step": 38
},
{
"epoch": 0.7959183673469388,
"grad_norm": 0.14460116624832153,
"learning_rate": 0.00018824183993782192,
"loss": 1.482,
"step": 39
},
{
"epoch": 0.7959183673469388,
"eval_loss": 1.411073088645935,
"eval_runtime": 271.292,
"eval_samples_per_second": 6.248,
"eval_steps_per_second": 3.126,
"step": 39
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.15740551054477692,
"learning_rate": 0.00018743466161445823,
"loss": 1.4486,
"step": 40
},
{
"epoch": 0.8367346938775511,
"grad_norm": 0.14149661362171173,
"learning_rate": 0.00018660254037844388,
"loss": 1.4353,
"step": 41
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.14034292101860046,
"learning_rate": 0.0001857457136130651,
"loss": 1.4523,
"step": 42
},
{
"epoch": 0.8775510204081632,
"grad_norm": 0.1487722396850586,
"learning_rate": 0.00018486442574947511,
"loss": 1.4095,
"step": 43
},
{
"epoch": 0.8979591836734694,
"grad_norm": 0.17400234937667847,
"learning_rate": 0.00018395892819696389,
"loss": 1.4414,
"step": 44
},
{
"epoch": 0.9183673469387755,
"grad_norm": 0.1741325408220291,
"learning_rate": 0.00018302947927123766,
"loss": 1.4379,
"step": 45
},
{
"epoch": 0.9387755102040817,
"grad_norm": 0.15319454669952393,
"learning_rate": 0.00018207634412072764,
"loss": 1.405,
"step": 46
},
{
"epoch": 0.9591836734693877,
"grad_norm": 0.15876264870166779,
"learning_rate": 0.00018109979465095013,
"loss": 1.4122,
"step": 47
},
{
"epoch": 0.9795918367346939,
"grad_norm": 0.17120805382728577,
"learning_rate": 0.00018010010944693848,
"loss": 1.4132,
"step": 48
},
{
"epoch": 1.0,
"grad_norm": 0.1436116099357605,
"learning_rate": 0.00017907757369376985,
"loss": 1.416,
"step": 49
}
],
"logging_steps": 1,
"max_steps": 196,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 49,
"total_flos": 1.0209350802997248e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}