{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.112,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 0.18150772154331207,
"learning_rate": 0.0001,
"loss": 1.222,
"step": 50
},
{
"epoch": 0.32,
"grad_norm": 0.19756411015987396,
"learning_rate": 0.0002,
"loss": 1.0496,
"step": 100
},
{
"epoch": 0.48,
"grad_norm": 0.16334174573421478,
"learning_rate": 0.0001943566591422122,
"loss": 1.0084,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.1872260421514511,
"learning_rate": 0.0001887133182844244,
"loss": 0.9866,
"step": 200
},
{
"epoch": 0.8,
"grad_norm": 0.1755620241165161,
"learning_rate": 0.0001830699774266366,
"loss": 0.9553,
"step": 250
},
{
"epoch": 0.96,
"grad_norm": 0.18523474037647247,
"learning_rate": 0.00017742663656884877,
"loss": 0.9591,
"step": 300
},
{
"epoch": 1.1184,
"grad_norm": 0.18473008275032043,
"learning_rate": 0.00017178329571106095,
"loss": 0.9414,
"step": 350
},
{
"epoch": 1.2784,
"grad_norm": 0.18801915645599365,
"learning_rate": 0.00016613995485327313,
"loss": 0.906,
"step": 400
},
{
"epoch": 1.4384000000000001,
"grad_norm": 0.18927383422851562,
"learning_rate": 0.00016049661399548536,
"loss": 0.9321,
"step": 450
},
{
"epoch": 1.5984,
"grad_norm": 0.1739863157272339,
"learning_rate": 0.00015485327313769753,
"loss": 0.9218,
"step": 500
},
{
"epoch": 1.7584,
"grad_norm": 0.16567449271678925,
"learning_rate": 0.0001492099322799097,
"loss": 0.9324,
"step": 550
},
{
"epoch": 1.9184,
"grad_norm": 0.17867760360240936,
"learning_rate": 0.0001435665914221219,
"loss": 0.9298,
"step": 600
},
{
"epoch": 2.0768,
"grad_norm": 0.17996446788311005,
"learning_rate": 0.0001379232505643341,
"loss": 0.9146,
"step": 650
},
{
"epoch": 2.2368,
"grad_norm": 0.17823895812034607,
"learning_rate": 0.0001322799097065463,
"loss": 0.8931,
"step": 700
},
{
"epoch": 2.3968,
"grad_norm": 0.17624235153198242,
"learning_rate": 0.00012663656884875847,
"loss": 0.915,
"step": 750
},
{
"epoch": 2.5568,
"grad_norm": 0.2087327539920807,
"learning_rate": 0.00012099322799097066,
"loss": 0.8858,
"step": 800
},
{
"epoch": 2.7168,
"grad_norm": 0.19803094863891602,
"learning_rate": 0.00011534988713318284,
"loss": 0.8868,
"step": 850
},
{
"epoch": 2.8768000000000002,
"grad_norm": 0.19719158113002777,
"learning_rate": 0.00010970654627539505,
"loss": 0.9044,
"step": 900
},
{
"epoch": 3.0352,
"grad_norm": 0.18085877597332,
"learning_rate": 0.00010406320541760724,
"loss": 0.8841,
"step": 950
},
{
"epoch": 3.1952,
"grad_norm": 0.1980597972869873,
"learning_rate": 9.841986455981941e-05,
"loss": 0.8758,
"step": 1000
},
{
"epoch": 3.3552,
"grad_norm": 0.21820344030857086,
"learning_rate": 9.27765237020316e-05,
"loss": 0.8696,
"step": 1050
},
{
"epoch": 3.5152,
"grad_norm": 0.2168891280889511,
"learning_rate": 8.71331828442438e-05,
"loss": 0.874,
"step": 1100
},
{
"epoch": 3.6752000000000002,
"grad_norm": 0.18441538512706757,
"learning_rate": 8.148984198645599e-05,
"loss": 0.8641,
"step": 1150
},
{
"epoch": 3.8352,
"grad_norm": 0.18984852731227875,
"learning_rate": 7.584650112866818e-05,
"loss": 0.8836,
"step": 1200
},
{
"epoch": 3.9952,
"grad_norm": 0.20449557900428772,
"learning_rate": 7.020316027088037e-05,
"loss": 0.8687,
"step": 1250
},
{
"epoch": 4.1536,
"grad_norm": 0.18471935391426086,
"learning_rate": 6.455981941309256e-05,
"loss": 0.8615,
"step": 1300
},
{
"epoch": 4.3136,
"grad_norm": 0.1952802985906601,
"learning_rate": 5.891647855530474e-05,
"loss": 0.8669,
"step": 1350
},
{
"epoch": 4.4736,
"grad_norm": 0.1887563169002533,
"learning_rate": 5.3273137697516925e-05,
"loss": 0.8499,
"step": 1400
},
{
"epoch": 4.6336,
"grad_norm": 0.19429226219654083,
"learning_rate": 4.762979683972912e-05,
"loss": 0.8415,
"step": 1450
},
{
"epoch": 4.7936,
"grad_norm": 0.18455220758914948,
"learning_rate": 4.198645598194131e-05,
"loss": 0.8678,
"step": 1500
},
{
"epoch": 4.9536,
"grad_norm": 0.20824351906776428,
"learning_rate": 3.63431151241535e-05,
"loss": 0.8519,
"step": 1550
},
{
"epoch": 5.112,
"grad_norm": 0.1955386996269226,
"learning_rate": 3.069977426636569e-05,
"loss": 0.8507,
"step": 1600
}
],
"logging_steps": 50,
"max_steps": 1872,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.981518228386611e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}