zephyr-7b-sft-qlora / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 17,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 0.8823225498199463,
      "learning_rate": 0.0001,
      "loss": 1.1403,
      "step": 1
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.6753513813018799,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.0209,
      "step": 5
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.499729186296463,
      "learning_rate": 8.954715367323468e-05,
      "loss": 1.0693,
      "step": 10
    },
    {
      "epoch": 0.8823529411764706,
      "grad_norm": 0.993525505065918,
      "learning_rate": 8.645454235739903e-06,
      "loss": 1.0604,
      "step": 15
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.0393339395523071,
      "eval_runtime": 9748.8038,
      "eval_samples_per_second": 1.575,
      "eval_steps_per_second": 0.197,
      "step": 17
    },
    {
      "epoch": 1.0,
      "step": 17,
      "total_flos": 1.1953201746018304e+16,
      "train_loss": 1.045757819624508,
      "train_runtime": 10046.7574,
      "train_samples_per_second": 0.014,
      "train_steps_per_second": 0.002
    }
  ],
  "logging_steps": 5,
  "max_steps": 17,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1953201746018304e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
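
For reference, a minimal sketch of how a trainer_state.json like the one above can be inspected programmatically. It only assumes the file sits in the current working directory under the name trainer_state.json (the path is an assumption; point it at your checkpoint directory) and walks log_history to print the recorded loss and learning-rate trajectory.

import json

# Load the state file written by the Hugging Face Trainer.
# "trainer_state.json" is an assumed path; adjust it to your checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Walk the training log and print the recorded loss trajectory.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>2}: train loss {entry['loss']:.4f}, "
              f"lr {entry['learning_rate']:.2e}")
    elif "eval_loss" in entry:
        print(f"step {entry['step']:>2}: eval loss {entry['eval_loss']:.4f}")

Note that the final summary entry (the one carrying train_loss, train_runtime, and total_flos) has neither a "loss" nor an "eval_loss" key, so the loop above skips it.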