Qwen2.5-7B-1m-Open-R1-Distill / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9955555555555555,
"eval_steps": 100,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02962962962962963,
"grad_norm": 3.2346072118607885,
"learning_rate": 5.882352941176471e-06,
"loss": 1.0169,
"step": 5
},
{
"epoch": 0.05925925925925926,
"grad_norm": 2.1264769797556,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.8756,
"step": 10
},
{
"epoch": 0.08888888888888889,
"grad_norm": 1.4231223829611963,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.7716,
"step": 15
},
{
"epoch": 0.11851851851851852,
"grad_norm": 1.0239519248217772,
"learning_rate": 1.9980527694749952e-05,
"loss": 0.733,
"step": 20
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.7202299506697141,
"learning_rate": 1.986180478852149e-05,
"loss": 0.7059,
"step": 25
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.5695527206070538,
"learning_rate": 1.963645895935632e-05,
"loss": 0.6695,
"step": 30
},
{
"epoch": 0.2074074074074074,
"grad_norm": 0.6039878702944135,
"learning_rate": 1.930692657985482e-05,
"loss": 0.6779,
"step": 35
},
{
"epoch": 0.23703703703703705,
"grad_norm": 0.5093109436831765,
"learning_rate": 1.887677045685188e-05,
"loss": 0.6563,
"step": 40
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.6018876927034159,
"learning_rate": 1.8350641311400813e-05,
"loss": 0.6597,
"step": 45
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.5268268034517721,
"learning_rate": 1.773422749654988e-05,
"loss": 0.6483,
"step": 50
},
{
"epoch": 0.32592592592592595,
"grad_norm": 0.5861151838314477,
"learning_rate": 1.7034193496547903e-05,
"loss": 0.649,
"step": 55
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.4503680956414544,
"learning_rate": 1.6258107872407376e-05,
"loss": 0.6319,
"step": 60
},
{
"epoch": 0.3851851851851852,
"grad_norm": 0.5307966629696642,
"learning_rate": 1.5414361432856475e-05,
"loss": 0.631,
"step": 65
},
{
"epoch": 0.4148148148148148,
"grad_norm": 0.5782202441621853,
"learning_rate": 1.4512076515391375e-05,
"loss": 0.6328,
"step": 70
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.48265196188960013,
"learning_rate": 1.356100835825547e-05,
"loss": 0.6254,
"step": 75
},
{
"epoch": 0.4740740740740741,
"grad_norm": 0.4813265336090251,
"learning_rate": 1.257143962968246e-05,
"loss": 0.6175,
"step": 80
},
{
"epoch": 0.5037037037037037,
"grad_norm": 0.4515213148563428,
"learning_rate": 1.155406925472205e-05,
"loss": 0.6298,
"step": 85
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.44644143858157415,
"learning_rate": 1.0519896741619803e-05,
"loss": 0.6215,
"step": 90
},
{
"epoch": 0.562962962962963,
"grad_norm": 0.4691339692298715,
"learning_rate": 9.480103258380198e-06,
"loss": 0.6245,
"step": 95
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.4311309942505263,
"learning_rate": 8.445930745277953e-06,
"loss": 0.6157,
"step": 100
},
{
"epoch": 0.5925925925925926,
"eval_loss": 0.6399185061454773,
"eval_runtime": 2.407,
"eval_samples_per_second": 52.763,
"eval_steps_per_second": 1.662,
"step": 100
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.42038278194306516,
"learning_rate": 7.428560370317542e-06,
"loss": 0.6167,
"step": 105
},
{
"epoch": 0.6518518518518519,
"grad_norm": 0.478843319690418,
"learning_rate": 6.438991641744531e-06,
"loss": 0.6219,
"step": 110
},
{
"epoch": 0.6814814814814815,
"grad_norm": 0.4347536528553071,
"learning_rate": 5.487923484608629e-06,
"loss": 0.6041,
"step": 115
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.3769111515881631,
"learning_rate": 4.5856385671435285e-06,
"loss": 0.5991,
"step": 120
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.4099151217963583,
"learning_rate": 3.7418921275926245e-06,
"loss": 0.5976,
"step": 125
},
{
"epoch": 0.7703703703703704,
"grad_norm": 0.36781593886520214,
"learning_rate": 2.965806503452098e-06,
"loss": 0.6132,
"step": 130
},
{
"epoch": 0.8,
"grad_norm": 0.3627464720674499,
"learning_rate": 2.265772503450122e-06,
"loss": 0.6104,
"step": 135
},
{
"epoch": 0.8296296296296296,
"grad_norm": 0.3608772496918306,
"learning_rate": 1.6493586885991908e-06,
"loss": 0.606,
"step": 140
},
{
"epoch": 0.8592592592592593,
"grad_norm": 0.34531094549512426,
"learning_rate": 1.1232295431481222e-06,
"loss": 0.6084,
"step": 145
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.3242007784943698,
"learning_rate": 6.930734201451817e-07,
"loss": 0.6146,
"step": 150
},
{
"epoch": 0.9185185185185185,
"grad_norm": 0.3386165753528116,
"learning_rate": 3.635410406436857e-07,
"loss": 0.6066,
"step": 155
},
{
"epoch": 0.9481481481481482,
"grad_norm": 0.32706837162712976,
"learning_rate": 1.3819521147851122e-07,
"loss": 0.6098,
"step": 160
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.3256703232456208,
"learning_rate": 1.947230525005006e-08,
"loss": 0.6117,
"step": 165
},
{
"epoch": 0.9955555555555555,
"step": 168,
"total_flos": 175887366488064.0,
"train_loss": 0.6538166212184089,
"train_runtime": 1597.3219,
"train_samples_per_second": 13.504,
"train_steps_per_second": 0.105
}
],
"logging_steps": 5,
"max_steps": 168,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 175887366488064.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
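
For reference, a minimal Python sketch for inspecting this state file: it loads the JSON above with the standard library, splits log_history into training records, evaluation records, and the final summary entry, and prints the loss trajectory. The filename "trainer_state.json" and all field names are taken from the file itself; the script is illustrative and is not part of the repository.

import json

# Load the trainer state written by the Hugging Face Trainer at the end of the run.
# "trainer_state.json" is assumed to sit in the current working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes three kinds of entries: per-logging-step training records
# (with "loss"), evaluation records (with "eval_loss"), and one final summary
# record (with "train_loss", runtime, and total FLOPs).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
summary = state["log_history"][-1]

print(f"global_step={state['global_step']}, epoch={state['epoch']:.4f}")
print(f"last logged training loss (step {train_logs[-1]['step']}): {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"eval at step {e['step']}: eval_loss={e['eval_loss']:.4f}")
print(f"mean training loss over the run: {summary['train_loss']:.4f}")

Run against this file, the script would report the single evaluation at step 100 (eval_loss ≈ 0.6399) and the aggregate train_loss of ≈ 0.6538 recorded in the final log entry.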