{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9904761904761905,
"eval_steps": 500,
"global_step": 195,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0761904761904762,
"grad_norm": 0.2616604268550873,
"learning_rate": 4.9918932703355256e-05,
"loss": 0.3151,
"num_input_tokens_seen": 211280,
"step": 5
},
{
"epoch": 0.1523809523809524,
"grad_norm": 0.2265331745147705,
"learning_rate": 4.967625656594782e-05,
"loss": 0.2406,
"num_input_tokens_seen": 419136,
"step": 10
},
{
"epoch": 0.22857142857142856,
"grad_norm": 0.22684457898139954,
"learning_rate": 4.92735454356513e-05,
"loss": 0.1707,
"num_input_tokens_seen": 631632,
"step": 15
},
{
"epoch": 0.3047619047619048,
"grad_norm": 0.1457625925540924,
"learning_rate": 4.8713411048678635e-05,
"loss": 0.1223,
"num_input_tokens_seen": 836896,
"step": 20
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.12437087297439575,
"learning_rate": 4.799948609147061e-05,
"loss": 0.0945,
"num_input_tokens_seen": 1042416,
"step": 25
},
{
"epoch": 0.45714285714285713,
"grad_norm": 0.12409742921590805,
"learning_rate": 4.713640064133025e-05,
"loss": 0.0874,
"num_input_tokens_seen": 1244384,
"step": 30
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.09352266043424606,
"learning_rate": 4.6129752138594874e-05,
"loss": 0.0631,
"num_input_tokens_seen": 1445168,
"step": 35
},
{
"epoch": 0.6095238095238096,
"grad_norm": 0.11023324728012085,
"learning_rate": 4.498606908508754e-05,
"loss": 0.0786,
"num_input_tokens_seen": 1651760,
"step": 40
},
{
"epoch": 0.6857142857142857,
"grad_norm": 0.11427821964025497,
"learning_rate": 4.371276870427753e-05,
"loss": 0.0628,
"num_input_tokens_seen": 1849920,
"step": 45
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.12007594108581543,
"learning_rate": 4.231810883773999e-05,
"loss": 0.0712,
"num_input_tokens_seen": 2056592,
"step": 50
},
{
"epoch": 0.8380952380952381,
"grad_norm": 0.08302884548902512,
"learning_rate": 4.0811134389884433e-05,
"loss": 0.059,
"num_input_tokens_seen": 2265056,
"step": 55
},
{
"epoch": 0.9142857142857143,
"grad_norm": 0.10222816467285156,
"learning_rate": 3.920161866827889e-05,
"loss": 0.0635,
"num_input_tokens_seen": 2471328,
"step": 60
},
{
"epoch": 0.9904761904761905,
"grad_norm": 0.12720927596092224,
"learning_rate": 3.7500000000000003e-05,
"loss": 0.058,
"num_input_tokens_seen": 2676640,
"step": 65
},
{
"epoch": 1.0761904761904761,
"grad_norm": 0.11531339585781097,
"learning_rate": 3.5717314035076355e-05,
"loss": 0.0693,
"num_input_tokens_seen": 2897712,
"step": 70
},
{
"epoch": 1.1523809523809523,
"grad_norm": 0.09975585341453552,
"learning_rate": 3.386512217606339e-05,
"loss": 0.0537,
"num_input_tokens_seen": 3089808,
"step": 75
},
{
"epoch": 1.2285714285714286,
"grad_norm": 0.10007397085428238,
"learning_rate": 3.195543659791132e-05,
"loss": 0.0547,
"num_input_tokens_seen": 3296576,
"step": 80
},
{
"epoch": 1.3047619047619048,
"grad_norm": 0.10609045624732971,
"learning_rate": 3.0000642344401113e-05,
"loss": 0.0446,
"num_input_tokens_seen": 3508416,
"step": 85
},
{
"epoch": 1.380952380952381,
"grad_norm": 0.12080738693475723,
"learning_rate": 2.8013417006383076e-05,
"loss": 0.0535,
"num_input_tokens_seen": 3706512,
"step": 90
},
{
"epoch": 1.457142857142857,
"grad_norm": 0.09527820348739624,
"learning_rate": 2.600664850273538e-05,
"loss": 0.049,
"num_input_tokens_seen": 3913120,
"step": 95
},
{
"epoch": 1.5333333333333332,
"grad_norm": 0.11391786485910416,
"learning_rate": 2.399335149726463e-05,
"loss": 0.0438,
"num_input_tokens_seen": 4121248,
"step": 100
},
{
"epoch": 1.6095238095238096,
"grad_norm": 0.12547072768211365,
"learning_rate": 2.1986582993616926e-05,
"loss": 0.0474,
"num_input_tokens_seen": 4327824,
"step": 105
},
{
"epoch": 1.6857142857142857,
"grad_norm": 0.09639491140842438,
"learning_rate": 1.9999357655598893e-05,
"loss": 0.0443,
"num_input_tokens_seen": 4537936,
"step": 110
},
{
"epoch": 1.7619047619047619,
"grad_norm": 0.14130523800849915,
"learning_rate": 1.8044563402088684e-05,
"loss": 0.0429,
"num_input_tokens_seen": 4744432,
"step": 115
},
{
"epoch": 1.8380952380952382,
"grad_norm": 0.08459249138832092,
"learning_rate": 1.613487782393661e-05,
"loss": 0.0421,
"num_input_tokens_seen": 4955296,
"step": 120
},
{
"epoch": 1.9142857142857141,
"grad_norm": 0.12624752521514893,
"learning_rate": 1.4282685964923642e-05,
"loss": 0.0438,
"num_input_tokens_seen": 5154784,
"step": 125
},
{
"epoch": 1.9904761904761905,
"grad_norm": 0.09155242145061493,
"learning_rate": 1.2500000000000006e-05,
"loss": 0.0387,
"num_input_tokens_seen": 5358160,
"step": 130
},
{
"epoch": 2.0761904761904764,
"grad_norm": 0.08326593786478043,
"learning_rate": 1.0798381331721109e-05,
"loss": 0.0462,
"num_input_tokens_seen": 5577808,
"step": 135
},
{
"epoch": 2.1523809523809523,
"grad_norm": 0.09486888349056244,
"learning_rate": 9.18886561011557e-06,
"loss": 0.0455,
"num_input_tokens_seen": 5775552,
"step": 140
},
{
"epoch": 2.2285714285714286,
"grad_norm": 0.086710125207901,
"learning_rate": 7.681891162260015e-06,
"loss": 0.0389,
"num_input_tokens_seen": 5986112,
"step": 145
},
{
"epoch": 2.3047619047619046,
"grad_norm": 0.09076906740665436,
"learning_rate": 6.28723129572247e-06,
"loss": 0.0505,
"num_input_tokens_seen": 6185568,
"step": 150
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.13775157928466797,
"learning_rate": 5.013930914912476e-06,
"loss": 0.0469,
"num_input_tokens_seen": 6389392,
"step": 155
},
{
"epoch": 2.4571428571428573,
"grad_norm": 0.0946015864610672,
"learning_rate": 3.8702478614051355e-06,
"loss": 0.0382,
"num_input_tokens_seen": 6594256,
"step": 160
},
{
"epoch": 2.533333333333333,
"grad_norm": 0.08834231644868851,
"learning_rate": 2.8635993586697553e-06,
"loss": 0.0359,
"num_input_tokens_seen": 6802240,
"step": 165
},
{
"epoch": 2.6095238095238096,
"grad_norm": 0.10152524709701538,
"learning_rate": 2.0005139085293945e-06,
"loss": 0.0379,
"num_input_tokens_seen": 7004816,
"step": 170
},
{
"epoch": 2.685714285714286,
"grad_norm": 0.10987098515033722,
"learning_rate": 1.286588951321363e-06,
"loss": 0.0416,
"num_input_tokens_seen": 7208608,
"step": 175
},
{
"epoch": 2.761904761904762,
"grad_norm": 0.09526591747999191,
"learning_rate": 7.264545643486997e-07,
"loss": 0.0394,
"num_input_tokens_seen": 7418320,
"step": 180
},
{
"epoch": 2.8380952380952382,
"grad_norm": 0.10505763441324234,
"learning_rate": 3.237434340521789e-07,
"loss": 0.0421,
"num_input_tokens_seen": 7633248,
"step": 185
},
{
"epoch": 2.914285714285714,
"grad_norm": 0.09359151870012283,
"learning_rate": 8.106729664475176e-08,
"loss": 0.0377,
"num_input_tokens_seen": 7838576,
"step": 190
},
{
"epoch": 2.9904761904761905,
"grad_norm": 0.10142253339290619,
"learning_rate": 0.0,
"loss": 0.0468,
"num_input_tokens_seen": 8039616,
"step": 195
},
{
"epoch": 2.9904761904761905,
"num_input_tokens_seen": 8039616,
"step": 195,
"total_flos": 3.746792787278561e+17,
"train_loss": 0.0682682229922368,
"train_runtime": 5945.5431,
"train_samples_per_second": 0.53,
"train_steps_per_second": 0.033
}
],
"logging_steps": 5,
"max_steps": 195,
"num_input_tokens_seen": 8039616,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.746792787278561e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}