{
  "best_metric": 1.0443669557571411,
  "best_model_checkpoint": "./output/checkpoints/2024-06-10_23-12-21/checkpoint-40",
  "epoch": 1.0,
  "eval_steps": 1,
  "global_step": 43,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.023255813953488372,
      "grad_norm": 3.1748037338256836,
      "learning_rate": 8e-05,
      "loss": 5.1513,
      "step": 1
    },
    {
      "epoch": 0.023255813953488372,
      "eval_loss": 5.098567485809326,
      "eval_runtime": 10.4736,
      "eval_samples_per_second": 11.171,
      "eval_steps_per_second": 0.764,
      "step": 1
    },
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 3.149711847305298,
      "learning_rate": 0.00016,
      "loss": 5.1556,
      "step": 2
    },
    {
      "epoch": 0.046511627906976744,
      "eval_loss": 4.689513683319092,
      "eval_runtime": 10.5409,
      "eval_samples_per_second": 11.1,
      "eval_steps_per_second": 0.759,
      "step": 2
    },
    {
      "epoch": 0.06976744186046512,
      "grad_norm": 3.1611013412475586,
      "learning_rate": 0.00024,
      "loss": 4.6255,
      "step": 3
    },
    {
      "epoch": 0.06976744186046512,
      "eval_loss": 3.2834692001342773,
      "eval_runtime": 10.5481,
      "eval_samples_per_second": 11.092,
      "eval_steps_per_second": 0.758,
      "step": 3
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 3.4506750106811523,
      "learning_rate": 0.00032,
      "loss": 3.268,
      "step": 4
    },
    {
      "epoch": 0.09302325581395349,
      "eval_loss": 2.105466604232788,
      "eval_runtime": 10.6124,
      "eval_samples_per_second": 11.025,
      "eval_steps_per_second": 0.754,
      "step": 4
    },
    {
      "epoch": 0.11627906976744186,
      "grad_norm": 2.139575719833374,
      "learning_rate": 0.0004,
      "loss": 2.0627,
      "step": 5
    },
    {
      "epoch": 0.11627906976744186,
      "eval_loss": 1.586738109588623,
      "eval_runtime": 10.5613,
      "eval_samples_per_second": 11.078,
      "eval_steps_per_second": 0.757,
      "step": 5
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 1.1181566715240479,
      "learning_rate": 0.00038947368421052633,
      "loss": 1.5525,
      "step": 6
    },
    {
      "epoch": 0.13953488372093023,
      "eval_loss": 1.442232608795166,
      "eval_runtime": 10.458,
      "eval_samples_per_second": 11.188,
      "eval_steps_per_second": 0.765,
      "step": 6
    },
    {
      "epoch": 0.16279069767441862,
      "grad_norm": 1.6108548641204834,
      "learning_rate": 0.00037894736842105265,
      "loss": 1.4249,
      "step": 7
    },
    {
      "epoch": 0.16279069767441862,
      "eval_loss": 1.2906938791275024,
      "eval_runtime": 10.5694,
      "eval_samples_per_second": 11.07,
      "eval_steps_per_second": 0.757,
      "step": 7
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 0.19699308276176453,
      "learning_rate": 0.00036842105263157896,
      "loss": 1.2713,
      "step": 8
    },
    {
      "epoch": 0.18604651162790697,
      "eval_loss": 1.2516891956329346,
      "eval_runtime": 10.4427,
      "eval_samples_per_second": 11.204,
      "eval_steps_per_second": 0.766,
      "step": 8
    },
    {
      "epoch": 0.20930232558139536,
      "grad_norm": 0.12418746948242188,
      "learning_rate": 0.0003578947368421053,
      "loss": 1.2447,
      "step": 9
    },
    {
      "epoch": 0.20930232558139536,
      "eval_loss": 1.2212988138198853,
      "eval_runtime": 10.4966,
      "eval_samples_per_second": 11.146,
      "eval_steps_per_second": 0.762,
      "step": 9
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 0.10525760799646378,
      "learning_rate": 0.0003473684210526316,
      "loss": 1.2071,
      "step": 10
    },
    {
      "epoch": 0.23255813953488372,
      "eval_loss": 1.1963647603988647,
      "eval_runtime": 10.5914,
      "eval_samples_per_second": 11.047,
      "eval_steps_per_second": 0.755,
      "step": 10
    },
    {
      "epoch": 0.2558139534883721,
      "grad_norm": 0.1781289279460907,
      "learning_rate": 0.0003368421052631579,
      "loss": 1.1832,
      "step": 11
    },
    {
      "epoch": 0.2558139534883721,
      "eval_loss": 1.1730906963348389,
      "eval_runtime": 10.565,
      "eval_samples_per_second": 11.074,
      "eval_steps_per_second": 0.757,
      "step": 11
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 0.16088849306106567,
      "learning_rate": 0.0003263157894736842,
      "loss": 1.1447,
      "step": 12
    },
    {
      "epoch": 0.27906976744186046,
      "eval_loss": 1.1521227359771729,
      "eval_runtime": 10.5741,
      "eval_samples_per_second": 11.065,
      "eval_steps_per_second": 0.757,
      "step": 12
    },
    {
      "epoch": 0.3023255813953488,
      "grad_norm": 0.11435263603925705,
      "learning_rate": 0.00031578947368421053,
      "loss": 1.1098,
      "step": 13
    },
    {
      "epoch": 0.3023255813953488,
      "eval_loss": 1.131661295890808,
      "eval_runtime": 10.5531,
      "eval_samples_per_second": 11.087,
      "eval_steps_per_second": 0.758,
      "step": 13
    },
    {
      "epoch": 0.32558139534883723,
      "grad_norm": 0.0963783711194992,
      "learning_rate": 0.0003052631578947369,
      "loss": 1.1298,
      "step": 14
    },
    {
      "epoch": 0.32558139534883723,
      "eval_loss": 1.115828275680542,
      "eval_runtime": 10.5575,
      "eval_samples_per_second": 11.082,
      "eval_steps_per_second": 0.758,
      "step": 14
    },
    {
      "epoch": 0.3488372093023256,
      "grad_norm": 0.0737914890050888,
      "learning_rate": 0.00029473684210526316,
      "loss": 1.0838,
      "step": 15
    },
    {
      "epoch": 0.3488372093023256,
      "eval_loss": 1.1041659116744995,
      "eval_runtime": 10.4999,
      "eval_samples_per_second": 11.143,
      "eval_steps_per_second": 0.762,
      "step": 15
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 0.06079930067062378,
      "learning_rate": 0.00028421052631578947,
      "loss": 1.0878,
      "step": 16
    },
    {
      "epoch": 0.37209302325581395,
      "eval_loss": 1.0959372520446777,
      "eval_runtime": 10.5731,
      "eval_samples_per_second": 11.066,
      "eval_steps_per_second": 0.757,
      "step": 16
    },
    {
      "epoch": 0.3953488372093023,
      "grad_norm": 0.05798230320215225,
      "learning_rate": 0.0002736842105263158,
      "loss": 1.0726,
      "step": 17
    },
    {
      "epoch": 0.3953488372093023,
      "eval_loss": 1.0892282724380493,
      "eval_runtime": 10.5989,
      "eval_samples_per_second": 11.039,
      "eval_steps_per_second": 0.755,
      "step": 17
    },
    {
      "epoch": 0.4186046511627907,
      "grad_norm": 0.05506344139575958,
      "learning_rate": 0.00026315789473684215,
      "loss": 1.0643,
      "step": 18
    },
    {
      "epoch": 0.4186046511627907,
      "eval_loss": 1.0838031768798828,
      "eval_runtime": 10.583,
      "eval_samples_per_second": 11.055,
      "eval_steps_per_second": 0.756,
      "step": 18
    },
    {
      "epoch": 0.4418604651162791,
      "grad_norm": 0.05198650062084198,
      "learning_rate": 0.0002526315789473684,
      "loss": 1.0661,
      "step": 19
    },
    {
      "epoch": 0.4418604651162791,
      "eval_loss": 1.0791563987731934,
      "eval_runtime": 10.5263,
      "eval_samples_per_second": 11.115,
      "eval_steps_per_second": 0.76,
      "step": 19
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 0.051451779901981354,
      "learning_rate": 0.00024210526315789475,
      "loss": 1.062,
      "step": 20
    },
    {
      "epoch": 0.46511627906976744,
      "eval_loss": 1.0747723579406738,
      "eval_runtime": 10.5633,
      "eval_samples_per_second": 11.076,
      "eval_steps_per_second": 0.757,
      "step": 20
    },
    {
      "epoch": 0.4883720930232558,
      "grad_norm": 0.04886360839009285,
      "learning_rate": 0.00023157894736842107,
      "loss": 1.0482,
      "step": 21
    },
    {
      "epoch": 0.4883720930232558,
      "eval_loss": 1.0707547664642334,
      "eval_runtime": 10.5723,
      "eval_samples_per_second": 11.067,
      "eval_steps_per_second": 0.757,
      "step": 21
    },
    {
      "epoch": 0.5116279069767442,
      "grad_norm": 0.04511698707938194,
      "learning_rate": 0.0002210526315789474,
      "loss": 1.0526,
      "step": 22
    },
    {
      "epoch": 0.5116279069767442,
      "eval_loss": 1.0674015283584595,
      "eval_runtime": 10.5187,
      "eval_samples_per_second": 11.123,
      "eval_steps_per_second": 0.761,
      "step": 22
    },
    {
      "epoch": 0.5348837209302325,
      "grad_norm": 0.049303531646728516,
      "learning_rate": 0.0002105263157894737,
      "loss": 1.0154,
      "step": 23
    },
    {
      "epoch": 0.5348837209302325,
      "eval_loss": 1.0645886659622192,
      "eval_runtime": 10.5824,
      "eval_samples_per_second": 11.056,
      "eval_steps_per_second": 0.756,
      "step": 23
    },
    {
      "epoch": 0.5581395348837209,
      "grad_norm": 0.13970467448234558,
      "learning_rate": 0.0002,
      "loss": 1.0336,
      "step": 24
    },
    {
      "epoch": 0.5581395348837209,
      "eval_loss": 1.0622761249542236,
      "eval_runtime": 10.5378,
      "eval_samples_per_second": 11.103,
      "eval_steps_per_second": 0.759,
      "step": 24
    },
    {
      "epoch": 0.5813953488372093,
      "grad_norm": 0.04498777911067009,
      "learning_rate": 0.00018947368421052632,
      "loss": 1.0727,
      "step": 25
    },
    {
      "epoch": 0.5813953488372093,
      "eval_loss": 1.0603291988372803,
      "eval_runtime": 10.5222,
      "eval_samples_per_second": 11.119,
      "eval_steps_per_second": 0.76,
      "step": 25
    },
    {
      "epoch": 0.6046511627906976,
      "grad_norm": 0.04431827366352081,
      "learning_rate": 0.00017894736842105264,
      "loss": 1.035,
      "step": 26
    },
    {
      "epoch": 0.6046511627906976,
      "eval_loss": 1.0584354400634766,
      "eval_runtime": 10.5855,
      "eval_samples_per_second": 11.053,
      "eval_steps_per_second": 0.756,
      "step": 26
    },
    {
      "epoch": 0.627906976744186,
      "grad_norm": 0.0488644503057003,
      "learning_rate": 0.00016842105263157895,
      "loss": 1.057,
      "step": 27
    },
    {
      "epoch": 0.627906976744186,
      "eval_loss": 1.0564749240875244,
      "eval_runtime": 10.5114,
      "eval_samples_per_second": 11.131,
      "eval_steps_per_second": 0.761,
      "step": 27
    },
    {
      "epoch": 0.6511627906976745,
      "grad_norm": 0.04599970951676369,
      "learning_rate": 0.00015789473684210527,
      "loss": 1.0386,
      "step": 28
    },
    {
      "epoch": 0.6511627906976745,
      "eval_loss": 1.0546391010284424,
      "eval_runtime": 10.536,
      "eval_samples_per_second": 11.105,
      "eval_steps_per_second": 0.759,
      "step": 28
    },
    {
      "epoch": 0.6744186046511628,
      "grad_norm": 0.04661059379577637,
      "learning_rate": 0.00014736842105263158,
      "loss": 1.0552,
      "step": 29
    },
    {
      "epoch": 0.6744186046511628,
      "eval_loss": 1.0529735088348389,
      "eval_runtime": 10.5645,
      "eval_samples_per_second": 11.075,
      "eval_steps_per_second": 0.757,
      "step": 29
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 0.04191095009446144,
      "learning_rate": 0.0001368421052631579,
      "loss": 1.0261,
      "step": 30
    },
    {
      "epoch": 0.6976744186046512,
      "eval_loss": 1.0515702962875366,
      "eval_runtime": 10.5459,
      "eval_samples_per_second": 11.094,
      "eval_steps_per_second": 0.759,
      "step": 30
    },
    {
      "epoch": 0.7209302325581395,
      "grad_norm": 0.04263598471879959,
      "learning_rate": 0.0001263157894736842,
      "loss": 1.0265,
      "step": 31
    },
    {
      "epoch": 0.7209302325581395,
      "eval_loss": 1.0504158735275269,
      "eval_runtime": 10.5618,
      "eval_samples_per_second": 11.078,
      "eval_steps_per_second": 0.757,
      "step": 31
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.043207377195358276,
      "learning_rate": 0.00011578947368421053,
      "loss": 1.038,
      "step": 32
    },
    {
      "epoch": 0.7441860465116279,
      "eval_loss": 1.049385905265808,
      "eval_runtime": 10.5765,
      "eval_samples_per_second": 11.062,
      "eval_steps_per_second": 0.756,
      "step": 32
    },
    {
      "epoch": 0.7674418604651163,
      "grad_norm": 0.0437220074236393,
      "learning_rate": 0.00010526315789473685,
      "loss": 1.0456,
      "step": 33
    },
    {
      "epoch": 0.7674418604651163,
      "eval_loss": 1.0484665632247925,
      "eval_runtime": 10.6182,
      "eval_samples_per_second": 11.019,
      "eval_steps_per_second": 0.753,
      "step": 33
    },
    {
      "epoch": 0.7906976744186046,
      "grad_norm": 0.04280296340584755,
      "learning_rate": 9.473684210526316e-05,
      "loss": 1.0339,
      "step": 34
    },
    {
      "epoch": 0.7906976744186046,
      "eval_loss": 1.047668218612671,
      "eval_runtime": 10.5375,
      "eval_samples_per_second": 11.103,
      "eval_steps_per_second": 0.759,
      "step": 34
    },
    {
      "epoch": 0.813953488372093,
      "grad_norm": 0.048476554453372955,
      "learning_rate": 8.421052631578948e-05,
      "loss": 1.0452,
      "step": 35
    },
    {
      "epoch": 0.813953488372093,
      "eval_loss": 1.0469459295272827,
      "eval_runtime": 10.5569,
      "eval_samples_per_second": 11.083,
      "eval_steps_per_second": 0.758,
      "step": 35
    },
    {
      "epoch": 0.8372093023255814,
      "grad_norm": 0.03983575850725174,
      "learning_rate": 7.368421052631579e-05,
      "loss": 1.0112,
      "step": 36
    },
    {
      "epoch": 0.8372093023255814,
      "eval_loss": 1.0462400913238525,
      "eval_runtime": 10.5644,
      "eval_samples_per_second": 11.075,
      "eval_steps_per_second": 0.757,
      "step": 36
    },
    {
      "epoch": 0.8604651162790697,
      "grad_norm": 0.04240667447447777,
      "learning_rate": 6.31578947368421e-05,
      "loss": 1.032,
      "step": 37
    },
    {
      "epoch": 0.8604651162790697,
      "eval_loss": 1.0456737279891968,
      "eval_runtime": 10.5478,
      "eval_samples_per_second": 11.092,
      "eval_steps_per_second": 0.758,
      "step": 37
    },
    {
      "epoch": 0.8837209302325582,
      "grad_norm": 0.04697669297456741,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 1.0216,
      "step": 38
    },
    {
      "epoch": 0.8837209302325582,
      "eval_loss": 1.0451328754425049,
      "eval_runtime": 10.5909,
      "eval_samples_per_second": 11.047,
      "eval_steps_per_second": 0.755,
      "step": 38
    },
    {
      "epoch": 0.9069767441860465,
      "grad_norm": 0.04411022737622261,
      "learning_rate": 4.210526315789474e-05,
      "loss": 1.0367,
      "step": 39
    },
    {
      "epoch": 0.9069767441860465,
      "eval_loss": 1.0447440147399902,
      "eval_runtime": 10.5309,
      "eval_samples_per_second": 11.11,
      "eval_steps_per_second": 0.76,
      "step": 39
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 0.051756396889686584,
      "learning_rate": 3.157894736842105e-05,
      "loss": 1.0144,
      "step": 40
    },
    {
      "epoch": 0.9302325581395349,
      "eval_loss": 1.0443669557571411,
      "eval_runtime": 10.545,
      "eval_samples_per_second": 11.095,
      "eval_steps_per_second": 0.759,
      "step": 40
    },
    {
      "epoch": 0.9534883720930233,
      "grad_norm": 0.05069413781166077,
      "learning_rate": 2.105263157894737e-05,
      "loss": 1.0339,
      "step": 41
    },
    {
      "epoch": 0.9534883720930233,
      "eval_loss": 1.0441150665283203,
      "eval_runtime": 10.5475,
      "eval_samples_per_second": 11.093,
      "eval_steps_per_second": 0.758,
      "step": 41
    },
    {
      "epoch": 0.9767441860465116,
      "grad_norm": 0.03964732587337494,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 1.0046,
      "step": 42
    },
    {
      "epoch": 0.9767441860465116,
      "eval_loss": 1.043948769569397,
      "eval_runtime": 10.5196,
      "eval_samples_per_second": 11.122,
      "eval_steps_per_second": 0.76,
      "step": 42
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.06997141987085342,
      "learning_rate": 0.0,
      "loss": 1.0082,
      "step": 43
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.0438673496246338,
      "eval_runtime": 10.5692,
      "eval_samples_per_second": 11.07,
      "eval_steps_per_second": 0.757,
      "step": 43
    },
    {
      "epoch": 1.0,
      "step": 43,
      "total_flos": 1.5716557095174144e+16,
      "train_loss": 1.4361294785211252,
      "train_runtime": 852.0177,
      "train_samples_per_second": 1.592,
      "train_steps_per_second": 0.05
    }
  ],
  "logging_steps": 1,
  "max_steps": 43,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5716557095174144e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}