{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.99972616910579,
  "eval_steps": 500,
  "global_step": 24648,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "grad_norm": 17.19582176208496,
      "learning_rate": 1.0101419878296147e-05,
      "loss": 3.88,
      "step": 500
    },
    {
      "epoch": 0.12,
      "grad_norm": 22.53339385986328,
      "learning_rate": 2.0223123732251522e-05,
      "loss": 1.9753,
      "step": 1000
    },
    {
      "epoch": 0.18,
      "grad_norm": 20.645118713378906,
      "learning_rate": 3.0344827586206897e-05,
      "loss": 1.4051,
      "step": 1500
    },
    {
      "epoch": 0.24,
      "grad_norm": 11.275988578796387,
      "learning_rate": 4.0486815415821506e-05,
      "loss": 1.2663,
      "step": 2000
    },
    {
      "epoch": 0.3,
      "grad_norm": 9.870928764343262,
      "learning_rate": 4.993012667357887e-05,
      "loss": 1.2106,
      "step": 2500
    },
    {
      "epoch": 0.37,
      "grad_norm": 13.5437650680542,
      "learning_rate": 4.880313753775414e-05,
      "loss": 1.179,
      "step": 3000
    },
    {
      "epoch": 0.43,
      "grad_norm": 14.121590614318848,
      "learning_rate": 4.767614840192941e-05,
      "loss": 1.1044,
      "step": 3500
    },
    {
      "epoch": 0.49,
      "grad_norm": 11.260817527770996,
      "learning_rate": 4.6549159266104676e-05,
      "loss": 1.043,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "grad_norm": 15.443611145019531,
      "learning_rate": 4.542217013027995e-05,
      "loss": 1.0212,
      "step": 4500
    },
    {
      "epoch": 0.61,
      "grad_norm": 14.637772560119629,
      "learning_rate": 4.429518099445522e-05,
      "loss": 0.9764,
      "step": 5000
    },
    {
      "epoch": 0.67,
      "grad_norm": 18.14031410217285,
      "learning_rate": 4.316819185863049e-05,
      "loss": 0.9848,
      "step": 5500
    },
    {
      "epoch": 0.73,
      "grad_norm": 10.293859481811523,
      "learning_rate": 4.204120272280576e-05,
      "loss": 0.9378,
      "step": 6000
    },
    {
      "epoch": 0.79,
      "grad_norm": 10.198644638061523,
      "learning_rate": 4.0916467565252675e-05,
      "loss": 0.9363,
      "step": 6500
    },
    {
      "epoch": 0.85,
      "grad_norm": 9.705594062805176,
      "learning_rate": 3.978947842942794e-05,
      "loss": 0.9482,
      "step": 7000
    },
    {
      "epoch": 0.91,
      "grad_norm": 9.531152725219727,
      "learning_rate": 3.866248929360321e-05,
      "loss": 0.9033,
      "step": 7500
    },
    {
      "epoch": 0.97,
      "grad_norm": 11.644086837768555,
      "learning_rate": 3.753550015777848e-05,
      "loss": 0.9042,
      "step": 8000
    },
    {
      "epoch": 1.03,
      "grad_norm": 9.761592864990234,
      "learning_rate": 3.640851102195375e-05,
      "loss": 0.7736,
      "step": 8500
    },
    {
      "epoch": 1.1,
      "grad_norm": 8.461353302001953,
      "learning_rate": 3.528152188612902e-05,
      "loss": 0.6728,
      "step": 9000
    },
    {
      "epoch": 1.16,
      "grad_norm": 7.326571941375732,
      "learning_rate": 3.415453275030429e-05,
      "loss": 0.6811,
      "step": 9500
    },
    {
      "epoch": 1.22,
      "grad_norm": 7.815227031707764,
      "learning_rate": 3.3027543614479556e-05,
      "loss": 0.6769,
      "step": 10000
    },
    {
      "epoch": 1.28,
      "grad_norm": 11.3690824508667,
      "learning_rate": 3.190055447865483e-05,
      "loss": 0.6725,
      "step": 10500
    },
    {
      "epoch": 1.34,
      "grad_norm": 16.308076858520508,
      "learning_rate": 3.077581932110174e-05,
      "loss": 0.6596,
      "step": 11000
    },
    {
      "epoch": 1.4,
      "grad_norm": 8.466190338134766,
      "learning_rate": 2.9651084163548663e-05,
      "loss": 0.6683,
      "step": 11500
    },
    {
      "epoch": 1.46,
      "grad_norm": 15.437102317810059,
      "learning_rate": 2.8524095027723934e-05,
      "loss": 0.6908,
      "step": 12000
    },
    {
      "epoch": 1.52,
      "grad_norm": 7.6999406814575195,
      "learning_rate": 2.7397105891899204e-05,
      "loss": 0.6875,
      "step": 12500
    },
    {
      "epoch": 1.58,
      "grad_norm": 9.09485912322998,
      "learning_rate": 2.6270116756074475e-05,
      "loss": 0.6514,
      "step": 13000
    },
    {
      "epoch": 1.64,
      "grad_norm": 9.5618314743042,
      "learning_rate": 2.514312762024974e-05,
      "loss": 0.6452,
      "step": 13500
    },
    {
      "epoch": 1.7,
      "grad_norm": 11.317076683044434,
      "learning_rate": 2.401613848442501e-05,
      "loss": 0.6474,
      "step": 14000
    },
    {
      "epoch": 1.76,
      "grad_norm": 9.310576438903809,
      "learning_rate": 2.288914934860028e-05,
      "loss": 0.645,
      "step": 14500
    },
    {
      "epoch": 1.83,
      "grad_norm": 10.67317008972168,
      "learning_rate": 2.176216021277555e-05,
      "loss": 0.6259,
      "step": 15000
    },
    {
      "epoch": 1.89,
      "grad_norm": 9.449134826660156,
      "learning_rate": 2.0635171076950818e-05,
      "loss": 0.6342,
      "step": 15500
    },
    {
      "epoch": 1.95,
      "grad_norm": 5.870083808898926,
      "learning_rate": 1.9510435919397738e-05,
      "loss": 0.6219,
      "step": 16000
    },
    {
      "epoch": 2.01,
      "grad_norm": 12.512371063232422,
      "learning_rate": 1.838344678357301e-05,
      "loss": 0.6211,
      "step": 16500
    },
    {
      "epoch": 2.07,
      "grad_norm": 7.509356498718262,
      "learning_rate": 1.7256457647748276e-05,
      "loss": 0.416,
      "step": 17000
    },
    {
      "epoch": 2.13,
      "grad_norm": 28.715145111083984,
      "learning_rate": 1.6129468511923546e-05,
      "loss": 0.4208,
      "step": 17500
    },
    {
      "epoch": 2.19,
      "grad_norm": 5.760591983795166,
      "learning_rate": 1.5002479376098815e-05,
      "loss": 0.4239,
      "step": 18000
    },
    {
      "epoch": 2.25,
      "grad_norm": 7.888522624969482,
      "learning_rate": 1.3877744218545735e-05,
      "loss": 0.4135,
      "step": 18500
    },
    {
      "epoch": 2.31,
      "grad_norm": 16.289827346801758,
      "learning_rate": 1.2750755082721002e-05,
      "loss": 0.4387,
      "step": 19000
    },
    {
      "epoch": 2.37,
      "grad_norm": 12.066337585449219,
      "learning_rate": 1.1623765946896273e-05,
      "loss": 0.4299,
      "step": 19500
    },
    {
      "epoch": 2.43,
      "grad_norm": 12.49575424194336,
      "learning_rate": 1.0496776811071542e-05,
      "loss": 0.4255,
      "step": 20000
    },
    {
      "epoch": 2.49,
      "grad_norm": 9.139410972595215,
      "learning_rate": 9.369787675246811e-06,
      "loss": 0.4237,
      "step": 20500
    },
    {
      "epoch": 2.56,
      "grad_norm": 18.495920181274414,
      "learning_rate": 8.24505251769373e-06,
      "loss": 0.4293,
      "step": 21000
    },
    {
      "epoch": 2.62,
      "grad_norm": 22.488752365112305,
      "learning_rate": 7.118063381868999e-06,
      "loss": 0.4201,
      "step": 21500
    },
    {
      "epoch": 2.68,
      "grad_norm": 3.9392380714416504,
      "learning_rate": 5.9910742460442686e-06,
      "loss": 0.3984,
      "step": 22000
    },
    {
      "epoch": 2.74,
      "grad_norm": 5.216261386871338,
      "learning_rate": 4.8640851102195375e-06,
      "loss": 0.4006,
      "step": 22500
    },
    {
      "epoch": 2.8,
      "grad_norm": 8.506802558898926,
      "learning_rate": 3.7393499526664564e-06,
      "loss": 0.4085,
      "step": 23000
    },
    {
      "epoch": 2.86,
      "grad_norm": 15.280343055725098,
      "learning_rate": 2.6123608168417258e-06,
      "loss": 0.4029,
      "step": 23500
    },
    {
      "epoch": 2.92,
      "grad_norm": 22.39061164855957,
      "learning_rate": 1.485371681016995e-06,
      "loss": 0.4135,
      "step": 24000
    },
    {
      "epoch": 2.98,
      "grad_norm": 4.789835453033447,
      "learning_rate": 3.583825451922644e-07,
      "loss": 0.4149,
      "step": 24500
    },
    {
      "epoch": 3.0,
      "step": 24648,
      "total_flos": 3.86452500250624e+16,
      "train_loss": 0.7860646006427233,
      "train_runtime": 13844.0651,
      "train_samples_per_second": 28.489,
      "train_steps_per_second": 1.78
    }
  ],
  "logging_steps": 500,
  "max_steps": 24648,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 5000,
  "total_flos": 3.86452500250624e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}