{ "best_metric": 0.7202689051628113, "best_model_checkpoint": "miner_id_24/checkpoint-450", "epoch": 0.06827802813054759, "eval_steps": 50, "global_step": 500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00013655605626109517, "eval_loss": 0.9949688911437988, "eval_runtime": 302.7429, "eval_samples_per_second": 10.187, "eval_steps_per_second": 2.547, "step": 1 }, { "epoch": 0.0013655605626109518, "grad_norm": 0.2990376055240631, "learning_rate": 4.22e-05, "loss": 0.8081, "step": 10 }, { "epoch": 0.0027311211252219036, "grad_norm": 0.18796958029270172, "learning_rate": 8.44e-05, "loss": 0.7504, "step": 20 }, { "epoch": 0.004096681687832855, "grad_norm": 0.2915318012237549, "learning_rate": 0.0001266, "loss": 0.7859, "step": 30 }, { "epoch": 0.005462242250443807, "grad_norm": 0.4626297354698181, "learning_rate": 0.0001688, "loss": 0.946, "step": 40 }, { "epoch": 0.006827802813054759, "grad_norm": 0.713054895401001, "learning_rate": 0.000211, "loss": 0.9734, "step": 50 }, { "epoch": 0.006827802813054759, "eval_loss": 0.8493964076042175, "eval_runtime": 302.6927, "eval_samples_per_second": 10.189, "eval_steps_per_second": 2.547, "step": 50 }, { "epoch": 0.00819336337566571, "grad_norm": 0.2116241157054901, "learning_rate": 0.00021074300730241147, "loss": 0.7354, "step": 60 }, { "epoch": 0.009558923938276663, "grad_norm": 0.18535198271274567, "learning_rate": 0.00020997328125223568, "loss": 0.7021, "step": 70 }, { "epoch": 0.010924484500887614, "grad_norm": 0.2994266748428345, "learning_rate": 0.0002086945718774165, "loss": 0.697, "step": 80 }, { "epoch": 0.012290045063498567, "grad_norm": 0.5045783519744873, "learning_rate": 0.00020691310892149265, "loss": 0.83, "step": 90 }, { "epoch": 0.013655605626109518, "grad_norm": 1.0774104595184326, "learning_rate": 0.00020463757149291335, "loss": 0.9565, "step": 100 }, { "epoch": 0.013655605626109518, "eval_loss": 0.8299235105514526, "eval_runtime": 303.4517, "eval_samples_per_second": 10.163, "eval_steps_per_second": 2.541, "step": 100 }, { "epoch": 0.01502116618872047, "grad_norm": 0.1881781816482544, "learning_rate": 0.0002018790457812944, "loss": 0.7155, "step": 110 }, { "epoch": 0.01638672675133142, "grad_norm": 0.17206980288028717, "learning_rate": 0.0001986509710466168, "loss": 0.6885, "step": 120 }, { "epoch": 0.017752287313942374, "grad_norm": 0.2561348080635071, "learning_rate": 0.00019496907414450293, "loss": 0.7351, "step": 130 }, { "epoch": 0.019117847876553327, "grad_norm": 0.5087417364120483, "learning_rate": 0.00019085129290655697, "loss": 0.751, "step": 140 }, { "epoch": 0.020483408439164276, "grad_norm": 0.7206597924232483, "learning_rate": 0.00018631768874905217, "loss": 1.0318, "step": 150 }, { "epoch": 0.020483408439164276, "eval_loss": 0.8001604676246643, "eval_runtime": 303.0845, "eval_samples_per_second": 10.175, "eval_steps_per_second": 2.544, "step": 150 }, { "epoch": 0.02184896900177523, "grad_norm": 0.23315006494522095, "learning_rate": 0.0001813903489357277, "loss": 0.6814, "step": 160 }, { "epoch": 0.02321452956438618, "grad_norm": 0.21046818792819977, "learning_rate": 0.00017609327897085954, "loss": 0.6699, "step": 170 }, { "epoch": 0.024580090126997134, "grad_norm": 0.31327730417251587, "learning_rate": 0.00017045228564685694, "loss": 0.6939, "step": 180 }, { "epoch": 0.025945650689608083, "grad_norm": 0.6563451290130615, "learning_rate": 0.0001644948513161638, "loss": 0.7772, "step": 190 }, { "epoch": 0.027311211252219036, "grad_norm": 
5.301021575927734, "learning_rate": 0.00015825, "loss": 0.8194, "step": 200 }, { "epoch": 0.027311211252219036, "eval_loss": 0.7733496427536011, "eval_runtime": 302.7702, "eval_samples_per_second": 10.186, "eval_steps_per_second": 2.546, "step": 200 }, { "epoch": 0.02867677181482999, "grad_norm": 0.20063194632530212, "learning_rate": 0.00015174815598624768, "loss": 0.6995, "step": 210 }, { "epoch": 0.03004233237744094, "grad_norm": 0.16673311591148376, "learning_rate": 0.00014502099560537873, "loss": 0.6325, "step": 220 }, { "epoch": 0.03140789294005189, "grad_norm": 0.2341037541627884, "learning_rate": 0.00013810129290655696, "loss": 0.6814, "step": 230 }, { "epoch": 0.03277345350266284, "grad_norm": 0.29294663667678833, "learning_rate": 0.00013102275998576495, "loss": 0.73, "step": 240 }, { "epoch": 0.034139014065273796, "grad_norm": 0.6317477226257324, "learning_rate": 0.00012381988274386116, "loss": 1.0111, "step": 250 }, { "epoch": 0.034139014065273796, "eval_loss": 0.7544133067131042, "eval_runtime": 302.7432, "eval_samples_per_second": 10.187, "eval_steps_per_second": 2.547, "step": 250 }, { "epoch": 0.03550457462788475, "grad_norm": 0.20855742692947388, "learning_rate": 0.00011652775287473745, "loss": 0.6858, "step": 260 }, { "epoch": 0.0368701351904957, "grad_norm": 0.2107452005147934, "learning_rate": 0.00010918189690211387, "loss": 0.7005, "step": 270 }, { "epoch": 0.038235695753106654, "grad_norm": 0.26034247875213623, "learning_rate": 0.00010181810309788618, "loss": 0.6453, "step": 280 }, { "epoch": 0.0396012563157176, "grad_norm": 0.3944404125213623, "learning_rate": 9.447224712526258e-05, "loss": 0.7761, "step": 290 }, { "epoch": 0.04096681687832855, "grad_norm": 0.8833209276199341, "learning_rate": 8.718011725613886e-05, "loss": 0.8044, "step": 300 }, { "epoch": 0.04096681687832855, "eval_loss": 0.740537703037262, "eval_runtime": 302.8602, "eval_samples_per_second": 10.183, "eval_steps_per_second": 2.546, "step": 300 }, { "epoch": 0.042332377440939505, "grad_norm": 0.17050737142562866, "learning_rate": 7.997724001423507e-05, "loss": 0.6244, "step": 310 }, { "epoch": 0.04369793800355046, "grad_norm": 0.21495510637760162, "learning_rate": 7.289870709344306e-05, "loss": 0.6406, "step": 320 }, { "epoch": 0.04506349856616141, "grad_norm": 0.23069900274276733, "learning_rate": 6.597900439462128e-05, "loss": 0.7098, "step": 330 }, { "epoch": 0.04642905912877236, "grad_norm": 0.5239390134811401, "learning_rate": 5.9251844013752326e-05, "loss": 0.7881, "step": 340 }, { "epoch": 0.047794619691383315, "grad_norm": 0.8861454725265503, "learning_rate": 5.275000000000002e-05, "loss": 0.9388, "step": 350 }, { "epoch": 0.047794619691383315, "eval_loss": 0.7316343784332275, "eval_runtime": 303.1651, "eval_samples_per_second": 10.173, "eval_steps_per_second": 2.543, "step": 350 }, { "epoch": 0.04916018025399427, "grad_norm": 0.18755333125591278, "learning_rate": 4.650514868383623e-05, "loss": 0.6713, "step": 360 }, { "epoch": 0.050525740816605214, "grad_norm": 0.20082293450832367, "learning_rate": 4.054771435314305e-05, "loss": 0.618, "step": 370 }, { "epoch": 0.051891301379216166, "grad_norm": 0.2751503586769104, "learning_rate": 3.4906721029140495e-05, "loss": 0.6994, "step": 380 }, { "epoch": 0.05325686194182712, "grad_norm": 0.8389333486557007, "learning_rate": 2.9609651064272323e-05, "loss": 0.819, "step": 390 }, { "epoch": 0.05462242250443807, "grad_norm": 0.7859941720962524, "learning_rate": 2.468231125094783e-05, "loss": 0.8555, "step": 400 }, { "epoch": 0.05462242250443807, 
"eval_loss": 0.7245707511901855, "eval_runtime": 303.0447, "eval_samples_per_second": 10.177, "eval_steps_per_second": 2.544, "step": 400 }, { "epoch": 0.055987983067049024, "grad_norm": 0.19546158611774445, "learning_rate": 2.0148707093443057e-05, "loss": 0.626, "step": 410 }, { "epoch": 0.05735354362965998, "grad_norm": 0.21460984647274017, "learning_rate": 1.603092585549706e-05, "loss": 0.6617, "step": 420 }, { "epoch": 0.05871910419227093, "grad_norm": 0.23666520416736603, "learning_rate": 1.2349028953383204e-05, "loss": 0.646, "step": 430 }, { "epoch": 0.06008466475488188, "grad_norm": 0.4160224497318268, "learning_rate": 9.120954218705596e-06, "loss": 0.7236, "step": 440 }, { "epoch": 0.06145022531749283, "grad_norm": 0.5936741232872009, "learning_rate": 6.362428507086673e-06, "loss": 0.8672, "step": 450 }, { "epoch": 0.06145022531749283, "eval_loss": 0.7202689051628113, "eval_runtime": 303.8246, "eval_samples_per_second": 10.151, "eval_steps_per_second": 2.538, "step": 450 }, { "epoch": 0.06281578588010378, "grad_norm": 0.1634497493505478, "learning_rate": 4.0868910785073565e-06, "loss": 0.6793, "step": 460 }, { "epoch": 0.06418134644271474, "grad_norm": 0.20016777515411377, "learning_rate": 2.3054281225835e-06, "loss": 0.6925, "step": 470 }, { "epoch": 0.06554690700532569, "grad_norm": 0.2985592782497406, "learning_rate": 1.026718747764327e-06, "loss": 0.6575, "step": 480 }, { "epoch": 0.06691246756793663, "grad_norm": 0.4534226059913635, "learning_rate": 2.5699269758854715e-07, "loss": 0.7295, "step": 490 }, { "epoch": 0.06827802813054759, "grad_norm": 0.5993149876594543, "learning_rate": 0.0, "loss": 0.8247, "step": 500 }, { "epoch": 0.06827802813054759, "eval_loss": 0.7215211987495422, "eval_runtime": 304.1302, "eval_samples_per_second": 10.14, "eval_steps_per_second": 2.535, "step": 500 } ], "logging_steps": 10, "max_steps": 500, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 3, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 1 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.5642309924225024e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }