{
  "best_metric": 11.073678970336914,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0505836575875485,
  "eval_steps": 25,
  "global_step": 49,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0622568093385214,
      "grad_norm": 5.861987590789795,
      "learning_rate": 5e-05,
      "loss": 354.9364,
      "step": 1
    },
    {
      "epoch": 0.0622568093385214,
      "eval_loss": 11.090806007385254,
      "eval_runtime": 0.1221,
      "eval_samples_per_second": 409.632,
      "eval_steps_per_second": 106.504,
      "step": 1
    },
    {
      "epoch": 0.1245136186770428,
      "grad_norm": 6.5997748374938965,
      "learning_rate": 0.0001,
      "loss": 354.8755,
      "step": 2
    },
    {
      "epoch": 0.1867704280155642,
      "grad_norm": 7.065485000610352,
      "learning_rate": 9.98995095380419e-05,
      "loss": 354.9341,
      "step": 3
    },
    {
      "epoch": 0.2490272373540856,
      "grad_norm": 9.113116264343262,
      "learning_rate": 9.959848696696512e-05,
      "loss": 354.8707,
      "step": 4
    },
    {
      "epoch": 0.311284046692607,
      "grad_norm": 5.728683948516846,
      "learning_rate": 9.90982767266464e-05,
      "loss": 354.9383,
      "step": 5
    },
    {
      "epoch": 0.3735408560311284,
      "grad_norm": 6.870616436004639,
      "learning_rate": 9.840111287744695e-05,
      "loss": 354.8618,
      "step": 6
    },
    {
      "epoch": 0.4357976653696498,
      "grad_norm": 6.92086935043335,
      "learning_rate": 9.751010912235635e-05,
      "loss": 354.724,
      "step": 7
    },
    {
      "epoch": 0.4980544747081712,
      "grad_norm": 8.461061477661133,
      "learning_rate": 9.642924490043929e-05,
      "loss": 354.7021,
      "step": 8
    },
    {
      "epoch": 0.5603112840466926,
      "grad_norm": 5.888168811798096,
      "learning_rate": 9.516334761369466e-05,
      "loss": 354.8149,
      "step": 9
    },
    {
      "epoch": 0.622568093385214,
      "grad_norm": 6.828727722167969,
      "learning_rate": 9.371807106670628e-05,
      "loss": 354.8371,
      "step": 10
    },
    {
      "epoch": 0.6848249027237354,
      "grad_norm": 7.382378101348877,
      "learning_rate": 9.209987021537921e-05,
      "loss": 354.7276,
      "step": 11
    },
    {
      "epoch": 0.7470817120622568,
      "grad_norm": 8.413900375366211,
      "learning_rate": 9.031597233753974e-05,
      "loss": 354.7408,
      "step": 12
    },
    {
      "epoch": 0.8093385214007782,
      "grad_norm": 5.8578925132751465,
      "learning_rate": 8.83743447541581e-05,
      "loss": 354.7767,
      "step": 13
    },
    {
      "epoch": 0.8715953307392996,
      "grad_norm": 6.483746528625488,
      "learning_rate": 8.628365924535892e-05,
      "loss": 354.5992,
      "step": 14
    },
    {
      "epoch": 0.933852140077821,
      "grad_norm": 7.467772483825684,
      "learning_rate": 8.405325332014611e-05,
      "loss": 354.4274,
      "step": 15
    },
    {
      "epoch": 0.9961089494163424,
      "grad_norm": 8.34915828704834,
      "learning_rate": 8.169308851282099e-05,
      "loss": 354.4875,
      "step": 16
    },
    {
      "epoch": 1.0583657587548638,
      "grad_norm": 5.741427421569824,
      "learning_rate": 7.921370589235178e-05,
      "loss": 354.7455,
      "step": 17
    },
    {
      "epoch": 1.1206225680933852,
      "grad_norm": 6.490324974060059,
      "learning_rate": 7.662617898340078e-05,
      "loss": 354.5006,
      "step": 18
    },
    {
      "epoch": 1.1828793774319066,
      "grad_norm": 7.084279537200928,
      "learning_rate": 7.394206430927509e-05,
      "loss": 354.3848,
      "step": 19
    },
    {
      "epoch": 1.245136186770428,
      "grad_norm": 7.677549362182617,
      "learning_rate": 7.117334977768807e-05,
      "loss": 354.4633,
      "step": 20
    },
    {
      "epoch": 1.3073929961089494,
      "grad_norm": 6.072847366333008,
      "learning_rate": 6.833240113985353e-05,
      "loss": 354.5162,
      "step": 21
    },
    {
      "epoch": 1.3696498054474708,
      "grad_norm": 5.878195762634277,
      "learning_rate": 6.543190676203878e-05,
      "loss": 354.5641,
      "step": 22
    },
    {
      "epoch": 1.4319066147859922,
      "grad_norm": 6.611732482910156,
      "learning_rate": 6.248482095624086e-05,
      "loss": 354.472,
      "step": 23
    },
    {
      "epoch": 1.4941634241245136,
      "grad_norm": 8.098542213439941,
      "learning_rate": 5.950430612308444e-05,
      "loss": 354.3851,
      "step": 24
    },
    {
      "epoch": 1.556420233463035,
      "grad_norm": 6.138418197631836,
      "learning_rate": 5.650367396534536e-05,
      "loss": 354.4496,
      "step": 25
    },
    {
      "epoch": 1.556420233463035,
      "eval_loss": 11.073678970336914,
      "eval_runtime": 0.128,
      "eval_samples_per_second": 390.526,
      "eval_steps_per_second": 101.537,
      "step": 25
    },
    {
      "epoch": 1.6186770428015564,
      "grad_norm": 6.747447490692139,
      "learning_rate": 5.349632603465466e-05,
      "loss": 354.3671,
      "step": 26
    },
    {
      "epoch": 1.6809338521400778,
      "grad_norm": 6.908236980438232,
      "learning_rate": 5.049569387691557e-05,
      "loss": 354.321,
      "step": 27
    },
    {
      "epoch": 1.7431906614785992,
      "grad_norm": 8.76644515991211,
      "learning_rate": 4.751517904375915e-05,
      "loss": 354.1532,
      "step": 28
    },
    {
      "epoch": 1.8054474708171206,
      "grad_norm": 6.134677410125732,
      "learning_rate": 4.456809323796123e-05,
      "loss": 354.4615,
      "step": 29
    },
    {
      "epoch": 1.867704280155642,
      "grad_norm": 6.538084506988525,
      "learning_rate": 4.166759886014648e-05,
      "loss": 354.2479,
      "step": 30
    },
    {
      "epoch": 1.9299610894941635,
      "grad_norm": 6.925525188446045,
      "learning_rate": 3.882665022231193e-05,
      "loss": 354.2771,
      "step": 31
    },
    {
      "epoch": 1.9922178988326849,
      "grad_norm": 8.646419525146484,
      "learning_rate": 3.605793569072493e-05,
      "loss": 354.1879,
      "step": 32
    },
    {
      "epoch": 2.054474708171206,
      "grad_norm": 5.98707389831543,
      "learning_rate": 3.337382101659923e-05,
      "loss": 354.2527,
      "step": 33
    },
    {
      "epoch": 2.1167315175097277,
      "grad_norm": 6.75242805480957,
      "learning_rate": 3.078629410764824e-05,
      "loss": 354.1018,
      "step": 34
    },
    {
      "epoch": 2.178988326848249,
      "grad_norm": 7.0720367431640625,
      "learning_rate": 2.830691148717902e-05,
      "loss": 354.2889,
      "step": 35
    },
    {
      "epoch": 2.2412451361867705,
      "grad_norm": 8.26923656463623,
      "learning_rate": 2.5946746679853894e-05,
      "loss": 354.068,
      "step": 36
    },
    {
      "epoch": 2.3035019455252916,
      "grad_norm": 6.108309268951416,
      "learning_rate": 2.3716340754641102e-05,
      "loss": 354.3232,
      "step": 37
    },
    {
      "epoch": 2.3657587548638133,
      "grad_norm": 6.104388236999512,
      "learning_rate": 2.162565524584191e-05,
      "loss": 354.2594,
      "step": 38
    },
    {
      "epoch": 2.4280155642023344,
      "grad_norm": 6.536022663116455,
      "learning_rate": 1.9684027662460257e-05,
      "loss": 354.216,
      "step": 39
    },
    {
      "epoch": 2.490272373540856,
      "grad_norm": 7.577517509460449,
      "learning_rate": 1.79001297846208e-05,
      "loss": 354.1721,
      "step": 40
    },
    {
      "epoch": 2.5525291828793772,
      "grad_norm": 6.564501762390137,
      "learning_rate": 1.628192893329374e-05,
      "loss": 354.4042,
      "step": 41
    },
    {
      "epoch": 2.614785992217899,
      "grad_norm": 6.541054725646973,
      "learning_rate": 1.4836652386305349e-05,
      "loss": 354.275,
      "step": 42
    },
    {
      "epoch": 2.6770428015564205,
      "grad_norm": 6.654796600341797,
      "learning_rate": 1.35707550995607e-05,
      "loss": 354.1229,
      "step": 43
    },
    {
      "epoch": 2.7392996108949417,
      "grad_norm": 8.07349967956543,
      "learning_rate": 1.248989087764366e-05,
      "loss": 354.0651,
      "step": 44
    },
    {
      "epoch": 2.801556420233463,
      "grad_norm": 6.307331562042236,
      "learning_rate": 1.1598887122553061e-05,
      "loss": 354.4648,
      "step": 45
    },
    {
      "epoch": 2.8638132295719845,
      "grad_norm": 6.029254913330078,
      "learning_rate": 1.0901723273353597e-05,
      "loss": 354.2171,
      "step": 46
    },
    {
      "epoch": 2.926070038910506,
      "grad_norm": 6.842432022094727,
      "learning_rate": 1.04015130330349e-05,
      "loss": 354.2176,
      "step": 47
    },
    {
      "epoch": 2.9883268482490273,
      "grad_norm": 7.468156814575195,
      "learning_rate": 1.0100490461958109e-05,
      "loss": 354.0009,
      "step": 48
    },
    {
      "epoch": 3.0505836575875485,
      "grad_norm": 6.788173198699951,
      "learning_rate": 1e-05,
      "loss": 354.2026,
      "step": 49
    }
  ],
  "logging_steps": 1,
  "max_steps": 49,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4118638755840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}