|
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 9.828478964401295,
  "eval_steps": 500,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20711974110032363,
      "grad_norm": 16.311434606415148,
      "learning_rate": 2e-05,
      "loss": 0.6532,
      "mean_token_accuracy": 0.8563957028090954,
      "num_tokens": 523828.0,
      "step": 1
    },
    {
      "epoch": 0.41423948220064727,
      "grad_norm": 15.736907422857884,
      "learning_rate": 1.9e-05,
      "loss": 0.4982,
      "mean_token_accuracy": 0.8662955602630973,
      "num_tokens": 1047723.0,
      "step": 2
    },
    {
      "epoch": 0.6213592233009708,
      "grad_norm": 3.447251157289743,
      "learning_rate": 1.8e-05,
      "loss": 0.4425,
      "mean_token_accuracy": 0.873533152975142,
      "num_tokens": 1565521.0,
      "step": 3
    },
    {
      "epoch": 0.8284789644012945,
      "grad_norm": 4.507555220293444,
      "learning_rate": 1.7e-05,
      "loss": 0.413,
      "mean_token_accuracy": 0.8806356741115451,
      "num_tokens": 2089809.0,
      "step": 4
    },
    {
      "epoch": 1.2071197411003236,
      "grad_norm": 1.9178218589930014,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.8156,
      "mean_token_accuracy": 0.8827247977256775,
      "num_tokens": 2940027.0,
      "step": 5
    },
    {
      "epoch": 1.4142394822006472,
      "grad_norm": 0.9419526829426662,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.375,
      "mean_token_accuracy": 0.8892204724252224,
      "num_tokens": 3459739.0,
      "step": 6
    },
    {
      "epoch": 1.6213592233009708,
      "grad_norm": 0.73637220317646,
      "learning_rate": 1.4e-05,
      "loss": 0.3594,
      "mean_token_accuracy": 0.8936486886814237,
      "num_tokens": 3983567.0,
      "step": 7
    },
    {
      "epoch": 1.8284789644012944,
      "grad_norm": 0.6243754300549781,
      "learning_rate": 1.3000000000000001e-05,
      "loss": 0.3622,
      "mean_token_accuracy": 0.8927655592560768,
      "num_tokens": 4507855.0,
      "step": 8
    },
    {
      "epoch": 2.207119741100324,
      "grad_norm": 0.5542153802684493,
      "learning_rate": 1.2e-05,
      "loss": 0.6934,
      "mean_token_accuracy": 0.8979568992342267,
      "num_tokens": 5364879.0,
      "step": 9
    },
    {
      "epoch": 2.414239482200647,
      "grad_norm": 0.5137676948302884,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 0.3317,
      "mean_token_accuracy": 0.9016338279470801,
      "num_tokens": 5888452.0,
      "step": 10
    },
    {
      "epoch": 2.6213592233009706,
      "grad_norm": 0.4227012567455179,
      "learning_rate": 1e-05,
      "loss": 0.3075,
      "mean_token_accuracy": 0.9077446917071939,
      "num_tokens": 6411678.0,
      "step": 11
    },
    {
      "epoch": 2.8284789644012944,
      "grad_norm": 0.521003496128672,
      "learning_rate": 9e-06,
      "loss": 0.3296,
      "mean_token_accuracy": 0.9018635489046574,
      "num_tokens": 6929430.0,
      "step": 12
    },
    {
      "epoch": 3.207119741100324,
      "grad_norm": 0.6009267217587392,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.5879,
      "mean_token_accuracy": 0.9123640724590847,
      "num_tokens": 7788482.0,
      "step": 13
    },
    {
      "epoch": 3.414239482200647,
      "grad_norm": 0.38313891986777304,
      "learning_rate": 7e-06,
      "loss": 0.2942,
      "mean_token_accuracy": 0.9121569795534015,
      "num_tokens": 8307992.0,
      "step": 14
    },
    {
      "epoch": 3.6213592233009706,
      "grad_norm": 0.42965764040529053,
      "learning_rate": 6e-06,
      "loss": 0.2851,
      "mean_token_accuracy": 0.914524182677269,
      "num_tokens": 8832280.0,
      "step": 15
    },
    {
      "epoch": 3.8284789644012944,
      "grad_norm": 0.39075483271320793,
      "learning_rate": 5e-06,
      "loss": 0.2846,
      "mean_token_accuracy": 0.9148006569594145,
      "num_tokens": 9350219.0,
      "step": 16
    },
    {
      "epoch": 4.207119741100324,
      "grad_norm": 0.5041579865544957,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.5225,
      "mean_token_accuracy": 0.9207058066413516,
      "num_tokens": 10206773.0,
      "step": 17
    },
    {
      "epoch": 4.414239482200648,
      "grad_norm": 0.3226220487928564,
      "learning_rate": 3e-06,
      "loss": 0.2554,
      "mean_token_accuracy": 0.9235557727515697,
      "num_tokens": 10731061.0,
      "step": 18
    },
    {
      "epoch": 4.621359223300971,
      "grad_norm": 0.29138584575319715,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.2589,
      "mean_token_accuracy": 0.9219849593937397,
      "num_tokens": 11254174.0,
      "step": 19
    },
    {
      "epoch": 4.828478964401294,
      "grad_norm": 0.3074072577482687,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.2573,
      "mean_token_accuracy": 0.9227853054180741,
      "num_tokens": 11772003.0,
      "step": 20
    },
    {
      "epoch": 5.207119741100324,
      "grad_norm": 0.29226028229762824,
      "learning_rate": 0.0,
      "loss": 0.2581,
      "mean_token_accuracy": 0.9223490478470922,
      "num_tokens": 520682.0,
      "step": 21
    },
    {
      "epoch": 5.414239482200648,
      "grad_norm": 0.3039506268459287,
      "learning_rate": 9.5e-06,
      "loss": 0.2612,
      "mean_token_accuracy": 0.921809334307909,
      "num_tokens": 1042545.0,
      "step": 22
    },
    {
      "epoch": 5.621359223300971,
      "grad_norm": 0.3292461938787561,
      "learning_rate": 9e-06,
      "loss": 0.2338,
      "mean_token_accuracy": 0.9292812049388885,
      "num_tokens": 1563697.0,
      "step": 23
    },
    {
      "epoch": 5.828478964401294,
      "grad_norm": 0.46420193174977303,
      "learning_rate": 8.5e-06,
      "loss": 0.2446,
      "mean_token_accuracy": 0.9261427698656917,
      "num_tokens": 2087270.0,
      "step": 24
    },
    {
      "epoch": 6.207119741100324,
      "grad_norm": 0.5152631282390204,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.4273,
      "mean_token_accuracy": 0.9354684165545872,
      "num_tokens": 2942515.0,
      "step": 25
    },
    {
      "epoch": 6.414239482200648,
      "grad_norm": 0.397171201247213,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.1971,
      "mean_token_accuracy": 0.9410504633560777,
      "num_tokens": 3464980.0,
      "step": 26
    },
    {
      "epoch": 6.621359223300971,
      "grad_norm": 0.4038092235036345,
      "learning_rate": 7e-06,
      "loss": 0.1936,
      "mean_token_accuracy": 0.9419528925791383,
      "num_tokens": 3988553.0,
      "step": 27
    },
    {
      "epoch": 6.828478964401294,
      "grad_norm": 0.4383287277331565,
      "learning_rate": 6.5000000000000004e-06,
      "loss": 0.1982,
      "mean_token_accuracy": 0.9402873041108251,
      "num_tokens": 4512841.0,
      "step": 28
    },
    {
      "epoch": 7.207119741100324,
      "grad_norm": 0.5819451162543645,
      "learning_rate": 6e-06,
      "loss": 0.3495,
      "mean_token_accuracy": 0.9478773713111878,
      "num_tokens": 5363834.0,
      "step": 29
    },
    {
      "epoch": 7.414239482200648,
      "grad_norm": 0.35512055236444356,
      "learning_rate": 5.500000000000001e-06,
      "loss": 0.1599,
      "mean_token_accuracy": 0.9522946244105697,
      "num_tokens": 5887407.0,
      "step": 30
    },
    {
      "epoch": 7.621359223300971,
      "grad_norm": 0.6014961197056318,
      "learning_rate": 5e-06,
      "loss": 0.1586,
      "mean_token_accuracy": 0.9526052679866552,
      "num_tokens": 6408559.0,
      "step": 31
    },
    {
      "epoch": 7.828478964401294,
      "grad_norm": 0.4044887603769801,
      "learning_rate": 4.5e-06,
      "loss": 0.1515,
      "mean_token_accuracy": 0.9546759780496359,
      "num_tokens": 6930887.0,
      "step": 32
    },
    {
      "epoch": 8.207119741100323,
      "grad_norm": 0.527657049732786,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.2954,
      "mean_token_accuracy": 0.9571067219688779,
      "num_tokens": 7784698.0,
      "step": 33
    },
    {
      "epoch": 8.414239482200648,
      "grad_norm": 0.5655954589470128,
      "learning_rate": 3.5e-06,
      "loss": 0.1301,
      "mean_token_accuracy": 0.9615885661914945,
      "num_tokens": 8307878.0,
      "step": 34
    },
    {
      "epoch": 8.62135922330097,
      "grad_norm": 0.3905499777914469,
      "learning_rate": 3e-06,
      "loss": 0.1309,
      "mean_token_accuracy": 0.9616770427674055,
      "num_tokens": 8831564.0,
      "step": 35
    },
    {
      "epoch": 8.828478964401295,
      "grad_norm": 0.3609762708880154,
      "learning_rate": 2.5e-06,
      "loss": 0.1301,
      "mean_token_accuracy": 0.961113647557795,
      "num_tokens": 9354489.0,
      "step": 36
    },
    {
      "epoch": 9.207119741100323,
      "grad_norm": 0.6575328899879626,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.2498,
      "mean_token_accuracy": 0.9636295852207002,
      "num_tokens": 10210406.0,
      "step": 37
    },
    {
      "epoch": 9.414239482200648,
      "grad_norm": 0.28343130576610354,
      "learning_rate": 1.5e-06,
      "loss": 0.1245,
      "mean_token_accuracy": 0.9638419672846794,
      "num_tokens": 10729521.0,
      "step": 38
    },
    {
      "epoch": 9.62135922330097,
      "grad_norm": 0.3576238970586715,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.1181,
      "mean_token_accuracy": 0.965806245803833,
      "num_tokens": 11253094.0,
      "step": 39
    },
    {
      "epoch": 9.828478964401295,
      "grad_norm": 0.38762707391762197,
      "learning_rate": 5.000000000000001e-07,
      "loss": 0.1138,
      "mean_token_accuracy": 0.9672208018600941,
      "num_tokens": 11773251.0,
      "step": 40
    },
    {
      "epoch": 9.828478964401295,
      "step": 40,
      "total_flos": 47054728527872.0,
      "train_loss": 0.10314766578376293,
      "train_runtime": 4354.8726,
      "train_samples_per_second": 1.417,
      "train_steps_per_second": 0.009
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 47054728527872.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|