{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 0.4488663077354431,
      "learning_rate": 0.0001989040187322164,
      "loss": 1.4071,
      "mean_token_accuracy": 0.7116096287965774,
      "num_tokens": 9087.0,
      "step": 10
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 0.22684404253959656,
      "learning_rate": 0.00019027978299657436,
      "loss": 0.683,
      "mean_token_accuracy": 0.8242586374282836,
      "num_tokens": 18205.0,
      "step": 20
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 0.23908470571041107,
      "learning_rate": 0.00017378332790417273,
      "loss": 0.6071,
      "mean_token_accuracy": 0.838360658288002,
      "num_tokens": 27096.0,
      "step": 30
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.26060426235198975,
      "learning_rate": 0.00015085311186492206,
      "loss": 0.5811,
      "mean_token_accuracy": 0.8467051446437835,
      "num_tokens": 36026.0,
      "step": 40
    },
    {
      "epoch": 0.45454545454545453,
      "grad_norm": 0.2519456148147583,
      "learning_rate": 0.00012348860457809838,
      "loss": 0.5426,
      "mean_token_accuracy": 0.850423663854599,
      "num_tokens": 45060.0,
      "step": 50
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 0.25340786576271057,
      "learning_rate": 9.407593721062859e-05,
      "loss": 0.5446,
      "mean_token_accuracy": 0.8483250468969346,
      "num_tokens": 54214.0,
      "step": 60
    },
    {
      "epoch": 0.6363636363636364,
      "grad_norm": 0.25936585664749146,
      "learning_rate": 6.517983645656014e-05,
      "loss": 0.5366,
      "mean_token_accuracy": 0.8502849280834198,
      "num_tokens": 63554.0,
      "step": 70
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.2906036674976349,
      "learning_rate": 3.931998541814069e-05,
      "loss": 0.506,
      "mean_token_accuracy": 0.8614971518516541,
      "num_tokens": 72552.0,
      "step": 80
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 0.2950091063976288,
      "learning_rate": 1.875131219943187e-05,
      "loss": 0.4966,
      "mean_token_accuracy": 0.8643338441848755,
      "num_tokens": 81571.0,
      "step": 90
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.2428123503923416,
      "learning_rate": 5.267364614580861e-06,
      "loss": 0.4775,
      "mean_token_accuracy": 0.86156085729599,
      "num_tokens": 90475.0,
      "step": 100
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.24808728694915771,
      "learning_rate": 4.391634912056519e-08,
      "loss": 0.4841,
      "mean_token_accuracy": 0.8593197286128997,
      "num_tokens": 99667.0,
      "step": 110
    }
  ],
  "logging_steps": 10,
  "max_steps": 110,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 711162121568256.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}