{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 820,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024390243902439025,
      "grad_norm": 0.13146615028381348,
      "learning_rate": 0.00019631901840490797,
      "loss": 2.6062,
      "step": 20
    },
    {
      "epoch": 0.04878048780487805,
      "grad_norm": 0.11177688837051392,
      "learning_rate": 0.0001914110429447853,
      "loss": 2.3258,
      "step": 40
    },
    {
      "epoch": 0.07317073170731707,
      "grad_norm": 0.1093992292881012,
      "learning_rate": 0.00018650306748466258,
      "loss": 2.327,
      "step": 60
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 0.1171504482626915,
      "learning_rate": 0.00018159509202453987,
      "loss": 2.2707,
      "step": 80
    },
    {
      "epoch": 0.12195121951219512,
      "grad_norm": 0.10171514004468918,
      "learning_rate": 0.0001766871165644172,
      "loss": 2.2441,
      "step": 100
    },
    {
      "epoch": 0.14634146341463414,
      "grad_norm": 0.12730270624160767,
      "learning_rate": 0.0001717791411042945,
      "loss": 2.1971,
      "step": 120
    },
    {
      "epoch": 0.17073170731707318,
      "grad_norm": 0.15077818930149078,
      "learning_rate": 0.00016687116564417177,
      "loss": 2.2968,
      "step": 140
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.10773204267024994,
      "learning_rate": 0.00016196319018404909,
      "loss": 2.188,
      "step": 160
    },
    {
      "epoch": 0.21951219512195122,
      "grad_norm": 0.16448411345481873,
      "learning_rate": 0.0001570552147239264,
      "loss": 2.1903,
      "step": 180
    },
    {
      "epoch": 0.24390243902439024,
      "grad_norm": 0.1230635941028595,
      "learning_rate": 0.0001521472392638037,
      "loss": 2.2464,
      "step": 200
    },
    {
      "epoch": 0.2682926829268293,
      "grad_norm": 0.11764346063137054,
      "learning_rate": 0.00014723926380368098,
      "loss": 2.2082,
      "step": 220
    },
    {
      "epoch": 0.2926829268292683,
      "grad_norm": 0.1021975502371788,
      "learning_rate": 0.00014233128834355828,
      "loss": 2.1972,
      "step": 240
    },
    {
      "epoch": 0.3170731707317073,
      "grad_norm": 0.12520350515842438,
      "learning_rate": 0.0001374233128834356,
      "loss": 2.1768,
      "step": 260
    },
    {
      "epoch": 0.34146341463414637,
      "grad_norm": 0.10592948645353317,
      "learning_rate": 0.00013251533742331288,
      "loss": 2.1624,
      "step": 280
    },
    {
      "epoch": 0.36585365853658536,
      "grad_norm": 0.11086229979991913,
      "learning_rate": 0.00012760736196319017,
      "loss": 2.1809,
      "step": 300
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.1199096217751503,
      "learning_rate": 0.0001226993865030675,
      "loss": 2.1735,
      "step": 320
    },
    {
      "epoch": 0.4146341463414634,
      "grad_norm": 0.11156365275382996,
      "learning_rate": 0.0001177914110429448,
      "loss": 2.2331,
      "step": 340
    },
    {
      "epoch": 0.43902439024390244,
      "grad_norm": 0.10787837207317352,
      "learning_rate": 0.00011288343558282209,
      "loss": 2.1008,
      "step": 360
    },
    {
      "epoch": 0.4634146341463415,
      "grad_norm": 0.1065380647778511,
      "learning_rate": 0.00010797546012269939,
      "loss": 2.228,
      "step": 380
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 0.13571393489837646,
      "learning_rate": 0.0001030674846625767,
      "loss": 2.2502,
      "step": 400
    },
    {
      "epoch": 0.5121951219512195,
      "grad_norm": 0.12763665616512299,
      "learning_rate": 9.815950920245399e-05,
      "loss": 2.2069,
      "step": 420
    },
    {
      "epoch": 0.5365853658536586,
      "grad_norm": 0.12322117388248444,
      "learning_rate": 9.325153374233129e-05,
      "loss": 2.1653,
      "step": 440
    },
    {
      "epoch": 0.5609756097560976,
      "grad_norm": 0.11602451652288437,
      "learning_rate": 8.83435582822086e-05,
      "loss": 2.204,
      "step": 460
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 0.13550212979316711,
      "learning_rate": 8.343558282208588e-05,
      "loss": 2.1583,
      "step": 480
    },
    {
      "epoch": 0.6097560975609756,
      "grad_norm": 0.09856979548931122,
      "learning_rate": 7.85276073619632e-05,
      "loss": 2.1924,
      "step": 500
    },
    {
      "epoch": 0.6341463414634146,
      "grad_norm": 0.1261129528284073,
      "learning_rate": 7.361963190184049e-05,
      "loss": 2.1347,
      "step": 520
    },
    {
      "epoch": 0.6585365853658537,
      "grad_norm": 0.1323924958705902,
      "learning_rate": 6.87116564417178e-05,
      "loss": 2.1493,
      "step": 540
    },
    {
      "epoch": 0.6829268292682927,
      "grad_norm": 0.12028100341558456,
      "learning_rate": 6.380368098159509e-05,
      "loss": 2.1703,
      "step": 560
    },
    {
      "epoch": 0.7073170731707317,
      "grad_norm": 0.11706235259771347,
      "learning_rate": 5.88957055214724e-05,
      "loss": 2.1665,
      "step": 580
    },
    {
      "epoch": 0.7317073170731707,
      "grad_norm": 0.12799355387687683,
      "learning_rate": 5.3987730061349695e-05,
      "loss": 2.1631,
      "step": 600
    },
    {
      "epoch": 0.7560975609756098,
      "grad_norm": 0.10057336091995239,
      "learning_rate": 4.907975460122699e-05,
      "loss": 2.2172,
      "step": 620
    },
    {
      "epoch": 0.7804878048780488,
      "grad_norm": 0.15589593350887299,
      "learning_rate": 4.41717791411043e-05,
      "loss": 2.1731,
      "step": 640
    },
    {
      "epoch": 0.8048780487804879,
      "grad_norm": 0.11708714812994003,
      "learning_rate": 3.92638036809816e-05,
      "loss": 2.1731,
      "step": 660
    },
    {
      "epoch": 0.8292682926829268,
      "grad_norm": 0.11283290386199951,
      "learning_rate": 3.43558282208589e-05,
      "loss": 2.1074,
      "step": 680
    },
    {
      "epoch": 0.8536585365853658,
      "grad_norm": 0.11966817826032639,
      "learning_rate": 2.94478527607362e-05,
      "loss": 2.1979,
      "step": 700
    },
    {
      "epoch": 0.8780487804878049,
      "grad_norm": 0.15318670868873596,
      "learning_rate": 2.4539877300613496e-05,
      "loss": 2.1476,
      "step": 720
    },
    {
      "epoch": 0.9024390243902439,
      "grad_norm": 0.12104146927595139,
      "learning_rate": 1.96319018404908e-05,
      "loss": 2.2062,
      "step": 740
    },
    {
      "epoch": 0.926829268292683,
      "grad_norm": 0.14861464500427246,
      "learning_rate": 1.47239263803681e-05,
      "loss": 2.1754,
      "step": 760
    },
    {
      "epoch": 0.9512195121951219,
      "grad_norm": 0.11242931336164474,
      "learning_rate": 9.8159509202454e-06,
      "loss": 2.172,
      "step": 780
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 0.1298566460609436,
      "learning_rate": 4.9079754601227e-06,
      "loss": 2.1327,
      "step": 800
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.14202716946601868,
      "learning_rate": 0.0,
      "loss": 2.1028,
      "step": 820
    }
  ],
  "logging_steps": 20,
  "max_steps": 820,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.1324741394592563e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}