{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.15808974886784685,
  "eval_steps": 500,
  "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.39343583583831787,
      "learning_rate": 6.25e-08,
      "logits/chosen": 10.071717262268066,
      "logits/rejected": 10.610974311828613,
      "logps/chosen": -121.14067077636719,
      "logps/ref_chosen": -121.14067077636719,
      "logps/ref_rejected": -137.65684509277344,
      "logps/rejected": -137.65684509277344,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.7521647214889526,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.222262382507324,
      "logits/rejected": 10.754176139831543,
      "logps/chosen": -116.48068237304688,
      "logps/ref_chosen": -116.48068237304688,
      "logps/ref_rejected": -130.27796936035156,
      "logps/rejected": -130.27796936035156,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.5880491137504578,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.01984977722168,
      "logits/rejected": 10.62405776977539,
      "logps/chosen": -123.36822509765625,
      "logps/ref_chosen": -122.6683349609375,
      "logps/ref_rejected": -132.69850158691406,
      "logps/rejected": -133.207275390625,
      "loss": 0.6941,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.006998830940574408,
      "rewards/margins": -0.0019110905705019832,
      "rewards/rejected": -0.005087739787995815,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.4605408310890198,
      "learning_rate": 2.5e-07,
      "logits/chosen": 9.99990463256836,
      "logits/rejected": 10.736846923828125,
      "logps/chosen": -123.02133178710938,
      "logps/ref_chosen": -122.59739685058594,
      "logps/ref_rejected": -129.70767211914062,
      "logps/rejected": -129.98374938964844,
      "loss": 0.6939,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.004239337984472513,
      "rewards/margins": -0.0014785109087824821,
      "rewards/rejected": -0.0027608266100287437,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.469856321811676,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.075482368469238,
      "logits/rejected": 10.892666816711426,
      "logps/chosen": -117.554931640625,
      "logps/ref_chosen": -117.5941162109375,
      "logps/ref_rejected": -132.1708984375,
      "logps/rejected": -132.1227264404297,
      "loss": 0.6932,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": 0.0003917121794074774,
      "rewards/margins": -9.010493522509933e-05,
      "rewards/rejected": 0.0004818170564249158,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.5541566610336304,
      "learning_rate": 3.75e-07,
      "logits/chosen": 10.660999298095703,
      "logits/rejected": 11.290507316589355,
      "logps/chosen": -127.00320434570312,
      "logps/ref_chosen": -126.12411499023438,
      "logps/ref_rejected": -136.9976043701172,
      "logps/rejected": -137.75950622558594,
      "loss": 0.6938,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.00879070907831192,
      "rewards/margins": -0.0011717069428414106,
      "rewards/rejected": -0.007619001902639866,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.4422788619995117,
      "learning_rate": 4.375e-07,
      "logits/chosen": 9.958097457885742,
      "logits/rejected": 10.642163276672363,
      "logps/chosen": -115.61244201660156,
      "logps/ref_chosen": -115.08863830566406,
      "logps/ref_rejected": -125.91255187988281,
      "logps/rejected": -126.15577697753906,
      "loss": 0.6946,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.005238103214651346,
      "rewards/margins": -0.0028058765456080437,
      "rewards/rejected": -0.002432226436212659,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.5495327115058899,
      "learning_rate": 5e-07,
      "logits/chosen": 10.345503807067871,
      "logits/rejected": 10.97708797454834,
      "logps/chosen": -121.48406982421875,
      "logps/ref_chosen": -121.4114761352539,
      "logps/ref_rejected": -134.62770080566406,
      "logps/rejected": -134.7586212158203,
      "loss": 0.6929,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.0007259202538989484,
      "rewards/margins": 0.0005832896567881107,
      "rewards/rejected": -0.0013092098524793983,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.43922048807144165,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.159126281738281,
      "logits/rejected": 11.002123832702637,
      "logps/chosen": -122.51399230957031,
      "logps/ref_chosen": -121.59207153320312,
      "logps/ref_rejected": -134.70025634765625,
      "logps/rejected": -135.55740356445312,
      "loss": 0.6935,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.00921926274895668,
      "rewards/margins": -0.0006479143630713224,
      "rewards/rejected": -0.008571348153054714,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.6643485426902771,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 9.9464111328125,
      "logits/rejected": 10.739057540893555,
      "logps/chosen": -117.91310119628906,
      "logps/ref_chosen": -117.16349029541016,
      "logps/ref_rejected": -129.98167419433594,
      "logps/rejected": -130.65924072265625,
      "loss": 0.6935,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.007496046833693981,
      "rewards/margins": -0.0007203805143944919,
      "rewards/rejected": -0.00677566509693861,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.4432956576347351,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 10.095781326293945,
      "logits/rejected": 11.001167297363281,
      "logps/chosen": -123.26506042480469,
      "logps/ref_chosen": -121.95927429199219,
      "logps/ref_rejected": -136.18655395507812,
      "logps/rejected": -137.39865112304688,
      "loss": 0.6936,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.01305788941681385,
      "rewards/margins": -0.0009369202307425439,
      "rewards/rejected": -0.012120969593524933,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.3955974280834198,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 10.129995346069336,
      "logits/rejected": 10.673677444458008,
      "logps/chosen": -126.05357360839844,
      "logps/ref_chosen": -124.12315368652344,
      "logps/ref_rejected": -134.275390625,
      "logps/rejected": -136.24940490722656,
      "loss": 0.693,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.01930420845746994,
      "rewards/margins": 0.00043603626545518637,
      "rewards/rejected": -0.01974024437367916,
      "step": 12
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}