{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.15808974886784685,
  "eval_steps": 500,
  "global_step": 12,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013174145738987238,
      "grad_norm": 0.48070791363716125,
      "learning_rate": 6.25e-08,
      "logits/chosen": 10.32492733001709,
      "logits/rejected": 10.282785415649414,
      "logps/chosen": -176.09544372558594,
      "logps/ref_chosen": -176.09544372558594,
      "logps/ref_rejected": -181.75552368164062,
      "logps/rejected": -181.75552368164062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.026348291477974475,
      "grad_norm": 0.4185059070587158,
      "learning_rate": 1.25e-07,
      "logits/chosen": 10.530074119567871,
      "logits/rejected": 10.672085762023926,
      "logps/chosen": -173.2084503173828,
      "logps/ref_chosen": -173.2084503173828,
      "logps/ref_rejected": -187.02206420898438,
      "logps/rejected": -187.02206420898438,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.03952243721696171,
      "grad_norm": 0.3848375082015991,
      "learning_rate": 1.875e-07,
      "logits/chosen": 10.340475082397461,
      "logits/rejected": 10.486526489257812,
      "logps/chosen": -169.6623992919922,
      "logps/ref_chosen": -169.080810546875,
      "logps/ref_rejected": -182.52792358398438,
      "logps/rejected": -182.80642700195312,
      "loss": 0.6947,
      "rewards/accuracies": 0.3828125,
      "rewards/chosen": -0.005815833806991577,
      "rewards/margins": -0.0030306153930723667,
      "rewards/rejected": -0.002785218646749854,
      "step": 3
    },
    {
      "epoch": 0.05269658295594895,
      "grad_norm": 0.7261964678764343,
      "learning_rate": 2.5e-07,
      "logits/chosen": 10.3652925491333,
      "logits/rejected": 10.412069320678711,
      "logps/chosen": -177.03428649902344,
      "logps/ref_chosen": -176.78369140625,
      "logps/ref_rejected": -180.0931396484375,
      "logps/rejected": -180.21463012695312,
      "loss": 0.6938,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.0025058696046471596,
      "rewards/margins": -0.0012909012148156762,
      "rewards/rejected": -0.0012149682734161615,
      "step": 4
    },
    {
      "epoch": 0.06587072869493618,
      "grad_norm": 0.3572319746017456,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 10.757452011108398,
      "logits/rejected": 10.791389465332031,
      "logps/chosen": -169.12631225585938,
      "logps/ref_chosen": -168.522216796875,
      "logps/ref_rejected": -176.08982849121094,
      "logps/rejected": -176.59901428222656,
      "loss": 0.6936,
      "rewards/accuracies": 0.4609375,
      "rewards/chosen": -0.0060411859303712845,
      "rewards/margins": -0.0009493756806477904,
      "rewards/rejected": -0.0050918105989694595,
      "step": 5
    },
    {
      "epoch": 0.07904487443392343,
      "grad_norm": 0.4071093201637268,
      "learning_rate": 3.75e-07,
      "logits/chosen": 11.003137588500977,
      "logits/rejected": 11.056475639343262,
      "logps/chosen": -174.662353515625,
      "logps/ref_chosen": -174.5843048095703,
      "logps/ref_rejected": -181.65040588378906,
      "logps/rejected": -181.72401428222656,
      "loss": 0.6932,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.0007804610067978501,
      "rewards/margins": -4.43047538283281e-05,
      "rewards/rejected": -0.0007361561874859035,
      "step": 6
    },
    {
      "epoch": 0.09221902017291066,
      "grad_norm": 0.7951124906539917,
      "learning_rate": 4.375e-07,
      "logits/chosen": 10.003807067871094,
      "logits/rejected": 10.067156791687012,
      "logps/chosen": -171.4049530029297,
      "logps/ref_chosen": -170.4031219482422,
      "logps/ref_rejected": -181.00917053222656,
      "logps/rejected": -181.8280792236328,
      "loss": 0.6941,
      "rewards/accuracies": 0.4453125,
      "rewards/chosen": -0.010018287226557732,
      "rewards/margins": -0.0018291514134034514,
      "rewards/rejected": -0.008189136162400246,
      "step": 7
    },
    {
      "epoch": 0.1053931659118979,
      "grad_norm": 0.5591660737991333,
      "learning_rate": 5e-07,
      "logits/chosen": 10.874868392944336,
      "logits/rejected": 10.79828929901123,
      "logps/chosen": -179.36700439453125,
      "logps/ref_chosen": -178.47369384765625,
      "logps/ref_rejected": -184.52243041992188,
      "logps/rejected": -185.44284057617188,
      "loss": 0.693,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": -0.008933190256357193,
      "rewards/margins": 0.0002710025873966515,
      "rewards/rejected": -0.009204192087054253,
      "step": 8
    },
    {
      "epoch": 0.11856731165088513,
      "grad_norm": 0.45744186639785767,
      "learning_rate": 4.997252228714278e-07,
      "logits/chosen": 10.472944259643555,
      "logits/rejected": 10.640350341796875,
      "logps/chosen": -175.36553955078125,
      "logps/ref_chosen": -174.38418579101562,
      "logps/ref_rejected": -182.0985565185547,
      "logps/rejected": -183.1609649658203,
      "loss": 0.6928,
      "rewards/accuracies": 0.5234375,
      "rewards/chosen": -0.009813489392399788,
      "rewards/margins": 0.0008107352769002318,
      "rewards/rejected": -0.010624224320054054,
      "step": 9
    },
    {
      "epoch": 0.13174145738987236,
      "grad_norm": 0.3784727156162262,
      "learning_rate": 4.989014955054745e-07,
      "logits/chosen": 10.471797943115234,
      "logits/rejected": 10.57437515258789,
      "logps/chosen": -173.78768920898438,
      "logps/ref_chosen": -172.48837280273438,
      "logps/ref_rejected": -179.625,
      "logps/rejected": -180.91595458984375,
      "loss": 0.6932,
      "rewards/accuracies": 0.4765625,
      "rewards/chosen": -0.012993087992072105,
      "rewards/margins": -8.335959864780307e-05,
      "rewards/rejected": -0.012909728102385998,
      "step": 10
    },
    {
      "epoch": 0.14491560312885962,
      "grad_norm": 0.4041476547718048,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": 10.539186477661133,
      "logits/rejected": 10.477883338928223,
      "logps/chosen": -172.62416076660156,
      "logps/ref_chosen": -169.97216796875,
      "logps/ref_rejected": -180.00955200195312,
      "logps/rejected": -182.72525024414062,
      "loss": 0.6929,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -0.026519589126110077,
      "rewards/margins": 0.0006374535150825977,
      "rewards/rejected": -0.027157040312886238,
      "step": 11
    },
    {
      "epoch": 0.15808974886784685,
      "grad_norm": 0.5964677333831787,
      "learning_rate": 4.956156357188939e-07,
      "logits/chosen": 10.28010082244873,
      "logits/rejected": 10.401304244995117,
      "logps/chosen": -178.43714904785156,
      "logps/ref_chosen": -175.83639526367188,
      "logps/ref_rejected": -181.73045349121094,
      "logps/rejected": -184.20223999023438,
      "loss": 0.6938,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.026007818058133125,
      "rewards/margins": -0.0012897354317829013,
      "rewards/rejected": -0.024718083441257477,
      "step": 12
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}