{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 36,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 3737.096435546875,
      "epoch": 0.027972027972027972,
      "grad_norm": 0.4781525433063507,
      "kl": 0.0,
      "learning_rate": 0.0,
      "loss": 0.0,
      "reward": 0.49642856419086456,
      "reward_std": 0.14968311414122581,
      "rewards/accuracy_reward": 0.09285714384168386,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4035714343190193,
      "step": 1
    },
    {
      "completion_length": 3919.4143676757812,
      "epoch": 0.055944055944055944,
      "grad_norm": 0.8725160360336304,
      "kl": 0.0,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.41160714626312256,
      "reward_std": 0.1353183900937438,
      "rewards/accuracy_reward": 0.05000000260770321,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3616071417927742,
      "step": 2
    },
    {
      "completion_length": 3489.6964721679688,
      "epoch": 0.08391608391608392,
      "grad_norm": 0.5938112735748291,
      "kl": 0.000396728515625,
      "learning_rate": 5e-06,
      "loss": 0.0,
      "reward": 0.5473214313387871,
      "reward_std": 0.15705320611596107,
      "rewards/accuracy_reward": 0.11785714374855161,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.42946429550647736,
      "step": 3
    },
    {
      "completion_length": 3473.7571411132812,
      "epoch": 0.11188811188811189,
      "grad_norm": 0.5854588747024536,
      "kl": 0.0013608932495117188,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.0001,
      "reward": 0.5562500059604645,
      "reward_std": 0.17247037403285503,
      "rewards/accuracy_reward": 0.14285714388825,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4133928641676903,
      "step": 4
    },
    {
      "completion_length": 3737.9000244140625,
      "epoch": 0.13986013986013987,
      "grad_norm": 8.259861946105957,
      "kl": 0.006000518798828125,
      "learning_rate": 1e-05,
      "loss": 0.0002,
      "reward": 0.5544642880558968,
      "reward_std": 0.19513687305152416,
      "rewards/accuracy_reward": 0.157142860814929,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3973214253783226,
      "step": 5
    },
    {
      "completion_length": 3746.992919921875,
      "epoch": 0.16783216783216784,
      "grad_norm": 0.48524144291877747,
      "kl": 0.0187225341796875,
      "learning_rate": 9.978331270024887e-06,
      "loss": 0.0007,
      "reward": 0.5526785850524902,
      "reward_std": 0.27357756346464157,
      "rewards/accuracy_reward": 0.12142857350409031,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4312500059604645,
      "step": 6
    },
    {
      "completion_length": 3918.8643188476562,
      "epoch": 0.1958041958041958,
      "grad_norm": 0.29106539487838745,
      "kl": 0.025238037109375,
      "learning_rate": 9.913533761814537e-06,
      "loss": 0.001,
      "reward": 0.5473214462399483,
      "reward_std": 0.2193572111427784,
      "rewards/accuracy_reward": 0.12857143231667578,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.41875000298023224,
      "step": 7
    },
    {
      "completion_length": 3901.175048828125,
      "epoch": 0.22377622377622378,
      "grad_norm": 0.34263718128204346,
      "kl": 0.0513916015625,
      "learning_rate": 9.80623151079494e-06,
      "loss": 0.0021,
      "reward": 0.4044642969965935,
      "reward_std": 0.12892552372068167,
      "rewards/accuracy_reward": 0.04642857238650322,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.35803572088479996,
      "step": 8
    },
    {
      "completion_length": 3889.1786499023438,
      "epoch": 0.2517482517482518,
      "grad_norm": 1.2209001779556274,
      "kl": 0.0618896484375,
      "learning_rate": 9.65745789630079e-06,
      "loss": 0.0025,
      "reward": 0.48303572088479996,
      "reward_std": 0.21392671391367912,
      "rewards/accuracy_reward": 0.11428571795113385,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3687500059604645,
      "step": 9
    },
    {
      "completion_length": 4003.5607299804688,
      "epoch": 0.27972027972027974,
      "grad_norm": 0.3369362950325012,
      "kl": 0.068359375,
      "learning_rate": 9.468645689567599e-06,
      "loss": 0.0027,
      "reward": 0.46339286118745804,
      "reward_std": 0.22164541110396385,
      "rewards/accuracy_reward": 0.09285714570432901,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3705357164144516,
      "step": 10
    },
    {
      "completion_length": 3949.378662109375,
      "epoch": 0.3076923076923077,
      "grad_norm": 0.7158792018890381,
      "kl": 0.0810546875,
      "learning_rate": 9.241613255361455e-06,
      "loss": 0.0032,
      "reward": 0.4375000074505806,
      "reward_std": 0.1808728687465191,
      "rewards/accuracy_reward": 0.08214285899884999,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3553571403026581,
      "step": 11
    },
    {
      "completion_length": 4065.3965454101562,
      "epoch": 0.3356643356643357,
      "grad_norm": 0.26943907141685486,
      "kl": 0.0992431640625,
      "learning_rate": 8.978547040132317e-06,
      "loss": 0.004,
      "reward": 0.35625000298023224,
      "reward_std": 0.11728819366544485,
      "rewards/accuracy_reward": 0.0321428575553,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3241071403026581,
      "step": 12
    },
    {
      "completion_length": 3880.08203125,
      "epoch": 0.36363636363636365,
      "grad_norm": 0.3461170792579651,
      "kl": 0.1202392578125,
      "learning_rate": 8.681980515339464e-06,
      "loss": 0.0048,
      "reward": 0.6062500029802322,
      "reward_std": 0.22258323803544044,
      "rewards/accuracy_reward": 0.2607142962515354,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3455357179045677,
      "step": 13
    },
    {
      "completion_length": 3993.267822265625,
      "epoch": 0.3916083916083916,
      "grad_norm": 0.32656392455101013,
      "kl": 0.13916015625,
      "learning_rate": 8.354769778736407e-06,
      "loss": 0.0056,
      "reward": 0.6285714283585548,
      "reward_std": 0.29084962233901024,
      "rewards/accuracy_reward": 0.29285714589059353,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.33571428060531616,
      "step": 14
    },
    {
      "completion_length": 4049.6571044921875,
      "epoch": 0.4195804195804196,
      "grad_norm": 0.3071815073490143,
      "kl": 0.1650390625,
      "learning_rate": 8.00006604858821e-06,
      "loss": 0.0066,
      "reward": 0.5178571417927742,
      "reward_std": 0.17362720984965563,
      "rewards/accuracy_reward": 0.20714286155998707,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3107142820954323,
      "step": 15
    },
    {
      "completion_length": 4045.789306640625,
      "epoch": 0.44755244755244755,
      "grad_norm": 0.40854963660240173,
      "kl": 0.2138671875,
      "learning_rate": 7.621285315716991e-06,
      "loss": 0.0085,
      "reward": 0.5482142865657806,
      "reward_std": 0.2475740760564804,
      "rewards/accuracy_reward": 0.22500000521540642,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3232142925262451,
      "step": 16
    },
    {
      "completion_length": 3979.9750366210938,
      "epoch": 0.4755244755244755,
      "grad_norm": 0.48029232025146484,
      "kl": 0.250732421875,
      "learning_rate": 7.222075445642904e-06,
      "loss": 0.01,
      "reward": 0.5223214402794838,
      "reward_std": 0.2250591404736042,
      "rewards/accuracy_reward": 0.20357143506407738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3187500014901161,
      "step": 17
    },
    {
      "completion_length": 3790.7999877929688,
      "epoch": 0.5034965034965035,
      "grad_norm": 0.59281986951828,
      "kl": 0.26953125,
      "learning_rate": 6.80628104764508e-06,
      "loss": 0.0108,
      "reward": 0.5714285746216774,
      "reward_std": 0.20503208599984646,
      "rewards/accuracy_reward": 0.21428572107106447,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3571428582072258,
      "step": 18
    },
    {
      "completion_length": 3469.2178344726562,
      "epoch": 0.5314685314685315,
      "grad_norm": 5.099720478057861,
      "kl": 0.376953125,
      "learning_rate": 6.377906449072578e-06,
      "loss": 0.0151,
      "reward": 0.6991071403026581,
      "reward_std": 0.2952312082052231,
      "rewards/accuracy_reward": 0.30000000819563866,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3991071358323097,
      "step": 19
    },
    {
      "completion_length": 3258.9036254882812,
      "epoch": 0.5594405594405595,
      "grad_norm": 0.476639062166214,
      "kl": 0.423828125,
      "learning_rate": 5.9410771314830255e-06,
      "loss": 0.017,
      "reward": 0.7660714238882065,
      "reward_std": 0.3425598815083504,
      "rewards/accuracy_reward": 0.3107142932713032,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4553571492433548,
      "step": 20
    },
    {
      "completion_length": 2936.7607421875,
      "epoch": 0.5874125874125874,
      "grad_norm": 0.9111078977584839,
      "kl": 0.4521484375,
      "learning_rate": 5.500000000000001e-06,
      "loss": 0.0181,
      "reward": 0.7901785671710968,
      "reward_std": 0.2908039838075638,
      "rewards/accuracy_reward": 0.30714286491274834,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.48303571343421936,
      "step": 21
    },
    {
      "completion_length": 2737.4607543945312,
      "epoch": 0.6153846153846154,
      "grad_norm": 0.5476343035697937,
      "kl": 0.54345703125,
      "learning_rate": 5.0589228685169776e-06,
      "loss": 0.0217,
      "reward": 0.7124999910593033,
      "reward_std": 0.22632914781570435,
      "rewards/accuracy_reward": 0.22500000917352736,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.48750000447034836,
      "step": 22
    },
    {
      "completion_length": 2191.2464599609375,
      "epoch": 0.6433566433566433,
      "grad_norm": 0.6787258386611938,
      "kl": 0.5205078125,
      "learning_rate": 4.622093550927423e-06,
      "loss": 0.0208,
      "reward": 0.8017857074737549,
      "reward_std": 0.24978942424058914,
      "rewards/accuracy_reward": 0.32500000670552254,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.47678571939468384,
      "step": 23
    },
    {
      "completion_length": 2145.5035705566406,
      "epoch": 0.6713286713286714,
      "grad_norm": 0.9464374780654907,
      "kl": 0.6494140625,
      "learning_rate": 4.193718952354921e-06,
      "loss": 0.026,
      "reward": 0.6982142925262451,
      "reward_std": 0.2445763349533081,
      "rewards/accuracy_reward": 0.19642857555299997,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.5017857104539871,
      "step": 24
    },
    {
      "completion_length": 1999.6785888671875,
      "epoch": 0.6993006993006993,
      "grad_norm": 0.7832499742507935,
      "kl": 0.626953125,
      "learning_rate": 3.777924554357096e-06,
      "loss": 0.025,
      "reward": 0.7687499970197678,
      "reward_std": 0.2781074270606041,
      "rewards/accuracy_reward": 0.2678571492433548,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.500892847776413,
      "step": 25
    },
    {
      "completion_length": 2141.8035888671875,
      "epoch": 0.7272727272727273,
      "grad_norm": 0.8674513697624207,
      "kl": 0.765625,
      "learning_rate": 3.378714684283011e-06,
      "loss": 0.0306,
      "reward": 0.7142857015132904,
      "reward_std": 0.226736880838871,
      "rewards/accuracy_reward": 0.22500000149011612,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4892857298254967,
      "step": 26
    },
    {
      "completion_length": 1950.5678100585938,
      "epoch": 0.7552447552447552,
      "grad_norm": 0.7500644326210022,
      "kl": 0.65234375,
      "learning_rate": 2.9999339514117913e-06,
      "loss": 0.0261,
      "reward": 0.7374999821186066,
      "reward_std": 0.25540266558527946,
      "rewards/accuracy_reward": 0.19285714533179998,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.5446428656578064,
      "step": 27
    },
    {
      "completion_length": 2502.7821655273438,
      "epoch": 0.7832167832167832,
      "grad_norm": 1.129561185836792,
      "kl": 0.869140625,
      "learning_rate": 2.645230221263596e-06,
      "loss": 0.0348,
      "reward": 0.5919642895460129,
      "reward_std": 0.18119085393846035,
      "rewards/accuracy_reward": 0.10714286030270159,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4848214387893677,
      "step": 28
    },
    {
      "completion_length": 2735.4535522460938,
      "epoch": 0.8111888111888111,
      "grad_norm": 0.673635721206665,
      "kl": 0.8076171875,
      "learning_rate": 2.3180194846605367e-06,
      "loss": 0.0323,
      "reward": 0.6232142895460129,
      "reward_std": 0.23368354886770248,
      "rewards/accuracy_reward": 0.1428571476135403,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4803571552038193,
      "step": 29
    },
    {
      "completion_length": 2827.32861328125,
      "epoch": 0.8391608391608392,
      "grad_norm": 1.255906343460083,
      "kl": 0.8408203125,
      "learning_rate": 2.021452959867684e-06,
      "loss": 0.0336,
      "reward": 0.5776785612106323,
      "reward_std": 0.23459363728761673,
      "rewards/accuracy_reward": 0.08928571734577417,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4883928596973419,
      "step": 30
    },
    {
      "completion_length": 3130.0,
      "epoch": 0.8671328671328671,
      "grad_norm": 0.8778608441352844,
      "kl": 0.8134765625,
      "learning_rate": 1.7583867446385461e-06,
      "loss": 0.0325,
      "reward": 0.5687500089406967,
      "reward_std": 0.21473033353686333,
      "rewards/accuracy_reward": 0.11785714444704354,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4508928656578064,
      "step": 31
    },
    {
      "completion_length": 3108.8750610351562,
      "epoch": 0.8951048951048951,
      "grad_norm": 0.703495979309082,
      "kl": 0.6640625,
      "learning_rate": 1.531354310432403e-06,
      "loss": 0.0266,
      "reward": 0.6116071492433548,
      "reward_std": 0.24049308896064758,
      "rewards/accuracy_reward": 0.15000000223517418,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4616071507334709,
      "step": 32
    },
    {
      "completion_length": 3385.7571411132812,
      "epoch": 0.9230769230769231,
      "grad_norm": 0.9375730752944946,
      "kl": 0.7919921875,
      "learning_rate": 1.3425421036992098e-06,
      "loss": 0.0316,
      "reward": 0.4883928596973419,
      "reward_std": 0.172358563169837,
      "rewards/accuracy_reward": 0.06428571604192257,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4241071343421936,
      "step": 33
    },
    {
      "completion_length": 3170.0820922851562,
      "epoch": 0.951048951048951,
      "grad_norm": 1.1337316036224365,
      "kl": 0.58447265625,
      "learning_rate": 1.1937684892050606e-06,
      "loss": 0.0234,
      "reward": 0.6321428567171097,
      "reward_std": 0.288823913782835,
      "rewards/accuracy_reward": 0.17857143469154835,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4535714313387871,
      "step": 34
    },
    {
      "completion_length": 3635.4571533203125,
      "epoch": 0.9790209790209791,
      "grad_norm": 0.7671982049942017,
      "kl": 0.62109375,
      "learning_rate": 1.0864662381854632e-06,
      "loss": 0.0248,
      "reward": 0.557142861187458,
      "reward_std": 0.18924793601036072,
      "rewards/accuracy_reward": 0.14642857760190964,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.410714291036129,
      "step": 35
    },
    {
      "completion_length": 3688.6334635416665,
      "epoch": 1.0,
      "grad_norm": 0.7671982049942017,
      "kl": 0.62109375,
      "learning_rate": 1.0216687299751146e-06,
      "loss": 0.0186,
      "reward": 0.5226190686225891,
      "reward_std": 0.2011700620253881,
      "rewards/accuracy_reward": 0.10952381292978923,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.41309523582458496,
      "step": 36
    },
    {
      "epoch": 1.0,
      "step": 36,
      "total_flos": 0.0,
      "train_loss": 0.014487237919349916,
      "train_runtime": 13938.4252,
      "train_samples_per_second": 0.072,
      "train_steps_per_second": 0.003
    }
  ],
  "logging_steps": 1,
  "max_steps": 36,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}