|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 10.995334370139968, |
|
"eval_steps": 500, |
|
"global_step": 450, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"completion_length": 766.9769439697266, |
|
"epoch": 0.049766718506998445, |
|
"grad_norm": 0.1882091611623764, |
|
"kl": 0.0, |
|
"learning_rate": 7.142857142857142e-08, |
|
"loss": 0.0, |
|
"reward": 0.056659227251657285, |
|
"reward_std": 0.0960957952629542, |
|
"rewards/equation_reward_func": 0.0030877977133059176, |
|
"rewards/format_reward_func": 0.05357142974389717, |
|
"step": 2 |
|
}, |
|
{ |
|
"completion_length": 761.3757600784302, |
|
"epoch": 0.09953343701399689, |
|
"grad_norm": 0.1693430244922638, |
|
"kl": 3.890312857635081e-05, |
|
"learning_rate": 1.4285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.06296131052658893, |
|
"reward_std": 0.09832033820566721, |
|
"rewards/equation_reward_func": 0.0026934524921671255, |
|
"rewards/format_reward_func": 0.06026785844005644, |
|
"step": 4 |
|
}, |
|
{ |
|
"completion_length": 765.7619190216064, |
|
"epoch": 0.14930015552099535, |
|
"grad_norm": 0.18038436770439148, |
|
"kl": 0.0003851112737720541, |
|
"learning_rate": 2.1428571428571426e-07, |
|
"loss": 0.0, |
|
"reward": 0.054226191830821335, |
|
"reward_std": 0.09056254595634528, |
|
"rewards/equation_reward_func": 0.003630952454841463, |
|
"rewards/format_reward_func": 0.05059523927047849, |
|
"step": 6 |
|
}, |
|
{ |
|
"completion_length": 771.5922727584839, |
|
"epoch": 0.19906687402799378, |
|
"grad_norm": 0.1860150694847107, |
|
"kl": 0.0017208610979650985, |
|
"learning_rate": 2.857142857142857e-07, |
|
"loss": 0.0, |
|
"reward": 0.07269345303211594, |
|
"reward_std": 0.11444784759078175, |
|
"rewards/equation_reward_func": 0.0034970239394169766, |
|
"rewards/format_reward_func": 0.06919643009314314, |
|
"step": 8 |
|
}, |
|
{ |
|
"completion_length": 768.6547737121582, |
|
"epoch": 0.24883359253499224, |
|
"grad_norm": 0.18981724977493286, |
|
"kl": 0.007042938013910316, |
|
"learning_rate": 3.5714285714285716e-07, |
|
"loss": 0.0, |
|
"reward": 0.06845982308732346, |
|
"reward_std": 0.11329028639011085, |
|
"rewards/equation_reward_func": 0.002983631022289046, |
|
"rewards/format_reward_func": 0.06547619227785617, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 764.2269496917725, |
|
"epoch": 0.2986003110419907, |
|
"grad_norm": 0.15985120832920074, |
|
"kl": 0.030209030941477977, |
|
"learning_rate": 4.285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.07950148951204028, |
|
"reward_std": 0.12200771895004436, |
|
"rewards/equation_reward_func": 0.005096726257761475, |
|
"rewards/format_reward_func": 0.07440476369811222, |
|
"step": 12 |
|
}, |
|
{ |
|
"completion_length": 777.1413831710815, |
|
"epoch": 0.3483670295489891, |
|
"grad_norm": 0.3316055238246918, |
|
"kl": 0.24638308194698766, |
|
"learning_rate": 5e-07, |
|
"loss": 0.0002, |
|
"reward": 0.06816220411565155, |
|
"reward_std": 0.10573185497196391, |
|
"rewards/equation_reward_func": 0.004174107223661849, |
|
"rewards/format_reward_func": 0.06398809689562768, |
|
"step": 14 |
|
}, |
|
{ |
|
"completion_length": 766.7388534545898, |
|
"epoch": 0.39813374805598756, |
|
"grad_norm": 0.22750402987003326, |
|
"kl": 0.36214487318648025, |
|
"learning_rate": 4.999740409224932e-07, |
|
"loss": 0.0004, |
|
"reward": 0.08724702637118753, |
|
"reward_std": 0.1364312534351484, |
|
"rewards/equation_reward_func": 0.003169642919601756, |
|
"rewards/format_reward_func": 0.08407738310052082, |
|
"step": 16 |
|
}, |
|
{ |
|
"completion_length": 750.9323081970215, |
|
"epoch": 0.447900466562986, |
|
"grad_norm": 0.14867445826530457, |
|
"kl": 0.1888283666339703, |
|
"learning_rate": 4.998961690809627e-07, |
|
"loss": 0.0002, |
|
"reward": 0.0880877989839064, |
|
"reward_std": 0.13909767809673212, |
|
"rewards/equation_reward_func": 0.004010416825622087, |
|
"rewards/format_reward_func": 0.0840773832751438, |
|
"step": 18 |
|
}, |
|
{ |
|
"completion_length": 761.3616189956665, |
|
"epoch": 0.4976671850699845, |
|
"grad_norm": 0.17096517980098724, |
|
"kl": 0.1737799496622756, |
|
"learning_rate": 4.997664006472578e-07, |
|
"loss": 0.0002, |
|
"reward": 0.09410714514524443, |
|
"reward_std": 0.1441169898607768, |
|
"rewards/equation_reward_func": 0.004077381056049489, |
|
"rewards/format_reward_func": 0.09002976410556585, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 749.1599817276001, |
|
"epoch": 0.5474339035769828, |
|
"grad_norm": 0.16154693067073822, |
|
"kl": 0.11445438460214064, |
|
"learning_rate": 4.995847625707292e-07, |
|
"loss": 0.0001, |
|
"reward": 0.11768601481526275, |
|
"reward_std": 0.1828837317698344, |
|
"rewards/equation_reward_func": 0.006822916879173135, |
|
"rewards/format_reward_func": 0.1108630983508192, |
|
"step": 22 |
|
}, |
|
{ |
|
"completion_length": 762.6986713409424, |
|
"epoch": 0.5972006220839814, |
|
"grad_norm": 0.1822424829006195, |
|
"kl": 0.13439486949937418, |
|
"learning_rate": 4.993512925726318e-07, |
|
"loss": 0.0001, |
|
"reward": 0.12162946694297716, |
|
"reward_std": 0.19308959832414985, |
|
"rewards/equation_reward_func": 0.0055580357930011814, |
|
"rewards/format_reward_func": 0.11607143172295764, |
|
"step": 24 |
|
}, |
|
{ |
|
"completion_length": 744.5833473205566, |
|
"epoch": 0.6469673405909798, |
|
"grad_norm": 0.20237824320793152, |
|
"kl": 0.1873470548307523, |
|
"learning_rate": 4.990660391382923e-07, |
|
"loss": 0.0002, |
|
"reward": 0.1205357170838397, |
|
"reward_std": 0.18955813159118406, |
|
"rewards/equation_reward_func": 0.005208333370319451, |
|
"rewards/format_reward_func": 0.11532738379901275, |
|
"step": 26 |
|
}, |
|
{ |
|
"completion_length": 740.8712930679321, |
|
"epoch": 0.6967340590979783, |
|
"grad_norm": 0.20918512344360352, |
|
"kl": 0.29402227653190494, |
|
"learning_rate": 4.987290615070384e-07, |
|
"loss": 0.0003, |
|
"reward": 0.14720238508743932, |
|
"reward_std": 0.21827496998594142, |
|
"rewards/equation_reward_func": 0.006577381089300616, |
|
"rewards/format_reward_func": 0.14062500419095159, |
|
"step": 28 |
|
}, |
|
{ |
|
"completion_length": 730.9189138412476, |
|
"epoch": 0.7465007776049767, |
|
"grad_norm": 0.1985033005475998, |
|
"kl": 0.4376440930645913, |
|
"learning_rate": 4.983404296598978e-07, |
|
"loss": 0.0004, |
|
"reward": 0.17537202849052846, |
|
"reward_std": 0.24408150045201182, |
|
"rewards/equation_reward_func": 0.009449404875340406, |
|
"rewards/format_reward_func": 0.16592262318590656, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 722.1607275009155, |
|
"epoch": 0.7962674961119751, |
|
"grad_norm": 0.23082222044467926, |
|
"kl": 0.5781177286989987, |
|
"learning_rate": 4.979002243050646e-07, |
|
"loss": 0.0006, |
|
"reward": 0.16087798058288172, |
|
"reward_std": 0.23381112061906606, |
|
"rewards/equation_reward_func": 0.005372023912059376, |
|
"rewards/format_reward_func": 0.15550595690729097, |
|
"step": 32 |
|
}, |
|
{ |
|
"completion_length": 715.540937423706, |
|
"epoch": 0.8460342146189735, |
|
"grad_norm": 0.2513209283351898, |
|
"kl": 0.5908648972399533, |
|
"learning_rate": 4.974085368611381e-07, |
|
"loss": 0.0006, |
|
"reward": 0.19839285931084305, |
|
"reward_std": 0.2854806184768677, |
|
"rewards/equation_reward_func": 0.007916666922028526, |
|
"rewards/format_reward_func": 0.19047619437333196, |
|
"step": 34 |
|
}, |
|
{ |
|
"completion_length": 733.3735246658325, |
|
"epoch": 0.895800933125972, |
|
"grad_norm": 0.7895224690437317, |
|
"kl": 0.974291667342186, |
|
"learning_rate": 4.968654694381379e-07, |
|
"loss": 0.001, |
|
"reward": 0.2064955406531226, |
|
"reward_std": 0.29307361885730643, |
|
"rewards/equation_reward_func": 0.01006696438525978, |
|
"rewards/format_reward_func": 0.19642857578583062, |
|
"step": 36 |
|
}, |
|
{ |
|
"completion_length": 712.1808166503906, |
|
"epoch": 0.9455676516329704, |
|
"grad_norm": 0.27443817257881165, |
|
"kl": 0.5672950851731002, |
|
"learning_rate": 4.962711348162987e-07, |
|
"loss": 0.0006, |
|
"reward": 0.23159970679262187, |
|
"reward_std": 0.30910343912546523, |
|
"rewards/equation_reward_func": 0.009129464311627089, |
|
"rewards/format_reward_func": 0.22247024369426072, |
|
"step": 38 |
|
}, |
|
{ |
|
"completion_length": 724.0312623977661, |
|
"epoch": 0.995334370139969, |
|
"grad_norm": 0.2718716859817505, |
|
"kl": 0.7035379374865443, |
|
"learning_rate": 4.956256564226487e-07, |
|
"loss": 0.0007, |
|
"reward": 0.22910714593308512, |
|
"reward_std": 0.31004760426003486, |
|
"rewards/equation_reward_func": 0.009613095318854903, |
|
"rewards/format_reward_func": 0.21949405351188034, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 689.2218210320724, |
|
"epoch": 1.0248833592534992, |
|
"grad_norm": 0.26647260785102844, |
|
"kl": 0.780534968172249, |
|
"learning_rate": 4.949291683053768e-07, |
|
"loss": 0.0005, |
|
"reward": 0.27011278850075443, |
|
"reward_std": 0.3508555128386146, |
|
"rewards/equation_reward_func": 0.010714285780283573, |
|
"rewards/format_reward_func": 0.2593985022486825, |
|
"step": 42 |
|
}, |
|
{ |
|
"completion_length": 679.3750143051147, |
|
"epoch": 1.0746500777604977, |
|
"grad_norm": 0.3014063239097595, |
|
"kl": 0.8605983196757734, |
|
"learning_rate": 4.941818151059955e-07, |
|
"loss": 0.0009, |
|
"reward": 0.26439732742437627, |
|
"reward_std": 0.34150049267918803, |
|
"rewards/equation_reward_func": 0.01290922645421233, |
|
"rewards/format_reward_func": 0.25148810172686353, |
|
"step": 44 |
|
}, |
|
{ |
|
"completion_length": 678.1852836608887, |
|
"epoch": 1.124416796267496, |
|
"grad_norm": 0.2869322597980499, |
|
"kl": 1.1382544473744929, |
|
"learning_rate": 4.933837520293017e-07, |
|
"loss": 0.0011, |
|
"reward": 0.28466518549248576, |
|
"reward_std": 0.3586017652414739, |
|
"rewards/equation_reward_func": 0.010855655014893273, |
|
"rewards/format_reward_func": 0.27380952949170023, |
|
"step": 46 |
|
}, |
|
{ |
|
"completion_length": 665.9114689826965, |
|
"epoch": 1.1741835147744946, |
|
"grad_norm": 0.4286658465862274, |
|
"kl": 1.101041450863704, |
|
"learning_rate": 4.925351448111454e-07, |
|
"loss": 0.0011, |
|
"reward": 0.35581102105788887, |
|
"reward_std": 0.3954307301901281, |
|
"rewards/equation_reward_func": 0.012805059952370357, |
|
"rewards/format_reward_func": 0.34300595964305103, |
|
"step": 48 |
|
}, |
|
{ |
|
"completion_length": 648.8006086349487, |
|
"epoch": 1.223950233281493, |
|
"grad_norm": 0.6011645793914795, |
|
"kl": 1.5983671792782843, |
|
"learning_rate": 4.91636169684011e-07, |
|
"loss": 0.0016, |
|
"reward": 0.36582590208854526, |
|
"reward_std": 0.40516745822969824, |
|
"rewards/equation_reward_func": 0.017611607605431345, |
|
"rewards/format_reward_func": 0.34821429441217333, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 637.3727793693542, |
|
"epoch": 1.2737169517884914, |
|
"grad_norm": 0.4506984353065491, |
|
"kl": 1.99405955709517, |
|
"learning_rate": 4.906870133404186e-07, |
|
"loss": 0.002, |
|
"reward": 0.37937500968109816, |
|
"reward_std": 0.40684798220172524, |
|
"rewards/equation_reward_func": 0.01851190527304425, |
|
"rewards/format_reward_func": 0.3608631034148857, |
|
"step": 52 |
|
}, |
|
{ |
|
"completion_length": 611.6599831581116, |
|
"epoch": 1.32348367029549, |
|
"grad_norm": 0.3486514985561371, |
|
"kl": 1.575268767774105, |
|
"learning_rate": 4.896878728941531e-07, |
|
"loss": 0.0016, |
|
"reward": 0.41054316400550306, |
|
"reward_std": 0.402535158675164, |
|
"rewards/equation_reward_func": 0.014709821767610265, |
|
"rewards/format_reward_func": 0.39583334256894886, |
|
"step": 54 |
|
}, |
|
{ |
|
"completion_length": 588.9256038665771, |
|
"epoch": 1.3732503888024883, |
|
"grad_norm": 0.35510003566741943, |
|
"kl": 1.2928779851645231, |
|
"learning_rate": 4.886389558393284e-07, |
|
"loss": 0.0013, |
|
"reward": 0.44056548294611275, |
|
"reward_std": 0.4229291075025685, |
|
"rewards/equation_reward_func": 0.020922619543853216, |
|
"rewards/format_reward_func": 0.41964286752045155, |
|
"step": 56 |
|
}, |
|
{ |
|
"completion_length": 568.6056680679321, |
|
"epoch": 1.4230171073094868, |
|
"grad_norm": 0.4101411998271942, |
|
"kl": 2.0492603620514274, |
|
"learning_rate": 4.875404800072976e-07, |
|
"loss": 0.002, |
|
"reward": 0.4984003081917763, |
|
"reward_std": 0.47362194606103003, |
|
"rewards/equation_reward_func": 0.020721726599731483, |
|
"rewards/format_reward_func": 0.47767858440056443, |
|
"step": 58 |
|
}, |
|
{ |
|
"completion_length": 566.1391491889954, |
|
"epoch": 1.4727838258164851, |
|
"grad_norm": 0.9513494968414307, |
|
"kl": 2.287347287638113, |
|
"learning_rate": 4.86392673521415e-07, |
|
"loss": 0.0023, |
|
"reward": 0.496495544211939, |
|
"reward_std": 0.4519720033276826, |
|
"rewards/equation_reward_func": 0.021049107548606116, |
|
"rewards/format_reward_func": 0.475446441443637, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 555.5959930419922, |
|
"epoch": 1.5225505443234837, |
|
"grad_norm": 3.8236348628997803, |
|
"kl": 4.3232480408623815, |
|
"learning_rate": 4.851957747496606e-07, |
|
"loss": 0.0043, |
|
"reward": 0.5090178686659783, |
|
"reward_std": 0.45009147142991424, |
|
"rewards/equation_reward_func": 0.02241071459138766, |
|
"rewards/format_reward_func": 0.48660715692676604, |
|
"step": 62 |
|
}, |
|
{ |
|
"completion_length": 545.1212873458862, |
|
"epoch": 1.5723172628304822, |
|
"grad_norm": 1.2665306329727173, |
|
"kl": 2.19858290348202, |
|
"learning_rate": 4.839500322551386e-07, |
|
"loss": 0.0022, |
|
"reward": 0.5529836406931281, |
|
"reward_std": 0.44346457067877054, |
|
"rewards/equation_reward_func": 0.021733631443566992, |
|
"rewards/format_reward_func": 0.5312500153668225, |
|
"step": 64 |
|
}, |
|
{ |
|
"completion_length": 519.7120614051819, |
|
"epoch": 1.6220839813374806, |
|
"grad_norm": 0.6547808051109314, |
|
"kl": 2.3480111258104444, |
|
"learning_rate": 4.826557047444563e-07, |
|
"loss": 0.0023, |
|
"reward": 0.576257451903075, |
|
"reward_std": 0.45106819295324385, |
|
"rewards/equation_reward_func": 0.023430060044120182, |
|
"rewards/format_reward_func": 0.5528273996897042, |
|
"step": 66 |
|
}, |
|
{ |
|
"completion_length": 500.1666741371155, |
|
"epoch": 1.6718506998444789, |
|
"grad_norm": 1.3800803422927856, |
|
"kl": 3.120940439403057, |
|
"learning_rate": 4.813130610139993e-07, |
|
"loss": 0.0031, |
|
"reward": 0.5781473349779844, |
|
"reward_std": 0.4549011045601219, |
|
"rewards/equation_reward_func": 0.02531994113815017, |
|
"rewards/format_reward_func": 0.5528274001553655, |
|
"step": 68 |
|
}, |
|
{ |
|
"completion_length": 500.9419732093811, |
|
"epoch": 1.7216174183514774, |
|
"grad_norm": 1.0396491289138794, |
|
"kl": 6.374570010229945, |
|
"learning_rate": 4.799223798941089e-07, |
|
"loss": 0.0064, |
|
"reward": 0.6029836446978152, |
|
"reward_std": 0.4363493148703128, |
|
"rewards/equation_reward_func": 0.02932291675824672, |
|
"rewards/format_reward_func": 0.5736607322469354, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 469.58408641815186, |
|
"epoch": 1.771384136858476, |
|
"grad_norm": 1.1942294836044312, |
|
"kl": 24.733395729679614, |
|
"learning_rate": 4.78483950191177e-07, |
|
"loss": 0.0247, |
|
"reward": 0.6590253091417253, |
|
"reward_std": 0.43813786492682993, |
|
"rewards/equation_reward_func": 0.03328125060033926, |
|
"rewards/format_reward_func": 0.6257440662011504, |
|
"step": 72 |
|
}, |
|
{ |
|
"completion_length": 457.6049189567566, |
|
"epoch": 1.8211508553654743, |
|
"grad_norm": 1.2962743043899536, |
|
"kl": 5.970435416325927, |
|
"learning_rate": 4.769980706276687e-07, |
|
"loss": 0.006, |
|
"reward": 0.6895312615670264, |
|
"reward_std": 0.4353426368907094, |
|
"rewards/equation_reward_func": 0.0370014887466823, |
|
"rewards/format_reward_func": 0.6525297802872956, |
|
"step": 74 |
|
}, |
|
{ |
|
"completion_length": 442.77977418899536, |
|
"epoch": 1.8709175738724728, |
|
"grad_norm": 6.384627819061279, |
|
"kl": 5289.785693533719, |
|
"learning_rate": 4.7546504978008595e-07, |
|
"loss": 5.2898, |
|
"reward": 0.7037648935802281, |
|
"reward_std": 0.41037949989549816, |
|
"rewards/equation_reward_func": 0.04081845350447111, |
|
"rewards/format_reward_func": 0.6629464458674192, |
|
"step": 76 |
|
}, |
|
{ |
|
"completion_length": 438.72842931747437, |
|
"epoch": 1.9206842923794714, |
|
"grad_norm": 1.6061675548553467, |
|
"kl": 11.556056022644043, |
|
"learning_rate": 4.738852060148848e-07, |
|
"loss": 0.0116, |
|
"reward": 0.7011681636795402, |
|
"reward_std": 0.43840571492910385, |
|
"rewards/equation_reward_func": 0.04789434606209397, |
|
"rewards/format_reward_func": 0.6532738278619945, |
|
"step": 78 |
|
}, |
|
{ |
|
"completion_length": 412.28348779678345, |
|
"epoch": 1.9704510108864697, |
|
"grad_norm": 2.0230860710144043, |
|
"kl": 5.730300394818187, |
|
"learning_rate": 4.722588674223593e-07, |
|
"loss": 0.0057, |
|
"reward": 0.7024256153963506, |
|
"reward_std": 0.4186383159831166, |
|
"rewards/equation_reward_func": 0.04840773875184823, |
|
"rewards/format_reward_func": 0.654017876368016, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 389.4373498213919, |
|
"epoch": 2.0, |
|
"grad_norm": 1.0441092252731323, |
|
"kl": 5.438737201063256, |
|
"learning_rate": 4.70586371748506e-07, |
|
"loss": 0.0032, |
|
"reward": 0.7210902367767534, |
|
"reward_std": 0.41276825533101436, |
|
"rewards/equation_reward_func": 0.049411029113750705, |
|
"rewards/format_reward_func": 0.6716792152116173, |
|
"step": 82 |
|
}, |
|
{ |
|
"completion_length": 378.0401859283447, |
|
"epoch": 2.0497667185069983, |
|
"grad_norm": 3.568232297897339, |
|
"kl": 14.175606586039066, |
|
"learning_rate": 4.6886806632488363e-07, |
|
"loss": 0.0142, |
|
"reward": 0.7700893068686128, |
|
"reward_std": 0.4058987263124436, |
|
"rewards/equation_reward_func": 0.057291668590551126, |
|
"rewards/format_reward_func": 0.7127976361662149, |
|
"step": 84 |
|
}, |
|
{ |
|
"completion_length": 365.3497085571289, |
|
"epoch": 2.099533437013997, |
|
"grad_norm": 4.238552093505859, |
|
"kl": 20.433330222964287, |
|
"learning_rate": 4.6710430799648143e-07, |
|
"loss": 0.0204, |
|
"reward": 0.7515401933342218, |
|
"reward_std": 0.4172304559033364, |
|
"rewards/equation_reward_func": 0.06031994195654988, |
|
"rewards/format_reward_func": 0.6912202551029623, |
|
"step": 86 |
|
}, |
|
{ |
|
"completion_length": 357.0818510055542, |
|
"epoch": 2.1493001555209954, |
|
"grad_norm": 26.27466583251953, |
|
"kl": 24.954067958518863, |
|
"learning_rate": 4.652954630476127e-07, |
|
"loss": 0.025, |
|
"reward": 0.8057515062391758, |
|
"reward_std": 0.37641601357609034, |
|
"rewards/equation_reward_func": 0.0631919659790583, |
|
"rewards/format_reward_func": 0.7425595386885107, |
|
"step": 88 |
|
}, |
|
{ |
|
"completion_length": 361.0506021976471, |
|
"epoch": 2.1990668740279937, |
|
"grad_norm": 28.50054359436035, |
|
"kl": 18.05073036905378, |
|
"learning_rate": 4.6344190712584713e-07, |
|
"loss": 0.0181, |
|
"reward": 0.8062277045100927, |
|
"reward_std": 0.3896879474632442, |
|
"rewards/equation_reward_func": 0.07259672792861238, |
|
"rewards/format_reward_func": 0.7336309677921236, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 351.3601257801056, |
|
"epoch": 2.248833592534992, |
|
"grad_norm": 6.563501834869385, |
|
"kl": 13.980788078159094, |
|
"learning_rate": 4.615440251639995e-07, |
|
"loss": 0.014, |
|
"reward": 0.8148214407265186, |
|
"reward_std": 0.3750967951491475, |
|
"rewards/equation_reward_func": 0.07449404880753718, |
|
"rewards/format_reward_func": 0.7403273959644139, |
|
"step": 92 |
|
}, |
|
{ |
|
"completion_length": 340.1547691822052, |
|
"epoch": 2.298600311041991, |
|
"grad_norm": 4.388554573059082, |
|
"kl": 28.781219646334648, |
|
"learning_rate": 4.596022113001894e-07, |
|
"loss": 0.0288, |
|
"reward": 0.8234449606388807, |
|
"reward_std": 0.3968581879744306, |
|
"rewards/equation_reward_func": 0.0779092287120875, |
|
"rewards/format_reward_func": 0.7455357294529676, |
|
"step": 94 |
|
}, |
|
{ |
|
"completion_length": 325.18229627609253, |
|
"epoch": 2.348367029548989, |
|
"grad_norm": 6.165853500366211, |
|
"kl": 38.15451553463936, |
|
"learning_rate": 4.576168687959895e-07, |
|
"loss": 0.0382, |
|
"reward": 0.8252604268491268, |
|
"reward_std": 0.3835659438627772, |
|
"rewards/equation_reward_func": 0.07674851396586746, |
|
"rewards/format_reward_func": 0.7485119192861021, |
|
"step": 96 |
|
}, |
|
{ |
|
"completion_length": 325.9955415725708, |
|
"epoch": 2.3981337480559874, |
|
"grad_norm": 4.9900288581848145, |
|
"kl": 29.846170015633106, |
|
"learning_rate": 4.555884099526793e-07, |
|
"loss": 0.0298, |
|
"reward": 0.836130972020328, |
|
"reward_std": 0.3841063065920025, |
|
"rewards/equation_reward_func": 0.08017857247614302, |
|
"rewards/format_reward_func": 0.7559523954987526, |
|
"step": 98 |
|
}, |
|
{ |
|
"completion_length": 325.5736680030823, |
|
"epoch": 2.447900466562986, |
|
"grad_norm": 6.271454334259033, |
|
"kl": 39.699058301746845, |
|
"learning_rate": 4.5351725602562174e-07, |
|
"loss": 0.0397, |
|
"reward": 0.8406399013474584, |
|
"reward_std": 0.38198388437740505, |
|
"rewards/equation_reward_func": 0.08022321574389935, |
|
"rewards/format_reward_func": 0.7604166809469461, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 306.04911255836487, |
|
"epoch": 2.4976671850699845, |
|
"grad_norm": 59.40867614746094, |
|
"kl": 39.07959423214197, |
|
"learning_rate": 4.514038371367791e-07, |
|
"loss": 0.0391, |
|
"reward": 0.8590625217184424, |
|
"reward_std": 0.3790609473362565, |
|
"rewards/equation_reward_func": 0.0897172634722665, |
|
"rewards/format_reward_func": 0.7693452518433332, |
|
"step": 102 |
|
}, |
|
{ |
|
"completion_length": 305.7552146911621, |
|
"epoch": 2.547433903576983, |
|
"grad_norm": 7.068971157073975, |
|
"kl": 41.63126154243946, |
|
"learning_rate": 4.4924859218538936e-07, |
|
"loss": 0.0416, |
|
"reward": 0.8449925715103745, |
|
"reward_std": 0.3808569728862494, |
|
"rewards/equation_reward_func": 0.0853199407865759, |
|
"rewards/format_reward_func": 0.7596726333722472, |
|
"step": 104 |
|
}, |
|
{ |
|
"completion_length": 304.3958387374878, |
|
"epoch": 2.5972006220839816, |
|
"grad_norm": 16.367332458496094, |
|
"kl": 61.27933597564697, |
|
"learning_rate": 4.470519687568185e-07, |
|
"loss": 0.0613, |
|
"reward": 0.8741964548826218, |
|
"reward_std": 0.3497261651791632, |
|
"rewards/equation_reward_func": 0.08625000189931598, |
|
"rewards/format_reward_func": 0.7879464407451451, |
|
"step": 106 |
|
}, |
|
{ |
|
"completion_length": 290.72768449783325, |
|
"epoch": 2.64696734059098, |
|
"grad_norm": 9.564668655395508, |
|
"kl": 56.39366825670004, |
|
"learning_rate": 4.4481442302960923e-07, |
|
"loss": 0.0564, |
|
"reward": 0.8775223465636373, |
|
"reward_std": 0.3703669502865523, |
|
"rewards/equation_reward_func": 0.0947842278110329, |
|
"rewards/format_reward_func": 0.7827381081879139, |
|
"step": 108 |
|
}, |
|
{ |
|
"completion_length": 285.9531292915344, |
|
"epoch": 2.6967340590979783, |
|
"grad_norm": 9.019147872924805, |
|
"kl": 57.990618377923965, |
|
"learning_rate": 4.4253641968074505e-07, |
|
"loss": 0.058, |
|
"reward": 0.8902678797021508, |
|
"reward_std": 0.3635682419408113, |
|
"rewards/equation_reward_func": 0.10083333519287407, |
|
"rewards/format_reward_func": 0.7894345363602042, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 268.639142036438, |
|
"epoch": 2.7465007776049766, |
|
"grad_norm": 11.662919044494629, |
|
"kl": 45.52927339076996, |
|
"learning_rate": 4.402184317891501e-07, |
|
"loss": 0.0455, |
|
"reward": 0.897909251973033, |
|
"reward_std": 0.33927053899969906, |
|
"rewards/equation_reward_func": 0.101034227933269, |
|
"rewards/format_reward_func": 0.7968750121071935, |
|
"step": 112 |
|
}, |
|
{ |
|
"completion_length": 275.512654542923, |
|
"epoch": 2.796267496111975, |
|
"grad_norm": 52.24915313720703, |
|
"kl": 80.93976804614067, |
|
"learning_rate": 4.37860940737443e-07, |
|
"loss": 0.0809, |
|
"reward": 0.9052157932892442, |
|
"reward_std": 0.33712050085887313, |
|
"rewards/equation_reward_func": 0.09941220440668985, |
|
"rewards/format_reward_func": 0.8058035830035806, |
|
"step": 114 |
|
}, |
|
{ |
|
"completion_length": 276.7753026485443, |
|
"epoch": 2.8460342146189737, |
|
"grad_norm": 6.988640308380127, |
|
"kl": 73.49725632369518, |
|
"learning_rate": 4.354644361119671e-07, |
|
"loss": 0.0735, |
|
"reward": 0.8936904948204756, |
|
"reward_std": 0.35725354170426726, |
|
"rewards/equation_reward_func": 0.1020238118362613, |
|
"rewards/format_reward_func": 0.791666679084301, |
|
"step": 116 |
|
}, |
|
{ |
|
"completion_length": 281.043904542923, |
|
"epoch": 2.895800933125972, |
|
"grad_norm": 14.06199836730957, |
|
"kl": 67.49630101025105, |
|
"learning_rate": 4.3302941560111716e-07, |
|
"loss": 0.0675, |
|
"reward": 0.8816443579271436, |
|
"reward_std": 0.36942707875277847, |
|
"rewards/equation_reward_func": 0.10337053792318329, |
|
"rewards/format_reward_func": 0.7782738227397203, |
|
"step": 118 |
|
}, |
|
{ |
|
"completion_length": 275.93155312538147, |
|
"epoch": 2.9455676516329703, |
|
"grad_norm": 7.618069648742676, |
|
"kl": 61.22661107778549, |
|
"learning_rate": 4.3055638489198236e-07, |
|
"loss": 0.0612, |
|
"reward": 0.895319958217442, |
|
"reward_std": 0.3638994665816426, |
|
"rewards/equation_reward_func": 0.1006770851672627, |
|
"rewards/format_reward_func": 0.7946428693830967, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 272.8802127838135, |
|
"epoch": 2.995334370139969, |
|
"grad_norm": 17.16042137145996, |
|
"kl": 92.38909322023392, |
|
"learning_rate": 4.280458575653296e-07, |
|
"loss": 0.0924, |
|
"reward": 0.9042931590229273, |
|
"reward_std": 0.3583536515943706, |
|
"rewards/equation_reward_func": 0.1156026819953695, |
|
"rewards/format_reward_func": 0.7886904887855053, |
|
"step": 122 |
|
}, |
|
{ |
|
"completion_length": 267.62281196995787, |
|
"epoch": 3.0248833592534994, |
|
"grad_norm": 9.52210521697998, |
|
"kl": 76.53048851615505, |
|
"learning_rate": 4.2549835498894665e-07, |
|
"loss": 0.0454, |
|
"reward": 0.8889097963508806, |
|
"reward_std": 0.3777208249819906, |
|
"rewards/equation_reward_func": 0.09943609174929167, |
|
"rewards/format_reward_func": 0.7894736967588726, |
|
"step": 124 |
|
}, |
|
{ |
|
"completion_length": 253.41592621803284, |
|
"epoch": 3.0746500777604977, |
|
"grad_norm": 12.951403617858887, |
|
"kl": 69.25402422249317, |
|
"learning_rate": 4.229144062093679e-07, |
|
"loss": 0.0693, |
|
"reward": 0.9256026921793818, |
|
"reward_std": 0.33047901722602546, |
|
"rewards/equation_reward_func": 0.11161458486458287, |
|
"rewards/format_reward_func": 0.8139881063252687, |
|
"step": 126 |
|
}, |
|
{ |
|
"completion_length": 260.8653337955475, |
|
"epoch": 3.124416796267496, |
|
"grad_norm": 11.296874046325684, |
|
"kl": 92.7376360297203, |
|
"learning_rate": 4.2029454784200675e-07, |
|
"loss": 0.0927, |
|
"reward": 0.9119196608662605, |
|
"reward_std": 0.34998451662249863, |
|
"rewards/equation_reward_func": 0.10686012083897367, |
|
"rewards/format_reward_func": 0.8050595354288816, |
|
"step": 128 |
|
}, |
|
{ |
|
"completion_length": 264.3511941432953, |
|
"epoch": 3.1741835147744943, |
|
"grad_norm": 13.988784790039062, |
|
"kl": 95.21338140964508, |
|
"learning_rate": 4.1763932395971433e-07, |
|
"loss": 0.0952, |
|
"reward": 0.9192336574196815, |
|
"reward_std": 0.3349384202156216, |
|
"rewards/equation_reward_func": 0.11119791801320389, |
|
"rewards/format_reward_func": 0.8080357257276773, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 254.3474748134613, |
|
"epoch": 3.223950233281493, |
|
"grad_norm": 10.847155570983887, |
|
"kl": 63.423495560884476, |
|
"learning_rate": 4.1494928597979117e-07, |
|
"loss": 0.0634, |
|
"reward": 0.9145759223029017, |
|
"reward_std": 0.33581931074149907, |
|
"rewards/equation_reward_func": 0.10281994252000004, |
|
"rewards/format_reward_func": 0.811755963601172, |
|
"step": 132 |
|
}, |
|
{ |
|
"completion_length": 250.99033308029175, |
|
"epoch": 3.2737169517884914, |
|
"grad_norm": 18.836917877197266, |
|
"kl": 84.4757269024849, |
|
"learning_rate": 4.122249925494726e-07, |
|
"loss": 0.0845, |
|
"reward": 0.9351265020668507, |
|
"reward_std": 0.32899824890773743, |
|
"rewards/equation_reward_func": 0.111465776222758, |
|
"rewards/format_reward_func": 0.8236607247963548, |
|
"step": 134 |
|
}, |
|
{ |
|
"completion_length": 259.0275332927704, |
|
"epoch": 3.3234836702954897, |
|
"grad_norm": 27.394779205322266, |
|
"kl": 127.20685377717018, |
|
"learning_rate": 4.094670094299131e-07, |
|
"loss": 0.1272, |
|
"reward": 0.9070387203246355, |
|
"reward_std": 0.36526231234893203, |
|
"rewards/equation_reward_func": 0.10867559676989913, |
|
"rewards/format_reward_func": 0.7983631072565913, |
|
"step": 136 |
|
}, |
|
{ |
|
"completion_length": 249.11384463310242, |
|
"epoch": 3.3732503888024885, |
|
"grad_norm": 17.246868133544922, |
|
"kl": 91.59087002277374, |
|
"learning_rate": 4.066759093786931e-07, |
|
"loss": 0.0916, |
|
"reward": 0.9155283067375422, |
|
"reward_std": 0.33937901724129915, |
|
"rewards/equation_reward_func": 0.11270089336903766, |
|
"rewards/format_reward_func": 0.8028273927047849, |
|
"step": 138 |
|
}, |
|
{ |
|
"completion_length": 253.11607432365417, |
|
"epoch": 3.423017107309487, |
|
"grad_norm": 25.47807502746582, |
|
"kl": 66.04804906249046, |
|
"learning_rate": 4.038522720308732e-07, |
|
"loss": 0.066, |
|
"reward": 0.9110640026628971, |
|
"reward_std": 0.35778610594570637, |
|
"rewards/equation_reward_func": 0.10898065764922649, |
|
"rewards/format_reward_func": 0.802083345130086, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 256.49181938171387, |
|
"epoch": 3.472783825816485, |
|
"grad_norm": 6.512887954711914, |
|
"kl": 91.01838940382004, |
|
"learning_rate": 4.009966837786194e-07, |
|
"loss": 0.091, |
|
"reward": 0.9077157909050584, |
|
"reward_std": 0.34360028826631606, |
|
"rewards/equation_reward_func": 0.10340029885992408, |
|
"rewards/format_reward_func": 0.8043154878541827, |
|
"step": 142 |
|
}, |
|
{ |
|
"completion_length": 245.66815972328186, |
|
"epoch": 3.522550544323484, |
|
"grad_norm": 18.00103759765625, |
|
"kl": 110.25812494754791, |
|
"learning_rate": 3.981097376494259e-07, |
|
"loss": 0.1103, |
|
"reward": 0.9331994261592627, |
|
"reward_std": 0.33608464046847075, |
|
"rewards/equation_reward_func": 0.11772321787429973, |
|
"rewards/format_reward_func": 0.8154762014746666, |
|
"step": 144 |
|
}, |
|
{ |
|
"completion_length": 236.0089328289032, |
|
"epoch": 3.5723172628304822, |
|
"grad_norm": 10.365816116333008, |
|
"kl": 78.26719461381435, |
|
"learning_rate": 3.951920331829592e-07, |
|
"loss": 0.0783, |
|
"reward": 0.9498809697106481, |
|
"reward_std": 0.3238170698750764, |
|
"rewards/equation_reward_func": 0.11059524107258767, |
|
"rewards/format_reward_func": 0.8392857238650322, |
|
"step": 146 |
|
}, |
|
{ |
|
"completion_length": 249.16890382766724, |
|
"epoch": 3.6220839813374806, |
|
"grad_norm": 19.683488845825195, |
|
"kl": 104.068582624197, |
|
"learning_rate": 3.922441763065506e-07, |
|
"loss": 0.1041, |
|
"reward": 0.9355059750378132, |
|
"reward_std": 0.3465363304130733, |
|
"rewards/equation_reward_func": 0.11556547920918092, |
|
"rewards/format_reward_func": 0.8199404869228601, |
|
"step": 148 |
|
}, |
|
{ |
|
"completion_length": 236.11310124397278, |
|
"epoch": 3.671850699844479, |
|
"grad_norm": 7.490208148956299, |
|
"kl": 107.21742415428162, |
|
"learning_rate": 3.8926677920936093e-07, |
|
"loss": 0.1072, |
|
"reward": 0.9279613336548209, |
|
"reward_std": 0.3398581095971167, |
|
"rewards/equation_reward_func": 0.10950893117114902, |
|
"rewards/format_reward_func": 0.8184523917734623, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 232.6324450969696, |
|
"epoch": 3.721617418351477, |
|
"grad_norm": 8.263045310974121, |
|
"kl": 95.21342560648918, |
|
"learning_rate": 3.862604602152464e-07, |
|
"loss": 0.0952, |
|
"reward": 0.9363021049648523, |
|
"reward_std": 0.32941144425421953, |
|
"rewards/equation_reward_func": 0.1141294669941999, |
|
"rewards/format_reward_func": 0.8221726296469569, |
|
"step": 152 |
|
}, |
|
{ |
|
"completion_length": 236.56548023223877, |
|
"epoch": 3.771384136858476, |
|
"grad_norm": 7.246555805206299, |
|
"kl": 99.01788094639778, |
|
"learning_rate": 3.8322584365434934e-07, |
|
"loss": 0.099, |
|
"reward": 0.9559821607545018, |
|
"reward_std": 0.33185221953317523, |
|
"rewards/equation_reward_func": 0.1219047627528198, |
|
"rewards/format_reward_func": 0.8340773908421397, |
|
"step": 154 |
|
}, |
|
{ |
|
"completion_length": 228.0424153804779, |
|
"epoch": 3.8211508553654743, |
|
"grad_norm": 10.294706344604492, |
|
"kl": 93.09150391817093, |
|
"learning_rate": 3.8016355973344173e-07, |
|
"loss": 0.0931, |
|
"reward": 0.9507738389074802, |
|
"reward_std": 0.3249152133939788, |
|
"rewards/equation_reward_func": 0.1226488123065792, |
|
"rewards/format_reward_func": 0.8281250102445483, |
|
"step": 156 |
|
}, |
|
{ |
|
"completion_length": 233.0967297554016, |
|
"epoch": 3.8709175738724726, |
|
"grad_norm": 139.66383361816406, |
|
"kl": 165.564805701375, |
|
"learning_rate": 3.7707424440504863e-07, |
|
"loss": 0.1656, |
|
"reward": 0.9361756034195423, |
|
"reward_std": 0.3188853694591671, |
|
"rewards/equation_reward_func": 0.11028273997362703, |
|
"rewards/format_reward_func": 0.8258928675204515, |
|
"step": 158 |
|
}, |
|
{ |
|
"completion_length": 228.4992606639862, |
|
"epoch": 3.9206842923794714, |
|
"grad_norm": 10.041633605957031, |
|
"kl": 112.89981982111931, |
|
"learning_rate": 3.739585392353787e-07, |
|
"loss": 0.1129, |
|
"reward": 0.9519345378503203, |
|
"reward_std": 0.3280767105752602, |
|
"rewards/equation_reward_func": 0.12306547746993601, |
|
"rewards/format_reward_func": 0.8288690578192472, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 235.31845617294312, |
|
"epoch": 3.9704510108864697, |
|
"grad_norm": 32.37154006958008, |
|
"kl": 111.56759664416313, |
|
"learning_rate": 3.7081709127108767e-07, |
|
"loss": 0.1116, |
|
"reward": 0.9412202695384622, |
|
"reward_std": 0.3349445411004126, |
|
"rewards/equation_reward_func": 0.11681547819171101, |
|
"rewards/format_reward_func": 0.8244047723710537, |
|
"step": 162 |
|
}, |
|
{ |
|
"completion_length": 229.56516787880346, |
|
"epoch": 4.0, |
|
"grad_norm": 2.2472083568573, |
|
"kl": 72.366690811358, |
|
"learning_rate": 3.6765055290490513e-07, |
|
"loss": 0.043, |
|
"reward": 0.9589724556395882, |
|
"reward_std": 0.32855926924630213, |
|
"rewards/equation_reward_func": 0.11937343593882888, |
|
"rewards/format_reward_func": 0.8395990070543791, |
|
"step": 164 |
|
}, |
|
{ |
|
"completion_length": 234.28199791908264, |
|
"epoch": 4.049766718506999, |
|
"grad_norm": 5.950051784515381, |
|
"kl": 91.56283241510391, |
|
"learning_rate": 3.644595817401501e-07, |
|
"loss": 0.0916, |
|
"reward": 0.9548512073233724, |
|
"reward_std": 0.3294029913377017, |
|
"rewards/equation_reward_func": 0.12226190732326359, |
|
"rewards/format_reward_func": 0.8325892956927419, |
|
"step": 166 |
|
}, |
|
{ |
|
"completion_length": 224.3154797554016, |
|
"epoch": 4.099533437013997, |
|
"grad_norm": 5.77777624130249, |
|
"kl": 109.78453694283962, |
|
"learning_rate": 3.6124484045416483e-07, |
|
"loss": 0.1098, |
|
"reward": 0.9643006175756454, |
|
"reward_std": 0.3065133059863001, |
|
"rewards/equation_reward_func": 0.11683035839814693, |
|
"rewards/format_reward_func": 0.8474702471867204, |
|
"step": 168 |
|
}, |
|
{ |
|
"completion_length": 232.4531307220459, |
|
"epoch": 4.149300155520995, |
|
"grad_norm": 11.240461349487305, |
|
"kl": 93.93413895368576, |
|
"learning_rate": 3.580069966606949e-07, |
|
"loss": 0.0939, |
|
"reward": 0.9517262112349272, |
|
"reward_std": 0.33299871208146214, |
|
"rewards/equation_reward_func": 0.12285714561585337, |
|
"rewards/format_reward_func": 0.8288690578192472, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 238.5349748134613, |
|
"epoch": 4.199066874027994, |
|
"grad_norm": 9.954313278198242, |
|
"kl": 92.95667415857315, |
|
"learning_rate": 3.547467227712444e-07, |
|
"loss": 0.093, |
|
"reward": 0.9439657982438803, |
|
"reward_std": 0.32343020231928676, |
|
"rewards/equation_reward_func": 0.12328125181375071, |
|
"rewards/format_reward_func": 0.8206845344975591, |
|
"step": 172 |
|
}, |
|
{ |
|
"completion_length": 233.2410750389099, |
|
"epoch": 4.248833592534992, |
|
"grad_norm": 8.026561737060547, |
|
"kl": 106.31019732356071, |
|
"learning_rate": 3.5146469585543386e-07, |
|
"loss": 0.1063, |
|
"reward": 0.9394568698480725, |
|
"reward_std": 0.33057266171090305, |
|
"rewards/equation_reward_func": 0.1202604187419638, |
|
"rewards/format_reward_func": 0.8191964393481612, |
|
"step": 174 |
|
}, |
|
{ |
|
"completion_length": 229.54911065101624, |
|
"epoch": 4.298600311041991, |
|
"grad_norm": 15.306875228881836, |
|
"kl": 105.12126851081848, |
|
"learning_rate": 3.481615975003922e-07, |
|
"loss": 0.1051, |
|
"reward": 0.9428199585527182, |
|
"reward_std": 0.32898445217870176, |
|
"rewards/equation_reward_func": 0.11618303920840845, |
|
"rewards/format_reward_func": 0.8266369150951505, |
|
"step": 176 |
|
}, |
|
{ |
|
"completion_length": 224.49479627609253, |
|
"epoch": 4.348367029548989, |
|
"grad_norm": 19.5836238861084, |
|
"kl": 118.43205219507217, |
|
"learning_rate": 3.448381136692089e-07, |
|
"loss": 0.1184, |
|
"reward": 0.9482961613684893, |
|
"reward_std": 0.334218667820096, |
|
"rewards/equation_reward_func": 0.12240327592007816, |
|
"rewards/format_reward_func": 0.8258928675204515, |
|
"step": 178 |
|
}, |
|
{ |
|
"completion_length": 227.92634320259094, |
|
"epoch": 4.3981337480559874, |
|
"grad_norm": 11.565065383911133, |
|
"kl": 99.0534802004695, |
|
"learning_rate": 3.4149493455847897e-07, |
|
"loss": 0.0991, |
|
"reward": 0.9628125233575702, |
|
"reward_std": 0.31036914128344506, |
|
"rewards/equation_reward_func": 0.12055059859994799, |
|
"rewards/format_reward_func": 0.8422619141638279, |
|
"step": 180 |
|
}, |
|
{ |
|
"completion_length": 228.84821903705597, |
|
"epoch": 4.447900466562986, |
|
"grad_norm": 11.746493339538574, |
|
"kl": 94.81702630221844, |
|
"learning_rate": 3.3813275445496766e-07, |
|
"loss": 0.0948, |
|
"reward": 0.9356473386287689, |
|
"reward_std": 0.34875215077772737, |
|
"rewards/equation_reward_func": 0.1201711333123967, |
|
"rewards/format_reward_func": 0.8154762014746666, |
|
"step": 182 |
|
}, |
|
{ |
|
"completion_length": 230.1302137374878, |
|
"epoch": 4.497667185069984, |
|
"grad_norm": 6.934517860412598, |
|
"kl": 104.89102700352669, |
|
"learning_rate": 3.347522715914262e-07, |
|
"loss": 0.1049, |
|
"reward": 0.941220261156559, |
|
"reward_std": 0.3412000882672146, |
|
"rewards/equation_reward_func": 0.11607142945285887, |
|
"rewards/format_reward_func": 0.8251488199457526, |
|
"step": 184 |
|
}, |
|
{ |
|
"completion_length": 237.3623547554016, |
|
"epoch": 4.547433903576983, |
|
"grad_norm": 8.869417190551758, |
|
"kl": 115.99200537800789, |
|
"learning_rate": 3.313541880015877e-07, |
|
"loss": 0.116, |
|
"reward": 0.9583631167188287, |
|
"reward_std": 0.3228510069893673, |
|
"rewards/equation_reward_func": 0.11982142983470112, |
|
"rewards/format_reward_func": 0.8385416762903333, |
|
"step": 186 |
|
}, |
|
{ |
|
"completion_length": 214.82440757751465, |
|
"epoch": 4.597200622083982, |
|
"grad_norm": 6.815430164337158, |
|
"kl": 199.46320822834969, |
|
"learning_rate": 3.279392093743747e-07, |
|
"loss": 0.1995, |
|
"reward": 0.943742586299777, |
|
"reward_std": 0.3288773400709033, |
|
"rewards/equation_reward_func": 0.12305803812341765, |
|
"rewards/format_reward_func": 0.8206845344975591, |
|
"step": 188 |
|
}, |
|
{ |
|
"completion_length": 223.35640358924866, |
|
"epoch": 4.6469673405909795, |
|
"grad_norm": 14.837751388549805, |
|
"kl": 92.87921611964703, |
|
"learning_rate": 3.245080449073459e-07, |
|
"loss": 0.0929, |
|
"reward": 0.9597470490261912, |
|
"reward_std": 0.3248986240942031, |
|
"rewards/equation_reward_func": 0.1286458355607465, |
|
"rewards/format_reward_func": 0.831101200543344, |
|
"step": 190 |
|
}, |
|
{ |
|
"completion_length": 227.95610523223877, |
|
"epoch": 4.696734059097978, |
|
"grad_norm": 7.916383266448975, |
|
"kl": 84.76704713702202, |
|
"learning_rate": 3.210614071594162e-07, |
|
"loss": 0.0848, |
|
"reward": 0.9576339479535818, |
|
"reward_std": 0.32447043631691486, |
|
"rewards/equation_reward_func": 0.12281250266823918, |
|
"rewards/format_reward_func": 0.8348214384168386, |
|
"step": 192 |
|
}, |
|
{ |
|
"completion_length": 230.10714721679688, |
|
"epoch": 4.746500777604977, |
|
"grad_norm": 57.35398864746094, |
|
"kl": 135.87937945127487, |
|
"learning_rate": 3.1760001190287695e-07, |
|
"loss": 0.1359, |
|
"reward": 0.9570536017417908, |
|
"reward_std": 0.3135519213974476, |
|
"rewards/equation_reward_func": 0.1185119078727439, |
|
"rewards/format_reward_func": 0.8385416762903333, |
|
"step": 194 |
|
}, |
|
{ |
|
"completion_length": 223.31771206855774, |
|
"epoch": 4.796267496111975, |
|
"grad_norm": 62.215885162353516, |
|
"kl": 186.61959165334702, |
|
"learning_rate": 3.141245779747502e-07, |
|
"loss": 0.1866, |
|
"reward": 0.9525744337588549, |
|
"reward_std": 0.32037264411337674, |
|
"rewards/equation_reward_func": 0.1207291692844592, |
|
"rewards/format_reward_func": 0.831845248118043, |
|
"step": 196 |
|
}, |
|
{ |
|
"completion_length": 228.9754502773285, |
|
"epoch": 4.846034214618974, |
|
"grad_norm": 16.842937469482422, |
|
"kl": 158.74550223350525, |
|
"learning_rate": 3.106358271275056e-07, |
|
"loss": 0.1587, |
|
"reward": 0.9604613315314054, |
|
"reward_std": 0.3215845621889457, |
|
"rewards/equation_reward_func": 0.13010416802717373, |
|
"rewards/format_reward_func": 0.8303571529686451, |
|
"step": 198 |
|
}, |
|
{ |
|
"completion_length": 221.91443800926208, |
|
"epoch": 4.895800933125972, |
|
"grad_norm": 9.416200637817383, |
|
"kl": 88.87509013712406, |
|
"learning_rate": 3.0713448387917227e-07, |
|
"loss": 0.0889, |
|
"reward": 0.9541145944967866, |
|
"reward_std": 0.3186596838058904, |
|
"rewards/equation_reward_func": 0.12078125210246071, |
|
"rewards/format_reward_func": 0.8333333432674408, |
|
"step": 200 |
|
}, |
|
{ |
|
"completion_length": 226.09821820259094, |
|
"epoch": 4.94556765163297, |
|
"grad_norm": 7.521836280822754, |
|
"kl": 72.75468015670776, |
|
"learning_rate": 3.0362127536287636e-07, |
|
"loss": 0.0728, |
|
"reward": 0.9630208564922214, |
|
"reward_std": 0.3088786149164662, |
|
"rewards/equation_reward_func": 0.12224702612729743, |
|
"rewards/format_reward_func": 0.84077381901443, |
|
"step": 202 |
|
}, |
|
{ |
|
"completion_length": 220.4330394268036, |
|
"epoch": 4.995334370139969, |
|
"grad_norm": 6.782564163208008, |
|
"kl": 95.6797014772892, |
|
"learning_rate": 3.0009693117583523e-07, |
|
"loss": 0.0957, |
|
"reward": 0.9688095478340983, |
|
"reward_std": 0.3198074179235846, |
|
"rewards/equation_reward_func": 0.1258035731734708, |
|
"rewards/format_reward_func": 0.8430059617385268, |
|
"step": 204 |
|
}, |
|
{ |
|
"completion_length": 223.10902605558695, |
|
"epoch": 5.024883359253499, |
|
"grad_norm": 14.31734561920166, |
|
"kl": 147.89477518985146, |
|
"learning_rate": 2.965621832278401e-07, |
|
"loss": 0.0878, |
|
"reward": 0.9550376173697019, |
|
"reward_std": 0.32527210092858266, |
|
"rewards/equation_reward_func": 0.12421052804902981, |
|
"rewards/format_reward_func": 0.8308270777526655, |
|
"step": 206 |
|
}, |
|
{ |
|
"completion_length": 219.39360547065735, |
|
"epoch": 5.074650077760498, |
|
"grad_norm": 13.403369903564453, |
|
"kl": 121.50938338041306, |
|
"learning_rate": 2.9301776558925875e-07, |
|
"loss": 0.1215, |
|
"reward": 0.9764509275555611, |
|
"reward_std": 0.3119593682931736, |
|
"rewards/equation_reward_func": 0.12898065720219165, |
|
"rewards/format_reward_func": 0.8474702471867204, |
|
"step": 208 |
|
}, |
|
{ |
|
"completion_length": 221.6532769203186, |
|
"epoch": 5.1244167962674965, |
|
"grad_norm": 8.643827438354492, |
|
"kl": 100.20702999830246, |
|
"learning_rate": 2.894644143385885e-07, |
|
"loss": 0.1002, |
|
"reward": 0.9445982417091727, |
|
"reward_std": 0.33305030199699104, |
|
"rewards/equation_reward_func": 0.12614583619870245, |
|
"rewards/format_reward_func": 0.8184523917734623, |
|
"step": 210 |
|
}, |
|
{ |
|
"completion_length": 222.4598252773285, |
|
"epoch": 5.174183514774494, |
|
"grad_norm": 9.457352638244629, |
|
"kl": 77.77037671208382, |
|
"learning_rate": 2.859028674095937e-07, |
|
"loss": 0.0778, |
|
"reward": 0.9626786010339856, |
|
"reward_std": 0.32365662849042565, |
|
"rewards/equation_reward_func": 0.12711309758014977, |
|
"rewards/format_reward_func": 0.8355654859915376, |
|
"step": 212 |
|
}, |
|
{ |
|
"completion_length": 216.18601536750793, |
|
"epoch": 5.223950233281493, |
|
"grad_norm": 5.520247459411621, |
|
"kl": 108.70252940058708, |
|
"learning_rate": 2.823338644380566e-07, |
|
"loss": 0.1087, |
|
"reward": 0.9588839625939727, |
|
"reward_std": 0.31889201514422894, |
|
"rewards/equation_reward_func": 0.12629464478231966, |
|
"rewards/format_reward_func": 0.8325892956927419, |
|
"step": 214 |
|
}, |
|
{ |
|
"completion_length": 217.8199441432953, |
|
"epoch": 5.273716951788492, |
|
"grad_norm": 8.16267204284668, |
|
"kl": 144.65436732769012, |
|
"learning_rate": 2.7875814660817504e-07, |
|
"loss": 0.1447, |
|
"reward": 0.9597247261554003, |
|
"reward_std": 0.33260277623776346, |
|
"rewards/equation_reward_func": 0.12490327673731372, |
|
"rewards/format_reward_func": 0.8348214384168386, |
|
"step": 216 |
|
}, |
|
{ |
|
"completion_length": 216.7961344718933, |
|
"epoch": 5.32348367029549, |
|
"grad_norm": 5.935340404510498, |
|
"kl": 100.77805884182453, |
|
"learning_rate": 2.751764564986396e-07, |
|
"loss": 0.1008, |
|
"reward": 0.9963318752124906, |
|
"reward_std": 0.29392986895982176, |
|
"rewards/equation_reward_func": 0.13174851471558213, |
|
"rewards/format_reward_func": 0.8645833414047956, |
|
"step": 218 |
|
}, |
|
{ |
|
"completion_length": 220.86533164978027, |
|
"epoch": 5.3732503888024885, |
|
"grad_norm": 4.818342208862305, |
|
"kl": 93.73328730463982, |
|
"learning_rate": 2.715895379284194e-07, |
|
"loss": 0.0937, |
|
"reward": 0.9783780043944716, |
|
"reward_std": 0.30780787393450737, |
|
"rewards/equation_reward_func": 0.12941964401397854, |
|
"rewards/format_reward_func": 0.8489583423361182, |
|
"step": 220 |
|
}, |
|
{ |
|
"completion_length": 213.26414012908936, |
|
"epoch": 5.423017107309486, |
|
"grad_norm": 11.288951873779297, |
|
"kl": 102.97140884399414, |
|
"learning_rate": 2.6799813580229174e-07, |
|
"loss": 0.103, |
|
"reward": 0.9846949661150575, |
|
"reward_std": 0.3030894915573299, |
|
"rewards/equation_reward_func": 0.13573660928523168, |
|
"rewards/format_reward_func": 0.8489583423361182, |
|
"step": 222 |
|
}, |
|
{ |
|
"completion_length": 213.67411065101624, |
|
"epoch": 5.472783825816485, |
|
"grad_norm": 5.07380485534668, |
|
"kl": 106.15928809344769, |
|
"learning_rate": 2.6440299595614606e-07, |
|
"loss": 0.1062, |
|
"reward": 0.9686905024573207, |
|
"reward_std": 0.3141279687406495, |
|
"rewards/equation_reward_func": 0.12791666924022138, |
|
"rewards/format_reward_func": 0.84077381901443, |
|
"step": 224 |
|
}, |
|
{ |
|
"completion_length": 217.79241490364075, |
|
"epoch": 5.522550544323484, |
|
"grad_norm": 13.45128345489502, |
|
"kl": 100.54976835846901, |
|
"learning_rate": 2.6080486500209347e-07, |
|
"loss": 0.1006, |
|
"reward": 0.9762128228321671, |
|
"reward_std": 0.30432219384238124, |
|
"rewards/equation_reward_func": 0.11906994262244552, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 226 |
|
}, |
|
{ |
|
"completion_length": 213.45833671092987, |
|
"epoch": 5.572317262830482, |
|
"grad_norm": 5.673408031463623, |
|
"kl": 107.341905772686, |
|
"learning_rate": 2.572044901734166e-07, |
|
"loss": 0.1073, |
|
"reward": 0.987425615079701, |
|
"reward_std": 0.3038695892319083, |
|
"rewards/equation_reward_func": 0.13400297972839326, |
|
"rewards/format_reward_func": 0.8534226277843118, |
|
"step": 228 |
|
}, |
|
{ |
|
"completion_length": 215.05655169487, |
|
"epoch": 5.6220839813374806, |
|
"grad_norm": 10.91450023651123, |
|
"kl": 104.37228363752365, |
|
"learning_rate": 2.536026191693893e-07, |
|
"loss": 0.1044, |
|
"reward": 0.9818452587351203, |
|
"reward_std": 0.3116321135312319, |
|
"rewards/equation_reward_func": 0.13511905062478036, |
|
"rewards/format_reward_func": 0.8467261996120214, |
|
"step": 230 |
|
}, |
|
{ |
|
"completion_length": 221.6837841272354, |
|
"epoch": 5.671850699844479, |
|
"grad_norm": 7.277580738067627, |
|
"kl": 102.12091478705406, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.1021, |
|
"reward": 0.9730580523610115, |
|
"reward_std": 0.31462284130975604, |
|
"rewards/equation_reward_func": 0.13079613307490945, |
|
"rewards/format_reward_func": 0.8422619141638279, |
|
"step": 232 |
|
}, |
|
{ |
|
"completion_length": 213.37202835083008, |
|
"epoch": 5.721617418351477, |
|
"grad_norm": 6.820380210876465, |
|
"kl": 110.825957685709, |
|
"learning_rate": 2.4639738083061073e-07, |
|
"loss": 0.1108, |
|
"reward": 0.9786160914227366, |
|
"reward_std": 0.2993557241279632, |
|
"rewards/equation_reward_func": 0.12370535923400894, |
|
"rewards/format_reward_func": 0.8549107229337096, |
|
"step": 234 |
|
}, |
|
{ |
|
"completion_length": 219.30580830574036, |
|
"epoch": 5.771384136858476, |
|
"grad_norm": 5.3565287590026855, |
|
"kl": 112.17789036035538, |
|
"learning_rate": 2.4279550982658345e-07, |
|
"loss": 0.1122, |
|
"reward": 0.9671205542981625, |
|
"reward_std": 0.31208174559287727, |
|
"rewards/equation_reward_func": 0.12783482612576336, |
|
"rewards/format_reward_func": 0.8392857238650322, |
|
"step": 236 |
|
}, |
|
{ |
|
"completion_length": 224.95908081531525, |
|
"epoch": 5.821150855365475, |
|
"grad_norm": 8.847481727600098, |
|
"kl": 89.92903319001198, |
|
"learning_rate": 2.3919513499790646e-07, |
|
"loss": 0.0899, |
|
"reward": 0.9810863379389048, |
|
"reward_std": 0.2922416543588042, |
|
"rewards/equation_reward_func": 0.12840774038340896, |
|
"rewards/format_reward_func": 0.8526785802096128, |
|
"step": 238 |
|
}, |
|
{ |
|
"completion_length": 213.0193498134613, |
|
"epoch": 5.870917573872473, |
|
"grad_norm": 8.37535285949707, |
|
"kl": 91.88962814211845, |
|
"learning_rate": 2.3559700404385394e-07, |
|
"loss": 0.0919, |
|
"reward": 0.9962723441421986, |
|
"reward_std": 0.2967066681012511, |
|
"rewards/equation_reward_func": 0.133921132190153, |
|
"rewards/format_reward_func": 0.8623511986806989, |
|
"step": 240 |
|
}, |
|
{ |
|
"completion_length": 215.90848636627197, |
|
"epoch": 5.920684292379471, |
|
"grad_norm": 11.996051788330078, |
|
"kl": 123.01588797569275, |
|
"learning_rate": 2.3200186419770823e-07, |
|
"loss": 0.123, |
|
"reward": 0.9946056772023439, |
|
"reward_std": 0.2892517475411296, |
|
"rewards/equation_reward_func": 0.137462800135836, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 242 |
|
}, |
|
{ |
|
"completion_length": 207.04836869239807, |
|
"epoch": 5.970451010886469, |
|
"grad_norm": 20.457063674926758, |
|
"kl": 106.15135931968689, |
|
"learning_rate": 2.284104620715807e-07, |
|
"loss": 0.1062, |
|
"reward": 0.9806399028748274, |
|
"reward_std": 0.3068936343770474, |
|
"rewards/equation_reward_func": 0.12870536016998813, |
|
"rewards/format_reward_func": 0.8519345326349139, |
|
"step": 244 |
|
}, |
|
{ |
|
"completion_length": 204.9586510909231, |
|
"epoch": 6.0, |
|
"grad_norm": 2.4967029094696045, |
|
"kl": 93.90482782062732, |
|
"learning_rate": 2.2482354350136043e-07, |
|
"loss": 0.0558, |
|
"reward": 0.9888596707268765, |
|
"reward_std": 0.30840789526700974, |
|
"rewards/equation_reward_func": 0.12795739620923996, |
|
"rewards/format_reward_func": 0.8609022639299694, |
|
"step": 246 |
|
}, |
|
{ |
|
"completion_length": 216.3809564113617, |
|
"epoch": 6.049766718506999, |
|
"grad_norm": 14.058419227600098, |
|
"kl": 95.39833578467369, |
|
"learning_rate": 2.2124185339182496e-07, |
|
"loss": 0.0954, |
|
"reward": 0.9900744194164872, |
|
"reward_std": 0.2951568606076762, |
|
"rewards/equation_reward_func": 0.12772321642842144, |
|
"rewards/format_reward_func": 0.8623511986806989, |
|
"step": 248 |
|
}, |
|
{ |
|
"completion_length": 215.97768187522888, |
|
"epoch": 6.099533437013997, |
|
"grad_norm": 8.967341423034668, |
|
"kl": 130.6636378467083, |
|
"learning_rate": 2.1766613556194344e-07, |
|
"loss": 0.1307, |
|
"reward": 0.9844643184915185, |
|
"reward_std": 0.3151676075067371, |
|
"rewards/equation_reward_func": 0.13252976315561682, |
|
"rewards/format_reward_func": 0.8519345326349139, |
|
"step": 250 |
|
}, |
|
{ |
|
"completion_length": 215.37574863433838, |
|
"epoch": 6.149300155520995, |
|
"grad_norm": 8.772759437561035, |
|
"kl": 123.94878220558167, |
|
"learning_rate": 2.1409713259040628e-07, |
|
"loss": 0.1239, |
|
"reward": 0.9948214497417212, |
|
"reward_std": 0.2969008543295786, |
|
"rewards/equation_reward_func": 0.1324702415149659, |
|
"rewards/format_reward_func": 0.8623511986806989, |
|
"step": 252 |
|
}, |
|
{ |
|
"completion_length": 204.1808078289032, |
|
"epoch": 6.199066874027994, |
|
"grad_norm": 8.49015998840332, |
|
"kl": 115.75950698554516, |
|
"learning_rate": 2.105355856614115e-07, |
|
"loss": 0.1158, |
|
"reward": 0.9843080537393689, |
|
"reward_std": 0.2868085833033547, |
|
"rewards/equation_reward_func": 0.12716518191155046, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 254 |
|
}, |
|
{ |
|
"completion_length": 227.52753329277039, |
|
"epoch": 6.248833592534992, |
|
"grad_norm": 7.344859600067139, |
|
"kl": 94.37721315026283, |
|
"learning_rate": 2.069822344107413e-07, |
|
"loss": 0.0944, |
|
"reward": 0.9889434743672609, |
|
"reward_std": 0.2974336859770119, |
|
"rewards/equation_reward_func": 0.1258482167031616, |
|
"rewards/format_reward_func": 0.8630952462553978, |
|
"step": 256 |
|
}, |
|
{ |
|
"completion_length": 213.5558066368103, |
|
"epoch": 6.298600311041991, |
|
"grad_norm": 7.32083797454834, |
|
"kl": 90.64998903870583, |
|
"learning_rate": 2.034378167721599e-07, |
|
"loss": 0.0907, |
|
"reward": 0.9939955649897456, |
|
"reward_std": 0.2892067184438929, |
|
"rewards/equation_reward_func": 0.13536458619637415, |
|
"rewards/format_reward_func": 0.8586309608072042, |
|
"step": 258 |
|
}, |
|
{ |
|
"completion_length": 223.00298142433167, |
|
"epoch": 6.348367029548989, |
|
"grad_norm": 15.500726699829102, |
|
"kl": 107.22374087572098, |
|
"learning_rate": 1.9990306882416485e-07, |
|
"loss": 0.1072, |
|
"reward": 0.9919122280552983, |
|
"reward_std": 0.291991472709924, |
|
"rewards/equation_reward_func": 0.12732887221500278, |
|
"rewards/format_reward_func": 0.8645833414047956, |
|
"step": 260 |
|
}, |
|
{ |
|
"completion_length": 213.41815984249115, |
|
"epoch": 6.3981337480559874, |
|
"grad_norm": 21.084169387817383, |
|
"kl": 135.0164725780487, |
|
"learning_rate": 1.9637872463712362e-07, |
|
"loss": 0.135, |
|
"reward": 0.9947991305962205, |
|
"reward_std": 0.2926121799973771, |
|
"rewards/equation_reward_func": 0.1294717276468873, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 262 |
|
}, |
|
{ |
|
"completion_length": 218.77678954601288, |
|
"epoch": 6.447900466562986, |
|
"grad_norm": 7.433415412902832, |
|
"kl": 126.0510938167572, |
|
"learning_rate": 1.9286551612082773e-07, |
|
"loss": 0.1261, |
|
"reward": 0.9961086511611938, |
|
"reward_std": 0.29620418848935515, |
|
"rewards/equation_reward_func": 0.13152529881335795, |
|
"rewards/format_reward_func": 0.8645833414047956, |
|
"step": 264 |
|
}, |
|
{ |
|
"completion_length": 224.87351608276367, |
|
"epoch": 6.497667185069984, |
|
"grad_norm": 10.848310470581055, |
|
"kl": 108.13037157058716, |
|
"learning_rate": 1.8936417287249446e-07, |
|
"loss": 0.1081, |
|
"reward": 0.9826934821903706, |
|
"reward_std": 0.29942381975706667, |
|
"rewards/equation_reward_func": 0.12703869334654883, |
|
"rewards/format_reward_func": 0.8556547705084085, |
|
"step": 266 |
|
}, |
|
{ |
|
"completion_length": 205.85045075416565, |
|
"epoch": 6.547433903576983, |
|
"grad_norm": 5.818633556365967, |
|
"kl": 93.97169226408005, |
|
"learning_rate": 1.8587542202524985e-07, |
|
"loss": 0.094, |
|
"reward": 0.9770759316161275, |
|
"reward_std": 0.3137034277897328, |
|
"rewards/equation_reward_func": 0.1348139907931909, |
|
"rewards/format_reward_func": 0.8422619141638279, |
|
"step": 268 |
|
}, |
|
{ |
|
"completion_length": 225.59821772575378, |
|
"epoch": 6.597200622083982, |
|
"grad_norm": 8.91329574584961, |
|
"kl": 92.77950745820999, |
|
"learning_rate": 1.82399988097123e-07, |
|
"loss": 0.0928, |
|
"reward": 0.9724628226831555, |
|
"reward_std": 0.31350986543111503, |
|
"rewards/equation_reward_func": 0.12722470529843122, |
|
"rewards/format_reward_func": 0.8452381044626236, |
|
"step": 270 |
|
}, |
|
{ |
|
"completion_length": 210.45536065101624, |
|
"epoch": 6.6469673405909795, |
|
"grad_norm": 66.8660659790039, |
|
"kl": 119.46630339324474, |
|
"learning_rate": 1.7893859284058378e-07, |
|
"loss": 0.1195, |
|
"reward": 0.9536458635702729, |
|
"reward_std": 0.33827802538871765, |
|
"rewards/equation_reward_func": 0.12626488378737122, |
|
"rewards/format_reward_func": 0.8273809626698494, |
|
"step": 272 |
|
}, |
|
{ |
|
"completion_length": 213.05655217170715, |
|
"epoch": 6.696734059097978, |
|
"grad_norm": 10.287453651428223, |
|
"kl": 129.00853845477104, |
|
"learning_rate": 1.7549195509265407e-07, |
|
"loss": 0.129, |
|
"reward": 1.006264909170568, |
|
"reward_std": 0.2873664147919044, |
|
"rewards/equation_reward_func": 0.1349851224804297, |
|
"rewards/format_reward_func": 0.871279769577086, |
|
"step": 274 |
|
}, |
|
{ |
|
"completion_length": 208.0654811859131, |
|
"epoch": 6.746500777604977, |
|
"grad_norm": 9.000530242919922, |
|
"kl": 123.86803221702576, |
|
"learning_rate": 1.7206079062562536e-07, |
|
"loss": 0.1239, |
|
"reward": 1.0023512253537774, |
|
"reward_std": 0.2862590797012672, |
|
"rewards/equation_reward_func": 0.13404762104619294, |
|
"rewards/format_reward_func": 0.8683035792782903, |
|
"step": 276 |
|
}, |
|
{ |
|
"completion_length": 215.65551018714905, |
|
"epoch": 6.796267496111975, |
|
"grad_norm": 7.632518768310547, |
|
"kl": 110.37519046664238, |
|
"learning_rate": 1.6864581199841226e-07, |
|
"loss": 0.1104, |
|
"reward": 0.9910863311961293, |
|
"reward_std": 0.3006105379899964, |
|
"rewards/equation_reward_func": 0.1354315506760031, |
|
"rewards/format_reward_func": 0.8556547705084085, |
|
"step": 278 |
|
}, |
|
{ |
|
"completion_length": 218.75967693328857, |
|
"epoch": 6.846034214618974, |
|
"grad_norm": 8.187287330627441, |
|
"kl": 96.99012359976768, |
|
"learning_rate": 1.6524772840857388e-07, |
|
"loss": 0.097, |
|
"reward": 0.9843527041375637, |
|
"reward_std": 0.3118166974745691, |
|
"rewards/equation_reward_func": 0.13241815741639584, |
|
"rewards/format_reward_func": 0.8519345326349139, |
|
"step": 280 |
|
}, |
|
{ |
|
"completion_length": 215.47321796417236, |
|
"epoch": 6.895800933125972, |
|
"grad_norm": 6.7685017585754395, |
|
"kl": 102.11789983510971, |
|
"learning_rate": 1.6186724554503237e-07, |
|
"loss": 0.1021, |
|
"reward": 0.9951934739947319, |
|
"reward_std": 0.29620578815229237, |
|
"rewards/equation_reward_func": 0.13805059966398403, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 282 |
|
}, |
|
{ |
|
"completion_length": 217.3355691432953, |
|
"epoch": 6.94556765163297, |
|
"grad_norm": 7.022984504699707, |
|
"kl": 123.25619751214981, |
|
"learning_rate": 1.5850506544152103e-07, |
|
"loss": 0.1233, |
|
"reward": 0.9815848367288709, |
|
"reward_std": 0.3131761790718883, |
|
"rewards/equation_reward_func": 0.13560268166474998, |
|
"rewards/format_reward_func": 0.8459821520373225, |
|
"step": 284 |
|
}, |
|
{ |
|
"completion_length": 226.38690876960754, |
|
"epoch": 6.995334370139969, |
|
"grad_norm": 7.854581832885742, |
|
"kl": 118.38798774778843, |
|
"learning_rate": 1.5516188633079107e-07, |
|
"loss": 0.1184, |
|
"reward": 0.9945535939186811, |
|
"reward_std": 0.2934186675120145, |
|
"rewards/equation_reward_func": 0.13145833683665842, |
|
"rewards/format_reward_func": 0.8630952462553978, |
|
"step": 286 |
|
}, |
|
{ |
|
"completion_length": 209.86215812281557, |
|
"epoch": 7.024883359253499, |
|
"grad_norm": 7.611196994781494, |
|
"kl": 103.30944246994822, |
|
"learning_rate": 1.5183840249960784e-07, |
|
"loss": 0.0613, |
|
"reward": 1.0007143130427913, |
|
"reward_std": 0.2819862887263298, |
|
"rewards/equation_reward_func": 0.13354636826797536, |
|
"rewards/format_reward_func": 0.8671679277169076, |
|
"step": 288 |
|
}, |
|
{ |
|
"completion_length": 215.6183067560196, |
|
"epoch": 7.074650077760498, |
|
"grad_norm": 189.0833740234375, |
|
"kl": 133.04253885149956, |
|
"learning_rate": 1.4853530414456612e-07, |
|
"loss": 0.133, |
|
"reward": 1.0034300796687603, |
|
"reward_std": 0.29149999690707773, |
|
"rewards/equation_reward_func": 0.13289434765465558, |
|
"rewards/format_reward_func": 0.870535722002387, |
|
"step": 290 |
|
}, |
|
{ |
|
"completion_length": 205.84524059295654, |
|
"epoch": 7.1244167962674965, |
|
"grad_norm": 7.47841739654541, |
|
"kl": 92.62634913623333, |
|
"learning_rate": 1.4525327722875568e-07, |
|
"loss": 0.0926, |
|
"reward": 1.006636930629611, |
|
"reward_std": 0.29602639051154256, |
|
"rewards/equation_reward_func": 0.1413095265161246, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 292 |
|
}, |
|
{ |
|
"completion_length": 216.39211654663086, |
|
"epoch": 7.174183514774494, |
|
"grad_norm": 60.437217712402344, |
|
"kl": 154.1758649945259, |
|
"learning_rate": 1.4199300333930515e-07, |
|
"loss": 0.1542, |
|
"reward": 0.981852700933814, |
|
"reward_std": 0.3057260474888608, |
|
"rewards/equation_reward_func": 0.1269419682212174, |
|
"rewards/format_reward_func": 0.8549107229337096, |
|
"step": 294 |
|
}, |
|
{ |
|
"completion_length": 220.96652054786682, |
|
"epoch": 7.223950233281493, |
|
"grad_norm": 8.677336692810059, |
|
"kl": 113.40557888150215, |
|
"learning_rate": 1.3875515954583523e-07, |
|
"loss": 0.1134, |
|
"reward": 0.9865774074569345, |
|
"reward_std": 0.32082074461504817, |
|
"rewards/equation_reward_func": 0.13761905138380826, |
|
"rewards/format_reward_func": 0.8489583423361182, |
|
"step": 296 |
|
}, |
|
{ |
|
"completion_length": 214.886164188385, |
|
"epoch": 7.273716951788492, |
|
"grad_norm": 7.040713787078857, |
|
"kl": 104.41407984495163, |
|
"learning_rate": 1.3554041825985e-07, |
|
"loss": 0.1044, |
|
"reward": 0.9882515128701925, |
|
"reward_std": 0.3001827346161008, |
|
"rewards/equation_reward_func": 0.12515625066589564, |
|
"rewards/format_reward_func": 0.8630952462553978, |
|
"step": 298 |
|
}, |
|
{ |
|
"completion_length": 213.91815853118896, |
|
"epoch": 7.32348367029549, |
|
"grad_norm": 5.163806438446045, |
|
"kl": 110.24214600026608, |
|
"learning_rate": 1.323494470950949e-07, |
|
"loss": 0.1102, |
|
"reward": 0.9937500273808837, |
|
"reward_std": 0.29727713589090854, |
|
"rewards/equation_reward_func": 0.14032738481182605, |
|
"rewards/format_reward_func": 0.8534226277843118, |
|
"step": 300 |
|
}, |
|
{ |
|
"completion_length": 221.51190876960754, |
|
"epoch": 7.3732503888024885, |
|
"grad_norm": 5.808466911315918, |
|
"kl": 105.10413581132889, |
|
"learning_rate": 1.2918290872891236e-07, |
|
"loss": 0.1051, |
|
"reward": 0.9996800795197487, |
|
"reward_std": 0.282098678057082, |
|
"rewards/equation_reward_func": 0.1358407783554867, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 302 |
|
}, |
|
{ |
|
"completion_length": 216.43155217170715, |
|
"epoch": 7.423017107309486, |
|
"grad_norm": 5.777767658233643, |
|
"kl": 105.67957159876823, |
|
"learning_rate": 1.260414607646213e-07, |
|
"loss": 0.1057, |
|
"reward": 0.9903050791472197, |
|
"reward_std": 0.29644761607050896, |
|
"rewards/equation_reward_func": 0.12869791907723993, |
|
"rewards/format_reward_func": 0.861607151106, |
|
"step": 304 |
|
}, |
|
{ |
|
"completion_length": 219.77455735206604, |
|
"epoch": 7.472783825816485, |
|
"grad_norm": 5.2907562255859375, |
|
"kl": 106.08304291963577, |
|
"learning_rate": 1.2292575559495143e-07, |
|
"loss": 0.1061, |
|
"reward": 1.0122916884720325, |
|
"reward_std": 0.28524951916188, |
|
"rewards/equation_reward_func": 0.13803571776952595, |
|
"rewards/format_reward_func": 0.8742559598758817, |
|
"step": 306 |
|
}, |
|
{ |
|
"completion_length": 217.84301114082336, |
|
"epoch": 7.522550544323484, |
|
"grad_norm": 24.005651473999023, |
|
"kl": 124.43787896633148, |
|
"learning_rate": 1.1983644026655835e-07, |
|
"loss": 0.1244, |
|
"reward": 0.9638169929385185, |
|
"reward_std": 0.3310524779371917, |
|
"rewards/equation_reward_func": 0.12973958678776398, |
|
"rewards/format_reward_func": 0.8340773908421397, |
|
"step": 308 |
|
}, |
|
{ |
|
"completion_length": 224.14881300926208, |
|
"epoch": 7.572317262830482, |
|
"grad_norm": 4.903006076812744, |
|
"kl": 116.65262049436569, |
|
"learning_rate": 1.1677415634565066e-07, |
|
"loss": 0.1167, |
|
"reward": 0.9956845454871655, |
|
"reward_std": 0.2962368195876479, |
|
"rewards/equation_reward_func": 0.13556547928601503, |
|
"rewards/format_reward_func": 0.8601190559566021, |
|
"step": 310 |
|
}, |
|
{ |
|
"completion_length": 228.49107551574707, |
|
"epoch": 7.6220839813374806, |
|
"grad_norm": 7.504365921020508, |
|
"kl": 93.5646702349186, |
|
"learning_rate": 1.1373953978475353e-07, |
|
"loss": 0.0936, |
|
"reward": 0.9905357416719198, |
|
"reward_std": 0.29693792841862887, |
|
"rewards/equation_reward_func": 0.13116071629337966, |
|
"rewards/format_reward_func": 0.8593750083819032, |
|
"step": 312 |
|
}, |
|
{ |
|
"completion_length": 210.6443498134613, |
|
"epoch": 7.671850699844479, |
|
"grad_norm": 6.315691947937012, |
|
"kl": 88.66525352746248, |
|
"learning_rate": 1.1073322079063913e-07, |
|
"loss": 0.0887, |
|
"reward": 1.0083631156012416, |
|
"reward_std": 0.2873585835332051, |
|
"rewards/equation_reward_func": 0.13782738358713686, |
|
"rewards/format_reward_func": 0.870535722002387, |
|
"step": 314 |
|
}, |
|
{ |
|
"completion_length": 216.66592574119568, |
|
"epoch": 7.721617418351477, |
|
"grad_norm": 5.726399898529053, |
|
"kl": 94.92659878730774, |
|
"learning_rate": 1.0775582369344946e-07, |
|
"loss": 0.0949, |
|
"reward": 0.9947470482438803, |
|
"reward_std": 0.3048968045040965, |
|
"rewards/equation_reward_func": 0.14206845592707396, |
|
"rewards/format_reward_func": 0.8526785802096128, |
|
"step": 316 |
|
}, |
|
{ |
|
"completion_length": 221.99702906608582, |
|
"epoch": 7.771384136858476, |
|
"grad_norm": 8.633785247802734, |
|
"kl": 127.03314569592476, |
|
"learning_rate": 1.0480796681704077e-07, |
|
"loss": 0.127, |
|
"reward": 0.9732217434793711, |
|
"reward_std": 0.3270513039897196, |
|
"rewards/equation_reward_func": 0.1331919669173658, |
|
"rewards/format_reward_func": 0.8400297714397311, |
|
"step": 318 |
|
}, |
|
{ |
|
"completion_length": 217.00595712661743, |
|
"epoch": 7.821150855365475, |
|
"grad_norm": 10.5038480758667, |
|
"kl": 114.0509955137968, |
|
"learning_rate": 1.018902623505741e-07, |
|
"loss": 0.1141, |
|
"reward": 0.9919643187895417, |
|
"reward_std": 0.2979181023547426, |
|
"rewards/equation_reward_func": 0.1348214315949008, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 320 |
|
}, |
|
{ |
|
"completion_length": 221.0297656059265, |
|
"epoch": 7.870917573872473, |
|
"grad_norm": 5.655003547668457, |
|
"kl": 103.35251066088676, |
|
"learning_rate": 9.900331622138063e-08, |
|
"loss": 0.1034, |
|
"reward": 1.003906267695129, |
|
"reward_std": 0.28754603571724147, |
|
"rewards/equation_reward_func": 0.1363467294140719, |
|
"rewards/format_reward_func": 0.8675595317035913, |
|
"step": 322 |
|
}, |
|
{ |
|
"completion_length": 219.47693920135498, |
|
"epoch": 7.920684292379471, |
|
"grad_norm": 10.707879066467285, |
|
"kl": 114.98984357714653, |
|
"learning_rate": 9.614772796912681e-08, |
|
"loss": 0.115, |
|
"reward": 0.9944940665736794, |
|
"reward_std": 0.31038948777131736, |
|
"rewards/equation_reward_func": 0.1380952401086688, |
|
"rewards/format_reward_func": 0.8563988180831075, |
|
"step": 324 |
|
}, |
|
{ |
|
"completion_length": 205.16146230697632, |
|
"epoch": 7.970451010886469, |
|
"grad_norm": 13.826483726501465, |
|
"kl": 117.3221942782402, |
|
"learning_rate": 9.332409062130686e-08, |
|
"loss": 0.1173, |
|
"reward": 0.9965178854763508, |
|
"reward_std": 0.2970969423186034, |
|
"rewards/equation_reward_func": 0.13788690755609423, |
|
"rewards/format_reward_func": 0.8586309608072042, |
|
"step": 326 |
|
}, |
|
{ |
|
"completion_length": 221.76065625642477, |
|
"epoch": 8.0, |
|
"grad_norm": 2.785691976547241, |
|
"kl": 124.96122721621865, |
|
"learning_rate": 9.053299057008699e-08, |
|
"loss": 0.0742, |
|
"reward": 0.9635088020249417, |
|
"reward_std": 0.33088268221993195, |
|
"rewards/equation_reward_func": 0.12766917579268156, |
|
"rewards/format_reward_func": 0.8358396087822161, |
|
"step": 328 |
|
}, |
|
{ |
|
"completion_length": 224.9680097103119, |
|
"epoch": 8.049766718506998, |
|
"grad_norm": 6.289979934692383, |
|
"kl": 101.34825879335403, |
|
"learning_rate": 8.777500745052743e-08, |
|
"loss": 0.1013, |
|
"reward": 1.0041592558845878, |
|
"reward_std": 0.2731919828802347, |
|
"rewards/equation_reward_func": 0.12543899181764573, |
|
"rewards/format_reward_func": 0.8787202453240752, |
|
"step": 330 |
|
}, |
|
{ |
|
"completion_length": 211.30134320259094, |
|
"epoch": 8.099533437013998, |
|
"grad_norm": 8.478480339050293, |
|
"kl": 95.52975815534592, |
|
"learning_rate": 8.505071402020892e-08, |
|
"loss": 0.0955, |
|
"reward": 1.0018899161368608, |
|
"reward_std": 0.2962851036572829, |
|
"rewards/equation_reward_func": 0.13805059832520783, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 332 |
|
}, |
|
{ |
|
"completion_length": 210.46205806732178, |
|
"epoch": 8.149300155520995, |
|
"grad_norm": 7.800393581390381, |
|
"kl": 93.41920287907124, |
|
"learning_rate": 8.236067604028562e-08, |
|
"loss": 0.0934, |
|
"reward": 1.0006845509633422, |
|
"reward_std": 0.28785590920597315, |
|
"rewards/equation_reward_func": 0.1361011965200305, |
|
"rewards/format_reward_func": 0.8645833414047956, |
|
"step": 334 |
|
}, |
|
{ |
|
"completion_length": 213.39583683013916, |
|
"epoch": 8.199066874027993, |
|
"grad_norm": 14.319684028625488, |
|
"kl": 103.72171029448509, |
|
"learning_rate": 7.970545215799327e-08, |
|
"loss": 0.1037, |
|
"reward": 0.9992336509749293, |
|
"reward_std": 0.2858052357332781, |
|
"rewards/equation_reward_func": 0.1353943480644375, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 336 |
|
}, |
|
{ |
|
"completion_length": 222.47917068004608, |
|
"epoch": 8.248833592534993, |
|
"grad_norm": 9.096053123474121, |
|
"kl": 112.85475918650627, |
|
"learning_rate": 7.708559379063204e-08, |
|
"loss": 0.1129, |
|
"reward": 0.9990848414599895, |
|
"reward_std": 0.28263663535472006, |
|
"rewards/equation_reward_func": 0.13003720622509718, |
|
"rewards/format_reward_func": 0.8690476268529892, |
|
"step": 338 |
|
}, |
|
{ |
|
"completion_length": 218.63988590240479, |
|
"epoch": 8.29860031104199, |
|
"grad_norm": 6.8109869956970215, |
|
"kl": 129.12899693846703, |
|
"learning_rate": 7.45016450110534e-08, |
|
"loss": 0.1291, |
|
"reward": 0.9899181788787246, |
|
"reward_std": 0.29375347727909684, |
|
"rewards/equation_reward_func": 0.13128720642998815, |
|
"rewards/format_reward_func": 0.8586309608072042, |
|
"step": 340 |
|
}, |
|
{ |
|
"completion_length": 218.00372409820557, |
|
"epoch": 8.348367029548989, |
|
"grad_norm": 14.347524642944336, |
|
"kl": 117.67749184370041, |
|
"learning_rate": 7.195414243467029e-08, |
|
"loss": 0.1177, |
|
"reward": 1.00003722589463, |
|
"reward_std": 0.2877940039616078, |
|
"rewards/equation_reward_func": 0.13619791890960187, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 342 |
|
}, |
|
{ |
|
"completion_length": 216.2247062921524, |
|
"epoch": 8.398133748055988, |
|
"grad_norm": 13.07165241241455, |
|
"kl": 106.50291386246681, |
|
"learning_rate": 6.944361510801763e-08, |
|
"loss": 0.1065, |
|
"reward": 1.0014137290418148, |
|
"reward_std": 0.2730475334683433, |
|
"rewards/equation_reward_func": 0.13013393071014434, |
|
"rewards/format_reward_func": 0.871279769577086, |
|
"step": 344 |
|
}, |
|
{ |
|
"completion_length": 220.52009344100952, |
|
"epoch": 8.447900466562986, |
|
"grad_norm": 6.053815841674805, |
|
"kl": 101.54940500855446, |
|
"learning_rate": 6.697058439888283e-08, |
|
"loss": 0.1015, |
|
"reward": 1.0040476340800524, |
|
"reward_std": 0.29517007851973176, |
|
"rewards/equation_reward_func": 0.14020833862014115, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 346 |
|
}, |
|
{ |
|
"completion_length": 222.90997338294983, |
|
"epoch": 8.497667185069984, |
|
"grad_norm": 11.188461303710938, |
|
"kl": 96.22405216097832, |
|
"learning_rate": 6.453556388803288e-08, |
|
"loss": 0.0962, |
|
"reward": 0.9793526967987418, |
|
"reward_std": 0.30874833301641047, |
|
"rewards/equation_reward_func": 0.131882444024086, |
|
"rewards/format_reward_func": 0.8474702471867204, |
|
"step": 348 |
|
}, |
|
{ |
|
"completion_length": 218.2782781124115, |
|
"epoch": 8.547433903576984, |
|
"grad_norm": 5.3335490226745605, |
|
"kl": 100.43345513939857, |
|
"learning_rate": 6.213905926255697e-08, |
|
"loss": 0.1004, |
|
"reward": 1.0058556757867336, |
|
"reward_std": 0.30363914044573903, |
|
"rewards/equation_reward_func": 0.1449925625929609, |
|
"rewards/format_reward_func": 0.860863103531301, |
|
"step": 350 |
|
}, |
|
{ |
|
"completion_length": 214.927832365036, |
|
"epoch": 8.597200622083982, |
|
"grad_norm": 5.736093521118164, |
|
"kl": 106.57747828960419, |
|
"learning_rate": 5.978156821084987e-08, |
|
"loss": 0.1066, |
|
"reward": 0.9937872216105461, |
|
"reward_std": 0.2965135461417958, |
|
"rewards/equation_reward_func": 0.1373883937485516, |
|
"rewards/format_reward_func": 0.8563988180831075, |
|
"step": 352 |
|
}, |
|
{ |
|
"completion_length": 209.39360523223877, |
|
"epoch": 8.64696734059098, |
|
"grad_norm": 10.456531524658203, |
|
"kl": 115.35611689090729, |
|
"learning_rate": 5.7463580319254853e-08, |
|
"loss": 0.1154, |
|
"reward": 0.9899628274142742, |
|
"reward_std": 0.2897207149071619, |
|
"rewards/equation_reward_func": 0.1305878018029034, |
|
"rewards/format_reward_func": 0.8593750083819032, |
|
"step": 354 |
|
}, |
|
{ |
|
"completion_length": 215.7492594718933, |
|
"epoch": 8.696734059097977, |
|
"grad_norm": 8.979085922241211, |
|
"kl": 144.21495592594147, |
|
"learning_rate": 5.518557697039081e-08, |
|
"loss": 0.1442, |
|
"reward": 0.9727529920637608, |
|
"reward_std": 0.30324876098893583, |
|
"rewards/equation_reward_func": 0.13495535892434418, |
|
"rewards/format_reward_func": 0.8377976287156343, |
|
"step": 356 |
|
}, |
|
{ |
|
"completion_length": 215.43675899505615, |
|
"epoch": 8.746500777604977, |
|
"grad_norm": 9.370060920715332, |
|
"kl": 118.48588564991951, |
|
"learning_rate": 5.294803124318145e-08, |
|
"loss": 0.1185, |
|
"reward": 1.003474730066955, |
|
"reward_std": 0.29565710644237697, |
|
"rewards/equation_reward_func": 0.14037946588359773, |
|
"rewards/format_reward_func": 0.8630952462553978, |
|
"step": 358 |
|
}, |
|
{ |
|
"completion_length": 216.35863542556763, |
|
"epoch": 8.796267496111975, |
|
"grad_norm": 10.175962448120117, |
|
"kl": 125.35718154907227, |
|
"learning_rate": 5.07514078146106e-08, |
|
"loss": 0.1254, |
|
"reward": 0.9844866311177611, |
|
"reward_std": 0.3086923783412203, |
|
"rewards/equation_reward_func": 0.13106399052776396, |
|
"rewards/format_reward_func": 0.8534226277843118, |
|
"step": 360 |
|
}, |
|
{ |
|
"completion_length": 216.0572965145111, |
|
"epoch": 8.846034214618973, |
|
"grad_norm": 14.969239234924316, |
|
"kl": 114.87217891216278, |
|
"learning_rate": 4.859616286322094e-08, |
|
"loss": 0.1149, |
|
"reward": 0.9952976554632187, |
|
"reward_std": 0.30335904110688716, |
|
"rewards/equation_reward_func": 0.1433630979154259, |
|
"rewards/format_reward_func": 0.8519345326349139, |
|
"step": 362 |
|
}, |
|
{ |
|
"completion_length": 221.4404798746109, |
|
"epoch": 8.895800933125972, |
|
"grad_norm": 6.283931255340576, |
|
"kl": 110.71545603871346, |
|
"learning_rate": 4.648274397437829e-08, |
|
"loss": 0.1107, |
|
"reward": 0.9908779989928007, |
|
"reward_std": 0.3113563316874206, |
|
"rewards/equation_reward_func": 0.13968750124331564, |
|
"rewards/format_reward_func": 0.851190485060215, |
|
"step": 364 |
|
}, |
|
{ |
|
"completion_length": 218.71875619888306, |
|
"epoch": 8.94556765163297, |
|
"grad_norm": 11.783354759216309, |
|
"kl": 100.00087642669678, |
|
"learning_rate": 4.4411590047320617e-08, |
|
"loss": 0.1, |
|
"reward": 0.982001505792141, |
|
"reward_std": 0.3113343908917159, |
|
"rewards/equation_reward_func": 0.13527529931161553, |
|
"rewards/format_reward_func": 0.8467261996120214, |
|
"step": 366 |
|
}, |
|
{ |
|
"completion_length": 217.48289155960083, |
|
"epoch": 8.995334370139968, |
|
"grad_norm": 5.263770580291748, |
|
"kl": 103.37672221660614, |
|
"learning_rate": 4.2383131204010494e-08, |
|
"loss": 0.1034, |
|
"reward": 0.9819196779280901, |
|
"reward_std": 0.3062695120461285, |
|
"rewards/equation_reward_func": 0.13370536046568304, |
|
"rewards/format_reward_func": 0.8482142947614193, |
|
"step": 368 |
|
}, |
|
{ |
|
"completion_length": 214.46491683156867, |
|
"epoch": 9.024883359253499, |
|
"grad_norm": 12.257465362548828, |
|
"kl": 99.55310023458381, |
|
"learning_rate": 4.039778869981064e-08, |
|
"loss": 0.0591, |
|
"reward": 0.9891980086502276, |
|
"reward_std": 0.29844278195186663, |
|
"rewards/equation_reward_func": 0.13456140644848347, |
|
"rewards/format_reward_func": 0.8546366001430311, |
|
"step": 370 |
|
}, |
|
{ |
|
"completion_length": 204.44047844409943, |
|
"epoch": 9.074650077760497, |
|
"grad_norm": 6.687661647796631, |
|
"kl": 102.91622030735016, |
|
"learning_rate": 3.845597483600049e-08, |
|
"loss": 0.1029, |
|
"reward": 1.00175597704947, |
|
"reward_std": 0.28237274824641645, |
|
"rewards/equation_reward_func": 0.1341964314924553, |
|
"rewards/format_reward_func": 0.8675595317035913, |
|
"step": 372 |
|
}, |
|
{ |
|
"completion_length": 215.5305094718933, |
|
"epoch": 9.124416796267496, |
|
"grad_norm": 6.855081558227539, |
|
"kl": 114.0059272646904, |
|
"learning_rate": 3.655809287415284e-08, |
|
"loss": 0.114, |
|
"reward": 0.9824330499395728, |
|
"reward_std": 0.31068597990088165, |
|
"rewards/equation_reward_func": 0.1319866105914116, |
|
"rewards/format_reward_func": 0.8504464374855161, |
|
"step": 374 |
|
}, |
|
{ |
|
"completion_length": 213.50446844100952, |
|
"epoch": 9.174183514774494, |
|
"grad_norm": 8.869424819946289, |
|
"kl": 116.45409867167473, |
|
"learning_rate": 3.4704536952387285e-08, |
|
"loss": 0.1165, |
|
"reward": 0.996569961309433, |
|
"reward_std": 0.29541023285128176, |
|
"rewards/equation_reward_func": 0.13719494419638067, |
|
"rewards/format_reward_func": 0.8593750083819032, |
|
"step": 376 |
|
}, |
|
{ |
|
"completion_length": 209.6510455608368, |
|
"epoch": 9.223950233281492, |
|
"grad_norm": 5.935553550720215, |
|
"kl": 105.62733614444733, |
|
"learning_rate": 3.2895692003518575e-08, |
|
"loss": 0.1056, |
|
"reward": 1.018154788762331, |
|
"reward_std": 0.2831184302922338, |
|
"rewards/equation_reward_func": 0.1409226229880005, |
|
"rewards/format_reward_func": 0.8772321501746774, |
|
"step": 378 |
|
}, |
|
{ |
|
"completion_length": 211.81324791908264, |
|
"epoch": 9.273716951788492, |
|
"grad_norm": 7.783820629119873, |
|
"kl": 121.14890000224113, |
|
"learning_rate": 3.113193367511635e-08, |
|
"loss": 0.1211, |
|
"reward": 0.9982961546629667, |
|
"reward_std": 0.2949765467783436, |
|
"rewards/equation_reward_func": 0.13296875241212547, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 380 |
|
}, |
|
{ |
|
"completion_length": 216.39137244224548, |
|
"epoch": 9.32348367029549, |
|
"grad_norm": 6.670078754425049, |
|
"kl": 126.6070476770401, |
|
"learning_rate": 2.9413628251493934e-08, |
|
"loss": 0.1266, |
|
"reward": 1.0011904956772923, |
|
"reward_std": 0.2955527463927865, |
|
"rewards/equation_reward_func": 0.13288690755143762, |
|
"rewards/format_reward_func": 0.8683035792782903, |
|
"step": 382 |
|
}, |
|
{ |
|
"completion_length": 212.9427134990692, |
|
"epoch": 9.373250388802488, |
|
"grad_norm": 5.376703262329102, |
|
"kl": 127.98397767543793, |
|
"learning_rate": 2.774113257764066e-08, |
|
"loss": 0.128, |
|
"reward": 0.982187514193356, |
|
"reward_std": 0.32169102667830884, |
|
"rewards/equation_reward_func": 0.13620536087546498, |
|
"rewards/format_reward_func": 0.8459821520373225, |
|
"step": 384 |
|
}, |
|
{ |
|
"completion_length": 211.45015382766724, |
|
"epoch": 9.423017107309487, |
|
"grad_norm": 8.18055248260498, |
|
"kl": 119.37251782417297, |
|
"learning_rate": 2.611479398511518e-08, |
|
"loss": 0.1194, |
|
"reward": 0.9903794955462217, |
|
"reward_std": 0.2968536267289892, |
|
"rewards/equation_reward_func": 0.13026041921693832, |
|
"rewards/format_reward_func": 0.8601190559566021, |
|
"step": 386 |
|
}, |
|
{ |
|
"completion_length": 204.16592621803284, |
|
"epoch": 9.472783825816485, |
|
"grad_norm": 7.270776748657227, |
|
"kl": 101.86209693551064, |
|
"learning_rate": 2.4534950219914057e-08, |
|
"loss": 0.1019, |
|
"reward": 0.9984970455989242, |
|
"reward_std": 0.29997857997659594, |
|
"rewards/equation_reward_func": 0.13837797672022134, |
|
"rewards/format_reward_func": 0.8601190559566021, |
|
"step": 388 |
|
}, |
|
{ |
|
"completion_length": 214.3392894268036, |
|
"epoch": 9.522550544323483, |
|
"grad_norm": 7.736979007720947, |
|
"kl": 105.20895183086395, |
|
"learning_rate": 2.300192937233128e-08, |
|
"loss": 0.1052, |
|
"reward": 0.9999851500615478, |
|
"reward_std": 0.28676424000877887, |
|
"rewards/equation_reward_func": 0.1346577397780493, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 390 |
|
}, |
|
{ |
|
"completion_length": 210.4516406059265, |
|
"epoch": 9.572317262830483, |
|
"grad_norm": 6.528294563293457, |
|
"kl": 106.73470288515091, |
|
"learning_rate": 2.1516049808822935e-08, |
|
"loss": 0.1067, |
|
"reward": 0.9907887103036046, |
|
"reward_std": 0.30266366386786103, |
|
"rewards/equation_reward_func": 0.13438988395500928, |
|
"rewards/format_reward_func": 0.8563988180831075, |
|
"step": 392 |
|
}, |
|
{ |
|
"completion_length": 217.03423070907593, |
|
"epoch": 9.62208398133748, |
|
"grad_norm": 8.291276931762695, |
|
"kl": 112.02839949727058, |
|
"learning_rate": 2.007762010589098e-08, |
|
"loss": 0.112, |
|
"reward": 0.9856399018317461, |
|
"reward_std": 0.29987687326502055, |
|
"rewards/equation_reward_func": 0.13519345549866557, |
|
"rewards/format_reward_func": 0.8504464374855161, |
|
"step": 394 |
|
}, |
|
{ |
|
"completion_length": 212.06771171092987, |
|
"epoch": 9.671850699844478, |
|
"grad_norm": 23.273250579833984, |
|
"kl": 114.7417793571949, |
|
"learning_rate": 1.8686938986000627e-08, |
|
"loss": 0.1147, |
|
"reward": 1.0074405102059245, |
|
"reward_std": 0.2830245600780472, |
|
"rewards/equation_reward_func": 0.1391369072953239, |
|
"rewards/format_reward_func": 0.8683035792782903, |
|
"step": 396 |
|
}, |
|
{ |
|
"completion_length": 208.95982575416565, |
|
"epoch": 9.721617418351478, |
|
"grad_norm": 8.359769821166992, |
|
"kl": 105.31409025192261, |
|
"learning_rate": 1.734429525554365e-08, |
|
"loss": 0.1053, |
|
"reward": 0.9976785909384489, |
|
"reward_std": 0.2738517120014876, |
|
"rewards/equation_reward_func": 0.13309523789212108, |
|
"rewards/format_reward_func": 0.8645833414047956, |
|
"step": 398 |
|
}, |
|
{ |
|
"completion_length": 220.13095664978027, |
|
"epoch": 9.771384136858476, |
|
"grad_norm": 5.326533317565918, |
|
"kl": 104.11170220375061, |
|
"learning_rate": 1.604996774486145e-08, |
|
"loss": 0.1041, |
|
"reward": 0.9921280033886433, |
|
"reward_std": 0.2880916509311646, |
|
"rewards/equation_reward_func": 0.13796131452545524, |
|
"rewards/format_reward_func": 0.8541666753590107, |
|
"step": 400 |
|
}, |
|
{ |
|
"completion_length": 203.88393235206604, |
|
"epoch": 9.821150855365474, |
|
"grad_norm": 5.928884029388428, |
|
"kl": 100.95771364122629, |
|
"learning_rate": 1.4804225250339281e-08, |
|
"loss": 0.101, |
|
"reward": 0.988816992379725, |
|
"reward_std": 0.31290939077734947, |
|
"rewards/equation_reward_func": 0.13613839482422918, |
|
"rewards/format_reward_func": 0.8526785802096128, |
|
"step": 402 |
|
}, |
|
{ |
|
"completion_length": 217.1480677127838, |
|
"epoch": 9.870917573872473, |
|
"grad_norm": 4.631835460662842, |
|
"kl": 93.6852602660656, |
|
"learning_rate": 1.360732647858498e-08, |
|
"loss": 0.0937, |
|
"reward": 0.9889360275119543, |
|
"reward_std": 0.286705979029648, |
|
"rewards/equation_reward_func": 0.13030506228096783, |
|
"rewards/format_reward_func": 0.8586309608072042, |
|
"step": 404 |
|
}, |
|
{ |
|
"completion_length": 222.09077751636505, |
|
"epoch": 9.920684292379471, |
|
"grad_norm": 5.06677770614624, |
|
"kl": 95.56222039461136, |
|
"learning_rate": 1.2459519992702311e-08, |
|
"loss": 0.0956, |
|
"reward": 1.0050446651875973, |
|
"reward_std": 0.2898379734251648, |
|
"rewards/equation_reward_func": 0.1322767889359966, |
|
"rewards/format_reward_func": 0.8727678647264838, |
|
"step": 406 |
|
}, |
|
{ |
|
"completion_length": 223.86830830574036, |
|
"epoch": 9.97045101088647, |
|
"grad_norm": 4.548260688781738, |
|
"kl": 99.37585133314133, |
|
"learning_rate": 1.1361044160671629e-08, |
|
"loss": 0.0994, |
|
"reward": 1.0121131222695112, |
|
"reward_std": 0.29200624965596944, |
|
"rewards/equation_reward_func": 0.13934524141950533, |
|
"rewards/format_reward_func": 0.8727678647264838, |
|
"step": 408 |
|
}, |
|
{ |
|
"completion_length": 207.73434006540398, |
|
"epoch": 10.0, |
|
"grad_norm": 1.7612920999526978, |
|
"kl": 90.04842346592953, |
|
"learning_rate": 1.0312127105846947e-08, |
|
"loss": 0.0535, |
|
"reward": 1.0120050326773995, |
|
"reward_std": 0.2693111804362975, |
|
"rewards/equation_reward_func": 0.146090227718416, |
|
"rewards/format_reward_func": 0.8659147949595201, |
|
"step": 410 |
|
}, |
|
{ |
|
"completion_length": 212.9203917980194, |
|
"epoch": 10.049766718506998, |
|
"grad_norm": 9.370685577392578, |
|
"kl": 89.39757791161537, |
|
"learning_rate": 9.312986659581301e-09, |
|
"loss": 0.0894, |
|
"reward": 1.0107515146955848, |
|
"reward_std": 0.27970271767117083, |
|
"rewards/equation_reward_func": 0.13351934927050024, |
|
"rewards/format_reward_func": 0.8772321501746774, |
|
"step": 412 |
|
}, |
|
{ |
|
"completion_length": 216.9858672618866, |
|
"epoch": 10.099533437013998, |
|
"grad_norm": 5.712127208709717, |
|
"kl": 97.55916061997414, |
|
"learning_rate": 8.363830315988945e-09, |
|
"loss": 0.0976, |
|
"reward": 1.0012500202283263, |
|
"reward_std": 0.2857985398732126, |
|
"rewards/equation_reward_func": 0.13592262053862214, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 414 |
|
}, |
|
{ |
|
"completion_length": 206.04910945892334, |
|
"epoch": 10.149300155520995, |
|
"grad_norm": 8.428742408752441, |
|
"kl": 103.54971778392792, |
|
"learning_rate": 7.46485518885462e-09, |
|
"loss": 0.1035, |
|
"reward": 0.9935044907033443, |
|
"reward_std": 0.29330903699155897, |
|
"rewards/equation_reward_func": 0.13933780090883374, |
|
"rewards/format_reward_func": 0.8541666753590107, |
|
"step": 416 |
|
}, |
|
{ |
|
"completion_length": 216.40104627609253, |
|
"epoch": 10.199066874027993, |
|
"grad_norm": 7.276556968688965, |
|
"kl": 96.23605534434319, |
|
"learning_rate": 6.616247970698319e-09, |
|
"loss": 0.0962, |
|
"reward": 1.003549126908183, |
|
"reward_std": 0.28676802827976644, |
|
"rewards/equation_reward_func": 0.13822172884829342, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 418 |
|
}, |
|
{ |
|
"completion_length": 217.37574791908264, |
|
"epoch": 10.248833592534993, |
|
"grad_norm": 7.763327598571777, |
|
"kl": 105.16229781508446, |
|
"learning_rate": 5.8181848940044855e-09, |
|
"loss": 0.1052, |
|
"reward": 1.0146056832745671, |
|
"reward_std": 0.2754618857288733, |
|
"rewards/equation_reward_func": 0.13886161183472723, |
|
"rewards/format_reward_func": 0.8757440550252795, |
|
"step": 420 |
|
}, |
|
{ |
|
"completion_length": 205.62202739715576, |
|
"epoch": 10.29860031104199, |
|
"grad_norm": 10.51353931427002, |
|
"kl": 95.59216642379761, |
|
"learning_rate": 5.070831694623135e-09, |
|
"loss": 0.0956, |
|
"reward": 0.999791688285768, |
|
"reward_std": 0.3051185356453061, |
|
"rewards/equation_reward_func": 0.13818452623672783, |
|
"rewards/format_reward_func": 0.861607151106, |
|
"step": 422 |
|
}, |
|
{ |
|
"completion_length": 217.62425827980042, |
|
"epoch": 10.348367029548989, |
|
"grad_norm": 7.500198841094971, |
|
"kl": 105.92476946115494, |
|
"learning_rate": 4.374343577351336e-09, |
|
"loss": 0.1059, |
|
"reward": 0.9995759120211005, |
|
"reward_std": 0.28294855484273285, |
|
"rewards/equation_reward_func": 0.13648065709276125, |
|
"rewards/format_reward_func": 0.8630952462553978, |
|
"step": 424 |
|
}, |
|
{ |
|
"completion_length": 218.08780217170715, |
|
"epoch": 10.398133748055988, |
|
"grad_norm": 5.859328269958496, |
|
"kl": 112.09179133176804, |
|
"learning_rate": 3.7288651837012745e-09, |
|
"loss": 0.1121, |
|
"reward": 0.9871056908741593, |
|
"reward_std": 0.305632587405853, |
|
"rewards/equation_reward_func": 0.13442708633374423, |
|
"rewards/format_reward_func": 0.8526785802096128, |
|
"step": 426 |
|
}, |
|
{ |
|
"completion_length": 207.83631467819214, |
|
"epoch": 10.447900466562986, |
|
"grad_norm": 8.897568702697754, |
|
"kl": 107.37559473514557, |
|
"learning_rate": 3.134530561862081e-09, |
|
"loss": 0.1074, |
|
"reward": 0.9991220543161035, |
|
"reward_std": 0.2914143274538219, |
|
"rewards/equation_reward_func": 0.1375148834194988, |
|
"rewards/format_reward_func": 0.861607151106, |
|
"step": 428 |
|
}, |
|
{ |
|
"completion_length": 212.729914188385, |
|
"epoch": 10.497667185069984, |
|
"grad_norm": 8.106195449829102, |
|
"kl": 115.35286539793015, |
|
"learning_rate": 2.5914631388619103e-09, |
|
"loss": 0.1154, |
|
"reward": 0.9826562711969018, |
|
"reward_std": 0.3164314874447882, |
|
"rewards/equation_reward_func": 0.13146577589213848, |
|
"rewards/format_reward_func": 0.851190485060215, |
|
"step": 430 |
|
}, |
|
{ |
|
"completion_length": 215.90774369239807, |
|
"epoch": 10.547433903576984, |
|
"grad_norm": 13.18673038482666, |
|
"kl": 112.25744771957397, |
|
"learning_rate": 2.0997756949353297e-09, |
|
"loss": 0.1123, |
|
"reward": 1.0021726433187723, |
|
"reward_std": 0.29007076751440763, |
|
"rewards/equation_reward_func": 0.1346130990423262, |
|
"rewards/format_reward_func": 0.8675595317035913, |
|
"step": 432 |
|
}, |
|
{ |
|
"completion_length": 209.4575924873352, |
|
"epoch": 10.597200622083982, |
|
"grad_norm": 7.412752628326416, |
|
"kl": 92.43682231009007, |
|
"learning_rate": 1.6595703401020844e-09, |
|
"loss": 0.0924, |
|
"reward": 1.0252753188833594, |
|
"reward_std": 0.26438383577624336, |
|
"rewards/equation_reward_func": 0.1361383959883824, |
|
"rewards/format_reward_func": 0.8891369113698602, |
|
"step": 434 |
|
}, |
|
{ |
|
"completion_length": 212.07961678504944, |
|
"epoch": 10.64696734059098, |
|
"grad_norm": 9.115935325622559, |
|
"kl": 98.72861561179161, |
|
"learning_rate": 1.2709384929615596e-09, |
|
"loss": 0.0987, |
|
"reward": 1.0122768012806773, |
|
"reward_std": 0.2858473288360983, |
|
"rewards/equation_reward_func": 0.14174107427243143, |
|
"rewards/format_reward_func": 0.870535722002387, |
|
"step": 436 |
|
}, |
|
{ |
|
"completion_length": 213.41666972637177, |
|
"epoch": 10.696734059097977, |
|
"grad_norm": 7.424521446228027, |
|
"kl": 101.00204753875732, |
|
"learning_rate": 9.339608617077165e-10, |
|
"loss": 0.101, |
|
"reward": 1.0160119328647852, |
|
"reward_std": 0.27073649514932185, |
|
"rewards/equation_reward_func": 0.13654762151418254, |
|
"rewards/format_reward_func": 0.8794642928987741, |
|
"step": 438 |
|
}, |
|
{ |
|
"completion_length": 214.2976235151291, |
|
"epoch": 10.746500777604977, |
|
"grad_norm": 6.853705883026123, |
|
"kl": 103.29757300019264, |
|
"learning_rate": 6.487074273681114e-10, |
|
"loss": 0.1033, |
|
"reward": 0.9976488519459963, |
|
"reward_std": 0.2918668915517628, |
|
"rewards/equation_reward_func": 0.13604166894219816, |
|
"rewards/format_reward_func": 0.861607151106, |
|
"step": 440 |
|
}, |
|
{ |
|
"completion_length": 212.39434838294983, |
|
"epoch": 10.796267496111975, |
|
"grad_norm": 5.543258190155029, |
|
"kl": 106.93525078892708, |
|
"learning_rate": 4.152374292708538e-10, |
|
"loss": 0.1069, |
|
"reward": 1.0103422962129116, |
|
"reward_std": 0.28469230665359646, |
|
"rewards/equation_reward_func": 0.13683036074507982, |
|
"rewards/format_reward_func": 0.8735119123011827, |
|
"step": 442 |
|
}, |
|
{ |
|
"completion_length": 213.31473565101624, |
|
"epoch": 10.846034214618973, |
|
"grad_norm": 7.63397741317749, |
|
"kl": 108.80926709622145, |
|
"learning_rate": 2.3359935274214204e-10, |
|
"loss": 0.1088, |
|
"reward": 0.9913095412775874, |
|
"reward_std": 0.2936742458259687, |
|
"rewards/equation_reward_func": 0.13044643041212112, |
|
"rewards/format_reward_func": 0.860863103531301, |
|
"step": 444 |
|
}, |
|
{ |
|
"completion_length": 217.44345545768738, |
|
"epoch": 10.895800933125972, |
|
"grad_norm": 6.197990894317627, |
|
"kl": 106.08692070841789, |
|
"learning_rate": 1.0383091903720665e-10, |
|
"loss": 0.1061, |
|
"reward": 0.994486641138792, |
|
"reward_std": 0.29480275977402925, |
|
"rewards/equation_reward_func": 0.1373437523143366, |
|
"rewards/format_reward_func": 0.8571428656578064, |
|
"step": 446 |
|
}, |
|
{ |
|
"completion_length": 215.1421172618866, |
|
"epoch": 10.94556765163297, |
|
"grad_norm": 13.626302719116211, |
|
"kl": 108.21286904811859, |
|
"learning_rate": 2.595907750671533e-11, |
|
"loss": 0.1082, |
|
"reward": 1.0048437686637044, |
|
"reward_std": 0.28983362135477364, |
|
"rewards/equation_reward_func": 0.1410044669173658, |
|
"rewards/format_reward_func": 0.8638392938300967, |
|
"step": 448 |
|
}, |
|
{ |
|
"completion_length": 212.74553871154785, |
|
"epoch": 10.995334370139968, |
|
"grad_norm": 12.845934867858887, |
|
"kl": 108.94018495082855, |
|
"learning_rate": 0.0, |
|
"loss": 0.1089, |
|
"reward": 1.0015253257006407, |
|
"reward_std": 0.2928236851003021, |
|
"rewards/equation_reward_func": 0.13619791949167848, |
|
"rewards/format_reward_func": 0.8653273889794946, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 10.995334370139968, |
|
"step": 450, |
|
"total_flos": 0.0, |
|
"train_loss": 0.10479868455554182, |
|
"train_runtime": 84441.5126, |
|
"train_samples_per_second": 1.194, |
|
"train_steps_per_second": 0.005 |
|
} |
|
], |
|
"logging_steps": 2, |
|
"max_steps": 450, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 12, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|