{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.009424101711759565,
"eval_steps": 500,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"completion_length": 117.2125,
"epoch": 8.567365192508696e-05,
"grad_norm": 1.5428545475006104,
"kl": 7.6057333103563e-06,
"learning_rate": 9.999742979044224e-07,
"loss": 0.0,
"reward": -304.1875,
"reward_std": 100.79027481079102,
"rewards/reward_len": -304.1875,
"step": 10
},
{
"completion_length": 106.55,
"epoch": 0.00017134730385017391,
"grad_norm": 1.7018674612045288,
"kl": 4.6649108844576405e-05,
"learning_rate": 9.999457400204474e-07,
"loss": 0.0,
"reward": -268.175,
"reward_std": 85.13249053955079,
"rewards/reward_len": -268.175,
"step": 20
},
{
"completion_length": 114.1,
"epoch": 0.00025702095577526086,
"grad_norm": 1.4646943807601929,
"kl": 0.00014046951691852881,
"learning_rate": 9.999171821364723e-07,
"loss": 0.0,
"reward": -295.625,
"reward_std": 92.77591400146484,
"rewards/reward_len": -295.625,
"step": 30
},
{
"completion_length": 112.45,
"epoch": 0.00034269460770034783,
"grad_norm": 1.4721007347106934,
"kl": 0.0002871266377042048,
"learning_rate": 9.998886242524973e-07,
"loss": 0.0,
"reward": -287.4375,
"reward_std": 98.72801208496094,
"rewards/reward_len": -287.4375,
"step": 40
},
{
"completion_length": 103.3375,
"epoch": 0.0004283682596254348,
"grad_norm": 1.5164705514907837,
"kl": 0.00045935034868307414,
"learning_rate": 9.998600663685224e-07,
"loss": 0.0,
"reward": -254.0625,
"reward_std": 92.05970458984375,
"rewards/reward_len": -254.0625,
"step": 50
},
{
"completion_length": 100.4375,
"epoch": 0.0005140419115505217,
"grad_norm": 1.267320156097412,
"kl": 0.0006870983401313424,
"learning_rate": 9.998315084845474e-07,
"loss": 0.0,
"reward": -229.725,
"reward_std": 107.81015129089356,
"rewards/reward_len": -229.725,
"step": 60
},
{
"completion_length": 104.0375,
"epoch": 0.0005997155634756087,
"grad_norm": 1.2804882526397705,
"kl": 0.0007431535341311246,
"learning_rate": 9.998029506005722e-07,
"loss": 0.0,
"reward": -276.275,
"reward_std": 99.2921049118042,
"rewards/reward_len": -276.275,
"step": 70
},
{
"completion_length": 102.2875,
"epoch": 0.0006853892154006957,
"grad_norm": 1.4880214929580688,
"kl": 0.0008916158636566252,
"learning_rate": 9.997743927165973e-07,
"loss": 0.0,
"reward": -266.025,
"reward_std": 89.98284072875977,
"rewards/reward_len": -266.025,
"step": 80
},
{
"completion_length": 124.3625,
"epoch": 0.0007710628673257826,
"grad_norm": 1.6408586502075195,
"kl": 0.0010400827683042734,
"learning_rate": 9.997458348326221e-07,
"loss": 0.0,
"reward": -318.0375,
"reward_std": 99.00584564208984,
"rewards/reward_len": -318.0375,
"step": 90
},
{
"completion_length": 92.075,
"epoch": 0.0008567365192508696,
"grad_norm": 1.535640835762024,
"kl": 0.001465974550228566,
"learning_rate": 9.997172769486472e-07,
"loss": 0.0001,
"reward": -206.9125,
"reward_std": 85.6998176574707,
"rewards/reward_len": -206.9125,
"step": 100
},
{
"completion_length": 99.125,
"epoch": 0.0009424101711759565,
"grad_norm": 2.4441370964050293,
"kl": 0.0018776856479234994,
"learning_rate": 9.99688719064672e-07,
"loss": 0.0001,
"reward": -247.55,
"reward_std": 92.91195907592774,
"rewards/reward_len": -247.55,
"step": 110
},
{
"completion_length": 104.35,
"epoch": 0.0010280838231010434,
"grad_norm": 1.3777748346328735,
"kl": 0.002180302981287241,
"learning_rate": 9.99660161180697e-07,
"loss": 0.0001,
"reward": -257.2625,
"reward_std": 95.76971626281738,
"rewards/reward_len": -257.2625,
"step": 120
},
{
"completion_length": 94.4125,
"epoch": 0.0011137574750261306,
"grad_norm": 1.3118126392364502,
"kl": 0.002771495224442333,
"learning_rate": 9.99631603296722e-07,
"loss": 0.0001,
"reward": -202.2125,
"reward_std": 84.88922119140625,
"rewards/reward_len": -202.2125,
"step": 130
},
{
"completion_length": 89.275,
"epoch": 0.0011994311269512175,
"grad_norm": 1.551858901977539,
"kl": 0.0034235142404213546,
"learning_rate": 9.996030454127471e-07,
"loss": 0.0001,
"reward": -211.7375,
"reward_std": 80.14296417236328,
"rewards/reward_len": -211.7375,
"step": 140
},
{
"completion_length": 115.8,
"epoch": 0.0012851047788763044,
"grad_norm": 1.333330750465393,
"kl": 0.0028527263493742794,
"learning_rate": 9.99574487528772e-07,
"loss": 0.0001,
"reward": -279.3625,
"reward_std": 72.2537956237793,
"rewards/reward_len": -279.3625,
"step": 150
},
{
"completion_length": 113.575,
"epoch": 0.0013707784308013913,
"grad_norm": 1.7015572786331177,
"kl": 0.0033493154449388387,
"learning_rate": 9.99545929644797e-07,
"loss": 0.0001,
"reward": -274.15,
"reward_std": 101.25760879516602,
"rewards/reward_len": -274.15,
"step": 160
},
{
"completion_length": 106.65,
"epoch": 0.0014564520827264782,
"grad_norm": 1.95174241065979,
"kl": 0.004216998419724405,
"learning_rate": 9.995173717608219e-07,
"loss": 0.0002,
"reward": -263.525,
"reward_std": 89.78757858276367,
"rewards/reward_len": -263.525,
"step": 170
},
{
"completion_length": 97.4875,
"epoch": 0.0015421257346515652,
"grad_norm": 1.5441001653671265,
"kl": 0.0053229076787829396,
"learning_rate": 9.99488813876847e-07,
"loss": 0.0002,
"reward": -236.775,
"reward_std": 93.90848999023437,
"rewards/reward_len": -236.775,
"step": 180
},
{
"completion_length": 102.7125,
"epoch": 0.0016277993865766523,
"grad_norm": 1.435225009918213,
"kl": 0.005796084133908153,
"learning_rate": 9.99460255992872e-07,
"loss": 0.0002,
"reward": -243.8375,
"reward_std": 95.30473594665527,
"rewards/reward_len": -243.8375,
"step": 190
},
{
"completion_length": 112.8,
"epoch": 0.0017134730385017392,
"grad_norm": 1.3907208442687988,
"kl": 0.006385851232334971,
"learning_rate": 9.994316981088968e-07,
"loss": 0.0003,
"reward": -292.475,
"reward_std": 99.54066009521485,
"rewards/reward_len": -292.475,
"step": 200
},
{
"completion_length": 106.2875,
"epoch": 0.0017991466904268261,
"grad_norm": 1.0377769470214844,
"kl": 0.00682140497956425,
"learning_rate": 9.994031402249218e-07,
"loss": 0.0003,
"reward": -263.15,
"reward_std": 81.53659133911133,
"rewards/reward_len": -263.15,
"step": 210
},
{
"completion_length": 94.15,
"epoch": 0.001884820342351913,
"grad_norm": 1.9323745965957642,
"kl": 0.007357935793697834,
"learning_rate": 9.993745823409469e-07,
"loss": 0.0003,
"reward": -217.4625,
"reward_std": 90.16817970275879,
"rewards/reward_len": -217.4625,
"step": 220
},
{
"completion_length": 92.8,
"epoch": 0.001970493994277,
"grad_norm": 1.8935438394546509,
"kl": 0.008906404627487064,
"learning_rate": 9.993460244569717e-07,
"loss": 0.0004,
"reward": -220.675,
"reward_std": 74.1481372833252,
"rewards/reward_len": -220.675,
"step": 230
},
{
"completion_length": 103.4875,
"epoch": 0.002056167646202087,
"grad_norm": 1.4551963806152344,
"kl": 0.008559457911178469,
"learning_rate": 9.993174665729968e-07,
"loss": 0.0003,
"reward": -242.5,
"reward_std": 96.66918106079102,
"rewards/reward_len": -242.5,
"step": 240
},
{
"completion_length": 96.7375,
"epoch": 0.002141841298127174,
"grad_norm": 1.3759669065475464,
"kl": 0.010590891446918249,
"learning_rate": 9.992889086890218e-07,
"loss": 0.0004,
"reward": -240.6125,
"reward_std": 107.75033836364746,
"rewards/reward_len": -240.6125,
"step": 250
},
{
"completion_length": 95.9125,
"epoch": 0.002227514950052261,
"grad_norm": 1.7361980676651,
"kl": 0.009502861043438315,
"learning_rate": 9.992603508050467e-07,
"loss": 0.0004,
"reward": -219.2125,
"reward_std": 86.29145584106445,
"rewards/reward_len": -219.2125,
"step": 260
},
{
"completion_length": 91.35,
"epoch": 0.002313188601977348,
"grad_norm": 1.6296714544296265,
"kl": 0.012998782657086849,
"learning_rate": 9.992317929210717e-07,
"loss": 0.0005,
"reward": -205.9,
"reward_std": 104.51896514892579,
"rewards/reward_len": -205.9,
"step": 270
},
{
"completion_length": 92.825,
"epoch": 0.002398862253902435,
"grad_norm": 2.636399269104004,
"kl": 0.013483931683003903,
"learning_rate": 9.992032350370967e-07,
"loss": 0.0005,
"reward": -211.0375,
"reward_std": 72.49085922241211,
"rewards/reward_len": -211.0375,
"step": 280
},
{
"completion_length": 96.475,
"epoch": 0.002484535905827522,
"grad_norm": 1.7585633993148804,
"kl": 0.013156655337661504,
"learning_rate": 9.991746771531216e-07,
"loss": 0.0005,
"reward": -233.4,
"reward_std": 77.94148178100586,
"rewards/reward_len": -233.4,
"step": 290
},
{
"completion_length": 78.575,
"epoch": 0.002570209557752609,
"grad_norm": 1.8656781911849976,
"kl": 0.019020712468773127,
"learning_rate": 9.991461192691466e-07,
"loss": 0.0008,
"reward": -165.3625,
"reward_std": 83.65277671813965,
"rewards/reward_len": -165.3625,
"step": 300
},
{
"completion_length": 80.9,
"epoch": 0.0026558832096776957,
"grad_norm": 1.886581540107727,
"kl": 0.0151671776548028,
"learning_rate": 9.991175613851715e-07,
"loss": 0.0006,
"reward": -153.625,
"reward_std": 61.95796432495117,
"rewards/reward_len": -153.625,
"step": 310
},
{
"completion_length": 79.9,
"epoch": 0.0027415568616027826,
"grad_norm": 3.363229990005493,
"kl": 0.018376798275858162,
"learning_rate": 9.990890035011965e-07,
"loss": 0.0007,
"reward": -170.775,
"reward_std": 83.77405395507813,
"rewards/reward_len": -170.775,
"step": 320
},
{
"completion_length": 90.9625,
"epoch": 0.0028272305135278696,
"grad_norm": 3.4404351711273193,
"kl": 0.018607683992013336,
"learning_rate": 9.990604456172216e-07,
"loss": 0.0007,
"reward": -189.1625,
"reward_std": 93.11449737548828,
"rewards/reward_len": -189.1625,
"step": 330
},
{
"completion_length": 90.8375,
"epoch": 0.0029129041654529565,
"grad_norm": 1.3329404592514038,
"kl": 0.01699499851092696,
"learning_rate": 9.990318877332464e-07,
"loss": 0.0007,
"reward": -203.4,
"reward_std": 93.15925168991089,
"rewards/reward_len": -203.4,
"step": 340
},
{
"completion_length": 93.7125,
"epoch": 0.0029985778173780434,
"grad_norm": 2.384962797164917,
"kl": 0.022056785970926286,
"learning_rate": 9.990033298492714e-07,
"loss": 0.0009,
"reward": -209.9,
"reward_std": 95.43540840148925,
"rewards/reward_len": -209.9,
"step": 350
},
{
"completion_length": 77.975,
"epoch": 0.0030842514693031303,
"grad_norm": 2.1530113220214844,
"kl": 0.02068719444796443,
"learning_rate": 9.989747719652965e-07,
"loss": 0.0008,
"reward": -145.9,
"reward_std": 78.50229835510254,
"rewards/reward_len": -145.9,
"step": 360
},
{
"completion_length": 77.175,
"epoch": 0.0031699251212282177,
"grad_norm": 2.1676974296569824,
"kl": 0.028167350962758065,
"learning_rate": 9.989462140813213e-07,
"loss": 0.0011,
"reward": -152.125,
"reward_std": 77.3315586090088,
"rewards/reward_len": -152.125,
"step": 370
},
{
"completion_length": 93.9,
"epoch": 0.0032555987731533046,
"grad_norm": 2.6751251220703125,
"kl": 0.028882794454693793,
"learning_rate": 9.989176561973464e-07,
"loss": 0.0012,
"reward": -192.5875,
"reward_std": 93.17231407165528,
"rewards/reward_len": -192.5875,
"step": 380
},
{
"completion_length": 62.6375,
"epoch": 0.0033412724250783915,
"grad_norm": 1.9541772603988647,
"kl": 0.03603251725435257,
"learning_rate": 9.988890983133714e-07,
"loss": 0.0014,
"reward": -95.625,
"reward_std": 76.96504859924316,
"rewards/reward_len": -95.625,
"step": 390
},
{
"completion_length": 76.5625,
"epoch": 0.0034269460770034784,
"grad_norm": 2.578866720199585,
"kl": 0.036808890849351884,
"learning_rate": 9.988605404293962e-07,
"loss": 0.0015,
"reward": -159.45,
"reward_std": 81.1762622833252,
"rewards/reward_len": -159.45,
"step": 400
},
{
"completion_length": 72.0875,
"epoch": 0.0035126197289285653,
"grad_norm": 2.2174038887023926,
"kl": 0.03971981406211853,
"learning_rate": 9.988319825454213e-07,
"loss": 0.0016,
"reward": -125.9625,
"reward_std": 70.27665119171142,
"rewards/reward_len": -125.9625,
"step": 410
},
{
"completion_length": 70.15,
"epoch": 0.0035982933808536522,
"grad_norm": 2.2227845191955566,
"kl": 0.03956588953733444,
"learning_rate": 9.988034246614463e-07,
"loss": 0.0016,
"reward": -110.5125,
"reward_std": 68.30635490417481,
"rewards/reward_len": -110.5125,
"step": 420
},
{
"completion_length": 66.8,
"epoch": 0.003683967032778739,
"grad_norm": 2.6529383659362793,
"kl": 0.052050336450338366,
"learning_rate": 9.987748667774712e-07,
"loss": 0.0021,
"reward": -110.275,
"reward_std": 86.19376144409179,
"rewards/reward_len": -110.275,
"step": 430
},
{
"completion_length": 71.2375,
"epoch": 0.003769640684703826,
"grad_norm": 2.401475429534912,
"kl": 0.05769926495850086,
"learning_rate": 9.987463088934962e-07,
"loss": 0.0023,
"reward": -128.325,
"reward_std": 84.20119457244873,
"rewards/reward_len": -128.325,
"step": 440
},
{
"completion_length": 90.3875,
"epoch": 0.003855314336628913,
"grad_norm": 2.8859000205993652,
"kl": 0.043742291163653135,
"learning_rate": 9.98717751009521e-07,
"loss": 0.0017,
"reward": -181.6,
"reward_std": 75.12719230651855,
"rewards/reward_len": -181.6,
"step": 450
},
{
"completion_length": 67.7625,
"epoch": 0.003940987988554,
"grad_norm": 2.0204262733459473,
"kl": 0.0622777983546257,
"learning_rate": 9.98689193125546e-07,
"loss": 0.0025,
"reward": -107.1125,
"reward_std": 67.32327232360839,
"rewards/reward_len": -107.1125,
"step": 460
},
{
"completion_length": 66.6,
"epoch": 0.004026661640479087,
"grad_norm": 2.5924794673919678,
"kl": 0.056221196055412294,
"learning_rate": 9.986606352415711e-07,
"loss": 0.0022,
"reward": -114.975,
"reward_std": 75.61366195678711,
"rewards/reward_len": -114.975,
"step": 470
},
{
"completion_length": 63.25,
"epoch": 0.004112335292404174,
"grad_norm": 2.5766448974609375,
"kl": 0.06269333846867084,
"learning_rate": 9.98632077357596e-07,
"loss": 0.0025,
"reward": -87.025,
"reward_std": 75.8617115020752,
"rewards/reward_len": -87.025,
"step": 480
},
{
"completion_length": 54.6625,
"epoch": 0.004198008944329261,
"grad_norm": 3.059234142303467,
"kl": 0.06824085712432862,
"learning_rate": 9.98603519473621e-07,
"loss": 0.0027,
"reward": -58.3375,
"reward_std": 65.2177251815796,
"rewards/reward_len": -58.3375,
"step": 490
},
{
"completion_length": 78.8125,
"epoch": 0.004283682596254348,
"grad_norm": 2.9885940551757812,
"kl": 0.07582498777192086,
"learning_rate": 9.98574961589646e-07,
"loss": 0.003,
"reward": -143.9,
"reward_std": 63.89678440093994,
"rewards/reward_len": -143.9,
"step": 500
},
{
"completion_length": 58.8375,
"epoch": 0.004369356248179435,
"grad_norm": 2.7045488357543945,
"kl": 0.10633525140583515,
"learning_rate": 9.98546403705671e-07,
"loss": 0.0043,
"reward": -71.625,
"reward_std": 78.84037780761719,
"rewards/reward_len": -71.625,
"step": 510
},
{
"completion_length": 66.35,
"epoch": 0.004455029900104522,
"grad_norm": 2.9650001525878906,
"kl": 0.08462142385542393,
"learning_rate": 9.98517845821696e-07,
"loss": 0.0034,
"reward": -93.275,
"reward_std": 69.99792709350587,
"rewards/reward_len": -93.275,
"step": 520
},
{
"completion_length": 81.0125,
"epoch": 0.004540703552029609,
"grad_norm": 3.4145212173461914,
"kl": 0.08472472308203578,
"learning_rate": 9.98489287937721e-07,
"loss": 0.0034,
"reward": -150.125,
"reward_std": 64.46914005279541,
"rewards/reward_len": -150.125,
"step": 530
},
{
"completion_length": 58.9625,
"epoch": 0.004626377203954696,
"grad_norm": 2.866358757019043,
"kl": 0.11239302009344102,
"learning_rate": 9.984607300537458e-07,
"loss": 0.0045,
"reward": -73.075,
"reward_std": 66.32526473999023,
"rewards/reward_len": -73.075,
"step": 540
},
{
"completion_length": 65.3875,
"epoch": 0.004712050855879783,
"grad_norm": 3.9847896099090576,
"kl": 0.1188249222934246,
"learning_rate": 9.984321721697709e-07,
"loss": 0.0048,
"reward": -104.2125,
"reward_std": 84.05291709899902,
"rewards/reward_len": -104.2125,
"step": 550
},
{
"completion_length": 55.425,
"epoch": 0.00479772450780487,
"grad_norm": 1.9827405214309692,
"kl": 0.13096430897712708,
"learning_rate": 9.984036142857957e-07,
"loss": 0.0052,
"reward": -52.8875,
"reward_std": 76.0460901260376,
"rewards/reward_len": -52.8875,
"step": 560
},
{
"completion_length": 63.4,
"epoch": 0.004883398159729956,
"grad_norm": 2.7263333797454834,
"kl": 0.11985208839178085,
"learning_rate": 9.983750564018208e-07,
"loss": 0.0048,
"reward": -85.3,
"reward_std": 75.41236038208008,
"rewards/reward_len": -85.3,
"step": 570
},
{
"completion_length": 60.8125,
"epoch": 0.004969071811655044,
"grad_norm": 2.395049810409546,
"kl": 0.11624505072832107,
"learning_rate": 9.983464985178458e-07,
"loss": 0.0046,
"reward": -79.4,
"reward_std": 51.84059715270996,
"rewards/reward_len": -79.4,
"step": 580
},
{
"completion_length": 51.7125,
"epoch": 0.00505474546358013,
"grad_norm": 3.295987606048584,
"kl": 0.16594674922525882,
"learning_rate": 9.983179406338709e-07,
"loss": 0.0066,
"reward": -43.55,
"reward_std": 40.574602890014646,
"rewards/reward_len": -43.55,
"step": 590
},
{
"completion_length": 67.3375,
"epoch": 0.005140419115505218,
"grad_norm": 4.078490257263184,
"kl": 0.13739465102553367,
"learning_rate": 9.982893827498957e-07,
"loss": 0.0055,
"reward": -98.5625,
"reward_std": 72.55281829833984,
"rewards/reward_len": -98.5625,
"step": 600
},
{
"completion_length": 46.9625,
"epoch": 0.005226092767430304,
"grad_norm": 4.195443153381348,
"kl": 0.16595774739980698,
"learning_rate": 9.982608248659207e-07,
"loss": 0.0066,
"reward": -25.4375,
"reward_std": 67.01236572265626,
"rewards/reward_len": -25.4375,
"step": 610
},
{
"completion_length": 48.8625,
"epoch": 0.0053117664193553914,
"grad_norm": 2.9587647914886475,
"kl": 0.19849314391613007,
"learning_rate": 9.982322669819456e-07,
"loss": 0.0079,
"reward": -32.075,
"reward_std": 57.74148025512695,
"rewards/reward_len": -32.075,
"step": 620
},
{
"completion_length": 55.6125,
"epoch": 0.005397440071280479,
"grad_norm": 3.3368797302246094,
"kl": 0.1759088508784771,
"learning_rate": 9.982037090979706e-07,
"loss": 0.007,
"reward": -60.65,
"reward_std": 58.34819984436035,
"rewards/reward_len": -60.65,
"step": 630
},
{
"completion_length": 51.225,
"epoch": 0.005483113723205565,
"grad_norm": 2.436890125274658,
"kl": 0.19748679026961327,
"learning_rate": 9.981751512139957e-07,
"loss": 0.0079,
"reward": -42.5,
"reward_std": 70.55487613677978,
"rewards/reward_len": -42.5,
"step": 640
},
{
"completion_length": 49.6875,
"epoch": 0.005568787375130653,
"grad_norm": 2.7104854583740234,
"kl": 0.2179098792374134,
"learning_rate": 9.981465933300205e-07,
"loss": 0.0087,
"reward": -41.025,
"reward_std": 67.48074893951416,
"rewards/reward_len": -41.025,
"step": 650
},
{
"completion_length": 45.3625,
"epoch": 0.005654461027055739,
"grad_norm": 2.9114573001861572,
"kl": 0.292061947286129,
"learning_rate": 9.981180354460456e-07,
"loss": 0.0117,
"reward": -22.2875,
"reward_std": 67.80285606384277,
"rewards/reward_len": -22.2875,
"step": 660
},
{
"completion_length": 43.25,
"epoch": 0.0057401346789808265,
"grad_norm": 4.684057712554932,
"kl": 0.23551913797855378,
"learning_rate": 9.980894775620706e-07,
"loss": 0.0094,
"reward": -7.725,
"reward_std": 54.08637924194336,
"rewards/reward_len": -7.725,
"step": 670
},
{
"completion_length": 39.75,
"epoch": 0.005825808330905913,
"grad_norm": 3.1786115169525146,
"kl": 0.32834408432245255,
"learning_rate": 9.980609196780954e-07,
"loss": 0.0131,
"reward": 6.0375,
"reward_std": 58.28718643188476,
"rewards/reward_len": 6.0375,
"step": 680
},
{
"completion_length": 49.025,
"epoch": 0.005911481982831,
"grad_norm": 4.3211870193481445,
"kl": 0.30903512686491014,
"learning_rate": 9.980323617941205e-07,
"loss": 0.0124,
"reward": -36.75,
"reward_std": 74.52720184326172,
"rewards/reward_len": -36.75,
"step": 690
},
{
"completion_length": 37.65,
"epoch": 0.005997155634756087,
"grad_norm": 2.111032247543335,
"kl": 0.32604690343141557,
"learning_rate": 9.980038039101453e-07,
"loss": 0.013,
"reward": 18.4125,
"reward_std": 50.093895721435544,
"rewards/reward_len": 18.4125,
"step": 700
},
{
"completion_length": 33.475,
"epoch": 0.006082829286681174,
"grad_norm": 4.225799560546875,
"kl": 0.3358478359878063,
"learning_rate": 9.979752460261704e-07,
"loss": 0.0134,
"reward": 29.3375,
"reward_std": 45.97467594146728,
"rewards/reward_len": 29.3375,
"step": 710
},
{
"completion_length": 36.1875,
"epoch": 0.006168502938606261,
"grad_norm": 3.743486166000366,
"kl": 0.3429659426212311,
"learning_rate": 9.979466881421954e-07,
"loss": 0.0137,
"reward": 14.65,
"reward_std": 42.70287170410156,
"rewards/reward_len": 14.65,
"step": 720
},
{
"completion_length": 36.1375,
"epoch": 0.006254176590531348,
"grad_norm": 3.634866952896118,
"kl": 0.41900385320186617,
"learning_rate": 9.979181302582205e-07,
"loss": 0.0168,
"reward": 21.025,
"reward_std": 38.98911609649658,
"rewards/reward_len": 21.025,
"step": 730
},
{
"completion_length": 38.7,
"epoch": 0.006339850242456435,
"grad_norm": 5.735452175140381,
"kl": 0.3897597849369049,
"learning_rate": 9.978895723742453e-07,
"loss": 0.0156,
"reward": 11.05,
"reward_std": 49.65783863067627,
"rewards/reward_len": 11.05,
"step": 740
},
{
"completion_length": 38.1125,
"epoch": 0.006425523894381522,
"grad_norm": 2.927375555038452,
"kl": 0.435553178191185,
"learning_rate": 9.978610144902703e-07,
"loss": 0.0174,
"reward": 8.8125,
"reward_std": 55.300292015075684,
"rewards/reward_len": 8.8125,
"step": 750
},
{
"completion_length": 29.675,
"epoch": 0.006511197546306609,
"grad_norm": 3.246641159057617,
"kl": 0.43810410499572755,
"learning_rate": 9.978324566062952e-07,
"loss": 0.0175,
"reward": 40.5375,
"reward_std": 35.55714553594589,
"rewards/reward_len": 40.5375,
"step": 760
},
{
"completion_length": 32.3125,
"epoch": 0.006596871198231696,
"grad_norm": 3.6894195079803467,
"kl": 0.5961927145719528,
"learning_rate": 9.978038987223202e-07,
"loss": 0.0238,
"reward": 28.3625,
"reward_std": 40.15065488815308,
"rewards/reward_len": 28.3625,
"step": 770
},
{
"completion_length": 33.0625,
"epoch": 0.006682544850156783,
"grad_norm": 4.511296272277832,
"kl": 0.5751866042613983,
"learning_rate": 9.977753408383453e-07,
"loss": 0.023,
"reward": 27.575,
"reward_std": 46.356824684143064,
"rewards/reward_len": 27.575,
"step": 780
},
{
"completion_length": 33.0375,
"epoch": 0.0067682185020818695,
"grad_norm": 3.548011302947998,
"kl": 0.42039217352867125,
"learning_rate": 9.9774678295437e-07,
"loss": 0.0168,
"reward": 26.5125,
"reward_std": 46.602904319763184,
"rewards/reward_len": 26.5125,
"step": 790
},
{
"completion_length": 26.85,
"epoch": 0.006853892154006957,
"grad_norm": 5.255430221557617,
"kl": 0.5921719163656235,
"learning_rate": 9.977182250703951e-07,
"loss": 0.0237,
"reward": 60.1375,
"reward_std": 30.120204424858095,
"rewards/reward_len": 60.1375,
"step": 800
},
{
"completion_length": 31.825,
"epoch": 0.006939565805932043,
"grad_norm": 2.8932251930236816,
"kl": 0.5926052063703537,
"learning_rate": 9.976896671864202e-07,
"loss": 0.0237,
"reward": 37.225,
"reward_std": 37.70037169456482,
"rewards/reward_len": 37.225,
"step": 810
},
{
"completion_length": 43.175,
"epoch": 0.007025239457857131,
"grad_norm": 2.886444091796875,
"kl": 0.5511217802762985,
"learning_rate": 9.97661109302445e-07,
"loss": 0.022,
"reward": -2.45,
"reward_std": 56.478550338745116,
"rewards/reward_len": -2.45,
"step": 820
},
{
"completion_length": 30.2625,
"epoch": 0.007110913109782218,
"grad_norm": 2.814298629760742,
"kl": 0.7488752245903015,
"learning_rate": 9.9763255141847e-07,
"loss": 0.03,
"reward": 43.575,
"reward_std": 47.09467077255249,
"rewards/reward_len": 43.575,
"step": 830
},
{
"completion_length": 25.85,
"epoch": 0.0071965867617073045,
"grad_norm": 5.516399383544922,
"kl": 0.7932253420352936,
"learning_rate": 9.97603993534495e-07,
"loss": 0.0317,
"reward": 63.85,
"reward_std": 25.45775556564331,
"rewards/reward_len": 63.85,
"step": 840
},
{
"completion_length": 27.95,
"epoch": 0.007282260413632392,
"grad_norm": 4.706915378570557,
"kl": 0.6608895510435104,
"learning_rate": 9.9757543565052e-07,
"loss": 0.0264,
"reward": 52.7,
"reward_std": 39.781547927856444,
"rewards/reward_len": 52.7,
"step": 850
},
{
"completion_length": 26.9125,
"epoch": 0.007367934065557478,
"grad_norm": 14.130315780639648,
"kl": 0.6030549615621567,
"learning_rate": 9.97546877766545e-07,
"loss": 0.0241,
"reward": 62.875,
"reward_std": 28.904275512695314,
"rewards/reward_len": 62.875,
"step": 860
},
{
"completion_length": 27.35,
"epoch": 0.007453607717482566,
"grad_norm": 5.187411785125732,
"kl": 0.7092643141746521,
"learning_rate": 9.9751831988257e-07,
"loss": 0.0284,
"reward": 47.6375,
"reward_std": 32.484121799468994,
"rewards/reward_len": 47.6375,
"step": 870
},
{
"completion_length": 46.7875,
"epoch": 0.007539281369407652,
"grad_norm": 2.9746391773223877,
"kl": 0.6793952610343694,
"learning_rate": 9.974897619985949e-07,
"loss": 0.0272,
"reward": -19.2625,
"reward_std": 51.298577308654785,
"rewards/reward_len": -19.2625,
"step": 880
},
{
"completion_length": 26.6375,
"epoch": 0.0076249550213327395,
"grad_norm": 4.254567623138428,
"kl": 0.7589318662881851,
"learning_rate": 9.9746120411462e-07,
"loss": 0.0304,
"reward": 57.5625,
"reward_std": 39.20279521942139,
"rewards/reward_len": 57.5625,
"step": 890
},
{
"completion_length": 24.1375,
"epoch": 0.007710628673257826,
"grad_norm": 4.250896453857422,
"kl": 0.8811014115810394,
"learning_rate": 9.974326462306448e-07,
"loss": 0.0352,
"reward": 73.8625,
"reward_std": 19.012751007080077,
"rewards/reward_len": 73.8625,
"step": 900
},
{
"completion_length": 25.4,
"epoch": 0.007796302325182913,
"grad_norm": 3.278289556503296,
"kl": 0.8711494505405426,
"learning_rate": 9.974040883466698e-07,
"loss": 0.0348,
"reward": 69.4375,
"reward_std": 24.089942455291748,
"rewards/reward_len": 69.4375,
"step": 910
},
{
"completion_length": 25.275,
"epoch": 0.007881975977108,
"grad_norm": 4.476153373718262,
"kl": 0.8596636682748795,
"learning_rate": 9.973755304626946e-07,
"loss": 0.0344,
"reward": 63.3875,
"reward_std": 20.410217094421387,
"rewards/reward_len": 63.3875,
"step": 920
},
{
"completion_length": 24.8625,
"epoch": 0.007967649629033088,
"grad_norm": 5.825425624847412,
"kl": 1.0523226261138916,
"learning_rate": 9.9734697257872e-07,
"loss": 0.0421,
"reward": 66.95,
"reward_std": 25.993260192871094,
"rewards/reward_len": 66.95,
"step": 930
},
{
"completion_length": 23.0875,
"epoch": 0.008053323280958175,
"grad_norm": 4.372145652770996,
"kl": 0.9800844460725784,
"learning_rate": 9.973184146947447e-07,
"loss": 0.0392,
"reward": 67.175,
"reward_std": 27.855006313323976,
"rewards/reward_len": 67.175,
"step": 940
},
{
"completion_length": 23.325,
"epoch": 0.008138996932883261,
"grad_norm": 5.127079010009766,
"kl": 0.9943849921226502,
"learning_rate": 9.972898568107698e-07,
"loss": 0.0398,
"reward": 78.1125,
"reward_std": 17.191841506958006,
"rewards/reward_len": 78.1125,
"step": 950
},
{
"completion_length": 22.9375,
"epoch": 0.008224670584808347,
"grad_norm": 4.183666229248047,
"kl": 1.068257564306259,
"learning_rate": 9.972612989267946e-07,
"loss": 0.0427,
"reward": 79.1625,
"reward_std": 18.828048992156983,
"rewards/reward_len": 79.1625,
"step": 960
},
{
"completion_length": 22.9375,
"epoch": 0.008310344236733436,
"grad_norm": 4.63860559463501,
"kl": 1.0732409298419952,
"learning_rate": 9.972327410428197e-07,
"loss": 0.0429,
"reward": 71.4875,
"reward_std": 22.58247709274292,
"rewards/reward_len": 71.4875,
"step": 970
},
{
"completion_length": 25.125,
"epoch": 0.008396017888658522,
"grad_norm": 5.824211597442627,
"kl": 0.9688010692596436,
"learning_rate": 9.972041831588445e-07,
"loss": 0.0388,
"reward": 67.6,
"reward_std": 23.308015012741087,
"rewards/reward_len": 67.6,
"step": 980
},
{
"completion_length": 22.6375,
"epoch": 0.008481691540583609,
"grad_norm": 2.8945517539978027,
"kl": 1.118459904193878,
"learning_rate": 9.971756252748695e-07,
"loss": 0.0447,
"reward": 81.375,
"reward_std": 17.22281708717346,
"rewards/reward_len": 81.375,
"step": 990
},
{
"completion_length": 21.275,
"epoch": 0.008567365192508695,
"grad_norm": 4.304519176483154,
"kl": 0.9363485395908355,
"learning_rate": 9.971470673908946e-07,
"loss": 0.0375,
"reward": 75.775,
"reward_std": 23.021865463256837,
"rewards/reward_len": 75.775,
"step": 1000
},
{
"completion_length": 18.8625,
"epoch": 0.008653038844433783,
"grad_norm": 4.940158843994141,
"kl": 1.0820651412010194,
"learning_rate": 9.971185095069196e-07,
"loss": 0.0433,
"reward": 76.725,
"reward_std": 19.4085599899292,
"rewards/reward_len": 76.725,
"step": 1010
},
{
"completion_length": 22.45,
"epoch": 0.00873871249635887,
"grad_norm": 6.257218837738037,
"kl": 1.3543362498283387,
"learning_rate": 9.970899516229445e-07,
"loss": 0.0542,
"reward": 73.275,
"reward_std": 21.16908242702484,
"rewards/reward_len": 73.275,
"step": 1020
},
{
"completion_length": 21.1875,
"epoch": 0.008824386148283956,
"grad_norm": 7.533606052398682,
"kl": 0.9111672580242157,
"learning_rate": 9.970613937389695e-07,
"loss": 0.0364,
"reward": 79.5375,
"reward_std": 17.676442378759383,
"rewards/reward_len": 79.5375,
"step": 1030
},
{
"completion_length": 17.7,
"epoch": 0.008910059800209045,
"grad_norm": 6.559030532836914,
"kl": 1.283721137046814,
"learning_rate": 9.970328358549944e-07,
"loss": 0.0513,
"reward": 86.375,
"reward_std": 15.953600931167603,
"rewards/reward_len": 86.375,
"step": 1040
},
{
"completion_length": 21.5125,
"epoch": 0.008995733452134131,
"grad_norm": 9.517870903015137,
"kl": 1.2262637436389923,
"learning_rate": 9.970042779710194e-07,
"loss": 0.0491,
"reward": 75.275,
"reward_std": 21.458053398132325,
"rewards/reward_len": 75.275,
"step": 1050
},
{
"completion_length": 21.75,
"epoch": 0.009081407104059218,
"grad_norm": 12.434151649475098,
"kl": 1.324447250366211,
"learning_rate": 9.969757200870442e-07,
"loss": 0.053,
"reward": 81.1125,
"reward_std": 16.958592414855957,
"rewards/reward_len": 81.1125,
"step": 1060
},
{
"completion_length": 22.225,
"epoch": 0.009167080755984304,
"grad_norm": 5.839966297149658,
"kl": 1.3111671924591064,
"learning_rate": 9.969471622030695e-07,
"loss": 0.0524,
"reward": 74.425,
"reward_std": 27.983480072021486,
"rewards/reward_len": 74.425,
"step": 1070
},
{
"completion_length": 20.3,
"epoch": 0.009252754407909392,
"grad_norm": 5.334268093109131,
"kl": 1.2456512570381164,
"learning_rate": 9.969186043190943e-07,
"loss": 0.0498,
"reward": 81.6,
"reward_std": 18.58235924243927,
"rewards/reward_len": 81.6,
"step": 1080
},
{
"completion_length": 22.3125,
"epoch": 0.009338428059834479,
"grad_norm": 4.366571426391602,
"kl": 1.3097686409950255,
"learning_rate": 9.968900464351194e-07,
"loss": 0.0524,
"reward": 77.85,
"reward_std": 20.753051567077637,
"rewards/reward_len": 77.85,
"step": 1090
},
{
"completion_length": 17.3875,
"epoch": 0.009424101711759565,
"grad_norm": 6.299209117889404,
"kl": 1.508902084827423,
"learning_rate": 9.968614885511442e-07,
"loss": 0.0604,
"reward": 83.225,
"reward_std": 13.49931423664093,
"rewards/reward_len": 83.225,
"step": 1100
}
],
"logging_steps": 10,
"max_steps": 350166,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}