{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1073,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004659832246039142,
"grad_norm": 1.8795702761938184,
"learning_rate": 9.259259259259259e-07,
"loss": 0.3874,
"step": 5
},
{
"epoch": 0.009319664492078284,
"grad_norm": 1.810960933535657,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.3745,
"step": 10
},
{
"epoch": 0.013979496738117428,
"grad_norm": 0.689820489630518,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.3574,
"step": 15
},
{
"epoch": 0.01863932898415657,
"grad_norm": 0.23671360073050024,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.3465,
"step": 20
},
{
"epoch": 0.023299161230195712,
"grad_norm": 0.5035003762355204,
"learning_rate": 4.62962962962963e-06,
"loss": 0.3273,
"step": 25
},
{
"epoch": 0.027958993476234855,
"grad_norm": 0.2861970991097442,
"learning_rate": 5.555555555555557e-06,
"loss": 0.3177,
"step": 30
},
{
"epoch": 0.032618825722273995,
"grad_norm": 0.20083968480929654,
"learning_rate": 6.481481481481482e-06,
"loss": 0.3161,
"step": 35
},
{
"epoch": 0.03727865796831314,
"grad_norm": 0.2135927219205515,
"learning_rate": 7.4074074074074075e-06,
"loss": 0.3186,
"step": 40
},
{
"epoch": 0.04193849021435228,
"grad_norm": 0.20149881773912096,
"learning_rate": 8.333333333333334e-06,
"loss": 0.3185,
"step": 45
},
{
"epoch": 0.046598322460391424,
"grad_norm": 0.17751016241192105,
"learning_rate": 9.25925925925926e-06,
"loss": 0.3248,
"step": 50
},
{
"epoch": 0.05125815470643057,
"grad_norm": 0.18396942525421686,
"learning_rate": 1.0185185185185186e-05,
"loss": 0.3175,
"step": 55
},
{
"epoch": 0.05591798695246971,
"grad_norm": 0.17229279499140202,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.3147,
"step": 60
},
{
"epoch": 0.06057781919850885,
"grad_norm": 0.1737383934121573,
"learning_rate": 1.2037037037037039e-05,
"loss": 0.309,
"step": 65
},
{
"epoch": 0.06523765144454799,
"grad_norm": 0.1928149352536188,
"learning_rate": 1.2962962962962964e-05,
"loss": 0.3157,
"step": 70
},
{
"epoch": 0.06989748369058714,
"grad_norm": 0.17826829224749713,
"learning_rate": 1.388888888888889e-05,
"loss": 0.3099,
"step": 75
},
{
"epoch": 0.07455731593662628,
"grad_norm": 0.18477204996403923,
"learning_rate": 1.4814814814814815e-05,
"loss": 0.3067,
"step": 80
},
{
"epoch": 0.07921714818266543,
"grad_norm": 0.1897409236877168,
"learning_rate": 1.5740740740740744e-05,
"loss": 0.31,
"step": 85
},
{
"epoch": 0.08387698042870456,
"grad_norm": 0.17327980240361615,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.3163,
"step": 90
},
{
"epoch": 0.08853681267474371,
"grad_norm": 0.1841696908552682,
"learning_rate": 1.7592592592592595e-05,
"loss": 0.3083,
"step": 95
},
{
"epoch": 0.09319664492078285,
"grad_norm": 0.18204159918734777,
"learning_rate": 1.851851851851852e-05,
"loss": 0.3053,
"step": 100
},
{
"epoch": 0.097856477166822,
"grad_norm": 0.17179833079133283,
"learning_rate": 1.9444444444444445e-05,
"loss": 0.3058,
"step": 105
},
{
"epoch": 0.10251630941286113,
"grad_norm": 0.19459644226361492,
"learning_rate": 1.995854922279793e-05,
"loss": 0.3065,
"step": 110
},
{
"epoch": 0.10717614165890028,
"grad_norm": 0.1901341829663568,
"learning_rate": 1.985492227979275e-05,
"loss": 0.3078,
"step": 115
},
{
"epoch": 0.11183597390493942,
"grad_norm": 0.18214653715281157,
"learning_rate": 1.9751295336787565e-05,
"loss": 0.3059,
"step": 120
},
{
"epoch": 0.11649580615097857,
"grad_norm": 0.17815775257629182,
"learning_rate": 1.9647668393782386e-05,
"loss": 0.3017,
"step": 125
},
{
"epoch": 0.1211556383970177,
"grad_norm": 0.1844645558957454,
"learning_rate": 1.9544041450777206e-05,
"loss": 0.3095,
"step": 130
},
{
"epoch": 0.12581547064305684,
"grad_norm": 0.2030421375294079,
"learning_rate": 1.9440414507772023e-05,
"loss": 0.305,
"step": 135
},
{
"epoch": 0.13047530288909598,
"grad_norm": 0.2070381771570163,
"learning_rate": 1.9336787564766843e-05,
"loss": 0.3032,
"step": 140
},
{
"epoch": 0.13513513513513514,
"grad_norm": 0.19750341679314554,
"learning_rate": 1.923316062176166e-05,
"loss": 0.3183,
"step": 145
},
{
"epoch": 0.13979496738117428,
"grad_norm": 0.19731313864346534,
"learning_rate": 1.9129533678756477e-05,
"loss": 0.3067,
"step": 150
},
{
"epoch": 0.14445479962721341,
"grad_norm": 0.17260148498158165,
"learning_rate": 1.9025906735751297e-05,
"loss": 0.3186,
"step": 155
},
{
"epoch": 0.14911463187325255,
"grad_norm": 0.2008627001637739,
"learning_rate": 1.8922279792746117e-05,
"loss": 0.3085,
"step": 160
},
{
"epoch": 0.15377446411929171,
"grad_norm": 0.18712990541821012,
"learning_rate": 1.8818652849740934e-05,
"loss": 0.3085,
"step": 165
},
{
"epoch": 0.15843429636533085,
"grad_norm": 0.17292039090794106,
"learning_rate": 1.8715025906735754e-05,
"loss": 0.2908,
"step": 170
},
{
"epoch": 0.16309412861137,
"grad_norm": 0.19172387862691817,
"learning_rate": 1.861139896373057e-05,
"loss": 0.3081,
"step": 175
},
{
"epoch": 0.16775396085740912,
"grad_norm": 0.18688008270471848,
"learning_rate": 1.850777202072539e-05,
"loss": 0.306,
"step": 180
},
{
"epoch": 0.1724137931034483,
"grad_norm": 0.1745486683593482,
"learning_rate": 1.8404145077720208e-05,
"loss": 0.3032,
"step": 185
},
{
"epoch": 0.17707362534948742,
"grad_norm": 0.19257694931627597,
"learning_rate": 1.8300518134715028e-05,
"loss": 0.3101,
"step": 190
},
{
"epoch": 0.18173345759552656,
"grad_norm": 0.2104231700336429,
"learning_rate": 1.8196891191709845e-05,
"loss": 0.3073,
"step": 195
},
{
"epoch": 0.1863932898415657,
"grad_norm": 0.2198068709198768,
"learning_rate": 1.8093264248704665e-05,
"loss": 0.3097,
"step": 200
},
{
"epoch": 0.19105312208760486,
"grad_norm": 0.19860541284894057,
"learning_rate": 1.7989637305699482e-05,
"loss": 0.3057,
"step": 205
},
{
"epoch": 0.195712954333644,
"grad_norm": 0.19761516589964884,
"learning_rate": 1.7886010362694302e-05,
"loss": 0.31,
"step": 210
},
{
"epoch": 0.20037278657968313,
"grad_norm": 0.1857003842960702,
"learning_rate": 1.778238341968912e-05,
"loss": 0.3035,
"step": 215
},
{
"epoch": 0.20503261882572227,
"grad_norm": 0.19655970430783293,
"learning_rate": 1.767875647668394e-05,
"loss": 0.309,
"step": 220
},
{
"epoch": 0.2096924510717614,
"grad_norm": 0.18373076556409915,
"learning_rate": 1.757512953367876e-05,
"loss": 0.2992,
"step": 225
},
{
"epoch": 0.21435228331780057,
"grad_norm": 0.1855241535339271,
"learning_rate": 1.7471502590673576e-05,
"loss": 0.307,
"step": 230
},
{
"epoch": 0.2190121155638397,
"grad_norm": 0.18789691001904157,
"learning_rate": 1.7367875647668397e-05,
"loss": 0.3034,
"step": 235
},
{
"epoch": 0.22367194780987884,
"grad_norm": 0.225904963129947,
"learning_rate": 1.7264248704663214e-05,
"loss": 0.3073,
"step": 240
},
{
"epoch": 0.22833178005591798,
"grad_norm": 0.2176659893463581,
"learning_rate": 1.716062176165803e-05,
"loss": 0.3082,
"step": 245
},
{
"epoch": 0.23299161230195714,
"grad_norm": 0.19279163806598115,
"learning_rate": 1.705699481865285e-05,
"loss": 0.3049,
"step": 250
},
{
"epoch": 0.23765144454799628,
"grad_norm": 0.19980935074977862,
"learning_rate": 1.695336787564767e-05,
"loss": 0.3059,
"step": 255
},
{
"epoch": 0.2423112767940354,
"grad_norm": 0.20286292649885426,
"learning_rate": 1.6849740932642488e-05,
"loss": 0.3104,
"step": 260
},
{
"epoch": 0.24697110904007455,
"grad_norm": 0.20806622057526192,
"learning_rate": 1.6746113989637308e-05,
"loss": 0.309,
"step": 265
},
{
"epoch": 0.2516309412861137,
"grad_norm": 0.1837006492305824,
"learning_rate": 1.6642487046632125e-05,
"loss": 0.3054,
"step": 270
},
{
"epoch": 0.25629077353215285,
"grad_norm": 0.19343204990996146,
"learning_rate": 1.6538860103626945e-05,
"loss": 0.3065,
"step": 275
},
{
"epoch": 0.26095060577819196,
"grad_norm": 0.17569927907529842,
"learning_rate": 1.6435233160621765e-05,
"loss": 0.3038,
"step": 280
},
{
"epoch": 0.2656104380242311,
"grad_norm": 0.1964720824239362,
"learning_rate": 1.6331606217616582e-05,
"loss": 0.3057,
"step": 285
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.17919773464670466,
"learning_rate": 1.6227979274611402e-05,
"loss": 0.3119,
"step": 290
},
{
"epoch": 0.2749301025163094,
"grad_norm": 0.20289962735519462,
"learning_rate": 1.612435233160622e-05,
"loss": 0.322,
"step": 295
},
{
"epoch": 0.27958993476234856,
"grad_norm": 0.20635974949126276,
"learning_rate": 1.6020725388601036e-05,
"loss": 0.3176,
"step": 300
},
{
"epoch": 0.2842497670083877,
"grad_norm": 0.2128465330229402,
"learning_rate": 1.5917098445595856e-05,
"loss": 0.3076,
"step": 305
},
{
"epoch": 0.28890959925442683,
"grad_norm": 0.21095377132964316,
"learning_rate": 1.5813471502590673e-05,
"loss": 0.305,
"step": 310
},
{
"epoch": 0.293569431500466,
"grad_norm": 0.20117897001414128,
"learning_rate": 1.5709844559585493e-05,
"loss": 0.3107,
"step": 315
},
{
"epoch": 0.2982292637465051,
"grad_norm": 0.210307217026029,
"learning_rate": 1.5606217616580313e-05,
"loss": 0.2969,
"step": 320
},
{
"epoch": 0.30288909599254427,
"grad_norm": 0.1953188277902909,
"learning_rate": 1.550259067357513e-05,
"loss": 0.2988,
"step": 325
},
{
"epoch": 0.30754892823858343,
"grad_norm": 0.20624395571895365,
"learning_rate": 1.539896373056995e-05,
"loss": 0.3077,
"step": 330
},
{
"epoch": 0.31220876048462254,
"grad_norm": 0.19328155978476294,
"learning_rate": 1.5295336787564767e-05,
"loss": 0.3059,
"step": 335
},
{
"epoch": 0.3168685927306617,
"grad_norm": 0.18260179855898995,
"learning_rate": 1.5191709844559586e-05,
"loss": 0.3044,
"step": 340
},
{
"epoch": 0.32152842497670087,
"grad_norm": 0.1840438952163473,
"learning_rate": 1.5088082901554406e-05,
"loss": 0.3059,
"step": 345
},
{
"epoch": 0.32618825722274,
"grad_norm": 0.19461016289059585,
"learning_rate": 1.4984455958549225e-05,
"loss": 0.316,
"step": 350
},
{
"epoch": 0.33084808946877914,
"grad_norm": 0.19356848595491336,
"learning_rate": 1.4880829015544043e-05,
"loss": 0.3125,
"step": 355
},
{
"epoch": 0.33550792171481825,
"grad_norm": 0.1903893603560245,
"learning_rate": 1.4777202072538862e-05,
"loss": 0.3046,
"step": 360
},
{
"epoch": 0.3401677539608574,
"grad_norm": 0.1888576016162768,
"learning_rate": 1.4673575129533678e-05,
"loss": 0.3027,
"step": 365
},
{
"epoch": 0.3448275862068966,
"grad_norm": 0.18928501902596923,
"learning_rate": 1.4569948186528497e-05,
"loss": 0.31,
"step": 370
},
{
"epoch": 0.3494874184529357,
"grad_norm": 0.1966355160370978,
"learning_rate": 1.4466321243523317e-05,
"loss": 0.3041,
"step": 375
},
{
"epoch": 0.35414725069897485,
"grad_norm": 0.1901672871632349,
"learning_rate": 1.4362694300518136e-05,
"loss": 0.3136,
"step": 380
},
{
"epoch": 0.35880708294501396,
"grad_norm": 0.19972593288293097,
"learning_rate": 1.4259067357512954e-05,
"loss": 0.3097,
"step": 385
},
{
"epoch": 0.3634669151910531,
"grad_norm": 0.19468214914369028,
"learning_rate": 1.4155440414507773e-05,
"loss": 0.3068,
"step": 390
},
{
"epoch": 0.3681267474370923,
"grad_norm": 0.24360814017146365,
"learning_rate": 1.4051813471502591e-05,
"loss": 0.3083,
"step": 395
},
{
"epoch": 0.3727865796831314,
"grad_norm": 0.20121403150161737,
"learning_rate": 1.394818652849741e-05,
"loss": 0.2992,
"step": 400
},
{
"epoch": 0.37744641192917056,
"grad_norm": 0.18275980204979905,
"learning_rate": 1.384455958549223e-05,
"loss": 0.3191,
"step": 405
},
{
"epoch": 0.3821062441752097,
"grad_norm": 0.19597361835647514,
"learning_rate": 1.3740932642487049e-05,
"loss": 0.3102,
"step": 410
},
{
"epoch": 0.38676607642124883,
"grad_norm": 0.19121995822742002,
"learning_rate": 1.3637305699481867e-05,
"loss": 0.3036,
"step": 415
},
{
"epoch": 0.391425908667288,
"grad_norm": 0.21411409371703122,
"learning_rate": 1.3533678756476684e-05,
"loss": 0.3138,
"step": 420
},
{
"epoch": 0.3960857409133271,
"grad_norm": 0.20555940672664647,
"learning_rate": 1.3430051813471503e-05,
"loss": 0.3169,
"step": 425
},
{
"epoch": 0.40074557315936626,
"grad_norm": 0.19221801982606176,
"learning_rate": 1.3326424870466321e-05,
"loss": 0.3121,
"step": 430
},
{
"epoch": 0.40540540540540543,
"grad_norm": 0.2014890349514093,
"learning_rate": 1.3222797927461141e-05,
"loss": 0.3175,
"step": 435
},
{
"epoch": 0.41006523765144454,
"grad_norm": 0.24792004508641477,
"learning_rate": 1.311917098445596e-05,
"loss": 0.3084,
"step": 440
},
{
"epoch": 0.4147250698974837,
"grad_norm": 0.1954769185865929,
"learning_rate": 1.3015544041450778e-05,
"loss": 0.3076,
"step": 445
},
{
"epoch": 0.4193849021435228,
"grad_norm": 0.20194311212258356,
"learning_rate": 1.2911917098445597e-05,
"loss": 0.3005,
"step": 450
},
{
"epoch": 0.424044734389562,
"grad_norm": 0.19502464315598184,
"learning_rate": 1.2808290155440415e-05,
"loss": 0.308,
"step": 455
},
{
"epoch": 0.42870456663560114,
"grad_norm": 0.1985626146517041,
"learning_rate": 1.2704663212435234e-05,
"loss": 0.3057,
"step": 460
},
{
"epoch": 0.43336439888164024,
"grad_norm": 0.21421287702611994,
"learning_rate": 1.2601036269430054e-05,
"loss": 0.3033,
"step": 465
},
{
"epoch": 0.4380242311276794,
"grad_norm": 0.1987802920868664,
"learning_rate": 1.2497409326424873e-05,
"loss": 0.3126,
"step": 470
},
{
"epoch": 0.4426840633737186,
"grad_norm": 0.1895491437169421,
"learning_rate": 1.239378238341969e-05,
"loss": 0.304,
"step": 475
},
{
"epoch": 0.4473438956197577,
"grad_norm": 0.1985509321068275,
"learning_rate": 1.2290155440414508e-05,
"loss": 0.3189,
"step": 480
},
{
"epoch": 0.45200372786579684,
"grad_norm": 0.2173910116765384,
"learning_rate": 1.2186528497409327e-05,
"loss": 0.3092,
"step": 485
},
{
"epoch": 0.45666356011183595,
"grad_norm": 0.17261612546150368,
"learning_rate": 1.2082901554404145e-05,
"loss": 0.3039,
"step": 490
},
{
"epoch": 0.4613233923578751,
"grad_norm": 0.20048439029565604,
"learning_rate": 1.1979274611398965e-05,
"loss": 0.3077,
"step": 495
},
{
"epoch": 0.4659832246039143,
"grad_norm": 0.20844885780214473,
"learning_rate": 1.1875647668393784e-05,
"loss": 0.3179,
"step": 500
},
{
"epoch": 0.4706430568499534,
"grad_norm": 0.19509047480375188,
"learning_rate": 1.1772020725388602e-05,
"loss": 0.307,
"step": 505
},
{
"epoch": 0.47530288909599255,
"grad_norm": 0.18379044121686752,
"learning_rate": 1.1668393782383421e-05,
"loss": 0.3076,
"step": 510
},
{
"epoch": 0.47996272134203166,
"grad_norm": 0.20046163502948242,
"learning_rate": 1.1564766839378238e-05,
"loss": 0.3073,
"step": 515
},
{
"epoch": 0.4846225535880708,
"grad_norm": 0.1891503524111604,
"learning_rate": 1.1461139896373056e-05,
"loss": 0.3119,
"step": 520
},
{
"epoch": 0.48928238583411,
"grad_norm": 0.20769187983919965,
"learning_rate": 1.1357512953367878e-05,
"loss": 0.312,
"step": 525
},
{
"epoch": 0.4939422180801491,
"grad_norm": 0.1956521662876443,
"learning_rate": 1.1253886010362695e-05,
"loss": 0.3069,
"step": 530
},
{
"epoch": 0.49860205032618826,
"grad_norm": 0.1851287031638996,
"learning_rate": 1.1150259067357514e-05,
"loss": 0.3117,
"step": 535
},
{
"epoch": 0.5032618825722274,
"grad_norm": 0.20050732452739828,
"learning_rate": 1.1046632124352332e-05,
"loss": 0.2985,
"step": 540
},
{
"epoch": 0.5079217148182665,
"grad_norm": 0.2219598360154172,
"learning_rate": 1.094300518134715e-05,
"loss": 0.3058,
"step": 545
},
{
"epoch": 0.5125815470643057,
"grad_norm": 0.22159014441089023,
"learning_rate": 1.083937823834197e-05,
"loss": 0.3114,
"step": 550
},
{
"epoch": 0.5172413793103449,
"grad_norm": 0.21189175620802284,
"learning_rate": 1.073575129533679e-05,
"loss": 0.3097,
"step": 555
},
{
"epoch": 0.5219012115563839,
"grad_norm": 0.20194991625949968,
"learning_rate": 1.0632124352331608e-05,
"loss": 0.3195,
"step": 560
},
{
"epoch": 0.5265610438024231,
"grad_norm": 0.17929820608315875,
"learning_rate": 1.0528497409326426e-05,
"loss": 0.2966,
"step": 565
},
{
"epoch": 0.5312208760484622,
"grad_norm": 0.18847514867987192,
"learning_rate": 1.0424870466321243e-05,
"loss": 0.3013,
"step": 570
},
{
"epoch": 0.5358807082945014,
"grad_norm": 0.1918865802913081,
"learning_rate": 1.0321243523316062e-05,
"loss": 0.3054,
"step": 575
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.19646625910996846,
"learning_rate": 1.021761658031088e-05,
"loss": 0.3039,
"step": 580
},
{
"epoch": 0.5452003727865797,
"grad_norm": 0.1957026436292748,
"learning_rate": 1.01139896373057e-05,
"loss": 0.3085,
"step": 585
},
{
"epoch": 0.5498602050326188,
"grad_norm": 0.2215747140393126,
"learning_rate": 1.0010362694300519e-05,
"loss": 0.3004,
"step": 590
},
{
"epoch": 0.554520037278658,
"grad_norm": 0.2096798005029143,
"learning_rate": 9.906735751295338e-06,
"loss": 0.3045,
"step": 595
},
{
"epoch": 0.5591798695246971,
"grad_norm": 0.21043804051484524,
"learning_rate": 9.803108808290156e-06,
"loss": 0.3061,
"step": 600
},
{
"epoch": 0.5638397017707363,
"grad_norm": 0.18777718502993346,
"learning_rate": 9.699481865284975e-06,
"loss": 0.3048,
"step": 605
},
{
"epoch": 0.5684995340167754,
"grad_norm": 0.20237085315201214,
"learning_rate": 9.595854922279793e-06,
"loss": 0.3132,
"step": 610
},
{
"epoch": 0.5731593662628145,
"grad_norm": 0.193385995407008,
"learning_rate": 9.492227979274612e-06,
"loss": 0.3042,
"step": 615
},
{
"epoch": 0.5778191985088537,
"grad_norm": 0.1933950220530074,
"learning_rate": 9.388601036269432e-06,
"loss": 0.2979,
"step": 620
},
{
"epoch": 0.5824790307548928,
"grad_norm": 0.19132327866819904,
"learning_rate": 9.284974093264249e-06,
"loss": 0.3121,
"step": 625
},
{
"epoch": 0.587138863000932,
"grad_norm": 0.18145967807189134,
"learning_rate": 9.181347150259067e-06,
"loss": 0.3104,
"step": 630
},
{
"epoch": 0.5917986952469712,
"grad_norm": 0.1938112202212723,
"learning_rate": 9.077720207253888e-06,
"loss": 0.3114,
"step": 635
},
{
"epoch": 0.5964585274930102,
"grad_norm": 0.18005219766713837,
"learning_rate": 8.974093264248706e-06,
"loss": 0.3109,
"step": 640
},
{
"epoch": 0.6011183597390494,
"grad_norm": 0.19954264682643283,
"learning_rate": 8.870466321243523e-06,
"loss": 0.3073,
"step": 645
},
{
"epoch": 0.6057781919850885,
"grad_norm": 0.2029508363977868,
"learning_rate": 8.766839378238343e-06,
"loss": 0.3099,
"step": 650
},
{
"epoch": 0.6104380242311277,
"grad_norm": 0.20067540450864405,
"learning_rate": 8.663212435233162e-06,
"loss": 0.3131,
"step": 655
},
{
"epoch": 0.6150978564771669,
"grad_norm": 0.18278420923843008,
"learning_rate": 8.55958549222798e-06,
"loss": 0.2968,
"step": 660
},
{
"epoch": 0.6197576887232059,
"grad_norm": 0.19255194330618958,
"learning_rate": 8.455958549222799e-06,
"loss": 0.3,
"step": 665
},
{
"epoch": 0.6244175209692451,
"grad_norm": 0.17805047451733982,
"learning_rate": 8.352331606217617e-06,
"loss": 0.3002,
"step": 670
},
{
"epoch": 0.6290773532152842,
"grad_norm": 0.1809729112938702,
"learning_rate": 8.248704663212436e-06,
"loss": 0.3004,
"step": 675
},
{
"epoch": 0.6337371854613234,
"grad_norm": 0.19843229643919744,
"learning_rate": 8.145077720207254e-06,
"loss": 0.3093,
"step": 680
},
{
"epoch": 0.6383970177073626,
"grad_norm": 0.17094593426100432,
"learning_rate": 8.041450777202073e-06,
"loss": 0.3118,
"step": 685
},
{
"epoch": 0.6430568499534017,
"grad_norm": 0.17842406465044058,
"learning_rate": 7.937823834196891e-06,
"loss": 0.3026,
"step": 690
},
{
"epoch": 0.6477166821994408,
"grad_norm": 0.18735190688774842,
"learning_rate": 7.834196891191712e-06,
"loss": 0.3097,
"step": 695
},
{
"epoch": 0.65237651444548,
"grad_norm": 0.18672212273790229,
"learning_rate": 7.730569948186528e-06,
"loss": 0.3149,
"step": 700
},
{
"epoch": 0.6570363466915191,
"grad_norm": 0.16216816399314543,
"learning_rate": 7.626943005181348e-06,
"loss": 0.3026,
"step": 705
},
{
"epoch": 0.6616961789375583,
"grad_norm": 0.19617575721215516,
"learning_rate": 7.523316062176167e-06,
"loss": 0.3036,
"step": 710
},
{
"epoch": 0.6663560111835974,
"grad_norm": 0.1783695592863534,
"learning_rate": 7.419689119170985e-06,
"loss": 0.2985,
"step": 715
},
{
"epoch": 0.6710158434296365,
"grad_norm": 0.17934516453245036,
"learning_rate": 7.3160621761658035e-06,
"loss": 0.3031,
"step": 720
},
{
"epoch": 0.6756756756756757,
"grad_norm": 0.19399978320829833,
"learning_rate": 7.212435233160623e-06,
"loss": 0.3077,
"step": 725
},
{
"epoch": 0.6803355079217148,
"grad_norm": 0.19060796532512359,
"learning_rate": 7.108808290155441e-06,
"loss": 0.3039,
"step": 730
},
{
"epoch": 0.684995340167754,
"grad_norm": 0.17663402079064713,
"learning_rate": 7.005181347150259e-06,
"loss": 0.304,
"step": 735
},
{
"epoch": 0.6896551724137931,
"grad_norm": 0.18728492382652162,
"learning_rate": 6.9015544041450784e-06,
"loss": 0.2996,
"step": 740
},
{
"epoch": 0.6943150046598322,
"grad_norm": 0.16539414234955993,
"learning_rate": 6.797927461139897e-06,
"loss": 0.3007,
"step": 745
},
{
"epoch": 0.6989748369058714,
"grad_norm": 0.18796932042651304,
"learning_rate": 6.6943005181347155e-06,
"loss": 0.3006,
"step": 750
},
{
"epoch": 0.7036346691519105,
"grad_norm": 0.1934535934904552,
"learning_rate": 6.590673575129535e-06,
"loss": 0.3051,
"step": 755
},
{
"epoch": 0.7082945013979497,
"grad_norm": 0.17511509631442268,
"learning_rate": 6.487046632124353e-06,
"loss": 0.3047,
"step": 760
},
{
"epoch": 0.7129543336439889,
"grad_norm": 0.16967569477610708,
"learning_rate": 6.383419689119171e-06,
"loss": 0.2981,
"step": 765
},
{
"epoch": 0.7176141658900279,
"grad_norm": 0.20551530112906796,
"learning_rate": 6.2797927461139905e-06,
"loss": 0.3048,
"step": 770
},
{
"epoch": 0.7222739981360671,
"grad_norm": 0.1772568831952956,
"learning_rate": 6.176165803108809e-06,
"loss": 0.3106,
"step": 775
},
{
"epoch": 0.7269338303821062,
"grad_norm": 0.17122628778280205,
"learning_rate": 6.0725388601036275e-06,
"loss": 0.2986,
"step": 780
},
{
"epoch": 0.7315936626281454,
"grad_norm": 0.19005996568436556,
"learning_rate": 5.968911917098445e-06,
"loss": 0.3024,
"step": 785
},
{
"epoch": 0.7362534948741846,
"grad_norm": 0.1896569557324295,
"learning_rate": 5.865284974093265e-06,
"loss": 0.3068,
"step": 790
},
{
"epoch": 0.7409133271202236,
"grad_norm": 0.17553068397844512,
"learning_rate": 5.761658031088083e-06,
"loss": 0.306,
"step": 795
},
{
"epoch": 0.7455731593662628,
"grad_norm": 0.1902146433481209,
"learning_rate": 5.658031088082902e-06,
"loss": 0.3044,
"step": 800
},
{
"epoch": 0.750232991612302,
"grad_norm": 0.18379958493058496,
"learning_rate": 5.554404145077721e-06,
"loss": 0.3046,
"step": 805
},
{
"epoch": 0.7548928238583411,
"grad_norm": 0.19238104735204387,
"learning_rate": 5.4507772020725395e-06,
"loss": 0.3133,
"step": 810
},
{
"epoch": 0.7595526561043803,
"grad_norm": 0.16684993046883195,
"learning_rate": 5.347150259067357e-06,
"loss": 0.3019,
"step": 815
},
{
"epoch": 0.7642124883504194,
"grad_norm": 0.17317168720572065,
"learning_rate": 5.243523316062177e-06,
"loss": 0.3092,
"step": 820
},
{
"epoch": 0.7688723205964585,
"grad_norm": 0.17257530643463354,
"learning_rate": 5.139896373056995e-06,
"loss": 0.3012,
"step": 825
},
{
"epoch": 0.7735321528424977,
"grad_norm": 0.19022509153976733,
"learning_rate": 5.036269430051814e-06,
"loss": 0.3006,
"step": 830
},
{
"epoch": 0.7781919850885368,
"grad_norm": 0.18273563180618016,
"learning_rate": 4.932642487046633e-06,
"loss": 0.302,
"step": 835
},
{
"epoch": 0.782851817334576,
"grad_norm": 0.2068935985590348,
"learning_rate": 4.829015544041451e-06,
"loss": 0.3083,
"step": 840
},
{
"epoch": 0.7875116495806151,
"grad_norm": 0.1787063525187819,
"learning_rate": 4.72538860103627e-06,
"loss": 0.3158,
"step": 845
},
{
"epoch": 0.7921714818266542,
"grad_norm": 0.17589355462106077,
"learning_rate": 4.621761658031089e-06,
"loss": 0.3015,
"step": 850
},
{
"epoch": 0.7968313140726934,
"grad_norm": 0.19008199962840902,
"learning_rate": 4.518134715025907e-06,
"loss": 0.3087,
"step": 855
},
{
"epoch": 0.8014911463187325,
"grad_norm": 0.1744284628031719,
"learning_rate": 4.414507772020726e-06,
"loss": 0.3146,
"step": 860
},
{
"epoch": 0.8061509785647717,
"grad_norm": 0.1764675363887709,
"learning_rate": 4.310880829015544e-06,
"loss": 0.3028,
"step": 865
},
{
"epoch": 0.8108108108108109,
"grad_norm": 0.18723433018807362,
"learning_rate": 4.207253886010363e-06,
"loss": 0.3082,
"step": 870
},
{
"epoch": 0.8154706430568499,
"grad_norm": 0.16928853740679736,
"learning_rate": 4.103626943005182e-06,
"loss": 0.3059,
"step": 875
},
{
"epoch": 0.8201304753028891,
"grad_norm": 0.17841937199548402,
"learning_rate": 4.000000000000001e-06,
"loss": 0.3042,
"step": 880
},
{
"epoch": 0.8247903075489282,
"grad_norm": 0.1656726857328673,
"learning_rate": 3.896373056994819e-06,
"loss": 0.3079,
"step": 885
},
{
"epoch": 0.8294501397949674,
"grad_norm": 0.17487630016211303,
"learning_rate": 3.7927461139896377e-06,
"loss": 0.3027,
"step": 890
},
{
"epoch": 0.8341099720410066,
"grad_norm": 0.16843425617538177,
"learning_rate": 3.6891191709844567e-06,
"loss": 0.3172,
"step": 895
},
{
"epoch": 0.8387698042870456,
"grad_norm": 0.17177773487516515,
"learning_rate": 3.5854922279792748e-06,
"loss": 0.3055,
"step": 900
},
{
"epoch": 0.8434296365330848,
"grad_norm": 0.16684455749445157,
"learning_rate": 3.4818652849740937e-06,
"loss": 0.3031,
"step": 905
},
{
"epoch": 0.848089468779124,
"grad_norm": 0.171250184663666,
"learning_rate": 3.3782383419689123e-06,
"loss": 0.3068,
"step": 910
},
{
"epoch": 0.8527493010251631,
"grad_norm": 0.1904755654711732,
"learning_rate": 3.274611398963731e-06,
"loss": 0.3091,
"step": 915
},
{
"epoch": 0.8574091332712023,
"grad_norm": 0.17322382387681076,
"learning_rate": 3.1709844559585493e-06,
"loss": 0.3029,
"step": 920
},
{
"epoch": 0.8620689655172413,
"grad_norm": 0.17587838098911934,
"learning_rate": 3.0673575129533683e-06,
"loss": 0.2978,
"step": 925
},
{
"epoch": 0.8667287977632805,
"grad_norm": 0.17389743789103038,
"learning_rate": 2.963730569948187e-06,
"loss": 0.2979,
"step": 930
},
{
"epoch": 0.8713886300093197,
"grad_norm": 0.18137733240826867,
"learning_rate": 2.8601036269430053e-06,
"loss": 0.3027,
"step": 935
},
{
"epoch": 0.8760484622553588,
"grad_norm": 0.17290656767359902,
"learning_rate": 2.7564766839378243e-06,
"loss": 0.3054,
"step": 940
},
{
"epoch": 0.880708294501398,
"grad_norm": 0.17569375365058235,
"learning_rate": 2.6528497409326424e-06,
"loss": 0.3023,
"step": 945
},
{
"epoch": 0.8853681267474371,
"grad_norm": 0.1727950452551694,
"learning_rate": 2.5492227979274614e-06,
"loss": 0.3137,
"step": 950
},
{
"epoch": 0.8900279589934762,
"grad_norm": 0.1740200974029668,
"learning_rate": 2.44559585492228e-06,
"loss": 0.307,
"step": 955
},
{
"epoch": 0.8946877912395154,
"grad_norm": 0.1732738746462953,
"learning_rate": 2.3419689119170984e-06,
"loss": 0.3111,
"step": 960
},
{
"epoch": 0.8993476234855545,
"grad_norm": 0.18648726628836773,
"learning_rate": 2.2383419689119174e-06,
"loss": 0.3069,
"step": 965
},
{
"epoch": 0.9040074557315937,
"grad_norm": 0.1697331695842795,
"learning_rate": 2.134715025906736e-06,
"loss": 0.297,
"step": 970
},
{
"epoch": 0.9086672879776329,
"grad_norm": 0.16507665028070173,
"learning_rate": 2.0310880829015544e-06,
"loss": 0.3125,
"step": 975
},
{
"epoch": 0.9133271202236719,
"grad_norm": 0.17494951036076584,
"learning_rate": 1.9274611398963734e-06,
"loss": 0.3041,
"step": 980
},
{
"epoch": 0.9179869524697111,
"grad_norm": 0.17876267200484872,
"learning_rate": 1.823834196891192e-06,
"loss": 0.3024,
"step": 985
},
{
"epoch": 0.9226467847157502,
"grad_norm": 0.17300125358384327,
"learning_rate": 1.7202072538860104e-06,
"loss": 0.3007,
"step": 990
},
{
"epoch": 0.9273066169617894,
"grad_norm": 0.1739041985560771,
"learning_rate": 1.6165803108808292e-06,
"loss": 0.3039,
"step": 995
},
{
"epoch": 0.9319664492078286,
"grad_norm": 0.17113300633163106,
"learning_rate": 1.5129533678756477e-06,
"loss": 0.3035,
"step": 1000
},
{
"epoch": 0.9366262814538676,
"grad_norm": 0.16281913618369226,
"learning_rate": 1.4093264248704663e-06,
"loss": 0.3071,
"step": 1005
},
{
"epoch": 0.9412861136999068,
"grad_norm": 0.17924475619300242,
"learning_rate": 1.3056994818652852e-06,
"loss": 0.3058,
"step": 1010
},
{
"epoch": 0.9459459459459459,
"grad_norm": 0.17688052898291365,
"learning_rate": 1.2020725388601037e-06,
"loss": 0.3087,
"step": 1015
},
{
"epoch": 0.9506057781919851,
"grad_norm": 0.16179921554930488,
"learning_rate": 1.0984455958549225e-06,
"loss": 0.3044,
"step": 1020
},
{
"epoch": 0.9552656104380243,
"grad_norm": 0.17553736079048324,
"learning_rate": 9.94818652849741e-07,
"loss": 0.3129,
"step": 1025
},
{
"epoch": 0.9599254426840633,
"grad_norm": 0.1704446487232818,
"learning_rate": 8.911917098445596e-07,
"loss": 0.3046,
"step": 1030
},
{
"epoch": 0.9645852749301025,
"grad_norm": 0.169056664565789,
"learning_rate": 7.875647668393784e-07,
"loss": 0.3072,
"step": 1035
},
{
"epoch": 0.9692451071761417,
"grad_norm": 0.16668251867193293,
"learning_rate": 6.839378238341969e-07,
"loss": 0.3036,
"step": 1040
},
{
"epoch": 0.9739049394221808,
"grad_norm": 0.1639424483827266,
"learning_rate": 5.803108808290156e-07,
"loss": 0.2989,
"step": 1045
},
{
"epoch": 0.97856477166822,
"grad_norm": 0.17474417966467756,
"learning_rate": 4.7668393782383424e-07,
"loss": 0.3187,
"step": 1050
},
{
"epoch": 0.983224603914259,
"grad_norm": 0.17412618700034416,
"learning_rate": 3.730569948186528e-07,
"loss": 0.2996,
"step": 1055
},
{
"epoch": 0.9878844361602982,
"grad_norm": 0.16453567016761128,
"learning_rate": 2.694300518134715e-07,
"loss": 0.3028,
"step": 1060
},
{
"epoch": 0.9925442684063374,
"grad_norm": 0.1609387610584271,
"learning_rate": 1.6580310880829015e-07,
"loss": 0.3061,
"step": 1065
},
{
"epoch": 0.9972041006523765,
"grad_norm": 0.1665161978210062,
"learning_rate": 6.217616580310881e-08,
"loss": 0.303,
"step": 1070
},
{
"epoch": 1.0,
"step": 1073,
"total_flos": 9.186429923093381e+17,
"train_loss": 0.30834408108585926,
"train_runtime": 35203.5942,
"train_samples_per_second": 0.488,
"train_steps_per_second": 0.03
}
],
"logging_steps": 5,
"max_steps": 1073,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.186429923093381e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
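
A minimal sketch of how the log_history above can be read back for inspection. This assumes the JSON is saved locally as trainer_state.json (the path and variable names are illustrative, not part of the checkpoint); it uses only Python's standard json module to summarize the logged training loss.

import json

# Hypothetical local path; adjust to wherever this file is stored.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each logging entry (every 5 steps here) carries step, epoch, loss,
# learning_rate, and grad_norm; the final entry holds the run summary.
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

print(f"logged points: {len(steps)}")
print(f"first/last logged loss: {losses[0]:.4f} -> {losses[-1]:.4f}")
print(f"reported train_loss: {state['log_history'][-1].get('train_loss')}")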