|
{ |
|
"best_metric": 0.4674791693687439, |
|
"best_model_checkpoint": "saves/sycophancy/Llama-3.1-8B-Instruct/dpo-mistral-1000/train/checkpoint-250", |
|
"epoch": 9.977728285077951, |
|
"eval_steps": 50, |
|
"global_step": 560, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.17817371937639198, |
|
"grad_norm": 1.6836061477661133, |
|
"learning_rate": 8.928571428571429e-07, |
|
"logits/chosen": -0.34122365713119507, |
|
"logits/rejected": -0.392149418592453, |
|
"logps/chosen": -22.754064559936523, |
|
"logps/rejected": -24.806787490844727, |
|
"loss": 0.6925, |
|
"rewards/accuracies": 0.4000000059604645, |
|
"rewards/chosen": -0.00038957837386988103, |
|
"rewards/margins": 0.0013258380349725485, |
|
"rewards/rejected": -0.0017154163215309381, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.35634743875278396, |
|
"grad_norm": 1.5660648345947266, |
|
"learning_rate": 1.7857142857142859e-06, |
|
"logits/chosen": -0.3305678367614746, |
|
"logits/rejected": -0.36542031168937683, |
|
"logps/chosen": -22.68667221069336, |
|
"logps/rejected": -24.383272171020508, |
|
"loss": 0.694, |
|
"rewards/accuracies": 0.46875, |
|
"rewards/chosen": -0.0019774259999394417, |
|
"rewards/margins": -0.0016633094055578113, |
|
"rewards/rejected": -0.0003141165361739695, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.534521158129176, |
|
"grad_norm": 1.7988139390945435, |
|
"learning_rate": 2.6785714285714285e-06, |
|
"logits/chosen": -0.33457204699516296, |
|
"logits/rejected": -0.3815780282020569, |
|
"logps/chosen": -23.1612548828125, |
|
"logps/rejected": -24.836502075195312, |
|
"loss": 0.6913, |
|
"rewards/accuracies": 0.6000000238418579, |
|
"rewards/chosen": 0.004869468975812197, |
|
"rewards/margins": 0.0037120466586202383, |
|
"rewards/rejected": 0.001157421967945993, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.7126948775055679, |
|
"grad_norm": 1.3268821239471436, |
|
"learning_rate": 3.5714285714285718e-06, |
|
"logits/chosen": -0.3601827919483185, |
|
"logits/rejected": -0.39572954177856445, |
|
"logps/chosen": -22.90369987487793, |
|
"logps/rejected": -24.570892333984375, |
|
"loss": 0.6885, |
|
"rewards/accuracies": 0.6312500238418579, |
|
"rewards/chosen": 0.01631110906600952, |
|
"rewards/margins": 0.009484974667429924, |
|
"rewards/rejected": 0.006826136261224747, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.89086859688196, |
|
"grad_norm": 1.776597261428833, |
|
"learning_rate": 4.464285714285715e-06, |
|
"logits/chosen": -0.3659666180610657, |
|
"logits/rejected": -0.39071187376976013, |
|
"logps/chosen": -23.908523559570312, |
|
"logps/rejected": -24.262924194335938, |
|
"loss": 0.6891, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.026137981563806534, |
|
"rewards/margins": 0.008756262250244617, |
|
"rewards/rejected": 0.017381716519594193, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.89086859688196, |
|
"eval_logits/chosen": -0.32067814469337463, |
|
"eval_logits/rejected": -0.36896541714668274, |
|
"eval_logps/chosen": -22.66472053527832, |
|
"eval_logps/rejected": -24.953529357910156, |
|
"eval_loss": 0.6832554340362549, |
|
"eval_rewards/accuracies": 0.6200000047683716, |
|
"eval_rewards/chosen": 0.048666905611753464, |
|
"eval_rewards/margins": 0.02107813023030758, |
|
"eval_rewards/rejected": 0.027588771656155586, |
|
"eval_runtime": 9.7251, |
|
"eval_samples_per_second": 10.283, |
|
"eval_steps_per_second": 5.141, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 1.069042316258352, |
|
"grad_norm": 1.7856236696243286, |
|
"learning_rate": 4.999222955002041e-06, |
|
"logits/chosen": -0.35137540102005005, |
|
"logits/rejected": -0.3762962818145752, |
|
"logps/chosen": -22.667428970336914, |
|
"logps/rejected": -24.934234619140625, |
|
"loss": 0.675, |
|
"rewards/accuracies": 0.612500011920929, |
|
"rewards/chosen": 0.06478282064199448, |
|
"rewards/margins": 0.0387502983212471, |
|
"rewards/rejected": 0.026032526046037674, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.247216035634744, |
|
"grad_norm": 2.1098411083221436, |
|
"learning_rate": 4.990486745229364e-06, |
|
"logits/chosen": -0.34582623839378357, |
|
"logits/rejected": -0.3823074400424957, |
|
"logps/chosen": -21.945472717285156, |
|
"logps/rejected": -23.575443267822266, |
|
"loss": 0.6683, |
|
"rewards/accuracies": 0.6500000357627869, |
|
"rewards/chosen": 0.1263795644044876, |
|
"rewards/margins": 0.05839619040489197, |
|
"rewards/rejected": 0.06798337399959564, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 1.4253897550111359, |
|
"grad_norm": 2.4574801921844482, |
|
"learning_rate": 4.9720770655628216e-06, |
|
"logits/chosen": -0.3288664221763611, |
|
"logits/rejected": -0.3651907444000244, |
|
"logps/chosen": -21.0363712310791, |
|
"logps/rejected": -24.36761474609375, |
|
"loss": 0.6223, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": 0.26030755043029785, |
|
"rewards/margins": 0.1713070124387741, |
|
"rewards/rejected": 0.08900053799152374, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 1.6035634743875278, |
|
"grad_norm": 2.382906675338745, |
|
"learning_rate": 4.944065422298262e-06, |
|
"logits/chosen": -0.3467506468296051, |
|
"logits/rejected": -0.38536104559898376, |
|
"logps/chosen": -19.177209854125977, |
|
"logps/rejected": -23.13625144958496, |
|
"loss": 0.6035, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.41219550371170044, |
|
"rewards/margins": 0.25021034479141235, |
|
"rewards/rejected": 0.1619851440191269, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 1.7817371937639197, |
|
"grad_norm": 2.123657703399658, |
|
"learning_rate": 4.90656061737503e-06, |
|
"logits/chosen": -0.3112158477306366, |
|
"logits/rejected": -0.355999618768692, |
|
"logps/chosen": -17.31022834777832, |
|
"logps/rejected": -22.826955795288086, |
|
"loss": 0.5716, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": 0.5237475633621216, |
|
"rewards/margins": 0.35302406549453735, |
|
"rewards/rejected": 0.17072349786758423, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.7817371937639197, |
|
"eval_logits/chosen": -0.2933846712112427, |
|
"eval_logits/rejected": -0.34558260440826416, |
|
"eval_logps/chosen": -17.070594787597656, |
|
"eval_logps/rejected": -23.3165225982666, |
|
"eval_loss": 0.5618187785148621, |
|
"eval_rewards/accuracies": 0.699999988079071, |
|
"eval_rewards/chosen": 0.6080796718597412, |
|
"eval_rewards/margins": 0.4167901575565338, |
|
"eval_rewards/rejected": 0.191289484500885, |
|
"eval_runtime": 9.6952, |
|
"eval_samples_per_second": 10.314, |
|
"eval_steps_per_second": 5.157, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.9599109131403119, |
|
"grad_norm": 3.316772699356079, |
|
"learning_rate": 4.859708325770919e-06, |
|
"logits/chosen": -0.3041900396347046, |
|
"logits/rejected": -0.35448360443115234, |
|
"logps/chosen": -16.122028350830078, |
|
"logps/rejected": -22.659238815307617, |
|
"loss": 0.5299, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.7380782961845398, |
|
"rewards/margins": 0.5233559608459473, |
|
"rewards/rejected": 0.21472235023975372, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 2.138084632516704, |
|
"grad_norm": 2.7125298976898193, |
|
"learning_rate": 4.80369052967602e-06, |
|
"logits/chosen": -0.2812982499599457, |
|
"logits/rejected": -0.3268232047557831, |
|
"logps/chosen": -15.398541450500488, |
|
"logps/rejected": -23.535791397094727, |
|
"loss": 0.4948, |
|
"rewards/accuracies": 0.7750000357627869, |
|
"rewards/chosen": 0.8048359155654907, |
|
"rewards/margins": 0.660244882106781, |
|
"rewards/rejected": 0.14459095895290375, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 2.316258351893096, |
|
"grad_norm": 2.149165391921997, |
|
"learning_rate": 4.7387248116432524e-06, |
|
"logits/chosen": -0.24634817242622375, |
|
"logits/rejected": -0.313901424407959, |
|
"logps/chosen": -13.057968139648438, |
|
"logps/rejected": -23.697710037231445, |
|
"loss": 0.4275, |
|
"rewards/accuracies": 0.7750000357627869, |
|
"rewards/chosen": 1.0445865392684937, |
|
"rewards/margins": 0.944391667842865, |
|
"rewards/rejected": 0.10019483417272568, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 2.494432071269488, |
|
"grad_norm": 2.2673017978668213, |
|
"learning_rate": 4.665063509461098e-06, |
|
"logits/chosen": -0.2440420240163803, |
|
"logits/rejected": -0.29979586601257324, |
|
"logps/chosen": -13.353204727172852, |
|
"logps/rejected": -24.62551498413086, |
|
"loss": 0.4575, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": 0.9695870280265808, |
|
"rewards/margins": 0.9478777050971985, |
|
"rewards/rejected": 0.021709401160478592, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 2.6726057906458798, |
|
"grad_norm": 2.5609817504882812, |
|
"learning_rate": 4.5829927360311224e-06, |
|
"logits/chosen": -0.21621806919574738, |
|
"logits/rejected": -0.27183279395103455, |
|
"logps/chosen": -12.837681770324707, |
|
"logps/rejected": -24.354141235351562, |
|
"loss": 0.4581, |
|
"rewards/accuracies": 0.7750000357627869, |
|
"rewards/chosen": 0.9683197140693665, |
|
"rewards/margins": 0.9282311797142029, |
|
"rewards/rejected": 0.04008860886096954, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 2.6726057906458798, |
|
"eval_logits/chosen": -0.2092563956975937, |
|
"eval_logits/rejected": -0.2738884389400482, |
|
"eval_logps/chosen": -13.789220809936523, |
|
"eval_logps/rejected": -25.666584014892578, |
|
"eval_loss": 0.4760795533657074, |
|
"eval_rewards/accuracies": 0.7599999904632568, |
|
"eval_rewards/chosen": 0.9362172484397888, |
|
"eval_rewards/margins": 0.9799338579177856, |
|
"eval_rewards/rejected": -0.04371662810444832, |
|
"eval_runtime": 9.724, |
|
"eval_samples_per_second": 10.284, |
|
"eval_steps_per_second": 5.142, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 2.8507795100222717, |
|
"grad_norm": 6.471484661102295, |
|
"learning_rate": 4.492831268057307e-06, |
|
"logits/chosen": -0.25587916374206543, |
|
"logits/rejected": -0.3184296786785126, |
|
"logps/chosen": -12.521145820617676, |
|
"logps/rejected": -25.58576011657715, |
|
"loss": 0.406, |
|
"rewards/accuracies": 0.7750000357627869, |
|
"rewards/chosen": 1.077256202697754, |
|
"rewards/margins": 1.1716722249984741, |
|
"rewards/rejected": -0.09441610425710678, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 3.0289532293986636, |
|
"grad_norm": 6.336172580718994, |
|
"learning_rate": 4.394929307863633e-06, |
|
"logits/chosen": -0.23687370121479034, |
|
"logits/rejected": -0.2813915014266968, |
|
"logps/chosen": -14.616838455200195, |
|
"logps/rejected": -26.1534366607666, |
|
"loss": 0.452, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.8590637445449829, |
|
"rewards/margins": 1.0506114959716797, |
|
"rewards/rejected": -0.19154782593250275, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 3.2071269487750556, |
|
"grad_norm": 11.07475471496582, |
|
"learning_rate": 4.289667123149296e-06, |
|
"logits/chosen": -0.2059316188097, |
|
"logits/rejected": -0.2756357192993164, |
|
"logps/chosen": -12.392863273620605, |
|
"logps/rejected": -26.90511131286621, |
|
"loss": 0.4051, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": 1.0431259870529175, |
|
"rewards/margins": 1.3048280477523804, |
|
"rewards/rejected": -0.26170212030410767, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 3.3853006681514475, |
|
"grad_norm": 6.2247819900512695, |
|
"learning_rate": 4.177453569964925e-06, |
|
"logits/chosen": -0.17799881100654602, |
|
"logits/rejected": -0.24945221841335297, |
|
"logps/chosen": -12.190529823303223, |
|
"logps/rejected": -27.486194610595703, |
|
"loss": 0.3954, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 1.0829803943634033, |
|
"rewards/margins": 1.3739397525787354, |
|
"rewards/rejected": -0.29095926880836487, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 3.5634743875278394, |
|
"grad_norm": 2.581493377685547, |
|
"learning_rate": 4.058724504646834e-06, |
|
"logits/chosen": -0.17182210087776184, |
|
"logits/rejected": -0.23636431992053986, |
|
"logps/chosen": -12.724395751953125, |
|
"logps/rejected": -27.663299560546875, |
|
"loss": 0.4032, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": 1.032086968421936, |
|
"rewards/margins": 1.3668633699417114, |
|
"rewards/rejected": -0.3347764015197754, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 3.5634743875278394, |
|
"eval_logits/chosen": -0.16310954093933105, |
|
"eval_logits/rejected": -0.23061738908290863, |
|
"eval_logps/chosen": -13.548603057861328, |
|
"eval_logps/rejected": -28.07323455810547, |
|
"eval_loss": 0.4708513021469116, |
|
"eval_rewards/accuracies": 0.8100000023841858, |
|
"eval_rewards/chosen": 0.9602789282798767, |
|
"eval_rewards/margins": 1.244660496711731, |
|
"eval_rewards/rejected": -0.28438156843185425, |
|
"eval_runtime": 9.734, |
|
"eval_samples_per_second": 10.273, |
|
"eval_steps_per_second": 5.137, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 3.7416481069042318, |
|
"grad_norm": 2.387537717819214, |
|
"learning_rate": 3.933941090877615e-06, |
|
"logits/chosen": -0.15226894617080688, |
|
"logits/rejected": -0.23664240539073944, |
|
"logps/chosen": -12.170495986938477, |
|
"logps/rejected": -28.98239517211914, |
|
"loss": 0.3887, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 1.1489580869674683, |
|
"rewards/margins": 1.5401805639266968, |
|
"rewards/rejected": -0.39122244715690613, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 3.9198218262806237, |
|
"grad_norm": 7.2560529708862305, |
|
"learning_rate": 3.8035880084487454e-06, |
|
"logits/chosen": -0.19596333801746368, |
|
"logits/rejected": -0.2662833034992218, |
|
"logps/chosen": -13.05119514465332, |
|
"logps/rejected": -29.070032119750977, |
|
"loss": 0.4093, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 1.0079821348190308, |
|
"rewards/margins": 1.3972289562225342, |
|
"rewards/rejected": -0.38924697041511536, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 4.097995545657016, |
|
"grad_norm": 8.643023490905762, |
|
"learning_rate": 3.6681715706826555e-06, |
|
"logits/chosen": -0.1443440169095993, |
|
"logits/rejected": -0.2094365656375885, |
|
"logps/chosen": -12.210596084594727, |
|
"logps/rejected": -28.835458755493164, |
|
"loss": 0.3526, |
|
"rewards/accuracies": 0.831250011920929, |
|
"rewards/chosen": 1.1435935497283936, |
|
"rewards/margins": 1.6025713682174683, |
|
"rewards/rejected": -0.45897769927978516, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 4.276169265033408, |
|
"grad_norm": 8.177175521850586, |
|
"learning_rate": 3.5282177578265295e-06, |
|
"logits/chosen": -0.16080088913440704, |
|
"logits/rejected": -0.22377625107765198, |
|
"logps/chosen": -12.389388084411621, |
|
"logps/rejected": -29.504175186157227, |
|
"loss": 0.3327, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.1701444387435913, |
|
"rewards/margins": 1.6151951551437378, |
|
"rewards/rejected": -0.44505080580711365, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 4.4543429844097995, |
|
"grad_norm": 3.6266558170318604, |
|
"learning_rate": 3.384270174056454e-06, |
|
"logits/chosen": -0.17858387529850006, |
|
"logits/rejected": -0.23888497054576874, |
|
"logps/chosen": -12.181685447692871, |
|
"logps/rejected": -29.275564193725586, |
|
"loss": 0.3836, |
|
"rewards/accuracies": 0.8187500238418579, |
|
"rewards/chosen": 1.0620548725128174, |
|
"rewards/margins": 1.5715227127075195, |
|
"rewards/rejected": -0.5094677805900574, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 4.4543429844097995, |
|
"eval_logits/chosen": -0.13963328301906586, |
|
"eval_logits/rejected": -0.2079545557498932, |
|
"eval_logps/chosen": -13.248819351196289, |
|
"eval_logps/rejected": -29.226905822753906, |
|
"eval_loss": 0.4674791693687439, |
|
"eval_rewards/accuracies": 0.7899999618530273, |
|
"eval_rewards/chosen": 0.9902573823928833, |
|
"eval_rewards/margins": 1.390006184577942, |
|
"eval_rewards/rejected": -0.3997488021850586, |
|
"eval_runtime": 9.715, |
|
"eval_samples_per_second": 10.293, |
|
"eval_steps_per_second": 5.147, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 4.632516703786192, |
|
"grad_norm": 4.119279384613037, |
|
"learning_rate": 3.236887936027261e-06, |
|
"logits/chosen": -0.12825655937194824, |
|
"logits/rejected": -0.19981206953525543, |
|
"logps/chosen": -11.074287414550781, |
|
"logps/rejected": -31.082468032836914, |
|
"loss": 0.3368, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.162680983543396, |
|
"rewards/margins": 1.7504284381866455, |
|
"rewards/rejected": -0.5877474546432495, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 4.810690423162583, |
|
"grad_norm": 6.509561061859131, |
|
"learning_rate": 3.0866435011692884e-06, |
|
"logits/chosen": -0.15560267865657806, |
|
"logits/rejected": -0.2208879441022873, |
|
"logps/chosen": -11.37994384765625, |
|
"logps/rejected": -31.601425170898438, |
|
"loss": 0.338, |
|
"rewards/accuracies": 0.8812500238418579, |
|
"rewards/chosen": 1.1841315031051636, |
|
"rewards/margins": 1.8628618717193604, |
|
"rewards/rejected": -0.6787301898002625, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 4.988864142538976, |
|
"grad_norm": 4.498559951782227, |
|
"learning_rate": 2.9341204441673267e-06, |
|
"logits/chosen": -0.1727777123451233, |
|
"logits/rejected": -0.2310466766357422, |
|
"logps/chosen": -13.475479125976562, |
|
"logps/rejected": -27.789844512939453, |
|
"loss": 0.5027, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": 0.9239163398742676, |
|
"rewards/margins": 1.258786678314209, |
|
"rewards/rejected": -0.33487027883529663, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 5.167037861915367, |
|
"grad_norm": 3.030965805053711, |
|
"learning_rate": 2.7799111902582697e-06, |
|
"logits/chosen": -0.15166734158992767, |
|
"logits/rejected": -0.22125867009162903, |
|
"logps/chosen": -12.045249938964844, |
|
"logps/rejected": -31.249042510986328, |
|
"loss": 0.32, |
|
"rewards/accuracies": 0.856249988079071, |
|
"rewards/chosen": 1.147273063659668, |
|
"rewards/margins": 1.7833982706069946, |
|
"rewards/rejected": -0.6361253261566162, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 5.3452115812917596, |
|
"grad_norm": 7.184595584869385, |
|
"learning_rate": 2.624614714151743e-06, |
|
"logits/chosen": -0.0994877815246582, |
|
"logits/rejected": -0.17965565621852875, |
|
"logps/chosen": -12.375929832458496, |
|
"logps/rejected": -31.211719512939453, |
|
"loss": 0.3588, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 1.1141376495361328, |
|
"rewards/margins": 1.7681716680526733, |
|
"rewards/rejected": -0.6540343165397644, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 5.3452115812917596, |
|
"eval_logits/chosen": -0.1254928857088089, |
|
"eval_logits/rejected": -0.19310244917869568, |
|
"eval_logps/chosen": -13.40658187866211, |
|
"eval_logps/rejected": -29.754491806030273, |
|
"eval_loss": 0.4751954674720764, |
|
"eval_rewards/accuracies": 0.7699999809265137, |
|
"eval_rewards/chosen": 0.9744812846183777, |
|
"eval_rewards/margins": 1.4269884824752808, |
|
"eval_rewards/rejected": -0.452507346868515, |
|
"eval_runtime": 9.7087, |
|
"eval_samples_per_second": 10.3, |
|
"eval_steps_per_second": 5.15, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 5.523385300668151, |
|
"grad_norm": 3.5659422874450684, |
|
"learning_rate": 2.4688342135114625e-06, |
|
"logits/chosen": -0.13226279616355896, |
|
"logits/rejected": -0.21120555698871613, |
|
"logps/chosen": -11.256922721862793, |
|
"logps/rejected": -30.438766479492188, |
|
"loss": 0.3473, |
|
"rewards/accuracies": 0.831250011920929, |
|
"rewards/chosen": 1.2133493423461914, |
|
"rewards/margins": 1.790173888206482, |
|
"rewards/rejected": -0.5768246054649353, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 5.701559020044543, |
|
"grad_norm": 1.8460242748260498, |
|
"learning_rate": 2.3131747660339396e-06, |
|
"logits/chosen": -0.14635451138019562, |
|
"logits/rejected": -0.19989195466041565, |
|
"logps/chosen": -12.754526138305664, |
|
"logps/rejected": -30.52984619140625, |
|
"loss": 0.3378, |
|
"rewards/accuracies": 0.856249988079071, |
|
"rewards/chosen": 1.090704321861267, |
|
"rewards/margins": 1.7227219343185425, |
|
"rewards/rejected": -0.6320176124572754, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 5.879732739420936, |
|
"grad_norm": 5.234955310821533, |
|
"learning_rate": 2.158240979224817e-06, |
|
"logits/chosen": -0.1211402416229248, |
|
"logits/rejected": -0.19876351952552795, |
|
"logps/chosen": -10.352498054504395, |
|
"logps/rejected": -31.713010787963867, |
|
"loss": 0.3241, |
|
"rewards/accuracies": 0.8812500238418579, |
|
"rewards/chosen": 1.1969298124313354, |
|
"rewards/margins": 1.8751522302627563, |
|
"rewards/rejected": -0.6782223582267761, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 6.057906458797327, |
|
"grad_norm": 4.994526386260986, |
|
"learning_rate": 2.004634642001507e-06, |
|
"logits/chosen": -0.13834324479103088, |
|
"logits/rejected": -0.2308368682861328, |
|
"logps/chosen": -11.07118034362793, |
|
"logps/rejected": -31.32500648498535, |
|
"loss": 0.3868, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 1.1334035396575928, |
|
"rewards/margins": 1.7996925115585327, |
|
"rewards/rejected": -0.6662889719009399, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 6.23608017817372, |
|
"grad_norm": 3.519150733947754, |
|
"learning_rate": 1.852952387243698e-06, |
|
"logits/chosen": -0.1219867691397667, |
|
"logits/rejected": -0.19822999835014343, |
|
"logps/chosen": -10.62753677368164, |
|
"logps/rejected": -32.93171310424805, |
|
"loss": 0.2861, |
|
"rewards/accuracies": 0.893750011920929, |
|
"rewards/chosen": 1.1698468923568726, |
|
"rewards/margins": 2.0219147205352783, |
|
"rewards/rejected": -0.8520679473876953, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 6.23608017817372, |
|
"eval_logits/chosen": -0.11021164059638977, |
|
"eval_logits/rejected": -0.17845386266708374, |
|
"eval_logps/chosen": -13.759096145629883, |
|
"eval_logps/rejected": -30.731969833374023, |
|
"eval_loss": 0.4811996519565582, |
|
"eval_rewards/accuracies": 0.7699999809265137, |
|
"eval_rewards/chosen": 0.9392297267913818, |
|
"eval_rewards/margins": 1.4894847869873047, |
|
"eval_rewards/rejected": -0.5502550601959229, |
|
"eval_runtime": 9.6948, |
|
"eval_samples_per_second": 10.315, |
|
"eval_steps_per_second": 5.157, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 6.414253897550111, |
|
"grad_norm": 2.1364710330963135, |
|
"learning_rate": 1.7037833743707892e-06, |
|
"logits/chosen": -0.13026945292949677, |
|
"logits/rejected": -0.2250211238861084, |
|
"logps/chosen": -11.657172203063965, |
|
"logps/rejected": -31.794097900390625, |
|
"loss": 0.3096, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.1625641584396362, |
|
"rewards/margins": 1.8979129791259766, |
|
"rewards/rejected": -0.7353487610816956, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 6.5924276169265035, |
|
"grad_norm": 6.27597713470459, |
|
"learning_rate": 1.5577070009474872e-06, |
|
"logits/chosen": -0.08007471263408661, |
|
"logits/rejected": -0.14471502602100372, |
|
"logps/chosen": -12.703398704528809, |
|
"logps/rejected": -31.92864418029785, |
|
"loss": 0.3395, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.0662773847579956, |
|
"rewards/margins": 1.84355628490448, |
|
"rewards/rejected": -0.7772787809371948, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 6.770601336302895, |
|
"grad_norm": 1.5935697555541992, |
|
"learning_rate": 1.415290652206105e-06, |
|
"logits/chosen": -0.15049786865711212, |
|
"logits/rejected": -0.21002641320228577, |
|
"logps/chosen": -12.739409446716309, |
|
"logps/rejected": -32.949214935302734, |
|
"loss": 0.3588, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.0807316303253174, |
|
"rewards/margins": 1.8172613382339478, |
|
"rewards/rejected": -0.7365297675132751, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 6.948775055679287, |
|
"grad_norm": 12.438933372497559, |
|
"learning_rate": 1.2770874972267777e-06, |
|
"logits/chosen": -0.10974359512329102, |
|
"logits/rejected": -0.1568942815065384, |
|
"logps/chosen": -12.335737228393555, |
|
"logps/rejected": -33.184993743896484, |
|
"loss": 0.3357, |
|
"rewards/accuracies": 0.8687500357627869, |
|
"rewards/chosen": 1.1363105773925781, |
|
"rewards/margins": 1.9653358459472656, |
|
"rewards/rejected": -0.8290252685546875, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 7.12694877505568, |
|
"grad_norm": 2.4859490394592285, |
|
"learning_rate": 1.1436343403356019e-06, |
|
"logits/chosen": -0.11985152959823608, |
|
"logits/rejected": -0.18024906516075134, |
|
"logps/chosen": -13.101457595825195, |
|
"logps/rejected": -32.767181396484375, |
|
"loss": 0.3662, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 0.9988664984703064, |
|
"rewards/margins": 1.8207927942276, |
|
"rewards/rejected": -0.8219264149665833, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 7.12694877505568, |
|
"eval_logits/chosen": -0.099003367125988, |
|
"eval_logits/rejected": -0.16790008544921875, |
|
"eval_logps/chosen": -13.986225128173828, |
|
"eval_logps/rejected": -31.5858154296875, |
|
"eval_loss": 0.4867512881755829, |
|
"eval_rewards/accuracies": 0.7699999809265137, |
|
"eval_rewards/chosen": 0.9165167212486267, |
|
"eval_rewards/margins": 1.5521563291549683, |
|
"eval_rewards/rejected": -0.6356395483016968, |
|
"eval_runtime": 9.7105, |
|
"eval_samples_per_second": 10.298, |
|
"eval_steps_per_second": 5.149, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 7.305122494432071, |
|
"grad_norm": 4.4709153175354, |
|
"learning_rate": 1.0154495360662464e-06, |
|
"logits/chosen": -0.12129449844360352, |
|
"logits/rejected": -0.18251024186611176, |
|
"logps/chosen": -13.18463134765625, |
|
"logps/rejected": -32.92716598510742, |
|
"loss": 0.366, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.057719349861145, |
|
"rewards/margins": 1.885604739189148, |
|
"rewards/rejected": -0.8278852701187134, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 7.4832962138084635, |
|
"grad_norm": 3.132544994354248, |
|
"learning_rate": 8.930309757836517e-07, |
|
"logits/chosen": -0.10683136433362961, |
|
"logits/rejected": -0.1750011295080185, |
|
"logps/chosen": -11.23306941986084, |
|
"logps/rejected": -32.81039047241211, |
|
"loss": 0.3044, |
|
"rewards/accuracies": 0.8812500238418579, |
|
"rewards/chosen": 1.1720637083053589, |
|
"rewards/margins": 2.0555508136749268, |
|
"rewards/rejected": -0.8834872245788574, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 7.661469933184855, |
|
"grad_norm": 6.80822229385376, |
|
"learning_rate": 7.768541537901325e-07, |
|
"logits/chosen": -0.11390836536884308, |
|
"logits/rejected": -0.2062007039785385, |
|
"logps/chosen": -11.768353462219238, |
|
"logps/rejected": -33.399192810058594, |
|
"loss": 0.3337, |
|
"rewards/accuracies": 0.8687500357627869, |
|
"rewards/chosen": 1.1444917917251587, |
|
"rewards/margins": 1.9670883417129517, |
|
"rewards/rejected": -0.8225963711738586, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 7.839643652561247, |
|
"grad_norm": 4.160062789916992, |
|
"learning_rate": 6.673703204254348e-07, |
|
"logits/chosen": -0.1045166477560997, |
|
"logits/rejected": -0.18522079288959503, |
|
"logps/chosen": -10.534876823425293, |
|
"logps/rejected": -34.59806442260742, |
|
"loss": 0.2644, |
|
"rewards/accuracies": 0.893750011920929, |
|
"rewards/chosen": 1.2115627527236938, |
|
"rewards/margins": 2.1745359897613525, |
|
"rewards/rejected": -0.9629732370376587, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 8.017817371937639, |
|
"grad_norm": 2.6154518127441406, |
|
"learning_rate": 5.650047293344316e-07, |
|
"logits/chosen": -0.07358330488204956, |
|
"logits/rejected": -0.14352913200855255, |
|
"logps/chosen": -10.867822647094727, |
|
"logps/rejected": -33.30778884887695, |
|
"loss": 0.2822, |
|
"rewards/accuracies": 0.8687500357627869, |
|
"rewards/chosen": 1.2662678956985474, |
|
"rewards/margins": 2.097227096557617, |
|
"rewards/rejected": -0.830959141254425, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 8.017817371937639, |
|
"eval_logits/chosen": -0.09359554946422577, |
|
"eval_logits/rejected": -0.16219539940357208, |
|
"eval_logps/chosen": -14.051919937133789, |
|
"eval_logps/rejected": -31.741615295410156, |
|
"eval_loss": 0.49268820881843567, |
|
"eval_rewards/accuracies": 0.7599999904632568, |
|
"eval_rewards/chosen": 0.9099473357200623, |
|
"eval_rewards/margins": 1.5611672401428223, |
|
"eval_rewards/rejected": -0.6512197852134705, |
|
"eval_runtime": 9.7252, |
|
"eval_samples_per_second": 10.283, |
|
"eval_steps_per_second": 5.141, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 8.195991091314031, |
|
"grad_norm": 3.175278425216675, |
|
"learning_rate": 4.7015498571035877e-07, |
|
"logits/chosen": -0.08536185324192047, |
|
"logits/rejected": -0.16373853385448456, |
|
"logps/chosen": -10.688807487487793, |
|
"logps/rejected": -35.358089447021484, |
|
"loss": 0.2417, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 1.2969163656234741, |
|
"rewards/margins": 2.3196523189544678, |
|
"rewards/rejected": -1.022735834121704, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 8.374164810690424, |
|
"grad_norm": 10.432084083557129, |
|
"learning_rate": 3.831895019292897e-07, |
|
"logits/chosen": -0.11144526302814484, |
|
"logits/rejected": -0.18342572450637817, |
|
"logps/chosen": -12.896112442016602, |
|
"logps/rejected": -34.08356857299805, |
|
"loss": 0.3277, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.0476747751235962, |
|
"rewards/margins": 1.9816349744796753, |
|
"rewards/rejected": -0.9339599609375, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 8.552338530066816, |
|
"grad_norm": 4.943639278411865, |
|
"learning_rate": 3.044460665744284e-07, |
|
"logits/chosen": -0.06191817671060562, |
|
"logits/rejected": -0.13474522531032562, |
|
"logps/chosen": -11.490986824035645, |
|
"logps/rejected": -33.3411750793457, |
|
"loss": 0.3482, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.0883612632751465, |
|
"rewards/margins": 1.9929107427597046, |
|
"rewards/rejected": -0.9045494198799133, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 8.730512249443207, |
|
"grad_norm": 4.994484901428223, |
|
"learning_rate": 2.3423053240837518e-07, |
|
"logits/chosen": -0.11047738045454025, |
|
"logits/rejected": -0.1758051961660385, |
|
"logps/chosen": -12.3035306930542, |
|
"logps/rejected": -33.04331588745117, |
|
"loss": 0.3433, |
|
"rewards/accuracies": 0.831250011920929, |
|
"rewards/chosen": 1.051175594329834, |
|
"rewards/margins": 1.9459151029586792, |
|
"rewards/rejected": -0.8947394490242004, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 8.908685968819599, |
|
"grad_norm": 2.0751497745513916, |
|
"learning_rate": 1.7281562838948968e-07, |
|
"logits/chosen": -0.12217041105031967, |
|
"logits/rejected": -0.19585369527339935, |
|
"logps/chosen": -10.848115921020508, |
|
"logps/rejected": -34.68525314331055, |
|
"loss": 0.2416, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 1.2858046293258667, |
|
"rewards/margins": 2.253690481185913, |
|
"rewards/rejected": -0.9678859114646912, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 8.908685968819599, |
|
"eval_logits/chosen": -0.08983445167541504, |
|
"eval_logits/rejected": -0.15845781564712524, |
|
"eval_logps/chosen": -14.23983097076416, |
|
"eval_logps/rejected": -32.18784713745117, |
|
"eval_loss": 0.49790748953819275, |
|
"eval_rewards/accuracies": 0.7599999904632568, |
|
"eval_rewards/chosen": 0.8911561369895935, |
|
"eval_rewards/margins": 1.586998701095581, |
|
"eval_rewards/rejected": -0.6958425641059875, |
|
"eval_runtime": 9.7042, |
|
"eval_samples_per_second": 10.305, |
|
"eval_steps_per_second": 5.152, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 9.086859688195991, |
|
"grad_norm": 4.091846466064453, |
|
"learning_rate": 1.2043990034669413e-07, |
|
"logits/chosen": -0.10412635654211044, |
|
"logits/rejected": -0.20009151101112366, |
|
"logps/chosen": -11.962130546569824, |
|
"logps/rejected": -34.19890594482422, |
|
"loss": 0.3145, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 1.1489673852920532, |
|
"rewards/margins": 2.0444374084472656, |
|
"rewards/rejected": -0.8954699635505676, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 9.265033407572384, |
|
"grad_norm": 2.8685364723205566, |
|
"learning_rate": 7.730678442730539e-08, |
|
"logits/chosen": -0.13043461740016937, |
|
"logits/rejected": -0.1936294585466385, |
|
"logps/chosen": -12.278414726257324, |
|
"logps/rejected": -35.482120513916016, |
|
"loss": 0.2503, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.113724946975708, |
|
"rewards/margins": 2.1855549812316895, |
|
"rewards/rejected": -1.0718300342559814, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 9.443207126948774, |
|
"grad_norm": 8.583150863647461, |
|
"learning_rate": 4.358381691677932e-08, |
|
"logits/chosen": -0.1350499838590622, |
|
"logits/rejected": -0.1929396241903305, |
|
"logps/chosen": -12.875258445739746, |
|
"logps/rejected": -33.197200775146484, |
|
"loss": 0.3269, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 1.0666710138320923, |
|
"rewards/margins": 1.9351733922958374, |
|
"rewards/rejected": -0.8685024380683899, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 9.621380846325167, |
|
"grad_norm": 11.428977012634277, |
|
"learning_rate": 1.9401983499569843e-08, |
|
"logits/chosen": -0.1044853925704956, |
|
"logits/rejected": -0.17102347314357758, |
|
"logps/chosen": -11.800514221191406, |
|
"logps/rejected": -33.656124114990234, |
|
"loss": 0.3398, |
|
"rewards/accuracies": 0.856249988079071, |
|
"rewards/chosen": 1.1207963228225708, |
|
"rewards/margins": 1.9993568658828735, |
|
"rewards/rejected": -0.878560483455658, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 9.799554565701559, |
|
"grad_norm": 2.2846410274505615, |
|
"learning_rate": 4.855210488670381e-09, |
|
"logits/chosen": -0.08743849396705627, |
|
"logits/rejected": -0.16115520894527435, |
|
"logps/chosen": -11.04604434967041, |
|
"logps/rejected": -34.041011810302734, |
|
"loss": 0.3096, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 1.1628315448760986, |
|
"rewards/margins": 2.1141316890716553, |
|
"rewards/rejected": -0.9513001441955566, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 9.799554565701559, |
|
"eval_logits/chosen": -0.08727238327264786, |
|
"eval_logits/rejected": -0.15482930839061737, |
|
"eval_logps/chosen": -14.208100318908691, |
|
"eval_logps/rejected": -32.24625015258789, |
|
"eval_loss": 0.4933530390262604, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": 0.8943293690681458, |
|
"eval_rewards/margins": 1.5960127115249634, |
|
"eval_rewards/rejected": -0.7016833424568176, |
|
"eval_runtime": 9.7018, |
|
"eval_samples_per_second": 10.307, |
|
"eval_steps_per_second": 5.154, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 9.977728285077951, |
|
"grad_norm": 2.3474748134613037, |
|
"learning_rate": 0.0, |
|
"logits/chosen": -0.030874544754624367, |
|
"logits/rejected": -0.118435338139534, |
|
"logps/chosen": -10.919882774353027, |
|
"logps/rejected": -32.95122146606445, |
|
"loss": 0.3123, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 1.201145052909851, |
|
"rewards/margins": 2.0579233169555664, |
|
"rewards/rejected": -0.8567783236503601, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 9.977728285077951, |
|
"step": 560, |
|
"total_flos": 5.23282185018409e+16, |
|
"train_loss": 0.4115582968507494, |
|
"train_runtime": 2017.6604, |
|
"train_samples_per_second": 4.451, |
|
"train_steps_per_second": 0.278 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 560, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 5.23282185018409e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |