{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.20012336371736,
"eval_steps": 365,
"global_step": 365,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005482831882667398,
"grad_norm": 1.8271640439809084e-13,
"learning_rate": 9.999257569732963e-06,
"loss": 0.0547,
"step": 10
},
{
"epoch": 0.010965663765334796,
"grad_norm": 1.8681511551469146e-14,
"learning_rate": 9.997030499412933e-06,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.016448495648002194,
"grad_norm": 1.2511433729323063e-11,
"learning_rate": 9.993319450417669e-06,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.021931327530669592,
"grad_norm": 3.090452546650191e-11,
"learning_rate": 9.988125524825216e-06,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.02741415941333699,
"grad_norm": 1.458404726736688e-14,
"learning_rate": 9.981450265086598e-06,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.03289699129600439,
"grad_norm": 3.242672515051115e-14,
"learning_rate": 9.973295653567764e-06,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.038379823178671786,
"grad_norm": 2.718872761479918e-14,
"learning_rate": 9.963664111960875e-06,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.043862655061339184,
"grad_norm": 9.169036085943105e-10,
"learning_rate": 9.952558500565135e-06,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.04934548694400658,
"grad_norm": 6.057082835297933e-09,
"learning_rate": 9.939982117437356e-06,
"loss": 0.0,
"step": 90
},
{
"epoch": 0.05482831882667398,
"grad_norm": 2.746146947174566e-06,
"learning_rate": 9.925938697412536e-06,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.06031115070934138,
"grad_norm": 2.1543860384554137e-07,
"learning_rate": 9.910432410994701e-06,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.06579398259200878,
"grad_norm": 1.2734583378914976e-06,
"learning_rate": 9.893467863118402e-06,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.07127681447467617,
"grad_norm": 3.238647128256389e-09,
"learning_rate": 9.87505009178116e-06,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.07675964635734357,
"grad_norm": 1.8630474585279444e-07,
"learning_rate": 9.855184566547333e-06,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.08224247824001096,
"grad_norm": 1.8918351329944016e-09,
"learning_rate": 9.833877186923803e-06,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.08772531012267837,
"grad_norm": 1.6541060188224321e-10,
"learning_rate": 9.811134280607988e-06,
"loss": 0.0,
"step": 160
},
{
"epoch": 0.09320814200534576,
"grad_norm": 0.0067967576906085014,
"learning_rate": 9.786962601608689e-06,
"loss": 0.0015,
"step": 170
},
{
"epoch": 0.09869097388801316,
"grad_norm": 7.394525164272636e-05,
"learning_rate": 9.761369328240347e-06,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.10417380577068056,
"grad_norm": 2.62540652329335e-07,
"learning_rate": 9.734362060991274e-06,
"loss": 0.0,
"step": 190
},
{
"epoch": 0.10965663765334796,
"grad_norm": 4.609726822479843e-09,
"learning_rate": 9.70594882026652e-06,
"loss": 0.0,
"step": 200
},
{
"epoch": 0.11513946953601535,
"grad_norm": 8.970529319363152e-10,
"learning_rate": 9.676138044006051e-06,
"loss": 0.0,
"step": 210
},
{
"epoch": 0.12062230141868276,
"grad_norm": 7.758627629250725e-10,
"learning_rate": 9.644938585178894e-06,
"loss": 0.0,
"step": 220
},
{
"epoch": 0.12610513330135015,
"grad_norm": 3.484149357912969e-10,
"learning_rate": 9.61235970915407e-06,
"loss": 0.0,
"step": 230
},
{
"epoch": 0.13158796518401755,
"grad_norm": 3.5987784974267356e-10,
"learning_rate": 9.57841109094903e-06,
"loss": 0.0,
"step": 240
},
{
"epoch": 0.13707079706668493,
"grad_norm": 2.9926486289078014e-10,
"learning_rate": 9.543102812356446e-06,
"loss": 0.0,
"step": 250
},
{
"epoch": 0.14255362894935233,
"grad_norm": 2.17573001282112e-10,
"learning_rate": 9.506445358950199e-06,
"loss": 0.0,
"step": 260
},
{
"epoch": 0.14803646083201974,
"grad_norm": 1.5743054082584251e-10,
"learning_rate": 9.468449616971458e-06,
"loss": 0.0,
"step": 270
},
{
"epoch": 0.15351929271468714,
"grad_norm": 3.0337881185005244e-07,
"learning_rate": 9.429126870095766e-06,
"loss": 0.0,
"step": 280
},
{
"epoch": 0.15900212459735452,
"grad_norm": 2.075781768695606e-10,
"learning_rate": 9.388488796082113e-06,
"loss": 0.0,
"step": 290
},
{
"epoch": 0.16448495648002193,
"grad_norm": 2.6534453922977264e-07,
"learning_rate": 9.346547463304953e-06,
"loss": 0.0,
"step": 300
},
{
"epoch": 0.16996778836268933,
"grad_norm": 2.213984316767892e-06,
"learning_rate": 9.303315327170244e-06,
"loss": 0.0,
"step": 310
},
{
"epoch": 0.17545062024535674,
"grad_norm": 1.0965933022744068e-10,
"learning_rate": 9.258805226416536e-06,
"loss": 0.0,
"step": 320
},
{
"epoch": 0.1809334521280241,
"grad_norm": 9.682482593476038e-11,
"learning_rate": 9.213030379302223e-06,
"loss": 0.0,
"step": 330
},
{
"epoch": 0.18641628401069152,
"grad_norm": 1.8911376631347565e-10,
"learning_rate": 9.166004379680094e-06,
"loss": 0.0,
"step": 340
},
{
"epoch": 0.19189911589335892,
"grad_norm": 2.287967731939844e-10,
"learning_rate": 9.11774119296033e-06,
"loss": 0.0,
"step": 350
},
{
"epoch": 0.19738194777602633,
"grad_norm": 1.6538861946635564e-10,
"learning_rate": 9.068255151963174e-06,
"loss": 0.0,
"step": 360
},
{
"epoch": 0.20012336371736,
"eval_accuracy": 1.0,
"eval_loss": 8.152250430271124e-09,
"eval_runtime": 172.1258,
"eval_samples_per_second": 37.455,
"eval_steps_per_second": 4.683,
"step": 365
}
],
"logging_steps": 10,
"max_steps": 1823,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 365,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
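
A minimal sketch (not part of the checkpoint itself) of how the log_history above can be inspected. It assumes the file is saved under its standard Trainer name, trainer_state.json, in the current directory; only the standard library is used.

import json

# Load the checkpoint's trainer state.
with open("trainer_state.json") as f:
    state = json.load(f)

# Walk the logged entries: training logs carry "loss"/"grad_norm",
# evaluation logs (written every eval_steps) carry "eval_*" keys.
for entry in state["log_history"]:
    step = entry["step"]
    if "loss" in entry:
        print(f"step {step:4d}  loss={entry['loss']:.4f}  grad_norm={entry['grad_norm']:.3e}")
    elif "eval_loss" in entry:
        print(f"step {step:4d}  eval_loss={entry['eval_loss']:.3e}  eval_accuracy={entry['eval_accuracy']}")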