{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.36496350364963503,
"eval_steps": 9,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004866180048661801,
"grad_norm": 1.2298153638839722,
"learning_rate": 1e-05,
"loss": 1.6578,
"step": 1
},
{
"epoch": 0.004866180048661801,
"eval_loss": 1.3629190921783447,
"eval_runtime": 19.7574,
"eval_samples_per_second": 8.756,
"eval_steps_per_second": 1.114,
"step": 1
},
{
"epoch": 0.009732360097323601,
"grad_norm": 0.9850460886955261,
"learning_rate": 2e-05,
"loss": 1.32,
"step": 2
},
{
"epoch": 0.014598540145985401,
"grad_norm": 1.2559576034545898,
"learning_rate": 3e-05,
"loss": 1.7337,
"step": 3
},
{
"epoch": 0.019464720194647202,
"grad_norm": 1.5054972171783447,
"learning_rate": 4e-05,
"loss": 1.5134,
"step": 4
},
{
"epoch": 0.024330900243309004,
"grad_norm": 1.3522080183029175,
"learning_rate": 5e-05,
"loss": 2.0051,
"step": 5
},
{
"epoch": 0.029197080291970802,
"grad_norm": 1.0276397466659546,
"learning_rate": 6e-05,
"loss": 1.1049,
"step": 6
},
{
"epoch": 0.0340632603406326,
"grad_norm": 0.992424488067627,
"learning_rate": 7e-05,
"loss": 1.2848,
"step": 7
},
{
"epoch": 0.038929440389294405,
"grad_norm": 1.1700958013534546,
"learning_rate": 8e-05,
"loss": 1.2834,
"step": 8
},
{
"epoch": 0.043795620437956206,
"grad_norm": 1.5791586637496948,
"learning_rate": 9e-05,
"loss": 1.2998,
"step": 9
},
{
"epoch": 0.043795620437956206,
"eval_loss": 1.24652099609375,
"eval_runtime": 19.0477,
"eval_samples_per_second": 9.082,
"eval_steps_per_second": 1.155,
"step": 9
},
{
"epoch": 0.04866180048661801,
"grad_norm": 1.362705111503601,
"learning_rate": 0.0001,
"loss": 1.4453,
"step": 10
},
{
"epoch": 0.0535279805352798,
"grad_norm": 1.109766960144043,
"learning_rate": 9.99695413509548e-05,
"loss": 1.083,
"step": 11
},
{
"epoch": 0.058394160583941604,
"grad_norm": 1.4562040567398071,
"learning_rate": 9.987820251299122e-05,
"loss": 1.232,
"step": 12
},
{
"epoch": 0.06326034063260341,
"grad_norm": 1.8894864320755005,
"learning_rate": 9.972609476841367e-05,
"loss": 1.3801,
"step": 13
},
{
"epoch": 0.0681265206812652,
"grad_norm": 1.7615281343460083,
"learning_rate": 9.951340343707852e-05,
"loss": 1.4841,
"step": 14
},
{
"epoch": 0.072992700729927,
"grad_norm": 2.0172765254974365,
"learning_rate": 9.924038765061042e-05,
"loss": 1.7566,
"step": 15
},
{
"epoch": 0.07785888077858881,
"grad_norm": 1.9831361770629883,
"learning_rate": 9.890738003669029e-05,
"loss": 1.3183,
"step": 16
},
{
"epoch": 0.0827250608272506,
"grad_norm": 1.6174662113189697,
"learning_rate": 9.851478631379982e-05,
"loss": 1.1416,
"step": 17
},
{
"epoch": 0.08759124087591241,
"grad_norm": 1.2913496494293213,
"learning_rate": 9.806308479691595e-05,
"loss": 1.0978,
"step": 18
},
{
"epoch": 0.08759124087591241,
"eval_loss": 1.1273767948150635,
"eval_runtime": 19.0434,
"eval_samples_per_second": 9.085,
"eval_steps_per_second": 1.155,
"step": 18
},
{
"epoch": 0.09245742092457421,
"grad_norm": 1.3372280597686768,
"learning_rate": 9.755282581475769e-05,
"loss": 1.3552,
"step": 19
},
{
"epoch": 0.09732360097323602,
"grad_norm": 1.3020259141921997,
"learning_rate": 9.698463103929542e-05,
"loss": 1.1153,
"step": 20
},
{
"epoch": 0.10218978102189781,
"grad_norm": 1.0988091230392456,
"learning_rate": 9.635919272833938e-05,
"loss": 1.1972,
"step": 21
},
{
"epoch": 0.1070559610705596,
"grad_norm": 1.1756395101547241,
"learning_rate": 9.567727288213005e-05,
"loss": 0.8024,
"step": 22
},
{
"epoch": 0.11192214111922141,
"grad_norm": 1.6778271198272705,
"learning_rate": 9.493970231495835e-05,
"loss": 1.1661,
"step": 23
},
{
"epoch": 0.11678832116788321,
"grad_norm": 1.229446530342102,
"learning_rate": 9.414737964294636e-05,
"loss": 1.1406,
"step": 24
},
{
"epoch": 0.12165450121654502,
"grad_norm": 1.4911742210388184,
"learning_rate": 9.330127018922194e-05,
"loss": 1.2144,
"step": 25
},
{
"epoch": 0.12652068126520682,
"grad_norm": 1.994720697402954,
"learning_rate": 9.24024048078213e-05,
"loss": 1.5322,
"step": 26
},
{
"epoch": 0.13138686131386862,
"grad_norm": 1.6444065570831299,
"learning_rate": 9.145187862775209e-05,
"loss": 1.2071,
"step": 27
},
{
"epoch": 0.13138686131386862,
"eval_loss": 1.0755081176757812,
"eval_runtime": 19.0541,
"eval_samples_per_second": 9.079,
"eval_steps_per_second": 1.155,
"step": 27
},
{
"epoch": 0.1362530413625304,
"grad_norm": 1.2277100086212158,
"learning_rate": 9.045084971874738e-05,
"loss": 1.2957,
"step": 28
},
{
"epoch": 0.1411192214111922,
"grad_norm": 1.3212194442749023,
"learning_rate": 8.940053768033609e-05,
"loss": 1.1548,
"step": 29
},
{
"epoch": 0.145985401459854,
"grad_norm": 1.3625956773757935,
"learning_rate": 8.83022221559489e-05,
"loss": 1.3376,
"step": 30
},
{
"epoch": 0.15085158150851583,
"grad_norm": 1.485705852508545,
"learning_rate": 8.715724127386972e-05,
"loss": 1.1851,
"step": 31
},
{
"epoch": 0.15571776155717762,
"grad_norm": 1.6349189281463623,
"learning_rate": 8.596699001693255e-05,
"loss": 1.3334,
"step": 32
},
{
"epoch": 0.16058394160583941,
"grad_norm": 1.17620849609375,
"learning_rate": 8.473291852294987e-05,
"loss": 0.9048,
"step": 33
},
{
"epoch": 0.1654501216545012,
"grad_norm": 1.4480947256088257,
"learning_rate": 8.345653031794292e-05,
"loss": 1.2862,
"step": 34
},
{
"epoch": 0.170316301703163,
"grad_norm": 1.0143804550170898,
"learning_rate": 8.213938048432697e-05,
"loss": 0.8808,
"step": 35
},
{
"epoch": 0.17518248175182483,
"grad_norm": 1.331067681312561,
"learning_rate": 8.07830737662829e-05,
"loss": 0.7566,
"step": 36
},
{
"epoch": 0.17518248175182483,
"eval_loss": 1.0483735799789429,
"eval_runtime": 19.1079,
"eval_samples_per_second": 9.054,
"eval_steps_per_second": 1.151,
"step": 36
},
{
"epoch": 0.18004866180048662,
"grad_norm": 1.112722396850586,
"learning_rate": 7.938926261462366e-05,
"loss": 0.8664,
"step": 37
},
{
"epoch": 0.18491484184914841,
"grad_norm": 1.5237404108047485,
"learning_rate": 7.795964517353735e-05,
"loss": 1.7592,
"step": 38
},
{
"epoch": 0.1897810218978102,
"grad_norm": 1.3811498880386353,
"learning_rate": 7.649596321166024e-05,
"loss": 1.114,
"step": 39
},
{
"epoch": 0.19464720194647203,
"grad_norm": 1.8014518022537231,
"learning_rate": 7.500000000000001e-05,
"loss": 1.0599,
"step": 40
},
{
"epoch": 0.19951338199513383,
"grad_norm": 1.097190022468567,
"learning_rate": 7.347357813929454e-05,
"loss": 1.014,
"step": 41
},
{
"epoch": 0.20437956204379562,
"grad_norm": 1.0327268838882446,
"learning_rate": 7.191855733945387e-05,
"loss": 0.9165,
"step": 42
},
{
"epoch": 0.20924574209245742,
"grad_norm": 1.215659260749817,
"learning_rate": 7.033683215379002e-05,
"loss": 0.9118,
"step": 43
},
{
"epoch": 0.2141119221411192,
"grad_norm": 1.2937884330749512,
"learning_rate": 6.873032967079561e-05,
"loss": 1.1294,
"step": 44
},
{
"epoch": 0.21897810218978103,
"grad_norm": 1.3080188035964966,
"learning_rate": 6.710100716628344e-05,
"loss": 1.472,
"step": 45
},
{
"epoch": 0.21897810218978103,
"eval_loss": 1.0341076850891113,
"eval_runtime": 19.0928,
"eval_samples_per_second": 9.061,
"eval_steps_per_second": 1.152,
"step": 45
},
{
"epoch": 0.22384428223844283,
"grad_norm": 1.4121612310409546,
"learning_rate": 6.545084971874738e-05,
"loss": 1.3627,
"step": 46
},
{
"epoch": 0.22871046228710462,
"grad_norm": 1.0651960372924805,
"learning_rate": 6.378186779084995e-05,
"loss": 1.377,
"step": 47
},
{
"epoch": 0.23357664233576642,
"grad_norm": 1.1841092109680176,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4281,
"step": 48
},
{
"epoch": 0.2384428223844282,
"grad_norm": 1.1239032745361328,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.0758,
"step": 49
},
{
"epoch": 0.24330900243309003,
"grad_norm": 1.1765258312225342,
"learning_rate": 5.868240888334653e-05,
"loss": 1.1838,
"step": 50
},
{
"epoch": 0.24817518248175183,
"grad_norm": 1.2569478750228882,
"learning_rate": 5.695865504800327e-05,
"loss": 1.3188,
"step": 51
},
{
"epoch": 0.25304136253041365,
"grad_norm": 0.9744678735733032,
"learning_rate": 5.522642316338268e-05,
"loss": 1.0088,
"step": 52
},
{
"epoch": 0.25790754257907544,
"grad_norm": 1.0326542854309082,
"learning_rate": 5.348782368720626e-05,
"loss": 1.3964,
"step": 53
},
{
"epoch": 0.26277372262773724,
"grad_norm": 1.4304847717285156,
"learning_rate": 5.174497483512506e-05,
"loss": 1.2375,
"step": 54
},
{
"epoch": 0.26277372262773724,
"eval_loss": 1.0286833047866821,
"eval_runtime": 19.0435,
"eval_samples_per_second": 9.084,
"eval_steps_per_second": 1.155,
"step": 54
},
{
"epoch": 0.26763990267639903,
"grad_norm": 1.2375237941741943,
"learning_rate": 5e-05,
"loss": 1.2123,
"step": 55
},
{
"epoch": 0.2725060827250608,
"grad_norm": 1.014127254486084,
"learning_rate": 4.825502516487497e-05,
"loss": 1.3642,
"step": 56
},
{
"epoch": 0.2773722627737226,
"grad_norm": 1.0220732688903809,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.8832,
"step": 57
},
{
"epoch": 0.2822384428223844,
"grad_norm": 1.0092318058013916,
"learning_rate": 4.477357683661734e-05,
"loss": 0.7852,
"step": 58
},
{
"epoch": 0.2871046228710462,
"grad_norm": 0.9593155980110168,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.9424,
"step": 59
},
{
"epoch": 0.291970802919708,
"grad_norm": 1.1366311311721802,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3309,
"step": 60
},
{
"epoch": 0.29683698296836986,
"grad_norm": 1.061318039894104,
"learning_rate": 3.960441545911204e-05,
"loss": 0.8699,
"step": 61
},
{
"epoch": 0.30170316301703165,
"grad_norm": 1.5996885299682617,
"learning_rate": 3.790390522001662e-05,
"loss": 1.4451,
"step": 62
},
{
"epoch": 0.30656934306569344,
"grad_norm": 1.3761976957321167,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.2438,
"step": 63
},
{
"epoch": 0.30656934306569344,
"eval_loss": 1.0191401243209839,
"eval_runtime": 19.0485,
"eval_samples_per_second": 9.082,
"eval_steps_per_second": 1.155,
"step": 63
},
{
"epoch": 0.31143552311435524,
"grad_norm": 1.4792134761810303,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4994,
"step": 64
},
{
"epoch": 0.31630170316301703,
"grad_norm": 1.02501380443573,
"learning_rate": 3.289899283371657e-05,
"loss": 0.8248,
"step": 65
},
{
"epoch": 0.32116788321167883,
"grad_norm": 1.9706014394760132,
"learning_rate": 3.12696703292044e-05,
"loss": 1.2776,
"step": 66
},
{
"epoch": 0.3260340632603406,
"grad_norm": 1.3999546766281128,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.1599,
"step": 67
},
{
"epoch": 0.3309002433090024,
"grad_norm": 1.561223030090332,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.8594,
"step": 68
},
{
"epoch": 0.3357664233576642,
"grad_norm": 1.1113779544830322,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.0403,
"step": 69
},
{
"epoch": 0.340632603406326,
"grad_norm": 1.1394065618515015,
"learning_rate": 2.500000000000001e-05,
"loss": 1.4539,
"step": 70
},
{
"epoch": 0.34549878345498786,
"grad_norm": 1.26051926612854,
"learning_rate": 2.350403678833976e-05,
"loss": 1.5036,
"step": 71
},
{
"epoch": 0.35036496350364965,
"grad_norm": 0.9440858960151672,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.1182,
"step": 72
},
{
"epoch": 0.35036496350364965,
"eval_loss": 1.0112653970718384,
"eval_runtime": 19.0703,
"eval_samples_per_second": 9.072,
"eval_steps_per_second": 1.154,
"step": 72
},
{
"epoch": 0.35523114355231145,
"grad_norm": 1.110795021057129,
"learning_rate": 2.061073738537635e-05,
"loss": 0.8803,
"step": 73
},
{
"epoch": 0.36009732360097324,
"grad_norm": 1.3027766942977905,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.238,
"step": 74
},
{
"epoch": 0.36496350364963503,
"grad_norm": 1.2464637756347656,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.8754,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.675437719237427e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}