diff --git "a/trainer_state.json" "b/trainer_state.json" new file mode 100644--- /dev/null +++ "b/trainer_state.json" @@ -0,0 +1,93784 @@ +{ + "best_global_step": null, + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 3.0, + "eval_steps": 500, + "global_step": 9375, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.00032, + "grad_norm": 12.630151409918152, + "learning_rate": 1.0660980810234543e-08, + "loss": 1.0067662000656128, + "memory(GiB)": 24.01, + "step": 1, + "token_acc": 0.7926304464766003, + "train_speed(iter/s)": 0.049663 + }, + { + "epoch": 0.00064, + "grad_norm": 13.38800108883809, + "learning_rate": 2.1321961620469085e-08, + "loss": 0.9624192714691162, + "memory(GiB)": 24.01, + "step": 2, + "token_acc": 0.8481939883809043, + "train_speed(iter/s)": 0.082942 + }, + { + "epoch": 0.00096, + "grad_norm": 11.704569793690775, + "learning_rate": 3.1982942430703625e-08, + "loss": 0.9444349408149719, + "memory(GiB)": 24.01, + "step": 3, + "token_acc": 0.8333333333333334, + "train_speed(iter/s)": 0.105464 + }, + { + "epoch": 0.00128, + "grad_norm": 13.737566703023898, + "learning_rate": 4.264392324093817e-08, + "loss": 1.1268188953399658, + "memory(GiB)": 24.01, + "step": 4, + "token_acc": 0.808525754884547, + "train_speed(iter/s)": 0.122973 + }, + { + "epoch": 0.0016, + "grad_norm": 11.472179637210196, + "learning_rate": 5.330490405117271e-08, + "loss": 0.8305326700210571, + "memory(GiB)": 24.01, + "step": 5, + "token_acc": 0.8618470855412567, + "train_speed(iter/s)": 0.136709 + }, + { + "epoch": 0.00192, + "grad_norm": 10.90050048784965, + "learning_rate": 6.396588486140725e-08, + "loss": 0.9607402086257935, + "memory(GiB)": 25.75, + "step": 6, + "token_acc": 0.7736532367587143, + "train_speed(iter/s)": 0.147431 + }, + { + "epoch": 0.00224, + "grad_norm": 12.615946907941442, + "learning_rate": 7.462686567164179e-08, + "loss": 0.9604150652885437, + "memory(GiB)": 25.75, + "step": 7, + "token_acc": 0.7777418311226141, + "train_speed(iter/s)": 0.156666 + }, + { + "epoch": 0.00256, + "grad_norm": 12.302489993448557, + "learning_rate": 8.528784648187634e-08, + "loss": 0.8963150978088379, + "memory(GiB)": 25.76, + "step": 8, + "token_acc": 0.8479709267110842, + "train_speed(iter/s)": 0.164324 + }, + { + "epoch": 0.00288, + "grad_norm": 10.511729982659768, + "learning_rate": 9.59488272921109e-08, + "loss": 0.8355842232704163, + "memory(GiB)": 25.76, + "step": 9, + "token_acc": 0.8565285379202502, + "train_speed(iter/s)": 0.170356 + }, + { + "epoch": 0.0032, + "grad_norm": 12.978546145493135, + "learning_rate": 1.0660980810234542e-07, + "loss": 0.9579042196273804, + "memory(GiB)": 25.76, + "step": 10, + "token_acc": 0.8502427804753386, + "train_speed(iter/s)": 0.173216 + }, + { + "epoch": 0.00352, + "grad_norm": 13.314079488016148, + "learning_rate": 1.1727078891257997e-07, + "loss": 1.0107107162475586, + "memory(GiB)": 31.64, + "step": 11, + "token_acc": 0.7640667442762903, + "train_speed(iter/s)": 0.177833 + }, + { + "epoch": 0.00384, + "grad_norm": 11.746933642597515, + "learning_rate": 1.279317697228145e-07, + "loss": 0.8989206552505493, + "memory(GiB)": 31.64, + "step": 12, + "token_acc": 0.817986577181208, + "train_speed(iter/s)": 0.18219 + }, + { + "epoch": 0.00416, + "grad_norm": 13.309772265369517, + "learning_rate": 1.3859275053304905e-07, + "loss": 1.0773546695709229, + "memory(GiB)": 31.64, + "step": 13, + "token_acc": 0.8230370665603827, + "train_speed(iter/s)": 0.185803 + }, + 
{ + "epoch": 0.00448, + "grad_norm": 12.76982067412176, + "learning_rate": 1.4925373134328358e-07, + "loss": 0.9630119800567627, + "memory(GiB)": 31.64, + "step": 14, + "token_acc": 0.818010372465818, + "train_speed(iter/s)": 0.188527 + }, + { + "epoch": 0.0048, + "grad_norm": 12.37131520343411, + "learning_rate": 1.5991471215351813e-07, + "loss": 0.9258483052253723, + "memory(GiB)": 41.86, + "step": 15, + "token_acc": 0.8312439729990356, + "train_speed(iter/s)": 0.190477 + }, + { + "epoch": 0.00512, + "grad_norm": 11.481832543277982, + "learning_rate": 1.7057569296375268e-07, + "loss": 0.9314000606536865, + "memory(GiB)": 41.86, + "step": 16, + "token_acc": 0.8041146859268987, + "train_speed(iter/s)": 0.192959 + }, + { + "epoch": 0.00544, + "grad_norm": 12.342302950145875, + "learning_rate": 1.812366737739872e-07, + "loss": 0.9455907940864563, + "memory(GiB)": 41.86, + "step": 17, + "token_acc": 0.8456899609790706, + "train_speed(iter/s)": 0.195528 + }, + { + "epoch": 0.00576, + "grad_norm": 11.410847485843847, + "learning_rate": 1.918976545842218e-07, + "loss": 0.8774665594100952, + "memory(GiB)": 41.86, + "step": 18, + "token_acc": 0.8424959406170263, + "train_speed(iter/s)": 0.197586 + }, + { + "epoch": 0.00608, + "grad_norm": 11.33945731695935, + "learning_rate": 2.0255863539445632e-07, + "loss": 0.9473937153816223, + "memory(GiB)": 41.86, + "step": 19, + "token_acc": 0.7847124824684432, + "train_speed(iter/s)": 0.199369 + }, + { + "epoch": 0.0064, + "grad_norm": 11.973784602772334, + "learning_rate": 2.1321961620469084e-07, + "loss": 0.9350627660751343, + "memory(GiB)": 41.86, + "step": 20, + "token_acc": 0.8573454339194555, + "train_speed(iter/s)": 0.200962 + }, + { + "epoch": 0.00672, + "grad_norm": 10.883829386099931, + "learning_rate": 2.2388059701492537e-07, + "loss": 0.90641188621521, + "memory(GiB)": 41.86, + "step": 21, + "token_acc": 0.7802690582959642, + "train_speed(iter/s)": 0.202281 + }, + { + "epoch": 0.00704, + "grad_norm": 11.391651753621645, + "learning_rate": 2.3454157782515995e-07, + "loss": 0.9643319249153137, + "memory(GiB)": 41.86, + "step": 22, + "token_acc": 0.7747747747747747, + "train_speed(iter/s)": 0.204109 + }, + { + "epoch": 0.00736, + "grad_norm": 11.050101303319748, + "learning_rate": 2.4520255863539447e-07, + "loss": 0.9134818911552429, + "memory(GiB)": 41.86, + "step": 23, + "token_acc": 0.8051920641620937, + "train_speed(iter/s)": 0.205276 + }, + { + "epoch": 0.00768, + "grad_norm": 12.517216398066635, + "learning_rate": 2.55863539445629e-07, + "loss": 0.9008537530899048, + "memory(GiB)": 41.86, + "step": 24, + "token_acc": 0.8337760910815939, + "train_speed(iter/s)": 0.206739 + }, + { + "epoch": 0.008, + "grad_norm": 11.495583868820669, + "learning_rate": 2.665245202558635e-07, + "loss": 0.9621918797492981, + "memory(GiB)": 41.86, + "step": 25, + "token_acc": 0.83390494855463, + "train_speed(iter/s)": 0.208123 + }, + { + "epoch": 0.00832, + "grad_norm": 11.629960966505951, + "learning_rate": 2.771855010660981e-07, + "loss": 0.9758769273757935, + "memory(GiB)": 41.86, + "step": 26, + "token_acc": 0.8202143950995405, + "train_speed(iter/s)": 0.209218 + }, + { + "epoch": 0.00864, + "grad_norm": 11.711041914923973, + "learning_rate": 2.8784648187633263e-07, + "loss": 0.8994331359863281, + "memory(GiB)": 41.86, + "step": 27, + "token_acc": 0.8296025582457743, + "train_speed(iter/s)": 0.210492 + }, + { + "epoch": 0.00896, + "grad_norm": 10.785916927327476, + "learning_rate": 2.9850746268656716e-07, + "loss": 0.9457482099533081, + "memory(GiB)": 41.86, + 
"step": 28, + "token_acc": 0.8038379530916845, + "train_speed(iter/s)": 0.211079 + }, + { + "epoch": 0.00928, + "grad_norm": 10.2611989778238, + "learning_rate": 3.0916844349680174e-07, + "loss": 0.928992509841919, + "memory(GiB)": 41.86, + "step": 29, + "token_acc": 0.8572097378277154, + "train_speed(iter/s)": 0.212095 + }, + { + "epoch": 0.0096, + "grad_norm": 10.065341096521472, + "learning_rate": 3.1982942430703626e-07, + "loss": 0.9656261801719666, + "memory(GiB)": 41.86, + "step": 30, + "token_acc": 0.7274481427882297, + "train_speed(iter/s)": 0.213143 + }, + { + "epoch": 0.00992, + "grad_norm": 10.213449698904558, + "learning_rate": 3.3049040511727084e-07, + "loss": 0.9150421619415283, + "memory(GiB)": 41.86, + "step": 31, + "token_acc": 0.825136612021858, + "train_speed(iter/s)": 0.214224 + }, + { + "epoch": 0.01024, + "grad_norm": 9.510232791014243, + "learning_rate": 3.4115138592750537e-07, + "loss": 0.8923230171203613, + "memory(GiB)": 41.86, + "step": 32, + "token_acc": 0.7665655032878098, + "train_speed(iter/s)": 0.215311 + }, + { + "epoch": 0.01056, + "grad_norm": 9.751395582652036, + "learning_rate": 3.518123667377399e-07, + "loss": 0.8603218197822571, + "memory(GiB)": 41.86, + "step": 33, + "token_acc": 0.8501154734411085, + "train_speed(iter/s)": 0.216002 + }, + { + "epoch": 0.01088, + "grad_norm": 8.72651409913653, + "learning_rate": 3.624733475479744e-07, + "loss": 0.8310045003890991, + "memory(GiB)": 41.86, + "step": 34, + "token_acc": 0.775260029717682, + "train_speed(iter/s)": 0.216883 + }, + { + "epoch": 0.0112, + "grad_norm": 9.538701146438706, + "learning_rate": 3.7313432835820895e-07, + "loss": 0.9402002692222595, + "memory(GiB)": 41.86, + "step": 35, + "token_acc": 0.8320392317123008, + "train_speed(iter/s)": 0.217714 + }, + { + "epoch": 0.01152, + "grad_norm": 9.911327823544632, + "learning_rate": 3.837953091684436e-07, + "loss": 0.8608855605125427, + "memory(GiB)": 41.86, + "step": 36, + "token_acc": 0.8093935248518012, + "train_speed(iter/s)": 0.218245 + }, + { + "epoch": 0.01184, + "grad_norm": 8.581463570910069, + "learning_rate": 3.944562899786781e-07, + "loss": 0.8183090686798096, + "memory(GiB)": 41.86, + "step": 37, + "token_acc": 0.8510108864696734, + "train_speed(iter/s)": 0.218961 + }, + { + "epoch": 0.01216, + "grad_norm": 9.147755780525086, + "learning_rate": 4.0511727078891263e-07, + "loss": 0.8847682476043701, + "memory(GiB)": 41.86, + "step": 38, + "token_acc": 0.7877668308702791, + "train_speed(iter/s)": 0.219385 + }, + { + "epoch": 0.01248, + "grad_norm": 9.449463471520723, + "learning_rate": 4.1577825159914716e-07, + "loss": 0.9718679189682007, + "memory(GiB)": 41.86, + "step": 39, + "token_acc": 0.766468548786528, + "train_speed(iter/s)": 0.219872 + }, + { + "epoch": 0.0128, + "grad_norm": 7.9478431881253755, + "learning_rate": 4.264392324093817e-07, + "loss": 0.8065295815467834, + "memory(GiB)": 41.86, + "step": 40, + "token_acc": 0.8382038488952245, + "train_speed(iter/s)": 0.220279 + }, + { + "epoch": 0.01312, + "grad_norm": 7.177907490208328, + "learning_rate": 4.371002132196162e-07, + "loss": 0.856386661529541, + "memory(GiB)": 41.86, + "step": 41, + "token_acc": 0.8228523301516002, + "train_speed(iter/s)": 0.220749 + }, + { + "epoch": 0.01344, + "grad_norm": 7.080348101874288, + "learning_rate": 4.4776119402985074e-07, + "loss": 0.8856309652328491, + "memory(GiB)": 41.86, + "step": 42, + "token_acc": 0.8283200908059024, + "train_speed(iter/s)": 0.220944 + }, + { + "epoch": 0.01376, + "grad_norm": 6.492924393857743, + "learning_rate": 
4.5842217484008537e-07, + "loss": 0.7905886769294739, + "memory(GiB)": 41.86, + "step": 43, + "token_acc": 0.8580116959064328, + "train_speed(iter/s)": 0.221532 + }, + { + "epoch": 0.01408, + "grad_norm": 6.354420059549542, + "learning_rate": 4.690831556503199e-07, + "loss": 0.7550309896469116, + "memory(GiB)": 41.86, + "step": 44, + "token_acc": 0.8385296381832179, + "train_speed(iter/s)": 0.221908 + }, + { + "epoch": 0.0144, + "grad_norm": 6.324149130111326, + "learning_rate": 4.797441364605544e-07, + "loss": 0.7065809965133667, + "memory(GiB)": 41.86, + "step": 45, + "token_acc": 0.8461538461538461, + "train_speed(iter/s)": 0.22259 + }, + { + "epoch": 0.01472, + "grad_norm": 6.755821013832618, + "learning_rate": 4.904051172707889e-07, + "loss": 0.8370662331581116, + "memory(GiB)": 41.86, + "step": 46, + "token_acc": 0.8457082675092154, + "train_speed(iter/s)": 0.223177 + }, + { + "epoch": 0.01504, + "grad_norm": 5.896440191628225, + "learning_rate": 5.010660980810235e-07, + "loss": 0.7614850401878357, + "memory(GiB)": 41.86, + "step": 47, + "token_acc": 0.8266993263931415, + "train_speed(iter/s)": 0.223759 + }, + { + "epoch": 0.01536, + "grad_norm": 5.724633591406352, + "learning_rate": 5.11727078891258e-07, + "loss": 0.7332110404968262, + "memory(GiB)": 41.86, + "step": 48, + "token_acc": 0.8371324743970928, + "train_speed(iter/s)": 0.223985 + }, + { + "epoch": 0.01568, + "grad_norm": 6.147468645922813, + "learning_rate": 5.223880597014925e-07, + "loss": 0.903264582157135, + "memory(GiB)": 41.86, + "step": 49, + "token_acc": 0.8229950687775759, + "train_speed(iter/s)": 0.224003 + }, + { + "epoch": 0.016, + "grad_norm": 6.082452685268613, + "learning_rate": 5.33049040511727e-07, + "loss": 0.7833185195922852, + "memory(GiB)": 41.86, + "step": 50, + "token_acc": 0.8471470220741357, + "train_speed(iter/s)": 0.224149 + }, + { + "epoch": 0.01632, + "grad_norm": 5.5846495715060565, + "learning_rate": 5.437100213219617e-07, + "loss": 0.7637509107589722, + "memory(GiB)": 41.86, + "step": 51, + "token_acc": 0.8642515923566879, + "train_speed(iter/s)": 0.224504 + }, + { + "epoch": 0.01664, + "grad_norm": 5.653561510084641, + "learning_rate": 5.543710021321962e-07, + "loss": 0.7513374090194702, + "memory(GiB)": 41.86, + "step": 52, + "token_acc": 0.8494167550371156, + "train_speed(iter/s)": 0.224748 + }, + { + "epoch": 0.01696, + "grad_norm": 5.707174455747126, + "learning_rate": 5.650319829424307e-07, + "loss": 0.7322826385498047, + "memory(GiB)": 41.86, + "step": 53, + "token_acc": 0.8340917045852293, + "train_speed(iter/s)": 0.225195 + }, + { + "epoch": 0.01728, + "grad_norm": 5.281958208617387, + "learning_rate": 5.756929637526653e-07, + "loss": 0.7290368676185608, + "memory(GiB)": 41.86, + "step": 54, + "token_acc": 0.8070818070818071, + "train_speed(iter/s)": 0.225322 + }, + { + "epoch": 0.0176, + "grad_norm": 5.179699404154042, + "learning_rate": 5.863539445628998e-07, + "loss": 0.7871674299240112, + "memory(GiB)": 41.86, + "step": 55, + "token_acc": 0.7850802055754555, + "train_speed(iter/s)": 0.225297 + }, + { + "epoch": 0.01792, + "grad_norm": 4.097766625100472, + "learning_rate": 5.970149253731343e-07, + "loss": 0.7158313393592834, + "memory(GiB)": 41.86, + "step": 56, + "token_acc": 0.7493461203138623, + "train_speed(iter/s)": 0.225401 + }, + { + "epoch": 0.01824, + "grad_norm": 3.5233384497044344, + "learning_rate": 6.076759061833689e-07, + "loss": 0.6825019121170044, + "memory(GiB)": 41.86, + "step": 57, + "token_acc": 0.8602409638554217, + "train_speed(iter/s)": 0.225467 + }, + { + 
"epoch": 0.01856, + "grad_norm": 3.3170874169858964, + "learning_rate": 6.183368869936035e-07, + "loss": 0.6249011754989624, + "memory(GiB)": 41.86, + "step": 58, + "token_acc": 0.870265563778842, + "train_speed(iter/s)": 0.225714 + }, + { + "epoch": 0.01888, + "grad_norm": 3.2437917049159672, + "learning_rate": 6.28997867803838e-07, + "loss": 0.6608279943466187, + "memory(GiB)": 41.86, + "step": 59, + "token_acc": 0.8439363817097415, + "train_speed(iter/s)": 0.225986 + }, + { + "epoch": 0.0192, + "grad_norm": 2.700709739886274, + "learning_rate": 6.396588486140725e-07, + "loss": 0.5468295216560364, + "memory(GiB)": 41.86, + "step": 60, + "token_acc": 0.8739739256397876, + "train_speed(iter/s)": 0.22628 + }, + { + "epoch": 0.01952, + "grad_norm": 2.6830732664609047, + "learning_rate": 6.50319829424307e-07, + "loss": 0.6257410049438477, + "memory(GiB)": 41.86, + "step": 61, + "token_acc": 0.8179658820988988, + "train_speed(iter/s)": 0.22667 + }, + { + "epoch": 0.01984, + "grad_norm": 2.611507083164799, + "learning_rate": 6.609808102345417e-07, + "loss": 0.5938126444816589, + "memory(GiB)": 41.86, + "step": 62, + "token_acc": 0.8622568093385214, + "train_speed(iter/s)": 0.227023 + }, + { + "epoch": 0.02016, + "grad_norm": 2.4413487278007966, + "learning_rate": 6.716417910447762e-07, + "loss": 0.6034793853759766, + "memory(GiB)": 41.86, + "step": 63, + "token_acc": 0.8708348932983901, + "train_speed(iter/s)": 0.227434 + }, + { + "epoch": 0.02048, + "grad_norm": 2.5425028450477307, + "learning_rate": 6.823027718550107e-07, + "loss": 0.6213764548301697, + "memory(GiB)": 41.86, + "step": 64, + "token_acc": 0.8638399522245447, + "train_speed(iter/s)": 0.227662 + }, + { + "epoch": 0.0208, + "grad_norm": 2.4211624745247007, + "learning_rate": 6.929637526652453e-07, + "loss": 0.5741841793060303, + "memory(GiB)": 41.86, + "step": 65, + "token_acc": 0.8432214087351679, + "train_speed(iter/s)": 0.227988 + }, + { + "epoch": 0.02112, + "grad_norm": 2.6082493583894113, + "learning_rate": 7.036247334754798e-07, + "loss": 0.6302884817123413, + "memory(GiB)": 41.86, + "step": 66, + "token_acc": 0.8225772981946462, + "train_speed(iter/s)": 0.228287 + }, + { + "epoch": 0.02144, + "grad_norm": 2.2699988838493828, + "learning_rate": 7.142857142857143e-07, + "loss": 0.5586453676223755, + "memory(GiB)": 41.86, + "step": 67, + "token_acc": 0.833620987228167, + "train_speed(iter/s)": 0.228486 + }, + { + "epoch": 0.02176, + "grad_norm": 2.2389086881261715, + "learning_rate": 7.249466950959488e-07, + "loss": 0.6138612031936646, + "memory(GiB)": 41.86, + "step": 68, + "token_acc": 0.8359233827249729, + "train_speed(iter/s)": 0.22861 + }, + { + "epoch": 0.02208, + "grad_norm": 2.1298342519571096, + "learning_rate": 7.356076759061834e-07, + "loss": 0.5674803256988525, + "memory(GiB)": 41.86, + "step": 69, + "token_acc": 0.7831400814791601, + "train_speed(iter/s)": 0.228916 + }, + { + "epoch": 0.0224, + "grad_norm": 2.073626609971217, + "learning_rate": 7.462686567164179e-07, + "loss": 0.5966504216194153, + "memory(GiB)": 41.86, + "step": 70, + "token_acc": 0.8468691878487291, + "train_speed(iter/s)": 0.229031 + }, + { + "epoch": 0.02272, + "grad_norm": 2.432774097587118, + "learning_rate": 7.569296375266526e-07, + "loss": 0.6539902091026306, + "memory(GiB)": 41.86, + "step": 71, + "token_acc": 0.8310303987366758, + "train_speed(iter/s)": 0.229292 + }, + { + "epoch": 0.02304, + "grad_norm": 2.2444626770520193, + "learning_rate": 7.675906183368872e-07, + "loss": 0.6395382881164551, + "memory(GiB)": 41.86, + "step": 72, + 
"token_acc": 0.8452003459210147, + "train_speed(iter/s)": 0.229472 + }, + { + "epoch": 0.02336, + "grad_norm": 2.7335287160786828, + "learning_rate": 7.782515991471217e-07, + "loss": 0.734038233757019, + "memory(GiB)": 41.86, + "step": 73, + "token_acc": 0.7965226840532464, + "train_speed(iter/s)": 0.229666 + }, + { + "epoch": 0.02368, + "grad_norm": 2.1205931687315185, + "learning_rate": 7.889125799573562e-07, + "loss": 0.6473275423049927, + "memory(GiB)": 41.86, + "step": 74, + "token_acc": 0.83222533240028, + "train_speed(iter/s)": 0.229828 + }, + { + "epoch": 0.024, + "grad_norm": 1.8926243761954264, + "learning_rate": 7.995735607675907e-07, + "loss": 0.5122984647750854, + "memory(GiB)": 41.86, + "step": 75, + "token_acc": 0.8067132867132867, + "train_speed(iter/s)": 0.230078 + }, + { + "epoch": 0.02432, + "grad_norm": 1.9733938708243608, + "learning_rate": 8.102345415778253e-07, + "loss": 0.5528514385223389, + "memory(GiB)": 41.86, + "step": 76, + "token_acc": 0.8699318845280571, + "train_speed(iter/s)": 0.230283 + }, + { + "epoch": 0.02464, + "grad_norm": 1.6599233892153682, + "learning_rate": 8.208955223880598e-07, + "loss": 0.5430408716201782, + "memory(GiB)": 41.86, + "step": 77, + "token_acc": 0.8487674883411059, + "train_speed(iter/s)": 0.229836 + }, + { + "epoch": 0.02496, + "grad_norm": 1.637800922933086, + "learning_rate": 8.315565031982943e-07, + "loss": 0.5647838115692139, + "memory(GiB)": 41.86, + "step": 78, + "token_acc": 0.8370253164556962, + "train_speed(iter/s)": 0.229846 + }, + { + "epoch": 0.02528, + "grad_norm": 1.4985484689501922, + "learning_rate": 8.422174840085288e-07, + "loss": 0.5339977741241455, + "memory(GiB)": 41.86, + "step": 79, + "token_acc": 0.8401814973531636, + "train_speed(iter/s)": 0.229936 + }, + { + "epoch": 0.0256, + "grad_norm": 1.2924824597469082, + "learning_rate": 8.528784648187634e-07, + "loss": 0.5280715227127075, + "memory(GiB)": 41.86, + "step": 80, + "token_acc": 0.878323932312651, + "train_speed(iter/s)": 0.229971 + }, + { + "epoch": 0.02592, + "grad_norm": 1.295869648558741, + "learning_rate": 8.635394456289979e-07, + "loss": 0.5567734837532043, + "memory(GiB)": 41.86, + "step": 81, + "token_acc": 0.8562417871222077, + "train_speed(iter/s)": 0.230211 + }, + { + "epoch": 0.02624, + "grad_norm": 1.2278463033010003, + "learning_rate": 8.742004264392324e-07, + "loss": 0.5421440601348877, + "memory(GiB)": 41.86, + "step": 82, + "token_acc": 0.8538952745849298, + "train_speed(iter/s)": 0.230398 + }, + { + "epoch": 0.02656, + "grad_norm": 1.1394157258164557, + "learning_rate": 8.848614072494669e-07, + "loss": 0.5822359919548035, + "memory(GiB)": 41.86, + "step": 83, + "token_acc": 0.8400244548604036, + "train_speed(iter/s)": 0.23047 + }, + { + "epoch": 0.02688, + "grad_norm": 1.1795203267711198, + "learning_rate": 8.955223880597015e-07, + "loss": 0.45736944675445557, + "memory(GiB)": 41.86, + "step": 84, + "token_acc": 0.8560663149962321, + "train_speed(iter/s)": 0.230626 + }, + { + "epoch": 0.0272, + "grad_norm": 1.262428298429841, + "learning_rate": 9.06183368869936e-07, + "loss": 0.5114102363586426, + "memory(GiB)": 41.86, + "step": 85, + "token_acc": 0.883007667573584, + "train_speed(iter/s)": 0.230679 + }, + { + "epoch": 0.02752, + "grad_norm": 1.2224139151378215, + "learning_rate": 9.168443496801707e-07, + "loss": 0.586413562297821, + "memory(GiB)": 41.86, + "step": 86, + "token_acc": 0.8241758241758241, + "train_speed(iter/s)": 0.230794 + }, + { + "epoch": 0.02784, + "grad_norm": 1.0612665433923012, + "learning_rate": 
9.275053304904053e-07, + "loss": 0.5422711372375488, + "memory(GiB)": 41.86, + "step": 87, + "token_acc": 0.846322121957889, + "train_speed(iter/s)": 0.230743 + }, + { + "epoch": 0.02816, + "grad_norm": 1.127390169602271, + "learning_rate": 9.381663113006398e-07, + "loss": 0.40012305974960327, + "memory(GiB)": 41.86, + "step": 88, + "token_acc": 0.8782666225603705, + "train_speed(iter/s)": 0.230921 + }, + { + "epoch": 0.02848, + "grad_norm": 1.1856423457796859, + "learning_rate": 9.488272921108743e-07, + "loss": 0.5698226690292358, + "memory(GiB)": 41.86, + "step": 89, + "token_acc": 0.8412204234122043, + "train_speed(iter/s)": 0.231155 + }, + { + "epoch": 0.0288, + "grad_norm": 1.003187215942443, + "learning_rate": 9.594882729211088e-07, + "loss": 0.4580455720424652, + "memory(GiB)": 41.86, + "step": 90, + "token_acc": 0.8859060402684564, + "train_speed(iter/s)": 0.231226 + }, + { + "epoch": 0.02912, + "grad_norm": 1.0236676038748946, + "learning_rate": 9.701492537313434e-07, + "loss": 0.5453003644943237, + "memory(GiB)": 41.86, + "step": 91, + "token_acc": 0.904822986146742, + "train_speed(iter/s)": 0.230312 + }, + { + "epoch": 0.02944, + "grad_norm": 1.0852240558689916, + "learning_rate": 9.808102345415779e-07, + "loss": 0.47642844915390015, + "memory(GiB)": 41.86, + "step": 92, + "token_acc": 0.8649074438755415, + "train_speed(iter/s)": 0.230489 + }, + { + "epoch": 0.02976, + "grad_norm": 1.0986779418172614, + "learning_rate": 9.914712153518124e-07, + "loss": 0.49293196201324463, + "memory(GiB)": 41.86, + "step": 93, + "token_acc": 0.873972602739726, + "train_speed(iter/s)": 0.230651 + }, + { + "epoch": 0.03008, + "grad_norm": 0.9828273792651537, + "learning_rate": 1.002132196162047e-06, + "loss": 0.4937012493610382, + "memory(GiB)": 41.86, + "step": 94, + "token_acc": 0.8306538049303323, + "train_speed(iter/s)": 0.23089 + }, + { + "epoch": 0.0304, + "grad_norm": 0.9564821546295516, + "learning_rate": 1.0127931769722815e-06, + "loss": 0.4360889792442322, + "memory(GiB)": 41.86, + "step": 95, + "token_acc": 0.8956228956228957, + "train_speed(iter/s)": 0.231016 + }, + { + "epoch": 0.03072, + "grad_norm": 0.9362577635247741, + "learning_rate": 1.023454157782516e-06, + "loss": 0.47929883003234863, + "memory(GiB)": 41.86, + "step": 96, + "token_acc": 0.8147727272727273, + "train_speed(iter/s)": 0.231144 + }, + { + "epoch": 0.03104, + "grad_norm": 1.0437643740232678, + "learning_rate": 1.0341151385927505e-06, + "loss": 0.5539300441741943, + "memory(GiB)": 41.86, + "step": 97, + "token_acc": 0.7844458052663809, + "train_speed(iter/s)": 0.231346 + }, + { + "epoch": 0.03136, + "grad_norm": 0.9889415016298371, + "learning_rate": 1.044776119402985e-06, + "loss": 0.4341806173324585, + "memory(GiB)": 41.86, + "step": 98, + "token_acc": 0.8960292580982236, + "train_speed(iter/s)": 0.231575 + }, + { + "epoch": 0.03168, + "grad_norm": 0.9308074153904029, + "learning_rate": 1.0554371002132196e-06, + "loss": 0.554280698299408, + "memory(GiB)": 41.86, + "step": 99, + "token_acc": 0.8141711229946524, + "train_speed(iter/s)": 0.231775 + }, + { + "epoch": 0.032, + "grad_norm": 0.8869033839084672, + "learning_rate": 1.066098081023454e-06, + "loss": 0.48680251836776733, + "memory(GiB)": 41.86, + "step": 100, + "token_acc": 0.8549107142857143, + "train_speed(iter/s)": 0.231887 + }, + { + "epoch": 0.03232, + "grad_norm": 0.9657715121039673, + "learning_rate": 1.0767590618336886e-06, + "loss": 0.5429348945617676, + "memory(GiB)": 41.86, + "step": 101, + "token_acc": 0.8461323652611002, + "train_speed(iter/s)": 
0.23198 + }, + { + "epoch": 0.03264, + "grad_norm": 0.9368879562962339, + "learning_rate": 1.0874200426439234e-06, + "loss": 0.49918490648269653, + "memory(GiB)": 41.86, + "step": 102, + "token_acc": 0.8915584415584416, + "train_speed(iter/s)": 0.2321 + }, + { + "epoch": 0.03296, + "grad_norm": 0.8895025870432464, + "learning_rate": 1.0980810234541579e-06, + "loss": 0.5613017082214355, + "memory(GiB)": 41.86, + "step": 103, + "token_acc": 0.8549111501659832, + "train_speed(iter/s)": 0.23225 + }, + { + "epoch": 0.03328, + "grad_norm": 0.9907965045916807, + "learning_rate": 1.1087420042643924e-06, + "loss": 0.5262157917022705, + "memory(GiB)": 41.86, + "step": 104, + "token_acc": 0.8001420790906938, + "train_speed(iter/s)": 0.232127 + }, + { + "epoch": 0.0336, + "grad_norm": 0.9150000461244515, + "learning_rate": 1.119402985074627e-06, + "loss": 0.4397706091403961, + "memory(GiB)": 41.86, + "step": 105, + "token_acc": 0.9156232988568318, + "train_speed(iter/s)": 0.232293 + }, + { + "epoch": 0.03392, + "grad_norm": 0.9483314540067643, + "learning_rate": 1.1300639658848615e-06, + "loss": 0.5002495646476746, + "memory(GiB)": 41.86, + "step": 106, + "token_acc": 0.8736717827626919, + "train_speed(iter/s)": 0.232464 + }, + { + "epoch": 0.03424, + "grad_norm": 1.00539554696486, + "learning_rate": 1.140724946695096e-06, + "loss": 0.5133095383644104, + "memory(GiB)": 41.86, + "step": 107, + "token_acc": 0.8579306722689075, + "train_speed(iter/s)": 0.232641 + }, + { + "epoch": 0.03456, + "grad_norm": 0.9319977772283852, + "learning_rate": 1.1513859275053305e-06, + "loss": 0.3769652843475342, + "memory(GiB)": 41.86, + "step": 108, + "token_acc": 0.8978531855955678, + "train_speed(iter/s)": 0.232802 + }, + { + "epoch": 0.03488, + "grad_norm": 0.9199851190211769, + "learning_rate": 1.162046908315565e-06, + "loss": 0.4927418529987335, + "memory(GiB)": 41.86, + "step": 109, + "token_acc": 0.8251398292611127, + "train_speed(iter/s)": 0.232941 + }, + { + "epoch": 0.0352, + "grad_norm": 0.890613163395056, + "learning_rate": 1.1727078891257996e-06, + "loss": 0.554315984249115, + "memory(GiB)": 41.86, + "step": 110, + "token_acc": 0.8050210738501008, + "train_speed(iter/s)": 0.232924 + }, + { + "epoch": 0.03552, + "grad_norm": 0.8915295577644938, + "learning_rate": 1.183368869936034e-06, + "loss": 0.4747316837310791, + "memory(GiB)": 41.86, + "step": 111, + "token_acc": 0.8324889170360988, + "train_speed(iter/s)": 0.233083 + }, + { + "epoch": 0.03584, + "grad_norm": 0.9142643943631918, + "learning_rate": 1.1940298507462686e-06, + "loss": 0.5642600655555725, + "memory(GiB)": 41.86, + "step": 112, + "token_acc": 0.8345487693710119, + "train_speed(iter/s)": 0.233205 + }, + { + "epoch": 0.03616, + "grad_norm": 0.9090419491125442, + "learning_rate": 1.2046908315565034e-06, + "loss": 0.44631150364875793, + "memory(GiB)": 41.86, + "step": 113, + "token_acc": 0.9043229497774953, + "train_speed(iter/s)": 0.233272 + }, + { + "epoch": 0.03648, + "grad_norm": 0.8608949907402743, + "learning_rate": 1.2153518123667379e-06, + "loss": 0.43426257371902466, + "memory(GiB)": 41.86, + "step": 114, + "token_acc": 0.8882938026013772, + "train_speed(iter/s)": 0.233393 + }, + { + "epoch": 0.0368, + "grad_norm": 0.9903540099187894, + "learning_rate": 1.2260127931769724e-06, + "loss": 0.47570085525512695, + "memory(GiB)": 41.86, + "step": 115, + "token_acc": 0.7727748691099476, + "train_speed(iter/s)": 0.233498 + }, + { + "epoch": 0.03712, + "grad_norm": 0.9763002307422793, + "learning_rate": 1.236673773987207e-06, + "loss": 
0.4752451181411743, + "memory(GiB)": 41.86, + "step": 116, + "token_acc": 0.8640904311251314, + "train_speed(iter/s)": 0.233666 + }, + { + "epoch": 0.03744, + "grad_norm": 0.8700117412556084, + "learning_rate": 1.2473347547974415e-06, + "loss": 0.5492661595344543, + "memory(GiB)": 41.86, + "step": 117, + "token_acc": 0.8322700144062565, + "train_speed(iter/s)": 0.233744 + }, + { + "epoch": 0.03776, + "grad_norm": 0.8367771273097806, + "learning_rate": 1.257995735607676e-06, + "loss": 0.5371458530426025, + "memory(GiB)": 41.86, + "step": 118, + "token_acc": 0.865781990521327, + "train_speed(iter/s)": 0.233816 + }, + { + "epoch": 0.03808, + "grad_norm": 0.9101326093227952, + "learning_rate": 1.2686567164179105e-06, + "loss": 0.5292797088623047, + "memory(GiB)": 41.86, + "step": 119, + "token_acc": 0.8307056086844146, + "train_speed(iter/s)": 0.233873 + }, + { + "epoch": 0.0384, + "grad_norm": 0.9113612620762511, + "learning_rate": 1.279317697228145e-06, + "loss": 0.588639497756958, + "memory(GiB)": 41.86, + "step": 120, + "token_acc": 0.8729470096064457, + "train_speed(iter/s)": 0.233938 + }, + { + "epoch": 0.03872, + "grad_norm": 0.879340400582431, + "learning_rate": 1.2899786780383796e-06, + "loss": 0.5034235119819641, + "memory(GiB)": 41.86, + "step": 121, + "token_acc": 0.8257011004614838, + "train_speed(iter/s)": 0.234053 + }, + { + "epoch": 0.03904, + "grad_norm": 0.7877202325408076, + "learning_rate": 1.300639658848614e-06, + "loss": 0.4837523400783539, + "memory(GiB)": 41.86, + "step": 122, + "token_acc": 0.8523979261179521, + "train_speed(iter/s)": 0.234086 + }, + { + "epoch": 0.03936, + "grad_norm": 0.859931447595137, + "learning_rate": 1.3113006396588488e-06, + "loss": 0.47967207431793213, + "memory(GiB)": 41.86, + "step": 123, + "token_acc": 0.8570583012725659, + "train_speed(iter/s)": 0.234183 + }, + { + "epoch": 0.03968, + "grad_norm": 0.9419523457291691, + "learning_rate": 1.3219616204690834e-06, + "loss": 0.447654128074646, + "memory(GiB)": 41.86, + "step": 124, + "token_acc": 0.8917990553885788, + "train_speed(iter/s)": 0.234311 + }, + { + "epoch": 0.04, + "grad_norm": 0.8326556720825181, + "learning_rate": 1.3326226012793179e-06, + "loss": 0.4918779134750366, + "memory(GiB)": 41.86, + "step": 125, + "token_acc": 0.8258229466283158, + "train_speed(iter/s)": 0.234427 + }, + { + "epoch": 0.04032, + "grad_norm": 0.9375401682808183, + "learning_rate": 1.3432835820895524e-06, + "loss": 0.52419114112854, + "memory(GiB)": 41.86, + "step": 126, + "token_acc": 0.8412541254125413, + "train_speed(iter/s)": 0.234539 + }, + { + "epoch": 0.04064, + "grad_norm": 0.9224365368053279, + "learning_rate": 1.353944562899787e-06, + "loss": 0.5088470578193665, + "memory(GiB)": 41.86, + "step": 127, + "token_acc": 0.854043392504931, + "train_speed(iter/s)": 0.234636 + }, + { + "epoch": 0.04096, + "grad_norm": 0.873248662883819, + "learning_rate": 1.3646055437100215e-06, + "loss": 0.44337016344070435, + "memory(GiB)": 41.86, + "step": 128, + "token_acc": 0.8839086859688196, + "train_speed(iter/s)": 0.234667 + }, + { + "epoch": 0.04128, + "grad_norm": 0.8492214238996687, + "learning_rate": 1.375266524520256e-06, + "loss": 0.5351183414459229, + "memory(GiB)": 41.86, + "step": 129, + "token_acc": 0.8527407407407407, + "train_speed(iter/s)": 0.234784 + }, + { + "epoch": 0.0416, + "grad_norm": 0.8713065471783953, + "learning_rate": 1.3859275053304905e-06, + "loss": 0.4296875, + "memory(GiB)": 41.86, + "step": 130, + "token_acc": 0.8420095476244601, + "train_speed(iter/s)": 0.234866 + }, + { + "epoch": 
0.04192, + "grad_norm": 0.8449256183957116, + "learning_rate": 1.396588486140725e-06, + "loss": 0.5023010969161987, + "memory(GiB)": 41.86, + "step": 131, + "token_acc": 0.874447391688771, + "train_speed(iter/s)": 0.235002 + }, + { + "epoch": 0.04224, + "grad_norm": 0.9135487156939148, + "learning_rate": 1.4072494669509596e-06, + "loss": 0.49614638090133667, + "memory(GiB)": 41.86, + "step": 132, + "token_acc": 0.9115942028985508, + "train_speed(iter/s)": 0.235015 + }, + { + "epoch": 0.04256, + "grad_norm": 0.861491358042832, + "learning_rate": 1.417910447761194e-06, + "loss": 0.4219094216823578, + "memory(GiB)": 41.86, + "step": 133, + "token_acc": 0.9188269180413721, + "train_speed(iter/s)": 0.235151 + }, + { + "epoch": 0.04288, + "grad_norm": 0.8970911408978183, + "learning_rate": 1.4285714285714286e-06, + "loss": 0.3816481828689575, + "memory(GiB)": 41.86, + "step": 134, + "token_acc": 0.8998726114649681, + "train_speed(iter/s)": 0.235264 + }, + { + "epoch": 0.0432, + "grad_norm": 0.9191102479319776, + "learning_rate": 1.4392324093816632e-06, + "loss": 0.4161341190338135, + "memory(GiB)": 41.86, + "step": 135, + "token_acc": 0.8923125794155019, + "train_speed(iter/s)": 0.235253 + }, + { + "epoch": 0.04352, + "grad_norm": 0.8548339481199706, + "learning_rate": 1.4498933901918977e-06, + "loss": 0.4477112591266632, + "memory(GiB)": 41.86, + "step": 136, + "token_acc": 0.8246704799801045, + "train_speed(iter/s)": 0.23538 + }, + { + "epoch": 0.04384, + "grad_norm": 0.8830170669258355, + "learning_rate": 1.4605543710021322e-06, + "loss": 0.4537021815776825, + "memory(GiB)": 41.86, + "step": 137, + "token_acc": 0.8584441161989168, + "train_speed(iter/s)": 0.235481 + }, + { + "epoch": 0.04416, + "grad_norm": 0.8246269741123975, + "learning_rate": 1.4712153518123667e-06, + "loss": 0.43747270107269287, + "memory(GiB)": 41.86, + "step": 138, + "token_acc": 0.8994068801897983, + "train_speed(iter/s)": 0.235557 + }, + { + "epoch": 0.04448, + "grad_norm": 0.8592477323610538, + "learning_rate": 1.4818763326226013e-06, + "loss": 0.45325133204460144, + "memory(GiB)": 41.86, + "step": 139, + "token_acc": 0.8904225352112676, + "train_speed(iter/s)": 0.235647 + }, + { + "epoch": 0.0448, + "grad_norm": 0.8420462095437626, + "learning_rate": 1.4925373134328358e-06, + "loss": 0.41720783710479736, + "memory(GiB)": 41.86, + "step": 140, + "token_acc": 0.8754340277777778, + "train_speed(iter/s)": 0.235665 + }, + { + "epoch": 0.04512, + "grad_norm": 0.9273055212315943, + "learning_rate": 1.5031982942430705e-06, + "loss": 0.4626120328903198, + "memory(GiB)": 41.86, + "step": 141, + "token_acc": 0.8923418423973363, + "train_speed(iter/s)": 0.235693 + }, + { + "epoch": 0.04544, + "grad_norm": 0.8271293458047386, + "learning_rate": 1.5138592750533053e-06, + "loss": 0.471557080745697, + "memory(GiB)": 41.86, + "step": 142, + "token_acc": 0.9012496190185919, + "train_speed(iter/s)": 0.235413 + }, + { + "epoch": 0.04576, + "grad_norm": 0.8448358175397689, + "learning_rate": 1.5245202558635398e-06, + "loss": 0.4251336455345154, + "memory(GiB)": 41.86, + "step": 143, + "token_acc": 0.9325113562621674, + "train_speed(iter/s)": 0.2355 + }, + { + "epoch": 0.04608, + "grad_norm": 0.839155410810311, + "learning_rate": 1.5351812366737743e-06, + "loss": 0.49288544058799744, + "memory(GiB)": 41.86, + "step": 144, + "token_acc": 0.8526694848911852, + "train_speed(iter/s)": 0.235532 + }, + { + "epoch": 0.0464, + "grad_norm": 0.8471308884872383, + "learning_rate": 1.5458422174840088e-06, + "loss": 0.3644421100616455, + 
"memory(GiB)": 41.86, + "step": 145, + "token_acc": 0.8905295315682281, + "train_speed(iter/s)": 0.235637 + }, + { + "epoch": 0.04672, + "grad_norm": 0.8828620809002611, + "learning_rate": 1.5565031982942434e-06, + "loss": 0.4570612609386444, + "memory(GiB)": 41.86, + "step": 146, + "token_acc": 0.8185266520263526, + "train_speed(iter/s)": 0.235628 + }, + { + "epoch": 0.04704, + "grad_norm": 0.9205590309840889, + "learning_rate": 1.5671641791044779e-06, + "loss": 0.4176858365535736, + "memory(GiB)": 41.86, + "step": 147, + "token_acc": 0.8582358235823583, + "train_speed(iter/s)": 0.235755 + }, + { + "epoch": 0.04736, + "grad_norm": 0.9452668955275582, + "learning_rate": 1.5778251599147124e-06, + "loss": 0.5206543207168579, + "memory(GiB)": 41.86, + "step": 148, + "token_acc": 0.8697441928844457, + "train_speed(iter/s)": 0.235836 + }, + { + "epoch": 0.04768, + "grad_norm": 0.8530941772791294, + "learning_rate": 1.588486140724947e-06, + "loss": 0.41571375727653503, + "memory(GiB)": 41.86, + "step": 149, + "token_acc": 0.8801026358759039, + "train_speed(iter/s)": 0.235891 + }, + { + "epoch": 0.048, + "grad_norm": 0.8062091341024678, + "learning_rate": 1.5991471215351815e-06, + "loss": 0.46106261014938354, + "memory(GiB)": 41.86, + "step": 150, + "token_acc": 0.8738060781476121, + "train_speed(iter/s)": 0.235882 + }, + { + "epoch": 0.04832, + "grad_norm": 0.7931678174406668, + "learning_rate": 1.609808102345416e-06, + "loss": 0.4713793694972992, + "memory(GiB)": 41.86, + "step": 151, + "token_acc": 0.854895515379197, + "train_speed(iter/s)": 0.235941 + }, + { + "epoch": 0.04864, + "grad_norm": 0.7592280403571188, + "learning_rate": 1.6204690831556505e-06, + "loss": 0.4360312521457672, + "memory(GiB)": 41.86, + "step": 152, + "token_acc": 0.8783898305084745, + "train_speed(iter/s)": 0.235861 + }, + { + "epoch": 0.04896, + "grad_norm": 0.831618908647479, + "learning_rate": 1.631130063965885e-06, + "loss": 0.4255671501159668, + "memory(GiB)": 41.86, + "step": 153, + "token_acc": 0.8779661016949153, + "train_speed(iter/s)": 0.235873 + }, + { + "epoch": 0.04928, + "grad_norm": 0.792294531110576, + "learning_rate": 1.6417910447761196e-06, + "loss": 0.42781883478164673, + "memory(GiB)": 41.86, + "step": 154, + "token_acc": 0.8050131926121372, + "train_speed(iter/s)": 0.235891 + }, + { + "epoch": 0.0496, + "grad_norm": 0.8103964270336658, + "learning_rate": 1.652452025586354e-06, + "loss": 0.4064710736274719, + "memory(GiB)": 41.86, + "step": 155, + "token_acc": 0.9088607594936708, + "train_speed(iter/s)": 0.235968 + }, + { + "epoch": 0.04992, + "grad_norm": 0.8441643893148615, + "learning_rate": 1.6631130063965886e-06, + "loss": 0.465557724237442, + "memory(GiB)": 41.86, + "step": 156, + "token_acc": 0.7990523368511738, + "train_speed(iter/s)": 0.236074 + }, + { + "epoch": 0.05024, + "grad_norm": 0.8538053587979075, + "learning_rate": 1.6737739872068232e-06, + "loss": 0.45971211791038513, + "memory(GiB)": 41.86, + "step": 157, + "token_acc": 0.8736528319192846, + "train_speed(iter/s)": 0.236172 + }, + { + "epoch": 0.05056, + "grad_norm": 0.7914191517200332, + "learning_rate": 1.6844349680170577e-06, + "loss": 0.49210643768310547, + "memory(GiB)": 41.86, + "step": 158, + "token_acc": 0.8660714285714286, + "train_speed(iter/s)": 0.236141 + }, + { + "epoch": 0.05088, + "grad_norm": 0.8765626159763841, + "learning_rate": 1.6950959488272922e-06, + "loss": 0.46361881494522095, + "memory(GiB)": 41.86, + "step": 159, + "token_acc": 0.9271501925545571, + "train_speed(iter/s)": 0.236159 + }, + { + "epoch": 
0.0512, + "grad_norm": 0.8527165156486491, + "learning_rate": 1.7057569296375267e-06, + "loss": 0.415084570646286, + "memory(GiB)": 41.86, + "step": 160, + "token_acc": 0.8965417029933159, + "train_speed(iter/s)": 0.236265 + }, + { + "epoch": 0.05152, + "grad_norm": 0.8029156767814807, + "learning_rate": 1.7164179104477613e-06, + "loss": 0.40730902552604675, + "memory(GiB)": 41.86, + "step": 161, + "token_acc": 0.823206843606054, + "train_speed(iter/s)": 0.236365 + }, + { + "epoch": 0.05184, + "grad_norm": 0.7957247192583061, + "learning_rate": 1.7270788912579958e-06, + "loss": 0.39644187688827515, + "memory(GiB)": 41.86, + "step": 162, + "token_acc": 0.8838555858310627, + "train_speed(iter/s)": 0.236433 + }, + { + "epoch": 0.05216, + "grad_norm": 0.7728962468386367, + "learning_rate": 1.7377398720682303e-06, + "loss": 0.5090023279190063, + "memory(GiB)": 41.86, + "step": 163, + "token_acc": 0.8309020541827925, + "train_speed(iter/s)": 0.23639 + }, + { + "epoch": 0.05248, + "grad_norm": 0.7948628680551149, + "learning_rate": 1.7484008528784648e-06, + "loss": 0.5244089961051941, + "memory(GiB)": 41.86, + "step": 164, + "token_acc": 0.8624174115818111, + "train_speed(iter/s)": 0.236335 + }, + { + "epoch": 0.0528, + "grad_norm": 0.8418524666803685, + "learning_rate": 1.7590618336886994e-06, + "loss": 0.3893824517726898, + "memory(GiB)": 41.86, + "step": 165, + "token_acc": 0.8750761730652041, + "train_speed(iter/s)": 0.236416 + }, + { + "epoch": 0.05312, + "grad_norm": 0.7717267221545162, + "learning_rate": 1.7697228144989339e-06, + "loss": 0.403408020734787, + "memory(GiB)": 41.86, + "step": 166, + "token_acc": 0.9134172551427694, + "train_speed(iter/s)": 0.236504 + }, + { + "epoch": 0.05344, + "grad_norm": 0.8209516056332954, + "learning_rate": 1.7803837953091684e-06, + "loss": 0.4181719422340393, + "memory(GiB)": 41.86, + "step": 167, + "token_acc": 0.8729306487695749, + "train_speed(iter/s)": 0.236596 + }, + { + "epoch": 0.05376, + "grad_norm": 0.8094041383421026, + "learning_rate": 1.791044776119403e-06, + "loss": 0.42891108989715576, + "memory(GiB)": 41.86, + "step": 168, + "token_acc": 0.8914919852034525, + "train_speed(iter/s)": 0.236636 + }, + { + "epoch": 0.05408, + "grad_norm": 0.8115990093335068, + "learning_rate": 1.8017057569296375e-06, + "loss": 0.36025285720825195, + "memory(GiB)": 41.86, + "step": 169, + "token_acc": 0.8765086206896552, + "train_speed(iter/s)": 0.236728 + }, + { + "epoch": 0.0544, + "grad_norm": 0.8967581385557998, + "learning_rate": 1.812366737739872e-06, + "loss": 0.44346532225608826, + "memory(GiB)": 41.86, + "step": 170, + "token_acc": 0.8869936034115139, + "train_speed(iter/s)": 0.236808 + }, + { + "epoch": 0.05472, + "grad_norm": 0.8649159338570668, + "learning_rate": 1.8230277185501067e-06, + "loss": 0.48292624950408936, + "memory(GiB)": 41.86, + "step": 171, + "token_acc": 0.7990570273911091, + "train_speed(iter/s)": 0.236894 + }, + { + "epoch": 0.05504, + "grad_norm": 0.9152803000438953, + "learning_rate": 1.8336886993603415e-06, + "loss": 0.43237584829330444, + "memory(GiB)": 41.86, + "step": 172, + "token_acc": 0.9394602479941648, + "train_speed(iter/s)": 0.236973 + }, + { + "epoch": 0.05536, + "grad_norm": 0.7976308165132434, + "learning_rate": 1.844349680170576e-06, + "loss": 0.4843261241912842, + "memory(GiB)": 41.86, + "step": 173, + "token_acc": 0.895743766122098, + "train_speed(iter/s)": 0.237025 + }, + { + "epoch": 0.05568, + "grad_norm": 0.8224446893478293, + "learning_rate": 1.8550106609808105e-06, + "loss": 0.4896657168865204, + 
"memory(GiB)": 41.86, + "step": 174, + "token_acc": 0.874000761324705, + "train_speed(iter/s)": 0.237032 + }, + { + "epoch": 0.056, + "grad_norm": 1.005431850490073, + "learning_rate": 1.865671641791045e-06, + "loss": 0.41729021072387695, + "memory(GiB)": 41.86, + "step": 175, + "token_acc": 0.8711972522080471, + "train_speed(iter/s)": 0.237037 + }, + { + "epoch": 0.05632, + "grad_norm": 0.8160731233376054, + "learning_rate": 1.8763326226012796e-06, + "loss": 0.5022497177124023, + "memory(GiB)": 41.86, + "step": 176, + "token_acc": 0.8404059040590406, + "train_speed(iter/s)": 0.236994 + }, + { + "epoch": 0.05664, + "grad_norm": 0.8168576229681378, + "learning_rate": 1.886993603411514e-06, + "loss": 0.45563817024230957, + "memory(GiB)": 41.86, + "step": 177, + "token_acc": 0.9467312348668281, + "train_speed(iter/s)": 0.236945 + }, + { + "epoch": 0.05696, + "grad_norm": 0.8708536778376783, + "learning_rate": 1.8976545842217486e-06, + "loss": 0.37782585620880127, + "memory(GiB)": 41.86, + "step": 178, + "token_acc": 0.8462204270051933, + "train_speed(iter/s)": 0.236996 + }, + { + "epoch": 0.05728, + "grad_norm": 0.74860145847988, + "learning_rate": 1.908315565031983e-06, + "loss": 0.46520254015922546, + "memory(GiB)": 41.86, + "step": 179, + "token_acc": 0.857251714503429, + "train_speed(iter/s)": 0.236997 + }, + { + "epoch": 0.0576, + "grad_norm": 0.8474029876167471, + "learning_rate": 1.9189765458422177e-06, + "loss": 0.46798792481422424, + "memory(GiB)": 41.86, + "step": 180, + "token_acc": 0.8447676943117672, + "train_speed(iter/s)": 0.237084 + }, + { + "epoch": 0.05792, + "grad_norm": 0.8084839618494143, + "learning_rate": 1.929637526652452e-06, + "loss": 0.42136165499687195, + "memory(GiB)": 41.86, + "step": 181, + "token_acc": 0.9157795867251096, + "train_speed(iter/s)": 0.237079 + }, + { + "epoch": 0.05824, + "grad_norm": 0.9096306663541507, + "learning_rate": 1.9402985074626867e-06, + "loss": 0.46599751710891724, + "memory(GiB)": 41.86, + "step": 182, + "token_acc": 0.8673218673218673, + "train_speed(iter/s)": 0.237176 + }, + { + "epoch": 0.05856, + "grad_norm": 0.7740773810513154, + "learning_rate": 1.9509594882729213e-06, + "loss": 0.37147605419158936, + "memory(GiB)": 41.86, + "step": 183, + "token_acc": 0.85650953155017, + "train_speed(iter/s)": 0.237177 + }, + { + "epoch": 0.05888, + "grad_norm": 0.7436697200363671, + "learning_rate": 1.9616204690831558e-06, + "loss": 0.40564876794815063, + "memory(GiB)": 41.86, + "step": 184, + "token_acc": 0.8889148191365227, + "train_speed(iter/s)": 0.237177 + }, + { + "epoch": 0.0592, + "grad_norm": 0.8454271607632562, + "learning_rate": 1.9722814498933903e-06, + "loss": 0.47249865531921387, + "memory(GiB)": 41.86, + "step": 185, + "token_acc": 0.796291459911204, + "train_speed(iter/s)": 0.23721 + }, + { + "epoch": 0.05952, + "grad_norm": 0.728473770512436, + "learning_rate": 1.982942430703625e-06, + "loss": 0.44339311122894287, + "memory(GiB)": 41.86, + "step": 186, + "token_acc": 0.8901489882143652, + "train_speed(iter/s)": 0.237203 + }, + { + "epoch": 0.05984, + "grad_norm": 0.7711791269615742, + "learning_rate": 1.9936034115138594e-06, + "loss": 0.4792044758796692, + "memory(GiB)": 41.86, + "step": 187, + "token_acc": 0.8399344441409451, + "train_speed(iter/s)": 0.237229 + }, + { + "epoch": 0.06016, + "grad_norm": 0.8159205179298074, + "learning_rate": 2.004264392324094e-06, + "loss": 0.36532050371170044, + "memory(GiB)": 41.86, + "step": 188, + "token_acc": 0.8588912886969042, + "train_speed(iter/s)": 0.237247 + }, + { + "epoch": 
0.06048, + "grad_norm": 0.8263439480704128, + "learning_rate": 2.0149253731343284e-06, + "loss": 0.42820823192596436, + "memory(GiB)": 41.86, + "step": 189, + "token_acc": 0.8633213859020311, + "train_speed(iter/s)": 0.237326 + }, + { + "epoch": 0.0608, + "grad_norm": 0.7313691432753597, + "learning_rate": 2.025586353944563e-06, + "loss": 0.5361946821212769, + "memory(GiB)": 41.86, + "step": 190, + "token_acc": 0.8082428818625138, + "train_speed(iter/s)": 0.237348 + }, + { + "epoch": 0.06112, + "grad_norm": 0.8611816464500565, + "learning_rate": 2.0362473347547975e-06, + "loss": 0.4607084393501282, + "memory(GiB)": 41.86, + "step": 191, + "token_acc": 0.8523531221162719, + "train_speed(iter/s)": 0.237402 + }, + { + "epoch": 0.06144, + "grad_norm": 0.8401166663047531, + "learning_rate": 2.046908315565032e-06, + "loss": 0.3820692002773285, + "memory(GiB)": 41.86, + "step": 192, + "token_acc": 0.8834688346883469, + "train_speed(iter/s)": 0.23748 + }, + { + "epoch": 0.06176, + "grad_norm": 1.713538069670579, + "learning_rate": 2.0575692963752665e-06, + "loss": 0.4359162151813507, + "memory(GiB)": 41.86, + "step": 193, + "token_acc": 0.9255247122545701, + "train_speed(iter/s)": 0.237525 + }, + { + "epoch": 0.06208, + "grad_norm": 0.8544181943536923, + "learning_rate": 2.068230277185501e-06, + "loss": 0.4442211389541626, + "memory(GiB)": 41.86, + "step": 194, + "token_acc": 0.8725108720531014, + "train_speed(iter/s)": 0.23749 + }, + { + "epoch": 0.0624, + "grad_norm": 0.7721683579076613, + "learning_rate": 2.0788912579957356e-06, + "loss": 0.42826682329177856, + "memory(GiB)": 41.86, + "step": 195, + "token_acc": 0.8228659885006634, + "train_speed(iter/s)": 0.237506 + }, + { + "epoch": 0.06272, + "grad_norm": 0.7514343975504116, + "learning_rate": 2.08955223880597e-06, + "loss": 0.4385693073272705, + "memory(GiB)": 41.86, + "step": 196, + "token_acc": 0.8841320553780617, + "train_speed(iter/s)": 0.237498 + }, + { + "epoch": 0.06304, + "grad_norm": 0.7708215089748411, + "learning_rate": 2.1002132196162046e-06, + "loss": 0.48154687881469727, + "memory(GiB)": 41.86, + "step": 197, + "token_acc": 0.8822409573021485, + "train_speed(iter/s)": 0.237526 + }, + { + "epoch": 0.06336, + "grad_norm": 0.8708607372356321, + "learning_rate": 2.110874200426439e-06, + "loss": 0.4659211039543152, + "memory(GiB)": 41.86, + "step": 198, + "token_acc": 0.9185158239359767, + "train_speed(iter/s)": 0.237514 + }, + { + "epoch": 0.06368, + "grad_norm": 0.9010347669667264, + "learning_rate": 2.1215351812366737e-06, + "loss": 0.45319920778274536, + "memory(GiB)": 41.86, + "step": 199, + "token_acc": 0.9077196095829636, + "train_speed(iter/s)": 0.2376 + }, + { + "epoch": 0.064, + "grad_norm": 0.8446836034567332, + "learning_rate": 2.132196162046908e-06, + "loss": 0.5213237404823303, + "memory(GiB)": 41.86, + "step": 200, + "token_acc": 0.8647272727272727, + "train_speed(iter/s)": 0.237634 + }, + { + "epoch": 0.06432, + "grad_norm": 0.8556092777267248, + "learning_rate": 2.1428571428571427e-06, + "loss": 0.500628650188446, + "memory(GiB)": 41.86, + "step": 201, + "token_acc": 0.8512843623253717, + "train_speed(iter/s)": 0.237683 + }, + { + "epoch": 0.06464, + "grad_norm": 0.7476503939630328, + "learning_rate": 2.1535181236673773e-06, + "loss": 0.48504340648651123, + "memory(GiB)": 41.86, + "step": 202, + "token_acc": 0.8763596809282088, + "train_speed(iter/s)": 0.23756 + }, + { + "epoch": 0.06496, + "grad_norm": 0.860361597093958, + "learning_rate": 2.1641791044776118e-06, + "loss": 0.5184949040412903, + "memory(GiB)": 
41.86, + "step": 203, + "token_acc": 0.8831455169034786, + "train_speed(iter/s)": 0.237573 + }, + { + "epoch": 0.06528, + "grad_norm": 0.8139398459770928, + "learning_rate": 2.1748400852878467e-06, + "loss": 0.46868783235549927, + "memory(GiB)": 41.86, + "step": 204, + "token_acc": 0.8680926916221033, + "train_speed(iter/s)": 0.237637 + }, + { + "epoch": 0.0656, + "grad_norm": 0.88703156287696, + "learning_rate": 2.1855010660980813e-06, + "loss": 0.4294508099555969, + "memory(GiB)": 41.86, + "step": 205, + "token_acc": 0.8958938199917047, + "train_speed(iter/s)": 0.237702 + }, + { + "epoch": 0.06592, + "grad_norm": 0.7902589197351996, + "learning_rate": 2.1961620469083158e-06, + "loss": 0.5315119028091431, + "memory(GiB)": 41.86, + "step": 206, + "token_acc": 0.869279176201373, + "train_speed(iter/s)": 0.237647 + }, + { + "epoch": 0.06624, + "grad_norm": 0.8135152803533094, + "learning_rate": 2.2068230277185503e-06, + "loss": 0.4581015408039093, + "memory(GiB)": 41.86, + "step": 207, + "token_acc": 0.8437705998681608, + "train_speed(iter/s)": 0.237689 + }, + { + "epoch": 0.06656, + "grad_norm": 0.7156199542120756, + "learning_rate": 2.217484008528785e-06, + "loss": 0.5436166524887085, + "memory(GiB)": 41.86, + "step": 208, + "token_acc": 0.7692307692307693, + "train_speed(iter/s)": 0.23763 + }, + { + "epoch": 0.06688, + "grad_norm": 0.8122947613873571, + "learning_rate": 2.2281449893390194e-06, + "loss": 0.46804407238960266, + "memory(GiB)": 41.86, + "step": 209, + "token_acc": 0.8608932833276509, + "train_speed(iter/s)": 0.237684 + }, + { + "epoch": 0.0672, + "grad_norm": 0.8569833540185648, + "learning_rate": 2.238805970149254e-06, + "loss": 0.38038086891174316, + "memory(GiB)": 41.86, + "step": 210, + "token_acc": 0.846065808297568, + "train_speed(iter/s)": 0.237726 + }, + { + "epoch": 0.06752, + "grad_norm": 0.8959778758888763, + "learning_rate": 2.2494669509594884e-06, + "loss": 0.4419552683830261, + "memory(GiB)": 41.86, + "step": 211, + "token_acc": 0.8525963149078727, + "train_speed(iter/s)": 0.237734 + }, + { + "epoch": 0.06784, + "grad_norm": 0.8072309737386063, + "learning_rate": 2.260127931769723e-06, + "loss": 0.43088221549987793, + "memory(GiB)": 41.86, + "step": 212, + "token_acc": 0.8725602755453502, + "train_speed(iter/s)": 0.237793 + }, + { + "epoch": 0.06816, + "grad_norm": 1.106274213983276, + "learning_rate": 2.2707889125799575e-06, + "loss": 0.41723954677581787, + "memory(GiB)": 41.86, + "step": 213, + "token_acc": 0.9232012934518997, + "train_speed(iter/s)": 0.23785 + }, + { + "epoch": 0.06848, + "grad_norm": 0.8140839187184906, + "learning_rate": 2.281449893390192e-06, + "loss": 0.38298842310905457, + "memory(GiB)": 41.86, + "step": 214, + "token_acc": 0.9247661651077674, + "train_speed(iter/s)": 0.23788 + }, + { + "epoch": 0.0688, + "grad_norm": 0.8204315306319542, + "learning_rate": 2.2921108742004265e-06, + "loss": 0.40582361817359924, + "memory(GiB)": 41.86, + "step": 215, + "token_acc": 0.914054054054054, + "train_speed(iter/s)": 0.23793 + }, + { + "epoch": 0.06912, + "grad_norm": 0.8009870042095883, + "learning_rate": 2.302771855010661e-06, + "loss": 0.3909275531768799, + "memory(GiB)": 41.86, + "step": 216, + "token_acc": 0.8840949706407966, + "train_speed(iter/s)": 0.237978 + }, + { + "epoch": 0.06944, + "grad_norm": 0.8830878128448973, + "learning_rate": 2.3134328358208956e-06, + "loss": 0.3932962417602539, + "memory(GiB)": 41.86, + "step": 217, + "token_acc": 0.9292631578947368, + "train_speed(iter/s)": 0.238048 + }, + { + "epoch": 0.06976, + "grad_norm": 
0.9151928900378004, + "learning_rate": 2.32409381663113e-06, + "loss": 0.34649908542633057, + "memory(GiB)": 41.86, + "step": 218, + "token_acc": 0.8782629330802089, + "train_speed(iter/s)": 0.238077 + }, + { + "epoch": 0.07008, + "grad_norm": 0.8554988040501391, + "learning_rate": 2.3347547974413646e-06, + "loss": 0.450014591217041, + "memory(GiB)": 41.86, + "step": 219, + "token_acc": 0.846788990825688, + "train_speed(iter/s)": 0.23807 + }, + { + "epoch": 0.0704, + "grad_norm": 0.7663903826148445, + "learning_rate": 2.345415778251599e-06, + "loss": 0.44531285762786865, + "memory(GiB)": 41.86, + "step": 220, + "token_acc": 0.8728813559322034, + "train_speed(iter/s)": 0.238125 + }, + { + "epoch": 0.07072, + "grad_norm": 0.8013588415788543, + "learning_rate": 2.3560767590618337e-06, + "loss": 0.41598182916641235, + "memory(GiB)": 41.86, + "step": 221, + "token_acc": 0.8204656862745098, + "train_speed(iter/s)": 0.238141 + }, + { + "epoch": 0.07104, + "grad_norm": 0.8770486851906605, + "learning_rate": 2.366737739872068e-06, + "loss": 0.5007616281509399, + "memory(GiB)": 41.86, + "step": 222, + "token_acc": 0.8428745432399513, + "train_speed(iter/s)": 0.238179 + }, + { + "epoch": 0.07136, + "grad_norm": 1.0031669771259017, + "learning_rate": 2.3773987206823027e-06, + "loss": 0.42271238565444946, + "memory(GiB)": 41.86, + "step": 223, + "token_acc": 0.876834148510449, + "train_speed(iter/s)": 0.238157 + }, + { + "epoch": 0.07168, + "grad_norm": 0.7718766266882297, + "learning_rate": 2.3880597014925373e-06, + "loss": 0.43677568435668945, + "memory(GiB)": 41.86, + "step": 224, + "token_acc": 0.8706467661691543, + "train_speed(iter/s)": 0.238138 + }, + { + "epoch": 0.072, + "grad_norm": 0.7949712705696272, + "learning_rate": 2.398720682302772e-06, + "loss": 0.39655208587646484, + "memory(GiB)": 41.86, + "step": 225, + "token_acc": 0.9146688338073954, + "train_speed(iter/s)": 0.238137 + }, + { + "epoch": 0.07232, + "grad_norm": 0.7605409106017403, + "learning_rate": 2.4093816631130067e-06, + "loss": 0.3562984764575958, + "memory(GiB)": 41.86, + "step": 226, + "token_acc": 0.9402585822559073, + "train_speed(iter/s)": 0.23816 + }, + { + "epoch": 0.07264, + "grad_norm": 0.7416255215492054, + "learning_rate": 2.4200426439232413e-06, + "loss": 0.46417832374572754, + "memory(GiB)": 41.86, + "step": 227, + "token_acc": 0.8871699669966997, + "train_speed(iter/s)": 0.238177 + }, + { + "epoch": 0.07296, + "grad_norm": 0.8085451711583856, + "learning_rate": 2.4307036247334758e-06, + "loss": 0.4530346989631653, + "memory(GiB)": 41.86, + "step": 228, + "token_acc": 0.8, + "train_speed(iter/s)": 0.238224 + }, + { + "epoch": 0.07328, + "grad_norm": 0.7644753812388857, + "learning_rate": 2.4413646055437103e-06, + "loss": 0.4983330965042114, + "memory(GiB)": 41.86, + "step": 229, + "token_acc": 0.8782894736842105, + "train_speed(iter/s)": 0.238262 + }, + { + "epoch": 0.0736, + "grad_norm": 0.8144786052649817, + "learning_rate": 2.452025586353945e-06, + "loss": 0.4731840491294861, + "memory(GiB)": 41.86, + "step": 230, + "token_acc": 0.8175675675675675, + "train_speed(iter/s)": 0.238308 + }, + { + "epoch": 0.07392, + "grad_norm": 0.8016669555546865, + "learning_rate": 2.4626865671641794e-06, + "loss": 0.49591851234436035, + "memory(GiB)": 41.86, + "step": 231, + "token_acc": 0.9157088122605364, + "train_speed(iter/s)": 0.238248 + }, + { + "epoch": 0.07424, + "grad_norm": 0.7985778385901379, + "learning_rate": 2.473347547974414e-06, + "loss": 0.4217742085456848, + "memory(GiB)": 41.86, + "step": 232, + "token_acc": 
0.8717330116606353, + "train_speed(iter/s)": 0.238259 + }, + { + "epoch": 0.07456, + "grad_norm": 0.7270346053076612, + "learning_rate": 2.4840085287846484e-06, + "loss": 0.4165884852409363, + "memory(GiB)": 41.86, + "step": 233, + "token_acc": 0.8313452617627568, + "train_speed(iter/s)": 0.23821 + }, + { + "epoch": 0.07488, + "grad_norm": 0.8570636077103895, + "learning_rate": 2.494669509594883e-06, + "loss": 0.45557162165641785, + "memory(GiB)": 41.86, + "step": 234, + "token_acc": 0.9165097300690521, + "train_speed(iter/s)": 0.238238 + }, + { + "epoch": 0.0752, + "grad_norm": 0.7926291297808056, + "learning_rate": 2.5053304904051175e-06, + "loss": 0.34986788034439087, + "memory(GiB)": 41.86, + "step": 235, + "token_acc": 0.8230411686586986, + "train_speed(iter/s)": 0.238255 + }, + { + "epoch": 0.07552, + "grad_norm": 0.7712623900956994, + "learning_rate": 2.515991471215352e-06, + "loss": 0.4790201783180237, + "memory(GiB)": 41.86, + "step": 236, + "token_acc": 0.8261179828734538, + "train_speed(iter/s)": 0.23825 + }, + { + "epoch": 0.07584, + "grad_norm": 0.8814418416285378, + "learning_rate": 2.5266524520255865e-06, + "loss": 0.48645997047424316, + "memory(GiB)": 41.86, + "step": 237, + "token_acc": 0.8608964451313755, + "train_speed(iter/s)": 0.238314 + }, + { + "epoch": 0.07616, + "grad_norm": 0.7987760798074977, + "learning_rate": 2.537313432835821e-06, + "loss": 0.3226853311061859, + "memory(GiB)": 41.86, + "step": 238, + "token_acc": 0.9142185663924794, + "train_speed(iter/s)": 0.238385 + }, + { + "epoch": 0.07648, + "grad_norm": 0.8239518374370381, + "learning_rate": 2.5479744136460556e-06, + "loss": 0.459033340215683, + "memory(GiB)": 41.86, + "step": 239, + "token_acc": 0.8367633302151544, + "train_speed(iter/s)": 0.238447 + }, + { + "epoch": 0.0768, + "grad_norm": 0.8541241250905263, + "learning_rate": 2.55863539445629e-06, + "loss": 0.45003989338874817, + "memory(GiB)": 41.86, + "step": 240, + "token_acc": 0.9227019498607242, + "train_speed(iter/s)": 0.238516 + }, + { + "epoch": 0.07712, + "grad_norm": 0.8784925713077107, + "learning_rate": 2.5692963752665246e-06, + "loss": 0.42039692401885986, + "memory(GiB)": 41.86, + "step": 241, + "token_acc": 0.8805970149253731, + "train_speed(iter/s)": 0.238545 + }, + { + "epoch": 0.07744, + "grad_norm": 0.8112724825843277, + "learning_rate": 2.579957356076759e-06, + "loss": 0.3838690221309662, + "memory(GiB)": 41.86, + "step": 242, + "token_acc": 0.8423857347817175, + "train_speed(iter/s)": 0.238583 + }, + { + "epoch": 0.07776, + "grad_norm": 0.7910654621550316, + "learning_rate": 2.5906183368869937e-06, + "loss": 0.32918781042099, + "memory(GiB)": 41.86, + "step": 243, + "token_acc": 0.9362211001860218, + "train_speed(iter/s)": 0.238652 + }, + { + "epoch": 0.07808, + "grad_norm": 0.8106438237016057, + "learning_rate": 2.601279317697228e-06, + "loss": 0.3768569231033325, + "memory(GiB)": 41.86, + "step": 244, + "token_acc": 0.8347185941581572, + "train_speed(iter/s)": 0.238624 + }, + { + "epoch": 0.0784, + "grad_norm": 0.9036025594264395, + "learning_rate": 2.6119402985074627e-06, + "loss": 0.40150022506713867, + "memory(GiB)": 41.86, + "step": 245, + "token_acc": 0.9010615711252654, + "train_speed(iter/s)": 0.238695 + }, + { + "epoch": 0.07872, + "grad_norm": 0.8041111832627525, + "learning_rate": 2.6226012793176977e-06, + "loss": 0.3304884433746338, + "memory(GiB)": 41.86, + "step": 246, + "token_acc": 0.9079930994824612, + "train_speed(iter/s)": 0.238709 + }, + { + "epoch": 0.07904, + "grad_norm": 0.7844480070895389, + 
"learning_rate": 2.6332622601279318e-06, + "loss": 0.4810687303543091, + "memory(GiB)": 41.86, + "step": 247, + "token_acc": 0.8745964738018376, + "train_speed(iter/s)": 0.238692 + }, + { + "epoch": 0.07936, + "grad_norm": 1.1146758793159208, + "learning_rate": 2.6439232409381667e-06, + "loss": 0.38047271966934204, + "memory(GiB)": 41.86, + "step": 248, + "token_acc": 0.8577777777777778, + "train_speed(iter/s)": 0.238728 + }, + { + "epoch": 0.07968, + "grad_norm": 0.7924035790049286, + "learning_rate": 2.654584221748401e-06, + "loss": 0.39099615812301636, + "memory(GiB)": 41.86, + "step": 249, + "token_acc": 0.9238799478033928, + "train_speed(iter/s)": 0.238749 + }, + { + "epoch": 0.08, + "grad_norm": 0.7404236183123053, + "learning_rate": 2.6652452025586358e-06, + "loss": 0.39462414383888245, + "memory(GiB)": 41.86, + "step": 250, + "token_acc": 0.8765837634913186, + "train_speed(iter/s)": 0.238762 + }, + { + "epoch": 0.08032, + "grad_norm": 0.7836625996597706, + "learning_rate": 2.6759061833688703e-06, + "loss": 0.5299619436264038, + "memory(GiB)": 41.86, + "step": 251, + "token_acc": 0.8648777012586084, + "train_speed(iter/s)": 0.238759 + }, + { + "epoch": 0.08064, + "grad_norm": 0.7871230967478383, + "learning_rate": 2.686567164179105e-06, + "loss": 0.38073286414146423, + "memory(GiB)": 41.86, + "step": 252, + "token_acc": 0.9192933841357811, + "train_speed(iter/s)": 0.23878 + }, + { + "epoch": 0.08096, + "grad_norm": 0.8981297354469057, + "learning_rate": 2.6972281449893394e-06, + "loss": 0.46103090047836304, + "memory(GiB)": 41.86, + "step": 253, + "token_acc": 0.8792016806722689, + "train_speed(iter/s)": 0.238823 + }, + { + "epoch": 0.08128, + "grad_norm": 0.8523386942144723, + "learning_rate": 2.707889125799574e-06, + "loss": 0.5090248584747314, + "memory(GiB)": 41.86, + "step": 254, + "token_acc": 0.914975845410628, + "train_speed(iter/s)": 0.238802 + }, + { + "epoch": 0.0816, + "grad_norm": 0.8363012543495664, + "learning_rate": 2.7185501066098084e-06, + "loss": 0.406773179769516, + "memory(GiB)": 41.86, + "step": 255, + "token_acc": 0.8165983606557377, + "train_speed(iter/s)": 0.238858 + }, + { + "epoch": 0.08192, + "grad_norm": 0.7839143445212261, + "learning_rate": 2.729211087420043e-06, + "loss": 0.48697754740715027, + "memory(GiB)": 41.86, + "step": 256, + "token_acc": 0.8552311435523114, + "train_speed(iter/s)": 0.23888 + }, + { + "epoch": 0.08224, + "grad_norm": 0.7816507311363058, + "learning_rate": 2.7398720682302775e-06, + "loss": 0.47162342071533203, + "memory(GiB)": 41.86, + "step": 257, + "token_acc": 0.8781925343811395, + "train_speed(iter/s)": 0.238873 + }, + { + "epoch": 0.08256, + "grad_norm": 0.7664080574153175, + "learning_rate": 2.750533049040512e-06, + "loss": 0.4288128912448883, + "memory(GiB)": 41.86, + "step": 258, + "token_acc": 0.8552231237322515, + "train_speed(iter/s)": 0.238853 + }, + { + "epoch": 0.08288, + "grad_norm": 0.821735665785851, + "learning_rate": 2.7611940298507465e-06, + "loss": 0.4514979422092438, + "memory(GiB)": 41.86, + "step": 259, + "token_acc": 0.9143426294820717, + "train_speed(iter/s)": 0.23885 + }, + { + "epoch": 0.0832, + "grad_norm": 0.814315596977121, + "learning_rate": 2.771855010660981e-06, + "loss": 0.4167838394641876, + "memory(GiB)": 41.86, + "step": 260, + "token_acc": 0.8996787744007907, + "train_speed(iter/s)": 0.238903 + }, + { + "epoch": 0.08352, + "grad_norm": 0.7364174627910478, + "learning_rate": 2.7825159914712156e-06, + "loss": 0.5202943682670593, + "memory(GiB)": 41.86, + "step": 261, + "token_acc": 
0.8606952550114184, + "train_speed(iter/s)": 0.238885 + }, + { + "epoch": 0.08384, + "grad_norm": 0.8333322310525604, + "learning_rate": 2.79317697228145e-06, + "loss": 0.4717528223991394, + "memory(GiB)": 41.86, + "step": 262, + "token_acc": 0.9054347826086957, + "train_speed(iter/s)": 0.238921 + }, + { + "epoch": 0.08416, + "grad_norm": 0.869594181534333, + "learning_rate": 2.8038379530916846e-06, + "loss": 0.40337133407592773, + "memory(GiB)": 41.86, + "step": 263, + "token_acc": 0.9378277153558052, + "train_speed(iter/s)": 0.238946 + }, + { + "epoch": 0.08448, + "grad_norm": 0.7802824694096941, + "learning_rate": 2.814498933901919e-06, + "loss": 0.4327160716056824, + "memory(GiB)": 41.86, + "step": 264, + "token_acc": 0.8579910935180604, + "train_speed(iter/s)": 0.238839 + }, + { + "epoch": 0.0848, + "grad_norm": 0.8356780652956406, + "learning_rate": 2.825159914712154e-06, + "loss": 0.4424020051956177, + "memory(GiB)": 41.86, + "step": 265, + "token_acc": 0.8804623625599097, + "train_speed(iter/s)": 0.238852 + }, + { + "epoch": 0.08512, + "grad_norm": 0.733814281564822, + "learning_rate": 2.835820895522388e-06, + "loss": 0.48017603158950806, + "memory(GiB)": 41.86, + "step": 266, + "token_acc": 0.8956921587608906, + "train_speed(iter/s)": 0.238837 + }, + { + "epoch": 0.08544, + "grad_norm": 0.8460489851711144, + "learning_rate": 2.846481876332623e-06, + "loss": 0.37948840856552124, + "memory(GiB)": 41.86, + "step": 267, + "token_acc": 0.8501709077098367, + "train_speed(iter/s)": 0.238848 + }, + { + "epoch": 0.08576, + "grad_norm": 0.78032691431983, + "learning_rate": 2.8571428571428573e-06, + "loss": 0.47093185782432556, + "memory(GiB)": 41.86, + "step": 268, + "token_acc": 0.8442019099590723, + "train_speed(iter/s)": 0.238878 + }, + { + "epoch": 0.08608, + "grad_norm": 0.8694695107085626, + "learning_rate": 2.867803837953092e-06, + "loss": 0.40558913350105286, + "memory(GiB)": 41.86, + "step": 269, + "token_acc": 0.8708071278825996, + "train_speed(iter/s)": 0.238887 + }, + { + "epoch": 0.0864, + "grad_norm": 0.762062404680544, + "learning_rate": 2.8784648187633263e-06, + "loss": 0.4456389844417572, + "memory(GiB)": 41.86, + "step": 270, + "token_acc": 0.9062415563361254, + "train_speed(iter/s)": 0.238835 + }, + { + "epoch": 0.08672, + "grad_norm": 0.8175720175207222, + "learning_rate": 2.8891257995735613e-06, + "loss": 0.39426881074905396, + "memory(GiB)": 41.86, + "step": 271, + "token_acc": 0.9198617221873036, + "train_speed(iter/s)": 0.238901 + }, + { + "epoch": 0.08704, + "grad_norm": 0.751949509555342, + "learning_rate": 2.8997867803837954e-06, + "loss": 0.43765342235565186, + "memory(GiB)": 41.86, + "step": 272, + "token_acc": 0.8897126969416126, + "train_speed(iter/s)": 0.238911 + }, + { + "epoch": 0.08736, + "grad_norm": 0.815707721305127, + "learning_rate": 2.9104477611940303e-06, + "loss": 0.38230466842651367, + "memory(GiB)": 41.86, + "step": 273, + "token_acc": 0.9289617486338798, + "train_speed(iter/s)": 0.238872 + }, + { + "epoch": 0.08768, + "grad_norm": 0.7935828765845511, + "learning_rate": 2.9211087420042644e-06, + "loss": 0.4605436325073242, + "memory(GiB)": 41.86, + "step": 274, + "token_acc": 0.8269720101781171, + "train_speed(iter/s)": 0.238898 + }, + { + "epoch": 0.088, + "grad_norm": 0.7828391626259436, + "learning_rate": 2.9317697228144994e-06, + "loss": 0.4469219148159027, + "memory(GiB)": 41.86, + "step": 275, + "token_acc": 0.8416918429003021, + "train_speed(iter/s)": 0.238904 + }, + { + "epoch": 0.08832, + "grad_norm": 0.8971801566895942, + 
"learning_rate": 2.9424307036247335e-06, + "loss": 0.47280406951904297, + "memory(GiB)": 41.86, + "step": 276, + "token_acc": 0.8455019556714471, + "train_speed(iter/s)": 0.2389 + }, + { + "epoch": 0.08864, + "grad_norm": 0.8857551802405319, + "learning_rate": 2.9530916844349684e-06, + "loss": 0.3692026734352112, + "memory(GiB)": 41.86, + "step": 277, + "token_acc": 0.9241913746630728, + "train_speed(iter/s)": 0.238958 + }, + { + "epoch": 0.08896, + "grad_norm": 0.7906716943771644, + "learning_rate": 2.9637526652452025e-06, + "loss": 0.4652382731437683, + "memory(GiB)": 41.86, + "step": 278, + "token_acc": 0.8336431226765799, + "train_speed(iter/s)": 0.238911 + }, + { + "epoch": 0.08928, + "grad_norm": 0.7734502634828265, + "learning_rate": 2.9744136460554375e-06, + "loss": 0.4486645758152008, + "memory(GiB)": 41.86, + "step": 279, + "token_acc": 0.8685785536159601, + "train_speed(iter/s)": 0.238897 + }, + { + "epoch": 0.0896, + "grad_norm": 0.7686137576599014, + "learning_rate": 2.9850746268656716e-06, + "loss": 0.41775035858154297, + "memory(GiB)": 41.86, + "step": 280, + "token_acc": 0.8477222630418809, + "train_speed(iter/s)": 0.238937 + }, + { + "epoch": 0.08992, + "grad_norm": 0.7445375842872007, + "learning_rate": 2.9957356076759065e-06, + "loss": 0.4182976186275482, + "memory(GiB)": 41.86, + "step": 281, + "token_acc": 0.8573144367042997, + "train_speed(iter/s)": 0.238951 + }, + { + "epoch": 0.09024, + "grad_norm": 0.7555545104283435, + "learning_rate": 3.006396588486141e-06, + "loss": 0.4727635085582733, + "memory(GiB)": 41.86, + "step": 282, + "token_acc": 0.9028764204545454, + "train_speed(iter/s)": 0.238947 + }, + { + "epoch": 0.09056, + "grad_norm": 0.754969832530047, + "learning_rate": 3.0170575692963756e-06, + "loss": 0.44284188747406006, + "memory(GiB)": 41.86, + "step": 283, + "token_acc": 0.9028256374913852, + "train_speed(iter/s)": 0.23895 + }, + { + "epoch": 0.09088, + "grad_norm": 0.764420772270067, + "learning_rate": 3.0277185501066105e-06, + "loss": 0.385779470205307, + "memory(GiB)": 41.86, + "step": 284, + "token_acc": 0.9163674762407603, + "train_speed(iter/s)": 0.238924 + }, + { + "epoch": 0.0912, + "grad_norm": 0.7628713620927181, + "learning_rate": 3.0383795309168446e-06, + "loss": 0.4512256979942322, + "memory(GiB)": 41.86, + "step": 285, + "token_acc": 0.8524711089254979, + "train_speed(iter/s)": 0.238932 + }, + { + "epoch": 0.09152, + "grad_norm": 0.8001255688626848, + "learning_rate": 3.0490405117270796e-06, + "loss": 0.44787895679473877, + "memory(GiB)": 41.86, + "step": 286, + "token_acc": 0.8956479923058428, + "train_speed(iter/s)": 0.238967 + }, + { + "epoch": 0.09184, + "grad_norm": 0.7297465689017628, + "learning_rate": 3.0597014925373137e-06, + "loss": 0.46175825595855713, + "memory(GiB)": 41.86, + "step": 287, + "token_acc": 0.8029499502817369, + "train_speed(iter/s)": 0.238952 + }, + { + "epoch": 0.09216, + "grad_norm": 0.7623848894911398, + "learning_rate": 3.0703624733475486e-06, + "loss": 0.4042823314666748, + "memory(GiB)": 41.86, + "step": 288, + "token_acc": 0.877628159697614, + "train_speed(iter/s)": 0.239 + }, + { + "epoch": 0.09248, + "grad_norm": 0.7481121915415929, + "learning_rate": 3.0810234541577827e-06, + "loss": 0.48246750235557556, + "memory(GiB)": 41.86, + "step": 289, + "token_acc": 0.885910990902385, + "train_speed(iter/s)": 0.239006 + }, + { + "epoch": 0.0928, + "grad_norm": 0.7198908860435124, + "learning_rate": 3.0916844349680177e-06, + "loss": 0.41685357689857483, + "memory(GiB)": 41.86, + "step": 290, + "token_acc": 
0.8800938141307535, + "train_speed(iter/s)": 0.238989 + }, + { + "epoch": 0.09312, + "grad_norm": 0.8075725912135797, + "learning_rate": 3.1023454157782518e-06, + "loss": 0.37431731820106506, + "memory(GiB)": 41.86, + "step": 291, + "token_acc": 0.9073020388809863, + "train_speed(iter/s)": 0.23901 + }, + { + "epoch": 0.09344, + "grad_norm": 0.7424290897950819, + "learning_rate": 3.1130063965884867e-06, + "loss": 0.39317965507507324, + "memory(GiB)": 41.86, + "step": 292, + "token_acc": 0.9119260756569448, + "train_speed(iter/s)": 0.239039 + }, + { + "epoch": 0.09376, + "grad_norm": 0.797518054464604, + "learning_rate": 3.123667377398721e-06, + "loss": 0.4403042793273926, + "memory(GiB)": 41.86, + "step": 293, + "token_acc": 0.902165932452276, + "train_speed(iter/s)": 0.239044 + }, + { + "epoch": 0.09408, + "grad_norm": 0.7940153929766716, + "learning_rate": 3.1343283582089558e-06, + "loss": 0.40593621134757996, + "memory(GiB)": 41.86, + "step": 294, + "token_acc": 0.8728943338437979, + "train_speed(iter/s)": 0.239062 + }, + { + "epoch": 0.0944, + "grad_norm": 0.7082409407505589, + "learning_rate": 3.14498933901919e-06, + "loss": 0.4122522473335266, + "memory(GiB)": 41.86, + "step": 295, + "token_acc": 0.9226006191950464, + "train_speed(iter/s)": 0.239063 + }, + { + "epoch": 0.09472, + "grad_norm": 0.7538691559681522, + "learning_rate": 3.155650319829425e-06, + "loss": 0.4231248199939728, + "memory(GiB)": 41.86, + "step": 296, + "token_acc": 0.8822470291681671, + "train_speed(iter/s)": 0.23903 + }, + { + "epoch": 0.09504, + "grad_norm": 0.7768552976034606, + "learning_rate": 3.166311300639659e-06, + "loss": 0.5054468512535095, + "memory(GiB)": 41.86, + "step": 297, + "token_acc": 0.8565075600355766, + "train_speed(iter/s)": 0.239064 + }, + { + "epoch": 0.09536, + "grad_norm": 0.7834102706560788, + "learning_rate": 3.176972281449894e-06, + "loss": 0.43437373638153076, + "memory(GiB)": 41.86, + "step": 298, + "token_acc": 0.8475440222428174, + "train_speed(iter/s)": 0.239088 + }, + { + "epoch": 0.09568, + "grad_norm": 0.8017123713142555, + "learning_rate": 3.187633262260128e-06, + "loss": 0.38961392641067505, + "memory(GiB)": 41.86, + "step": 299, + "token_acc": 0.9274255156608098, + "train_speed(iter/s)": 0.239014 + }, + { + "epoch": 0.096, + "grad_norm": 0.7999226350266968, + "learning_rate": 3.198294243070363e-06, + "loss": 0.3929305672645569, + "memory(GiB)": 41.86, + "step": 300, + "token_acc": 0.9069212410501193, + "train_speed(iter/s)": 0.23907 + }, + { + "epoch": 0.09632, + "grad_norm": 0.757149206713092, + "learning_rate": 3.208955223880597e-06, + "loss": 0.5028648972511292, + "memory(GiB)": 41.86, + "step": 301, + "token_acc": 0.8434428194677536, + "train_speed(iter/s)": 0.239101 + }, + { + "epoch": 0.09664, + "grad_norm": 0.7950750526744113, + "learning_rate": 3.219616204690832e-06, + "loss": 0.3768015205860138, + "memory(GiB)": 41.86, + "step": 302, + "token_acc": 0.9394471259324265, + "train_speed(iter/s)": 0.239098 + }, + { + "epoch": 0.09696, + "grad_norm": 0.8747463381323577, + "learning_rate": 3.230277185501066e-06, + "loss": 0.3932783603668213, + "memory(GiB)": 41.86, + "step": 303, + "token_acc": 0.8559708295350957, + "train_speed(iter/s)": 0.2391 + }, + { + "epoch": 0.09728, + "grad_norm": 0.7647462426549961, + "learning_rate": 3.240938166311301e-06, + "loss": 0.43029674887657166, + "memory(GiB)": 41.86, + "step": 304, + "token_acc": 0.8871398078975453, + "train_speed(iter/s)": 0.239114 + }, + { + "epoch": 0.0976, + "grad_norm": 0.8273688830860688, + "learning_rate": 
3.251599147121535e-06, + "loss": 0.42678073048591614, + "memory(GiB)": 41.86, + "step": 305, + "token_acc": 0.9146341463414634, + "train_speed(iter/s)": 0.239091 + }, + { + "epoch": 0.09792, + "grad_norm": 0.7861801704390484, + "learning_rate": 3.26226012793177e-06, + "loss": 0.519094705581665, + "memory(GiB)": 41.86, + "step": 306, + "token_acc": 0.8009603841536614, + "train_speed(iter/s)": 0.239116 + }, + { + "epoch": 0.09824, + "grad_norm": 0.756081520165375, + "learning_rate": 3.272921108742004e-06, + "loss": 0.38784074783325195, + "memory(GiB)": 41.86, + "step": 307, + "token_acc": 0.8992583436341162, + "train_speed(iter/s)": 0.239134 + }, + { + "epoch": 0.09856, + "grad_norm": 0.845218868331866, + "learning_rate": 3.283582089552239e-06, + "loss": 0.4429062008857727, + "memory(GiB)": 41.86, + "step": 308, + "token_acc": 0.8673100120627262, + "train_speed(iter/s)": 0.239141 + }, + { + "epoch": 0.09888, + "grad_norm": 0.7785448962897669, + "learning_rate": 3.2942430703624733e-06, + "loss": 0.45241111516952515, + "memory(GiB)": 41.86, + "step": 309, + "token_acc": 0.8548887010425472, + "train_speed(iter/s)": 0.239186 + }, + { + "epoch": 0.0992, + "grad_norm": 0.7650730096151205, + "learning_rate": 3.304904051172708e-06, + "loss": 0.43362128734588623, + "memory(GiB)": 41.86, + "step": 310, + "token_acc": 0.8212187958883994, + "train_speed(iter/s)": 0.239198 + }, + { + "epoch": 0.09952, + "grad_norm": 0.7727126548383325, + "learning_rate": 3.3155650319829423e-06, + "loss": 0.3836996555328369, + "memory(GiB)": 41.86, + "step": 311, + "token_acc": 0.8742546020222971, + "train_speed(iter/s)": 0.239232 + }, + { + "epoch": 0.09984, + "grad_norm": 0.8188981205824096, + "learning_rate": 3.3262260127931773e-06, + "loss": 0.4058418273925781, + "memory(GiB)": 41.86, + "step": 312, + "token_acc": 0.8456768859800207, + "train_speed(iter/s)": 0.239291 + }, + { + "epoch": 0.10016, + "grad_norm": 0.8199139245566323, + "learning_rate": 3.336886993603412e-06, + "loss": 0.42839229106903076, + "memory(GiB)": 41.86, + "step": 313, + "token_acc": 0.938368580060423, + "train_speed(iter/s)": 0.239342 + }, + { + "epoch": 0.10048, + "grad_norm": 0.8317891147215691, + "learning_rate": 3.3475479744136463e-06, + "loss": 0.40215349197387695, + "memory(GiB)": 41.86, + "step": 314, + "token_acc": 0.8580721466587817, + "train_speed(iter/s)": 0.239379 + }, + { + "epoch": 0.1008, + "grad_norm": 0.8161061304757473, + "learning_rate": 3.3582089552238813e-06, + "loss": 0.4352240562438965, + "memory(GiB)": 41.86, + "step": 315, + "token_acc": 0.9280388978930308, + "train_speed(iter/s)": 0.239387 + }, + { + "epoch": 0.10112, + "grad_norm": 0.716959642401145, + "learning_rate": 3.3688699360341154e-06, + "loss": 0.40640610456466675, + "memory(GiB)": 41.86, + "step": 316, + "token_acc": 0.9138906348208674, + "train_speed(iter/s)": 0.239424 + }, + { + "epoch": 0.10144, + "grad_norm": 0.7897846883863022, + "learning_rate": 3.3795309168443503e-06, + "loss": 0.4262651801109314, + "memory(GiB)": 41.86, + "step": 317, + "token_acc": 0.8898140438523453, + "train_speed(iter/s)": 0.239466 + }, + { + "epoch": 0.10176, + "grad_norm": 0.7860447024150933, + "learning_rate": 3.3901918976545844e-06, + "loss": 0.4878777265548706, + "memory(GiB)": 41.86, + "step": 318, + "token_acc": 0.7632069608452455, + "train_speed(iter/s)": 0.239442 + }, + { + "epoch": 0.10208, + "grad_norm": 0.7935062527025972, + "learning_rate": 3.4008528784648194e-06, + "loss": 0.41996830701828003, + "memory(GiB)": 41.86, + "step": 319, + "token_acc": 0.801345059493016, + 
"train_speed(iter/s)": 0.239442 + }, + { + "epoch": 0.1024, + "grad_norm": 0.7550998850510244, + "learning_rate": 3.4115138592750535e-06, + "loss": 0.5519275665283203, + "memory(GiB)": 41.86, + "step": 320, + "token_acc": 0.8776470588235294, + "train_speed(iter/s)": 0.239448 + }, + { + "epoch": 0.10272, + "grad_norm": 0.74507182594586, + "learning_rate": 3.4221748400852884e-06, + "loss": 0.4754410982131958, + "memory(GiB)": 41.86, + "step": 321, + "token_acc": 0.8518024032042724, + "train_speed(iter/s)": 0.239453 + }, + { + "epoch": 0.10304, + "grad_norm": 0.73124985253951, + "learning_rate": 3.4328358208955225e-06, + "loss": 0.4766056537628174, + "memory(GiB)": 41.86, + "step": 322, + "token_acc": 0.8598766744631087, + "train_speed(iter/s)": 0.239474 + }, + { + "epoch": 0.10336, + "grad_norm": 0.7984992961006573, + "learning_rate": 3.4434968017057575e-06, + "loss": 0.3986496925354004, + "memory(GiB)": 41.86, + "step": 323, + "token_acc": 0.855722891566265, + "train_speed(iter/s)": 0.239502 + }, + { + "epoch": 0.10368, + "grad_norm": 0.8850571993628852, + "learning_rate": 3.4541577825159916e-06, + "loss": 0.42794644832611084, + "memory(GiB)": 41.86, + "step": 324, + "token_acc": 0.8945, + "train_speed(iter/s)": 0.239541 + }, + { + "epoch": 0.104, + "grad_norm": 0.7561382219460823, + "learning_rate": 3.4648187633262265e-06, + "loss": 0.3156163990497589, + "memory(GiB)": 41.86, + "step": 325, + "token_acc": 0.9014801110083256, + "train_speed(iter/s)": 0.239579 + }, + { + "epoch": 0.10432, + "grad_norm": 0.8018850874679382, + "learning_rate": 3.4754797441364606e-06, + "loss": 0.37084078788757324, + "memory(GiB)": 41.86, + "step": 326, + "token_acc": 0.9344957587181904, + "train_speed(iter/s)": 0.239627 + }, + { + "epoch": 0.10464, + "grad_norm": 0.7957597062260352, + "learning_rate": 3.4861407249466956e-06, + "loss": 0.4108327031135559, + "memory(GiB)": 41.86, + "step": 327, + "token_acc": 0.9014998880680546, + "train_speed(iter/s)": 0.239637 + }, + { + "epoch": 0.10496, + "grad_norm": 0.8152126421762352, + "learning_rate": 3.4968017057569297e-06, + "loss": 0.4432401955127716, + "memory(GiB)": 41.86, + "step": 328, + "token_acc": 0.8400329828901257, + "train_speed(iter/s)": 0.239583 + }, + { + "epoch": 0.10528, + "grad_norm": 0.952214803581771, + "learning_rate": 3.5074626865671646e-06, + "loss": 0.35002297163009644, + "memory(GiB)": 41.86, + "step": 329, + "token_acc": 0.9234937838699394, + "train_speed(iter/s)": 0.239601 + }, + { + "epoch": 0.1056, + "grad_norm": 0.7613667645598268, + "learning_rate": 3.5181236673773987e-06, + "loss": 0.3845504820346832, + "memory(GiB)": 41.86, + "step": 330, + "token_acc": 0.8657171922685656, + "train_speed(iter/s)": 0.239595 + }, + { + "epoch": 0.10592, + "grad_norm": 0.8622450304184315, + "learning_rate": 3.5287846481876337e-06, + "loss": 0.3711111545562744, + "memory(GiB)": 41.86, + "step": 331, + "token_acc": 0.854476782937125, + "train_speed(iter/s)": 0.239636 + }, + { + "epoch": 0.10624, + "grad_norm": 0.7874981708092135, + "learning_rate": 3.5394456289978678e-06, + "loss": 0.4309169352054596, + "memory(GiB)": 41.86, + "step": 332, + "token_acc": 0.8622674933569531, + "train_speed(iter/s)": 0.239669 + }, + { + "epoch": 0.10656, + "grad_norm": 0.7742216210053041, + "learning_rate": 3.5501066098081027e-06, + "loss": 0.3693404495716095, + "memory(GiB)": 41.86, + "step": 333, + "token_acc": 0.8703291264538481, + "train_speed(iter/s)": 0.23969 + }, + { + "epoch": 0.10688, + "grad_norm": 0.8296664786067333, + "learning_rate": 3.560767590618337e-06, + 
"loss": 0.45051077008247375, + "memory(GiB)": 41.86, + "step": 334, + "token_acc": 0.8963150289017341, + "train_speed(iter/s)": 0.239683 + }, + { + "epoch": 0.1072, + "grad_norm": 0.7307221043450637, + "learning_rate": 3.5714285714285718e-06, + "loss": 0.3946268856525421, + "memory(GiB)": 41.86, + "step": 335, + "token_acc": 0.9003807106598984, + "train_speed(iter/s)": 0.23968 + }, + { + "epoch": 0.10752, + "grad_norm": 0.7410211155249213, + "learning_rate": 3.582089552238806e-06, + "loss": 0.4462299346923828, + "memory(GiB)": 41.86, + "step": 336, + "token_acc": 0.8479094076655053, + "train_speed(iter/s)": 0.239666 + }, + { + "epoch": 0.10784, + "grad_norm": 0.7754806339585028, + "learning_rate": 3.592750533049041e-06, + "loss": 0.4262782633304596, + "memory(GiB)": 41.86, + "step": 337, + "token_acc": 0.9305511309334182, + "train_speed(iter/s)": 0.239664 + }, + { + "epoch": 0.10816, + "grad_norm": 0.8051320992828622, + "learning_rate": 3.603411513859275e-06, + "loss": 0.3572915494441986, + "memory(GiB)": 41.86, + "step": 338, + "token_acc": 0.8873587570621468, + "train_speed(iter/s)": 0.239678 + }, + { + "epoch": 0.10848, + "grad_norm": 0.733246847654479, + "learning_rate": 3.61407249466951e-06, + "loss": 0.44792264699935913, + "memory(GiB)": 41.86, + "step": 339, + "token_acc": 0.8476424093758518, + "train_speed(iter/s)": 0.239692 + }, + { + "epoch": 0.1088, + "grad_norm": 0.7912846266452469, + "learning_rate": 3.624733475479744e-06, + "loss": 0.46731969714164734, + "memory(GiB)": 41.86, + "step": 340, + "token_acc": 0.8555702333773668, + "train_speed(iter/s)": 0.239703 + }, + { + "epoch": 0.10912, + "grad_norm": 0.736927356523485, + "learning_rate": 3.635394456289979e-06, + "loss": 0.4212226867675781, + "memory(GiB)": 41.86, + "step": 341, + "token_acc": 0.9051490514905149, + "train_speed(iter/s)": 0.239729 + }, + { + "epoch": 0.10944, + "grad_norm": 0.7778660498560286, + "learning_rate": 3.6460554371002135e-06, + "loss": 0.3919551968574524, + "memory(GiB)": 41.86, + "step": 342, + "token_acc": 0.8815516047102285, + "train_speed(iter/s)": 0.239772 + }, + { + "epoch": 0.10976, + "grad_norm": 0.8149419361790912, + "learning_rate": 3.656716417910448e-06, + "loss": 0.39901018142700195, + "memory(GiB)": 41.86, + "step": 343, + "token_acc": 0.9210890632210429, + "train_speed(iter/s)": 0.239771 + }, + { + "epoch": 0.11008, + "grad_norm": 0.824174717150513, + "learning_rate": 3.667377398720683e-06, + "loss": 0.526077151298523, + "memory(GiB)": 41.86, + "step": 344, + "token_acc": 0.8182519280205656, + "train_speed(iter/s)": 0.239766 + }, + { + "epoch": 0.1104, + "grad_norm": 0.8050247094860181, + "learning_rate": 3.678038379530917e-06, + "loss": 0.4048915505409241, + "memory(GiB)": 41.86, + "step": 345, + "token_acc": 0.8137369033760187, + "train_speed(iter/s)": 0.2398 + }, + { + "epoch": 0.11072, + "grad_norm": 0.8889319303598108, + "learning_rate": 3.688699360341152e-06, + "loss": 0.48911845684051514, + "memory(GiB)": 41.86, + "step": 346, + "token_acc": 0.88710109949048, + "train_speed(iter/s)": 0.239831 + }, + { + "epoch": 0.11104, + "grad_norm": 0.771962006372408, + "learning_rate": 3.699360341151386e-06, + "loss": 0.44773414731025696, + "memory(GiB)": 41.86, + "step": 347, + "token_acc": 0.8486900206064174, + "train_speed(iter/s)": 0.239837 + }, + { + "epoch": 0.11136, + "grad_norm": 0.7855268083064286, + "learning_rate": 3.710021321961621e-06, + "loss": 0.3922199010848999, + "memory(GiB)": 41.86, + "step": 348, + "token_acc": 0.8862332695984704, + "train_speed(iter/s)": 0.239886 + }, + { 
+ "epoch": 0.11168, + "grad_norm": 0.7780333044775207, + "learning_rate": 3.720682302771855e-06, + "loss": 0.4848020374774933, + "memory(GiB)": 41.86, + "step": 349, + "token_acc": 0.8541792547834844, + "train_speed(iter/s)": 0.239876 + }, + { + "epoch": 0.112, + "grad_norm": 0.7531072521634083, + "learning_rate": 3.73134328358209e-06, + "loss": 0.3592837452888489, + "memory(GiB)": 41.86, + "step": 350, + "token_acc": 0.8406832298136646, + "train_speed(iter/s)": 0.239897 + }, + { + "epoch": 0.11232, + "grad_norm": 0.7498724144086311, + "learning_rate": 3.742004264392324e-06, + "loss": 0.3431110978126526, + "memory(GiB)": 41.86, + "step": 351, + "token_acc": 0.9127155172413793, + "train_speed(iter/s)": 0.239921 + }, + { + "epoch": 0.11264, + "grad_norm": 0.8302428042709774, + "learning_rate": 3.752665245202559e-06, + "loss": 0.330912709236145, + "memory(GiB)": 41.86, + "step": 352, + "token_acc": 0.8949831037171823, + "train_speed(iter/s)": 0.239962 + }, + { + "epoch": 0.11296, + "grad_norm": 0.7261921312597923, + "learning_rate": 3.7633262260127933e-06, + "loss": 0.3568934500217438, + "memory(GiB)": 41.86, + "step": 353, + "token_acc": 0.9226932668329177, + "train_speed(iter/s)": 0.239989 + }, + { + "epoch": 0.11328, + "grad_norm": 0.8421382692673692, + "learning_rate": 3.773987206823028e-06, + "loss": 0.47149673104286194, + "memory(GiB)": 41.86, + "step": 354, + "token_acc": 0.8835212023617821, + "train_speed(iter/s)": 0.240025 + }, + { + "epoch": 0.1136, + "grad_norm": 0.8178309177653161, + "learning_rate": 3.7846481876332623e-06, + "loss": 0.4269692897796631, + "memory(GiB)": 41.86, + "step": 355, + "token_acc": 0.865825307206908, + "train_speed(iter/s)": 0.240065 + }, + { + "epoch": 0.11392, + "grad_norm": 0.7520845032619242, + "learning_rate": 3.7953091684434973e-06, + "loss": 0.352092444896698, + "memory(GiB)": 41.86, + "step": 356, + "token_acc": 0.9217210990150337, + "train_speed(iter/s)": 0.240087 + }, + { + "epoch": 0.11424, + "grad_norm": 0.7553131788375012, + "learning_rate": 3.8059701492537314e-06, + "loss": 0.36718976497650146, + "memory(GiB)": 41.86, + "step": 357, + "token_acc": 0.8894668400520156, + "train_speed(iter/s)": 0.240066 + }, + { + "epoch": 0.11456, + "grad_norm": 0.7282423141331218, + "learning_rate": 3.816631130063966e-06, + "loss": 0.3996508717536926, + "memory(GiB)": 41.86, + "step": 358, + "token_acc": 0.8990802883420334, + "train_speed(iter/s)": 0.240057 + }, + { + "epoch": 0.11488, + "grad_norm": 0.7526335807089151, + "learning_rate": 3.827292110874201e-06, + "loss": 0.3580285310745239, + "memory(GiB)": 41.86, + "step": 359, + "token_acc": 0.9510130027214998, + "train_speed(iter/s)": 0.240074 + }, + { + "epoch": 0.1152, + "grad_norm": 0.8218029887615841, + "learning_rate": 3.837953091684435e-06, + "loss": 0.450802206993103, + "memory(GiB)": 41.86, + "step": 360, + "token_acc": 0.8497251069028711, + "train_speed(iter/s)": 0.240097 + }, + { + "epoch": 0.11552, + "grad_norm": 0.7904519584196095, + "learning_rate": 3.84861407249467e-06, + "loss": 0.4368705749511719, + "memory(GiB)": 41.86, + "step": 361, + "token_acc": 0.8363897878460985, + "train_speed(iter/s)": 0.24012 + }, + { + "epoch": 0.11584, + "grad_norm": 0.7302492344779764, + "learning_rate": 3.859275053304904e-06, + "loss": 0.3044324517250061, + "memory(GiB)": 41.86, + "step": 362, + "token_acc": 0.8982739625413148, + "train_speed(iter/s)": 0.240154 + }, + { + "epoch": 0.11616, + "grad_norm": 0.7774724645471484, + "learning_rate": 3.869936034115139e-06, + "loss": 0.4781341552734375, + "memory(GiB)": 
41.86, + "step": 363, + "token_acc": 0.857950974230044, + "train_speed(iter/s)": 0.240123 + }, + { + "epoch": 0.11648, + "grad_norm": 0.7545295257361067, + "learning_rate": 3.8805970149253735e-06, + "loss": 0.4697112441062927, + "memory(GiB)": 41.86, + "step": 364, + "token_acc": 0.8474221408008374, + "train_speed(iter/s)": 0.240143 + }, + { + "epoch": 0.1168, + "grad_norm": 0.7476157279067588, + "learning_rate": 3.891257995735608e-06, + "loss": 0.4799872040748596, + "memory(GiB)": 41.86, + "step": 365, + "token_acc": 0.8468684447108181, + "train_speed(iter/s)": 0.240112 + }, + { + "epoch": 0.11712, + "grad_norm": 0.7677194936221938, + "learning_rate": 3.9019189765458425e-06, + "loss": 0.465701162815094, + "memory(GiB)": 41.86, + "step": 366, + "token_acc": 0.8381742738589212, + "train_speed(iter/s)": 0.240099 + }, + { + "epoch": 0.11744, + "grad_norm": 0.6863213111500606, + "learning_rate": 3.912579957356077e-06, + "loss": 0.385974645614624, + "memory(GiB)": 41.86, + "step": 367, + "token_acc": 0.9273029966703663, + "train_speed(iter/s)": 0.240123 + }, + { + "epoch": 0.11776, + "grad_norm": 0.7275958820899372, + "learning_rate": 3.9232409381663116e-06, + "loss": 0.3946908712387085, + "memory(GiB)": 41.86, + "step": 368, + "token_acc": 0.870161857846587, + "train_speed(iter/s)": 0.24013 + }, + { + "epoch": 0.11808, + "grad_norm": 0.7407315515360782, + "learning_rate": 3.933901918976546e-06, + "loss": 0.40255075693130493, + "memory(GiB)": 41.86, + "step": 369, + "token_acc": 0.9352920601608954, + "train_speed(iter/s)": 0.240141 + }, + { + "epoch": 0.1184, + "grad_norm": 0.773261969816718, + "learning_rate": 3.944562899786781e-06, + "loss": 0.422658771276474, + "memory(GiB)": 41.86, + "step": 370, + "token_acc": 0.8209449292807703, + "train_speed(iter/s)": 0.240182 + }, + { + "epoch": 0.11872, + "grad_norm": 0.7700094457892259, + "learning_rate": 3.955223880597015e-06, + "loss": 0.44361627101898193, + "memory(GiB)": 41.86, + "step": 371, + "token_acc": 0.9455719557195572, + "train_speed(iter/s)": 0.240218 + }, + { + "epoch": 0.11904, + "grad_norm": 0.6989705349409174, + "learning_rate": 3.96588486140725e-06, + "loss": 0.4145239293575287, + "memory(GiB)": 41.86, + "step": 372, + "token_acc": 0.9332899446794664, + "train_speed(iter/s)": 0.240189 + }, + { + "epoch": 0.11936, + "grad_norm": 0.735679223840469, + "learning_rate": 3.976545842217484e-06, + "loss": 0.4269680678844452, + "memory(GiB)": 41.86, + "step": 373, + "token_acc": 0.912778366914104, + "train_speed(iter/s)": 0.240158 + }, + { + "epoch": 0.11968, + "grad_norm": 0.8093411445983377, + "learning_rate": 3.987206823027719e-06, + "loss": 0.48450934886932373, + "memory(GiB)": 41.86, + "step": 374, + "token_acc": 0.826995819231535, + "train_speed(iter/s)": 0.240158 + }, + { + "epoch": 0.12, + "grad_norm": 0.8135031846129209, + "learning_rate": 3.997867803837953e-06, + "loss": 0.4589880406856537, + "memory(GiB)": 41.86, + "step": 375, + "token_acc": 0.8431681091004458, + "train_speed(iter/s)": 0.240165 + }, + { + "epoch": 0.12032, + "grad_norm": 0.7815460707460408, + "learning_rate": 4.008528784648188e-06, + "loss": 0.4284605085849762, + "memory(GiB)": 41.86, + "step": 376, + "token_acc": 0.898191365227538, + "train_speed(iter/s)": 0.240166 + }, + { + "epoch": 0.12064, + "grad_norm": 0.7435575573863752, + "learning_rate": 4.019189765458423e-06, + "loss": 0.3345129191875458, + "memory(GiB)": 41.86, + "step": 377, + "token_acc": 0.8550092297365329, + "train_speed(iter/s)": 0.240056 + }, + { + "epoch": 0.12096, + "grad_norm": 
0.7655961900097236, + "learning_rate": 4.029850746268657e-06, + "loss": 0.4017585813999176, + "memory(GiB)": 41.86, + "step": 378, + "token_acc": 0.9394449116904963, + "train_speed(iter/s)": 0.240052 + }, + { + "epoch": 0.12128, + "grad_norm": 0.8067643385072492, + "learning_rate": 4.040511727078892e-06, + "loss": 0.3989643454551697, + "memory(GiB)": 41.86, + "step": 379, + "token_acc": 0.8813004032258065, + "train_speed(iter/s)": 0.239943 + }, + { + "epoch": 0.1216, + "grad_norm": 1.718016733899616, + "learning_rate": 4.051172707889126e-06, + "loss": 0.39728641510009766, + "memory(GiB)": 41.86, + "step": 380, + "token_acc": 0.9034386939909691, + "train_speed(iter/s)": 0.239935 + }, + { + "epoch": 0.12192, + "grad_norm": 1.757951900483168, + "learning_rate": 4.061833688699361e-06, + "loss": 0.379787415266037, + "memory(GiB)": 41.86, + "step": 381, + "token_acc": 0.8648801128349789, + "train_speed(iter/s)": 0.239964 + }, + { + "epoch": 0.12224, + "grad_norm": 0.775454136881726, + "learning_rate": 4.072494669509595e-06, + "loss": 0.4977129399776459, + "memory(GiB)": 41.86, + "step": 382, + "token_acc": 0.8454140276800375, + "train_speed(iter/s)": 0.239919 + }, + { + "epoch": 0.12256, + "grad_norm": 0.7733519340415651, + "learning_rate": 4.08315565031983e-06, + "loss": 0.3979816734790802, + "memory(GiB)": 41.86, + "step": 383, + "token_acc": 0.7728927471296556, + "train_speed(iter/s)": 0.239909 + }, + { + "epoch": 0.12288, + "grad_norm": 0.8402691151342786, + "learning_rate": 4.093816631130064e-06, + "loss": 0.5106043219566345, + "memory(GiB)": 41.86, + "step": 384, + "token_acc": 0.8933107535986452, + "train_speed(iter/s)": 0.239935 + }, + { + "epoch": 0.1232, + "grad_norm": 0.7023813981668535, + "learning_rate": 4.104477611940299e-06, + "loss": 0.5310304164886475, + "memory(GiB)": 41.86, + "step": 385, + "token_acc": 0.8799791720906014, + "train_speed(iter/s)": 0.239951 + }, + { + "epoch": 0.12352, + "grad_norm": 0.7265062456834858, + "learning_rate": 4.115138592750533e-06, + "loss": 0.39796650409698486, + "memory(GiB)": 41.86, + "step": 386, + "token_acc": 0.8823662737987308, + "train_speed(iter/s)": 0.239942 + }, + { + "epoch": 0.12384, + "grad_norm": 0.7506293693035885, + "learning_rate": 4.125799573560768e-06, + "loss": 0.413953959941864, + "memory(GiB)": 41.86, + "step": 387, + "token_acc": 0.923922734026746, + "train_speed(iter/s)": 0.239935 + }, + { + "epoch": 0.12416, + "grad_norm": 0.7663775978180519, + "learning_rate": 4.136460554371002e-06, + "loss": 0.3362416625022888, + "memory(GiB)": 41.86, + "step": 388, + "token_acc": 0.880661784648766, + "train_speed(iter/s)": 0.239977 + }, + { + "epoch": 0.12448, + "grad_norm": 0.9162291808368352, + "learning_rate": 4.1471215351812375e-06, + "loss": 0.3517610430717468, + "memory(GiB)": 41.86, + "step": 389, + "token_acc": 0.9295361127422196, + "train_speed(iter/s)": 0.239971 + }, + { + "epoch": 0.1248, + "grad_norm": 0.7663814440527071, + "learning_rate": 4.157782515991471e-06, + "loss": 0.377957820892334, + "memory(GiB)": 41.86, + "step": 390, + "token_acc": 0.8766485647788984, + "train_speed(iter/s)": 0.23998 + }, + { + "epoch": 0.12512, + "grad_norm": 0.6921934023407951, + "learning_rate": 4.1684434968017065e-06, + "loss": 0.4640156626701355, + "memory(GiB)": 41.86, + "step": 391, + "token_acc": 0.8496423927178154, + "train_speed(iter/s)": 0.239965 + }, + { + "epoch": 0.12544, + "grad_norm": 0.7079993243828792, + "learning_rate": 4.17910447761194e-06, + "loss": 0.39487916231155396, + "memory(GiB)": 41.86, + "step": 392, + "token_acc": 
0.8871794871794871, + "train_speed(iter/s)": 0.23997 + }, + { + "epoch": 0.12576, + "grad_norm": 0.7766078757175032, + "learning_rate": 4.1897654584221756e-06, + "loss": 0.43649888038635254, + "memory(GiB)": 41.86, + "step": 393, + "token_acc": 0.8670482060312569, + "train_speed(iter/s)": 0.239972 + }, + { + "epoch": 0.12608, + "grad_norm": 0.7316527075838458, + "learning_rate": 4.200426439232409e-06, + "loss": 0.3862738609313965, + "memory(GiB)": 41.86, + "step": 394, + "token_acc": 0.9273544723142452, + "train_speed(iter/s)": 0.240008 + }, + { + "epoch": 0.1264, + "grad_norm": 0.7434056255747994, + "learning_rate": 4.211087420042645e-06, + "loss": 0.41236862540245056, + "memory(GiB)": 41.86, + "step": 395, + "token_acc": 0.9092567259461924, + "train_speed(iter/s)": 0.240018 + }, + { + "epoch": 0.12672, + "grad_norm": 0.7153061373961977, + "learning_rate": 4.221748400852878e-06, + "loss": 0.5322939157485962, + "memory(GiB)": 41.86, + "step": 396, + "token_acc": 0.8658568787958807, + "train_speed(iter/s)": 0.239998 + }, + { + "epoch": 0.12704, + "grad_norm": 0.7601207770566222, + "learning_rate": 4.232409381663114e-06, + "loss": 0.4317474961280823, + "memory(GiB)": 41.86, + "step": 397, + "token_acc": 0.7674003569303985, + "train_speed(iter/s)": 0.240019 + }, + { + "epoch": 0.12736, + "grad_norm": 0.6946456867117251, + "learning_rate": 4.243070362473347e-06, + "loss": 0.35670924186706543, + "memory(GiB)": 41.86, + "step": 398, + "token_acc": 0.8712706029626539, + "train_speed(iter/s)": 0.239993 + }, + { + "epoch": 0.12768, + "grad_norm": 0.7553498390201221, + "learning_rate": 4.253731343283583e-06, + "loss": 0.40526312589645386, + "memory(GiB)": 41.86, + "step": 399, + "token_acc": 0.8783199505867819, + "train_speed(iter/s)": 0.239988 + }, + { + "epoch": 0.128, + "grad_norm": 0.7811725173896594, + "learning_rate": 4.264392324093816e-06, + "loss": 0.3615596294403076, + "memory(GiB)": 41.86, + "step": 400, + "token_acc": 0.9290891900409707, + "train_speed(iter/s)": 0.240012 + }, + { + "epoch": 0.12832, + "grad_norm": 0.7866015499091803, + "learning_rate": 4.275053304904052e-06, + "loss": 0.38750189542770386, + "memory(GiB)": 41.86, + "step": 401, + "token_acc": 0.8667833041739565, + "train_speed(iter/s)": 0.240027 + }, + { + "epoch": 0.12864, + "grad_norm": 0.7373717445797089, + "learning_rate": 4.2857142857142855e-06, + "loss": 0.4126596450805664, + "memory(GiB)": 41.86, + "step": 402, + "token_acc": 0.9262400411205346, + "train_speed(iter/s)": 0.240044 + }, + { + "epoch": 0.12896, + "grad_norm": 0.7773800117067331, + "learning_rate": 4.296375266524521e-06, + "loss": 0.3075929880142212, + "memory(GiB)": 41.86, + "step": 403, + "token_acc": 0.9309576837416481, + "train_speed(iter/s)": 0.240018 + }, + { + "epoch": 0.12928, + "grad_norm": 0.712221683785275, + "learning_rate": 4.3070362473347545e-06, + "loss": 0.4430937170982361, + "memory(GiB)": 41.86, + "step": 404, + "token_acc": 0.9010587102983638, + "train_speed(iter/s)": 0.24001 + }, + { + "epoch": 0.1296, + "grad_norm": 0.7826397610224037, + "learning_rate": 4.31769722814499e-06, + "loss": 0.44257938861846924, + "memory(GiB)": 41.86, + "step": 405, + "token_acc": 0.8016378525932666, + "train_speed(iter/s)": 0.240014 + }, + { + "epoch": 0.12992, + "grad_norm": 0.8285583514033186, + "learning_rate": 4.3283582089552236e-06, + "loss": 0.45782554149627686, + "memory(GiB)": 41.86, + "step": 406, + "token_acc": 0.7941988950276243, + "train_speed(iter/s)": 0.240046 + }, + { + "epoch": 0.13024, + "grad_norm": 0.768155231125396, + "learning_rate": 
4.339019189765459e-06, + "loss": 0.4277716875076294, + "memory(GiB)": 41.86, + "step": 407, + "token_acc": 0.8998406555884362, + "train_speed(iter/s)": 0.240059 + }, + { + "epoch": 0.13056, + "grad_norm": 0.7180765512185124, + "learning_rate": 4.3496801705756935e-06, + "loss": 0.4476096034049988, + "memory(GiB)": 41.86, + "step": 408, + "token_acc": 0.8737541528239202, + "train_speed(iter/s)": 0.240056 + }, + { + "epoch": 0.13088, + "grad_norm": 0.744575187407348, + "learning_rate": 4.360341151385928e-06, + "loss": 0.44965660572052, + "memory(GiB)": 41.86, + "step": 409, + "token_acc": 0.8786349435231915, + "train_speed(iter/s)": 0.240061 + }, + { + "epoch": 0.1312, + "grad_norm": 0.9191869571838321, + "learning_rate": 4.3710021321961625e-06, + "loss": 0.40606701374053955, + "memory(GiB)": 41.86, + "step": 410, + "token_acc": 0.8825796505652621, + "train_speed(iter/s)": 0.240083 + }, + { + "epoch": 0.13152, + "grad_norm": 0.7465810851327056, + "learning_rate": 4.381663113006397e-06, + "loss": 0.4159611165523529, + "memory(GiB)": 41.86, + "step": 411, + "token_acc": 0.9019670050761421, + "train_speed(iter/s)": 0.240099 + }, + { + "epoch": 0.13184, + "grad_norm": 0.7034535707270476, + "learning_rate": 4.3923240938166316e-06, + "loss": 0.4414001405239105, + "memory(GiB)": 41.86, + "step": 412, + "token_acc": 0.9004438807863031, + "train_speed(iter/s)": 0.240129 + }, + { + "epoch": 0.13216, + "grad_norm": 0.7835451099058026, + "learning_rate": 4.402985074626866e-06, + "loss": 0.46758919954299927, + "memory(GiB)": 41.86, + "step": 413, + "token_acc": 0.8863487916394513, + "train_speed(iter/s)": 0.240153 + }, + { + "epoch": 0.13248, + "grad_norm": 0.752558344923947, + "learning_rate": 4.413646055437101e-06, + "loss": 0.4206882417201996, + "memory(GiB)": 41.86, + "step": 414, + "token_acc": 0.8710639708319523, + "train_speed(iter/s)": 0.240139 + }, + { + "epoch": 0.1328, + "grad_norm": 0.7430963428060228, + "learning_rate": 4.424307036247335e-06, + "loss": 0.41270163655281067, + "memory(GiB)": 41.86, + "step": 415, + "token_acc": 0.8815420560747663, + "train_speed(iter/s)": 0.240167 + }, + { + "epoch": 0.13312, + "grad_norm": 0.7888810815113609, + "learning_rate": 4.43496801705757e-06, + "loss": 0.38972175121307373, + "memory(GiB)": 41.86, + "step": 416, + "token_acc": 0.9167408726625111, + "train_speed(iter/s)": 0.240185 + }, + { + "epoch": 0.13344, + "grad_norm": 0.7056235377476282, + "learning_rate": 4.445628997867804e-06, + "loss": 0.3448014557361603, + "memory(GiB)": 41.86, + "step": 417, + "token_acc": 0.8757145881854753, + "train_speed(iter/s)": 0.240209 + }, + { + "epoch": 0.13376, + "grad_norm": 0.7454870663762059, + "learning_rate": 4.456289978678039e-06, + "loss": 0.4793194830417633, + "memory(GiB)": 41.86, + "step": 418, + "token_acc": 0.8903645097485166, + "train_speed(iter/s)": 0.240192 + }, + { + "epoch": 0.13408, + "grad_norm": 0.7707599928901607, + "learning_rate": 4.466950959488273e-06, + "loss": 0.4438665509223938, + "memory(GiB)": 41.86, + "step": 419, + "token_acc": 0.8519658929417337, + "train_speed(iter/s)": 0.240211 + }, + { + "epoch": 0.1344, + "grad_norm": 0.7187785205838252, + "learning_rate": 4.477611940298508e-06, + "loss": 0.4235180616378784, + "memory(GiB)": 41.86, + "step": 420, + "token_acc": 0.9256472004816376, + "train_speed(iter/s)": 0.240219 + }, + { + "epoch": 0.13472, + "grad_norm": 0.7404468763941686, + "learning_rate": 4.488272921108742e-06, + "loss": 0.4464147090911865, + "memory(GiB)": 41.86, + "step": 421, + "token_acc": 0.8301381158524016, + 
"train_speed(iter/s)": 0.240222 + }, + { + "epoch": 0.13504, + "grad_norm": 0.8017665523022893, + "learning_rate": 4.498933901918977e-06, + "loss": 0.3687342405319214, + "memory(GiB)": 41.86, + "step": 422, + "token_acc": 0.9111111111111111, + "train_speed(iter/s)": 0.240244 + }, + { + "epoch": 0.13536, + "grad_norm": 0.761276366099309, + "learning_rate": 4.509594882729211e-06, + "loss": 0.4876147508621216, + "memory(GiB)": 41.86, + "step": 423, + "token_acc": 0.8697703680402642, + "train_speed(iter/s)": 0.240242 + }, + { + "epoch": 0.13568, + "grad_norm": 0.7978566775395347, + "learning_rate": 4.520255863539446e-06, + "loss": 0.37016117572784424, + "memory(GiB)": 41.86, + "step": 424, + "token_acc": 0.9299495399228258, + "train_speed(iter/s)": 0.240234 + }, + { + "epoch": 0.136, + "grad_norm": 0.7095380060039164, + "learning_rate": 4.53091684434968e-06, + "loss": 0.4326656460762024, + "memory(GiB)": 41.86, + "step": 425, + "token_acc": 0.9297205757832345, + "train_speed(iter/s)": 0.240232 + }, + { + "epoch": 0.13632, + "grad_norm": 0.7372710984992958, + "learning_rate": 4.541577825159915e-06, + "loss": 0.35109943151474, + "memory(GiB)": 41.86, + "step": 426, + "token_acc": 0.8405618531296205, + "train_speed(iter/s)": 0.240262 + }, + { + "epoch": 0.13664, + "grad_norm": 0.7662770574387595, + "learning_rate": 4.5522388059701495e-06, + "loss": 0.3724941611289978, + "memory(GiB)": 41.86, + "step": 427, + "token_acc": 0.9322444041137327, + "train_speed(iter/s)": 0.240288 + }, + { + "epoch": 0.13696, + "grad_norm": 0.8432262416769227, + "learning_rate": 4.562899786780384e-06, + "loss": 0.5000088214874268, + "memory(GiB)": 41.86, + "step": 428, + "token_acc": 0.8317720530835285, + "train_speed(iter/s)": 0.240319 + }, + { + "epoch": 0.13728, + "grad_norm": 0.7153037158778891, + "learning_rate": 4.5735607675906185e-06, + "loss": 0.3968162536621094, + "memory(GiB)": 41.86, + "step": 429, + "token_acc": 0.9204374057315233, + "train_speed(iter/s)": 0.240317 + }, + { + "epoch": 0.1376, + "grad_norm": 0.7879976690757311, + "learning_rate": 4.584221748400853e-06, + "loss": 0.4961619973182678, + "memory(GiB)": 41.86, + "step": 430, + "token_acc": 0.8190070921985816, + "train_speed(iter/s)": 0.240338 + }, + { + "epoch": 0.13792, + "grad_norm": 0.6977160477577248, + "learning_rate": 4.5948827292110876e-06, + "loss": 0.30530205368995667, + "memory(GiB)": 41.86, + "step": 431, + "token_acc": 0.9016697588126159, + "train_speed(iter/s)": 0.240335 + }, + { + "epoch": 0.13824, + "grad_norm": 0.6953309402251618, + "learning_rate": 4.605543710021322e-06, + "loss": 0.41661763191223145, + "memory(GiB)": 41.86, + "step": 432, + "token_acc": 0.8389189189189189, + "train_speed(iter/s)": 0.240304 + }, + { + "epoch": 0.13856, + "grad_norm": 0.7474166896200634, + "learning_rate": 4.616204690831557e-06, + "loss": 0.36193764209747314, + "memory(GiB)": 41.86, + "step": 433, + "token_acc": 0.9236089280100598, + "train_speed(iter/s)": 0.240336 + }, + { + "epoch": 0.13888, + "grad_norm": 0.7738101619319373, + "learning_rate": 4.626865671641791e-06, + "loss": 0.3570512533187866, + "memory(GiB)": 41.86, + "step": 434, + "token_acc": 0.9176392130710237, + "train_speed(iter/s)": 0.240367 + }, + { + "epoch": 0.1392, + "grad_norm": 0.8087937542556393, + "learning_rate": 4.637526652452026e-06, + "loss": 0.43259045481681824, + "memory(GiB)": 41.86, + "step": 435, + "token_acc": 0.9086770981507823, + "train_speed(iter/s)": 0.240384 + }, + { + "epoch": 0.13952, + "grad_norm": 0.6726719607850847, + "learning_rate": 4.64818763326226e-06, + 
"loss": 0.41652774810791016, + "memory(GiB)": 41.86, + "step": 436, + "token_acc": 0.9400399733510992, + "train_speed(iter/s)": 0.240094 + }, + { + "epoch": 0.13984, + "grad_norm": 0.6616737389189811, + "learning_rate": 4.658848614072495e-06, + "loss": 0.4289194643497467, + "memory(GiB)": 41.86, + "step": 437, + "token_acc": 0.9163356504468719, + "train_speed(iter/s)": 0.240061 + }, + { + "epoch": 0.14016, + "grad_norm": 0.7403211547819922, + "learning_rate": 4.669509594882729e-06, + "loss": 0.49350762367248535, + "memory(GiB)": 41.86, + "step": 438, + "token_acc": 0.8215900527505652, + "train_speed(iter/s)": 0.240037 + }, + { + "epoch": 0.14048, + "grad_norm": 0.7161783880219333, + "learning_rate": 4.680170575692965e-06, + "loss": 0.35738128423690796, + "memory(GiB)": 41.86, + "step": 439, + "token_acc": 0.9297218155197657, + "train_speed(iter/s)": 0.240044 + }, + { + "epoch": 0.1408, + "grad_norm": 0.7504870165713982, + "learning_rate": 4.690831556503198e-06, + "loss": 0.3570151627063751, + "memory(GiB)": 41.86, + "step": 440, + "token_acc": 0.9445692883895132, + "train_speed(iter/s)": 0.240046 + }, + { + "epoch": 0.14112, + "grad_norm": 0.7592062989689758, + "learning_rate": 4.701492537313434e-06, + "loss": 0.3971911072731018, + "memory(GiB)": 41.86, + "step": 441, + "token_acc": 0.9380883417813178, + "train_speed(iter/s)": 0.240077 + }, + { + "epoch": 0.14144, + "grad_norm": 0.8042941701744262, + "learning_rate": 4.712153518123667e-06, + "loss": 0.4420316517353058, + "memory(GiB)": 41.86, + "step": 442, + "token_acc": 0.8800949742777998, + "train_speed(iter/s)": 0.239943 + }, + { + "epoch": 0.14176, + "grad_norm": 0.6990729085460707, + "learning_rate": 4.722814498933903e-06, + "loss": 0.4328658878803253, + "memory(GiB)": 41.86, + "step": 443, + "token_acc": 0.8723994452149791, + "train_speed(iter/s)": 0.239934 + }, + { + "epoch": 0.14208, + "grad_norm": 0.7089131366462694, + "learning_rate": 4.733475479744136e-06, + "loss": 0.3094528913497925, + "memory(GiB)": 41.86, + "step": 444, + "token_acc": 0.9335020708697653, + "train_speed(iter/s)": 0.239942 + }, + { + "epoch": 0.1424, + "grad_norm": 0.7633606194441576, + "learning_rate": 4.744136460554372e-06, + "loss": 0.4399607479572296, + "memory(GiB)": 41.86, + "step": 445, + "token_acc": 0.9007518796992481, + "train_speed(iter/s)": 0.239953 + }, + { + "epoch": 0.14272, + "grad_norm": 0.7430419652234066, + "learning_rate": 4.7547974413646055e-06, + "loss": 0.49510475993156433, + "memory(GiB)": 41.86, + "step": 446, + "token_acc": 0.7972686602307512, + "train_speed(iter/s)": 0.239925 + }, + { + "epoch": 0.14304, + "grad_norm": 0.7440256957472273, + "learning_rate": 4.765458422174841e-06, + "loss": 0.4689873456954956, + "memory(GiB)": 41.86, + "step": 447, + "token_acc": 0.8642521426596627, + "train_speed(iter/s)": 0.239932 + }, + { + "epoch": 0.14336, + "grad_norm": 0.6725343500564533, + "learning_rate": 4.7761194029850745e-06, + "loss": 0.3653256595134735, + "memory(GiB)": 41.86, + "step": 448, + "token_acc": 0.8990066225165563, + "train_speed(iter/s)": 0.239948 + }, + { + "epoch": 0.14368, + "grad_norm": 0.7904043217230909, + "learning_rate": 4.78678038379531e-06, + "loss": 0.4147486686706543, + "memory(GiB)": 41.86, + "step": 449, + "token_acc": 0.8922895821071218, + "train_speed(iter/s)": 0.239959 + }, + { + "epoch": 0.144, + "grad_norm": 0.9201541246575352, + "learning_rate": 4.797441364605544e-06, + "loss": 0.3903222382068634, + "memory(GiB)": 41.86, + "step": 450, + "token_acc": 0.8998664886515354, + "train_speed(iter/s)": 0.239966 + 
}, + { + "epoch": 0.14432, + "grad_norm": 0.7552304831771183, + "learning_rate": 4.808102345415779e-06, + "loss": 0.3928183913230896, + "memory(GiB)": 41.86, + "step": 451, + "token_acc": 0.8961079723791588, + "train_speed(iter/s)": 0.239951 + }, + { + "epoch": 0.14464, + "grad_norm": 0.7736122525851844, + "learning_rate": 4.8187633262260135e-06, + "loss": 0.4385005235671997, + "memory(GiB)": 41.86, + "step": 452, + "token_acc": 0.9022761009401287, + "train_speed(iter/s)": 0.239951 + }, + { + "epoch": 0.14496, + "grad_norm": 0.7072463280257577, + "learning_rate": 4.829424307036248e-06, + "loss": 0.3216584324836731, + "memory(GiB)": 41.86, + "step": 453, + "token_acc": 0.9247311827956989, + "train_speed(iter/s)": 0.239941 + }, + { + "epoch": 0.14528, + "grad_norm": 0.7457360886970802, + "learning_rate": 4.8400852878464825e-06, + "loss": 0.3786621689796448, + "memory(GiB)": 41.86, + "step": 454, + "token_acc": 0.8299975886182783, + "train_speed(iter/s)": 0.239947 + }, + { + "epoch": 0.1456, + "grad_norm": 0.7864093748532963, + "learning_rate": 4.850746268656717e-06, + "loss": 0.49379590153694153, + "memory(GiB)": 41.86, + "step": 455, + "token_acc": 0.8751440645409143, + "train_speed(iter/s)": 0.23997 + }, + { + "epoch": 0.14592, + "grad_norm": 0.7345535425279534, + "learning_rate": 4.8614072494669516e-06, + "loss": 0.41603416204452515, + "memory(GiB)": 41.86, + "step": 456, + "token_acc": 0.8583906829010058, + "train_speed(iter/s)": 0.239838 + }, + { + "epoch": 0.14624, + "grad_norm": 0.7091103517940073, + "learning_rate": 4.872068230277186e-06, + "loss": 0.39392971992492676, + "memory(GiB)": 41.86, + "step": 457, + "token_acc": 0.8898061737257718, + "train_speed(iter/s)": 0.239827 + }, + { + "epoch": 0.14656, + "grad_norm": 0.7782122897677997, + "learning_rate": 4.882729211087421e-06, + "loss": 0.3989648222923279, + "memory(GiB)": 41.86, + "step": 458, + "token_acc": 0.8714069591527988, + "train_speed(iter/s)": 0.239848 + }, + { + "epoch": 0.14688, + "grad_norm": 0.7247215470470478, + "learning_rate": 4.893390191897655e-06, + "loss": 0.41792333126068115, + "memory(GiB)": 41.86, + "step": 459, + "token_acc": 0.8623000432338954, + "train_speed(iter/s)": 0.239862 + }, + { + "epoch": 0.1472, + "grad_norm": 0.7411204235082078, + "learning_rate": 4.90405117270789e-06, + "loss": 0.3991488218307495, + "memory(GiB)": 41.86, + "step": 460, + "token_acc": 0.909718228867165, + "train_speed(iter/s)": 0.239853 + }, + { + "epoch": 0.14752, + "grad_norm": 0.726473952304273, + "learning_rate": 4.914712153518124e-06, + "loss": 0.4299595355987549, + "memory(GiB)": 41.86, + "step": 461, + "token_acc": 0.8895800933125972, + "train_speed(iter/s)": 0.239853 + }, + { + "epoch": 0.14784, + "grad_norm": 0.7654793955413849, + "learning_rate": 4.925373134328359e-06, + "loss": 0.3551526665687561, + "memory(GiB)": 41.86, + "step": 462, + "token_acc": 0.9072555205047319, + "train_speed(iter/s)": 0.239873 + }, + { + "epoch": 0.14816, + "grad_norm": 0.7279016828892926, + "learning_rate": 4.936034115138593e-06, + "loss": 0.3636777997016907, + "memory(GiB)": 41.86, + "step": 463, + "token_acc": 0.9270650263620387, + "train_speed(iter/s)": 0.239884 + }, + { + "epoch": 0.14848, + "grad_norm": 0.7040017645343032, + "learning_rate": 4.946695095948828e-06, + "loss": 0.42778918147087097, + "memory(GiB)": 41.86, + "step": 464, + "token_acc": 0.9013877207737595, + "train_speed(iter/s)": 0.239853 + }, + { + "epoch": 0.1488, + "grad_norm": 0.7737532111703186, + "learning_rate": 4.957356076759062e-06, + "loss": 0.4469106197357178, + 
"memory(GiB)": 41.86, + "step": 465, + "token_acc": 0.8488549618320611, + "train_speed(iter/s)": 0.239854 + }, + { + "epoch": 0.14912, + "grad_norm": 0.9057637791546586, + "learning_rate": 4.968017057569297e-06, + "loss": 0.30127114057540894, + "memory(GiB)": 41.86, + "step": 466, + "token_acc": 0.9355459355459356, + "train_speed(iter/s)": 0.239886 + }, + { + "epoch": 0.14944, + "grad_norm": 0.7049343362262128, + "learning_rate": 4.978678038379531e-06, + "loss": 0.36840489506721497, + "memory(GiB)": 41.86, + "step": 467, + "token_acc": 0.9286043298019346, + "train_speed(iter/s)": 0.239917 + }, + { + "epoch": 0.14976, + "grad_norm": 0.720030589818441, + "learning_rate": 4.989339019189766e-06, + "loss": 0.34791100025177, + "memory(GiB)": 41.86, + "step": 468, + "token_acc": 0.9290364583333334, + "train_speed(iter/s)": 0.239905 + }, + { + "epoch": 0.15008, + "grad_norm": 0.704374308701066, + "learning_rate": 5e-06, + "loss": 0.4260786771774292, + "memory(GiB)": 41.86, + "step": 469, + "token_acc": 0.8302900107411385, + "train_speed(iter/s)": 0.239905 + }, + { + "epoch": 0.1504, + "grad_norm": 1.6087275965721095, + "learning_rate": 4.9999998444591845e-06, + "loss": 0.43194711208343506, + "memory(GiB)": 41.86, + "step": 470, + "token_acc": 0.860344356381525, + "train_speed(iter/s)": 0.239915 + }, + { + "epoch": 0.15072, + "grad_norm": 0.7135343064631954, + "learning_rate": 4.999999377836757e-06, + "loss": 0.4190048575401306, + "memory(GiB)": 41.86, + "step": 471, + "token_acc": 0.8626387813064808, + "train_speed(iter/s)": 0.23994 + }, + { + "epoch": 0.15104, + "grad_norm": 0.782678070824646, + "learning_rate": 4.9999986001327745e-06, + "loss": 0.4323235750198364, + "memory(GiB)": 41.86, + "step": 472, + "token_acc": 0.8596032265097013, + "train_speed(iter/s)": 0.239931 + }, + { + "epoch": 0.15136, + "grad_norm": 0.70330573232026, + "learning_rate": 4.9999975113473356e-06, + "loss": 0.4587031900882721, + "memory(GiB)": 41.86, + "step": 473, + "token_acc": 0.8145837814583782, + "train_speed(iter/s)": 0.239935 + }, + { + "epoch": 0.15168, + "grad_norm": 0.722100170075211, + "learning_rate": 4.999996111480575e-06, + "loss": 0.3733265995979309, + "memory(GiB)": 41.86, + "step": 474, + "token_acc": 0.8543113671014738, + "train_speed(iter/s)": 0.239944 + }, + { + "epoch": 0.152, + "grad_norm": 0.7827326854948343, + "learning_rate": 4.999994400532666e-06, + "loss": 0.5017992258071899, + "memory(GiB)": 41.86, + "step": 475, + "token_acc": 0.798049573344169, + "train_speed(iter/s)": 0.239947 + }, + { + "epoch": 0.15232, + "grad_norm": 0.7294363979848841, + "learning_rate": 4.999992378503823e-06, + "loss": 0.4337218999862671, + "memory(GiB)": 41.86, + "step": 476, + "token_acc": 0.8789083200847907, + "train_speed(iter/s)": 0.239912 + }, + { + "epoch": 0.15264, + "grad_norm": 0.6830801834186843, + "learning_rate": 4.999990045394296e-06, + "loss": 0.3844829201698303, + "memory(GiB)": 41.86, + "step": 477, + "token_acc": 0.9175998309740122, + "train_speed(iter/s)": 0.239924 + }, + { + "epoch": 0.15296, + "grad_norm": 0.7574361362858754, + "learning_rate": 4.999987401204377e-06, + "loss": 0.3935595154762268, + "memory(GiB)": 41.86, + "step": 478, + "token_acc": 0.8889947594092424, + "train_speed(iter/s)": 0.239945 + }, + { + "epoch": 0.15328, + "grad_norm": 0.6644581137612858, + "learning_rate": 4.999984445934394e-06, + "loss": 0.38212037086486816, + "memory(GiB)": 41.86, + "step": 479, + "token_acc": 0.8682705580753303, + "train_speed(iter/s)": 0.239903 + }, + { + "epoch": 0.1536, + "grad_norm": 
0.7145717768232145, + "learning_rate": 4.9999811795847145e-06, + "loss": 0.43310773372650146, + "memory(GiB)": 41.86, + "step": 480, + "token_acc": 0.9031683873264507, + "train_speed(iter/s)": 0.239924 + }, + { + "epoch": 0.15392, + "grad_norm": 0.75434365548136, + "learning_rate": 4.999977602155746e-06, + "loss": 0.4588850140571594, + "memory(GiB)": 41.86, + "step": 481, + "token_acc": 0.8225524475524476, + "train_speed(iter/s)": 0.239925 + }, + { + "epoch": 0.15424, + "grad_norm": 0.7117648039518493, + "learning_rate": 4.999973713647933e-06, + "loss": 0.3876141607761383, + "memory(GiB)": 41.86, + "step": 482, + "token_acc": 0.930784442979565, + "train_speed(iter/s)": 0.23991 + }, + { + "epoch": 0.15456, + "grad_norm": 0.7151517376887505, + "learning_rate": 4.999969514061759e-06, + "loss": 0.3974360227584839, + "memory(GiB)": 41.86, + "step": 483, + "token_acc": 0.928996036988111, + "train_speed(iter/s)": 0.239918 + }, + { + "epoch": 0.15488, + "grad_norm": 0.7945811449111028, + "learning_rate": 4.999965003397747e-06, + "loss": 0.4497455358505249, + "memory(GiB)": 41.86, + "step": 484, + "token_acc": 0.9050081654872074, + "train_speed(iter/s)": 0.239934 + }, + { + "epoch": 0.1552, + "grad_norm": 0.7239201026055758, + "learning_rate": 4.999960181656458e-06, + "loss": 0.43310630321502686, + "memory(GiB)": 41.86, + "step": 485, + "token_acc": 0.8402323892519971, + "train_speed(iter/s)": 0.23996 + }, + { + "epoch": 0.15552, + "grad_norm": 0.7143665166598663, + "learning_rate": 4.999955048838493e-06, + "loss": 0.4903789162635803, + "memory(GiB)": 41.86, + "step": 486, + "token_acc": 0.8678033658104517, + "train_speed(iter/s)": 0.239973 + }, + { + "epoch": 0.15584, + "grad_norm": 0.6746481004802994, + "learning_rate": 4.999949604944489e-06, + "loss": 0.4141741991043091, + "memory(GiB)": 41.86, + "step": 487, + "token_acc": 0.8668025823989127, + "train_speed(iter/s)": 0.23998 + }, + { + "epoch": 0.15616, + "grad_norm": 0.7023917672452247, + "learning_rate": 4.999943849975125e-06, + "loss": 0.35007524490356445, + "memory(GiB)": 41.86, + "step": 488, + "token_acc": 0.8707372523546606, + "train_speed(iter/s)": 0.239958 + }, + { + "epoch": 0.15648, + "grad_norm": 0.6993281414771114, + "learning_rate": 4.999937783931117e-06, + "loss": 0.33885467052459717, + "memory(GiB)": 41.86, + "step": 489, + "token_acc": 0.9425287356321839, + "train_speed(iter/s)": 0.239966 + }, + { + "epoch": 0.1568, + "grad_norm": 0.73068306885795, + "learning_rate": 4.999931406813218e-06, + "loss": 0.4645715653896332, + "memory(GiB)": 41.86, + "step": 490, + "token_acc": 0.8745748299319728, + "train_speed(iter/s)": 0.239957 + }, + { + "epoch": 0.15712, + "grad_norm": 0.7457218009697603, + "learning_rate": 4.999924718622223e-06, + "loss": 0.4251176416873932, + "memory(GiB)": 41.86, + "step": 491, + "token_acc": 0.8239684843458428, + "train_speed(iter/s)": 0.239984 + }, + { + "epoch": 0.15744, + "grad_norm": 0.7729913739203381, + "learning_rate": 4.999917719358965e-06, + "loss": 0.4445386826992035, + "memory(GiB)": 41.86, + "step": 492, + "token_acc": 0.8920515574650913, + "train_speed(iter/s)": 0.240007 + }, + { + "epoch": 0.15776, + "grad_norm": 0.6920276138470032, + "learning_rate": 4.9999104090243125e-06, + "loss": 0.386310875415802, + "memory(GiB)": 41.86, + "step": 493, + "token_acc": 0.8777651083238313, + "train_speed(iter/s)": 0.239998 + }, + { + "epoch": 0.15808, + "grad_norm": 0.6995403635948096, + "learning_rate": 4.999902787619177e-06, + "loss": 0.37021100521087646, + "memory(GiB)": 41.86, + "step": 494, + 
"token_acc": 0.9275627615062761, + "train_speed(iter/s)": 0.240002 + }, + { + "epoch": 0.1584, + "grad_norm": 0.7336325203968346, + "learning_rate": 4.999894855144507e-06, + "loss": 0.5206668376922607, + "memory(GiB)": 41.86, + "step": 495, + "token_acc": 0.9339049660593068, + "train_speed(iter/s)": 0.239982 + }, + { + "epoch": 0.15872, + "grad_norm": 0.6851661809304453, + "learning_rate": 4.999886611601288e-06, + "loss": 0.34049439430236816, + "memory(GiB)": 41.86, + "step": 496, + "token_acc": 0.9146005509641874, + "train_speed(iter/s)": 0.239954 + }, + { + "epoch": 0.15904, + "grad_norm": 0.7470618077545236, + "learning_rate": 4.9998780569905485e-06, + "loss": 0.35181865096092224, + "memory(GiB)": 41.86, + "step": 497, + "token_acc": 0.9552562988705473, + "train_speed(iter/s)": 0.239953 + }, + { + "epoch": 0.15936, + "grad_norm": 0.7126891205308642, + "learning_rate": 4.999869191313349e-06, + "loss": 0.41131922602653503, + "memory(GiB)": 41.86, + "step": 498, + "token_acc": 0.800531914893617, + "train_speed(iter/s)": 0.239955 + }, + { + "epoch": 0.15968, + "grad_norm": 0.7492784128769695, + "learning_rate": 4.999860014570796e-06, + "loss": 0.37963297963142395, + "memory(GiB)": 41.86, + "step": 499, + "token_acc": 0.9110520094562647, + "train_speed(iter/s)": 0.239982 + }, + { + "epoch": 0.16, + "grad_norm": 0.7319764465986598, + "learning_rate": 4.999850526764031e-06, + "loss": 0.41188380122184753, + "memory(GiB)": 41.86, + "step": 500, + "token_acc": 0.8327289211242067, + "train_speed(iter/s)": 0.239998 + }, + { + "epoch": 0.16032, + "grad_norm": 0.6541867353538434, + "learning_rate": 4.999840727894232e-06, + "loss": 0.43531447649002075, + "memory(GiB)": 41.86, + "step": 501, + "token_acc": 0.8830073568993361, + "train_speed(iter/s)": 0.239982 + }, + { + "epoch": 0.16064, + "grad_norm": 0.7211162488690752, + "learning_rate": 4.999830617962622e-06, + "loss": 0.4297073483467102, + "memory(GiB)": 41.86, + "step": 502, + "token_acc": 0.9025035619784246, + "train_speed(iter/s)": 0.239991 + }, + { + "epoch": 0.16096, + "grad_norm": 0.7429641781043748, + "learning_rate": 4.999820196970457e-06, + "loss": 0.43720877170562744, + "memory(GiB)": 41.86, + "step": 503, + "token_acc": 0.879475982532751, + "train_speed(iter/s)": 0.239977 + }, + { + "epoch": 0.16128, + "grad_norm": 0.8418981909807456, + "learning_rate": 4.999809464919032e-06, + "loss": 0.4410746693611145, + "memory(GiB)": 41.86, + "step": 504, + "token_acc": 0.8964255558682803, + "train_speed(iter/s)": 0.24 + }, + { + "epoch": 0.1616, + "grad_norm": 0.7705898057157492, + "learning_rate": 4.9997984218096865e-06, + "loss": 0.40692082047462463, + "memory(GiB)": 41.86, + "step": 505, + "token_acc": 0.9370851370851371, + "train_speed(iter/s)": 0.240028 + }, + { + "epoch": 0.16192, + "grad_norm": 0.7322499613672894, + "learning_rate": 4.999787067643791e-06, + "loss": 0.37406277656555176, + "memory(GiB)": 41.86, + "step": 506, + "token_acc": 0.9265745007680491, + "train_speed(iter/s)": 0.240056 + }, + { + "epoch": 0.16224, + "grad_norm": 0.7037997526265463, + "learning_rate": 4.99977540242276e-06, + "loss": 0.376261830329895, + "memory(GiB)": 41.86, + "step": 507, + "token_acc": 0.908705575480926, + "train_speed(iter/s)": 0.240072 + }, + { + "epoch": 0.16256, + "grad_norm": 0.710591469760506, + "learning_rate": 4.999763426148045e-06, + "loss": 0.4308719336986542, + "memory(GiB)": 41.86, + "step": 508, + "token_acc": 0.8405507439484788, + "train_speed(iter/s)": 0.240066 + }, + { + "epoch": 0.16288, + "grad_norm": 0.8212473162207397, + 
"learning_rate": 4.999751138821136e-06, + "loss": 0.3660429120063782, + "memory(GiB)": 41.86, + "step": 509, + "token_acc": 0.9233965203843157, + "train_speed(iter/s)": 0.240082 + }, + { + "epoch": 0.1632, + "grad_norm": 0.7368889912060217, + "learning_rate": 4.9997385404435626e-06, + "loss": 0.43292951583862305, + "memory(GiB)": 41.86, + "step": 510, + "token_acc": 0.9106370712020755, + "train_speed(iter/s)": 0.240091 + }, + { + "epoch": 0.16352, + "grad_norm": 0.7426471075659408, + "learning_rate": 4.999725631016891e-06, + "loss": 0.35908281803131104, + "memory(GiB)": 41.86, + "step": 511, + "token_acc": 0.8613861386138614, + "train_speed(iter/s)": 0.240112 + }, + { + "epoch": 0.16384, + "grad_norm": 0.6928641455543809, + "learning_rate": 4.999712410542728e-06, + "loss": 0.4103066921234131, + "memory(GiB)": 41.86, + "step": 512, + "token_acc": 0.8858123009066405, + "train_speed(iter/s)": 0.24011 + }, + { + "epoch": 0.16416, + "grad_norm": 0.8179877283629033, + "learning_rate": 4.99969887902272e-06, + "loss": 0.46859943866729736, + "memory(GiB)": 41.86, + "step": 513, + "token_acc": 0.8619561661759896, + "train_speed(iter/s)": 0.240119 + }, + { + "epoch": 0.16448, + "grad_norm": 0.720989863693229, + "learning_rate": 4.99968503645855e-06, + "loss": 0.34554195404052734, + "memory(GiB)": 41.86, + "step": 514, + "token_acc": 0.905693950177936, + "train_speed(iter/s)": 0.240149 + }, + { + "epoch": 0.1648, + "grad_norm": 0.6318179685987427, + "learning_rate": 4.99967088285194e-06, + "loss": 0.4476335346698761, + "memory(GiB)": 41.86, + "step": 515, + "token_acc": 0.9117511520737327, + "train_speed(iter/s)": 0.240108 + }, + { + "epoch": 0.16512, + "grad_norm": 0.8064297036277955, + "learning_rate": 4.999656418204651e-06, + "loss": 0.42241039872169495, + "memory(GiB)": 41.86, + "step": 516, + "token_acc": 0.9046734757041164, + "train_speed(iter/s)": 0.240132 + }, + { + "epoch": 0.16544, + "grad_norm": 0.688824632997362, + "learning_rate": 4.999641642518484e-06, + "loss": 0.3794514536857605, + "memory(GiB)": 41.86, + "step": 517, + "token_acc": 0.8403505429605639, + "train_speed(iter/s)": 0.240124 + }, + { + "epoch": 0.16576, + "grad_norm": 0.7229498671074948, + "learning_rate": 4.999626555795276e-06, + "loss": 0.40179306268692017, + "memory(GiB)": 41.86, + "step": 518, + "token_acc": 0.8962199312714777, + "train_speed(iter/s)": 0.24013 + }, + { + "epoch": 0.16608, + "grad_norm": 0.7127941836089953, + "learning_rate": 4.999611158036906e-06, + "loss": 0.33091676235198975, + "memory(GiB)": 41.86, + "step": 519, + "token_acc": 0.8896637608966376, + "train_speed(iter/s)": 0.240158 + }, + { + "epoch": 0.1664, + "grad_norm": 0.7255001447010705, + "learning_rate": 4.999595449245288e-06, + "loss": 0.417441725730896, + "memory(GiB)": 41.86, + "step": 520, + "token_acc": 0.8853100541842264, + "train_speed(iter/s)": 0.240131 + }, + { + "epoch": 0.16672, + "grad_norm": 0.7349217193131398, + "learning_rate": 4.999579429422379e-06, + "loss": 0.4136850833892822, + "memory(GiB)": 41.86, + "step": 521, + "token_acc": 0.8637349024465779, + "train_speed(iter/s)": 0.240127 + }, + { + "epoch": 0.16704, + "grad_norm": 0.7080752589533771, + "learning_rate": 4.99956309857017e-06, + "loss": 0.4458681643009186, + "memory(GiB)": 41.86, + "step": 522, + "token_acc": 0.945049504950495, + "train_speed(iter/s)": 0.240139 + }, + { + "epoch": 0.16736, + "grad_norm": 0.6671939414985892, + "learning_rate": 4.999546456690696e-06, + "loss": 0.37234577536582947, + "memory(GiB)": 41.86, + "step": 523, + "token_acc": 0.8761958356781092, 
+ "train_speed(iter/s)": 0.240138 + }, + { + "epoch": 0.16768, + "grad_norm": 0.7426671863636843, + "learning_rate": 4.999529503786025e-06, + "loss": 0.4264715909957886, + "memory(GiB)": 41.86, + "step": 524, + "token_acc": 0.8872738059922871, + "train_speed(iter/s)": 0.240143 + }, + { + "epoch": 0.168, + "grad_norm": 0.6724534676660313, + "learning_rate": 4.999512239858267e-06, + "loss": 0.38832327723503113, + "memory(GiB)": 41.86, + "step": 525, + "token_acc": 0.9262295081967213, + "train_speed(iter/s)": 0.240128 + }, + { + "epoch": 0.16832, + "grad_norm": 0.7434934974411659, + "learning_rate": 4.999494664909572e-06, + "loss": 0.519243597984314, + "memory(GiB)": 41.86, + "step": 526, + "token_acc": 0.833595470273671, + "train_speed(iter/s)": 0.240148 + }, + { + "epoch": 0.16864, + "grad_norm": 0.7692363117707088, + "learning_rate": 4.9994767789421255e-06, + "loss": 0.38746243715286255, + "memory(GiB)": 41.86, + "step": 527, + "token_acc": 0.8986568986568987, + "train_speed(iter/s)": 0.240168 + }, + { + "epoch": 0.16896, + "grad_norm": 0.6785312211485801, + "learning_rate": 4.999458581958153e-06, + "loss": 0.4877493381500244, + "memory(GiB)": 41.86, + "step": 528, + "token_acc": 0.8467210956017909, + "train_speed(iter/s)": 0.240182 + }, + { + "epoch": 0.16928, + "grad_norm": 0.7267195174853616, + "learning_rate": 4.9994400739599195e-06, + "loss": 0.4311027228832245, + "memory(GiB)": 41.86, + "step": 529, + "token_acc": 0.8937235271467078, + "train_speed(iter/s)": 0.240196 + }, + { + "epoch": 0.1696, + "grad_norm": 0.6990368031342535, + "learning_rate": 4.999421254949728e-06, + "loss": 0.35965317487716675, + "memory(GiB)": 41.86, + "step": 530, + "token_acc": 0.8974587605884975, + "train_speed(iter/s)": 0.240201 + }, + { + "epoch": 0.16992, + "grad_norm": 0.697891417784386, + "learning_rate": 4.999402124929918e-06, + "loss": 0.409152090549469, + "memory(GiB)": 41.86, + "step": 531, + "token_acc": 0.8995107263831389, + "train_speed(iter/s)": 0.240207 + }, + { + "epoch": 0.17024, + "grad_norm": 0.7337019010588666, + "learning_rate": 4.9993826839028735e-06, + "loss": 0.39932721853256226, + "memory(GiB)": 41.86, + "step": 532, + "token_acc": 0.9195816804904435, + "train_speed(iter/s)": 0.240225 + }, + { + "epoch": 0.17056, + "grad_norm": 0.7207867798194345, + "learning_rate": 4.999362931871011e-06, + "loss": 0.3743005096912384, + "memory(GiB)": 41.86, + "step": 533, + "token_acc": 0.8564383561643836, + "train_speed(iter/s)": 0.240248 + }, + { + "epoch": 0.17088, + "grad_norm": 0.700887407213289, + "learning_rate": 4.9993428688367896e-06, + "loss": 0.37754279375076294, + "memory(GiB)": 41.86, + "step": 534, + "token_acc": 0.9059539918809202, + "train_speed(iter/s)": 0.240251 + }, + { + "epoch": 0.1712, + "grad_norm": 0.8180199812260442, + "learning_rate": 4.9993224948027045e-06, + "loss": 0.4487009048461914, + "memory(GiB)": 41.86, + "step": 535, + "token_acc": 0.9076664801343033, + "train_speed(iter/s)": 0.240263 + }, + { + "epoch": 0.17152, + "grad_norm": 0.7219699133019961, + "learning_rate": 4.999301809771293e-06, + "loss": 0.3877941071987152, + "memory(GiB)": 41.86, + "step": 536, + "token_acc": 0.925767586474932, + "train_speed(iter/s)": 0.240267 + }, + { + "epoch": 0.17184, + "grad_norm": 0.7813544857160303, + "learning_rate": 4.999280813745127e-06, + "loss": 0.35562509298324585, + "memory(GiB)": 41.86, + "step": 537, + "token_acc": 0.8670520231213873, + "train_speed(iter/s)": 0.240287 + }, + { + "epoch": 0.17216, + "grad_norm": 0.7242394710528224, + "learning_rate": 
4.999259506726819e-06, + "loss": 0.43010619282722473, + "memory(GiB)": 41.86, + "step": 538, + "token_acc": 0.893792071802543, + "train_speed(iter/s)": 0.240227 + }, + { + "epoch": 0.17248, + "grad_norm": 0.7227769901146698, + "learning_rate": 4.9992378887190214e-06, + "loss": 0.35778316855430603, + "memory(GiB)": 41.86, + "step": 539, + "token_acc": 0.9102605339337407, + "train_speed(iter/s)": 0.240256 + }, + { + "epoch": 0.1728, + "grad_norm": 0.7193282752452127, + "learning_rate": 4.9992159597244236e-06, + "loss": 0.40651825070381165, + "memory(GiB)": 41.86, + "step": 540, + "token_acc": 0.8412249705535925, + "train_speed(iter/s)": 0.240255 + }, + { + "epoch": 0.17312, + "grad_norm": 0.704997040377488, + "learning_rate": 4.999193719745756e-06, + "loss": 0.4186462163925171, + "memory(GiB)": 41.86, + "step": 541, + "token_acc": 0.9107537054556922, + "train_speed(iter/s)": 0.240279 + }, + { + "epoch": 0.17344, + "grad_norm": 0.7588144768914918, + "learning_rate": 4.999171168785783e-06, + "loss": 0.4886937737464905, + "memory(GiB)": 41.86, + "step": 542, + "token_acc": 0.8825613768666161, + "train_speed(iter/s)": 0.24028 + }, + { + "epoch": 0.17376, + "grad_norm": 0.7314963380024697, + "learning_rate": 4.999148306847313e-06, + "loss": 0.3259052634239197, + "memory(GiB)": 41.86, + "step": 543, + "token_acc": 0.9088, + "train_speed(iter/s)": 0.240299 + }, + { + "epoch": 0.17408, + "grad_norm": 0.7310219502301045, + "learning_rate": 4.9991251339331895e-06, + "loss": 0.3796643614768982, + "memory(GiB)": 41.86, + "step": 544, + "token_acc": 0.9209164818920916, + "train_speed(iter/s)": 0.240277 + }, + { + "epoch": 0.1744, + "grad_norm": 0.7466674737347796, + "learning_rate": 4.999101650046296e-06, + "loss": 0.4011804759502411, + "memory(GiB)": 41.86, + "step": 545, + "token_acc": 0.8580128205128205, + "train_speed(iter/s)": 0.240297 + }, + { + "epoch": 0.17472, + "grad_norm": 0.7101991963517899, + "learning_rate": 4.999077855189557e-06, + "loss": 0.5033053159713745, + "memory(GiB)": 41.86, + "step": 546, + "token_acc": 0.9077069457659372, + "train_speed(iter/s)": 0.240295 + }, + { + "epoch": 0.17504, + "grad_norm": 0.7045310960686249, + "learning_rate": 4.99905374936593e-06, + "loss": 0.356934130191803, + "memory(GiB)": 41.86, + "step": 547, + "token_acc": 0.8250407830342578, + "train_speed(iter/s)": 0.240304 + }, + { + "epoch": 0.17536, + "grad_norm": 0.6675808003876188, + "learning_rate": 4.999029332578416e-06, + "loss": 0.3722524046897888, + "memory(GiB)": 41.86, + "step": 548, + "token_acc": 0.8863207547169811, + "train_speed(iter/s)": 0.240331 + }, + { + "epoch": 0.17568, + "grad_norm": 0.7627899220770223, + "learning_rate": 4.9990046048300526e-06, + "loss": 0.41290193796157837, + "memory(GiB)": 41.86, + "step": 549, + "token_acc": 0.932952380952381, + "train_speed(iter/s)": 0.240358 + }, + { + "epoch": 0.176, + "grad_norm": 0.6799753010388639, + "learning_rate": 4.998979566123918e-06, + "loss": 0.4469655156135559, + "memory(GiB)": 41.86, + "step": 550, + "token_acc": 0.8945104983556792, + "train_speed(iter/s)": 0.240363 + }, + { + "epoch": 0.17632, + "grad_norm": 0.701193171577847, + "learning_rate": 4.998954216463128e-06, + "loss": 0.30783504247665405, + "memory(GiB)": 41.86, + "step": 551, + "token_acc": 0.9347626339969373, + "train_speed(iter/s)": 0.240364 + }, + { + "epoch": 0.17664, + "grad_norm": 0.6678607410388682, + "learning_rate": 4.998928555850835e-06, + "loss": 0.4034227728843689, + "memory(GiB)": 41.86, + "step": 552, + "token_acc": 0.9083613771680041, + "train_speed(iter/s)": 
0.240346 + }, + { + "epoch": 0.17696, + "grad_norm": 0.7067561049582544, + "learning_rate": 4.998902584290234e-06, + "loss": 0.43521934747695923, + "memory(GiB)": 41.86, + "step": 553, + "token_acc": 0.851675903932436, + "train_speed(iter/s)": 0.240357 + }, + { + "epoch": 0.17728, + "grad_norm": 0.6654317468403823, + "learning_rate": 4.998876301784556e-06, + "loss": 0.3983107805252075, + "memory(GiB)": 41.86, + "step": 554, + "token_acc": 0.8349956255468066, + "train_speed(iter/s)": 0.24036 + }, + { + "epoch": 0.1776, + "grad_norm": 0.7316024202711119, + "learning_rate": 4.99884970833707e-06, + "loss": 0.3884185254573822, + "memory(GiB)": 41.86, + "step": 555, + "token_acc": 0.9602240896358544, + "train_speed(iter/s)": 0.240372 + }, + { + "epoch": 0.17792, + "grad_norm": 0.6909036921137306, + "learning_rate": 4.998822803951088e-06, + "loss": 0.40356987714767456, + "memory(GiB)": 41.86, + "step": 556, + "token_acc": 0.8415942769545223, + "train_speed(iter/s)": 0.240371 + }, + { + "epoch": 0.17824, + "grad_norm": 0.8573381495450896, + "learning_rate": 4.9987955886299545e-06, + "loss": 0.38136205077171326, + "memory(GiB)": 41.86, + "step": 557, + "token_acc": 0.8584952665670155, + "train_speed(iter/s)": 0.240369 + }, + { + "epoch": 0.17856, + "grad_norm": 0.6964208573118333, + "learning_rate": 4.998768062377058e-06, + "loss": 0.39367440342903137, + "memory(GiB)": 41.86, + "step": 558, + "token_acc": 0.8946288060212111, + "train_speed(iter/s)": 0.240367 + }, + { + "epoch": 0.17888, + "grad_norm": 0.7570067768711339, + "learning_rate": 4.998740225195824e-06, + "loss": 0.3773024082183838, + "memory(GiB)": 41.86, + "step": 559, + "token_acc": 0.9304388422035481, + "train_speed(iter/s)": 0.240383 + }, + { + "epoch": 0.1792, + "grad_norm": 1.3261977413909418, + "learning_rate": 4.998712077089716e-06, + "loss": 0.4005555510520935, + "memory(GiB)": 41.86, + "step": 560, + "token_acc": 0.8703662597114318, + "train_speed(iter/s)": 0.240368 + }, + { + "epoch": 0.17952, + "grad_norm": 0.7932299114661436, + "learning_rate": 4.998683618062235e-06, + "loss": 0.3728886842727661, + "memory(GiB)": 41.86, + "step": 561, + "token_acc": 0.9546130952380952, + "train_speed(iter/s)": 0.240382 + }, + { + "epoch": 0.17984, + "grad_norm": 0.717010869983448, + "learning_rate": 4.998654848116924e-06, + "loss": 0.420939564704895, + "memory(GiB)": 41.86, + "step": 562, + "token_acc": 0.8821102269378132, + "train_speed(iter/s)": 0.240385 + }, + { + "epoch": 0.18016, + "grad_norm": 0.7762042692611968, + "learning_rate": 4.998625767257362e-06, + "loss": 0.4041133522987366, + "memory(GiB)": 41.86, + "step": 563, + "token_acc": 0.8707692307692307, + "train_speed(iter/s)": 0.240381 + }, + { + "epoch": 0.18048, + "grad_norm": 0.719747085518272, + "learning_rate": 4.9985963754871684e-06, + "loss": 0.43100330233573914, + "memory(GiB)": 41.86, + "step": 564, + "token_acc": 0.8861693861693861, + "train_speed(iter/s)": 0.240391 + }, + { + "epoch": 0.1808, + "grad_norm": 0.7614233951906714, + "learning_rate": 4.99856667281e-06, + "loss": 0.3803737461566925, + "memory(GiB)": 41.86, + "step": 565, + "token_acc": 0.880469583778015, + "train_speed(iter/s)": 0.240393 + }, + { + "epoch": 0.18112, + "grad_norm": 0.7314640643496934, + "learning_rate": 4.9985366592295525e-06, + "loss": 0.3606047034263611, + "memory(GiB)": 41.86, + "step": 566, + "token_acc": 0.9113463446907046, + "train_speed(iter/s)": 0.240413 + }, + { + "epoch": 0.18144, + "grad_norm": 0.7295855799942644, + "learning_rate": 4.9985063347495615e-06, + "loss": 
0.4645580053329468, + "memory(GiB)": 41.86, + "step": 567, + "token_acc": 0.8910810810810811, + "train_speed(iter/s)": 0.240413 + }, + { + "epoch": 0.18176, + "grad_norm": 0.7887665968756749, + "learning_rate": 4.9984756993738e-06, + "loss": 0.4417746067047119, + "memory(GiB)": 41.86, + "step": 568, + "token_acc": 0.94201564657156, + "train_speed(iter/s)": 0.240425 + }, + { + "epoch": 0.18208, + "grad_norm": 0.7490808155481795, + "learning_rate": 4.9984447531060785e-06, + "loss": 0.38317275047302246, + "memory(GiB)": 41.86, + "step": 569, + "token_acc": 0.871661463753035, + "train_speed(iter/s)": 0.240447 + }, + { + "epoch": 0.1824, + "grad_norm": 0.714256420739665, + "learning_rate": 4.99841349595025e-06, + "loss": 0.36582478880882263, + "memory(GiB)": 41.86, + "step": 570, + "token_acc": 0.9073665637406264, + "train_speed(iter/s)": 0.240467 + }, + { + "epoch": 0.18272, + "grad_norm": 0.7206218226587658, + "learning_rate": 4.998381927910202e-06, + "loss": 0.42719489336013794, + "memory(GiB)": 41.86, + "step": 571, + "token_acc": 0.8439891940567312, + "train_speed(iter/s)": 0.240485 + }, + { + "epoch": 0.18304, + "grad_norm": 0.6566178167562026, + "learning_rate": 4.998350048989864e-06, + "loss": 0.36069872975349426, + "memory(GiB)": 41.86, + "step": 572, + "token_acc": 0.92187967674349, + "train_speed(iter/s)": 0.24049 + }, + { + "epoch": 0.18336, + "grad_norm": 0.6632231618493382, + "learning_rate": 4.998317859193202e-06, + "loss": 0.35563305020332336, + "memory(GiB)": 41.86, + "step": 573, + "token_acc": 0.9249110320284698, + "train_speed(iter/s)": 0.240494 + }, + { + "epoch": 0.18368, + "grad_norm": 0.7008643935083931, + "learning_rate": 4.998285358524222e-06, + "loss": 0.42981112003326416, + "memory(GiB)": 41.86, + "step": 574, + "token_acc": 0.7943280531425652, + "train_speed(iter/s)": 0.240483 + }, + { + "epoch": 0.184, + "grad_norm": 0.699254545077358, + "learning_rate": 4.998252546986968e-06, + "loss": 0.40948110818862915, + "memory(GiB)": 41.86, + "step": 575, + "token_acc": 0.8648788035069623, + "train_speed(iter/s)": 0.240471 + }, + { + "epoch": 0.18432, + "grad_norm": 0.7752934212931661, + "learning_rate": 4.998219424585523e-06, + "loss": 0.3346802592277527, + "memory(GiB)": 41.86, + "step": 576, + "token_acc": 0.9201732673267327, + "train_speed(iter/s)": 0.24049 + }, + { + "epoch": 0.18464, + "grad_norm": 0.7612484285659876, + "learning_rate": 4.998185991324008e-06, + "loss": 0.3833213448524475, + "memory(GiB)": 41.86, + "step": 577, + "token_acc": 0.8590287600188591, + "train_speed(iter/s)": 0.240515 + }, + { + "epoch": 0.18496, + "grad_norm": 0.7626696937140971, + "learning_rate": 4.998152247206584e-06, + "loss": 0.3548380136489868, + "memory(GiB)": 41.86, + "step": 578, + "token_acc": 0.9172777940745086, + "train_speed(iter/s)": 0.240535 + }, + { + "epoch": 0.18528, + "grad_norm": 0.6722808780391896, + "learning_rate": 4.9981181922374475e-06, + "loss": 0.39473259449005127, + "memory(GiB)": 41.86, + "step": 579, + "token_acc": 0.937059652418976, + "train_speed(iter/s)": 0.240539 + }, + { + "epoch": 0.1856, + "grad_norm": 0.7102040915588876, + "learning_rate": 4.99808382642084e-06, + "loss": 0.38578078150749207, + "memory(GiB)": 41.86, + "step": 580, + "token_acc": 0.8708791208791209, + "train_speed(iter/s)": 0.240536 + }, + { + "epoch": 0.18592, + "grad_norm": 0.6566941932299604, + "learning_rate": 4.998049149761034e-06, + "loss": 0.3175215721130371, + "memory(GiB)": 41.86, + "step": 581, + "token_acc": 0.9702276707530648, + "train_speed(iter/s)": 0.240545 + }, + { + 
"epoch": 0.18624, + "grad_norm": 0.692942632328679, + "learning_rate": 4.998014162262347e-06, + "loss": 0.3402339518070221, + "memory(GiB)": 41.86, + "step": 582, + "token_acc": 0.9054395226072987, + "train_speed(iter/s)": 0.240525 + }, + { + "epoch": 0.18656, + "grad_norm": 0.6827632356957141, + "learning_rate": 4.997978863929131e-06, + "loss": 0.350196897983551, + "memory(GiB)": 41.86, + "step": 583, + "token_acc": 0.7907068320535539, + "train_speed(iter/s)": 0.240518 + }, + { + "epoch": 0.18688, + "grad_norm": 0.7318762633401616, + "learning_rate": 4.997943254765779e-06, + "loss": 0.3818226158618927, + "memory(GiB)": 41.86, + "step": 584, + "token_acc": 0.7990768395329894, + "train_speed(iter/s)": 0.240541 + }, + { + "epoch": 0.1872, + "grad_norm": 0.7079086614000479, + "learning_rate": 4.997907334776722e-06, + "loss": 0.44802767038345337, + "memory(GiB)": 41.86, + "step": 585, + "token_acc": 0.812691914022518, + "train_speed(iter/s)": 0.240508 + }, + { + "epoch": 0.18752, + "grad_norm": 0.7541375581891403, + "learning_rate": 4.997871103966429e-06, + "loss": 0.42247796058654785, + "memory(GiB)": 41.86, + "step": 586, + "token_acc": 0.8794132272501243, + "train_speed(iter/s)": 0.240525 + }, + { + "epoch": 0.18784, + "grad_norm": 0.7288502850474142, + "learning_rate": 4.997834562339409e-06, + "loss": 0.3354640007019043, + "memory(GiB)": 41.86, + "step": 587, + "token_acc": 0.8979449669104842, + "train_speed(iter/s)": 0.240536 + }, + { + "epoch": 0.18816, + "grad_norm": 0.7150580398990505, + "learning_rate": 4.997797709900209e-06, + "loss": 0.3432292938232422, + "memory(GiB)": 41.86, + "step": 588, + "token_acc": 0.9255429162357808, + "train_speed(iter/s)": 0.240546 + }, + { + "epoch": 0.18848, + "grad_norm": 0.7107770223881175, + "learning_rate": 4.997760546653414e-06, + "loss": 0.5230749845504761, + "memory(GiB)": 41.86, + "step": 589, + "token_acc": 0.8551136363636364, + "train_speed(iter/s)": 0.240551 + }, + { + "epoch": 0.1888, + "grad_norm": 0.7070378092994455, + "learning_rate": 4.9977230726036485e-06, + "loss": 0.39623939990997314, + "memory(GiB)": 41.86, + "step": 590, + "token_acc": 0.8286311389759665, + "train_speed(iter/s)": 0.240557 + }, + { + "epoch": 0.18912, + "grad_norm": 0.6911786126779319, + "learning_rate": 4.9976852877555755e-06, + "loss": 0.39785629510879517, + "memory(GiB)": 41.86, + "step": 591, + "token_acc": 0.9223254705742197, + "train_speed(iter/s)": 0.240565 + }, + { + "epoch": 0.18944, + "grad_norm": 0.7495228501551652, + "learning_rate": 4.997647192113897e-06, + "loss": 0.3889058530330658, + "memory(GiB)": 41.86, + "step": 592, + "token_acc": 0.8795856711264566, + "train_speed(iter/s)": 0.240581 + }, + { + "epoch": 0.18976, + "grad_norm": 0.726904913182407, + "learning_rate": 4.997608785683353e-06, + "loss": 0.4155130982398987, + "memory(GiB)": 41.86, + "step": 593, + "token_acc": 0.8987175271292338, + "train_speed(iter/s)": 0.240591 + }, + { + "epoch": 0.19008, + "grad_norm": 0.7298826496071571, + "learning_rate": 4.997570068468723e-06, + "loss": 0.47346314787864685, + "memory(GiB)": 41.86, + "step": 594, + "token_acc": 0.9125456760048721, + "train_speed(iter/s)": 0.240587 + }, + { + "epoch": 0.1904, + "grad_norm": 0.695997842712418, + "learning_rate": 4.997531040474824e-06, + "loss": 0.4436187148094177, + "memory(GiB)": 41.86, + "step": 595, + "token_acc": 0.8410443463236705, + "train_speed(iter/s)": 0.240587 + }, + { + "epoch": 0.19072, + "grad_norm": 0.7463401086554157, + "learning_rate": 4.997491701706513e-06, + "loss": 0.3639387786388397, + 
"memory(GiB)": 41.86, + "step": 596, + "token_acc": 0.9289940828402367, + "train_speed(iter/s)": 0.240608 + }, + { + "epoch": 0.19104, + "grad_norm": 0.7071974015609407, + "learning_rate": 4.997452052168684e-06, + "loss": 0.3634309768676758, + "memory(GiB)": 41.86, + "step": 597, + "token_acc": 0.9278485145282402, + "train_speed(iter/s)": 0.240622 + }, + { + "epoch": 0.19136, + "grad_norm": 0.6366899927652607, + "learning_rate": 4.997412091866273e-06, + "loss": 0.39992132782936096, + "memory(GiB)": 41.86, + "step": 598, + "token_acc": 0.8722996992070002, + "train_speed(iter/s)": 0.240615 + }, + { + "epoch": 0.19168, + "grad_norm": 0.7487868327688413, + "learning_rate": 4.997371820804249e-06, + "loss": 0.3806472718715668, + "memory(GiB)": 41.86, + "step": 599, + "token_acc": 0.910048266783677, + "train_speed(iter/s)": 0.240635 + }, + { + "epoch": 0.192, + "grad_norm": 0.7448494133914384, + "learning_rate": 4.9973312389876265e-06, + "loss": 0.3898313045501709, + "memory(GiB)": 41.86, + "step": 600, + "token_acc": 0.8377777777777777, + "train_speed(iter/s)": 0.240631 + }, + { + "epoch": 0.19232, + "grad_norm": 0.6899476925520243, + "learning_rate": 4.997290346421451e-06, + "loss": 0.355000376701355, + "memory(GiB)": 41.86, + "step": 601, + "token_acc": 0.9196900317013033, + "train_speed(iter/s)": 0.240646 + }, + { + "epoch": 0.19264, + "grad_norm": 0.7324549370099168, + "learning_rate": 4.997249143110816e-06, + "loss": 0.4301047921180725, + "memory(GiB)": 41.86, + "step": 602, + "token_acc": 0.8924143727673881, + "train_speed(iter/s)": 0.240656 + }, + { + "epoch": 0.19296, + "grad_norm": 0.7356784503240977, + "learning_rate": 4.997207629060845e-06, + "loss": 0.46152374148368835, + "memory(GiB)": 41.86, + "step": 603, + "token_acc": 0.9217616580310881, + "train_speed(iter/s)": 0.24065 + }, + { + "epoch": 0.19328, + "grad_norm": 0.7663266212783498, + "learning_rate": 4.997165804276705e-06, + "loss": 0.3720739483833313, + "memory(GiB)": 41.86, + "step": 604, + "token_acc": 0.9042263122017723, + "train_speed(iter/s)": 0.240652 + }, + { + "epoch": 0.1936, + "grad_norm": 0.9399264100619061, + "learning_rate": 4.997123668763599e-06, + "loss": 0.3939239978790283, + "memory(GiB)": 41.86, + "step": 605, + "token_acc": 0.8625429553264605, + "train_speed(iter/s)": 0.240629 + }, + { + "epoch": 0.19392, + "grad_norm": 0.6777912336787236, + "learning_rate": 4.997081222526772e-06, + "loss": 0.37303873896598816, + "memory(GiB)": 41.86, + "step": 606, + "token_acc": 0.9304884594739667, + "train_speed(iter/s)": 0.240648 + }, + { + "epoch": 0.19424, + "grad_norm": 0.7892460850101191, + "learning_rate": 4.997038465571504e-06, + "loss": 0.49259454011917114, + "memory(GiB)": 41.86, + "step": 607, + "token_acc": 0.8501669449081803, + "train_speed(iter/s)": 0.240652 + }, + { + "epoch": 0.19456, + "grad_norm": 0.6890807872556636, + "learning_rate": 4.9969953979031174e-06, + "loss": 0.41470372676849365, + "memory(GiB)": 41.86, + "step": 608, + "token_acc": 0.9322147651006711, + "train_speed(iter/s)": 0.240639 + }, + { + "epoch": 0.19488, + "grad_norm": 0.7174067179656343, + "learning_rate": 4.996952019526968e-06, + "loss": 0.3633441925048828, + "memory(GiB)": 41.86, + "step": 609, + "token_acc": 0.892267365661861, + "train_speed(iter/s)": 0.240662 + }, + { + "epoch": 0.1952, + "grad_norm": 0.6960540570229644, + "learning_rate": 4.996908330448456e-06, + "loss": 0.310346394777298, + "memory(GiB)": 41.86, + "step": 610, + "token_acc": 0.8476098034457656, + "train_speed(iter/s)": 0.240676 + }, + { + "epoch": 0.19552, + 
"grad_norm": 0.7351546859104893, + "learning_rate": 4.996864330673019e-06, + "loss": 0.367519736289978, + "memory(GiB)": 41.86, + "step": 611, + "token_acc": 0.836912362159025, + "train_speed(iter/s)": 0.240691 + }, + { + "epoch": 0.19584, + "grad_norm": 0.6633069760681427, + "learning_rate": 4.9968200202061275e-06, + "loss": 0.41480374336242676, + "memory(GiB)": 41.86, + "step": 612, + "token_acc": 0.9037171350861287, + "train_speed(iter/s)": 0.240691 + }, + { + "epoch": 0.19616, + "grad_norm": 0.7297234628369268, + "learning_rate": 4.9967753990533e-06, + "loss": 0.3049129247665405, + "memory(GiB)": 41.86, + "step": 613, + "token_acc": 0.9025934861278649, + "train_speed(iter/s)": 0.240713 + }, + { + "epoch": 0.19648, + "grad_norm": 0.7008172814466513, + "learning_rate": 4.996730467220086e-06, + "loss": 0.4790416359901428, + "memory(GiB)": 41.86, + "step": 614, + "token_acc": 0.9024451726745651, + "train_speed(iter/s)": 0.240724 + }, + { + "epoch": 0.1968, + "grad_norm": 0.7100437959428243, + "learning_rate": 4.996685224712077e-06, + "loss": 0.30980467796325684, + "memory(GiB)": 41.86, + "step": 615, + "token_acc": 0.8532873959230548, + "train_speed(iter/s)": 0.240741 + }, + { + "epoch": 0.19712, + "grad_norm": 0.6337352556093074, + "learning_rate": 4.996639671534902e-06, + "loss": 0.36125442385673523, + "memory(GiB)": 41.86, + "step": 616, + "token_acc": 0.9189397838394235, + "train_speed(iter/s)": 0.240754 + }, + { + "epoch": 0.19744, + "grad_norm": 0.6694176123236347, + "learning_rate": 4.996593807694231e-06, + "loss": 0.36232417821884155, + "memory(GiB)": 41.86, + "step": 617, + "token_acc": 0.8979942693409743, + "train_speed(iter/s)": 0.240763 + }, + { + "epoch": 0.19776, + "grad_norm": 0.6834012430437612, + "learning_rate": 4.99654763319577e-06, + "loss": 0.500540018081665, + "memory(GiB)": 41.86, + "step": 618, + "token_acc": 0.8259456264775413, + "train_speed(iter/s)": 0.240764 + }, + { + "epoch": 0.19808, + "grad_norm": 0.7347964370077852, + "learning_rate": 4.996501148045265e-06, + "loss": 0.35871589183807373, + "memory(GiB)": 41.86, + "step": 619, + "token_acc": 0.9084830756372754, + "train_speed(iter/s)": 0.24078 + }, + { + "epoch": 0.1984, + "grad_norm": 0.7178315887019402, + "learning_rate": 4.996454352248499e-06, + "loss": 0.510735809803009, + "memory(GiB)": 41.86, + "step": 620, + "token_acc": 0.7712082262210797, + "train_speed(iter/s)": 0.240793 + }, + { + "epoch": 0.19872, + "grad_norm": 0.7401470454396356, + "learning_rate": 4.996407245811297e-06, + "loss": 0.37660109996795654, + "memory(GiB)": 41.86, + "step": 621, + "token_acc": 0.9276848354020507, + "train_speed(iter/s)": 0.240811 + }, + { + "epoch": 0.19904, + "grad_norm": 0.7795618965043085, + "learning_rate": 4.996359828739519e-06, + "loss": 0.5003116130828857, + "memory(GiB)": 41.86, + "step": 622, + "token_acc": 0.8593436034829203, + "train_speed(iter/s)": 0.240823 + }, + { + "epoch": 0.19936, + "grad_norm": 0.6578701411810297, + "learning_rate": 4.996312101039066e-06, + "loss": 0.30227798223495483, + "memory(GiB)": 41.86, + "step": 623, + "token_acc": 0.9114774114774115, + "train_speed(iter/s)": 0.240841 + }, + { + "epoch": 0.19968, + "grad_norm": 0.6824758352628882, + "learning_rate": 4.996264062715875e-06, + "loss": 0.430012047290802, + "memory(GiB)": 41.86, + "step": 624, + "token_acc": 0.9412997903563941, + "train_speed(iter/s)": 0.240841 + }, + { + "epoch": 0.2, + "grad_norm": 0.7186723504036854, + "learning_rate": 4.9962157137759265e-06, + "loss": 0.37046653032302856, + "memory(GiB)": 41.86, + "step": 625, 
+ "token_acc": 0.9190948543087415, + "train_speed(iter/s)": 0.240828 + }, + { + "epoch": 0.20032, + "grad_norm": 0.7437554921480349, + "learning_rate": 4.996167054225235e-06, + "loss": 0.4950665831565857, + "memory(GiB)": 41.86, + "step": 626, + "token_acc": 0.8419008453278501, + "train_speed(iter/s)": 0.240796 + }, + { + "epoch": 0.20064, + "grad_norm": 0.7267636720711464, + "learning_rate": 4.996118084069855e-06, + "loss": 0.3634376525878906, + "memory(GiB)": 41.86, + "step": 627, + "token_acc": 0.9135602377093462, + "train_speed(iter/s)": 0.240804 + }, + { + "epoch": 0.20096, + "grad_norm": 0.6916318702322536, + "learning_rate": 4.996068803315882e-06, + "loss": 0.2752354145050049, + "memory(GiB)": 41.86, + "step": 628, + "token_acc": 0.9358974358974359, + "train_speed(iter/s)": 0.240831 + }, + { + "epoch": 0.20128, + "grad_norm": 0.7302995746598735, + "learning_rate": 4.996019211969446e-06, + "loss": 0.4127858281135559, + "memory(GiB)": 41.86, + "step": 629, + "token_acc": 0.9296465968586387, + "train_speed(iter/s)": 0.240818 + }, + { + "epoch": 0.2016, + "grad_norm": 0.7507055248710407, + "learning_rate": 4.995969310036719e-06, + "loss": 0.4005252718925476, + "memory(GiB)": 41.86, + "step": 630, + "token_acc": 0.8391862436425285, + "train_speed(iter/s)": 0.240824 + }, + { + "epoch": 0.20192, + "grad_norm": 0.6891238082953958, + "learning_rate": 4.995919097523909e-06, + "loss": 0.45887523889541626, + "memory(GiB)": 41.86, + "step": 631, + "token_acc": 0.8497017892644135, + "train_speed(iter/s)": 0.240819 + }, + { + "epoch": 0.20224, + "grad_norm": 0.724869974115601, + "learning_rate": 4.995868574437265e-06, + "loss": 0.48080503940582275, + "memory(GiB)": 41.86, + "step": 632, + "token_acc": 0.9182754182754183, + "train_speed(iter/s)": 0.240827 + }, + { + "epoch": 0.20256, + "grad_norm": 0.707258837009197, + "learning_rate": 4.995817740783075e-06, + "loss": 0.40979158878326416, + "memory(GiB)": 41.86, + "step": 633, + "token_acc": 0.9125315391084945, + "train_speed(iter/s)": 0.240848 + }, + { + "epoch": 0.20288, + "grad_norm": 0.6605348126576681, + "learning_rate": 4.995766596567662e-06, + "loss": 0.4081265330314636, + "memory(GiB)": 41.86, + "step": 634, + "token_acc": 0.910455764075067, + "train_speed(iter/s)": 0.240829 + }, + { + "epoch": 0.2032, + "grad_norm": 0.7610637007795256, + "learning_rate": 4.995715141797392e-06, + "loss": 0.4674842655658722, + "memory(GiB)": 41.86, + "step": 635, + "token_acc": 0.839852738150023, + "train_speed(iter/s)": 0.24083 + }, + { + "epoch": 0.20352, + "grad_norm": 0.6639084080518016, + "learning_rate": 4.995663376478666e-06, + "loss": 0.3504132032394409, + "memory(GiB)": 41.86, + "step": 636, + "token_acc": 0.919965075669383, + "train_speed(iter/s)": 0.240779 + }, + { + "epoch": 0.20384, + "grad_norm": 0.6633018581482668, + "learning_rate": 4.995611300617927e-06, + "loss": 0.3760378956794739, + "memory(GiB)": 41.86, + "step": 637, + "token_acc": 0.925868001251173, + "train_speed(iter/s)": 0.240783 + }, + { + "epoch": 0.20416, + "grad_norm": 0.7178556433270188, + "learning_rate": 4.995558914221653e-06, + "loss": 0.4086587429046631, + "memory(GiB)": 41.86, + "step": 638, + "token_acc": 0.8946564885496183, + "train_speed(iter/s)": 0.240804 + }, + { + "epoch": 0.20448, + "grad_norm": 0.7071764519079325, + "learning_rate": 4.995506217296364e-06, + "loss": 0.4297142028808594, + "memory(GiB)": 41.86, + "step": 639, + "token_acc": 0.8668494820231566, + "train_speed(iter/s)": 0.240808 + }, + { + "epoch": 0.2048, + "grad_norm": 0.7265850580914968, + 
"learning_rate": 4.995453209848617e-06, + "loss": 0.4079035818576813, + "memory(GiB)": 41.86, + "step": 640, + "token_acc": 0.9159061277705346, + "train_speed(iter/s)": 0.240822 + }, + { + "epoch": 0.20512, + "grad_norm": 0.7348518577795692, + "learning_rate": 4.995399891885007e-06, + "loss": 0.4221140444278717, + "memory(GiB)": 41.86, + "step": 641, + "token_acc": 0.8953846153846153, + "train_speed(iter/s)": 0.240831 + }, + { + "epoch": 0.20544, + "grad_norm": 0.725309552126381, + "learning_rate": 4.9953462634121705e-06, + "loss": 0.3429161012172699, + "memory(GiB)": 41.86, + "step": 642, + "token_acc": 0.9297820823244553, + "train_speed(iter/s)": 0.240854 + }, + { + "epoch": 0.20576, + "grad_norm": 0.7299671807968264, + "learning_rate": 4.9952923244367776e-06, + "loss": 0.3431488275527954, + "memory(GiB)": 41.86, + "step": 643, + "token_acc": 0.9114194236926361, + "train_speed(iter/s)": 0.240869 + }, + { + "epoch": 0.20608, + "grad_norm": 0.6680015750914127, + "learning_rate": 4.995238074965544e-06, + "loss": 0.36122021079063416, + "memory(GiB)": 41.86, + "step": 644, + "token_acc": 0.948925909688733, + "train_speed(iter/s)": 0.240869 + }, + { + "epoch": 0.2064, + "grad_norm": 0.7002113194215094, + "learning_rate": 4.9951835150052165e-06, + "loss": 0.3564288318157196, + "memory(GiB)": 41.86, + "step": 645, + "token_acc": 0.9147208121827411, + "train_speed(iter/s)": 0.24089 + }, + { + "epoch": 0.20672, + "grad_norm": 0.7461916728505239, + "learning_rate": 4.995128644562585e-06, + "loss": 0.339659184217453, + "memory(GiB)": 41.86, + "step": 646, + "token_acc": 0.8916857360793288, + "train_speed(iter/s)": 0.240892 + }, + { + "epoch": 0.20704, + "grad_norm": 0.7348731244200202, + "learning_rate": 4.995073463644478e-06, + "loss": 0.43801093101501465, + "memory(GiB)": 41.86, + "step": 647, + "token_acc": 0.8897408778424114, + "train_speed(iter/s)": 0.240887 + }, + { + "epoch": 0.20736, + "grad_norm": 0.6893289352824309, + "learning_rate": 4.9950179722577614e-06, + "loss": 0.28794151544570923, + "memory(GiB)": 41.86, + "step": 648, + "token_acc": 0.9357933579335793, + "train_speed(iter/s)": 0.240911 + }, + { + "epoch": 0.20768, + "grad_norm": 0.762750783860591, + "learning_rate": 4.994962170409342e-06, + "loss": 0.4345610737800598, + "memory(GiB)": 41.86, + "step": 649, + "token_acc": 0.8886608517188301, + "train_speed(iter/s)": 0.240922 + }, + { + "epoch": 0.208, + "grad_norm": 0.6813465873051964, + "learning_rate": 4.9949060581061595e-06, + "loss": 0.39386433362960815, + "memory(GiB)": 41.86, + "step": 650, + "token_acc": 0.8239918843520162, + "train_speed(iter/s)": 0.240933 + }, + { + "epoch": 0.20832, + "grad_norm": 0.727317211921378, + "learning_rate": 4.994849635355199e-06, + "loss": 0.4502859115600586, + "memory(GiB)": 41.86, + "step": 651, + "token_acc": 0.8494623655913979, + "train_speed(iter/s)": 0.240928 + }, + { + "epoch": 0.20864, + "grad_norm": 0.6871709937485635, + "learning_rate": 4.9947929021634815e-06, + "loss": 0.41390347480773926, + "memory(GiB)": 41.86, + "step": 652, + "token_acc": 0.8099173553719008, + "train_speed(iter/s)": 0.240941 + }, + { + "epoch": 0.20896, + "grad_norm": 0.7361720668304206, + "learning_rate": 4.994735858538064e-06, + "loss": 0.46877622604370117, + "memory(GiB)": 41.86, + "step": 653, + "token_acc": 0.9154310818231741, + "train_speed(iter/s)": 0.240948 + }, + { + "epoch": 0.20928, + "grad_norm": 0.7092564357737654, + "learning_rate": 4.994678504486047e-06, + "loss": 0.3681297302246094, + "memory(GiB)": 41.86, + "step": 654, + "token_acc": 
0.8501619870410367, + "train_speed(iter/s)": 0.240936 + }, + { + "epoch": 0.2096, + "grad_norm": 0.688002563646692, + "learning_rate": 4.994620840014565e-06, + "loss": 0.4735531806945801, + "memory(GiB)": 41.86, + "step": 655, + "token_acc": 0.8019751835907825, + "train_speed(iter/s)": 0.240913 + }, + { + "epoch": 0.20992, + "grad_norm": 0.7182833563451828, + "learning_rate": 4.994562865130796e-06, + "loss": 0.40688467025756836, + "memory(GiB)": 41.86, + "step": 656, + "token_acc": 0.8517273005197188, + "train_speed(iter/s)": 0.240933 + }, + { + "epoch": 0.21024, + "grad_norm": 0.712301795121346, + "learning_rate": 4.9945045798419524e-06, + "loss": 0.3910367488861084, + "memory(GiB)": 41.86, + "step": 657, + "token_acc": 0.8931464174454828, + "train_speed(iter/s)": 0.240934 + }, + { + "epoch": 0.21056, + "grad_norm": 0.7154843256917051, + "learning_rate": 4.994445984155287e-06, + "loss": 0.4038703739643097, + "memory(GiB)": 41.86, + "step": 658, + "token_acc": 0.9252018699532512, + "train_speed(iter/s)": 0.240951 + }, + { + "epoch": 0.21088, + "grad_norm": 0.7186817358197332, + "learning_rate": 4.994387078078091e-06, + "loss": 0.3840501308441162, + "memory(GiB)": 41.86, + "step": 659, + "token_acc": 0.9356233485467211, + "train_speed(iter/s)": 0.240962 + }, + { + "epoch": 0.2112, + "grad_norm": 0.6388204153615546, + "learning_rate": 4.9943278616176945e-06, + "loss": 0.4145182967185974, + "memory(GiB)": 41.86, + "step": 660, + "token_acc": 0.9159792239535595, + "train_speed(iter/s)": 0.240941 + }, + { + "epoch": 0.21152, + "grad_norm": 0.7337065491737129, + "learning_rate": 4.994268334781465e-06, + "loss": 0.4388319253921509, + "memory(GiB)": 41.86, + "step": 661, + "token_acc": 0.8877693814721522, + "train_speed(iter/s)": 0.240954 + }, + { + "epoch": 0.21184, + "grad_norm": 0.8094002264166715, + "learning_rate": 4.994208497576811e-06, + "loss": 0.4007093608379364, + "memory(GiB)": 41.86, + "step": 662, + "token_acc": 0.799672131147541, + "train_speed(iter/s)": 0.240964 + }, + { + "epoch": 0.21216, + "grad_norm": 0.7393964954195534, + "learning_rate": 4.994148350011178e-06, + "loss": 0.4640263020992279, + "memory(GiB)": 41.86, + "step": 663, + "token_acc": 0.8587026332691072, + "train_speed(iter/s)": 0.240949 + }, + { + "epoch": 0.21248, + "grad_norm": 0.6793244554997642, + "learning_rate": 4.994087892092049e-06, + "loss": 0.3085007071495056, + "memory(GiB)": 41.86, + "step": 664, + "token_acc": 0.934462915601023, + "train_speed(iter/s)": 0.240962 + }, + { + "epoch": 0.2128, + "grad_norm": 0.7333968359230266, + "learning_rate": 4.9940271238269475e-06, + "loss": 0.3759646415710449, + "memory(GiB)": 41.86, + "step": 665, + "token_acc": 0.9125619352958321, + "train_speed(iter/s)": 0.240961 + }, + { + "epoch": 0.21312, + "grad_norm": 0.7434999652954282, + "learning_rate": 4.993966045223436e-06, + "loss": 0.42632484436035156, + "memory(GiB)": 41.86, + "step": 666, + "token_acc": 0.8646654795217502, + "train_speed(iter/s)": 0.240975 + }, + { + "epoch": 0.21344, + "grad_norm": 0.6675589250344625, + "learning_rate": 4.993904656289113e-06, + "loss": 0.368966281414032, + "memory(GiB)": 41.86, + "step": 667, + "token_acc": 0.8954918032786885, + "train_speed(iter/s)": 0.240958 + }, + { + "epoch": 0.21376, + "grad_norm": 0.7260795862486636, + "learning_rate": 4.993842957031619e-06, + "loss": 0.3905546963214874, + "memory(GiB)": 41.86, + "step": 668, + "token_acc": 0.8984397163120568, + "train_speed(iter/s)": 0.240978 + }, + { + "epoch": 0.21408, + "grad_norm": 0.7383524200328477, + "learning_rate": 
4.993780947458632e-06, + "loss": 0.392816424369812, + "memory(GiB)": 41.86, + "step": 669, + "token_acc": 0.890621875624875, + "train_speed(iter/s)": 0.24096 + }, + { + "epoch": 0.2144, + "grad_norm": 0.6683192719987626, + "learning_rate": 4.9937186275778646e-06, + "loss": 0.3148327171802521, + "memory(GiB)": 41.86, + "step": 670, + "token_acc": 0.8907902924704418, + "train_speed(iter/s)": 0.24097 + }, + { + "epoch": 0.21472, + "grad_norm": 0.7205793263368735, + "learning_rate": 4.993655997397075e-06, + "loss": 0.47282326221466064, + "memory(GiB)": 41.86, + "step": 671, + "token_acc": 0.8849921011058451, + "train_speed(iter/s)": 0.240968 + }, + { + "epoch": 0.21504, + "grad_norm": 0.9123442927137485, + "learning_rate": 4.993593056924055e-06, + "loss": 0.38779354095458984, + "memory(GiB)": 41.86, + "step": 672, + "token_acc": 0.9281559045956952, + "train_speed(iter/s)": 0.240981 + }, + { + "epoch": 0.21536, + "grad_norm": 0.6943919384296121, + "learning_rate": 4.9935298061666356e-06, + "loss": 0.4451703131198883, + "memory(GiB)": 41.86, + "step": 673, + "token_acc": 0.8061052631578948, + "train_speed(iter/s)": 0.240976 + }, + { + "epoch": 0.21568, + "grad_norm": 0.6887922012887568, + "learning_rate": 4.9934662451326885e-06, + "loss": 0.3671219050884247, + "memory(GiB)": 41.86, + "step": 674, + "token_acc": 0.8544500119303269, + "train_speed(iter/s)": 0.24097 + }, + { + "epoch": 0.216, + "grad_norm": 0.6847091472772892, + "learning_rate": 4.9934023738301215e-06, + "loss": 0.34528207778930664, + "memory(GiB)": 41.86, + "step": 675, + "token_acc": 0.9237835998638992, + "train_speed(iter/s)": 0.240984 + }, + { + "epoch": 0.21632, + "grad_norm": 0.6858998952654874, + "learning_rate": 4.993338192266885e-06, + "loss": 0.39834946393966675, + "memory(GiB)": 41.86, + "step": 676, + "token_acc": 0.8597758405977584, + "train_speed(iter/s)": 0.240959 + }, + { + "epoch": 0.21664, + "grad_norm": 0.686641174823756, + "learning_rate": 4.993273700450962e-06, + "loss": 0.37345531582832336, + "memory(GiB)": 41.86, + "step": 677, + "token_acc": 0.9496176338281601, + "train_speed(iter/s)": 0.240969 + }, + { + "epoch": 0.21696, + "grad_norm": 0.6805802712438582, + "learning_rate": 4.9932088983903795e-06, + "loss": 0.4547409117221832, + "memory(GiB)": 41.86, + "step": 678, + "token_acc": 0.915282392026578, + "train_speed(iter/s)": 0.24094 + }, + { + "epoch": 0.21728, + "grad_norm": 0.664417238464341, + "learning_rate": 4.9931437860932e-06, + "loss": 0.41881075501441956, + "memory(GiB)": 41.86, + "step": 679, + "token_acc": 0.9328712148850784, + "train_speed(iter/s)": 0.240928 + }, + { + "epoch": 0.2176, + "grad_norm": 0.7080536559680454, + "learning_rate": 4.993078363567526e-06, + "loss": 0.31501907110214233, + "memory(GiB)": 41.86, + "step": 680, + "token_acc": 0.9295430763864667, + "train_speed(iter/s)": 0.240946 + }, + { + "epoch": 0.21792, + "grad_norm": 0.7391345860639904, + "learning_rate": 4.993012630821498e-06, + "loss": 0.35557496547698975, + "memory(GiB)": 41.86, + "step": 681, + "token_acc": 0.8859910581222057, + "train_speed(iter/s)": 0.240949 + }, + { + "epoch": 0.21824, + "grad_norm": 0.6767188115269217, + "learning_rate": 4.992946587863295e-06, + "loss": 0.3342413306236267, + "memory(GiB)": 41.86, + "step": 682, + "token_acc": 0.9353140278300113, + "train_speed(iter/s)": 0.240965 + }, + { + "epoch": 0.21856, + "grad_norm": 0.6851841808904401, + "learning_rate": 4.992880234701136e-06, + "loss": 0.3321181535720825, + "memory(GiB)": 41.86, + "step": 683, + "token_acc": 0.9253255381344672, + 
"train_speed(iter/s)": 0.240982 + }, + { + "epoch": 0.21888, + "grad_norm": 0.6774063128016391, + "learning_rate": 4.992813571343276e-06, + "loss": 0.3438548743724823, + "memory(GiB)": 41.86, + "step": 684, + "token_acc": 0.8530805687203792, + "train_speed(iter/s)": 0.240967 + }, + { + "epoch": 0.2192, + "grad_norm": 0.7448192956757836, + "learning_rate": 4.992746597798012e-06, + "loss": 0.40210121870040894, + "memory(GiB)": 41.86, + "step": 685, + "token_acc": 0.9288014311270125, + "train_speed(iter/s)": 0.240984 + }, + { + "epoch": 0.21952, + "grad_norm": 0.741257000523544, + "learning_rate": 4.9926793140736756e-06, + "loss": 0.5914468765258789, + "memory(GiB)": 41.86, + "step": 686, + "token_acc": 0.8430114787305875, + "train_speed(iter/s)": 0.240981 + }, + { + "epoch": 0.21984, + "grad_norm": 0.6668227853801081, + "learning_rate": 4.9926117201786405e-06, + "loss": 0.36227187514305115, + "memory(GiB)": 41.86, + "step": 687, + "token_acc": 0.8855659911023233, + "train_speed(iter/s)": 0.240999 + }, + { + "epoch": 0.22016, + "grad_norm": 0.8048767755071963, + "learning_rate": 4.992543816121317e-06, + "loss": 0.44223666191101074, + "memory(GiB)": 41.86, + "step": 688, + "token_acc": 0.9143029571514786, + "train_speed(iter/s)": 0.24101 + }, + { + "epoch": 0.22048, + "grad_norm": 0.6978728199884829, + "learning_rate": 4.992475601910155e-06, + "loss": 0.42237889766693115, + "memory(GiB)": 41.86, + "step": 689, + "token_acc": 0.906876227897839, + "train_speed(iter/s)": 0.241012 + }, + { + "epoch": 0.2208, + "grad_norm": 0.6953847446727337, + "learning_rate": 4.992407077553643e-06, + "loss": 0.49450770020484924, + "memory(GiB)": 41.86, + "step": 690, + "token_acc": 0.8197539075490522, + "train_speed(iter/s)": 0.241016 + }, + { + "epoch": 0.22112, + "grad_norm": 0.6369955643516892, + "learning_rate": 4.992338243060305e-06, + "loss": 0.39748892188072205, + "memory(GiB)": 41.86, + "step": 691, + "token_acc": 0.9084359749012317, + "train_speed(iter/s)": 0.240994 + }, + { + "epoch": 0.22144, + "grad_norm": 0.7194077036465691, + "learning_rate": 4.9922690984387105e-06, + "loss": 0.4647546410560608, + "memory(GiB)": 41.86, + "step": 692, + "token_acc": 0.8419638057695753, + "train_speed(iter/s)": 0.24099 + }, + { + "epoch": 0.22176, + "grad_norm": 0.7664964095767078, + "learning_rate": 4.9921996436974595e-06, + "loss": 0.39649444818496704, + "memory(GiB)": 41.86, + "step": 693, + "token_acc": 0.8478792822185971, + "train_speed(iter/s)": 0.240996 + }, + { + "epoch": 0.22208, + "grad_norm": 0.6827671298657165, + "learning_rate": 4.992129878845197e-06, + "loss": 0.36891406774520874, + "memory(GiB)": 41.86, + "step": 694, + "token_acc": 0.8773034756239795, + "train_speed(iter/s)": 0.240994 + }, + { + "epoch": 0.2224, + "grad_norm": 0.6654600490631626, + "learning_rate": 4.992059803890602e-06, + "loss": 0.49363040924072266, + "memory(GiB)": 41.86, + "step": 695, + "token_acc": 0.8214101904271744, + "train_speed(iter/s)": 0.241002 + }, + { + "epoch": 0.22272, + "grad_norm": 0.7027668271033155, + "learning_rate": 4.9919894188423965e-06, + "loss": 0.3547956943511963, + "memory(GiB)": 41.86, + "step": 696, + "token_acc": 0.840042372881356, + "train_speed(iter/s)": 0.241013 + }, + { + "epoch": 0.22304, + "grad_norm": 0.6861007676527083, + "learning_rate": 4.991918723709337e-06, + "loss": 0.4164801239967346, + "memory(GiB)": 41.86, + "step": 697, + "token_acc": 0.9363662539591131, + "train_speed(iter/s)": 0.240989 + }, + { + "epoch": 0.22336, + "grad_norm": 0.7049227479366047, + "learning_rate": 
4.99184771850022e-06, + "loss": 0.3631105422973633, + "memory(GiB)": 41.86, + "step": 698, + "token_acc": 0.9063709961281239, + "train_speed(iter/s)": 0.241 + }, + { + "epoch": 0.22368, + "grad_norm": 0.699942029026459, + "learning_rate": 4.991776403223882e-06, + "loss": 0.45336928963661194, + "memory(GiB)": 41.86, + "step": 699, + "token_acc": 0.880465644520159, + "train_speed(iter/s)": 0.241004 + }, + { + "epoch": 0.224, + "grad_norm": 0.7016149244053942, + "learning_rate": 4.991704777889196e-06, + "loss": 0.3199717402458191, + "memory(GiB)": 41.86, + "step": 700, + "token_acc": 0.8741429970617042, + "train_speed(iter/s)": 0.241021 + }, + { + "epoch": 0.22432, + "grad_norm": 0.7022150031987149, + "learning_rate": 4.991632842505076e-06, + "loss": 0.3656160235404968, + "memory(GiB)": 41.86, + "step": 701, + "token_acc": 0.8396176314391928, + "train_speed(iter/s)": 0.241044 + }, + { + "epoch": 0.22464, + "grad_norm": 0.7289044318829413, + "learning_rate": 4.991560597080471e-06, + "loss": 0.402595192193985, + "memory(GiB)": 41.86, + "step": 702, + "token_acc": 0.8854103343465045, + "train_speed(iter/s)": 0.241054 + }, + { + "epoch": 0.22496, + "grad_norm": 0.6591403781970326, + "learning_rate": 4.991488041624373e-06, + "loss": 0.40790414810180664, + "memory(GiB)": 41.86, + "step": 703, + "token_acc": 0.9103699843668578, + "train_speed(iter/s)": 0.241054 + }, + { + "epoch": 0.22528, + "grad_norm": 0.7040653699282617, + "learning_rate": 4.9914151761458084e-06, + "loss": 0.4283745288848877, + "memory(GiB)": 41.86, + "step": 704, + "token_acc": 0.7714620568414884, + "train_speed(iter/s)": 0.241065 + }, + { + "epoch": 0.2256, + "grad_norm": 0.6860353000403746, + "learning_rate": 4.991342000653845e-06, + "loss": 0.5238885879516602, + "memory(GiB)": 41.86, + "step": 705, + "token_acc": 0.8626214867349619, + "train_speed(iter/s)": 0.241066 + }, + { + "epoch": 0.22592, + "grad_norm": 0.7619410160164504, + "learning_rate": 4.991268515157587e-06, + "loss": 0.44094744324684143, + "memory(GiB)": 41.86, + "step": 706, + "token_acc": 0.8567408544384754, + "train_speed(iter/s)": 0.241083 + }, + { + "epoch": 0.22624, + "grad_norm": 0.7010678425196203, + "learning_rate": 4.99119471966618e-06, + "loss": 0.37000611424446106, + "memory(GiB)": 41.86, + "step": 707, + "token_acc": 0.8783783783783784, + "train_speed(iter/s)": 0.241085 + }, + { + "epoch": 0.22656, + "grad_norm": 0.6578009571827106, + "learning_rate": 4.991120614188807e-06, + "loss": 0.44439181685447693, + "memory(GiB)": 41.86, + "step": 708, + "token_acc": 0.8354007633587787, + "train_speed(iter/s)": 0.241099 + }, + { + "epoch": 0.22688, + "grad_norm": 0.9938102576951828, + "learning_rate": 4.991046198734686e-06, + "loss": 0.47150009870529175, + "memory(GiB)": 41.86, + "step": 709, + "token_acc": 0.8784576697401508, + "train_speed(iter/s)": 0.241103 + }, + { + "epoch": 0.2272, + "grad_norm": 0.7226908817772437, + "learning_rate": 4.990971473313081e-06, + "loss": 0.4176260530948639, + "memory(GiB)": 41.86, + "step": 710, + "token_acc": 0.8262603246938194, + "train_speed(iter/s)": 0.241113 + }, + { + "epoch": 0.22752, + "grad_norm": 0.6808667852870662, + "learning_rate": 4.990896437933286e-06, + "loss": 0.4292218089103699, + "memory(GiB)": 41.86, + "step": 711, + "token_acc": 0.8457552809884417, + "train_speed(iter/s)": 0.24112 + }, + { + "epoch": 0.22784, + "grad_norm": 0.7011431408123251, + "learning_rate": 4.9908210926046405e-06, + "loss": 0.29058289527893066, + "memory(GiB)": 41.86, + "step": 712, + "token_acc": 0.9057009680889208, + 
"train_speed(iter/s)": 0.241122 + }, + { + "epoch": 0.22816, + "grad_norm": 0.7208696004909105, + "learning_rate": 4.99074543733652e-06, + "loss": 0.3040674328804016, + "memory(GiB)": 41.86, + "step": 713, + "token_acc": 0.9348575007829627, + "train_speed(iter/s)": 0.241132 + }, + { + "epoch": 0.22848, + "grad_norm": 0.6422848741537666, + "learning_rate": 4.990669472138337e-06, + "loss": 0.4201911687850952, + "memory(GiB)": 41.86, + "step": 714, + "token_acc": 0.8763141620284477, + "train_speed(iter/s)": 0.241132 + }, + { + "epoch": 0.2288, + "grad_norm": 0.7204342722880653, + "learning_rate": 4.990593197019545e-06, + "loss": 0.46834367513656616, + "memory(GiB)": 41.86, + "step": 715, + "token_acc": 0.8136551424222657, + "train_speed(iter/s)": 0.241134 + }, + { + "epoch": 0.22912, + "grad_norm": 0.7308820312406343, + "learning_rate": 4.990516611989635e-06, + "loss": 0.4614957869052887, + "memory(GiB)": 41.86, + "step": 716, + "token_acc": 0.9403166869671132, + "train_speed(iter/s)": 0.241143 + }, + { + "epoch": 0.22944, + "grad_norm": 0.7144913280032881, + "learning_rate": 4.9904397170581375e-06, + "loss": 0.3112773001194, + "memory(GiB)": 41.86, + "step": 717, + "token_acc": 0.8633208756006406, + "train_speed(iter/s)": 0.241152 + }, + { + "epoch": 0.22976, + "grad_norm": 1.3895308866489895, + "learning_rate": 4.990362512234619e-06, + "loss": 0.37629514932632446, + "memory(GiB)": 41.86, + "step": 718, + "token_acc": 0.8763992537313433, + "train_speed(iter/s)": 0.24116 + }, + { + "epoch": 0.23008, + "grad_norm": 0.7005165692441128, + "learning_rate": 4.9902849975286875e-06, + "loss": 0.4847871661186218, + "memory(GiB)": 41.86, + "step": 719, + "token_acc": 0.8423889607589479, + "train_speed(iter/s)": 0.241178 + }, + { + "epoch": 0.2304, + "grad_norm": 0.6743386397509609, + "learning_rate": 4.9902071729499875e-06, + "loss": 0.3656957745552063, + "memory(GiB)": 41.86, + "step": 720, + "token_acc": 0.896329928111994, + "train_speed(iter/s)": 0.241187 + }, + { + "epoch": 0.23072, + "grad_norm": 0.6962408456353995, + "learning_rate": 4.990129038508204e-06, + "loss": 0.2761991024017334, + "memory(GiB)": 41.86, + "step": 721, + "token_acc": 0.9480326651818857, + "train_speed(iter/s)": 0.241177 + }, + { + "epoch": 0.23104, + "grad_norm": 0.6613538662665709, + "learning_rate": 4.990050594213059e-06, + "loss": 0.3765658438205719, + "memory(GiB)": 41.86, + "step": 722, + "token_acc": 0.8843237524246165, + "train_speed(iter/s)": 0.241177 + }, + { + "epoch": 0.23136, + "grad_norm": 0.692821096264721, + "learning_rate": 4.989971840074314e-06, + "loss": 0.3937222957611084, + "memory(GiB)": 41.86, + "step": 723, + "token_acc": 0.8708718626155878, + "train_speed(iter/s)": 0.241175 + }, + { + "epoch": 0.23168, + "grad_norm": 0.669704367795362, + "learning_rate": 4.989892776101767e-06, + "loss": 0.39325904846191406, + "memory(GiB)": 41.86, + "step": 724, + "token_acc": 0.8430664684646422, + "train_speed(iter/s)": 0.241179 + }, + { + "epoch": 0.232, + "grad_norm": 0.7396164844927292, + "learning_rate": 4.989813402305257e-06, + "loss": 0.33057376742362976, + "memory(GiB)": 41.86, + "step": 725, + "token_acc": 0.9015617605300521, + "train_speed(iter/s)": 0.241192 + }, + { + "epoch": 0.23232, + "grad_norm": 0.6959833975600347, + "learning_rate": 4.9897337186946614e-06, + "loss": 0.48343226313591003, + "memory(GiB)": 41.86, + "step": 726, + "token_acc": 0.8811978399607265, + "train_speed(iter/s)": 0.2412 + }, + { + "epoch": 0.23264, + "grad_norm": 0.7517759667828947, + "learning_rate": 4.989653725279895e-06, + 
"loss": 0.3619033694267273, + "memory(GiB)": 41.86, + "step": 727, + "token_acc": 0.9105952654562169, + "train_speed(iter/s)": 0.241212 + }, + { + "epoch": 0.23296, + "grad_norm": 0.679091624494513, + "learning_rate": 4.989573422070911e-06, + "loss": 0.35209378600120544, + "memory(GiB)": 41.86, + "step": 728, + "token_acc": 0.9167331737164139, + "train_speed(iter/s)": 0.241207 + }, + { + "epoch": 0.23328, + "grad_norm": 0.6581875098344869, + "learning_rate": 4.989492809077703e-06, + "loss": 0.32696542143821716, + "memory(GiB)": 41.86, + "step": 729, + "token_acc": 0.908515686791458, + "train_speed(iter/s)": 0.241223 + }, + { + "epoch": 0.2336, + "grad_norm": 0.712515495437383, + "learning_rate": 4.989411886310301e-06, + "loss": 0.42448902130126953, + "memory(GiB)": 41.86, + "step": 730, + "token_acc": 0.8153745072273325, + "train_speed(iter/s)": 0.241234 + }, + { + "epoch": 0.23392, + "grad_norm": 0.6264798279474986, + "learning_rate": 4.989330653778775e-06, + "loss": 0.3822171688079834, + "memory(GiB)": 41.86, + "step": 731, + "token_acc": 0.8916116870876531, + "train_speed(iter/s)": 0.241233 + }, + { + "epoch": 0.23424, + "grad_norm": 0.653708236217313, + "learning_rate": 4.989249111493232e-06, + "loss": 0.3497483730316162, + "memory(GiB)": 41.86, + "step": 732, + "token_acc": 0.8794877658358107, + "train_speed(iter/s)": 0.241219 + }, + { + "epoch": 0.23456, + "grad_norm": 0.6870266358600734, + "learning_rate": 4.989167259463819e-06, + "loss": 0.3854964077472687, + "memory(GiB)": 41.86, + "step": 733, + "token_acc": 0.8794093519278097, + "train_speed(iter/s)": 0.241233 + }, + { + "epoch": 0.23488, + "grad_norm": 0.7211605122105674, + "learning_rate": 4.989085097700721e-06, + "loss": 0.4352648854255676, + "memory(GiB)": 41.86, + "step": 734, + "token_acc": 0.8755118755118755, + "train_speed(iter/s)": 0.241227 + }, + { + "epoch": 0.2352, + "grad_norm": 0.6449815787597096, + "learning_rate": 4.989002626214162e-06, + "loss": 0.45732951164245605, + "memory(GiB)": 41.86, + "step": 735, + "token_acc": 0.8550685668190374, + "train_speed(iter/s)": 0.241224 + }, + { + "epoch": 0.23552, + "grad_norm": 0.6825320982404367, + "learning_rate": 4.988919845014404e-06, + "loss": 0.3792175352573395, + "memory(GiB)": 41.86, + "step": 736, + "token_acc": 0.9140625, + "train_speed(iter/s)": 0.241215 + }, + { + "epoch": 0.23584, + "grad_norm": 0.6569404038097346, + "learning_rate": 4.988836754111748e-06, + "loss": 0.4009462594985962, + "memory(GiB)": 41.86, + "step": 737, + "token_acc": 0.8227104633456602, + "train_speed(iter/s)": 0.241228 + }, + { + "epoch": 0.23616, + "grad_norm": 0.763748339126226, + "learning_rate": 4.988753353516533e-06, + "loss": 0.4065232276916504, + "memory(GiB)": 41.86, + "step": 738, + "token_acc": 0.872663139329806, + "train_speed(iter/s)": 0.241244 + }, + { + "epoch": 0.23648, + "grad_norm": 0.6484439905928115, + "learning_rate": 4.9886696432391355e-06, + "loss": 0.36816778779029846, + "memory(GiB)": 41.86, + "step": 739, + "token_acc": 0.9186390532544378, + "train_speed(iter/s)": 0.241237 + }, + { + "epoch": 0.2368, + "grad_norm": 0.7449373468515752, + "learning_rate": 4.988585623289973e-06, + "loss": 0.35024338960647583, + "memory(GiB)": 41.86, + "step": 740, + "token_acc": 0.9211914365497983, + "train_speed(iter/s)": 0.241258 + }, + { + "epoch": 0.23712, + "grad_norm": 0.665838614086917, + "learning_rate": 4.988501293679501e-06, + "loss": 0.3503490090370178, + "memory(GiB)": 41.86, + "step": 741, + "token_acc": 0.867621776504298, + "train_speed(iter/s)": 0.241269 + }, + { + 
"epoch": 0.23744, + "grad_norm": 0.6455009166314636, + "learning_rate": 4.988416654418211e-06, + "loss": 0.3522324562072754, + "memory(GiB)": 41.86, + "step": 742, + "token_acc": 0.885190976100067, + "train_speed(iter/s)": 0.241277 + }, + { + "epoch": 0.23776, + "grad_norm": 0.6837894337396082, + "learning_rate": 4.988331705516637e-06, + "loss": 0.3517313599586487, + "memory(GiB)": 41.86, + "step": 743, + "token_acc": 0.92005772005772, + "train_speed(iter/s)": 0.24127 + }, + { + "epoch": 0.23808, + "grad_norm": 0.6999495531834725, + "learning_rate": 4.988246446985348e-06, + "loss": 0.4222472310066223, + "memory(GiB)": 41.86, + "step": 744, + "token_acc": 0.8715296679368536, + "train_speed(iter/s)": 0.241275 + }, + { + "epoch": 0.2384, + "grad_norm": 0.6960290683825049, + "learning_rate": 4.988160878834953e-06, + "loss": 0.3205401599407196, + "memory(GiB)": 41.86, + "step": 745, + "token_acc": 0.9015350056158742, + "train_speed(iter/s)": 0.241287 + }, + { + "epoch": 0.23872, + "grad_norm": 0.7282264763228963, + "learning_rate": 4.9880750010761e-06, + "loss": 0.3726102411746979, + "memory(GiB)": 41.86, + "step": 746, + "token_acc": 0.8978449482227819, + "train_speed(iter/s)": 0.2413 + }, + { + "epoch": 0.23904, + "grad_norm": 0.6621095431182941, + "learning_rate": 4.987988813719474e-06, + "loss": 0.3230005204677582, + "memory(GiB)": 41.86, + "step": 747, + "token_acc": 0.8990590248075278, + "train_speed(iter/s)": 0.241308 + }, + { + "epoch": 0.23936, + "grad_norm": 0.7044842055217044, + "learning_rate": 4.987902316775801e-06, + "loss": 0.431286096572876, + "memory(GiB)": 41.86, + "step": 748, + "token_acc": 0.9382022471910112, + "train_speed(iter/s)": 0.241307 + }, + { + "epoch": 0.23968, + "grad_norm": 0.7492547891098454, + "learning_rate": 4.987815510255843e-06, + "loss": 0.41462385654449463, + "memory(GiB)": 41.86, + "step": 749, + "token_acc": 0.8279151943462898, + "train_speed(iter/s)": 0.241316 + }, + { + "epoch": 0.24, + "grad_norm": 0.754762540271955, + "learning_rate": 4.987728394170403e-06, + "loss": 0.36191433668136597, + "memory(GiB)": 41.86, + "step": 750, + "token_acc": 0.9187082405345212, + "train_speed(iter/s)": 0.241327 + }, + { + "epoch": 0.24032, + "grad_norm": 0.7456847776182678, + "learning_rate": 4.987640968530319e-06, + "loss": 0.4400700330734253, + "memory(GiB)": 41.86, + "step": 751, + "token_acc": 0.8670796958603211, + "train_speed(iter/s)": 0.241337 + }, + { + "epoch": 0.24064, + "grad_norm": 0.6840735503303398, + "learning_rate": 4.987553233346471e-06, + "loss": 0.36238688230514526, + "memory(GiB)": 41.86, + "step": 752, + "token_acc": 0.8555758683729433, + "train_speed(iter/s)": 0.241353 + }, + { + "epoch": 0.24096, + "grad_norm": 0.6784248987588408, + "learning_rate": 4.987465188629775e-06, + "loss": 0.42072951793670654, + "memory(GiB)": 41.86, + "step": 753, + "token_acc": 0.8812056737588653, + "train_speed(iter/s)": 0.241355 + }, + { + "epoch": 0.24128, + "grad_norm": 0.7320781072838469, + "learning_rate": 4.987376834391188e-06, + "loss": 0.4233395755290985, + "memory(GiB)": 41.86, + "step": 754, + "token_acc": 0.8050464175196382, + "train_speed(iter/s)": 0.241367 + }, + { + "epoch": 0.2416, + "grad_norm": 0.7031800247150163, + "learning_rate": 4.9872881706417034e-06, + "loss": 0.43180492520332336, + "memory(GiB)": 41.86, + "step": 755, + "token_acc": 0.8751970572779821, + "train_speed(iter/s)": 0.24138 + }, + { + "epoch": 0.24192, + "grad_norm": 0.6810445635931163, + "learning_rate": 4.987199197392354e-06, + "loss": 0.4446945786476135, + "memory(GiB)": 41.86, 
+ "step": 756, + "token_acc": 0.921304347826087, + "train_speed(iter/s)": 0.241377 + }, + { + "epoch": 0.24224, + "grad_norm": 0.772729495236444, + "learning_rate": 4.987109914654211e-06, + "loss": 0.3828134536743164, + "memory(GiB)": 41.86, + "step": 757, + "token_acc": 0.9398355754857997, + "train_speed(iter/s)": 0.241392 + }, + { + "epoch": 0.24256, + "grad_norm": 0.7171051894635622, + "learning_rate": 4.987020322438384e-06, + "loss": 0.5047861337661743, + "memory(GiB)": 41.86, + "step": 758, + "token_acc": 0.8178846602848471, + "train_speed(iter/s)": 0.241401 + }, + { + "epoch": 0.24288, + "grad_norm": 0.6580636309149671, + "learning_rate": 4.986930420756021e-06, + "loss": 0.4189501106739044, + "memory(GiB)": 41.86, + "step": 759, + "token_acc": 0.8768400392541708, + "train_speed(iter/s)": 0.241413 + }, + { + "epoch": 0.2432, + "grad_norm": 0.6569513565247892, + "learning_rate": 4.9868402096183085e-06, + "loss": 0.36138713359832764, + "memory(GiB)": 41.86, + "step": 760, + "token_acc": 0.819581428915083, + "train_speed(iter/s)": 0.241414 + }, + { + "epoch": 0.24352, + "grad_norm": 1.841501369395737, + "learning_rate": 4.9867496890364734e-06, + "loss": 0.4113994240760803, + "memory(GiB)": 41.86, + "step": 761, + "token_acc": 0.8580765639589168, + "train_speed(iter/s)": 0.241398 + }, + { + "epoch": 0.24384, + "grad_norm": 0.6581233382677719, + "learning_rate": 4.986658859021777e-06, + "loss": 0.3386306166648865, + "memory(GiB)": 41.86, + "step": 762, + "token_acc": 0.9150157378263285, + "train_speed(iter/s)": 0.241375 + }, + { + "epoch": 0.24416, + "grad_norm": 0.6944938239300734, + "learning_rate": 4.9865677195855235e-06, + "loss": 0.3702167868614197, + "memory(GiB)": 41.86, + "step": 763, + "token_acc": 0.9303818857722204, + "train_speed(iter/s)": 0.241383 + }, + { + "epoch": 0.24448, + "grad_norm": 0.7663926229244526, + "learning_rate": 4.9864762707390525e-06, + "loss": 0.4663710594177246, + "memory(GiB)": 41.86, + "step": 764, + "token_acc": 0.8321114369501467, + "train_speed(iter/s)": 0.241384 + }, + { + "epoch": 0.2448, + "grad_norm": 0.6475569117266546, + "learning_rate": 4.986384512493743e-06, + "loss": 0.47731685638427734, + "memory(GiB)": 41.86, + "step": 765, + "token_acc": 0.8602356810084955, + "train_speed(iter/s)": 0.241362 + }, + { + "epoch": 0.24512, + "grad_norm": 0.6365655931023085, + "learning_rate": 4.986292444861014e-06, + "loss": 0.36407917737960815, + "memory(GiB)": 41.86, + "step": 766, + "token_acc": 0.9302940204823258, + "train_speed(iter/s)": 0.241361 + }, + { + "epoch": 0.24544, + "grad_norm": 0.6555825694329673, + "learning_rate": 4.98620006785232e-06, + "loss": 0.4675138592720032, + "memory(GiB)": 41.86, + "step": 767, + "token_acc": 0.857653201428964, + "train_speed(iter/s)": 0.241357 + }, + { + "epoch": 0.24576, + "grad_norm": 0.7086865197701768, + "learning_rate": 4.986107381479158e-06, + "loss": 0.3552117943763733, + "memory(GiB)": 41.86, + "step": 768, + "token_acc": 0.9206049149338374, + "train_speed(iter/s)": 0.241363 + }, + { + "epoch": 0.24608, + "grad_norm": 1.08562762378281, + "learning_rate": 4.986014385753058e-06, + "loss": 0.38791224360466003, + "memory(GiB)": 41.86, + "step": 769, + "token_acc": 0.9213813372520205, + "train_speed(iter/s)": 0.241346 + }, + { + "epoch": 0.2464, + "grad_norm": 0.7123815211398292, + "learning_rate": 4.9859210806855955e-06, + "loss": 0.3464595675468445, + "memory(GiB)": 41.86, + "step": 770, + "token_acc": 0.8723623262995368, + "train_speed(iter/s)": 0.241362 + }, + { + "epoch": 0.24672, + "grad_norm": 
0.6783356082662719, + "learning_rate": 4.985827466288378e-06, + "loss": 0.3627921938896179, + "memory(GiB)": 41.86, + "step": 771, + "token_acc": 0.9193635382955772, + "train_speed(iter/s)": 0.241367 + }, + { + "epoch": 0.24704, + "grad_norm": 0.7338676216265779, + "learning_rate": 4.985733542573055e-06, + "loss": 0.35144561529159546, + "memory(GiB)": 41.86, + "step": 772, + "token_acc": 0.8917599770312948, + "train_speed(iter/s)": 0.241367 + }, + { + "epoch": 0.24736, + "grad_norm": 0.6444559516121929, + "learning_rate": 4.985639309551315e-06, + "loss": 0.33224761486053467, + "memory(GiB)": 41.86, + "step": 773, + "token_acc": 0.8685483870967742, + "train_speed(iter/s)": 0.241382 + }, + { + "epoch": 0.24768, + "grad_norm": 0.6336186523131149, + "learning_rate": 4.98554476723488e-06, + "loss": 0.3296525180339813, + "memory(GiB)": 41.86, + "step": 774, + "token_acc": 0.9042929292929293, + "train_speed(iter/s)": 0.241386 + }, + { + "epoch": 0.248, + "grad_norm": 0.6505253103033791, + "learning_rate": 4.9854499156355175e-06, + "loss": 0.4456222653388977, + "memory(GiB)": 41.86, + "step": 775, + "token_acc": 0.9387755102040817, + "train_speed(iter/s)": 0.241373 + }, + { + "epoch": 0.24832, + "grad_norm": 0.6783573497958472, + "learning_rate": 4.98535475476503e-06, + "loss": 0.37147411704063416, + "memory(GiB)": 41.86, + "step": 776, + "token_acc": 0.8556048131728943, + "train_speed(iter/s)": 0.241331 + }, + { + "epoch": 0.24864, + "grad_norm": 0.6892156662907595, + "learning_rate": 4.9852592846352565e-06, + "loss": 0.4287664294242859, + "memory(GiB)": 41.86, + "step": 777, + "token_acc": 0.9594972067039106, + "train_speed(iter/s)": 0.241335 + }, + { + "epoch": 0.24896, + "grad_norm": 0.642461448861593, + "learning_rate": 4.9851635052580784e-06, + "loss": 0.34628570079803467, + "memory(GiB)": 41.86, + "step": 778, + "token_acc": 0.8839709136895353, + "train_speed(iter/s)": 0.241312 + }, + { + "epoch": 0.24928, + "grad_norm": 0.800238947068369, + "learning_rate": 4.985067416645412e-06, + "loss": 0.4460781216621399, + "memory(GiB)": 41.86, + "step": 779, + "token_acc": 0.8925554382259767, + "train_speed(iter/s)": 0.241325 + }, + { + "epoch": 0.2496, + "grad_norm": 0.6653529839690546, + "learning_rate": 4.984971018809217e-06, + "loss": 0.4186139702796936, + "memory(GiB)": 41.86, + "step": 780, + "token_acc": 0.8422459893048129, + "train_speed(iter/s)": 0.241334 + }, + { + "epoch": 0.24992, + "grad_norm": 0.6887542780956875, + "learning_rate": 4.984874311761485e-06, + "loss": 0.375389039516449, + "memory(GiB)": 41.86, + "step": 781, + "token_acc": 0.8824769433465086, + "train_speed(iter/s)": 0.241337 + }, + { + "epoch": 0.25024, + "grad_norm": 0.6491334878149633, + "learning_rate": 4.984777295514252e-06, + "loss": 0.4598641097545624, + "memory(GiB)": 41.86, + "step": 782, + "token_acc": 0.882145998240985, + "train_speed(iter/s)": 0.241346 + }, + { + "epoch": 0.25056, + "grad_norm": 0.6657680858295223, + "learning_rate": 4.984679970079589e-06, + "loss": 0.40942925214767456, + "memory(GiB)": 41.86, + "step": 783, + "token_acc": 0.8514492753623188, + "train_speed(iter/s)": 0.241342 + }, + { + "epoch": 0.25088, + "grad_norm": 0.7525216471147947, + "learning_rate": 4.984582335469606e-06, + "loss": 0.4095529317855835, + "memory(GiB)": 41.86, + "step": 784, + "token_acc": 0.8382521162205445, + "train_speed(iter/s)": 0.241355 + }, + { + "epoch": 0.2512, + "grad_norm": 0.634684240047649, + "learning_rate": 4.984484391696453e-06, + "loss": 0.4507801830768585, + "memory(GiB)": 41.86, + "step": 785, + 
"token_acc": 0.9057507987220448, + "train_speed(iter/s)": 0.241351 + }, + { + "epoch": 0.25152, + "grad_norm": 0.6538312727816594, + "learning_rate": 4.984386138772316e-06, + "loss": 0.3365633487701416, + "memory(GiB)": 41.86, + "step": 786, + "token_acc": 0.9039064727687482, + "train_speed(iter/s)": 0.24136 + }, + { + "epoch": 0.25184, + "grad_norm": 0.7525592140274128, + "learning_rate": 4.984287576709422e-06, + "loss": 0.3403449058532715, + "memory(GiB)": 41.86, + "step": 787, + "token_acc": 0.9132356361944638, + "train_speed(iter/s)": 0.241365 + }, + { + "epoch": 0.25216, + "grad_norm": 0.6605425015786026, + "learning_rate": 4.984188705520035e-06, + "loss": 0.3794463276863098, + "memory(GiB)": 41.86, + "step": 788, + "token_acc": 0.862798131300713, + "train_speed(iter/s)": 0.24136 + }, + { + "epoch": 0.25248, + "grad_norm": 0.6567516781038947, + "learning_rate": 4.984089525216458e-06, + "loss": 0.436498761177063, + "memory(GiB)": 41.86, + "step": 789, + "token_acc": 0.8891170431211499, + "train_speed(iter/s)": 0.241368 + }, + { + "epoch": 0.2528, + "grad_norm": 0.6250929727139392, + "learning_rate": 4.983990035811032e-06, + "loss": 0.3370034098625183, + "memory(GiB)": 41.86, + "step": 790, + "token_acc": 0.8714312027997789, + "train_speed(iter/s)": 0.241349 + }, + { + "epoch": 0.25312, + "grad_norm": 0.707631890563472, + "learning_rate": 4.983890237316137e-06, + "loss": 0.3521242141723633, + "memory(GiB)": 41.86, + "step": 791, + "token_acc": 0.8526678141135973, + "train_speed(iter/s)": 0.24134 + }, + { + "epoch": 0.25344, + "grad_norm": 0.6813724909580211, + "learning_rate": 4.98379012974419e-06, + "loss": 0.4086916148662567, + "memory(GiB)": 41.86, + "step": 792, + "token_acc": 0.8824358612912321, + "train_speed(iter/s)": 0.241346 + }, + { + "epoch": 0.25376, + "grad_norm": 0.6535661275566989, + "learning_rate": 4.98368971310765e-06, + "loss": 0.2912856340408325, + "memory(GiB)": 41.86, + "step": 793, + "token_acc": 0.9539170506912442, + "train_speed(iter/s)": 0.24133 + }, + { + "epoch": 0.25408, + "grad_norm": 0.7034241854577231, + "learning_rate": 4.98358898741901e-06, + "loss": 0.41266027092933655, + "memory(GiB)": 41.86, + "step": 794, + "token_acc": 0.849502487562189, + "train_speed(iter/s)": 0.241343 + }, + { + "epoch": 0.2544, + "grad_norm": 0.7919564929282495, + "learning_rate": 4.9834879526908055e-06, + "loss": 0.4953688979148865, + "memory(GiB)": 41.86, + "step": 795, + "token_acc": 0.8152119700748129, + "train_speed(iter/s)": 0.241324 + }, + { + "epoch": 0.25472, + "grad_norm": 0.707477914261136, + "learning_rate": 4.9833866089356065e-06, + "loss": 0.43112221360206604, + "memory(GiB)": 41.86, + "step": 796, + "token_acc": 0.8519607843137255, + "train_speed(iter/s)": 0.241332 + }, + { + "epoch": 0.25504, + "grad_norm": 0.6815745086398735, + "learning_rate": 4.983284956166024e-06, + "loss": 0.3807457685470581, + "memory(GiB)": 41.86, + "step": 797, + "token_acc": 0.8558266932270916, + "train_speed(iter/s)": 0.241328 + }, + { + "epoch": 0.25536, + "grad_norm": 0.6981132809686224, + "learning_rate": 4.983182994394707e-06, + "loss": 0.48848676681518555, + "memory(GiB)": 41.86, + "step": 798, + "token_acc": 0.8606431852986217, + "train_speed(iter/s)": 0.241332 + }, + { + "epoch": 0.25568, + "grad_norm": 0.6959000766229894, + "learning_rate": 4.983080723634344e-06, + "loss": 0.41059327125549316, + "memory(GiB)": 41.86, + "step": 799, + "token_acc": 0.8616296947067867, + "train_speed(iter/s)": 0.241332 + }, + { + "epoch": 0.256, + "grad_norm": 0.6993805170109814, + 
"learning_rate": 4.98297814389766e-06, + "loss": 0.44298049807548523, + "memory(GiB)": 41.86, + "step": 800, + "token_acc": 0.8124255657006749, + "train_speed(iter/s)": 0.241342 + }, + { + "epoch": 0.25632, + "grad_norm": 0.6844304054831327, + "learning_rate": 4.982875255197419e-06, + "loss": 0.38893401622772217, + "memory(GiB)": 41.86, + "step": 801, + "token_acc": 0.9067966733581876, + "train_speed(iter/s)": 0.241332 + }, + { + "epoch": 0.25664, + "grad_norm": 0.6888090745147593, + "learning_rate": 4.982772057546424e-06, + "loss": 0.36828774213790894, + "memory(GiB)": 41.86, + "step": 802, + "token_acc": 0.9373626373626374, + "train_speed(iter/s)": 0.24132 + }, + { + "epoch": 0.25696, + "grad_norm": 0.6753780411690784, + "learning_rate": 4.982668550957516e-06, + "loss": 0.32084327936172485, + "memory(GiB)": 41.86, + "step": 803, + "token_acc": 0.9209710743801653, + "train_speed(iter/s)": 0.241322 + }, + { + "epoch": 0.25728, + "grad_norm": 0.7717722337181671, + "learning_rate": 4.982564735443574e-06, + "loss": 0.33746790885925293, + "memory(GiB)": 41.86, + "step": 804, + "token_acc": 0.9091801669121257, + "train_speed(iter/s)": 0.241339 + }, + { + "epoch": 0.2576, + "grad_norm": 0.6483289121322274, + "learning_rate": 4.982460611017518e-06, + "loss": 0.3582516610622406, + "memory(GiB)": 41.86, + "step": 805, + "token_acc": 0.8964165733482643, + "train_speed(iter/s)": 0.241347 + }, + { + "epoch": 0.25792, + "grad_norm": 0.6775382485165395, + "learning_rate": 4.982356177692303e-06, + "loss": 0.3853127360343933, + "memory(GiB)": 41.86, + "step": 806, + "token_acc": 0.8727388130752142, + "train_speed(iter/s)": 0.24135 + }, + { + "epoch": 0.25824, + "grad_norm": 0.7090027184658955, + "learning_rate": 4.982251435480924e-06, + "loss": 0.40342938899993896, + "memory(GiB)": 41.86, + "step": 807, + "token_acc": 0.9217588102166182, + "train_speed(iter/s)": 0.241358 + }, + { + "epoch": 0.25856, + "grad_norm": 0.69050605610944, + "learning_rate": 4.982146384396414e-06, + "loss": 0.4222428798675537, + "memory(GiB)": 41.86, + "step": 808, + "token_acc": 0.8914838405284266, + "train_speed(iter/s)": 0.24136 + }, + { + "epoch": 0.25888, + "grad_norm": 0.7312880636066453, + "learning_rate": 4.982041024451844e-06, + "loss": 0.3391638994216919, + "memory(GiB)": 41.86, + "step": 809, + "token_acc": 0.9014503532911863, + "train_speed(iter/s)": 0.24137 + }, + { + "epoch": 0.2592, + "grad_norm": 0.726917616596126, + "learning_rate": 4.9819353556603275e-06, + "loss": 0.3670068681240082, + "memory(GiB)": 41.86, + "step": 810, + "token_acc": 0.8563027358731782, + "train_speed(iter/s)": 0.241326 + }, + { + "epoch": 0.25952, + "grad_norm": 0.7153282293704708, + "learning_rate": 4.981829378035011e-06, + "loss": 0.39773887395858765, + "memory(GiB)": 41.86, + "step": 811, + "token_acc": 0.8734496124031008, + "train_speed(iter/s)": 0.241321 + }, + { + "epoch": 0.25984, + "grad_norm": 0.6730086556977734, + "learning_rate": 4.981723091589081e-06, + "loss": 0.4113270044326782, + "memory(GiB)": 41.86, + "step": 812, + "token_acc": 0.837037037037037, + "train_speed(iter/s)": 0.241323 + }, + { + "epoch": 0.26016, + "grad_norm": 0.6684345083277575, + "learning_rate": 4.981616496335765e-06, + "loss": 0.39969900250434875, + "memory(GiB)": 41.86, + "step": 813, + "token_acc": 0.9255411255411256, + "train_speed(iter/s)": 0.241308 + }, + { + "epoch": 0.26048, + "grad_norm": 0.7425312709228351, + "learning_rate": 4.981509592288324e-06, + "loss": 0.3884389400482178, + "memory(GiB)": 41.86, + "step": 814, + "token_acc": 
0.8001942218985191, + "train_speed(iter/s)": 0.241307 + }, + { + "epoch": 0.2608, + "grad_norm": 0.705543637913348, + "learning_rate": 4.981402379460063e-06, + "loss": 0.41326478123664856, + "memory(GiB)": 41.86, + "step": 815, + "token_acc": 0.8603896103896104, + "train_speed(iter/s)": 0.241318 + }, + { + "epoch": 0.26112, + "grad_norm": 0.6905577692197622, + "learning_rate": 4.981294857864321e-06, + "loss": 0.3848215341567993, + "memory(GiB)": 41.86, + "step": 816, + "token_acc": 0.89366391184573, + "train_speed(iter/s)": 0.241314 + }, + { + "epoch": 0.26144, + "grad_norm": 0.6915911520138801, + "learning_rate": 4.981187027514479e-06, + "loss": 0.32628118991851807, + "memory(GiB)": 41.86, + "step": 817, + "token_acc": 0.8826461259628455, + "train_speed(iter/s)": 0.241305 + }, + { + "epoch": 0.26176, + "grad_norm": 0.63707654148838, + "learning_rate": 4.981078888423953e-06, + "loss": 0.3992425203323364, + "memory(GiB)": 41.86, + "step": 818, + "token_acc": 0.860707919531449, + "train_speed(iter/s)": 0.241315 + }, + { + "epoch": 0.26208, + "grad_norm": 2.897692433516807, + "learning_rate": 4.980970440606199e-06, + "loss": 0.4190906286239624, + "memory(GiB)": 41.86, + "step": 819, + "token_acc": 0.8718359500160205, + "train_speed(iter/s)": 0.24132 + }, + { + "epoch": 0.2624, + "grad_norm": 0.7168804356956344, + "learning_rate": 4.980861684074713e-06, + "loss": 0.30127066373825073, + "memory(GiB)": 41.86, + "step": 820, + "token_acc": 0.90494200706001, + "train_speed(iter/s)": 0.241316 + }, + { + "epoch": 0.26272, + "grad_norm": 0.6868062499033037, + "learning_rate": 4.980752618843027e-06, + "loss": 0.47249680757522583, + "memory(GiB)": 41.86, + "step": 821, + "token_acc": 0.8727156139788393, + "train_speed(iter/s)": 0.241314 + }, + { + "epoch": 0.26304, + "grad_norm": 0.7845338213721126, + "learning_rate": 4.980643244924712e-06, + "loss": 0.3796151578426361, + "memory(GiB)": 41.86, + "step": 822, + "token_acc": 0.9494184473897755, + "train_speed(iter/s)": 0.241324 + }, + { + "epoch": 0.26336, + "grad_norm": 0.6912670439373593, + "learning_rate": 4.980533562333377e-06, + "loss": 0.42436304688453674, + "memory(GiB)": 41.86, + "step": 823, + "token_acc": 0.8411049723756906, + "train_speed(iter/s)": 0.241318 + }, + { + "epoch": 0.26368, + "grad_norm": 0.6996169595456391, + "learning_rate": 4.980423571082672e-06, + "loss": 0.3788377642631531, + "memory(GiB)": 41.86, + "step": 824, + "token_acc": 0.9317668323542703, + "train_speed(iter/s)": 0.241317 + }, + { + "epoch": 0.264, + "grad_norm": 0.6936779498744393, + "learning_rate": 4.980313271186282e-06, + "loss": 0.379010409116745, + "memory(GiB)": 41.86, + "step": 825, + "token_acc": 0.8841698841698842, + "train_speed(iter/s)": 0.241322 + }, + { + "epoch": 0.26432, + "grad_norm": 0.72951035377029, + "learning_rate": 4.980202662657933e-06, + "loss": 0.4177994728088379, + "memory(GiB)": 41.86, + "step": 826, + "token_acc": 0.8436003830194702, + "train_speed(iter/s)": 0.241332 + }, + { + "epoch": 0.26464, + "grad_norm": 0.6911541112352191, + "learning_rate": 4.980091745511388e-06, + "loss": 0.42674410343170166, + "memory(GiB)": 41.86, + "step": 827, + "token_acc": 0.9075520833333334, + "train_speed(iter/s)": 0.241336 + }, + { + "epoch": 0.26496, + "grad_norm": 0.8651348661067667, + "learning_rate": 4.979980519760447e-06, + "loss": 0.400503933429718, + "memory(GiB)": 41.86, + "step": 828, + "token_acc": 0.9559322033898305, + "train_speed(iter/s)": 0.241336 + }, + { + "epoch": 0.26528, + "grad_norm": 0.6474931160281914, + "learning_rate": 
4.979868985418953e-06, + "loss": 0.3410487473011017, + "memory(GiB)": 41.86, + "step": 829, + "token_acc": 0.8765267599378193, + "train_speed(iter/s)": 0.241341 + }, + { + "epoch": 0.2656, + "grad_norm": 0.6808174378997404, + "learning_rate": 4.979757142500782e-06, + "loss": 0.4173216223716736, + "memory(GiB)": 41.86, + "step": 830, + "token_acc": 0.9035258490157906, + "train_speed(iter/s)": 0.24133 + }, + { + "epoch": 0.26592, + "grad_norm": 0.6214789550030165, + "learning_rate": 4.979644991019852e-06, + "loss": 0.30535757541656494, + "memory(GiB)": 41.86, + "step": 831, + "token_acc": 0.9414239482200647, + "train_speed(iter/s)": 0.24134 + }, + { + "epoch": 0.26624, + "grad_norm": 0.7182890601191392, + "learning_rate": 4.979532530990118e-06, + "loss": 0.4334990382194519, + "memory(GiB)": 41.86, + "step": 832, + "token_acc": 0.9479843953185956, + "train_speed(iter/s)": 0.241323 + }, + { + "epoch": 0.26656, + "grad_norm": 0.7854666212182484, + "learning_rate": 4.979419762425576e-06, + "loss": 0.3788972496986389, + "memory(GiB)": 41.86, + "step": 833, + "token_acc": 0.9253781512605042, + "train_speed(iter/s)": 0.241336 + }, + { + "epoch": 0.26688, + "grad_norm": 0.7282545221992143, + "learning_rate": 4.9793066853402535e-06, + "loss": 0.39225584268569946, + "memory(GiB)": 41.86, + "step": 834, + "token_acc": 0.8950012559658377, + "train_speed(iter/s)": 0.241348 + }, + { + "epoch": 0.2672, + "grad_norm": 0.6812636918428608, + "learning_rate": 4.979193299748225e-06, + "loss": 0.4447840750217438, + "memory(GiB)": 41.86, + "step": 835, + "token_acc": 0.7971737323358271, + "train_speed(iter/s)": 0.24134 + }, + { + "epoch": 0.26752, + "grad_norm": 0.7918577363539823, + "learning_rate": 4.9790796056635986e-06, + "loss": 0.4043129086494446, + "memory(GiB)": 41.86, + "step": 836, + "token_acc": 0.9116561181434599, + "train_speed(iter/s)": 0.241344 + }, + { + "epoch": 0.26784, + "grad_norm": 0.7347274033246837, + "learning_rate": 4.97896560310052e-06, + "loss": 0.4476478397846222, + "memory(GiB)": 41.86, + "step": 837, + "token_acc": 0.8962395543175488, + "train_speed(iter/s)": 0.241344 + }, + { + "epoch": 0.26816, + "grad_norm": 0.7158952317937803, + "learning_rate": 4.978851292073175e-06, + "loss": 0.4438498616218567, + "memory(GiB)": 41.86, + "step": 838, + "token_acc": 0.8884364820846905, + "train_speed(iter/s)": 0.24135 + }, + { + "epoch": 0.26848, + "grad_norm": 0.6663404870479899, + "learning_rate": 4.978736672595789e-06, + "loss": 0.41425442695617676, + "memory(GiB)": 41.86, + "step": 839, + "token_acc": 0.8471512770137525, + "train_speed(iter/s)": 0.241364 + }, + { + "epoch": 0.2688, + "grad_norm": 0.7056818448545903, + "learning_rate": 4.978621744682623e-06, + "loss": 0.4381216764450073, + "memory(GiB)": 41.86, + "step": 840, + "token_acc": 0.918200408997955, + "train_speed(iter/s)": 0.241359 + }, + { + "epoch": 0.26912, + "grad_norm": 0.7104931244041744, + "learning_rate": 4.97850650834798e-06, + "loss": 0.4284476935863495, + "memory(GiB)": 41.86, + "step": 841, + "token_acc": 0.7816764132553606, + "train_speed(iter/s)": 0.241362 + }, + { + "epoch": 0.26944, + "grad_norm": 0.6706265453124689, + "learning_rate": 4.978390963606197e-06, + "loss": 0.4102025032043457, + "memory(GiB)": 41.86, + "step": 842, + "token_acc": 0.9453551912568307, + "train_speed(iter/s)": 0.241353 + }, + { + "epoch": 0.26976, + "grad_norm": 0.6865381757157815, + "learning_rate": 4.9782751104716525e-06, + "loss": 0.37260371446609497, + "memory(GiB)": 41.86, + "step": 843, + "token_acc": 0.896551724137931, + 
"train_speed(iter/s)": 0.241358 + }, + { + "epoch": 0.27008, + "grad_norm": 0.6948020861222234, + "learning_rate": 4.9781589489587615e-06, + "loss": 0.4424787759780884, + "memory(GiB)": 41.86, + "step": 844, + "token_acc": 0.8721947508558388, + "train_speed(iter/s)": 0.241363 + }, + { + "epoch": 0.2704, + "grad_norm": 0.6737972931101782, + "learning_rate": 4.978042479081979e-06, + "loss": 0.3889673948287964, + "memory(GiB)": 41.86, + "step": 845, + "token_acc": 0.9353233830845771, + "train_speed(iter/s)": 0.241338 + }, + { + "epoch": 0.27072, + "grad_norm": 0.6929477994027078, + "learning_rate": 4.977925700855799e-06, + "loss": 0.38353538513183594, + "memory(GiB)": 41.86, + "step": 846, + "token_acc": 0.903707518022657, + "train_speed(iter/s)": 0.241344 + }, + { + "epoch": 0.27104, + "grad_norm": 0.7011954440399505, + "learning_rate": 4.97780861429475e-06, + "loss": 0.37761881947517395, + "memory(GiB)": 41.86, + "step": 847, + "token_acc": 0.901171875, + "train_speed(iter/s)": 0.24136 + }, + { + "epoch": 0.27136, + "grad_norm": 0.7370393665583859, + "learning_rate": 4.977691219413402e-06, + "loss": 0.4818401038646698, + "memory(GiB)": 41.86, + "step": 848, + "token_acc": 0.9256797583081571, + "train_speed(iter/s)": 0.241364 + }, + { + "epoch": 0.27168, + "grad_norm": 0.7475680733834088, + "learning_rate": 4.977573516226364e-06, + "loss": 0.4123767018318176, + "memory(GiB)": 41.86, + "step": 849, + "token_acc": 0.8808446455505279, + "train_speed(iter/s)": 0.241375 + }, + { + "epoch": 0.272, + "grad_norm": 0.7474821675081605, + "learning_rate": 4.9774555047482805e-06, + "loss": 0.4663333296775818, + "memory(GiB)": 41.86, + "step": 850, + "token_acc": 0.8616370521409845, + "train_speed(iter/s)": 0.241387 + }, + { + "epoch": 0.27232, + "grad_norm": 0.7227493769777527, + "learning_rate": 4.977337184993838e-06, + "loss": 0.4266231656074524, + "memory(GiB)": 41.86, + "step": 851, + "token_acc": 0.9375, + "train_speed(iter/s)": 0.241394 + }, + { + "epoch": 0.27264, + "grad_norm": 0.7309698174390097, + "learning_rate": 4.977218556977758e-06, + "loss": 0.3907462954521179, + "memory(GiB)": 41.86, + "step": 852, + "token_acc": 0.853035143769968, + "train_speed(iter/s)": 0.241394 + }, + { + "epoch": 0.27296, + "grad_norm": 0.6888381853197825, + "learning_rate": 4.977099620714802e-06, + "loss": 0.37958580255508423, + "memory(GiB)": 41.86, + "step": 853, + "token_acc": 0.8546786389413988, + "train_speed(iter/s)": 0.241404 + }, + { + "epoch": 0.27328, + "grad_norm": 0.6969882686487512, + "learning_rate": 4.9769803762197685e-06, + "loss": 0.4280955195426941, + "memory(GiB)": 41.86, + "step": 854, + "token_acc": 0.9014008620689655, + "train_speed(iter/s)": 0.241416 + }, + { + "epoch": 0.2736, + "grad_norm": 0.7008938544446637, + "learning_rate": 4.976860823507497e-06, + "loss": 0.3477323055267334, + "memory(GiB)": 41.86, + "step": 855, + "token_acc": 0.9141705069124424, + "train_speed(iter/s)": 0.241424 + }, + { + "epoch": 0.27392, + "grad_norm": 0.6754437648650162, + "learning_rate": 4.976740962592863e-06, + "loss": 0.3738710284233093, + "memory(GiB)": 41.86, + "step": 856, + "token_acc": 0.9194785276073619, + "train_speed(iter/s)": 0.241423 + }, + { + "epoch": 0.27424, + "grad_norm": 0.667734211719335, + "learning_rate": 4.976620793490781e-06, + "loss": 0.35820943117141724, + "memory(GiB)": 41.86, + "step": 857, + "token_acc": 0.8852619233776388, + "train_speed(iter/s)": 0.241435 + }, + { + "epoch": 0.27456, + "grad_norm": 0.6557798340838397, + "learning_rate": 4.976500316216205e-06, + "loss": 
0.3082352876663208, + "memory(GiB)": 41.86, + "step": 858, + "token_acc": 0.8979591836734694, + "train_speed(iter/s)": 0.241447 + }, + { + "epoch": 0.27488, + "grad_norm": 0.68058949296096, + "learning_rate": 4.976379530784125e-06, + "loss": 0.3448728919029236, + "memory(GiB)": 41.86, + "step": 859, + "token_acc": 0.9538943598925694, + "train_speed(iter/s)": 0.241449 + }, + { + "epoch": 0.2752, + "grad_norm": 0.7756601499198893, + "learning_rate": 4.976258437209571e-06, + "loss": 0.47984373569488525, + "memory(GiB)": 41.86, + "step": 860, + "token_acc": 0.8789907312049433, + "train_speed(iter/s)": 0.241442 + }, + { + "epoch": 0.27552, + "grad_norm": 0.6334807836789396, + "learning_rate": 4.976137035507612e-06, + "loss": 0.4275950789451599, + "memory(GiB)": 41.86, + "step": 861, + "token_acc": 0.8337969401947148, + "train_speed(iter/s)": 0.241431 + }, + { + "epoch": 0.27584, + "grad_norm": 0.7025979774680785, + "learning_rate": 4.976015325693352e-06, + "loss": 0.3921700716018677, + "memory(GiB)": 41.86, + "step": 862, + "token_acc": 0.8516780945507038, + "train_speed(iter/s)": 0.241432 + }, + { + "epoch": 0.27616, + "grad_norm": 0.6751265115688638, + "learning_rate": 4.975893307781938e-06, + "loss": 0.3886798620223999, + "memory(GiB)": 41.86, + "step": 863, + "token_acc": 0.8591885441527446, + "train_speed(iter/s)": 0.241433 + }, + { + "epoch": 0.27648, + "grad_norm": 0.6674161722210321, + "learning_rate": 4.9757709817885525e-06, + "loss": 0.37476640939712524, + "memory(GiB)": 41.86, + "step": 864, + "token_acc": 0.8940772185162725, + "train_speed(iter/s)": 0.241433 + }, + { + "epoch": 0.2768, + "grad_norm": 0.7089221887975327, + "learning_rate": 4.9756483477284166e-06, + "loss": 0.3924105763435364, + "memory(GiB)": 41.86, + "step": 865, + "token_acc": 0.910453808752026, + "train_speed(iter/s)": 0.241435 + }, + { + "epoch": 0.27712, + "grad_norm": 0.662114873025913, + "learning_rate": 4.975525405616789e-06, + "loss": 0.3919695019721985, + "memory(GiB)": 41.86, + "step": 866, + "token_acc": 0.9111014744145707, + "train_speed(iter/s)": 0.241445 + }, + { + "epoch": 0.27744, + "grad_norm": 0.6645344776655074, + "learning_rate": 4.975402155468969e-06, + "loss": 0.42643189430236816, + "memory(GiB)": 41.86, + "step": 867, + "token_acc": 0.8357969151670951, + "train_speed(iter/s)": 0.241441 + }, + { + "epoch": 0.27776, + "grad_norm": 0.7312173531041785, + "learning_rate": 4.975278597300293e-06, + "loss": 0.3553208112716675, + "memory(GiB)": 41.86, + "step": 868, + "token_acc": 0.8707280832095097, + "train_speed(iter/s)": 0.241446 + }, + { + "epoch": 0.27808, + "grad_norm": 0.7590644549950091, + "learning_rate": 4.975154731126135e-06, + "loss": 0.45626798272132874, + "memory(GiB)": 41.86, + "step": 869, + "token_acc": 0.8275613275613276, + "train_speed(iter/s)": 0.241447 + }, + { + "epoch": 0.2784, + "grad_norm": 0.6787017579013671, + "learning_rate": 4.9750305569619085e-06, + "loss": 0.349330872297287, + "memory(GiB)": 41.86, + "step": 870, + "token_acc": 0.9083790133124511, + "train_speed(iter/s)": 0.241456 + }, + { + "epoch": 0.27872, + "grad_norm": 0.698948305076856, + "learning_rate": 4.974906074823064e-06, + "loss": 0.3812295198440552, + "memory(GiB)": 41.86, + "step": 871, + "token_acc": 0.9295494149863082, + "train_speed(iter/s)": 0.24146 + }, + { + "epoch": 0.27904, + "grad_norm": 0.7199487021079038, + "learning_rate": 4.974781284725092e-06, + "loss": 0.34978869557380676, + "memory(GiB)": 41.86, + "step": 872, + "token_acc": 0.8286747780335416, + "train_speed(iter/s)": 0.241473 + }, + { + 
"epoch": 0.27936, + "grad_norm": 0.6865360531047445, + "learning_rate": 4.97465618668352e-06, + "loss": 0.38167816400527954, + "memory(GiB)": 41.86, + "step": 873, + "token_acc": 0.8775993682548039, + "train_speed(iter/s)": 0.241455 + }, + { + "epoch": 0.27968, + "grad_norm": 0.6751513035519379, + "learning_rate": 4.974530780713914e-06, + "loss": 0.3769749402999878, + "memory(GiB)": 41.86, + "step": 874, + "token_acc": 0.8235294117647058, + "train_speed(iter/s)": 0.241464 + }, + { + "epoch": 0.28, + "grad_norm": 0.646982009890138, + "learning_rate": 4.97440506683188e-06, + "loss": 0.40149784088134766, + "memory(GiB)": 41.86, + "step": 875, + "token_acc": 0.8576504026527711, + "train_speed(iter/s)": 0.241465 + }, + { + "epoch": 0.28032, + "grad_norm": 0.8460064247150936, + "learning_rate": 4.974279045053059e-06, + "loss": 0.34345951676368713, + "memory(GiB)": 41.86, + "step": 876, + "token_acc": 0.922945205479452, + "train_speed(iter/s)": 0.241481 + }, + { + "epoch": 0.28064, + "grad_norm": 0.6686883055467452, + "learning_rate": 4.974152715393134e-06, + "loss": 0.3293968737125397, + "memory(GiB)": 41.86, + "step": 877, + "token_acc": 0.9156148575549946, + "train_speed(iter/s)": 0.241494 + }, + { + "epoch": 0.28096, + "grad_norm": 0.6391706717860814, + "learning_rate": 4.974026077867823e-06, + "loss": 0.42422202229499817, + "memory(GiB)": 41.86, + "step": 878, + "token_acc": 0.8443293347873501, + "train_speed(iter/s)": 0.241499 + }, + { + "epoch": 0.28128, + "grad_norm": 0.7055869022720442, + "learning_rate": 4.973899132492886e-06, + "loss": 0.36827290058135986, + "memory(GiB)": 41.86, + "step": 879, + "token_acc": 0.9271719038817006, + "train_speed(iter/s)": 0.241503 + }, + { + "epoch": 0.2816, + "grad_norm": 0.6699293792004666, + "learning_rate": 4.973771879284116e-06, + "loss": 0.3548522889614105, + "memory(GiB)": 41.86, + "step": 880, + "token_acc": 0.9261926192619262, + "train_speed(iter/s)": 0.241492 + }, + { + "epoch": 0.28192, + "grad_norm": 0.6562773527409582, + "learning_rate": 4.973644318257349e-06, + "loss": 0.31184637546539307, + "memory(GiB)": 41.86, + "step": 881, + "token_acc": 0.902882797731569, + "train_speed(iter/s)": 0.24148 + }, + { + "epoch": 0.28224, + "grad_norm": 0.6405973075614393, + "learning_rate": 4.9735164494284595e-06, + "loss": 0.4276396334171295, + "memory(GiB)": 41.86, + "step": 882, + "token_acc": 0.9028029844726759, + "train_speed(iter/s)": 0.24148 + }, + { + "epoch": 0.28256, + "grad_norm": 0.6473536741908208, + "learning_rate": 4.973388272813355e-06, + "loss": 0.3479858338832855, + "memory(GiB)": 41.86, + "step": 883, + "token_acc": 0.9328621908127208, + "train_speed(iter/s)": 0.241478 + }, + { + "epoch": 0.28288, + "grad_norm": 0.6113653397974941, + "learning_rate": 4.9732597884279885e-06, + "loss": 0.2951425611972809, + "memory(GiB)": 41.86, + "step": 884, + "token_acc": 0.9305435720448663, + "train_speed(iter/s)": 0.24149 + }, + { + "epoch": 0.2832, + "grad_norm": 0.6730281148733999, + "learning_rate": 4.973130996288345e-06, + "loss": 0.36195170879364014, + "memory(GiB)": 41.86, + "step": 885, + "token_acc": 0.9180274330502939, + "train_speed(iter/s)": 0.241492 + }, + { + "epoch": 0.28352, + "grad_norm": 0.7241950728876418, + "learning_rate": 4.9730018964104524e-06, + "loss": 0.3859631419181824, + "memory(GiB)": 41.86, + "step": 886, + "token_acc": 0.8826130653266332, + "train_speed(iter/s)": 0.241499 + }, + { + "epoch": 0.28384, + "grad_norm": 0.6755341219842091, + "learning_rate": 4.972872488810373e-06, + "loss": 0.37077081203460693, + 
"memory(GiB)": 41.86, + "step": 887, + "token_acc": 0.867237687366167, + "train_speed(iter/s)": 0.241494 + }, + { + "epoch": 0.28416, + "grad_norm": 0.6812963468992782, + "learning_rate": 4.9727427735042115e-06, + "loss": 0.2684915065765381, + "memory(GiB)": 41.86, + "step": 888, + "token_acc": 0.9121046892039258, + "train_speed(iter/s)": 0.241506 + }, + { + "epoch": 0.28448, + "grad_norm": 0.7525157746680431, + "learning_rate": 4.972612750508107e-06, + "loss": 0.44266653060913086, + "memory(GiB)": 41.86, + "step": 889, + "token_acc": 0.9112011790714812, + "train_speed(iter/s)": 0.241504 + }, + { + "epoch": 0.2848, + "grad_norm": 0.6453016199975026, + "learning_rate": 4.972482419838238e-06, + "loss": 0.32151272892951965, + "memory(GiB)": 41.86, + "step": 890, + "token_acc": 0.8936689836558331, + "train_speed(iter/s)": 0.241488 + }, + { + "epoch": 0.28512, + "grad_norm": 0.6212688994067938, + "learning_rate": 4.972351781510824e-06, + "loss": 0.39796680212020874, + "memory(GiB)": 41.86, + "step": 891, + "token_acc": 0.9022353891731754, + "train_speed(iter/s)": 0.241492 + }, + { + "epoch": 0.28544, + "grad_norm": 0.7089115102373374, + "learning_rate": 4.97222083554212e-06, + "loss": 0.3897097110748291, + "memory(GiB)": 41.86, + "step": 892, + "token_acc": 0.7951541850220264, + "train_speed(iter/s)": 0.241498 + }, + { + "epoch": 0.28576, + "grad_norm": 0.6546675408734224, + "learning_rate": 4.972089581948418e-06, + "loss": 0.36820483207702637, + "memory(GiB)": 41.86, + "step": 893, + "token_acc": 0.9186360567184335, + "train_speed(iter/s)": 0.24148 + }, + { + "epoch": 0.28608, + "grad_norm": 0.7262540906460178, + "learning_rate": 4.971958020746054e-06, + "loss": 0.4262128174304962, + "memory(GiB)": 41.86, + "step": 894, + "token_acc": 0.8278301886792453, + "train_speed(iter/s)": 0.241492 + }, + { + "epoch": 0.2864, + "grad_norm": 0.6608761438052999, + "learning_rate": 4.971826151951395e-06, + "loss": 0.34088167548179626, + "memory(GiB)": 41.86, + "step": 895, + "token_acc": 0.8382084095063985, + "train_speed(iter/s)": 0.241493 + }, + { + "epoch": 0.28672, + "grad_norm": 0.734419232580609, + "learning_rate": 4.971693975580851e-06, + "loss": 0.34007054567337036, + "memory(GiB)": 41.86, + "step": 896, + "token_acc": 0.8838174273858921, + "train_speed(iter/s)": 0.2415 + }, + { + "epoch": 0.28704, + "grad_norm": 0.6341645963169661, + "learning_rate": 4.9715614916508704e-06, + "loss": 0.4050477147102356, + "memory(GiB)": 41.86, + "step": 897, + "token_acc": 0.8474803602222648, + "train_speed(iter/s)": 0.241479 + }, + { + "epoch": 0.28736, + "grad_norm": 0.6844718425361049, + "learning_rate": 4.971428700177937e-06, + "loss": 0.36214596033096313, + "memory(GiB)": 41.86, + "step": 898, + "token_acc": 0.931599572497328, + "train_speed(iter/s)": 0.241492 + }, + { + "epoch": 0.28768, + "grad_norm": 0.6748809128081812, + "learning_rate": 4.971295601178574e-06, + "loss": 0.3591195344924927, + "memory(GiB)": 41.86, + "step": 899, + "token_acc": 0.862404447533009, + "train_speed(iter/s)": 0.241507 + }, + { + "epoch": 0.288, + "grad_norm": 0.6682543082440642, + "learning_rate": 4.971162194669345e-06, + "loss": 0.42554935812950134, + "memory(GiB)": 41.86, + "step": 900, + "token_acc": 0.9150422949613829, + "train_speed(iter/s)": 0.241505 + }, + { + "epoch": 0.28832, + "grad_norm": 0.6541925224309108, + "learning_rate": 4.971028480666848e-06, + "loss": 0.40456196665763855, + "memory(GiB)": 41.86, + "step": 901, + "token_acc": 0.8156996587030717, + "train_speed(iter/s)": 0.241512 + }, + { + "epoch": 0.28864, + 
"grad_norm": 0.6814521711269722, + "learning_rate": 4.9708944591877224e-06, + "loss": 0.35836970806121826, + "memory(GiB)": 41.86, + "step": 902, + "token_acc": 0.9325091881055797, + "train_speed(iter/s)": 0.241516 + }, + { + "epoch": 0.28896, + "grad_norm": 0.649545456888139, + "learning_rate": 4.9707601302486464e-06, + "loss": 0.3607296347618103, + "memory(GiB)": 41.86, + "step": 903, + "token_acc": 0.8624255719210279, + "train_speed(iter/s)": 0.241516 + }, + { + "epoch": 0.28928, + "grad_norm": 0.6891263598886528, + "learning_rate": 4.970625493866333e-06, + "loss": 0.3262513279914856, + "memory(GiB)": 41.86, + "step": 904, + "token_acc": 0.8658731975280385, + "train_speed(iter/s)": 0.241525 + }, + { + "epoch": 0.2896, + "grad_norm": 0.7705794545296857, + "learning_rate": 4.9704905500575355e-06, + "loss": 0.5154157876968384, + "memory(GiB)": 41.86, + "step": 905, + "token_acc": 0.8821788040260509, + "train_speed(iter/s)": 0.241523 + }, + { + "epoch": 0.28992, + "grad_norm": 0.6751814971991484, + "learning_rate": 4.9703552988390456e-06, + "loss": 0.35065460205078125, + "memory(GiB)": 41.86, + "step": 906, + "token_acc": 0.9142586451918522, + "train_speed(iter/s)": 0.241534 + }, + { + "epoch": 0.29024, + "grad_norm": 0.699380857405434, + "learning_rate": 4.970219740227693e-06, + "loss": 0.38087108731269836, + "memory(GiB)": 41.86, + "step": 907, + "token_acc": 0.8528839922229423, + "train_speed(iter/s)": 0.241543 + }, + { + "epoch": 0.29056, + "grad_norm": 0.7079328478059318, + "learning_rate": 4.970083874240346e-06, + "loss": 0.42072010040283203, + "memory(GiB)": 41.86, + "step": 908, + "token_acc": 0.8306157215867608, + "train_speed(iter/s)": 0.241535 + }, + { + "epoch": 0.29088, + "grad_norm": 0.7533036330931112, + "learning_rate": 4.96994770089391e-06, + "loss": 0.4003120958805084, + "memory(GiB)": 41.86, + "step": 909, + "token_acc": 0.896037804434751, + "train_speed(iter/s)": 0.241538 + }, + { + "epoch": 0.2912, + "grad_norm": 0.6625219352484201, + "learning_rate": 4.969811220205331e-06, + "loss": 0.3053428530693054, + "memory(GiB)": 41.86, + "step": 910, + "token_acc": 0.9239284700539313, + "train_speed(iter/s)": 0.241534 + }, + { + "epoch": 0.29152, + "grad_norm": 0.658103618512496, + "learning_rate": 4.969674432191589e-06, + "loss": 0.3694823980331421, + "memory(GiB)": 41.86, + "step": 911, + "token_acc": 0.8607216731785796, + "train_speed(iter/s)": 0.241532 + }, + { + "epoch": 0.29184, + "grad_norm": 0.6765561916534816, + "learning_rate": 4.969537336869707e-06, + "loss": 0.36112353205680847, + "memory(GiB)": 41.86, + "step": 912, + "token_acc": 0.8812270582066072, + "train_speed(iter/s)": 0.241534 + }, + { + "epoch": 0.29216, + "grad_norm": 0.6308621603918786, + "learning_rate": 4.9693999342567435e-06, + "loss": 0.39021438360214233, + "memory(GiB)": 41.86, + "step": 913, + "token_acc": 0.8794765840220385, + "train_speed(iter/s)": 0.241525 + }, + { + "epoch": 0.29248, + "grad_norm": 0.6717470788996405, + "learning_rate": 4.969262224369795e-06, + "loss": 0.3180088400840759, + "memory(GiB)": 41.86, + "step": 914, + "token_acc": 0.9439976169198689, + "train_speed(iter/s)": 0.241511 + }, + { + "epoch": 0.2928, + "grad_norm": 0.6678257452086631, + "learning_rate": 4.969124207225998e-06, + "loss": 0.42820611596107483, + "memory(GiB)": 41.86, + "step": 915, + "token_acc": 0.8360393931328187, + "train_speed(iter/s)": 0.241505 + }, + { + "epoch": 0.29312, + "grad_norm": 0.7360949685416199, + "learning_rate": 4.968985882842527e-06, + "loss": 0.3855406641960144, + "memory(GiB)": 41.86, + 
"step": 916, + "token_acc": 0.931981981981982, + "train_speed(iter/s)": 0.241517 + }, + { + "epoch": 0.29344, + "grad_norm": 0.7044535152488363, + "learning_rate": 4.968847251236594e-06, + "loss": 0.3625899851322174, + "memory(GiB)": 41.86, + "step": 917, + "token_acc": 0.9040910322405852, + "train_speed(iter/s)": 0.241526 + }, + { + "epoch": 0.29376, + "grad_norm": 0.6537938220168457, + "learning_rate": 4.968708312425449e-06, + "loss": 0.4835781455039978, + "memory(GiB)": 41.86, + "step": 918, + "token_acc": 0.8565353625783348, + "train_speed(iter/s)": 0.241518 + }, + { + "epoch": 0.29408, + "grad_norm": 0.686896162098333, + "learning_rate": 4.968569066426379e-06, + "loss": 0.32849764823913574, + "memory(GiB)": 41.86, + "step": 919, + "token_acc": 0.9011274934952298, + "train_speed(iter/s)": 0.241526 + }, + { + "epoch": 0.2944, + "grad_norm": 0.687683913617958, + "learning_rate": 4.9684295132567115e-06, + "loss": 0.39233213663101196, + "memory(GiB)": 41.86, + "step": 920, + "token_acc": 0.8454443194600675, + "train_speed(iter/s)": 0.241527 + }, + { + "epoch": 0.29472, + "grad_norm": 0.693908437796633, + "learning_rate": 4.968289652933813e-06, + "loss": 0.3255015015602112, + "memory(GiB)": 41.86, + "step": 921, + "token_acc": 0.9359557867360208, + "train_speed(iter/s)": 0.241525 + }, + { + "epoch": 0.29504, + "grad_norm": 0.7267143275291124, + "learning_rate": 4.968149485475085e-06, + "loss": 0.4273531436920166, + "memory(GiB)": 41.86, + "step": 922, + "token_acc": 0.8772574227119682, + "train_speed(iter/s)": 0.241531 + }, + { + "epoch": 0.29536, + "grad_norm": 0.6452649615739668, + "learning_rate": 4.96800901089797e-06, + "loss": 0.43422120809555054, + "memory(GiB)": 41.86, + "step": 923, + "token_acc": 0.8881557241834378, + "train_speed(iter/s)": 0.241514 + }, + { + "epoch": 0.29568, + "grad_norm": 0.7010011491284999, + "learning_rate": 4.967868229219947e-06, + "loss": 0.3614301085472107, + "memory(GiB)": 41.86, + "step": 924, + "token_acc": 0.883982683982684, + "train_speed(iter/s)": 0.241518 + }, + { + "epoch": 0.296, + "grad_norm": 0.7471760335070439, + "learning_rate": 4.967727140458533e-06, + "loss": 0.44364720582962036, + "memory(GiB)": 41.86, + "step": 925, + "token_acc": 0.8213296398891967, + "train_speed(iter/s)": 0.24152 + }, + { + "epoch": 0.29632, + "grad_norm": 0.6928377160078811, + "learning_rate": 4.967585744631287e-06, + "loss": 0.3463733196258545, + "memory(GiB)": 41.86, + "step": 926, + "token_acc": 0.9182530795072789, + "train_speed(iter/s)": 0.241535 + }, + { + "epoch": 0.29664, + "grad_norm": 0.6782065839536974, + "learning_rate": 4.9674440417558e-06, + "loss": 0.4339632987976074, + "memory(GiB)": 41.86, + "step": 927, + "token_acc": 0.913106655042139, + "train_speed(iter/s)": 0.241544 + }, + { + "epoch": 0.29696, + "grad_norm": 0.6914247003294781, + "learning_rate": 4.967302031849706e-06, + "loss": 0.31901341676712036, + "memory(GiB)": 41.86, + "step": 928, + "token_acc": 0.9366944655041698, + "train_speed(iter/s)": 0.241551 + }, + { + "epoch": 0.29728, + "grad_norm": 0.7903881695999362, + "learning_rate": 4.9671597149306764e-06, + "loss": 0.42360836267471313, + "memory(GiB)": 41.86, + "step": 929, + "token_acc": 0.8554804804804805, + "train_speed(iter/s)": 0.241564 + }, + { + "epoch": 0.2976, + "grad_norm": 0.7492555503394457, + "learning_rate": 4.9670170910164175e-06, + "loss": 0.46021217107772827, + "memory(GiB)": 41.86, + "step": 930, + "token_acc": 0.8565543071161049, + "train_speed(iter/s)": 0.241512 + }, + { + "epoch": 0.29792, + "grad_norm": 
0.6731570935279273, + "learning_rate": 4.966874160124678e-06, + "loss": 0.38581928610801697, + "memory(GiB)": 41.86, + "step": 931, + "token_acc": 0.9304123711340206, + "train_speed(iter/s)": 0.241513 + }, + { + "epoch": 0.29824, + "grad_norm": 0.65811219530057, + "learning_rate": 4.966730922273244e-06, + "loss": 0.4658651351928711, + "memory(GiB)": 41.86, + "step": 932, + "token_acc": 0.8298251872993222, + "train_speed(iter/s)": 0.241522 + }, + { + "epoch": 0.29856, + "grad_norm": 0.7093310177772169, + "learning_rate": 4.9665873774799385e-06, + "loss": 0.5289594531059265, + "memory(GiB)": 41.86, + "step": 933, + "token_acc": 0.8338162251655629, + "train_speed(iter/s)": 0.24153 + }, + { + "epoch": 0.29888, + "grad_norm": 0.6748828403599597, + "learning_rate": 4.966443525762622e-06, + "loss": 0.3499439060688019, + "memory(GiB)": 41.86, + "step": 934, + "token_acc": 0.9264825345247766, + "train_speed(iter/s)": 0.241527 + }, + { + "epoch": 0.2992, + "grad_norm": 0.6396606656757102, + "learning_rate": 4.966299367139195e-06, + "loss": 0.333096981048584, + "memory(GiB)": 41.86, + "step": 935, + "token_acc": 0.8704022988505747, + "train_speed(iter/s)": 0.241527 + }, + { + "epoch": 0.29952, + "grad_norm": 0.6875572036699754, + "learning_rate": 4.966154901627596e-06, + "loss": 0.3107609152793884, + "memory(GiB)": 41.86, + "step": 936, + "token_acc": 0.8930993218986837, + "train_speed(iter/s)": 0.241529 + }, + { + "epoch": 0.29984, + "grad_norm": 0.689030173413658, + "learning_rate": 4.966010129245801e-06, + "loss": 0.36445799469947815, + "memory(GiB)": 41.86, + "step": 937, + "token_acc": 0.9247430249632893, + "train_speed(iter/s)": 0.24153 + }, + { + "epoch": 0.30016, + "grad_norm": 0.687135331688922, + "learning_rate": 4.965865050011825e-06, + "loss": 0.36872753500938416, + "memory(GiB)": 41.86, + "step": 938, + "token_acc": 0.8692786525082388, + "train_speed(iter/s)": 0.241521 + }, + { + "epoch": 0.30048, + "grad_norm": 0.6710288075772929, + "learning_rate": 4.965719663943718e-06, + "loss": 0.35664430260658264, + "memory(GiB)": 41.86, + "step": 939, + "token_acc": 0.9474777448071217, + "train_speed(iter/s)": 0.24153 + }, + { + "epoch": 0.3008, + "grad_norm": 0.6879440183609316, + "learning_rate": 4.9655739710595744e-06, + "loss": 0.32241836190223694, + "memory(GiB)": 41.86, + "step": 940, + "token_acc": 0.9285714285714286, + "train_speed(iter/s)": 0.241525 + }, + { + "epoch": 0.30112, + "grad_norm": 0.7165970758183136, + "learning_rate": 4.96542797137752e-06, + "loss": 0.42401638627052307, + "memory(GiB)": 41.86, + "step": 941, + "token_acc": 0.8841492971400873, + "train_speed(iter/s)": 0.241538 + }, + { + "epoch": 0.30144, + "grad_norm": 0.6365745664107615, + "learning_rate": 4.965281664915724e-06, + "loss": 0.31001606583595276, + "memory(GiB)": 41.86, + "step": 942, + "token_acc": 0.9116642264519278, + "train_speed(iter/s)": 0.241539 + }, + { + "epoch": 0.30176, + "grad_norm": 0.656340432291695, + "learning_rate": 4.965135051692391e-06, + "loss": 0.3231452405452728, + "memory(GiB)": 41.86, + "step": 943, + "token_acc": 0.9170944558521561, + "train_speed(iter/s)": 0.241535 + }, + { + "epoch": 0.30208, + "grad_norm": 0.756832797324496, + "learning_rate": 4.964988131725765e-06, + "loss": 0.33907341957092285, + "memory(GiB)": 41.86, + "step": 944, + "token_acc": 0.9287203001250521, + "train_speed(iter/s)": 0.241517 + }, + { + "epoch": 0.3024, + "grad_norm": 0.6757359542610164, + "learning_rate": 4.964840905034126e-06, + "loss": 0.33235907554626465, + "memory(GiB)": 41.86, + "step": 945, + 
"token_acc": 0.843654540405443, + "train_speed(iter/s)": 0.241514 + }, + { + "epoch": 0.30272, + "grad_norm": 0.7048647704697871, + "learning_rate": 4.9646933716357955e-06, + "loss": 0.43321874737739563, + "memory(GiB)": 41.86, + "step": 946, + "token_acc": 0.9497939303109779, + "train_speed(iter/s)": 0.241523 + }, + { + "epoch": 0.30304, + "grad_norm": 0.719203615853158, + "learning_rate": 4.964545531549132e-06, + "loss": 0.34369271993637085, + "memory(GiB)": 41.86, + "step": 947, + "token_acc": 0.8483916083916084, + "train_speed(iter/s)": 0.241534 + }, + { + "epoch": 0.30336, + "grad_norm": 0.7014787982390394, + "learning_rate": 4.96439738479253e-06, + "loss": 0.4344375431537628, + "memory(GiB)": 41.86, + "step": 948, + "token_acc": 0.881524926686217, + "train_speed(iter/s)": 0.241545 + }, + { + "epoch": 0.30368, + "grad_norm": 0.6472731865624812, + "learning_rate": 4.964248931384424e-06, + "loss": 0.39773064851760864, + "memory(GiB)": 41.86, + "step": 949, + "token_acc": 0.8263301088270859, + "train_speed(iter/s)": 0.241549 + }, + { + "epoch": 0.304, + "grad_norm": 0.64673083388736, + "learning_rate": 4.964100171343287e-06, + "loss": 0.3737943172454834, + "memory(GiB)": 41.86, + "step": 950, + "token_acc": 0.8646560319042872, + "train_speed(iter/s)": 0.241556 + }, + { + "epoch": 0.30432, + "grad_norm": 0.676158685316066, + "learning_rate": 4.963951104687629e-06, + "loss": 0.4138943552970886, + "memory(GiB)": 41.86, + "step": 951, + "token_acc": 0.9096804075961094, + "train_speed(iter/s)": 0.241565 + }, + { + "epoch": 0.30464, + "grad_norm": 0.6766321710285448, + "learning_rate": 4.9638017314359995e-06, + "loss": 0.35969242453575134, + "memory(GiB)": 41.86, + "step": 952, + "token_acc": 0.8726790450928382, + "train_speed(iter/s)": 0.241569 + }, + { + "epoch": 0.30496, + "grad_norm": 0.6429303444148192, + "learning_rate": 4.963652051606985e-06, + "loss": 0.34353214502334595, + "memory(GiB)": 41.86, + "step": 953, + "token_acc": 0.9011948529411765, + "train_speed(iter/s)": 0.241554 + }, + { + "epoch": 0.30528, + "grad_norm": 0.6376362337856939, + "learning_rate": 4.9635020652192115e-06, + "loss": 0.3803999423980713, + "memory(GiB)": 41.86, + "step": 954, + "token_acc": 0.935048231511254, + "train_speed(iter/s)": 0.241559 + }, + { + "epoch": 0.3056, + "grad_norm": 0.6515075835660972, + "learning_rate": 4.96335177229134e-06, + "loss": 0.31367772817611694, + "memory(GiB)": 41.86, + "step": 955, + "token_acc": 0.9181309904153354, + "train_speed(iter/s)": 0.241575 + }, + { + "epoch": 0.30592, + "grad_norm": 0.7959635426232597, + "learning_rate": 4.963201172842073e-06, + "loss": 0.4915127456188202, + "memory(GiB)": 41.86, + "step": 956, + "token_acc": 0.8430507406240151, + "train_speed(iter/s)": 0.24156 + }, + { + "epoch": 0.30624, + "grad_norm": 0.7154725681158437, + "learning_rate": 4.963050266890152e-06, + "loss": 0.40798258781433105, + "memory(GiB)": 41.86, + "step": 957, + "token_acc": 0.883199079401611, + "train_speed(iter/s)": 0.241565 + }, + { + "epoch": 0.30656, + "grad_norm": 0.6467815122017245, + "learning_rate": 4.962899054454352e-06, + "loss": 0.39871275424957275, + "memory(GiB)": 41.86, + "step": 958, + "token_acc": 0.9438382541720154, + "train_speed(iter/s)": 0.241567 + }, + { + "epoch": 0.30688, + "grad_norm": 0.7028902373577559, + "learning_rate": 4.9627475355534895e-06, + "loss": 0.3961467444896698, + "memory(GiB)": 41.86, + "step": 959, + "token_acc": 0.8863779033270559, + "train_speed(iter/s)": 0.241569 + }, + { + "epoch": 0.3072, + "grad_norm": 0.6560061641040955, + 
"learning_rate": 4.962595710206418e-06, + "loss": 0.34120070934295654, + "memory(GiB)": 41.86, + "step": 960, + "token_acc": 0.8376825100963032, + "train_speed(iter/s)": 0.241576 + }, + { + "epoch": 0.30752, + "grad_norm": 0.6324110110528821, + "learning_rate": 4.9624435784320304e-06, + "loss": 0.4010915160179138, + "memory(GiB)": 41.86, + "step": 961, + "token_acc": 0.9414990859232175, + "train_speed(iter/s)": 0.241565 + }, + { + "epoch": 0.30784, + "grad_norm": 0.7237947756103527, + "learning_rate": 4.962291140249257e-06, + "loss": 0.47616103291511536, + "memory(GiB)": 41.86, + "step": 962, + "token_acc": 0.9331405854716299, + "train_speed(iter/s)": 0.241552 + }, + { + "epoch": 0.30816, + "grad_norm": 0.7424256205140244, + "learning_rate": 4.9621383956770656e-06, + "loss": 0.4085035026073456, + "memory(GiB)": 41.86, + "step": 963, + "token_acc": 0.8971126474176494, + "train_speed(iter/s)": 0.241566 + }, + { + "epoch": 0.30848, + "grad_norm": 0.6873003395077195, + "learning_rate": 4.961985344734461e-06, + "loss": 0.3208773136138916, + "memory(GiB)": 41.86, + "step": 964, + "token_acc": 0.8979676482787226, + "train_speed(iter/s)": 0.241565 + }, + { + "epoch": 0.3088, + "grad_norm": 0.7669685826075835, + "learning_rate": 4.961831987440491e-06, + "loss": 0.35845038294792175, + "memory(GiB)": 41.86, + "step": 965, + "token_acc": 0.9046849757673667, + "train_speed(iter/s)": 0.241576 + }, + { + "epoch": 0.30912, + "grad_norm": 0.7117847514460486, + "learning_rate": 4.9616783238142355e-06, + "loss": 0.3642219007015228, + "memory(GiB)": 41.86, + "step": 966, + "token_acc": 0.8754716981132076, + "train_speed(iter/s)": 0.241588 + }, + { + "epoch": 0.30944, + "grad_norm": 0.6974943386877127, + "learning_rate": 4.961524353874817e-06, + "loss": 0.3851352632045746, + "memory(GiB)": 41.86, + "step": 967, + "token_acc": 0.8839706652697747, + "train_speed(iter/s)": 0.241579 + }, + { + "epoch": 0.30976, + "grad_norm": 0.6490229758901854, + "learning_rate": 4.961370077641393e-06, + "loss": 0.46044978499412537, + "memory(GiB)": 41.86, + "step": 968, + "token_acc": 0.8507890961262554, + "train_speed(iter/s)": 0.241584 + }, + { + "epoch": 0.31008, + "grad_norm": 0.6443416318287054, + "learning_rate": 4.961215495133163e-06, + "loss": 0.30034536123275757, + "memory(GiB)": 41.86, + "step": 969, + "token_acc": 0.9131075110456554, + "train_speed(iter/s)": 0.241593 + }, + { + "epoch": 0.3104, + "grad_norm": 0.6726885120571315, + "learning_rate": 4.961060606369358e-06, + "loss": 0.3777090907096863, + "memory(GiB)": 41.86, + "step": 970, + "token_acc": 0.794488001900689, + "train_speed(iter/s)": 0.241607 + }, + { + "epoch": 0.31072, + "grad_norm": 0.7198028288028475, + "learning_rate": 4.960905411369254e-06, + "loss": 0.40755361318588257, + "memory(GiB)": 41.86, + "step": 971, + "token_acc": 0.8454728370221328, + "train_speed(iter/s)": 0.241615 + }, + { + "epoch": 0.31104, + "grad_norm": 0.7751462576613326, + "learning_rate": 4.960749910152163e-06, + "loss": 0.371136873960495, + "memory(GiB)": 41.86, + "step": 972, + "token_acc": 0.8503206209922376, + "train_speed(iter/s)": 0.241626 + }, + { + "epoch": 0.31136, + "grad_norm": 0.7093532516024367, + "learning_rate": 4.960594102737433e-06, + "loss": 0.42763015627861023, + "memory(GiB)": 41.86, + "step": 973, + "token_acc": 0.8802768166089966, + "train_speed(iter/s)": 0.241625 + }, + { + "epoch": 0.31168, + "grad_norm": 0.690143372001636, + "learning_rate": 4.960437989144452e-06, + "loss": 0.4157477021217346, + "memory(GiB)": 41.86, + "step": 974, + "token_acc": 
0.9145597210113339, + "train_speed(iter/s)": 0.241638 + }, + { + "epoch": 0.312, + "grad_norm": 0.651656437287309, + "learning_rate": 4.960281569392646e-06, + "loss": 0.3468869924545288, + "memory(GiB)": 41.86, + "step": 975, + "token_acc": 0.8351550960118168, + "train_speed(iter/s)": 0.24154 + }, + { + "epoch": 0.31232, + "grad_norm": 0.655364140860849, + "learning_rate": 4.960124843501476e-06, + "loss": 0.36141568422317505, + "memory(GiB)": 41.86, + "step": 976, + "token_acc": 0.9117647058823529, + "train_speed(iter/s)": 0.241527 + }, + { + "epoch": 0.31264, + "grad_norm": 0.9610000105611087, + "learning_rate": 4.9599678114904475e-06, + "loss": 0.418308824300766, + "memory(GiB)": 41.86, + "step": 977, + "token_acc": 0.9174652241112828, + "train_speed(iter/s)": 0.241527 + }, + { + "epoch": 0.31296, + "grad_norm": 0.7293321997535062, + "learning_rate": 4.959810473379099e-06, + "loss": 0.426521360874176, + "memory(GiB)": 41.86, + "step": 978, + "token_acc": 0.8969750109601052, + "train_speed(iter/s)": 0.241537 + }, + { + "epoch": 0.31328, + "grad_norm": 0.7086196763606638, + "learning_rate": 4.959652829187008e-06, + "loss": 0.44412344694137573, + "memory(GiB)": 41.86, + "step": 979, + "token_acc": 0.8377947737412365, + "train_speed(iter/s)": 0.241542 + }, + { + "epoch": 0.3136, + "grad_norm": 0.6790496316669521, + "learning_rate": 4.959494878933792e-06, + "loss": 0.3211071193218231, + "memory(GiB)": 41.86, + "step": 980, + "token_acc": 0.9030318870883429, + "train_speed(iter/s)": 0.241545 + }, + { + "epoch": 0.31392, + "grad_norm": 0.7595760736020764, + "learning_rate": 4.959336622639103e-06, + "loss": 0.3688046336174011, + "memory(GiB)": 41.86, + "step": 981, + "token_acc": 0.9394338380513496, + "train_speed(iter/s)": 0.241551 + }, + { + "epoch": 0.31424, + "grad_norm": 0.6593257106960728, + "learning_rate": 4.959178060322634e-06, + "loss": 0.3620453476905823, + "memory(GiB)": 41.86, + "step": 982, + "token_acc": 0.9118501775912173, + "train_speed(iter/s)": 0.241551 + }, + { + "epoch": 0.31456, + "grad_norm": 0.6822043358664656, + "learning_rate": 4.959019192004117e-06, + "loss": 0.407000333070755, + "memory(GiB)": 41.86, + "step": 983, + "token_acc": 0.8108839446782923, + "train_speed(iter/s)": 0.241545 + }, + { + "epoch": 0.31488, + "grad_norm": 0.8137166998431616, + "learning_rate": 4.958860017703319e-06, + "loss": 0.3293525278568268, + "memory(GiB)": 41.86, + "step": 984, + "token_acc": 0.8594428826986966, + "train_speed(iter/s)": 0.241555 + }, + { + "epoch": 0.3152, + "grad_norm": 0.6483660618510628, + "learning_rate": 4.958700537440046e-06, + "loss": 0.36643415689468384, + "memory(GiB)": 41.86, + "step": 985, + "token_acc": 0.8746982134234669, + "train_speed(iter/s)": 0.241564 + }, + { + "epoch": 0.31552, + "grad_norm": 0.7314331263493626, + "learning_rate": 4.958540751234143e-06, + "loss": 0.4243761897087097, + "memory(GiB)": 41.86, + "step": 986, + "token_acc": 0.8707196029776675, + "train_speed(iter/s)": 0.241575 + }, + { + "epoch": 0.31584, + "grad_norm": 0.7155088680321686, + "learning_rate": 4.958380659105494e-06, + "loss": 0.42605161666870117, + "memory(GiB)": 41.86, + "step": 987, + "token_acc": 0.8307178631051753, + "train_speed(iter/s)": 0.241575 + }, + { + "epoch": 0.31616, + "grad_norm": 0.7014508103615489, + "learning_rate": 4.958220261074018e-06, + "loss": 0.4260730743408203, + "memory(GiB)": 41.86, + "step": 988, + "token_acc": 0.9423195558297347, + "train_speed(iter/s)": 0.241572 + }, + { + "epoch": 0.31648, + "grad_norm": 0.747960960960738, + "learning_rate": 
4.958059557159674e-06, + "loss": 0.4288990795612335, + "memory(GiB)": 41.86, + "step": 989, + "token_acc": 0.8430858806404657, + "train_speed(iter/s)": 0.241573 + }, + { + "epoch": 0.3168, + "grad_norm": 0.6511621063111808, + "learning_rate": 4.9578985473824594e-06, + "loss": 0.3697865903377533, + "memory(GiB)": 41.86, + "step": 990, + "token_acc": 0.937160811196801, + "train_speed(iter/s)": 0.241582 + }, + { + "epoch": 0.31712, + "grad_norm": 0.6803491779852046, + "learning_rate": 4.9577372317624085e-06, + "loss": 0.4827129542827606, + "memory(GiB)": 41.86, + "step": 991, + "token_acc": 0.8279078999603017, + "train_speed(iter/s)": 0.241575 + }, + { + "epoch": 0.31744, + "grad_norm": 0.6610022822145006, + "learning_rate": 4.957575610319594e-06, + "loss": 0.48004278540611267, + "memory(GiB)": 41.86, + "step": 992, + "token_acc": 0.8658008658008658, + "train_speed(iter/s)": 0.241572 + }, + { + "epoch": 0.31776, + "grad_norm": 0.75144427607101, + "learning_rate": 4.957413683074128e-06, + "loss": 0.41778141260147095, + "memory(GiB)": 41.86, + "step": 993, + "token_acc": 0.8972292191435768, + "train_speed(iter/s)": 0.241574 + }, + { + "epoch": 0.31808, + "grad_norm": 0.6556340506850887, + "learning_rate": 4.957251450046159e-06, + "loss": 0.38153591752052307, + "memory(GiB)": 41.86, + "step": 994, + "token_acc": 0.9180639038279026, + "train_speed(iter/s)": 0.24158 + }, + { + "epoch": 0.3184, + "grad_norm": 0.6735728046899795, + "learning_rate": 4.957088911255874e-06, + "loss": 0.3668578565120697, + "memory(GiB)": 41.86, + "step": 995, + "token_acc": 0.8867084766336233, + "train_speed(iter/s)": 0.241584 + }, + { + "epoch": 0.31872, + "grad_norm": 0.6554964821452093, + "learning_rate": 4.956926066723498e-06, + "loss": 0.41147565841674805, + "memory(GiB)": 41.86, + "step": 996, + "token_acc": 0.8859154929577465, + "train_speed(iter/s)": 0.24159 + }, + { + "epoch": 0.31904, + "grad_norm": 0.6709924681261463, + "learning_rate": 4.956762916469294e-06, + "loss": 0.25895392894744873, + "memory(GiB)": 41.86, + "step": 997, + "token_acc": 0.9096712416214491, + "train_speed(iter/s)": 0.241595 + }, + { + "epoch": 0.31936, + "grad_norm": 0.6725725911167624, + "learning_rate": 4.956599460513564e-06, + "loss": 0.4014821946620941, + "memory(GiB)": 41.86, + "step": 998, + "token_acc": 0.8690749235474006, + "train_speed(iter/s)": 0.241582 + }, + { + "epoch": 0.31968, + "grad_norm": 0.6404288581638261, + "learning_rate": 4.956435698876646e-06, + "loss": 0.42094728350639343, + "memory(GiB)": 41.86, + "step": 999, + "token_acc": 0.7922720247295209, + "train_speed(iter/s)": 0.241582 + }, + { + "epoch": 0.32, + "grad_norm": 0.6657477773913097, + "learning_rate": 4.956271631578919e-06, + "loss": 0.3939965069293976, + "memory(GiB)": 41.86, + "step": 1000, + "token_acc": 0.8648879402347919, + "train_speed(iter/s)": 0.241593 + }, + { + "epoch": 0.32032, + "grad_norm": 0.9523643549253229, + "learning_rate": 4.956107258640796e-06, + "loss": 0.38130831718444824, + "memory(GiB)": 41.86, + "step": 1001, + "token_acc": 0.8752895752895753, + "train_speed(iter/s)": 0.2416 + }, + { + "epoch": 0.32064, + "grad_norm": 0.6626284349945473, + "learning_rate": 4.955942580082733e-06, + "loss": 0.3861386477947235, + "memory(GiB)": 41.86, + "step": 1002, + "token_acc": 0.8783173348390739, + "train_speed(iter/s)": 0.241607 + }, + { + "epoch": 0.32096, + "grad_norm": 0.6953711512420279, + "learning_rate": 4.955777595925219e-06, + "loss": 0.38329997658729553, + "memory(GiB)": 41.86, + "step": 1003, + "token_acc": 0.8404473864414517, + 
"train_speed(iter/s)": 0.241617 + }, + { + "epoch": 0.32128, + "grad_norm": 0.6729126241928775, + "learning_rate": 4.955612306188786e-06, + "loss": 0.34899717569351196, + "memory(GiB)": 41.86, + "step": 1004, + "token_acc": 0.921830985915493, + "train_speed(iter/s)": 0.241618 + }, + { + "epoch": 0.3216, + "grad_norm": 0.6495551002485624, + "learning_rate": 4.9554467108939995e-06, + "loss": 0.3741927146911621, + "memory(GiB)": 41.86, + "step": 1005, + "token_acc": 0.9096349491322562, + "train_speed(iter/s)": 0.241614 + }, + { + "epoch": 0.32192, + "grad_norm": 0.630422037928398, + "learning_rate": 4.955280810061466e-06, + "loss": 0.430209755897522, + "memory(GiB)": 41.86, + "step": 1006, + "token_acc": 0.9273834621691263, + "train_speed(iter/s)": 0.241621 + }, + { + "epoch": 0.32224, + "grad_norm": 0.6689575792558569, + "learning_rate": 4.955114603711827e-06, + "loss": 0.2834753394126892, + "memory(GiB)": 41.86, + "step": 1007, + "token_acc": 0.9242048274306339, + "train_speed(iter/s)": 0.241634 + }, + { + "epoch": 0.32256, + "grad_norm": 0.6245742544905631, + "learning_rate": 4.954948091865767e-06, + "loss": 0.35588037967681885, + "memory(GiB)": 41.86, + "step": 1008, + "token_acc": 0.9187468160978095, + "train_speed(iter/s)": 0.241644 + }, + { + "epoch": 0.32288, + "grad_norm": 0.7449460552439107, + "learning_rate": 4.954781274544003e-06, + "loss": 0.4113908112049103, + "memory(GiB)": 41.86, + "step": 1009, + "token_acc": 0.8832929782082325, + "train_speed(iter/s)": 0.241655 + }, + { + "epoch": 0.3232, + "grad_norm": 0.7063344463900884, + "learning_rate": 4.9546141517672926e-06, + "loss": 0.4783664047718048, + "memory(GiB)": 41.86, + "step": 1010, + "token_acc": 0.9095955590800952, + "train_speed(iter/s)": 0.241656 + }, + { + "epoch": 0.32352, + "grad_norm": 0.613640136360638, + "learning_rate": 4.954446723556434e-06, + "loss": 0.3812958598136902, + "memory(GiB)": 41.86, + "step": 1011, + "token_acc": 0.916202270381837, + "train_speed(iter/s)": 0.241645 + }, + { + "epoch": 0.32384, + "grad_norm": 0.6330400106245359, + "learning_rate": 4.954278989932259e-06, + "loss": 0.33548861742019653, + "memory(GiB)": 41.86, + "step": 1012, + "token_acc": 0.8412897822445561, + "train_speed(iter/s)": 0.241654 + }, + { + "epoch": 0.32416, + "grad_norm": 0.7211298064966235, + "learning_rate": 4.954110950915637e-06, + "loss": 0.3335844576358795, + "memory(GiB)": 41.86, + "step": 1013, + "token_acc": 0.9252901353965184, + "train_speed(iter/s)": 0.241659 + }, + { + "epoch": 0.32448, + "grad_norm": 0.7142141715567591, + "learning_rate": 4.953942606527481e-06, + "loss": 0.33803191781044006, + "memory(GiB)": 41.86, + "step": 1014, + "token_acc": 0.8642480983031012, + "train_speed(iter/s)": 0.241668 + }, + { + "epoch": 0.3248, + "grad_norm": 0.7289077881922351, + "learning_rate": 4.9537739567887375e-06, + "loss": 0.3617907166481018, + "memory(GiB)": 41.86, + "step": 1015, + "token_acc": 0.881083202511774, + "train_speed(iter/s)": 0.241674 + }, + { + "epoch": 0.32512, + "grad_norm": 0.7729384625657163, + "learning_rate": 4.953605001720391e-06, + "loss": 0.44356679916381836, + "memory(GiB)": 41.86, + "step": 1016, + "token_acc": 0.9472751439964555, + "train_speed(iter/s)": 0.241671 + }, + { + "epoch": 0.32544, + "grad_norm": 0.6755624930071814, + "learning_rate": 4.953435741343467e-06, + "loss": 0.38764992356300354, + "memory(GiB)": 41.86, + "step": 1017, + "token_acc": 0.9303857008466604, + "train_speed(iter/s)": 0.241672 + }, + { + "epoch": 0.32576, + "grad_norm": 0.7123496794990265, + "learning_rate": 
4.953266175679023e-06, + "loss": 0.3153836727142334, + "memory(GiB)": 41.86, + "step": 1018, + "token_acc": 0.885049365303244, + "train_speed(iter/s)": 0.241677 + }, + { + "epoch": 0.32608, + "grad_norm": 0.6504895125695942, + "learning_rate": 4.953096304748164e-06, + "loss": 0.34001272916793823, + "memory(GiB)": 41.86, + "step": 1019, + "token_acc": 0.856120826709062, + "train_speed(iter/s)": 0.24168 + }, + { + "epoch": 0.3264, + "grad_norm": 0.6760416395826648, + "learning_rate": 4.952926128572023e-06, + "loss": 0.39267638325691223, + "memory(GiB)": 41.86, + "step": 1020, + "token_acc": 0.8095507205425262, + "train_speed(iter/s)": 0.241669 + }, + { + "epoch": 0.32672, + "grad_norm": 0.6528138921995089, + "learning_rate": 4.952755647171778e-06, + "loss": 0.3707922697067261, + "memory(GiB)": 41.86, + "step": 1021, + "token_acc": 0.8345665961945031, + "train_speed(iter/s)": 0.241674 + }, + { + "epoch": 0.32704, + "grad_norm": 0.7022747455094215, + "learning_rate": 4.952584860568642e-06, + "loss": 0.42911237478256226, + "memory(GiB)": 41.86, + "step": 1022, + "token_acc": 0.8903796745646588, + "train_speed(iter/s)": 0.241664 + }, + { + "epoch": 0.32736, + "grad_norm": 0.6609118834321253, + "learning_rate": 4.952413768783866e-06, + "loss": 0.38422489166259766, + "memory(GiB)": 41.86, + "step": 1023, + "token_acc": 0.9246753246753247, + "train_speed(iter/s)": 0.241669 + }, + { + "epoch": 0.32768, + "grad_norm": 0.6820913683930098, + "learning_rate": 4.952242371838738e-06, + "loss": 0.4425528943538666, + "memory(GiB)": 41.86, + "step": 1024, + "token_acc": 0.9285481239804242, + "train_speed(iter/s)": 0.241679 + }, + { + "epoch": 0.328, + "grad_norm": 0.7217433850363421, + "learning_rate": 4.952070669754588e-06, + "loss": 0.34991931915283203, + "memory(GiB)": 41.86, + "step": 1025, + "token_acc": 0.947928207400842, + "train_speed(iter/s)": 0.241682 + }, + { + "epoch": 0.32832, + "grad_norm": 0.6798058554343708, + "learning_rate": 4.951898662552781e-06, + "loss": 0.4296380281448364, + "memory(GiB)": 41.86, + "step": 1026, + "token_acc": 0.8853333333333333, + "train_speed(iter/s)": 0.241688 + }, + { + "epoch": 0.32864, + "grad_norm": 0.7145131275541953, + "learning_rate": 4.9517263502547185e-06, + "loss": 0.3679383099079132, + "memory(GiB)": 41.86, + "step": 1027, + "token_acc": 0.9034730538922155, + "train_speed(iter/s)": 0.241694 + }, + { + "epoch": 0.32896, + "grad_norm": 0.6912580432476901, + "learning_rate": 4.951553732881843e-06, + "loss": 0.41160011291503906, + "memory(GiB)": 41.86, + "step": 1028, + "token_acc": 0.8494199535962877, + "train_speed(iter/s)": 0.241697 + }, + { + "epoch": 0.32928, + "grad_norm": 0.6707161560748471, + "learning_rate": 4.951380810455634e-06, + "loss": 0.45262056589126587, + "memory(GiB)": 41.86, + "step": 1029, + "token_acc": 0.8594429939077458, + "train_speed(iter/s)": 0.2417 + }, + { + "epoch": 0.3296, + "grad_norm": 0.6504642640004275, + "learning_rate": 4.951207582997607e-06, + "loss": 0.3786610960960388, + "memory(GiB)": 41.86, + "step": 1030, + "token_acc": 0.8936361592084552, + "train_speed(iter/s)": 0.241693 + }, + { + "epoch": 0.32992, + "grad_norm": 0.7446991680152788, + "learning_rate": 4.9510340505293195e-06, + "loss": 0.3011898398399353, + "memory(GiB)": 41.86, + "step": 1031, + "token_acc": 0.8705932932072227, + "train_speed(iter/s)": 0.241699 + }, + { + "epoch": 0.33024, + "grad_norm": 0.640116351049959, + "learning_rate": 4.950860213072364e-06, + "loss": 0.3395574986934662, + "memory(GiB)": 41.86, + "step": 1032, + "token_acc": 
0.8471194077888639, + "train_speed(iter/s)": 0.241697 + }, + { + "epoch": 0.33056, + "grad_norm": 0.6531998010477197, + "learning_rate": 4.95068607064837e-06, + "loss": 0.2554836571216583, + "memory(GiB)": 41.86, + "step": 1033, + "token_acc": 0.9306418219461697, + "train_speed(iter/s)": 0.24171 + }, + { + "epoch": 0.33088, + "grad_norm": 0.584187152964851, + "learning_rate": 4.950511623279007e-06, + "loss": 0.3896667957305908, + "memory(GiB)": 41.86, + "step": 1034, + "token_acc": 0.8530228583635975, + "train_speed(iter/s)": 0.241708 + }, + { + "epoch": 0.3312, + "grad_norm": 0.6799782142300212, + "learning_rate": 4.9503368709859844e-06, + "loss": 0.448594331741333, + "memory(GiB)": 41.86, + "step": 1035, + "token_acc": 0.9213641488162345, + "train_speed(iter/s)": 0.241712 + }, + { + "epoch": 0.33152, + "grad_norm": 0.7204365619479748, + "learning_rate": 4.950161813791044e-06, + "loss": 0.47708818316459656, + "memory(GiB)": 41.86, + "step": 1036, + "token_acc": 0.8646788990825688, + "train_speed(iter/s)": 0.241714 + }, + { + "epoch": 0.33184, + "grad_norm": 0.6912233665096669, + "learning_rate": 4.94998645171597e-06, + "loss": 0.46789658069610596, + "memory(GiB)": 41.86, + "step": 1037, + "token_acc": 0.8602329450915142, + "train_speed(iter/s)": 0.241722 + }, + { + "epoch": 0.33216, + "grad_norm": 0.6776789809884591, + "learning_rate": 4.949810784782583e-06, + "loss": 0.38782215118408203, + "memory(GiB)": 41.86, + "step": 1038, + "token_acc": 0.887836853605244, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.33248, + "grad_norm": 0.7189582121687214, + "learning_rate": 4.949634813012741e-06, + "loss": 0.46340587735176086, + "memory(GiB)": 41.86, + "step": 1039, + "token_acc": 0.9068203650336215, + "train_speed(iter/s)": 0.241734 + }, + { + "epoch": 0.3328, + "grad_norm": 0.7368112324181394, + "learning_rate": 4.949458536428343e-06, + "loss": 0.39033952355384827, + "memory(GiB)": 41.86, + "step": 1040, + "token_acc": 0.8238074774387624, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.33312, + "grad_norm": 0.6507799204174719, + "learning_rate": 4.94928195505132e-06, + "loss": 0.44069159030914307, + "memory(GiB)": 41.86, + "step": 1041, + "token_acc": 0.8488587291795188, + "train_speed(iter/s)": 0.241727 + }, + { + "epoch": 0.33344, + "grad_norm": 0.6736124802625697, + "learning_rate": 4.949105068903648e-06, + "loss": 0.4081144332885742, + "memory(GiB)": 41.86, + "step": 1042, + "token_acc": 0.9478527607361963, + "train_speed(iter/s)": 0.241736 + }, + { + "epoch": 0.33376, + "grad_norm": 0.7206568869755781, + "learning_rate": 4.948927878007334e-06, + "loss": 0.39775562286376953, + "memory(GiB)": 41.86, + "step": 1043, + "token_acc": 0.8473118279569892, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.33408, + "grad_norm": 0.6843934866901329, + "learning_rate": 4.94875038238443e-06, + "loss": 0.41933223605155945, + "memory(GiB)": 41.86, + "step": 1044, + "token_acc": 0.9016233766233767, + "train_speed(iter/s)": 0.241749 + }, + { + "epoch": 0.3344, + "grad_norm": 0.702910893438572, + "learning_rate": 4.94857258205702e-06, + "loss": 0.34401804208755493, + "memory(GiB)": 41.86, + "step": 1045, + "token_acc": 0.878168202764977, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.33472, + "grad_norm": 0.6569564029468583, + "learning_rate": 4.948394477047228e-06, + "loss": 0.3540724515914917, + "memory(GiB)": 41.86, + "step": 1046, + "token_acc": 0.8186228774025005, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.33504, + "grad_norm": 0.6450039506662548, + 
"learning_rate": 4.948216067377216e-06, + "loss": 0.30413153767585754, + "memory(GiB)": 41.86, + "step": 1047, + "token_acc": 0.9082875098193244, + "train_speed(iter/s)": 0.241757 + }, + { + "epoch": 0.33536, + "grad_norm": 0.7203608030534339, + "learning_rate": 4.948037353069186e-06, + "loss": 0.36445608735084534, + "memory(GiB)": 41.86, + "step": 1048, + "token_acc": 0.9238171611868484, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.33568, + "grad_norm": 0.8603871751478659, + "learning_rate": 4.947858334145373e-06, + "loss": 0.39330965280532837, + "memory(GiB)": 41.86, + "step": 1049, + "token_acc": 0.9191499755740108, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.336, + "grad_norm": 0.6555291499642897, + "learning_rate": 4.947679010628056e-06, + "loss": 0.34528446197509766, + "memory(GiB)": 41.86, + "step": 1050, + "token_acc": 0.9535353535353536, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.33632, + "grad_norm": 0.6248073650451396, + "learning_rate": 4.947499382539547e-06, + "loss": 0.41887032985687256, + "memory(GiB)": 41.86, + "step": 1051, + "token_acc": 0.8956383190599482, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.33664, + "grad_norm": 0.670933716640286, + "learning_rate": 4.947319449902196e-06, + "loss": 0.434817910194397, + "memory(GiB)": 41.86, + "step": 1052, + "token_acc": 0.8432264241263763, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.33696, + "grad_norm": 0.6909867137645899, + "learning_rate": 4.947139212738395e-06, + "loss": 0.3792175352573395, + "memory(GiB)": 41.86, + "step": 1053, + "token_acc": 0.8768303186907838, + "train_speed(iter/s)": 0.241746 + }, + { + "epoch": 0.33728, + "grad_norm": 0.6749176047375498, + "learning_rate": 4.9469586710705705e-06, + "loss": 0.39573103189468384, + "memory(GiB)": 41.86, + "step": 1054, + "token_acc": 0.8690341976700489, + "train_speed(iter/s)": 0.241737 + }, + { + "epoch": 0.3376, + "grad_norm": 0.6921414874815627, + "learning_rate": 4.946777824921187e-06, + "loss": 0.3517976999282837, + "memory(GiB)": 41.86, + "step": 1055, + "token_acc": 0.8405588658311075, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.33792, + "grad_norm": 0.6751923323476415, + "learning_rate": 4.94659667431275e-06, + "loss": 0.38809794187545776, + "memory(GiB)": 41.86, + "step": 1056, + "token_acc": 0.8897378017016844, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.33824, + "grad_norm": 0.6895372935242522, + "learning_rate": 4.946415219267798e-06, + "loss": 0.4410780668258667, + "memory(GiB)": 41.86, + "step": 1057, + "token_acc": 0.9129587528418318, + "train_speed(iter/s)": 0.241749 + }, + { + "epoch": 0.33856, + "grad_norm": 0.6998110009311518, + "learning_rate": 4.946233459808911e-06, + "loss": 0.3271169066429138, + "memory(GiB)": 41.86, + "step": 1058, + "token_acc": 0.9075268817204301, + "train_speed(iter/s)": 0.241757 + }, + { + "epoch": 0.33888, + "grad_norm": 0.6698930835383077, + "learning_rate": 4.946051395958706e-06, + "loss": 0.42115816473960876, + "memory(GiB)": 41.86, + "step": 1059, + "token_acc": 0.9074029418857005, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.3392, + "grad_norm": 0.6878599460607422, + "learning_rate": 4.945869027739837e-06, + "loss": 0.3501240313053131, + "memory(GiB)": 41.86, + "step": 1060, + "token_acc": 0.9381818181818182, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.33952, + "grad_norm": 0.6709027612428627, + "learning_rate": 4.9456863551749975e-06, + "loss": 0.4142116606235504, + "memory(GiB)": 41.86, + "step": 1061, + 
"token_acc": 0.9436201780415431, + "train_speed(iter/s)": 0.241746 + }, + { + "epoch": 0.33984, + "grad_norm": 0.7631310435433923, + "learning_rate": 4.945503378286917e-06, + "loss": 0.37564173340797424, + "memory(GiB)": 41.86, + "step": 1062, + "token_acc": 0.918918918918919, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.34016, + "grad_norm": 0.6308933910409932, + "learning_rate": 4.945320097098364e-06, + "loss": 0.43829452991485596, + "memory(GiB)": 41.86, + "step": 1063, + "token_acc": 0.8815922920892495, + "train_speed(iter/s)": 0.24173 + }, + { + "epoch": 0.34048, + "grad_norm": 0.6795472278415157, + "learning_rate": 4.945136511632145e-06, + "loss": 0.47781962156295776, + "memory(GiB)": 41.86, + "step": 1064, + "token_acc": 0.8337078651685393, + "train_speed(iter/s)": 0.241731 + }, + { + "epoch": 0.3408, + "grad_norm": 0.6251405069931333, + "learning_rate": 4.944952621911104e-06, + "loss": 0.3921976685523987, + "memory(GiB)": 41.86, + "step": 1065, + "token_acc": 0.7466933867735471, + "train_speed(iter/s)": 0.241719 + }, + { + "epoch": 0.34112, + "grad_norm": 0.7135380332662487, + "learning_rate": 4.9447684279581234e-06, + "loss": 0.4389476478099823, + "memory(GiB)": 41.86, + "step": 1066, + "token_acc": 0.9294920394238059, + "train_speed(iter/s)": 0.241723 + }, + { + "epoch": 0.34144, + "grad_norm": 0.6776616512792416, + "learning_rate": 4.944583929796122e-06, + "loss": 0.35050854086875916, + "memory(GiB)": 41.86, + "step": 1067, + "token_acc": 0.9236155315085932, + "train_speed(iter/s)": 0.241725 + }, + { + "epoch": 0.34176, + "grad_norm": 0.6389345099016255, + "learning_rate": 4.944399127448057e-06, + "loss": 0.3605591356754303, + "memory(GiB)": 41.86, + "step": 1068, + "token_acc": 0.883224152663059, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.34208, + "grad_norm": 0.6810958361622542, + "learning_rate": 4.944214020936926e-06, + "loss": 0.3437727391719818, + "memory(GiB)": 41.86, + "step": 1069, + "token_acc": 0.8635700389105059, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.3424, + "grad_norm": 0.7030062331761373, + "learning_rate": 4.944028610285759e-06, + "loss": 0.41491276025772095, + "memory(GiB)": 41.86, + "step": 1070, + "token_acc": 0.9341619079610346, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.34272, + "grad_norm": 0.7076725174562205, + "learning_rate": 4.943842895517631e-06, + "loss": 0.37624073028564453, + "memory(GiB)": 41.86, + "step": 1071, + "token_acc": 0.9047003018542475, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.34304, + "grad_norm": 0.6771166164612088, + "learning_rate": 4.943656876655648e-06, + "loss": 0.3783903121948242, + "memory(GiB)": 41.86, + "step": 1072, + "token_acc": 0.8821165438713998, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.34336, + "grad_norm": 0.703462793337863, + "learning_rate": 4.9434705537229574e-06, + "loss": 0.3551523685455322, + "memory(GiB)": 41.86, + "step": 1073, + "token_acc": 0.8453214513049013, + "train_speed(iter/s)": 0.241751 + }, + { + "epoch": 0.34368, + "grad_norm": 0.6887465039287771, + "learning_rate": 4.943283926742745e-06, + "loss": 0.3794945180416107, + "memory(GiB)": 41.86, + "step": 1074, + "token_acc": 0.9053227633069083, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.344, + "grad_norm": 0.6876534938960177, + "learning_rate": 4.943096995738233e-06, + "loss": 0.35381942987442017, + "memory(GiB)": 41.86, + "step": 1075, + "token_acc": 0.8259236067626801, + "train_speed(iter/s)": 0.241768 + }, + { + "epoch": 0.34432, + "grad_norm": 
0.6457344205965833, + "learning_rate": 4.94290976073268e-06, + "loss": 0.47036880254745483, + "memory(GiB)": 41.86, + "step": 1076, + "token_acc": 0.9330357142857143, + "train_speed(iter/s)": 0.241757 + }, + { + "epoch": 0.34464, + "grad_norm": 0.711353448068408, + "learning_rate": 4.9427222217493855e-06, + "loss": 0.4176962375640869, + "memory(GiB)": 41.86, + "step": 1077, + "token_acc": 0.9120452137054045, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.34496, + "grad_norm": 0.6699211751357865, + "learning_rate": 4.942534378811687e-06, + "loss": 0.35760003328323364, + "memory(GiB)": 41.86, + "step": 1078, + "token_acc": 0.9115822130299897, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.34528, + "grad_norm": 0.7154746650066534, + "learning_rate": 4.942346231942955e-06, + "loss": 0.3730897903442383, + "memory(GiB)": 41.86, + "step": 1079, + "token_acc": 0.8691860465116279, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.3456, + "grad_norm": 0.6978603050491902, + "learning_rate": 4.942157781166604e-06, + "loss": 0.3949849009513855, + "memory(GiB)": 41.86, + "step": 1080, + "token_acc": 0.9230114844746916, + "train_speed(iter/s)": 0.241779 + }, + { + "epoch": 0.34592, + "grad_norm": 0.6898533390265277, + "learning_rate": 4.9419690265060805e-06, + "loss": 0.3743131160736084, + "memory(GiB)": 41.86, + "step": 1081, + "token_acc": 0.941398865784499, + "train_speed(iter/s)": 0.241785 + }, + { + "epoch": 0.34624, + "grad_norm": 0.6489661496391887, + "learning_rate": 4.9417799679848746e-06, + "loss": 0.3678518533706665, + "memory(GiB)": 41.86, + "step": 1082, + "token_acc": 0.928115552569701, + "train_speed(iter/s)": 0.241784 + }, + { + "epoch": 0.34656, + "grad_norm": 0.6808402783983941, + "learning_rate": 4.94159060562651e-06, + "loss": 0.3900475800037384, + "memory(GiB)": 41.86, + "step": 1083, + "token_acc": 0.9519443566234588, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.34688, + "grad_norm": 0.7062166542541406, + "learning_rate": 4.94140093945455e-06, + "loss": 0.4727829396724701, + "memory(GiB)": 41.86, + "step": 1084, + "token_acc": 0.8528102392877017, + "train_speed(iter/s)": 0.241801 + }, + { + "epoch": 0.3472, + "grad_norm": 0.6623544272524805, + "learning_rate": 4.941210969492596e-06, + "loss": 0.4872356951236725, + "memory(GiB)": 41.86, + "step": 1085, + "token_acc": 0.8134311253724501, + "train_speed(iter/s)": 0.241779 + }, + { + "epoch": 0.34752, + "grad_norm": 0.6811092097711963, + "learning_rate": 4.941020695764284e-06, + "loss": 0.37408387660980225, + "memory(GiB)": 41.86, + "step": 1086, + "token_acc": 0.8588516746411483, + "train_speed(iter/s)": 0.24178 + }, + { + "epoch": 0.34784, + "grad_norm": 0.7291610143345499, + "learning_rate": 4.940830118293292e-06, + "loss": 0.37366509437561035, + "memory(GiB)": 41.86, + "step": 1087, + "token_acc": 0.933755942947702, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.34816, + "grad_norm": 0.6416577874791866, + "learning_rate": 4.940639237103334e-06, + "loss": 0.30158889293670654, + "memory(GiB)": 41.86, + "step": 1088, + "token_acc": 0.9515550239234449, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.34848, + "grad_norm": 0.7011676704182548, + "learning_rate": 4.940448052218163e-06, + "loss": 0.3744981586933136, + "memory(GiB)": 41.86, + "step": 1089, + "token_acc": 0.8829717291255753, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.3488, + "grad_norm": 0.7185423471294144, + "learning_rate": 4.940256563661567e-06, + "loss": 0.41718363761901855, + "memory(GiB)": 41.86, + "step": 
1090, + "token_acc": 0.8824457593688363, + "train_speed(iter/s)": 0.241762 + }, + { + "epoch": 0.34912, + "grad_norm": 0.6852517068291895, + "learning_rate": 4.940064771457372e-06, + "loss": 0.4089573323726654, + "memory(GiB)": 41.86, + "step": 1091, + "token_acc": 0.8609744094488189, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.34944, + "grad_norm": 0.6851081004645617, + "learning_rate": 4.939872675629446e-06, + "loss": 0.4145240783691406, + "memory(GiB)": 41.86, + "step": 1092, + "token_acc": 0.8397667314634824, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.34976, + "grad_norm": 0.7738099378707483, + "learning_rate": 4.939680276201692e-06, + "loss": 0.3972246050834656, + "memory(GiB)": 41.86, + "step": 1093, + "token_acc": 0.8936049801924165, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.35008, + "grad_norm": 0.6193826122392153, + "learning_rate": 4.939487573198048e-06, + "loss": 0.4223909378051758, + "memory(GiB)": 41.86, + "step": 1094, + "token_acc": 0.8365563406470807, + "train_speed(iter/s)": 0.241759 + }, + { + "epoch": 0.3504, + "grad_norm": 0.7032139892133532, + "learning_rate": 4.939294566642495e-06, + "loss": 0.42995506525039673, + "memory(GiB)": 41.86, + "step": 1095, + "token_acc": 0.9166121648136036, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.35072, + "grad_norm": 0.6859780986295941, + "learning_rate": 4.939101256559049e-06, + "loss": 0.4055121839046478, + "memory(GiB)": 41.86, + "step": 1096, + "token_acc": 0.8750417641162713, + "train_speed(iter/s)": 0.241776 + }, + { + "epoch": 0.35104, + "grad_norm": 0.6708110233130613, + "learning_rate": 4.9389076429717635e-06, + "loss": 0.35146331787109375, + "memory(GiB)": 41.86, + "step": 1097, + "token_acc": 0.8775349478243749, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.35136, + "grad_norm": 0.6480919097223063, + "learning_rate": 4.93871372590473e-06, + "loss": 0.40469130873680115, + "memory(GiB)": 41.86, + "step": 1098, + "token_acc": 0.8575268817204301, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.35168, + "grad_norm": 0.6091721521713579, + "learning_rate": 4.93851950538208e-06, + "loss": 0.3288300037384033, + "memory(GiB)": 41.86, + "step": 1099, + "token_acc": 0.9411146161934806, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.352, + "grad_norm": 0.673778802554477, + "learning_rate": 4.938324981427978e-06, + "loss": 0.36022132635116577, + "memory(GiB)": 41.86, + "step": 1100, + "token_acc": 0.8976034858387799, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.35232, + "grad_norm": 0.6469164440986273, + "learning_rate": 4.938130154066632e-06, + "loss": 0.3891223073005676, + "memory(GiB)": 41.86, + "step": 1101, + "token_acc": 0.9247217340363211, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.35264, + "grad_norm": 0.6910427647261406, + "learning_rate": 4.937935023322282e-06, + "loss": 0.3942750096321106, + "memory(GiB)": 41.86, + "step": 1102, + "token_acc": 0.8662646328485278, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.35296, + "grad_norm": 0.6825205390504593, + "learning_rate": 4.937739589219212e-06, + "loss": 0.31600990891456604, + "memory(GiB)": 41.86, + "step": 1103, + "token_acc": 0.9042763157894737, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.35328, + "grad_norm": 0.6784097151886025, + "learning_rate": 4.937543851781737e-06, + "loss": 0.30458056926727295, + "memory(GiB)": 41.86, + "step": 1104, + "token_acc": 0.9145125553914328, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.3536, + "grad_norm": 
0.6967168007513448, + "learning_rate": 4.937347811034216e-06, + "loss": 0.4172084927558899, + "memory(GiB)": 41.86, + "step": 1105, + "token_acc": 0.892027972027972, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.35392, + "grad_norm": 0.6082566458676967, + "learning_rate": 4.937151467001041e-06, + "loss": 0.36090007424354553, + "memory(GiB)": 41.86, + "step": 1106, + "token_acc": 0.8916573348264277, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.35424, + "grad_norm": 0.6691130311835397, + "learning_rate": 4.936954819706644e-06, + "loss": 0.40014395117759705, + "memory(GiB)": 41.86, + "step": 1107, + "token_acc": 0.9053346265761397, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.35456, + "grad_norm": 0.6927884285081477, + "learning_rate": 4.9367578691754945e-06, + "loss": 0.3111475706100464, + "memory(GiB)": 41.86, + "step": 1108, + "token_acc": 0.8666136303367807, + "train_speed(iter/s)": 0.241741 + }, + { + "epoch": 0.35488, + "grad_norm": 0.7107899679212164, + "learning_rate": 4.9365606154321e-06, + "loss": 0.38657039403915405, + "memory(GiB)": 41.86, + "step": 1109, + "token_acc": 0.8705679862306368, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.3552, + "grad_norm": 0.6522591424559467, + "learning_rate": 4.936363058501005e-06, + "loss": 0.37128379940986633, + "memory(GiB)": 41.86, + "step": 1110, + "token_acc": 0.9171156237049316, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.35552, + "grad_norm": 0.6754761586672714, + "learning_rate": 4.936165198406791e-06, + "loss": 0.3615378439426422, + "memory(GiB)": 41.86, + "step": 1111, + "token_acc": 0.8706424661480842, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.35584, + "grad_norm": 0.634825366047745, + "learning_rate": 4.93596703517408e-06, + "loss": 0.40686851739883423, + "memory(GiB)": 41.86, + "step": 1112, + "token_acc": 0.8639191290824261, + "train_speed(iter/s)": 0.24174 + }, + { + "epoch": 0.35616, + "grad_norm": 0.697063175832958, + "learning_rate": 4.93576856882753e-06, + "loss": 0.41464829444885254, + "memory(GiB)": 41.86, + "step": 1113, + "token_acc": 0.9200483091787439, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.35648, + "grad_norm": 0.7294070261385714, + "learning_rate": 4.935569799391835e-06, + "loss": 0.32030242681503296, + "memory(GiB)": 41.86, + "step": 1114, + "token_acc": 0.8916558861578266, + "train_speed(iter/s)": 0.241756 + }, + { + "epoch": 0.3568, + "grad_norm": 0.6895225812926, + "learning_rate": 4.935370726891729e-06, + "loss": 0.3237505555152893, + "memory(GiB)": 41.86, + "step": 1115, + "token_acc": 0.9324675324675324, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.35712, + "grad_norm": 0.6516465092404718, + "learning_rate": 4.935171351351984e-06, + "loss": 0.4073963761329651, + "memory(GiB)": 41.86, + "step": 1116, + "token_acc": 0.8517436197155659, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.35744, + "grad_norm": 0.6948335576999625, + "learning_rate": 4.934971672797408e-06, + "loss": 0.41810113191604614, + "memory(GiB)": 41.86, + "step": 1117, + "token_acc": 0.8858490566037736, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.35776, + "grad_norm": 0.6224863145580068, + "learning_rate": 4.9347716912528475e-06, + "loss": 0.3677929937839508, + "memory(GiB)": 41.86, + "step": 1118, + "token_acc": 0.8866436668309512, + "train_speed(iter/s)": 0.241727 + }, + { + "epoch": 0.35808, + "grad_norm": 0.7130037098741611, + "learning_rate": 4.9345714067431875e-06, + "loss": 0.3657136559486389, + "memory(GiB)": 41.86, + "step": 
1119, + "token_acc": 0.8947696139476962, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.3584, + "grad_norm": 0.64470976358948, + "learning_rate": 4.9343708192933485e-06, + "loss": 0.38068661093711853, + "memory(GiB)": 41.86, + "step": 1120, + "token_acc": 0.8431585816837067, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.35872, + "grad_norm": 0.656015823275593, + "learning_rate": 4.934169928928292e-06, + "loss": 0.4394170045852661, + "memory(GiB)": 41.86, + "step": 1121, + "token_acc": 0.8100164589701387, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.35904, + "grad_norm": 0.711579921360886, + "learning_rate": 4.933968735673014e-06, + "loss": 0.41988426446914673, + "memory(GiB)": 41.86, + "step": 1122, + "token_acc": 0.9545009784735812, + "train_speed(iter/s)": 0.241736 + }, + { + "epoch": 0.35936, + "grad_norm": 0.6595551296372992, + "learning_rate": 4.93376723955255e-06, + "loss": 0.30958184599876404, + "memory(GiB)": 41.86, + "step": 1123, + "token_acc": 0.9328220858895706, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.35968, + "grad_norm": 0.6330432187421717, + "learning_rate": 4.933565440591972e-06, + "loss": 0.34373384714126587, + "memory(GiB)": 41.86, + "step": 1124, + "token_acc": 0.9066886870355079, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.36, + "grad_norm": 0.6905504709253509, + "learning_rate": 4.933363338816392e-06, + "loss": 0.46866172552108765, + "memory(GiB)": 41.86, + "step": 1125, + "token_acc": 0.8960176991150443, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.36032, + "grad_norm": 0.7360761758270332, + "learning_rate": 4.933160934250957e-06, + "loss": 0.4028562903404236, + "memory(GiB)": 41.86, + "step": 1126, + "token_acc": 0.8769771528998243, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.36064, + "grad_norm": 0.6850779688072975, + "learning_rate": 4.932958226920852e-06, + "loss": 0.4001652002334595, + "memory(GiB)": 41.86, + "step": 1127, + "token_acc": 0.8523866827115925, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.36096, + "grad_norm": 0.6357546565893425, + "learning_rate": 4.932755216851302e-06, + "loss": 0.3784424066543579, + "memory(GiB)": 41.86, + "step": 1128, + "token_acc": 0.9150990099009901, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.36128, + "grad_norm": 0.6627555587921015, + "learning_rate": 4.932551904067566e-06, + "loss": 0.456253319978714, + "memory(GiB)": 41.86, + "step": 1129, + "token_acc": 0.8464014716026673, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.3616, + "grad_norm": 0.6851720246085475, + "learning_rate": 4.932348288594945e-06, + "loss": 0.4036809206008911, + "memory(GiB)": 41.86, + "step": 1130, + "token_acc": 0.8876443822191109, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.36192, + "grad_norm": 0.66097432071818, + "learning_rate": 4.932144370458774e-06, + "loss": 0.5042710304260254, + "memory(GiB)": 41.86, + "step": 1131, + "token_acc": 0.849217088729944, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.36224, + "grad_norm": 0.6773498930442703, + "learning_rate": 4.931940149684428e-06, + "loss": 0.3493019938468933, + "memory(GiB)": 41.86, + "step": 1132, + "token_acc": 0.9154034229828851, + "train_speed(iter/s)": 0.241726 + }, + { + "epoch": 0.36256, + "grad_norm": 0.7144635376453711, + "learning_rate": 4.931735626297318e-06, + "loss": 0.3882347345352173, + "memory(GiB)": 41.86, + "step": 1133, + "token_acc": 0.8629531388152077, + "train_speed(iter/s)": 0.241733 + }, + { + "epoch": 0.36288, + "grad_norm": 
0.6264789557526304, + "learning_rate": 4.931530800322893e-06, + "loss": 0.3641466498374939, + "memory(GiB)": 41.86, + "step": 1134, + "token_acc": 0.8949232585596222, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.3632, + "grad_norm": 0.6711752637018957, + "learning_rate": 4.931325671786641e-06, + "loss": 0.41077274084091187, + "memory(GiB)": 41.86, + "step": 1135, + "token_acc": 0.9482327740092824, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.36352, + "grad_norm": 0.687224253913364, + "learning_rate": 4.931120240714087e-06, + "loss": 0.3374726176261902, + "memory(GiB)": 41.86, + "step": 1136, + "token_acc": 0.91725768321513, + "train_speed(iter/s)": 0.241756 + }, + { + "epoch": 0.36384, + "grad_norm": 0.7097046731677357, + "learning_rate": 4.930914507130792e-06, + "loss": 0.43960127234458923, + "memory(GiB)": 41.86, + "step": 1137, + "token_acc": 0.9224683544303798, + "train_speed(iter/s)": 0.241759 + }, + { + "epoch": 0.36416, + "grad_norm": 0.6267255491012794, + "learning_rate": 4.930708471062357e-06, + "loss": 0.3667910695075989, + "memory(GiB)": 41.86, + "step": 1138, + "token_acc": 0.9364186851211073, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.36448, + "grad_norm": 0.6659234206040214, + "learning_rate": 4.9305021325344195e-06, + "loss": 0.3990696966648102, + "memory(GiB)": 41.86, + "step": 1139, + "token_acc": 0.8620801033591732, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.3648, + "grad_norm": 0.6849169961778803, + "learning_rate": 4.9302954915726535e-06, + "loss": 0.3535306453704834, + "memory(GiB)": 41.86, + "step": 1140, + "token_acc": 0.8868406125921724, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.36512, + "grad_norm": 0.6677108033299477, + "learning_rate": 4.930088548202774e-06, + "loss": 0.36606258153915405, + "memory(GiB)": 41.86, + "step": 1141, + "token_acc": 0.8998161764705882, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.36544, + "grad_norm": 0.6729585071300545, + "learning_rate": 4.92988130245053e-06, + "loss": 0.3869559168815613, + "memory(GiB)": 41.86, + "step": 1142, + "token_acc": 0.9055140723721998, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.36576, + "grad_norm": 0.6698570710268877, + "learning_rate": 4.929673754341709e-06, + "loss": 0.38185203075408936, + "memory(GiB)": 41.86, + "step": 1143, + "token_acc": 0.8783068783068783, + "train_speed(iter/s)": 0.24175 + }, + { + "epoch": 0.36608, + "grad_norm": 0.6696950006787806, + "learning_rate": 4.92946590390214e-06, + "loss": 0.40389806032180786, + "memory(GiB)": 41.86, + "step": 1144, + "token_acc": 0.9007936507936508, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.3664, + "grad_norm": 0.672659052240664, + "learning_rate": 4.929257751157682e-06, + "loss": 0.3612380921840668, + "memory(GiB)": 41.86, + "step": 1145, + "token_acc": 0.8755862068965518, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.36672, + "grad_norm": 0.7074407230394344, + "learning_rate": 4.929049296134239e-06, + "loss": 0.38194817304611206, + "memory(GiB)": 41.86, + "step": 1146, + "token_acc": 0.8665951359084406, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.36704, + "grad_norm": 0.6361356626590131, + "learning_rate": 4.92884053885775e-06, + "loss": 0.34763285517692566, + "memory(GiB)": 41.86, + "step": 1147, + "token_acc": 0.9160063391442155, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.36736, + "grad_norm": 0.633676992190601, + "learning_rate": 4.928631479354189e-06, + "loss": 0.42593854665756226, + "memory(GiB)": 41.86, + "step": 
1148, + "token_acc": 0.9316678912564291, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.36768, + "grad_norm": 0.7030079137097175, + "learning_rate": 4.928422117649571e-06, + "loss": 0.5178966522216797, + "memory(GiB)": 41.86, + "step": 1149, + "token_acc": 0.8204469560750064, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.368, + "grad_norm": 0.6586554549553464, + "learning_rate": 4.928212453769947e-06, + "loss": 0.41002601385116577, + "memory(GiB)": 41.86, + "step": 1150, + "token_acc": 0.8994235738421785, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.36832, + "grad_norm": 0.6409697340377992, + "learning_rate": 4.9280024877414066e-06, + "loss": 0.4111045002937317, + "memory(GiB)": 41.86, + "step": 1151, + "token_acc": 0.9194902082685732, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.36864, + "grad_norm": 0.6577341888863575, + "learning_rate": 4.927792219590075e-06, + "loss": 0.4457213580608368, + "memory(GiB)": 41.86, + "step": 1152, + "token_acc": 0.9073625993778085, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.36896, + "grad_norm": 0.6232443739417185, + "learning_rate": 4.927581649342119e-06, + "loss": 0.2606956660747528, + "memory(GiB)": 41.86, + "step": 1153, + "token_acc": 0.9258638040925864, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.36928, + "grad_norm": 0.7510861430719112, + "learning_rate": 4.927370777023739e-06, + "loss": 0.45594000816345215, + "memory(GiB)": 41.86, + "step": 1154, + "token_acc": 0.8718132854578097, + "train_speed(iter/s)": 0.241768 + }, + { + "epoch": 0.3696, + "grad_norm": 0.6878323362981834, + "learning_rate": 4.927159602661173e-06, + "loss": 0.3695269227027893, + "memory(GiB)": 41.86, + "step": 1155, + "token_acc": 0.8955042527339003, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.36992, + "grad_norm": 0.6625686322644976, + "learning_rate": 4.926948126280701e-06, + "loss": 0.414303719997406, + "memory(GiB)": 41.86, + "step": 1156, + "token_acc": 0.9228925289652418, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.37024, + "grad_norm": 0.6265838430735825, + "learning_rate": 4.926736347908635e-06, + "loss": 0.3817584812641144, + "memory(GiB)": 41.86, + "step": 1157, + "token_acc": 0.8501144164759725, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.37056, + "grad_norm": 4.119687271251784, + "learning_rate": 4.926524267571329e-06, + "loss": 0.47082966566085815, + "memory(GiB)": 41.86, + "step": 1158, + "token_acc": 0.8178209086898985, + "train_speed(iter/s)": 0.241746 + }, + { + "epoch": 0.37088, + "grad_norm": 0.6808331941957133, + "learning_rate": 4.926311885295171e-06, + "loss": 0.394379585981369, + "memory(GiB)": 41.86, + "step": 1159, + "token_acc": 0.809587573647563, + "train_speed(iter/s)": 0.24175 + }, + { + "epoch": 0.3712, + "grad_norm": 0.6224612204840775, + "learning_rate": 4.926099201106589e-06, + "loss": 0.4529285430908203, + "memory(GiB)": 41.86, + "step": 1160, + "token_acc": 0.8982188295165394, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.37152, + "grad_norm": 0.6644035281480009, + "learning_rate": 4.9258862150320486e-06, + "loss": 0.4340992569923401, + "memory(GiB)": 41.86, + "step": 1161, + "token_acc": 0.8733668341708543, + "train_speed(iter/s)": 0.24174 + }, + { + "epoch": 0.37184, + "grad_norm": 0.6464055302721461, + "learning_rate": 4.925672927098051e-06, + "loss": 0.37106865644454956, + "memory(GiB)": 41.86, + "step": 1162, + "token_acc": 0.8918213457076566, + "train_speed(iter/s)": 0.24174 + }, + { + "epoch": 0.37216, + "grad_norm": 
0.6302720269584053, + "learning_rate": 4.925459337331136e-06, + "loss": 0.39688047766685486, + "memory(GiB)": 41.86, + "step": 1163, + "token_acc": 0.8548114800225098, + "train_speed(iter/s)": 0.241734 + }, + { + "epoch": 0.37248, + "grad_norm": 0.6550728370633362, + "learning_rate": 4.925245445757884e-06, + "loss": 0.35905706882476807, + "memory(GiB)": 41.86, + "step": 1164, + "token_acc": 0.9190270935960592, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.3728, + "grad_norm": 0.7235184570301425, + "learning_rate": 4.925031252404907e-06, + "loss": 0.37296849489212036, + "memory(GiB)": 41.86, + "step": 1165, + "token_acc": 0.9597355769230769, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.37312, + "grad_norm": 0.7335476183589521, + "learning_rate": 4.924816757298858e-06, + "loss": 0.47302213311195374, + "memory(GiB)": 41.86, + "step": 1166, + "token_acc": 0.8397090517241379, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.37344, + "grad_norm": 0.6502101574322988, + "learning_rate": 4.924601960466429e-06, + "loss": 0.39004456996917725, + "memory(GiB)": 41.86, + "step": 1167, + "token_acc": 0.9252806813782424, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.37376, + "grad_norm": 0.6999674139802613, + "learning_rate": 4.9243868619343454e-06, + "loss": 0.45789167284965515, + "memory(GiB)": 41.86, + "step": 1168, + "token_acc": 0.9388111888111889, + "train_speed(iter/s)": 0.241756 + }, + { + "epoch": 0.37408, + "grad_norm": 0.6513707902946715, + "learning_rate": 4.924171461729375e-06, + "loss": 0.37105005979537964, + "memory(GiB)": 41.86, + "step": 1169, + "token_acc": 0.9406617445993984, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.3744, + "grad_norm": 0.7351170326734847, + "learning_rate": 4.923955759878317e-06, + "loss": 0.4217795729637146, + "memory(GiB)": 41.86, + "step": 1170, + "token_acc": 0.8902097902097902, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.37472, + "grad_norm": 0.6490667751872011, + "learning_rate": 4.9237397564080155e-06, + "loss": 0.39287328720092773, + "memory(GiB)": 41.86, + "step": 1171, + "token_acc": 0.904895461499235, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.37504, + "grad_norm": 0.7079383090451424, + "learning_rate": 4.923523451345348e-06, + "loss": 0.3905605971813202, + "memory(GiB)": 41.86, + "step": 1172, + "token_acc": 0.8644732870510111, + "train_speed(iter/s)": 0.241784 + }, + { + "epoch": 0.37536, + "grad_norm": 0.6661722965535525, + "learning_rate": 4.9233068447172275e-06, + "loss": 0.4579063355922699, + "memory(GiB)": 41.86, + "step": 1173, + "token_acc": 0.8763396537510305, + "train_speed(iter/s)": 0.241783 + }, + { + "epoch": 0.37568, + "grad_norm": 0.6444545661706125, + "learning_rate": 4.923089936550608e-06, + "loss": 0.29571324586868286, + "memory(GiB)": 41.86, + "step": 1174, + "token_acc": 0.9402099497946144, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.376, + "grad_norm": 0.7068403579868691, + "learning_rate": 4.922872726872481e-06, + "loss": 0.43438851833343506, + "memory(GiB)": 41.86, + "step": 1175, + "token_acc": 0.9277218664226898, + "train_speed(iter/s)": 0.241788 + }, + { + "epoch": 0.37632, + "grad_norm": 0.6455929147372934, + "learning_rate": 4.922655215709873e-06, + "loss": 0.3272331655025482, + "memory(GiB)": 41.86, + "step": 1176, + "token_acc": 0.9211756847027388, + "train_speed(iter/s)": 0.241789 + }, + { + "epoch": 0.37664, + "grad_norm": 0.6654705870058897, + "learning_rate": 4.922437403089851e-06, + "loss": 0.4420502185821533, + "memory(GiB)": 41.86, 
+ "step": 1177, + "token_acc": 0.8059221113614419, + "train_speed(iter/s)": 0.241787 + }, + { + "epoch": 0.37696, + "grad_norm": 0.6559584523568502, + "learning_rate": 4.922219289039517e-06, + "loss": 0.34142589569091797, + "memory(GiB)": 41.86, + "step": 1178, + "token_acc": 0.926995457495133, + "train_speed(iter/s)": 0.241796 + }, + { + "epoch": 0.37728, + "grad_norm": 0.6681676742441536, + "learning_rate": 4.922000873586011e-06, + "loss": 0.4660865068435669, + "memory(GiB)": 41.86, + "step": 1179, + "token_acc": 0.815196394075982, + "train_speed(iter/s)": 0.241796 + }, + { + "epoch": 0.3776, + "grad_norm": 0.6727522374795533, + "learning_rate": 4.921782156756512e-06, + "loss": 0.4172089695930481, + "memory(GiB)": 41.86, + "step": 1180, + "token_acc": 0.8236613400616765, + "train_speed(iter/s)": 0.241798 + }, + { + "epoch": 0.37792, + "grad_norm": 0.843437709292062, + "learning_rate": 4.921563138578236e-06, + "loss": 0.40737003087997437, + "memory(GiB)": 41.86, + "step": 1181, + "token_acc": 0.9272373540856031, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.37824, + "grad_norm": 0.67337451906828, + "learning_rate": 4.921343819078435e-06, + "loss": 0.34142768383026123, + "memory(GiB)": 41.86, + "step": 1182, + "token_acc": 0.9274258219727346, + "train_speed(iter/s)": 0.24179 + }, + { + "epoch": 0.37856, + "grad_norm": 0.7129055699289771, + "learning_rate": 4.921124198284399e-06, + "loss": 0.4573971629142761, + "memory(GiB)": 41.86, + "step": 1183, + "token_acc": 0.8558091286307054, + "train_speed(iter/s)": 0.241786 + }, + { + "epoch": 0.37888, + "grad_norm": 0.6225298854307416, + "learning_rate": 4.920904276223457e-06, + "loss": 0.3546687960624695, + "memory(GiB)": 41.86, + "step": 1184, + "token_acc": 0.8893748392076152, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.3792, + "grad_norm": 0.7590166155739679, + "learning_rate": 4.920684052922975e-06, + "loss": 0.44733312726020813, + "memory(GiB)": 41.86, + "step": 1185, + "token_acc": 0.9061640066042927, + "train_speed(iter/s)": 0.241782 + }, + { + "epoch": 0.37952, + "grad_norm": 0.7228457109013919, + "learning_rate": 4.920463528410354e-06, + "loss": 0.46552446484565735, + "memory(GiB)": 41.86, + "step": 1186, + "token_acc": 0.7802926829268293, + "train_speed(iter/s)": 0.241786 + }, + { + "epoch": 0.37984, + "grad_norm": 0.7600234775059914, + "learning_rate": 4.920242702713037e-06, + "loss": 0.3014151453971863, + "memory(GiB)": 41.86, + "step": 1187, + "token_acc": 0.9319945230488361, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.38016, + "grad_norm": 0.6665412736917736, + "learning_rate": 4.9200215758585e-06, + "loss": 0.3813929259777069, + "memory(GiB)": 41.86, + "step": 1188, + "token_acc": 0.931189229618549, + "train_speed(iter/s)": 0.2418 + }, + { + "epoch": 0.38048, + "grad_norm": 0.6384311523947301, + "learning_rate": 4.919800147874259e-06, + "loss": 0.3531501591205597, + "memory(GiB)": 41.86, + "step": 1189, + "token_acc": 0.8704294478527608, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.3808, + "grad_norm": 0.6291720877357635, + "learning_rate": 4.919578418787866e-06, + "loss": 0.4022018313407898, + "memory(GiB)": 41.86, + "step": 1190, + "token_acc": 0.9055327373761778, + "train_speed(iter/s)": 0.241807 + }, + { + "epoch": 0.38112, + "grad_norm": 0.6952673888517285, + "learning_rate": 4.919356388626913e-06, + "loss": 0.4953402280807495, + "memory(GiB)": 41.86, + "step": 1191, + "token_acc": 0.8755641521598968, + "train_speed(iter/s)": 0.241815 + }, + { + "epoch": 0.38144, + "grad_norm": 
0.6726992152501142, + "learning_rate": 4.9191340574190274e-06, + "loss": 0.39621368050575256, + "memory(GiB)": 41.86, + "step": 1192, + "token_acc": 0.8839738941261784, + "train_speed(iter/s)": 0.241826 + }, + { + "epoch": 0.38176, + "grad_norm": 0.6908550529832542, + "learning_rate": 4.918911425191873e-06, + "loss": 0.4029190242290497, + "memory(GiB)": 41.86, + "step": 1193, + "token_acc": 0.9177570093457944, + "train_speed(iter/s)": 0.241824 + }, + { + "epoch": 0.38208, + "grad_norm": 0.6978236537801533, + "learning_rate": 4.918688491973154e-06, + "loss": 0.507056474685669, + "memory(GiB)": 41.86, + "step": 1194, + "token_acc": 0.9136377757067412, + "train_speed(iter/s)": 0.24183 + }, + { + "epoch": 0.3824, + "grad_norm": 0.6597534033668986, + "learning_rate": 4.9184652577906105e-06, + "loss": 0.34853118658065796, + "memory(GiB)": 41.86, + "step": 1195, + "token_acc": 0.9379422972237343, + "train_speed(iter/s)": 0.241834 + }, + { + "epoch": 0.38272, + "grad_norm": 0.6405611525435052, + "learning_rate": 4.91824172267202e-06, + "loss": 0.29890239238739014, + "memory(GiB)": 41.86, + "step": 1196, + "token_acc": 0.9506225848003435, + "train_speed(iter/s)": 0.241842 + }, + { + "epoch": 0.38304, + "grad_norm": 0.7429654757354823, + "learning_rate": 4.918017886645197e-06, + "loss": 0.40424633026123047, + "memory(GiB)": 41.86, + "step": 1197, + "token_acc": 0.805796488258938, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.38336, + "grad_norm": 0.636474728976666, + "learning_rate": 4.917793749737993e-06, + "loss": 0.4385982155799866, + "memory(GiB)": 41.86, + "step": 1198, + "token_acc": 0.9071259709557582, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.38368, + "grad_norm": 0.754397692601058, + "learning_rate": 4.917569311978301e-06, + "loss": 0.45639294385910034, + "memory(GiB)": 41.86, + "step": 1199, + "token_acc": 0.8225186982009298, + "train_speed(iter/s)": 0.241831 + }, + { + "epoch": 0.384, + "grad_norm": 0.693403017315352, + "learning_rate": 4.917344573394046e-06, + "loss": 0.34493494033813477, + "memory(GiB)": 41.86, + "step": 1200, + "token_acc": 0.8523509174311926, + "train_speed(iter/s)": 0.24184 + }, + { + "epoch": 0.38432, + "grad_norm": 0.6627636954742108, + "learning_rate": 4.917119534013194e-06, + "loss": 0.3570840656757355, + "memory(GiB)": 41.86, + "step": 1201, + "token_acc": 0.9141392567278941, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.38464, + "grad_norm": 0.6386393946459572, + "learning_rate": 4.916894193863747e-06, + "loss": 0.4296616017818451, + "memory(GiB)": 41.86, + "step": 1202, + "token_acc": 0.8115250504679757, + "train_speed(iter/s)": 0.241826 + }, + { + "epoch": 0.38496, + "grad_norm": 0.6337198934410836, + "learning_rate": 4.916668552973743e-06, + "loss": 0.366534560918808, + "memory(GiB)": 41.86, + "step": 1203, + "token_acc": 0.9160521837776517, + "train_speed(iter/s)": 0.241828 + }, + { + "epoch": 0.38528, + "grad_norm": 0.7070414887436854, + "learning_rate": 4.916442611371262e-06, + "loss": 0.48895466327667236, + "memory(GiB)": 41.86, + "step": 1204, + "token_acc": 0.8914709517923363, + "train_speed(iter/s)": 0.241839 + }, + { + "epoch": 0.3856, + "grad_norm": 0.726825962842169, + "learning_rate": 4.916216369084417e-06, + "loss": 0.37846821546554565, + "memory(GiB)": 41.86, + "step": 1205, + "token_acc": 0.88135103926097, + "train_speed(iter/s)": 0.241844 + }, + { + "epoch": 0.38592, + "grad_norm": 0.7188549103775239, + "learning_rate": 4.915989826141359e-06, + "loss": 0.40104061365127563, + "memory(GiB)": 41.86, + "step": 
1206, + "token_acc": 0.8613020622725435, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.38624, + "grad_norm": 0.7286024555990904, + "learning_rate": 4.915762982570279e-06, + "loss": 0.39014601707458496, + "memory(GiB)": 41.86, + "step": 1207, + "token_acc": 0.8764253513656854, + "train_speed(iter/s)": 0.24184 + }, + { + "epoch": 0.38656, + "grad_norm": 0.695541091414225, + "learning_rate": 4.915535838399403e-06, + "loss": 0.36902227997779846, + "memory(GiB)": 41.86, + "step": 1208, + "token_acc": 0.940974605353466, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.38688, + "grad_norm": 0.6431958480102833, + "learning_rate": 4.915308393656995e-06, + "loss": 0.397407203912735, + "memory(GiB)": 41.86, + "step": 1209, + "token_acc": 0.8522214302684723, + "train_speed(iter/s)": 0.241811 + }, + { + "epoch": 0.3872, + "grad_norm": 0.5946700706891079, + "learning_rate": 4.915080648371356e-06, + "loss": 0.2938351333141327, + "memory(GiB)": 41.86, + "step": 1210, + "token_acc": 0.9416149068322981, + "train_speed(iter/s)": 0.241817 + }, + { + "epoch": 0.38752, + "grad_norm": 0.6266178205566414, + "learning_rate": 4.9148526025708265e-06, + "loss": 0.3515031933784485, + "memory(GiB)": 41.86, + "step": 1211, + "token_acc": 0.8622662266226623, + "train_speed(iter/s)": 0.241796 + }, + { + "epoch": 0.38784, + "grad_norm": 0.6256068778987887, + "learning_rate": 4.914624256283782e-06, + "loss": 0.3115041255950928, + "memory(GiB)": 41.86, + "step": 1212, + "token_acc": 0.8772889917912019, + "train_speed(iter/s)": 0.241804 + }, + { + "epoch": 0.38816, + "grad_norm": 0.6732607754491646, + "learning_rate": 4.914395609538635e-06, + "loss": 0.3414084315299988, + "memory(GiB)": 41.86, + "step": 1213, + "token_acc": 0.8855482566953007, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.38848, + "grad_norm": 0.6043768378809788, + "learning_rate": 4.91416666236384e-06, + "loss": 0.3297019600868225, + "memory(GiB)": 41.86, + "step": 1214, + "token_acc": 0.8486761353673505, + "train_speed(iter/s)": 0.241816 + }, + { + "epoch": 0.3888, + "grad_norm": 0.6785425306994809, + "learning_rate": 4.913937414787883e-06, + "loss": 0.37749630212783813, + "memory(GiB)": 41.86, + "step": 1215, + "token_acc": 0.8884940026654821, + "train_speed(iter/s)": 0.241818 + }, + { + "epoch": 0.38912, + "grad_norm": 0.5931015639640956, + "learning_rate": 4.913707866839289e-06, + "loss": 0.37859880924224854, + "memory(GiB)": 41.86, + "step": 1216, + "token_acc": 0.9026143790849673, + "train_speed(iter/s)": 0.241815 + }, + { + "epoch": 0.38944, + "grad_norm": 0.6641848978681966, + "learning_rate": 4.9134780185466235e-06, + "loss": 0.3591747581958771, + "memory(GiB)": 41.86, + "step": 1217, + "token_acc": 0.8765625, + "train_speed(iter/s)": 0.241817 + }, + { + "epoch": 0.38976, + "grad_norm": 0.6886237469900918, + "learning_rate": 4.913247869938486e-06, + "loss": 0.4051769971847534, + "memory(GiB)": 41.86, + "step": 1218, + "token_acc": 0.8421866941722538, + "train_speed(iter/s)": 0.241826 + }, + { + "epoch": 0.39008, + "grad_norm": 0.6404557259309062, + "learning_rate": 4.913017421043515e-06, + "loss": 0.2845143675804138, + "memory(GiB)": 41.86, + "step": 1219, + "token_acc": 0.9006609252954135, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.3904, + "grad_norm": 0.7065019630133275, + "learning_rate": 4.912786671890385e-06, + "loss": 0.41734203696250916, + "memory(GiB)": 41.86, + "step": 1220, + "token_acc": 0.8467128027681661, + "train_speed(iter/s)": 0.241835 + }, + { + "epoch": 0.39072, + "grad_norm": 
0.6627709988236835, + "learning_rate": 4.912555622507809e-06, + "loss": 0.3521880507469177, + "memory(GiB)": 41.86, + "step": 1221, + "token_acc": 0.92608, + "train_speed(iter/s)": 0.241824 + }, + { + "epoch": 0.39104, + "grad_norm": 0.7339084472401267, + "learning_rate": 4.9123242729245385e-06, + "loss": 0.46014976501464844, + "memory(GiB)": 41.86, + "step": 1222, + "token_acc": 0.8298649142022636, + "train_speed(iter/s)": 0.24182 + }, + { + "epoch": 0.39136, + "grad_norm": 0.6917399666886811, + "learning_rate": 4.912092623169359e-06, + "loss": 0.35904645919799805, + "memory(GiB)": 41.86, + "step": 1223, + "token_acc": 0.9060235975988408, + "train_speed(iter/s)": 0.241826 + }, + { + "epoch": 0.39168, + "grad_norm": 0.6661464163720064, + "learning_rate": 4.911860673271096e-06, + "loss": 0.3546287417411804, + "memory(GiB)": 41.86, + "step": 1224, + "token_acc": 0.8770855332629356, + "train_speed(iter/s)": 0.241827 + }, + { + "epoch": 0.392, + "grad_norm": 0.7446428900027107, + "learning_rate": 4.911628423258613e-06, + "loss": 0.4017045497894287, + "memory(GiB)": 41.86, + "step": 1225, + "token_acc": 0.9037958115183246, + "train_speed(iter/s)": 0.241801 + }, + { + "epoch": 0.39232, + "grad_norm": 0.761212677967474, + "learning_rate": 4.9113958731608065e-06, + "loss": 0.3983163833618164, + "memory(GiB)": 41.86, + "step": 1226, + "token_acc": 0.8756441855254313, + "train_speed(iter/s)": 0.241798 + }, + { + "epoch": 0.39264, + "grad_norm": 0.7339870612776599, + "learning_rate": 4.911163023006616e-06, + "loss": 0.4625524580478668, + "memory(GiB)": 41.86, + "step": 1227, + "token_acc": 0.9090499774876182, + "train_speed(iter/s)": 0.241805 + }, + { + "epoch": 0.39296, + "grad_norm": 0.6983561260249203, + "learning_rate": 4.910929872825014e-06, + "loss": 0.40505319833755493, + "memory(GiB)": 41.86, + "step": 1228, + "token_acc": 0.8510420923579893, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.39328, + "grad_norm": 0.6183721034355633, + "learning_rate": 4.910696422645014e-06, + "loss": 0.30205339193344116, + "memory(GiB)": 41.86, + "step": 1229, + "token_acc": 0.8776041666666666, + "train_speed(iter/s)": 0.241786 + }, + { + "epoch": 0.3936, + "grad_norm": 0.7292607827967701, + "learning_rate": 4.9104626724956624e-06, + "loss": 0.36497604846954346, + "memory(GiB)": 41.86, + "step": 1230, + "token_acc": 0.9373318988703604, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.39392, + "grad_norm": 0.6729194907914177, + "learning_rate": 4.910228622406047e-06, + "loss": 0.38161665201187134, + "memory(GiB)": 41.86, + "step": 1231, + "token_acc": 0.8459227467811159, + "train_speed(iter/s)": 0.241794 + }, + { + "epoch": 0.39424, + "grad_norm": 0.7534044697464082, + "learning_rate": 4.909994272405291e-06, + "loss": 0.4532603621482849, + "memory(GiB)": 41.86, + "step": 1232, + "token_acc": 0.8577903682719547, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.39456, + "grad_norm": 0.6304337304158179, + "learning_rate": 4.909759622522554e-06, + "loss": 0.4178975224494934, + "memory(GiB)": 41.86, + "step": 1233, + "token_acc": 0.878964552238806, + "train_speed(iter/s)": 0.24178 + }, + { + "epoch": 0.39488, + "grad_norm": 0.6556346864759135, + "learning_rate": 4.909524672787036e-06, + "loss": 0.4755871891975403, + "memory(GiB)": 41.86, + "step": 1234, + "token_acc": 0.9497206703910615, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.3952, + "grad_norm": 0.6754826163963453, + "learning_rate": 4.9092894232279705e-06, + "loss": 0.45528972148895264, + "memory(GiB)": 41.86, + "step": 1235, 
+ "token_acc": 0.7545003913383772, + "train_speed(iter/s)": 0.241772 + }, + { + "epoch": 0.39552, + "grad_norm": 0.7217759079137694, + "learning_rate": 4.909053873874632e-06, + "loss": 0.38957154750823975, + "memory(GiB)": 41.86, + "step": 1236, + "token_acc": 0.9230421004979629, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.39584, + "grad_norm": 0.7430376150442589, + "learning_rate": 4.90881802475633e-06, + "loss": 0.4569467604160309, + "memory(GiB)": 41.86, + "step": 1237, + "token_acc": 0.9448040885860307, + "train_speed(iter/s)": 0.241771 + }, + { + "epoch": 0.39616, + "grad_norm": 0.6884662784481184, + "learning_rate": 4.908581875902411e-06, + "loss": 0.3652569651603699, + "memory(GiB)": 41.86, + "step": 1238, + "token_acc": 0.9205572289156626, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.39648, + "grad_norm": 0.6273160357497422, + "learning_rate": 4.9083454273422596e-06, + "loss": 0.45417994260787964, + "memory(GiB)": 41.86, + "step": 1239, + "token_acc": 0.8865603644646924, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.3968, + "grad_norm": 0.8188890254089909, + "learning_rate": 4.908108679105299e-06, + "loss": 0.37922215461730957, + "memory(GiB)": 41.86, + "step": 1240, + "token_acc": 0.8701376421304609, + "train_speed(iter/s)": 0.241768 + }, + { + "epoch": 0.39712, + "grad_norm": 0.620364193075366, + "learning_rate": 4.9078716312209885e-06, + "loss": 0.4137468636035919, + "memory(GiB)": 41.86, + "step": 1241, + "token_acc": 0.9157801418439716, + "train_speed(iter/s)": 0.241772 + }, + { + "epoch": 0.39744, + "grad_norm": 0.6056946045549613, + "learning_rate": 4.907634283718823e-06, + "loss": 0.2970924377441406, + "memory(GiB)": 41.86, + "step": 1242, + "token_acc": 0.9325077399380804, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.39776, + "grad_norm": 0.6984748084271502, + "learning_rate": 4.9073966366283365e-06, + "loss": 0.4137699604034424, + "memory(GiB)": 41.86, + "step": 1243, + "token_acc": 0.8169642857142857, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.39808, + "grad_norm": 0.6521284694929559, + "learning_rate": 4.907158689979101e-06, + "loss": 0.3726159632205963, + "memory(GiB)": 41.86, + "step": 1244, + "token_acc": 0.8911675126903553, + "train_speed(iter/s)": 0.24177 + }, + { + "epoch": 0.3984, + "grad_norm": 0.6789197438937603, + "learning_rate": 4.906920443800726e-06, + "loss": 0.38499879837036133, + "memory(GiB)": 41.86, + "step": 1245, + "token_acc": 0.912289156626506, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.39872, + "grad_norm": 0.6684047846074724, + "learning_rate": 4.906681898122854e-06, + "loss": 0.38312456011772156, + "memory(GiB)": 41.86, + "step": 1246, + "token_acc": 0.8865716109810599, + "train_speed(iter/s)": 0.241778 + }, + { + "epoch": 0.39904, + "grad_norm": 0.6282065736802485, + "learning_rate": 4.90644305297517e-06, + "loss": 0.3512309789657593, + "memory(GiB)": 41.86, + "step": 1247, + "token_acc": 0.9296849087893864, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.39936, + "grad_norm": 0.7257045224182788, + "learning_rate": 4.906203908387394e-06, + "loss": 0.33827269077301025, + "memory(GiB)": 41.86, + "step": 1248, + "token_acc": 0.912292817679558, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.39968, + "grad_norm": 0.6364319841259414, + "learning_rate": 4.905964464389282e-06, + "loss": 0.3173280954360962, + "memory(GiB)": 41.86, + "step": 1249, + "token_acc": 0.9445591271011501, + "train_speed(iter/s)": 0.241768 + }, + { + "epoch": 0.4, + "grad_norm": 
0.9165513277227055, + "learning_rate": 4.90572472101063e-06, + "loss": 0.3990221619606018, + "memory(GiB)": 54.25, + "step": 1250, + "token_acc": 0.8329662261380323, + "train_speed(iter/s)": 0.241736 + }, + { + "epoch": 0.40032, + "grad_norm": 0.737797033455468, + "learning_rate": 4.9054846782812696e-06, + "loss": 0.35373908281326294, + "memory(GiB)": 54.25, + "step": 1251, + "token_acc": 0.9015907252628741, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.40064, + "grad_norm": 0.627550916851112, + "learning_rate": 4.90524433623107e-06, + "loss": 0.41584205627441406, + "memory(GiB)": 54.25, + "step": 1252, + "token_acc": 0.8907161803713528, + "train_speed(iter/s)": 0.241749 + }, + { + "epoch": 0.40096, + "grad_norm": 0.7021528239286693, + "learning_rate": 4.905003694889937e-06, + "loss": 0.44098883867263794, + "memory(GiB)": 54.25, + "step": 1253, + "token_acc": 0.9196217494089834, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.40128, + "grad_norm": 0.6817692052331, + "learning_rate": 4.904762754287815e-06, + "loss": 0.383311927318573, + "memory(GiB)": 54.25, + "step": 1254, + "token_acc": 0.851401179941003, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.4016, + "grad_norm": 0.6982658539307417, + "learning_rate": 4.904521514454686e-06, + "loss": 0.37457275390625, + "memory(GiB)": 54.25, + "step": 1255, + "token_acc": 0.8813305364099491, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.40192, + "grad_norm": 0.6507511703763804, + "learning_rate": 4.904279975420565e-06, + "loss": 0.41099709272384644, + "memory(GiB)": 54.25, + "step": 1256, + "token_acc": 0.8266789753057927, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.40224, + "grad_norm": 0.6524990256219668, + "learning_rate": 4.90403813721551e-06, + "loss": 0.3188368082046509, + "memory(GiB)": 54.25, + "step": 1257, + "token_acc": 0.8872517616912236, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.40256, + "grad_norm": 0.6529861659047583, + "learning_rate": 4.903795999869612e-06, + "loss": 0.3554418385028839, + "memory(GiB)": 54.25, + "step": 1258, + "token_acc": 0.911710606989577, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.40288, + "grad_norm": 0.6361371541720696, + "learning_rate": 4.903553563413002e-06, + "loss": 0.34924161434173584, + "memory(GiB)": 54.25, + "step": 1259, + "token_acc": 0.9198813056379822, + "train_speed(iter/s)": 0.241772 + }, + { + "epoch": 0.4032, + "grad_norm": 0.6646010876247846, + "learning_rate": 4.903310827875846e-06, + "loss": 0.3619754910469055, + "memory(GiB)": 54.25, + "step": 1260, + "token_acc": 0.9065196548418025, + "train_speed(iter/s)": 0.241781 + }, + { + "epoch": 0.40352, + "grad_norm": 0.7414888179473273, + "learning_rate": 4.903067793288349e-06, + "loss": 0.37907034158706665, + "memory(GiB)": 54.25, + "step": 1261, + "token_acc": 0.9004001778568252, + "train_speed(iter/s)": 0.241788 + }, + { + "epoch": 0.40384, + "grad_norm": 0.6765763886977797, + "learning_rate": 4.9028244596807525e-06, + "loss": 0.3840641975402832, + "memory(GiB)": 54.25, + "step": 1262, + "token_acc": 0.8237831176833025, + "train_speed(iter/s)": 0.241786 + }, + { + "epoch": 0.40416, + "grad_norm": 0.6129379509479677, + "learning_rate": 4.902580827083334e-06, + "loss": 0.31380969285964966, + "memory(GiB)": 54.25, + "step": 1263, + "token_acc": 0.9355913381454747, + "train_speed(iter/s)": 0.241787 + }, + { + "epoch": 0.40448, + "grad_norm": 0.710066685867633, + "learning_rate": 4.902336895526411e-06, + "loss": 0.3545820116996765, + "memory(GiB)": 54.25, + "step": 1264, 
+ "token_acc": 0.9085012740328932, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.4048, + "grad_norm": 0.6859696976861693, + "learning_rate": 4.902092665040334e-06, + "loss": 0.42846930027008057, + "memory(GiB)": 54.25, + "step": 1265, + "token_acc": 0.8158686730506156, + "train_speed(iter/s)": 0.241792 + }, + { + "epoch": 0.40512, + "grad_norm": 0.673405372761433, + "learning_rate": 4.901848135655497e-06, + "loss": 0.3189171850681305, + "memory(GiB)": 54.25, + "step": 1266, + "token_acc": 0.8818691588785047, + "train_speed(iter/s)": 0.241793 + }, + { + "epoch": 0.40544, + "grad_norm": 0.691116898447295, + "learning_rate": 4.901603307402324e-06, + "loss": 0.3380736708641052, + "memory(GiB)": 54.25, + "step": 1267, + "token_acc": 0.9290806754221388, + "train_speed(iter/s)": 0.241792 + }, + { + "epoch": 0.40576, + "grad_norm": 0.6561264355030649, + "learning_rate": 4.901358180311282e-06, + "loss": 0.3639150857925415, + "memory(GiB)": 54.25, + "step": 1268, + "token_acc": 0.926549623790756, + "train_speed(iter/s)": 0.241784 + }, + { + "epoch": 0.40608, + "grad_norm": 0.7397625833880722, + "learning_rate": 4.901112754412871e-06, + "loss": 0.48749417066574097, + "memory(GiB)": 54.25, + "step": 1269, + "token_acc": 0.905076679005817, + "train_speed(iter/s)": 0.241784 + }, + { + "epoch": 0.4064, + "grad_norm": 0.6176811249376831, + "learning_rate": 4.900867029737631e-06, + "loss": 0.3970845937728882, + "memory(GiB)": 54.25, + "step": 1270, + "token_acc": 0.847036328871893, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.40672, + "grad_norm": 0.7071864186200545, + "learning_rate": 4.900621006316138e-06, + "loss": 0.38648155331611633, + "memory(GiB)": 54.25, + "step": 1271, + "token_acc": 0.8641402423304976, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.40704, + "grad_norm": 0.7597324346283704, + "learning_rate": 4.900374684179005e-06, + "loss": 0.42800837755203247, + "memory(GiB)": 54.25, + "step": 1272, + "token_acc": 0.8444615978386724, + "train_speed(iter/s)": 0.241754 + }, + { + "epoch": 0.40736, + "grad_norm": 0.635838698864964, + "learning_rate": 4.900128063356883e-06, + "loss": 0.4137975871562958, + "memory(GiB)": 54.25, + "step": 1273, + "token_acc": 0.8675443310048361, + "train_speed(iter/s)": 0.24175 + }, + { + "epoch": 0.40768, + "grad_norm": 0.7121970266140076, + "learning_rate": 4.89988114388046e-06, + "loss": 0.4319247603416443, + "memory(GiB)": 54.25, + "step": 1274, + "token_acc": 0.874356333676622, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.408, + "grad_norm": 0.6497283912080786, + "learning_rate": 4.899633925780459e-06, + "loss": 0.3982517719268799, + "memory(GiB)": 54.25, + "step": 1275, + "token_acc": 0.8873112463376155, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.40832, + "grad_norm": 0.6549022941167305, + "learning_rate": 4.899386409087644e-06, + "loss": 0.35507720708847046, + "memory(GiB)": 54.25, + "step": 1276, + "token_acc": 0.8572237960339943, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.40864, + "grad_norm": 0.6313690332398607, + "learning_rate": 4.899138593832815e-06, + "loss": 0.3065604269504547, + "memory(GiB)": 54.25, + "step": 1277, + "token_acc": 0.9199124726477024, + "train_speed(iter/s)": 0.241752 + }, + { + "epoch": 0.40896, + "grad_norm": 0.627005255118462, + "learning_rate": 4.898890480046805e-06, + "loss": 0.37982720136642456, + "memory(GiB)": 54.25, + "step": 1278, + "token_acc": 0.877572448551029, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.40928, + "grad_norm": 0.625771566915828, + 
"learning_rate": 4.89864206776049e-06, + "loss": 0.37364453077316284, + "memory(GiB)": 54.25, + "step": 1279, + "token_acc": 0.9465668559628291, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.4096, + "grad_norm": 0.6081702196108284, + "learning_rate": 4.8983933570047806e-06, + "loss": 0.39983755350112915, + "memory(GiB)": 54.25, + "step": 1280, + "token_acc": 0.9480954374215153, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.40992, + "grad_norm": 0.6003866736523666, + "learning_rate": 4.898144347810623e-06, + "loss": 0.3459395170211792, + "memory(GiB)": 54.25, + "step": 1281, + "token_acc": 0.9392789373814042, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.41024, + "grad_norm": 0.6441754691657811, + "learning_rate": 4.897895040209003e-06, + "loss": 0.3157588243484497, + "memory(GiB)": 54.25, + "step": 1282, + "token_acc": 0.9266409266409267, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.41056, + "grad_norm": 0.705399238988508, + "learning_rate": 4.8976454342309425e-06, + "loss": 0.4245535135269165, + "memory(GiB)": 54.25, + "step": 1283, + "token_acc": 0.8266242937853108, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.41088, + "grad_norm": 0.6492565073619292, + "learning_rate": 4.8973955299075e-06, + "loss": 0.41515523195266724, + "memory(GiB)": 54.25, + "step": 1284, + "token_acc": 0.8345938033666748, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.4112, + "grad_norm": 0.6405969355880544, + "learning_rate": 4.897145327269773e-06, + "loss": 0.3403509259223938, + "memory(GiB)": 54.25, + "step": 1285, + "token_acc": 0.865297833935018, + "train_speed(iter/s)": 0.241733 + }, + { + "epoch": 0.41152, + "grad_norm": 0.688385763050708, + "learning_rate": 4.896894826348895e-06, + "loss": 0.4499348998069763, + "memory(GiB)": 54.25, + "step": 1286, + "token_acc": 0.8558012723417147, + "train_speed(iter/s)": 0.241731 + }, + { + "epoch": 0.41184, + "grad_norm": 0.636303979395194, + "learning_rate": 4.896644027176034e-06, + "loss": 0.3668064475059509, + "memory(GiB)": 54.25, + "step": 1287, + "token_acc": 0.8994315697420201, + "train_speed(iter/s)": 0.241711 + }, + { + "epoch": 0.41216, + "grad_norm": 0.622575566146382, + "learning_rate": 4.896392929782401e-06, + "loss": 0.3714810013771057, + "memory(GiB)": 54.25, + "step": 1288, + "token_acc": 0.8903148528405201, + "train_speed(iter/s)": 0.241709 + }, + { + "epoch": 0.41248, + "grad_norm": 0.7370857702254667, + "learning_rate": 4.896141534199239e-06, + "loss": 0.40914779901504517, + "memory(GiB)": 54.25, + "step": 1289, + "token_acc": 0.8649052841475573, + "train_speed(iter/s)": 0.241708 + }, + { + "epoch": 0.4128, + "grad_norm": 0.629535190598867, + "learning_rate": 4.895889840457829e-06, + "loss": 0.44377973675727844, + "memory(GiB)": 54.25, + "step": 1290, + "token_acc": 0.8575321136629038, + "train_speed(iter/s)": 0.2417 + }, + { + "epoch": 0.41312, + "grad_norm": 0.6838835517234205, + "learning_rate": 4.895637848589491e-06, + "loss": 0.3481270372867584, + "memory(GiB)": 54.25, + "step": 1291, + "token_acc": 0.8770240700218819, + "train_speed(iter/s)": 0.241703 + }, + { + "epoch": 0.41344, + "grad_norm": 0.6510539302712932, + "learning_rate": 4.895385558625581e-06, + "loss": 0.40787768363952637, + "memory(GiB)": 54.25, + "step": 1292, + "token_acc": 0.9059978954752719, + "train_speed(iter/s)": 0.241705 + }, + { + "epoch": 0.41376, + "grad_norm": 0.6444807679930352, + "learning_rate": 4.895132970597493e-06, + "loss": 0.3275423049926758, + "memory(GiB)": 54.25, + "step": 1293, + "token_acc": 
0.8946264744429882, + "train_speed(iter/s)": 0.241712 + }, + { + "epoch": 0.41408, + "grad_norm": 0.6957545316830331, + "learning_rate": 4.894880084536655e-06, + "loss": 0.3324955701828003, + "memory(GiB)": 54.25, + "step": 1294, + "token_acc": 0.891290527654164, + "train_speed(iter/s)": 0.241717 + }, + { + "epoch": 0.4144, + "grad_norm": 0.7299550298580185, + "learning_rate": 4.894626900474535e-06, + "loss": 0.4040944576263428, + "memory(GiB)": 54.25, + "step": 1295, + "token_acc": 0.9099471628761774, + "train_speed(iter/s)": 0.241725 + }, + { + "epoch": 0.41472, + "grad_norm": 0.7101258840617345, + "learning_rate": 4.894373418442639e-06, + "loss": 0.3955199420452118, + "memory(GiB)": 54.25, + "step": 1296, + "token_acc": 0.8971014492753623, + "train_speed(iter/s)": 0.241697 + }, + { + "epoch": 0.41504, + "grad_norm": 0.6619105282006725, + "learning_rate": 4.894119638472507e-06, + "loss": 0.37354910373687744, + "memory(GiB)": 54.25, + "step": 1297, + "token_acc": 0.9166142227816236, + "train_speed(iter/s)": 0.241702 + }, + { + "epoch": 0.41536, + "grad_norm": 0.6147724952701049, + "learning_rate": 4.893865560595718e-06, + "loss": 0.358009397983551, + "memory(GiB)": 54.25, + "step": 1298, + "token_acc": 0.8949063231850117, + "train_speed(iter/s)": 0.241698 + }, + { + "epoch": 0.41568, + "grad_norm": 0.6553311718461237, + "learning_rate": 4.893611184843886e-06, + "loss": 0.33957356214523315, + "memory(GiB)": 54.25, + "step": 1299, + "token_acc": 0.9161269430051814, + "train_speed(iter/s)": 0.241707 + }, + { + "epoch": 0.416, + "grad_norm": 0.7956429190939939, + "learning_rate": 4.893356511248666e-06, + "loss": 0.3854532241821289, + "memory(GiB)": 54.25, + "step": 1300, + "token_acc": 0.8570826306913997, + "train_speed(iter/s)": 0.241714 + }, + { + "epoch": 0.41632, + "grad_norm": 0.7251902712436933, + "learning_rate": 4.893101539841746e-06, + "loss": 0.5117073059082031, + "memory(GiB)": 54.25, + "step": 1301, + "token_acc": 0.7856827220503756, + "train_speed(iter/s)": 0.241706 + }, + { + "epoch": 0.41664, + "grad_norm": 0.6976432920541932, + "learning_rate": 4.892846270654854e-06, + "loss": 0.36443769931793213, + "memory(GiB)": 54.25, + "step": 1302, + "token_acc": 0.9099442651804048, + "train_speed(iter/s)": 0.241716 + }, + { + "epoch": 0.41696, + "grad_norm": 0.6417589035018723, + "learning_rate": 4.892590703719754e-06, + "loss": 0.375042587518692, + "memory(GiB)": 54.25, + "step": 1303, + "token_acc": 0.9102256361017763, + "train_speed(iter/s)": 0.241722 + }, + { + "epoch": 0.41728, + "grad_norm": 0.7903217463976845, + "learning_rate": 4.892334839068245e-06, + "loss": 0.26590147614479065, + "memory(GiB)": 54.25, + "step": 1304, + "token_acc": 0.9175475687103594, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.4176, + "grad_norm": 0.6913579017355007, + "learning_rate": 4.892078676732167e-06, + "loss": 0.3165406882762909, + "memory(GiB)": 54.25, + "step": 1305, + "token_acc": 0.9093439363817097, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.41792, + "grad_norm": 0.6754529121849585, + "learning_rate": 4.891822216743393e-06, + "loss": 0.3947480320930481, + "memory(GiB)": 54.25, + "step": 1306, + "token_acc": 0.9085529854760624, + "train_speed(iter/s)": 0.241735 + }, + { + "epoch": 0.41824, + "grad_norm": 0.6407465899126465, + "learning_rate": 4.891565459133837e-06, + "loss": 0.45267099142074585, + "memory(GiB)": 54.25, + "step": 1307, + "token_acc": 0.8574519880984582, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.41856, + "grad_norm": 0.6242970813827109, + 
"learning_rate": 4.891308403935446e-06, + "loss": 0.35364830493927, + "memory(GiB)": 54.25, + "step": 1308, + "token_acc": 0.9550438596491229, + "train_speed(iter/s)": 0.241723 + }, + { + "epoch": 0.41888, + "grad_norm": 0.6393041293301966, + "learning_rate": 4.891051051180208e-06, + "loss": 0.32948967814445496, + "memory(GiB)": 54.25, + "step": 1309, + "token_acc": 0.930955497382199, + "train_speed(iter/s)": 0.241726 + }, + { + "epoch": 0.4192, + "grad_norm": 0.7121487740748008, + "learning_rate": 4.890793400900146e-06, + "loss": 0.432616651058197, + "memory(GiB)": 54.25, + "step": 1310, + "token_acc": 0.8199837089329352, + "train_speed(iter/s)": 0.241726 + }, + { + "epoch": 0.41952, + "grad_norm": 0.6257557247969497, + "learning_rate": 4.890535453127318e-06, + "loss": 0.388999342918396, + "memory(GiB)": 54.25, + "step": 1311, + "token_acc": 0.9109102646868947, + "train_speed(iter/s)": 0.241717 + }, + { + "epoch": 0.41984, + "grad_norm": 0.6754626932627489, + "learning_rate": 4.890277207893823e-06, + "loss": 0.2897656559944153, + "memory(GiB)": 54.25, + "step": 1312, + "token_acc": 0.9165664823274826, + "train_speed(iter/s)": 0.241723 + }, + { + "epoch": 0.42016, + "grad_norm": 0.6532311067131089, + "learning_rate": 4.890018665231794e-06, + "loss": 0.3545244336128235, + "memory(GiB)": 54.25, + "step": 1313, + "token_acc": 0.9062611806797853, + "train_speed(iter/s)": 0.241717 + }, + { + "epoch": 0.42048, + "grad_norm": 1.2123928083988964, + "learning_rate": 4.889759825173403e-06, + "loss": 0.3855384588241577, + "memory(GiB)": 54.25, + "step": 1314, + "token_acc": 0.9325173668541185, + "train_speed(iter/s)": 0.241721 + }, + { + "epoch": 0.4208, + "grad_norm": 0.6384937154662139, + "learning_rate": 4.889500687750859e-06, + "loss": 0.3532355725765228, + "memory(GiB)": 54.25, + "step": 1315, + "token_acc": 0.8454649827784156, + "train_speed(iter/s)": 0.241725 + }, + { + "epoch": 0.42112, + "grad_norm": 0.6766427851222891, + "learning_rate": 4.8892412529964045e-06, + "loss": 0.3886632025241852, + "memory(GiB)": 54.25, + "step": 1316, + "token_acc": 0.9055459272097054, + "train_speed(iter/s)": 0.241733 + }, + { + "epoch": 0.42144, + "grad_norm": 0.6876635572878447, + "learning_rate": 4.888981520942324e-06, + "loss": 0.4070173501968384, + "memory(GiB)": 54.25, + "step": 1317, + "token_acc": 0.8337581937363437, + "train_speed(iter/s)": 0.241733 + }, + { + "epoch": 0.42176, + "grad_norm": 0.7141017033517059, + "learning_rate": 4.888721491620936e-06, + "loss": 0.4543421268463135, + "memory(GiB)": 54.25, + "step": 1318, + "token_acc": 0.8679823069071113, + "train_speed(iter/s)": 0.241741 + }, + { + "epoch": 0.42208, + "grad_norm": 0.6817767844070617, + "learning_rate": 4.888461165064596e-06, + "loss": 0.4173312783241272, + "memory(GiB)": 54.25, + "step": 1319, + "token_acc": 0.8495924270312911, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.4224, + "grad_norm": 0.6726990538624524, + "learning_rate": 4.888200541305698e-06, + "loss": 0.34635308384895325, + "memory(GiB)": 54.25, + "step": 1320, + "token_acc": 0.914006327293644, + "train_speed(iter/s)": 0.241741 + }, + { + "epoch": 0.42272, + "grad_norm": 0.6149293574233768, + "learning_rate": 4.887939620376671e-06, + "loss": 0.4448007643222809, + "memory(GiB)": 54.25, + "step": 1321, + "token_acc": 0.9057883264713006, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.42304, + "grad_norm": 0.6827509473641352, + "learning_rate": 4.887678402309984e-06, + "loss": 0.3387151062488556, + "memory(GiB)": 54.25, + "step": 1322, + "token_acc": 
0.9044358440011908, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.42336, + "grad_norm": 0.7921684084468664, + "learning_rate": 4.887416887138139e-06, + "loss": 0.4071877896785736, + "memory(GiB)": 54.25, + "step": 1323, + "token_acc": 0.8978193146417446, + "train_speed(iter/s)": 0.241743 + }, + { + "epoch": 0.42368, + "grad_norm": 0.8693170541591382, + "learning_rate": 4.887155074893677e-06, + "loss": 0.4522135257720947, + "memory(GiB)": 54.25, + "step": 1324, + "token_acc": 0.8634380453752182, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.424, + "grad_norm": 2.9542991947574455, + "learning_rate": 4.886892965609179e-06, + "loss": 0.3745822310447693, + "memory(GiB)": 54.25, + "step": 1325, + "token_acc": 0.8757489300998573, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.42432, + "grad_norm": 0.7356233075442105, + "learning_rate": 4.886630559317256e-06, + "loss": 0.40559709072113037, + "memory(GiB)": 54.25, + "step": 1326, + "token_acc": 0.8984529948433162, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.42464, + "grad_norm": 0.7786860003507864, + "learning_rate": 4.8863678560505626e-06, + "loss": 0.3849676251411438, + "memory(GiB)": 54.25, + "step": 1327, + "token_acc": 0.9595861623019722, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.42496, + "grad_norm": 0.6749504488093332, + "learning_rate": 4.8861048558417865e-06, + "loss": 0.38850563764572144, + "memory(GiB)": 54.25, + "step": 1328, + "token_acc": 0.8091386095321381, + "train_speed(iter/s)": 0.241738 + }, + { + "epoch": 0.42528, + "grad_norm": 0.6155887509908604, + "learning_rate": 4.885841558723654e-06, + "loss": 0.3630657196044922, + "memory(GiB)": 54.25, + "step": 1329, + "token_acc": 0.8816952678107124, + "train_speed(iter/s)": 0.24174 + }, + { + "epoch": 0.4256, + "grad_norm": 0.6690164081070308, + "learning_rate": 4.885577964728928e-06, + "loss": 0.44777727127075195, + "memory(GiB)": 54.25, + "step": 1330, + "token_acc": 0.9311475409836065, + "train_speed(iter/s)": 0.241724 + }, + { + "epoch": 0.42592, + "grad_norm": 0.6900324027023143, + "learning_rate": 4.885314073890408e-06, + "loss": 0.40067026019096375, + "memory(GiB)": 54.25, + "step": 1331, + "token_acc": 0.8907611853573504, + "train_speed(iter/s)": 0.241728 + }, + { + "epoch": 0.42624, + "grad_norm": 0.7304230246566736, + "learning_rate": 4.88504988624093e-06, + "loss": 0.37547188997268677, + "memory(GiB)": 54.25, + "step": 1332, + "token_acc": 0.9328941951420338, + "train_speed(iter/s)": 0.241729 + }, + { + "epoch": 0.42656, + "grad_norm": 0.7708386430270687, + "learning_rate": 4.884785401813368e-06, + "loss": 0.4293668270111084, + "memory(GiB)": 54.25, + "step": 1333, + "token_acc": 0.8931380526173041, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.42688, + "grad_norm": 0.704622487415475, + "learning_rate": 4.8845206206406324e-06, + "loss": 0.40498292446136475, + "memory(GiB)": 54.25, + "step": 1334, + "token_acc": 0.8460053101617185, + "train_speed(iter/s)": 0.24174 + }, + { + "epoch": 0.4272, + "grad_norm": 0.6097909135926494, + "learning_rate": 4.884255542755672e-06, + "loss": 0.3631764352321625, + "memory(GiB)": 54.25, + "step": 1335, + "token_acc": 0.9273550334123734, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.42752, + "grad_norm": 0.6782029921510473, + "learning_rate": 4.883990168191469e-06, + "loss": 0.3965047299861908, + "memory(GiB)": 54.25, + "step": 1336, + "token_acc": 0.9194856024601622, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.42784, + "grad_norm": 0.6916957224416845, + 
"learning_rate": 4.883724496981046e-06, + "loss": 0.3426264226436615, + "memory(GiB)": 54.25, + "step": 1337, + "token_acc": 0.8814493895234344, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.42816, + "grad_norm": 0.6374334814171009, + "learning_rate": 4.88345852915746e-06, + "loss": 0.4270450174808502, + "memory(GiB)": 54.25, + "step": 1338, + "token_acc": 0.8339589950909616, + "train_speed(iter/s)": 0.241747 + }, + { + "epoch": 0.42848, + "grad_norm": 0.9303860754170122, + "learning_rate": 4.883192264753808e-06, + "loss": 0.3680263161659241, + "memory(GiB)": 54.25, + "step": 1339, + "token_acc": 0.8525641025641025, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.4288, + "grad_norm": 0.6230151742431499, + "learning_rate": 4.88292570380322e-06, + "loss": 0.32483208179473877, + "memory(GiB)": 54.25, + "step": 1340, + "token_acc": 0.9105827193569993, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.42912, + "grad_norm": 0.9671569387834574, + "learning_rate": 4.8826588463388656e-06, + "loss": 0.3627716898918152, + "memory(GiB)": 54.25, + "step": 1341, + "token_acc": 0.9321876451463075, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.42944, + "grad_norm": 0.6262835190342954, + "learning_rate": 4.882391692393952e-06, + "loss": 0.336712121963501, + "memory(GiB)": 54.25, + "step": 1342, + "token_acc": 0.9130917981773727, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.42976, + "grad_norm": 0.6431543008111696, + "learning_rate": 4.882124242001719e-06, + "loss": 0.34673011302948, + "memory(GiB)": 54.25, + "step": 1343, + "token_acc": 0.8947157726180944, + "train_speed(iter/s)": 0.241748 + }, + { + "epoch": 0.43008, + "grad_norm": 0.6376212985474835, + "learning_rate": 4.881856495195449e-06, + "loss": 0.32342901825904846, + "memory(GiB)": 54.25, + "step": 1344, + "token_acc": 0.8936904037330087, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.4304, + "grad_norm": 0.776995995729355, + "learning_rate": 4.881588452008457e-06, + "loss": 0.37168586254119873, + "memory(GiB)": 54.25, + "step": 1345, + "token_acc": 0.8378859857482185, + "train_speed(iter/s)": 0.241749 + }, + { + "epoch": 0.43072, + "grad_norm": 0.7035786906633952, + "learning_rate": 4.8813201124740965e-06, + "loss": 0.3587205708026886, + "memory(GiB)": 54.25, + "step": 1346, + "token_acc": 0.889165186500888, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.43104, + "grad_norm": 0.685779909253052, + "learning_rate": 4.881051476625757e-06, + "loss": 0.4495074152946472, + "memory(GiB)": 54.25, + "step": 1347, + "token_acc": 0.8226822682268227, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.43136, + "grad_norm": 0.7058207515548859, + "learning_rate": 4.880782544496867e-06, + "loss": 0.36446863412857056, + "memory(GiB)": 54.25, + "step": 1348, + "token_acc": 0.9193363844393593, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.43168, + "grad_norm": 0.6653790284935017, + "learning_rate": 4.880513316120889e-06, + "loss": 0.4173201322555542, + "memory(GiB)": 54.25, + "step": 1349, + "token_acc": 0.9273522975929979, + "train_speed(iter/s)": 0.24177 + }, + { + "epoch": 0.432, + "grad_norm": 0.6590010841135678, + "learning_rate": 4.8802437915313256e-06, + "loss": 0.32026103138923645, + "memory(GiB)": 54.25, + "step": 1350, + "token_acc": 0.8931171409662475, + "train_speed(iter/s)": 0.241776 + }, + { + "epoch": 0.43232, + "grad_norm": 0.6653377196723516, + "learning_rate": 4.879973970761713e-06, + "loss": 0.4857190251350403, + "memory(GiB)": 54.25, + "step": 1351, + "token_acc": 
0.8658346333853354, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.43264, + "grad_norm": 0.643701279316601, + "learning_rate": 4.8797038538456255e-06, + "loss": 0.4505487382411957, + "memory(GiB)": 54.25, + "step": 1352, + "token_acc": 0.840297121634169, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.43296, + "grad_norm": 0.7072956786410945, + "learning_rate": 4.879433440816676e-06, + "loss": 0.3793249726295471, + "memory(GiB)": 54.25, + "step": 1353, + "token_acc": 0.9338877338877339, + "train_speed(iter/s)": 0.241768 + }, + { + "epoch": 0.43328, + "grad_norm": 0.6548441343549255, + "learning_rate": 4.879162731708511e-06, + "loss": 0.3847993016242981, + "memory(GiB)": 54.25, + "step": 1354, + "token_acc": 0.9567930868939031, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.4336, + "grad_norm": 0.7276207618472077, + "learning_rate": 4.8788917265548174e-06, + "loss": 0.39764106273651123, + "memory(GiB)": 54.25, + "step": 1355, + "token_acc": 0.8936260025327142, + "train_speed(iter/s)": 0.241783 + }, + { + "epoch": 0.43392, + "grad_norm": 0.5988014696239595, + "learning_rate": 4.878620425389316e-06, + "loss": 0.4102374017238617, + "memory(GiB)": 54.25, + "step": 1356, + "token_acc": 0.8941371071494211, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.43424, + "grad_norm": 0.7019136326631011, + "learning_rate": 4.878348828245764e-06, + "loss": 0.390442430973053, + "memory(GiB)": 54.25, + "step": 1357, + "token_acc": 0.9356955380577427, + "train_speed(iter/s)": 0.24177 + }, + { + "epoch": 0.43456, + "grad_norm": 0.6300566115884247, + "learning_rate": 4.87807693515796e-06, + "loss": 0.4639202654361725, + "memory(GiB)": 54.25, + "step": 1358, + "token_acc": 0.8793208004851425, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.43488, + "grad_norm": 0.6481040854266029, + "learning_rate": 4.877804746159734e-06, + "loss": 0.36727380752563477, + "memory(GiB)": 54.25, + "step": 1359, + "token_acc": 0.8824557570461, + "train_speed(iter/s)": 0.241757 + }, + { + "epoch": 0.4352, + "grad_norm": 0.7178941696898775, + "learning_rate": 4.877532261284957e-06, + "loss": 0.41963109374046326, + "memory(GiB)": 54.25, + "step": 1360, + "token_acc": 0.8943488943488943, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.43552, + "grad_norm": 0.6821774169619288, + "learning_rate": 4.877259480567533e-06, + "loss": 0.3929774761199951, + "memory(GiB)": 54.25, + "step": 1361, + "token_acc": 0.870832239432922, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.43584, + "grad_norm": 0.683295876338886, + "learning_rate": 4.876986404041406e-06, + "loss": 0.33743149042129517, + "memory(GiB)": 54.25, + "step": 1362, + "token_acc": 0.8763050787471244, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.43616, + "grad_norm": 0.6864854894073937, + "learning_rate": 4.8767130317405564e-06, + "loss": 0.33623820543289185, + "memory(GiB)": 54.25, + "step": 1363, + "token_acc": 0.8551587301587301, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.43648, + "grad_norm": 0.6968798442310556, + "learning_rate": 4.876439363698999e-06, + "loss": 0.42134207487106323, + "memory(GiB)": 54.25, + "step": 1364, + "token_acc": 0.8449117536612842, + "train_speed(iter/s)": 0.241759 + }, + { + "epoch": 0.4368, + "grad_norm": 0.7123028277605915, + "learning_rate": 4.876165399950789e-06, + "loss": 0.4160800576210022, + "memory(GiB)": 54.25, + "step": 1365, + "token_acc": 0.8751445086705202, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.43712, + "grad_norm": 0.5869527000099894, + 
"learning_rate": 4.875891140530014e-06, + "loss": 0.35796743631362915, + "memory(GiB)": 54.25, + "step": 1366, + "token_acc": 0.908835904628331, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.43744, + "grad_norm": 0.6776948942224121, + "learning_rate": 4.875616585470803e-06, + "loss": 0.40424638986587524, + "memory(GiB)": 54.25, + "step": 1367, + "token_acc": 0.9126625962304221, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.43776, + "grad_norm": 0.6581718543812415, + "learning_rate": 4.875341734807319e-06, + "loss": 0.34139484167099, + "memory(GiB)": 54.25, + "step": 1368, + "token_acc": 0.9657941701368233, + "train_speed(iter/s)": 0.241745 + }, + { + "epoch": 0.43808, + "grad_norm": 0.6192494889890449, + "learning_rate": 4.875066588573761e-06, + "loss": 0.30496746301651, + "memory(GiB)": 54.25, + "step": 1369, + "token_acc": 0.9213592233009709, + "train_speed(iter/s)": 0.241732 + }, + { + "epoch": 0.4384, + "grad_norm": 0.8148400108243367, + "learning_rate": 4.874791146804367e-06, + "loss": 0.368156373500824, + "memory(GiB)": 54.25, + "step": 1370, + "token_acc": 0.9042227662178702, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.43872, + "grad_norm": 0.692692565750196, + "learning_rate": 4.874515409533412e-06, + "loss": 0.44202888011932373, + "memory(GiB)": 54.25, + "step": 1371, + "token_acc": 0.8262847965738758, + "train_speed(iter/s)": 0.241734 + }, + { + "epoch": 0.43904, + "grad_norm": 0.6299090368989769, + "learning_rate": 4.874239376795207e-06, + "loss": 0.3335915505886078, + "memory(GiB)": 54.25, + "step": 1372, + "token_acc": 0.9132250580046404, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.43936, + "grad_norm": 0.6086910217331085, + "learning_rate": 4.873963048624097e-06, + "loss": 0.36554571986198425, + "memory(GiB)": 54.25, + "step": 1373, + "token_acc": 0.8383018867924529, + "train_speed(iter/s)": 0.241739 + }, + { + "epoch": 0.43968, + "grad_norm": 0.7187015918760711, + "learning_rate": 4.873686425054468e-06, + "loss": 0.4994755983352661, + "memory(GiB)": 54.25, + "step": 1374, + "token_acc": 0.9003508771929825, + "train_speed(iter/s)": 0.241742 + }, + { + "epoch": 0.44, + "grad_norm": 0.9578438953893976, + "learning_rate": 4.873409506120741e-06, + "loss": 0.40452802181243896, + "memory(GiB)": 54.25, + "step": 1375, + "token_acc": 0.8866779089376053, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.44032, + "grad_norm": 0.630047407045523, + "learning_rate": 4.873132291857374e-06, + "loss": 0.432367205619812, + "memory(GiB)": 54.25, + "step": 1376, + "token_acc": 0.8572647142450952, + "train_speed(iter/s)": 0.241744 + }, + { + "epoch": 0.44064, + "grad_norm": 0.6651826273155501, + "learning_rate": 4.87285478229886e-06, + "loss": 0.390887975692749, + "memory(GiB)": 54.25, + "step": 1377, + "token_acc": 0.8797976792621244, + "train_speed(iter/s)": 0.241749 + }, + { + "epoch": 0.44096, + "grad_norm": 0.6492655693883252, + "learning_rate": 4.872576977479732e-06, + "loss": 0.31533730030059814, + "memory(GiB)": 54.25, + "step": 1378, + "token_acc": 0.9054097829608033, + "train_speed(iter/s)": 0.241756 + }, + { + "epoch": 0.44128, + "grad_norm": 1.0382289676680765, + "learning_rate": 4.872298877434557e-06, + "loss": 0.4353906512260437, + "memory(GiB)": 54.25, + "step": 1379, + "token_acc": 0.8958955223880597, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.4416, + "grad_norm": 0.6111652253950935, + "learning_rate": 4.8720204821979386e-06, + "loss": 0.2878170609474182, + "memory(GiB)": 54.25, + "step": 1380, + "token_acc": 
0.9040322580645161, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.44192, + "grad_norm": 0.6440809698426532, + "learning_rate": 4.871741791804521e-06, + "loss": 0.4502704441547394, + "memory(GiB)": 54.25, + "step": 1381, + "token_acc": 0.9533497018589968, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.44224, + "grad_norm": 0.6718976875694662, + "learning_rate": 4.87146280628898e-06, + "loss": 0.3850383758544922, + "memory(GiB)": 54.25, + "step": 1382, + "token_acc": 0.8748890860692103, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.44256, + "grad_norm": 0.6740787531737835, + "learning_rate": 4.871183525686033e-06, + "loss": 0.4799865484237671, + "memory(GiB)": 54.25, + "step": 1383, + "token_acc": 0.7734513274336283, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.44288, + "grad_norm": 0.6379428482452528, + "learning_rate": 4.870903950030429e-06, + "loss": 0.4027191400527954, + "memory(GiB)": 54.25, + "step": 1384, + "token_acc": 0.9470899470899471, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.4432, + "grad_norm": 0.6590909772232544, + "learning_rate": 4.8706240793569585e-06, + "loss": 0.2858898639678955, + "memory(GiB)": 54.25, + "step": 1385, + "token_acc": 0.8855829050581553, + "train_speed(iter/s)": 0.241779 + }, + { + "epoch": 0.44352, + "grad_norm": 0.6537595133374319, + "learning_rate": 4.870343913700445e-06, + "loss": 0.45104703307151794, + "memory(GiB)": 54.25, + "step": 1386, + "token_acc": 0.9395861148197597, + "train_speed(iter/s)": 0.241783 + }, + { + "epoch": 0.44384, + "grad_norm": 0.757842911660058, + "learning_rate": 4.87006345309575e-06, + "loss": 0.4908878207206726, + "memory(GiB)": 54.25, + "step": 1387, + "token_acc": 0.8656876790830945, + "train_speed(iter/s)": 0.241792 + }, + { + "epoch": 0.44416, + "grad_norm": 0.752644178388717, + "learning_rate": 4.869782697577773e-06, + "loss": 0.37011563777923584, + "memory(GiB)": 54.25, + "step": 1388, + "token_acc": 0.9490595611285266, + "train_speed(iter/s)": 0.241798 + }, + { + "epoch": 0.44448, + "grad_norm": 0.6532003645317793, + "learning_rate": 4.869501647181449e-06, + "loss": 0.3889579176902771, + "memory(GiB)": 54.25, + "step": 1389, + "token_acc": 0.8910225636101776, + "train_speed(iter/s)": 0.241803 + }, + { + "epoch": 0.4448, + "grad_norm": 0.7214426378114299, + "learning_rate": 4.86922030194175e-06, + "loss": 0.375772088766098, + "memory(GiB)": 54.25, + "step": 1390, + "token_acc": 0.8952193844138834, + "train_speed(iter/s)": 0.241804 + }, + { + "epoch": 0.44512, + "grad_norm": 0.6553689380810623, + "learning_rate": 4.868938661893684e-06, + "loss": 0.4477576017379761, + "memory(GiB)": 54.25, + "step": 1391, + "token_acc": 0.8201898856810695, + "train_speed(iter/s)": 0.241789 + }, + { + "epoch": 0.44544, + "grad_norm": 0.6399429044127155, + "learning_rate": 4.868656727072296e-06, + "loss": 0.29409581422805786, + "memory(GiB)": 54.25, + "step": 1392, + "token_acc": 0.9045092838196287, + "train_speed(iter/s)": 0.241794 + }, + { + "epoch": 0.44576, + "grad_norm": 0.6251744267895303, + "learning_rate": 4.868374497512669e-06, + "loss": 0.33881592750549316, + "memory(GiB)": 54.25, + "step": 1393, + "token_acc": 0.95645821157218, + "train_speed(iter/s)": 0.241797 + }, + { + "epoch": 0.44608, + "grad_norm": 0.6443831560707102, + "learning_rate": 4.868091973249921e-06, + "loss": 0.32782113552093506, + "memory(GiB)": 54.25, + "step": 1394, + "token_acc": 0.8597240473061761, + "train_speed(iter/s)": 0.241803 + }, + { + "epoch": 0.4464, + "grad_norm": 0.6341180388337093, + 
"learning_rate": 4.867809154319207e-06, + "loss": 0.3600013852119446, + "memory(GiB)": 54.25, + "step": 1395, + "token_acc": 0.9317614424410541, + "train_speed(iter/s)": 0.241811 + }, + { + "epoch": 0.44672, + "grad_norm": 0.7620381680679035, + "learning_rate": 4.867526040755719e-06, + "loss": 0.45745474100112915, + "memory(GiB)": 54.25, + "step": 1396, + "token_acc": 0.9389279437609842, + "train_speed(iter/s)": 0.24182 + }, + { + "epoch": 0.44704, + "grad_norm": 0.6688361618426312, + "learning_rate": 4.867242632594687e-06, + "loss": 0.33844512701034546, + "memory(GiB)": 54.25, + "step": 1397, + "token_acc": 0.8840116279069767, + "train_speed(iter/s)": 0.241823 + }, + { + "epoch": 0.44736, + "grad_norm": 0.6650186372729323, + "learning_rate": 4.866958929871373e-06, + "loss": 0.44632455706596375, + "memory(GiB)": 54.25, + "step": 1398, + "token_acc": 0.8635761589403973, + "train_speed(iter/s)": 0.241824 + }, + { + "epoch": 0.44768, + "grad_norm": 0.6467007295381163, + "learning_rate": 4.8666749326210814e-06, + "loss": 0.4723467230796814, + "memory(GiB)": 54.25, + "step": 1399, + "token_acc": 0.8733905579399142, + "train_speed(iter/s)": 0.241823 + }, + { + "epoch": 0.448, + "grad_norm": 0.5847950426484317, + "learning_rate": 4.86639064087915e-06, + "loss": 0.2702223062515259, + "memory(GiB)": 54.25, + "step": 1400, + "token_acc": 0.9106769016050245, + "train_speed(iter/s)": 0.241831 + }, + { + "epoch": 0.44832, + "grad_norm": 0.6400932130615912, + "learning_rate": 4.866106054680954e-06, + "loss": 0.35586994886398315, + "memory(GiB)": 54.25, + "step": 1401, + "token_acc": 0.8234945705824285, + "train_speed(iter/s)": 0.241835 + }, + { + "epoch": 0.44864, + "grad_norm": 0.6077877688346182, + "learning_rate": 4.865821174061906e-06, + "loss": 0.3937463164329529, + "memory(GiB)": 54.25, + "step": 1402, + "token_acc": 0.8877931769722814, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.44896, + "grad_norm": 0.6743756791422052, + "learning_rate": 4.8655359990574535e-06, + "loss": 0.43177270889282227, + "memory(GiB)": 54.25, + "step": 1403, + "token_acc": 0.8096820123398196, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.44928, + "grad_norm": 0.5779646985167436, + "learning_rate": 4.865250529703082e-06, + "loss": 0.4202505052089691, + "memory(GiB)": 54.25, + "step": 1404, + "token_acc": 0.8896658896658897, + "train_speed(iter/s)": 0.241835 + }, + { + "epoch": 0.4496, + "grad_norm": 0.6698168537027995, + "learning_rate": 4.864964766034313e-06, + "loss": 0.41024893522262573, + "memory(GiB)": 54.25, + "step": 1405, + "token_acc": 0.9192499043245312, + "train_speed(iter/s)": 0.241831 + }, + { + "epoch": 0.44992, + "grad_norm": 0.6315902468603016, + "learning_rate": 4.8646787080867045e-06, + "loss": 0.3904609978199005, + "memory(GiB)": 54.25, + "step": 1406, + "token_acc": 0.8261114237478897, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.45024, + "grad_norm": 0.6545834324005627, + "learning_rate": 4.864392355895851e-06, + "loss": 0.32289209961891174, + "memory(GiB)": 54.25, + "step": 1407, + "token_acc": 0.8791242676534073, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.45056, + "grad_norm": 0.6407019217077954, + "learning_rate": 4.864105709497386e-06, + "loss": 0.41863322257995605, + "memory(GiB)": 54.25, + "step": 1408, + "token_acc": 0.8626737260092654, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.45088, + "grad_norm": 0.6601611243138582, + "learning_rate": 4.863818768926977e-06, + "loss": 0.40609967708587646, + "memory(GiB)": 54.25, + "step": 1409, + 
"token_acc": 0.8688734924300744, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.4512, + "grad_norm": 0.7174621221962957, + "learning_rate": 4.863531534220327e-06, + "loss": 0.4263560473918915, + "memory(GiB)": 54.25, + "step": 1410, + "token_acc": 0.9122380807774066, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.45152, + "grad_norm": 0.6198772361611069, + "learning_rate": 4.863244005413179e-06, + "loss": 0.41117462515830994, + "memory(GiB)": 54.25, + "step": 1411, + "token_acc": 0.9384303112313938, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.45184, + "grad_norm": 0.7828455993448517, + "learning_rate": 4.862956182541312e-06, + "loss": 0.4140855669975281, + "memory(GiB)": 54.25, + "step": 1412, + "token_acc": 0.8926909389542842, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.45216, + "grad_norm": 0.6365803769316366, + "learning_rate": 4.862668065640538e-06, + "loss": 0.38020676374435425, + "memory(GiB)": 54.25, + "step": 1413, + "token_acc": 0.8582312518673438, + "train_speed(iter/s)": 0.241834 + }, + { + "epoch": 0.45248, + "grad_norm": 0.7173878773917757, + "learning_rate": 4.86237965474671e-06, + "loss": 0.42698922753334045, + "memory(GiB)": 54.25, + "step": 1414, + "token_acc": 0.8704983630411058, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.4528, + "grad_norm": 0.6741683041942471, + "learning_rate": 4.862090949895714e-06, + "loss": 0.38656336069107056, + "memory(GiB)": 54.25, + "step": 1415, + "token_acc": 0.9337142857142857, + "train_speed(iter/s)": 0.241843 + }, + { + "epoch": 0.45312, + "grad_norm": 0.6370471806657942, + "learning_rate": 4.861801951123477e-06, + "loss": 0.38649970293045044, + "memory(GiB)": 54.25, + "step": 1416, + "token_acc": 0.8512533664802154, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.45344, + "grad_norm": 0.6224484950736938, + "learning_rate": 4.861512658465957e-06, + "loss": 0.3709070086479187, + "memory(GiB)": 54.25, + "step": 1417, + "token_acc": 0.8943862987630827, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.45376, + "grad_norm": 0.621045901740749, + "learning_rate": 4.8612230719591535e-06, + "loss": 0.30436834692955017, + "memory(GiB)": 54.25, + "step": 1418, + "token_acc": 0.8833922261484098, + "train_speed(iter/s)": 0.241842 + }, + { + "epoch": 0.45408, + "grad_norm": 0.6907364202268645, + "learning_rate": 4.8609331916391e-06, + "loss": 0.4163115322589874, + "memory(GiB)": 54.25, + "step": 1419, + "token_acc": 0.8853907134767837, + "train_speed(iter/s)": 0.241848 + }, + { + "epoch": 0.4544, + "grad_norm": 0.6682014147669898, + "learning_rate": 4.860643017541866e-06, + "loss": 0.3684418797492981, + "memory(GiB)": 54.25, + "step": 1420, + "token_acc": 0.9164914388705316, + "train_speed(iter/s)": 0.241856 + }, + { + "epoch": 0.45472, + "grad_norm": 0.6387981982425855, + "learning_rate": 4.860352549703561e-06, + "loss": 0.4106927812099457, + "memory(GiB)": 54.25, + "step": 1421, + "token_acc": 0.8775804128660586, + "train_speed(iter/s)": 0.24185 + }, + { + "epoch": 0.45504, + "grad_norm": 0.7421759184381147, + "learning_rate": 4.860061788160325e-06, + "loss": 0.4439920485019684, + "memory(GiB)": 54.25, + "step": 1422, + "token_acc": 0.9016608391608392, + "train_speed(iter/s)": 0.241855 + }, + { + "epoch": 0.45536, + "grad_norm": 0.6762732921973349, + "learning_rate": 4.859770732948342e-06, + "loss": 0.5197451114654541, + "memory(GiB)": 54.25, + "step": 1423, + "token_acc": 0.8647369379209904, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.45568, + "grad_norm": 
0.6780065106167816, + "learning_rate": 4.859479384103827e-06, + "loss": 0.3608526587486267, + "memory(GiB)": 54.25, + "step": 1424, + "token_acc": 0.9382278481012658, + "train_speed(iter/s)": 0.241842 + }, + { + "epoch": 0.456, + "grad_norm": 0.6209295612932021, + "learning_rate": 4.859187741663033e-06, + "loss": 0.4193663001060486, + "memory(GiB)": 54.25, + "step": 1425, + "token_acc": 0.7814110186286167, + "train_speed(iter/s)": 0.241847 + }, + { + "epoch": 0.45632, + "grad_norm": 0.689968943463644, + "learning_rate": 4.858895805662251e-06, + "loss": 0.4211796820163727, + "memory(GiB)": 54.25, + "step": 1426, + "token_acc": 0.8766481101670085, + "train_speed(iter/s)": 0.241849 + }, + { + "epoch": 0.45664, + "grad_norm": 0.6236653297898024, + "learning_rate": 4.858603576137806e-06, + "loss": 0.3624248802661896, + "memory(GiB)": 54.25, + "step": 1427, + "token_acc": 0.8976634131913636, + "train_speed(iter/s)": 0.241851 + }, + { + "epoch": 0.45696, + "grad_norm": 0.6230356018543871, + "learning_rate": 4.858311053126062e-06, + "loss": 0.46036577224731445, + "memory(GiB)": 54.25, + "step": 1428, + "token_acc": 0.873999483604441, + "train_speed(iter/s)": 0.24185 + }, + { + "epoch": 0.45728, + "grad_norm": 0.6502356504031781, + "learning_rate": 4.858018236663418e-06, + "loss": 0.3810836672782898, + "memory(GiB)": 54.25, + "step": 1429, + "token_acc": 0.9039000764720877, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.4576, + "grad_norm": 0.67277355389728, + "learning_rate": 4.85772512678631e-06, + "loss": 0.37741619348526, + "memory(GiB)": 54.25, + "step": 1430, + "token_acc": 0.8572294634737799, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.45792, + "grad_norm": 0.7015264804989461, + "learning_rate": 4.857431723531211e-06, + "loss": 0.38409554958343506, + "memory(GiB)": 54.25, + "step": 1431, + "token_acc": 0.9203454894433781, + "train_speed(iter/s)": 0.241866 + }, + { + "epoch": 0.45824, + "grad_norm": 0.6602312344225245, + "learning_rate": 4.857138026934628e-06, + "loss": 0.4160633385181427, + "memory(GiB)": 54.25, + "step": 1432, + "token_acc": 0.9458456973293768, + "train_speed(iter/s)": 0.241864 + }, + { + "epoch": 0.45856, + "grad_norm": 0.6651053958693808, + "learning_rate": 4.856844037033109e-06, + "loss": 0.3292998969554901, + "memory(GiB)": 54.25, + "step": 1433, + "token_acc": 0.9495713565305093, + "train_speed(iter/s)": 0.241861 + }, + { + "epoch": 0.45888, + "grad_norm": 0.6288774590470451, + "learning_rate": 4.856549753863234e-06, + "loss": 0.3308975100517273, + "memory(GiB)": 54.25, + "step": 1434, + "token_acc": 0.9386898096304591, + "train_speed(iter/s)": 0.241861 + }, + { + "epoch": 0.4592, + "grad_norm": 0.6981969093269803, + "learning_rate": 4.856255177461623e-06, + "loss": 0.4663718342781067, + "memory(GiB)": 54.25, + "step": 1435, + "token_acc": 0.8670309653916212, + "train_speed(iter/s)": 0.241857 + }, + { + "epoch": 0.45952, + "grad_norm": 0.6757392435448585, + "learning_rate": 4.855960307864929e-06, + "loss": 0.3273339867591858, + "memory(GiB)": 54.25, + "step": 1436, + "token_acc": 0.8778813143697891, + "train_speed(iter/s)": 0.241857 + }, + { + "epoch": 0.45984, + "grad_norm": 0.6534729529161497, + "learning_rate": 4.855665145109845e-06, + "loss": 0.4435362219810486, + "memory(GiB)": 54.25, + "step": 1437, + "token_acc": 0.9369095816464238, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.46016, + "grad_norm": 0.6606576754168623, + "learning_rate": 4.855369689233098e-06, + "loss": 0.30629080533981323, + "memory(GiB)": 54.25, + "step": 1438, + 
"token_acc": 0.9394673123486683, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.46048, + "grad_norm": 0.6574659112723522, + "learning_rate": 4.855073940271453e-06, + "loss": 0.396911084651947, + "memory(GiB)": 54.25, + "step": 1439, + "token_acc": 0.8672438672438673, + "train_speed(iter/s)": 0.241846 + }, + { + "epoch": 0.4608, + "grad_norm": 0.6903621256491916, + "learning_rate": 4.854777898261711e-06, + "loss": 0.36794042587280273, + "memory(GiB)": 54.25, + "step": 1440, + "token_acc": 0.9265905383360522, + "train_speed(iter/s)": 0.241848 + }, + { + "epoch": 0.46112, + "grad_norm": 0.6563508622912902, + "learning_rate": 4.854481563240708e-06, + "loss": 0.3901998996734619, + "memory(GiB)": 54.25, + "step": 1441, + "token_acc": 0.9206934710438952, + "train_speed(iter/s)": 0.241851 + }, + { + "epoch": 0.46144, + "grad_norm": 0.6295791113472529, + "learning_rate": 4.854184935245319e-06, + "loss": 0.35266730189323425, + "memory(GiB)": 54.25, + "step": 1442, + "token_acc": 0.8841442072103605, + "train_speed(iter/s)": 0.241857 + }, + { + "epoch": 0.46176, + "grad_norm": 0.6513398921539877, + "learning_rate": 4.853888014312454e-06, + "loss": 0.42213016748428345, + "memory(GiB)": 54.25, + "step": 1443, + "token_acc": 0.8437278525868178, + "train_speed(iter/s)": 0.241851 + }, + { + "epoch": 0.46208, + "grad_norm": 0.6216878599183627, + "learning_rate": 4.853590800479059e-06, + "loss": 0.4010796546936035, + "memory(GiB)": 54.25, + "step": 1444, + "token_acc": 0.8480710139979515, + "train_speed(iter/s)": 0.241847 + }, + { + "epoch": 0.4624, + "grad_norm": 0.6597357930461395, + "learning_rate": 4.853293293782118e-06, + "loss": 0.3940823972225189, + "memory(GiB)": 54.25, + "step": 1445, + "token_acc": 0.8692848769050411, + "train_speed(iter/s)": 0.241853 + }, + { + "epoch": 0.46272, + "grad_norm": 0.709769341503366, + "learning_rate": 4.85299549425865e-06, + "loss": 0.39852815866470337, + "memory(GiB)": 54.25, + "step": 1446, + "token_acc": 0.859825327510917, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.46304, + "grad_norm": 0.6185340107948676, + "learning_rate": 4.852697401945711e-06, + "loss": 0.34628188610076904, + "memory(GiB)": 54.25, + "step": 1447, + "token_acc": 0.8734142305570877, + "train_speed(iter/s)": 0.241849 + }, + { + "epoch": 0.46336, + "grad_norm": 0.653357378265671, + "learning_rate": 4.8523990168803935e-06, + "loss": 0.37518754601478577, + "memory(GiB)": 54.25, + "step": 1448, + "token_acc": 0.8452289588129956, + "train_speed(iter/s)": 0.241842 + }, + { + "epoch": 0.46368, + "grad_norm": 0.7376966583836165, + "learning_rate": 4.852100339099826e-06, + "loss": 0.4429072141647339, + "memory(GiB)": 54.25, + "step": 1449, + "token_acc": 0.9375764993880049, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.464, + "grad_norm": 0.6090636384243845, + "learning_rate": 4.851801368641176e-06, + "loss": 0.38109028339385986, + "memory(GiB)": 54.25, + "step": 1450, + "token_acc": 0.8765571913929785, + "train_speed(iter/s)": 0.241845 + }, + { + "epoch": 0.46432, + "grad_norm": 0.6214327788669147, + "learning_rate": 4.8515021055416415e-06, + "loss": 0.3926343023777008, + "memory(GiB)": 54.25, + "step": 1451, + "token_acc": 0.8627140974967062, + "train_speed(iter/s)": 0.241834 + }, + { + "epoch": 0.46464, + "grad_norm": 0.6687320358216613, + "learning_rate": 4.851202549838463e-06, + "loss": 0.36032289266586304, + "memory(GiB)": 54.25, + "step": 1452, + "token_acc": 0.9139603542808942, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.46496, + "grad_norm": 
0.6533852356700143, + "learning_rate": 4.850902701568915e-06, + "loss": 0.39747369289398193, + "memory(GiB)": 54.25, + "step": 1453, + "token_acc": 0.8532467532467533, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.46528, + "grad_norm": 0.6051285510988066, + "learning_rate": 4.850602560770307e-06, + "loss": 0.3518303632736206, + "memory(GiB)": 54.25, + "step": 1454, + "token_acc": 0.9437153689122193, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.4656, + "grad_norm": 0.6432812992491144, + "learning_rate": 4.850302127479987e-06, + "loss": 0.31848493218421936, + "memory(GiB)": 54.25, + "step": 1455, + "token_acc": 0.9306615776081425, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.46592, + "grad_norm": 0.6913765588708352, + "learning_rate": 4.850001401735339e-06, + "loss": 0.32415902614593506, + "memory(GiB)": 54.25, + "step": 1456, + "token_acc": 0.9162857142857143, + "train_speed(iter/s)": 0.241833 + }, + { + "epoch": 0.46624, + "grad_norm": 0.5892988793974099, + "learning_rate": 4.849700383573783e-06, + "loss": 0.2838408052921295, + "memory(GiB)": 54.25, + "step": 1457, + "token_acc": 0.9307750188111362, + "train_speed(iter/s)": 0.241791 + }, + { + "epoch": 0.46656, + "grad_norm": 0.6413030759681971, + "learning_rate": 4.849399073032777e-06, + "loss": 0.3857778310775757, + "memory(GiB)": 54.25, + "step": 1458, + "token_acc": 0.8776358776358776, + "train_speed(iter/s)": 0.241795 + }, + { + "epoch": 0.46688, + "grad_norm": 0.6412609023018992, + "learning_rate": 4.849097470149811e-06, + "loss": 0.3490535616874695, + "memory(GiB)": 54.25, + "step": 1459, + "token_acc": 0.9368770764119602, + "train_speed(iter/s)": 0.241803 + }, + { + "epoch": 0.4672, + "grad_norm": 0.6305974918176782, + "learning_rate": 4.848795574962415e-06, + "loss": 0.37057244777679443, + "memory(GiB)": 54.25, + "step": 1460, + "token_acc": 0.9397865853658537, + "train_speed(iter/s)": 0.241807 + }, + { + "epoch": 0.46752, + "grad_norm": 0.6525717209730841, + "learning_rate": 4.848493387508156e-06, + "loss": 0.35291391611099243, + "memory(GiB)": 54.25, + "step": 1461, + "token_acc": 0.8712100139082058, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.46784, + "grad_norm": 0.6603824576447729, + "learning_rate": 4.848190907824635e-06, + "loss": 0.3310721218585968, + "memory(GiB)": 54.25, + "step": 1462, + "token_acc": 0.8476442994085254, + "train_speed(iter/s)": 0.241816 + }, + { + "epoch": 0.46816, + "grad_norm": 0.6519847830709892, + "learning_rate": 4.847888135949491e-06, + "loss": 0.310594379901886, + "memory(GiB)": 54.25, + "step": 1463, + "token_acc": 0.8964852121731676, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.46848, + "grad_norm": 0.6609194551586883, + "learning_rate": 4.8475850719203976e-06, + "loss": 0.3937799036502838, + "memory(GiB)": 54.25, + "step": 1464, + "token_acc": 0.8400970445522717, + "train_speed(iter/s)": 0.241809 + }, + { + "epoch": 0.4688, + "grad_norm": 0.6950441717947323, + "learning_rate": 4.8472817157750665e-06, + "loss": 0.4669187664985657, + "memory(GiB)": 54.25, + "step": 1465, + "token_acc": 0.8882193431756553, + "train_speed(iter/s)": 0.241815 + }, + { + "epoch": 0.46912, + "grad_norm": 0.6286104048099016, + "learning_rate": 4.846978067551245e-06, + "loss": 0.3690120577812195, + "memory(GiB)": 54.25, + "step": 1466, + "token_acc": 0.8618996798292423, + "train_speed(iter/s)": 0.241806 + }, + { + "epoch": 0.46944, + "grad_norm": 0.6379902047758267, + "learning_rate": 4.846674127286718e-06, + "loss": 0.37685227394104004, + "memory(GiB)": 54.25, + 
"step": 1467, + "token_acc": 0.8419886003799874, + "train_speed(iter/s)": 0.241809 + }, + { + "epoch": 0.46976, + "grad_norm": 0.667203562243937, + "learning_rate": 4.8463698950193035e-06, + "loss": 0.41773244738578796, + "memory(GiB)": 54.25, + "step": 1468, + "token_acc": 0.9384277075316108, + "train_speed(iter/s)": 0.241803 + }, + { + "epoch": 0.47008, + "grad_norm": 0.6235944122375147, + "learning_rate": 4.84606537078686e-06, + "loss": 0.34197184443473816, + "memory(GiB)": 54.25, + "step": 1469, + "token_acc": 0.8976997578692494, + "train_speed(iter/s)": 0.241802 + }, + { + "epoch": 0.4704, + "grad_norm": 0.6159679808956801, + "learning_rate": 4.845760554627279e-06, + "loss": 0.365743488073349, + "memory(GiB)": 54.25, + "step": 1470, + "token_acc": 0.9079391891891891, + "train_speed(iter/s)": 0.241801 + }, + { + "epoch": 0.47072, + "grad_norm": 0.6544436873052003, + "learning_rate": 4.84545544657849e-06, + "loss": 0.3941645622253418, + "memory(GiB)": 54.25, + "step": 1471, + "token_acc": 0.8896401835305482, + "train_speed(iter/s)": 0.241804 + }, + { + "epoch": 0.47104, + "grad_norm": 0.606961454974234, + "learning_rate": 4.845150046678457e-06, + "loss": 0.36063116788864136, + "memory(GiB)": 54.25, + "step": 1472, + "token_acc": 0.8695852534562212, + "train_speed(iter/s)": 0.241802 + }, + { + "epoch": 0.47136, + "grad_norm": 0.6603971529411751, + "learning_rate": 4.844844354965184e-06, + "loss": 0.41867026686668396, + "memory(GiB)": 54.25, + "step": 1473, + "token_acc": 0.8959276018099548, + "train_speed(iter/s)": 0.241785 + }, + { + "epoch": 0.47168, + "grad_norm": 0.6329646160652255, + "learning_rate": 4.844538371476708e-06, + "loss": 0.4201732575893402, + "memory(GiB)": 54.25, + "step": 1474, + "token_acc": 0.909328968903437, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.472, + "grad_norm": 0.6019775292480608, + "learning_rate": 4.8442320962511034e-06, + "loss": 0.36481499671936035, + "memory(GiB)": 54.25, + "step": 1475, + "token_acc": 0.9222542163718634, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.47232, + "grad_norm": 0.6941887679680937, + "learning_rate": 4.84392552932648e-06, + "loss": 0.4066670835018158, + "memory(GiB)": 54.25, + "step": 1476, + "token_acc": 0.8852320675105485, + "train_speed(iter/s)": 0.241771 + }, + { + "epoch": 0.47264, + "grad_norm": 0.6412840439059975, + "learning_rate": 4.843618670740986e-06, + "loss": 0.30425935983657837, + "memory(GiB)": 54.25, + "step": 1477, + "token_acc": 0.9144320578487496, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.47296, + "grad_norm": 0.6645101114657694, + "learning_rate": 4.843311520532804e-06, + "loss": 0.39848119020462036, + "memory(GiB)": 54.25, + "step": 1478, + "token_acc": 0.9406257698940625, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.47328, + "grad_norm": 0.6360468398863378, + "learning_rate": 4.843004078740154e-06, + "loss": 0.39256346225738525, + "memory(GiB)": 54.25, + "step": 1479, + "token_acc": 0.8783116299955096, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.4736, + "grad_norm": 0.7308225363677872, + "learning_rate": 4.842696345401291e-06, + "loss": 0.45674777030944824, + "memory(GiB)": 54.25, + "step": 1480, + "token_acc": 0.9075882794891059, + "train_speed(iter/s)": 0.241772 + }, + { + "epoch": 0.47392, + "grad_norm": 0.6950063504024618, + "learning_rate": 4.842388320554507e-06, + "loss": 0.46128541231155396, + "memory(GiB)": 54.25, + "step": 1481, + "token_acc": 0.8768042967438738, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.47424, + 
"grad_norm": 0.7479808257057875, + "learning_rate": 4.842080004238132e-06, + "loss": 0.4864889979362488, + "memory(GiB)": 54.25, + "step": 1482, + "token_acc": 0.8570251339101772, + "train_speed(iter/s)": 0.241779 + }, + { + "epoch": 0.47456, + "grad_norm": 0.6565587099321127, + "learning_rate": 4.8417713964905294e-06, + "loss": 0.3807644844055176, + "memory(GiB)": 54.25, + "step": 1483, + "token_acc": 0.8772038354469532, + "train_speed(iter/s)": 0.241783 + }, + { + "epoch": 0.47488, + "grad_norm": 0.7543911231574814, + "learning_rate": 4.8414624973501e-06, + "loss": 0.38640326261520386, + "memory(GiB)": 54.25, + "step": 1484, + "token_acc": 0.925770841263799, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.4752, + "grad_norm": 0.6708951466646187, + "learning_rate": 4.841153306855281e-06, + "loss": 0.36758190393447876, + "memory(GiB)": 54.25, + "step": 1485, + "token_acc": 0.8763216917654598, + "train_speed(iter/s)": 0.241765 + }, + { + "epoch": 0.47552, + "grad_norm": 0.5844437826967844, + "learning_rate": 4.840843825044546e-06, + "loss": 0.37181928753852844, + "memory(GiB)": 54.25, + "step": 1486, + "token_acc": 0.9137651821862348, + "train_speed(iter/s)": 0.241756 + }, + { + "epoch": 0.47584, + "grad_norm": 0.6359591727994109, + "learning_rate": 4.840534051956404e-06, + "loss": 0.2721696197986603, + "memory(GiB)": 54.25, + "step": 1487, + "token_acc": 0.9221751872354282, + "train_speed(iter/s)": 0.241762 + }, + { + "epoch": 0.47616, + "grad_norm": 0.5697667248037063, + "learning_rate": 4.840223987629402e-06, + "loss": 0.3613870143890381, + "memory(GiB)": 54.25, + "step": 1488, + "token_acc": 0.8189181371131038, + "train_speed(iter/s)": 0.24175 + }, + { + "epoch": 0.47648, + "grad_norm": 0.6689593299164841, + "learning_rate": 4.839913632102121e-06, + "loss": 0.38320374488830566, + "memory(GiB)": 54.25, + "step": 1489, + "token_acc": 0.8707326428845417, + "train_speed(iter/s)": 0.241758 + }, + { + "epoch": 0.4768, + "grad_norm": 0.696072179864704, + "learning_rate": 4.839602985413181e-06, + "loss": 0.47587648034095764, + "memory(GiB)": 54.25, + "step": 1490, + "token_acc": 0.8665058303176518, + "train_speed(iter/s)": 0.241759 + }, + { + "epoch": 0.47712, + "grad_norm": 0.7018430495647888, + "learning_rate": 4.839292047601234e-06, + "loss": 0.36884844303131104, + "memory(GiB)": 54.25, + "step": 1491, + "token_acc": 0.8866194247603167, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.47744, + "grad_norm": 0.6754145978653285, + "learning_rate": 4.838980818704974e-06, + "loss": 0.3894452154636383, + "memory(GiB)": 54.25, + "step": 1492, + "token_acc": 0.8992069583013559, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.47776, + "grad_norm": 0.6686512740450504, + "learning_rate": 4.838669298763125e-06, + "loss": 0.4050920009613037, + "memory(GiB)": 54.25, + "step": 1493, + "token_acc": 0.9028930817610062, + "train_speed(iter/s)": 0.241775 + }, + { + "epoch": 0.47808, + "grad_norm": 0.730996499552223, + "learning_rate": 4.8383574878144524e-06, + "loss": 0.43009987473487854, + "memory(GiB)": 54.25, + "step": 1494, + "token_acc": 0.8817619783616693, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.4784, + "grad_norm": 0.6771846406261665, + "learning_rate": 4.838045385897755e-06, + "loss": 0.4533268213272095, + "memory(GiB)": 54.25, + "step": 1495, + "token_acc": 0.934375, + "train_speed(iter/s)": 0.241778 + }, + { + "epoch": 0.47872, + "grad_norm": 0.6176501871414141, + "learning_rate": 4.837732993051868e-06, + "loss": 0.36898577213287354, + "memory(GiB)": 54.25, + 
"step": 1496, + "token_acc": 0.8668032786885246, + "train_speed(iter/s)": 0.241773 + }, + { + "epoch": 0.47904, + "grad_norm": 0.6444717398866658, + "learning_rate": 4.837420309315663e-06, + "loss": 0.4033448100090027, + "memory(GiB)": 54.25, + "step": 1497, + "token_acc": 0.8979766315189512, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.47936, + "grad_norm": 0.635695026558458, + "learning_rate": 4.83710733472805e-06, + "loss": 0.353000283241272, + "memory(GiB)": 54.25, + "step": 1498, + "token_acc": 0.8586702305379218, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.47968, + "grad_norm": 0.6512841613292042, + "learning_rate": 4.836794069327971e-06, + "loss": 0.32141709327697754, + "memory(GiB)": 54.25, + "step": 1499, + "token_acc": 0.9395348837209302, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.48, + "grad_norm": 0.6676330768676315, + "learning_rate": 4.8364805131544075e-06, + "loss": 0.4188171625137329, + "memory(GiB)": 54.25, + "step": 1500, + "token_acc": 0.8808653260207191, + "train_speed(iter/s)": 0.241762 + }, + { + "epoch": 0.48032, + "grad_norm": 0.6493968981066774, + "learning_rate": 4.836166666246376e-06, + "loss": 0.40889984369277954, + "memory(GiB)": 54.25, + "step": 1501, + "token_acc": 0.9105992582471208, + "train_speed(iter/s)": 0.241755 + }, + { + "epoch": 0.48064, + "grad_norm": 0.6191748725204426, + "learning_rate": 4.835852528642929e-06, + "loss": 0.3153862953186035, + "memory(GiB)": 54.25, + "step": 1502, + "token_acc": 0.9281183932346723, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.48096, + "grad_norm": 0.7009957365600362, + "learning_rate": 4.835538100383157e-06, + "loss": 0.39617669582366943, + "memory(GiB)": 54.25, + "step": 1503, + "token_acc": 0.9291942797349145, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.48128, + "grad_norm": 0.601580433358991, + "learning_rate": 4.8352233815061825e-06, + "loss": 0.3203248381614685, + "memory(GiB)": 54.25, + "step": 1504, + "token_acc": 0.9537296690970275, + "train_speed(iter/s)": 0.241772 + }, + { + "epoch": 0.4816, + "grad_norm": 0.6310861600781692, + "learning_rate": 4.834908372051169e-06, + "loss": 0.35158130526542664, + "memory(GiB)": 54.25, + "step": 1505, + "token_acc": 0.9183006535947712, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.48192, + "grad_norm": 0.5974947379680431, + "learning_rate": 4.834593072057313e-06, + "loss": 0.36691075563430786, + "memory(GiB)": 54.25, + "step": 1506, + "token_acc": 0.8986948415164698, + "train_speed(iter/s)": 0.241753 + }, + { + "epoch": 0.48224, + "grad_norm": 0.6786729285566938, + "learning_rate": 4.834277481563849e-06, + "loss": 0.3334679901599884, + "memory(GiB)": 54.25, + "step": 1507, + "token_acc": 0.8978925497180172, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.48256, + "grad_norm": 0.6447024176817246, + "learning_rate": 4.833961600610045e-06, + "loss": 0.32462602853775024, + "memory(GiB)": 54.25, + "step": 1508, + "token_acc": 0.9096146921137919, + "train_speed(iter/s)": 0.241767 + }, + { + "epoch": 0.48288, + "grad_norm": 0.6625293392031373, + "learning_rate": 4.8336454292352085e-06, + "loss": 0.326140820980072, + "memory(GiB)": 54.25, + "step": 1509, + "token_acc": 0.8853059728838403, + "train_speed(iter/s)": 0.241776 + }, + { + "epoch": 0.4832, + "grad_norm": 0.6971449673108976, + "learning_rate": 4.833328967478682e-06, + "loss": 0.3713795840740204, + "memory(GiB)": 54.25, + "step": 1510, + "token_acc": 0.8594958822061393, + "train_speed(iter/s)": 0.241774 + }, + { + "epoch": 0.48352, + 
"grad_norm": 0.640245658212126, + "learning_rate": 4.833012215379841e-06, + "loss": 0.39579901099205017, + "memory(GiB)": 54.25, + "step": 1511, + "token_acc": 0.8377710678012626, + "train_speed(iter/s)": 0.241761 + }, + { + "epoch": 0.48384, + "grad_norm": 0.6696428885912792, + "learning_rate": 4.832695172978102e-06, + "loss": 0.3933345079421997, + "memory(GiB)": 54.25, + "step": 1512, + "token_acc": 0.8623234916559692, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.48416, + "grad_norm": 0.6356316518681527, + "learning_rate": 4.832377840312916e-06, + "loss": 0.33851271867752075, + "memory(GiB)": 54.25, + "step": 1513, + "token_acc": 0.9359104781281791, + "train_speed(iter/s)": 0.241763 + }, + { + "epoch": 0.48448, + "grad_norm": 0.6559001420913474, + "learning_rate": 4.832060217423767e-06, + "loss": 0.36699724197387695, + "memory(GiB)": 54.25, + "step": 1514, + "token_acc": 0.9138913891389139, + "train_speed(iter/s)": 0.24176 + }, + { + "epoch": 0.4848, + "grad_norm": 0.6383498012737833, + "learning_rate": 4.8317423043501795e-06, + "loss": 0.377672016620636, + "memory(GiB)": 54.25, + "step": 1515, + "token_acc": 0.9257241891557316, + "train_speed(iter/s)": 0.241764 + }, + { + "epoch": 0.48512, + "grad_norm": 0.6706135025753575, + "learning_rate": 4.831424101131713e-06, + "loss": 0.44356995820999146, + "memory(GiB)": 54.25, + "step": 1516, + "token_acc": 0.870640904806786, + "train_speed(iter/s)": 0.241766 + }, + { + "epoch": 0.48544, + "grad_norm": 0.7188096196737824, + "learning_rate": 4.83110560780796e-06, + "loss": 0.3727024793624878, + "memory(GiB)": 54.25, + "step": 1517, + "token_acc": 0.9497041420118343, + "train_speed(iter/s)": 0.241769 + }, + { + "epoch": 0.48576, + "grad_norm": 0.6623039055080492, + "learning_rate": 4.830786824418554e-06, + "loss": 0.3090604543685913, + "memory(GiB)": 54.25, + "step": 1518, + "token_acc": 0.9146948941469489, + "train_speed(iter/s)": 0.241777 + }, + { + "epoch": 0.48608, + "grad_norm": 0.6278899077648262, + "learning_rate": 4.83046775100316e-06, + "loss": 0.35559195280075073, + "memory(GiB)": 54.25, + "step": 1519, + "token_acc": 0.8754098360655738, + "train_speed(iter/s)": 0.241785 + }, + { + "epoch": 0.4864, + "grad_norm": 0.6143198708057166, + "learning_rate": 4.830148387601482e-06, + "loss": 0.35513895750045776, + "memory(GiB)": 54.25, + "step": 1520, + "token_acc": 0.8890905790524596, + "train_speed(iter/s)": 0.241784 + }, + { + "epoch": 0.48672, + "grad_norm": 0.6661181904411209, + "learning_rate": 4.829828734253259e-06, + "loss": 0.4068344831466675, + "memory(GiB)": 54.25, + "step": 1521, + "token_acc": 0.8639519609682867, + "train_speed(iter/s)": 0.241791 + }, + { + "epoch": 0.48704, + "grad_norm": 0.6435077737521738, + "learning_rate": 4.8295087909982664e-06, + "loss": 0.44284987449645996, + "memory(GiB)": 54.25, + "step": 1522, + "token_acc": 0.9510751012776566, + "train_speed(iter/s)": 0.241786 + }, + { + "epoch": 0.48736, + "grad_norm": 0.6251708416823181, + "learning_rate": 4.829188557876317e-06, + "loss": 0.3747749626636505, + "memory(GiB)": 54.25, + "step": 1523, + "token_acc": 0.8716991197652707, + "train_speed(iter/s)": 0.241785 + }, + { + "epoch": 0.48768, + "grad_norm": 0.6960858505963365, + "learning_rate": 4.828868034927256e-06, + "loss": 0.46793705224990845, + "memory(GiB)": 54.25, + "step": 1524, + "token_acc": 0.8477357252242398, + "train_speed(iter/s)": 0.241788 + }, + { + "epoch": 0.488, + "grad_norm": 0.6032279459356941, + "learning_rate": 4.828547222190967e-06, + "loss": 0.36403191089630127, + "memory(GiB)": 
54.25, + "step": 1525, + "token_acc": 0.9422761561167596, + "train_speed(iter/s)": 0.241782 + }, + { + "epoch": 0.48832, + "grad_norm": 0.716463648036825, + "learning_rate": 4.828226119707372e-06, + "loss": 0.3849448561668396, + "memory(GiB)": 54.25, + "step": 1526, + "token_acc": 0.8573236317621006, + "train_speed(iter/s)": 0.241783 + }, + { + "epoch": 0.48864, + "grad_norm": 0.6977974878718189, + "learning_rate": 4.827904727516424e-06, + "loss": 0.4114413559436798, + "memory(GiB)": 54.25, + "step": 1527, + "token_acc": 0.8507884208048953, + "train_speed(iter/s)": 0.241788 + }, + { + "epoch": 0.48896, + "grad_norm": 0.6222228787187861, + "learning_rate": 4.827583045658116e-06, + "loss": 0.40755054354667664, + "memory(GiB)": 54.25, + "step": 1528, + "token_acc": 0.9228222636043865, + "train_speed(iter/s)": 0.241787 + }, + { + "epoch": 0.48928, + "grad_norm": 1.1219248675523057, + "learning_rate": 4.827261074172476e-06, + "loss": 0.3799545466899872, + "memory(GiB)": 54.25, + "step": 1529, + "token_acc": 0.9113880235375562, + "train_speed(iter/s)": 0.241791 + }, + { + "epoch": 0.4896, + "grad_norm": 0.6492501607014584, + "learning_rate": 4.826938813099567e-06, + "loss": 0.35900962352752686, + "memory(GiB)": 54.25, + "step": 1530, + "token_acc": 0.8904252543699452, + "train_speed(iter/s)": 0.241797 + }, + { + "epoch": 0.48992, + "grad_norm": 0.6670911967974511, + "learning_rate": 4.826616262479489e-06, + "loss": 0.3292381763458252, + "memory(GiB)": 54.25, + "step": 1531, + "token_acc": 0.9409282700421941, + "train_speed(iter/s)": 0.241804 + }, + { + "epoch": 0.49024, + "grad_norm": 0.67262318613554, + "learning_rate": 4.8262934223523775e-06, + "loss": 0.42597293853759766, + "memory(GiB)": 54.25, + "step": 1532, + "token_acc": 0.8135292627311882, + "train_speed(iter/s)": 0.241806 + }, + { + "epoch": 0.49056, + "grad_norm": 0.6446637664265896, + "learning_rate": 4.825970292758405e-06, + "loss": 0.418919175863266, + "memory(GiB)": 54.25, + "step": 1533, + "token_acc": 0.8364470665355621, + "train_speed(iter/s)": 0.241798 + }, + { + "epoch": 0.49088, + "grad_norm": 0.6174944583522314, + "learning_rate": 4.825646873737779e-06, + "loss": 0.3500853180885315, + "memory(GiB)": 54.25, + "step": 1534, + "token_acc": 0.9029905178701677, + "train_speed(iter/s)": 0.241802 + }, + { + "epoch": 0.4912, + "grad_norm": 0.695069199117932, + "learning_rate": 4.825323165330744e-06, + "loss": 0.4295937716960907, + "memory(GiB)": 54.25, + "step": 1535, + "token_acc": 0.9277797640328924, + "train_speed(iter/s)": 0.241804 + }, + { + "epoch": 0.49152, + "grad_norm": 0.695473573788252, + "learning_rate": 4.8249991675775795e-06, + "loss": 0.4199369549751282, + "memory(GiB)": 54.25, + "step": 1536, + "token_acc": 0.9023941068139963, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.49184, + "grad_norm": 0.667972055900042, + "learning_rate": 4.8246748805186e-06, + "loss": 0.3536589741706848, + "memory(GiB)": 54.25, + "step": 1537, + "token_acc": 0.8998870907038012, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.49216, + "grad_norm": 0.671943236216822, + "learning_rate": 4.824350304194161e-06, + "loss": 0.503467321395874, + "memory(GiB)": 54.25, + "step": 1538, + "token_acc": 0.8883196721311475, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.49248, + "grad_norm": 0.6682744042843137, + "learning_rate": 4.824025438644646e-06, + "loss": 0.3976234495639801, + "memory(GiB)": 54.25, + "step": 1539, + "token_acc": 0.8398382204246714, + "train_speed(iter/s)": 0.24181 + }, + { + "epoch": 0.4928, + "grad_norm": 
0.733585513358462, + "learning_rate": 4.823700283910482e-06, + "loss": 0.46138644218444824, + "memory(GiB)": 54.25, + "step": 1540, + "token_acc": 0.8795215038450583, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.49312, + "grad_norm": 0.6802333660508275, + "learning_rate": 4.823374840032128e-06, + "loss": 0.34198933839797974, + "memory(GiB)": 54.25, + "step": 1541, + "token_acc": 0.8833333333333333, + "train_speed(iter/s)": 0.24182 + }, + { + "epoch": 0.49344, + "grad_norm": 0.6577572612007987, + "learning_rate": 4.823049107050079e-06, + "loss": 0.42427170276641846, + "memory(GiB)": 54.25, + "step": 1542, + "token_acc": 0.8414872798434442, + "train_speed(iter/s)": 0.241816 + }, + { + "epoch": 0.49376, + "grad_norm": 0.6482692286676788, + "learning_rate": 4.822723085004868e-06, + "loss": 0.3193492889404297, + "memory(GiB)": 54.25, + "step": 1543, + "token_acc": 0.9561978545887961, + "train_speed(iter/s)": 0.24182 + }, + { + "epoch": 0.49408, + "grad_norm": 0.7534609808855515, + "learning_rate": 4.822396773937061e-06, + "loss": 0.4268062114715576, + "memory(GiB)": 54.25, + "step": 1544, + "token_acc": 0.888268156424581, + "train_speed(iter/s)": 0.241823 + }, + { + "epoch": 0.4944, + "grad_norm": 0.659343628102621, + "learning_rate": 4.8220701738872645e-06, + "loss": 0.3726666569709778, + "memory(GiB)": 54.25, + "step": 1545, + "token_acc": 0.8811447034387261, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.49472, + "grad_norm": 0.6091462481234744, + "learning_rate": 4.821743284896117e-06, + "loss": 0.35635942220687866, + "memory(GiB)": 54.25, + "step": 1546, + "token_acc": 0.8236594803758983, + "train_speed(iter/s)": 0.241817 + }, + { + "epoch": 0.49504, + "grad_norm": 0.6397814082943194, + "learning_rate": 4.821416107004293e-06, + "loss": 0.40842103958129883, + "memory(GiB)": 54.25, + "step": 1547, + "token_acc": 0.8251144769284959, + "train_speed(iter/s)": 0.241817 + }, + { + "epoch": 0.49536, + "grad_norm": 0.6451523341984937, + "learning_rate": 4.821088640252505e-06, + "loss": 0.3273735046386719, + "memory(GiB)": 54.25, + "step": 1548, + "token_acc": 0.8848177376925968, + "train_speed(iter/s)": 0.241823 + }, + { + "epoch": 0.49568, + "grad_norm": 0.6636007048450937, + "learning_rate": 4.820760884681501e-06, + "loss": 0.3489418029785156, + "memory(GiB)": 54.25, + "step": 1549, + "token_acc": 0.8920599039276632, + "train_speed(iter/s)": 0.24183 + }, + { + "epoch": 0.496, + "grad_norm": 0.57434845545398, + "learning_rate": 4.820432840332064e-06, + "loss": 0.3211090862751007, + "memory(GiB)": 54.25, + "step": 1550, + "token_acc": 0.8776748642606196, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.49632, + "grad_norm": 0.6523802183872731, + "learning_rate": 4.820104507245014e-06, + "loss": 0.3728730082511902, + "memory(GiB)": 54.25, + "step": 1551, + "token_acc": 0.9095607235142119, + "train_speed(iter/s)": 0.241831 + }, + { + "epoch": 0.49664, + "grad_norm": 0.7767849702675478, + "learning_rate": 4.819775885461206e-06, + "loss": 0.44868987798690796, + "memory(GiB)": 54.25, + "step": 1552, + "token_acc": 0.8925809223718132, + "train_speed(iter/s)": 0.241834 + }, + { + "epoch": 0.49696, + "grad_norm": 0.6774921582299002, + "learning_rate": 4.81944697502153e-06, + "loss": 0.46661466360092163, + "memory(GiB)": 54.25, + "step": 1553, + "token_acc": 0.8666044776119403, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.49728, + "grad_norm": 0.6358405427571643, + "learning_rate": 4.819117775966915e-06, + "loss": 0.38544756174087524, + "memory(GiB)": 54.25, + "step": 
1554, + "token_acc": 0.817628298057408, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.4976, + "grad_norm": 0.5984621377904535, + "learning_rate": 4.818788288338323e-06, + "loss": 0.4033125638961792, + "memory(GiB)": 54.25, + "step": 1555, + "token_acc": 0.8910472972972973, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.49792, + "grad_norm": 0.8161557551222747, + "learning_rate": 4.818458512176754e-06, + "loss": 0.3106280565261841, + "memory(GiB)": 54.25, + "step": 1556, + "token_acc": 0.8919198895027625, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.49824, + "grad_norm": 0.6101679214362422, + "learning_rate": 4.818128447523242e-06, + "loss": 0.3878135681152344, + "memory(GiB)": 54.25, + "step": 1557, + "token_acc": 0.9247558874210224, + "train_speed(iter/s)": 0.241831 + }, + { + "epoch": 0.49856, + "grad_norm": 0.6335982132060257, + "learning_rate": 4.8177980944188586e-06, + "loss": 0.33754998445510864, + "memory(GiB)": 54.25, + "step": 1558, + "token_acc": 0.9201435621354868, + "train_speed(iter/s)": 0.24183 + }, + { + "epoch": 0.49888, + "grad_norm": 0.673952288678112, + "learning_rate": 4.81746745290471e-06, + "loss": 0.33724695444107056, + "memory(GiB)": 54.25, + "step": 1559, + "token_acc": 0.9014577259475218, + "train_speed(iter/s)": 0.241828 + }, + { + "epoch": 0.4992, + "grad_norm": 0.7091780650342607, + "learning_rate": 4.8171365230219395e-06, + "loss": 0.43171244859695435, + "memory(GiB)": 54.25, + "step": 1560, + "token_acc": 0.8598097289535093, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.49952, + "grad_norm": 0.6070920147110431, + "learning_rate": 4.8168053048117235e-06, + "loss": 0.4040122628211975, + "memory(GiB)": 54.25, + "step": 1561, + "token_acc": 0.8661281015579919, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.49984, + "grad_norm": 0.6578992226751443, + "learning_rate": 4.81647379831528e-06, + "loss": 0.4357371926307678, + "memory(GiB)": 54.25, + "step": 1562, + "token_acc": 0.8640749931072512, + "train_speed(iter/s)": 0.241839 + }, + { + "epoch": 0.50016, + "grad_norm": 0.6254833198102008, + "learning_rate": 4.816142003573855e-06, + "loss": 0.33640122413635254, + "memory(GiB)": 54.25, + "step": 1563, + "token_acc": 0.9168463934819076, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.50048, + "grad_norm": 0.6241395083808589, + "learning_rate": 4.815809920628738e-06, + "loss": 0.39594364166259766, + "memory(GiB)": 54.25, + "step": 1564, + "token_acc": 0.9241603466955579, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.5008, + "grad_norm": 0.6079885031096157, + "learning_rate": 4.815477549521249e-06, + "loss": 0.3507034182548523, + "memory(GiB)": 54.25, + "step": 1565, + "token_acc": 0.8901705115346038, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.50112, + "grad_norm": 0.6896896912006278, + "learning_rate": 4.815144890292746e-06, + "loss": 0.4295618236064911, + "memory(GiB)": 54.25, + "step": 1566, + "token_acc": 0.8753221010901883, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.50144, + "grad_norm": 0.6660443304133443, + "learning_rate": 4.814811942984625e-06, + "loss": 0.34144657850265503, + "memory(GiB)": 54.25, + "step": 1567, + "token_acc": 0.9249692496924969, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.50176, + "grad_norm": 0.6401540881970936, + "learning_rate": 4.814478707638312e-06, + "loss": 0.43878406286239624, + "memory(GiB)": 54.25, + "step": 1568, + "token_acc": 0.866039654295882, + "train_speed(iter/s)": 0.241844 + }, + { + "epoch": 0.50208, + "grad_norm": 
0.5938266611309596, + "learning_rate": 4.8141451842952755e-06, + "loss": 0.3193020224571228, + "memory(GiB)": 54.25, + "step": 1569, + "token_acc": 0.9028850060950833, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.5024, + "grad_norm": 0.6323854832513351, + "learning_rate": 4.813811372997014e-06, + "loss": 0.4334718883037567, + "memory(GiB)": 54.25, + "step": 1570, + "token_acc": 0.8531027466937945, + "train_speed(iter/s)": 0.24184 + }, + { + "epoch": 0.50272, + "grad_norm": 0.6937280734717718, + "learning_rate": 4.813477273785066e-06, + "loss": 0.4528685212135315, + "memory(GiB)": 54.25, + "step": 1571, + "token_acc": 0.8383902906419675, + "train_speed(iter/s)": 0.241844 + }, + { + "epoch": 0.50304, + "grad_norm": 0.6868142779654376, + "learning_rate": 4.813142886701005e-06, + "loss": 0.3815188705921173, + "memory(GiB)": 54.25, + "step": 1572, + "token_acc": 0.9255981645362177, + "train_speed(iter/s)": 0.241851 + }, + { + "epoch": 0.50336, + "grad_norm": 0.6812174360971962, + "learning_rate": 4.812808211786438e-06, + "loss": 0.39587730169296265, + "memory(GiB)": 54.25, + "step": 1573, + "token_acc": 0.8877551020408163, + "train_speed(iter/s)": 0.241848 + }, + { + "epoch": 0.50368, + "grad_norm": 0.7953682821796039, + "learning_rate": 4.812473249083011e-06, + "loss": 0.41292816400527954, + "memory(GiB)": 54.25, + "step": 1574, + "token_acc": 0.916626036079961, + "train_speed(iter/s)": 0.241855 + }, + { + "epoch": 0.504, + "grad_norm": 0.6607210462240795, + "learning_rate": 4.8121379986324025e-06, + "loss": 0.44143953919410706, + "memory(GiB)": 54.25, + "step": 1575, + "token_acc": 0.8284191829484903, + "train_speed(iter/s)": 0.241857 + }, + { + "epoch": 0.50432, + "grad_norm": 0.6752927524693729, + "learning_rate": 4.81180246047633e-06, + "loss": 0.35833513736724854, + "memory(GiB)": 54.25, + "step": 1576, + "token_acc": 0.8459079283887468, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.50464, + "grad_norm": 0.6307803202911265, + "learning_rate": 4.811466634656545e-06, + "loss": 0.3719581067562103, + "memory(GiB)": 54.25, + "step": 1577, + "token_acc": 0.8958999709217796, + "train_speed(iter/s)": 0.241859 + }, + { + "epoch": 0.50496, + "grad_norm": 0.6873395561472704, + "learning_rate": 4.811130521214836e-06, + "loss": 0.36361265182495117, + "memory(GiB)": 54.25, + "step": 1578, + "token_acc": 0.9124605678233438, + "train_speed(iter/s)": 0.24186 + }, + { + "epoch": 0.50528, + "grad_norm": 0.6626714323595104, + "learning_rate": 4.810794120193025e-06, + "loss": 0.3004828095436096, + "memory(GiB)": 54.25, + "step": 1579, + "token_acc": 0.889227421109902, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.5056, + "grad_norm": 0.6859527977639291, + "learning_rate": 4.810457431632972e-06, + "loss": 0.3555012345314026, + "memory(GiB)": 54.25, + "step": 1580, + "token_acc": 0.8946188340807175, + "train_speed(iter/s)": 0.241866 + }, + { + "epoch": 0.50592, + "grad_norm": 0.6075685087727429, + "learning_rate": 4.810120455576572e-06, + "loss": 0.3691089153289795, + "memory(GiB)": 54.25, + "step": 1581, + "token_acc": 0.8935909980430529, + "train_speed(iter/s)": 0.24186 + }, + { + "epoch": 0.50624, + "grad_norm": 0.5997089655705168, + "learning_rate": 4.809783192065757e-06, + "loss": 0.35233819484710693, + "memory(GiB)": 54.25, + "step": 1582, + "token_acc": 0.9202635914332784, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.50656, + "grad_norm": 0.6966341002165961, + "learning_rate": 4.809445641142492e-06, + "loss": 0.37033599615097046, + "memory(GiB)": 54.25, + "step": 
1583, + "token_acc": 0.8956597983340641, + "train_speed(iter/s)": 0.24185 + }, + { + "epoch": 0.50688, + "grad_norm": 0.6407207307031189, + "learning_rate": 4.809107802848779e-06, + "loss": 0.34409695863723755, + "memory(GiB)": 54.25, + "step": 1584, + "token_acc": 0.8212417557425518, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.5072, + "grad_norm": 0.6417287622534791, + "learning_rate": 4.808769677226657e-06, + "loss": 0.34822577238082886, + "memory(GiB)": 54.25, + "step": 1585, + "token_acc": 0.9421579532814238, + "train_speed(iter/s)": 0.241842 + }, + { + "epoch": 0.50752, + "grad_norm": 0.6336346888181467, + "learning_rate": 4.808431264318201e-06, + "loss": 0.3390272855758667, + "memory(GiB)": 54.25, + "step": 1586, + "token_acc": 0.9395267309377738, + "train_speed(iter/s)": 0.241843 + }, + { + "epoch": 0.50784, + "grad_norm": 0.6545563089850327, + "learning_rate": 4.808092564165518e-06, + "loss": 0.35041338205337524, + "memory(GiB)": 54.25, + "step": 1587, + "token_acc": 0.9076406381192276, + "train_speed(iter/s)": 0.241843 + }, + { + "epoch": 0.50816, + "grad_norm": 0.6511557655512387, + "learning_rate": 4.807753576810756e-06, + "loss": 0.32432517409324646, + "memory(GiB)": 54.25, + "step": 1588, + "token_acc": 0.9040907528360261, + "train_speed(iter/s)": 0.241848 + }, + { + "epoch": 0.50848, + "grad_norm": 0.6516212602223845, + "learning_rate": 4.807414302296095e-06, + "loss": 0.36840689182281494, + "memory(GiB)": 54.25, + "step": 1589, + "token_acc": 0.9152864512625577, + "train_speed(iter/s)": 0.241845 + }, + { + "epoch": 0.5088, + "grad_norm": 0.7303349748276434, + "learning_rate": 4.807074740663751e-06, + "loss": 0.44645851850509644, + "memory(GiB)": 54.25, + "step": 1590, + "token_acc": 0.87630128597673, + "train_speed(iter/s)": 0.241853 + }, + { + "epoch": 0.50912, + "grad_norm": 0.6670902748201561, + "learning_rate": 4.806734891955977e-06, + "loss": 0.3895314335823059, + "memory(GiB)": 54.25, + "step": 1591, + "token_acc": 0.9294431731502669, + "train_speed(iter/s)": 0.241858 + }, + { + "epoch": 0.50944, + "grad_norm": 0.6995448554701168, + "learning_rate": 4.806394756215063e-06, + "loss": 0.3255465030670166, + "memory(GiB)": 54.25, + "step": 1592, + "token_acc": 0.903954802259887, + "train_speed(iter/s)": 0.241858 + }, + { + "epoch": 0.50976, + "grad_norm": 0.6035513224747423, + "learning_rate": 4.80605433348333e-06, + "loss": 0.33167362213134766, + "memory(GiB)": 54.25, + "step": 1593, + "token_acc": 0.9237536656891495, + "train_speed(iter/s)": 0.241853 + }, + { + "epoch": 0.51008, + "grad_norm": 0.6744075091935525, + "learning_rate": 4.80571362380314e-06, + "loss": 0.3918830156326294, + "memory(GiB)": 54.25, + "step": 1594, + "token_acc": 0.9102250489236791, + "train_speed(iter/s)": 0.241853 + }, + { + "epoch": 0.5104, + "grad_norm": 0.603107044502817, + "learning_rate": 4.805372627216888e-06, + "loss": 0.31479907035827637, + "memory(GiB)": 54.25, + "step": 1595, + "token_acc": 0.9257463638683338, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.51072, + "grad_norm": 0.6404574241175068, + "learning_rate": 4.805031343767005e-06, + "loss": 0.35303178429603577, + "memory(GiB)": 54.25, + "step": 1596, + "token_acc": 0.89888, + "train_speed(iter/s)": 0.241861 + }, + { + "epoch": 0.51104, + "grad_norm": 0.6301922227185892, + "learning_rate": 4.804689773495956e-06, + "loss": 0.4279358983039856, + "memory(GiB)": 54.25, + "step": 1597, + "token_acc": 0.872135503155098, + "train_speed(iter/s)": 0.241859 + }, + { + "epoch": 0.51136, + "grad_norm": 
0.7631396285487042, + "learning_rate": 4.804347916446246e-06, + "loss": 0.46427690982818604, + "memory(GiB)": 54.25, + "step": 1598, + "token_acc": 0.904497843499692, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.51168, + "grad_norm": 0.673034189780144, + "learning_rate": 4.804005772660412e-06, + "loss": 0.37359654903411865, + "memory(GiB)": 54.25, + "step": 1599, + "token_acc": 0.9162345432788193, + "train_speed(iter/s)": 0.241868 + }, + { + "epoch": 0.512, + "grad_norm": 0.6631672343178693, + "learning_rate": 4.80366334218103e-06, + "loss": 0.44712120294570923, + "memory(GiB)": 54.25, + "step": 1600, + "token_acc": 0.8462394303515799, + "train_speed(iter/s)": 0.241865 + }, + { + "epoch": 0.51232, + "grad_norm": 0.634016968133855, + "learning_rate": 4.803320625050706e-06, + "loss": 0.29124611616134644, + "memory(GiB)": 54.25, + "step": 1601, + "token_acc": 0.95391532409142, + "train_speed(iter/s)": 0.24186 + }, + { + "epoch": 0.51264, + "grad_norm": 0.6565284519126027, + "learning_rate": 4.802977621312086e-06, + "loss": 0.3875572383403778, + "memory(GiB)": 54.25, + "step": 1602, + "token_acc": 0.9003115264797508, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.51296, + "grad_norm": 0.6882269261543599, + "learning_rate": 4.802634331007853e-06, + "loss": 0.45121821761131287, + "memory(GiB)": 54.25, + "step": 1603, + "token_acc": 0.9124245038826575, + "train_speed(iter/s)": 0.241863 + }, + { + "epoch": 0.51328, + "grad_norm": 0.6581739850346053, + "learning_rate": 4.802290754180722e-06, + "loss": 0.3274728059768677, + "memory(GiB)": 54.25, + "step": 1604, + "token_acc": 0.9330531371960372, + "train_speed(iter/s)": 0.241868 + }, + { + "epoch": 0.5136, + "grad_norm": 0.6720473433056398, + "learning_rate": 4.801946890873445e-06, + "loss": 0.43131762742996216, + "memory(GiB)": 54.25, + "step": 1605, + "token_acc": 0.8587777523460746, + "train_speed(iter/s)": 0.241874 + }, + { + "epoch": 0.51392, + "grad_norm": 0.6893650649603072, + "learning_rate": 4.801602741128811e-06, + "loss": 0.3550516963005066, + "memory(GiB)": 54.25, + "step": 1606, + "token_acc": 0.8899769585253456, + "train_speed(iter/s)": 0.241875 + }, + { + "epoch": 0.51424, + "grad_norm": 0.6632233484649679, + "learning_rate": 4.801258304989642e-06, + "loss": 0.38765859603881836, + "memory(GiB)": 54.25, + "step": 1607, + "token_acc": 0.8972746331236897, + "train_speed(iter/s)": 0.241875 + }, + { + "epoch": 0.51456, + "grad_norm": 0.630304241564044, + "learning_rate": 4.800913582498799e-06, + "loss": 0.35500484704971313, + "memory(GiB)": 54.25, + "step": 1608, + "token_acc": 0.898895790200138, + "train_speed(iter/s)": 0.241871 + }, + { + "epoch": 0.51488, + "grad_norm": 0.6845849956422666, + "learning_rate": 4.800568573699174e-06, + "loss": 0.3986101746559143, + "memory(GiB)": 54.25, + "step": 1609, + "token_acc": 0.8815060908084164, + "train_speed(iter/s)": 0.241879 + }, + { + "epoch": 0.5152, + "grad_norm": 0.6254596716219368, + "learning_rate": 4.800223278633699e-06, + "loss": 0.347015917301178, + "memory(GiB)": 54.25, + "step": 1610, + "token_acc": 0.9135297326786689, + "train_speed(iter/s)": 0.241865 + }, + { + "epoch": 0.51552, + "grad_norm": 4.654225400016807, + "learning_rate": 4.799877697345341e-06, + "loss": 0.44562438130378723, + "memory(GiB)": 54.25, + "step": 1611, + "token_acc": 0.9245147375988497, + "train_speed(iter/s)": 0.241867 + }, + { + "epoch": 0.51584, + "grad_norm": 0.6691539818992568, + "learning_rate": 4.7995318298771e-06, + "loss": 0.3658391833305359, + "memory(GiB)": 54.25, + "step": 1612, 
+ "token_acc": 0.8493107769423559, + "train_speed(iter/s)": 0.241869 + }, + { + "epoch": 0.51616, + "grad_norm": 0.5895834254379761, + "learning_rate": 4.7991856762720135e-06, + "loss": 0.30213695764541626, + "memory(GiB)": 54.25, + "step": 1613, + "token_acc": 0.9024137036075786, + "train_speed(iter/s)": 0.241876 + }, + { + "epoch": 0.51648, + "grad_norm": 0.6573414730284204, + "learning_rate": 4.798839236573154e-06, + "loss": 0.42023444175720215, + "memory(GiB)": 54.25, + "step": 1614, + "token_acc": 0.8461417816813049, + "train_speed(iter/s)": 0.241875 + }, + { + "epoch": 0.5168, + "grad_norm": 0.5863915934592941, + "learning_rate": 4.798492510823631e-06, + "loss": 0.3499654531478882, + "memory(GiB)": 54.25, + "step": 1615, + "token_acc": 0.9248875958741074, + "train_speed(iter/s)": 0.24187 + }, + { + "epoch": 0.51712, + "grad_norm": 0.6132500161344128, + "learning_rate": 4.7981454990665885e-06, + "loss": 0.4065876305103302, + "memory(GiB)": 54.25, + "step": 1616, + "token_acc": 0.9456484348125215, + "train_speed(iter/s)": 0.241859 + }, + { + "epoch": 0.51744, + "grad_norm": 0.6597652497530269, + "learning_rate": 4.7977982013452055e-06, + "loss": 0.4040476083755493, + "memory(GiB)": 54.25, + "step": 1617, + "token_acc": 0.8952967525195968, + "train_speed(iter/s)": 0.241845 + }, + { + "epoch": 0.51776, + "grad_norm": 0.6284607063563155, + "learning_rate": 4.797450617702696e-06, + "loss": 0.379196435213089, + "memory(GiB)": 54.25, + "step": 1618, + "token_acc": 0.8120373977405532, + "train_speed(iter/s)": 0.241827 + }, + { + "epoch": 0.51808, + "grad_norm": 0.6555189728507588, + "learning_rate": 4.797102748182312e-06, + "loss": 0.3872315287590027, + "memory(GiB)": 54.25, + "step": 1619, + "token_acc": 0.852808988764045, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.5184, + "grad_norm": 0.6123392858170771, + "learning_rate": 4.79675459282734e-06, + "loss": 0.34902381896972656, + "memory(GiB)": 54.25, + "step": 1620, + "token_acc": 0.9648823729969315, + "train_speed(iter/s)": 0.241835 + }, + { + "epoch": 0.51872, + "grad_norm": 0.6880900526797497, + "learning_rate": 4.796406151681103e-06, + "loss": 0.36194103956222534, + "memory(GiB)": 54.25, + "step": 1621, + "token_acc": 0.9397590361445783, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.51904, + "grad_norm": 0.6468327592440479, + "learning_rate": 4.796057424786956e-06, + "loss": 0.4041372537612915, + "memory(GiB)": 54.25, + "step": 1622, + "token_acc": 0.8850795392210642, + "train_speed(iter/s)": 0.241818 + }, + { + "epoch": 0.51936, + "grad_norm": 1.0821288604011743, + "learning_rate": 4.795708412188293e-06, + "loss": 0.45212557911872864, + "memory(GiB)": 54.25, + "step": 1623, + "token_acc": 0.8201296870594869, + "train_speed(iter/s)": 0.241815 + }, + { + "epoch": 0.51968, + "grad_norm": 0.6713804660702141, + "learning_rate": 4.795359113928543e-06, + "loss": 0.3753165304660797, + "memory(GiB)": 54.25, + "step": 1624, + "token_acc": 0.8111876903349896, + "train_speed(iter/s)": 0.24182 + }, + { + "epoch": 0.52, + "grad_norm": 0.644549811314491, + "learning_rate": 4.7950095300511696e-06, + "loss": 0.3454955816268921, + "memory(GiB)": 54.25, + "step": 1625, + "token_acc": 0.8629441624365483, + "train_speed(iter/s)": 0.241824 + }, + { + "epoch": 0.52032, + "grad_norm": 0.6808180534242114, + "learning_rate": 4.794659660599673e-06, + "loss": 0.41125786304473877, + "memory(GiB)": 54.25, + "step": 1626, + "token_acc": 0.8359303391384051, + "train_speed(iter/s)": 0.241826 + }, + { + "epoch": 0.52064, + "grad_norm": 
0.6612406491962356, + "learning_rate": 4.794309505617588e-06, + "loss": 0.369584321975708, + "memory(GiB)": 54.25, + "step": 1627, + "token_acc": 0.9459876543209876, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.52096, + "grad_norm": 0.6367372138971551, + "learning_rate": 4.793959065148484e-06, + "loss": 0.46744924783706665, + "memory(GiB)": 54.25, + "step": 1628, + "token_acc": 0.8505025125628141, + "train_speed(iter/s)": 0.241817 + }, + { + "epoch": 0.52128, + "grad_norm": 0.6437790122874422, + "learning_rate": 4.79360833923597e-06, + "loss": 0.4287160038948059, + "memory(GiB)": 54.25, + "step": 1629, + "token_acc": 0.8909224011713031, + "train_speed(iter/s)": 0.241816 + }, + { + "epoch": 0.5216, + "grad_norm": 0.6335410353242426, + "learning_rate": 4.793257327923686e-06, + "loss": 0.3581119477748871, + "memory(GiB)": 54.25, + "step": 1630, + "token_acc": 0.9400137899333486, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.52192, + "grad_norm": 0.6193725939287823, + "learning_rate": 4.79290603125531e-06, + "loss": 0.2771455943584442, + "memory(GiB)": 54.25, + "step": 1631, + "token_acc": 0.9149250061470371, + "train_speed(iter/s)": 0.241812 + }, + { + "epoch": 0.52224, + "grad_norm": 0.614416331276897, + "learning_rate": 4.792554449274555e-06, + "loss": 0.4105945825576782, + "memory(GiB)": 54.25, + "step": 1632, + "token_acc": 0.8787436084733382, + "train_speed(iter/s)": 0.241815 + }, + { + "epoch": 0.52256, + "grad_norm": 0.6381483751178989, + "learning_rate": 4.792202582025167e-06, + "loss": 0.3688350319862366, + "memory(GiB)": 54.25, + "step": 1633, + "token_acc": 0.8716999753269183, + "train_speed(iter/s)": 0.241822 + }, + { + "epoch": 0.52288, + "grad_norm": 0.6205060405276, + "learning_rate": 4.7918504295509326e-06, + "loss": 0.33853060007095337, + "memory(GiB)": 54.25, + "step": 1634, + "token_acc": 0.8977181208053692, + "train_speed(iter/s)": 0.241824 + }, + { + "epoch": 0.5232, + "grad_norm": 0.7155316143155707, + "learning_rate": 4.7914979918956685e-06, + "loss": 0.38431063294410706, + "memory(GiB)": 54.25, + "step": 1635, + "token_acc": 0.8289521065898452, + "train_speed(iter/s)": 0.24183 + }, + { + "epoch": 0.52352, + "grad_norm": 0.666800248019191, + "learning_rate": 4.7911452691032325e-06, + "loss": 0.37178653478622437, + "memory(GiB)": 54.25, + "step": 1636, + "token_acc": 0.8224276908487435, + "train_speed(iter/s)": 0.241839 + }, + { + "epoch": 0.52384, + "grad_norm": 0.643515450030943, + "learning_rate": 4.790792261217513e-06, + "loss": 0.31211981177330017, + "memory(GiB)": 54.25, + "step": 1637, + "token_acc": 0.849727651393784, + "train_speed(iter/s)": 0.24184 + }, + { + "epoch": 0.52416, + "grad_norm": 0.6618358006144097, + "learning_rate": 4.7904389682824345e-06, + "loss": 0.4168909192085266, + "memory(GiB)": 54.25, + "step": 1638, + "token_acc": 0.9230769230769231, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.52448, + "grad_norm": 0.6552964670420218, + "learning_rate": 4.790085390341961e-06, + "loss": 0.3379371762275696, + "memory(GiB)": 54.25, + "step": 1639, + "token_acc": 0.892036344200962, + "train_speed(iter/s)": 0.241839 + }, + { + "epoch": 0.5248, + "grad_norm": 0.6475929675939892, + "learning_rate": 4.789731527440087e-06, + "loss": 0.39542946219444275, + "memory(GiB)": 54.25, + "step": 1640, + "token_acc": 0.8661080711354309, + "train_speed(iter/s)": 0.241836 + }, + { + "epoch": 0.52512, + "grad_norm": 0.6288211455039003, + "learning_rate": 4.789377379620845e-06, + "loss": 0.40078893303871155, + "memory(GiB)": 54.25, + "step": 
1641, + "token_acc": 0.8841690387562952, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.52544, + "grad_norm": 0.5985121440996761, + "learning_rate": 4.7890229469283035e-06, + "loss": 0.40689998865127563, + "memory(GiB)": 54.25, + "step": 1642, + "token_acc": 0.9043683589138135, + "train_speed(iter/s)": 0.241822 + }, + { + "epoch": 0.52576, + "grad_norm": 0.635867504393454, + "learning_rate": 4.788668229406565e-06, + "loss": 0.35504063963890076, + "memory(GiB)": 54.25, + "step": 1643, + "token_acc": 0.8297356293960708, + "train_speed(iter/s)": 0.241823 + }, + { + "epoch": 0.52608, + "grad_norm": 0.671602326457427, + "learning_rate": 4.788313227099768e-06, + "loss": 0.392402321100235, + "memory(GiB)": 54.25, + "step": 1644, + "token_acc": 0.8622152395915161, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.5264, + "grad_norm": 0.677736890847526, + "learning_rate": 4.787957940052085e-06, + "loss": 0.34257280826568604, + "memory(GiB)": 54.25, + "step": 1645, + "token_acc": 0.8680358313305658, + "train_speed(iter/s)": 0.241829 + }, + { + "epoch": 0.52672, + "grad_norm": 0.5935925447844256, + "learning_rate": 4.787602368307728e-06, + "loss": 0.3644135296344757, + "memory(GiB)": 54.25, + "step": 1646, + "token_acc": 0.9214157168566287, + "train_speed(iter/s)": 0.241832 + }, + { + "epoch": 0.52704, + "grad_norm": 0.6800017328081486, + "learning_rate": 4.787246511910939e-06, + "loss": 0.39088839292526245, + "memory(GiB)": 54.25, + "step": 1647, + "token_acc": 0.9255125875940825, + "train_speed(iter/s)": 0.241835 + }, + { + "epoch": 0.52736, + "grad_norm": 0.6500793266763433, + "learning_rate": 4.786890370906e-06, + "loss": 0.3825957477092743, + "memory(GiB)": 54.25, + "step": 1648, + "token_acc": 0.9043017456359103, + "train_speed(iter/s)": 0.241841 + }, + { + "epoch": 0.52768, + "grad_norm": 0.6029687698582313, + "learning_rate": 4.7865339453372255e-06, + "loss": 0.33596399426460266, + "memory(GiB)": 54.25, + "step": 1649, + "token_acc": 0.9499254843517139, + "train_speed(iter/s)": 0.241838 + }, + { + "epoch": 0.528, + "grad_norm": 0.6622778813659829, + "learning_rate": 4.786177235248968e-06, + "loss": 0.3753988742828369, + "memory(GiB)": 54.25, + "step": 1650, + "token_acc": 0.911515940143136, + "train_speed(iter/s)": 0.241837 + }, + { + "epoch": 0.52832, + "grad_norm": 0.6918405922568939, + "learning_rate": 4.785820240685611e-06, + "loss": 0.3988703489303589, + "memory(GiB)": 54.25, + "step": 1651, + "token_acc": 0.9597565099763273, + "train_speed(iter/s)": 0.241844 + }, + { + "epoch": 0.52864, + "grad_norm": 0.6787767694872086, + "learning_rate": 4.7854629616915795e-06, + "loss": 0.30893445014953613, + "memory(GiB)": 54.25, + "step": 1652, + "token_acc": 0.8657233612474733, + "train_speed(iter/s)": 0.241852 + }, + { + "epoch": 0.52896, + "grad_norm": 0.679224941541078, + "learning_rate": 4.785105398311329e-06, + "loss": 0.42479339241981506, + "memory(GiB)": 54.25, + "step": 1653, + "token_acc": 0.9409918392969241, + "train_speed(iter/s)": 0.241854 + }, + { + "epoch": 0.52928, + "grad_norm": 0.666820751335812, + "learning_rate": 4.784747550589353e-06, + "loss": 0.4075589179992676, + "memory(GiB)": 54.25, + "step": 1654, + "token_acc": 0.9220447284345048, + "train_speed(iter/s)": 0.241862 + }, + { + "epoch": 0.5296, + "grad_norm": 0.7726105078623063, + "learning_rate": 4.7843894185701775e-06, + "loss": 0.30400118231773376, + "memory(GiB)": 54.25, + "step": 1655, + "token_acc": 0.9073543457497613, + "train_speed(iter/s)": 0.241868 + }, + { + "epoch": 0.52992, + "grad_norm": 
0.6114909214912815, + "learning_rate": 4.784031002298368e-06, + "loss": 0.2954246997833252, + "memory(GiB)": 54.25, + "step": 1656, + "token_acc": 0.9099045346062052, + "train_speed(iter/s)": 0.241874 + }, + { + "epoch": 0.53024, + "grad_norm": 0.6342354595241302, + "learning_rate": 4.783672301818522e-06, + "loss": 0.4098246991634369, + "memory(GiB)": 54.25, + "step": 1657, + "token_acc": 0.8244387971198645, + "train_speed(iter/s)": 0.241877 + }, + { + "epoch": 0.53056, + "grad_norm": 0.6182507803907549, + "learning_rate": 4.7833133171752735e-06, + "loss": 0.36787742376327515, + "memory(GiB)": 54.25, + "step": 1658, + "token_acc": 0.8704632634995574, + "train_speed(iter/s)": 0.241875 + }, + { + "epoch": 0.53088, + "grad_norm": 0.7056581360089234, + "learning_rate": 4.782954048413292e-06, + "loss": 0.3797305226325989, + "memory(GiB)": 54.25, + "step": 1659, + "token_acc": 0.919853539462978, + "train_speed(iter/s)": 0.241883 + }, + { + "epoch": 0.5312, + "grad_norm": 0.6129606934140552, + "learning_rate": 4.782594495577283e-06, + "loss": 0.4137730002403259, + "memory(GiB)": 54.25, + "step": 1660, + "token_acc": 0.8471312289060949, + "train_speed(iter/s)": 0.241887 + }, + { + "epoch": 0.53152, + "grad_norm": 0.6599823619375234, + "learning_rate": 4.782234658711987e-06, + "loss": 0.3478569984436035, + "memory(GiB)": 54.25, + "step": 1661, + "token_acc": 0.8912893587680993, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.53184, + "grad_norm": 0.6874816297329593, + "learning_rate": 4.781874537862177e-06, + "loss": 0.372641921043396, + "memory(GiB)": 54.25, + "step": 1662, + "token_acc": 0.9451901565995525, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.53216, + "grad_norm": 0.6086871339955046, + "learning_rate": 4.781514133072666e-06, + "loss": 0.3830077350139618, + "memory(GiB)": 54.25, + "step": 1663, + "token_acc": 0.9150913168856136, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.53248, + "grad_norm": 0.6361215835171247, + "learning_rate": 4.7811534443883e-06, + "loss": 0.4059317111968994, + "memory(GiB)": 54.25, + "step": 1664, + "token_acc": 0.9067055393586005, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.5328, + "grad_norm": 0.6453663787311871, + "learning_rate": 4.7807924718539595e-06, + "loss": 0.4620547890663147, + "memory(GiB)": 54.25, + "step": 1665, + "token_acc": 0.8688860435339308, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.53312, + "grad_norm": 0.6298090525826134, + "learning_rate": 4.780431215514562e-06, + "loss": 0.4217415153980255, + "memory(GiB)": 54.25, + "step": 1666, + "token_acc": 0.9143148588915335, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.53344, + "grad_norm": 0.6972933841981127, + "learning_rate": 4.780069675415059e-06, + "loss": 0.4003632664680481, + "memory(GiB)": 54.25, + "step": 1667, + "token_acc": 0.9112903225806451, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.53376, + "grad_norm": 0.6808730627526265, + "learning_rate": 4.779707851600438e-06, + "loss": 0.4512426257133484, + "memory(GiB)": 54.25, + "step": 1668, + "token_acc": 0.8991092528631158, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.53408, + "grad_norm": 0.6671312447088457, + "learning_rate": 4.779345744115722e-06, + "loss": 0.42650216817855835, + "memory(GiB)": 54.25, + "step": 1669, + "token_acc": 0.7958693563880884, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.5344, + "grad_norm": 0.5732031365440313, + "learning_rate": 4.7789833530059694e-06, + "loss": 0.33369913697242737, + "memory(GiB)": 54.25, + "step": 
1670, + "token_acc": 0.8655131734172238, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.53472, + "grad_norm": 0.7251141784338081, + "learning_rate": 4.778620678316273e-06, + "loss": 0.34540894627571106, + "memory(GiB)": 54.25, + "step": 1671, + "token_acc": 0.9279210925644916, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.53504, + "grad_norm": 0.5786178023800073, + "learning_rate": 4.77825772009176e-06, + "loss": 0.4079790711402893, + "memory(GiB)": 54.25, + "step": 1672, + "token_acc": 0.8609343755680785, + "train_speed(iter/s)": 0.241885 + }, + { + "epoch": 0.53536, + "grad_norm": 0.6837056857485796, + "learning_rate": 4.777894478377596e-06, + "loss": 0.3558025360107422, + "memory(GiB)": 54.25, + "step": 1673, + "token_acc": 0.8825867052023122, + "train_speed(iter/s)": 0.241886 + }, + { + "epoch": 0.53568, + "grad_norm": 0.629079855936947, + "learning_rate": 4.777530953218981e-06, + "loss": 0.2840113043785095, + "memory(GiB)": 54.25, + "step": 1674, + "token_acc": 0.8981295925183701, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.536, + "grad_norm": 0.698559478444381, + "learning_rate": 4.777167144661147e-06, + "loss": 0.4973347783088684, + "memory(GiB)": 54.25, + "step": 1675, + "token_acc": 0.7928917609046849, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.53632, + "grad_norm": 0.6569750164824181, + "learning_rate": 4.776803052749366e-06, + "loss": 0.43085965514183044, + "memory(GiB)": 54.25, + "step": 1676, + "token_acc": 0.864881693648817, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.53664, + "grad_norm": 0.6730266695924702, + "learning_rate": 4.776438677528941e-06, + "loss": 0.42223477363586426, + "memory(GiB)": 54.25, + "step": 1677, + "token_acc": 0.8520535511685954, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.53696, + "grad_norm": 0.6817090856849857, + "learning_rate": 4.776074019045213e-06, + "loss": 0.41543567180633545, + "memory(GiB)": 54.25, + "step": 1678, + "token_acc": 0.8829393335232127, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.53728, + "grad_norm": 0.5738165434447707, + "learning_rate": 4.775709077343556e-06, + "loss": 0.3298349976539612, + "memory(GiB)": 54.25, + "step": 1679, + "token_acc": 0.8658512131542009, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.5376, + "grad_norm": 0.6425538064520745, + "learning_rate": 4.7753438524693825e-06, + "loss": 0.3995852768421173, + "memory(GiB)": 54.25, + "step": 1680, + "token_acc": 0.8970489604292421, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.53792, + "grad_norm": 0.6973746808434016, + "learning_rate": 4.77497834446814e-06, + "loss": 0.4343576729297638, + "memory(GiB)": 54.25, + "step": 1681, + "token_acc": 0.9092620481927711, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.53824, + "grad_norm": 0.6547836065565994, + "learning_rate": 4.774612553385305e-06, + "loss": 0.3457886576652527, + "memory(GiB)": 54.25, + "step": 1682, + "token_acc": 0.9035136642498606, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.53856, + "grad_norm": 0.6627187176186636, + "learning_rate": 4.7742464792663975e-06, + "loss": 0.37080442905426025, + "memory(GiB)": 54.25, + "step": 1683, + "token_acc": 0.8486183420104125, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.53888, + "grad_norm": 0.5718973022310798, + "learning_rate": 4.773880122156968e-06, + "loss": 0.3257960081100464, + "memory(GiB)": 54.25, + "step": 1684, + "token_acc": 0.9392605633802817, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.5392, + "grad_norm": 
0.6645310054394056, + "learning_rate": 4.773513482102603e-06, + "loss": 0.346232533454895, + "memory(GiB)": 54.25, + "step": 1685, + "token_acc": 0.8704214257581725, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.53952, + "grad_norm": 0.6878970139102694, + "learning_rate": 4.773146559148925e-06, + "loss": 0.44456738233566284, + "memory(GiB)": 54.25, + "step": 1686, + "token_acc": 0.8782224404615763, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.53984, + "grad_norm": 0.7395358804786457, + "learning_rate": 4.77277935334159e-06, + "loss": 0.41306424140930176, + "memory(GiB)": 54.25, + "step": 1687, + "token_acc": 0.8511254019292605, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.54016, + "grad_norm": 0.6986152402230384, + "learning_rate": 4.772411864726292e-06, + "loss": 0.4318428039550781, + "memory(GiB)": 54.25, + "step": 1688, + "token_acc": 0.8574600355239786, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.54048, + "grad_norm": 0.6594252076888413, + "learning_rate": 4.772044093348757e-06, + "loss": 0.35141289234161377, + "memory(GiB)": 54.25, + "step": 1689, + "token_acc": 0.9043361645060348, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.5408, + "grad_norm": 0.6679944428591881, + "learning_rate": 4.77167603925475e-06, + "loss": 0.3280572295188904, + "memory(GiB)": 54.25, + "step": 1690, + "token_acc": 0.9424960505529226, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.54112, + "grad_norm": 0.6350824423618522, + "learning_rate": 4.771307702490068e-06, + "loss": 0.3758777379989624, + "memory(GiB)": 54.25, + "step": 1691, + "token_acc": 0.950062034739454, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.54144, + "grad_norm": 0.6720172159191258, + "learning_rate": 4.770939083100542e-06, + "loss": 0.3800051808357239, + "memory(GiB)": 54.25, + "step": 1692, + "token_acc": 0.9349749903809157, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.54176, + "grad_norm": 0.6118357633665251, + "learning_rate": 4.770570181132043e-06, + "loss": 0.3875639736652374, + "memory(GiB)": 54.25, + "step": 1693, + "token_acc": 0.8560331434489902, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.54208, + "grad_norm": 0.7114622374802463, + "learning_rate": 4.770200996630473e-06, + "loss": 0.4644817113876343, + "memory(GiB)": 54.25, + "step": 1694, + "token_acc": 0.8904149620105202, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.5424, + "grad_norm": 0.632844250638788, + "learning_rate": 4.7698315296417714e-06, + "loss": 0.3635478615760803, + "memory(GiB)": 54.25, + "step": 1695, + "token_acc": 0.8728323699421965, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.54272, + "grad_norm": 0.7421622930228027, + "learning_rate": 4.769461780211911e-06, + "loss": 0.3639235496520996, + "memory(GiB)": 54.25, + "step": 1696, + "token_acc": 0.8532792427315754, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.54304, + "grad_norm": 0.6907978176815959, + "learning_rate": 4.7690917483869026e-06, + "loss": 0.4361364245414734, + "memory(GiB)": 54.25, + "step": 1697, + "token_acc": 0.832114118507681, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.54336, + "grad_norm": 0.6085925871100911, + "learning_rate": 4.768721434212788e-06, + "loss": 0.34516531229019165, + "memory(GiB)": 54.25, + "step": 1698, + "token_acc": 0.8069865133760779, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.54368, + "grad_norm": 0.6709564261179265, + "learning_rate": 4.768350837735648e-06, + "loss": 0.3944128751754761, + "memory(GiB)": 54.25, + "step": 
1699, + "token_acc": 0.8417963017317288, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.544, + "grad_norm": 0.6682134735059763, + "learning_rate": 4.7679799590015956e-06, + "loss": 0.4138646125793457, + "memory(GiB)": 54.25, + "step": 1700, + "token_acc": 0.8513434579439252, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.54432, + "grad_norm": 0.6382750261277393, + "learning_rate": 4.767608798056781e-06, + "loss": 0.3381836414337158, + "memory(GiB)": 54.25, + "step": 1701, + "token_acc": 0.8767914012738853, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.54464, + "grad_norm": 0.647074403620439, + "learning_rate": 4.767237354947389e-06, + "loss": 0.39217454195022583, + "memory(GiB)": 54.25, + "step": 1702, + "token_acc": 0.8707881629065338, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.54496, + "grad_norm": 0.680735455958201, + "learning_rate": 4.766865629719638e-06, + "loss": 0.4876878261566162, + "memory(GiB)": 54.25, + "step": 1703, + "token_acc": 0.8819064966605951, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.54528, + "grad_norm": 0.622501310420022, + "learning_rate": 4.766493622419784e-06, + "loss": 0.3153845965862274, + "memory(GiB)": 54.25, + "step": 1704, + "token_acc": 0.9454042847270214, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.5456, + "grad_norm": 0.6876552669453849, + "learning_rate": 4.7661213330941166e-06, + "loss": 0.3760117292404175, + "memory(GiB)": 54.25, + "step": 1705, + "token_acc": 0.8664302600472813, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.54592, + "grad_norm": 0.6948196966954684, + "learning_rate": 4.765748761788961e-06, + "loss": 0.40493452548980713, + "memory(GiB)": 54.25, + "step": 1706, + "token_acc": 0.8377448385389095, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.54624, + "grad_norm": 0.6640445199945471, + "learning_rate": 4.765375908550677e-06, + "loss": 0.40276744961738586, + "memory(GiB)": 54.25, + "step": 1707, + "token_acc": 0.8650908042663592, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.54656, + "grad_norm": 0.6512549612288703, + "learning_rate": 4.765002773425659e-06, + "loss": 0.4037300646305084, + "memory(GiB)": 54.25, + "step": 1708, + "token_acc": 0.8687202053538687, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.54688, + "grad_norm": 0.6967699107890171, + "learning_rate": 4.764629356460338e-06, + "loss": 0.29643842577934265, + "memory(GiB)": 54.25, + "step": 1709, + "token_acc": 0.9545655050727834, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.5472, + "grad_norm": 0.6363515478507082, + "learning_rate": 4.764255657701179e-06, + "loss": 0.399474561214447, + "memory(GiB)": 54.25, + "step": 1710, + "token_acc": 0.8621291448516579, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.54752, + "grad_norm": 0.6333970131710334, + "learning_rate": 4.763881677194683e-06, + "loss": 0.43602702021598816, + "memory(GiB)": 54.25, + "step": 1711, + "token_acc": 0.8336842105263158, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.54784, + "grad_norm": 0.6579658377447896, + "learning_rate": 4.763507414987384e-06, + "loss": 0.3302151560783386, + "memory(GiB)": 54.25, + "step": 1712, + "token_acc": 0.9284436493738819, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.54816, + "grad_norm": 0.6242143697570357, + "learning_rate": 4.763132871125853e-06, + "loss": 0.3588391840457916, + "memory(GiB)": 54.25, + "step": 1713, + "token_acc": 0.9063625450180072, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.54848, + "grad_norm": 
0.5809802291359155, + "learning_rate": 4.762758045656696e-06, + "loss": 0.3254948556423187, + "memory(GiB)": 54.25, + "step": 1714, + "token_acc": 0.8943922573299175, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.5488, + "grad_norm": 0.6442313615817059, + "learning_rate": 4.762382938626553e-06, + "loss": 0.35911470651626587, + "memory(GiB)": 54.25, + "step": 1715, + "token_acc": 0.8413284132841329, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.54912, + "grad_norm": 0.7200684976230624, + "learning_rate": 4.7620075500820995e-06, + "loss": 0.3551109731197357, + "memory(GiB)": 54.25, + "step": 1716, + "token_acc": 0.9095419847328244, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.54944, + "grad_norm": 0.6189451924537228, + "learning_rate": 4.761631880070047e-06, + "loss": 0.32985198497772217, + "memory(GiB)": 54.25, + "step": 1717, + "token_acc": 0.9235639981908639, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.54976, + "grad_norm": 0.7096353757175814, + "learning_rate": 4.7612559286371406e-06, + "loss": 0.4556298851966858, + "memory(GiB)": 54.25, + "step": 1718, + "token_acc": 0.850204081632653, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.55008, + "grad_norm": 0.6814885897397271, + "learning_rate": 4.76087969583016e-06, + "loss": 0.3932510018348694, + "memory(GiB)": 54.25, + "step": 1719, + "token_acc": 0.9061855670103093, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.5504, + "grad_norm": 0.6801154087769582, + "learning_rate": 4.760503181695922e-06, + "loss": 0.4726923704147339, + "memory(GiB)": 54.25, + "step": 1720, + "token_acc": 0.8443611036699705, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.55072, + "grad_norm": 0.64268904197292, + "learning_rate": 4.760126386281278e-06, + "loss": 0.3851279318332672, + "memory(GiB)": 54.25, + "step": 1721, + "token_acc": 0.8143522438611346, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.55104, + "grad_norm": 0.6537651108197885, + "learning_rate": 4.759749309633111e-06, + "loss": 0.4191160202026367, + "memory(GiB)": 54.25, + "step": 1722, + "token_acc": 0.8718002081165452, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.55136, + "grad_norm": 0.6391447343222275, + "learning_rate": 4.759371951798344e-06, + "loss": 0.41589897871017456, + "memory(GiB)": 54.25, + "step": 1723, + "token_acc": 0.8307245080500895, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.55168, + "grad_norm": 0.6214570133804137, + "learning_rate": 4.758994312823931e-06, + "loss": 0.2881065607070923, + "memory(GiB)": 54.25, + "step": 1724, + "token_acc": 0.9447274579724911, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.552, + "grad_norm": 0.6494886008345389, + "learning_rate": 4.758616392756864e-06, + "loss": 0.43334123492240906, + "memory(GiB)": 54.25, + "step": 1725, + "token_acc": 0.9001627780792186, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.55232, + "grad_norm": 0.6160110742212347, + "learning_rate": 4.758238191644169e-06, + "loss": 0.3807106018066406, + "memory(GiB)": 54.25, + "step": 1726, + "token_acc": 0.8847177848775293, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.55264, + "grad_norm": 0.5863614978633048, + "learning_rate": 4.757859709532905e-06, + "loss": 0.37055861949920654, + "memory(GiB)": 54.25, + "step": 1727, + "token_acc": 0.9147104851330203, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.55296, + "grad_norm": 0.6630303898878792, + "learning_rate": 4.7574809464701676e-06, + "loss": 0.3719576597213745, + "memory(GiB)": 54.25, + 
"step": 1728, + "token_acc": 0.8937790943715616, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.55328, + "grad_norm": 0.662982040862542, + "learning_rate": 4.7571019025030885e-06, + "loss": 0.3322984278202057, + "memory(GiB)": 54.25, + "step": 1729, + "token_acc": 0.9484966592427617, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.5536, + "grad_norm": 0.6455154849236364, + "learning_rate": 4.7567225776788314e-06, + "loss": 0.37174028158187866, + "memory(GiB)": 54.25, + "step": 1730, + "token_acc": 0.8652380952380953, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.55392, + "grad_norm": 0.6492975191194683, + "learning_rate": 4.756342972044599e-06, + "loss": 0.412747323513031, + "memory(GiB)": 54.25, + "step": 1731, + "token_acc": 0.8422986092889005, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.55424, + "grad_norm": 0.6409994365716849, + "learning_rate": 4.755963085647625e-06, + "loss": 0.3946393132209778, + "memory(GiB)": 54.25, + "step": 1732, + "token_acc": 0.8106921487603306, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.55456, + "grad_norm": 0.5930301438941721, + "learning_rate": 4.75558291853518e-06, + "loss": 0.43866413831710815, + "memory(GiB)": 54.25, + "step": 1733, + "token_acc": 0.8455414012738853, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.55488, + "grad_norm": 0.6418114078480659, + "learning_rate": 4.75520247075457e-06, + "loss": 0.41745686531066895, + "memory(GiB)": 54.25, + "step": 1734, + "token_acc": 0.8533659730722154, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.5552, + "grad_norm": 0.662076139879933, + "learning_rate": 4.754821742353134e-06, + "loss": 0.3308243751525879, + "memory(GiB)": 54.25, + "step": 1735, + "token_acc": 0.9476614699331849, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.55552, + "grad_norm": 0.6326045245369947, + "learning_rate": 4.754440733378247e-06, + "loss": 0.3312709927558899, + "memory(GiB)": 54.25, + "step": 1736, + "token_acc": 0.9100575985821887, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.55584, + "grad_norm": 0.6364880221183564, + "learning_rate": 4.75405944387732e-06, + "loss": 0.3870372176170349, + "memory(GiB)": 54.25, + "step": 1737, + "token_acc": 0.8506581933726737, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.55616, + "grad_norm": 0.6616714454919996, + "learning_rate": 4.753677873897796e-06, + "loss": 0.4567233920097351, + "memory(GiB)": 54.25, + "step": 1738, + "token_acc": 0.7932737535277516, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.55648, + "grad_norm": 0.6706903457549486, + "learning_rate": 4.753296023487158e-06, + "loss": 0.4100452661514282, + "memory(GiB)": 54.25, + "step": 1739, + "token_acc": 0.8880097382836275, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.5568, + "grad_norm": 0.6382531822361515, + "learning_rate": 4.752913892692918e-06, + "loss": 0.4082014560699463, + "memory(GiB)": 54.25, + "step": 1740, + "token_acc": 0.915084388185654, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.55712, + "grad_norm": 0.6337767839615603, + "learning_rate": 4.7525314815626255e-06, + "loss": 0.3920116424560547, + "memory(GiB)": 54.25, + "step": 1741, + "token_acc": 0.8507140932363244, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.55744, + "grad_norm": 0.6111844204727378, + "learning_rate": 4.752148790143867e-06, + "loss": 0.39341235160827637, + "memory(GiB)": 54.25, + "step": 1742, + "token_acc": 0.857307249712313, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.55776, + "grad_norm": 
0.6249758244803715, + "learning_rate": 4.75176581848426e-06, + "loss": 0.3675069808959961, + "memory(GiB)": 54.25, + "step": 1743, + "token_acc": 0.8603714477511748, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.55808, + "grad_norm": 0.6242039810737627, + "learning_rate": 4.751382566631458e-06, + "loss": 0.4128805696964264, + "memory(GiB)": 54.25, + "step": 1744, + "token_acc": 0.8898357929744336, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.5584, + "grad_norm": 0.6519932496275362, + "learning_rate": 4.7509990346331525e-06, + "loss": 0.4031781554222107, + "memory(GiB)": 54.25, + "step": 1745, + "token_acc": 0.8840384615384616, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.55872, + "grad_norm": 0.6227677515827402, + "learning_rate": 4.750615222537066e-06, + "loss": 0.3471803367137909, + "memory(GiB)": 54.25, + "step": 1746, + "token_acc": 0.9231022017745646, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.55904, + "grad_norm": 0.6185863126570115, + "learning_rate": 4.750231130390957e-06, + "loss": 0.4443768262863159, + "memory(GiB)": 54.25, + "step": 1747, + "token_acc": 0.8952421591636441, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.55936, + "grad_norm": 0.6551728809582249, + "learning_rate": 4.74984675824262e-06, + "loss": 0.3390619456768036, + "memory(GiB)": 54.25, + "step": 1748, + "token_acc": 0.8777943368107303, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.55968, + "grad_norm": 0.6195254350242192, + "learning_rate": 4.749462106139883e-06, + "loss": 0.34302324056625366, + "memory(GiB)": 54.25, + "step": 1749, + "token_acc": 0.9469931804091755, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.56, + "grad_norm": 0.6430803819230743, + "learning_rate": 4.749077174130609e-06, + "loss": 0.366433322429657, + "memory(GiB)": 54.25, + "step": 1750, + "token_acc": 0.9177190968235744, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.56032, + "grad_norm": 0.706299485224754, + "learning_rate": 4.748691962262696e-06, + "loss": 0.40689200162887573, + "memory(GiB)": 54.25, + "step": 1751, + "token_acc": 0.8337247534053547, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.56064, + "grad_norm": 0.6419720956736918, + "learning_rate": 4.748306470584077e-06, + "loss": 0.3770413398742676, + "memory(GiB)": 54.25, + "step": 1752, + "token_acc": 0.8844331024425811, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.56096, + "grad_norm": 0.7193389826909394, + "learning_rate": 4.747920699142721e-06, + "loss": 0.4500507712364197, + "memory(GiB)": 54.25, + "step": 1753, + "token_acc": 0.8817795551112222, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.56128, + "grad_norm": 0.6182018889032513, + "learning_rate": 4.747534647986629e-06, + "loss": 0.34165963530540466, + "memory(GiB)": 54.25, + "step": 1754, + "token_acc": 0.9302446642373764, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.5616, + "grad_norm": 0.6445899027455283, + "learning_rate": 4.7471483171638395e-06, + "loss": 0.3784472644329071, + "memory(GiB)": 54.25, + "step": 1755, + "token_acc": 0.8795503211991434, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.56192, + "grad_norm": 0.6587525538814646, + "learning_rate": 4.746761706722424e-06, + "loss": 0.32692939043045044, + "memory(GiB)": 54.25, + "step": 1756, + "token_acc": 0.916058394160584, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.56224, + "grad_norm": 0.6134595182958407, + "learning_rate": 4.74637481671049e-06, + "loss": 0.329830527305603, + "memory(GiB)": 54.25, + "step": 1757, 
+ "token_acc": 0.932182864062973, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.56256, + "grad_norm": 0.9104070959234093, + "learning_rate": 4.7459876471761775e-06, + "loss": 0.42434442043304443, + "memory(GiB)": 54.25, + "step": 1758, + "token_acc": 0.8130161876445325, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.56288, + "grad_norm": 0.7258884521884202, + "learning_rate": 4.745600198167665e-06, + "loss": 0.36804088950157166, + "memory(GiB)": 54.25, + "step": 1759, + "token_acc": 0.9528115286804182, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.5632, + "grad_norm": 0.6467003418809625, + "learning_rate": 4.745212469733163e-06, + "loss": 0.3190022110939026, + "memory(GiB)": 54.25, + "step": 1760, + "token_acc": 0.906721536351166, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.56352, + "grad_norm": 0.6330817935833253, + "learning_rate": 4.744824461920918e-06, + "loss": 0.30715835094451904, + "memory(GiB)": 54.25, + "step": 1761, + "token_acc": 0.8520637515324888, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.56384, + "grad_norm": 0.7226087708235928, + "learning_rate": 4.744436174779211e-06, + "loss": 0.3926074206829071, + "memory(GiB)": 54.25, + "step": 1762, + "token_acc": 0.8765020026702269, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.56416, + "grad_norm": 0.6294294787652727, + "learning_rate": 4.744047608356357e-06, + "loss": 0.389980286359787, + "memory(GiB)": 54.25, + "step": 1763, + "token_acc": 0.8578610603290676, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.56448, + "grad_norm": 0.6456305987084016, + "learning_rate": 4.743658762700706e-06, + "loss": 0.4418475329875946, + "memory(GiB)": 54.25, + "step": 1764, + "token_acc": 0.9219595556821418, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.5648, + "grad_norm": 0.6330252097968619, + "learning_rate": 4.743269637860644e-06, + "loss": 0.34322622418403625, + "memory(GiB)": 54.25, + "step": 1765, + "token_acc": 0.9049128367670365, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.56512, + "grad_norm": 0.6812734926345403, + "learning_rate": 4.74288023388459e-06, + "loss": 0.34449827671051025, + "memory(GiB)": 54.25, + "step": 1766, + "token_acc": 0.9157142857142857, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.56544, + "grad_norm": 0.6031007303975944, + "learning_rate": 4.742490550820999e-06, + "loss": 0.3827779293060303, + "memory(GiB)": 54.25, + "step": 1767, + "token_acc": 0.8848056537102473, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.56576, + "grad_norm": 0.6795076298086795, + "learning_rate": 4.742100588718361e-06, + "loss": 0.41281187534332275, + "memory(GiB)": 54.25, + "step": 1768, + "token_acc": 0.867983367983368, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.56608, + "grad_norm": 0.6620192910337483, + "learning_rate": 4.7417103476252e-06, + "loss": 0.42870399355888367, + "memory(GiB)": 54.25, + "step": 1769, + "token_acc": 0.8718302094818081, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.5664, + "grad_norm": 0.6681739659244372, + "learning_rate": 4.741319827590073e-06, + "loss": 0.481192022562027, + "memory(GiB)": 54.25, + "step": 1770, + "token_acc": 0.842260805319542, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.56672, + "grad_norm": 0.6506421994090563, + "learning_rate": 4.740929028661575e-06, + "loss": 0.3987448215484619, + "memory(GiB)": 54.25, + "step": 1771, + "token_acc": 0.8671271840715157, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.56704, + "grad_norm": 
0.6966160746078027, + "learning_rate": 4.740537950888334e-06, + "loss": 0.424798846244812, + "memory(GiB)": 54.25, + "step": 1772, + "token_acc": 0.9041970802919708, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.56736, + "grad_norm": 0.6987433377234162, + "learning_rate": 4.740146594319012e-06, + "loss": 0.3548508882522583, + "memory(GiB)": 54.25, + "step": 1773, + "token_acc": 0.8272921108742004, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.56768, + "grad_norm": 0.6569243226011326, + "learning_rate": 4.739754959002307e-06, + "loss": 0.3921222686767578, + "memory(GiB)": 54.25, + "step": 1774, + "token_acc": 0.9029711198836484, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.568, + "grad_norm": 0.5978769608082144, + "learning_rate": 4.739363044986951e-06, + "loss": 0.3292708396911621, + "memory(GiB)": 54.25, + "step": 1775, + "token_acc": 0.9397746967071057, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.56832, + "grad_norm": 0.676326631944193, + "learning_rate": 4.738970852321712e-06, + "loss": 0.3797937035560608, + "memory(GiB)": 54.25, + "step": 1776, + "token_acc": 0.8772246582409079, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.56864, + "grad_norm": 0.6361639046063012, + "learning_rate": 4.73857838105539e-06, + "loss": 0.3331979811191559, + "memory(GiB)": 54.25, + "step": 1777, + "token_acc": 0.9504310344827587, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.56896, + "grad_norm": 0.6066442116758758, + "learning_rate": 4.738185631236823e-06, + "loss": 0.3624189496040344, + "memory(GiB)": 54.25, + "step": 1778, + "token_acc": 0.9283811778632731, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.56928, + "grad_norm": 0.7025203276723303, + "learning_rate": 4.73779260291488e-06, + "loss": 0.38638049364089966, + "memory(GiB)": 54.25, + "step": 1779, + "token_acc": 0.8603683897801545, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.5696, + "grad_norm": 0.6812612438271654, + "learning_rate": 4.737399296138468e-06, + "loss": 0.49551159143447876, + "memory(GiB)": 54.25, + "step": 1780, + "token_acc": 0.8763405559203327, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.56992, + "grad_norm": 0.6372318257625379, + "learning_rate": 4.737005710956526e-06, + "loss": 0.3790011405944824, + "memory(GiB)": 54.25, + "step": 1781, + "token_acc": 0.8664815749621403, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.57024, + "grad_norm": 0.64916617877153, + "learning_rate": 4.73661184741803e-06, + "loss": 0.32817262411117554, + "memory(GiB)": 54.25, + "step": 1782, + "token_acc": 0.9374217772215269, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.57056, + "grad_norm": 0.7019179118872425, + "learning_rate": 4.736217705571989e-06, + "loss": 0.37319111824035645, + "memory(GiB)": 54.25, + "step": 1783, + "token_acc": 0.8443296997513865, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.57088, + "grad_norm": 0.6616505207652147, + "learning_rate": 4.735823285467447e-06, + "loss": 0.3489619791507721, + "memory(GiB)": 54.25, + "step": 1784, + "token_acc": 0.8849241748438894, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.5712, + "grad_norm": 0.7171809246762367, + "learning_rate": 4.735428587153483e-06, + "loss": 0.36661937832832336, + "memory(GiB)": 54.25, + "step": 1785, + "token_acc": 0.9030769230769231, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.57152, + "grad_norm": 0.7088312659915446, + "learning_rate": 4.7350336106792105e-06, + "loss": 0.4244718849658966, + "memory(GiB)": 54.25, + "step": 1786, 
+ "token_acc": 0.8711453744493393, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.57184, + "grad_norm": 0.6126567710753559, + "learning_rate": 4.734638356093777e-06, + "loss": 0.3673321008682251, + "memory(GiB)": 54.25, + "step": 1787, + "token_acc": 0.8972348328518366, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.57216, + "grad_norm": 0.6244567494961059, + "learning_rate": 4.7342428234463655e-06, + "loss": 0.35194259881973267, + "memory(GiB)": 54.25, + "step": 1788, + "token_acc": 0.899352983465133, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.57248, + "grad_norm": 0.6922175261521196, + "learning_rate": 4.7338470127861924e-06, + "loss": 0.43254345655441284, + "memory(GiB)": 54.25, + "step": 1789, + "token_acc": 0.8546135512427647, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.5728, + "grad_norm": 0.6329375587795568, + "learning_rate": 4.73345092416251e-06, + "loss": 0.3455372750759125, + "memory(GiB)": 54.25, + "step": 1790, + "token_acc": 0.8648737290915054, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.57312, + "grad_norm": 0.617555770224745, + "learning_rate": 4.733054557624605e-06, + "loss": 0.36615675687789917, + "memory(GiB)": 54.25, + "step": 1791, + "token_acc": 0.809421265141319, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.57344, + "grad_norm": 0.689462334849659, + "learning_rate": 4.732657913221798e-06, + "loss": 0.39402520656585693, + "memory(GiB)": 54.25, + "step": 1792, + "token_acc": 0.8873541319361753, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.57376, + "grad_norm": 1.1449909380908412, + "learning_rate": 4.732260991003444e-06, + "loss": 0.4429539442062378, + "memory(GiB)": 54.25, + "step": 1793, + "token_acc": 0.910054347826087, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.57408, + "grad_norm": 0.6492172549658219, + "learning_rate": 4.731863791018935e-06, + "loss": 0.377286434173584, + "memory(GiB)": 54.25, + "step": 1794, + "token_acc": 0.9003721729172631, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.5744, + "grad_norm": 0.6145238846646529, + "learning_rate": 4.731466313317693e-06, + "loss": 0.3163700997829437, + "memory(GiB)": 54.25, + "step": 1795, + "token_acc": 0.9000888888888889, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.57472, + "grad_norm": 0.6744761942249956, + "learning_rate": 4.731068557949178e-06, + "loss": 0.3915477991104126, + "memory(GiB)": 54.25, + "step": 1796, + "token_acc": 0.8525739320920044, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.57504, + "grad_norm": 0.6913480388638615, + "learning_rate": 4.7306705249628856e-06, + "loss": 0.4396110773086548, + "memory(GiB)": 54.25, + "step": 1797, + "token_acc": 0.9209809264305178, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.57536, + "grad_norm": 0.6033203614890584, + "learning_rate": 4.7302722144083415e-06, + "loss": 0.4024497866630554, + "memory(GiB)": 54.25, + "step": 1798, + "token_acc": 0.8845190380761523, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.57568, + "grad_norm": 0.6215289406299345, + "learning_rate": 4.729873626335111e-06, + "loss": 0.31602048873901367, + "memory(GiB)": 54.25, + "step": 1799, + "token_acc": 0.8740914419695194, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.576, + "grad_norm": 0.640265226718409, + "learning_rate": 4.729474760792789e-06, + "loss": 0.324046790599823, + "memory(GiB)": 54.25, + "step": 1800, + "token_acc": 0.837017310252996, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.57632, + "grad_norm": 0.587115950466922, + 
"learning_rate": 4.729075617831009e-06, + "loss": 0.38621726632118225, + "memory(GiB)": 54.25, + "step": 1801, + "token_acc": 0.9049114151154968, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.57664, + "grad_norm": 0.6358495764201721, + "learning_rate": 4.728676197499438e-06, + "loss": 0.408303439617157, + "memory(GiB)": 54.25, + "step": 1802, + "token_acc": 0.899607843137255, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.57696, + "grad_norm": 0.6479458324247545, + "learning_rate": 4.728276499847775e-06, + "loss": 0.3367271423339844, + "memory(GiB)": 54.25, + "step": 1803, + "token_acc": 0.8420427553444181, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.57728, + "grad_norm": 0.570521557486852, + "learning_rate": 4.727876524925756e-06, + "loss": 0.3597154915332794, + "memory(GiB)": 54.25, + "step": 1804, + "token_acc": 0.9480789200415368, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.5776, + "grad_norm": 0.7208093788346057, + "learning_rate": 4.727476272783153e-06, + "loss": 0.37555748224258423, + "memory(GiB)": 54.25, + "step": 1805, + "token_acc": 0.8320715760495526, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.57792, + "grad_norm": 0.8592805808101602, + "learning_rate": 4.727075743469768e-06, + "loss": 0.43025392293930054, + "memory(GiB)": 54.25, + "step": 1806, + "token_acc": 0.892229154849688, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.57824, + "grad_norm": 0.6420075880317323, + "learning_rate": 4.72667493703544e-06, + "loss": 0.5147736668586731, + "memory(GiB)": 54.25, + "step": 1807, + "token_acc": 0.856243854473943, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.57856, + "grad_norm": 0.6936085879410177, + "learning_rate": 4.7262738535300434e-06, + "loss": 0.4516546130180359, + "memory(GiB)": 54.25, + "step": 1808, + "token_acc": 0.8926517571884984, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.57888, + "grad_norm": 0.5999944949610937, + "learning_rate": 4.725872493003486e-06, + "loss": 0.3590053915977478, + "memory(GiB)": 54.25, + "step": 1809, + "token_acc": 0.9182287421896224, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.5792, + "grad_norm": 0.6397805375371721, + "learning_rate": 4.72547085550571e-06, + "loss": 0.3638008236885071, + "memory(GiB)": 54.25, + "step": 1810, + "token_acc": 0.8770197486535009, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.57952, + "grad_norm": 0.8087116314177479, + "learning_rate": 4.725068941086693e-06, + "loss": 0.3281588554382324, + "memory(GiB)": 54.25, + "step": 1811, + "token_acc": 0.8910313901345291, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.57984, + "grad_norm": 0.6531301040830392, + "learning_rate": 4.724666749796444e-06, + "loss": 0.44599318504333496, + "memory(GiB)": 54.25, + "step": 1812, + "token_acc": 0.8816083395383469, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.58016, + "grad_norm": 0.5994763511531765, + "learning_rate": 4.72426428168501e-06, + "loss": 0.31098484992980957, + "memory(GiB)": 54.25, + "step": 1813, + "token_acc": 0.9493809176984706, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.58048, + "grad_norm": 0.6413351296359074, + "learning_rate": 4.723861536802472e-06, + "loss": 0.3934440016746521, + "memory(GiB)": 54.25, + "step": 1814, + "token_acc": 0.9531645569620253, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.5808, + "grad_norm": 1.29522276485793, + "learning_rate": 4.7234585151989445e-06, + "loss": 0.36183491349220276, + "memory(GiB)": 54.25, + "step": 1815, + "token_acc": 
0.8933992234380516, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.58112, + "grad_norm": 0.686069620007176, + "learning_rate": 4.723055216924576e-06, + "loss": 0.4371922016143799, + "memory(GiB)": 54.25, + "step": 1816, + "token_acc": 0.9045267489711935, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.58144, + "grad_norm": 0.687803413396611, + "learning_rate": 4.72265164202955e-06, + "loss": 0.4018932580947876, + "memory(GiB)": 54.25, + "step": 1817, + "token_acc": 0.8973230656398973, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.58176, + "grad_norm": 0.6656764600578396, + "learning_rate": 4.722247790564084e-06, + "loss": 0.3364885747432709, + "memory(GiB)": 54.25, + "step": 1818, + "token_acc": 0.913997840949982, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.58208, + "grad_norm": 0.6570459968425892, + "learning_rate": 4.72184366257843e-06, + "loss": 0.4190482497215271, + "memory(GiB)": 54.25, + "step": 1819, + "token_acc": 0.9146275149537793, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.5824, + "grad_norm": 0.5836856622715874, + "learning_rate": 4.721439258122877e-06, + "loss": 0.3488476276397705, + "memory(GiB)": 54.25, + "step": 1820, + "token_acc": 0.8268376068376069, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.58272, + "grad_norm": 0.592588370972377, + "learning_rate": 4.721034577247743e-06, + "loss": 0.3563780188560486, + "memory(GiB)": 54.25, + "step": 1821, + "token_acc": 0.897908979089791, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.58304, + "grad_norm": 0.5947776414987895, + "learning_rate": 4.720629620003386e-06, + "loss": 0.4122748374938965, + "memory(GiB)": 54.25, + "step": 1822, + "token_acc": 0.8900881057268722, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.58336, + "grad_norm": 0.895958775524217, + "learning_rate": 4.720224386440195e-06, + "loss": 0.4483799338340759, + "memory(GiB)": 54.25, + "step": 1823, + "token_acc": 0.9371900826446281, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.58368, + "grad_norm": 0.6493686163908919, + "learning_rate": 4.7198188766085936e-06, + "loss": 0.3813807964324951, + "memory(GiB)": 54.25, + "step": 1824, + "token_acc": 0.9162466072120977, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.584, + "grad_norm": 0.6605989580604787, + "learning_rate": 4.719413090559042e-06, + "loss": 0.4767861068248749, + "memory(GiB)": 54.25, + "step": 1825, + "token_acc": 0.7813552188552189, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.58432, + "grad_norm": 0.679270998077196, + "learning_rate": 4.719007028342032e-06, + "loss": 0.4362083077430725, + "memory(GiB)": 54.25, + "step": 1826, + "token_acc": 0.891832229580574, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.58464, + "grad_norm": 0.6466767187048821, + "learning_rate": 4.718600690008091e-06, + "loss": 0.3476477861404419, + "memory(GiB)": 54.25, + "step": 1827, + "token_acc": 0.903483116192502, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.58496, + "grad_norm": 0.6412182348366258, + "learning_rate": 4.718194075607781e-06, + "loss": 0.38519108295440674, + "memory(GiB)": 54.25, + "step": 1828, + "token_acc": 0.8177391304347826, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.58528, + "grad_norm": 0.6360706873761225, + "learning_rate": 4.717787185191698e-06, + "loss": 0.39760780334472656, + "memory(GiB)": 54.25, + "step": 1829, + "token_acc": 0.8770883054892601, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.5856, + "grad_norm": 0.6566022569903045, + "learning_rate": 
4.717380018810473e-06, + "loss": 0.3461950421333313, + "memory(GiB)": 54.25, + "step": 1830, + "token_acc": 0.8887801696020874, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.58592, + "grad_norm": 0.6412332900861341, + "learning_rate": 4.716972576514771e-06, + "loss": 0.342499703168869, + "memory(GiB)": 54.25, + "step": 1831, + "token_acc": 0.9278227727489281, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.58624, + "grad_norm": 0.6056872772958543, + "learning_rate": 4.716564858355291e-06, + "loss": 0.36778876185417175, + "memory(GiB)": 54.25, + "step": 1832, + "token_acc": 0.9292831886853102, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.58656, + "grad_norm": 0.6334258745699023, + "learning_rate": 4.716156864382765e-06, + "loss": 0.34735170006752014, + "memory(GiB)": 54.25, + "step": 1833, + "token_acc": 0.8962264150943396, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.58688, + "grad_norm": 0.625785684451839, + "learning_rate": 4.715748594647961e-06, + "loss": 0.3761109709739685, + "memory(GiB)": 54.25, + "step": 1834, + "token_acc": 0.8864833235810415, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.5872, + "grad_norm": 0.6504366539374913, + "learning_rate": 4.715340049201683e-06, + "loss": 0.40765106678009033, + "memory(GiB)": 54.25, + "step": 1835, + "token_acc": 0.8242280285035629, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.58752, + "grad_norm": 0.6279645898902287, + "learning_rate": 4.714931228094767e-06, + "loss": 0.31422537565231323, + "memory(GiB)": 54.25, + "step": 1836, + "token_acc": 0.9406087602078693, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.58784, + "grad_norm": 0.6697938993513486, + "learning_rate": 4.714522131378082e-06, + "loss": 0.4501338601112366, + "memory(GiB)": 54.25, + "step": 1837, + "token_acc": 0.8982019363762103, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.58816, + "grad_norm": 0.6468227570729663, + "learning_rate": 4.714112759102534e-06, + "loss": 0.31853756308555603, + "memory(GiB)": 54.25, + "step": 1838, + "token_acc": 0.9458154506437768, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.58848, + "grad_norm": 0.6443090516958139, + "learning_rate": 4.713703111319062e-06, + "loss": 0.3752342462539673, + "memory(GiB)": 54.25, + "step": 1839, + "token_acc": 0.9326012873911397, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.5888, + "grad_norm": 0.69319889507301, + "learning_rate": 4.71329318807864e-06, + "loss": 0.4515884518623352, + "memory(GiB)": 54.25, + "step": 1840, + "token_acc": 0.8789459953039395, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.58912, + "grad_norm": 0.6336320306050697, + "learning_rate": 4.712882989432276e-06, + "loss": 0.4417129456996918, + "memory(GiB)": 54.25, + "step": 1841, + "token_acc": 0.8995307756003312, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.58944, + "grad_norm": 0.6785911710982593, + "learning_rate": 4.7124725154310116e-06, + "loss": 0.4416154623031616, + "memory(GiB)": 54.25, + "step": 1842, + "token_acc": 0.9128962757771623, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.58976, + "grad_norm": 0.6877340495884596, + "learning_rate": 4.712061766125923e-06, + "loss": 0.47496819496154785, + "memory(GiB)": 54.25, + "step": 1843, + "token_acc": 0.9429669977571291, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.59008, + "grad_norm": 0.6666697917489173, + "learning_rate": 4.711650741568122e-06, + "loss": 0.3794732689857483, + "memory(GiB)": 54.25, + "step": 1844, + "token_acc": 
0.8656527249683144, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.5904, + "grad_norm": 0.6672772481583105, + "learning_rate": 4.711239441808751e-06, + "loss": 0.3951161205768585, + "memory(GiB)": 54.25, + "step": 1845, + "token_acc": 0.9240421241317499, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.59072, + "grad_norm": 0.7592715206788123, + "learning_rate": 4.710827866898992e-06, + "loss": 0.4339952766895294, + "memory(GiB)": 54.25, + "step": 1846, + "token_acc": 0.9477855477855478, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.59104, + "grad_norm": 0.6448940581241043, + "learning_rate": 4.7104160168900575e-06, + "loss": 0.3923218250274658, + "memory(GiB)": 54.25, + "step": 1847, + "token_acc": 0.887249443207127, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.59136, + "grad_norm": 0.6317991578652563, + "learning_rate": 4.710003891833194e-06, + "loss": 0.34738385677337646, + "memory(GiB)": 54.25, + "step": 1848, + "token_acc": 0.9151329243353783, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.59168, + "grad_norm": 0.6395302922754686, + "learning_rate": 4.709591491779684e-06, + "loss": 0.36953243613243103, + "memory(GiB)": 54.25, + "step": 1849, + "token_acc": 0.9124423963133641, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.592, + "grad_norm": 0.667646933114188, + "learning_rate": 4.709178816780844e-06, + "loss": 0.38187217712402344, + "memory(GiB)": 54.25, + "step": 1850, + "token_acc": 0.9005186232909005, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.59232, + "grad_norm": 0.7084046084628721, + "learning_rate": 4.708765866888023e-06, + "loss": 0.4074920415878296, + "memory(GiB)": 54.25, + "step": 1851, + "token_acc": 0.9400129701686122, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.59264, + "grad_norm": 0.6722459900887927, + "learning_rate": 4.708352642152606e-06, + "loss": 0.3094560503959656, + "memory(GiB)": 54.25, + "step": 1852, + "token_acc": 0.8766622340425532, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.59296, + "grad_norm": 0.6216506467272805, + "learning_rate": 4.707939142626013e-06, + "loss": 0.34115493297576904, + "memory(GiB)": 54.25, + "step": 1853, + "token_acc": 0.9430051813471503, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.59328, + "grad_norm": 0.5954181410720727, + "learning_rate": 4.707525368359696e-06, + "loss": 0.29457253217697144, + "memory(GiB)": 54.25, + "step": 1854, + "token_acc": 0.918580375782881, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.5936, + "grad_norm": 0.6408381584626066, + "learning_rate": 4.7071113194051395e-06, + "loss": 0.40060746669769287, + "memory(GiB)": 54.25, + "step": 1855, + "token_acc": 0.8712299465240642, + "train_speed(iter/s)": 0.241973 + }, + { + "epoch": 0.59392, + "grad_norm": 0.8159051145030883, + "learning_rate": 4.706696995813869e-06, + "loss": 0.41655248403549194, + "memory(GiB)": 54.25, + "step": 1856, + "token_acc": 0.9377389404696886, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.59424, + "grad_norm": 0.6507983338240121, + "learning_rate": 4.706282397637437e-06, + "loss": 0.36722099781036377, + "memory(GiB)": 54.25, + "step": 1857, + "token_acc": 0.9196797520661157, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.59456, + "grad_norm": 0.6834787536619207, + "learning_rate": 4.705867524927435e-06, + "loss": 0.41155320405960083, + "memory(GiB)": 54.25, + "step": 1858, + "token_acc": 0.8975755924816127, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.59488, + "grad_norm": 0.7105329781030931, + 
"learning_rate": 4.705452377735484e-06, + "loss": 0.3797593116760254, + "memory(GiB)": 54.25, + "step": 1859, + "token_acc": 0.927658019729631, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.5952, + "grad_norm": 0.6736718643235267, + "learning_rate": 4.7050369561132446e-06, + "loss": 0.31527775526046753, + "memory(GiB)": 54.25, + "step": 1860, + "token_acc": 0.9477175185329691, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.59552, + "grad_norm": 0.683308371401664, + "learning_rate": 4.704621260112407e-06, + "loss": 0.4316435754299164, + "memory(GiB)": 54.25, + "step": 1861, + "token_acc": 0.8573717948717948, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.59584, + "grad_norm": 0.9329077541439923, + "learning_rate": 4.704205289784698e-06, + "loss": 0.4739769995212555, + "memory(GiB)": 54.25, + "step": 1862, + "token_acc": 0.9328268692522991, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.59616, + "grad_norm": 0.6911107500397672, + "learning_rate": 4.703789045181879e-06, + "loss": 0.4041091799736023, + "memory(GiB)": 54.25, + "step": 1863, + "token_acc": 0.8242894056847545, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.59648, + "grad_norm": 0.6347736849308343, + "learning_rate": 4.703372526355741e-06, + "loss": 0.3799161911010742, + "memory(GiB)": 54.25, + "step": 1864, + "token_acc": 0.8602731929425157, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.5968, + "grad_norm": 0.6461436582916157, + "learning_rate": 4.702955733358118e-06, + "loss": 0.3418427109718323, + "memory(GiB)": 54.25, + "step": 1865, + "token_acc": 0.9265362169584621, + "train_speed(iter/s)": 0.241984 + }, + { + "epoch": 0.59712, + "grad_norm": 0.6602360139390672, + "learning_rate": 4.702538666240868e-06, + "loss": 0.35695260763168335, + "memory(GiB)": 54.25, + "step": 1866, + "token_acc": 0.9421433267587114, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.59744, + "grad_norm": 0.9608637311795543, + "learning_rate": 4.7021213250558885e-06, + "loss": 0.4163212776184082, + "memory(GiB)": 54.25, + "step": 1867, + "token_acc": 0.9281650071123755, + "train_speed(iter/s)": 0.241996 + }, + { + "epoch": 0.59776, + "grad_norm": 0.6586169491849986, + "learning_rate": 4.701703709855113e-06, + "loss": 0.47320181131362915, + "memory(GiB)": 54.25, + "step": 1868, + "token_acc": 0.8569587628865979, + "train_speed(iter/s)": 0.241996 + }, + { + "epoch": 0.59808, + "grad_norm": 0.6213705579438341, + "learning_rate": 4.701285820690503e-06, + "loss": 0.33058369159698486, + "memory(GiB)": 54.25, + "step": 1869, + "token_acc": 0.8992660086054164, + "train_speed(iter/s)": 0.242003 + }, + { + "epoch": 0.5984, + "grad_norm": 0.6362014078866778, + "learning_rate": 4.70086765761406e-06, + "loss": 0.2701127529144287, + "memory(GiB)": 54.25, + "step": 1870, + "token_acc": 0.9195612431444241, + "train_speed(iter/s)": 0.242011 + }, + { + "epoch": 0.59872, + "grad_norm": 0.7336699859138293, + "learning_rate": 4.700449220677816e-06, + "loss": 0.37908437848091125, + "memory(GiB)": 54.25, + "step": 1871, + "token_acc": 0.927004797806717, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.59904, + "grad_norm": 0.6833601433245742, + "learning_rate": 4.70003050993384e-06, + "loss": 0.32923102378845215, + "memory(GiB)": 54.25, + "step": 1872, + "token_acc": 0.948948948948949, + "train_speed(iter/s)": 0.242013 + }, + { + "epoch": 0.59936, + "grad_norm": 0.5955902019286784, + "learning_rate": 4.69961152543423e-06, + "loss": 0.3059242367744446, + "memory(GiB)": 54.25, + "step": 1873, + "token_acc": 
0.8874543239951279, + "train_speed(iter/s)": 0.242014 + }, + { + "epoch": 0.59968, + "grad_norm": 0.6180921030848663, + "learning_rate": 4.699192267231124e-06, + "loss": 0.34250980615615845, + "memory(GiB)": 54.25, + "step": 1874, + "token_acc": 0.8904688700999231, + "train_speed(iter/s)": 0.24201 + }, + { + "epoch": 0.6, + "grad_norm": 0.6091369428891455, + "learning_rate": 4.698772735376691e-06, + "loss": 0.37265241146087646, + "memory(GiB)": 54.25, + "step": 1875, + "token_acc": 0.8041040829278612, + "train_speed(iter/s)": 0.242013 + }, + { + "epoch": 0.60032, + "grad_norm": 0.6270883297612253, + "learning_rate": 4.698352929923133e-06, + "loss": 0.4025137424468994, + "memory(GiB)": 54.25, + "step": 1876, + "token_acc": 0.889751552795031, + "train_speed(iter/s)": 0.242005 + }, + { + "epoch": 0.60064, + "grad_norm": 0.6529258699250116, + "learning_rate": 4.69793285092269e-06, + "loss": 0.43084967136383057, + "memory(GiB)": 54.25, + "step": 1877, + "token_acc": 0.837655707576067, + "train_speed(iter/s)": 0.24201 + }, + { + "epoch": 0.60096, + "grad_norm": 0.6323453878315589, + "learning_rate": 4.697512498427631e-06, + "loss": 0.3335462212562561, + "memory(GiB)": 54.25, + "step": 1878, + "token_acc": 0.8627136752136753, + "train_speed(iter/s)": 0.242011 + }, + { + "epoch": 0.60128, + "grad_norm": 0.6025354077401586, + "learning_rate": 4.697091872490263e-06, + "loss": 0.3304687440395355, + "memory(GiB)": 54.25, + "step": 1879, + "token_acc": 0.8813559322033898, + "train_speed(iter/s)": 0.242012 + }, + { + "epoch": 0.6016, + "grad_norm": 0.7098066729322163, + "learning_rate": 4.696670973162926e-06, + "loss": 0.4528351426124573, + "memory(GiB)": 54.25, + "step": 1880, + "token_acc": 0.8277919863597613, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.60192, + "grad_norm": 0.6286003219626017, + "learning_rate": 4.696249800497992e-06, + "loss": 0.4293079972267151, + "memory(GiB)": 54.25, + "step": 1881, + "token_acc": 0.8551959114139693, + "train_speed(iter/s)": 0.242019 + }, + { + "epoch": 0.60224, + "grad_norm": 0.6267677115086208, + "learning_rate": 4.69582835454787e-06, + "loss": 0.3244718313217163, + "memory(GiB)": 54.25, + "step": 1882, + "token_acc": 0.8753661784287616, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.60256, + "grad_norm": 0.7294831268484209, + "learning_rate": 4.6954066353650005e-06, + "loss": 0.42103976011276245, + "memory(GiB)": 54.25, + "step": 1883, + "token_acc": 0.8690941919951851, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 0.60288, + "grad_norm": 0.685928400116101, + "learning_rate": 4.694984643001861e-06, + "loss": 0.4847121834754944, + "memory(GiB)": 54.25, + "step": 1884, + "token_acc": 0.8554086862692204, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.6032, + "grad_norm": 0.6090671993851012, + "learning_rate": 4.694562377510959e-06, + "loss": 0.3983410596847534, + "memory(GiB)": 54.25, + "step": 1885, + "token_acc": 0.910727969348659, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.60352, + "grad_norm": 0.7061843461097482, + "learning_rate": 4.69413983894484e-06, + "loss": 0.3444811701774597, + "memory(GiB)": 54.25, + "step": 1886, + "token_acc": 0.9184839044652129, + "train_speed(iter/s)": 0.242026 + }, + { + "epoch": 0.60384, + "grad_norm": 0.6686790874061302, + "learning_rate": 4.6937170273560805e-06, + "loss": 0.39927420020103455, + "memory(GiB)": 54.25, + "step": 1887, + "token_acc": 0.8744710860366713, + "train_speed(iter/s)": 0.242026 + }, + { + "epoch": 0.60416, + "grad_norm": 0.6887617854651125, + 
"learning_rate": 4.693293942797292e-06, + "loss": 0.4538082480430603, + "memory(GiB)": 54.25, + "step": 1888, + "token_acc": 0.9013065326633166, + "train_speed(iter/s)": 0.242023 + }, + { + "epoch": 0.60448, + "grad_norm": 0.6296564028091076, + "learning_rate": 4.692870585321119e-06, + "loss": 0.3960420489311218, + "memory(GiB)": 54.25, + "step": 1889, + "token_acc": 0.8922423429465683, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 0.6048, + "grad_norm": 0.6663456704196793, + "learning_rate": 4.692446954980244e-06, + "loss": 0.44303804636001587, + "memory(GiB)": 54.25, + "step": 1890, + "token_acc": 0.8431729518855656, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.60512, + "grad_norm": 0.6719079958366911, + "learning_rate": 4.692023051827379e-06, + "loss": 0.37473976612091064, + "memory(GiB)": 54.25, + "step": 1891, + "token_acc": 0.9331514324693042, + "train_speed(iter/s)": 0.242029 + }, + { + "epoch": 0.60544, + "grad_norm": 0.6759622389259655, + "learning_rate": 4.69159887591527e-06, + "loss": 0.4063390791416168, + "memory(GiB)": 54.25, + "step": 1892, + "token_acc": 0.8887043189368771, + "train_speed(iter/s)": 0.24203 + }, + { + "epoch": 0.60576, + "grad_norm": 0.6719731170815866, + "learning_rate": 4.691174427296699e-06, + "loss": 0.4363413155078888, + "memory(GiB)": 54.25, + "step": 1893, + "token_acc": 0.8720605819051415, + "train_speed(iter/s)": 0.242032 + }, + { + "epoch": 0.60608, + "grad_norm": 0.6759919681589908, + "learning_rate": 4.690749706024483e-06, + "loss": 0.39094769954681396, + "memory(GiB)": 54.25, + "step": 1894, + "token_acc": 0.8842235257795616, + "train_speed(iter/s)": 0.242039 + }, + { + "epoch": 0.6064, + "grad_norm": 0.635368899438073, + "learning_rate": 4.69032471215147e-06, + "loss": 0.39960670471191406, + "memory(GiB)": 54.25, + "step": 1895, + "token_acc": 0.8227477477477477, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.60672, + "grad_norm": 0.6869858828075381, + "learning_rate": 4.689899445730542e-06, + "loss": 0.37585121393203735, + "memory(GiB)": 54.25, + "step": 1896, + "token_acc": 0.8812615955473099, + "train_speed(iter/s)": 0.24204 + }, + { + "epoch": 0.60704, + "grad_norm": 0.6145271284641804, + "learning_rate": 4.689473906814618e-06, + "loss": 0.5391930937767029, + "memory(GiB)": 54.25, + "step": 1897, + "token_acc": 0.890400604686319, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.60736, + "grad_norm": 0.9958731516341256, + "learning_rate": 4.689048095456647e-06, + "loss": 0.34210747480392456, + "memory(GiB)": 54.25, + "step": 1898, + "token_acc": 0.893970189701897, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.60768, + "grad_norm": 0.6171473879454615, + "learning_rate": 4.688622011709616e-06, + "loss": 0.24367280304431915, + "memory(GiB)": 54.25, + "step": 1899, + "token_acc": 0.9184270393240169, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.608, + "grad_norm": 0.6636318918283806, + "learning_rate": 4.688195655626542e-06, + "loss": 0.35518679022789, + "memory(GiB)": 54.25, + "step": 1900, + "token_acc": 0.886354034643008, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.60832, + "grad_norm": 0.6257673688315202, + "learning_rate": 4.6877690272604785e-06, + "loss": 0.3325139284133911, + "memory(GiB)": 54.25, + "step": 1901, + "token_acc": 0.9424541607898449, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.60864, + "grad_norm": 0.6411932096589633, + "learning_rate": 4.687342126664511e-06, + "loss": 0.3099289536476135, + "memory(GiB)": 54.25, + "step": 1902, + "token_acc": 
0.8996068152031454, + "train_speed(iter/s)": 0.242046 + }, + { + "epoch": 0.60896, + "grad_norm": 0.611239666517639, + "learning_rate": 4.68691495389176e-06, + "loss": 0.3109439015388489, + "memory(GiB)": 54.25, + "step": 1903, + "token_acc": 0.8896297999540124, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.60928, + "grad_norm": 0.6396525879272598, + "learning_rate": 4.686487508995382e-06, + "loss": 0.3729288578033447, + "memory(GiB)": 54.25, + "step": 1904, + "token_acc": 0.8600430636727161, + "train_speed(iter/s)": 0.242042 + }, + { + "epoch": 0.6096, + "grad_norm": 0.5937077517501832, + "learning_rate": 4.6860597920285625e-06, + "loss": 0.3882848620414734, + "memory(GiB)": 54.25, + "step": 1905, + "token_acc": 0.8452762209767815, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.60992, + "grad_norm": 0.638139295245692, + "learning_rate": 4.685631803044523e-06, + "loss": 0.3926452696323395, + "memory(GiB)": 54.25, + "step": 1906, + "token_acc": 0.893478765374797, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.61024, + "grad_norm": 0.6331222361335191, + "learning_rate": 4.685203542096523e-06, + "loss": 0.4422401487827301, + "memory(GiB)": 54.25, + "step": 1907, + "token_acc": 0.948995983935743, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.61056, + "grad_norm": 0.6580522386970272, + "learning_rate": 4.6847750092378484e-06, + "loss": 0.41037675738334656, + "memory(GiB)": 54.25, + "step": 1908, + "token_acc": 0.8828236639039726, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.61088, + "grad_norm": 0.6594293709603198, + "learning_rate": 4.6843462045218245e-06, + "loss": 0.38065552711486816, + "memory(GiB)": 54.25, + "step": 1909, + "token_acc": 0.8214818498259573, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.6112, + "grad_norm": 0.6693211634529578, + "learning_rate": 4.683917128001809e-06, + "loss": 0.31737011671066284, + "memory(GiB)": 54.25, + "step": 1910, + "token_acc": 0.9434276206322796, + "train_speed(iter/s)": 0.24205 + }, + { + "epoch": 0.61152, + "grad_norm": 0.553076646384481, + "learning_rate": 4.683487779731191e-06, + "loss": 0.2934996485710144, + "memory(GiB)": 54.25, + "step": 1911, + "token_acc": 0.9506972111553785, + "train_speed(iter/s)": 0.242054 + }, + { + "epoch": 0.61184, + "grad_norm": 0.624833882979184, + "learning_rate": 4.683058159763397e-06, + "loss": 0.3682295083999634, + "memory(GiB)": 54.25, + "step": 1912, + "token_acc": 0.8346807917479788, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.61216, + "grad_norm": 0.6374055807584373, + "learning_rate": 4.682628268151885e-06, + "loss": 0.4138621389865875, + "memory(GiB)": 54.25, + "step": 1913, + "token_acc": 0.8655419222903885, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.61248, + "grad_norm": 0.6030902687937366, + "learning_rate": 4.682198104950149e-06, + "loss": 0.41247934103012085, + "memory(GiB)": 54.25, + "step": 1914, + "token_acc": 0.8995826812728221, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.6128, + "grad_norm": 0.6095215506676755, + "learning_rate": 4.681767670211715e-06, + "loss": 0.3288329541683197, + "memory(GiB)": 54.25, + "step": 1915, + "token_acc": 0.9098765432098765, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.61312, + "grad_norm": 0.5611592594607614, + "learning_rate": 4.68133696399014e-06, + "loss": 0.35218214988708496, + "memory(GiB)": 54.25, + "step": 1916, + "token_acc": 0.8915527671969528, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.61344, + "grad_norm": 0.6131758589114785, + 
"learning_rate": 4.680905986339021e-06, + "loss": 0.3461551368236542, + "memory(GiB)": 54.25, + "step": 1917, + "token_acc": 0.9346361185983828, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.61376, + "grad_norm": 0.6733723425226066, + "learning_rate": 4.6804747373119864e-06, + "loss": 0.4124794006347656, + "memory(GiB)": 54.25, + "step": 1918, + "token_acc": 0.8363411619283065, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.61408, + "grad_norm": 0.6445150794726912, + "learning_rate": 4.6800432169626954e-06, + "loss": 0.39320889115333557, + "memory(GiB)": 54.25, + "step": 1919, + "token_acc": 0.8606949314631814, + "train_speed(iter/s)": 0.242032 + }, + { + "epoch": 0.6144, + "grad_norm": 0.6690439351008404, + "learning_rate": 4.679611425344844e-06, + "loss": 0.3687692880630493, + "memory(GiB)": 54.25, + "step": 1920, + "token_acc": 0.8342397729959802, + "train_speed(iter/s)": 0.242032 + }, + { + "epoch": 0.61472, + "grad_norm": 0.6511019178402989, + "learning_rate": 4.679179362512162e-06, + "loss": 0.3744128942489624, + "memory(GiB)": 54.25, + "step": 1921, + "token_acc": 0.9264442052386078, + "train_speed(iter/s)": 0.242035 + }, + { + "epoch": 0.61504, + "grad_norm": 0.8125773604510147, + "learning_rate": 4.678747028518411e-06, + "loss": 0.44436532258987427, + "memory(GiB)": 54.25, + "step": 1922, + "token_acc": 0.8687040181097906, + "train_speed(iter/s)": 0.242031 + }, + { + "epoch": 0.61536, + "grad_norm": 0.6627307084794668, + "learning_rate": 4.678314423417388e-06, + "loss": 0.3515511155128479, + "memory(GiB)": 54.25, + "step": 1923, + "token_acc": 0.8471794871794872, + "train_speed(iter/s)": 0.242031 + }, + { + "epoch": 0.61568, + "grad_norm": 0.5892589385073054, + "learning_rate": 4.677881547262924e-06, + "loss": 0.3269920349121094, + "memory(GiB)": 54.25, + "step": 1924, + "token_acc": 0.92372634643377, + "train_speed(iter/s)": 0.242025 + }, + { + "epoch": 0.616, + "grad_norm": 0.6962898247967905, + "learning_rate": 4.677448400108881e-06, + "loss": 0.44587835669517517, + "memory(GiB)": 54.25, + "step": 1925, + "token_acc": 0.8223140495867769, + "train_speed(iter/s)": 0.242029 + }, + { + "epoch": 0.61632, + "grad_norm": 0.6434032269263447, + "learning_rate": 4.677014982009158e-06, + "loss": 0.391081303358078, + "memory(GiB)": 54.25, + "step": 1926, + "token_acc": 0.8771213748657357, + "train_speed(iter/s)": 0.242035 + }, + { + "epoch": 0.61664, + "grad_norm": 0.6583660100600912, + "learning_rate": 4.676581293017686e-06, + "loss": 0.4154641032218933, + "memory(GiB)": 54.25, + "step": 1927, + "token_acc": 0.8408373960424433, + "train_speed(iter/s)": 0.242039 + }, + { + "epoch": 0.61696, + "grad_norm": 0.6591230375850977, + "learning_rate": 4.67614733318843e-06, + "loss": 0.41621851921081543, + "memory(GiB)": 54.25, + "step": 1928, + "token_acc": 0.9416628387689481, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.61728, + "grad_norm": 0.7500770788685944, + "learning_rate": 4.675713102575389e-06, + "loss": 0.45208632946014404, + "memory(GiB)": 54.25, + "step": 1929, + "token_acc": 0.9364493644936449, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.6176, + "grad_norm": 0.6107721329727152, + "learning_rate": 4.675278601232595e-06, + "loss": 0.43528154492378235, + "memory(GiB)": 54.25, + "step": 1930, + "token_acc": 0.8519603058402473, + "train_speed(iter/s)": 0.242048 + }, + { + "epoch": 0.61792, + "grad_norm": 0.6256210710831153, + "learning_rate": 4.674843829214115e-06, + "loss": 0.328519344329834, + "memory(GiB)": 54.25, + "step": 1931, + "token_acc": 
0.9315143246930423, + "train_speed(iter/s)": 0.242052 + }, + { + "epoch": 0.61824, + "grad_norm": 0.6985663237485349, + "learning_rate": 4.674408786574048e-06, + "loss": 0.38824960589408875, + "memory(GiB)": 54.25, + "step": 1932, + "token_acc": 0.8914621500172831, + "train_speed(iter/s)": 0.24206 + }, + { + "epoch": 0.61856, + "grad_norm": 0.6393424779321478, + "learning_rate": 4.6739734733665275e-06, + "loss": 0.3771669566631317, + "memory(GiB)": 54.25, + "step": 1933, + "token_acc": 0.9106858054226475, + "train_speed(iter/s)": 0.242064 + }, + { + "epoch": 0.61888, + "grad_norm": 0.7063339235028138, + "learning_rate": 4.673537889645722e-06, + "loss": 0.3413848280906677, + "memory(GiB)": 54.25, + "step": 1934, + "token_acc": 0.9116171362408336, + "train_speed(iter/s)": 0.242069 + }, + { + "epoch": 0.6192, + "grad_norm": 0.6661681196723963, + "learning_rate": 4.673102035465831e-06, + "loss": 0.37112918496131897, + "memory(GiB)": 54.25, + "step": 1935, + "token_acc": 0.8762997490139836, + "train_speed(iter/s)": 0.242075 + }, + { + "epoch": 0.61952, + "grad_norm": 0.6655464387768352, + "learning_rate": 4.672665910881089e-06, + "loss": 0.40073874592781067, + "memory(GiB)": 54.25, + "step": 1936, + "token_acc": 0.886223440712817, + "train_speed(iter/s)": 0.242079 + }, + { + "epoch": 0.61984, + "grad_norm": 0.7172907896340235, + "learning_rate": 4.672229515945765e-06, + "loss": 0.4278913736343384, + "memory(GiB)": 54.25, + "step": 1937, + "token_acc": 0.8958558558558558, + "train_speed(iter/s)": 0.24208 + }, + { + "epoch": 0.62016, + "grad_norm": 0.5801782345701638, + "learning_rate": 4.671792850714161e-06, + "loss": 0.3664628863334656, + "memory(GiB)": 54.25, + "step": 1938, + "token_acc": 0.9258191349934469, + "train_speed(iter/s)": 0.242072 + }, + { + "epoch": 0.62048, + "grad_norm": 0.6020387715869386, + "learning_rate": 4.67135591524061e-06, + "loss": 0.4127838611602783, + "memory(GiB)": 54.25, + "step": 1939, + "token_acc": 0.8545420420420421, + "train_speed(iter/s)": 0.242068 + }, + { + "epoch": 0.6208, + "grad_norm": 0.6130209495356201, + "learning_rate": 4.670918709579484e-06, + "loss": 0.3207791745662689, + "memory(GiB)": 54.25, + "step": 1940, + "token_acc": 0.9087146470452978, + "train_speed(iter/s)": 0.24207 + }, + { + "epoch": 0.62112, + "grad_norm": 0.5758717297431536, + "learning_rate": 4.670481233785184e-06, + "loss": 0.3832203447818756, + "memory(GiB)": 54.25, + "step": 1941, + "token_acc": 0.9295703025680142, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.62144, + "grad_norm": 0.6400320457718781, + "learning_rate": 4.670043487912146e-06, + "loss": 0.4048970639705658, + "memory(GiB)": 54.25, + "step": 1942, + "token_acc": 0.8995479658463084, + "train_speed(iter/s)": 0.242048 + }, + { + "epoch": 0.62176, + "grad_norm": 0.6492863867539608, + "learning_rate": 4.669605472014841e-06, + "loss": 0.29992765188217163, + "memory(GiB)": 54.25, + "step": 1943, + "token_acc": 0.8724489795918368, + "train_speed(iter/s)": 0.242042 + }, + { + "epoch": 0.62208, + "grad_norm": 0.6826304081522052, + "learning_rate": 4.669167186147773e-06, + "loss": 0.44557544589042664, + "memory(GiB)": 54.25, + "step": 1944, + "token_acc": 0.7934342357706246, + "train_speed(iter/s)": 0.24204 + }, + { + "epoch": 0.6224, + "grad_norm": 0.6015684385594757, + "learning_rate": 4.6687286303654775e-06, + "loss": 0.43096548318862915, + "memory(GiB)": 54.25, + "step": 1945, + "token_acc": 0.8508193232602681, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.62272, + "grad_norm": 0.5982715357783842, + 
"learning_rate": 4.668289804722526e-06, + "loss": 0.37632519006729126, + "memory(GiB)": 54.25, + "step": 1946, + "token_acc": 0.8693638800877407, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.62304, + "grad_norm": 0.7194312785344958, + "learning_rate": 4.667850709273522e-06, + "loss": 0.3658541142940521, + "memory(GiB)": 54.25, + "step": 1947, + "token_acc": 0.9107005388760585, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.62336, + "grad_norm": 0.6323809563943787, + "learning_rate": 4.667411344073104e-06, + "loss": 0.4155728220939636, + "memory(GiB)": 54.25, + "step": 1948, + "token_acc": 0.9113247863247863, + "train_speed(iter/s)": 0.24204 + }, + { + "epoch": 0.62368, + "grad_norm": 0.6120883913881235, + "learning_rate": 4.6669717091759424e-06, + "loss": 0.35690414905548096, + "memory(GiB)": 54.25, + "step": 1949, + "token_acc": 0.9311967068509261, + "train_speed(iter/s)": 0.242042 + }, + { + "epoch": 0.624, + "grad_norm": 0.6407002890741392, + "learning_rate": 4.666531804636744e-06, + "loss": 0.46792322397232056, + "memory(GiB)": 54.25, + "step": 1950, + "token_acc": 0.920631067961165, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.62432, + "grad_norm": 0.6852187598624393, + "learning_rate": 4.666091630510246e-06, + "loss": 0.47894978523254395, + "memory(GiB)": 54.25, + "step": 1951, + "token_acc": 0.8026147141758524, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.62464, + "grad_norm": 0.6448692780158057, + "learning_rate": 4.665651186851221e-06, + "loss": 0.41521334648132324, + "memory(GiB)": 54.25, + "step": 1952, + "token_acc": 0.8548329141214517, + "train_speed(iter/s)": 0.242051 + }, + { + "epoch": 0.62496, + "grad_norm": 0.5992232273325262, + "learning_rate": 4.665210473714473e-06, + "loss": 0.32369518280029297, + "memory(GiB)": 54.25, + "step": 1953, + "token_acc": 0.8945074106364429, + "train_speed(iter/s)": 0.242036 + }, + { + "epoch": 0.62528, + "grad_norm": 0.6861653300694217, + "learning_rate": 4.664769491154844e-06, + "loss": 0.4114079475402832, + "memory(GiB)": 54.25, + "step": 1954, + "token_acc": 0.8989374262101535, + "train_speed(iter/s)": 0.242038 + }, + { + "epoch": 0.6256, + "grad_norm": 0.6545052627943361, + "learning_rate": 4.664328239227204e-06, + "loss": 0.3037782907485962, + "memory(GiB)": 54.25, + "step": 1955, + "token_acc": 0.9089108910891089, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.62592, + "grad_norm": 0.5892655807709288, + "learning_rate": 4.66388671798646e-06, + "loss": 0.3009677529335022, + "memory(GiB)": 54.25, + "step": 1956, + "token_acc": 0.9447969543147208, + "train_speed(iter/s)": 0.242044 + }, + { + "epoch": 0.62624, + "grad_norm": 0.6340947463775206, + "learning_rate": 4.663444927487552e-06, + "loss": 0.3781493306159973, + "memory(GiB)": 54.25, + "step": 1957, + "token_acc": 0.8541274817136886, + "train_speed(iter/s)": 0.242047 + }, + { + "epoch": 0.62656, + "grad_norm": 0.5835855069123074, + "learning_rate": 4.663002867785453e-06, + "loss": 0.34882017970085144, + "memory(GiB)": 54.25, + "step": 1958, + "token_acc": 0.9020618556701031, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.62688, + "grad_norm": 0.570161492618228, + "learning_rate": 4.662560538935169e-06, + "loss": 0.35626018047332764, + "memory(GiB)": 54.25, + "step": 1959, + "token_acc": 0.8886574512402954, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.6272, + "grad_norm": 0.6214163063133312, + "learning_rate": 4.662117940991742e-06, + "loss": 0.43439650535583496, + "memory(GiB)": 54.25, + "step": 1960, + 
"token_acc": 0.8762665627435697, + "train_speed(iter/s)": 0.242035 + }, + { + "epoch": 0.62752, + "grad_norm": 0.6590667418085603, + "learning_rate": 4.661675074010244e-06, + "loss": 0.40593114495277405, + "memory(GiB)": 54.25, + "step": 1961, + "token_acc": 0.8704943357363543, + "train_speed(iter/s)": 0.242014 + }, + { + "epoch": 0.62784, + "grad_norm": 0.6134012650704175, + "learning_rate": 4.661231938045781e-06, + "loss": 0.4027106761932373, + "memory(GiB)": 54.25, + "step": 1962, + "token_acc": 0.8677086240580519, + "train_speed(iter/s)": 0.242011 + }, + { + "epoch": 0.62816, + "grad_norm": 0.6615636934626108, + "learning_rate": 4.660788533153497e-06, + "loss": 0.31980371475219727, + "memory(GiB)": 54.25, + "step": 1963, + "token_acc": 0.8617466174661746, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.62848, + "grad_norm": 0.6957258939923855, + "learning_rate": 4.660344859388563e-06, + "loss": 0.45962297916412354, + "memory(GiB)": 54.25, + "step": 1964, + "token_acc": 0.8320070733863837, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.6288, + "grad_norm": 0.6585152487876823, + "learning_rate": 4.659900916806189e-06, + "loss": 0.33487510681152344, + "memory(GiB)": 54.25, + "step": 1965, + "token_acc": 0.9023000633044946, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.62912, + "grad_norm": 0.8561296661257789, + "learning_rate": 4.659456705461613e-06, + "loss": 0.34425294399261475, + "memory(GiB)": 54.25, + "step": 1966, + "token_acc": 0.8652760736196319, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.62944, + "grad_norm": 0.6337954709104856, + "learning_rate": 4.659012225410111e-06, + "loss": 0.38371890783309937, + "memory(GiB)": 54.25, + "step": 1967, + "token_acc": 0.8574670903313664, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.62976, + "grad_norm": 0.7433509438870004, + "learning_rate": 4.658567476706991e-06, + "loss": 0.34408414363861084, + "memory(GiB)": 54.25, + "step": 1968, + "token_acc": 0.9043893959148196, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.63008, + "grad_norm": 0.6123349219108039, + "learning_rate": 4.658122459407595e-06, + "loss": 0.45814818143844604, + "memory(GiB)": 54.25, + "step": 1969, + "token_acc": 0.8709597215315763, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.6304, + "grad_norm": 0.6231311716384937, + "learning_rate": 4.6576771735672965e-06, + "loss": 0.37207040190696716, + "memory(GiB)": 54.25, + "step": 1970, + "token_acc": 0.8472195862592522, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.63072, + "grad_norm": 0.7121828518277857, + "learning_rate": 4.657231619241503e-06, + "loss": 0.46325820684432983, + "memory(GiB)": 54.25, + "step": 1971, + "token_acc": 0.8310152990264256, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.63104, + "grad_norm": 0.6555747894973979, + "learning_rate": 4.656785796485658e-06, + "loss": 0.3829246759414673, + "memory(GiB)": 54.25, + "step": 1972, + "token_acc": 0.874902114330462, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.63136, + "grad_norm": 0.6036981327202522, + "learning_rate": 4.656339705355235e-06, + "loss": 0.33706676959991455, + "memory(GiB)": 54.25, + "step": 1973, + "token_acc": 0.8633107839235973, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.63168, + "grad_norm": 0.6013159337480306, + "learning_rate": 4.655893345905742e-06, + "loss": 0.3934800922870636, + "memory(GiB)": 54.25, + "step": 1974, + "token_acc": 0.9341550437885177, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.632, + "grad_norm": 
0.5928805877717043, + "learning_rate": 4.655446718192721e-06, + "loss": 0.3024771809577942, + "memory(GiB)": 54.25, + "step": 1975, + "token_acc": 0.891358024691358, + "train_speed(iter/s)": 0.241921 + }, + { + "epoch": 0.63232, + "grad_norm": 0.6712217941312572, + "learning_rate": 4.654999822271748e-06, + "loss": 0.4432399272918701, + "memory(GiB)": 54.25, + "step": 1976, + "token_acc": 0.8484666265784726, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.63264, + "grad_norm": 0.7144408547992509, + "learning_rate": 4.654552658198431e-06, + "loss": 0.31795239448547363, + "memory(GiB)": 54.25, + "step": 1977, + "token_acc": 0.896976483762598, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.63296, + "grad_norm": 0.5914120295231738, + "learning_rate": 4.654105226028411e-06, + "loss": 0.2936224937438965, + "memory(GiB)": 54.25, + "step": 1978, + "token_acc": 0.9297629499561019, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.63328, + "grad_norm": 0.7866156436185314, + "learning_rate": 4.653657525817364e-06, + "loss": 0.4134766459465027, + "memory(GiB)": 54.25, + "step": 1979, + "token_acc": 0.8478522681653954, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.6336, + "grad_norm": 0.6325449102731839, + "learning_rate": 4.653209557620998e-06, + "loss": 0.38820523023605347, + "memory(GiB)": 54.25, + "step": 1980, + "token_acc": 0.9485294117647058, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.63392, + "grad_norm": 0.6856943005088856, + "learning_rate": 4.652761321495056e-06, + "loss": 0.36519330739974976, + "memory(GiB)": 54.25, + "step": 1981, + "token_acc": 0.9010869565217391, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.63424, + "grad_norm": 0.6746391825182912, + "learning_rate": 4.6523128174953115e-06, + "loss": 0.4218871593475342, + "memory(GiB)": 54.25, + "step": 1982, + "token_acc": 0.9385253630020743, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.63456, + "grad_norm": 0.6852187465264686, + "learning_rate": 4.651864045677574e-06, + "loss": 0.3585265874862671, + "memory(GiB)": 54.25, + "step": 1983, + "token_acc": 0.95104, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.63488, + "grad_norm": 0.5963745948751106, + "learning_rate": 4.651415006097686e-06, + "loss": 0.3162480592727661, + "memory(GiB)": 54.25, + "step": 1984, + "token_acc": 0.9218390804597701, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.6352, + "grad_norm": 0.6698850234769488, + "learning_rate": 4.650965698811522e-06, + "loss": 0.28856557607650757, + "memory(GiB)": 54.25, + "step": 1985, + "token_acc": 0.8960070206230804, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.63552, + "grad_norm": 0.6503935669315343, + "learning_rate": 4.650516123874989e-06, + "loss": 0.4100812077522278, + "memory(GiB)": 54.25, + "step": 1986, + "token_acc": 0.8784313725490196, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.63584, + "grad_norm": 0.682257565274035, + "learning_rate": 4.6500662813440315e-06, + "loss": 0.4286814332008362, + "memory(GiB)": 54.25, + "step": 1987, + "token_acc": 0.8797202797202798, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.63616, + "grad_norm": 0.5859257948703694, + "learning_rate": 4.649616171274623e-06, + "loss": 0.45097866654396057, + "memory(GiB)": 54.25, + "step": 1988, + "token_acc": 0.8346854155399315, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.63648, + "grad_norm": 0.6715666615654339, + "learning_rate": 4.649165793722772e-06, + "loss": 0.3921581506729126, + "memory(GiB)": 54.25, + "step": 1989, + 
"token_acc": 0.8721359940872137, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.6368, + "grad_norm": 0.6679428744472259, + "learning_rate": 4.648715148744521e-06, + "loss": 0.39210036396980286, + "memory(GiB)": 54.25, + "step": 1990, + "token_acc": 0.8772554002541296, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.63712, + "grad_norm": 0.6105463015728254, + "learning_rate": 4.648264236395944e-06, + "loss": 0.40102678537368774, + "memory(GiB)": 54.25, + "step": 1991, + "token_acc": 0.9180390654921486, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.63744, + "grad_norm": 0.5836394313566068, + "learning_rate": 4.647813056733149e-06, + "loss": 0.40373939275741577, + "memory(GiB)": 54.25, + "step": 1992, + "token_acc": 0.8356374807987711, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.63776, + "grad_norm": 0.743432504482787, + "learning_rate": 4.647361609812279e-06, + "loss": 0.3083990514278412, + "memory(GiB)": 54.25, + "step": 1993, + "token_acc": 0.9408194233687405, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.63808, + "grad_norm": 0.6940948195938488, + "learning_rate": 4.646909895689508e-06, + "loss": 0.4222117066383362, + "memory(GiB)": 54.25, + "step": 1994, + "token_acc": 0.8818955942243614, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.6384, + "grad_norm": 0.5863672894270952, + "learning_rate": 4.646457914421043e-06, + "loss": 0.4935317635536194, + "memory(GiB)": 54.25, + "step": 1995, + "token_acc": 0.8701025506179332, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.63872, + "grad_norm": 0.6392220453590921, + "learning_rate": 4.646005666063127e-06, + "loss": 0.40153807401657104, + "memory(GiB)": 54.25, + "step": 1996, + "token_acc": 0.8771701388888888, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.63904, + "grad_norm": 0.674067885523956, + "learning_rate": 4.645553150672032e-06, + "loss": 0.39282283186912537, + "memory(GiB)": 54.25, + "step": 1997, + "token_acc": 0.8999574286930608, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.63936, + "grad_norm": 0.6683170417323876, + "learning_rate": 4.645100368304068e-06, + "loss": 0.41772860288619995, + "memory(GiB)": 54.25, + "step": 1998, + "token_acc": 0.8809681697612732, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.63968, + "grad_norm": 0.6051290153323855, + "learning_rate": 4.644647319015576e-06, + "loss": 0.3073748052120209, + "memory(GiB)": 54.25, + "step": 1999, + "token_acc": 0.9615947329919532, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.64, + "grad_norm": 0.5779922266235421, + "learning_rate": 4.644194002862929e-06, + "loss": 0.4151149392127991, + "memory(GiB)": 54.25, + "step": 2000, + "token_acc": 0.8276368981991915, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.64032, + "grad_norm": 0.6629886815133891, + "learning_rate": 4.643740419902533e-06, + "loss": 0.340211421251297, + "memory(GiB)": 54.25, + "step": 2001, + "token_acc": 0.8524788391777509, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.64064, + "grad_norm": 0.7068132698874501, + "learning_rate": 4.643286570190832e-06, + "loss": 0.3366629481315613, + "memory(GiB)": 54.25, + "step": 2002, + "token_acc": 0.9130434782608695, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.64096, + "grad_norm": 0.6167134638153261, + "learning_rate": 4.642832453784296e-06, + "loss": 0.39373624324798584, + "memory(GiB)": 54.25, + "step": 2003, + "token_acc": 0.9160179287567822, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.64128, + "grad_norm": 
0.6234042202709343, + "learning_rate": 4.6423780707394344e-06, + "loss": 0.34421026706695557, + "memory(GiB)": 54.25, + "step": 2004, + "token_acc": 0.9164413196322336, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.6416, + "grad_norm": 0.6830850079838252, + "learning_rate": 4.641923421112787e-06, + "loss": 0.46718519926071167, + "memory(GiB)": 54.25, + "step": 2005, + "token_acc": 0.8484706981088023, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.64192, + "grad_norm": 0.5890724567990054, + "learning_rate": 4.641468504960926e-06, + "loss": 0.39781153202056885, + "memory(GiB)": 54.25, + "step": 2006, + "token_acc": 0.8553964373035278, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.64224, + "grad_norm": 0.7403012205269474, + "learning_rate": 4.64101332234046e-06, + "loss": 0.4962840676307678, + "memory(GiB)": 54.25, + "step": 2007, + "token_acc": 0.8544644514572118, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.64256, + "grad_norm": 0.6545240346155525, + "learning_rate": 4.640557873308025e-06, + "loss": 0.37028154730796814, + "memory(GiB)": 54.25, + "step": 2008, + "token_acc": 0.9378208784940103, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.64288, + "grad_norm": 0.632219530060106, + "learning_rate": 4.640102157920297e-06, + "loss": 0.31671416759490967, + "memory(GiB)": 54.25, + "step": 2009, + "token_acc": 0.9390524967989756, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.6432, + "grad_norm": 0.6186565820220504, + "learning_rate": 4.63964617623398e-06, + "loss": 0.3908785581588745, + "memory(GiB)": 54.25, + "step": 2010, + "token_acc": 0.927143778207608, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.64352, + "grad_norm": 0.6541592982153122, + "learning_rate": 4.639189928305815e-06, + "loss": 0.36169523000717163, + "memory(GiB)": 54.25, + "step": 2011, + "token_acc": 0.837671905697446, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.64384, + "grad_norm": 0.6108354472420352, + "learning_rate": 4.638733414192571e-06, + "loss": 0.35303133726119995, + "memory(GiB)": 54.25, + "step": 2012, + "token_acc": 0.86670598643468, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.64416, + "grad_norm": 0.6336700608815171, + "learning_rate": 4.638276633951057e-06, + "loss": 0.4193176031112671, + "memory(GiB)": 54.25, + "step": 2013, + "token_acc": 0.8591749644381224, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.64448, + "grad_norm": 0.6936179796931091, + "learning_rate": 4.637819587638108e-06, + "loss": 0.45419371128082275, + "memory(GiB)": 54.25, + "step": 2014, + "token_acc": 0.922247882986913, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.6448, + "grad_norm": 0.6250333369336042, + "learning_rate": 4.637362275310597e-06, + "loss": 0.3159584403038025, + "memory(GiB)": 54.25, + "step": 2015, + "token_acc": 0.927801724137931, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.64512, + "grad_norm": 0.6100808729239398, + "learning_rate": 4.63690469702543e-06, + "loss": 0.33871686458587646, + "memory(GiB)": 54.25, + "step": 2016, + "token_acc": 0.9607097591888466, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.64544, + "grad_norm": 0.6167614891465284, + "learning_rate": 4.636446852839541e-06, + "loss": 0.3372134566307068, + "memory(GiB)": 54.25, + "step": 2017, + "token_acc": 0.8985507246376812, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.64576, + "grad_norm": 0.7077988843180245, + "learning_rate": 4.635988742809905e-06, + "loss": 0.4795536398887634, + "memory(GiB)": 54.25, + "step": 
2018, + "token_acc": 0.8321711949987978, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.64608, + "grad_norm": 0.6363273506912489, + "learning_rate": 4.635530366993522e-06, + "loss": 0.4402506351470947, + "memory(GiB)": 54.25, + "step": 2019, + "token_acc": 0.890176322418136, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.6464, + "grad_norm": 0.6459912455700317, + "learning_rate": 4.635071725447432e-06, + "loss": 0.4209737777709961, + "memory(GiB)": 54.25, + "step": 2020, + "token_acc": 0.8588192152548588, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.64672, + "grad_norm": 0.6548196775571677, + "learning_rate": 4.634612818228703e-06, + "loss": 0.41652634739875793, + "memory(GiB)": 54.25, + "step": 2021, + "token_acc": 0.8542795232936078, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.64704, + "grad_norm": 0.6782700013593458, + "learning_rate": 4.6341536453944394e-06, + "loss": 0.487751305103302, + "memory(GiB)": 54.25, + "step": 2022, + "token_acc": 0.8822803045894216, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.64736, + "grad_norm": 0.6582736642760078, + "learning_rate": 4.633694207001776e-06, + "loss": 0.3341176211833954, + "memory(GiB)": 54.25, + "step": 2023, + "token_acc": 0.9016756244072084, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.64768, + "grad_norm": 0.6358191322977322, + "learning_rate": 4.633234503107884e-06, + "loss": 0.41950321197509766, + "memory(GiB)": 54.25, + "step": 2024, + "token_acc": 0.8387978142076503, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.648, + "grad_norm": 0.6081236204122868, + "learning_rate": 4.632774533769963e-06, + "loss": 0.315687358379364, + "memory(GiB)": 54.25, + "step": 2025, + "token_acc": 0.9358710562414266, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.64832, + "grad_norm": 0.6114502136454347, + "learning_rate": 4.632314299045249e-06, + "loss": 0.31237876415252686, + "memory(GiB)": 54.25, + "step": 2026, + "token_acc": 0.9118621603711067, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.64864, + "grad_norm": 0.5958088889311552, + "learning_rate": 4.631853798991012e-06, + "loss": 0.3062520921230316, + "memory(GiB)": 54.25, + "step": 2027, + "token_acc": 0.9429102496016994, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.64896, + "grad_norm": 0.8181729476789952, + "learning_rate": 4.6313930336645506e-06, + "loss": 0.34753796458244324, + "memory(GiB)": 54.25, + "step": 2028, + "token_acc": 0.9056546719080738, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.64928, + "grad_norm": 0.6083288098388433, + "learning_rate": 4.630932003123201e-06, + "loss": 0.2872673273086548, + "memory(GiB)": 54.25, + "step": 2029, + "token_acc": 0.9361340971780182, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.6496, + "grad_norm": 0.6789443463323817, + "learning_rate": 4.630470707424329e-06, + "loss": 0.4063149392604828, + "memory(GiB)": 54.25, + "step": 2030, + "token_acc": 0.8516347933374461, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.64992, + "grad_norm": 0.7578602977510688, + "learning_rate": 4.630009146625337e-06, + "loss": 0.432778000831604, + "memory(GiB)": 54.25, + "step": 2031, + "token_acc": 0.8950459652706844, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.65024, + "grad_norm": 0.6615068124313122, + "learning_rate": 4.629547320783656e-06, + "loss": 0.3806997537612915, + "memory(GiB)": 54.25, + "step": 2032, + "token_acc": 0.8676992578251048, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.65056, + "grad_norm": 
0.6303480066474006, + "learning_rate": 4.629085229956753e-06, + "loss": 0.3620893061161041, + "memory(GiB)": 54.25, + "step": 2033, + "token_acc": 0.9239884393063584, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.65088, + "grad_norm": 0.6844789051571102, + "learning_rate": 4.628622874202127e-06, + "loss": 0.4069259762763977, + "memory(GiB)": 54.25, + "step": 2034, + "token_acc": 0.8399233165588306, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.6512, + "grad_norm": 0.6230152290679987, + "learning_rate": 4.628160253577311e-06, + "loss": 0.3692387044429779, + "memory(GiB)": 54.25, + "step": 2035, + "token_acc": 0.872922578029996, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.65152, + "grad_norm": 0.69229268175849, + "learning_rate": 4.627697368139868e-06, + "loss": 0.35073792934417725, + "memory(GiB)": 54.25, + "step": 2036, + "token_acc": 0.9123563218390804, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.65184, + "grad_norm": 0.646534589899056, + "learning_rate": 4.6272342179474e-06, + "loss": 0.36655715107917786, + "memory(GiB)": 54.25, + "step": 2037, + "token_acc": 0.8486497801967762, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.65216, + "grad_norm": 0.6685988847234601, + "learning_rate": 4.626770803057534e-06, + "loss": 0.3943156599998474, + "memory(GiB)": 54.25, + "step": 2038, + "token_acc": 0.9032767469403868, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.65248, + "grad_norm": 0.7221798358966357, + "learning_rate": 4.626307123527936e-06, + "loss": 0.34491991996765137, + "memory(GiB)": 54.25, + "step": 2039, + "token_acc": 0.9388560157790927, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.6528, + "grad_norm": 0.6037597485826991, + "learning_rate": 4.625843179416301e-06, + "loss": 0.3360830843448639, + "memory(GiB)": 54.25, + "step": 2040, + "token_acc": 0.9140565317035906, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.65312, + "grad_norm": 0.6739052603933086, + "learning_rate": 4.625378970780362e-06, + "loss": 0.5124650001525879, + "memory(GiB)": 54.25, + "step": 2041, + "token_acc": 0.8967457793002203, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.65344, + "grad_norm": 0.5997829142737697, + "learning_rate": 4.6249144976778796e-06, + "loss": 0.34596186876296997, + "memory(GiB)": 54.25, + "step": 2042, + "token_acc": 0.8308465780084333, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.65376, + "grad_norm": 0.6717055459100214, + "learning_rate": 4.62444976016665e-06, + "loss": 0.348200261592865, + "memory(GiB)": 54.25, + "step": 2043, + "token_acc": 0.8891464699683878, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.65408, + "grad_norm": 0.6209538441239699, + "learning_rate": 4.623984758304501e-06, + "loss": 0.3759641647338867, + "memory(GiB)": 54.25, + "step": 2044, + "token_acc": 0.8896310432569975, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.6544, + "grad_norm": 0.6096028056135795, + "learning_rate": 4.623519492149294e-06, + "loss": 0.35934293270111084, + "memory(GiB)": 54.25, + "step": 2045, + "token_acc": 0.8953161592505855, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.65472, + "grad_norm": 0.6451759293904669, + "learning_rate": 4.623053961758924e-06, + "loss": 0.3638477921485901, + "memory(GiB)": 54.25, + "step": 2046, + "token_acc": 0.85025, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.65504, + "grad_norm": 0.5919676786391546, + "learning_rate": 4.622588167191317e-06, + "loss": 0.3444078266620636, + "memory(GiB)": 54.25, + "step": 2047, + 
"token_acc": 0.8945273631840795, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.65536, + "grad_norm": 0.6449805279514448, + "learning_rate": 4.622122108504436e-06, + "loss": 0.41620326042175293, + "memory(GiB)": 54.25, + "step": 2048, + "token_acc": 0.8190148911798396, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.65568, + "grad_norm": 0.65439087644287, + "learning_rate": 4.62165578575627e-06, + "loss": 0.3882453441619873, + "memory(GiB)": 54.25, + "step": 2049, + "token_acc": 0.9472631842039491, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.656, + "grad_norm": 0.5586698252243611, + "learning_rate": 4.621189199004849e-06, + "loss": 0.2821381986141205, + "memory(GiB)": 54.25, + "step": 2050, + "token_acc": 0.9378401604124893, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.65632, + "grad_norm": 0.5760846566692731, + "learning_rate": 4.6207223483082275e-06, + "loss": 0.3274834454059601, + "memory(GiB)": 54.25, + "step": 2051, + "token_acc": 0.9545038167938932, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.65664, + "grad_norm": 0.5865702677139969, + "learning_rate": 4.620255233724499e-06, + "loss": 0.41875532269477844, + "memory(GiB)": 54.25, + "step": 2052, + "token_acc": 0.8635014836795252, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.65696, + "grad_norm": 0.5753153600639342, + "learning_rate": 4.6197878553117885e-06, + "loss": 0.3889971673488617, + "memory(GiB)": 54.25, + "step": 2053, + "token_acc": 0.9152755905511811, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.65728, + "grad_norm": 0.6133846291828488, + "learning_rate": 4.619320213128251e-06, + "loss": 0.37228280305862427, + "memory(GiB)": 54.25, + "step": 2054, + "token_acc": 0.8883955600403632, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.6576, + "grad_norm": 0.6017133886318397, + "learning_rate": 4.618852307232078e-06, + "loss": 0.38927024602890015, + "memory(GiB)": 54.25, + "step": 2055, + "token_acc": 0.884521484375, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.65792, + "grad_norm": 0.6143848776566504, + "learning_rate": 4.618384137681492e-06, + "loss": 0.37542271614074707, + "memory(GiB)": 54.25, + "step": 2056, + "token_acc": 0.8614540466392319, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.65824, + "grad_norm": 0.6544808557562906, + "learning_rate": 4.6179157045347485e-06, + "loss": 0.38059288263320923, + "memory(GiB)": 54.25, + "step": 2057, + "token_acc": 0.9151515151515152, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.65856, + "grad_norm": 0.9525591073186699, + "learning_rate": 4.6174470078501365e-06, + "loss": 0.4132039546966553, + "memory(GiB)": 54.25, + "step": 2058, + "token_acc": 0.9257352941176471, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.65888, + "grad_norm": 0.6265906810848679, + "learning_rate": 4.616978047685977e-06, + "loss": 0.3340199589729309, + "memory(GiB)": 54.25, + "step": 2059, + "token_acc": 0.8852333588370314, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.6592, + "grad_norm": 0.7163257728526217, + "learning_rate": 4.616508824100622e-06, + "loss": 0.4100877642631531, + "memory(GiB)": 54.25, + "step": 2060, + "token_acc": 0.8400735294117647, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.65952, + "grad_norm": 2.613214506998481, + "learning_rate": 4.616039337152461e-06, + "loss": 0.4122094511985779, + "memory(GiB)": 54.25, + "step": 2061, + "token_acc": 0.920236013986014, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.65984, + "grad_norm": 
0.6174912430256636, + "learning_rate": 4.6155695868999114e-06, + "loss": 0.370755672454834, + "memory(GiB)": 54.25, + "step": 2062, + "token_acc": 0.8726857142857143, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.66016, + "grad_norm": 0.6462741954815353, + "learning_rate": 4.615099573401427e-06, + "loss": 0.40816307067871094, + "memory(GiB)": 54.25, + "step": 2063, + "token_acc": 0.8156996587030717, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.66048, + "grad_norm": 0.5966705591677498, + "learning_rate": 4.614629296715492e-06, + "loss": 0.31806522607803345, + "memory(GiB)": 54.25, + "step": 2064, + "token_acc": 0.9099232132309509, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.6608, + "grad_norm": 0.6214278474110535, + "learning_rate": 4.614158756900624e-06, + "loss": 0.45688724517822266, + "memory(GiB)": 54.25, + "step": 2065, + "token_acc": 0.8472968700600696, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.66112, + "grad_norm": 0.8074573347871916, + "learning_rate": 4.613687954015374e-06, + "loss": 0.38269513845443726, + "memory(GiB)": 54.25, + "step": 2066, + "token_acc": 0.9007473143390938, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.66144, + "grad_norm": 0.7581864113087823, + "learning_rate": 4.613216888118326e-06, + "loss": 0.38170015811920166, + "memory(GiB)": 54.25, + "step": 2067, + "token_acc": 0.9175946547884187, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.66176, + "grad_norm": 0.62431133056928, + "learning_rate": 4.612745559268095e-06, + "loss": 0.3905591666698456, + "memory(GiB)": 54.25, + "step": 2068, + "token_acc": 0.8775367389783065, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.66208, + "grad_norm": 0.6332906295747384, + "learning_rate": 4.612273967523328e-06, + "loss": 0.48065271973609924, + "memory(GiB)": 54.25, + "step": 2069, + "token_acc": 0.8532981530343008, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.6624, + "grad_norm": 0.6753326261922787, + "learning_rate": 4.61180211294271e-06, + "loss": 0.4285285770893097, + "memory(GiB)": 54.25, + "step": 2070, + "token_acc": 0.8654406409322651, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.66272, + "grad_norm": 0.61176556671619, + "learning_rate": 4.611329995584953e-06, + "loss": 0.3361331522464752, + "memory(GiB)": 54.25, + "step": 2071, + "token_acc": 0.9166134185303514, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.66304, + "grad_norm": 0.6202813806225204, + "learning_rate": 4.6108576155088045e-06, + "loss": 0.4028382897377014, + "memory(GiB)": 54.25, + "step": 2072, + "token_acc": 0.929745493107105, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.66336, + "grad_norm": 0.6260606104232999, + "learning_rate": 4.610384972773043e-06, + "loss": 0.328859806060791, + "memory(GiB)": 54.25, + "step": 2073, + "token_acc": 0.9050887573964497, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.66368, + "grad_norm": 0.5670334579926387, + "learning_rate": 4.6099120674364815e-06, + "loss": 0.35724568367004395, + "memory(GiB)": 54.25, + "step": 2074, + "token_acc": 0.8738385298368779, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.664, + "grad_norm": 0.5858668023184108, + "learning_rate": 4.609438899557964e-06, + "loss": 0.3535904884338379, + "memory(GiB)": 54.25, + "step": 2075, + "token_acc": 0.9038951583545686, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.66432, + "grad_norm": 0.5695502775357897, + "learning_rate": 4.6089654691963704e-06, + "loss": 0.2902165353298187, + "memory(GiB)": 54.25, + "step": 
2076, + "token_acc": 0.924122926298613, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.66464, + "grad_norm": 0.6152175153317431, + "learning_rate": 4.608491776410608e-06, + "loss": 0.3361961245536804, + "memory(GiB)": 54.25, + "step": 2077, + "token_acc": 0.9079025549613785, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.66496, + "grad_norm": 0.6420348981996388, + "learning_rate": 4.6080178212596215e-06, + "loss": 0.3566691279411316, + "memory(GiB)": 54.25, + "step": 2078, + "token_acc": 0.8826197901899632, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.66528, + "grad_norm": 0.5914700276320629, + "learning_rate": 4.607543603802384e-06, + "loss": 0.35351434350013733, + "memory(GiB)": 54.25, + "step": 2079, + "token_acc": 0.8895429362880887, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.6656, + "grad_norm": 0.6331932894411402, + "learning_rate": 4.607069124097908e-06, + "loss": 0.3798407316207886, + "memory(GiB)": 54.25, + "step": 2080, + "token_acc": 0.9367547952306895, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.66592, + "grad_norm": 0.6588887835998242, + "learning_rate": 4.60659438220523e-06, + "loss": 0.34420979022979736, + "memory(GiB)": 54.25, + "step": 2081, + "token_acc": 0.8739616155829275, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.66624, + "grad_norm": 0.6064299671895841, + "learning_rate": 4.6061193781834254e-06, + "loss": 0.3535541892051697, + "memory(GiB)": 66.66, + "step": 2082, + "token_acc": 0.9264341528492472, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.66656, + "grad_norm": 0.6643955778897458, + "learning_rate": 4.605644112091601e-06, + "loss": 0.4716654419898987, + "memory(GiB)": 66.66, + "step": 2083, + "token_acc": 0.915096915096915, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.66688, + "grad_norm": 0.7427422104981996, + "learning_rate": 4.605168583988893e-06, + "loss": 0.48347264528274536, + "memory(GiB)": 66.66, + "step": 2084, + "token_acc": 0.7676503548748599, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.6672, + "grad_norm": 0.5998895369659307, + "learning_rate": 4.6046927939344744e-06, + "loss": 0.36312007904052734, + "memory(GiB)": 66.66, + "step": 2085, + "token_acc": 0.90764684152198, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.66752, + "grad_norm": 0.6299176056002245, + "learning_rate": 4.6042167419875485e-06, + "loss": 0.3303380310535431, + "memory(GiB)": 66.66, + "step": 2086, + "token_acc": 0.9118273841236015, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.66784, + "grad_norm": 0.6390386712915288, + "learning_rate": 4.603740428207352e-06, + "loss": 0.3847258687019348, + "memory(GiB)": 66.66, + "step": 2087, + "token_acc": 0.8677660236465463, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.66816, + "grad_norm": 0.6280629636749778, + "learning_rate": 4.603263852653154e-06, + "loss": 0.4362924098968506, + "memory(GiB)": 66.66, + "step": 2088, + "token_acc": 0.8034188034188035, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.66848, + "grad_norm": 0.6700819965253005, + "learning_rate": 4.602787015384255e-06, + "loss": 0.34015822410583496, + "memory(GiB)": 66.66, + "step": 2089, + "token_acc": 0.9022692889561271, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.6688, + "grad_norm": 0.6197654859324425, + "learning_rate": 4.60230991645999e-06, + "loss": 0.36401107907295227, + "memory(GiB)": 66.66, + "step": 2090, + "token_acc": 0.9431230610134437, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.66912, + "grad_norm": 
0.62961440867017, + "learning_rate": 4.601832555939726e-06, + "loss": 0.3315191864967346, + "memory(GiB)": 66.66, + "step": 2091, + "token_acc": 0.9357326478149101, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.66944, + "grad_norm": 0.6021007071939545, + "learning_rate": 4.601354933882861e-06, + "loss": 0.33663082122802734, + "memory(GiB)": 66.66, + "step": 2092, + "token_acc": 0.9345117357287742, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.66976, + "grad_norm": 0.6760393530208358, + "learning_rate": 4.6008770503488275e-06, + "loss": 0.35370802879333496, + "memory(GiB)": 66.66, + "step": 2093, + "token_acc": 0.8898993179603768, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.67008, + "grad_norm": 0.5831127111204929, + "learning_rate": 4.6003989053970905e-06, + "loss": 0.3690981864929199, + "memory(GiB)": 66.66, + "step": 2094, + "token_acc": 0.8812383239925273, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.6704, + "grad_norm": 0.6396477893428273, + "learning_rate": 4.599920499087146e-06, + "loss": 0.4141695499420166, + "memory(GiB)": 66.66, + "step": 2095, + "token_acc": 0.918219944082013, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.67072, + "grad_norm": 0.5961902936136415, + "learning_rate": 4.599441831478523e-06, + "loss": 0.30224013328552246, + "memory(GiB)": 66.66, + "step": 2096, + "token_acc": 0.9240362811791383, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.67104, + "grad_norm": 0.6497351755364918, + "learning_rate": 4.598962902630784e-06, + "loss": 0.4059687554836273, + "memory(GiB)": 66.66, + "step": 2097, + "token_acc": 0.8451563691838292, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.67136, + "grad_norm": 0.6936529270202738, + "learning_rate": 4.598483712603524e-06, + "loss": 0.4416879415512085, + "memory(GiB)": 66.66, + "step": 2098, + "token_acc": 0.8608247422680413, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.67168, + "grad_norm": 0.6507534775731216, + "learning_rate": 4.5980042614563695e-06, + "loss": 0.37378990650177, + "memory(GiB)": 66.66, + "step": 2099, + "token_acc": 0.912041884816754, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.672, + "grad_norm": 0.6320941759509382, + "learning_rate": 4.597524549248979e-06, + "loss": 0.39986032247543335, + "memory(GiB)": 66.66, + "step": 2100, + "token_acc": 0.8616517622304051, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.67232, + "grad_norm": 0.6173746528757661, + "learning_rate": 4.597044576041045e-06, + "loss": 0.3840849995613098, + "memory(GiB)": 66.66, + "step": 2101, + "token_acc": 0.8965050121918179, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.67264, + "grad_norm": 0.6380635544152559, + "learning_rate": 4.596564341892292e-06, + "loss": 0.2522818446159363, + "memory(GiB)": 66.66, + "step": 2102, + "token_acc": 0.9172899252089749, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.67296, + "grad_norm": 0.604750374436301, + "learning_rate": 4.596083846862476e-06, + "loss": 0.4752596616744995, + "memory(GiB)": 66.66, + "step": 2103, + "token_acc": 0.8619367209971237, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.67328, + "grad_norm": 0.6328637942629206, + "learning_rate": 4.5956030910113875e-06, + "loss": 0.33216527104377747, + "memory(GiB)": 66.66, + "step": 2104, + "token_acc": 0.8018300024078979, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.6736, + "grad_norm": 0.6554307736943312, + "learning_rate": 4.595122074398848e-06, + "loss": 0.3700454831123352, + "memory(GiB)": 66.66, + "step": 
2105, + "token_acc": 0.8256743256743256, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.67392, + "grad_norm": 0.6589429896060419, + "learning_rate": 4.59464079708471e-06, + "loss": 0.4724145531654358, + "memory(GiB)": 66.66, + "step": 2106, + "token_acc": 0.8332036316472115, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.67424, + "grad_norm": 0.6437928932708973, + "learning_rate": 4.594159259128862e-06, + "loss": 0.4298544228076935, + "memory(GiB)": 66.66, + "step": 2107, + "token_acc": 0.815004659832246, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.67456, + "grad_norm": 0.5906437413685823, + "learning_rate": 4.593677460591223e-06, + "loss": 0.31621092557907104, + "memory(GiB)": 66.66, + "step": 2108, + "token_acc": 0.9021419571608568, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.67488, + "grad_norm": 0.6214903370505065, + "learning_rate": 4.593195401531743e-06, + "loss": 0.36188435554504395, + "memory(GiB)": 66.66, + "step": 2109, + "token_acc": 0.914664202094886, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.6752, + "grad_norm": 0.6543737259312984, + "learning_rate": 4.592713082010407e-06, + "loss": 0.296764075756073, + "memory(GiB)": 66.66, + "step": 2110, + "token_acc": 0.857095709570957, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.67552, + "grad_norm": 0.6639010350610421, + "learning_rate": 4.5922305020872314e-06, + "loss": 0.4564563035964966, + "memory(GiB)": 66.66, + "step": 2111, + "token_acc": 0.85041430440471, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.67584, + "grad_norm": 0.5760642010426181, + "learning_rate": 4.5917476618222644e-06, + "loss": 0.363985538482666, + "memory(GiB)": 66.66, + "step": 2112, + "token_acc": 0.9061980527190691, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.67616, + "grad_norm": 0.6407755681072149, + "learning_rate": 4.591264561275588e-06, + "loss": 0.3577921390533447, + "memory(GiB)": 66.66, + "step": 2113, + "token_acc": 0.9134328358208955, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.67648, + "grad_norm": 0.6198783869496914, + "learning_rate": 4.590781200507314e-06, + "loss": 0.3807160258293152, + "memory(GiB)": 66.66, + "step": 2114, + "token_acc": 0.8914073071718539, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.6768, + "grad_norm": 0.6428905914114583, + "learning_rate": 4.59029757957759e-06, + "loss": 0.329864501953125, + "memory(GiB)": 66.66, + "step": 2115, + "token_acc": 0.8403108548508398, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.67712, + "grad_norm": 0.6413959749885232, + "learning_rate": 4.589813698546592e-06, + "loss": 0.37567228078842163, + "memory(GiB)": 66.66, + "step": 2116, + "token_acc": 0.9140340218712029, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.67744, + "grad_norm": 0.6259933403665949, + "learning_rate": 4.589329557474533e-06, + "loss": 0.3571789860725403, + "memory(GiB)": 66.66, + "step": 2117, + "token_acc": 0.839509612705489, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.67776, + "grad_norm": 0.611094912215572, + "learning_rate": 4.5888451564216555e-06, + "loss": 0.33005228638648987, + "memory(GiB)": 66.66, + "step": 2118, + "token_acc": 0.7807123818754543, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.67808, + "grad_norm": 0.6510612677512541, + "learning_rate": 4.588360495448234e-06, + "loss": 0.42811495065689087, + "memory(GiB)": 66.66, + "step": 2119, + "token_acc": 0.8782435129740519, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.6784, + "grad_norm": 
0.6961962684133498, + "learning_rate": 4.587875574614576e-06, + "loss": 0.44399771094322205, + "memory(GiB)": 66.66, + "step": 2120, + "token_acc": 0.880957810718358, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.67872, + "grad_norm": 0.6138490688465464, + "learning_rate": 4.587390393981023e-06, + "loss": 0.31776294112205505, + "memory(GiB)": 66.66, + "step": 2121, + "token_acc": 0.8728699551569506, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.67904, + "grad_norm": 0.6165062113582763, + "learning_rate": 4.586904953607946e-06, + "loss": 0.38529279828071594, + "memory(GiB)": 66.66, + "step": 2122, + "token_acc": 0.9003677699765965, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.67936, + "grad_norm": 0.6756093265742715, + "learning_rate": 4.5864192535557494e-06, + "loss": 0.4088912010192871, + "memory(GiB)": 66.66, + "step": 2123, + "token_acc": 0.9281702630801064, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.67968, + "grad_norm": 0.6291993465033577, + "learning_rate": 4.585933293884871e-06, + "loss": 0.39276057481765747, + "memory(GiB)": 66.66, + "step": 2124, + "token_acc": 0.8942891859052248, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.68, + "grad_norm": 0.6245822991004416, + "learning_rate": 4.585447074655779e-06, + "loss": 0.3790951371192932, + "memory(GiB)": 66.66, + "step": 2125, + "token_acc": 0.8757196744093707, + "train_speed(iter/s)": 0.241921 + }, + { + "epoch": 0.68032, + "grad_norm": 0.6352966455771603, + "learning_rate": 4.584960595928977e-06, + "loss": 0.29404351115226746, + "memory(GiB)": 66.66, + "step": 2126, + "token_acc": 0.9338810641627543, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.68064, + "grad_norm": 0.6552007807555748, + "learning_rate": 4.584473857764996e-06, + "loss": 0.44398263096809387, + "memory(GiB)": 66.66, + "step": 2127, + "token_acc": 0.9443234836702955, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.68096, + "grad_norm": 0.6315181241889366, + "learning_rate": 4.583986860224405e-06, + "loss": 0.4832325279712677, + "memory(GiB)": 66.66, + "step": 2128, + "token_acc": 0.8283447529751172, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.68128, + "grad_norm": 0.5894806224248129, + "learning_rate": 4.5834996033678e-06, + "loss": 0.3831925094127655, + "memory(GiB)": 66.66, + "step": 2129, + "token_acc": 0.8548098434004474, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.6816, + "grad_norm": 0.6098435935138452, + "learning_rate": 4.583012087255813e-06, + "loss": 0.42164528369903564, + "memory(GiB)": 66.66, + "step": 2130, + "token_acc": 0.9367122552963261, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.68192, + "grad_norm": 0.6602889824029997, + "learning_rate": 4.582524311949107e-06, + "loss": 0.44841066002845764, + "memory(GiB)": 66.66, + "step": 2131, + "token_acc": 0.839123006833713, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.68224, + "grad_norm": 0.5741619111738433, + "learning_rate": 4.582036277508376e-06, + "loss": 0.3732197880744934, + "memory(GiB)": 66.66, + "step": 2132, + "token_acc": 0.9272415482606565, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.68256, + "grad_norm": 0.6396698595001332, + "learning_rate": 4.581547983994349e-06, + "loss": 0.3272702693939209, + "memory(GiB)": 66.66, + "step": 2133, + "token_acc": 0.9139240506329114, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.68288, + "grad_norm": 0.6178833976341476, + "learning_rate": 4.581059431467784e-06, + "loss": 0.36985206604003906, + "memory(GiB)": 66.66, + 
"step": 2134, + "token_acc": 0.8716012084592145, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.6832, + "grad_norm": 0.6753932103086924, + "learning_rate": 4.580570619989474e-06, + "loss": 0.37054723501205444, + "memory(GiB)": 66.66, + "step": 2135, + "token_acc": 0.9228876127973749, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.68352, + "grad_norm": 0.6619880898521451, + "learning_rate": 4.5800815496202436e-06, + "loss": 0.33415859937667847, + "memory(GiB)": 66.66, + "step": 2136, + "token_acc": 0.865278628291488, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.68384, + "grad_norm": 0.6059879257986326, + "learning_rate": 4.579592220420948e-06, + "loss": 0.3559541702270508, + "memory(GiB)": 66.66, + "step": 2137, + "token_acc": 0.9437180216031836, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.68416, + "grad_norm": 0.6561693550648184, + "learning_rate": 4.579102632452476e-06, + "loss": 0.26052045822143555, + "memory(GiB)": 66.66, + "step": 2138, + "token_acc": 0.9216867469879518, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.68448, + "grad_norm": 0.6315726122957034, + "learning_rate": 4.578612785775748e-06, + "loss": 0.31627053022384644, + "memory(GiB)": 66.66, + "step": 2139, + "token_acc": 0.9206174200661521, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.6848, + "grad_norm": 0.6358594123971809, + "learning_rate": 4.578122680451718e-06, + "loss": 0.37110453844070435, + "memory(GiB)": 66.66, + "step": 2140, + "token_acc": 0.8277375047691721, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.68512, + "grad_norm": 0.6551230669070177, + "learning_rate": 4.577632316541369e-06, + "loss": 0.38707032799720764, + "memory(GiB)": 66.66, + "step": 2141, + "token_acc": 0.918939393939394, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.68544, + "grad_norm": 0.6891598983168619, + "learning_rate": 4.577141694105721e-06, + "loss": 0.3869237005710602, + "memory(GiB)": 66.66, + "step": 2142, + "token_acc": 0.8334698893895944, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.68576, + "grad_norm": 0.6401254030796424, + "learning_rate": 4.576650813205823e-06, + "loss": 0.3270450234413147, + "memory(GiB)": 66.66, + "step": 2143, + "token_acc": 0.9494692144373673, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.68608, + "grad_norm": 0.6542137285421442, + "learning_rate": 4.576159673902755e-06, + "loss": 0.3280731439590454, + "memory(GiB)": 66.66, + "step": 2144, + "token_acc": 0.922683923705722, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.6864, + "grad_norm": 0.6557418636640218, + "learning_rate": 4.575668276257631e-06, + "loss": 0.447399765253067, + "memory(GiB)": 66.66, + "step": 2145, + "token_acc": 0.9373529411764706, + "train_speed(iter/s)": 0.241873 + }, + { + "epoch": 0.68672, + "grad_norm": 0.6364094545133695, + "learning_rate": 4.5751766203315975e-06, + "loss": 0.3775416612625122, + "memory(GiB)": 66.66, + "step": 2146, + "token_acc": 0.8839928057553957, + "train_speed(iter/s)": 0.241873 + }, + { + "epoch": 0.68704, + "grad_norm": 0.6495127532463587, + "learning_rate": 4.574684706185834e-06, + "loss": 0.3963262438774109, + "memory(GiB)": 66.66, + "step": 2147, + "token_acc": 0.8719646799116998, + "train_speed(iter/s)": 0.241877 + }, + { + "epoch": 0.68736, + "grad_norm": 0.5978300170603517, + "learning_rate": 4.574192533881547e-06, + "loss": 0.3631603717803955, + "memory(GiB)": 66.66, + "step": 2148, + "token_acc": 0.9315211709357031, + "train_speed(iter/s)": 0.241877 + }, + { + "epoch": 0.68768, + 
"grad_norm": 0.6686374109879806, + "learning_rate": 4.573700103479983e-06, + "loss": 0.36967140436172485, + "memory(GiB)": 66.66, + "step": 2149, + "token_acc": 0.8543098370312193, + "train_speed(iter/s)": 0.24188 + }, + { + "epoch": 0.688, + "grad_norm": 0.7135694520382568, + "learning_rate": 4.5732074150424135e-06, + "loss": 0.37544798851013184, + "memory(GiB)": 66.66, + "step": 2150, + "token_acc": 0.9271317829457364, + "train_speed(iter/s)": 0.241887 + }, + { + "epoch": 0.68832, + "grad_norm": 0.5935127277034095, + "learning_rate": 4.572714468630146e-06, + "loss": 0.3864448070526123, + "memory(GiB)": 66.66, + "step": 2151, + "token_acc": 0.8717166979362101, + "train_speed(iter/s)": 0.241886 + }, + { + "epoch": 0.68864, + "grad_norm": 0.5983321331797017, + "learning_rate": 4.5722212643045194e-06, + "loss": 0.3286162316799164, + "memory(GiB)": 66.66, + "step": 2152, + "token_acc": 0.9300378173960021, + "train_speed(iter/s)": 0.241889 + }, + { + "epoch": 0.68896, + "grad_norm": 0.6804548189642188, + "learning_rate": 4.571727802126904e-06, + "loss": 0.39980602264404297, + "memory(GiB)": 66.66, + "step": 2153, + "token_acc": 0.8805918788713007, + "train_speed(iter/s)": 0.241893 + }, + { + "epoch": 0.68928, + "grad_norm": 0.7394006530736564, + "learning_rate": 4.571234082158703e-06, + "loss": 0.4268699288368225, + "memory(GiB)": 66.66, + "step": 2154, + "token_acc": 0.8925178147268409, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.6896, + "grad_norm": 0.6407744831797149, + "learning_rate": 4.57074010446135e-06, + "loss": 0.40289071202278137, + "memory(GiB)": 66.66, + "step": 2155, + "token_acc": 0.9043311403508771, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.68992, + "grad_norm": 0.7235008284851321, + "learning_rate": 4.570245869096314e-06, + "loss": 0.4115726053714752, + "memory(GiB)": 66.66, + "step": 2156, + "token_acc": 0.8900892437764208, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.69024, + "grad_norm": 0.583319221936498, + "learning_rate": 4.569751376125093e-06, + "loss": 0.3529212176799774, + "memory(GiB)": 66.66, + "step": 2157, + "token_acc": 0.9405144694533762, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.69056, + "grad_norm": 0.6465397901665138, + "learning_rate": 4.5692566256092176e-06, + "loss": 0.3817978501319885, + "memory(GiB)": 66.66, + "step": 2158, + "token_acc": 0.8673122219314315, + "train_speed(iter/s)": 0.241892 + }, + { + "epoch": 0.69088, + "grad_norm": 0.652315674744363, + "learning_rate": 4.568761617610251e-06, + "loss": 0.3009967803955078, + "memory(GiB)": 66.66, + "step": 2159, + "token_acc": 0.9485148514851485, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.6912, + "grad_norm": 0.6112313858557129, + "learning_rate": 4.5682663521897895e-06, + "loss": 0.3448061943054199, + "memory(GiB)": 66.66, + "step": 2160, + "token_acc": 0.9157455170719725, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.69152, + "grad_norm": 0.9919316397759345, + "learning_rate": 4.567770829409459e-06, + "loss": 0.39226585626602173, + "memory(GiB)": 66.66, + "step": 2161, + "token_acc": 0.9122306717363752, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.69184, + "grad_norm": 0.6346811278650359, + "learning_rate": 4.5672750493309196e-06, + "loss": 0.3229006826877594, + "memory(GiB)": 66.66, + "step": 2162, + "token_acc": 0.8937774984286612, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.69216, + "grad_norm": 0.6356076041568732, + "learning_rate": 4.566779012015862e-06, + "loss": 0.3478655517101288, + "memory(GiB)": 
66.66, + "step": 2163, + "token_acc": 0.8820162887959498, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.69248, + "grad_norm": 0.6678393359999601, + "learning_rate": 4.56628271752601e-06, + "loss": 0.3077484965324402, + "memory(GiB)": 66.66, + "step": 2164, + "token_acc": 0.9063786008230452, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.6928, + "grad_norm": 0.7020514049375605, + "learning_rate": 4.565786165923119e-06, + "loss": 0.38340437412261963, + "memory(GiB)": 66.66, + "step": 2165, + "token_acc": 0.8737422137038812, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.69312, + "grad_norm": 0.6325057405887875, + "learning_rate": 4.565289357268974e-06, + "loss": 0.43164825439453125, + "memory(GiB)": 66.66, + "step": 2166, + "token_acc": 0.8840655055887705, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.69344, + "grad_norm": 0.6173935360101478, + "learning_rate": 4.564792291625397e-06, + "loss": 0.36961185932159424, + "memory(GiB)": 66.66, + "step": 2167, + "token_acc": 0.8776559287183002, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.69376, + "grad_norm": 0.6179602421001225, + "learning_rate": 4.564294969054237e-06, + "loss": 0.3414817452430725, + "memory(GiB)": 66.66, + "step": 2168, + "token_acc": 0.9444781553398058, + "train_speed(iter/s)": 0.241895 + }, + { + "epoch": 0.69408, + "grad_norm": 0.6236960103018123, + "learning_rate": 4.5637973896173794e-06, + "loss": 0.35302066802978516, + "memory(GiB)": 66.66, + "step": 2169, + "token_acc": 0.9491392801251957, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.6944, + "grad_norm": 0.6795697130874947, + "learning_rate": 4.563299553376738e-06, + "loss": 0.4218558073043823, + "memory(GiB)": 66.66, + "step": 2170, + "token_acc": 0.8102564102564103, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.69472, + "grad_norm": 0.6149009311068019, + "learning_rate": 4.56280146039426e-06, + "loss": 0.41696619987487793, + "memory(GiB)": 66.66, + "step": 2171, + "token_acc": 0.8622361665715915, + "train_speed(iter/s)": 0.241889 + }, + { + "epoch": 0.69504, + "grad_norm": 0.5795035375731575, + "learning_rate": 4.5623031107319245e-06, + "loss": 0.3453983664512634, + "memory(GiB)": 66.66, + "step": 2172, + "token_acc": 0.8467005076142132, + "train_speed(iter/s)": 0.241889 + }, + { + "epoch": 0.69536, + "grad_norm": 0.6437685001010309, + "learning_rate": 4.5618045044517425e-06, + "loss": 0.35478460788726807, + "memory(GiB)": 66.66, + "step": 2173, + "token_acc": 0.8970641536788692, + "train_speed(iter/s)": 0.241893 + }, + { + "epoch": 0.69568, + "grad_norm": 0.6565994530264878, + "learning_rate": 4.561305641615756e-06, + "loss": 0.29987362027168274, + "memory(GiB)": 66.66, + "step": 2174, + "token_acc": 0.872412815423873, + "train_speed(iter/s)": 0.241893 + }, + { + "epoch": 0.696, + "grad_norm": 0.6455429220383431, + "learning_rate": 4.560806522286042e-06, + "loss": 0.36573559045791626, + "memory(GiB)": 66.66, + "step": 2175, + "token_acc": 0.9331164135936371, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.69632, + "grad_norm": 0.6545881923823849, + "learning_rate": 4.560307146524706e-06, + "loss": 0.3873975872993469, + "memory(GiB)": 66.66, + "step": 2176, + "token_acc": 0.9144503546099291, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.69664, + "grad_norm": 0.6050089415052097, + "learning_rate": 4.5598075143938855e-06, + "loss": 0.42902037501335144, + "memory(GiB)": 66.66, + "step": 2177, + "token_acc": 0.8939354838709678, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.69696, 
+ "grad_norm": 0.6593991328452867, + "learning_rate": 4.559307625955754e-06, + "loss": 0.3220551013946533, + "memory(GiB)": 66.66, + "step": 2178, + "token_acc": 0.8699708454810495, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.69728, + "grad_norm": 0.6854891283732276, + "learning_rate": 4.558807481272511e-06, + "loss": 0.34670785069465637, + "memory(GiB)": 66.66, + "step": 2179, + "token_acc": 0.913909224011713, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.6976, + "grad_norm": 0.6509823714976329, + "learning_rate": 4.5583070804063915e-06, + "loss": 0.40689513087272644, + "memory(GiB)": 66.66, + "step": 2180, + "token_acc": 0.8947475570032574, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.69792, + "grad_norm": 0.5858818462219659, + "learning_rate": 4.5578064234196615e-06, + "loss": 0.2752559185028076, + "memory(GiB)": 66.66, + "step": 2181, + "token_acc": 0.9388349514563107, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.69824, + "grad_norm": 0.6838208909284136, + "learning_rate": 4.557305510374621e-06, + "loss": 0.4067864418029785, + "memory(GiB)": 66.66, + "step": 2182, + "token_acc": 0.8828282828282829, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.69856, + "grad_norm": 0.6213043548815798, + "learning_rate": 4.5568043413335985e-06, + "loss": 0.3504504859447479, + "memory(GiB)": 66.66, + "step": 2183, + "token_acc": 0.8715083798882681, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.69888, + "grad_norm": 0.5970008735609831, + "learning_rate": 4.5563029163589555e-06, + "loss": 0.3025510311126709, + "memory(GiB)": 66.66, + "step": 2184, + "token_acc": 0.8812911446166914, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.6992, + "grad_norm": 0.7532248304271942, + "learning_rate": 4.555801235513087e-06, + "loss": 0.3334563672542572, + "memory(GiB)": 66.66, + "step": 2185, + "token_acc": 0.9435975609756098, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.69952, + "grad_norm": 0.6681633657240745, + "learning_rate": 4.555299298858416e-06, + "loss": 0.3974588215351105, + "memory(GiB)": 66.66, + "step": 2186, + "token_acc": 0.9030718759320012, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.69984, + "grad_norm": 0.6827054837797257, + "learning_rate": 4.554797106457402e-06, + "loss": 0.40698710083961487, + "memory(GiB)": 66.66, + "step": 2187, + "token_acc": 0.9048567870485679, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.70016, + "grad_norm": 0.6014070182006518, + "learning_rate": 4.554294658372535e-06, + "loss": 0.27859893441200256, + "memory(GiB)": 66.66, + "step": 2188, + "token_acc": 0.8677124795129946, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.70048, + "grad_norm": 0.6343320305964928, + "learning_rate": 4.553791954666333e-06, + "loss": 0.39292633533477783, + "memory(GiB)": 66.66, + "step": 2189, + "token_acc": 0.9112812850205454, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.7008, + "grad_norm": 0.573313086811636, + "learning_rate": 4.553288995401349e-06, + "loss": 0.3936372399330139, + "memory(GiB)": 66.66, + "step": 2190, + "token_acc": 0.9084479371316306, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.70112, + "grad_norm": 0.5958071780222463, + "learning_rate": 4.552785780640171e-06, + "loss": 0.3080025315284729, + "memory(GiB)": 66.66, + "step": 2191, + "token_acc": 0.9431898376852506, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.70144, + "grad_norm": 0.6504987273680198, + "learning_rate": 4.552282310445412e-06, + "loss": 0.40135809779167175, + 
"memory(GiB)": 66.66, + "step": 2192, + "token_acc": 0.8847638847638848, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.70176, + "grad_norm": 0.6997619546883974, + "learning_rate": 4.551778584879722e-06, + "loss": 0.40952008962631226, + "memory(GiB)": 66.66, + "step": 2193, + "token_acc": 0.9224852071005917, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.70208, + "grad_norm": 0.679337322804823, + "learning_rate": 4.55127460400578e-06, + "loss": 0.45890527963638306, + "memory(GiB)": 66.66, + "step": 2194, + "token_acc": 0.8474709702814407, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.7024, + "grad_norm": 0.6329329452075569, + "learning_rate": 4.550770367886297e-06, + "loss": 0.35766366124153137, + "memory(GiB)": 66.66, + "step": 2195, + "token_acc": 0.8956259426847663, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.70272, + "grad_norm": 0.7131022307849278, + "learning_rate": 4.550265876584018e-06, + "loss": 0.30238479375839233, + "memory(GiB)": 66.66, + "step": 2196, + "token_acc": 0.9437533802055165, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.70304, + "grad_norm": 0.6919828206559959, + "learning_rate": 4.5497611301617175e-06, + "loss": 0.4196329712867737, + "memory(GiB)": 66.66, + "step": 2197, + "token_acc": 0.8698024948024948, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.70336, + "grad_norm": 0.6070581187988257, + "learning_rate": 4.549256128682201e-06, + "loss": 0.38020047545433044, + "memory(GiB)": 66.66, + "step": 2198, + "token_acc": 0.8403698534542917, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.70368, + "grad_norm": 0.659646946027682, + "learning_rate": 4.54875087220831e-06, + "loss": 0.4337690472602844, + "memory(GiB)": 66.66, + "step": 2199, + "token_acc": 0.8148969889064976, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.704, + "grad_norm": 0.6254982812238143, + "learning_rate": 4.548245360802913e-06, + "loss": 0.4367392063140869, + "memory(GiB)": 66.66, + "step": 2200, + "token_acc": 0.8356401384083045, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.70432, + "grad_norm": 0.617671228353355, + "learning_rate": 4.5477395945289136e-06, + "loss": 0.3836784362792969, + "memory(GiB)": 66.66, + "step": 2201, + "token_acc": 0.8639112903225806, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.70464, + "grad_norm": 0.6779697407643356, + "learning_rate": 4.547233573449243e-06, + "loss": 0.36723485589027405, + "memory(GiB)": 66.66, + "step": 2202, + "token_acc": 0.9276116168183789, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.70496, + "grad_norm": 0.6528060437939257, + "learning_rate": 4.54672729762687e-06, + "loss": 0.40992218255996704, + "memory(GiB)": 66.66, + "step": 2203, + "token_acc": 0.8893645044451761, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.70528, + "grad_norm": 0.6777194547580365, + "learning_rate": 4.5462207671247885e-06, + "loss": 0.405839741230011, + "memory(GiB)": 66.66, + "step": 2204, + "token_acc": 0.9237046103631171, + "train_speed(iter/s)": 0.241921 + }, + { + "epoch": 0.7056, + "grad_norm": 0.6225565024039299, + "learning_rate": 4.5457139820060305e-06, + "loss": 0.40161317586898804, + "memory(GiB)": 66.66, + "step": 2205, + "token_acc": 0.882208253621208, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.70592, + "grad_norm": 0.682001169194645, + "learning_rate": 4.545206942333654e-06, + "loss": 0.43687495589256287, + "memory(GiB)": 66.66, + "step": 2206, + "token_acc": 0.9508888219343176, + "train_speed(iter/s)": 0.241915 + }, + { + 
"epoch": 0.70624, + "grad_norm": 0.6215674740415579, + "learning_rate": 4.544699648170754e-06, + "loss": 0.34620827436447144, + "memory(GiB)": 66.66, + "step": 2207, + "token_acc": 0.9444444444444444, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.70656, + "grad_norm": 0.7143435967177282, + "learning_rate": 4.544192099580452e-06, + "loss": 0.4370485544204712, + "memory(GiB)": 66.66, + "step": 2208, + "token_acc": 0.8678294573643411, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.70688, + "grad_norm": 0.6177605724855207, + "learning_rate": 4.543684296625906e-06, + "loss": 0.407867968082428, + "memory(GiB)": 66.66, + "step": 2209, + "token_acc": 0.9063291139240506, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.7072, + "grad_norm": 0.586595141651053, + "learning_rate": 4.543176239370301e-06, + "loss": 0.3397759795188904, + "memory(GiB)": 66.66, + "step": 2210, + "token_acc": 0.9142526071842411, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.70752, + "grad_norm": 0.6386978105492691, + "learning_rate": 4.542667927876856e-06, + "loss": 0.34699517488479614, + "memory(GiB)": 66.66, + "step": 2211, + "token_acc": 0.8712121212121212, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.70784, + "grad_norm": 0.6417261442802997, + "learning_rate": 4.542159362208823e-06, + "loss": 0.4436630606651306, + "memory(GiB)": 66.66, + "step": 2212, + "token_acc": 0.9385382059800664, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.70816, + "grad_norm": 0.6347990776474527, + "learning_rate": 4.541650542429484e-06, + "loss": 0.3466145396232605, + "memory(GiB)": 66.66, + "step": 2213, + "token_acc": 0.9449311639549437, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.70848, + "grad_norm": 0.6573763879598704, + "learning_rate": 4.541141468602151e-06, + "loss": 0.30758148431777954, + "memory(GiB)": 66.66, + "step": 2214, + "token_acc": 0.9420745397395599, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.7088, + "grad_norm": 0.7035874929392837, + "learning_rate": 4.540632140790173e-06, + "loss": 0.39630067348480225, + "memory(GiB)": 66.66, + "step": 2215, + "token_acc": 0.8758248350329934, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.70912, + "grad_norm": 0.6889804103049396, + "learning_rate": 4.540122559056923e-06, + "loss": 0.3702808618545532, + "memory(GiB)": 66.66, + "step": 2216, + "token_acc": 0.9068873780668046, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.70944, + "grad_norm": 0.6668932486008611, + "learning_rate": 4.5396127234658124e-06, + "loss": 0.37195855379104614, + "memory(GiB)": 66.66, + "step": 2217, + "token_acc": 0.9494561740243123, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.70976, + "grad_norm": 0.5857549036509433, + "learning_rate": 4.5391026340802796e-06, + "loss": 0.39823824167251587, + "memory(GiB)": 66.66, + "step": 2218, + "token_acc": 0.925281473899693, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.71008, + "grad_norm": 0.6528273695584925, + "learning_rate": 4.538592290963797e-06, + "loss": 0.43834370374679565, + "memory(GiB)": 66.66, + "step": 2219, + "token_acc": 0.8799736928641894, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.7104, + "grad_norm": 0.6317006036478464, + "learning_rate": 4.538081694179869e-06, + "loss": 0.36580684781074524, + "memory(GiB)": 66.66, + "step": 2220, + "token_acc": 0.9050596930073905, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.71072, + "grad_norm": 0.6053253026498918, + "learning_rate": 4.537570843792028e-06, + "loss": 
0.3012773394584656, + "memory(GiB)": 66.66, + "step": 2221, + "token_acc": 0.9231301939058172, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.71104, + "grad_norm": 0.6700079060276917, + "learning_rate": 4.537059739863844e-06, + "loss": 0.4506133198738098, + "memory(GiB)": 66.66, + "step": 2222, + "token_acc": 0.8707671043538355, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.71136, + "grad_norm": 0.6461038507562129, + "learning_rate": 4.5365483824589115e-06, + "loss": 0.34407898783683777, + "memory(GiB)": 66.66, + "step": 2223, + "token_acc": 0.867485624673288, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.71168, + "grad_norm": 0.5727473305449349, + "learning_rate": 4.5360367716408625e-06, + "loss": 0.26562780141830444, + "memory(GiB)": 66.66, + "step": 2224, + "token_acc": 0.9195979899497487, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.712, + "grad_norm": 0.6310716859097847, + "learning_rate": 4.5355249074733574e-06, + "loss": 0.44165873527526855, + "memory(GiB)": 66.66, + "step": 2225, + "token_acc": 0.8817204301075269, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.71232, + "grad_norm": 0.7391014790772849, + "learning_rate": 4.535012790020088e-06, + "loss": 0.4608016014099121, + "memory(GiB)": 66.66, + "step": 2226, + "token_acc": 0.8844621513944223, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.71264, + "grad_norm": 0.7007484963216721, + "learning_rate": 4.534500419344779e-06, + "loss": 0.37445223331451416, + "memory(GiB)": 66.66, + "step": 2227, + "token_acc": 0.878412813978886, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.71296, + "grad_norm": 0.6754753710683292, + "learning_rate": 4.533987795511187e-06, + "loss": 0.4188425540924072, + "memory(GiB)": 66.66, + "step": 2228, + "token_acc": 0.8352059925093633, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.71328, + "grad_norm": 0.6402602102277322, + "learning_rate": 4.533474918583098e-06, + "loss": 0.395877480506897, + "memory(GiB)": 66.66, + "step": 2229, + "token_acc": 0.8788909233586188, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.7136, + "grad_norm": 0.6387423868558149, + "learning_rate": 4.5329617886243305e-06, + "loss": 0.37150782346725464, + "memory(GiB)": 66.66, + "step": 2230, + "token_acc": 0.8784869976359339, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.71392, + "grad_norm": 0.6202058018636495, + "learning_rate": 4.532448405698735e-06, + "loss": 0.3293929696083069, + "memory(GiB)": 66.66, + "step": 2231, + "token_acc": 0.8771676300578035, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.71424, + "grad_norm": 0.6385627987307073, + "learning_rate": 4.531934769870193e-06, + "loss": 0.41140466928482056, + "memory(GiB)": 66.66, + "step": 2232, + "token_acc": 0.8897124221761044, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.71456, + "grad_norm": 0.6689247827149711, + "learning_rate": 4.531420881202618e-06, + "loss": 0.48568442463874817, + "memory(GiB)": 66.66, + "step": 2233, + "token_acc": 0.8546286518831397, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.71488, + "grad_norm": 0.6181804483635377, + "learning_rate": 4.530906739759955e-06, + "loss": 0.4053623676300049, + "memory(GiB)": 66.66, + "step": 2234, + "token_acc": 0.9186795491143317, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.7152, + "grad_norm": 0.5984801916981093, + "learning_rate": 4.530392345606179e-06, + "loss": 0.45379704236984253, + "memory(GiB)": 66.66, + "step": 2235, + "token_acc": 0.8447361777328958, + "train_speed(iter/s)": 
0.241927 + }, + { + "epoch": 0.71552, + "grad_norm": 0.6333151146680343, + "learning_rate": 4.529877698805296e-06, + "loss": 0.3287976384162903, + "memory(GiB)": 66.66, + "step": 2236, + "token_acc": 0.9236074270557029, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.71584, + "grad_norm": 0.6634088033470119, + "learning_rate": 4.529362799421348e-06, + "loss": 0.37503811717033386, + "memory(GiB)": 66.66, + "step": 2237, + "token_acc": 0.8864280195724955, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.71616, + "grad_norm": 0.6515888572893508, + "learning_rate": 4.528847647518403e-06, + "loss": 0.3736875653266907, + "memory(GiB)": 66.66, + "step": 2238, + "token_acc": 0.9500773993808049, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.71648, + "grad_norm": 0.616937538280776, + "learning_rate": 4.528332243160563e-06, + "loss": 0.36249929666519165, + "memory(GiB)": 66.66, + "step": 2239, + "token_acc": 0.9025600835945663, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.7168, + "grad_norm": 0.6189805175795066, + "learning_rate": 4.527816586411964e-06, + "loss": 0.3958510756492615, + "memory(GiB)": 66.66, + "step": 2240, + "token_acc": 0.8443446088794926, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.71712, + "grad_norm": 0.7283020217456979, + "learning_rate": 4.5273006773367655e-06, + "loss": 0.36741068959236145, + "memory(GiB)": 66.66, + "step": 2241, + "token_acc": 0.8968128747238877, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.71744, + "grad_norm": 0.6922688674508563, + "learning_rate": 4.526784515999167e-06, + "loss": 0.3689715266227722, + "memory(GiB)": 66.66, + "step": 2242, + "token_acc": 0.8877659574468085, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.71776, + "grad_norm": 0.6265800077923732, + "learning_rate": 4.5262681024633955e-06, + "loss": 0.3386674225330353, + "memory(GiB)": 66.66, + "step": 2243, + "token_acc": 0.9066764132553606, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.71808, + "grad_norm": 0.5596100918584989, + "learning_rate": 4.525751436793709e-06, + "loss": 0.3000979721546173, + "memory(GiB)": 66.66, + "step": 2244, + "token_acc": 0.8645073766321859, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.7184, + "grad_norm": 0.6759790211566864, + "learning_rate": 4.525234519054398e-06, + "loss": 0.4159969687461853, + "memory(GiB)": 66.66, + "step": 2245, + "token_acc": 0.8711360899237254, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.71872, + "grad_norm": 0.6159212440992663, + "learning_rate": 4.524717349309783e-06, + "loss": 0.33716559410095215, + "memory(GiB)": 66.66, + "step": 2246, + "token_acc": 0.9139344262295082, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.71904, + "grad_norm": 0.7197464316835495, + "learning_rate": 4.524199927624218e-06, + "loss": 0.3619437515735626, + "memory(GiB)": 66.66, + "step": 2247, + "token_acc": 0.8894760017613386, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.71936, + "grad_norm": 0.5980153228131493, + "learning_rate": 4.5236822540620864e-06, + "loss": 0.40523654222488403, + "memory(GiB)": 66.66, + "step": 2248, + "token_acc": 0.8903241530587375, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.71968, + "grad_norm": 3.003697878248175, + "learning_rate": 4.523164328687804e-06, + "loss": 0.36298537254333496, + "memory(GiB)": 66.66, + "step": 2249, + "token_acc": 0.8479402820016588, + "train_speed(iter/s)": 0.241929 + }, + { + "epoch": 0.72, + "grad_norm": 0.6298273725345949, + "learning_rate": 4.522646151565817e-06, + "loss": 
0.3588675260543823, + "memory(GiB)": 66.66, + "step": 2250, + "token_acc": 0.91173209137281, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.72032, + "grad_norm": 0.6347484203079096, + "learning_rate": 4.522127722760605e-06, + "loss": 0.42654114961624146, + "memory(GiB)": 66.66, + "step": 2251, + "token_acc": 0.8299221677716921, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.72064, + "grad_norm": 0.5916933458438125, + "learning_rate": 4.521609042336676e-06, + "loss": 0.40652114152908325, + "memory(GiB)": 66.66, + "step": 2252, + "token_acc": 0.9483264826776278, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.72096, + "grad_norm": 0.5997495145064539, + "learning_rate": 4.521090110358572e-06, + "loss": 0.37064242362976074, + "memory(GiB)": 66.66, + "step": 2253, + "token_acc": 0.9265103697024346, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.72128, + "grad_norm": 0.67808562418646, + "learning_rate": 4.520570926890864e-06, + "loss": 0.3554508090019226, + "memory(GiB)": 66.66, + "step": 2254, + "token_acc": 0.9527786700931578, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.7216, + "grad_norm": 0.6206341612017234, + "learning_rate": 4.520051491998155e-06, + "loss": 0.362976998090744, + "memory(GiB)": 66.66, + "step": 2255, + "token_acc": 0.950753960679519, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.72192, + "grad_norm": 0.6340504310865848, + "learning_rate": 4.519531805745081e-06, + "loss": 0.39064350724220276, + "memory(GiB)": 66.66, + "step": 2256, + "token_acc": 0.9431372549019608, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.72224, + "grad_norm": 0.6248383048893782, + "learning_rate": 4.519011868196308e-06, + "loss": 0.3981291651725769, + "memory(GiB)": 66.66, + "step": 2257, + "token_acc": 0.8682839173405211, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.72256, + "grad_norm": 0.5927830354773495, + "learning_rate": 4.518491679416533e-06, + "loss": 0.3201013505458832, + "memory(GiB)": 66.66, + "step": 2258, + "token_acc": 0.9018909290216498, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.72288, + "grad_norm": 0.6384343877585834, + "learning_rate": 4.517971239470482e-06, + "loss": 0.3111628293991089, + "memory(GiB)": 66.66, + "step": 2259, + "token_acc": 0.9176182707993474, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.7232, + "grad_norm": 0.6474563549857586, + "learning_rate": 4.517450548422919e-06, + "loss": 0.43023887276649475, + "memory(GiB)": 66.66, + "step": 2260, + "token_acc": 0.8861405575186494, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.72352, + "grad_norm": 0.6269178236460488, + "learning_rate": 4.516929606338631e-06, + "loss": 0.3472389578819275, + "memory(GiB)": 66.66, + "step": 2261, + "token_acc": 0.8893956670467503, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.72384, + "grad_norm": 0.6907031715620581, + "learning_rate": 4.5164084132824436e-06, + "loss": 0.38050320744514465, + "memory(GiB)": 66.66, + "step": 2262, + "token_acc": 0.8146446078431373, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.72416, + "grad_norm": 0.5788345852674578, + "learning_rate": 4.515886969319208e-06, + "loss": 0.30631324648857117, + "memory(GiB)": 66.66, + "step": 2263, + "token_acc": 0.9032485875706214, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.72448, + "grad_norm": 0.6008996587578963, + "learning_rate": 4.515365274513809e-06, + "loss": 0.32442528009414673, + "memory(GiB)": 66.66, + "step": 2264, + "token_acc": 0.9400862068965518, + "train_speed(iter/s)": 
0.241918 + }, + { + "epoch": 0.7248, + "grad_norm": 0.5836944416182771, + "learning_rate": 4.514843328931164e-06, + "loss": 0.28834444284439087, + "memory(GiB)": 66.66, + "step": 2265, + "token_acc": 0.9423791821561338, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.72512, + "grad_norm": 0.6406982270151497, + "learning_rate": 4.514321132636218e-06, + "loss": 0.36105501651763916, + "memory(GiB)": 66.66, + "step": 2266, + "token_acc": 0.9266895761741123, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.72544, + "grad_norm": 0.6821828824024868, + "learning_rate": 4.5137986856939505e-06, + "loss": 0.3647525906562805, + "memory(GiB)": 66.66, + "step": 2267, + "token_acc": 0.9510144927536232, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.72576, + "grad_norm": 0.6197061014137485, + "learning_rate": 4.513275988169371e-06, + "loss": 0.3639979064464569, + "memory(GiB)": 66.66, + "step": 2268, + "token_acc": 0.8243675487060191, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.72608, + "grad_norm": 0.6218038721035293, + "learning_rate": 4.51275304012752e-06, + "loss": 0.3438589572906494, + "memory(GiB)": 66.66, + "step": 2269, + "token_acc": 0.9567676767676768, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.7264, + "grad_norm": 0.6414175264500473, + "learning_rate": 4.51222984163347e-06, + "loss": 0.38737952709198, + "memory(GiB)": 66.66, + "step": 2270, + "token_acc": 0.9050966608084359, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.72672, + "grad_norm": 0.6650100710602539, + "learning_rate": 4.511706392752321e-06, + "loss": 0.43656253814697266, + "memory(GiB)": 66.66, + "step": 2271, + "token_acc": 0.873645091359554, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.72704, + "grad_norm": 0.6366657726365555, + "learning_rate": 4.511182693549211e-06, + "loss": 0.4190371036529541, + "memory(GiB)": 66.66, + "step": 2272, + "token_acc": 0.8558480794130341, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.72736, + "grad_norm": 0.6045026993524032, + "learning_rate": 4.510658744089303e-06, + "loss": 0.3093340992927551, + "memory(GiB)": 66.66, + "step": 2273, + "token_acc": 0.8927566807313643, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.72768, + "grad_norm": 0.7162904923323148, + "learning_rate": 4.510134544437794e-06, + "loss": 0.45689475536346436, + "memory(GiB)": 66.66, + "step": 2274, + "token_acc": 0.9211281070745698, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.728, + "grad_norm": 0.6501553988077944, + "learning_rate": 4.509610094659912e-06, + "loss": 0.4161341190338135, + "memory(GiB)": 66.66, + "step": 2275, + "token_acc": 0.8700086805555556, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.72832, + "grad_norm": 0.8230159524235051, + "learning_rate": 4.509085394820915e-06, + "loss": 0.41152966022491455, + "memory(GiB)": 66.66, + "step": 2276, + "token_acc": 0.8493698124807869, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.72864, + "grad_norm": 0.6405507707606832, + "learning_rate": 4.5085604449860925e-06, + "loss": 0.4416384994983673, + "memory(GiB)": 66.66, + "step": 2277, + "token_acc": 0.8876484560570072, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.72896, + "grad_norm": 0.638070060358356, + "learning_rate": 4.508035245220766e-06, + "loss": 0.42204582691192627, + "memory(GiB)": 66.66, + "step": 2278, + "token_acc": 0.9558875844895055, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.72928, + "grad_norm": 0.6409577785828099, + "learning_rate": 4.5075097955902885e-06, + "loss": 
0.35541293025016785, + "memory(GiB)": 66.66, + "step": 2279, + "token_acc": 0.9278600269179004, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.7296, + "grad_norm": 0.6335930983150414, + "learning_rate": 4.506984096160041e-06, + "loss": 0.432003915309906, + "memory(GiB)": 66.66, + "step": 2280, + "token_acc": 0.8862439917601281, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.72992, + "grad_norm": 0.6632748773327183, + "learning_rate": 4.5064581469954394e-06, + "loss": 0.3595622181892395, + "memory(GiB)": 66.66, + "step": 2281, + "token_acc": 0.8917865707434053, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.73024, + "grad_norm": 0.6286834434226733, + "learning_rate": 4.505931948161928e-06, + "loss": 0.4275304079055786, + "memory(GiB)": 66.66, + "step": 2282, + "token_acc": 0.8749423165666821, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.73056, + "grad_norm": 0.6341210302674365, + "learning_rate": 4.505405499724984e-06, + "loss": 0.39816951751708984, + "memory(GiB)": 66.66, + "step": 2283, + "token_acc": 0.9348739495798319, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.73088, + "grad_norm": 0.6611843888411678, + "learning_rate": 4.504878801750112e-06, + "loss": 0.3634353280067444, + "memory(GiB)": 66.66, + "step": 2284, + "token_acc": 0.8456265423635865, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.7312, + "grad_norm": 0.565096802314165, + "learning_rate": 4.5043518543028554e-06, + "loss": 0.3436451554298401, + "memory(GiB)": 66.66, + "step": 2285, + "token_acc": 0.880457933972311, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.73152, + "grad_norm": 0.6082205826601953, + "learning_rate": 4.503824657448778e-06, + "loss": 0.34370943903923035, + "memory(GiB)": 66.66, + "step": 2286, + "token_acc": 0.9020660048296216, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.73184, + "grad_norm": 0.6399780087982376, + "learning_rate": 4.5032972112534855e-06, + "loss": 0.33162540197372437, + "memory(GiB)": 66.66, + "step": 2287, + "token_acc": 0.9113712374581939, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.73216, + "grad_norm": 0.6212951675046136, + "learning_rate": 4.502769515782606e-06, + "loss": 0.38207489252090454, + "memory(GiB)": 66.66, + "step": 2288, + "token_acc": 0.941190533110208, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.73248, + "grad_norm": 0.5964469003927122, + "learning_rate": 4.502241571101803e-06, + "loss": 0.3911234140396118, + "memory(GiB)": 66.66, + "step": 2289, + "token_acc": 0.8839631062183874, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.7328, + "grad_norm": 0.573174284439981, + "learning_rate": 4.5017133772767716e-06, + "loss": 0.3411495089530945, + "memory(GiB)": 66.66, + "step": 2290, + "token_acc": 0.912448347107438, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.73312, + "grad_norm": 0.5866351041617478, + "learning_rate": 4.501184934373233e-06, + "loss": 0.34431758522987366, + "memory(GiB)": 66.66, + "step": 2291, + "token_acc": 0.9151225343693963, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.73344, + "grad_norm": 0.5951608071502142, + "learning_rate": 4.500656242456946e-06, + "loss": 0.35972821712493896, + "memory(GiB)": 66.66, + "step": 2292, + "token_acc": 0.9378296910324039, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.73376, + "grad_norm": 0.5962524271380277, + "learning_rate": 4.500127301593695e-06, + "loss": 0.41590794920921326, + "memory(GiB)": 66.66, + "step": 2293, + "token_acc": 0.790268456375839, + "train_speed(iter/s)": 
0.241926 + }, + { + "epoch": 0.73408, + "grad_norm": 0.6931307222070935, + "learning_rate": 4.499598111849299e-06, + "loss": 0.40134477615356445, + "memory(GiB)": 66.66, + "step": 2294, + "token_acc": 0.8111968009140246, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.7344, + "grad_norm": 0.6684270601852399, + "learning_rate": 4.499068673289605e-06, + "loss": 0.38892999291419983, + "memory(GiB)": 66.66, + "step": 2295, + "token_acc": 0.9416890080428955, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.73472, + "grad_norm": 0.6165684747741812, + "learning_rate": 4.498538985980494e-06, + "loss": 0.38226738572120667, + "memory(GiB)": 66.66, + "step": 2296, + "token_acc": 0.9004291845493563, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.73504, + "grad_norm": 0.6221012763688007, + "learning_rate": 4.498009049987876e-06, + "loss": 0.32989540696144104, + "memory(GiB)": 66.66, + "step": 2297, + "token_acc": 0.95995670995671, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.73536, + "grad_norm": 0.6783945388525364, + "learning_rate": 4.497478865377692e-06, + "loss": 0.40120983123779297, + "memory(GiB)": 66.66, + "step": 2298, + "token_acc": 0.8981328291184117, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.73568, + "grad_norm": 0.6608024112903373, + "learning_rate": 4.4969484322159125e-06, + "loss": 0.36497020721435547, + "memory(GiB)": 66.66, + "step": 2299, + "token_acc": 0.8586145648312611, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.736, + "grad_norm": 0.648828443250563, + "learning_rate": 4.496417750568544e-06, + "loss": 0.3205887973308563, + "memory(GiB)": 66.66, + "step": 2300, + "token_acc": 0.9007717750826902, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.73632, + "grad_norm": 0.6623851660676874, + "learning_rate": 4.4958868205016185e-06, + "loss": 0.45788776874542236, + "memory(GiB)": 66.66, + "step": 2301, + "token_acc": 0.8347670250896058, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.73664, + "grad_norm": 0.61501641715061, + "learning_rate": 4.4953556420812025e-06, + "loss": 0.33361154794692993, + "memory(GiB)": 66.66, + "step": 2302, + "token_acc": 0.9187145557655955, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.73696, + "grad_norm": 0.6123482709450195, + "learning_rate": 4.49482421537339e-06, + "loss": 0.41219770908355713, + "memory(GiB)": 66.66, + "step": 2303, + "token_acc": 0.9313423645320197, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.73728, + "grad_norm": 0.653073487242392, + "learning_rate": 4.494292540444309e-06, + "loss": 0.3993935286998749, + "memory(GiB)": 66.66, + "step": 2304, + "token_acc": 0.9289227742252828, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.7376, + "grad_norm": 0.6422676034405285, + "learning_rate": 4.4937606173601184e-06, + "loss": 0.3768670856952667, + "memory(GiB)": 66.66, + "step": 2305, + "token_acc": 0.8609148382298252, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.73792, + "grad_norm": 0.6016718184629992, + "learning_rate": 4.493228446187004e-06, + "loss": 0.3484560251235962, + "memory(GiB)": 66.66, + "step": 2306, + "token_acc": 0.9106290672451193, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.73824, + "grad_norm": 0.6206450625955714, + "learning_rate": 4.492696026991189e-06, + "loss": 0.39425235986709595, + "memory(GiB)": 66.66, + "step": 2307, + "token_acc": 0.8409215578716401, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.73856, + "grad_norm": 0.5881186487321138, + "learning_rate": 4.492163359838919e-06, + 
"loss": 0.3352866768836975, + "memory(GiB)": 66.66, + "step": 2308, + "token_acc": 0.9554162312783002, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.73888, + "grad_norm": 0.6311295296557154, + "learning_rate": 4.491630444796479e-06, + "loss": 0.36606886982917786, + "memory(GiB)": 66.66, + "step": 2309, + "token_acc": 0.8374259102455546, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.7392, + "grad_norm": 0.6540137538945113, + "learning_rate": 4.49109728193018e-06, + "loss": 0.42932531237602234, + "memory(GiB)": 66.66, + "step": 2310, + "token_acc": 0.8246044127479385, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.73952, + "grad_norm": 0.6094189878686871, + "learning_rate": 4.490563871306364e-06, + "loss": 0.3632028102874756, + "memory(GiB)": 66.66, + "step": 2311, + "token_acc": 0.8536962573461182, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.73984, + "grad_norm": 0.6952521972382109, + "learning_rate": 4.490030212991406e-06, + "loss": 0.3287809193134308, + "memory(GiB)": 66.66, + "step": 2312, + "token_acc": 0.9243547908632453, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.74016, + "grad_norm": 0.6462672730167507, + "learning_rate": 4.48949630705171e-06, + "loss": 0.4144185781478882, + "memory(GiB)": 66.66, + "step": 2313, + "token_acc": 0.9074980675083741, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.74048, + "grad_norm": 0.7094705802618271, + "learning_rate": 4.48896215355371e-06, + "loss": 0.4711051881313324, + "memory(GiB)": 66.66, + "step": 2314, + "token_acc": 0.932396839332748, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.7408, + "grad_norm": 0.6354199644908963, + "learning_rate": 4.488427752563874e-06, + "loss": 0.29967373609542847, + "memory(GiB)": 66.66, + "step": 2315, + "token_acc": 0.8795027904616946, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.74112, + "grad_norm": 0.6371154071258502, + "learning_rate": 4.4878931041486986e-06, + "loss": 0.403756320476532, + "memory(GiB)": 66.66, + "step": 2316, + "token_acc": 0.9024209486166008, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.74144, + "grad_norm": 0.593103792010903, + "learning_rate": 4.48735820837471e-06, + "loss": 0.3554255962371826, + "memory(GiB)": 66.66, + "step": 2317, + "token_acc": 0.9510117145899893, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.74176, + "grad_norm": 0.6223201970688879, + "learning_rate": 4.486823065308469e-06, + "loss": 0.4249178469181061, + "memory(GiB)": 66.66, + "step": 2318, + "token_acc": 0.8324246311738294, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.74208, + "grad_norm": 0.6121361734555318, + "learning_rate": 4.486287675016564e-06, + "loss": 0.35477808117866516, + "memory(GiB)": 66.66, + "step": 2319, + "token_acc": 0.9508617528419508, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.7424, + "grad_norm": 0.6226286444129349, + "learning_rate": 4.485752037565614e-06, + "loss": 0.40045222640037537, + "memory(GiB)": 66.66, + "step": 2320, + "token_acc": 0.8709433962264151, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.74272, + "grad_norm": 0.712263905762989, + "learning_rate": 4.485216153022271e-06, + "loss": 0.4376879334449768, + "memory(GiB)": 66.66, + "step": 2321, + "token_acc": 0.9466800804828974, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.74304, + "grad_norm": 0.6457005060128559, + "learning_rate": 4.484680021453216e-06, + "loss": 0.37418413162231445, + "memory(GiB)": 66.66, + "step": 2322, + "token_acc": 0.8788697788697789, + "train_speed(iter/s)": 
0.241948 + }, + { + "epoch": 0.74336, + "grad_norm": 0.6281378868580517, + "learning_rate": 4.484143642925161e-06, + "loss": 0.3504526615142822, + "memory(GiB)": 66.66, + "step": 2323, + "token_acc": 0.905685618729097, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.74368, + "grad_norm": 0.6792051230976313, + "learning_rate": 4.4836070175048495e-06, + "loss": 0.33095717430114746, + "memory(GiB)": 66.66, + "step": 2324, + "token_acc": 0.956140350877193, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.744, + "grad_norm": 0.6026855001584274, + "learning_rate": 4.483070145259056e-06, + "loss": 0.33868086338043213, + "memory(GiB)": 66.66, + "step": 2325, + "token_acc": 0.9240180296200902, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.74432, + "grad_norm": 0.5746134755317546, + "learning_rate": 4.482533026254583e-06, + "loss": 0.38976407051086426, + "memory(GiB)": 66.66, + "step": 2326, + "token_acc": 0.891629711751663, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.74464, + "grad_norm": 0.7149689483250419, + "learning_rate": 4.481995660558267e-06, + "loss": 0.3659779727458954, + "memory(GiB)": 66.66, + "step": 2327, + "token_acc": 0.8328434254360181, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.74496, + "grad_norm": 0.6748384520166442, + "learning_rate": 4.481458048236974e-06, + "loss": 0.4559290111064911, + "memory(GiB)": 66.66, + "step": 2328, + "token_acc": 0.8321623731459797, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.74528, + "grad_norm": 0.5483432394179815, + "learning_rate": 4.4809201893575995e-06, + "loss": 0.30518805980682373, + "memory(GiB)": 66.66, + "step": 2329, + "token_acc": 0.9006228765571914, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.7456, + "grad_norm": 0.5930845581760376, + "learning_rate": 4.480382083987072e-06, + "loss": 0.3804892301559448, + "memory(GiB)": 66.66, + "step": 2330, + "token_acc": 0.8834080717488789, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.74592, + "grad_norm": 0.6399340726087777, + "learning_rate": 4.479843732192347e-06, + "loss": 0.3875330686569214, + "memory(GiB)": 66.66, + "step": 2331, + "token_acc": 0.8787577639751553, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.74624, + "grad_norm": 0.6335713945665106, + "learning_rate": 4.479305134040416e-06, + "loss": 0.45809289813041687, + "memory(GiB)": 66.66, + "step": 2332, + "token_acc": 0.8377241531990259, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.74656, + "grad_norm": 0.6214659086099811, + "learning_rate": 4.478766289598296e-06, + "loss": 0.3606780469417572, + "memory(GiB)": 66.66, + "step": 2333, + "token_acc": 0.8967190388170055, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.74688, + "grad_norm": 0.680773954395919, + "learning_rate": 4.478227198933039e-06, + "loss": 0.4865780472755432, + "memory(GiB)": 66.66, + "step": 2334, + "token_acc": 0.8887905604719764, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.7472, + "grad_norm": 0.6560039695820196, + "learning_rate": 4.477687862111723e-06, + "loss": 0.3204301595687866, + "memory(GiB)": 66.66, + "step": 2335, + "token_acc": 0.8862016293279023, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.74752, + "grad_norm": 0.6405716017529522, + "learning_rate": 4.477148279201461e-06, + "loss": 0.3768501877784729, + "memory(GiB)": 66.66, + "step": 2336, + "token_acc": 0.8683274021352313, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.74784, + "grad_norm": 0.6285231514044943, + "learning_rate": 4.476608450269394e-06, + "loss": 
0.3634309768676758, + "memory(GiB)": 66.66, + "step": 2337, + "token_acc": 0.9344351124356542, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.74816, + "grad_norm": 0.6414782490536218, + "learning_rate": 4.476068375382694e-06, + "loss": 0.3834984302520752, + "memory(GiB)": 66.66, + "step": 2338, + "token_acc": 0.8794722151738138, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.74848, + "grad_norm": 0.6894263348224916, + "learning_rate": 4.475528054608565e-06, + "loss": 0.3303259611129761, + "memory(GiB)": 66.66, + "step": 2339, + "token_acc": 0.9247787610619469, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.7488, + "grad_norm": 0.635191612875476, + "learning_rate": 4.474987488014239e-06, + "loss": 0.390461802482605, + "memory(GiB)": 66.66, + "step": 2340, + "token_acc": 0.922077922077922, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.74912, + "grad_norm": 0.6064054548830631, + "learning_rate": 4.4744466756669824e-06, + "loss": 0.3724749684333801, + "memory(GiB)": 66.66, + "step": 2341, + "token_acc": 0.8688563893550707, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.74944, + "grad_norm": 0.7009465565574101, + "learning_rate": 4.473905617634088e-06, + "loss": 0.3923909068107605, + "memory(GiB)": 66.66, + "step": 2342, + "token_acc": 0.9392405063291139, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.74976, + "grad_norm": 0.6916753611984967, + "learning_rate": 4.473364313982881e-06, + "loss": 0.39365869760513306, + "memory(GiB)": 66.66, + "step": 2343, + "token_acc": 0.879462707670555, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.75008, + "grad_norm": 0.6030189086520813, + "learning_rate": 4.4728227647807185e-06, + "loss": 0.3376174867153168, + "memory(GiB)": 66.66, + "step": 2344, + "token_acc": 0.9644766997708174, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.7504, + "grad_norm": 0.6495039878365281, + "learning_rate": 4.472280970094985e-06, + "loss": 0.4301387667655945, + "memory(GiB)": 66.66, + "step": 2345, + "token_acc": 0.8786379511059371, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.75072, + "grad_norm": 0.6355959022975082, + "learning_rate": 4.4717389299931e-06, + "loss": 0.358798623085022, + "memory(GiB)": 66.66, + "step": 2346, + "token_acc": 0.8944790739091718, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.75104, + "grad_norm": 0.6755459197902476, + "learning_rate": 4.47119664454251e-06, + "loss": 0.4416292905807495, + "memory(GiB)": 66.66, + "step": 2347, + "token_acc": 0.9098730028676771, + "train_speed(iter/s)": 0.241941 + }, + { + "epoch": 0.75136, + "grad_norm": 0.6061523625573861, + "learning_rate": 4.470654113810692e-06, + "loss": 0.32143115997314453, + "memory(GiB)": 66.66, + "step": 2348, + "token_acc": 0.9414860681114551, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.75168, + "grad_norm": 0.634748386916178, + "learning_rate": 4.470111337865156e-06, + "loss": 0.38649874925613403, + "memory(GiB)": 66.66, + "step": 2349, + "token_acc": 0.7951541850220264, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.752, + "grad_norm": 0.5913737919526105, + "learning_rate": 4.4695683167734395e-06, + "loss": 0.33932751417160034, + "memory(GiB)": 66.66, + "step": 2350, + "token_acc": 0.9495356037151703, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.75232, + "grad_norm": 0.670226427901379, + "learning_rate": 4.469025050603113e-06, + "loss": 0.4333738684654236, + "memory(GiB)": 66.66, + "step": 2351, + "token_acc": 0.8521023228462217, + "train_speed(iter/s)": 0.241953 + 
}, + { + "epoch": 0.75264, + "grad_norm": 0.6265641518489752, + "learning_rate": 4.468481539421776e-06, + "loss": 0.4289180636405945, + "memory(GiB)": 66.66, + "step": 2352, + "token_acc": 0.9148174659985684, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.75296, + "grad_norm": 0.7199508641683001, + "learning_rate": 4.467937783297061e-06, + "loss": 0.48492032289505005, + "memory(GiB)": 66.66, + "step": 2353, + "token_acc": 0.8427876823338736, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.75328, + "grad_norm": 0.6218936083719754, + "learning_rate": 4.467393782296626e-06, + "loss": 0.3720551133155823, + "memory(GiB)": 66.66, + "step": 2354, + "token_acc": 0.8713108215900026, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.7536, + "grad_norm": 0.5332356916327654, + "learning_rate": 4.466849536488165e-06, + "loss": 0.32571443915367126, + "memory(GiB)": 66.66, + "step": 2355, + "token_acc": 0.9077840112201964, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.75392, + "grad_norm": 0.6468960914825984, + "learning_rate": 4.466305045939399e-06, + "loss": 0.38340628147125244, + "memory(GiB)": 66.66, + "step": 2356, + "token_acc": 0.8319484835895306, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.75424, + "grad_norm": 0.5584550126701309, + "learning_rate": 4.465760310718079e-06, + "loss": 0.3343712091445923, + "memory(GiB)": 66.66, + "step": 2357, + "token_acc": 0.8621544327931363, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.75456, + "grad_norm": 0.5916254069730643, + "learning_rate": 4.46521533089199e-06, + "loss": 0.3333103060722351, + "memory(GiB)": 66.66, + "step": 2358, + "token_acc": 0.9249920204277051, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.75488, + "grad_norm": 0.6326573137592801, + "learning_rate": 4.4646701065289445e-06, + "loss": 0.4118788540363312, + "memory(GiB)": 66.66, + "step": 2359, + "token_acc": 0.8828158230540196, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.7552, + "grad_norm": 0.6398994353056883, + "learning_rate": 4.464124637696786e-06, + "loss": 0.3634151220321655, + "memory(GiB)": 66.66, + "step": 2360, + "token_acc": 0.8935171604576122, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.75552, + "grad_norm": 0.6394936892342552, + "learning_rate": 4.463578924463389e-06, + "loss": 0.3712594509124756, + "memory(GiB)": 66.66, + "step": 2361, + "token_acc": 0.8448810754912099, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.75584, + "grad_norm": 0.6148022706281038, + "learning_rate": 4.463032966896658e-06, + "loss": 0.449304461479187, + "memory(GiB)": 66.66, + "step": 2362, + "token_acc": 0.8116803278688525, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.75616, + "grad_norm": 0.6424550934997812, + "learning_rate": 4.462486765064527e-06, + "loss": 0.4176827073097229, + "memory(GiB)": 66.66, + "step": 2363, + "token_acc": 0.903405572755418, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.75648, + "grad_norm": 0.654239747341214, + "learning_rate": 4.461940319034963e-06, + "loss": 0.3585636615753174, + "memory(GiB)": 66.66, + "step": 2364, + "token_acc": 0.8986719134284309, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.7568, + "grad_norm": 0.6672188383817046, + "learning_rate": 4.46139362887596e-06, + "loss": 0.39768439531326294, + "memory(GiB)": 66.66, + "step": 2365, + "token_acc": 0.8346084608460846, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.75712, + "grad_norm": 0.588642927109659, + "learning_rate": 4.460846694655546e-06, + "loss": 
0.3226599395275116, + "memory(GiB)": 66.66, + "step": 2366, + "token_acc": 0.9186130508066458, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.75744, + "grad_norm": 0.6400356135145746, + "learning_rate": 4.460299516441777e-06, + "loss": 0.33131885528564453, + "memory(GiB)": 66.66, + "step": 2367, + "token_acc": 0.8784103114930183, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.75776, + "grad_norm": 0.6329167250033393, + "learning_rate": 4.459752094302738e-06, + "loss": 0.41495102643966675, + "memory(GiB)": 66.66, + "step": 2368, + "token_acc": 0.8871119473189087, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.75808, + "grad_norm": 0.6965926854565442, + "learning_rate": 4.459204428306547e-06, + "loss": 0.31331944465637207, + "memory(GiB)": 66.66, + "step": 2369, + "token_acc": 0.9454478437154441, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.7584, + "grad_norm": 0.6401446252270176, + "learning_rate": 4.458656518521354e-06, + "loss": 0.3585125803947449, + "memory(GiB)": 66.66, + "step": 2370, + "token_acc": 0.8936912751677852, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.75872, + "grad_norm": 0.569250016843301, + "learning_rate": 4.458108365015333e-06, + "loss": 0.3623235821723938, + "memory(GiB)": 66.66, + "step": 2371, + "token_acc": 0.889920680982782, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.75904, + "grad_norm": 1.0168437021129164, + "learning_rate": 4.457559967856695e-06, + "loss": 0.38215717673301697, + "memory(GiB)": 66.66, + "step": 2372, + "token_acc": 0.8880338591766064, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.75936, + "grad_norm": 0.6663294577465615, + "learning_rate": 4.457011327113677e-06, + "loss": 0.3759962320327759, + "memory(GiB)": 66.66, + "step": 2373, + "token_acc": 0.8277344799785004, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.75968, + "grad_norm": 0.6668961025715663, + "learning_rate": 4.456462442854549e-06, + "loss": 0.4026271104812622, + "memory(GiB)": 66.66, + "step": 2374, + "token_acc": 0.8301096067053514, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.76, + "grad_norm": 0.5816170036277196, + "learning_rate": 4.45591331514761e-06, + "loss": 0.34961944818496704, + "memory(GiB)": 66.66, + "step": 2375, + "token_acc": 0.8588337182448037, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.76032, + "grad_norm": 0.6684522701433409, + "learning_rate": 4.455363944061187e-06, + "loss": 0.2876832187175751, + "memory(GiB)": 66.66, + "step": 2376, + "token_acc": 0.9487892020643113, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.76064, + "grad_norm": 0.5942680956011711, + "learning_rate": 4.4548143296636434e-06, + "loss": 0.34215617179870605, + "memory(GiB)": 66.66, + "step": 2377, + "token_acc": 0.9219318557724115, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.76096, + "grad_norm": 0.655377408208955, + "learning_rate": 4.454264472023368e-06, + "loss": 0.36981484293937683, + "memory(GiB)": 66.66, + "step": 2378, + "token_acc": 0.8482245131729668, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.76128, + "grad_norm": 0.6279125313589858, + "learning_rate": 4.453714371208778e-06, + "loss": 0.35414987802505493, + "memory(GiB)": 66.66, + "step": 2379, + "token_acc": 0.9346201743462017, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.7616, + "grad_norm": 0.6641840186443339, + "learning_rate": 4.453164027288328e-06, + "loss": 0.3707915246486664, + "memory(GiB)": 66.66, + "step": 2380, + "token_acc": 0.9022835900159321, + "train_speed(iter/s)": 
0.241914 + }, + { + "epoch": 0.76192, + "grad_norm": 0.6106893192711781, + "learning_rate": 4.452613440330497e-06, + "loss": 0.2785664200782776, + "memory(GiB)": 66.66, + "step": 2381, + "token_acc": 0.9349247874427731, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.76224, + "grad_norm": 0.6605227207618364, + "learning_rate": 4.4520626104037965e-06, + "loss": 0.3450506329536438, + "memory(GiB)": 66.66, + "step": 2382, + "token_acc": 0.8849804578447794, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.76256, + "grad_norm": 0.6160414434706935, + "learning_rate": 4.451511537576767e-06, + "loss": 0.36785876750946045, + "memory(GiB)": 66.66, + "step": 2383, + "token_acc": 0.9352876106194691, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.76288, + "grad_norm": 0.7355721319947203, + "learning_rate": 4.45096022191798e-06, + "loss": 0.2821945548057556, + "memory(GiB)": 66.66, + "step": 2384, + "token_acc": 0.9374358974358974, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.7632, + "grad_norm": 0.6368267778275447, + "learning_rate": 4.450408663496037e-06, + "loss": 0.4065840244293213, + "memory(GiB)": 66.66, + "step": 2385, + "token_acc": 0.8313756199425737, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.76352, + "grad_norm": 0.6636007935697926, + "learning_rate": 4.449856862379572e-06, + "loss": 0.3584074079990387, + "memory(GiB)": 66.66, + "step": 2386, + "token_acc": 0.8932835820895523, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.76384, + "grad_norm": 0.6140475304208066, + "learning_rate": 4.449304818637244e-06, + "loss": 0.30375152826309204, + "memory(GiB)": 66.66, + "step": 2387, + "token_acc": 0.8273604410751206, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.76416, + "grad_norm": 0.6252604719343278, + "learning_rate": 4.4487525323377474e-06, + "loss": 0.38926592469215393, + "memory(GiB)": 66.66, + "step": 2388, + "token_acc": 0.9069206008583691, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.76448, + "grad_norm": 0.6561494723080781, + "learning_rate": 4.448200003549804e-06, + "loss": 0.346186101436615, + "memory(GiB)": 66.66, + "step": 2389, + "token_acc": 0.8901947615849564, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.7648, + "grad_norm": 0.6254038985390367, + "learning_rate": 4.447647232342166e-06, + "loss": 0.35254916548728943, + "memory(GiB)": 66.66, + "step": 2390, + "token_acc": 0.9183908045977012, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.76512, + "grad_norm": 0.6359204617813196, + "learning_rate": 4.4470942187836174e-06, + "loss": 0.3178269565105438, + "memory(GiB)": 66.66, + "step": 2391, + "token_acc": 0.9617368873602752, + "train_speed(iter/s)": 0.241931 + }, + { + "epoch": 0.76544, + "grad_norm": 0.5858680728079038, + "learning_rate": 4.446540962942969e-06, + "loss": 0.36808812618255615, + "memory(GiB)": 66.66, + "step": 2392, + "token_acc": 0.8820335636722606, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.76576, + "grad_norm": 0.5921375590803991, + "learning_rate": 4.445987464889067e-06, + "loss": 0.4470038414001465, + "memory(GiB)": 66.66, + "step": 2393, + "token_acc": 0.8929970617042116, + "train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.76608, + "grad_norm": 0.6877599256450047, + "learning_rate": 4.4454337246907805e-06, + "loss": 0.4446695148944855, + "memory(GiB)": 66.66, + "step": 2394, + "token_acc": 0.8609254498714652, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.7664, + "grad_norm": 0.6785866184077451, + "learning_rate": 4.444879742417016e-06, + 
"loss": 0.3870832920074463, + "memory(GiB)": 66.66, + "step": 2395, + "token_acc": 0.8716861081654295, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.76672, + "grad_norm": 0.6460029874640297, + "learning_rate": 4.444325518136707e-06, + "loss": 0.40786170959472656, + "memory(GiB)": 66.66, + "step": 2396, + "token_acc": 0.9067321178120618, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.76704, + "grad_norm": 0.6358768321994196, + "learning_rate": 4.443771051918816e-06, + "loss": 0.38380032777786255, + "memory(GiB)": 66.66, + "step": 2397, + "token_acc": 0.9150406504065041, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.76736, + "grad_norm": 0.6410019956744821, + "learning_rate": 4.4432163438323375e-06, + "loss": 0.35993334650993347, + "memory(GiB)": 66.66, + "step": 2398, + "token_acc": 0.854296066252588, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.76768, + "grad_norm": 0.604999576485535, + "learning_rate": 4.442661393946294e-06, + "loss": 0.35407179594039917, + "memory(GiB)": 66.66, + "step": 2399, + "token_acc": 0.8823333333333333, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.768, + "grad_norm": 0.627441726256792, + "learning_rate": 4.44210620232974e-06, + "loss": 0.355832040309906, + "memory(GiB)": 66.66, + "step": 2400, + "token_acc": 0.901675702316412, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.76832, + "grad_norm": 0.5889488446277545, + "learning_rate": 4.441550769051759e-06, + "loss": 0.32824474573135376, + "memory(GiB)": 66.66, + "step": 2401, + "token_acc": 0.8780093424362199, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.76864, + "grad_norm": 0.6039704188875132, + "learning_rate": 4.440995094181467e-06, + "loss": 0.3529846966266632, + "memory(GiB)": 66.66, + "step": 2402, + "token_acc": 0.8984951313071703, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.76896, + "grad_norm": 0.5975561730286117, + "learning_rate": 4.440439177788007e-06, + "loss": 0.3385690450668335, + "memory(GiB)": 66.66, + "step": 2403, + "token_acc": 0.9403497813866334, + "train_speed(iter/s)": 0.241928 + }, + { + "epoch": 0.76928, + "grad_norm": 0.6889358280719341, + "learning_rate": 4.439883019940552e-06, + "loss": 0.38117602467536926, + "memory(GiB)": 66.66, + "step": 2404, + "token_acc": 0.9054126473740621, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.7696, + "grad_norm": 0.6585355327158497, + "learning_rate": 4.439326620708307e-06, + "loss": 0.3493140637874603, + "memory(GiB)": 66.66, + "step": 2405, + "token_acc": 0.9129239230064161, + "train_speed(iter/s)": 0.241932 + }, + { + "epoch": 0.76992, + "grad_norm": 0.7019386033132493, + "learning_rate": 4.4387699801605065e-06, + "loss": 0.408677875995636, + "memory(GiB)": 66.66, + "step": 2406, + "token_acc": 0.9127533366287691, + "train_speed(iter/s)": 0.241933 + }, + { + "epoch": 0.77024, + "grad_norm": 0.6321225453379364, + "learning_rate": 4.4382130983664154e-06, + "loss": 0.3209949731826782, + "memory(GiB)": 66.66, + "step": 2407, + "token_acc": 0.9129865881583251, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.77056, + "grad_norm": 0.5823335524190284, + "learning_rate": 4.437655975395326e-06, + "loss": 0.37358659505844116, + "memory(GiB)": 66.66, + "step": 2408, + "token_acc": 0.9409474367293965, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.77088, + "grad_norm": 0.6104147883903075, + "learning_rate": 4.4370986113165646e-06, + "loss": 0.3822305202484131, + "memory(GiB)": 66.66, + "step": 2409, + "token_acc": 0.8663721700717836, + 
"train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.7712, + "grad_norm": 0.6087666798948553, + "learning_rate": 4.436541006199484e-06, + "loss": 0.3740164041519165, + "memory(GiB)": 66.66, + "step": 2410, + "token_acc": 0.882224048205971, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.77152, + "grad_norm": 0.6150956027202327, + "learning_rate": 4.43598316011347e-06, + "loss": 0.3540389835834503, + "memory(GiB)": 66.66, + "step": 2411, + "token_acc": 0.8731019522776573, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.77184, + "grad_norm": 0.6408167827701482, + "learning_rate": 4.4354250731279356e-06, + "loss": 0.4032268524169922, + "memory(GiB)": 66.66, + "step": 2412, + "token_acc": 0.8300486900069557, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.77216, + "grad_norm": 0.6640627279439061, + "learning_rate": 4.434866745312325e-06, + "loss": 0.30428797006607056, + "memory(GiB)": 66.66, + "step": 2413, + "token_acc": 0.945758435824661, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.77248, + "grad_norm": 0.6324455670771332, + "learning_rate": 4.434308176736113e-06, + "loss": 0.4316268861293793, + "memory(GiB)": 66.66, + "step": 2414, + "token_acc": 0.8832258064516129, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.7728, + "grad_norm": 0.6704946295503776, + "learning_rate": 4.433749367468804e-06, + "loss": 0.37814533710479736, + "memory(GiB)": 66.66, + "step": 2415, + "token_acc": 0.8746074472857783, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.77312, + "grad_norm": 0.6225631178685314, + "learning_rate": 4.433190317579932e-06, + "loss": 0.3984758257865906, + "memory(GiB)": 66.66, + "step": 2416, + "token_acc": 0.9329182566780478, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.77344, + "grad_norm": 0.5681807281140846, + "learning_rate": 4.4326310271390605e-06, + "loss": 0.42029252648353577, + "memory(GiB)": 66.66, + "step": 2417, + "token_acc": 0.9049071955250445, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.77376, + "grad_norm": 0.7041543740422399, + "learning_rate": 4.432071496215784e-06, + "loss": 0.39477357268333435, + "memory(GiB)": 66.66, + "step": 2418, + "token_acc": 0.8971693940734189, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.77408, + "grad_norm": 0.624489050535428, + "learning_rate": 4.4315117248797255e-06, + "loss": 0.3743709325790405, + "memory(GiB)": 66.66, + "step": 2419, + "token_acc": 0.8507638072855465, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.7744, + "grad_norm": 0.7053794595145426, + "learning_rate": 4.430951713200539e-06, + "loss": 0.4187149703502655, + "memory(GiB)": 66.66, + "step": 2420, + "token_acc": 0.8289855072463768, + "train_speed(iter/s)": 0.24194 + }, + { + "epoch": 0.77472, + "grad_norm": 0.6237447041667222, + "learning_rate": 4.430391461247911e-06, + "loss": 0.41170695424079895, + "memory(GiB)": 66.66, + "step": 2421, + "token_acc": 0.8657492354740061, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.77504, + "grad_norm": 0.6913800748720564, + "learning_rate": 4.429830969091552e-06, + "loss": 0.44134050607681274, + "memory(GiB)": 66.66, + "step": 2422, + "token_acc": 0.9377016629436585, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.77536, + "grad_norm": 0.6200570928170335, + "learning_rate": 4.429270236801206e-06, + "loss": 0.38567712903022766, + "memory(GiB)": 66.66, + "step": 2423, + "token_acc": 0.8720826652414039, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.77568, + "grad_norm": 0.590072394099874, + "learning_rate": 
4.428709264446647e-06, + "loss": 0.32350024580955505, + "memory(GiB)": 66.66, + "step": 2424, + "token_acc": 0.9531368102796675, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.776, + "grad_norm": 0.707305715621205, + "learning_rate": 4.428148052097678e-06, + "loss": 0.4459986686706543, + "memory(GiB)": 66.66, + "step": 2425, + "token_acc": 0.8842975206611571, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.77632, + "grad_norm": 0.6302753684763989, + "learning_rate": 4.427586599824133e-06, + "loss": 0.4107821583747864, + "memory(GiB)": 66.66, + "step": 2426, + "token_acc": 0.8773960216998191, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.77664, + "grad_norm": 0.608628244796495, + "learning_rate": 4.427024907695874e-06, + "loss": 0.35817912220954895, + "memory(GiB)": 66.66, + "step": 2427, + "token_acc": 0.8911278195488722, + "train_speed(iter/s)": 0.241943 + }, + { + "epoch": 0.77696, + "grad_norm": 0.8630618426587866, + "learning_rate": 4.426462975782794e-06, + "loss": 0.37963247299194336, + "memory(GiB)": 66.66, + "step": 2428, + "token_acc": 0.8480360592401803, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.77728, + "grad_norm": 0.6556206649603785, + "learning_rate": 4.425900804154816e-06, + "loss": 0.3806759715080261, + "memory(GiB)": 66.66, + "step": 2429, + "token_acc": 0.8997524752475248, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.7776, + "grad_norm": 0.579044496518352, + "learning_rate": 4.425338392881892e-06, + "loss": 0.27928709983825684, + "memory(GiB)": 66.66, + "step": 2430, + "token_acc": 0.9254772393538914, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.77792, + "grad_norm": 0.6648481269374116, + "learning_rate": 4.424775742034004e-06, + "loss": 0.4078843891620636, + "memory(GiB)": 66.66, + "step": 2431, + "token_acc": 0.918918918918919, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.77824, + "grad_norm": 0.6684451371430472, + "learning_rate": 4.424212851681165e-06, + "loss": 0.4300846457481384, + "memory(GiB)": 66.66, + "step": 2432, + "token_acc": 0.9432478632478632, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.77856, + "grad_norm": 0.633514600268109, + "learning_rate": 4.423649721893418e-06, + "loss": 0.36955833435058594, + "memory(GiB)": 66.66, + "step": 2433, + "token_acc": 0.9215344376634699, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.77888, + "grad_norm": 0.7232745254841567, + "learning_rate": 4.4230863527408325e-06, + "loss": 0.43967604637145996, + "memory(GiB)": 66.66, + "step": 2434, + "token_acc": 0.9215181730459955, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.7792, + "grad_norm": 0.632238596723006, + "learning_rate": 4.422522744293511e-06, + "loss": 0.4224435091018677, + "memory(GiB)": 66.66, + "step": 2435, + "token_acc": 0.9238820171265462, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.77952, + "grad_norm": 0.6612895401772909, + "learning_rate": 4.421958896621585e-06, + "loss": 0.3553462326526642, + "memory(GiB)": 66.66, + "step": 2436, + "token_acc": 0.9375410913872453, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.77984, + "grad_norm": 0.65420206171841, + "learning_rate": 4.4213948097952155e-06, + "loss": 0.3237501084804535, + "memory(GiB)": 66.66, + "step": 2437, + "token_acc": 0.8771156967308138, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.78016, + "grad_norm": 0.6372225825618159, + "learning_rate": 4.420830483884592e-06, + "loss": 0.40747398138046265, + "memory(GiB)": 66.66, + "step": 2438, + "token_acc": 0.8806060606060606, 
+ "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.78048, + "grad_norm": 0.6143216654616832, + "learning_rate": 4.4202659189599374e-06, + "loss": 0.4205804765224457, + "memory(GiB)": 66.66, + "step": 2439, + "token_acc": 0.8983928084990466, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.7808, + "grad_norm": 0.6538806470026362, + "learning_rate": 4.4197011150915e-06, + "loss": 0.3343961834907532, + "memory(GiB)": 66.66, + "step": 2440, + "token_acc": 0.8819291574944967, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.78112, + "grad_norm": 0.6551529923606069, + "learning_rate": 4.419136072349561e-06, + "loss": 0.341858446598053, + "memory(GiB)": 66.66, + "step": 2441, + "token_acc": 0.8600292825768667, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.78144, + "grad_norm": 0.6309186849394915, + "learning_rate": 4.41857079080443e-06, + "loss": 0.4239308536052704, + "memory(GiB)": 66.66, + "step": 2442, + "token_acc": 0.8781790437436419, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.78176, + "grad_norm": 0.6303957850396712, + "learning_rate": 4.418005270526447e-06, + "loss": 0.33065682649612427, + "memory(GiB)": 66.66, + "step": 2443, + "token_acc": 0.9249221183800623, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.78208, + "grad_norm": 0.6455437624182903, + "learning_rate": 4.417439511585979e-06, + "loss": 0.2436257153749466, + "memory(GiB)": 66.66, + "step": 2444, + "token_acc": 0.9231702385766276, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.7824, + "grad_norm": 0.63566082740106, + "learning_rate": 4.416873514053428e-06, + "loss": 0.4177039861679077, + "memory(GiB)": 66.66, + "step": 2445, + "token_acc": 0.8275065010112684, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.78272, + "grad_norm": 0.6394907038358684, + "learning_rate": 4.41630727799922e-06, + "loss": 0.3092973828315735, + "memory(GiB)": 66.66, + "step": 2446, + "token_acc": 0.9447724904480722, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.78304, + "grad_norm": 0.5721172464598521, + "learning_rate": 4.415740803493814e-06, + "loss": 0.30088767409324646, + "memory(GiB)": 66.66, + "step": 2447, + "token_acc": 0.9015047879616963, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.78336, + "grad_norm": 0.6367205196540078, + "learning_rate": 4.415174090607698e-06, + "loss": 0.34827691316604614, + "memory(GiB)": 66.66, + "step": 2448, + "token_acc": 0.8841084307386222, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.78368, + "grad_norm": 0.6434604003029074, + "learning_rate": 4.414607139411391e-06, + "loss": 0.40259599685668945, + "memory(GiB)": 66.66, + "step": 2449, + "token_acc": 0.8793565683646113, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.784, + "grad_norm": 0.6599013383912969, + "learning_rate": 4.414039949975438e-06, + "loss": 0.3869887888431549, + "memory(GiB)": 66.66, + "step": 2450, + "token_acc": 0.8903861103912931, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.78432, + "grad_norm": 0.6989789382712776, + "learning_rate": 4.413472522370417e-06, + "loss": 0.4334059953689575, + "memory(GiB)": 66.66, + "step": 2451, + "token_acc": 0.879840196681008, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.78464, + "grad_norm": 0.6039805943077836, + "learning_rate": 4.412904856666934e-06, + "loss": 0.3679129481315613, + "memory(GiB)": 66.66, + "step": 2452, + "token_acc": 0.9575230296827022, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.78496, + "grad_norm": 0.6088589018845971, + "learning_rate": 
4.412336952935626e-06, + "loss": 0.34608060121536255, + "memory(GiB)": 66.66, + "step": 2453, + "token_acc": 0.9014450867052023, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.78528, + "grad_norm": 0.6893651144045911, + "learning_rate": 4.411768811247158e-06, + "loss": 0.4084704518318176, + "memory(GiB)": 66.66, + "step": 2454, + "token_acc": 0.9029358897543439, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.7856, + "grad_norm": 0.6601074453626112, + "learning_rate": 4.411200431672226e-06, + "loss": 0.3432855010032654, + "memory(GiB)": 66.66, + "step": 2455, + "token_acc": 0.9104915627292737, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.78592, + "grad_norm": 0.6870400265935088, + "learning_rate": 4.410631814281555e-06, + "loss": 0.4125280976295471, + "memory(GiB)": 66.66, + "step": 2456, + "token_acc": 0.8805284319356692, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.78624, + "grad_norm": 0.5658964216274966, + "learning_rate": 4.410062959145898e-06, + "loss": 0.3347938656806946, + "memory(GiB)": 66.66, + "step": 2457, + "token_acc": 0.8389203308663474, + "train_speed(iter/s)": 0.241936 + }, + { + "epoch": 0.78656, + "grad_norm": 0.6514992887670004, + "learning_rate": 4.409493866336041e-06, + "loss": 0.3289263844490051, + "memory(GiB)": 66.66, + "step": 2458, + "token_acc": 0.9380352644836272, + "train_speed(iter/s)": 0.241938 + }, + { + "epoch": 0.78688, + "grad_norm": 0.6606823404699758, + "learning_rate": 4.4089245359227975e-06, + "loss": 0.3863104581832886, + "memory(GiB)": 66.66, + "step": 2459, + "token_acc": 0.8893352812271731, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.7872, + "grad_norm": 0.59072063385709, + "learning_rate": 4.408354967977011e-06, + "loss": 0.34928691387176514, + "memory(GiB)": 66.66, + "step": 2460, + "token_acc": 0.8996458087367178, + "train_speed(iter/s)": 0.241937 + }, + { + "epoch": 0.78752, + "grad_norm": 0.6466478255658541, + "learning_rate": 4.407785162569552e-06, + "loss": 0.44787755608558655, + "memory(GiB)": 66.66, + "step": 2461, + "token_acc": 0.9218340611353711, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.78784, + "grad_norm": 0.5837318388426683, + "learning_rate": 4.407215119771326e-06, + "loss": 0.3502548038959503, + "memory(GiB)": 66.66, + "step": 2462, + "token_acc": 0.8651718983557548, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.78816, + "grad_norm": 0.5834937391226165, + "learning_rate": 4.406644839653263e-06, + "loss": 0.4166458249092102, + "memory(GiB)": 66.66, + "step": 2463, + "token_acc": 0.9103268730112815, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.78848, + "grad_norm": 0.6919007309928884, + "learning_rate": 4.406074322286325e-06, + "loss": 0.3360409140586853, + "memory(GiB)": 66.66, + "step": 2464, + "token_acc": 0.9141055949566588, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.7888, + "grad_norm": 0.630824217637133, + "learning_rate": 4.405503567741504e-06, + "loss": 0.3444675803184509, + "memory(GiB)": 66.66, + "step": 2465, + "token_acc": 0.9247881355932204, + "train_speed(iter/s)": 0.241927 + }, + { + "epoch": 0.78912, + "grad_norm": 0.6261727946604343, + "learning_rate": 4.404932576089818e-06, + "loss": 0.4217478632926941, + "memory(GiB)": 66.66, + "step": 2466, + "token_acc": 0.8816499614494988, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.78944, + "grad_norm": 0.6403164221284977, + "learning_rate": 4.40436134740232e-06, + "loss": 0.42213425040245056, + "memory(GiB)": 66.66, + "step": 2467, + "token_acc": 0.92, + 
"train_speed(iter/s)": 0.241924 + }, + { + "epoch": 0.78976, + "grad_norm": 0.6281285287880811, + "learning_rate": 4.403789881750087e-06, + "loss": 0.48537832498550415, + "memory(GiB)": 66.66, + "step": 2468, + "token_acc": 0.8790731354091238, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.79008, + "grad_norm": 0.5987569437946888, + "learning_rate": 4.4032181792042286e-06, + "loss": 0.4157838225364685, + "memory(GiB)": 66.66, + "step": 2469, + "token_acc": 0.8448693778615675, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.7904, + "grad_norm": 0.6640120927942572, + "learning_rate": 4.402646239835885e-06, + "loss": 0.424528032541275, + "memory(GiB)": 66.66, + "step": 2470, + "token_acc": 0.8353892821031345, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.79072, + "grad_norm": 0.6816381706912408, + "learning_rate": 4.402074063716222e-06, + "loss": 0.34996211528778076, + "memory(GiB)": 66.66, + "step": 2471, + "token_acc": 0.9559300064808814, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.79104, + "grad_norm": 0.5957211463434351, + "learning_rate": 4.401501650916438e-06, + "loss": 0.2928246259689331, + "memory(GiB)": 66.66, + "step": 2472, + "token_acc": 0.8985231062410671, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.79136, + "grad_norm": 0.6915490178770445, + "learning_rate": 4.40092900150776e-06, + "loss": 0.49202829599380493, + "memory(GiB)": 66.66, + "step": 2473, + "token_acc": 0.8703030303030304, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.79168, + "grad_norm": 0.6347086063012041, + "learning_rate": 4.4003561155614435e-06, + "loss": 0.348741739988327, + "memory(GiB)": 66.66, + "step": 2474, + "token_acc": 0.9325173668541185, + "train_speed(iter/s)": 0.241921 + }, + { + "epoch": 0.792, + "grad_norm": 0.7064404846758509, + "learning_rate": 4.399782993148775e-06, + "loss": 0.33668121695518494, + "memory(GiB)": 66.66, + "step": 2475, + "token_acc": 0.9230337078651686, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.79232, + "grad_norm": 0.6133423648170381, + "learning_rate": 4.39920963434107e-06, + "loss": 0.34115317463874817, + "memory(GiB)": 66.66, + "step": 2476, + "token_acc": 0.9238668555240793, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.79264, + "grad_norm": 0.619043183765327, + "learning_rate": 4.398636039209671e-06, + "loss": 0.3269515335559845, + "memory(GiB)": 66.66, + "step": 2477, + "token_acc": 0.8754380375915897, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.79296, + "grad_norm": 0.6176795829506896, + "learning_rate": 4.398062207825954e-06, + "loss": 0.39546045660972595, + "memory(GiB)": 66.66, + "step": 2478, + "token_acc": 0.9051873198847262, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.79328, + "grad_norm": 2.066791935765497, + "learning_rate": 4.397488140261321e-06, + "loss": 0.40682530403137207, + "memory(GiB)": 66.66, + "step": 2479, + "token_acc": 0.8875319511502414, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.7936, + "grad_norm": 0.5921771249897206, + "learning_rate": 4.3969138365872064e-06, + "loss": 0.3332481384277344, + "memory(GiB)": 66.66, + "step": 2480, + "token_acc": 0.9205167506021459, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.79392, + "grad_norm": 0.6631772222909441, + "learning_rate": 4.3963392968750706e-06, + "loss": 0.46063804626464844, + "memory(GiB)": 66.66, + "step": 2481, + "token_acc": 0.8565251572327044, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.79424, + "grad_norm": 0.5343648183551837, + "learning_rate": 
4.3957645211964065e-06, + "loss": 0.41508981585502625, + "memory(GiB)": 66.66, + "step": 2482, + "token_acc": 0.825923942153187, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.79456, + "grad_norm": 0.5626864877337528, + "learning_rate": 4.395189509622734e-06, + "loss": 0.36599451303482056, + "memory(GiB)": 66.66, + "step": 2483, + "token_acc": 0.8753446238676644, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.79488, + "grad_norm": 0.6467014338432308, + "learning_rate": 4.3946142622256035e-06, + "loss": 0.4457034170627594, + "memory(GiB)": 66.66, + "step": 2484, + "token_acc": 0.8579363743477529, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.7952, + "grad_norm": 0.6627665888923155, + "learning_rate": 4.394038779076596e-06, + "loss": 0.4236917495727539, + "memory(GiB)": 66.66, + "step": 2485, + "token_acc": 0.8429532577903682, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.79552, + "grad_norm": 0.539697974975763, + "learning_rate": 4.393463060247317e-06, + "loss": 0.33331823348999023, + "memory(GiB)": 66.66, + "step": 2486, + "token_acc": 0.8465430016863407, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.79584, + "grad_norm": 0.6143508732054094, + "learning_rate": 4.392887105809409e-06, + "loss": 0.4863896071910858, + "memory(GiB)": 66.66, + "step": 2487, + "token_acc": 0.8457432612756872, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.79616, + "grad_norm": 0.6289023288924466, + "learning_rate": 4.392310915834536e-06, + "loss": 0.37140628695487976, + "memory(GiB)": 66.66, + "step": 2488, + "token_acc": 0.8772144166157605, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.79648, + "grad_norm": 0.6537335493406257, + "learning_rate": 4.3917344903943965e-06, + "loss": 0.4256563186645508, + "memory(GiB)": 66.66, + "step": 2489, + "token_acc": 0.9035288725939505, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.7968, + "grad_norm": 0.5845869421867717, + "learning_rate": 4.391157829560716e-06, + "loss": 0.3602452874183655, + "memory(GiB)": 66.66, + "step": 2490, + "token_acc": 0.8660098522167488, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.79712, + "grad_norm": 0.5671604368198514, + "learning_rate": 4.39058093340525e-06, + "loss": 0.40519750118255615, + "memory(GiB)": 66.66, + "step": 2491, + "token_acc": 0.9077683228108702, + "train_speed(iter/s)": 0.241887 + }, + { + "epoch": 0.79744, + "grad_norm": 0.567556643545243, + "learning_rate": 4.390003801999785e-06, + "loss": 0.31330394744873047, + "memory(GiB)": 66.66, + "step": 2492, + "token_acc": 0.8974039460020768, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.79776, + "grad_norm": 0.6611174130574419, + "learning_rate": 4.389426435416133e-06, + "loss": 0.3812761902809143, + "memory(GiB)": 66.66, + "step": 2493, + "token_acc": 0.8846516007532956, + "train_speed(iter/s)": 0.241884 + }, + { + "epoch": 0.79808, + "grad_norm": 0.6150711886987122, + "learning_rate": 4.388848833726137e-06, + "loss": 0.37150296568870544, + "memory(GiB)": 66.66, + "step": 2494, + "token_acc": 0.8531862745098039, + "train_speed(iter/s)": 0.241886 + }, + { + "epoch": 0.7984, + "grad_norm": 0.6496840073092388, + "learning_rate": 4.388270997001671e-06, + "loss": 0.3411356806755066, + "memory(GiB)": 66.66, + "step": 2495, + "token_acc": 0.8735549132947977, + "train_speed(iter/s)": 0.241889 + }, + { + "epoch": 0.79872, + "grad_norm": 0.6797339265783741, + "learning_rate": 4.3876929253146355e-06, + "loss": 0.3624635636806488, + "memory(GiB)": 66.66, + "step": 2496, + "token_acc": 
0.8726851851851852, + "train_speed(iter/s)": 0.241884 + }, + { + "epoch": 0.79904, + "grad_norm": 0.6172641110190881, + "learning_rate": 4.387114618736963e-06, + "loss": 0.3558436334133148, + "memory(GiB)": 66.66, + "step": 2497, + "token_acc": 0.8992121706058136, + "train_speed(iter/s)": 0.241884 + }, + { + "epoch": 0.79936, + "grad_norm": 0.6425368708835462, + "learning_rate": 4.386536077340612e-06, + "loss": 0.33670830726623535, + "memory(GiB)": 66.66, + "step": 2498, + "token_acc": 0.8977469670710572, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.79968, + "grad_norm": 0.66495452614764, + "learning_rate": 4.385957301197572e-06, + "loss": 0.4176880717277527, + "memory(GiB)": 66.66, + "step": 2499, + "token_acc": 0.8687566418703507, + "train_speed(iter/s)": 0.241895 + }, + { + "epoch": 0.8, + "grad_norm": 0.6926653588601963, + "learning_rate": 4.385378290379864e-06, + "loss": 0.3967697322368622, + "memory(GiB)": 66.66, + "step": 2500, + "token_acc": 0.8387360392263689, + "train_speed(iter/s)": 0.241895 + }, + { + "epoch": 0.80032, + "grad_norm": 0.6757210550584478, + "learning_rate": 4.384799044959533e-06, + "loss": 0.4154754877090454, + "memory(GiB)": 66.66, + "step": 2501, + "token_acc": 0.8764994547437296, + "train_speed(iter/s)": 0.241892 + }, + { + "epoch": 0.80064, + "grad_norm": 0.6251277325430908, + "learning_rate": 4.3842195650086575e-06, + "loss": 0.39245331287384033, + "memory(GiB)": 66.66, + "step": 2502, + "token_acc": 0.9079072721498889, + "train_speed(iter/s)": 0.241892 + }, + { + "epoch": 0.80096, + "grad_norm": 0.6350634464200532, + "learning_rate": 4.383639850599343e-06, + "loss": 0.39024317264556885, + "memory(GiB)": 66.66, + "step": 2503, + "token_acc": 0.9288702928870293, + "train_speed(iter/s)": 0.241891 + }, + { + "epoch": 0.80128, + "grad_norm": 0.6549799868103235, + "learning_rate": 4.3830599018037256e-06, + "loss": 0.39158058166503906, + "memory(GiB)": 66.66, + "step": 2504, + "token_acc": 0.8921933085501859, + "train_speed(iter/s)": 0.241892 + }, + { + "epoch": 0.8016, + "grad_norm": 0.608844799965166, + "learning_rate": 4.382479718693969e-06, + "loss": 0.3427756726741791, + "memory(GiB)": 66.66, + "step": 2505, + "token_acc": 0.9014016433059449, + "train_speed(iter/s)": 0.241886 + }, + { + "epoch": 0.80192, + "grad_norm": 0.7314722515613213, + "learning_rate": 4.381899301342269e-06, + "loss": 0.4120371341705322, + "memory(GiB)": 66.66, + "step": 2506, + "token_acc": 0.8837209302325582, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.80224, + "grad_norm": 0.7003018260446199, + "learning_rate": 4.381318649820846e-06, + "loss": 0.34016841650009155, + "memory(GiB)": 66.66, + "step": 2507, + "token_acc": 0.8965209634255129, + "train_speed(iter/s)": 0.241895 + }, + { + "epoch": 0.80256, + "grad_norm": 0.6183617264610963, + "learning_rate": 4.3807377642019534e-06, + "loss": 0.37370967864990234, + "memory(GiB)": 66.66, + "step": 2508, + "token_acc": 0.927360774818402, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.80288, + "grad_norm": 0.6679919581221901, + "learning_rate": 4.380156644557873e-06, + "loss": 0.39290910959243774, + "memory(GiB)": 66.66, + "step": 2509, + "token_acc": 0.8528356066044508, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.8032, + "grad_norm": 0.6129431790640782, + "learning_rate": 4.379575290960913e-06, + "loss": 0.35219240188598633, + "memory(GiB)": 66.66, + "step": 2510, + "token_acc": 0.9342622365130404, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.80352, + "grad_norm": 0.6409256552836365, + 
"learning_rate": 4.378993703483413e-06, + "loss": 0.3374325633049011, + "memory(GiB)": 66.66, + "step": 2511, + "token_acc": 0.8896857923497268, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.80384, + "grad_norm": 0.6217298807702569, + "learning_rate": 4.378411882197743e-06, + "loss": 0.3572263717651367, + "memory(GiB)": 66.66, + "step": 2512, + "token_acc": 0.8285582255083179, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.80416, + "grad_norm": 0.6547023956954515, + "learning_rate": 4.3778298271762995e-06, + "loss": 0.42528364062309265, + "memory(GiB)": 66.66, + "step": 2513, + "token_acc": 0.9267589388696655, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.80448, + "grad_norm": 0.6649539497105795, + "learning_rate": 4.37724753849151e-06, + "loss": 0.40682435035705566, + "memory(GiB)": 66.66, + "step": 2514, + "token_acc": 0.8793182865039152, + "train_speed(iter/s)": 0.241884 + }, + { + "epoch": 0.8048, + "grad_norm": 0.6289090777423948, + "learning_rate": 4.37666501621583e-06, + "loss": 0.3258202373981476, + "memory(GiB)": 66.66, + "step": 2515, + "token_acc": 0.8874567745991826, + "train_speed(iter/s)": 0.241889 + }, + { + "epoch": 0.80512, + "grad_norm": 0.6697863487435537, + "learning_rate": 4.3760822604217436e-06, + "loss": 0.3811214566230774, + "memory(GiB)": 66.66, + "step": 2516, + "token_acc": 0.9391836734693878, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.80544, + "grad_norm": 0.6706605974333413, + "learning_rate": 4.375499271181765e-06, + "loss": 0.39804691076278687, + "memory(GiB)": 66.66, + "step": 2517, + "token_acc": 0.7910783055198973, + "train_speed(iter/s)": 0.24189 + }, + { + "epoch": 0.80576, + "grad_norm": 0.6160716109360458, + "learning_rate": 4.374916048568437e-06, + "loss": 0.3580317497253418, + "memory(GiB)": 66.66, + "step": 2518, + "token_acc": 0.8746482476336659, + "train_speed(iter/s)": 0.241891 + }, + { + "epoch": 0.80608, + "grad_norm": 0.6680445544906036, + "learning_rate": 4.374332592654332e-06, + "loss": 0.3706471920013428, + "memory(GiB)": 66.66, + "step": 2519, + "token_acc": 0.9159062885326757, + "train_speed(iter/s)": 0.241885 + }, + { + "epoch": 0.8064, + "grad_norm": 0.6826965142912911, + "learning_rate": 4.37374890351205e-06, + "loss": 0.475322961807251, + "memory(GiB)": 66.66, + "step": 2520, + "token_acc": 0.8687711974954344, + "train_speed(iter/s)": 0.241883 + }, + { + "epoch": 0.80672, + "grad_norm": 0.622134646633935, + "learning_rate": 4.373164981214223e-06, + "loss": 0.3979111313819885, + "memory(GiB)": 66.66, + "step": 2521, + "token_acc": 0.9501761449421238, + "train_speed(iter/s)": 0.241885 + }, + { + "epoch": 0.80704, + "grad_norm": 0.6343101200874475, + "learning_rate": 4.372580825833508e-06, + "loss": 0.3456279933452606, + "memory(GiB)": 66.66, + "step": 2522, + "token_acc": 0.869921075708857, + "train_speed(iter/s)": 0.241887 + }, + { + "epoch": 0.80736, + "grad_norm": 0.6545777160548274, + "learning_rate": 4.371996437442594e-06, + "loss": 0.43766987323760986, + "memory(GiB)": 66.66, + "step": 2523, + "token_acc": 0.9485170677112479, + "train_speed(iter/s)": 0.241888 + }, + { + "epoch": 0.80768, + "grad_norm": 0.6209994175135195, + "learning_rate": 4.371411816114196e-06, + "loss": 0.340350866317749, + "memory(GiB)": 66.66, + "step": 2524, + "token_acc": 0.8900896757875374, + "train_speed(iter/s)": 0.241892 + }, + { + "epoch": 0.808, + "grad_norm": 0.6397116989891853, + "learning_rate": 4.370826961921063e-06, + "loss": 0.45023393630981445, + "memory(GiB)": 66.66, + "step": 2525, + "token_acc": 
0.882951052258217, + "train_speed(iter/s)": 0.241894 + }, + { + "epoch": 0.80832, + "grad_norm": 0.646136511668519, + "learning_rate": 4.370241874935969e-06, + "loss": 0.4308702051639557, + "memory(GiB)": 66.66, + "step": 2526, + "token_acc": 0.8951165371809101, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.80864, + "grad_norm": 0.6883628747159346, + "learning_rate": 4.369656555231716e-06, + "loss": 0.3477991223335266, + "memory(GiB)": 66.66, + "step": 2527, + "token_acc": 0.9126808928133432, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.80896, + "grad_norm": 0.6578714671313085, + "learning_rate": 4.3690710028811394e-06, + "loss": 0.37179625034332275, + "memory(GiB)": 66.66, + "step": 2528, + "token_acc": 0.9020245842371656, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.80928, + "grad_norm": 0.5626803758573975, + "learning_rate": 4.3684852179571005e-06, + "loss": 0.35040438175201416, + "memory(GiB)": 66.66, + "step": 2529, + "token_acc": 0.9252364967958498, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.8096, + "grad_norm": 0.6676960490152285, + "learning_rate": 4.367899200532488e-06, + "loss": 0.37589216232299805, + "memory(GiB)": 66.66, + "step": 2530, + "token_acc": 0.8277641945123607, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.80992, + "grad_norm": 0.6298244175221637, + "learning_rate": 4.3673129506802245e-06, + "loss": 0.40632206201553345, + "memory(GiB)": 66.66, + "step": 2531, + "token_acc": 0.8757763975155279, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.81024, + "grad_norm": 0.6101877317016476, + "learning_rate": 4.366726468473257e-06, + "loss": 0.39226752519607544, + "memory(GiB)": 66.66, + "step": 2532, + "token_acc": 0.8644749290444654, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.81056, + "grad_norm": 0.6144475001141755, + "learning_rate": 4.366139753984564e-06, + "loss": 0.35625773668289185, + "memory(GiB)": 66.66, + "step": 2533, + "token_acc": 0.9038624787775892, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.81088, + "grad_norm": 0.5852444911238153, + "learning_rate": 4.36555280728715e-06, + "loss": 0.28513437509536743, + "memory(GiB)": 66.66, + "step": 2534, + "token_acc": 0.9204829308909243, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.8112, + "grad_norm": 0.6120068317505531, + "learning_rate": 4.364965628454053e-06, + "loss": 0.36027631163597107, + "memory(GiB)": 66.66, + "step": 2535, + "token_acc": 0.8635962979286029, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.81152, + "grad_norm": 1.2166022365693283, + "learning_rate": 4.364378217558335e-06, + "loss": 0.3889350891113281, + "memory(GiB)": 66.66, + "step": 2536, + "token_acc": 0.8545012165450122, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.81184, + "grad_norm": 0.7182765794220086, + "learning_rate": 4.36379057467309e-06, + "loss": 0.47111618518829346, + "memory(GiB)": 66.66, + "step": 2537, + "token_acc": 0.8931018730013704, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.81216, + "grad_norm": 0.6224379928992477, + "learning_rate": 4.36320269987144e-06, + "loss": 0.3979683816432953, + "memory(GiB)": 66.66, + "step": 2538, + "token_acc": 0.8992926911417986, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.81248, + "grad_norm": 0.5977016481681638, + "learning_rate": 4.362614593226536e-06, + "loss": 0.2977004051208496, + "memory(GiB)": 66.66, + "step": 2539, + "token_acc": 0.902330743618202, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.8128, + "grad_norm": 0.6544721086796439, + 
"learning_rate": 4.362026254811556e-06, + "loss": 0.42674577236175537, + "memory(GiB)": 66.66, + "step": 2540, + "token_acc": 0.8652931854199684, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.81312, + "grad_norm": 0.56437245646023, + "learning_rate": 4.361437684699712e-06, + "loss": 0.27474692463874817, + "memory(GiB)": 66.66, + "step": 2541, + "token_acc": 0.9136307818256242, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.81344, + "grad_norm": 1.4055709648630794, + "learning_rate": 4.3608488829642385e-06, + "loss": 0.41406598687171936, + "memory(GiB)": 66.66, + "step": 2542, + "token_acc": 0.88, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.81376, + "grad_norm": 0.6791156983332794, + "learning_rate": 4.360259849678402e-06, + "loss": 0.45298171043395996, + "memory(GiB)": 66.66, + "step": 2543, + "token_acc": 0.9127215022480826, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.81408, + "grad_norm": 0.5890761939756836, + "learning_rate": 4.359670584915499e-06, + "loss": 0.3414680063724518, + "memory(GiB)": 66.66, + "step": 2544, + "token_acc": 0.877220896313975, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.8144, + "grad_norm": 0.6081163747660937, + "learning_rate": 4.35908108874885e-06, + "loss": 0.3200152814388275, + "memory(GiB)": 66.66, + "step": 2545, + "token_acc": 0.9430680021085925, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.81472, + "grad_norm": 0.6316583724624821, + "learning_rate": 4.358491361251811e-06, + "loss": 0.3993412256240845, + "memory(GiB)": 66.66, + "step": 2546, + "token_acc": 0.9027689706193194, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.81504, + "grad_norm": 0.6267007941572876, + "learning_rate": 4.357901402497763e-06, + "loss": 0.40503907203674316, + "memory(GiB)": 66.66, + "step": 2547, + "token_acc": 0.9054737810487581, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.81536, + "grad_norm": 0.6675056744213816, + "learning_rate": 4.357311212560114e-06, + "loss": 0.3563908636569977, + "memory(GiB)": 66.66, + "step": 2548, + "token_acc": 0.946524064171123, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.81568, + "grad_norm": 0.6517399367763588, + "learning_rate": 4.356720791512304e-06, + "loss": 0.4044995903968811, + "memory(GiB)": 66.66, + "step": 2549, + "token_acc": 0.9318555008210181, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.816, + "grad_norm": 0.7090909791281524, + "learning_rate": 4.356130139427802e-06, + "loss": 0.46175724267959595, + "memory(GiB)": 66.66, + "step": 2550, + "token_acc": 0.8845698032709173, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.81632, + "grad_norm": 0.5932263762210305, + "learning_rate": 4.355539256380103e-06, + "loss": 0.34192317724227905, + "memory(GiB)": 66.66, + "step": 2551, + "token_acc": 0.8860677083333334, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.81664, + "grad_norm": 0.6424072341018178, + "learning_rate": 4.354948142442731e-06, + "loss": 0.3116324543952942, + "memory(GiB)": 66.66, + "step": 2552, + "token_acc": 0.8812294837361981, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.81696, + "grad_norm": 0.6393985502269458, + "learning_rate": 4.354356797689242e-06, + "loss": 0.34711694717407227, + "memory(GiB)": 66.66, + "step": 2553, + "token_acc": 0.9306107697867727, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.81728, + "grad_norm": 0.6632607978743558, + "learning_rate": 4.353765222193218e-06, + "loss": 0.3699304461479187, + "memory(GiB)": 66.66, + "step": 2554, + "token_acc": 
0.8839986352780621, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.8176, + "grad_norm": 0.6084956336065481, + "learning_rate": 4.35317341602827e-06, + "loss": 0.3562045693397522, + "memory(GiB)": 66.66, + "step": 2555, + "token_acc": 0.9312070043777361, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.81792, + "grad_norm": 0.5604724792790741, + "learning_rate": 4.3525813792680384e-06, + "loss": 0.30543601512908936, + "memory(GiB)": 66.66, + "step": 2556, + "token_acc": 0.9097546728971962, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.81824, + "grad_norm": 0.6030397682207082, + "learning_rate": 4.351989111986191e-06, + "loss": 0.35919293761253357, + "memory(GiB)": 66.66, + "step": 2557, + "token_acc": 0.8701527614571093, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.81856, + "grad_norm": 0.5569324245182307, + "learning_rate": 4.351396614256427e-06, + "loss": 0.29939529299736023, + "memory(GiB)": 66.66, + "step": 2558, + "token_acc": 0.9128375177640928, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.81888, + "grad_norm": 0.6464843271435565, + "learning_rate": 4.35080388615247e-06, + "loss": 0.3664732873439789, + "memory(GiB)": 66.66, + "step": 2559, + "token_acc": 0.8273809523809523, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.8192, + "grad_norm": 0.6182700264394201, + "learning_rate": 4.3502109277480764e-06, + "loss": 0.42619919776916504, + "memory(GiB)": 66.66, + "step": 2560, + "token_acc": 0.8931937172774869, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.81952, + "grad_norm": 0.6384613190538713, + "learning_rate": 4.349617739117029e-06, + "loss": 0.36751455068588257, + "memory(GiB)": 66.66, + "step": 2561, + "token_acc": 0.9473524962178518, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.81984, + "grad_norm": 0.6527651450532802, + "learning_rate": 4.349024320333141e-06, + "loss": 0.3691257834434509, + "memory(GiB)": 66.66, + "step": 2562, + "token_acc": 0.9043241402791965, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.82016, + "grad_norm": 0.6607404956029829, + "learning_rate": 4.348430671470251e-06, + "loss": 0.35538342595100403, + "memory(GiB)": 66.66, + "step": 2563, + "token_acc": 0.8945063694267515, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.82048, + "grad_norm": 0.5356661041979398, + "learning_rate": 4.34783679260223e-06, + "loss": 0.2415996938943863, + "memory(GiB)": 66.66, + "step": 2564, + "token_acc": 0.8925831202046036, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.8208, + "grad_norm": 0.7080289803615654, + "learning_rate": 4.347242683802975e-06, + "loss": 0.36969810724258423, + "memory(GiB)": 66.66, + "step": 2565, + "token_acc": 0.8629761578514662, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.82112, + "grad_norm": 0.6002297862129392, + "learning_rate": 4.346648345146413e-06, + "loss": 0.31757092475891113, + "memory(GiB)": 66.66, + "step": 2566, + "token_acc": 0.9281847708408517, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.82144, + "grad_norm": 0.6348684185457473, + "learning_rate": 4.346053776706499e-06, + "loss": 0.3935149610042572, + "memory(GiB)": 66.66, + "step": 2567, + "token_acc": 0.9115913555992141, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.82176, + "grad_norm": 0.6647794522602055, + "learning_rate": 4.3454589785572174e-06, + "loss": 0.4472460448741913, + "memory(GiB)": 66.66, + "step": 2568, + "token_acc": 0.8355405730797014, + "train_speed(iter/s)": 0.241921 + }, + { + "epoch": 0.82208, + "grad_norm": 0.7015102189947829, + 
"learning_rate": 4.344863950772578e-06, + "loss": 0.3052337169647217, + "memory(GiB)": 66.66, + "step": 2569, + "token_acc": 0.8570536828963795, + "train_speed(iter/s)": 0.241919 + }, + { + "epoch": 0.8224, + "grad_norm": 0.6338352641207415, + "learning_rate": 4.344268693426626e-06, + "loss": 0.31592607498168945, + "memory(GiB)": 66.66, + "step": 2570, + "token_acc": 0.9229222520107239, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.82272, + "grad_norm": 0.6534676343664461, + "learning_rate": 4.343673206593427e-06, + "loss": 0.40562039613723755, + "memory(GiB)": 66.66, + "step": 2571, + "token_acc": 0.9057093425605537, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.82304, + "grad_norm": 0.6880672309539775, + "learning_rate": 4.3430774903470805e-06, + "loss": 0.35365045070648193, + "memory(GiB)": 66.66, + "step": 2572, + "token_acc": 0.892530897367007, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.82336, + "grad_norm": 0.6219358492302446, + "learning_rate": 4.342481544761713e-06, + "loss": 0.37076786160469055, + "memory(GiB)": 66.66, + "step": 2573, + "token_acc": 0.9229504345783416, + "train_speed(iter/s)": 0.241926 + }, + { + "epoch": 0.82368, + "grad_norm": 0.6478722371743788, + "learning_rate": 4.341885369911479e-06, + "loss": 0.3535159230232239, + "memory(GiB)": 66.66, + "step": 2574, + "token_acc": 0.9215262778977682, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.824, + "grad_norm": 0.601660922856023, + "learning_rate": 4.3412889658705635e-06, + "loss": 0.3967282772064209, + "memory(GiB)": 66.66, + "step": 2575, + "token_acc": 0.9508833922261484, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.82432, + "grad_norm": 0.5930225327641309, + "learning_rate": 4.3406923327131775e-06, + "loss": 0.3676755428314209, + "memory(GiB)": 66.66, + "step": 2576, + "token_acc": 0.8758434547908233, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.82464, + "grad_norm": 0.6342511810725815, + "learning_rate": 4.340095470513562e-06, + "loss": 0.32607418298721313, + "memory(GiB)": 66.66, + "step": 2577, + "token_acc": 0.905032021957914, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.82496, + "grad_norm": 0.6188561861092942, + "learning_rate": 4.339498379345986e-06, + "loss": 0.40681758522987366, + "memory(GiB)": 66.66, + "step": 2578, + "token_acc": 0.927390950961799, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.82528, + "grad_norm": 0.6557128018675412, + "learning_rate": 4.338901059284748e-06, + "loss": 0.45666706562042236, + "memory(GiB)": 66.66, + "step": 2579, + "token_acc": 0.8264074015994982, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.8256, + "grad_norm": 0.6988764485985517, + "learning_rate": 4.338303510404172e-06, + "loss": 0.4182717800140381, + "memory(GiB)": 66.66, + "step": 2580, + "token_acc": 0.8879184861717613, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.82592, + "grad_norm": 0.6017851703351587, + "learning_rate": 4.337705732778614e-06, + "loss": 0.3056812882423401, + "memory(GiB)": 66.66, + "step": 2581, + "token_acc": 0.9194107452339688, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.82624, + "grad_norm": 0.6823503195024789, + "learning_rate": 4.337107726482458e-06, + "loss": 0.4138156771659851, + "memory(GiB)": 66.66, + "step": 2582, + "token_acc": 0.8260493292946777, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.82656, + "grad_norm": 0.5882131592668324, + "learning_rate": 4.336509491590114e-06, + "loss": 0.3436381220817566, + "memory(GiB)": 66.66, + "step": 2583, + "token_acc": 
0.9328819546658816, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.82688, + "grad_norm": 0.6219944313874092, + "learning_rate": 4.335911028176022e-06, + "loss": 0.42706388235092163, + "memory(GiB)": 66.66, + "step": 2584, + "token_acc": 0.9342021614748888, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.8272, + "grad_norm": 0.5973772841731725, + "learning_rate": 4.335312336314651e-06, + "loss": 0.349089115858078, + "memory(GiB)": 66.66, + "step": 2585, + "token_acc": 0.8587078651685394, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.82752, + "grad_norm": 0.6067786960645464, + "learning_rate": 4.334713416080498e-06, + "loss": 0.30551910400390625, + "memory(GiB)": 66.66, + "step": 2586, + "token_acc": 0.9349162011173184, + "train_speed(iter/s)": 0.241897 + }, + { + "epoch": 0.82784, + "grad_norm": 0.5990198274353161, + "learning_rate": 4.334114267548088e-06, + "loss": 0.3483770489692688, + "memory(GiB)": 66.66, + "step": 2587, + "token_acc": 0.8659700136301681, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.82816, + "grad_norm": 1.1991467527484387, + "learning_rate": 4.333514890791975e-06, + "loss": 0.39251604676246643, + "memory(GiB)": 66.66, + "step": 2588, + "token_acc": 0.8572751118654529, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.82848, + "grad_norm": 0.6765031046014794, + "learning_rate": 4.332915285886739e-06, + "loss": 0.42317691445350647, + "memory(GiB)": 66.66, + "step": 2589, + "token_acc": 0.8671645772205921, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.8288, + "grad_norm": 0.6584954055261868, + "learning_rate": 4.332315452906993e-06, + "loss": 0.4205325245857239, + "memory(GiB)": 66.66, + "step": 2590, + "token_acc": 0.8231124807395994, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.82912, + "grad_norm": 0.6225250697757372, + "learning_rate": 4.331715391927375e-06, + "loss": 0.3045922517776489, + "memory(GiB)": 66.66, + "step": 2591, + "token_acc": 0.9136160714285714, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.82944, + "grad_norm": 0.6518504207098849, + "learning_rate": 4.331115103022552e-06, + "loss": 0.3939827084541321, + "memory(GiB)": 66.66, + "step": 2592, + "token_acc": 0.8472505091649695, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.82976, + "grad_norm": 0.6297799025894838, + "learning_rate": 4.330514586267218e-06, + "loss": 0.3792271912097931, + "memory(GiB)": 66.66, + "step": 2593, + "token_acc": 0.9537815126050421, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.83008, + "grad_norm": 0.6602122465937522, + "learning_rate": 4.3299138417361e-06, + "loss": 0.4199506938457489, + "memory(GiB)": 66.66, + "step": 2594, + "token_acc": 0.9185270425776755, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.8304, + "grad_norm": 0.6336091756200902, + "learning_rate": 4.329312869503948e-06, + "loss": 0.2834576368331909, + "memory(GiB)": 66.66, + "step": 2595, + "token_acc": 0.8836594394500265, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.83072, + "grad_norm": 0.6629364690279814, + "learning_rate": 4.328711669645545e-06, + "loss": 0.4663510322570801, + "memory(GiB)": 66.66, + "step": 2596, + "token_acc": 0.8255951375991896, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.83104, + "grad_norm": 0.5988464102068844, + "learning_rate": 4.328110242235696e-06, + "loss": 0.36785605549812317, + "memory(GiB)": 66.66, + "step": 2597, + "token_acc": 0.8646159513722601, + "train_speed(iter/s)": 0.241896 + }, + { + "epoch": 0.83136, + "grad_norm": 0.6532640170676822, + 
"learning_rate": 4.3275085873492406e-06, + "loss": 0.3795081377029419, + "memory(GiB)": 66.66, + "step": 2598, + "token_acc": 0.8490092801605217, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.83168, + "grad_norm": 0.7371224259030997, + "learning_rate": 4.326906705061045e-06, + "loss": 0.4198778569698334, + "memory(GiB)": 66.66, + "step": 2599, + "token_acc": 0.8131055583885772, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.832, + "grad_norm": 0.5971890445521167, + "learning_rate": 4.326304595446001e-06, + "loss": 0.35708269476890564, + "memory(GiB)": 66.66, + "step": 2600, + "token_acc": 0.863455497382199, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.83232, + "grad_norm": 0.6465385440290912, + "learning_rate": 4.325702258579032e-06, + "loss": 0.3196990489959717, + "memory(GiB)": 66.66, + "step": 2601, + "token_acc": 0.9285899766294469, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.83264, + "grad_norm": 0.670496975022721, + "learning_rate": 4.325099694535089e-06, + "loss": 0.4136509299278259, + "memory(GiB)": 66.66, + "step": 2602, + "token_acc": 0.8938736131210806, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.83296, + "grad_norm": 0.5996942842636827, + "learning_rate": 4.324496903389148e-06, + "loss": 0.2988620698451996, + "memory(GiB)": 66.66, + "step": 2603, + "token_acc": 0.93428501107556, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.83328, + "grad_norm": 0.619152937636698, + "learning_rate": 4.3238938852162195e-06, + "loss": 0.35555800795555115, + "memory(GiB)": 66.66, + "step": 2604, + "token_acc": 0.9029522613065326, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.8336, + "grad_norm": 0.607645021457939, + "learning_rate": 4.323290640091335e-06, + "loss": 0.3514510691165924, + "memory(GiB)": 66.66, + "step": 2605, + "token_acc": 0.9460431654676259, + "train_speed(iter/s)": 0.241898 + }, + { + "epoch": 0.83392, + "grad_norm": 0.6280736989646046, + "learning_rate": 4.322687168089561e-06, + "loss": 0.291256844997406, + "memory(GiB)": 66.66, + "step": 2606, + "token_acc": 0.9175006102025872, + "train_speed(iter/s)": 0.2419 + }, + { + "epoch": 0.83424, + "grad_norm": 0.6917641594780229, + "learning_rate": 4.322083469285988e-06, + "loss": 0.3592594265937805, + "memory(GiB)": 66.66, + "step": 2607, + "token_acc": 0.8930279385854518, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.83456, + "grad_norm": 0.6697996409790153, + "learning_rate": 4.3214795437557356e-06, + "loss": 0.3139106035232544, + "memory(GiB)": 66.66, + "step": 2608, + "token_acc": 0.9280347963621985, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.83488, + "grad_norm": 0.6418871429970715, + "learning_rate": 4.320875391573951e-06, + "loss": 0.3353157341480255, + "memory(GiB)": 66.66, + "step": 2609, + "token_acc": 0.9126016260162602, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.8352, + "grad_norm": 0.6086693765426863, + "learning_rate": 4.320271012815812e-06, + "loss": 0.3180418014526367, + "memory(GiB)": 66.66, + "step": 2610, + "token_acc": 0.896774193548387, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.83552, + "grad_norm": 0.6874016809989176, + "learning_rate": 4.319666407556523e-06, + "loss": 0.41051632165908813, + "memory(GiB)": 66.66, + "step": 2611, + "token_acc": 0.9214697406340058, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.83584, + "grad_norm": 0.6197173249441624, + "learning_rate": 4.319061575871315e-06, + "loss": 0.37518489360809326, + "memory(GiB)": 66.66, + "step": 2612, + "token_acc": 
0.9038251366120219, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.83616, + "grad_norm": 0.6370486069374685, + "learning_rate": 4.3184565178354506e-06, + "loss": 0.49059054255485535, + "memory(GiB)": 66.66, + "step": 2613, + "token_acc": 0.8319194061505832, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.83648, + "grad_norm": 0.643540289442154, + "learning_rate": 4.317851233524217e-06, + "loss": 0.4225635826587677, + "memory(GiB)": 66.66, + "step": 2614, + "token_acc": 0.9262981574539364, + "train_speed(iter/s)": 0.241902 + }, + { + "epoch": 0.8368, + "grad_norm": 0.6121761329506159, + "learning_rate": 4.317245723012934e-06, + "loss": 0.35521620512008667, + "memory(GiB)": 66.66, + "step": 2615, + "token_acc": 0.8694646397884996, + "train_speed(iter/s)": 0.241899 + }, + { + "epoch": 0.83712, + "grad_norm": 0.6088456092731096, + "learning_rate": 4.316639986376945e-06, + "loss": 0.30063217878341675, + "memory(GiB)": 66.66, + "step": 2616, + "token_acc": 0.9131164742917104, + "train_speed(iter/s)": 0.241901 + }, + { + "epoch": 0.83744, + "grad_norm": 0.6093793050754798, + "learning_rate": 4.316034023691623e-06, + "loss": 0.3018786311149597, + "memory(GiB)": 66.66, + "step": 2617, + "token_acc": 0.9040404040404041, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.83776, + "grad_norm": 0.6141899065982225, + "learning_rate": 4.31542783503237e-06, + "loss": 0.33316293358802795, + "memory(GiB)": 66.66, + "step": 2618, + "token_acc": 0.9066232356134636, + "train_speed(iter/s)": 0.241906 + }, + { + "epoch": 0.83808, + "grad_norm": 0.6107732431090777, + "learning_rate": 4.314821420474616e-06, + "loss": 0.28905636072158813, + "memory(GiB)": 66.66, + "step": 2619, + "token_acc": 0.9510888968225634, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.8384, + "grad_norm": 0.577441912579924, + "learning_rate": 4.314214780093819e-06, + "loss": 0.28335070610046387, + "memory(GiB)": 66.66, + "step": 2620, + "token_acc": 0.9167180752621839, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.83872, + "grad_norm": 0.6267660335139318, + "learning_rate": 4.313607913965465e-06, + "loss": 0.3296903371810913, + "memory(GiB)": 66.66, + "step": 2621, + "token_acc": 0.9155184916606236, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.83904, + "grad_norm": 0.6738861047398864, + "learning_rate": 4.313000822165067e-06, + "loss": 0.4897744655609131, + "memory(GiB)": 66.66, + "step": 2622, + "token_acc": 0.8867155664221679, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.83936, + "grad_norm": 0.6351337454546649, + "learning_rate": 4.312393504768167e-06, + "loss": 0.3748997449874878, + "memory(GiB)": 66.66, + "step": 2623, + "token_acc": 0.8464411703672962, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.83968, + "grad_norm": 0.5788235150345312, + "learning_rate": 4.3117859618503365e-06, + "loss": 0.42125892639160156, + "memory(GiB)": 66.66, + "step": 2624, + "token_acc": 0.7958758591960008, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.84, + "grad_norm": 0.6321455224851676, + "learning_rate": 4.311178193487173e-06, + "loss": 0.36763590574264526, + "memory(GiB)": 66.66, + "step": 2625, + "token_acc": 0.8754171301446051, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.84032, + "grad_norm": 0.5995364951112382, + "learning_rate": 4.310570199754302e-06, + "loss": 0.3292451500892639, + "memory(GiB)": 66.66, + "step": 2626, + "token_acc": 0.90646492434663, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.84064, + "grad_norm": 0.5949206826647869, + 
"learning_rate": 4.3099619807273785e-06, + "loss": 0.36967018246650696, + "memory(GiB)": 66.66, + "step": 2627, + "token_acc": 0.8587524045067326, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.84096, + "grad_norm": 0.6945944253926968, + "learning_rate": 4.309353536482085e-06, + "loss": 0.4345400333404541, + "memory(GiB)": 66.66, + "step": 2628, + "token_acc": 0.8133817009270455, + "train_speed(iter/s)": 0.241913 + }, + { + "epoch": 0.84128, + "grad_norm": 0.6519830603578652, + "learning_rate": 4.30874486709413e-06, + "loss": 0.4034682512283325, + "memory(GiB)": 66.66, + "step": 2629, + "token_acc": 0.8469719350073855, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.8416, + "grad_norm": 0.6308248476614589, + "learning_rate": 4.308135972639255e-06, + "loss": 0.36140817403793335, + "memory(GiB)": 66.66, + "step": 2630, + "token_acc": 0.8227880658436214, + "train_speed(iter/s)": 0.241912 + }, + { + "epoch": 0.84192, + "grad_norm": 0.6354580854624036, + "learning_rate": 4.307526853193224e-06, + "loss": 0.3653317987918854, + "memory(GiB)": 66.66, + "step": 2631, + "token_acc": 0.9107142857142857, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.84224, + "grad_norm": 0.6441828634337772, + "learning_rate": 4.306917508831832e-06, + "loss": 0.390286386013031, + "memory(GiB)": 66.66, + "step": 2632, + "token_acc": 0.9247176913425345, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.84256, + "grad_norm": 0.6006882096585077, + "learning_rate": 4.306307939630901e-06, + "loss": 0.445855975151062, + "memory(GiB)": 66.66, + "step": 2633, + "token_acc": 0.8463667820069204, + "train_speed(iter/s)": 0.241904 + }, + { + "epoch": 0.84288, + "grad_norm": 0.6274681725646795, + "learning_rate": 4.3056981456662825e-06, + "loss": 0.3528636395931244, + "memory(GiB)": 66.66, + "step": 2634, + "token_acc": 0.855249916135525, + "train_speed(iter/s)": 0.241903 + }, + { + "epoch": 0.8432, + "grad_norm": 0.7043214608521889, + "learning_rate": 4.3050881270138535e-06, + "loss": 0.3944837749004364, + "memory(GiB)": 66.66, + "step": 2635, + "token_acc": 0.8380476982806434, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.84352, + "grad_norm": 0.6643895016798901, + "learning_rate": 4.304477883749521e-06, + "loss": 0.4135827422142029, + "memory(GiB)": 66.66, + "step": 2636, + "token_acc": 0.8745334481768591, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.84384, + "grad_norm": 0.5511167978405869, + "learning_rate": 4.303867415949219e-06, + "loss": 0.30548858642578125, + "memory(GiB)": 66.66, + "step": 2637, + "token_acc": 0.9441298603246508, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.84416, + "grad_norm": 0.629397914358916, + "learning_rate": 4.303256723688909e-06, + "loss": 0.4032500386238098, + "memory(GiB)": 66.66, + "step": 2638, + "token_acc": 0.8575676479333928, + "train_speed(iter/s)": 0.241905 + }, + { + "epoch": 0.84448, + "grad_norm": 0.6626357163572345, + "learning_rate": 4.302645807044582e-06, + "loss": 0.33329689502716064, + "memory(GiB)": 66.66, + "step": 2639, + "token_acc": 0.9223263075722092, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.8448, + "grad_norm": 0.557378160172938, + "learning_rate": 4.302034666092255e-06, + "loss": 0.22703176736831665, + "memory(GiB)": 66.66, + "step": 2640, + "token_acc": 0.9254623044096728, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.84512, + "grad_norm": 0.6557011808191883, + "learning_rate": 4.301423300907975e-06, + "loss": 0.42500537633895874, + "memory(GiB)": 66.66, + "step": 2641, + "token_acc": 
0.8384976525821596, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.84544, + "grad_norm": 0.5919103462296917, + "learning_rate": 4.300811711567815e-06, + "loss": 0.39680230617523193, + "memory(GiB)": 66.66, + "step": 2642, + "token_acc": 0.9241462677546086, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.84576, + "grad_norm": 0.6898554517365441, + "learning_rate": 4.300199898147877e-06, + "loss": 0.3859935402870178, + "memory(GiB)": 66.66, + "step": 2643, + "token_acc": 0.8812949640287769, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.84608, + "grad_norm": 0.5865797057421384, + "learning_rate": 4.29958786072429e-06, + "loss": 0.28775399923324585, + "memory(GiB)": 66.66, + "step": 2644, + "token_acc": 0.9513242662848962, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.8464, + "grad_norm": 0.6655986454171817, + "learning_rate": 4.298975599373212e-06, + "loss": 0.34131479263305664, + "memory(GiB)": 66.66, + "step": 2645, + "token_acc": 0.9035676439420699, + "train_speed(iter/s)": 0.241915 + }, + { + "epoch": 0.84672, + "grad_norm": 0.6798847544088599, + "learning_rate": 4.298363114170828e-06, + "loss": 0.48895055055618286, + "memory(GiB)": 66.66, + "step": 2646, + "token_acc": 0.8768208535650396, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.84704, + "grad_norm": 0.5902911638521231, + "learning_rate": 4.297750405193352e-06, + "loss": 0.41538989543914795, + "memory(GiB)": 66.66, + "step": 2647, + "token_acc": 0.938884331419196, + "train_speed(iter/s)": 0.241916 + }, + { + "epoch": 0.84736, + "grad_norm": 0.631765578611971, + "learning_rate": 4.297137472517024e-06, + "loss": 0.4133908152580261, + "memory(GiB)": 66.66, + "step": 2648, + "token_acc": 0.8671611598111936, + "train_speed(iter/s)": 0.241907 + }, + { + "epoch": 0.84768, + "grad_norm": 0.623055428143679, + "learning_rate": 4.296524316218114e-06, + "loss": 0.41531556844711304, + "memory(GiB)": 66.66, + "step": 2649, + "token_acc": 0.8190310666956333, + "train_speed(iter/s)": 0.241908 + }, + { + "epoch": 0.848, + "grad_norm": 0.6263543860631572, + "learning_rate": 4.295910936372917e-06, + "loss": 0.4477734863758087, + "memory(GiB)": 66.66, + "step": 2650, + "token_acc": 0.9188826215417674, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.84832, + "grad_norm": 0.6461642760500399, + "learning_rate": 4.29529733305776e-06, + "loss": 0.35652798414230347, + "memory(GiB)": 66.66, + "step": 2651, + "token_acc": 0.9133185700727617, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.84864, + "grad_norm": 0.5686120168007497, + "learning_rate": 4.294683506348992e-06, + "loss": 0.3393004536628723, + "memory(GiB)": 66.66, + "step": 2652, + "token_acc": 0.8185177135405347, + "train_speed(iter/s)": 0.241909 + }, + { + "epoch": 0.84896, + "grad_norm": 0.6457352853220938, + "learning_rate": 4.294069456322994e-06, + "loss": 0.40521958470344543, + "memory(GiB)": 66.66, + "step": 2653, + "token_acc": 0.8384919711426577, + "train_speed(iter/s)": 0.24191 + }, + { + "epoch": 0.84928, + "grad_norm": 0.6692294491675554, + "learning_rate": 4.293455183056176e-06, + "loss": 0.3810324966907501, + "memory(GiB)": 66.66, + "step": 2654, + "token_acc": 0.9085014409221902, + "train_speed(iter/s)": 0.241911 + }, + { + "epoch": 0.8496, + "grad_norm": 0.6154358961072727, + "learning_rate": 4.2928406866249725e-06, + "loss": 0.33151835203170776, + "memory(GiB)": 66.66, + "step": 2655, + "token_acc": 0.9098049151254117, + "train_speed(iter/s)": 0.241914 + }, + { + "epoch": 0.84992, + "grad_norm": 0.6079726762457529, + 
"learning_rate": 4.292225967105846e-06, + "loss": 0.318006694316864, + "memory(GiB)": 66.66, + "step": 2656, + "token_acc": 0.8694021101992966, + "train_speed(iter/s)": 0.241917 + }, + { + "epoch": 0.85024, + "grad_norm": 0.5923331822190598, + "learning_rate": 4.2916110245752886e-06, + "loss": 0.3549345135688782, + "memory(GiB)": 66.66, + "step": 2657, + "token_acc": 0.9334840167904424, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.85056, + "grad_norm": 0.6999052416830922, + "learning_rate": 4.290995859109819e-06, + "loss": 0.46636080741882324, + "memory(GiB)": 66.66, + "step": 2658, + "token_acc": 0.8566151004386977, + "train_speed(iter/s)": 0.241918 + }, + { + "epoch": 0.85088, + "grad_norm": 0.5751057893321181, + "learning_rate": 4.290380470785984e-06, + "loss": 0.32378697395324707, + "memory(GiB)": 66.66, + "step": 2659, + "token_acc": 0.9380300065231572, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.8512, + "grad_norm": 0.6604097622439502, + "learning_rate": 4.289764859680358e-06, + "loss": 0.4226677715778351, + "memory(GiB)": 66.66, + "step": 2660, + "token_acc": 0.8358855822016759, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.85152, + "grad_norm": 0.6629670000723643, + "learning_rate": 4.2891490258695424e-06, + "loss": 0.5170182585716248, + "memory(GiB)": 66.66, + "step": 2661, + "token_acc": 0.8069366965801601, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.85184, + "grad_norm": 0.7074547904311246, + "learning_rate": 4.2885329694301675e-06, + "loss": 0.451351523399353, + "memory(GiB)": 66.66, + "step": 2662, + "token_acc": 0.9390862944162437, + "train_speed(iter/s)": 0.241923 + }, + { + "epoch": 0.85216, + "grad_norm": 0.6208592552059723, + "learning_rate": 4.287916690438891e-06, + "loss": 0.4204781949520111, + "memory(GiB)": 66.66, + "step": 2663, + "token_acc": 0.8754208754208754, + "train_speed(iter/s)": 0.24192 + }, + { + "epoch": 0.85248, + "grad_norm": 0.6635202253909964, + "learning_rate": 4.287300188972399e-06, + "loss": 0.405011922121048, + "memory(GiB)": 66.66, + "step": 2664, + "token_acc": 0.8242117787031529, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.8528, + "grad_norm": 0.5771617371457657, + "learning_rate": 4.286683465107403e-06, + "loss": 0.3581511974334717, + "memory(GiB)": 66.66, + "step": 2665, + "token_acc": 0.8900445765230312, + "train_speed(iter/s)": 0.241922 + }, + { + "epoch": 0.85312, + "grad_norm": 0.6021182911124487, + "learning_rate": 4.286066518920644e-06, + "loss": 0.1994592547416687, + "memory(GiB)": 66.66, + "step": 2666, + "token_acc": 0.9515852613538989, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.85344, + "grad_norm": 0.6111713443679595, + "learning_rate": 4.285449350488891e-06, + "loss": 0.4362776279449463, + "memory(GiB)": 66.66, + "step": 2667, + "token_acc": 0.9003446295232624, + "train_speed(iter/s)": 0.241925 + }, + { + "epoch": 0.85376, + "grad_norm": 0.6239504113865674, + "learning_rate": 4.284831959888938e-06, + "loss": 0.3832011818885803, + "memory(GiB)": 66.66, + "step": 2668, + "token_acc": 0.8366740905716407, + "train_speed(iter/s)": 0.24193 + }, + { + "epoch": 0.85408, + "grad_norm": 0.6592534567449533, + "learning_rate": 4.28421434719761e-06, + "loss": 0.40083926916122437, + "memory(GiB)": 66.66, + "step": 2669, + "token_acc": 0.8731859790131726, + "train_speed(iter/s)": 0.241934 + }, + { + "epoch": 0.8544, + "grad_norm": 0.6656730568513511, + "learning_rate": 4.2835965124917585e-06, + "loss": 0.3303273022174835, + "memory(GiB)": 66.66, + "step": 2670, + "token_acc": 
0.9305699481865285, + "train_speed(iter/s)": 0.241935 + }, + { + "epoch": 0.85472, + "grad_norm": 0.6302564253161044, + "learning_rate": 4.282978455848262e-06, + "loss": 0.3613819479942322, + "memory(GiB)": 66.66, + "step": 2671, + "token_acc": 0.8490932642487047, + "train_speed(iter/s)": 0.241939 + }, + { + "epoch": 0.85504, + "grad_norm": 0.6869848524567415, + "learning_rate": 4.282360177344026e-06, + "loss": 0.37360453605651855, + "memory(GiB)": 66.66, + "step": 2672, + "token_acc": 0.9153976311336718, + "train_speed(iter/s)": 0.241942 + }, + { + "epoch": 0.85536, + "grad_norm": 0.602158044819893, + "learning_rate": 4.281741677055986e-06, + "loss": 0.4023306369781494, + "memory(GiB)": 66.66, + "step": 2673, + "token_acc": 0.911796420099202, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.85568, + "grad_norm": 0.6227041722052477, + "learning_rate": 4.281122955061102e-06, + "loss": 0.36169782280921936, + "memory(GiB)": 66.66, + "step": 2674, + "token_acc": 0.9036711641041733, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.856, + "grad_norm": 0.7000916543946196, + "learning_rate": 4.280504011436365e-06, + "loss": 0.3920516073703766, + "memory(GiB)": 66.66, + "step": 2675, + "token_acc": 0.8940623665100385, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.85632, + "grad_norm": 0.6305893509261379, + "learning_rate": 4.279884846258791e-06, + "loss": 0.37687334418296814, + "memory(GiB)": 66.66, + "step": 2676, + "token_acc": 0.8811239193083573, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.85664, + "grad_norm": 0.627782821070806, + "learning_rate": 4.279265459605424e-06, + "loss": 0.34372395277023315, + "memory(GiB)": 66.66, + "step": 2677, + "token_acc": 0.8830520560969812, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.85696, + "grad_norm": 0.5812140077071948, + "learning_rate": 4.278645851553336e-06, + "loss": 0.341675728559494, + "memory(GiB)": 66.66, + "step": 2678, + "token_acc": 0.8114161849710982, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.85728, + "grad_norm": 0.5834619537044832, + "learning_rate": 4.278026022179627e-06, + "loss": 0.389030784368515, + "memory(GiB)": 66.66, + "step": 2679, + "token_acc": 0.9291170180262756, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.8576, + "grad_norm": 0.6550241252801293, + "learning_rate": 4.277405971561423e-06, + "loss": 0.35168561339378357, + "memory(GiB)": 66.66, + "step": 2680, + "token_acc": 0.872557003257329, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.85792, + "grad_norm": 0.6158129508272479, + "learning_rate": 4.27678569977588e-06, + "loss": 0.44698935747146606, + "memory(GiB)": 66.66, + "step": 2681, + "token_acc": 0.8473282442748091, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.85824, + "grad_norm": 0.6327133197404478, + "learning_rate": 4.276165206900178e-06, + "loss": 0.42184072732925415, + "memory(GiB)": 66.66, + "step": 2682, + "token_acc": 0.8920454545454546, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.85856, + "grad_norm": 0.6333309623763489, + "learning_rate": 4.27554449301153e-06, + "loss": 0.5103187561035156, + "memory(GiB)": 66.66, + "step": 2683, + "token_acc": 0.821689259645464, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.85888, + "grad_norm": 0.5750146431303439, + "learning_rate": 4.274923558187169e-06, + "loss": 0.3415602445602417, + "memory(GiB)": 66.66, + "step": 2684, + "token_acc": 0.8685104318292092, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.8592, + "grad_norm": 0.6004059483383978, + 
"learning_rate": 4.274302402504362e-06, + "loss": 0.3801884055137634, + "memory(GiB)": 66.66, + "step": 2685, + "token_acc": 0.898375103277334, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.85952, + "grad_norm": 0.6484613290610767, + "learning_rate": 4.273681026040401e-06, + "loss": 0.33811724185943604, + "memory(GiB)": 66.66, + "step": 2686, + "token_acc": 0.9590924716397388, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.85984, + "grad_norm": 0.6322400133266121, + "learning_rate": 4.273059428872605e-06, + "loss": 0.4019932150840759, + "memory(GiB)": 66.66, + "step": 2687, + "token_acc": 0.860876558102131, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.86016, + "grad_norm": 0.6552391885234631, + "learning_rate": 4.27243761107832e-06, + "loss": 0.3164243698120117, + "memory(GiB)": 66.66, + "step": 2688, + "token_acc": 0.8862629246676514, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.86048, + "grad_norm": 0.6412067170996973, + "learning_rate": 4.271815572734923e-06, + "loss": 0.34415045380592346, + "memory(GiB)": 66.66, + "step": 2689, + "token_acc": 0.865979381443299, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.8608, + "grad_norm": 0.6574235995355644, + "learning_rate": 4.271193313919814e-06, + "loss": 0.4226762652397156, + "memory(GiB)": 66.66, + "step": 2690, + "token_acc": 0.8686818632309217, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.86112, + "grad_norm": 0.5852152252013813, + "learning_rate": 4.270570834710423e-06, + "loss": 0.28654566407203674, + "memory(GiB)": 66.66, + "step": 2691, + "token_acc": 0.8674932196822936, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.86144, + "grad_norm": 0.6016068124467884, + "learning_rate": 4.269948135184205e-06, + "loss": 0.41184496879577637, + "memory(GiB)": 66.66, + "step": 2692, + "token_acc": 0.9278698588090853, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.86176, + "grad_norm": 0.6181459590395347, + "learning_rate": 4.269325215418647e-06, + "loss": 0.4093039035797119, + "memory(GiB)": 66.66, + "step": 2693, + "token_acc": 0.9147496617050067, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.86208, + "grad_norm": 0.5894536960297289, + "learning_rate": 4.268702075491259e-06, + "loss": 0.37303587794303894, + "memory(GiB)": 66.66, + "step": 2694, + "token_acc": 0.8985324947589098, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.8624, + "grad_norm": 0.6055188992345372, + "learning_rate": 4.26807871547958e-06, + "loss": 0.3592067062854767, + "memory(GiB)": 66.66, + "step": 2695, + "token_acc": 0.8338084378563284, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.86272, + "grad_norm": 0.5929986484285298, + "learning_rate": 4.267455135461176e-06, + "loss": 0.31712836027145386, + "memory(GiB)": 66.66, + "step": 2696, + "token_acc": 0.896402398401066, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.86304, + "grad_norm": 0.6407763734495161, + "learning_rate": 4.266831335513641e-06, + "loss": 0.3340831398963928, + "memory(GiB)": 66.66, + "step": 2697, + "token_acc": 0.8666980687706076, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.86336, + "grad_norm": 0.596326371729916, + "learning_rate": 4.266207315714596e-06, + "loss": 0.3483515977859497, + "memory(GiB)": 66.66, + "step": 2698, + "token_acc": 0.8703782405439864, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.86368, + "grad_norm": 0.6360036503438792, + "learning_rate": 4.26558307614169e-06, + "loss": 0.4267374575138092, + "memory(GiB)": 66.66, + "step": 2699, + "token_acc": 
0.9281984334203656, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.864, + "grad_norm": 0.8117041799971234, + "learning_rate": 4.264958616872599e-06, + "loss": 0.4933997094631195, + "memory(GiB)": 66.66, + "step": 2700, + "token_acc": 0.7550923732828043, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.86432, + "grad_norm": 0.6024249563542547, + "learning_rate": 4.264333937985026e-06, + "loss": 0.3833807110786438, + "memory(GiB)": 66.66, + "step": 2701, + "token_acc": 0.8349781960064264, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.86464, + "grad_norm": 0.603581395946252, + "learning_rate": 4.2637090395567e-06, + "loss": 0.3722117245197296, + "memory(GiB)": 66.66, + "step": 2702, + "token_acc": 0.9124564459930313, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.86496, + "grad_norm": 0.6775497505439603, + "learning_rate": 4.263083921665381e-06, + "loss": 0.4034203290939331, + "memory(GiB)": 66.66, + "step": 2703, + "token_acc": 0.8785046728971962, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.86528, + "grad_norm": 0.680015321781127, + "learning_rate": 4.262458584388852e-06, + "loss": 0.3293622136116028, + "memory(GiB)": 66.66, + "step": 2704, + "token_acc": 0.9016018306636155, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.8656, + "grad_norm": 0.7470332644464485, + "learning_rate": 4.261833027804926e-06, + "loss": 0.3542885184288025, + "memory(GiB)": 66.66, + "step": 2705, + "token_acc": 0.9253503960999391, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.86592, + "grad_norm": 0.6553443991849813, + "learning_rate": 4.261207251991443e-06, + "loss": 0.4047054350376129, + "memory(GiB)": 66.66, + "step": 2706, + "token_acc": 0.9188732394366197, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.86624, + "grad_norm": 0.6113425595286135, + "learning_rate": 4.26058125702627e-06, + "loss": 0.3857710361480713, + "memory(GiB)": 66.66, + "step": 2707, + "token_acc": 0.8890719384953323, + "train_speed(iter/s)": 0.241955 + }, + { + "epoch": 0.86656, + "grad_norm": 0.6430852826668572, + "learning_rate": 4.259955042987302e-06, + "loss": 0.38054466247558594, + "memory(GiB)": 66.66, + "step": 2708, + "token_acc": 0.856988082340195, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.86688, + "grad_norm": 0.6130803970270883, + "learning_rate": 4.259328609952458e-06, + "loss": 0.36988842487335205, + "memory(GiB)": 66.66, + "step": 2709, + "token_acc": 0.9436828454983327, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.8672, + "grad_norm": 0.6588127369444515, + "learning_rate": 4.258701957999689e-06, + "loss": 0.48945164680480957, + "memory(GiB)": 66.66, + "step": 2710, + "token_acc": 0.8214397008413836, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.86752, + "grad_norm": 0.6421426061331666, + "learning_rate": 4.258075087206971e-06, + "loss": 0.30105406045913696, + "memory(GiB)": 66.66, + "step": 2711, + "token_acc": 0.928821099459803, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.86784, + "grad_norm": 0.6633776063167705, + "learning_rate": 4.257447997652306e-06, + "loss": 0.3678281903266907, + "memory(GiB)": 66.66, + "step": 2712, + "token_acc": 0.8667110963012329, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.86816, + "grad_norm": 0.618141037649668, + "learning_rate": 4.2568206894137235e-06, + "loss": 0.330152690410614, + "memory(GiB)": 66.66, + "step": 2713, + "token_acc": 0.8758647194465795, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.86848, + "grad_norm": 0.6459200780612091, + 
"learning_rate": 4.256193162569284e-06, + "loss": 0.35358691215515137, + "memory(GiB)": 66.66, + "step": 2714, + "token_acc": 0.9284994964753273, + "train_speed(iter/s)": 0.241973 + }, + { + "epoch": 0.8688, + "grad_norm": 0.6416472143202769, + "learning_rate": 4.2555654171970705e-06, + "loss": 0.38863033056259155, + "memory(GiB)": 66.66, + "step": 2715, + "token_acc": 0.8578720062819003, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.86912, + "grad_norm": 0.6875590911573871, + "learning_rate": 4.254937453375195e-06, + "loss": 0.49018800258636475, + "memory(GiB)": 66.66, + "step": 2716, + "token_acc": 0.8961713173264114, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.86944, + "grad_norm": 0.6574153451645752, + "learning_rate": 4.2543092711817965e-06, + "loss": 0.41659149527549744, + "memory(GiB)": 66.66, + "step": 2717, + "token_acc": 0.8883288096161303, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.86976, + "grad_norm": 0.626790184674263, + "learning_rate": 4.253680870695043e-06, + "loss": 0.42807430028915405, + "memory(GiB)": 66.66, + "step": 2718, + "token_acc": 0.8637969804047543, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.87008, + "grad_norm": 0.5843387137282307, + "learning_rate": 4.253052251993126e-06, + "loss": 0.2970905005931854, + "memory(GiB)": 66.66, + "step": 2719, + "token_acc": 0.8962912087912088, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.8704, + "grad_norm": 0.6634761625244867, + "learning_rate": 4.2524234151542685e-06, + "loss": 0.44301682710647583, + "memory(GiB)": 66.66, + "step": 2720, + "token_acc": 0.8886442277445737, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.87072, + "grad_norm": 0.6023722090673516, + "learning_rate": 4.251794360256717e-06, + "loss": 0.3395492732524872, + "memory(GiB)": 66.66, + "step": 2721, + "token_acc": 0.9312596006144394, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.87104, + "grad_norm": 0.6215763986987292, + "learning_rate": 4.251165087378745e-06, + "loss": 0.2806827425956726, + "memory(GiB)": 66.66, + "step": 2722, + "token_acc": 0.8686548223350253, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.87136, + "grad_norm": 0.5925866094256642, + "learning_rate": 4.250535596598658e-06, + "loss": 0.30444610118865967, + "memory(GiB)": 66.66, + "step": 2723, + "token_acc": 0.9216886883660973, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.87168, + "grad_norm": 0.6604563642385592, + "learning_rate": 4.2499058879947805e-06, + "loss": 0.4126582145690918, + "memory(GiB)": 66.66, + "step": 2724, + "token_acc": 0.7605633802816901, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.872, + "grad_norm": 0.5947233946626903, + "learning_rate": 4.2492759616454735e-06, + "loss": 0.37414366006851196, + "memory(GiB)": 66.66, + "step": 2725, + "token_acc": 0.867650346523586, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.87232, + "grad_norm": 0.6385019838291407, + "learning_rate": 4.2486458176291176e-06, + "loss": 0.44996678829193115, + "memory(GiB)": 66.66, + "step": 2726, + "token_acc": 0.8347146578261899, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.87264, + "grad_norm": 0.7117124164235423, + "learning_rate": 4.248015456024125e-06, + "loss": 0.3658551871776581, + "memory(GiB)": 66.66, + "step": 2727, + "token_acc": 0.875, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.87296, + "grad_norm": 0.6209083909645375, + "learning_rate": 4.247384876908932e-06, + "loss": 0.392792671918869, + "memory(GiB)": 66.66, + "step": 2728, + "token_acc": 
0.8758965442295154, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.87328, + "grad_norm": 0.6308198815509362, + "learning_rate": 4.246754080362004e-06, + "loss": 0.3949275016784668, + "memory(GiB)": 66.66, + "step": 2729, + "token_acc": 0.8745901639344262, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.8736, + "grad_norm": 0.6255779679407834, + "learning_rate": 4.246123066461832e-06, + "loss": 0.2951090633869171, + "memory(GiB)": 66.66, + "step": 2730, + "token_acc": 0.9092331768388107, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.87392, + "grad_norm": 0.637191982012142, + "learning_rate": 4.245491835286935e-06, + "loss": 0.36921730637550354, + "memory(GiB)": 66.66, + "step": 2731, + "token_acc": 0.873507747015494, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.87424, + "grad_norm": 0.6414761564230752, + "learning_rate": 4.2448603869158585e-06, + "loss": 0.38242679834365845, + "memory(GiB)": 66.66, + "step": 2732, + "token_acc": 0.9058581706063721, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.87456, + "grad_norm": 0.6324406127676048, + "learning_rate": 4.244228721427177e-06, + "loss": 0.47354042530059814, + "memory(GiB)": 66.66, + "step": 2733, + "token_acc": 0.9461252653927813, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.87488, + "grad_norm": 0.6564154012037594, + "learning_rate": 4.243596838899488e-06, + "loss": 0.41902047395706177, + "memory(GiB)": 66.66, + "step": 2734, + "token_acc": 0.8962432915921288, + "train_speed(iter/s)": 0.241984 + }, + { + "epoch": 0.8752, + "grad_norm": 0.6089160928489797, + "learning_rate": 4.242964739411419e-06, + "loss": 0.40372684597969055, + "memory(GiB)": 66.66, + "step": 2735, + "token_acc": 0.8494323469197841, + "train_speed(iter/s)": 0.241986 + }, + { + "epoch": 0.87552, + "grad_norm": 0.6950245828458408, + "learning_rate": 4.242332423041625e-06, + "loss": 0.4424767792224884, + "memory(GiB)": 66.66, + "step": 2736, + "token_acc": 0.8406862745098039, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.87584, + "grad_norm": 0.658718777635752, + "learning_rate": 4.241699889868786e-06, + "loss": 0.4773024022579193, + "memory(GiB)": 66.66, + "step": 2737, + "token_acc": 0.9075879610004239, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.87616, + "grad_norm": 0.5578358083354564, + "learning_rate": 4.241067139971609e-06, + "loss": 0.44102734327316284, + "memory(GiB)": 66.66, + "step": 2738, + "token_acc": 0.8104743507190384, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.87648, + "grad_norm": 0.5880308610845321, + "learning_rate": 4.240434173428829e-06, + "loss": 0.3997608721256256, + "memory(GiB)": 66.66, + "step": 2739, + "token_acc": 0.9479338842975207, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.8768, + "grad_norm": 0.6468822957856663, + "learning_rate": 4.239800990319209e-06, + "loss": 0.47861623764038086, + "memory(GiB)": 66.66, + "step": 2740, + "token_acc": 0.8650577124868836, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.87712, + "grad_norm": 0.6476090493535672, + "learning_rate": 4.239167590721536e-06, + "loss": 0.3533022999763489, + "memory(GiB)": 66.66, + "step": 2741, + "token_acc": 0.8925964546402503, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.87744, + "grad_norm": 0.6119357291694715, + "learning_rate": 4.238533974714627e-06, + "loss": 0.4397846460342407, + "memory(GiB)": 66.66, + "step": 2742, + "token_acc": 0.9192666452235445, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.87776, + "grad_norm": 0.5937888164814307, + 
"learning_rate": 4.237900142377324e-06, + "loss": 0.3211444914340973, + "memory(GiB)": 66.66, + "step": 2743, + "token_acc": 0.878727634194831, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.87808, + "grad_norm": 0.6504459963529499, + "learning_rate": 4.237266093788496e-06, + "loss": 0.43034958839416504, + "memory(GiB)": 66.66, + "step": 2744, + "token_acc": 0.845123482628715, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.8784, + "grad_norm": 0.6345366094555415, + "learning_rate": 4.23663182902704e-06, + "loss": 0.4151901602745056, + "memory(GiB)": 66.66, + "step": 2745, + "token_acc": 0.8778416187859106, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.87872, + "grad_norm": 0.5802572642316275, + "learning_rate": 4.235997348171879e-06, + "loss": 0.33383482694625854, + "memory(GiB)": 66.66, + "step": 2746, + "token_acc": 0.8852889667250438, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.87904, + "grad_norm": 0.5602054356842581, + "learning_rate": 4.2353626513019625e-06, + "loss": 0.33759188652038574, + "memory(GiB)": 66.66, + "step": 2747, + "token_acc": 0.9138283378746594, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.87936, + "grad_norm": 0.6260921073894296, + "learning_rate": 4.234727738496268e-06, + "loss": 0.36013439297676086, + "memory(GiB)": 66.66, + "step": 2748, + "token_acc": 0.8730201342281879, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.87968, + "grad_norm": 0.6163364980489192, + "learning_rate": 4.2340926098338e-06, + "loss": 0.45746955275535583, + "memory(GiB)": 66.66, + "step": 2749, + "token_acc": 0.8661591355599214, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.88, + "grad_norm": 0.6088669792241002, + "learning_rate": 4.233457265393589e-06, + "loss": 0.3596654534339905, + "memory(GiB)": 66.66, + "step": 2750, + "token_acc": 0.8906945681211041, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.88032, + "grad_norm": 0.5781982258972846, + "learning_rate": 4.232821705254692e-06, + "loss": 0.310103178024292, + "memory(GiB)": 66.66, + "step": 2751, + "token_acc": 0.9540816326530612, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.88064, + "grad_norm": 0.6762469008874803, + "learning_rate": 4.232185929496193e-06, + "loss": 0.455264687538147, + "memory(GiB)": 66.66, + "step": 2752, + "token_acc": 0.8571151984511133, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.88096, + "grad_norm": 0.5632647949958026, + "learning_rate": 4.231549938197205e-06, + "loss": 0.36701396107673645, + "memory(GiB)": 66.66, + "step": 2753, + "token_acc": 0.8954988154777573, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.88128, + "grad_norm": 0.6258250924223145, + "learning_rate": 4.230913731436864e-06, + "loss": 0.4167162775993347, + "memory(GiB)": 66.66, + "step": 2754, + "token_acc": 0.8640462427745664, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.8816, + "grad_norm": 0.5938967240287926, + "learning_rate": 4.230277309294337e-06, + "loss": 0.2884720265865326, + "memory(GiB)": 66.66, + "step": 2755, + "token_acc": 0.8911159263271939, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.88192, + "grad_norm": 0.6305736019878028, + "learning_rate": 4.229640671848815e-06, + "loss": 0.40411436557769775, + "memory(GiB)": 66.66, + "step": 2756, + "token_acc": 0.9103491664045297, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.88224, + "grad_norm": 0.5927611535178307, + "learning_rate": 4.229003819179516e-06, + "loss": 0.4101211428642273, + "memory(GiB)": 66.66, + "step": 2757, + "token_acc": 
0.958148893360161, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.88256, + "grad_norm": 0.6759615514068517, + "learning_rate": 4.228366751365685e-06, + "loss": 0.4016323685646057, + "memory(GiB)": 66.66, + "step": 2758, + "token_acc": 0.8462897526501767, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.88288, + "grad_norm": 0.6661660831580801, + "learning_rate": 4.227729468486594e-06, + "loss": 0.5390846133232117, + "memory(GiB)": 66.66, + "step": 2759, + "token_acc": 0.8109414266383144, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.8832, + "grad_norm": 0.650550666861897, + "learning_rate": 4.227091970621543e-06, + "loss": 0.39194604754447937, + "memory(GiB)": 66.66, + "step": 2760, + "token_acc": 0.9495705181490718, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.88352, + "grad_norm": 0.586991804620682, + "learning_rate": 4.226454257849857e-06, + "loss": 0.34203973412513733, + "memory(GiB)": 66.66, + "step": 2761, + "token_acc": 0.943010752688172, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.88384, + "grad_norm": 0.6168841926890587, + "learning_rate": 4.225816330250887e-06, + "loss": 0.40939778089523315, + "memory(GiB)": 66.66, + "step": 2762, + "token_acc": 0.8806896551724138, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.88416, + "grad_norm": 0.6051252328275581, + "learning_rate": 4.225178187904014e-06, + "loss": 0.3494575023651123, + "memory(GiB)": 66.66, + "step": 2763, + "token_acc": 0.9078862314156432, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.88448, + "grad_norm": 0.6746539531492346, + "learning_rate": 4.224539830888643e-06, + "loss": 0.3644985556602478, + "memory(GiB)": 66.66, + "step": 2764, + "token_acc": 0.7675832127351664, + "train_speed(iter/s)": 0.241973 + }, + { + "epoch": 0.8848, + "grad_norm": 0.6308150802974518, + "learning_rate": 4.223901259284206e-06, + "loss": 0.3826584219932556, + "memory(GiB)": 66.66, + "step": 2765, + "token_acc": 0.9064131245339299, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.88512, + "grad_norm": 0.626115988399976, + "learning_rate": 4.223262473170162e-06, + "loss": 0.37937480211257935, + "memory(GiB)": 66.66, + "step": 2766, + "token_acc": 0.9095406360424029, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.88544, + "grad_norm": 0.5866325118033842, + "learning_rate": 4.2226234726259985e-06, + "loss": 0.33188965916633606, + "memory(GiB)": 66.66, + "step": 2767, + "token_acc": 0.8841492971400873, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.88576, + "grad_norm": 0.6177459554221354, + "learning_rate": 4.221984257731226e-06, + "loss": 0.4752567410469055, + "memory(GiB)": 66.66, + "step": 2768, + "token_acc": 0.8330510525042342, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.88608, + "grad_norm": 0.6209895907344971, + "learning_rate": 4.2213448285653845e-06, + "loss": 0.408283531665802, + "memory(GiB)": 66.66, + "step": 2769, + "token_acc": 0.9082115219260533, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.8864, + "grad_norm": 0.6647297529293297, + "learning_rate": 4.22070518520804e-06, + "loss": 0.36840173602104187, + "memory(GiB)": 66.66, + "step": 2770, + "token_acc": 0.9086803813346713, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.88672, + "grad_norm": 0.6366485524407124, + "learning_rate": 4.220065327738786e-06, + "loss": 0.43449944257736206, + "memory(GiB)": 66.66, + "step": 2771, + "token_acc": 0.8505315822388994, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.88704, + "grad_norm": 0.6334660374308607, + 
"learning_rate": 4.21942525623724e-06, + "loss": 0.3764113783836365, + "memory(GiB)": 66.66, + "step": 2772, + "token_acc": 0.837253829321663, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.88736, + "grad_norm": 0.6873112317488659, + "learning_rate": 4.2187849707830486e-06, + "loss": 0.3912735879421234, + "memory(GiB)": 66.66, + "step": 2773, + "token_acc": 0.8651452282157677, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.88768, + "grad_norm": 0.6550833749721948, + "learning_rate": 4.218144471455884e-06, + "loss": 0.3915499448776245, + "memory(GiB)": 66.66, + "step": 2774, + "token_acc": 0.9157560803665844, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.888, + "grad_norm": 0.621888515801699, + "learning_rate": 4.217503758335445e-06, + "loss": 0.2976590692996979, + "memory(GiB)": 66.66, + "step": 2775, + "token_acc": 0.9615055603079555, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.88832, + "grad_norm": 0.6690453479492067, + "learning_rate": 4.216862831501457e-06, + "loss": 0.4207008183002472, + "memory(GiB)": 66.66, + "step": 2776, + "token_acc": 0.9107891727030119, + "train_speed(iter/s)": 0.241984 + }, + { + "epoch": 0.88864, + "grad_norm": 0.5552572060555523, + "learning_rate": 4.216221691033674e-06, + "loss": 0.3537760078907013, + "memory(GiB)": 66.66, + "step": 2777, + "token_acc": 0.9498159919705588, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.88896, + "grad_norm": 0.6266773289812866, + "learning_rate": 4.215580337011873e-06, + "loss": 0.3407539427280426, + "memory(GiB)": 66.66, + "step": 2778, + "token_acc": 0.9192504258943782, + "train_speed(iter/s)": 0.241986 + }, + { + "epoch": 0.88928, + "grad_norm": 0.6675625730593217, + "learning_rate": 4.21493876951586e-06, + "loss": 0.3567861318588257, + "memory(GiB)": 66.66, + "step": 2779, + "token_acc": 0.8652368758002561, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.8896, + "grad_norm": 0.5907564511826744, + "learning_rate": 4.214296988625466e-06, + "loss": 0.31456419825553894, + "memory(GiB)": 66.66, + "step": 2780, + "token_acc": 0.900592325521504, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.88992, + "grad_norm": 0.5739220355305062, + "learning_rate": 4.213654994420551e-06, + "loss": 0.3639344871044159, + "memory(GiB)": 66.66, + "step": 2781, + "token_acc": 0.8853267570900123, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.89024, + "grad_norm": 0.5699583663778225, + "learning_rate": 4.213012786981e-06, + "loss": 0.398431658744812, + "memory(GiB)": 66.66, + "step": 2782, + "token_acc": 0.8831289483463397, + "train_speed(iter/s)": 0.241987 + }, + { + "epoch": 0.89056, + "grad_norm": 0.608786312682516, + "learning_rate": 4.212370366386723e-06, + "loss": 0.3947691321372986, + "memory(GiB)": 66.66, + "step": 2783, + "token_acc": 0.8702724684831232, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.89088, + "grad_norm": 0.6070311802799768, + "learning_rate": 4.21172773271766e-06, + "loss": 0.3561690151691437, + "memory(GiB)": 66.66, + "step": 2784, + "token_acc": 0.9479289940828403, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.8912, + "grad_norm": 0.634859133621835, + "learning_rate": 4.211084886053774e-06, + "loss": 0.41397830843925476, + "memory(GiB)": 66.66, + "step": 2785, + "token_acc": 0.9292328042328042, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.89152, + "grad_norm": 0.6177564599577703, + "learning_rate": 4.210441826475057e-06, + "loss": 0.40559321641921997, + "memory(GiB)": 66.66, + "step": 2786, + "token_acc": 
0.9328635014836796, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.89184, + "grad_norm": 0.6021253979318901, + "learning_rate": 4.209798554061527e-06, + "loss": 0.3435715436935425, + "memory(GiB)": 66.66, + "step": 2787, + "token_acc": 0.9246913580246914, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.89216, + "grad_norm": 0.6212310701293403, + "learning_rate": 4.209155068893227e-06, + "loss": 0.3130069077014923, + "memory(GiB)": 66.66, + "step": 2788, + "token_acc": 0.8810693925692943, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.89248, + "grad_norm": 0.6445455348530277, + "learning_rate": 4.208511371050228e-06, + "loss": 0.42668014764785767, + "memory(GiB)": 66.66, + "step": 2789, + "token_acc": 0.9387509042681457, + "train_speed(iter/s)": 0.241994 + }, + { + "epoch": 0.8928, + "grad_norm": 0.5659727313875089, + "learning_rate": 4.207867460612627e-06, + "loss": 0.39238250255584717, + "memory(GiB)": 66.66, + "step": 2790, + "token_acc": 0.8728179551122195, + "train_speed(iter/s)": 0.241994 + }, + { + "epoch": 0.89312, + "grad_norm": 0.6210605769865368, + "learning_rate": 4.207223337660548e-06, + "loss": 0.3839040994644165, + "memory(GiB)": 66.66, + "step": 2791, + "token_acc": 0.9176, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.89344, + "grad_norm": 0.6274523406799791, + "learning_rate": 4.20657900227414e-06, + "loss": 0.325257807970047, + "memory(GiB)": 66.66, + "step": 2792, + "token_acc": 0.9426685198054204, + "train_speed(iter/s)": 0.241999 + }, + { + "epoch": 0.89376, + "grad_norm": 0.5915141994458549, + "learning_rate": 4.205934454533581e-06, + "loss": 0.3207491338253021, + "memory(GiB)": 66.66, + "step": 2793, + "token_acc": 0.916003293988471, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.89408, + "grad_norm": 0.6206996783476884, + "learning_rate": 4.205289694519072e-06, + "loss": 0.27051830291748047, + "memory(GiB)": 66.66, + "step": 2794, + "token_acc": 0.9316338354577057, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.8944, + "grad_norm": 0.628729070711734, + "learning_rate": 4.204644722310842e-06, + "loss": 0.3622612953186035, + "memory(GiB)": 66.66, + "step": 2795, + "token_acc": 0.8639369277721262, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.89472, + "grad_norm": 0.5749264724054969, + "learning_rate": 4.203999537989148e-06, + "loss": 0.3448949456214905, + "memory(GiB)": 66.66, + "step": 2796, + "token_acc": 0.9394109396914446, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.89504, + "grad_norm": 0.5550862853431716, + "learning_rate": 4.2033541416342725e-06, + "loss": 0.35123974084854126, + "memory(GiB)": 66.66, + "step": 2797, + "token_acc": 0.938973897389739, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.89536, + "grad_norm": 0.6195688391234724, + "learning_rate": 4.202708533326522e-06, + "loss": 0.3467400074005127, + "memory(GiB)": 66.66, + "step": 2798, + "token_acc": 0.9214890016920474, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.89568, + "grad_norm": 0.719467380122505, + "learning_rate": 4.202062713146232e-06, + "loss": 0.443705677986145, + "memory(GiB)": 66.66, + "step": 2799, + "token_acc": 0.8548644338118022, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.896, + "grad_norm": 0.57203683181338, + "learning_rate": 4.2014166811737645e-06, + "loss": 0.36948347091674805, + "memory(GiB)": 66.66, + "step": 2800, + "token_acc": 0.8303145853193518, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.89632, + "grad_norm": 0.6593274213141876, + "learning_rate": 
4.200770437489505e-06, + "loss": 0.4335978925228119, + "memory(GiB)": 66.66, + "step": 2801, + "token_acc": 0.8903446311592139, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.89664, + "grad_norm": 0.742317330493897, + "learning_rate": 4.200123982173869e-06, + "loss": 0.4212910234928131, + "memory(GiB)": 66.66, + "step": 2802, + "token_acc": 0.9221508828250401, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.89696, + "grad_norm": 0.6573666954479488, + "learning_rate": 4.199477315307297e-06, + "loss": 0.3536713719367981, + "memory(GiB)": 66.66, + "step": 2803, + "token_acc": 0.9361963190184049, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.89728, + "grad_norm": 0.6427957677833065, + "learning_rate": 4.198830436970254e-06, + "loss": 0.4118232727050781, + "memory(GiB)": 66.66, + "step": 2804, + "token_acc": 0.9226554946825652, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.8976, + "grad_norm": 0.6313966186033412, + "learning_rate": 4.198183347243233e-06, + "loss": 0.32043570280075073, + "memory(GiB)": 66.66, + "step": 2805, + "token_acc": 0.8631379164909321, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.89792, + "grad_norm": 0.6299601243454398, + "learning_rate": 4.197536046206754e-06, + "loss": 0.3696286678314209, + "memory(GiB)": 66.66, + "step": 2806, + "token_acc": 0.9405805038335159, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.89824, + "grad_norm": 0.5812948080282655, + "learning_rate": 4.196888533941362e-06, + "loss": 0.36351796984672546, + "memory(GiB)": 66.66, + "step": 2807, + "token_acc": 0.9520590043023971, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.89856, + "grad_norm": 0.5819250572912579, + "learning_rate": 4.196240810527629e-06, + "loss": 0.3521096408367157, + "memory(GiB)": 66.66, + "step": 2808, + "token_acc": 0.9000290613193839, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.89888, + "grad_norm": 0.6511820831760246, + "learning_rate": 4.1955928760461515e-06, + "loss": 0.41333672404289246, + "memory(GiB)": 66.66, + "step": 2809, + "token_acc": 0.8375617792421747, + "train_speed(iter/s)": 0.241969 + }, + { + "epoch": 0.8992, + "grad_norm": 0.6060524378381547, + "learning_rate": 4.194944730577555e-06, + "loss": 0.36598044633865356, + "memory(GiB)": 66.66, + "step": 2810, + "token_acc": 0.9207419898819561, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.89952, + "grad_norm": 0.592866662655607, + "learning_rate": 4.1942963742024896e-06, + "loss": 0.4256974458694458, + "memory(GiB)": 66.66, + "step": 2811, + "token_acc": 0.900377191036166, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.89984, + "grad_norm": 0.666863227832339, + "learning_rate": 4.193647807001632e-06, + "loss": 0.3943021297454834, + "memory(GiB)": 66.66, + "step": 2812, + "token_acc": 0.8751363140676118, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.90016, + "grad_norm": 0.6933707763336775, + "learning_rate": 4.192999029055686e-06, + "loss": 0.39822375774383545, + "memory(GiB)": 66.66, + "step": 2813, + "token_acc": 0.8742166517457476, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.90048, + "grad_norm": 0.5793432702988545, + "learning_rate": 4.192350040445379e-06, + "loss": 0.2939651310443878, + "memory(GiB)": 66.66, + "step": 2814, + "token_acc": 0.925776397515528, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.9008, + "grad_norm": 0.6521732902539502, + "learning_rate": 4.191700841251468e-06, + "loss": 0.3281785845756531, + "memory(GiB)": 66.66, + "step": 2815, + "token_acc": 
0.8804247460757156, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.90112, + "grad_norm": 0.6835822232175337, + "learning_rate": 4.191051431554734e-06, + "loss": 0.3268094062805176, + "memory(GiB)": 66.66, + "step": 2816, + "token_acc": 0.8825831702544031, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.90144, + "grad_norm": 0.6473607480700502, + "learning_rate": 4.1904018114359836e-06, + "loss": 0.38597571849823, + "memory(GiB)": 66.66, + "step": 2817, + "token_acc": 0.8668402777777777, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.90176, + "grad_norm": 0.6726956733237922, + "learning_rate": 4.189751980976053e-06, + "loss": 0.4159366488456726, + "memory(GiB)": 66.66, + "step": 2818, + "token_acc": 0.8438842738470352, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.90208, + "grad_norm": 0.641955698789572, + "learning_rate": 4.189101940255801e-06, + "loss": 0.4194986820220947, + "memory(GiB)": 66.66, + "step": 2819, + "token_acc": 0.8631735783461634, + "train_speed(iter/s)": 0.241987 + }, + { + "epoch": 0.9024, + "grad_norm": 0.6561151729680409, + "learning_rate": 4.188451689356113e-06, + "loss": 0.40885573625564575, + "memory(GiB)": 66.66, + "step": 2820, + "token_acc": 0.8744033412887828, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90272, + "grad_norm": 0.6199446848790633, + "learning_rate": 4.187801228357904e-06, + "loss": 0.3528479039669037, + "memory(GiB)": 66.66, + "step": 2821, + "token_acc": 0.8934719064631373, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.90304, + "grad_norm": 0.5908081127203654, + "learning_rate": 4.18715055734211e-06, + "loss": 0.3761516809463501, + "memory(GiB)": 66.66, + "step": 2822, + "token_acc": 0.8381062355658199, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.90336, + "grad_norm": 0.6295530406380434, + "learning_rate": 4.186499676389698e-06, + "loss": 0.36932289600372314, + "memory(GiB)": 66.66, + "step": 2823, + "token_acc": 0.8856263319914752, + "train_speed(iter/s)": 0.241994 + }, + { + "epoch": 0.90368, + "grad_norm": 0.6131438251962031, + "learning_rate": 4.185848585581657e-06, + "loss": 0.36505433917045593, + "memory(GiB)": 66.66, + "step": 2824, + "token_acc": 0.8647316538882804, + "train_speed(iter/s)": 0.241995 + }, + { + "epoch": 0.904, + "grad_norm": 0.5865605984375885, + "learning_rate": 4.185197284999004e-06, + "loss": 0.38936227560043335, + "memory(GiB)": 66.66, + "step": 2825, + "token_acc": 0.8439059158945118, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90432, + "grad_norm": 0.6590853270732768, + "learning_rate": 4.184545774722784e-06, + "loss": 0.425952672958374, + "memory(GiB)": 66.66, + "step": 2826, + "token_acc": 0.8966292134831461, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90464, + "grad_norm": 0.6291129906189357, + "learning_rate": 4.183894054834064e-06, + "loss": 0.4082595407962799, + "memory(GiB)": 66.66, + "step": 2827, + "token_acc": 0.8945560253699789, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90496, + "grad_norm": 0.6204833540876868, + "learning_rate": 4.18324212541394e-06, + "loss": 0.3324819803237915, + "memory(GiB)": 66.66, + "step": 2828, + "token_acc": 0.9080980287693128, + "train_speed(iter/s)": 0.241986 + }, + { + "epoch": 0.90528, + "grad_norm": 0.6350989140105531, + "learning_rate": 4.182589986543534e-06, + "loss": 0.3367905616760254, + "memory(GiB)": 66.66, + "step": 2829, + "token_acc": 0.9102091020910209, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.9056, + "grad_norm": 0.6102083859385622, + 
"learning_rate": 4.181937638303993e-06, + "loss": 0.3785122036933899, + "memory(GiB)": 66.66, + "step": 2830, + "token_acc": 0.825097678694553, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.90592, + "grad_norm": 0.628925635249673, + "learning_rate": 4.18128508077649e-06, + "loss": 0.3501740097999573, + "memory(GiB)": 66.66, + "step": 2831, + "token_acc": 0.916875, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.90624, + "grad_norm": 0.6576523716548736, + "learning_rate": 4.180632314042223e-06, + "loss": 0.31177395582199097, + "memory(GiB)": 66.66, + "step": 2832, + "token_acc": 0.8497854077253219, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.90656, + "grad_norm": 0.6488841769625252, + "learning_rate": 4.17997933818242e-06, + "loss": 0.43167632818222046, + "memory(GiB)": 66.66, + "step": 2833, + "token_acc": 0.9255893212155638, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.90688, + "grad_norm": 0.5982405093567339, + "learning_rate": 4.179326153278333e-06, + "loss": 0.37242236733436584, + "memory(GiB)": 66.66, + "step": 2834, + "token_acc": 0.9121522693997072, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.9072, + "grad_norm": 0.5988061582258023, + "learning_rate": 4.1786727594112365e-06, + "loss": 0.36429208517074585, + "memory(GiB)": 66.66, + "step": 2835, + "token_acc": 0.8415330252786083, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90752, + "grad_norm": 0.6780958960532386, + "learning_rate": 4.178019156662436e-06, + "loss": 0.43688228726387024, + "memory(GiB)": 66.66, + "step": 2836, + "token_acc": 0.8199731303179579, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.90784, + "grad_norm": 0.5994660648553743, + "learning_rate": 4.177365345113261e-06, + "loss": 0.30778980255126953, + "memory(GiB)": 66.66, + "step": 2837, + "token_acc": 0.9465346534653465, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.90816, + "grad_norm": 0.6778090777574226, + "learning_rate": 4.176711324845067e-06, + "loss": 0.41336095333099365, + "memory(GiB)": 66.66, + "step": 2838, + "token_acc": 0.8898584905660377, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.90848, + "grad_norm": 0.6122072827530655, + "learning_rate": 4.1760570959392355e-06, + "loss": 0.34675318002700806, + "memory(GiB)": 66.66, + "step": 2839, + "token_acc": 0.836150552174893, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.9088, + "grad_norm": 0.6404102970501611, + "learning_rate": 4.175402658477173e-06, + "loss": 0.424371600151062, + "memory(GiB)": 66.66, + "step": 2840, + "token_acc": 0.9286898839137645, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.90912, + "grad_norm": 0.6124826427507508, + "learning_rate": 4.174748012540314e-06, + "loss": 0.35368433594703674, + "memory(GiB)": 66.66, + "step": 2841, + "token_acc": 0.9333891914537076, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.90944, + "grad_norm": 0.6301361383260203, + "learning_rate": 4.174093158210117e-06, + "loss": 0.4114921987056732, + "memory(GiB)": 66.66, + "step": 2842, + "token_acc": 0.8932542624166049, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.90976, + "grad_norm": 0.6575808780661407, + "learning_rate": 4.173438095568067e-06, + "loss": 0.4560542702674866, + "memory(GiB)": 66.66, + "step": 2843, + "token_acc": 0.8347371478361894, + "train_speed(iter/s)": 0.241986 + }, + { + "epoch": 0.91008, + "grad_norm": 0.6955678970359704, + "learning_rate": 4.172782824695677e-06, + "loss": 0.47312480211257935, + "memory(GiB)": 66.66, + "step": 2844, + "token_acc": 
0.8935617860851506, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.9104, + "grad_norm": 0.6018277157943983, + "learning_rate": 4.172127345674483e-06, + "loss": 0.3461216390132904, + "memory(GiB)": 66.66, + "step": 2845, + "token_acc": 0.9037227214377407, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.91072, + "grad_norm": 0.6632902706792271, + "learning_rate": 4.171471658586047e-06, + "loss": 0.42720332741737366, + "memory(GiB)": 66.66, + "step": 2846, + "token_acc": 0.9285932255111382, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.91104, + "grad_norm": 0.6171200362401577, + "learning_rate": 4.17081576351196e-06, + "loss": 0.40089181065559387, + "memory(GiB)": 66.66, + "step": 2847, + "token_acc": 0.8148719531008948, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.91136, + "grad_norm": 0.7483927819481633, + "learning_rate": 4.170159660533834e-06, + "loss": 0.48795169591903687, + "memory(GiB)": 66.66, + "step": 2848, + "token_acc": 0.9475457170356112, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.91168, + "grad_norm": 0.6076240108508284, + "learning_rate": 4.169503349733312e-06, + "loss": 0.40277254581451416, + "memory(GiB)": 66.66, + "step": 2849, + "token_acc": 0.8763138138138138, + "train_speed(iter/s)": 0.241994 + }, + { + "epoch": 0.912, + "grad_norm": 0.618302317419277, + "learning_rate": 4.16884683119206e-06, + "loss": 0.31804656982421875, + "memory(GiB)": 66.66, + "step": 2850, + "token_acc": 0.8483088486504954, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.91232, + "grad_norm": 0.6302689113705127, + "learning_rate": 4.1681901049917696e-06, + "loss": 0.3822234272956848, + "memory(GiB)": 66.66, + "step": 2851, + "token_acc": 0.8885793871866295, + "train_speed(iter/s)": 0.241993 + }, + { + "epoch": 0.91264, + "grad_norm": 0.6182257897173703, + "learning_rate": 4.167533171214158e-06, + "loss": 0.38994699716567993, + "memory(GiB)": 66.66, + "step": 2852, + "token_acc": 0.883854818523154, + "train_speed(iter/s)": 0.241996 + }, + { + "epoch": 0.91296, + "grad_norm": 0.6271663051800229, + "learning_rate": 4.166876029940972e-06, + "loss": 0.3747294545173645, + "memory(GiB)": 66.66, + "step": 2853, + "token_acc": 0.9480830670926518, + "train_speed(iter/s)": 0.241999 + }, + { + "epoch": 0.91328, + "grad_norm": 0.654009357880039, + "learning_rate": 4.1662186812539815e-06, + "loss": 0.3901631236076355, + "memory(GiB)": 66.66, + "step": 2854, + "token_acc": 0.9262472885032538, + "train_speed(iter/s)": 0.241999 + }, + { + "epoch": 0.9136, + "grad_norm": 0.5561578579629396, + "learning_rate": 4.1655611252349795e-06, + "loss": 0.312466025352478, + "memory(GiB)": 66.66, + "step": 2855, + "token_acc": 0.9206021860177356, + "train_speed(iter/s)": 0.242 + }, + { + "epoch": 0.91392, + "grad_norm": 0.5874391577057015, + "learning_rate": 4.164903361965787e-06, + "loss": 0.3104172646999359, + "memory(GiB)": 66.66, + "step": 2856, + "token_acc": 0.9516320474777448, + "train_speed(iter/s)": 0.241999 + }, + { + "epoch": 0.91424, + "grad_norm": 0.615571004210912, + "learning_rate": 4.1642453915282545e-06, + "loss": 0.3653981685638428, + "memory(GiB)": 66.66, + "step": 2857, + "token_acc": 0.9463848039215687, + "train_speed(iter/s)": 0.241999 + }, + { + "epoch": 0.91456, + "grad_norm": 0.6395493303988445, + "learning_rate": 4.1635872140042545e-06, + "loss": 0.3566439151763916, + "memory(GiB)": 66.66, + "step": 2858, + "token_acc": 0.8089103596349974, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.91488, + "grad_norm": 0.649718902995997, + 
"learning_rate": 4.162928829475683e-06, + "loss": 0.3596912622451782, + "memory(GiB)": 66.66, + "step": 2859, + "token_acc": 0.8324682814302191, + "train_speed(iter/s)": 0.242 + }, + { + "epoch": 0.9152, + "grad_norm": 0.6120254312847329, + "learning_rate": 4.162270238024466e-06, + "loss": 0.38426291942596436, + "memory(GiB)": 66.66, + "step": 2860, + "token_acc": 0.9062730627306274, + "train_speed(iter/s)": 0.241998 + }, + { + "epoch": 0.91552, + "grad_norm": 0.6257545463866689, + "learning_rate": 4.1616114397325545e-06, + "loss": 0.3397254943847656, + "memory(GiB)": 66.66, + "step": 2861, + "token_acc": 0.8146283683742968, + "train_speed(iter/s)": 0.242 + }, + { + "epoch": 0.91584, + "grad_norm": 0.6578997542917135, + "learning_rate": 4.160952434681924e-06, + "loss": 0.39425593614578247, + "memory(GiB)": 66.66, + "step": 2862, + "token_acc": 0.9344854268764509, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.91616, + "grad_norm": 0.6319739500967119, + "learning_rate": 4.160293222954576e-06, + "loss": 0.2772360146045685, + "memory(GiB)": 66.66, + "step": 2863, + "token_acc": 0.9295268516669994, + "train_speed(iter/s)": 0.241998 + }, + { + "epoch": 0.91648, + "grad_norm": 0.6087122481668373, + "learning_rate": 4.159633804632538e-06, + "loss": 0.3980293571949005, + "memory(GiB)": 66.66, + "step": 2864, + "token_acc": 0.9116455696202531, + "train_speed(iter/s)": 0.241998 + }, + { + "epoch": 0.9168, + "grad_norm": 0.6165025825805747, + "learning_rate": 4.158974179797864e-06, + "loss": 0.3978361189365387, + "memory(GiB)": 66.66, + "step": 2865, + "token_acc": 0.8578692493946731, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.91712, + "grad_norm": 0.6299127806096525, + "learning_rate": 4.1583143485326325e-06, + "loss": 0.35704049468040466, + "memory(GiB)": 66.66, + "step": 2866, + "token_acc": 0.8955959347089621, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.91744, + "grad_norm": 0.6153131233144756, + "learning_rate": 4.157654310918947e-06, + "loss": 0.39874839782714844, + "memory(GiB)": 66.66, + "step": 2867, + "token_acc": 0.8833664678595096, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.91776, + "grad_norm": 0.6198940810089844, + "learning_rate": 4.156994067038939e-06, + "loss": 0.39577794075012207, + "memory(GiB)": 66.66, + "step": 2868, + "token_acc": 0.8676383691156194, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.91808, + "grad_norm": 0.6092049692995067, + "learning_rate": 4.1563336169747624e-06, + "loss": 0.3929671049118042, + "memory(GiB)": 66.66, + "step": 2869, + "token_acc": 0.8844221105527639, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.9184, + "grad_norm": 0.6400330572375402, + "learning_rate": 4.155672960808602e-06, + "loss": 0.4481660723686218, + "memory(GiB)": 66.66, + "step": 2870, + "token_acc": 0.8375307125307125, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.91872, + "grad_norm": 0.6287185107266791, + "learning_rate": 4.155012098622663e-06, + "loss": 0.4335385859012604, + "memory(GiB)": 66.66, + "step": 2871, + "token_acc": 0.829104315766645, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.91904, + "grad_norm": 0.6067587667657115, + "learning_rate": 4.154351030499178e-06, + "loss": 0.3867063820362091, + "memory(GiB)": 66.66, + "step": 2872, + "token_acc": 0.8857074109720885, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.91936, + "grad_norm": 0.6419100089060076, + "learning_rate": 4.153689756520406e-06, + "loss": 0.3854118585586548, + "memory(GiB)": 66.66, + "step": 2873, + "token_acc": 
0.8793342579750347, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.91968, + "grad_norm": 0.5943225062360735, + "learning_rate": 4.153028276768631e-06, + "loss": 0.3353898525238037, + "memory(GiB)": 66.66, + "step": 2874, + "token_acc": 0.8718854592785422, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.92, + "grad_norm": 0.6233163775073138, + "learning_rate": 4.1523665913261635e-06, + "loss": 0.35426223278045654, + "memory(GiB)": 66.66, + "step": 2875, + "token_acc": 0.9021810971579644, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.92032, + "grad_norm": 0.5742697716043673, + "learning_rate": 4.1517047002753375e-06, + "loss": 0.3041720390319824, + "memory(GiB)": 66.66, + "step": 2876, + "token_acc": 0.8981513777467737, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.92064, + "grad_norm": 0.5887225218453093, + "learning_rate": 4.1510426036985136e-06, + "loss": 0.3404286801815033, + "memory(GiB)": 66.66, + "step": 2877, + "token_acc": 0.9293836026331538, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.92096, + "grad_norm": 0.6532473805481854, + "learning_rate": 4.15038030167808e-06, + "loss": 0.372935950756073, + "memory(GiB)": 66.66, + "step": 2878, + "token_acc": 0.8863205759757484, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.92128, + "grad_norm": 0.6044628955620216, + "learning_rate": 4.149717794296447e-06, + "loss": 0.3440532684326172, + "memory(GiB)": 66.66, + "step": 2879, + "token_acc": 0.8708163265306123, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.9216, + "grad_norm": 0.6924865180937194, + "learning_rate": 4.149055081636053e-06, + "loss": 0.44844743609428406, + "memory(GiB)": 66.66, + "step": 2880, + "token_acc": 0.9225329476052716, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.92192, + "grad_norm": 0.6683392616489452, + "learning_rate": 4.148392163779361e-06, + "loss": 0.42156192660331726, + "memory(GiB)": 66.66, + "step": 2881, + "token_acc": 0.8404582285554624, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.92224, + "grad_norm": 0.6589383005808556, + "learning_rate": 4.14772904080886e-06, + "loss": 0.31023067235946655, + "memory(GiB)": 66.66, + "step": 2882, + "token_acc": 0.9329381252315673, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.92256, + "grad_norm": 0.6014764702873173, + "learning_rate": 4.147065712807063e-06, + "loss": 0.4576483368873596, + "memory(GiB)": 66.66, + "step": 2883, + "token_acc": 0.8922974324774925, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.92288, + "grad_norm": 0.6362605494944622, + "learning_rate": 4.146402179856511e-06, + "loss": 0.4352269768714905, + "memory(GiB)": 66.66, + "step": 2884, + "token_acc": 0.949166004765687, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.9232, + "grad_norm": 0.6407773749815381, + "learning_rate": 4.145738442039768e-06, + "loss": 0.32348719239234924, + "memory(GiB)": 66.66, + "step": 2885, + "token_acc": 0.9293419633225458, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.92352, + "grad_norm": 0.6538057298550295, + "learning_rate": 4.145074499439426e-06, + "loss": 0.36404547095298767, + "memory(GiB)": 66.66, + "step": 2886, + "token_acc": 0.9376601195559351, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.92384, + "grad_norm": 0.7292653115450158, + "learning_rate": 4.144410352138099e-06, + "loss": 0.3887181282043457, + "memory(GiB)": 66.66, + "step": 2887, + "token_acc": 0.9183266932270916, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.92416, + "grad_norm": 0.5910306789257189, + 
"learning_rate": 4.14374600021843e-06, + "loss": 0.3959887623786926, + "memory(GiB)": 66.66, + "step": 2888, + "token_acc": 0.832178903621611, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.92448, + "grad_norm": 0.6621034938673431, + "learning_rate": 4.143081443763088e-06, + "loss": 0.3498196005821228, + "memory(GiB)": 66.66, + "step": 2889, + "token_acc": 0.9344380403458213, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.9248, + "grad_norm": 0.6633739257882585, + "learning_rate": 4.142416682854761e-06, + "loss": 0.35044679045677185, + "memory(GiB)": 66.66, + "step": 2890, + "token_acc": 0.9235869908430692, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.92512, + "grad_norm": 0.600816253602654, + "learning_rate": 4.141751717576171e-06, + "loss": 0.3262496590614319, + "memory(GiB)": 66.66, + "step": 2891, + "token_acc": 0.8575518969219756, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.92544, + "grad_norm": 0.6394312597365971, + "learning_rate": 4.141086548010059e-06, + "loss": 0.2785445749759674, + "memory(GiB)": 66.66, + "step": 2892, + "token_acc": 0.9415154134255258, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.92576, + "grad_norm": 0.7150120359200851, + "learning_rate": 4.1404211742391955e-06, + "loss": 0.46983349323272705, + "memory(GiB)": 66.66, + "step": 2893, + "token_acc": 0.8857431749241659, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.92608, + "grad_norm": 0.6205512149930323, + "learning_rate": 4.139755596346375e-06, + "loss": 0.2915668189525604, + "memory(GiB)": 66.66, + "step": 2894, + "token_acc": 0.944573418456181, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.9264, + "grad_norm": 0.6443605364007352, + "learning_rate": 4.139089814414413e-06, + "loss": 0.3622692823410034, + "memory(GiB)": 66.66, + "step": 2895, + "token_acc": 0.9538738738738739, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.92672, + "grad_norm": 0.5674918528562272, + "learning_rate": 4.13842382852616e-06, + "loss": 0.33774465322494507, + "memory(GiB)": 66.66, + "step": 2896, + "token_acc": 0.9018680539134547, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.92704, + "grad_norm": 0.6510906775604471, + "learning_rate": 4.137757638764482e-06, + "loss": 0.40617066621780396, + "memory(GiB)": 66.66, + "step": 2897, + "token_acc": 0.9623015873015873, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.92736, + "grad_norm": 0.6735411526851484, + "learning_rate": 4.137091245212278e-06, + "loss": 0.4234699606895447, + "memory(GiB)": 66.66, + "step": 2898, + "token_acc": 0.8520569620253164, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.92768, + "grad_norm": 0.5817295952028607, + "learning_rate": 4.136424647952468e-06, + "loss": 0.3494800925254822, + "memory(GiB)": 66.66, + "step": 2899, + "token_acc": 0.8462579771804293, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.928, + "grad_norm": 0.5821587824858144, + "learning_rate": 4.1357578470679985e-06, + "loss": 0.359661728143692, + "memory(GiB)": 66.66, + "step": 2900, + "token_acc": 0.93354943273906, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.92832, + "grad_norm": 0.6688235155901585, + "learning_rate": 4.13509084264184e-06, + "loss": 0.3960998058319092, + "memory(GiB)": 66.66, + "step": 2901, + "token_acc": 0.9388145315487572, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.92864, + "grad_norm": 0.5739508318780632, + "learning_rate": 4.134423634756992e-06, + "loss": 0.3053055703639984, + "memory(GiB)": 66.66, + "step": 2902, + "token_acc": 
0.9105648535564853, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.92896, + "grad_norm": 0.6002866172648322, + "learning_rate": 4.133756223496474e-06, + "loss": 0.35672658681869507, + "memory(GiB)": 66.66, + "step": 2903, + "token_acc": 0.8963815789473685, + "train_speed(iter/s)": 0.241958 + }, + { + "epoch": 0.92928, + "grad_norm": 0.6547317965600552, + "learning_rate": 4.133088608943337e-06, + "loss": 0.4138880968093872, + "memory(GiB)": 66.66, + "step": 2904, + "token_acc": 0.9499192245557351, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.9296, + "grad_norm": 0.6107366709980583, + "learning_rate": 4.132420791180652e-06, + "loss": 0.3044928312301636, + "memory(GiB)": 66.66, + "step": 2905, + "token_acc": 0.907177033492823, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.92992, + "grad_norm": 0.622492564092005, + "learning_rate": 4.131752770291517e-06, + "loss": 0.302993506193161, + "memory(GiB)": 66.66, + "step": 2906, + "token_acc": 0.8862275449101796, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.93024, + "grad_norm": 0.638621834598751, + "learning_rate": 4.131084546359058e-06, + "loss": 0.4519605040550232, + "memory(GiB)": 66.66, + "step": 2907, + "token_acc": 0.8805570433851098, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.93056, + "grad_norm": 0.6177632748422991, + "learning_rate": 4.130416119466421e-06, + "loss": 0.2813361883163452, + "memory(GiB)": 66.66, + "step": 2908, + "token_acc": 0.9390316796174537, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.93088, + "grad_norm": 0.6385786195384608, + "learning_rate": 4.129747489696781e-06, + "loss": 0.4961473345756531, + "memory(GiB)": 66.66, + "step": 2909, + "token_acc": 0.7822836429542365, + "train_speed(iter/s)": 0.241959 + }, + { + "epoch": 0.9312, + "grad_norm": 0.6398677411378381, + "learning_rate": 4.12907865713334e-06, + "loss": 0.44176948070526123, + "memory(GiB)": 66.66, + "step": 2910, + "token_acc": 0.9267902813299232, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.93152, + "grad_norm": 0.6512667257921944, + "learning_rate": 4.1284096218593185e-06, + "loss": 0.3655795454978943, + "memory(GiB)": 66.66, + "step": 2911, + "token_acc": 0.8618947368421053, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.93184, + "grad_norm": 0.6588380895615531, + "learning_rate": 4.127740383957969e-06, + "loss": 0.35860782861709595, + "memory(GiB)": 66.66, + "step": 2912, + "token_acc": 0.9226713532513181, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.93216, + "grad_norm": 0.5899696039537213, + "learning_rate": 4.127070943512565e-06, + "loss": 0.32395651936531067, + "memory(GiB)": 66.66, + "step": 2913, + "token_acc": 0.9124603755181663, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.93248, + "grad_norm": 0.642701305977869, + "learning_rate": 4.126401300606408e-06, + "loss": 0.44578787684440613, + "memory(GiB)": 66.66, + "step": 2914, + "token_acc": 0.8042639593908629, + "train_speed(iter/s)": 0.241962 + }, + { + "epoch": 0.9328, + "grad_norm": 0.6057457405504517, + "learning_rate": 4.125731455322823e-06, + "loss": 0.4649224579334259, + "memory(GiB)": 66.66, + "step": 2915, + "token_acc": 0.8558974358974359, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.93312, + "grad_norm": 0.571703124713236, + "learning_rate": 4.125061407745161e-06, + "loss": 0.371432900428772, + "memory(GiB)": 66.66, + "step": 2916, + "token_acc": 0.8968430413517119, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.93344, + "grad_norm": 0.6234635347332752, + 
"learning_rate": 4.124391157956797e-06, + "loss": 0.4323589503765106, + "memory(GiB)": 66.66, + "step": 2917, + "token_acc": 0.8888449367088608, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.93376, + "grad_norm": 0.5976193318184442, + "learning_rate": 4.123720706041132e-06, + "loss": 0.4138857424259186, + "memory(GiB)": 66.66, + "step": 2918, + "token_acc": 0.8962205053247024, + "train_speed(iter/s)": 0.241956 + }, + { + "epoch": 0.93408, + "grad_norm": 0.6192519541198338, + "learning_rate": 4.123050052081593e-06, + "loss": 0.3502770662307739, + "memory(GiB)": 66.66, + "step": 2919, + "token_acc": 0.8882531134298216, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.9344, + "grad_norm": 0.6071701300247223, + "learning_rate": 4.122379196161631e-06, + "loss": 0.33955395221710205, + "memory(GiB)": 66.66, + "step": 2920, + "token_acc": 0.908842523596622, + "train_speed(iter/s)": 0.241952 + }, + { + "epoch": 0.93472, + "grad_norm": 0.6244027740180339, + "learning_rate": 4.121708138364722e-06, + "loss": 0.41131335496902466, + "memory(GiB)": 66.66, + "step": 2921, + "token_acc": 0.8880057803468208, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.93504, + "grad_norm": 0.5810412674982729, + "learning_rate": 4.121036878774367e-06, + "loss": 0.38819658756256104, + "memory(GiB)": 66.66, + "step": 2922, + "token_acc": 0.8701964133219471, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.93536, + "grad_norm": 0.6529107136058476, + "learning_rate": 4.1203654174740934e-06, + "loss": 0.4315199553966522, + "memory(GiB)": 66.66, + "step": 2923, + "token_acc": 0.924992314786351, + "train_speed(iter/s)": 0.241949 + }, + { + "epoch": 0.93568, + "grad_norm": 0.5869871734513932, + "learning_rate": 4.119693754547453e-06, + "loss": 0.3431830406188965, + "memory(GiB)": 66.66, + "step": 2924, + "token_acc": 0.861845051500224, + "train_speed(iter/s)": 0.24195 + }, + { + "epoch": 0.936, + "grad_norm": 0.5856229946793259, + "learning_rate": 4.119021890078022e-06, + "loss": 0.34740936756134033, + "memory(GiB)": 66.66, + "step": 2925, + "token_acc": 0.8650571243802544, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.93632, + "grad_norm": 0.6014528241536774, + "learning_rate": 4.118349824149403e-06, + "loss": 0.32921451330184937, + "memory(GiB)": 66.66, + "step": 2926, + "token_acc": 0.8974993129980764, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.93664, + "grad_norm": 0.5943196152130494, + "learning_rate": 4.1176775568452236e-06, + "loss": 0.36843007802963257, + "memory(GiB)": 66.66, + "step": 2927, + "token_acc": 0.9050355774493706, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.93696, + "grad_norm": 0.6433776502831484, + "learning_rate": 4.117005088249133e-06, + "loss": 0.34510159492492676, + "memory(GiB)": 66.66, + "step": 2928, + "token_acc": 0.9081783289046353, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.93728, + "grad_norm": 0.6350265779957687, + "learning_rate": 4.11633241844481e-06, + "loss": 0.3672202229499817, + "memory(GiB)": 66.66, + "step": 2929, + "token_acc": 0.8624416605256694, + "train_speed(iter/s)": 0.241944 + }, + { + "epoch": 0.9376, + "grad_norm": 0.6522014728379575, + "learning_rate": 4.1156595475159576e-06, + "loss": 0.3533129394054413, + "memory(GiB)": 66.66, + "step": 2930, + "token_acc": 0.8316082802547771, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.93792, + "grad_norm": 0.6151902243391963, + "learning_rate": 4.114986475546302e-06, + "loss": 0.31591346859931946, + "memory(GiB)": 66.66, + "step": 2931, + 
"token_acc": 0.8450257629805786, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.93824, + "grad_norm": 0.612205768350081, + "learning_rate": 4.114313202619595e-06, + "loss": 0.4112699627876282, + "memory(GiB)": 66.66, + "step": 2932, + "token_acc": 0.9312297734627831, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.93856, + "grad_norm": 0.5982338398408271, + "learning_rate": 4.113639728819614e-06, + "loss": 0.38273996114730835, + "memory(GiB)": 66.66, + "step": 2933, + "token_acc": 0.9098474341192788, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.93888, + "grad_norm": 0.5875464875521255, + "learning_rate": 4.112966054230161e-06, + "loss": 0.3821977972984314, + "memory(GiB)": 66.66, + "step": 2934, + "token_acc": 0.8183209026582521, + "train_speed(iter/s)": 0.241945 + }, + { + "epoch": 0.9392, + "grad_norm": 0.5968318901403439, + "learning_rate": 4.112292178935065e-06, + "loss": 0.38466328382492065, + "memory(GiB)": 66.66, + "step": 2935, + "token_acc": 0.9095449500554939, + "train_speed(iter/s)": 0.241946 + }, + { + "epoch": 0.93952, + "grad_norm": 0.6330803062942606, + "learning_rate": 4.111618103018175e-06, + "loss": 0.4156482517719269, + "memory(GiB)": 66.66, + "step": 2936, + "token_acc": 0.8959440559440559, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.93984, + "grad_norm": 0.6100801329319423, + "learning_rate": 4.1109438265633695e-06, + "loss": 0.3016294240951538, + "memory(GiB)": 66.66, + "step": 2937, + "token_acc": 0.8141923436041083, + "train_speed(iter/s)": 0.241947 + }, + { + "epoch": 0.94016, + "grad_norm": 0.6318446969147109, + "learning_rate": 4.11026934965455e-06, + "loss": 0.4491426944732666, + "memory(GiB)": 66.66, + "step": 2938, + "token_acc": 0.8576525821596244, + "train_speed(iter/s)": 0.241948 + }, + { + "epoch": 0.94048, + "grad_norm": 0.7028418629146035, + "learning_rate": 4.1095946723756444e-06, + "loss": 0.4204963743686676, + "memory(GiB)": 66.66, + "step": 2939, + "token_acc": 0.8414198161389173, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.9408, + "grad_norm": 0.5912495864284278, + "learning_rate": 4.108919794810604e-06, + "loss": 0.33847230672836304, + "memory(GiB)": 66.66, + "step": 2940, + "token_acc": 0.8582717746091737, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.94112, + "grad_norm": 0.6090067544602069, + "learning_rate": 4.1082447170434064e-06, + "loss": 0.2856263816356659, + "memory(GiB)": 66.66, + "step": 2941, + "token_acc": 0.9563212154096582, + "train_speed(iter/s)": 0.241951 + }, + { + "epoch": 0.94144, + "grad_norm": 0.6731152044018797, + "learning_rate": 4.107569439158052e-06, + "loss": 0.49434053897857666, + "memory(GiB)": 66.66, + "step": 2942, + "token_acc": 0.8163156491602239, + "train_speed(iter/s)": 0.241953 + }, + { + "epoch": 0.94176, + "grad_norm": 0.6714764303584655, + "learning_rate": 4.1068939612385685e-06, + "loss": 0.41708290576934814, + "memory(GiB)": 66.66, + "step": 2943, + "token_acc": 0.8676176176176176, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.94208, + "grad_norm": 0.6116673295729926, + "learning_rate": 4.106218283369007e-06, + "loss": 0.33226558566093445, + "memory(GiB)": 66.66, + "step": 2944, + "token_acc": 0.9056468906361687, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.9424, + "grad_norm": 0.6834885544254642, + "learning_rate": 4.105542405633443e-06, + "loss": 0.4159301221370697, + "memory(GiB)": 66.66, + "step": 2945, + "token_acc": 0.9168474331164136, + "train_speed(iter/s)": 0.241954 + }, + { + "epoch": 0.94272, + "grad_norm": 
0.6140868941763095, + "learning_rate": 4.104866328115979e-06, + "loss": 0.4040244221687317, + "memory(GiB)": 66.66, + "step": 2946, + "token_acc": 0.8057909604519774, + "train_speed(iter/s)": 0.241957 + }, + { + "epoch": 0.94304, + "grad_norm": 0.6122029989583873, + "learning_rate": 4.104190050900741e-06, + "loss": 0.38114869594573975, + "memory(GiB)": 66.66, + "step": 2947, + "token_acc": 0.8622912393788456, + "train_speed(iter/s)": 0.24196 + }, + { + "epoch": 0.94336, + "grad_norm": 0.7084521410614195, + "learning_rate": 4.10351357407188e-06, + "loss": 0.37367966771125793, + "memory(GiB)": 66.66, + "step": 2948, + "token_acc": 0.9103448275862069, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.94368, + "grad_norm": 0.6762606145293129, + "learning_rate": 4.102836897713571e-06, + "loss": 0.3899470567703247, + "memory(GiB)": 66.66, + "step": 2949, + "token_acc": 0.9567791592658378, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.944, + "grad_norm": 0.6619061785879006, + "learning_rate": 4.102160021910016e-06, + "loss": 0.38056236505508423, + "memory(GiB)": 66.66, + "step": 2950, + "token_acc": 0.8500611995104039, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.94432, + "grad_norm": 0.5910012478496788, + "learning_rate": 4.101482946745438e-06, + "loss": 0.39300402998924255, + "memory(GiB)": 66.66, + "step": 2951, + "token_acc": 0.8503620273531778, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.94464, + "grad_norm": 0.6274786330413604, + "learning_rate": 4.10080567230409e-06, + "loss": 0.3887956738471985, + "memory(GiB)": 66.66, + "step": 2952, + "token_acc": 0.8898840885142255, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.94496, + "grad_norm": 0.5908871305217908, + "learning_rate": 4.100128198670246e-06, + "loss": 0.3808152675628662, + "memory(GiB)": 66.66, + "step": 2953, + "token_acc": 0.8850987432675045, + "train_speed(iter/s)": 0.241961 + }, + { + "epoch": 0.94528, + "grad_norm": 0.6672717756104202, + "learning_rate": 4.099450525928204e-06, + "loss": 0.368002712726593, + "memory(GiB)": 66.66, + "step": 2954, + "token_acc": 0.9303030303030303, + "train_speed(iter/s)": 0.241965 + }, + { + "epoch": 0.9456, + "grad_norm": 0.5715030830468448, + "learning_rate": 4.098772654162293e-06, + "loss": 0.42624080181121826, + "memory(GiB)": 66.66, + "step": 2955, + "token_acc": 0.8327705418877633, + "train_speed(iter/s)": 0.241963 + }, + { + "epoch": 0.94592, + "grad_norm": 0.6185689227030715, + "learning_rate": 4.098094583456858e-06, + "loss": 0.31410109996795654, + "memory(GiB)": 66.66, + "step": 2956, + "token_acc": 0.9094296359988535, + "train_speed(iter/s)": 0.241966 + }, + { + "epoch": 0.94624, + "grad_norm": 0.616551327237591, + "learning_rate": 4.097416313896275e-06, + "loss": 0.3542863130569458, + "memory(GiB)": 66.66, + "step": 2957, + "token_acc": 0.9197926197011284, + "train_speed(iter/s)": 0.241964 + }, + { + "epoch": 0.94656, + "grad_norm": 0.7217351035176977, + "learning_rate": 4.096737845564944e-06, + "loss": 0.3976970911026001, + "memory(GiB)": 66.66, + "step": 2958, + "token_acc": 0.8387997208653175, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.94688, + "grad_norm": 0.6316196797746252, + "learning_rate": 4.096059178547287e-06, + "loss": 0.3226151466369629, + "memory(GiB)": 66.66, + "step": 2959, + "token_acc": 0.9224102352455633, + "train_speed(iter/s)": 0.241968 + }, + { + "epoch": 0.9472, + "grad_norm": 0.6185276034160787, + "learning_rate": 4.095380312927752e-06, + "loss": 0.34229812026023865, + "memory(GiB)": 66.66, + "step": 
2960, + "token_acc": 0.894698085419735, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.94752, + "grad_norm": 0.7193843367381781, + "learning_rate": 4.094701248790813e-06, + "loss": 0.40861397981643677, + "memory(GiB)": 66.66, + "step": 2961, + "token_acc": 0.8497138591689475, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.94784, + "grad_norm": 0.6016694015757362, + "learning_rate": 4.094021986220967e-06, + "loss": 0.3742164373397827, + "memory(GiB)": 66.66, + "step": 2962, + "token_acc": 0.9395424836601307, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.94816, + "grad_norm": 0.6342376626994407, + "learning_rate": 4.093342525302738e-06, + "loss": 0.35628917813301086, + "memory(GiB)": 66.66, + "step": 2963, + "token_acc": 0.8737078651685394, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.94848, + "grad_norm": 0.6296730556887804, + "learning_rate": 4.092662866120671e-06, + "loss": 0.33454737067222595, + "memory(GiB)": 66.66, + "step": 2964, + "token_acc": 0.8515185601799775, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.9488, + "grad_norm": 0.6417179547890193, + "learning_rate": 4.091983008759341e-06, + "loss": 0.3254338204860687, + "memory(GiB)": 66.66, + "step": 2965, + "token_acc": 0.9098451327433629, + "train_speed(iter/s)": 0.241984 + }, + { + "epoch": 0.94912, + "grad_norm": 0.658290508605123, + "learning_rate": 4.0913029533033396e-06, + "loss": 0.3956305682659149, + "memory(GiB)": 66.66, + "step": 2966, + "token_acc": 0.9237894736842105, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.94944, + "grad_norm": 0.6058754853577845, + "learning_rate": 4.090622699837293e-06, + "loss": 0.4270566999912262, + "memory(GiB)": 66.66, + "step": 2967, + "token_acc": 0.8706192990878541, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.94976, + "grad_norm": 0.5822890144738433, + "learning_rate": 4.089942248445843e-06, + "loss": 0.2548324763774872, + "memory(GiB)": 66.66, + "step": 2968, + "token_acc": 0.9579288025889967, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.95008, + "grad_norm": 0.6179659813511624, + "learning_rate": 4.089261599213662e-06, + "loss": 0.45496895909309387, + "memory(GiB)": 66.66, + "step": 2969, + "token_acc": 0.8260309278350515, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.9504, + "grad_norm": 0.5991793350387122, + "learning_rate": 4.0885807522254435e-06, + "loss": 0.39090144634246826, + "memory(GiB)": 66.66, + "step": 2970, + "token_acc": 0.936648717136522, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.95072, + "grad_norm": 0.5822639025591528, + "learning_rate": 4.08789970756591e-06, + "loss": 0.3107174038887024, + "memory(GiB)": 66.66, + "step": 2971, + "token_acc": 0.910126582278481, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.95104, + "grad_norm": 0.6065274390245422, + "learning_rate": 4.087218465319802e-06, + "loss": 0.39779365062713623, + "memory(GiB)": 66.66, + "step": 2972, + "token_acc": 0.8569556671762394, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.95136, + "grad_norm": 0.6381535002301381, + "learning_rate": 4.086537025571893e-06, + "loss": 0.33821016550064087, + "memory(GiB)": 66.66, + "step": 2973, + "token_acc": 0.8676822633297062, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.95168, + "grad_norm": 0.5887102029119905, + "learning_rate": 4.085855388406971e-06, + "loss": 0.3720998764038086, + "memory(GiB)": 66.66, + "step": 2974, + "token_acc": 0.8368659204572517, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.952, + "grad_norm": 
0.6437600487936562, + "learning_rate": 4.085173553909857e-06, + "loss": 0.3438633382320404, + "memory(GiB)": 66.66, + "step": 2975, + "token_acc": 0.8991743807855892, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.95232, + "grad_norm": 0.6072093227722747, + "learning_rate": 4.0844915221653925e-06, + "loss": 0.39963439106941223, + "memory(GiB)": 66.66, + "step": 2976, + "token_acc": 0.8821027043451838, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.95264, + "grad_norm": 0.7221441655301971, + "learning_rate": 4.083809293258445e-06, + "loss": 0.3541724681854248, + "memory(GiB)": 66.66, + "step": 2977, + "token_acc": 0.8941914371420779, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.95296, + "grad_norm": 0.7528018155202961, + "learning_rate": 4.083126867273907e-06, + "loss": 0.45705369114875793, + "memory(GiB)": 66.66, + "step": 2978, + "token_acc": 0.8940364711680631, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.95328, + "grad_norm": 0.6067131992814113, + "learning_rate": 4.082444244296692e-06, + "loss": 0.3199448585510254, + "memory(GiB)": 66.66, + "step": 2979, + "token_acc": 0.9192047377326565, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.9536, + "grad_norm": 0.749447828226131, + "learning_rate": 4.081761424411743e-06, + "loss": 0.37176263332366943, + "memory(GiB)": 66.66, + "step": 2980, + "token_acc": 0.9027375201288245, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.95392, + "grad_norm": 0.6496557733638445, + "learning_rate": 4.081078407704024e-06, + "loss": 0.3466184139251709, + "memory(GiB)": 66.66, + "step": 2981, + "token_acc": 0.8631656804733728, + "train_speed(iter/s)": 0.241987 + }, + { + "epoch": 0.95424, + "grad_norm": 0.6258478464582238, + "learning_rate": 4.080395194258525e-06, + "loss": 0.37454187870025635, + "memory(GiB)": 66.66, + "step": 2982, + "token_acc": 0.9440231130371975, + "train_speed(iter/s)": 0.241987 + }, + { + "epoch": 0.95456, + "grad_norm": 0.6003382652497385, + "learning_rate": 4.07971178416026e-06, + "loss": 0.3810883164405823, + "memory(GiB)": 66.66, + "step": 2983, + "token_acc": 0.9255663430420712, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.95488, + "grad_norm": 0.6250185623024115, + "learning_rate": 4.079028177494266e-06, + "loss": 0.4427996575832367, + "memory(GiB)": 66.66, + "step": 2984, + "token_acc": 0.8139147802929427, + "train_speed(iter/s)": 0.241984 + }, + { + "epoch": 0.9552, + "grad_norm": 0.6135423036147017, + "learning_rate": 4.078344374345609e-06, + "loss": 0.33052393794059753, + "memory(GiB)": 66.66, + "step": 2985, + "token_acc": 0.9467418546365914, + "train_speed(iter/s)": 0.241987 + }, + { + "epoch": 0.95552, + "grad_norm": 0.6546441545660255, + "learning_rate": 4.077660374799373e-06, + "loss": 0.35952991247177124, + "memory(GiB)": 66.66, + "step": 2986, + "token_acc": 0.8387665198237886, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.95584, + "grad_norm": 0.660392477909989, + "learning_rate": 4.076976178940674e-06, + "loss": 0.3669391870498657, + "memory(GiB)": 66.66, + "step": 2987, + "token_acc": 0.936, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.95616, + "grad_norm": 0.6273378811480925, + "learning_rate": 4.076291786854645e-06, + "loss": 0.37068483233451843, + "memory(GiB)": 66.66, + "step": 2988, + "token_acc": 0.913337250293772, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.95648, + "grad_norm": 0.6794380367555433, + "learning_rate": 4.0756071986264466e-06, + "loss": 0.39124903082847595, + "memory(GiB)": 66.66, + "step": 2989, + 
"token_acc": 0.8602002748870999, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.9568, + "grad_norm": 0.6615056594296518, + "learning_rate": 4.074922414341266e-06, + "loss": 0.4679560363292694, + "memory(GiB)": 66.66, + "step": 2990, + "token_acc": 0.9139633286318759, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.95712, + "grad_norm": 0.6507779827555994, + "learning_rate": 4.074237434084312e-06, + "loss": 0.39188140630722046, + "memory(GiB)": 66.66, + "step": 2991, + "token_acc": 0.8710816777041943, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.95744, + "grad_norm": 0.6414540904377284, + "learning_rate": 4.0735522579408175e-06, + "loss": 0.42853617668151855, + "memory(GiB)": 66.66, + "step": 2992, + "token_acc": 0.7780212899185974, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.95776, + "grad_norm": 0.610022496744564, + "learning_rate": 4.072866885996042e-06, + "loss": 0.3788983225822449, + "memory(GiB)": 66.66, + "step": 2993, + "token_acc": 0.8505906879777624, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.95808, + "grad_norm": 0.5392509630539425, + "learning_rate": 4.072181318335268e-06, + "loss": 0.2577515244483948, + "memory(GiB)": 66.66, + "step": 2994, + "token_acc": 0.9489414694894147, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.9584, + "grad_norm": 0.5947118426695396, + "learning_rate": 4.071495555043802e-06, + "loss": 0.3890516757965088, + "memory(GiB)": 66.66, + "step": 2995, + "token_acc": 0.8427968851395198, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.95872, + "grad_norm": 0.583465048973209, + "learning_rate": 4.070809596206976e-06, + "loss": 0.3674396872520447, + "memory(GiB)": 66.66, + "step": 2996, + "token_acc": 0.8515733227785474, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.95904, + "grad_norm": 0.594771638851312, + "learning_rate": 4.0701234419101445e-06, + "loss": 0.33433184027671814, + "memory(GiB)": 66.66, + "step": 2997, + "token_acc": 0.9198092761161681, + "train_speed(iter/s)": 0.241967 + }, + { + "epoch": 0.95936, + "grad_norm": 0.6361016313798437, + "learning_rate": 4.069437092238689e-06, + "loss": 0.3571557402610779, + "memory(GiB)": 66.66, + "step": 2998, + "token_acc": 0.8709959231217239, + "train_speed(iter/s)": 0.24197 + }, + { + "epoch": 0.95968, + "grad_norm": 0.6548228557823401, + "learning_rate": 4.068750547278011e-06, + "loss": 0.409174382686615, + "memory(GiB)": 66.66, + "step": 2999, + "token_acc": 0.8993672793009942, + "train_speed(iter/s)": 0.241973 + }, + { + "epoch": 0.96, + "grad_norm": 0.6535223083405278, + "learning_rate": 4.068063807113543e-06, + "loss": 0.3579319715499878, + "memory(GiB)": 66.66, + "step": 3000, + "token_acc": 0.7806144526179143, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.96032, + "grad_norm": 0.7078368438447441, + "learning_rate": 4.067376871830736e-06, + "loss": 0.3747713565826416, + "memory(GiB)": 66.66, + "step": 3001, + "token_acc": 0.9166243009659379, + "train_speed(iter/s)": 0.241977 + }, + { + "epoch": 0.96064, + "grad_norm": 0.6456715886886405, + "learning_rate": 4.066689741515067e-06, + "loss": 0.328687846660614, + "memory(GiB)": 66.66, + "step": 3002, + "token_acc": 0.877457581470509, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.96096, + "grad_norm": 0.6609699174036103, + "learning_rate": 4.066002416252037e-06, + "loss": 0.3238036334514618, + "memory(GiB)": 66.66, + "step": 3003, + "token_acc": 0.8964850048371493, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.96128, + "grad_norm": 0.5855820448140924, + 
"learning_rate": 4.065314896127172e-06, + "loss": 0.36559781432151794, + "memory(GiB)": 66.66, + "step": 3004, + "token_acc": 0.8336743044189853, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.9616, + "grad_norm": 0.6681246800696711, + "learning_rate": 4.064627181226024e-06, + "loss": 0.43714722990989685, + "memory(GiB)": 66.66, + "step": 3005, + "token_acc": 0.8302542925450989, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.96192, + "grad_norm": 0.5946887311925589, + "learning_rate": 4.063939271634165e-06, + "loss": 0.40838074684143066, + "memory(GiB)": 66.66, + "step": 3006, + "token_acc": 0.8466494845360825, + "train_speed(iter/s)": 0.241971 + }, + { + "epoch": 0.96224, + "grad_norm": 0.6520419172405106, + "learning_rate": 4.063251167437194e-06, + "loss": 0.3868313133716583, + "memory(GiB)": 66.66, + "step": 3007, + "token_acc": 0.9199457259158752, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.96256, + "grad_norm": 0.711206408629767, + "learning_rate": 4.062562868720733e-06, + "loss": 0.40721631050109863, + "memory(GiB)": 66.66, + "step": 3008, + "token_acc": 0.888021534320323, + "train_speed(iter/s)": 0.241973 + }, + { + "epoch": 0.96288, + "grad_norm": 0.6236447191350873, + "learning_rate": 4.061874375570429e-06, + "loss": 0.40300124883651733, + "memory(GiB)": 66.66, + "step": 3009, + "token_acc": 0.8934329532048761, + "train_speed(iter/s)": 0.241975 + }, + { + "epoch": 0.9632, + "grad_norm": 0.6360245984150391, + "learning_rate": 4.0611856880719545e-06, + "loss": 0.4260992705821991, + "memory(GiB)": 66.66, + "step": 3010, + "token_acc": 0.8623459096002989, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.96352, + "grad_norm": 0.6212492912295527, + "learning_rate": 4.0604968063110025e-06, + "loss": 0.3947071433067322, + "memory(GiB)": 66.66, + "step": 3011, + "token_acc": 0.8578117299162149, + "train_speed(iter/s)": 0.241981 + }, + { + "epoch": 0.96384, + "grad_norm": 0.6347943959276141, + "learning_rate": 4.059807730373295e-06, + "loss": 0.28062084317207336, + "memory(GiB)": 66.66, + "step": 3012, + "token_acc": 0.9196113074204947, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.96416, + "grad_norm": 0.6547350471980983, + "learning_rate": 4.059118460344573e-06, + "loss": 0.42081892490386963, + "memory(GiB)": 66.66, + "step": 3013, + "token_acc": 0.8570176975643946, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.96448, + "grad_norm": 0.6059409795702251, + "learning_rate": 4.058428996310606e-06, + "loss": 0.4065864682197571, + "memory(GiB)": 66.66, + "step": 3014, + "token_acc": 0.8469165659008464, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.9648, + "grad_norm": 0.6178606642858232, + "learning_rate": 4.057739338357185e-06, + "loss": 0.4114588797092438, + "memory(GiB)": 66.66, + "step": 3015, + "token_acc": 0.9301407059985188, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.96512, + "grad_norm": 0.7333862762957146, + "learning_rate": 4.057049486570126e-06, + "loss": 0.424064576625824, + "memory(GiB)": 66.66, + "step": 3016, + "token_acc": 0.8674188998589563, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.96544, + "grad_norm": 0.5976711442064185, + "learning_rate": 4.056359441035268e-06, + "loss": 0.3474409580230713, + "memory(GiB)": 66.66, + "step": 3017, + "token_acc": 0.9341959334565619, + "train_speed(iter/s)": 0.241976 + }, + { + "epoch": 0.96576, + "grad_norm": 0.7912110655993918, + "learning_rate": 4.055669201838478e-06, + "loss": 0.33244842290878296, + "memory(GiB)": 66.66, + "step": 3018, + 
"token_acc": 0.8931829092654825, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.96608, + "grad_norm": 0.5873231619590626, + "learning_rate": 4.054978769065641e-06, + "loss": 0.4252549707889557, + "memory(GiB)": 66.66, + "step": 3019, + "token_acc": 0.830471584038694, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.9664, + "grad_norm": 0.6538011855817928, + "learning_rate": 4.054288142802673e-06, + "loss": 0.40989792346954346, + "memory(GiB)": 66.66, + "step": 3020, + "token_acc": 0.9036144578313253, + "train_speed(iter/s)": 0.241972 + }, + { + "epoch": 0.96672, + "grad_norm": 0.6421693405707248, + "learning_rate": 4.053597323135508e-06, + "loss": 0.3694264888763428, + "memory(GiB)": 66.66, + "step": 3021, + "token_acc": 0.8996763754045307, + "train_speed(iter/s)": 0.241974 + }, + { + "epoch": 0.96704, + "grad_norm": 0.6492726041084904, + "learning_rate": 4.052906310150105e-06, + "loss": 0.29673632979393005, + "memory(GiB)": 66.66, + "step": 3022, + "token_acc": 0.9205869659041864, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.96736, + "grad_norm": 0.638977194344911, + "learning_rate": 4.052215103932453e-06, + "loss": 0.38601431250572205, + "memory(GiB)": 66.66, + "step": 3023, + "token_acc": 0.9122926607589185, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.96768, + "grad_norm": 0.6488796481461078, + "learning_rate": 4.051523704568557e-06, + "loss": 0.43887829780578613, + "memory(GiB)": 66.66, + "step": 3024, + "token_acc": 0.8970113085621971, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.968, + "grad_norm": 0.6149078994195457, + "learning_rate": 4.050832112144452e-06, + "loss": 0.3890456259250641, + "memory(GiB)": 66.66, + "step": 3025, + "token_acc": 0.9226856561546287, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.96832, + "grad_norm": 0.5959930769205526, + "learning_rate": 4.050140326746192e-06, + "loss": 0.3309323489665985, + "memory(GiB)": 66.66, + "step": 3026, + "token_acc": 0.9132087424770352, + "train_speed(iter/s)": 0.241986 + }, + { + "epoch": 0.96864, + "grad_norm": 0.6329782974598205, + "learning_rate": 4.04944834845986e-06, + "loss": 0.3551032841205597, + "memory(GiB)": 66.66, + "step": 3027, + "token_acc": 0.9223300970873787, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.96896, + "grad_norm": 0.6669643143294147, + "learning_rate": 4.04875617737156e-06, + "loss": 0.31331178545951843, + "memory(GiB)": 66.66, + "step": 3028, + "token_acc": 0.8830542151575362, + "train_speed(iter/s)": 0.241988 + }, + { + "epoch": 0.96928, + "grad_norm": 0.6930815751283106, + "learning_rate": 4.048063813567421e-06, + "loss": 0.4143233299255371, + "memory(GiB)": 66.66, + "step": 3029, + "token_acc": 0.8482734565748169, + "train_speed(iter/s)": 0.241992 + }, + { + "epoch": 0.9696, + "grad_norm": 0.5961288250526351, + "learning_rate": 4.0473712571335955e-06, + "loss": 0.3835461437702179, + "memory(GiB)": 66.66, + "step": 3030, + "token_acc": 0.9125151883353585, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.96992, + "grad_norm": 0.6240211585758586, + "learning_rate": 4.046678508156259e-06, + "loss": 0.39511752128601074, + "memory(GiB)": 66.66, + "step": 3031, + "token_acc": 0.9034285714285715, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.97024, + "grad_norm": 0.6192760491311721, + "learning_rate": 4.045985566721613e-06, + "loss": 0.3823796808719635, + "memory(GiB)": 66.66, + "step": 3032, + "token_acc": 0.8722285714285715, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.97056, + "grad_norm": 
0.6554877546092317, + "learning_rate": 4.045292432915883e-06, + "loss": 0.42295968532562256, + "memory(GiB)": 66.66, + "step": 3033, + "token_acc": 0.9489207095533233, + "train_speed(iter/s)": 0.241979 + }, + { + "epoch": 0.97088, + "grad_norm": 0.6123194440168944, + "learning_rate": 4.044599106825315e-06, + "loss": 0.35482463240623474, + "memory(GiB)": 66.66, + "step": 3034, + "token_acc": 0.8937415578568213, + "train_speed(iter/s)": 0.241978 + }, + { + "epoch": 0.9712, + "grad_norm": 0.6177202350856451, + "learning_rate": 4.0439055885361844e-06, + "loss": 0.31271353363990784, + "memory(GiB)": 66.66, + "step": 3035, + "token_acc": 0.9207317073170732, + "train_speed(iter/s)": 0.24198 + }, + { + "epoch": 0.97152, + "grad_norm": 0.6342172139394221, + "learning_rate": 4.043211878134786e-06, + "loss": 0.4732389450073242, + "memory(GiB)": 66.66, + "step": 3036, + "token_acc": 0.847056401811445, + "train_speed(iter/s)": 0.241982 + }, + { + "epoch": 0.97184, + "grad_norm": 0.6263504559655914, + "learning_rate": 4.04251797570744e-06, + "loss": 0.42907002568244934, + "memory(GiB)": 66.66, + "step": 3037, + "token_acc": 0.8604263824117255, + "train_speed(iter/s)": 0.241983 + }, + { + "epoch": 0.97216, + "grad_norm": 0.6699522346085816, + "learning_rate": 4.04182388134049e-06, + "loss": 0.30108776688575745, + "memory(GiB)": 66.66, + "step": 3038, + "token_acc": 0.9192371085472097, + "train_speed(iter/s)": 0.241985 + }, + { + "epoch": 0.97248, + "grad_norm": 0.6785237224029843, + "learning_rate": 4.041129595120304e-06, + "loss": 0.40432223677635193, + "memory(GiB)": 66.66, + "step": 3039, + "token_acc": 0.8921513269339356, + "train_speed(iter/s)": 0.241989 + }, + { + "epoch": 0.9728, + "grad_norm": 0.6558325608189489, + "learning_rate": 4.040435117133276e-06, + "loss": 0.41510009765625, + "memory(GiB)": 66.66, + "step": 3040, + "token_acc": 0.9177710843373494, + "train_speed(iter/s)": 0.241991 + }, + { + "epoch": 0.97312, + "grad_norm": 0.5789790016313526, + "learning_rate": 4.039740447465819e-06, + "loss": 0.32819241285324097, + "memory(GiB)": 66.66, + "step": 3041, + "token_acc": 0.9107098689092258, + "train_speed(iter/s)": 0.24199 + }, + { + "epoch": 0.97344, + "grad_norm": 0.5719887499657216, + "learning_rate": 4.039045586204375e-06, + "loss": 0.3085390329360962, + "memory(GiB)": 66.66, + "step": 3042, + "token_acc": 0.9004898169631348, + "train_speed(iter/s)": 0.241995 + }, + { + "epoch": 0.97376, + "grad_norm": 0.6097209023561188, + "learning_rate": 4.038350533435406e-06, + "loss": 0.4046974182128906, + "memory(GiB)": 66.66, + "step": 3043, + "token_acc": 0.8865103635745838, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.97408, + "grad_norm": 0.5964135191805874, + "learning_rate": 4.037655289245399e-06, + "loss": 0.3420829474925995, + "memory(GiB)": 66.66, + "step": 3044, + "token_acc": 0.8837209302325582, + "train_speed(iter/s)": 0.241997 + }, + { + "epoch": 0.9744, + "grad_norm": 0.643216989534053, + "learning_rate": 4.036959853720866e-06, + "loss": 0.42706575989723206, + "memory(GiB)": 66.66, + "step": 3045, + "token_acc": 0.8903225806451613, + "train_speed(iter/s)": 0.242001 + }, + { + "epoch": 0.97472, + "grad_norm": 0.6355862007661173, + "learning_rate": 4.036264226948342e-06, + "loss": 0.3125585913658142, + "memory(GiB)": 66.66, + "step": 3046, + "token_acc": 0.885859687386405, + "train_speed(iter/s)": 0.242005 + }, + { + "epoch": 0.97504, + "grad_norm": 0.62487232607928, + "learning_rate": 4.0355684090143835e-06, + "loss": 0.38233259320259094, + "memory(GiB)": 66.66, + "step": 
3047, + "token_acc": 0.8932504942106749, + "train_speed(iter/s)": 0.242003 + }, + { + "epoch": 0.97536, + "grad_norm": 0.5956650957750433, + "learning_rate": 4.034872400005576e-06, + "loss": 0.29165118932724, + "memory(GiB)": 66.66, + "step": 3048, + "token_acc": 0.9084791386271871, + "train_speed(iter/s)": 0.242006 + }, + { + "epoch": 0.97568, + "grad_norm": 0.6732009298353611, + "learning_rate": 4.034176200008524e-06, + "loss": 0.437656044960022, + "memory(GiB)": 66.66, + "step": 3049, + "token_acc": 0.8262086988986374, + "train_speed(iter/s)": 0.242006 + }, + { + "epoch": 0.976, + "grad_norm": 0.6157048922237716, + "learning_rate": 4.033479809109857e-06, + "loss": 0.33805835247039795, + "memory(GiB)": 66.66, + "step": 3050, + "token_acc": 0.9194863432531594, + "train_speed(iter/s)": 0.242008 + }, + { + "epoch": 0.97632, + "grad_norm": 0.5957551252412256, + "learning_rate": 4.032783227396231e-06, + "loss": 0.3574420213699341, + "memory(GiB)": 66.66, + "step": 3051, + "token_acc": 0.8747199022204115, + "train_speed(iter/s)": 0.242006 + }, + { + "epoch": 0.97664, + "grad_norm": 0.6151630216800531, + "learning_rate": 4.032086454954322e-06, + "loss": 0.3582664728164673, + "memory(GiB)": 66.66, + "step": 3052, + "token_acc": 0.8758486905916586, + "train_speed(iter/s)": 0.242008 + }, + { + "epoch": 0.97696, + "grad_norm": 0.6457811049569099, + "learning_rate": 4.0313894918708305e-06, + "loss": 0.3863130211830139, + "memory(GiB)": 66.66, + "step": 3053, + "token_acc": 0.8688423645320197, + "train_speed(iter/s)": 0.242008 + }, + { + "epoch": 0.97728, + "grad_norm": 0.5805900608662979, + "learning_rate": 4.030692338232483e-06, + "loss": 0.3959566652774811, + "memory(GiB)": 66.66, + "step": 3054, + "token_acc": 0.8498609823911029, + "train_speed(iter/s)": 0.242007 + }, + { + "epoch": 0.9776, + "grad_norm": 0.585777588540006, + "learning_rate": 4.029994994126027e-06, + "loss": 0.26518505811691284, + "memory(GiB)": 66.66, + "step": 3055, + "token_acc": 0.9594312003345881, + "train_speed(iter/s)": 0.24201 + }, + { + "epoch": 0.97792, + "grad_norm": 0.6824273725247003, + "learning_rate": 4.029297459638236e-06, + "loss": 0.36761602759361267, + "memory(GiB)": 66.66, + "step": 3056, + "token_acc": 0.8891566265060241, + "train_speed(iter/s)": 0.242011 + }, + { + "epoch": 0.97824, + "grad_norm": 0.6399825732239752, + "learning_rate": 4.0285997348559056e-06, + "loss": 0.4300648272037506, + "memory(GiB)": 66.66, + "step": 3057, + "token_acc": 0.8690176322418136, + "train_speed(iter/s)": 0.242012 + }, + { + "epoch": 0.97856, + "grad_norm": 0.7028747302274885, + "learning_rate": 4.027901819865855e-06, + "loss": 0.5156171321868896, + "memory(GiB)": 66.66, + "step": 3058, + "token_acc": 0.8499902210052807, + "train_speed(iter/s)": 0.242014 + }, + { + "epoch": 0.97888, + "grad_norm": 0.5841563531278968, + "learning_rate": 4.027203714754929e-06, + "loss": 0.29757222533226013, + "memory(GiB)": 66.66, + "step": 3059, + "token_acc": 0.9158508158508158, + "train_speed(iter/s)": 0.242013 + }, + { + "epoch": 0.9792, + "grad_norm": 0.6859532689883132, + "learning_rate": 4.026505419609993e-06, + "loss": 0.42499250173568726, + "memory(GiB)": 66.66, + "step": 3060, + "token_acc": 0.8440951571792693, + "train_speed(iter/s)": 0.242015 + }, + { + "epoch": 0.97952, + "grad_norm": 0.6545486530634237, + "learning_rate": 4.025806934517938e-06, + "loss": 0.38066357374191284, + "memory(GiB)": 66.66, + "step": 3061, + "token_acc": 0.8532753202803964, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.97984, + "grad_norm": 
0.5848648904677717, + "learning_rate": 4.025108259565679e-06, + "loss": 0.3859281539916992, + "memory(GiB)": 66.66, + "step": 3062, + "token_acc": 0.8835216915785636, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.98016, + "grad_norm": 0.6550237788335322, + "learning_rate": 4.024409394840154e-06, + "loss": 0.37879571318626404, + "memory(GiB)": 66.66, + "step": 3063, + "token_acc": 0.8685412160538332, + "train_speed(iter/s)": 0.242018 + }, + { + "epoch": 0.98048, + "grad_norm": 0.6346114842801273, + "learning_rate": 4.023710340428324e-06, + "loss": 0.33777114748954773, + "memory(GiB)": 66.66, + "step": 3064, + "token_acc": 0.9242123190462674, + "train_speed(iter/s)": 0.242018 + }, + { + "epoch": 0.9808, + "grad_norm": 0.5576756953387635, + "learning_rate": 4.0230110964171755e-06, + "loss": 0.27697834372520447, + "memory(GiB)": 66.66, + "step": 3065, + "token_acc": 0.9598287783997366, + "train_speed(iter/s)": 0.242016 + }, + { + "epoch": 0.98112, + "grad_norm": 0.6430088781168787, + "learning_rate": 4.022311662893716e-06, + "loss": 0.4060218036174774, + "memory(GiB)": 66.66, + "step": 3066, + "token_acc": 0.808172826652785, + "train_speed(iter/s)": 0.242019 + }, + { + "epoch": 0.98144, + "grad_norm": 0.5933877697551321, + "learning_rate": 4.021612039944978e-06, + "loss": 0.3358010947704315, + "memory(GiB)": 66.66, + "step": 3067, + "token_acc": 0.847968864023352, + "train_speed(iter/s)": 0.242019 + }, + { + "epoch": 0.98176, + "grad_norm": 0.6284930021197036, + "learning_rate": 4.020912227658018e-06, + "loss": 0.3550563454627991, + "memory(GiB)": 66.66, + "step": 3068, + "token_acc": 0.9057017543859649, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.98208, + "grad_norm": 0.5768157968813774, + "learning_rate": 4.020212226119914e-06, + "loss": 0.4372912049293518, + "memory(GiB)": 66.66, + "step": 3069, + "token_acc": 0.8149286101572384, + "train_speed(iter/s)": 0.24202 + }, + { + "epoch": 0.9824, + "grad_norm": 0.6632308482860538, + "learning_rate": 4.019512035417771e-06, + "loss": 0.4395124912261963, + "memory(GiB)": 66.66, + "step": 3070, + "token_acc": 0.821021021021021, + "train_speed(iter/s)": 0.242023 + }, + { + "epoch": 0.98272, + "grad_norm": 0.588070520243379, + "learning_rate": 4.0188116556387145e-06, + "loss": 0.3384595215320587, + "memory(GiB)": 66.66, + "step": 3071, + "token_acc": 0.8620534711300831, + "train_speed(iter/s)": 0.242023 + }, + { + "epoch": 0.98304, + "grad_norm": 0.6640342725295721, + "learning_rate": 4.0181110868698955e-06, + "loss": 0.37875986099243164, + "memory(GiB)": 66.66, + "step": 3072, + "token_acc": 0.8907960935187925, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.98336, + "grad_norm": 0.6480266666979494, + "learning_rate": 4.017410329198487e-06, + "loss": 0.2924925982952118, + "memory(GiB)": 66.66, + "step": 3073, + "token_acc": 0.9177502267916541, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 0.98368, + "grad_norm": 0.6334646880443127, + "learning_rate": 4.016709382711686e-06, + "loss": 0.34500551223754883, + "memory(GiB)": 66.66, + "step": 3074, + "token_acc": 0.9367378048780488, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.984, + "grad_norm": 0.6736538385485612, + "learning_rate": 4.016008247496713e-06, + "loss": 0.35507336258888245, + "memory(GiB)": 66.66, + "step": 3075, + "token_acc": 0.83872, + "train_speed(iter/s)": 0.242026 + }, + { + "epoch": 0.98432, + "grad_norm": 0.6261666129494896, + "learning_rate": 4.015306923640813e-06, + "loss": 0.3508971035480499, + "memory(GiB)": 66.66, + "step": 3076, + 
"token_acc": 0.9273120940303125, + "train_speed(iter/s)": 0.242025 + }, + { + "epoch": 0.98464, + "grad_norm": 0.6281003883521509, + "learning_rate": 4.014605411231252e-06, + "loss": 0.3870149850845337, + "memory(GiB)": 66.66, + "step": 3077, + "token_acc": 0.9018691588785047, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 0.98496, + "grad_norm": 0.6101524524002656, + "learning_rate": 4.013903710355323e-06, + "loss": 0.372799813747406, + "memory(GiB)": 66.66, + "step": 3078, + "token_acc": 0.8485523385300668, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.98528, + "grad_norm": 0.6731558936329355, + "learning_rate": 4.013201821100338e-06, + "loss": 0.4002857208251953, + "memory(GiB)": 66.66, + "step": 3079, + "token_acc": 0.8614746249601022, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.9856, + "grad_norm": 0.6164304063231683, + "learning_rate": 4.012499743553639e-06, + "loss": 0.4224347472190857, + "memory(GiB)": 66.66, + "step": 3080, + "token_acc": 0.9188637207575195, + "train_speed(iter/s)": 0.242012 + }, + { + "epoch": 0.98592, + "grad_norm": 0.6097258875317317, + "learning_rate": 4.0117974778025835e-06, + "loss": 0.3656595051288605, + "memory(GiB)": 66.66, + "step": 3081, + "token_acc": 0.8874271440466278, + "train_speed(iter/s)": 0.242013 + }, + { + "epoch": 0.98624, + "grad_norm": 0.6066694907435044, + "learning_rate": 4.0110950239345576e-06, + "loss": 0.36167988181114197, + "memory(GiB)": 66.66, + "step": 3082, + "token_acc": 0.9019132309350579, + "train_speed(iter/s)": 0.242015 + }, + { + "epoch": 0.98656, + "grad_norm": 0.6938196828135058, + "learning_rate": 4.010392382036969e-06, + "loss": 0.3722038269042969, + "memory(GiB)": 66.66, + "step": 3083, + "token_acc": 0.9199739752765127, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.98688, + "grad_norm": 0.6635417708267721, + "learning_rate": 4.00968955219725e-06, + "loss": 0.4014733135700226, + "memory(GiB)": 66.66, + "step": 3084, + "token_acc": 0.8659295093296475, + "train_speed(iter/s)": 0.242016 + }, + { + "epoch": 0.9872, + "grad_norm": 0.6009543277214905, + "learning_rate": 4.008986534502857e-06, + "loss": 0.36181601881980896, + "memory(GiB)": 66.66, + "step": 3085, + "token_acc": 0.9239543726235742, + "train_speed(iter/s)": 0.242017 + }, + { + "epoch": 0.98752, + "grad_norm": 0.7636861386398599, + "learning_rate": 4.008283329041265e-06, + "loss": 0.28985148668289185, + "memory(GiB)": 66.66, + "step": 3086, + "token_acc": 0.9227053140096618, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.98784, + "grad_norm": 0.6530344666120057, + "learning_rate": 4.0075799358999786e-06, + "loss": 0.36181965470314026, + "memory(GiB)": 66.66, + "step": 3087, + "token_acc": 0.8832946635730858, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 0.98816, + "grad_norm": 0.5997951517435104, + "learning_rate": 4.006876355166521e-06, + "loss": 0.37263351678848267, + "memory(GiB)": 66.66, + "step": 3088, + "token_acc": 0.9103889709502708, + "train_speed(iter/s)": 0.242026 + }, + { + "epoch": 0.98848, + "grad_norm": 0.5952937790357733, + "learning_rate": 4.006172586928442e-06, + "loss": 0.33403676748275757, + "memory(GiB)": 66.66, + "step": 3089, + "token_acc": 0.8666839916839917, + "train_speed(iter/s)": 0.24203 + }, + { + "epoch": 0.9888, + "grad_norm": 0.597724420692452, + "learning_rate": 4.005468631273312e-06, + "loss": 0.3427974581718445, + "memory(GiB)": 66.66, + "step": 3090, + "token_acc": 0.8937386334112757, + "train_speed(iter/s)": 0.242033 + }, + { + "epoch": 0.98912, + "grad_norm": 
0.6567586101385148, + "learning_rate": 4.004764488288728e-06, + "loss": 0.34106123447418213, + "memory(GiB)": 66.66, + "step": 3091, + "token_acc": 0.8790560471976401, + "train_speed(iter/s)": 0.242037 + }, + { + "epoch": 0.98944, + "grad_norm": 0.5660414986485244, + "learning_rate": 4.004060158062306e-06, + "loss": 0.35858964920043945, + "memory(GiB)": 66.66, + "step": 3092, + "token_acc": 0.8699256718124643, + "train_speed(iter/s)": 0.242039 + }, + { + "epoch": 0.98976, + "grad_norm": 0.5640703917292885, + "learning_rate": 4.00335564068169e-06, + "loss": 0.335178017616272, + "memory(GiB)": 66.66, + "step": 3093, + "token_acc": 0.8048289738430584, + "train_speed(iter/s)": 0.24204 + }, + { + "epoch": 0.99008, + "grad_norm": 0.763409342059242, + "learning_rate": 4.002650936234543e-06, + "loss": 0.3779940605163574, + "memory(GiB)": 66.66, + "step": 3094, + "token_acc": 0.8968858131487889, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.9904, + "grad_norm": 0.6529328635406166, + "learning_rate": 4.001946044808555e-06, + "loss": 0.4183294177055359, + "memory(GiB)": 66.66, + "step": 3095, + "token_acc": 0.8680333119795003, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.99072, + "grad_norm": 0.6478054550920296, + "learning_rate": 4.0012409664914355e-06, + "loss": 0.37891075015068054, + "memory(GiB)": 66.66, + "step": 3096, + "token_acc": 0.9141494435612083, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.99104, + "grad_norm": 0.6016764966808266, + "learning_rate": 4.0005357013709215e-06, + "loss": 0.2899223864078522, + "memory(GiB)": 66.66, + "step": 3097, + "token_acc": 0.927613104524181, + "train_speed(iter/s)": 0.242045 + }, + { + "epoch": 0.99136, + "grad_norm": 0.6400553454330346, + "learning_rate": 3.9998302495347685e-06, + "loss": 0.40308839082717896, + "memory(GiB)": 66.66, + "step": 3098, + "token_acc": 0.8404392764857881, + "train_speed(iter/s)": 0.242046 + }, + { + "epoch": 0.99168, + "grad_norm": 0.6758659659345682, + "learning_rate": 3.99912461107076e-06, + "loss": 0.34148359298706055, + "memory(GiB)": 66.66, + "step": 3099, + "token_acc": 0.854655056932351, + "train_speed(iter/s)": 0.242049 + }, + { + "epoch": 0.992, + "grad_norm": 0.5769562833397032, + "learning_rate": 3.998418786066699e-06, + "loss": 0.389863520860672, + "memory(GiB)": 66.66, + "step": 3100, + "token_acc": 0.8770161290322581, + "train_speed(iter/s)": 0.242047 + }, + { + "epoch": 0.99232, + "grad_norm": 0.6088011164316033, + "learning_rate": 3.997712774610414e-06, + "loss": 0.3965756297111511, + "memory(GiB)": 66.66, + "step": 3101, + "token_acc": 0.8821740782972254, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.99264, + "grad_norm": 0.6322101334219713, + "learning_rate": 3.997006576789756e-06, + "loss": 0.4359557628631592, + "memory(GiB)": 66.66, + "step": 3102, + "token_acc": 0.8356659142212189, + "train_speed(iter/s)": 0.242039 + }, + { + "epoch": 0.99296, + "grad_norm": 0.5950308903677025, + "learning_rate": 3.9963001926925985e-06, + "loss": 0.30856961011886597, + "memory(GiB)": 66.66, + "step": 3103, + "token_acc": 0.9288267793305167, + "train_speed(iter/s)": 0.242039 + }, + { + "epoch": 0.99328, + "grad_norm": 0.5739059662365252, + "learning_rate": 3.9955936224068395e-06, + "loss": 0.32066798210144043, + "memory(GiB)": 66.66, + "step": 3104, + "token_acc": 0.9717420212765957, + "train_speed(iter/s)": 0.242037 + }, + { + "epoch": 0.9936, + "grad_norm": 0.5513030090576488, + "learning_rate": 3.9948868660203975e-06, + "loss": 0.281091570854187, + "memory(GiB)": 66.66, + 
"step": 3105, + "token_acc": 0.936340206185567, + "train_speed(iter/s)": 0.24204 + }, + { + "epoch": 0.99392, + "grad_norm": 0.6006391901257054, + "learning_rate": 3.994179923621219e-06, + "loss": 0.4155902862548828, + "memory(GiB)": 66.66, + "step": 3106, + "token_acc": 0.8892475287472261, + "train_speed(iter/s)": 0.242041 + }, + { + "epoch": 0.99424, + "grad_norm": 0.6421510653438899, + "learning_rate": 3.9934727952972675e-06, + "loss": 0.3789929151535034, + "memory(GiB)": 66.66, + "step": 3107, + "token_acc": 0.9046099290780142, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.99456, + "grad_norm": 0.6357451938545702, + "learning_rate": 3.9927654811365355e-06, + "loss": 0.44484463334083557, + "memory(GiB)": 66.66, + "step": 3108, + "token_acc": 0.922690240942971, + "train_speed(iter/s)": 0.242043 + }, + { + "epoch": 0.99488, + "grad_norm": 0.5797222699810971, + "learning_rate": 3.992057981227035e-06, + "loss": 0.35074299573898315, + "memory(GiB)": 66.66, + "step": 3109, + "token_acc": 0.8651128192412596, + "train_speed(iter/s)": 0.24203 + }, + { + "epoch": 0.9952, + "grad_norm": 0.6200849364134592, + "learning_rate": 3.9913502956568014e-06, + "loss": 0.35084646940231323, + "memory(GiB)": 66.66, + "step": 3110, + "token_acc": 0.9066469719350074, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.99552, + "grad_norm": 0.6818032519815111, + "learning_rate": 3.990642424513895e-06, + "loss": 0.4333032965660095, + "memory(GiB)": 66.66, + "step": 3111, + "token_acc": 0.8694567627494457, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.99584, + "grad_norm": 0.6567062927723352, + "learning_rate": 3.9899343678863975e-06, + "loss": 0.35835060477256775, + "memory(GiB)": 66.66, + "step": 3112, + "token_acc": 0.8950377315344157, + "train_speed(iter/s)": 0.242028 + }, + { + "epoch": 0.99616, + "grad_norm": 0.6244706390946302, + "learning_rate": 3.9892261258624156e-06, + "loss": 0.39497095346450806, + "memory(GiB)": 66.66, + "step": 3113, + "token_acc": 0.9319912948857454, + "train_speed(iter/s)": 0.242032 + }, + { + "epoch": 0.99648, + "grad_norm": 0.5599091619333585, + "learning_rate": 3.988517698530075e-06, + "loss": 0.3835628032684326, + "memory(GiB)": 66.66, + "step": 3114, + "token_acc": 0.8729046785088816, + "train_speed(iter/s)": 0.242027 + }, + { + "epoch": 0.9968, + "grad_norm": 0.6275907755772118, + "learning_rate": 3.987809085977529e-06, + "loss": 0.3961995840072632, + "memory(GiB)": 66.66, + "step": 3115, + "token_acc": 0.8807670928293496, + "train_speed(iter/s)": 0.242025 + }, + { + "epoch": 0.99712, + "grad_norm": 0.6224864489423445, + "learning_rate": 3.987100288292953e-06, + "loss": 0.40550118684768677, + "memory(GiB)": 66.66, + "step": 3116, + "token_acc": 0.8588377723970945, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.99744, + "grad_norm": 0.6717362261827764, + "learning_rate": 3.986391305564542e-06, + "loss": 0.3589247465133667, + "memory(GiB)": 66.66, + "step": 3117, + "token_acc": 0.8956135480288728, + "train_speed(iter/s)": 0.242022 + }, + { + "epoch": 0.99776, + "grad_norm": 0.6222380988458677, + "learning_rate": 3.985682137880519e-06, + "loss": 0.4265488386154175, + "memory(GiB)": 66.66, + "step": 3118, + "token_acc": 0.9115314215985357, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.99808, + "grad_norm": 0.5945929735057967, + "learning_rate": 3.984972785329126e-06, + "loss": 0.3854430317878723, + "memory(GiB)": 66.66, + "step": 3119, + "token_acc": 0.9112375533428165, + "train_speed(iter/s)": 0.242019 + }, + { + "epoch": 0.9984, + 
"grad_norm": 0.6222289357200256, + "learning_rate": 3.984263247998631e-06, + "loss": 0.28845036029815674, + "memory(GiB)": 66.66, + "step": 3120, + "token_acc": 0.9475138121546961, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.99872, + "grad_norm": 0.5543950464985425, + "learning_rate": 3.983553525977323e-06, + "loss": 0.2624782621860504, + "memory(GiB)": 66.66, + "step": 3121, + "token_acc": 0.8928057553956834, + "train_speed(iter/s)": 0.242021 + }, + { + "epoch": 0.99904, + "grad_norm": 0.6419713684320644, + "learning_rate": 3.982843619353514e-06, + "loss": 0.43620073795318604, + "memory(GiB)": 66.66, + "step": 3122, + "token_acc": 0.8563268892794376, + "train_speed(iter/s)": 0.242025 + }, + { + "epoch": 0.99936, + "grad_norm": 0.5701939093074034, + "learning_rate": 3.98213352821554e-06, + "loss": 0.3354141414165497, + "memory(GiB)": 66.66, + "step": 3123, + "token_acc": 0.89981718464351, + "train_speed(iter/s)": 0.242028 + }, + { + "epoch": 0.99968, + "grad_norm": 0.7074398165764338, + "learning_rate": 3.9814232526517594e-06, + "loss": 0.4287683367729187, + "memory(GiB)": 66.66, + "step": 3124, + "token_acc": 0.8511083228774571, + "train_speed(iter/s)": 0.242025 + }, + { + "epoch": 1.0, + "grad_norm": 0.6398078426978572, + "learning_rate": 3.980712792750555e-06, + "loss": 0.38725709915161133, + "memory(GiB)": 66.66, + "step": 3125, + "token_acc": 0.9159907300115875, + "train_speed(iter/s)": 0.242024 + }, + { + "epoch": 1.00032, + "grad_norm": 0.5652272452141696, + "learning_rate": 3.9800021486003284e-06, + "loss": 0.290088027715683, + "memory(GiB)": 21.81, + "step": 3126, + "token_acc": 0.9140762463343108, + "train_speed(iter/s)": 90.964351 + }, + { + "epoch": 1.00064, + "grad_norm": 0.5920748695370283, + "learning_rate": 3.97929132028951e-06, + "loss": 0.3566083014011383, + "memory(GiB)": 23.24, + "step": 3127, + "token_acc": 0.9307593307593307, + "train_speed(iter/s)": 81.710531 + }, + { + "epoch": 1.00096, + "grad_norm": 0.6677514708794794, + "learning_rate": 3.978580307906547e-06, + "loss": 0.35384273529052734, + "memory(GiB)": 23.24, + "step": 3128, + "token_acc": 0.8563895781637717, + "train_speed(iter/s)": 73.782838 + }, + { + "epoch": 1.00128, + "grad_norm": 0.5545268465887327, + "learning_rate": 3.977869111539916e-06, + "loss": 0.2807948887348175, + "memory(GiB)": 23.24, + "step": 3129, + "token_acc": 0.9506057781919851, + "train_speed(iter/s)": 67.267054 + }, + { + "epoch": 1.0016, + "grad_norm": 0.596923867978272, + "learning_rate": 3.9771577312781105e-06, + "loss": 0.4190564751625061, + "memory(GiB)": 23.24, + "step": 3130, + "token_acc": 0.8907074973600845, + "train_speed(iter/s)": 61.91928 + }, + { + "epoch": 1.00192, + "grad_norm": 0.633965479201658, + "learning_rate": 3.976446167209651e-06, + "loss": 0.3766147494316101, + "memory(GiB)": 23.24, + "step": 3131, + "token_acc": 0.9321215563760296, + "train_speed(iter/s)": 57.385758 + }, + { + "epoch": 1.00224, + "grad_norm": 0.5965646648968507, + "learning_rate": 3.975734419423077e-06, + "loss": 0.32973071932792664, + "memory(GiB)": 25.07, + "step": 3132, + "token_acc": 0.8443054641211323, + "train_speed(iter/s)": 53.587972 + }, + { + "epoch": 1.00256, + "grad_norm": 0.6270587998579104, + "learning_rate": 3.975022488006956e-06, + "loss": 0.2923009991645813, + "memory(GiB)": 25.07, + "step": 3133, + "token_acc": 0.8988563259471051, + "train_speed(iter/s)": 50.260637 + }, + { + "epoch": 1.00288, + "grad_norm": 0.6470229322864708, + "learning_rate": 3.974310373049873e-06, + "loss": 0.3080177903175354, + "memory(GiB)": 
25.07, + "step": 3134, + "token_acc": 0.9108040201005025, + "train_speed(iter/s)": 47.294373 + }, + { + "epoch": 1.0032, + "grad_norm": 0.6218836224273444, + "learning_rate": 3.9735980746404416e-06, + "loss": 0.3077126741409302, + "memory(GiB)": 25.07, + "step": 3135, + "token_acc": 0.9446267432321575, + "train_speed(iter/s)": 44.543699 + }, + { + "epoch": 1.00352, + "grad_norm": 0.5812799033401354, + "learning_rate": 3.9728855928672905e-06, + "loss": 0.3192828297615051, + "memory(GiB)": 25.07, + "step": 3136, + "token_acc": 0.9313632030505243, + "train_speed(iter/s)": 41.930327 + }, + { + "epoch": 1.00384, + "grad_norm": 0.6196718824938934, + "learning_rate": 3.972172927819079e-06, + "loss": 0.37129127979278564, + "memory(GiB)": 25.07, + "step": 3137, + "token_acc": 0.9260814783704326, + "train_speed(iter/s)": 39.837584 + }, + { + "epoch": 1.00416, + "grad_norm": 0.6226317990856505, + "learning_rate": 3.971460079584486e-06, + "loss": 0.3404998779296875, + "memory(GiB)": 25.07, + "step": 3138, + "token_acc": 0.9108865411116215, + "train_speed(iter/s)": 37.950525 + }, + { + "epoch": 1.00448, + "grad_norm": 0.577572597538489, + "learning_rate": 3.970747048252211e-06, + "loss": 0.24486540257930756, + "memory(GiB)": 25.07, + "step": 3139, + "token_acc": 0.9436568520537986, + "train_speed(iter/s)": 36.232403 + }, + { + "epoch": 1.0048, + "grad_norm": 0.5771326810430991, + "learning_rate": 3.97003383391098e-06, + "loss": 0.3247727155685425, + "memory(GiB)": 25.07, + "step": 3140, + "token_acc": 0.9431739431739432, + "train_speed(iter/s)": 34.532895 + }, + { + "epoch": 1.00512, + "grad_norm": 0.6663745235187681, + "learning_rate": 3.969320436649538e-06, + "loss": 0.3895089030265808, + "memory(GiB)": 30.73, + "step": 3141, + "token_acc": 0.8828880045480386, + "train_speed(iter/s)": 32.990189 + }, + { + "epoch": 1.0054400000000001, + "grad_norm": 0.6709925635551655, + "learning_rate": 3.968606856556659e-06, + "loss": 0.3266175389289856, + "memory(GiB)": 30.73, + "step": 3142, + "token_acc": 0.9284607438016529, + "train_speed(iter/s)": 31.694238 + }, + { + "epoch": 1.00576, + "grad_norm": 0.6618567948714008, + "learning_rate": 3.9678930937211325e-06, + "loss": 0.3643980026245117, + "memory(GiB)": 30.73, + "step": 3143, + "token_acc": 0.8677429369760944, + "train_speed(iter/s)": 30.453617 + }, + { + "epoch": 1.00608, + "grad_norm": 0.6898491213653986, + "learning_rate": 3.967179148231774e-06, + "loss": 0.2866407036781311, + "memory(GiB)": 30.73, + "step": 3144, + "token_acc": 0.8694553621560921, + "train_speed(iter/s)": 29.358917 + }, + { + "epoch": 1.0064, + "grad_norm": 0.6772368603897228, + "learning_rate": 3.966465020177423e-06, + "loss": 0.3295811116695404, + "memory(GiB)": 30.73, + "step": 3145, + "token_acc": 0.8940412186379928, + "train_speed(iter/s)": 28.133102 + }, + { + "epoch": 1.00672, + "grad_norm": 0.6460256763047797, + "learning_rate": 3.965750709646939e-06, + "loss": 0.34385165572166443, + "memory(GiB)": 30.73, + "step": 3146, + "token_acc": 0.8955549495500409, + "train_speed(iter/s)": 27.107277 + }, + { + "epoch": 1.00704, + "grad_norm": 0.6321607768123595, + "learning_rate": 3.965036216729206e-06, + "loss": 0.4027659595012665, + "memory(GiB)": 30.73, + "step": 3147, + "token_acc": 0.8871745419479267, + "train_speed(iter/s)": 26.203348 + }, + { + "epoch": 1.00736, + "grad_norm": 0.6500603079475453, + "learning_rate": 3.964321541513131e-06, + "loss": 0.3250289559364319, + "memory(GiB)": 30.73, + "step": 3148, + "token_acc": 0.9369688385269122, + "train_speed(iter/s)": 25.324235 + }, 
+ { + "epoch": 1.00768, + "grad_norm": 0.7173937976248449, + "learning_rate": 3.963606684087642e-06, + "loss": 0.405106782913208, + "memory(GiB)": 30.73, + "step": 3149, + "token_acc": 0.9107913669064748, + "train_speed(iter/s)": 24.518374 + }, + { + "epoch": 1.008, + "grad_norm": 0.6609143158468491, + "learning_rate": 3.962891644541691e-06, + "loss": 0.34746992588043213, + "memory(GiB)": 30.73, + "step": 3150, + "token_acc": 0.9444043321299639, + "train_speed(iter/s)": 23.775182 + }, + { + "epoch": 1.00832, + "grad_norm": 0.631581330464547, + "learning_rate": 3.962176422964252e-06, + "loss": 0.4089304804801941, + "memory(GiB)": 37.2, + "step": 3151, + "token_acc": 0.8850069412309116, + "train_speed(iter/s)": 23.081372 + }, + { + "epoch": 1.00864, + "grad_norm": 0.6435554674511667, + "learning_rate": 3.961461019444322e-06, + "loss": 0.3344465494155884, + "memory(GiB)": 37.2, + "step": 3152, + "token_acc": 0.9450680678289946, + "train_speed(iter/s)": 22.434326 + }, + { + "epoch": 1.00896, + "grad_norm": 0.6793828871964103, + "learning_rate": 3.9607454340709215e-06, + "loss": 0.32530516386032104, + "memory(GiB)": 37.2, + "step": 3153, + "token_acc": 0.8726549175667994, + "train_speed(iter/s)": 21.838619 + }, + { + "epoch": 1.00928, + "grad_norm": 0.6290217359725313, + "learning_rate": 3.9600296669330915e-06, + "loss": 0.3607853055000305, + "memory(GiB)": 37.2, + "step": 3154, + "token_acc": 0.8498715853373804, + "train_speed(iter/s)": 21.252678 + }, + { + "epoch": 1.0096, + "grad_norm": 0.6769964117268205, + "learning_rate": 3.959313718119897e-06, + "loss": 0.34305131435394287, + "memory(GiB)": 37.2, + "step": 3155, + "token_acc": 0.89002849002849, + "train_speed(iter/s)": 20.699206 + }, + { + "epoch": 1.00992, + "grad_norm": 0.6070368091330045, + "learning_rate": 3.958597587720426e-06, + "loss": 0.29942283034324646, + "memory(GiB)": 37.2, + "step": 3156, + "token_acc": 0.8899685624464133, + "train_speed(iter/s)": 20.189545 + }, + { + "epoch": 1.01024, + "grad_norm": 0.645106259058411, + "learning_rate": 3.957881275823788e-06, + "loss": 0.35384702682495117, + "memory(GiB)": 43.82, + "step": 3157, + "token_acc": 0.9071257005604484, + "train_speed(iter/s)": 19.551289 + }, + { + "epoch": 1.01056, + "grad_norm": 0.6894697097760919, + "learning_rate": 3.957164782519115e-06, + "loss": 0.28738275170326233, + "memory(GiB)": 43.82, + "step": 3158, + "token_acc": 0.8880263923927809, + "train_speed(iter/s)": 19.095222 + }, + { + "epoch": 1.01088, + "grad_norm": 0.7007586865227807, + "learning_rate": 3.956448107895564e-06, + "loss": 0.36806386709213257, + "memory(GiB)": 43.82, + "step": 3159, + "token_acc": 0.9297321036451471, + "train_speed(iter/s)": 18.647464 + }, + { + "epoch": 1.0112, + "grad_norm": 0.5774497044599876, + "learning_rate": 3.9557312520423105e-06, + "loss": 0.2085527628660202, + "memory(GiB)": 43.82, + "step": 3160, + "token_acc": 0.9342619745845552, + "train_speed(iter/s)": 18.218122 + }, + { + "epoch": 1.01152, + "grad_norm": 0.5902512082000282, + "learning_rate": 3.955014215048556e-06, + "loss": 0.3395346701145172, + "memory(GiB)": 43.82, + "step": 3161, + "token_acc": 0.9359101420548398, + "train_speed(iter/s)": 17.77714 + }, + { + "epoch": 1.01184, + "grad_norm": 0.6522763495396956, + "learning_rate": 3.954296997003523e-06, + "loss": 0.3549140691757202, + "memory(GiB)": 43.82, + "step": 3162, + "token_acc": 0.9357517482517482, + "train_speed(iter/s)": 17.385276 + }, + { + "epoch": 1.01216, + "grad_norm": 0.6969644934463253, + "learning_rate": 3.953579597996457e-06, + "loss": 
0.29081594944000244, + "memory(GiB)": 43.82, + "step": 3163, + "token_acc": 0.919066317626527, + "train_speed(iter/s)": 17.019988 + }, + { + "epoch": 1.01248, + "grad_norm": 0.6760379636309022, + "learning_rate": 3.952862018116627e-06, + "loss": 0.3158886730670929, + "memory(GiB)": 43.82, + "step": 3164, + "token_acc": 0.9213340122199593, + "train_speed(iter/s)": 16.665821 + }, + { + "epoch": 1.0128, + "grad_norm": 0.6393397381214676, + "learning_rate": 3.952144257453322e-06, + "loss": 0.29758232831954956, + "memory(GiB)": 43.82, + "step": 3165, + "token_acc": 0.8919338159255429, + "train_speed(iter/s)": 16.320696 + }, + { + "epoch": 1.01312, + "grad_norm": 0.7212488126033124, + "learning_rate": 3.951426316095855e-06, + "loss": 0.4164792001247406, + "memory(GiB)": 43.82, + "step": 3166, + "token_acc": 0.8895853423336548, + "train_speed(iter/s)": 16.003489 + }, + { + "epoch": 1.01344, + "grad_norm": 0.6368086052348453, + "learning_rate": 3.95070819413356e-06, + "loss": 0.3594931960105896, + "memory(GiB)": 43.82, + "step": 3167, + "token_acc": 0.8953409858203917, + "train_speed(iter/s)": 15.689026 + }, + { + "epoch": 1.01376, + "grad_norm": 0.6238359776789488, + "learning_rate": 3.949989891655798e-06, + "loss": 0.29308125376701355, + "memory(GiB)": 43.82, + "step": 3168, + "token_acc": 0.9467618002195389, + "train_speed(iter/s)": 15.395148 + }, + { + "epoch": 1.01408, + "grad_norm": 0.6218910395584197, + "learning_rate": 3.949271408751946e-06, + "loss": 0.3094208240509033, + "memory(GiB)": 43.82, + "step": 3169, + "token_acc": 0.923432089265102, + "train_speed(iter/s)": 15.110249 + }, + { + "epoch": 1.0144, + "grad_norm": 0.6445294561375552, + "learning_rate": 3.9485527455114095e-06, + "loss": 0.3001214861869812, + "memory(GiB)": 43.82, + "step": 3170, + "token_acc": 0.9292929292929293, + "train_speed(iter/s)": 14.837451 + }, + { + "epoch": 1.01472, + "grad_norm": 0.6404032630355886, + "learning_rate": 3.947833902023612e-06, + "loss": 0.4107641577720642, + "memory(GiB)": 43.82, + "step": 3171, + "token_acc": 0.8544235345014061, + "train_speed(iter/s)": 14.542076 + }, + { + "epoch": 1.01504, + "grad_norm": 0.6539316729304654, + "learning_rate": 3.947114878378002e-06, + "loss": 0.4076525568962097, + "memory(GiB)": 43.82, + "step": 3172, + "token_acc": 0.904320987654321, + "train_speed(iter/s)": 14.270061 + }, + { + "epoch": 1.01536, + "grad_norm": 0.6824455451049695, + "learning_rate": 3.946395674664049e-06, + "loss": 0.45071280002593994, + "memory(GiB)": 43.82, + "step": 3173, + "token_acc": 0.8421052631578947, + "train_speed(iter/s)": 14.020129 + }, + { + "epoch": 1.01568, + "grad_norm": 0.6759517544932774, + "learning_rate": 3.945676290971246e-06, + "loss": 0.30259618163108826, + "memory(GiB)": 43.82, + "step": 3174, + "token_acc": 0.8908467939304944, + "train_speed(iter/s)": 13.784015 + }, + { + "epoch": 1.016, + "grad_norm": 0.6741826706972814, + "learning_rate": 3.9449567273891055e-06, + "loss": 0.36763739585876465, + "memory(GiB)": 43.82, + "step": 3175, + "token_acc": 0.9143005991143527, + "train_speed(iter/s)": 13.54328 + }, + { + "epoch": 1.01632, + "grad_norm": 0.591127768796123, + "learning_rate": 3.944236984007167e-06, + "loss": 0.29976019263267517, + "memory(GiB)": 43.82, + "step": 3176, + "token_acc": 0.9250785105428443, + "train_speed(iter/s)": 13.314281 + }, + { + "epoch": 1.01664, + "grad_norm": 0.6229226289129306, + "learning_rate": 3.9435170609149905e-06, + "loss": 0.3207018971443176, + "memory(GiB)": 43.82, + "step": 3177, + "token_acc": 0.8771929824561403, + 
"train_speed(iter/s)": 13.090312 + }, + { + "epoch": 1.01696, + "grad_norm": 0.6251351253401871, + "learning_rate": 3.942796958202157e-06, + "loss": 0.2770608961582184, + "memory(GiB)": 50.49, + "step": 3178, + "token_acc": 0.8962476547842402, + "train_speed(iter/s)": 12.877963 + }, + { + "epoch": 1.01728, + "grad_norm": 0.6604508967227765, + "learning_rate": 3.9420766759582705e-06, + "loss": 0.2831418812274933, + "memory(GiB)": 50.49, + "step": 3179, + "token_acc": 0.8875271792844436, + "train_speed(iter/s)": 12.683308 + }, + { + "epoch": 1.0176, + "grad_norm": 0.6290175438397092, + "learning_rate": 3.941356214272958e-06, + "loss": 0.2551324963569641, + "memory(GiB)": 50.49, + "step": 3180, + "token_acc": 0.9212737726669615, + "train_speed(iter/s)": 12.47319 + }, + { + "epoch": 1.01792, + "grad_norm": 0.7637033696798602, + "learning_rate": 3.940635573235868e-06, + "loss": 0.3826860785484314, + "memory(GiB)": 58.06, + "step": 3181, + "token_acc": 0.8333963691376702, + "train_speed(iter/s)": 12.274004 + }, + { + "epoch": 1.01824, + "grad_norm": 0.627013403600347, + "learning_rate": 3.939914752936673e-06, + "loss": 0.3953869342803955, + "memory(GiB)": 58.06, + "step": 3182, + "token_acc": 0.8275862068965517, + "train_speed(iter/s)": 12.088544 + }, + { + "epoch": 1.01856, + "grad_norm": 0.6529736760047801, + "learning_rate": 3.939193753465066e-06, + "loss": 0.395064115524292, + "memory(GiB)": 58.06, + "step": 3183, + "token_acc": 0.8265971316818774, + "train_speed(iter/s)": 11.897993 + }, + { + "epoch": 1.01888, + "grad_norm": 0.6632305059573246, + "learning_rate": 3.938472574910762e-06, + "loss": 0.33610376715660095, + "memory(GiB)": 58.06, + "step": 3184, + "token_acc": 0.9177043854899838, + "train_speed(iter/s)": 11.722775 + }, + { + "epoch": 1.0192, + "grad_norm": 0.6323673050580984, + "learning_rate": 3.9377512173635e-06, + "loss": 0.3501428961753845, + "memory(GiB)": 58.06, + "step": 3185, + "token_acc": 0.8712374581939799, + "train_speed(iter/s)": 11.555427 + }, + { + "epoch": 1.01952, + "grad_norm": 0.6380845599662051, + "learning_rate": 3.9370296809130395e-06, + "loss": 0.3098258972167969, + "memory(GiB)": 58.06, + "step": 3186, + "token_acc": 0.9526881720430107, + "train_speed(iter/s)": 11.391464 + }, + { + "epoch": 1.01984, + "grad_norm": 0.707475879497853, + "learning_rate": 3.936307965649165e-06, + "loss": 0.36391544342041016, + "memory(GiB)": 58.06, + "step": 3187, + "token_acc": 0.8814834721848965, + "train_speed(iter/s)": 11.239814 + }, + { + "epoch": 1.02016, + "grad_norm": 0.6604602775891862, + "learning_rate": 3.935586071661681e-06, + "loss": 0.31336894631385803, + "memory(GiB)": 58.06, + "step": 3188, + "token_acc": 0.8589647411852963, + "train_speed(iter/s)": 11.074799 + }, + { + "epoch": 1.02048, + "grad_norm": 0.6072779766625258, + "learning_rate": 3.9348639990404125e-06, + "loss": 0.34444335103034973, + "memory(GiB)": 58.06, + "step": 3189, + "token_acc": 0.9140061791967045, + "train_speed(iter/s)": 10.922544 + }, + { + "epoch": 1.0208, + "grad_norm": 0.6227127681701178, + "learning_rate": 3.934141747875211e-06, + "loss": 0.3639755845069885, + "memory(GiB)": 58.06, + "step": 3190, + "token_acc": 0.9324324324324325, + "train_speed(iter/s)": 10.769576 + }, + { + "epoch": 1.02112, + "grad_norm": 0.9466579473803354, + "learning_rate": 3.933419318255947e-06, + "loss": 0.27469655871391296, + "memory(GiB)": 58.06, + "step": 3191, + "token_acc": 0.9000487567040468, + "train_speed(iter/s)": 10.633093 + }, + { + "epoch": 1.02144, + "grad_norm": 0.5754914891530166, + 
"learning_rate": 3.9326967102725165e-06, + "loss": 0.28635209798812866, + "memory(GiB)": 68.74, + "step": 3192, + "token_acc": 0.945648854961832, + "train_speed(iter/s)": 10.47939 + }, + { + "epoch": 1.02176, + "grad_norm": 0.7033298232655905, + "learning_rate": 3.931973924014833e-06, + "loss": 0.2506576478481293, + "memory(GiB)": 68.74, + "step": 3193, + "token_acc": 0.9654648956356736, + "train_speed(iter/s)": 10.345582 + }, + { + "epoch": 1.02208, + "grad_norm": 0.6065329251636059, + "learning_rate": 3.931250959572835e-06, + "loss": 0.3540075719356537, + "memory(GiB)": 68.74, + "step": 3194, + "token_acc": 0.867218987908643, + "train_speed(iter/s)": 10.210221 + }, + { + "epoch": 1.0224, + "grad_norm": 0.6308720213681971, + "learning_rate": 3.930527817036485e-06, + "loss": 0.3110886216163635, + "memory(GiB)": 68.74, + "step": 3195, + "token_acc": 0.8754633061527057, + "train_speed(iter/s)": 10.078073 + }, + { + "epoch": 1.02272, + "grad_norm": 0.6071345211715911, + "learning_rate": 3.9298044964957636e-06, + "loss": 0.39681798219680786, + "memory(GiB)": 68.74, + "step": 3196, + "token_acc": 0.802322206095791, + "train_speed(iter/s)": 9.932401 + }, + { + "epoch": 1.02304, + "grad_norm": 0.6604670759081263, + "learning_rate": 3.929080998040676e-06, + "loss": 0.35403141379356384, + "memory(GiB)": 68.74, + "step": 3197, + "token_acc": 0.9469901168014375, + "train_speed(iter/s)": 9.816332 + }, + { + "epoch": 1.02336, + "grad_norm": 0.7425156985563357, + "learning_rate": 3.928357321761249e-06, + "loss": 0.4334304928779602, + "memory(GiB)": 68.74, + "step": 3198, + "token_acc": 0.8252243741143127, + "train_speed(iter/s)": 9.70275 + }, + { + "epoch": 1.02368, + "grad_norm": 0.6775466614790605, + "learning_rate": 3.927633467747532e-06, + "loss": 0.38116419315338135, + "memory(GiB)": 68.74, + "step": 3199, + "token_acc": 0.8815900761206653, + "train_speed(iter/s)": 9.590095 + }, + { + "epoch": 1.024, + "grad_norm": 0.6440721456408138, + "learning_rate": 3.926909436089595e-06, + "loss": 0.3240508437156677, + "memory(GiB)": 68.74, + "step": 3200, + "token_acc": 0.8849079754601227, + "train_speed(iter/s)": 9.482319 + }, + { + "epoch": 1.02432, + "grad_norm": 0.6324790291713014, + "learning_rate": 3.926185226877532e-06, + "loss": 0.32029759883880615, + "memory(GiB)": 68.74, + "step": 3201, + "token_acc": 0.8313447927199191, + "train_speed(iter/s)": 9.377431 + }, + { + "epoch": 1.02464, + "grad_norm": 0.6453812247537026, + "learning_rate": 3.9254608402014585e-06, + "loss": 0.37813466787338257, + "memory(GiB)": 68.74, + "step": 3202, + "token_acc": 0.8895086321381143, + "train_speed(iter/s)": 9.270841 + }, + { + "epoch": 1.02496, + "grad_norm": 0.6434980145950646, + "learning_rate": 3.924736276151512e-06, + "loss": 0.4055531322956085, + "memory(GiB)": 68.74, + "step": 3203, + "token_acc": 0.8925487687879757, + "train_speed(iter/s)": 9.15557 + }, + { + "epoch": 1.02528, + "grad_norm": 0.7427188489247728, + "learning_rate": 3.924011534817851e-06, + "loss": 0.31394702196121216, + "memory(GiB)": 68.74, + "step": 3204, + "token_acc": 0.9203093476798924, + "train_speed(iter/s)": 9.056419 + }, + { + "epoch": 1.0256, + "grad_norm": 0.6800082390808948, + "learning_rate": 3.923286616290657e-06, + "loss": 0.3287276029586792, + "memory(GiB)": 68.74, + "step": 3205, + "token_acc": 0.9243039443155452, + "train_speed(iter/s)": 8.957699 + }, + { + "epoch": 1.02592, + "grad_norm": 0.57264290451204, + "learning_rate": 3.922561520660133e-06, + "loss": 0.33427107334136963, + "memory(GiB)": 68.74, + "step": 3206, + 
"token_acc": 0.9244140110613642, + "train_speed(iter/s)": 8.858154 + }, + { + "epoch": 1.02624, + "grad_norm": 0.6734144465542115, + "learning_rate": 3.921836248016507e-06, + "loss": 0.30203336477279663, + "memory(GiB)": 68.74, + "step": 3207, + "token_acc": 0.947515745276417, + "train_speed(iter/s)": 8.766156 + }, + { + "epoch": 1.02656, + "grad_norm": 0.7033424227591797, + "learning_rate": 3.921110798450024e-06, + "loss": 0.4121806025505066, + "memory(GiB)": 68.74, + "step": 3208, + "token_acc": 0.8176337603617182, + "train_speed(iter/s)": 8.675765 + }, + { + "epoch": 1.02688, + "grad_norm": 0.7144462301652174, + "learning_rate": 3.9203851720509556e-06, + "loss": 0.37410613894462585, + "memory(GiB)": 68.74, + "step": 3209, + "token_acc": 0.8067102137767221, + "train_speed(iter/s)": 8.587636 + }, + { + "epoch": 1.0272, + "grad_norm": 0.6765288463903513, + "learning_rate": 3.919659368909592e-06, + "loss": 0.33428841829299927, + "memory(GiB)": 68.74, + "step": 3210, + "token_acc": 0.9471218206157965, + "train_speed(iter/s)": 8.499593 + }, + { + "epoch": 1.02752, + "grad_norm": 0.7149988902549796, + "learning_rate": 3.918933389116248e-06, + "loss": 0.37774085998535156, + "memory(GiB)": 68.74, + "step": 3211, + "token_acc": 0.9325153374233128, + "train_speed(iter/s)": 8.411362 + }, + { + "epoch": 1.02784, + "grad_norm": 0.8380813211646924, + "learning_rate": 3.918207232761259e-06, + "loss": 0.2810710072517395, + "memory(GiB)": 68.74, + "step": 3212, + "token_acc": 0.9468340903018969, + "train_speed(iter/s)": 8.325012 + }, + { + "epoch": 1.02816, + "grad_norm": 0.6369894651729457, + "learning_rate": 3.917480899934981e-06, + "loss": 0.41335001587867737, + "memory(GiB)": 68.74, + "step": 3213, + "token_acc": 0.8577586206896551, + "train_speed(iter/s)": 8.233427 + }, + { + "epoch": 1.02848, + "grad_norm": 0.7230641079044765, + "learning_rate": 3.916754390727795e-06, + "loss": 0.3596895933151245, + "memory(GiB)": 68.74, + "step": 3214, + "token_acc": 0.8795486600846263, + "train_speed(iter/s)": 8.152137 + }, + { + "epoch": 1.0288, + "grad_norm": 0.6377636668522555, + "learning_rate": 3.916027705230101e-06, + "loss": 0.2808958888053894, + "memory(GiB)": 68.74, + "step": 3215, + "token_acc": 0.9226993865030675, + "train_speed(iter/s)": 8.073498 + }, + { + "epoch": 1.02912, + "grad_norm": 0.6470764576512434, + "learning_rate": 3.915300843532325e-06, + "loss": 0.3960762023925781, + "memory(GiB)": 68.74, + "step": 3216, + "token_acc": 0.937152629328773, + "train_speed(iter/s)": 7.995595 + }, + { + "epoch": 1.02944, + "grad_norm": 0.6978208286261345, + "learning_rate": 3.914573805724911e-06, + "loss": 0.43906286358833313, + "memory(GiB)": 68.74, + "step": 3217, + "token_acc": 0.8605898123324397, + "train_speed(iter/s)": 7.914989 + }, + { + "epoch": 1.02976, + "grad_norm": 0.6148630311931923, + "learning_rate": 3.913846591898325e-06, + "loss": 0.3144587278366089, + "memory(GiB)": 68.74, + "step": 3218, + "token_acc": 0.8943244387971199, + "train_speed(iter/s)": 7.840516 + }, + { + "epoch": 1.03008, + "grad_norm": 0.6599576628818493, + "learning_rate": 3.913119202143058e-06, + "loss": 0.29291462898254395, + "memory(GiB)": 68.74, + "step": 3219, + "token_acc": 0.8467463479415671, + "train_speed(iter/s)": 7.768875 + }, + { + "epoch": 1.0304, + "grad_norm": 0.6667305499045343, + "learning_rate": 3.912391636549619e-06, + "loss": 0.31361833214759827, + "memory(GiB)": 68.74, + "step": 3220, + "token_acc": 0.8893979057591623, + "train_speed(iter/s)": 7.697851 + }, + { + "epoch": 1.03072, + "grad_norm": 
0.6874939760186143, + "learning_rate": 3.911663895208543e-06, + "loss": 0.3626358211040497, + "memory(GiB)": 68.74, + "step": 3221, + "token_acc": 0.9559515803631473, + "train_speed(iter/s)": 7.620665 + }, + { + "epoch": 1.03104, + "grad_norm": 0.570817358290107, + "learning_rate": 3.910935978210384e-06, + "loss": 0.329135000705719, + "memory(GiB)": 68.74, + "step": 3222, + "token_acc": 0.8972614686900401, + "train_speed(iter/s)": 7.548409 + }, + { + "epoch": 1.03136, + "grad_norm": 0.6215962891494812, + "learning_rate": 3.910207885645719e-06, + "loss": 0.32757580280303955, + "memory(GiB)": 68.74, + "step": 3223, + "token_acc": 0.9245005875440658, + "train_speed(iter/s)": 7.48113 + }, + { + "epoch": 1.03168, + "grad_norm": 0.6665522544336819, + "learning_rate": 3.909479617605145e-06, + "loss": 0.35847777128219604, + "memory(GiB)": 68.74, + "step": 3224, + "token_acc": 0.9389978213507625, + "train_speed(iter/s)": 7.412129 + }, + { + "epoch": 1.032, + "grad_norm": 0.6820968910766201, + "learning_rate": 3.9087511741792846e-06, + "loss": 0.3029909133911133, + "memory(GiB)": 68.74, + "step": 3225, + "token_acc": 0.9087288416860272, + "train_speed(iter/s)": 7.348687 + }, + { + "epoch": 1.03232, + "grad_norm": 0.6921590421288313, + "learning_rate": 3.908022555458778e-06, + "loss": 0.2777128517627716, + "memory(GiB)": 68.74, + "step": 3226, + "token_acc": 0.9594716301410988, + "train_speed(iter/s)": 7.286973 + }, + { + "epoch": 1.03264, + "grad_norm": 0.6854554596416587, + "learning_rate": 3.90729376153429e-06, + "loss": 0.34269028902053833, + "memory(GiB)": 68.74, + "step": 3227, + "token_acc": 0.9436854381872709, + "train_speed(iter/s)": 7.224711 + }, + { + "epoch": 1.03296, + "grad_norm": 0.6912231107757725, + "learning_rate": 3.906564792496506e-06, + "loss": 0.4532022476196289, + "memory(GiB)": 68.74, + "step": 3228, + "token_acc": 0.9064413421503796, + "train_speed(iter/s)": 7.159964 + }, + { + "epoch": 1.03328, + "grad_norm": 0.6672385946831044, + "learning_rate": 3.905835648436134e-06, + "loss": 0.3763331174850464, + "memory(GiB)": 68.74, + "step": 3229, + "token_acc": 0.8808391608391608, + "train_speed(iter/s)": 7.098927 + }, + { + "epoch": 1.0336, + "grad_norm": 0.6218390957198016, + "learning_rate": 3.905106329443904e-06, + "loss": 0.3411739766597748, + "memory(GiB)": 68.74, + "step": 3230, + "token_acc": 0.9445378151260504, + "train_speed(iter/s)": 7.033608 + }, + { + "epoch": 1.03392, + "grad_norm": 0.6175524138969589, + "learning_rate": 3.904376835610565e-06, + "loss": 0.28472286462783813, + "memory(GiB)": 68.74, + "step": 3231, + "token_acc": 0.9320388349514563, + "train_speed(iter/s)": 6.977436 + }, + { + "epoch": 1.03424, + "grad_norm": 0.6789526778471839, + "learning_rate": 3.903647167026892e-06, + "loss": 0.3511594533920288, + "memory(GiB)": 68.74, + "step": 3232, + "token_acc": 0.846092291277674, + "train_speed(iter/s)": 6.918526 + }, + { + "epoch": 1.03456, + "grad_norm": 0.6158568109462881, + "learning_rate": 3.902917323783678e-06, + "loss": 0.33516913652420044, + "memory(GiB)": 68.74, + "step": 3233, + "token_acc": 0.9134175361522973, + "train_speed(iter/s)": 6.858174 + }, + { + "epoch": 1.03488, + "grad_norm": 0.6984829009915599, + "learning_rate": 3.9021873059717404e-06, + "loss": 0.3922877311706543, + "memory(GiB)": 68.74, + "step": 3234, + "token_acc": 0.876641390789717, + "train_speed(iter/s)": 6.803536 + }, + { + "epoch": 1.0352, + "grad_norm": 0.6360564734610196, + "learning_rate": 3.901457113681917e-06, + "loss": 0.337102472782135, + "memory(GiB)": 68.74, + "step": 
3235, + "token_acc": 0.9218588640275387, + "train_speed(iter/s)": 6.745684 + }, + { + "epoch": 1.03552, + "grad_norm": 0.6898251180196788, + "learning_rate": 3.900726747005067e-06, + "loss": 0.4030301570892334, + "memory(GiB)": 68.74, + "step": 3236, + "token_acc": 0.9015942825728422, + "train_speed(iter/s)": 6.688136 + }, + { + "epoch": 1.03584, + "grad_norm": 0.7421425243769012, + "learning_rate": 3.899996206032074e-06, + "loss": 0.29078179597854614, + "memory(GiB)": 68.74, + "step": 3237, + "token_acc": 0.9466628636622932, + "train_speed(iter/s)": 6.635124 + }, + { + "epoch": 1.03616, + "grad_norm": 0.6319620240639592, + "learning_rate": 3.899265490853837e-06, + "loss": 0.3173001706600189, + "memory(GiB)": 68.74, + "step": 3238, + "token_acc": 0.9418291862811029, + "train_speed(iter/s)": 6.583553 + }, + { + "epoch": 1.03648, + "grad_norm": 0.6411623921282427, + "learning_rate": 3.898534601561285e-06, + "loss": 0.3236410617828369, + "memory(GiB)": 68.74, + "step": 3239, + "token_acc": 0.8748301630434783, + "train_speed(iter/s)": 6.530126 + }, + { + "epoch": 1.0368, + "grad_norm": 0.6431245774963325, + "learning_rate": 3.897803538245363e-06, + "loss": 0.34660208225250244, + "memory(GiB)": 68.74, + "step": 3240, + "token_acc": 0.8480791618160652, + "train_speed(iter/s)": 6.476578 + }, + { + "epoch": 1.03712, + "grad_norm": 1.1672331057326473, + "learning_rate": 3.897072300997038e-06, + "loss": 0.35535210371017456, + "memory(GiB)": 68.74, + "step": 3241, + "token_acc": 0.9041606886657102, + "train_speed(iter/s)": 6.421958 + }, + { + "epoch": 1.03744, + "grad_norm": 0.621985739092646, + "learning_rate": 3.896340889907301e-06, + "loss": 0.33523470163345337, + "memory(GiB)": 68.74, + "step": 3242, + "token_acc": 0.8343142500445871, + "train_speed(iter/s)": 6.373798 + }, + { + "epoch": 1.03776, + "grad_norm": 0.6057373604179175, + "learning_rate": 3.895609305067162e-06, + "loss": 0.33291783928871155, + "memory(GiB)": 68.74, + "step": 3243, + "token_acc": 0.8773978315262719, + "train_speed(iter/s)": 6.326416 + }, + { + "epoch": 1.03808, + "grad_norm": 0.6427552839102506, + "learning_rate": 3.894877546567656e-06, + "loss": 0.355975866317749, + "memory(GiB)": 68.74, + "step": 3244, + "token_acc": 0.8728363720286176, + "train_speed(iter/s)": 6.277519 + }, + { + "epoch": 1.0384, + "grad_norm": 0.7353686600393355, + "learning_rate": 3.894145614499838e-06, + "loss": 0.41415929794311523, + "memory(GiB)": 68.74, + "step": 3245, + "token_acc": 0.8122796709753232, + "train_speed(iter/s)": 6.230081 + }, + { + "epoch": 1.03872, + "grad_norm": 0.6686058167124143, + "learning_rate": 3.893413508954782e-06, + "loss": 0.3675537705421448, + "memory(GiB)": 68.74, + "step": 3246, + "token_acc": 0.8756698821007503, + "train_speed(iter/s)": 6.183899 + }, + { + "epoch": 1.03904, + "grad_norm": 0.667319882837921, + "learning_rate": 3.892681230023587e-06, + "loss": 0.354352205991745, + "memory(GiB)": 68.74, + "step": 3247, + "token_acc": 0.9206842923794712, + "train_speed(iter/s)": 6.133354 + }, + { + "epoch": 1.03936, + "grad_norm": 0.6065500543829857, + "learning_rate": 3.891948777797372e-06, + "loss": 0.3620920181274414, + "memory(GiB)": 68.74, + "step": 3248, + "token_acc": 0.9277555682467161, + "train_speed(iter/s)": 6.086043 + }, + { + "epoch": 1.03968, + "grad_norm": 0.6409334302308186, + "learning_rate": 3.891216152367279e-06, + "loss": 0.36018532514572144, + "memory(GiB)": 68.74, + "step": 3249, + "token_acc": 0.894137194398291, + "train_speed(iter/s)": 6.04169 + }, + { + "epoch": 1.04, + "grad_norm": 
0.6566217566592493, + "learning_rate": 3.890483353824469e-06, + "loss": 0.3156551718711853, + "memory(GiB)": 68.74, + "step": 3250, + "token_acc": 0.8384094754653131, + "train_speed(iter/s)": 5.996947 + }, + { + "epoch": 1.04032, + "grad_norm": 0.5817289250739865, + "learning_rate": 3.889750382260128e-06, + "loss": 0.2182731330394745, + "memory(GiB)": 68.74, + "step": 3251, + "token_acc": 0.9644280022766079, + "train_speed(iter/s)": 5.955939 + }, + { + "epoch": 1.04064, + "grad_norm": 0.6485559542223808, + "learning_rate": 3.8890172377654596e-06, + "loss": 0.39428481459617615, + "memory(GiB)": 68.74, + "step": 3252, + "token_acc": 0.9132575757575757, + "train_speed(iter/s)": 5.91428 + }, + { + "epoch": 1.04096, + "grad_norm": 0.5529877014958257, + "learning_rate": 3.888283920431693e-06, + "loss": 0.27049899101257324, + "memory(GiB)": 68.74, + "step": 3253, + "token_acc": 0.9339519650655022, + "train_speed(iter/s)": 5.872855 + }, + { + "epoch": 1.04128, + "grad_norm": 0.6442820262710396, + "learning_rate": 3.887550430350074e-06, + "loss": 0.36548054218292236, + "memory(GiB)": 77.0, + "step": 3254, + "token_acc": 0.8242662848962061, + "train_speed(iter/s)": 5.8212 + }, + { + "epoch": 1.0416, + "grad_norm": 0.670659557274049, + "learning_rate": 3.886816767611875e-06, + "loss": 0.3349798619747162, + "memory(GiB)": 77.0, + "step": 3255, + "token_acc": 0.9418579234972677, + "train_speed(iter/s)": 5.772546 + }, + { + "epoch": 1.04192, + "grad_norm": 0.5968590429529287, + "learning_rate": 3.886082932308387e-06, + "loss": 0.3481326103210449, + "memory(GiB)": 77.0, + "step": 3256, + "token_acc": 0.9108941104668904, + "train_speed(iter/s)": 5.733393 + }, + { + "epoch": 1.04224, + "grad_norm": 0.6132498717778617, + "learning_rate": 3.885348924530923e-06, + "loss": 0.34927111864089966, + "memory(GiB)": 77.0, + "step": 3257, + "token_acc": 0.9367333763718528, + "train_speed(iter/s)": 5.694037 + }, + { + "epoch": 1.04256, + "grad_norm": 0.62337486873224, + "learning_rate": 3.8846147443708185e-06, + "loss": 0.2883740961551666, + "memory(GiB)": 77.0, + "step": 3258, + "token_acc": 0.8963956120494515, + "train_speed(iter/s)": 5.654979 + }, + { + "epoch": 1.04288, + "grad_norm": 0.7083853027756456, + "learning_rate": 3.883880391919427e-06, + "loss": 0.30894607305526733, + "memory(GiB)": 77.0, + "step": 3259, + "token_acc": 0.9014313597918022, + "train_speed(iter/s)": 5.618907 + }, + { + "epoch": 1.0432, + "grad_norm": 0.6595510073033134, + "learning_rate": 3.883145867268128e-06, + "loss": 0.34716543555259705, + "memory(GiB)": 77.0, + "step": 3260, + "token_acc": 0.9517697394010113, + "train_speed(iter/s)": 5.581241 + }, + { + "epoch": 1.04352, + "grad_norm": 0.6148147886190675, + "learning_rate": 3.88241117050832e-06, + "loss": 0.325467050075531, + "memory(GiB)": 77.0, + "step": 3261, + "token_acc": 0.9189058339385137, + "train_speed(iter/s)": 5.542589 + }, + { + "epoch": 1.04384, + "grad_norm": 0.6966873141197385, + "learning_rate": 3.881676301731423e-06, + "loss": 0.34977367520332336, + "memory(GiB)": 77.0, + "step": 3262, + "token_acc": 0.8769716088328076, + "train_speed(iter/s)": 5.50682 + }, + { + "epoch": 1.04416, + "grad_norm": 0.6806512212052771, + "learning_rate": 3.880941261028879e-06, + "loss": 0.3709196150302887, + "memory(GiB)": 77.0, + "step": 3263, + "token_acc": 0.8080147209159682, + "train_speed(iter/s)": 5.471466 + }, + { + "epoch": 1.04448, + "grad_norm": 0.6335282959104213, + "learning_rate": 3.88020604849215e-06, + "loss": 0.306686133146286, + "memory(GiB)": 77.0, + "step": 3264, + 
"token_acc": 0.8702621157835188, + "train_speed(iter/s)": 5.43626 + }, + { + "epoch": 1.0448, + "grad_norm": 0.6759665990470612, + "learning_rate": 3.879470664212722e-06, + "loss": 0.3425697982311249, + "memory(GiB)": 77.0, + "step": 3265, + "token_acc": 0.912109375, + "train_speed(iter/s)": 5.402168 + }, + { + "epoch": 1.04512, + "grad_norm": 0.669554279156481, + "learning_rate": 3.8787351082821e-06, + "loss": 0.33481940627098083, + "memory(GiB)": 77.0, + "step": 3266, + "token_acc": 0.9298185552892845, + "train_speed(iter/s)": 5.368939 + }, + { + "epoch": 1.04544, + "grad_norm": 0.6669219993061316, + "learning_rate": 3.877999380791811e-06, + "loss": 0.33645230531692505, + "memory(GiB)": 77.0, + "step": 3267, + "token_acc": 0.8520880477268051, + "train_speed(iter/s)": 5.334853 + }, + { + "epoch": 1.04576, + "grad_norm": 0.6741399258867916, + "learning_rate": 3.877263481833404e-06, + "loss": 0.3015664219856262, + "memory(GiB)": 77.0, + "step": 3268, + "token_acc": 0.9308035714285714, + "train_speed(iter/s)": 5.302542 + }, + { + "epoch": 1.04608, + "grad_norm": 0.6293876992827153, + "learning_rate": 3.876527411498449e-06, + "loss": 0.3765403628349304, + "memory(GiB)": 77.0, + "step": 3269, + "token_acc": 0.9397496087636933, + "train_speed(iter/s)": 5.269339 + }, + { + "epoch": 1.0464, + "grad_norm": 0.70919035048069, + "learning_rate": 3.8757911698785365e-06, + "loss": 0.3767227530479431, + "memory(GiB)": 77.0, + "step": 3270, + "token_acc": 0.9247757073844031, + "train_speed(iter/s)": 5.237984 + }, + { + "epoch": 1.04672, + "grad_norm": 0.6761564456713398, + "learning_rate": 3.875054757065279e-06, + "loss": 0.3002434968948364, + "memory(GiB)": 77.0, + "step": 3271, + "token_acc": 0.900866624392306, + "train_speed(iter/s)": 5.206382 + }, + { + "epoch": 1.04704, + "grad_norm": 0.6016158345947076, + "learning_rate": 3.874318173150311e-06, + "loss": 0.3154761791229248, + "memory(GiB)": 77.0, + "step": 3272, + "token_acc": 0.8973029045643154, + "train_speed(iter/s)": 5.173541 + }, + { + "epoch": 1.04736, + "grad_norm": 0.6553344358106333, + "learning_rate": 3.873581418225287e-06, + "loss": 0.3442355990409851, + "memory(GiB)": 77.0, + "step": 3273, + "token_acc": 0.9180395387149918, + "train_speed(iter/s)": 5.14355 + }, + { + "epoch": 1.04768, + "grad_norm": 0.6876972789943042, + "learning_rate": 3.8728444923818836e-06, + "loss": 0.38603323698043823, + "memory(GiB)": 77.0, + "step": 3274, + "token_acc": 0.9196511990034257, + "train_speed(iter/s)": 5.112992 + }, + { + "epoch": 1.048, + "grad_norm": 0.7172949184965209, + "learning_rate": 3.872107395711799e-06, + "loss": 0.3509473204612732, + "memory(GiB)": 77.0, + "step": 3275, + "token_acc": 0.8318988703604088, + "train_speed(iter/s)": 5.083474 + }, + { + "epoch": 1.04832, + "grad_norm": 0.6223750948472931, + "learning_rate": 3.87137012830675e-06, + "loss": 0.34280890226364136, + "memory(GiB)": 77.0, + "step": 3276, + "token_acc": 0.8713692946058091, + "train_speed(iter/s)": 5.053203 + }, + { + "epoch": 1.04864, + "grad_norm": 0.6592226631186067, + "learning_rate": 3.87063269025848e-06, + "loss": 0.35134732723236084, + "memory(GiB)": 77.0, + "step": 3277, + "token_acc": 0.9386098427194317, + "train_speed(iter/s)": 5.023081 + }, + { + "epoch": 1.04896, + "grad_norm": 0.6938945824994291, + "learning_rate": 3.869895081658748e-06, + "loss": 0.37972646951675415, + "memory(GiB)": 77.0, + "step": 3278, + "token_acc": 0.9349936682144365, + "train_speed(iter/s)": 4.994081 + }, + { + "epoch": 1.04928, + "grad_norm": 0.6446139132986777, + "learning_rate": 
3.8691573025993364e-06, + "loss": 0.3441675901412964, + "memory(GiB)": 77.0, + "step": 3279, + "token_acc": 0.8862713241267263, + "train_speed(iter/s)": 4.965805 + }, + { + "epoch": 1.0496, + "grad_norm": 0.656157461924699, + "learning_rate": 3.8684193531720505e-06, + "loss": 0.4149530231952667, + "memory(GiB)": 77.0, + "step": 3280, + "token_acc": 0.9075182967398536, + "train_speed(iter/s)": 4.935716 + }, + { + "epoch": 1.04992, + "grad_norm": 0.6788994214435585, + "learning_rate": 3.867681233468715e-06, + "loss": 0.4145277440547943, + "memory(GiB)": 77.0, + "step": 3281, + "token_acc": 0.852760736196319, + "train_speed(iter/s)": 4.906638 + }, + { + "epoch": 1.05024, + "grad_norm": 0.6970410950347674, + "learning_rate": 3.866942943581175e-06, + "loss": 0.35254228115081787, + "memory(GiB)": 77.0, + "step": 3282, + "token_acc": 0.9246231155778895, + "train_speed(iter/s)": 4.877769 + }, + { + "epoch": 1.05056, + "grad_norm": 0.6351270724357762, + "learning_rate": 3.866204483601299e-06, + "loss": 0.3329002857208252, + "memory(GiB)": 77.0, + "step": 3283, + "token_acc": 0.9585477415666095, + "train_speed(iter/s)": 4.850748 + }, + { + "epoch": 1.05088, + "grad_norm": 0.627110493454707, + "learning_rate": 3.865465853620975e-06, + "loss": 0.25494444370269775, + "memory(GiB)": 77.0, + "step": 3284, + "token_acc": 0.9139638101741209, + "train_speed(iter/s)": 4.824609 + }, + { + "epoch": 1.0512, + "grad_norm": 0.6194468603692406, + "learning_rate": 3.864727053732113e-06, + "loss": 0.2896907329559326, + "memory(GiB)": 77.0, + "step": 3285, + "token_acc": 0.9116161616161617, + "train_speed(iter/s)": 4.7974 + }, + { + "epoch": 1.05152, + "grad_norm": 0.6952128099228604, + "learning_rate": 3.863988084026643e-06, + "loss": 0.36843737959861755, + "memory(GiB)": 77.0, + "step": 3286, + "token_acc": 0.9340486783564512, + "train_speed(iter/s)": 4.770353 + }, + { + "epoch": 1.0518399999999999, + "grad_norm": 0.6303460295215385, + "learning_rate": 3.863248944596518e-06, + "loss": 0.32086610794067383, + "memory(GiB)": 77.0, + "step": 3287, + "token_acc": 0.9066967644845748, + "train_speed(iter/s)": 4.743361 + }, + { + "epoch": 1.05216, + "grad_norm": 0.6747051270828308, + "learning_rate": 3.862509635533711e-06, + "loss": 0.35316669940948486, + "memory(GiB)": 77.0, + "step": 3288, + "token_acc": 0.8673918074519787, + "train_speed(iter/s)": 4.716801 + }, + { + "epoch": 1.05248, + "grad_norm": 0.6044681665084966, + "learning_rate": 3.861770156930216e-06, + "loss": 0.327358603477478, + "memory(GiB)": 77.0, + "step": 3289, + "token_acc": 0.9459886291850916, + "train_speed(iter/s)": 4.691889 + }, + { + "epoch": 1.0528, + "grad_norm": 0.6310266421299451, + "learning_rate": 3.861030508878047e-06, + "loss": 0.28956156969070435, + "memory(GiB)": 77.0, + "step": 3290, + "token_acc": 0.9139360442163442, + "train_speed(iter/s)": 4.666069 + }, + { + "epoch": 1.05312, + "grad_norm": 0.7511946530861218, + "learning_rate": 3.860290691469243e-06, + "loss": 0.41793355345726013, + "memory(GiB)": 77.0, + "step": 3291, + "token_acc": 0.892872205820329, + "train_speed(iter/s)": 4.641654 + }, + { + "epoch": 1.05344, + "grad_norm": 0.6742547208546613, + "learning_rate": 3.859550704795859e-06, + "loss": 0.37165430188179016, + "memory(GiB)": 77.0, + "step": 3292, + "token_acc": 0.9089939566299324, + "train_speed(iter/s)": 4.61768 + }, + { + "epoch": 1.05376, + "grad_norm": 0.6815996290750265, + "learning_rate": 3.8588105489499745e-06, + "loss": 0.34858494997024536, + "memory(GiB)": 77.0, + "step": 3293, + "token_acc": 0.9184331797235024, 
+ "train_speed(iter/s)": 4.594108 + }, + { + "epoch": 1.05408, + "grad_norm": 0.6318121091680028, + "learning_rate": 3.858070224023689e-06, + "loss": 0.3675995171070099, + "memory(GiB)": 77.0, + "step": 3294, + "token_acc": 0.9411300919842313, + "train_speed(iter/s)": 4.570582 + }, + { + "epoch": 1.0544, + "grad_norm": 0.5916139864622809, + "learning_rate": 3.857329730109124e-06, + "loss": 0.28416556119918823, + "memory(GiB)": 77.0, + "step": 3295, + "token_acc": 0.9114441416893733, + "train_speed(iter/s)": 4.543332 + }, + { + "epoch": 1.05472, + "grad_norm": 0.6395163043859391, + "learning_rate": 3.85658906729842e-06, + "loss": 0.3216592073440552, + "memory(GiB)": 77.0, + "step": 3296, + "token_acc": 0.9050951847704367, + "train_speed(iter/s)": 4.519869 + }, + { + "epoch": 1.05504, + "grad_norm": 0.6468063492590111, + "learning_rate": 3.855848235683739e-06, + "loss": 0.4285566210746765, + "memory(GiB)": 77.0, + "step": 3297, + "token_acc": 0.9319556451612904, + "train_speed(iter/s)": 4.494923 + }, + { + "epoch": 1.05536, + "grad_norm": 0.6413897865171583, + "learning_rate": 3.8551072353572665e-06, + "loss": 0.3300699293613434, + "memory(GiB)": 77.0, + "step": 3298, + "token_acc": 0.8634552698297718, + "train_speed(iter/s)": 4.469805 + }, + { + "epoch": 1.05568, + "grad_norm": 0.615453056674284, + "learning_rate": 3.854366066411205e-06, + "loss": 0.40828341245651245, + "memory(GiB)": 77.0, + "step": 3299, + "token_acc": 0.8841731131089274, + "train_speed(iter/s)": 4.441953 + }, + { + "epoch": 1.056, + "grad_norm": 0.6697236833503695, + "learning_rate": 3.853624728937781e-06, + "loss": 0.4014461040496826, + "memory(GiB)": 77.0, + "step": 3300, + "token_acc": 0.8675745480159662, + "train_speed(iter/s)": 4.41938 + }, + { + "epoch": 1.05632, + "grad_norm": 0.6656270297708938, + "learning_rate": 3.852883223029243e-06, + "loss": 0.3673269748687744, + "memory(GiB)": 77.0, + "step": 3301, + "token_acc": 0.9268209666439755, + "train_speed(iter/s)": 4.397254 + }, + { + "epoch": 1.05664, + "grad_norm": 0.631599769768974, + "learning_rate": 3.852141548777856e-06, + "loss": 0.37372255325317383, + "memory(GiB)": 77.0, + "step": 3302, + "token_acc": 0.8544955387783116, + "train_speed(iter/s)": 4.373019 + }, + { + "epoch": 1.05696, + "grad_norm": 0.6748006819232328, + "learning_rate": 3.8513997062759105e-06, + "loss": 0.3296646773815155, + "memory(GiB)": 77.0, + "step": 3303, + "token_acc": 0.9290211791642816, + "train_speed(iter/s)": 4.351894 + }, + { + "epoch": 1.05728, + "grad_norm": 0.6398373325220268, + "learning_rate": 3.850657695615714e-06, + "loss": 0.3449357748031616, + "memory(GiB)": 77.0, + "step": 3304, + "token_acc": 0.916709379805228, + "train_speed(iter/s)": 4.330962 + }, + { + "epoch": 1.0576, + "grad_norm": 0.649179916670139, + "learning_rate": 3.849915516889597e-06, + "loss": 0.3318617343902588, + "memory(GiB)": 77.0, + "step": 3305, + "token_acc": 0.8763288447909284, + "train_speed(iter/s)": 4.309598 + }, + { + "epoch": 1.05792, + "grad_norm": 0.6506231452940509, + "learning_rate": 3.849173170189912e-06, + "loss": 0.29488664865493774, + "memory(GiB)": 77.0, + "step": 3306, + "token_acc": 0.9298050139275766, + "train_speed(iter/s)": 4.289208 + }, + { + "epoch": 1.05824, + "grad_norm": 0.6006316901959372, + "learning_rate": 3.848430655609031e-06, + "loss": 0.3059118390083313, + "memory(GiB)": 77.0, + "step": 3307, + "token_acc": 0.9009632751354606, + "train_speed(iter/s)": 4.268055 + }, + { + "epoch": 1.05856, + "grad_norm": 0.5883263010804552, + "learning_rate": 3.847687973239346e-06, + 
"loss": 0.2933390736579895, + "memory(GiB)": 77.0, + "step": 3308, + "token_acc": 0.89105283455457, + "train_speed(iter/s)": 4.247308 + }, + { + "epoch": 1.05888, + "grad_norm": 0.6619334380701559, + "learning_rate": 3.846945123173272e-06, + "loss": 0.3289869427680969, + "memory(GiB)": 77.0, + "step": 3309, + "token_acc": 0.8914821944177094, + "train_speed(iter/s)": 4.226956 + }, + { + "epoch": 1.0592, + "grad_norm": 0.6760258609935659, + "learning_rate": 3.846202105503244e-06, + "loss": 0.4142027497291565, + "memory(GiB)": 77.0, + "step": 3310, + "token_acc": 0.8277613203554803, + "train_speed(iter/s)": 4.206133 + }, + { + "epoch": 1.05952, + "grad_norm": 0.6129744298107473, + "learning_rate": 3.845458920321717e-06, + "loss": 0.32273203134536743, + "memory(GiB)": 77.0, + "step": 3311, + "token_acc": 0.9319203596660244, + "train_speed(iter/s)": 4.184611 + }, + { + "epoch": 1.05984, + "grad_norm": 0.5915539614200593, + "learning_rate": 3.8447155677211664e-06, + "loss": 0.25599703192710876, + "memory(GiB)": 77.0, + "step": 3312, + "token_acc": 0.9295729764181007, + "train_speed(iter/s)": 4.163644 + }, + { + "epoch": 1.06016, + "grad_norm": 0.6729527960944369, + "learning_rate": 3.843972047794092e-06, + "loss": 0.33820483088493347, + "memory(GiB)": 77.0, + "step": 3313, + "token_acc": 0.9658944658944659, + "train_speed(iter/s)": 4.144732 + }, + { + "epoch": 1.06048, + "grad_norm": 0.6690994302766357, + "learning_rate": 3.843228360633009e-06, + "loss": 0.29185909032821655, + "memory(GiB)": 77.0, + "step": 3314, + "token_acc": 0.8840304182509505, + "train_speed(iter/s)": 4.126078 + }, + { + "epoch": 1.0608, + "grad_norm": 0.6939367651448778, + "learning_rate": 3.842484506330459e-06, + "loss": 0.2904887795448303, + "memory(GiB)": 77.0, + "step": 3315, + "token_acc": 0.8954558954558954, + "train_speed(iter/s)": 4.107385 + }, + { + "epoch": 1.06112, + "grad_norm": 0.6346558302278438, + "learning_rate": 3.841740484979002e-06, + "loss": 0.35364237427711487, + "memory(GiB)": 77.0, + "step": 3316, + "token_acc": 0.8887691893347698, + "train_speed(iter/s)": 4.088334 + }, + { + "epoch": 1.06144, + "grad_norm": 0.619011019805936, + "learning_rate": 3.8409962966712154e-06, + "loss": 0.284739226102829, + "memory(GiB)": 77.0, + "step": 3317, + "token_acc": 0.9088074824629774, + "train_speed(iter/s)": 4.069114 + }, + { + "epoch": 1.06176, + "grad_norm": 0.5893323226101024, + "learning_rate": 3.840251941499704e-06, + "loss": 0.3019380569458008, + "memory(GiB)": 77.0, + "step": 3318, + "token_acc": 0.8972539339709966, + "train_speed(iter/s)": 4.04957 + }, + { + "epoch": 1.06208, + "grad_norm": 0.6349995052147084, + "learning_rate": 3.839507419557088e-06, + "loss": 0.2534206509590149, + "memory(GiB)": 77.0, + "step": 3319, + "token_acc": 0.9526288391462779, + "train_speed(iter/s)": 4.031718 + }, + { + "epoch": 1.0624, + "grad_norm": 0.6055708197436603, + "learning_rate": 3.83876273093601e-06, + "loss": 0.2963431477546692, + "memory(GiB)": 77.0, + "step": 3320, + "token_acc": 0.9001119820828667, + "train_speed(iter/s)": 4.013528 + }, + { + "epoch": 1.06272, + "grad_norm": 0.7853882027304347, + "learning_rate": 3.8380178757291345e-06, + "loss": 0.32535284757614136, + "memory(GiB)": 77.0, + "step": 3321, + "token_acc": 0.9086935405536668, + "train_speed(iter/s)": 3.993954 + }, + { + "epoch": 1.06304, + "grad_norm": 0.5962608897039257, + "learning_rate": 3.837272854029146e-06, + "loss": 0.2856634557247162, + "memory(GiB)": 77.0, + "step": 3322, + "token_acc": 0.9496738117427772, + "train_speed(iter/s)": 3.975572 + 
}, + { + "epoch": 1.06336, + "grad_norm": 0.6283554596703659, + "learning_rate": 3.836527665928749e-06, + "loss": 0.28991255164146423, + "memory(GiB)": 77.0, + "step": 3323, + "token_acc": 0.9574422121408511, + "train_speed(iter/s)": 3.956185 + }, + { + "epoch": 1.06368, + "grad_norm": 0.7262269172337735, + "learning_rate": 3.835782311520669e-06, + "loss": 0.32087135314941406, + "memory(GiB)": 77.0, + "step": 3324, + "token_acc": 0.9034403669724771, + "train_speed(iter/s)": 3.938242 + }, + { + "epoch": 1.064, + "grad_norm": 0.6912158936695127, + "learning_rate": 3.835036790897652e-06, + "loss": 0.34834444522857666, + "memory(GiB)": 77.0, + "step": 3325, + "token_acc": 0.907205982324949, + "train_speed(iter/s)": 3.920519 + }, + { + "epoch": 1.06432, + "grad_norm": 0.6152355124709724, + "learning_rate": 3.8342911041524666e-06, + "loss": 0.3337547183036804, + "memory(GiB)": 77.0, + "step": 3326, + "token_acc": 0.9461426491994177, + "train_speed(iter/s)": 3.901829 + }, + { + "epoch": 1.06464, + "grad_norm": 0.7307175780929431, + "learning_rate": 3.833545251377899e-06, + "loss": 0.43172770738601685, + "memory(GiB)": 77.0, + "step": 3327, + "token_acc": 0.855313257182416, + "train_speed(iter/s)": 3.884787 + }, + { + "epoch": 1.06496, + "grad_norm": 0.6373213215040663, + "learning_rate": 3.8327992326667595e-06, + "loss": 0.3305622935295105, + "memory(GiB)": 77.0, + "step": 3328, + "token_acc": 0.8397411496003045, + "train_speed(iter/s)": 3.867029 + }, + { + "epoch": 1.06528, + "grad_norm": 0.5886503758414268, + "learning_rate": 3.832053048111875e-06, + "loss": 0.34097886085510254, + "memory(GiB)": 77.0, + "step": 3329, + "token_acc": 0.9337487700885536, + "train_speed(iter/s)": 3.849003 + }, + { + "epoch": 1.0656, + "grad_norm": 0.6315199214403059, + "learning_rate": 3.831306697806098e-06, + "loss": 0.38873565196990967, + "memory(GiB)": 77.0, + "step": 3330, + "token_acc": 0.875, + "train_speed(iter/s)": 3.832236 + }, + { + "epoch": 1.06592, + "grad_norm": 0.7032443257810317, + "learning_rate": 3.830560181842295e-06, + "loss": 0.38807499408721924, + "memory(GiB)": 77.0, + "step": 3331, + "token_acc": 0.8281374900079936, + "train_speed(iter/s)": 3.815819 + }, + { + "epoch": 1.06624, + "grad_norm": 0.7060185714118169, + "learning_rate": 3.829813500313362e-06, + "loss": 0.34714192152023315, + "memory(GiB)": 77.0, + "step": 3332, + "token_acc": 0.9155860666158149, + "train_speed(iter/s)": 3.798961 + }, + { + "epoch": 1.06656, + "grad_norm": 0.5911149482083533, + "learning_rate": 3.829066653312205e-06, + "loss": 0.35343241691589355, + "memory(GiB)": 77.0, + "step": 3333, + "token_acc": 0.9637340496977838, + "train_speed(iter/s)": 3.782537 + }, + { + "epoch": 1.06688, + "grad_norm": 0.6004103709957359, + "learning_rate": 3.82831964093176e-06, + "loss": 0.27701473236083984, + "memory(GiB)": 77.0, + "step": 3334, + "token_acc": 0.9311173408089708, + "train_speed(iter/s)": 3.764834 + }, + { + "epoch": 1.0672, + "grad_norm": 0.6249089748660741, + "learning_rate": 3.827572463264978e-06, + "loss": 0.2778325080871582, + "memory(GiB)": 77.0, + "step": 3335, + "token_acc": 0.9529837251356239, + "train_speed(iter/s)": 3.749382 + }, + { + "epoch": 1.06752, + "grad_norm": 0.6970818874616108, + "learning_rate": 3.8268251204048335e-06, + "loss": 0.2971646189689636, + "memory(GiB)": 77.0, + "step": 3336, + "token_acc": 0.9039024390243903, + "train_speed(iter/s)": 3.733986 + }, + { + "epoch": 1.06784, + "grad_norm": 0.6667760457185024, + "learning_rate": 3.82607761244432e-06, + "loss": 0.33440840244293213, + 
"memory(GiB)": 77.0, + "step": 3337, + "token_acc": 0.929587235518557, + "train_speed(iter/s)": 3.718675 + }, + { + "epoch": 1.06816, + "grad_norm": 0.6330830124141711, + "learning_rate": 3.825329939476451e-06, + "loss": 0.33065399527549744, + "memory(GiB)": 77.0, + "step": 3338, + "token_acc": 0.9574923547400611, + "train_speed(iter/s)": 3.703625 + }, + { + "epoch": 1.06848, + "grad_norm": 0.653644629128481, + "learning_rate": 3.824582101594262e-06, + "loss": 0.393046498298645, + "memory(GiB)": 77.0, + "step": 3339, + "token_acc": 0.8838162130523851, + "train_speed(iter/s)": 3.68789 + }, + { + "epoch": 1.0688, + "grad_norm": 0.6702636885319473, + "learning_rate": 3.82383409889081e-06, + "loss": 0.4359252154827118, + "memory(GiB)": 77.0, + "step": 3340, + "token_acc": 0.9567053854276664, + "train_speed(iter/s)": 3.672843 + }, + { + "epoch": 1.06912, + "grad_norm": 0.6524610550507537, + "learning_rate": 3.823085931459167e-06, + "loss": 0.3124820291996002, + "memory(GiB)": 77.0, + "step": 3341, + "token_acc": 0.9482825164029333, + "train_speed(iter/s)": 3.657649 + }, + { + "epoch": 1.06944, + "grad_norm": 0.6240476224185459, + "learning_rate": 3.822337599392433e-06, + "loss": 0.34654736518859863, + "memory(GiB)": 77.0, + "step": 3342, + "token_acc": 0.9070631970260223, + "train_speed(iter/s)": 3.641578 + }, + { + "epoch": 1.06976, + "grad_norm": 0.6865704319641787, + "learning_rate": 3.8215891027837245e-06, + "loss": 0.37891435623168945, + "memory(GiB)": 77.0, + "step": 3343, + "token_acc": 0.9069905213270142, + "train_speed(iter/s)": 3.62691 + }, + { + "epoch": 1.07008, + "grad_norm": 0.6005494767667255, + "learning_rate": 3.820840441726178e-06, + "loss": 0.2594923973083496, + "memory(GiB)": 77.0, + "step": 3344, + "token_acc": 0.8714664310954063, + "train_speed(iter/s)": 3.6127 + }, + { + "epoch": 1.0704, + "grad_norm": 0.6263123499547438, + "learning_rate": 3.820091616312951e-06, + "loss": 0.3245687186717987, + "memory(GiB)": 77.0, + "step": 3345, + "token_acc": 0.9579862368706991, + "train_speed(iter/s)": 3.598098 + }, + { + "epoch": 1.0707200000000001, + "grad_norm": 0.6632283245677895, + "learning_rate": 3.819342626637223e-06, + "loss": 0.31634944677352905, + "memory(GiB)": 77.0, + "step": 3346, + "token_acc": 0.9233716475095786, + "train_speed(iter/s)": 3.583918 + }, + { + "epoch": 1.07104, + "grad_norm": 0.6370563967810006, + "learning_rate": 3.818593472792193e-06, + "loss": 0.329886257648468, + "memory(GiB)": 77.0, + "step": 3347, + "token_acc": 0.9033680834001604, + "train_speed(iter/s)": 3.570029 + }, + { + "epoch": 1.07136, + "grad_norm": 0.6148735868228228, + "learning_rate": 3.8178441548710785e-06, + "loss": 0.27222752571105957, + "memory(GiB)": 77.0, + "step": 3348, + "token_acc": 0.91654492330168, + "train_speed(iter/s)": 3.555197 + }, + { + "epoch": 1.07168, + "grad_norm": 0.6269348030432741, + "learning_rate": 3.817094672967121e-06, + "loss": 0.275507390499115, + "memory(GiB)": 77.0, + "step": 3349, + "token_acc": 0.9193020719738277, + "train_speed(iter/s)": 3.541473 + }, + { + "epoch": 1.072, + "grad_norm": 0.6638506560230193, + "learning_rate": 3.816345027173578e-06, + "loss": 0.3504664897918701, + "memory(GiB)": 77.0, + "step": 3350, + "token_acc": 0.9485322150209684, + "train_speed(iter/s)": 3.527328 + }, + { + "epoch": 1.07232, + "grad_norm": 0.6584579486753879, + "learning_rate": 3.815595217583733e-06, + "loss": 0.40240582823753357, + "memory(GiB)": 77.0, + "step": 3351, + "token_acc": 0.9470688673875924, + "train_speed(iter/s)": 3.513726 + }, + { + "epoch": 1.07264, 
+ "grad_norm": 0.6883185872616618, + "learning_rate": 3.814845244290885e-06, + "loss": 0.31239354610443115, + "memory(GiB)": 77.0, + "step": 3352, + "token_acc": 0.9124891961970614, + "train_speed(iter/s)": 3.499971 + }, + { + "epoch": 1.07296, + "grad_norm": 0.6402232858631218, + "learning_rate": 3.814095107388356e-06, + "loss": 0.3626977205276489, + "memory(GiB)": 77.0, + "step": 3353, + "token_acc": 0.935110393107162, + "train_speed(iter/s)": 3.486397 + }, + { + "epoch": 1.07328, + "grad_norm": 0.6420246077674074, + "learning_rate": 3.813344806969487e-06, + "loss": 0.4205816984176636, + "memory(GiB)": 77.0, + "step": 3354, + "token_acc": 0.910609613130129, + "train_speed(iter/s)": 3.472218 + }, + { + "epoch": 1.0735999999999999, + "grad_norm": 0.6173477434929108, + "learning_rate": 3.812594343127639e-06, + "loss": 0.33587145805358887, + "memory(GiB)": 77.0, + "step": 3355, + "token_acc": 0.8905369326609315, + "train_speed(iter/s)": 3.458728 + }, + { + "epoch": 1.07392, + "grad_norm": 0.6596771759611537, + "learning_rate": 3.8118437159561955e-06, + "loss": 0.39440086483955383, + "memory(GiB)": 77.0, + "step": 3356, + "token_acc": 0.8628192999053926, + "train_speed(iter/s)": 3.444087 + }, + { + "epoch": 1.07424, + "grad_norm": 0.6601478315404282, + "learning_rate": 3.8110929255485583e-06, + "loss": 0.4133327901363373, + "memory(GiB)": 77.0, + "step": 3357, + "token_acc": 0.9108595684471171, + "train_speed(iter/s)": 3.430625 + }, + { + "epoch": 1.07456, + "grad_norm": 0.636916351296822, + "learning_rate": 3.8103419719981517e-06, + "loss": 0.33400508761405945, + "memory(GiB)": 77.0, + "step": 3358, + "token_acc": 0.9430018999366688, + "train_speed(iter/s)": 3.416807 + }, + { + "epoch": 1.07488, + "grad_norm": 0.600285262481048, + "learning_rate": 3.8095908553984162e-06, + "loss": 0.2913171648979187, + "memory(GiB)": 77.0, + "step": 3359, + "token_acc": 0.8859493108048021, + "train_speed(iter/s)": 3.403969 + }, + { + "epoch": 1.0752, + "grad_norm": 0.7252851355307198, + "learning_rate": 3.8088395758428176e-06, + "loss": 0.3809468448162079, + "memory(GiB)": 77.0, + "step": 3360, + "token_acc": 0.9186754321889457, + "train_speed(iter/s)": 3.39099 + }, + { + "epoch": 1.07552, + "grad_norm": 0.5811007196982576, + "learning_rate": 3.8080881334248388e-06, + "loss": 0.3131595551967621, + "memory(GiB)": 77.0, + "step": 3361, + "token_acc": 0.8689265536723164, + "train_speed(iter/s)": 3.377216 + }, + { + "epoch": 1.07584, + "grad_norm": 0.6125065729674797, + "learning_rate": 3.8073365282379835e-06, + "loss": 0.2517097592353821, + "memory(GiB)": 77.0, + "step": 3362, + "token_acc": 0.9176747311827957, + "train_speed(iter/s)": 3.364888 + }, + { + "epoch": 1.07616, + "grad_norm": 0.6819650333503848, + "learning_rate": 3.8065847603757767e-06, + "loss": 0.2680732011795044, + "memory(GiB)": 77.0, + "step": 3363, + "token_acc": 0.9189712389380531, + "train_speed(iter/s)": 3.352829 + }, + { + "epoch": 1.07648, + "grad_norm": 0.6788371617450543, + "learning_rate": 3.8058328299317617e-06, + "loss": 0.4006814956665039, + "memory(GiB)": 77.0, + "step": 3364, + "token_acc": 0.882222923902747, + "train_speed(iter/s)": 3.34002 + }, + { + "epoch": 1.0768, + "grad_norm": 0.6441394586206436, + "learning_rate": 3.805080736999504e-06, + "loss": 0.32755082845687866, + "memory(GiB)": 77.0, + "step": 3365, + "token_acc": 0.9487698986975398, + "train_speed(iter/s)": 3.326425 + }, + { + "epoch": 1.07712, + "grad_norm": 0.6511844306443881, + "learning_rate": 3.8043284816725882e-06, + "loss": 0.3834214210510254, + 
"memory(GiB)": 77.0, + "step": 3366, + "token_acc": 0.9328992072480181, + "train_speed(iter/s)": 3.314192 + }, + { + "epoch": 1.07744, + "grad_norm": 0.6286695747390117, + "learning_rate": 3.8035760640446195e-06, + "loss": 0.28590935468673706, + "memory(GiB)": 77.0, + "step": 3367, + "token_acc": 0.953443258971872, + "train_speed(iter/s)": 3.30174 + }, + { + "epoch": 1.07776, + "grad_norm": 0.6640479681359119, + "learning_rate": 3.802823484209224e-06, + "loss": 0.25740963220596313, + "memory(GiB)": 77.0, + "step": 3368, + "token_acc": 0.9537337662337663, + "train_speed(iter/s)": 3.289899 + }, + { + "epoch": 1.07808, + "grad_norm": 0.5777077952880919, + "learning_rate": 3.8020707422600454e-06, + "loss": 0.336011677980423, + "memory(GiB)": 77.0, + "step": 3369, + "token_acc": 0.8627024891347294, + "train_speed(iter/s)": 3.276954 + }, + { + "epoch": 1.0784, + "grad_norm": 0.6111826818557187, + "learning_rate": 3.8013178382907517e-06, + "loss": 0.39819955825805664, + "memory(GiB)": 77.0, + "step": 3370, + "token_acc": 0.8602517601877534, + "train_speed(iter/s)": 3.264889 + }, + { + "epoch": 1.07872, + "grad_norm": 0.6306270223147896, + "learning_rate": 3.8005647723950267e-06, + "loss": 0.37353992462158203, + "memory(GiB)": 77.0, + "step": 3371, + "token_acc": 0.8157145983803896, + "train_speed(iter/s)": 3.252681 + }, + { + "epoch": 1.07904, + "grad_norm": 0.6612066732737538, + "learning_rate": 3.799811544666577e-06, + "loss": 0.4074572026729584, + "memory(GiB)": 77.0, + "step": 3372, + "token_acc": 0.823966065747614, + "train_speed(iter/s)": 3.240524 + }, + { + "epoch": 1.07936, + "grad_norm": 0.646390178085598, + "learning_rate": 3.79905815519913e-06, + "loss": 0.3685073256492615, + "memory(GiB)": 77.0, + "step": 3373, + "token_acc": 0.88000942951438, + "train_speed(iter/s)": 3.228885 + }, + { + "epoch": 1.07968, + "grad_norm": 0.609422526400164, + "learning_rate": 3.7983046040864303e-06, + "loss": 0.3288106322288513, + "memory(GiB)": 77.0, + "step": 3374, + "token_acc": 0.895710928319624, + "train_speed(iter/s)": 3.217546 + }, + { + "epoch": 1.08, + "grad_norm": 0.6501292426214597, + "learning_rate": 3.7975508914222448e-06, + "loss": 0.34315797686576843, + "memory(GiB)": 77.0, + "step": 3375, + "token_acc": 0.951332560834299, + "train_speed(iter/s)": 3.206273 + }, + { + "epoch": 1.08032, + "grad_norm": 0.6601510001186005, + "learning_rate": 3.79679701730036e-06, + "loss": 0.35418701171875, + "memory(GiB)": 77.0, + "step": 3376, + "token_acc": 0.9322838580709645, + "train_speed(iter/s)": 3.194478 + }, + { + "epoch": 1.08064, + "grad_norm": 0.6983000323954156, + "learning_rate": 3.7960429818145824e-06, + "loss": 0.30605506896972656, + "memory(GiB)": 77.0, + "step": 3377, + "token_acc": 0.8949900934050382, + "train_speed(iter/s)": 3.1837 + }, + { + "epoch": 1.08096, + "grad_norm": 0.6397629612651862, + "learning_rate": 3.795288785058738e-06, + "loss": 0.3073989748954773, + "memory(GiB)": 77.0, + "step": 3378, + "token_acc": 0.9256094496104549, + "train_speed(iter/s)": 3.172075 + }, + { + "epoch": 1.08128, + "grad_norm": 0.6675160776062274, + "learning_rate": 3.7945344271266754e-06, + "loss": 0.354169100522995, + "memory(GiB)": 77.0, + "step": 3379, + "token_acc": 0.89184, + "train_speed(iter/s)": 3.161 + }, + { + "epoch": 1.0816, + "grad_norm": 0.7861917162251313, + "learning_rate": 3.7937799081122594e-06, + "loss": 0.2711828649044037, + "memory(GiB)": 77.0, + "step": 3380, + "token_acc": 0.9516841524019879, + "train_speed(iter/s)": 3.150336 + }, + { + "epoch": 1.08192, + "grad_norm": 
0.6550899631293547, + "learning_rate": 3.793025228109378e-06, + "loss": 0.380423903465271, + "memory(GiB)": 77.0, + "step": 3381, + "token_acc": 0.9468451242829828, + "train_speed(iter/s)": 3.139428 + }, + { + "epoch": 1.08224, + "grad_norm": 0.6651023647188729, + "learning_rate": 3.7922703872119377e-06, + "loss": 0.2858005166053772, + "memory(GiB)": 77.0, + "step": 3382, + "token_acc": 0.907150776053215, + "train_speed(iter/s)": 3.12894 + }, + { + "epoch": 1.08256, + "grad_norm": 0.6344632156069826, + "learning_rate": 3.7915153855138647e-06, + "loss": 0.3199447989463806, + "memory(GiB)": 77.0, + "step": 3383, + "token_acc": 0.8966396292004635, + "train_speed(iter/s)": 3.118064 + }, + { + "epoch": 1.08288, + "grad_norm": 0.6574895232656163, + "learning_rate": 3.7907602231091067e-06, + "loss": 0.35369399189949036, + "memory(GiB)": 77.0, + "step": 3384, + "token_acc": 0.9343009931245225, + "train_speed(iter/s)": 3.107467 + }, + { + "epoch": 1.0832, + "grad_norm": 0.6252611123804243, + "learning_rate": 3.7900049000916305e-06, + "loss": 0.3532389998435974, + "memory(GiB)": 77.0, + "step": 3385, + "token_acc": 0.9287469287469288, + "train_speed(iter/s)": 3.097118 + }, + { + "epoch": 1.08352, + "grad_norm": 0.6903962319482039, + "learning_rate": 3.789249416555423e-06, + "loss": 0.38857924938201904, + "memory(GiB)": 77.0, + "step": 3386, + "token_acc": 0.9348450057405281, + "train_speed(iter/s)": 3.086358 + }, + { + "epoch": 1.08384, + "grad_norm": 0.6086288691235283, + "learning_rate": 3.7884937725944897e-06, + "loss": 0.25569915771484375, + "memory(GiB)": 77.0, + "step": 3387, + "token_acc": 0.9112375533428165, + "train_speed(iter/s)": 3.076118 + }, + { + "epoch": 1.08416, + "grad_norm": 0.6289237662804695, + "learning_rate": 3.7877379683028594e-06, + "loss": 0.3412872552871704, + "memory(GiB)": 77.0, + "step": 3388, + "token_acc": 0.8650176678445229, + "train_speed(iter/s)": 3.065138 + }, + { + "epoch": 1.08448, + "grad_norm": 0.6370883821115819, + "learning_rate": 3.7869820037745773e-06, + "loss": 0.3067598342895508, + "memory(GiB)": 77.0, + "step": 3389, + "token_acc": 0.9122886133032695, + "train_speed(iter/s)": 3.055182 + }, + { + "epoch": 1.0848, + "grad_norm": 0.6384368896042276, + "learning_rate": 3.786225879103711e-06, + "loss": 0.4072641134262085, + "memory(GiB)": 77.0, + "step": 3390, + "token_acc": 0.903010033444816, + "train_speed(iter/s)": 3.044874 + }, + { + "epoch": 1.08512, + "grad_norm": 0.6695665471130768, + "learning_rate": 3.7854695943843463e-06, + "loss": 0.3783531188964844, + "memory(GiB)": 77.0, + "step": 3391, + "token_acc": 0.9101312689330192, + "train_speed(iter/s)": 3.034898 + }, + { + "epoch": 1.08544, + "grad_norm": 0.5816931053803776, + "learning_rate": 3.7847131497105904e-06, + "loss": 0.2772005796432495, + "memory(GiB)": 77.0, + "step": 3392, + "token_acc": 0.928490990990991, + "train_speed(iter/s)": 3.025249 + }, + { + "epoch": 1.08576, + "grad_norm": 0.7118596495295672, + "learning_rate": 3.783956545176569e-06, + "loss": 0.4061547517776489, + "memory(GiB)": 77.0, + "step": 3393, + "token_acc": 0.9522924411400248, + "train_speed(iter/s)": 3.015309 + }, + { + "epoch": 1.08608, + "grad_norm": 0.6751958002216026, + "learning_rate": 3.7831997808764297e-06, + "loss": 0.41802945733070374, + "memory(GiB)": 77.0, + "step": 3394, + "token_acc": 0.9107981220657277, + "train_speed(iter/s)": 3.003871 + }, + { + "epoch": 1.0864, + "grad_norm": 0.62647833750371, + "learning_rate": 3.782442856904337e-06, + "loss": 0.34639716148376465, + "memory(GiB)": 77.0, + "step": 3395, + 
"token_acc": 0.9176747311827957, + "train_speed(iter/s)": 2.994275 + }, + { + "epoch": 1.08672, + "grad_norm": 0.6879897835006283, + "learning_rate": 3.7816857733544787e-06, + "loss": 0.39335060119628906, + "memory(GiB)": 77.0, + "step": 3396, + "token_acc": 0.9168053244592346, + "train_speed(iter/s)": 2.984598 + }, + { + "epoch": 1.08704, + "grad_norm": 0.5935822439485393, + "learning_rate": 3.78092853032106e-06, + "loss": 0.3085792660713196, + "memory(GiB)": 77.0, + "step": 3397, + "token_acc": 0.9007151872107699, + "train_speed(iter/s)": 2.974695 + }, + { + "epoch": 1.0873599999999999, + "grad_norm": 0.676034657941304, + "learning_rate": 3.7801711278983053e-06, + "loss": 0.34118419885635376, + "memory(GiB)": 77.0, + "step": 3398, + "token_acc": 0.8807887660591575, + "train_speed(iter/s)": 2.96489 + }, + { + "epoch": 1.08768, + "grad_norm": 0.6586744514399033, + "learning_rate": 3.779413566180462e-06, + "loss": 0.33560407161712646, + "memory(GiB)": 77.0, + "step": 3399, + "token_acc": 0.8925323121110579, + "train_speed(iter/s)": 2.955458 + }, + { + "epoch": 1.088, + "grad_norm": 0.6622004191775697, + "learning_rate": 3.7786558452617943e-06, + "loss": 0.3723300099372864, + "memory(GiB)": 77.0, + "step": 3400, + "token_acc": 0.8889233633260937, + "train_speed(iter/s)": 2.945084 + }, + { + "epoch": 1.08832, + "grad_norm": 0.7010411490637217, + "learning_rate": 3.777897965236589e-06, + "loss": 0.4063072204589844, + "memory(GiB)": 77.0, + "step": 3401, + "token_acc": 0.913582799255737, + "train_speed(iter/s)": 2.935363 + }, + { + "epoch": 1.08864, + "grad_norm": 0.6601301834548718, + "learning_rate": 3.7771399261991493e-06, + "loss": 0.35552921891212463, + "memory(GiB)": 77.0, + "step": 3402, + "token_acc": 0.8807017543859649, + "train_speed(iter/s)": 2.925706 + }, + { + "epoch": 1.08896, + "grad_norm": 0.7265148771531611, + "learning_rate": 3.776381728243802e-06, + "loss": 0.4426286220550537, + "memory(GiB)": 77.0, + "step": 3403, + "token_acc": 0.8815954773869347, + "train_speed(iter/s)": 2.91641 + }, + { + "epoch": 1.08928, + "grad_norm": 0.6823467797903986, + "learning_rate": 3.77562337146489e-06, + "loss": 0.3875730633735657, + "memory(GiB)": 77.0, + "step": 3404, + "token_acc": 0.9294385432473444, + "train_speed(iter/s)": 2.90677 + }, + { + "epoch": 1.0896, + "grad_norm": 0.6705348424761838, + "learning_rate": 3.774864855956778e-06, + "loss": 0.4291715621948242, + "memory(GiB)": 77.0, + "step": 3405, + "token_acc": 0.8522214302684723, + "train_speed(iter/s)": 2.897707 + }, + { + "epoch": 1.08992, + "grad_norm": 0.6727770428705534, + "learning_rate": 3.7741061818138503e-06, + "loss": 0.35272660851478577, + "memory(GiB)": 77.0, + "step": 3406, + "token_acc": 0.9557432432432432, + "train_speed(iter/s)": 2.888439 + }, + { + "epoch": 1.09024, + "grad_norm": 0.6132179700634809, + "learning_rate": 3.77334734913051e-06, + "loss": 0.3267471194267273, + "memory(GiB)": 77.0, + "step": 3407, + "token_acc": 0.9454475605071072, + "train_speed(iter/s)": 2.87965 + }, + { + "epoch": 1.09056, + "grad_norm": 0.6482183857204543, + "learning_rate": 3.7725883580011825e-06, + "loss": 0.36171603202819824, + "memory(GiB)": 77.0, + "step": 3408, + "token_acc": 0.858898847631242, + "train_speed(iter/s)": 2.870564 + }, + { + "epoch": 1.09088, + "grad_norm": 0.6564590441627403, + "learning_rate": 3.771829208520309e-06, + "loss": 0.31999343633651733, + "memory(GiB)": 77.0, + "step": 3409, + "token_acc": 0.9372724726967236, + "train_speed(iter/s)": 2.860734 + }, + { + "epoch": 1.0912, + "grad_norm": 
0.6612961466960015, + "learning_rate": 3.771069900782354e-06, + "loss": 0.2854422330856323, + "memory(GiB)": 77.0, + "step": 3410, + "token_acc": 0.9425242718446601, + "train_speed(iter/s)": 2.852044 + }, + { + "epoch": 1.09152, + "grad_norm": 0.6391125896862168, + "learning_rate": 3.7703104348818e-06, + "loss": 0.3682568073272705, + "memory(GiB)": 77.0, + "step": 3411, + "token_acc": 0.9412980570483671, + "train_speed(iter/s)": 2.843347 + }, + { + "epoch": 1.09184, + "grad_norm": 0.6386797687387707, + "learning_rate": 3.7695508109131485e-06, + "loss": 0.3572200834751129, + "memory(GiB)": 77.0, + "step": 3412, + "token_acc": 0.8493310063990692, + "train_speed(iter/s)": 2.831579 + }, + { + "epoch": 1.09216, + "grad_norm": 0.7135881046509541, + "learning_rate": 3.7687910289709216e-06, + "loss": 0.4459351897239685, + "memory(GiB)": 77.0, + "step": 3413, + "token_acc": 0.8940983606557377, + "train_speed(iter/s)": 2.823128 + }, + { + "epoch": 1.0924800000000001, + "grad_norm": 0.617791479594519, + "learning_rate": 3.768031089149662e-06, + "loss": 0.30347397923469543, + "memory(GiB)": 77.0, + "step": 3414, + "token_acc": 0.9260304912478825, + "train_speed(iter/s)": 2.813869 + }, + { + "epoch": 1.0928, + "grad_norm": 0.6401670334698456, + "learning_rate": 3.7672709915439297e-06, + "loss": 0.30217260122299194, + "memory(GiB)": 77.0, + "step": 3415, + "token_acc": 0.9525792273883877, + "train_speed(iter/s)": 2.805547 + }, + { + "epoch": 1.09312, + "grad_norm": 0.6518388839147458, + "learning_rate": 3.766510736248307e-06, + "loss": 0.40092456340789795, + "memory(GiB)": 77.0, + "step": 3416, + "token_acc": 0.8355660625164171, + "train_speed(iter/s)": 2.796643 + }, + { + "epoch": 1.09344, + "grad_norm": 0.6199139936607426, + "learning_rate": 3.765750323357394e-06, + "loss": 0.3521049916744232, + "memory(GiB)": 77.0, + "step": 3417, + "token_acc": 0.9075829383886256, + "train_speed(iter/s)": 2.788314 + }, + { + "epoch": 1.09376, + "grad_norm": 0.6327022208723192, + "learning_rate": 3.7649897529658107e-06, + "loss": 0.32593464851379395, + "memory(GiB)": 77.0, + "step": 3418, + "token_acc": 0.8820725388601036, + "train_speed(iter/s)": 2.780242 + }, + { + "epoch": 1.09408, + "grad_norm": 0.7400718979363665, + "learning_rate": 3.7642290251681966e-06, + "loss": 0.32670801877975464, + "memory(GiB)": 77.0, + "step": 3419, + "token_acc": 0.8711094837056024, + "train_speed(iter/s)": 2.771937 + }, + { + "epoch": 1.0944, + "grad_norm": 0.630338500931095, + "learning_rate": 3.763468140059212e-06, + "loss": 0.32132747769355774, + "memory(GiB)": 77.0, + "step": 3420, + "token_acc": 0.8341753343239228, + "train_speed(iter/s)": 2.763382 + }, + { + "epoch": 1.09472, + "grad_norm": 0.6443908665641065, + "learning_rate": 3.7627070977335346e-06, + "loss": 0.2890018820762634, + "memory(GiB)": 77.0, + "step": 3421, + "token_acc": 0.9310168625447113, + "train_speed(iter/s)": 2.755458 + }, + { + "epoch": 1.09504, + "grad_norm": 0.6399130782380406, + "learning_rate": 3.7619458982858637e-06, + "loss": 0.38312339782714844, + "memory(GiB)": 77.0, + "step": 3422, + "token_acc": 0.7733391228831958, + "train_speed(iter/s)": 2.746962 + }, + { + "epoch": 1.09536, + "grad_norm": 0.672118572059616, + "learning_rate": 3.7611845418109174e-06, + "loss": 0.34061479568481445, + "memory(GiB)": 77.0, + "step": 3423, + "token_acc": 0.861646234676007, + "train_speed(iter/s)": 2.738948 + }, + { + "epoch": 1.09568, + "grad_norm": 0.6720250544245935, + "learning_rate": 3.7604230284034326e-06, + "loss": 0.40243273973464966, + "memory(GiB)": 77.0, + 
"step": 3424, + "token_acc": 0.8379857690202518, + "train_speed(iter/s)": 2.730932 + }, + { + "epoch": 1.096, + "grad_norm": 0.6957724958006603, + "learning_rate": 3.7596613581581677e-06, + "loss": 0.3286881744861603, + "memory(GiB)": 77.0, + "step": 3425, + "token_acc": 0.9071522309711286, + "train_speed(iter/s)": 2.723048 + }, + { + "epoch": 1.09632, + "grad_norm": 0.6772502049469294, + "learning_rate": 3.7588995311698985e-06, + "loss": 0.3429209589958191, + "memory(GiB)": 77.0, + "step": 3426, + "token_acc": 0.9280388978930308, + "train_speed(iter/s)": 2.715145 + }, + { + "epoch": 1.09664, + "grad_norm": 0.613493321028557, + "learning_rate": 3.7581375475334215e-06, + "loss": 0.3168732225894928, + "memory(GiB)": 77.0, + "step": 3427, + "token_acc": 0.9155966176010022, + "train_speed(iter/s)": 2.707072 + }, + { + "epoch": 1.09696, + "grad_norm": 0.6457933443483203, + "learning_rate": 3.757375407343552e-06, + "loss": 0.3475657105445862, + "memory(GiB)": 77.0, + "step": 3428, + "token_acc": 0.840177366493156, + "train_speed(iter/s)": 2.699239 + }, + { + "epoch": 1.09728, + "grad_norm": 0.5880720599837042, + "learning_rate": 3.756613110695125e-06, + "loss": 0.2992924153804779, + "memory(GiB)": 77.0, + "step": 3429, + "token_acc": 0.8702484603949883, + "train_speed(iter/s)": 2.691236 + }, + { + "epoch": 1.0976, + "grad_norm": 0.6892580668218653, + "learning_rate": 3.7558506576829952e-06, + "loss": 0.29852956533432007, + "memory(GiB)": 77.0, + "step": 3430, + "token_acc": 0.8499590051926756, + "train_speed(iter/s)": 2.683674 + }, + { + "epoch": 1.09792, + "grad_norm": 0.5918134256501162, + "learning_rate": 3.755088048402037e-06, + "loss": 0.30563902854919434, + "memory(GiB)": 77.0, + "step": 3431, + "token_acc": 0.882078769517595, + "train_speed(iter/s)": 2.676287 + }, + { + "epoch": 1.09824, + "grad_norm": 0.658790088673597, + "learning_rate": 3.754325282947143e-06, + "loss": 0.3434714674949646, + "memory(GiB)": 77.0, + "step": 3432, + "token_acc": 0.9093464511595222, + "train_speed(iter/s)": 2.668554 + }, + { + "epoch": 1.09856, + "grad_norm": 0.6831737777544276, + "learning_rate": 3.753562361413228e-06, + "loss": 0.3547285199165344, + "memory(GiB)": 77.0, + "step": 3433, + "token_acc": 0.8964169381107492, + "train_speed(iter/s)": 2.661197 + }, + { + "epoch": 1.09888, + "grad_norm": 0.6384299561766118, + "learning_rate": 3.752799283895223e-06, + "loss": 0.37814512848854065, + "memory(GiB)": 77.0, + "step": 3434, + "token_acc": 0.8351746790086593, + "train_speed(iter/s)": 2.653726 + }, + { + "epoch": 1.0992, + "grad_norm": 0.6112665595654602, + "learning_rate": 3.7520360504880794e-06, + "loss": 0.4012461304664612, + "memory(GiB)": 77.0, + "step": 3435, + "token_acc": 0.9169455430483724, + "train_speed(iter/s)": 2.646066 + }, + { + "epoch": 1.09952, + "grad_norm": 0.5742176542755084, + "learning_rate": 3.7512726612867696e-06, + "loss": 0.26728808879852295, + "memory(GiB)": 77.0, + "step": 3436, + "token_acc": 0.926525889499566, + "train_speed(iter/s)": 2.638946 + }, + { + "epoch": 1.09984, + "grad_norm": 0.662425750096849, + "learning_rate": 3.750509116386283e-06, + "loss": 0.390032023191452, + "memory(GiB)": 77.0, + "step": 3437, + "token_acc": 0.8973362930077692, + "train_speed(iter/s)": 2.631215 + }, + { + "epoch": 1.10016, + "grad_norm": 0.5774598945785482, + "learning_rate": 3.74974541588163e-06, + "loss": 0.25672149658203125, + "memory(GiB)": 77.0, + "step": 3438, + "token_acc": 0.950815494393476, + "train_speed(iter/s)": 2.623727 + }, + { + "epoch": 1.10048, + "grad_norm": 
0.6524365774161639, + "learning_rate": 3.7489815598678396e-06, + "loss": 0.339842289686203, + "memory(GiB)": 77.0, + "step": 3439, + "token_acc": 0.9012345679012346, + "train_speed(iter/s)": 2.616365 + }, + { + "epoch": 1.1008, + "grad_norm": 0.6653471545154449, + "learning_rate": 3.748217548439961e-06, + "loss": 0.36706024408340454, + "memory(GiB)": 77.0, + "step": 3440, + "token_acc": 0.9515463917525773, + "train_speed(iter/s)": 2.609075 + }, + { + "epoch": 1.10112, + "grad_norm": 0.6564979043647127, + "learning_rate": 3.7474533816930613e-06, + "loss": 0.36953043937683105, + "memory(GiB)": 77.0, + "step": 3441, + "token_acc": 0.9204244031830239, + "train_speed(iter/s)": 2.60161 + }, + { + "epoch": 1.10144, + "grad_norm": 0.6318489302077246, + "learning_rate": 3.7466890597222293e-06, + "loss": 0.24820661544799805, + "memory(GiB)": 77.0, + "step": 3442, + "token_acc": 0.932632713554298, + "train_speed(iter/s)": 2.594595 + }, + { + "epoch": 1.10176, + "grad_norm": 0.6506665823782549, + "learning_rate": 3.7459245826225697e-06, + "loss": 0.31167635321617126, + "memory(GiB)": 77.0, + "step": 3443, + "token_acc": 0.8622181735367798, + "train_speed(iter/s)": 2.586965 + }, + { + "epoch": 1.10208, + "grad_norm": 0.6423473426110325, + "learning_rate": 3.74515995048921e-06, + "loss": 0.3405417203903198, + "memory(GiB)": 77.0, + "step": 3444, + "token_acc": 0.8540772532188842, + "train_speed(iter/s)": 2.580042 + }, + { + "epoch": 1.1024, + "grad_norm": 0.6281135971182765, + "learning_rate": 3.744395163417294e-06, + "loss": 0.3192143142223358, + "memory(GiB)": 77.0, + "step": 3445, + "token_acc": 0.8858673259167696, + "train_speed(iter/s)": 2.573151 + }, + { + "epoch": 1.10272, + "grad_norm": 0.6363117984830609, + "learning_rate": 3.7436302215019876e-06, + "loss": 0.35811707377433777, + "memory(GiB)": 77.0, + "step": 3446, + "token_acc": 0.9298491757278148, + "train_speed(iter/s)": 2.566285 + }, + { + "epoch": 1.10304, + "grad_norm": 0.8136736985590428, + "learning_rate": 3.742865124838474e-06, + "loss": 0.32805395126342773, + "memory(GiB)": 77.0, + "step": 3447, + "token_acc": 0.8544853245142621, + "train_speed(iter/s)": 2.559375 + }, + { + "epoch": 1.10336, + "grad_norm": 0.7018925846590165, + "learning_rate": 3.742099873521956e-06, + "loss": 0.32822269201278687, + "memory(GiB)": 77.0, + "step": 3448, + "token_acc": 0.8660212367270456, + "train_speed(iter/s)": 2.552765 + }, + { + "epoch": 1.10368, + "grad_norm": 0.6400857602408337, + "learning_rate": 3.7413344676476552e-06, + "loss": 0.3274231553077698, + "memory(GiB)": 77.0, + "step": 3449, + "token_acc": 0.9182041820418204, + "train_speed(iter/s)": 2.54561 + }, + { + "epoch": 1.104, + "grad_norm": 0.6226756487318712, + "learning_rate": 3.7405689073108147e-06, + "loss": 0.23147493600845337, + "memory(GiB)": 77.0, + "step": 3450, + "token_acc": 0.9562993956299396, + "train_speed(iter/s)": 2.538371 + }, + { + "epoch": 1.10432, + "grad_norm": 0.7096959760621171, + "learning_rate": 3.739803192606694e-06, + "loss": 0.3586459159851074, + "memory(GiB)": 77.0, + "step": 3451, + "token_acc": 0.9139482564679415, + "train_speed(iter/s)": 2.531886 + }, + { + "epoch": 1.10464, + "grad_norm": 0.6265381547438532, + "learning_rate": 3.7390373236305733e-06, + "loss": 0.409762442111969, + "memory(GiB)": 77.0, + "step": 3452, + "token_acc": 0.8995587853620556, + "train_speed(iter/s)": 2.524808 + }, + { + "epoch": 1.10496, + "grad_norm": 0.6212006327961939, + "learning_rate": 3.738271300477752e-06, + "loss": 0.2985149621963501, + "memory(GiB)": 77.0, + "step": 3453, + 
"token_acc": 0.9122852013336753, + "train_speed(iter/s)": 2.51792 + }, + { + "epoch": 1.10528, + "grad_norm": 0.6618198330244185, + "learning_rate": 3.737505123243549e-06, + "loss": 0.28362756967544556, + "memory(GiB)": 77.0, + "step": 3454, + "token_acc": 0.8814814814814815, + "train_speed(iter/s)": 2.511597 + }, + { + "epoch": 1.1056, + "grad_norm": 0.7346851419955273, + "learning_rate": 3.7367387920232988e-06, + "loss": 0.3549320697784424, + "memory(GiB)": 77.0, + "step": 3455, + "token_acc": 0.8318014705882353, + "train_speed(iter/s)": 2.505164 + }, + { + "epoch": 1.10592, + "grad_norm": 0.6409534746247588, + "learning_rate": 3.7359723069123616e-06, + "loss": 0.350663959980011, + "memory(GiB)": 77.0, + "step": 3456, + "token_acc": 0.9020240539747727, + "train_speed(iter/s)": 2.498363 + }, + { + "epoch": 1.1062400000000001, + "grad_norm": 0.7073876400486464, + "learning_rate": 3.735205668006111e-06, + "loss": 0.3058209717273712, + "memory(GiB)": 77.0, + "step": 3457, + "token_acc": 0.9116067740603057, + "train_speed(iter/s)": 2.491598 + }, + { + "epoch": 1.10656, + "grad_norm": 0.611375031606644, + "learning_rate": 3.7344388753999434e-06, + "loss": 0.3546198308467865, + "memory(GiB)": 77.0, + "step": 3458, + "token_acc": 0.9583194398132711, + "train_speed(iter/s)": 2.484651 + }, + { + "epoch": 1.10688, + "grad_norm": 0.6163004637201044, + "learning_rate": 3.733671929189272e-06, + "loss": 0.28805533051490784, + "memory(GiB)": 77.0, + "step": 3459, + "token_acc": 0.9339098208770846, + "train_speed(iter/s)": 2.47811 + }, + { + "epoch": 1.1072, + "grad_norm": 0.6028283768514444, + "learning_rate": 3.7329048294695293e-06, + "loss": 0.3548738360404968, + "memory(GiB)": 77.0, + "step": 3460, + "token_acc": 0.9210689869484152, + "train_speed(iter/s)": 2.471629 + }, + { + "epoch": 1.10752, + "grad_norm": 0.6443067635026601, + "learning_rate": 3.7321375763361693e-06, + "loss": 0.3084850609302521, + "memory(GiB)": 77.0, + "step": 3461, + "token_acc": 0.8681586527808554, + "train_speed(iter/s)": 2.465039 + }, + { + "epoch": 1.10784, + "grad_norm": 0.6498973542573528, + "learning_rate": 3.7313701698846616e-06, + "loss": 0.26021477580070496, + "memory(GiB)": 77.0, + "step": 3462, + "token_acc": 0.9534351145038168, + "train_speed(iter/s)": 2.458945 + }, + { + "epoch": 1.10816, + "grad_norm": 0.6444935134378206, + "learning_rate": 3.7306026102104977e-06, + "loss": 0.27008360624313354, + "memory(GiB)": 77.0, + "step": 3463, + "token_acc": 0.9496527777777778, + "train_speed(iter/s)": 2.452791 + }, + { + "epoch": 1.10848, + "grad_norm": 0.6240878573507974, + "learning_rate": 3.7298348974091856e-06, + "loss": 0.27828824520111084, + "memory(GiB)": 77.0, + "step": 3464, + "token_acc": 0.9347457627118644, + "train_speed(iter/s)": 2.446508 + }, + { + "epoch": 1.1088, + "grad_norm": 0.6872626110877826, + "learning_rate": 3.7290670315762564e-06, + "loss": 0.43768787384033203, + "memory(GiB)": 77.0, + "step": 3465, + "token_acc": 0.8998582900330656, + "train_speed(iter/s)": 2.439845 + }, + { + "epoch": 1.1091199999999999, + "grad_norm": 0.7055987096160499, + "learning_rate": 3.7282990128072556e-06, + "loss": 0.3968327045440674, + "memory(GiB)": 77.0, + "step": 3466, + "token_acc": 0.9430358906815937, + "train_speed(iter/s)": 2.433496 + }, + { + "epoch": 1.10944, + "grad_norm": 0.6110205969531418, + "learning_rate": 3.7275308411977507e-06, + "loss": 0.3583885133266449, + "memory(GiB)": 77.0, + "step": 3467, + "token_acc": 0.9543634907926342, + "train_speed(iter/s)": 2.427046 + }, + { + "epoch": 1.10976, + 
"grad_norm": 0.7319417742365588, + "learning_rate": 3.7267625168433268e-06, + "loss": 0.38940462470054626, + "memory(GiB)": 77.0, + "step": 3468, + "token_acc": 0.8810096153846154, + "train_speed(iter/s)": 2.421002 + }, + { + "epoch": 1.11008, + "grad_norm": 0.613659949084266, + "learning_rate": 3.7259940398395893e-06, + "loss": 0.32512879371643066, + "memory(GiB)": 77.0, + "step": 3469, + "token_acc": 0.8958737864077669, + "train_speed(iter/s)": 2.414975 + }, + { + "epoch": 1.1104, + "grad_norm": 0.638205538335545, + "learning_rate": 3.725225410282161e-06, + "loss": 0.3560227155685425, + "memory(GiB)": 77.0, + "step": 3470, + "token_acc": 0.8406282722513089, + "train_speed(iter/s)": 2.408872 + }, + { + "epoch": 1.11072, + "grad_norm": 0.6317436435325929, + "learning_rate": 3.724456628266685e-06, + "loss": 0.34699034690856934, + "memory(GiB)": 77.0, + "step": 3471, + "token_acc": 0.9315789473684211, + "train_speed(iter/s)": 2.402359 + }, + { + "epoch": 1.11104, + "grad_norm": 0.6412291876502587, + "learning_rate": 3.7236876938888223e-06, + "loss": 0.32442423701286316, + "memory(GiB)": 77.0, + "step": 3472, + "token_acc": 0.9550602849835587, + "train_speed(iter/s)": 2.396021 + }, + { + "epoch": 1.11136, + "grad_norm": 0.6149942578065893, + "learning_rate": 3.722918607244254e-06, + "loss": 0.36767303943634033, + "memory(GiB)": 77.0, + "step": 3473, + "token_acc": 0.8902869757174393, + "train_speed(iter/s)": 2.389581 + }, + { + "epoch": 1.11168, + "grad_norm": 0.6839114817103839, + "learning_rate": 3.72214936842868e-06, + "loss": 0.32017982006073, + "memory(GiB)": 77.0, + "step": 3474, + "token_acc": 0.9045151739452257, + "train_speed(iter/s)": 2.38339 + }, + { + "epoch": 1.112, + "grad_norm": 0.7035673690578623, + "learning_rate": 3.7213799775378182e-06, + "loss": 0.3928705155849457, + "memory(GiB)": 77.0, + "step": 3475, + "token_acc": 0.8774113254511512, + "train_speed(iter/s)": 2.377564 + }, + { + "epoch": 1.11232, + "grad_norm": 0.7698851040826243, + "learning_rate": 3.720610434667406e-06, + "loss": 0.3657442629337311, + "memory(GiB)": 77.0, + "step": 3476, + "token_acc": 0.9221871713985279, + "train_speed(iter/s)": 2.371862 + }, + { + "epoch": 1.11264, + "grad_norm": 0.7028483244457397, + "learning_rate": 3.7198407399131994e-06, + "loss": 0.2916962504386902, + "memory(GiB)": 77.0, + "step": 3477, + "token_acc": 0.8769961156668106, + "train_speed(iter/s)": 2.366275 + }, + { + "epoch": 1.11296, + "grad_norm": 0.6555295233657751, + "learning_rate": 3.7190708933709736e-06, + "loss": 0.3652043342590332, + "memory(GiB)": 77.0, + "step": 3478, + "token_acc": 0.9333333333333333, + "train_speed(iter/s)": 2.360441 + }, + { + "epoch": 1.11328, + "grad_norm": 0.6210861500674384, + "learning_rate": 3.718300895136523e-06, + "loss": 0.2659602463245392, + "memory(GiB)": 77.0, + "step": 3479, + "token_acc": 0.9478031053848696, + "train_speed(iter/s)": 2.354898 + }, + { + "epoch": 1.1136, + "grad_norm": 0.6629798823144613, + "learning_rate": 3.7175307453056603e-06, + "loss": 0.38965344429016113, + "memory(GiB)": 77.0, + "step": 3480, + "token_acc": 0.9497776615223646, + "train_speed(iter/s)": 2.348821 + }, + { + "epoch": 1.11392, + "grad_norm": 0.6497907474151535, + "learning_rate": 3.7167604439742174e-06, + "loss": 0.2917061448097229, + "memory(GiB)": 77.0, + "step": 3481, + "token_acc": 0.9153125, + "train_speed(iter/s)": 2.342857 + }, + { + "epoch": 1.11424, + "grad_norm": 0.5720112462402832, + "learning_rate": 3.715989991238045e-06, + "loss": 0.30136531591415405, + "memory(GiB)": 77.0, + "step": 
3482, + "token_acc": 0.9230197099293418, + "train_speed(iter/s)": 2.336943 + }, + { + "epoch": 1.11456, + "grad_norm": 0.6526503403530887, + "learning_rate": 3.7152193871930127e-06, + "loss": 0.3842581510543823, + "memory(GiB)": 77.0, + "step": 3483, + "token_acc": 0.8992340832934418, + "train_speed(iter/s)": 2.33114 + }, + { + "epoch": 1.11488, + "grad_norm": 0.8160010695028317, + "learning_rate": 3.714448631935008e-06, + "loss": 0.2935848832130432, + "memory(GiB)": 77.0, + "step": 3484, + "token_acc": 0.8617070895522388, + "train_speed(iter/s)": 2.325405 + }, + { + "epoch": 1.1152, + "grad_norm": 0.6431767728357307, + "learning_rate": 3.713677725559939e-06, + "loss": 0.2740894556045532, + "memory(GiB)": 77.0, + "step": 3485, + "token_acc": 0.9561699258260283, + "train_speed(iter/s)": 2.320041 + }, + { + "epoch": 1.11552, + "grad_norm": 0.6256816031006203, + "learning_rate": 3.712906668163731e-06, + "loss": 0.3019065260887146, + "memory(GiB)": 77.0, + "step": 3486, + "token_acc": 0.953551912568306, + "train_speed(iter/s)": 2.31437 + }, + { + "epoch": 1.11584, + "grad_norm": 0.6359131649075958, + "learning_rate": 3.7121354598423285e-06, + "loss": 0.3307640552520752, + "memory(GiB)": 77.0, + "step": 3487, + "token_acc": 0.9112404510731175, + "train_speed(iter/s)": 2.308597 + }, + { + "epoch": 1.11616, + "grad_norm": 0.6807019767509546, + "learning_rate": 3.7113641006916964e-06, + "loss": 0.4302297532558441, + "memory(GiB)": 77.0, + "step": 3488, + "token_acc": 0.8341187558906692, + "train_speed(iter/s)": 2.302764 + }, + { + "epoch": 1.11648, + "grad_norm": 0.6306800284580153, + "learning_rate": 3.710592590807815e-06, + "loss": 0.318650484085083, + "memory(GiB)": 77.0, + "step": 3489, + "token_acc": 0.9404031551270815, + "train_speed(iter/s)": 2.297523 + }, + { + "epoch": 1.1168, + "grad_norm": 0.6678474599452142, + "learning_rate": 3.709820930286687e-06, + "loss": 0.403790682554245, + "memory(GiB)": 77.0, + "step": 3490, + "token_acc": 0.9075993091537133, + "train_speed(iter/s)": 2.292109 + }, + { + "epoch": 1.11712, + "grad_norm": 0.6445687903443925, + "learning_rate": 3.7090491192243316e-06, + "loss": 0.3495504558086395, + "memory(GiB)": 77.0, + "step": 3491, + "token_acc": 0.8715629522431259, + "train_speed(iter/s)": 2.2866 + }, + { + "epoch": 1.11744, + "grad_norm": 0.7241753442584439, + "learning_rate": 3.708277157716787e-06, + "loss": 0.44297319650650024, + "memory(GiB)": 77.0, + "step": 3492, + "token_acc": 0.8567760342368046, + "train_speed(iter/s)": 2.281171 + }, + { + "epoch": 1.11776, + "grad_norm": 0.6661037016227027, + "learning_rate": 3.7075050458601104e-06, + "loss": 0.2319803684949875, + "memory(GiB)": 77.0, + "step": 3493, + "token_acc": 0.8791773778920309, + "train_speed(iter/s)": 2.276043 + }, + { + "epoch": 1.11808, + "grad_norm": 0.6447242163031242, + "learning_rate": 3.706732783750378e-06, + "loss": 0.31256532669067383, + "memory(GiB)": 77.0, + "step": 3494, + "token_acc": 0.865872604867944, + "train_speed(iter/s)": 2.270916 + }, + { + "epoch": 1.1184, + "grad_norm": 0.6774673272276672, + "learning_rate": 3.7059603714836843e-06, + "loss": 0.2732524275779724, + "memory(GiB)": 77.0, + "step": 3495, + "token_acc": 0.9081081081081082, + "train_speed(iter/s)": 2.265604 + }, + { + "epoch": 1.11872, + "grad_norm": 0.6776145489918214, + "learning_rate": 3.7051878091561427e-06, + "loss": 0.28811803460121155, + "memory(GiB)": 77.0, + "step": 3496, + "token_acc": 0.9081081081081082, + "train_speed(iter/s)": 2.26057 + }, + { + "epoch": 1.11904, + "grad_norm": 0.6555884924090604, + 
"learning_rate": 3.704415096863885e-06, + "loss": 0.40581488609313965, + "memory(GiB)": 77.0, + "step": 3497, + "token_acc": 0.9375896700143472, + "train_speed(iter/s)": 2.254734 + }, + { + "epoch": 1.11936, + "grad_norm": 0.5988850151018854, + "learning_rate": 3.703642234703062e-06, + "loss": 0.2662951946258545, + "memory(GiB)": 77.0, + "step": 3498, + "token_acc": 0.9170498084291188, + "train_speed(iter/s)": 2.249239 + }, + { + "epoch": 1.11968, + "grad_norm": 0.62550953040115, + "learning_rate": 3.7028692227698434e-06, + "loss": 0.34689879417419434, + "memory(GiB)": 77.0, + "step": 3499, + "token_acc": 0.8639097744360902, + "train_speed(iter/s)": 2.243803 + }, + { + "epoch": 1.12, + "grad_norm": 0.6408841915425466, + "learning_rate": 3.7020960611604163e-06, + "loss": 0.34453529119491577, + "memory(GiB)": 77.0, + "step": 3500, + "token_acc": 0.8526582278481013, + "train_speed(iter/s)": 2.237965 + }, + { + "epoch": 1.12032, + "grad_norm": 0.6025357716994296, + "learning_rate": 3.7013227499709876e-06, + "loss": 0.3496503531932831, + "memory(GiB)": 77.0, + "step": 3501, + "token_acc": 0.9083094555873925, + "train_speed(iter/s)": 2.232847 + }, + { + "epoch": 1.12064, + "grad_norm": 0.6330219982547622, + "learning_rate": 3.700549289297783e-06, + "loss": 0.3191274404525757, + "memory(GiB)": 77.0, + "step": 3502, + "token_acc": 0.8905852417302799, + "train_speed(iter/s)": 2.226329 + }, + { + "epoch": 1.12096, + "grad_norm": 0.6960358113989854, + "learning_rate": 3.6997756792370452e-06, + "loss": 0.3886886537075043, + "memory(GiB)": 77.0, + "step": 3503, + "token_acc": 0.8295290972650627, + "train_speed(iter/s)": 2.22136 + }, + { + "epoch": 1.12128, + "grad_norm": 0.6988946254593569, + "learning_rate": 3.6990019198850367e-06, + "loss": 0.332724004983902, + "memory(GiB)": 77.0, + "step": 3504, + "token_acc": 0.8824328916072035, + "train_speed(iter/s)": 2.216065 + }, + { + "epoch": 1.1216, + "grad_norm": 0.6010700001743857, + "learning_rate": 3.69822801133804e-06, + "loss": 0.278572142124176, + "memory(GiB)": 77.0, + "step": 3505, + "token_acc": 0.8335266821345708, + "train_speed(iter/s)": 2.211156 + }, + { + "epoch": 1.12192, + "grad_norm": 0.6076876207427032, + "learning_rate": 3.6974539536923528e-06, + "loss": 0.286885142326355, + "memory(GiB)": 77.0, + "step": 3506, + "token_acc": 0.9097504352872896, + "train_speed(iter/s)": 2.205983 + }, + { + "epoch": 1.12224, + "grad_norm": 0.6594123805833557, + "learning_rate": 3.696679747044294e-06, + "loss": 0.3488190174102783, + "memory(GiB)": 77.0, + "step": 3507, + "token_acc": 0.8090696769170883, + "train_speed(iter/s)": 2.200592 + }, + { + "epoch": 1.12256, + "grad_norm": 0.6319118892474654, + "learning_rate": 3.6959053914902e-06, + "loss": 0.36091363430023193, + "memory(GiB)": 77.0, + "step": 3508, + "token_acc": 0.8332916145181477, + "train_speed(iter/s)": 2.195721 + }, + { + "epoch": 1.12288, + "grad_norm": 0.5924286561877972, + "learning_rate": 3.695130887126426e-06, + "loss": 0.26944470405578613, + "memory(GiB)": 77.0, + "step": 3509, + "token_acc": 0.8938002296211252, + "train_speed(iter/s)": 2.19083 + }, + { + "epoch": 1.1232, + "grad_norm": 0.6451549900146669, + "learning_rate": 3.6943562340493443e-06, + "loss": 0.318203330039978, + "memory(GiB)": 77.0, + "step": 3510, + "token_acc": 0.8773307163886163, + "train_speed(iter/s)": 2.185509 + }, + { + "epoch": 1.12352, + "grad_norm": 0.6276256087070645, + "learning_rate": 3.69358143235535e-06, + "loss": 0.3974899351596832, + "memory(GiB)": 77.0, + "step": 3511, + "token_acc": 0.8952002272081795, 
+ "train_speed(iter/s)": 2.180704 + }, + { + "epoch": 1.12384, + "grad_norm": 0.6627541019787742, + "learning_rate": 3.6928064821408506e-06, + "loss": 0.28968703746795654, + "memory(GiB)": 77.0, + "step": 3512, + "token_acc": 0.9169305724725944, + "train_speed(iter/s)": 2.175955 + }, + { + "epoch": 1.12416, + "grad_norm": 0.6710092907333536, + "learning_rate": 3.6920313835022774e-06, + "loss": 0.3670633137226105, + "memory(GiB)": 77.0, + "step": 3513, + "token_acc": 0.9025491237387148, + "train_speed(iter/s)": 2.171101 + }, + { + "epoch": 1.12448, + "grad_norm": 0.6594566049376919, + "learning_rate": 3.691256136536077e-06, + "loss": 0.38731032609939575, + "memory(GiB)": 77.0, + "step": 3514, + "token_acc": 0.8885760257441674, + "train_speed(iter/s)": 2.166032 + }, + { + "epoch": 1.1248, + "grad_norm": 0.6353907996686687, + "learning_rate": 3.6904807413387158e-06, + "loss": 0.29306215047836304, + "memory(GiB)": 77.0, + "step": 3515, + "token_acc": 0.9310508796956728, + "train_speed(iter/s)": 2.16139 + }, + { + "epoch": 1.12512, + "grad_norm": 1.38477425409253, + "learning_rate": 3.6897051980066776e-06, + "loss": 0.2953008711338043, + "memory(GiB)": 77.0, + "step": 3516, + "token_acc": 0.9528265107212476, + "train_speed(iter/s)": 2.156558 + }, + { + "epoch": 1.12544, + "grad_norm": 0.719882646149517, + "learning_rate": 3.688929506636466e-06, + "loss": 0.37634220719337463, + "memory(GiB)": 77.0, + "step": 3517, + "token_acc": 0.9219481735872619, + "train_speed(iter/s)": 2.151945 + }, + { + "epoch": 1.12576, + "grad_norm": 0.5862571674740035, + "learning_rate": 3.6881536673246028e-06, + "loss": 0.23809599876403809, + "memory(GiB)": 77.0, + "step": 3518, + "token_acc": 0.9370849353372946, + "train_speed(iter/s)": 2.147418 + }, + { + "epoch": 1.12608, + "grad_norm": 0.6392940903138256, + "learning_rate": 3.6873776801676265e-06, + "loss": 0.2901216447353363, + "memory(GiB)": 77.0, + "step": 3519, + "token_acc": 0.9552071668533034, + "train_speed(iter/s)": 2.142743 + }, + { + "epoch": 1.1264, + "grad_norm": 0.6455374070727568, + "learning_rate": 3.6866015452620953e-06, + "loss": 0.3011288642883301, + "memory(GiB)": 77.0, + "step": 3520, + "token_acc": 0.9399025446670276, + "train_speed(iter/s)": 2.13818 + }, + { + "epoch": 1.12672, + "grad_norm": 0.5791303533303866, + "learning_rate": 3.6858252627045864e-06, + "loss": 0.2804592251777649, + "memory(GiB)": 77.0, + "step": 3521, + "token_acc": 0.9349383777586701, + "train_speed(iter/s)": 2.133539 + }, + { + "epoch": 1.12704, + "grad_norm": 0.6332844430723858, + "learning_rate": 3.6850488325916957e-06, + "loss": 0.3373507857322693, + "memory(GiB)": 77.0, + "step": 3522, + "token_acc": 0.8673096976016684, + "train_speed(iter/s)": 2.128969 + }, + { + "epoch": 1.12736, + "grad_norm": 0.6445024637832848, + "learning_rate": 3.684272255020034e-06, + "loss": 0.33747875690460205, + "memory(GiB)": 77.0, + "step": 3523, + "token_acc": 0.8684587243518371, + "train_speed(iter/s)": 2.124199 + }, + { + "epoch": 1.12768, + "grad_norm": 0.655101436588192, + "learning_rate": 3.6834955300862352e-06, + "loss": 0.34931015968322754, + "memory(GiB)": 77.0, + "step": 3524, + "token_acc": 0.868225019265348, + "train_speed(iter/s)": 2.1196 + }, + { + "epoch": 1.1280000000000001, + "grad_norm": 0.640115771555475, + "learning_rate": 3.682718657886948e-06, + "loss": 0.23629824817180634, + "memory(GiB)": 77.0, + "step": 3525, + "token_acc": 0.9365689572321, + "train_speed(iter/s)": 2.115159 + }, + { + "epoch": 1.12832, + "grad_norm": 0.6046654514415639, + "learning_rate": 
3.6819416385188406e-06, + "loss": 0.32615357637405396, + "memory(GiB)": 77.0, + "step": 3526, + "token_acc": 0.9515477792732167, + "train_speed(iter/s)": 2.110468 + }, + { + "epoch": 1.12864, + "grad_norm": 0.6501999197386537, + "learning_rate": 3.6811644720786e-06, + "loss": 0.3387291431427002, + "memory(GiB)": 77.0, + "step": 3527, + "token_acc": 0.8809964998970558, + "train_speed(iter/s)": 2.105987 + }, + { + "epoch": 1.12896, + "grad_norm": 0.6684658564921914, + "learning_rate": 3.6803871586629313e-06, + "loss": 0.33507150411605835, + "memory(GiB)": 77.0, + "step": 3528, + "token_acc": 0.909702209414025, + "train_speed(iter/s)": 2.101518 + }, + { + "epoch": 1.12928, + "grad_norm": 0.5987687727556416, + "learning_rate": 3.6796096983685564e-06, + "loss": 0.287381112575531, + "memory(GiB)": 77.0, + "step": 3529, + "token_acc": 0.8295227524972253, + "train_speed(iter/s)": 2.097025 + }, + { + "epoch": 1.1296, + "grad_norm": 0.7448764997006996, + "learning_rate": 3.6788320912922186e-06, + "loss": 0.36116352677345276, + "memory(GiB)": 77.0, + "step": 3530, + "token_acc": 0.9264774313634249, + "train_speed(iter/s)": 2.092154 + }, + { + "epoch": 1.12992, + "grad_norm": 0.630845302666453, + "learning_rate": 3.6780543375306766e-06, + "loss": 0.2788395285606384, + "memory(GiB)": 77.0, + "step": 3531, + "token_acc": 0.9257933667382486, + "train_speed(iter/s)": 2.087329 + }, + { + "epoch": 1.13024, + "grad_norm": 0.6841127715803978, + "learning_rate": 3.6772764371807084e-06, + "loss": 0.36177465319633484, + "memory(GiB)": 77.0, + "step": 3532, + "token_acc": 0.8644455747711088, + "train_speed(iter/s)": 2.082949 + }, + { + "epoch": 1.13056, + "grad_norm": 0.6331915079476832, + "learning_rate": 3.6764983903391107e-06, + "loss": 0.2831811308860779, + "memory(GiB)": 77.0, + "step": 3533, + "token_acc": 0.8383932741709481, + "train_speed(iter/s)": 2.078387 + }, + { + "epoch": 1.1308799999999999, + "grad_norm": 0.6109921227513959, + "learning_rate": 3.675720197102697e-06, + "loss": 0.27229028940200806, + "memory(GiB)": 77.0, + "step": 3534, + "token_acc": 0.9254826254826255, + "train_speed(iter/s)": 2.073904 + }, + { + "epoch": 1.1312, + "grad_norm": 0.6387799142965865, + "learning_rate": 3.6749418575683005e-06, + "loss": 0.35352104902267456, + "memory(GiB)": 77.0, + "step": 3535, + "token_acc": 0.8601777585085627, + "train_speed(iter/s)": 2.069615 + }, + { + "epoch": 1.13152, + "grad_norm": 0.6537205941090796, + "learning_rate": 3.6741633718327724e-06, + "loss": 0.3246819078922272, + "memory(GiB)": 77.0, + "step": 3536, + "token_acc": 0.9432098765432099, + "train_speed(iter/s)": 2.065399 + }, + { + "epoch": 1.13184, + "grad_norm": 0.7201493450736153, + "learning_rate": 3.6733847399929806e-06, + "loss": 0.39557403326034546, + "memory(GiB)": 77.0, + "step": 3537, + "token_acc": 0.8567415730337079, + "train_speed(iter/s)": 2.06114 + }, + { + "epoch": 1.13216, + "grad_norm": 0.6802731213073561, + "learning_rate": 3.6726059621458134e-06, + "loss": 0.3514856696128845, + "memory(GiB)": 77.0, + "step": 3538, + "token_acc": 0.8908883176235362, + "train_speed(iter/s)": 2.056994 + }, + { + "epoch": 1.13248, + "grad_norm": 0.6603601144370903, + "learning_rate": 3.671827038388176e-06, + "loss": 0.4067857563495636, + "memory(GiB)": 77.0, + "step": 3539, + "token_acc": 0.8342447026657553, + "train_speed(iter/s)": 2.052377 + }, + { + "epoch": 1.1328, + "grad_norm": 0.6659339045651463, + "learning_rate": 3.6710479688169925e-06, + "loss": 0.349808007478714, + "memory(GiB)": 77.0, + "step": 3540, + "token_acc": 
0.8883943854844232, + "train_speed(iter/s)": 2.047953 + }, + { + "epoch": 1.13312, + "grad_norm": 0.6778176363996822, + "learning_rate": 3.6702687535292036e-06, + "loss": 0.28644514083862305, + "memory(GiB)": 77.0, + "step": 3541, + "token_acc": 0.8745874587458746, + "train_speed(iter/s)": 2.043649 + }, + { + "epoch": 1.13344, + "grad_norm": 0.6572380377444775, + "learning_rate": 3.669489392621769e-06, + "loss": 0.38248008489608765, + "memory(GiB)": 77.0, + "step": 3542, + "token_acc": 0.9082914572864321, + "train_speed(iter/s)": 2.039399 + }, + { + "epoch": 1.13376, + "grad_norm": 0.6749944314773707, + "learning_rate": 3.668709886191667e-06, + "loss": 0.3284773826599121, + "memory(GiB)": 77.0, + "step": 3543, + "token_acc": 0.8656282998944034, + "train_speed(iter/s)": 2.035379 + }, + { + "epoch": 1.13408, + "grad_norm": 0.6470754602037923, + "learning_rate": 3.667930234335894e-06, + "loss": 0.3239328861236572, + "memory(GiB)": 77.0, + "step": 3544, + "token_acc": 0.8695359784801614, + "train_speed(iter/s)": 2.031216 + }, + { + "epoch": 1.1344, + "grad_norm": 0.6936071102789433, + "learning_rate": 3.6671504371514642e-06, + "loss": 0.3671151101589203, + "memory(GiB)": 77.0, + "step": 3545, + "token_acc": 0.9004683840749415, + "train_speed(iter/s)": 2.02705 + }, + { + "epoch": 1.13472, + "grad_norm": 0.6220013837968401, + "learning_rate": 3.6663704947354085e-06, + "loss": 0.32247593998908997, + "memory(GiB)": 77.0, + "step": 3546, + "token_acc": 0.8698034160489848, + "train_speed(iter/s)": 2.022827 + }, + { + "epoch": 1.13504, + "grad_norm": 0.5925738488146396, + "learning_rate": 3.66559040718478e-06, + "loss": 0.4320060610771179, + "memory(GiB)": 77.0, + "step": 3547, + "token_acc": 0.9045662100456621, + "train_speed(iter/s)": 2.018572 + }, + { + "epoch": 1.13536, + "grad_norm": 0.6412966154975492, + "learning_rate": 3.6648101745966436e-06, + "loss": 0.34116876125335693, + "memory(GiB)": 77.0, + "step": 3548, + "token_acc": 0.9000385951370128, + "train_speed(iter/s)": 2.01444 + }, + { + "epoch": 1.13568, + "grad_norm": 0.6261429392109175, + "learning_rate": 3.6640297970680883e-06, + "loss": 0.31621843576431274, + "memory(GiB)": 77.0, + "step": 3549, + "token_acc": 0.8891809023215068, + "train_speed(iter/s)": 2.010518 + }, + { + "epoch": 1.1360000000000001, + "grad_norm": 0.7610903598740227, + "learning_rate": 3.663249274696217e-06, + "loss": 0.4352112412452698, + "memory(GiB)": 77.0, + "step": 3550, + "token_acc": 0.9428868120456906, + "train_speed(iter/s)": 2.006481 + }, + { + "epoch": 1.13632, + "grad_norm": 0.6857802817461531, + "learning_rate": 3.6624686075781536e-06, + "loss": 0.4132325053215027, + "memory(GiB)": 77.0, + "step": 3551, + "token_acc": 0.8158439255099025, + "train_speed(iter/s)": 2.002511 + }, + { + "epoch": 1.13664, + "grad_norm": 0.6931426171311961, + "learning_rate": 3.6616877958110373e-06, + "loss": 0.308498740196228, + "memory(GiB)": 77.0, + "step": 3552, + "token_acc": 0.9068074262832181, + "train_speed(iter/s)": 1.998679 + }, + { + "epoch": 1.13696, + "grad_norm": 0.6537453974164585, + "learning_rate": 3.6609068394920278e-06, + "loss": 0.3458290100097656, + "memory(GiB)": 77.0, + "step": 3553, + "token_acc": 0.9005437245129134, + "train_speed(iter/s)": 1.994717 + }, + { + "epoch": 1.13728, + "grad_norm": 0.6571395047840654, + "learning_rate": 3.6601257387183e-06, + "loss": 0.3818706274032593, + "memory(GiB)": 77.0, + "step": 3554, + "token_acc": 0.9216247139588101, + "train_speed(iter/s)": 1.990645 + }, + { + "epoch": 1.1376, + "grad_norm": 0.6136762502537341, + 
"learning_rate": 3.659344493587049e-06, + "loss": 0.3159882426261902, + "memory(GiB)": 77.0, + "step": 3555, + "token_acc": 0.8849388379204893, + "train_speed(iter/s)": 1.986766 + }, + { + "epoch": 1.13792, + "grad_norm": 0.6355811728210663, + "learning_rate": 3.6585631041954884e-06, + "loss": 0.41659629344940186, + "memory(GiB)": 77.0, + "step": 3556, + "token_acc": 0.8539663461538461, + "train_speed(iter/s)": 1.982446 + }, + { + "epoch": 1.13824, + "grad_norm": 0.6247703628584157, + "learning_rate": 3.657781570640847e-06, + "loss": 0.3801080584526062, + "memory(GiB)": 77.0, + "step": 3557, + "token_acc": 0.8906673233193795, + "train_speed(iter/s)": 1.978459 + }, + { + "epoch": 1.13856, + "grad_norm": 0.6264636769856317, + "learning_rate": 3.656999893020374e-06, + "loss": 0.2850533425807953, + "memory(GiB)": 77.0, + "step": 3558, + "token_acc": 0.8452499502091216, + "train_speed(iter/s)": 1.974579 + }, + { + "epoch": 1.13888, + "grad_norm": 0.6811288076740211, + "learning_rate": 3.6562180714313344e-06, + "loss": 0.3032044768333435, + "memory(GiB)": 77.0, + "step": 3559, + "token_acc": 0.9504885993485342, + "train_speed(iter/s)": 1.970836 + }, + { + "epoch": 1.1392, + "grad_norm": 0.6668988112732461, + "learning_rate": 3.655436105971014e-06, + "loss": 0.3826543688774109, + "memory(GiB)": 77.0, + "step": 3560, + "token_acc": 0.9446575342465754, + "train_speed(iter/s)": 1.966939 + }, + { + "epoch": 1.13952, + "grad_norm": 0.6115666165989655, + "learning_rate": 3.6546539967367136e-06, + "loss": 0.28612515330314636, + "memory(GiB)": 77.0, + "step": 3561, + "token_acc": 0.8932245477119546, + "train_speed(iter/s)": 1.96301 + }, + { + "epoch": 1.13984, + "grad_norm": 0.6542332202118515, + "learning_rate": 3.6538717438257537e-06, + "loss": 0.27581533789634705, + "memory(GiB)": 77.0, + "step": 3562, + "token_acc": 0.909404990403071, + "train_speed(iter/s)": 1.959393 + }, + { + "epoch": 1.14016, + "grad_norm": 0.6845422422460697, + "learning_rate": 3.6530893473354723e-06, + "loss": 0.3554941415786743, + "memory(GiB)": 77.0, + "step": 3563, + "token_acc": 0.8168708020545239, + "train_speed(iter/s)": 1.955657 + }, + { + "epoch": 1.14048, + "grad_norm": 0.6624516131572644, + "learning_rate": 3.652306807363224e-06, + "loss": 0.27749568223953247, + "memory(GiB)": 77.0, + "step": 3564, + "token_acc": 0.9314090626939789, + "train_speed(iter/s)": 1.951602 + }, + { + "epoch": 1.1408, + "grad_norm": 0.6417897843490613, + "learning_rate": 3.6515241240063836e-06, + "loss": 0.37941548228263855, + "memory(GiB)": 77.0, + "step": 3565, + "token_acc": 0.8490613901572805, + "train_speed(iter/s)": 1.94762 + }, + { + "epoch": 1.14112, + "grad_norm": 0.6897521490183139, + "learning_rate": 3.650741297362342e-06, + "loss": 0.3903597295284271, + "memory(GiB)": 77.0, + "step": 3566, + "token_acc": 0.9110660486674391, + "train_speed(iter/s)": 1.943861 + }, + { + "epoch": 1.14144, + "grad_norm": 0.7344705894285207, + "learning_rate": 3.649958327528508e-06, + "loss": 0.466346800327301, + "memory(GiB)": 77.0, + "step": 3567, + "token_acc": 0.8585434173669467, + "train_speed(iter/s)": 1.940005 + }, + { + "epoch": 1.14176, + "grad_norm": 0.6446470201574291, + "learning_rate": 3.6491752146023093e-06, + "loss": 0.31701725721359253, + "memory(GiB)": 77.0, + "step": 3568, + "token_acc": 0.8875461564283317, + "train_speed(iter/s)": 1.9364 + }, + { + "epoch": 1.14208, + "grad_norm": 0.7033554314745157, + "learning_rate": 3.64839195868119e-06, + "loss": 0.41198301315307617, + "memory(GiB)": 77.0, + "step": 3569, + "token_acc": 
0.8238021638330757, + "train_speed(iter/s)": 1.932742 + }, + { + "epoch": 1.1424, + "grad_norm": 0.6145249638754366, + "learning_rate": 3.647608559862613e-06, + "loss": 0.31174173951148987, + "memory(GiB)": 77.0, + "step": 3570, + "token_acc": 0.8174990612091626, + "train_speed(iter/s)": 1.929009 + }, + { + "epoch": 1.14272, + "grad_norm": 0.6145800788716731, + "learning_rate": 3.646825018244059e-06, + "loss": 0.32191893458366394, + "memory(GiB)": 77.0, + "step": 3571, + "token_acc": 0.8706911047587018, + "train_speed(iter/s)": 1.925402 + }, + { + "epoch": 1.14304, + "grad_norm": 0.6848602991420656, + "learning_rate": 3.646041333923026e-06, + "loss": 0.34460580348968506, + "memory(GiB)": 77.0, + "step": 3572, + "token_acc": 0.8623279098873592, + "train_speed(iter/s)": 1.92165 + }, + { + "epoch": 1.14336, + "grad_norm": 0.6275547006557064, + "learning_rate": 3.6452575069970296e-06, + "loss": 0.28840339183807373, + "memory(GiB)": 77.0, + "step": 3573, + "token_acc": 0.8776183087664856, + "train_speed(iter/s)": 1.917892 + }, + { + "epoch": 1.14368, + "grad_norm": 0.6493006362572498, + "learning_rate": 3.6444735375636035e-06, + "loss": 0.330863356590271, + "memory(GiB)": 77.0, + "step": 3574, + "token_acc": 0.9351208209202251, + "train_speed(iter/s)": 1.914133 + }, + { + "epoch": 1.144, + "grad_norm": 0.5816394381082414, + "learning_rate": 3.6436894257203e-06, + "loss": 0.20655174553394318, + "memory(GiB)": 77.0, + "step": 3575, + "token_acc": 0.949054419143188, + "train_speed(iter/s)": 1.910529 + }, + { + "epoch": 1.14432, + "grad_norm": 0.5788792818682525, + "learning_rate": 3.6429051715646873e-06, + "loss": 0.3017307221889496, + "memory(GiB)": 77.0, + "step": 3576, + "token_acc": 0.9462012320328542, + "train_speed(iter/s)": 1.906401 + }, + { + "epoch": 1.1446399999999999, + "grad_norm": 0.6854605399231729, + "learning_rate": 3.6421207751943516e-06, + "loss": 0.36696094274520874, + "memory(GiB)": 77.0, + "step": 3577, + "token_acc": 0.9107796228539262, + "train_speed(iter/s)": 1.902821 + }, + { + "epoch": 1.14496, + "grad_norm": 0.6664850307276097, + "learning_rate": 3.641336236706899e-06, + "loss": 0.315309077501297, + "memory(GiB)": 77.0, + "step": 3578, + "token_acc": 0.9109291016124904, + "train_speed(iter/s)": 1.899104 + }, + { + "epoch": 1.14528, + "grad_norm": 0.6523424793400482, + "learning_rate": 3.640551556199951e-06, + "loss": 0.3141142427921295, + "memory(GiB)": 77.0, + "step": 3579, + "token_acc": 0.8792661274413099, + "train_speed(iter/s)": 1.895723 + }, + { + "epoch": 1.1456, + "grad_norm": 0.6771339784620579, + "learning_rate": 3.6397667337711475e-06, + "loss": 0.2940749526023865, + "memory(GiB)": 77.0, + "step": 3580, + "token_acc": 0.9317939609236234, + "train_speed(iter/s)": 1.892365 + }, + { + "epoch": 1.14592, + "grad_norm": 0.6866595731604332, + "learning_rate": 3.6389817695181458e-06, + "loss": 0.43107810616493225, + "memory(GiB)": 77.0, + "step": 3581, + "token_acc": 0.872421152478065, + "train_speed(iter/s)": 1.888502 + }, + { + "epoch": 1.14624, + "grad_norm": 0.6060594419329419, + "learning_rate": 3.638196663538621e-06, + "loss": 0.3286105692386627, + "memory(GiB)": 77.0, + "step": 3582, + "token_acc": 0.892925066565234, + "train_speed(iter/s)": 1.884942 + }, + { + "epoch": 1.14656, + "grad_norm": 0.6998379413592722, + "learning_rate": 3.637411415930267e-06, + "loss": 0.3567737638950348, + "memory(GiB)": 77.0, + "step": 3583, + "token_acc": 0.9142572283150548, + "train_speed(iter/s)": 1.881487 + }, + { + "epoch": 1.14688, + "grad_norm": 0.6195653566647824, + 
"learning_rate": 3.6366260267907927e-06, + "loss": 0.36254093050956726, + "memory(GiB)": 77.0, + "step": 3584, + "token_acc": 0.8700654184525152, + "train_speed(iter/s)": 1.877959 + }, + { + "epoch": 1.1472, + "grad_norm": 0.6928819091887256, + "learning_rate": 3.635840496217927e-06, + "loss": 0.3441115617752075, + "memory(GiB)": 77.0, + "step": 3585, + "token_acc": 0.8355814801312431, + "train_speed(iter/s)": 1.874627 + }, + { + "epoch": 1.14752, + "grad_norm": 0.6142364184578485, + "learning_rate": 3.6350548243094154e-06, + "loss": 0.2832269072532654, + "memory(GiB)": 77.0, + "step": 3586, + "token_acc": 0.9351824817518248, + "train_speed(iter/s)": 1.871158 + }, + { + "epoch": 1.14784, + "grad_norm": 0.6503296719123421, + "learning_rate": 3.634269011163021e-06, + "loss": 0.44322896003723145, + "memory(GiB)": 77.0, + "step": 3587, + "token_acc": 0.821949602122016, + "train_speed(iter/s)": 1.867552 + }, + { + "epoch": 1.14816, + "grad_norm": 0.6188064153096035, + "learning_rate": 3.633483056876525e-06, + "loss": 0.3232051730155945, + "memory(GiB)": 77.0, + "step": 3588, + "token_acc": 0.8318471337579618, + "train_speed(iter/s)": 1.864038 + }, + { + "epoch": 1.14848, + "grad_norm": 0.6230722862841253, + "learning_rate": 3.632696961547726e-06, + "loss": 0.26821911334991455, + "memory(GiB)": 77.0, + "step": 3589, + "token_acc": 0.9321266968325792, + "train_speed(iter/s)": 1.860465 + }, + { + "epoch": 1.1488, + "grad_norm": 0.6170597197456319, + "learning_rate": 3.631910725274439e-06, + "loss": 0.3192797899246216, + "memory(GiB)": 77.0, + "step": 3590, + "token_acc": 0.921280276816609, + "train_speed(iter/s)": 1.857016 + }, + { + "epoch": 1.14912, + "grad_norm": 0.7047364810236553, + "learning_rate": 3.6311243481544985e-06, + "loss": 0.32326406240463257, + "memory(GiB)": 77.0, + "step": 3591, + "token_acc": 0.8132393084622384, + "train_speed(iter/s)": 1.853712 + }, + { + "epoch": 1.14944, + "grad_norm": 0.6699233439177746, + "learning_rate": 3.6303378302857538e-06, + "loss": 0.3014705777168274, + "memory(GiB)": 77.0, + "step": 3592, + "token_acc": 0.9207383279044516, + "train_speed(iter/s)": 1.850374 + }, + { + "epoch": 1.1497600000000001, + "grad_norm": 0.6259812293910895, + "learning_rate": 3.629551171766076e-06, + "loss": 0.28643798828125, + "memory(GiB)": 77.0, + "step": 3593, + "token_acc": 0.8838748495788207, + "train_speed(iter/s)": 1.847082 + }, + { + "epoch": 1.15008, + "grad_norm": 0.7211364378573989, + "learning_rate": 3.628764372693348e-06, + "loss": 0.3470749855041504, + "memory(GiB)": 77.0, + "step": 3594, + "token_acc": 0.8641207815275311, + "train_speed(iter/s)": 1.843875 + }, + { + "epoch": 1.1504, + "grad_norm": 0.6407837712700762, + "learning_rate": 3.627977433165476e-06, + "loss": 0.26119571924209595, + "memory(GiB)": 77.0, + "step": 3595, + "token_acc": 0.8694646397884996, + "train_speed(iter/s)": 1.840675 + }, + { + "epoch": 1.15072, + "grad_norm": 0.6291752626422732, + "learning_rate": 3.62719035328038e-06, + "loss": 0.3188675045967102, + "memory(GiB)": 77.0, + "step": 3596, + "token_acc": 0.8514741742407449, + "train_speed(iter/s)": 1.83752 + }, + { + "epoch": 1.15104, + "grad_norm": 0.5858679752414906, + "learning_rate": 3.6264031331359983e-06, + "loss": 0.27052706480026245, + "memory(GiB)": 77.0, + "step": 3597, + "token_acc": 0.9234285714285714, + "train_speed(iter/s)": 1.834187 + }, + { + "epoch": 1.15136, + "grad_norm": 0.6378639223982914, + "learning_rate": 3.6256157728302864e-06, + "loss": 0.31900328397750854, + "memory(GiB)": 77.0, + "step": 3598, + "token_acc": 
0.9102641056422569, + "train_speed(iter/s)": 1.83096 + }, + { + "epoch": 1.15168, + "grad_norm": 0.6226636785654236, + "learning_rate": 3.624828272461219e-06, + "loss": 0.39409831166267395, + "memory(GiB)": 77.0, + "step": 3599, + "token_acc": 0.9253837915030346, + "train_speed(iter/s)": 1.827498 + }, + { + "epoch": 1.152, + "grad_norm": 0.7980596998935068, + "learning_rate": 3.624040632126785e-06, + "loss": 0.37453439831733704, + "memory(GiB)": 77.0, + "step": 3600, + "token_acc": 0.8730822873082287, + "train_speed(iter/s)": 1.824371 + }, + { + "epoch": 1.15232, + "grad_norm": 0.6054784466161208, + "learning_rate": 3.623252851924994e-06, + "loss": 0.3909461498260498, + "memory(GiB)": 77.0, + "step": 3601, + "token_acc": 0.8956366420005103, + "train_speed(iter/s)": 1.821128 + }, + { + "epoch": 1.1526399999999999, + "grad_norm": 0.6201521375311255, + "learning_rate": 3.622464931953871e-06, + "loss": 0.27674728631973267, + "memory(GiB)": 77.0, + "step": 3602, + "token_acc": 0.9648774022531478, + "train_speed(iter/s)": 1.817952 + }, + { + "epoch": 1.15296, + "grad_norm": 0.6326049855770897, + "learning_rate": 3.6216768723114586e-06, + "loss": 0.28646594285964966, + "memory(GiB)": 77.0, + "step": 3603, + "token_acc": 0.8651244853950206, + "train_speed(iter/s)": 1.814899 + }, + { + "epoch": 1.15328, + "grad_norm": 0.7757722636173978, + "learning_rate": 3.6208886730958183e-06, + "loss": 0.37313950061798096, + "memory(GiB)": 77.0, + "step": 3604, + "token_acc": 0.8706080370292447, + "train_speed(iter/s)": 1.811806 + }, + { + "epoch": 1.1536, + "grad_norm": 0.6277869028030757, + "learning_rate": 3.6201003344050275e-06, + "loss": 0.32831308245658875, + "memory(GiB)": 77.0, + "step": 3605, + "token_acc": 0.9132688320663441, + "train_speed(iter/s)": 1.808648 + }, + { + "epoch": 1.15392, + "grad_norm": 0.7299578005951746, + "learning_rate": 3.6193118563371797e-06, + "loss": 0.3508565425872803, + "memory(GiB)": 77.0, + "step": 3606, + "token_acc": 0.859626802374894, + "train_speed(iter/s)": 1.805626 + }, + { + "epoch": 1.15424, + "grad_norm": 0.6603212811452173, + "learning_rate": 3.6185232389903894e-06, + "loss": 0.39734888076782227, + "memory(GiB)": 77.0, + "step": 3607, + "token_acc": 0.8736842105263158, + "train_speed(iter/s)": 1.802385 + }, + { + "epoch": 1.15456, + "grad_norm": 0.5827632089992308, + "learning_rate": 3.6177344824627854e-06, + "loss": 0.27895456552505493, + "memory(GiB)": 77.0, + "step": 3608, + "token_acc": 0.9418512658227848, + "train_speed(iter/s)": 1.799383 + }, + { + "epoch": 1.15488, + "grad_norm": 0.7179923717411046, + "learning_rate": 3.6169455868525145e-06, + "loss": 0.3828701972961426, + "memory(GiB)": 77.0, + "step": 3609, + "token_acc": 0.8192453933898801, + "train_speed(iter/s)": 1.796388 + }, + { + "epoch": 1.1552, + "grad_norm": 0.6874897017339814, + "learning_rate": 3.6161565522577417e-06, + "loss": 0.2991190552711487, + "memory(GiB)": 77.0, + "step": 3610, + "token_acc": 0.8836799436023969, + "train_speed(iter/s)": 1.793214 + }, + { + "epoch": 1.15552, + "grad_norm": 0.6819643726048226, + "learning_rate": 3.615367378776648e-06, + "loss": 0.3983204960823059, + "memory(GiB)": 77.0, + "step": 3611, + "token_acc": 0.8762283575105287, + "train_speed(iter/s)": 1.790095 + }, + { + "epoch": 1.15584, + "grad_norm": 0.59737902214831, + "learning_rate": 3.6145780665074337e-06, + "loss": 0.271129846572876, + "memory(GiB)": 77.0, + "step": 3612, + "token_acc": 0.897127588510354, + "train_speed(iter/s)": 1.787022 + }, + { + "epoch": 1.15616, + "grad_norm": 0.6534339210029032, + 
"learning_rate": 3.613788615548313e-06, + "loss": 0.35619109869003296, + "memory(GiB)": 77.0, + "step": 3613, + "token_acc": 0.8576923076923076, + "train_speed(iter/s)": 1.783886 + }, + { + "epoch": 1.15648, + "grad_norm": 0.616033060364833, + "learning_rate": 3.612999025997521e-06, + "loss": 0.25038421154022217, + "memory(GiB)": 77.0, + "step": 3614, + "token_acc": 0.9574747917580009, + "train_speed(iter/s)": 1.780927 + }, + { + "epoch": 1.1568, + "grad_norm": 0.6081036641617542, + "learning_rate": 3.6122092979533073e-06, + "loss": 0.3277726471424103, + "memory(GiB)": 77.0, + "step": 3615, + "token_acc": 0.9056501757231684, + "train_speed(iter/s)": 1.777753 + }, + { + "epoch": 1.15712, + "grad_norm": 0.6428515272504695, + "learning_rate": 3.611419431513941e-06, + "loss": 0.2950807809829712, + "memory(GiB)": 77.0, + "step": 3616, + "token_acc": 0.9042284062161186, + "train_speed(iter/s)": 1.774719 + }, + { + "epoch": 1.15744, + "grad_norm": 0.6750304390961652, + "learning_rate": 3.610629426777707e-06, + "loss": 0.30070656538009644, + "memory(GiB)": 77.0, + "step": 3617, + "token_acc": 0.920824295010846, + "train_speed(iter/s)": 1.771768 + }, + { + "epoch": 1.1577600000000001, + "grad_norm": 0.6370604102686728, + "learning_rate": 3.609839283842907e-06, + "loss": 0.3819066286087036, + "memory(GiB)": 77.0, + "step": 3618, + "token_acc": 0.9167676767676768, + "train_speed(iter/s)": 1.768805 + }, + { + "epoch": 1.15808, + "grad_norm": 0.6726582017648991, + "learning_rate": 3.609049002807861e-06, + "loss": 0.31671303510665894, + "memory(GiB)": 77.0, + "step": 3619, + "token_acc": 0.8880726015557476, + "train_speed(iter/s)": 1.765724 + }, + { + "epoch": 1.1584, + "grad_norm": 0.7001977801560622, + "learning_rate": 3.608258583770905e-06, + "loss": 0.4127262532711029, + "memory(GiB)": 77.0, + "step": 3620, + "token_acc": 0.871821811304896, + "train_speed(iter/s)": 1.762614 + }, + { + "epoch": 1.15872, + "grad_norm": 0.7358167846311201, + "learning_rate": 3.6074680268303947e-06, + "loss": 0.2864014506340027, + "memory(GiB)": 77.0, + "step": 3621, + "token_acc": 0.8589189189189189, + "train_speed(iter/s)": 1.759753 + }, + { + "epoch": 1.15904, + "grad_norm": 0.6934921323389949, + "learning_rate": 3.606677332084699e-06, + "loss": 0.40353143215179443, + "memory(GiB)": 77.0, + "step": 3622, + "token_acc": 0.9012803234501348, + "train_speed(iter/s)": 1.756756 + }, + { + "epoch": 1.15936, + "grad_norm": 0.6819303381910303, + "learning_rate": 3.6058864996322084e-06, + "loss": 0.28570806980133057, + "memory(GiB)": 77.0, + "step": 3623, + "token_acc": 0.951433389544688, + "train_speed(iter/s)": 1.753687 + }, + { + "epoch": 1.15968, + "grad_norm": 0.6313582803556997, + "learning_rate": 3.605095529571327e-06, + "loss": 0.3280940353870392, + "memory(GiB)": 77.0, + "step": 3624, + "token_acc": 0.8920844327176781, + "train_speed(iter/s)": 1.750744 + }, + { + "epoch": 1.16, + "grad_norm": 0.658655218858104, + "learning_rate": 3.6043044220004774e-06, + "loss": 0.3326376676559448, + "memory(GiB)": 77.0, + "step": 3625, + "token_acc": 0.9138983050847458, + "train_speed(iter/s)": 1.747761 + }, + { + "epoch": 1.16032, + "grad_norm": 0.6283551371595605, + "learning_rate": 3.603513177018099e-06, + "loss": 0.29372647404670715, + "memory(GiB)": 77.0, + "step": 3626, + "token_acc": 0.9233406045497039, + "train_speed(iter/s)": 1.744803 + }, + { + "epoch": 1.16064, + "grad_norm": 0.6348055048902126, + "learning_rate": 3.602721794722649e-06, + "loss": 0.357930451631546, + "memory(GiB)": 77.0, + "step": 3627, + "token_acc": 
0.9112013929193268, + "train_speed(iter/s)": 1.741869 + }, + { + "epoch": 1.16096, + "grad_norm": 0.627448276890979, + "learning_rate": 3.601930275212601e-06, + "loss": 0.347595751285553, + "memory(GiB)": 77.0, + "step": 3628, + "token_acc": 0.8707782672540382, + "train_speed(iter/s)": 1.73885 + }, + { + "epoch": 1.16128, + "grad_norm": 0.641637701683579, + "learning_rate": 3.601138618586446e-06, + "loss": 0.36199814081192017, + "memory(GiB)": 77.0, + "step": 3629, + "token_acc": 0.9071170084439083, + "train_speed(iter/s)": 1.735809 + }, + { + "epoch": 1.1616, + "grad_norm": 0.6713757898797016, + "learning_rate": 3.600346824942692e-06, + "loss": 0.41257137060165405, + "memory(GiB)": 77.0, + "step": 3630, + "token_acc": 0.7802452767649983, + "train_speed(iter/s)": 1.732835 + }, + { + "epoch": 1.16192, + "grad_norm": 0.7066056456957611, + "learning_rate": 3.5995548943798637e-06, + "loss": 0.35115814208984375, + "memory(GiB)": 77.0, + "step": 3631, + "token_acc": 0.9162687042343203, + "train_speed(iter/s)": 1.730031 + }, + { + "epoch": 1.16224, + "grad_norm": 0.6557619624640404, + "learning_rate": 3.5987628269965023e-06, + "loss": 0.3816465735435486, + "memory(GiB)": 77.0, + "step": 3632, + "token_acc": 0.8564800494896381, + "train_speed(iter/s)": 1.726815 + }, + { + "epoch": 1.16256, + "grad_norm": 0.6848301302501283, + "learning_rate": 3.5979706228911693e-06, + "loss": 0.335328608751297, + "memory(GiB)": 77.0, + "step": 3633, + "token_acc": 0.9428778018799711, + "train_speed(iter/s)": 1.723894 + }, + { + "epoch": 1.16288, + "grad_norm": 0.6522175879160733, + "learning_rate": 3.597178282162438e-06, + "loss": 0.25217974185943604, + "memory(GiB)": 77.0, + "step": 3634, + "token_acc": 0.9620215311004785, + "train_speed(iter/s)": 1.721105 + }, + { + "epoch": 1.1632, + "grad_norm": 0.6471883031876889, + "learning_rate": 3.5963858049089034e-06, + "loss": 0.41062650084495544, + "memory(GiB)": 77.0, + "step": 3635, + "token_acc": 0.9061824470384782, + "train_speed(iter/s)": 1.718281 + }, + { + "epoch": 1.16352, + "grad_norm": 0.620214697712343, + "learning_rate": 3.595593191229174e-06, + "loss": 0.3142718970775604, + "memory(GiB)": 77.0, + "step": 3636, + "token_acc": 0.8817234581807941, + "train_speed(iter/s)": 1.715387 + }, + { + "epoch": 1.16384, + "grad_norm": 0.6421929526435168, + "learning_rate": 3.5948004412218783e-06, + "loss": 0.32360804080963135, + "memory(GiB)": 77.0, + "step": 3637, + "token_acc": 0.932062966031483, + "train_speed(iter/s)": 1.712582 + }, + { + "epoch": 1.16416, + "grad_norm": 0.6552254659920955, + "learning_rate": 3.59400755498566e-06, + "loss": 0.3694862723350525, + "memory(GiB)": 77.0, + "step": 3638, + "token_acc": 0.9055016181229774, + "train_speed(iter/s)": 1.708049 + }, + { + "epoch": 1.16448, + "grad_norm": 0.5978627216622168, + "learning_rate": 3.5932145326191793e-06, + "loss": 0.3928642272949219, + "memory(GiB)": 77.0, + "step": 3639, + "token_acc": 0.9241140215716487, + "train_speed(iter/s)": 1.705139 + }, + { + "epoch": 1.1648, + "grad_norm": 0.670515070463898, + "learning_rate": 3.5924213742211145e-06, + "loss": 0.3136131167411804, + "memory(GiB)": 77.0, + "step": 3640, + "token_acc": 0.8449438202247191, + "train_speed(iter/s)": 1.702475 + }, + { + "epoch": 1.16512, + "grad_norm": 0.7212778315152248, + "learning_rate": 3.5916280798901604e-06, + "loss": 0.37144795060157776, + "memory(GiB)": 77.0, + "step": 3641, + "token_acc": 0.9037037037037037, + "train_speed(iter/s)": 1.699804 + }, + { + "epoch": 1.16544, + "grad_norm": 0.6316517481819246, + 
"learning_rate": 3.590834649725028e-06, + "loss": 0.28200334310531616, + "memory(GiB)": 77.0, + "step": 3642, + "token_acc": 0.8675072343943778, + "train_speed(iter/s)": 1.696983 + }, + { + "epoch": 1.16576, + "grad_norm": 0.6546341346027491, + "learning_rate": 3.5900410838244472e-06, + "loss": 0.36204102635383606, + "memory(GiB)": 77.0, + "step": 3643, + "token_acc": 0.8664402942840973, + "train_speed(iter/s)": 1.694236 + }, + { + "epoch": 1.16608, + "grad_norm": 0.6791856546487026, + "learning_rate": 3.589247382287163e-06, + "loss": 0.39712175726890564, + "memory(GiB)": 77.0, + "step": 3644, + "token_acc": 0.8981452373467463, + "train_speed(iter/s)": 1.691453 + }, + { + "epoch": 1.1663999999999999, + "grad_norm": 0.6445573560093744, + "learning_rate": 3.5884535452119374e-06, + "loss": 0.2656974792480469, + "memory(GiB)": 77.0, + "step": 3645, + "token_acc": 0.9452054794520548, + "train_speed(iter/s)": 1.688619 + }, + { + "epoch": 1.16672, + "grad_norm": 0.636814922168023, + "learning_rate": 3.58765957269755e-06, + "loss": 0.2966352105140686, + "memory(GiB)": 77.0, + "step": 3646, + "token_acc": 0.9474206349206349, + "train_speed(iter/s)": 1.685729 + }, + { + "epoch": 1.16704, + "grad_norm": 0.6703202542703832, + "learning_rate": 3.586865464842797e-06, + "loss": 0.32198458909988403, + "memory(GiB)": 77.0, + "step": 3647, + "token_acc": 0.9228571428571428, + "train_speed(iter/s)": 1.682967 + }, + { + "epoch": 1.16736, + "grad_norm": 0.753397505772478, + "learning_rate": 3.586071221746492e-06, + "loss": 0.39454859495162964, + "memory(GiB)": 77.0, + "step": 3648, + "token_acc": 0.8506602641056422, + "train_speed(iter/s)": 1.680433 + }, + { + "epoch": 1.16768, + "grad_norm": 0.6262510969249151, + "learning_rate": 3.5852768435074627e-06, + "loss": 0.2750593423843384, + "memory(GiB)": 77.0, + "step": 3649, + "token_acc": 0.933181688939437, + "train_speed(iter/s)": 1.677849 + }, + { + "epoch": 1.168, + "grad_norm": 0.6273157018718302, + "learning_rate": 3.5844823302245567e-06, + "loss": 0.3688696324825287, + "memory(GiB)": 77.0, + "step": 3650, + "token_acc": 0.8871111111111111, + "train_speed(iter/s)": 1.675203 + }, + { + "epoch": 1.16832, + "grad_norm": 0.6539312298564423, + "learning_rate": 3.5836876819966382e-06, + "loss": 0.39703333377838135, + "memory(GiB)": 77.0, + "step": 3651, + "token_acc": 0.8664465786314526, + "train_speed(iter/s)": 1.672594 + }, + { + "epoch": 1.16864, + "grad_norm": 0.7159359852004399, + "learning_rate": 3.582892898922587e-06, + "loss": 0.3992488384246826, + "memory(GiB)": 77.0, + "step": 3652, + "token_acc": 0.890604467805519, + "train_speed(iter/s)": 1.670055 + }, + { + "epoch": 1.16896, + "grad_norm": 0.6711399760750906, + "learning_rate": 3.5820979811012994e-06, + "loss": 0.37974002957344055, + "memory(GiB)": 77.0, + "step": 3653, + "token_acc": 0.8772938920843605, + "train_speed(iter/s)": 1.667418 + }, + { + "epoch": 1.16928, + "grad_norm": 0.5779475536612132, + "learning_rate": 3.5813029286316904e-06, + "loss": 0.33978259563446045, + "memory(GiB)": 77.0, + "step": 3654, + "token_acc": 0.9305135951661632, + "train_speed(iter/s)": 1.664821 + }, + { + "epoch": 1.1696, + "grad_norm": 0.6332101548859659, + "learning_rate": 3.5805077416126897e-06, + "loss": 0.21661311388015747, + "memory(GiB)": 77.0, + "step": 3655, + "token_acc": 0.9506925207756233, + "train_speed(iter/s)": 1.662208 + }, + { + "epoch": 1.16992, + "grad_norm": 0.7188919321302288, + "learning_rate": 3.5797124201432442e-06, + "loss": 0.3496176302433014, + "memory(GiB)": 77.0, + "step": 3656, + 
"token_acc": 0.9030790762771168, + "train_speed(iter/s)": 1.659668 + }, + { + "epoch": 1.17024, + "grad_norm": 0.5948054313873871, + "learning_rate": 3.5789169643223188e-06, + "loss": 0.390167772769928, + "memory(GiB)": 77.0, + "step": 3657, + "token_acc": 0.8304259634888438, + "train_speed(iter/s)": 1.656809 + }, + { + "epoch": 1.17056, + "grad_norm": 0.7032146789334169, + "learning_rate": 3.578121374248893e-06, + "loss": 0.3636506199836731, + "memory(GiB)": 77.0, + "step": 3658, + "token_acc": 0.8862275449101796, + "train_speed(iter/s)": 1.654136 + }, + { + "epoch": 1.17088, + "grad_norm": 0.7440819166114769, + "learning_rate": 3.5773256500219645e-06, + "loss": 0.3313729166984558, + "memory(GiB)": 77.0, + "step": 3659, + "token_acc": 0.875324114088159, + "train_speed(iter/s)": 1.651586 + }, + { + "epoch": 1.1712, + "grad_norm": 0.7541974222745524, + "learning_rate": 3.5765297917405484e-06, + "loss": 0.40346479415893555, + "memory(GiB)": 77.0, + "step": 3660, + "token_acc": 0.900277863538129, + "train_speed(iter/s)": 1.648912 + }, + { + "epoch": 1.1715200000000001, + "grad_norm": 0.6171815790087393, + "learning_rate": 3.575733799503674e-06, + "loss": 0.31491124629974365, + "memory(GiB)": 77.0, + "step": 3661, + "token_acc": 0.8594928032899246, + "train_speed(iter/s)": 1.646213 + }, + { + "epoch": 1.17184, + "grad_norm": 0.5950399082438387, + "learning_rate": 3.57493767341039e-06, + "loss": 0.27681922912597656, + "memory(GiB)": 77.0, + "step": 3662, + "token_acc": 0.8878853939582685, + "train_speed(iter/s)": 1.643696 + }, + { + "epoch": 1.17216, + "grad_norm": 0.6292656136552995, + "learning_rate": 3.57414141355976e-06, + "loss": 0.2693694233894348, + "memory(GiB)": 77.0, + "step": 3663, + "token_acc": 0.9090531561461794, + "train_speed(iter/s)": 1.641128 + }, + { + "epoch": 1.17248, + "grad_norm": 0.675160439602387, + "learning_rate": 3.5733450200508647e-06, + "loss": 0.33163297176361084, + "memory(GiB)": 77.0, + "step": 3664, + "token_acc": 0.905551550108147, + "train_speed(iter/s)": 1.638613 + }, + { + "epoch": 1.1728, + "grad_norm": 0.6285404831208922, + "learning_rate": 3.5725484929828007e-06, + "loss": 0.3260619044303894, + "memory(GiB)": 77.0, + "step": 3665, + "token_acc": 0.9216309240137676, + "train_speed(iter/s)": 1.636097 + }, + { + "epoch": 1.17312, + "grad_norm": 0.6447864179203654, + "learning_rate": 3.5717518324546835e-06, + "loss": 0.3648034632205963, + "memory(GiB)": 77.0, + "step": 3666, + "token_acc": 0.8873689010487916, + "train_speed(iter/s)": 1.633361 + }, + { + "epoch": 1.17344, + "grad_norm": 0.6305828561598046, + "learning_rate": 3.570955038565642e-06, + "loss": 0.2704196870326996, + "memory(GiB)": 77.0, + "step": 3667, + "token_acc": 0.8970744680851064, + "train_speed(iter/s)": 1.630918 + }, + { + "epoch": 1.17376, + "grad_norm": 0.6076885281611738, + "learning_rate": 3.570158111414824e-06, + "loss": 0.2414412498474121, + "memory(GiB)": 77.0, + "step": 3668, + "token_acc": 0.8831205673758865, + "train_speed(iter/s)": 1.62849 + }, + { + "epoch": 1.17408, + "grad_norm": 0.6148867064928747, + "learning_rate": 3.5693610511013953e-06, + "loss": 0.39599883556365967, + "memory(GiB)": 77.0, + "step": 3669, + "token_acc": 0.8809613928841786, + "train_speed(iter/s)": 1.625927 + }, + { + "epoch": 1.1743999999999999, + "grad_norm": 0.6331053774150448, + "learning_rate": 3.5685638577245334e-06, + "loss": 0.36475253105163574, + "memory(GiB)": 77.0, + "step": 3670, + "token_acc": 0.8462414578587699, + "train_speed(iter/s)": 1.623423 + }, + { + "epoch": 1.17472, + "grad_norm": 
0.6539503727787159, + "learning_rate": 3.567766531383436e-06, + "loss": 0.34280335903167725, + "memory(GiB)": 77.0, + "step": 3671, + "token_acc": 0.8667621776504298, + "train_speed(iter/s)": 1.62102 + }, + { + "epoch": 1.17504, + "grad_norm": 0.6951530158958741, + "learning_rate": 3.566969072177317e-06, + "loss": 0.33647793531417847, + "memory(GiB)": 77.0, + "step": 3672, + "token_acc": 0.9029505865623889, + "train_speed(iter/s)": 1.618625 + }, + { + "epoch": 1.17536, + "grad_norm": 0.6617990152789229, + "learning_rate": 3.566171480205406e-06, + "loss": 0.3327045142650604, + "memory(GiB)": 77.0, + "step": 3673, + "token_acc": 0.9142990218910108, + "train_speed(iter/s)": 1.616138 + }, + { + "epoch": 1.17568, + "grad_norm": 0.6341809905745461, + "learning_rate": 3.56537375556695e-06, + "loss": 0.3443911373615265, + "memory(GiB)": 77.0, + "step": 3674, + "token_acc": 0.9476513204019631, + "train_speed(iter/s)": 1.613781 + }, + { + "epoch": 1.176, + "grad_norm": 0.6057400731385676, + "learning_rate": 3.564575898361211e-06, + "loss": 0.28278470039367676, + "memory(GiB)": 77.0, + "step": 3675, + "token_acc": 0.9116438356164384, + "train_speed(iter/s)": 1.610932 + }, + { + "epoch": 1.17632, + "grad_norm": 0.6852317248303977, + "learning_rate": 3.5637779086874693e-06, + "loss": 0.36918461322784424, + "memory(GiB)": 77.0, + "step": 3676, + "token_acc": 0.89991961414791, + "train_speed(iter/s)": 1.608598 + }, + { + "epoch": 1.17664, + "grad_norm": 0.6652170038062273, + "learning_rate": 3.5629797866450204e-06, + "loss": 0.2952655255794525, + "memory(GiB)": 77.0, + "step": 3677, + "token_acc": 0.9081415174765558, + "train_speed(iter/s)": 1.606289 + }, + { + "epoch": 1.17696, + "grad_norm": 0.6673130177677755, + "learning_rate": 3.5621815323331778e-06, + "loss": 0.3886156678199768, + "memory(GiB)": 77.0, + "step": 3678, + "token_acc": 0.8123543123543123, + "train_speed(iter/s)": 1.60395 + }, + { + "epoch": 1.17728, + "grad_norm": 0.6399907626243192, + "learning_rate": 3.5613831458512686e-06, + "loss": 0.2815510928630829, + "memory(GiB)": 77.0, + "step": 3679, + "token_acc": 0.8646329837940896, + "train_speed(iter/s)": 1.601684 + }, + { + "epoch": 1.1776, + "grad_norm": 0.6926196452366553, + "learning_rate": 3.5605846272986398e-06, + "loss": 0.34570831060409546, + "memory(GiB)": 77.0, + "step": 3680, + "token_acc": 0.8679856115107913, + "train_speed(iter/s)": 1.599377 + }, + { + "epoch": 1.17792, + "grad_norm": 0.6446930214371878, + "learning_rate": 3.5597859767746524e-06, + "loss": 0.22810208797454834, + "memory(GiB)": 77.0, + "step": 3681, + "token_acc": 0.9499400479616307, + "train_speed(iter/s)": 1.597074 + }, + { + "epoch": 1.17824, + "grad_norm": 0.6114886100347405, + "learning_rate": 3.5589871943786848e-06, + "loss": 0.2986004054546356, + "memory(GiB)": 77.0, + "step": 3682, + "token_acc": 0.9254136823907138, + "train_speed(iter/s)": 1.594785 + }, + { + "epoch": 1.17856, + "grad_norm": 0.6618862532017316, + "learning_rate": 3.558188280210131e-06, + "loss": 0.3892286419868469, + "memory(GiB)": 77.0, + "step": 3683, + "token_acc": 0.9046725073007926, + "train_speed(iter/s)": 1.592473 + }, + { + "epoch": 1.17888, + "grad_norm": 0.6108179328463957, + "learning_rate": 3.557389234368403e-06, + "loss": 0.3466079533100128, + "memory(GiB)": 77.0, + "step": 3684, + "token_acc": 0.8512755102040817, + "train_speed(iter/s)": 1.590083 + }, + { + "epoch": 1.1792, + "grad_norm": 0.6360383360966985, + "learning_rate": 3.5565900569529276e-06, + "loss": 0.30487313866615295, + "memory(GiB)": 77.0, + "step": 3685, + 
"token_acc": 0.902782797253343, + "train_speed(iter/s)": 1.58781 + }, + { + "epoch": 1.1795200000000001, + "grad_norm": 0.6230804542647563, + "learning_rate": 3.5557907480631486e-06, + "loss": 0.2935771346092224, + "memory(GiB)": 77.0, + "step": 3686, + "token_acc": 0.9385113268608414, + "train_speed(iter/s)": 1.585435 + }, + { + "epoch": 1.17984, + "grad_norm": 0.6673499061856862, + "learning_rate": 3.5549913077985265e-06, + "loss": 0.37562593817710876, + "memory(GiB)": 77.0, + "step": 3687, + "token_acc": 0.8095058468502452, + "train_speed(iter/s)": 1.583065 + }, + { + "epoch": 1.1801599999999999, + "grad_norm": 0.594628632346417, + "learning_rate": 3.5541917362585376e-06, + "loss": 0.2436932623386383, + "memory(GiB)": 77.0, + "step": 3688, + "token_acc": 0.924074074074074, + "train_speed(iter/s)": 1.580747 + }, + { + "epoch": 1.18048, + "grad_norm": 0.5971622039727421, + "learning_rate": 3.553392033542674e-06, + "loss": 0.30680444836616516, + "memory(GiB)": 77.0, + "step": 3689, + "token_acc": 0.8988617886178861, + "train_speed(iter/s)": 1.578358 + }, + { + "epoch": 1.1808, + "grad_norm": 0.629443760422355, + "learning_rate": 3.5525921997504465e-06, + "loss": 0.3385128974914551, + "memory(GiB)": 77.0, + "step": 3690, + "token_acc": 0.9555963302752294, + "train_speed(iter/s)": 1.576055 + }, + { + "epoch": 1.18112, + "grad_norm": 0.6210095709339016, + "learning_rate": 3.5517922349813787e-06, + "loss": 0.31716597080230713, + "memory(GiB)": 77.0, + "step": 3691, + "token_acc": 0.861843894479195, + "train_speed(iter/s)": 1.573798 + }, + { + "epoch": 1.18144, + "grad_norm": 0.6355801173604908, + "learning_rate": 3.5509921393350133e-06, + "loss": 0.278892457485199, + "memory(GiB)": 77.0, + "step": 3692, + "token_acc": 0.9204265791632485, + "train_speed(iter/s)": 1.571512 + }, + { + "epoch": 1.18176, + "grad_norm": 0.6050325987032856, + "learning_rate": 3.550191912910908e-06, + "loss": 0.28753089904785156, + "memory(GiB)": 77.0, + "step": 3693, + "token_acc": 0.9318787878787879, + "train_speed(iter/s)": 1.569304 + }, + { + "epoch": 1.18208, + "grad_norm": 0.653295600977906, + "learning_rate": 3.5493915558086376e-06, + "loss": 0.33791351318359375, + "memory(GiB)": 77.0, + "step": 3694, + "token_acc": 0.9248771355019892, + "train_speed(iter/s)": 1.567115 + }, + { + "epoch": 1.1824, + "grad_norm": 0.6331379512526556, + "learning_rate": 3.5485910681277923e-06, + "loss": 0.36483269929885864, + "memory(GiB)": 77.0, + "step": 3695, + "token_acc": 0.9493161705551086, + "train_speed(iter/s)": 1.564612 + }, + { + "epoch": 1.18272, + "grad_norm": 0.7057741208308101, + "learning_rate": 3.5477904499679784e-06, + "loss": 0.4182601571083069, + "memory(GiB)": 77.0, + "step": 3696, + "token_acc": 0.8491010904804008, + "train_speed(iter/s)": 1.562361 + }, + { + "epoch": 1.18304, + "grad_norm": 0.6878219512027604, + "learning_rate": 3.54698970142882e-06, + "loss": 0.35009026527404785, + "memory(GiB)": 77.0, + "step": 3697, + "token_acc": 0.898061424526247, + "train_speed(iter/s)": 1.560221 + }, + { + "epoch": 1.18336, + "grad_norm": 0.6633505059684119, + "learning_rate": 3.546188822609956e-06, + "loss": 0.3064991533756256, + "memory(GiB)": 77.0, + "step": 3698, + "token_acc": 0.9511596180081855, + "train_speed(iter/s)": 1.55804 + }, + { + "epoch": 1.18368, + "grad_norm": 0.6581573834236242, + "learning_rate": 3.545387813611041e-06, + "loss": 0.32516276836395264, + "memory(GiB)": 77.0, + "step": 3699, + "token_acc": 0.9198075380914194, + "train_speed(iter/s)": 1.555895 + }, + { + "epoch": 1.184, + "grad_norm": 
0.706832025032165, + "learning_rate": 3.5445866745317474e-06, + "loss": 0.3606421649456024, + "memory(GiB)": 77.0, + "step": 3700, + "token_acc": 0.8559266535690897, + "train_speed(iter/s)": 1.553724 + }, + { + "epoch": 1.18432, + "grad_norm": 0.6608416094173472, + "learning_rate": 3.5437854054717636e-06, + "loss": 0.2961413860321045, + "memory(GiB)": 77.0, + "step": 3701, + "token_acc": 0.954915466499687, + "train_speed(iter/s)": 1.551534 + }, + { + "epoch": 1.18464, + "grad_norm": 0.6291313342672359, + "learning_rate": 3.5429840065307924e-06, + "loss": 0.2627297639846802, + "memory(GiB)": 77.0, + "step": 3702, + "token_acc": 0.9175658178679327, + "train_speed(iter/s)": 1.549306 + }, + { + "epoch": 1.18496, + "grad_norm": 0.5906680900904672, + "learning_rate": 3.5421824778085552e-06, + "loss": 0.3157038688659668, + "memory(GiB)": 77.0, + "step": 3703, + "token_acc": 0.9262572533849129, + "train_speed(iter/s)": 1.547101 + }, + { + "epoch": 1.1852800000000001, + "grad_norm": 0.6581491266910386, + "learning_rate": 3.5413808194047874e-06, + "loss": 0.4096211791038513, + "memory(GiB)": 77.0, + "step": 3704, + "token_acc": 0.807412100095027, + "train_speed(iter/s)": 1.544887 + }, + { + "epoch": 1.1856, + "grad_norm": 0.7360290197967815, + "learning_rate": 3.5405790314192422e-06, + "loss": 0.3967311382293701, + "memory(GiB)": 77.0, + "step": 3705, + "token_acc": 0.9494097807757167, + "train_speed(iter/s)": 1.542771 + }, + { + "epoch": 1.18592, + "grad_norm": 0.571180595477003, + "learning_rate": 3.539777113951688e-06, + "loss": 0.2908862829208374, + "memory(GiB)": 77.0, + "step": 3706, + "token_acc": 0.9195630585898709, + "train_speed(iter/s)": 1.540485 + }, + { + "epoch": 1.18624, + "grad_norm": 0.6436482483813736, + "learning_rate": 3.5389750671019084e-06, + "loss": 0.39806947112083435, + "memory(GiB)": 77.0, + "step": 3707, + "token_acc": 0.8919902912621359, + "train_speed(iter/s)": 1.538284 + }, + { + "epoch": 1.18656, + "grad_norm": 0.674088174486554, + "learning_rate": 3.538172890969706e-06, + "loss": 0.3534397482872009, + "memory(GiB)": 77.0, + "step": 3708, + "token_acc": 0.9141630901287554, + "train_speed(iter/s)": 1.53607 + }, + { + "epoch": 1.18688, + "grad_norm": 0.6495493182403562, + "learning_rate": 3.5373705856548968e-06, + "loss": 0.3483526408672333, + "memory(GiB)": 77.0, + "step": 3709, + "token_acc": 0.9224620303756994, + "train_speed(iter/s)": 1.533837 + }, + { + "epoch": 1.1872, + "grad_norm": 0.6663138360058307, + "learning_rate": 3.536568151257314e-06, + "loss": 0.31123918294906616, + "memory(GiB)": 77.0, + "step": 3710, + "token_acc": 0.9002066725715973, + "train_speed(iter/s)": 1.531708 + }, + { + "epoch": 1.18752, + "grad_norm": 0.6242470305090486, + "learning_rate": 3.5357655878768073e-06, + "loss": 0.3231405019760132, + "memory(GiB)": 77.0, + "step": 3711, + "token_acc": 0.8948571428571429, + "train_speed(iter/s)": 1.529495 + }, + { + "epoch": 1.18784, + "grad_norm": 0.651652390633051, + "learning_rate": 3.5349628956132402e-06, + "loss": 0.36652737855911255, + "memory(GiB)": 77.0, + "step": 3712, + "token_acc": 0.8765701802293828, + "train_speed(iter/s)": 1.527371 + }, + { + "epoch": 1.1881599999999999, + "grad_norm": 0.7597423495764442, + "learning_rate": 3.534160074566495e-06, + "loss": 0.34969788789749146, + "memory(GiB)": 77.0, + "step": 3713, + "token_acc": 0.9110776186887717, + "train_speed(iter/s)": 1.52525 + }, + { + "epoch": 1.18848, + "grad_norm": 0.7631115225567314, + "learning_rate": 3.5333571248364686e-06, + "loss": 0.40546494722366333, + "memory(GiB)": 
77.0, + "step": 3714, + "token_acc": 0.939380332285586, + "train_speed(iter/s)": 1.523113 + }, + { + "epoch": 1.1888, + "grad_norm": 0.7061337440140468, + "learning_rate": 3.5325540465230736e-06, + "loss": 0.3960787057876587, + "memory(GiB)": 77.0, + "step": 3715, + "token_acc": 0.8841044434264773, + "train_speed(iter/s)": 1.520978 + }, + { + "epoch": 1.18912, + "grad_norm": 0.611800076007618, + "learning_rate": 3.531750839726239e-06, + "loss": 0.35404688119888306, + "memory(GiB)": 77.0, + "step": 3716, + "token_acc": 0.8294930875576036, + "train_speed(iter/s)": 1.518857 + }, + { + "epoch": 1.18944, + "grad_norm": 0.6898605581323115, + "learning_rate": 3.5309475045459114e-06, + "loss": 0.3900269865989685, + "memory(GiB)": 77.0, + "step": 3717, + "token_acc": 0.8910162002945509, + "train_speed(iter/s)": 1.516607 + }, + { + "epoch": 1.18976, + "grad_norm": 0.6032607897102047, + "learning_rate": 3.5301440410820507e-06, + "loss": 0.34279119968414307, + "memory(GiB)": 77.0, + "step": 3718, + "token_acc": 0.9147715196599362, + "train_speed(iter/s)": 1.514334 + }, + { + "epoch": 1.19008, + "grad_norm": 0.6637361985151438, + "learning_rate": 3.529340449434636e-06, + "loss": 0.253652960062027, + "memory(GiB)": 77.0, + "step": 3719, + "token_acc": 0.9124767225325885, + "train_speed(iter/s)": 1.512353 + }, + { + "epoch": 1.1904, + "grad_norm": 0.6802024550606924, + "learning_rate": 3.5285367297036573e-06, + "loss": 0.45687222480773926, + "memory(GiB)": 77.0, + "step": 3720, + "token_acc": 0.7908022994251437, + "train_speed(iter/s)": 1.51019 + }, + { + "epoch": 1.19072, + "grad_norm": 0.6380661420660582, + "learning_rate": 3.5277328819891255e-06, + "loss": 0.32036054134368896, + "memory(GiB)": 77.0, + "step": 3721, + "token_acc": 0.955607476635514, + "train_speed(iter/s)": 1.50819 + }, + { + "epoch": 1.19104, + "grad_norm": 0.5581234547015226, + "learning_rate": 3.526928906391065e-06, + "loss": 0.273287832736969, + "memory(GiB)": 77.0, + "step": 3722, + "token_acc": 0.9571267393757051, + "train_speed(iter/s)": 1.506175 + }, + { + "epoch": 1.19136, + "grad_norm": 0.7251309652120113, + "learning_rate": 3.5261248030095163e-06, + "loss": 0.4166434705257416, + "memory(GiB)": 77.0, + "step": 3723, + "token_acc": 0.882201646090535, + "train_speed(iter/s)": 1.503981 + }, + { + "epoch": 1.19168, + "grad_norm": 0.6393598562622942, + "learning_rate": 3.525320571944536e-06, + "loss": 0.3747174143791199, + "memory(GiB)": 77.0, + "step": 3724, + "token_acc": 0.8697657913413769, + "train_speed(iter/s)": 1.501698 + }, + { + "epoch": 1.192, + "grad_norm": 0.5908777230680493, + "learning_rate": 3.524516213296198e-06, + "loss": 0.33508461713790894, + "memory(GiB)": 77.0, + "step": 3725, + "token_acc": 0.8873096446700508, + "train_speed(iter/s)": 1.499531 + }, + { + "epoch": 1.19232, + "grad_norm": 0.6873773689666357, + "learning_rate": 3.52371172716459e-06, + "loss": 0.34430187940597534, + "memory(GiB)": 77.0, + "step": 3726, + "token_acc": 0.8262835860601224, + "train_speed(iter/s)": 1.497502 + }, + { + "epoch": 1.19264, + "grad_norm": 0.6406231090955834, + "learning_rate": 3.522907113649816e-06, + "loss": 0.3046707510948181, + "memory(GiB)": 77.0, + "step": 3727, + "token_acc": 0.8671055840039015, + "train_speed(iter/s)": 1.495496 + }, + { + "epoch": 1.19296, + "grad_norm": 0.6302255548528373, + "learning_rate": 3.5221023728519972e-06, + "loss": 0.33121994137763977, + "memory(GiB)": 77.0, + "step": 3728, + "token_acc": 0.9518658734451054, + "train_speed(iter/s)": 1.49351 + }, + { + "epoch": 1.1932800000000001, + 
"grad_norm": 0.7012541312363026, + "learning_rate": 3.521297504871268e-06, + "loss": 0.29961758852005005, + "memory(GiB)": 77.0, + "step": 3729, + "token_acc": 0.9269676199480028, + "train_speed(iter/s)": 1.491461 + }, + { + "epoch": 1.1936, + "grad_norm": 0.6752471117231961, + "learning_rate": 3.5204925098077813e-06, + "loss": 0.27979549765586853, + "memory(GiB)": 77.0, + "step": 3730, + "token_acc": 0.9344262295081968, + "train_speed(iter/s)": 1.48948 + }, + { + "epoch": 1.19392, + "grad_norm": 0.6420571872720378, + "learning_rate": 3.5196873877617054e-06, + "loss": 0.31404897570610046, + "memory(GiB)": 77.0, + "step": 3731, + "token_acc": 0.9418604651162791, + "train_speed(iter/s)": 1.487534 + }, + { + "epoch": 1.19424, + "grad_norm": 0.6471091983486905, + "learning_rate": 3.518882138833222e-06, + "loss": 0.31717193126678467, + "memory(GiB)": 77.0, + "step": 3732, + "token_acc": 0.9008171315863623, + "train_speed(iter/s)": 1.485562 + }, + { + "epoch": 1.19456, + "grad_norm": 0.7195122173888887, + "learning_rate": 3.5180767631225317e-06, + "loss": 0.3841671645641327, + "memory(GiB)": 77.0, + "step": 3733, + "token_acc": 0.9455799693408278, + "train_speed(iter/s)": 1.483616 + }, + { + "epoch": 1.19488, + "grad_norm": 0.5798514845664451, + "learning_rate": 3.51727126072985e-06, + "loss": 0.34938573837280273, + "memory(GiB)": 77.0, + "step": 3734, + "token_acc": 0.8830882352941176, + "train_speed(iter/s)": 1.481523 + }, + { + "epoch": 1.1952, + "grad_norm": 0.5828475550990215, + "learning_rate": 3.5164656317554057e-06, + "loss": 0.3148007392883301, + "memory(GiB)": 77.0, + "step": 3735, + "token_acc": 0.8596661021050085, + "train_speed(iter/s)": 1.479443 + }, + { + "epoch": 1.19552, + "grad_norm": 0.6601141325594589, + "learning_rate": 3.5156598762994475e-06, + "loss": 0.3737661838531494, + "memory(GiB)": 77.0, + "step": 3736, + "token_acc": 0.93756727664155, + "train_speed(iter/s)": 1.477558 + }, + { + "epoch": 1.19584, + "grad_norm": 0.6975397181946289, + "learning_rate": 3.5148539944622367e-06, + "loss": 0.321427583694458, + "memory(GiB)": 77.0, + "step": 3737, + "token_acc": 0.9229094076655052, + "train_speed(iter/s)": 1.475661 + }, + { + "epoch": 1.19616, + "grad_norm": 0.6455581633702463, + "learning_rate": 3.514047986344051e-06, + "loss": 0.36930084228515625, + "memory(GiB)": 77.0, + "step": 3738, + "token_acc": 0.8929503916449086, + "train_speed(iter/s)": 1.47373 + }, + { + "epoch": 1.19648, + "grad_norm": 0.6167637189763528, + "learning_rate": 3.5132418520451844e-06, + "loss": 0.2788013219833374, + "memory(GiB)": 77.0, + "step": 3739, + "token_acc": 0.9294412607449857, + "train_speed(iter/s)": 1.471726 + }, + { + "epoch": 1.1968, + "grad_norm": 0.6950033093809023, + "learning_rate": 3.512435591665947e-06, + "loss": 0.27969080209732056, + "memory(GiB)": 77.0, + "step": 3740, + "token_acc": 0.9495087336244541, + "train_speed(iter/s)": 1.469847 + }, + { + "epoch": 1.19712, + "grad_norm": 0.6572984367921428, + "learning_rate": 3.511629205306663e-06, + "loss": 0.3700699806213379, + "memory(GiB)": 77.0, + "step": 3741, + "token_acc": 0.8808943089430894, + "train_speed(iter/s)": 1.467891 + }, + { + "epoch": 1.19744, + "grad_norm": 0.6176562993867257, + "learning_rate": 3.5108226930676737e-06, + "loss": 0.35046982765197754, + "memory(GiB)": 77.0, + "step": 3742, + "token_acc": 0.8638443935926774, + "train_speed(iter/s)": 1.465941 + }, + { + "epoch": 1.19776, + "grad_norm": 0.6535275136145395, + "learning_rate": 3.510016055049335e-06, + "loss": 0.32453566789627075, + "memory(GiB)": 77.0, + 
"step": 3743, + "token_acc": 0.8792729245794899, + "train_speed(iter/s)": 1.464084 + }, + { + "epoch": 1.19808, + "grad_norm": 0.6298844923880648, + "learning_rate": 3.50920929135202e-06, + "loss": 0.3479836881160736, + "memory(GiB)": 77.0, + "step": 3744, + "token_acc": 0.871996303142329, + "train_speed(iter/s)": 1.462012 + }, + { + "epoch": 1.1984, + "grad_norm": 0.6729605033274051, + "learning_rate": 3.5084024020761154e-06, + "loss": 0.34352195262908936, + "memory(GiB)": 77.0, + "step": 3745, + "token_acc": 0.8397593656002188, + "train_speed(iter/s)": 1.460143 + }, + { + "epoch": 1.19872, + "grad_norm": 0.6725037251161722, + "learning_rate": 3.507595387322025e-06, + "loss": 0.35876715183258057, + "memory(GiB)": 77.0, + "step": 3746, + "token_acc": 0.8753985773853323, + "train_speed(iter/s)": 1.458278 + }, + { + "epoch": 1.19904, + "grad_norm": 0.6376376536296529, + "learning_rate": 3.506788247190168e-06, + "loss": 0.3574231266975403, + "memory(GiB)": 77.0, + "step": 3747, + "token_acc": 0.8780732563973909, + "train_speed(iter/s)": 1.456248 + }, + { + "epoch": 1.19936, + "grad_norm": 0.6608968368443375, + "learning_rate": 3.5059809817809787e-06, + "loss": 0.3955690562725067, + "memory(GiB)": 77.0, + "step": 3748, + "token_acc": 0.9216783216783216, + "train_speed(iter/s)": 1.454326 + }, + { + "epoch": 1.19968, + "grad_norm": 0.6512729172782245, + "learning_rate": 3.5051735911949077e-06, + "loss": 0.3733240067958832, + "memory(GiB)": 77.0, + "step": 3749, + "token_acc": 0.8383052313013403, + "train_speed(iter/s)": 1.452393 + }, + { + "epoch": 1.2, + "grad_norm": 0.6361465910946682, + "learning_rate": 3.50436607553242e-06, + "loss": 0.35155826807022095, + "memory(GiB)": 77.0, + "step": 3750, + "token_acc": 0.8693433895297249, + "train_speed(iter/s)": 1.450549 + }, + { + "epoch": 1.20032, + "grad_norm": 0.6000424444642704, + "learning_rate": 3.503558434893997e-06, + "loss": 0.3047623634338379, + "memory(GiB)": 77.0, + "step": 3751, + "token_acc": 0.9174041297935103, + "train_speed(iter/s)": 1.448626 + }, + { + "epoch": 1.20064, + "grad_norm": 0.6704713573854794, + "learning_rate": 3.5027506693801368e-06, + "loss": 0.2727794647216797, + "memory(GiB)": 77.0, + "step": 3752, + "token_acc": 0.920265780730897, + "train_speed(iter/s)": 1.44676 + }, + { + "epoch": 1.20096, + "grad_norm": 0.6868413380596013, + "learning_rate": 3.5019427790913505e-06, + "loss": 0.32215631008148193, + "memory(GiB)": 77.0, + "step": 3753, + "token_acc": 0.867595818815331, + "train_speed(iter/s)": 1.444982 + }, + { + "epoch": 1.20128, + "grad_norm": 0.5871616439566463, + "learning_rate": 3.501134764128167e-06, + "loss": 0.28809022903442383, + "memory(GiB)": 77.0, + "step": 3754, + "token_acc": 0.9013093609536837, + "train_speed(iter/s)": 1.443014 + }, + { + "epoch": 1.2016, + "grad_norm": 0.6331287415607119, + "learning_rate": 3.500326624591129e-06, + "loss": 0.34821388125419617, + "memory(GiB)": 77.0, + "step": 3755, + "token_acc": 0.9333793579565067, + "train_speed(iter/s)": 1.441045 + }, + { + "epoch": 1.2019199999999999, + "grad_norm": 0.6450326061698312, + "learning_rate": 3.4995183605807946e-06, + "loss": 0.3762490451335907, + "memory(GiB)": 77.0, + "step": 3756, + "token_acc": 0.8242251223491027, + "train_speed(iter/s)": 1.439099 + }, + { + "epoch": 1.20224, + "grad_norm": 0.6345777201440916, + "learning_rate": 3.49870997219774e-06, + "loss": 0.3083510994911194, + "memory(GiB)": 77.0, + "step": 3757, + "token_acc": 0.901525136509132, + "train_speed(iter/s)": 1.437251 + }, + { + "epoch": 1.20256, + "grad_norm": 
0.5886933757898383, + "learning_rate": 3.497901459542554e-06, + "loss": 0.30771404504776, + "memory(GiB)": 77.0, + "step": 3758, + "token_acc": 0.927263479145473, + "train_speed(iter/s)": 1.435368 + }, + { + "epoch": 1.20288, + "grad_norm": 0.6015850377530809, + "learning_rate": 3.4970928227158427e-06, + "loss": 0.26777786016464233, + "memory(GiB)": 77.0, + "step": 3759, + "token_acc": 0.9143192488262911, + "train_speed(iter/s)": 1.433527 + }, + { + "epoch": 1.2032, + "grad_norm": 0.5832834559871178, + "learning_rate": 3.496284061818227e-06, + "loss": 0.32170236110687256, + "memory(GiB)": 77.0, + "step": 3760, + "token_acc": 0.9377256317689531, + "train_speed(iter/s)": 1.431613 + }, + { + "epoch": 1.20352, + "grad_norm": 0.6146623964965197, + "learning_rate": 3.4954751769503426e-06, + "loss": 0.36508461833000183, + "memory(GiB)": 77.0, + "step": 3761, + "token_acc": 0.9171319424037614, + "train_speed(iter/s)": 1.429771 + }, + { + "epoch": 1.20384, + "grad_norm": 0.6485055517095359, + "learning_rate": 3.4946661682128414e-06, + "loss": 0.2970033884048462, + "memory(GiB)": 77.0, + "step": 3762, + "token_acc": 0.9475940924249643, + "train_speed(iter/s)": 1.427948 + }, + { + "epoch": 1.20416, + "grad_norm": 0.6481461725033854, + "learning_rate": 3.4938570357063906e-06, + "loss": 0.2747670114040375, + "memory(GiB)": 77.0, + "step": 3763, + "token_acc": 0.9298040362679146, + "train_speed(iter/s)": 1.42618 + }, + { + "epoch": 1.20448, + "grad_norm": 0.7676328738382249, + "learning_rate": 3.493047779531671e-06, + "loss": 0.3966876268386841, + "memory(GiB)": 77.0, + "step": 3764, + "token_acc": 0.868716577540107, + "train_speed(iter/s)": 1.424284 + }, + { + "epoch": 1.2048, + "grad_norm": 0.6418682898390575, + "learning_rate": 3.4922383997893837e-06, + "loss": 0.34623777866363525, + "memory(GiB)": 77.0, + "step": 3765, + "token_acc": 0.9224639901930739, + "train_speed(iter/s)": 1.422459 + }, + { + "epoch": 1.20512, + "grad_norm": 0.7140378605397618, + "learning_rate": 3.49142889658024e-06, + "loss": 0.3464665114879608, + "memory(GiB)": 77.0, + "step": 3766, + "token_acc": 0.9355679702048417, + "train_speed(iter/s)": 1.420758 + }, + { + "epoch": 1.20544, + "grad_norm": 0.7266709218338928, + "learning_rate": 3.4906192700049686e-06, + "loss": 0.3769528865814209, + "memory(GiB)": 77.0, + "step": 3767, + "token_acc": 0.8873873873873874, + "train_speed(iter/s)": 1.419056 + }, + { + "epoch": 1.20576, + "grad_norm": 0.6297881834209762, + "learning_rate": 3.4898095201643132e-06, + "loss": 0.3387204110622406, + "memory(GiB)": 77.0, + "step": 3768, + "token_acc": 0.9025615268709192, + "train_speed(iter/s)": 1.417293 + }, + { + "epoch": 1.20608, + "grad_norm": 0.6775666595459805, + "learning_rate": 3.4889996471590347e-06, + "loss": 0.38138869404792786, + "memory(GiB)": 77.0, + "step": 3769, + "token_acc": 0.9055306427503736, + "train_speed(iter/s)": 1.415568 + }, + { + "epoch": 1.2064, + "grad_norm": 0.6617857191674079, + "learning_rate": 3.4881896510899068e-06, + "loss": 0.33707255125045776, + "memory(GiB)": 77.0, + "step": 3770, + "token_acc": 0.8899506781750924, + "train_speed(iter/s)": 1.413837 + }, + { + "epoch": 1.20672, + "grad_norm": 0.6618236448727498, + "learning_rate": 3.4873795320577187e-06, + "loss": 0.31381022930145264, + "memory(GiB)": 77.0, + "step": 3771, + "token_acc": 0.9496644295302014, + "train_speed(iter/s)": 1.412046 + }, + { + "epoch": 1.2070400000000001, + "grad_norm": 0.6322772414390604, + "learning_rate": 3.4865692901632757e-06, + "loss": 0.32736337184906006, + "memory(GiB)": 77.0, + 
"step": 3772, + "token_acc": 0.9038118988243676, + "train_speed(iter/s)": 1.410334 + }, + { + "epoch": 1.20736, + "grad_norm": 0.7701408468227479, + "learning_rate": 3.4857589255074e-06, + "loss": 0.276869535446167, + "memory(GiB)": 77.0, + "step": 3773, + "token_acc": 0.9480978260869565, + "train_speed(iter/s)": 1.408466 + }, + { + "epoch": 1.20768, + "grad_norm": 0.6790303236721309, + "learning_rate": 3.484948438190926e-06, + "loss": 0.3844936490058899, + "memory(GiB)": 77.0, + "step": 3774, + "token_acc": 0.9304915514592934, + "train_speed(iter/s)": 1.40662 + }, + { + "epoch": 1.208, + "grad_norm": 0.648150468267155, + "learning_rate": 3.4841378283147047e-06, + "loss": 0.3979160189628601, + "memory(GiB)": 77.0, + "step": 3775, + "token_acc": 0.9012476771967083, + "train_speed(iter/s)": 1.404769 + }, + { + "epoch": 1.20832, + "grad_norm": 0.621929496698341, + "learning_rate": 3.4833270959796032e-06, + "loss": 0.364507257938385, + "memory(GiB)": 77.0, + "step": 3776, + "token_acc": 0.9560236511456024, + "train_speed(iter/s)": 1.402895 + }, + { + "epoch": 1.20864, + "grad_norm": 0.964793596360767, + "learning_rate": 3.482516241286504e-06, + "loss": 0.3117217421531677, + "memory(GiB)": 77.0, + "step": 3777, + "token_acc": 0.9138932910723719, + "train_speed(iter/s)": 1.401093 + }, + { + "epoch": 1.20896, + "grad_norm": 0.6665258789976418, + "learning_rate": 3.4817052643363014e-06, + "loss": 0.3403550684452057, + "memory(GiB)": 77.0, + "step": 3778, + "token_acc": 0.942314335060449, + "train_speed(iter/s)": 1.399398 + }, + { + "epoch": 1.20928, + "grad_norm": 0.6555375783125961, + "learning_rate": 3.4808941652299088e-06, + "loss": 0.3041994571685791, + "memory(GiB)": 77.0, + "step": 3779, + "token_acc": 0.9273638071383845, + "train_speed(iter/s)": 1.397663 + }, + { + "epoch": 1.2096, + "grad_norm": 0.6117184404608512, + "learning_rate": 3.480082944068254e-06, + "loss": 0.2702394723892212, + "memory(GiB)": 77.0, + "step": 3780, + "token_acc": 0.9356574637342069, + "train_speed(iter/s)": 1.395797 + }, + { + "epoch": 1.2099199999999999, + "grad_norm": 0.6469207620679545, + "learning_rate": 3.4792716009522774e-06, + "loss": 0.327369749546051, + "memory(GiB)": 77.0, + "step": 3781, + "token_acc": 0.8495526748219828, + "train_speed(iter/s)": 1.394114 + }, + { + "epoch": 1.21024, + "grad_norm": 0.5789687295452183, + "learning_rate": 3.4784601359829383e-06, + "loss": 0.3067750334739685, + "memory(GiB)": 77.0, + "step": 3782, + "token_acc": 0.8817377312952535, + "train_speed(iter/s)": 1.39232 + }, + { + "epoch": 1.21056, + "grad_norm": 0.6564756018449059, + "learning_rate": 3.4776485492612095e-06, + "loss": 0.3454298973083496, + "memory(GiB)": 77.0, + "step": 3783, + "token_acc": 0.8814352574102964, + "train_speed(iter/s)": 1.390582 + }, + { + "epoch": 1.21088, + "grad_norm": 0.63707025082739, + "learning_rate": 3.4768368408880786e-06, + "loss": 0.37730610370635986, + "memory(GiB)": 77.0, + "step": 3784, + "token_acc": 0.9009009009009009, + "train_speed(iter/s)": 1.388816 + }, + { + "epoch": 1.2112, + "grad_norm": 0.657298134595476, + "learning_rate": 3.4760250109645483e-06, + "loss": 0.39103835821151733, + "memory(GiB)": 77.0, + "step": 3785, + "token_acc": 0.8504218040233614, + "train_speed(iter/s)": 1.387109 + }, + { + "epoch": 1.21152, + "grad_norm": 0.7420026643397052, + "learning_rate": 3.4752130595916367e-06, + "loss": 0.3377802073955536, + "memory(GiB)": 77.0, + "step": 3786, + "token_acc": 0.9492753623188406, + "train_speed(iter/s)": 1.385371 + }, + { + "epoch": 1.21184, + "grad_norm": 
0.6125103008502648, + "learning_rate": 3.474400986870377e-06, + "loss": 0.36462533473968506, + "memory(GiB)": 77.0, + "step": 3787, + "token_acc": 0.8957837837837838, + "train_speed(iter/s)": 1.383603 + }, + { + "epoch": 1.21216, + "grad_norm": 0.6248174767743304, + "learning_rate": 3.4735887929018187e-06, + "loss": 0.29280614852905273, + "memory(GiB)": 77.0, + "step": 3788, + "token_acc": 0.9484536082474226, + "train_speed(iter/s)": 1.381985 + }, + { + "epoch": 1.21248, + "grad_norm": 0.5960010389763659, + "learning_rate": 3.4727764777870225e-06, + "loss": 0.2665179371833801, + "memory(GiB)": 77.0, + "step": 3789, + "token_acc": 0.8925403225806452, + "train_speed(iter/s)": 1.380148 + }, + { + "epoch": 1.2128, + "grad_norm": 0.6822756443615067, + "learning_rate": 3.4719640416270706e-06, + "loss": 0.3613666892051697, + "memory(GiB)": 77.0, + "step": 3790, + "token_acc": 0.944206008583691, + "train_speed(iter/s)": 1.378496 + }, + { + "epoch": 1.21312, + "grad_norm": 0.6758475296632346, + "learning_rate": 3.4711514845230544e-06, + "loss": 0.34297189116477966, + "memory(GiB)": 77.0, + "step": 3791, + "token_acc": 0.9216400911161731, + "train_speed(iter/s)": 1.376799 + }, + { + "epoch": 1.21344, + "grad_norm": 0.6067375757410253, + "learning_rate": 3.4703388065760828e-06, + "loss": 0.2568587362766266, + "memory(GiB)": 77.0, + "step": 3792, + "token_acc": 0.915325575637534, + "train_speed(iter/s)": 1.375179 + }, + { + "epoch": 1.21376, + "grad_norm": 0.6604460453377815, + "learning_rate": 3.4695260078872793e-06, + "loss": 0.36904144287109375, + "memory(GiB)": 77.0, + "step": 3793, + "token_acc": 0.8798539661466976, + "train_speed(iter/s)": 1.373511 + }, + { + "epoch": 1.21408, + "grad_norm": 0.623230417933229, + "learning_rate": 3.4687130885577834e-06, + "loss": 0.30895131826400757, + "memory(GiB)": 77.0, + "step": 3794, + "token_acc": 0.9381368267831149, + "train_speed(iter/s)": 1.371891 + }, + { + "epoch": 1.2144, + "grad_norm": 0.6342256753612792, + "learning_rate": 3.4679000486887475e-06, + "loss": 0.3703957200050354, + "memory(GiB)": 77.0, + "step": 3795, + "token_acc": 0.7933269780743565, + "train_speed(iter/s)": 1.370154 + }, + { + "epoch": 1.21472, + "grad_norm": 0.6668392564848781, + "learning_rate": 3.467086888381342e-06, + "loss": 0.33330270648002625, + "memory(GiB)": 77.0, + "step": 3796, + "token_acc": 0.881860465116279, + "train_speed(iter/s)": 1.368561 + }, + { + "epoch": 1.2150400000000001, + "grad_norm": 0.5729384214513131, + "learning_rate": 3.466273607736749e-06, + "loss": 0.31555044651031494, + "memory(GiB)": 77.0, + "step": 3797, + "token_acc": 0.9122920717217542, + "train_speed(iter/s)": 1.366782 + }, + { + "epoch": 1.21536, + "grad_norm": 0.5610982718224439, + "learning_rate": 3.465460206856168e-06, + "loss": 0.33434340357780457, + "memory(GiB)": 77.0, + "step": 3798, + "token_acc": 0.9311780553514573, + "train_speed(iter/s)": 1.365067 + }, + { + "epoch": 1.21568, + "grad_norm": 0.5810435929746489, + "learning_rate": 3.464646685840813e-06, + "loss": 0.33887648582458496, + "memory(GiB)": 77.0, + "step": 3799, + "token_acc": 0.8981385729058945, + "train_speed(iter/s)": 1.363367 + }, + { + "epoch": 1.216, + "grad_norm": 0.642659034838161, + "learning_rate": 3.4638330447919116e-06, + "loss": 0.2942732274532318, + "memory(GiB)": 77.0, + "step": 3800, + "token_acc": 0.8157099697885196, + "train_speed(iter/s)": 1.361771 + }, + { + "epoch": 1.21632, + "grad_norm": 0.6153711119089099, + "learning_rate": 3.463019283810708e-06, + "loss": 0.33074697852134705, + "memory(GiB)": 77.0, + 
"step": 3801, + "token_acc": 0.863578947368421, + "train_speed(iter/s)": 1.359957 + }, + { + "epoch": 1.21664, + "grad_norm": 0.6143585378488262, + "learning_rate": 3.4622054029984595e-06, + "loss": 0.30806466937065125, + "memory(GiB)": 77.0, + "step": 3802, + "token_acc": 0.962258876153201, + "train_speed(iter/s)": 1.35832 + }, + { + "epoch": 1.21696, + "grad_norm": 0.7138352464290384, + "learning_rate": 3.4613914024564414e-06, + "loss": 0.37638306617736816, + "memory(GiB)": 77.0, + "step": 3803, + "token_acc": 0.8669675478186116, + "train_speed(iter/s)": 1.356582 + }, + { + "epoch": 1.21728, + "grad_norm": 0.7391573008202814, + "learning_rate": 3.460577282285941e-06, + "loss": 0.3284466564655304, + "memory(GiB)": 77.0, + "step": 3804, + "token_acc": 0.8919262555626192, + "train_speed(iter/s)": 1.354994 + }, + { + "epoch": 1.2176, + "grad_norm": 0.6277470613753017, + "learning_rate": 3.4597630425882604e-06, + "loss": 0.3443412184715271, + "memory(GiB)": 77.0, + "step": 3805, + "token_acc": 0.9330985915492958, + "train_speed(iter/s)": 1.353429 + }, + { + "epoch": 1.21792, + "grad_norm": 0.6320253976880079, + "learning_rate": 3.4589486834647197e-06, + "loss": 0.21481700241565704, + "memory(GiB)": 77.0, + "step": 3806, + "token_acc": 0.950401376146789, + "train_speed(iter/s)": 1.351883 + }, + { + "epoch": 1.21824, + "grad_norm": 0.6360930093758096, + "learning_rate": 3.45813420501665e-06, + "loss": 0.32873058319091797, + "memory(GiB)": 77.0, + "step": 3807, + "token_acc": 0.8419301164725458, + "train_speed(iter/s)": 1.350199 + }, + { + "epoch": 1.21856, + "grad_norm": 0.6060179375189959, + "learning_rate": 3.4573196073453997e-06, + "loss": 0.3154258728027344, + "memory(GiB)": 77.0, + "step": 3808, + "token_acc": 0.9301340860973889, + "train_speed(iter/s)": 1.348579 + }, + { + "epoch": 1.21888, + "grad_norm": 0.5946805965288953, + "learning_rate": 3.456504890552331e-06, + "loss": 0.2334372103214264, + "memory(GiB)": 77.0, + "step": 3809, + "token_acc": 0.8781349346520664, + "train_speed(iter/s)": 1.346988 + }, + { + "epoch": 1.2192, + "grad_norm": 0.6232664618986182, + "learning_rate": 3.455690054738822e-06, + "loss": 0.2637794613838196, + "memory(GiB)": 77.0, + "step": 3810, + "token_acc": 0.9362079149438866, + "train_speed(iter/s)": 1.345486 + }, + { + "epoch": 1.21952, + "grad_norm": 0.7148495514665992, + "learning_rate": 3.4548751000062643e-06, + "loss": 0.3227500319480896, + "memory(GiB)": 77.0, + "step": 3811, + "token_acc": 0.8899737138565528, + "train_speed(iter/s)": 1.343997 + }, + { + "epoch": 1.21984, + "grad_norm": 0.6674674009506745, + "learning_rate": 3.4540600264560647e-06, + "loss": 0.3418687582015991, + "memory(GiB)": 77.0, + "step": 3812, + "token_acc": 0.9164759725400458, + "train_speed(iter/s)": 1.342474 + }, + { + "epoch": 1.22016, + "grad_norm": 0.6325173237993716, + "learning_rate": 3.4532448341896456e-06, + "loss": 0.3036276698112488, + "memory(GiB)": 77.0, + "step": 3813, + "token_acc": 0.9632125807357483, + "train_speed(iter/s)": 1.340991 + }, + { + "epoch": 1.22048, + "grad_norm": 0.6268762752042686, + "learning_rate": 3.452429523308443e-06, + "loss": 0.34062471985816956, + "memory(GiB)": 77.0, + "step": 3814, + "token_acc": 0.8540410132689988, + "train_speed(iter/s)": 1.339387 + }, + { + "epoch": 1.2208, + "grad_norm": 0.6348118956192523, + "learning_rate": 3.4516140939139086e-06, + "loss": 0.31271836161613464, + "memory(GiB)": 77.0, + "step": 3815, + "token_acc": 0.929846539304729, + "train_speed(iter/s)": 1.337857 + }, + { + "epoch": 1.22112, + "grad_norm": 
0.6468370762329855, + "learning_rate": 3.4507985461075085e-06, + "loss": 0.2787039875984192, + "memory(GiB)": 77.0, + "step": 3816, + "token_acc": 0.9048404840484049, + "train_speed(iter/s)": 1.336318 + }, + { + "epoch": 1.22144, + "grad_norm": 0.6472333110848486, + "learning_rate": 3.4499828799907226e-06, + "loss": 0.3584495782852173, + "memory(GiB)": 77.0, + "step": 3817, + "token_acc": 0.9278061224489796, + "train_speed(iter/s)": 1.334848 + }, + { + "epoch": 1.22176, + "grad_norm": 0.5914715630601391, + "learning_rate": 3.4491670956650476e-06, + "loss": 0.31067508459091187, + "memory(GiB)": 77.0, + "step": 3818, + "token_acc": 0.8965413533834586, + "train_speed(iter/s)": 1.333189 + }, + { + "epoch": 1.22208, + "grad_norm": 0.6642256722445575, + "learning_rate": 3.4483511932319923e-06, + "loss": 0.34429147839546204, + "memory(GiB)": 77.0, + "step": 3819, + "token_acc": 0.9260515603799185, + "train_speed(iter/s)": 1.331728 + }, + { + "epoch": 1.2224, + "grad_norm": 0.5977461165057284, + "learning_rate": 3.447535172793084e-06, + "loss": 0.32730501890182495, + "memory(GiB)": 77.0, + "step": 3820, + "token_acc": 0.8210560102712245, + "train_speed(iter/s)": 1.33023 + }, + { + "epoch": 1.22272, + "grad_norm": 0.726561465629472, + "learning_rate": 3.446719034449859e-06, + "loss": 0.2826305627822876, + "memory(GiB)": 77.0, + "step": 3821, + "token_acc": 0.9319388073680924, + "train_speed(iter/s)": 1.328704 + }, + { + "epoch": 1.22304, + "grad_norm": 0.7159040228198058, + "learning_rate": 3.445902778303874e-06, + "loss": 0.3507973551750183, + "memory(GiB)": 77.0, + "step": 3822, + "token_acc": 0.9055354993983152, + "train_speed(iter/s)": 1.327239 + }, + { + "epoch": 1.22336, + "grad_norm": 0.6405489401554341, + "learning_rate": 3.4450864044566974e-06, + "loss": 0.3510117828845978, + "memory(GiB)": 77.0, + "step": 3823, + "token_acc": 0.8799568384138117, + "train_speed(iter/s)": 1.325719 + }, + { + "epoch": 1.2236799999999999, + "grad_norm": 0.6536494120001312, + "learning_rate": 3.444269913009912e-06, + "loss": 0.3901655972003937, + "memory(GiB)": 77.0, + "step": 3824, + "token_acc": 0.8658944085542437, + "train_speed(iter/s)": 1.324192 + }, + { + "epoch": 1.224, + "grad_norm": 0.6941220223873542, + "learning_rate": 3.4434533040651175e-06, + "loss": 0.31693774461746216, + "memory(GiB)": 77.0, + "step": 3825, + "token_acc": 0.8894284646992903, + "train_speed(iter/s)": 1.322538 + }, + { + "epoch": 1.22432, + "grad_norm": 0.6980488684327483, + "learning_rate": 3.442636577723925e-06, + "loss": 0.48499858379364014, + "memory(GiB)": 77.0, + "step": 3826, + "token_acc": 0.8294440631434454, + "train_speed(iter/s)": 1.32095 + }, + { + "epoch": 1.22464, + "grad_norm": 0.6197400123411975, + "learning_rate": 3.441819734087963e-06, + "loss": 0.3130158483982086, + "memory(GiB)": 77.0, + "step": 3827, + "token_acc": 0.8838455032421765, + "train_speed(iter/s)": 1.319398 + }, + { + "epoch": 1.22496, + "grad_norm": 0.628620463508951, + "learning_rate": 3.4410027732588736e-06, + "loss": 0.287079781293869, + "memory(GiB)": 77.0, + "step": 3828, + "token_acc": 0.9423199490121096, + "train_speed(iter/s)": 1.317906 + }, + { + "epoch": 1.22528, + "grad_norm": 0.5886062387090142, + "learning_rate": 3.440185695338312e-06, + "loss": 0.25434690713882446, + "memory(GiB)": 77.0, + "step": 3829, + "token_acc": 0.8723357015985791, + "train_speed(iter/s)": 1.316465 + }, + { + "epoch": 1.2256, + "grad_norm": 0.7216858806108211, + "learning_rate": 3.439368500427951e-06, + "loss": 0.32361987233161926, + "memory(GiB)": 77.0, + 
"step": 3830, + "token_acc": 0.9412568306010929, + "train_speed(iter/s)": 1.315009 + }, + { + "epoch": 1.22592, + "grad_norm": 0.6611359154230259, + "learning_rate": 3.438551188629476e-06, + "loss": 0.3401278853416443, + "memory(GiB)": 77.0, + "step": 3831, + "token_acc": 0.900126422250316, + "train_speed(iter/s)": 1.313417 + }, + { + "epoch": 1.22624, + "grad_norm": 0.6319757393213801, + "learning_rate": 3.4377337600445863e-06, + "loss": 0.2916039824485779, + "memory(GiB)": 77.0, + "step": 3832, + "token_acc": 0.9478527607361963, + "train_speed(iter/s)": 1.31193 + }, + { + "epoch": 1.22656, + "grad_norm": 0.6604543022689819, + "learning_rate": 3.436916214774998e-06, + "loss": 0.3507730960845947, + "memory(GiB)": 77.0, + "step": 3833, + "token_acc": 0.9222819346837536, + "train_speed(iter/s)": 1.310477 + }, + { + "epoch": 1.22688, + "grad_norm": 0.7007660751083402, + "learning_rate": 3.43609855292244e-06, + "loss": 0.3812182545661926, + "memory(GiB)": 77.0, + "step": 3834, + "token_acc": 0.8962113127001067, + "train_speed(iter/s)": 1.308938 + }, + { + "epoch": 1.2272, + "grad_norm": 0.7053014080413393, + "learning_rate": 3.4352807745886556e-06, + "loss": 0.47540146112442017, + "memory(GiB)": 77.0, + "step": 3835, + "token_acc": 0.7989120580235721, + "train_speed(iter/s)": 1.307438 + }, + { + "epoch": 1.22752, + "grad_norm": 0.6261916258110996, + "learning_rate": 3.434462879875404e-06, + "loss": 0.30473995208740234, + "memory(GiB)": 77.0, + "step": 3836, + "token_acc": 0.900868523510033, + "train_speed(iter/s)": 1.305897 + }, + { + "epoch": 1.22784, + "grad_norm": 0.6041676029870582, + "learning_rate": 3.433644868884457e-06, + "loss": 0.3019428849220276, + "memory(GiB)": 77.0, + "step": 3837, + "token_acc": 0.8773400936037441, + "train_speed(iter/s)": 1.304324 + }, + { + "epoch": 1.22816, + "grad_norm": 0.5918529302106225, + "learning_rate": 3.432826741717602e-06, + "loss": 0.30556154251098633, + "memory(GiB)": 77.0, + "step": 3838, + "token_acc": 0.8996970716930327, + "train_speed(iter/s)": 1.302892 + }, + { + "epoch": 1.22848, + "grad_norm": 0.6863153668074278, + "learning_rate": 3.432008498476641e-06, + "loss": 0.3883437514305115, + "memory(GiB)": 77.0, + "step": 3839, + "token_acc": 0.8302231237322515, + "train_speed(iter/s)": 1.301478 + }, + { + "epoch": 1.2288000000000001, + "grad_norm": 0.6305316460345048, + "learning_rate": 3.431190139263391e-06, + "loss": 0.36305463314056396, + "memory(GiB)": 77.0, + "step": 3840, + "token_acc": 0.9238052299368801, + "train_speed(iter/s)": 1.300044 + }, + { + "epoch": 1.22912, + "grad_norm": 0.6012787763073398, + "learning_rate": 3.430371664179682e-06, + "loss": 0.32942453026771545, + "memory(GiB)": 77.0, + "step": 3841, + "token_acc": 0.9482915717539864, + "train_speed(iter/s)": 1.298599 + }, + { + "epoch": 1.22944, + "grad_norm": 0.6756898126153201, + "learning_rate": 3.4295530733273593e-06, + "loss": 0.3579546809196472, + "memory(GiB)": 77.0, + "step": 3842, + "token_acc": 0.8627962085308057, + "train_speed(iter/s)": 1.297094 + }, + { + "epoch": 1.22976, + "grad_norm": 0.6920391934545972, + "learning_rate": 3.428734366808281e-06, + "loss": 0.3265370726585388, + "memory(GiB)": 77.0, + "step": 3843, + "token_acc": 0.8895155938951559, + "train_speed(iter/s)": 1.295656 + }, + { + "epoch": 1.23008, + "grad_norm": 0.6804554335058307, + "learning_rate": 3.4279155447243223e-06, + "loss": 0.3524029850959778, + "memory(GiB)": 77.0, + "step": 3844, + "token_acc": 0.9227974568574023, + "train_speed(iter/s)": 1.294276 + }, + { + "epoch": 1.2304, + 
"grad_norm": 0.6369296219842873, + "learning_rate": 3.427096607177371e-06, + "loss": 0.2869453430175781, + "memory(GiB)": 77.0, + "step": 3845, + "token_acc": 0.9317507418397626, + "train_speed(iter/s)": 1.292926 + }, + { + "epoch": 1.23072, + "grad_norm": 0.637993584662995, + "learning_rate": 3.4262775542693288e-06, + "loss": 0.3712614178657532, + "memory(GiB)": 77.0, + "step": 3846, + "token_acc": 0.8667397861255827, + "train_speed(iter/s)": 1.291464 + }, + { + "epoch": 1.23104, + "grad_norm": 0.6702343792957403, + "learning_rate": 3.425458386102114e-06, + "loss": 0.3081596791744232, + "memory(GiB)": 77.0, + "step": 3847, + "token_acc": 0.8863636363636364, + "train_speed(iter/s)": 1.290131 + }, + { + "epoch": 1.23136, + "grad_norm": 0.5716864785706284, + "learning_rate": 3.4246391027776584e-06, + "loss": 0.292092502117157, + "memory(GiB)": 77.0, + "step": 3848, + "token_acc": 0.8760633861551292, + "train_speed(iter/s)": 1.288647 + }, + { + "epoch": 1.2316799999999999, + "grad_norm": 0.6468778856390793, + "learning_rate": 3.423819704397906e-06, + "loss": 0.33698466420173645, + "memory(GiB)": 77.0, + "step": 3849, + "token_acc": 0.9148124602670057, + "train_speed(iter/s)": 1.287224 + }, + { + "epoch": 1.232, + "grad_norm": 0.7016440764811818, + "learning_rate": 3.423000191064817e-06, + "loss": 0.3410181701183319, + "memory(GiB)": 77.0, + "step": 3850, + "token_acc": 0.8907644080057873, + "train_speed(iter/s)": 1.285774 + }, + { + "epoch": 1.23232, + "grad_norm": 0.6272662680113801, + "learning_rate": 3.4221805628803654e-06, + "loss": 0.33505702018737793, + "memory(GiB)": 77.0, + "step": 3851, + "token_acc": 0.8989986187845304, + "train_speed(iter/s)": 1.284363 + }, + { + "epoch": 1.23264, + "grad_norm": 0.7238494800374031, + "learning_rate": 3.421360819946541e-06, + "loss": 0.47472676634788513, + "memory(GiB)": 77.0, + "step": 3852, + "token_acc": 0.7912621359223301, + "train_speed(iter/s)": 1.282928 + }, + { + "epoch": 1.23296, + "grad_norm": 0.6230512621345933, + "learning_rate": 3.420540962365345e-06, + "loss": 0.3304237425327301, + "memory(GiB)": 77.0, + "step": 3853, + "token_acc": 0.8400447427293065, + "train_speed(iter/s)": 1.281521 + }, + { + "epoch": 1.23328, + "grad_norm": 0.634286200441787, + "learning_rate": 3.4197209902387957e-06, + "loss": 0.2998504042625427, + "memory(GiB)": 77.0, + "step": 3854, + "token_acc": 0.9114145658263305, + "train_speed(iter/s)": 1.280177 + }, + { + "epoch": 1.2336, + "grad_norm": 0.7231299773548658, + "learning_rate": 3.4189009036689235e-06, + "loss": 0.3420257270336151, + "memory(GiB)": 77.0, + "step": 3855, + "token_acc": 0.8494837172359015, + "train_speed(iter/s)": 1.278843 + }, + { + "epoch": 1.23392, + "grad_norm": 0.5821534647913612, + "learning_rate": 3.4180807027577756e-06, + "loss": 0.3085724115371704, + "memory(GiB)": 77.0, + "step": 3856, + "token_acc": 0.8970239662891757, + "train_speed(iter/s)": 1.277403 + }, + { + "epoch": 1.23424, + "grad_norm": 0.6805410940665232, + "learning_rate": 3.41726038760741e-06, + "loss": 0.31527379155158997, + "memory(GiB)": 77.0, + "step": 3857, + "token_acc": 0.9283703472005005, + "train_speed(iter/s)": 1.276091 + }, + { + "epoch": 1.23456, + "grad_norm": 0.6748316469913098, + "learning_rate": 3.416439958319901e-06, + "loss": 0.42893916368484497, + "memory(GiB)": 77.0, + "step": 3858, + "token_acc": 0.9075293444030919, + "train_speed(iter/s)": 1.274746 + }, + { + "epoch": 1.23488, + "grad_norm": 0.6838672547615197, + "learning_rate": 3.415619414997337e-06, + "loss": 0.4065627455711365, + "memory(GiB)": 
77.0, + "step": 3859, + "token_acc": 0.9120814568239671, + "train_speed(iter/s)": 1.273377 + }, + { + "epoch": 1.2352, + "grad_norm": 0.6710168243558113, + "learning_rate": 3.414798757741821e-06, + "loss": 0.36716288328170776, + "memory(GiB)": 77.0, + "step": 3860, + "token_acc": 0.9251592356687898, + "train_speed(iter/s)": 1.272019 + }, + { + "epoch": 1.23552, + "grad_norm": 0.6887105807361984, + "learning_rate": 3.413977986655468e-06, + "loss": 0.4283860921859741, + "memory(GiB)": 77.0, + "step": 3861, + "token_acc": 0.9078111587982832, + "train_speed(iter/s)": 1.270622 + }, + { + "epoch": 1.23584, + "grad_norm": 0.6097788233895057, + "learning_rate": 3.4131571018404103e-06, + "loss": 0.30691322684288025, + "memory(GiB)": 77.0, + "step": 3862, + "token_acc": 0.8902812587023113, + "train_speed(iter/s)": 1.269297 + }, + { + "epoch": 1.23616, + "grad_norm": 0.6118969561902793, + "learning_rate": 3.412336103398792e-06, + "loss": 0.2718590199947357, + "memory(GiB)": 77.0, + "step": 3863, + "token_acc": 0.8834214002642008, + "train_speed(iter/s)": 1.267879 + }, + { + "epoch": 1.23648, + "grad_norm": 0.8140381576131914, + "learning_rate": 3.4115149914327726e-06, + "loss": 0.30811452865600586, + "memory(GiB)": 77.0, + "step": 3864, + "token_acc": 0.8631155156395168, + "train_speed(iter/s)": 1.266568 + }, + { + "epoch": 1.2368000000000001, + "grad_norm": 0.6113931741859644, + "learning_rate": 3.4106937660445246e-06, + "loss": 0.2745397984981537, + "memory(GiB)": 77.0, + "step": 3865, + "token_acc": 0.9123201438848921, + "train_speed(iter/s)": 1.265197 + }, + { + "epoch": 1.23712, + "grad_norm": 0.715683306292755, + "learning_rate": 3.4098724273362354e-06, + "loss": 0.4617146849632263, + "memory(GiB)": 77.0, + "step": 3866, + "token_acc": 0.8682133460894523, + "train_speed(iter/s)": 1.263777 + }, + { + "epoch": 1.23744, + "grad_norm": 0.6720345499151769, + "learning_rate": 3.409050975410107e-06, + "loss": 0.33627596497535706, + "memory(GiB)": 77.0, + "step": 3867, + "token_acc": 0.9581005586592178, + "train_speed(iter/s)": 1.262439 + }, + { + "epoch": 1.23776, + "grad_norm": 0.5987015461832168, + "learning_rate": 3.4082294103683543e-06, + "loss": 0.33245545625686646, + "memory(GiB)": 77.0, + "step": 3868, + "token_acc": 0.8910692831397677, + "train_speed(iter/s)": 1.261042 + }, + { + "epoch": 1.23808, + "grad_norm": 0.7206974316494376, + "learning_rate": 3.4074077323132072e-06, + "loss": 0.34347039461135864, + "memory(GiB)": 77.0, + "step": 3869, + "token_acc": 0.9475982532751092, + "train_speed(iter/s)": 1.259761 + }, + { + "epoch": 1.2384, + "grad_norm": 0.5958893006243187, + "learning_rate": 3.406585941346908e-06, + "loss": 0.31156444549560547, + "memory(GiB)": 77.0, + "step": 3870, + "token_acc": 0.8956849602010892, + "train_speed(iter/s)": 1.25849 + }, + { + "epoch": 1.23872, + "grad_norm": 0.6761428825931328, + "learning_rate": 3.405764037571716e-06, + "loss": 0.32030555605888367, + "memory(GiB)": 77.0, + "step": 3871, + "token_acc": 0.9241830065359478, + "train_speed(iter/s)": 1.257219 + }, + { + "epoch": 1.23904, + "grad_norm": 0.610330408320769, + "learning_rate": 3.4049420210899025e-06, + "loss": 0.30884242057800293, + "memory(GiB)": 77.0, + "step": 3872, + "token_acc": 0.9413854351687388, + "train_speed(iter/s)": 1.255916 + }, + { + "epoch": 1.23936, + "grad_norm": 0.6492444640600347, + "learning_rate": 3.4041198920037522e-06, + "loss": 0.36352449655532837, + "memory(GiB)": 77.0, + "step": 3873, + "token_acc": 0.8950596252129472, + "train_speed(iter/s)": 1.254647 + }, + { + "epoch": 
1.23968, + "grad_norm": 0.7443059735452946, + "learning_rate": 3.4032976504155656e-06, + "loss": 0.43584316968917847, + "memory(GiB)": 77.0, + "step": 3874, + "token_acc": 0.8615629984051036, + "train_speed(iter/s)": 1.253337 + }, + { + "epoch": 1.24, + "grad_norm": 0.6483997660112504, + "learning_rate": 3.402475296427657e-06, + "loss": 0.3467116951942444, + "memory(GiB)": 77.0, + "step": 3875, + "token_acc": 0.8641114982578397, + "train_speed(iter/s)": 1.252077 + }, + { + "epoch": 1.24032, + "grad_norm": 0.6871775453189525, + "learning_rate": 3.4016528301423525e-06, + "loss": 0.36815863847732544, + "memory(GiB)": 77.0, + "step": 3876, + "token_acc": 0.9327398615232443, + "train_speed(iter/s)": 1.250766 + }, + { + "epoch": 1.24064, + "grad_norm": 0.6348373772735387, + "learning_rate": 3.400830251661995e-06, + "loss": 0.31200361251831055, + "memory(GiB)": 77.0, + "step": 3877, + "token_acc": 0.9209270620313565, + "train_speed(iter/s)": 1.249449 + }, + { + "epoch": 1.24096, + "grad_norm": 0.6006634136456955, + "learning_rate": 3.40000756108894e-06, + "loss": 0.2489660233259201, + "memory(GiB)": 77.0, + "step": 3878, + "token_acc": 0.8973676427092576, + "train_speed(iter/s)": 1.248178 + }, + { + "epoch": 1.24128, + "grad_norm": 0.6592177920564714, + "learning_rate": 3.399184758525557e-06, + "loss": 0.3644829988479614, + "memory(GiB)": 77.0, + "step": 3879, + "token_acc": 0.9395029431000654, + "train_speed(iter/s)": 1.246872 + }, + { + "epoch": 1.2416, + "grad_norm": 0.678905655604785, + "learning_rate": 3.3983618440742285e-06, + "loss": 0.31679457426071167, + "memory(GiB)": 77.0, + "step": 3880, + "token_acc": 0.9281437125748503, + "train_speed(iter/s)": 1.24555 + }, + { + "epoch": 1.24192, + "grad_norm": 0.6033335270243542, + "learning_rate": 3.3975388178373524e-06, + "loss": 0.30531471967697144, + "memory(GiB)": 77.0, + "step": 3881, + "token_acc": 0.8752025931928687, + "train_speed(iter/s)": 1.244258 + }, + { + "epoch": 1.24224, + "grad_norm": 0.7085929911236258, + "learning_rate": 3.3967156799173416e-06, + "loss": 0.3807566165924072, + "memory(GiB)": 77.0, + "step": 3882, + "token_acc": 0.9155888359428183, + "train_speed(iter/s)": 1.242925 + }, + { + "epoch": 1.24256, + "grad_norm": 0.6614716569514256, + "learning_rate": 3.3958924304166197e-06, + "loss": 0.36073386669158936, + "memory(GiB)": 77.0, + "step": 3883, + "token_acc": 0.907250755287009, + "train_speed(iter/s)": 1.241643 + }, + { + "epoch": 1.24288, + "grad_norm": 0.6284627509807841, + "learning_rate": 3.3950690694376258e-06, + "loss": 0.41543257236480713, + "memory(GiB)": 77.0, + "step": 3884, + "token_acc": 0.8841978287092883, + "train_speed(iter/s)": 1.240352 + }, + { + "epoch": 1.2432, + "grad_norm": 0.6873678855727852, + "learning_rate": 3.3942455970828146e-06, + "loss": 0.34062492847442627, + "memory(GiB)": 77.0, + "step": 3885, + "token_acc": 0.9188, + "train_speed(iter/s)": 1.239073 + }, + { + "epoch": 1.24352, + "grad_norm": 0.6606862164042094, + "learning_rate": 3.39342201345465e-06, + "loss": 0.37633782625198364, + "memory(GiB)": 77.0, + "step": 3886, + "token_acc": 0.8474499089253188, + "train_speed(iter/s)": 1.237808 + }, + { + "epoch": 1.24384, + "grad_norm": 0.7102569837794623, + "learning_rate": 3.3925983186556153e-06, + "loss": 0.3408900499343872, + "memory(GiB)": 77.0, + "step": 3887, + "token_acc": 0.9433831191385897, + "train_speed(iter/s)": 1.236498 + }, + { + "epoch": 1.24416, + "grad_norm": 0.6556133662470534, + "learning_rate": 3.391774512788204e-06, + "loss": 0.35581767559051514, + "memory(GiB)": 77.0, + 
"step": 3888, + "token_acc": 0.9314051702073202, + "train_speed(iter/s)": 1.235263 + }, + { + "epoch": 1.24448, + "grad_norm": 0.7140596047420663, + "learning_rate": 3.3909505959549245e-06, + "loss": 0.42325499653816223, + "memory(GiB)": 77.0, + "step": 3889, + "token_acc": 0.8680458970792768, + "train_speed(iter/s)": 1.233909 + }, + { + "epoch": 1.2448, + "grad_norm": 0.5730737596100275, + "learning_rate": 3.390126568258299e-06, + "loss": 0.32412874698638916, + "memory(GiB)": 77.0, + "step": 3890, + "token_acc": 0.8486556110127194, + "train_speed(iter/s)": 1.232598 + }, + { + "epoch": 1.24512, + "grad_norm": 0.6390674864946535, + "learning_rate": 3.389302429800864e-06, + "loss": 0.30773648619651794, + "memory(GiB)": 77.0, + "step": 3891, + "token_acc": 0.881638846737481, + "train_speed(iter/s)": 1.231341 + }, + { + "epoch": 1.2454399999999999, + "grad_norm": 0.6330520197073215, + "learning_rate": 3.3884781806851686e-06, + "loss": 0.31857582926750183, + "memory(GiB)": 77.0, + "step": 3892, + "token_acc": 0.9335375191424196, + "train_speed(iter/s)": 1.230074 + }, + { + "epoch": 1.24576, + "grad_norm": 0.6478841752740193, + "learning_rate": 3.3876538210137756e-06, + "loss": 0.35209035873413086, + "memory(GiB)": 77.0, + "step": 3893, + "token_acc": 0.8561578697099382, + "train_speed(iter/s)": 1.228831 + }, + { + "epoch": 1.24608, + "grad_norm": 0.6409685427867109, + "learning_rate": 3.386829350889263e-06, + "loss": 0.33839738368988037, + "memory(GiB)": 77.0, + "step": 3894, + "token_acc": 0.8638627559490869, + "train_speed(iter/s)": 1.227543 + }, + { + "epoch": 1.2464, + "grad_norm": 0.6058533381871504, + "learning_rate": 3.3860047704142224e-06, + "loss": 0.3238135278224945, + "memory(GiB)": 77.0, + "step": 3895, + "token_acc": 0.9511475409836065, + "train_speed(iter/s)": 1.226263 + }, + { + "epoch": 1.24672, + "grad_norm": 0.6147434310431574, + "learning_rate": 3.385180079691258e-06, + "loss": 0.38075804710388184, + "memory(GiB)": 77.0, + "step": 3896, + "token_acc": 0.8716054746904193, + "train_speed(iter/s)": 1.224992 + }, + { + "epoch": 1.24704, + "grad_norm": 0.6127649891113788, + "learning_rate": 3.384355278822988e-06, + "loss": 0.2944115102291107, + "memory(GiB)": 77.0, + "step": 3897, + "token_acc": 0.9079163730326522, + "train_speed(iter/s)": 1.22374 + }, + { + "epoch": 1.24736, + "grad_norm": 0.6485970490627083, + "learning_rate": 3.3835303679120457e-06, + "loss": 0.3576596975326538, + "memory(GiB)": 77.0, + "step": 3898, + "token_acc": 0.8956081081081081, + "train_speed(iter/s)": 1.22245 + }, + { + "epoch": 1.24768, + "grad_norm": 0.6442747052304735, + "learning_rate": 3.382705347061076e-06, + "loss": 0.3872779309749603, + "memory(GiB)": 77.0, + "step": 3899, + "token_acc": 0.9379465964648364, + "train_speed(iter/s)": 1.221232 + }, + { + "epoch": 1.248, + "grad_norm": 0.7087135231126966, + "learning_rate": 3.3818802163727377e-06, + "loss": 0.4094266891479492, + "memory(GiB)": 77.0, + "step": 3900, + "token_acc": 0.8852758494702229, + "train_speed(iter/s)": 1.219873 + }, + { + "epoch": 1.24832, + "grad_norm": 0.6906106728509552, + "learning_rate": 3.3810549759497047e-06, + "loss": 0.36790403723716736, + "memory(GiB)": 77.0, + "step": 3901, + "token_acc": 0.871495975575909, + "train_speed(iter/s)": 1.218702 + }, + { + "epoch": 1.24864, + "grad_norm": 0.5948804211919989, + "learning_rate": 3.380229625894664e-06, + "loss": 0.24674594402313232, + "memory(GiB)": 77.0, + "step": 3902, + "token_acc": 0.8668466036887089, + "train_speed(iter/s)": 1.217412 + }, + { + "epoch": 1.24896, + 
"grad_norm": 0.6838903890398006, + "learning_rate": 3.379404166310316e-06, + "loss": 0.33248063921928406, + "memory(GiB)": 77.0, + "step": 3903, + "token_acc": 0.9074290159471023, + "train_speed(iter/s)": 1.216173 + }, + { + "epoch": 1.24928, + "grad_norm": 0.6802921115791783, + "learning_rate": 3.3785785972993757e-06, + "loss": 0.3398609757423401, + "memory(GiB)": 77.0, + "step": 3904, + "token_acc": 0.9306395163856188, + "train_speed(iter/s)": 1.214996 + }, + { + "epoch": 1.2496, + "grad_norm": 0.6172609107304672, + "learning_rate": 3.3777529189645695e-06, + "loss": 0.4033782184123993, + "memory(GiB)": 77.0, + "step": 3905, + "token_acc": 0.916364650125453, + "train_speed(iter/s)": 1.213489 + }, + { + "epoch": 1.24992, + "grad_norm": 0.7212025181323908, + "learning_rate": 3.37692713140864e-06, + "loss": 0.4815841615200043, + "memory(GiB)": 77.0, + "step": 3906, + "token_acc": 0.9216104926033247, + "train_speed(iter/s)": 1.212272 + }, + { + "epoch": 1.25024, + "grad_norm": 0.651920317630241, + "learning_rate": 3.376101234734341e-06, + "loss": 0.39142104983329773, + "memory(GiB)": 77.0, + "step": 3907, + "token_acc": 0.8918145392100744, + "train_speed(iter/s)": 1.211055 + }, + { + "epoch": 1.2505600000000001, + "grad_norm": 0.6928575979650411, + "learning_rate": 3.3752752290444408e-06, + "loss": 0.36188429594039917, + "memory(GiB)": 77.0, + "step": 3908, + "token_acc": 0.8654404646660213, + "train_speed(iter/s)": 1.209874 + }, + { + "epoch": 1.25088, + "grad_norm": 0.6488011111318026, + "learning_rate": 3.3744491144417225e-06, + "loss": 0.4036030173301697, + "memory(GiB)": 77.0, + "step": 3909, + "token_acc": 0.9208860759493671, + "train_speed(iter/s)": 1.208644 + }, + { + "epoch": 1.2511999999999999, + "grad_norm": 0.6715371452074477, + "learning_rate": 3.3736228910289816e-06, + "loss": 0.3274642825126648, + "memory(GiB)": 77.0, + "step": 3910, + "token_acc": 0.9233870967741935, + "train_speed(iter/s)": 1.207369 + }, + { + "epoch": 1.25152, + "grad_norm": 0.6137219857027643, + "learning_rate": 3.372796558909026e-06, + "loss": 0.3262202739715576, + "memory(GiB)": 77.0, + "step": 3911, + "token_acc": 0.887224517906336, + "train_speed(iter/s)": 1.206054 + }, + { + "epoch": 1.25184, + "grad_norm": 0.5884764573753453, + "learning_rate": 3.37197011818468e-06, + "loss": 0.34371957182884216, + "memory(GiB)": 77.0, + "step": 3912, + "token_acc": 0.9009392167286904, + "train_speed(iter/s)": 1.204675 + }, + { + "epoch": 1.25216, + "grad_norm": 0.627354228838583, + "learning_rate": 3.37114356895878e-06, + "loss": 0.2758227586746216, + "memory(GiB)": 77.0, + "step": 3913, + "token_acc": 0.9379148770011714, + "train_speed(iter/s)": 1.203462 + }, + { + "epoch": 1.25248, + "grad_norm": 0.6442857936322051, + "learning_rate": 3.370316911334174e-06, + "loss": 0.3893001079559326, + "memory(GiB)": 77.0, + "step": 3914, + "token_acc": 0.9225199131064447, + "train_speed(iter/s)": 1.20225 + }, + { + "epoch": 1.2528000000000001, + "grad_norm": 0.6463072825054467, + "learning_rate": 3.3694901454137263e-06, + "loss": 0.3045895993709564, + "memory(GiB)": 77.0, + "step": 3915, + "token_acc": 0.9046403712296984, + "train_speed(iter/s)": 1.201012 + }, + { + "epoch": 1.25312, + "grad_norm": 0.7058051494569297, + "learning_rate": 3.3686632713003137e-06, + "loss": 0.36030113697052, + "memory(GiB)": 77.0, + "step": 3916, + "token_acc": 0.8953900709219859, + "train_speed(iter/s)": 1.199837 + }, + { + "epoch": 1.2534399999999999, + "grad_norm": 0.5846413937186242, + "learning_rate": 3.3678362890968256e-06, + "loss": 
0.30428487062454224, + "memory(GiB)": 77.0, + "step": 3917, + "token_acc": 0.869150626876214, + "train_speed(iter/s)": 1.198611 + }, + { + "epoch": 1.25376, + "grad_norm": 0.5956703383621621, + "learning_rate": 3.3670091989061667e-06, + "loss": 0.3591436445713043, + "memory(GiB)": 77.0, + "step": 3918, + "token_acc": 0.8301976661109788, + "train_speed(iter/s)": 1.197218 + }, + { + "epoch": 1.25408, + "grad_norm": 0.6970886436537195, + "learning_rate": 3.3661820008312527e-06, + "loss": 0.3400678038597107, + "memory(GiB)": 77.0, + "step": 3919, + "token_acc": 0.9080107320812572, + "train_speed(iter/s)": 1.196093 + }, + { + "epoch": 1.2544, + "grad_norm": 0.6981974744503445, + "learning_rate": 3.3653546949750147e-06, + "loss": 0.30971014499664307, + "memory(GiB)": 77.0, + "step": 3920, + "token_acc": 0.8664915736485007, + "train_speed(iter/s)": 1.194987 + }, + { + "epoch": 1.25472, + "grad_norm": 0.6500367368729446, + "learning_rate": 3.3645272814403973e-06, + "loss": 0.38218095898628235, + "memory(GiB)": 77.0, + "step": 3921, + "token_acc": 0.9312267657992565, + "train_speed(iter/s)": 1.193817 + }, + { + "epoch": 1.25504, + "grad_norm": 0.6634909971714776, + "learning_rate": 3.363699760330357e-06, + "loss": 0.3938519358634949, + "memory(GiB)": 77.0, + "step": 3922, + "token_acc": 0.8575055537924469, + "train_speed(iter/s)": 1.192605 + }, + { + "epoch": 1.25536, + "grad_norm": 0.697038699643941, + "learning_rate": 3.3628721317478634e-06, + "loss": 0.4190281629562378, + "memory(GiB)": 77.0, + "step": 3923, + "token_acc": 0.8931102920328892, + "train_speed(iter/s)": 1.191451 + }, + { + "epoch": 1.25568, + "grad_norm": 0.7298060989088981, + "learning_rate": 3.3620443957959026e-06, + "loss": 0.30948787927627563, + "memory(GiB)": 77.0, + "step": 3924, + "token_acc": 0.9474966170500677, + "train_speed(iter/s)": 1.190352 + }, + { + "epoch": 1.256, + "grad_norm": 0.7139405646723114, + "learning_rate": 3.3612165525774702e-06, + "loss": 0.34068313241004944, + "memory(GiB)": 77.0, + "step": 3925, + "token_acc": 0.9180101335789959, + "train_speed(iter/s)": 1.189194 + }, + { + "epoch": 1.25632, + "grad_norm": 0.6800517581408575, + "learning_rate": 3.3603886021955778e-06, + "loss": 0.38521549105644226, + "memory(GiB)": 77.0, + "step": 3926, + "token_acc": 0.9359521776259607, + "train_speed(iter/s)": 1.188085 + }, + { + "epoch": 1.25664, + "grad_norm": 0.6435038750673191, + "learning_rate": 3.35956054475325e-06, + "loss": 0.3604387044906616, + "memory(GiB)": 77.0, + "step": 3927, + "token_acc": 0.8212276214833759, + "train_speed(iter/s)": 1.186951 + }, + { + "epoch": 1.25696, + "grad_norm": 0.6842188610907509, + "learning_rate": 3.358732380353523e-06, + "loss": 0.39333418011665344, + "memory(GiB)": 77.0, + "step": 3928, + "token_acc": 0.870277353133872, + "train_speed(iter/s)": 1.185743 + }, + { + "epoch": 1.25728, + "grad_norm": 0.6678340912151677, + "learning_rate": 3.3579041090994485e-06, + "loss": 0.38020047545433044, + "memory(GiB)": 77.0, + "step": 3929, + "token_acc": 0.8282647584973166, + "train_speed(iter/s)": 1.184647 + }, + { + "epoch": 1.2576, + "grad_norm": 0.6566254086737978, + "learning_rate": 3.35707573109409e-06, + "loss": 0.38160407543182373, + "memory(GiB)": 77.0, + "step": 3930, + "token_acc": 0.9004417261297996, + "train_speed(iter/s)": 1.183505 + }, + { + "epoch": 1.25792, + "grad_norm": 0.6080816492641322, + "learning_rate": 3.3562472464405243e-06, + "loss": 0.26031017303466797, + "memory(GiB)": 77.0, + "step": 3931, + "token_acc": 0.9084103918978423, + "train_speed(iter/s)": 1.182412 
+ }, + { + "epoch": 1.25824, + "grad_norm": 0.6427468571666061, + "learning_rate": 3.355418655241843e-06, + "loss": 0.2930710017681122, + "memory(GiB)": 77.0, + "step": 3932, + "token_acc": 0.8663682864450127, + "train_speed(iter/s)": 1.181252 + }, + { + "epoch": 1.2585600000000001, + "grad_norm": 0.670667933658421, + "learning_rate": 3.3545899576011485e-06, + "loss": 0.27161452174186707, + "memory(GiB)": 77.0, + "step": 3933, + "token_acc": 0.9373661670235546, + "train_speed(iter/s)": 1.180169 + }, + { + "epoch": 1.25888, + "grad_norm": 0.7325969533306131, + "learning_rate": 3.3537611536215595e-06, + "loss": 0.4048638939857483, + "memory(GiB)": 77.0, + "step": 3934, + "token_acc": 0.9093198992443325, + "train_speed(iter/s)": 1.179062 + }, + { + "epoch": 1.2591999999999999, + "grad_norm": 0.6586552521727528, + "learning_rate": 3.3529322434062055e-06, + "loss": 0.3438819944858551, + "memory(GiB)": 77.0, + "step": 3935, + "token_acc": 0.9030236992645055, + "train_speed(iter/s)": 1.177987 + }, + { + "epoch": 1.25952, + "grad_norm": 0.7067623851347139, + "learning_rate": 3.3521032270582297e-06, + "loss": 0.2811596691608429, + "memory(GiB)": 77.0, + "step": 3936, + "token_acc": 0.8838694957791468, + "train_speed(iter/s)": 1.176708 + }, + { + "epoch": 1.25984, + "grad_norm": 0.6830720677128685, + "learning_rate": 3.3512741046807897e-06, + "loss": 0.3460581302642822, + "memory(GiB)": 77.0, + "step": 3937, + "token_acc": 0.9121132323897301, + "train_speed(iter/s)": 1.175588 + }, + { + "epoch": 1.26016, + "grad_norm": 0.6650698851218196, + "learning_rate": 3.3504448763770536e-06, + "loss": 0.3304103910923004, + "memory(GiB)": 77.0, + "step": 3938, + "token_acc": 0.8906442347148132, + "train_speed(iter/s)": 1.174484 + }, + { + "epoch": 1.26048, + "grad_norm": 0.667794867520719, + "learning_rate": 3.3496155422502063e-06, + "loss": 0.3703429698944092, + "memory(GiB)": 77.0, + "step": 3939, + "token_acc": 0.9414414414414415, + "train_speed(iter/s)": 1.173325 + }, + { + "epoch": 1.2608, + "grad_norm": 0.6129945519504707, + "learning_rate": 3.3487861024034433e-06, + "loss": 0.31261852383613586, + "memory(GiB)": 77.0, + "step": 3940, + "token_acc": 0.9474916387959866, + "train_speed(iter/s)": 1.172211 + }, + { + "epoch": 1.26112, + "grad_norm": 0.65056888386597, + "learning_rate": 3.347956556939974e-06, + "loss": 0.27189186215400696, + "memory(GiB)": 77.0, + "step": 3941, + "token_acc": 0.9446518778827147, + "train_speed(iter/s)": 1.171103 + }, + { + "epoch": 1.26144, + "grad_norm": 0.6728383830651122, + "learning_rate": 3.347126905963022e-06, + "loss": 0.3346743583679199, + "memory(GiB)": 77.0, + "step": 3942, + "token_acc": 0.9424591156874621, + "train_speed(iter/s)": 1.170033 + }, + { + "epoch": 1.26176, + "grad_norm": 0.7075402862175073, + "learning_rate": 3.346297149575821e-06, + "loss": 0.41477277874946594, + "memory(GiB)": 77.0, + "step": 3943, + "token_acc": 0.8037735849056604, + "train_speed(iter/s)": 1.168956 + }, + { + "epoch": 1.26208, + "grad_norm": 0.6431186918868984, + "learning_rate": 3.3454672878816207e-06, + "loss": 0.3790450394153595, + "memory(GiB)": 77.0, + "step": 3944, + "token_acc": 0.9506531204644412, + "train_speed(iter/s)": 1.167817 + }, + { + "epoch": 1.2624, + "grad_norm": 0.6814585039932206, + "learning_rate": 3.344637320983683e-06, + "loss": 0.31726598739624023, + "memory(GiB)": 77.0, + "step": 3945, + "token_acc": 0.9236939933476814, + "train_speed(iter/s)": 1.16662 + }, + { + "epoch": 1.26272, + "grad_norm": 0.6415093603270662, + "learning_rate": 3.3438072489852837e-06, + 
"loss": 0.36279094219207764, + "memory(GiB)": 77.0, + "step": 3946, + "token_acc": 0.8879910213243547, + "train_speed(iter/s)": 1.165472 + }, + { + "epoch": 1.26304, + "grad_norm": 0.6700400254595384, + "learning_rate": 3.3429770719897092e-06, + "loss": 0.42554497718811035, + "memory(GiB)": 77.0, + "step": 3947, + "token_acc": 0.9000876424189308, + "train_speed(iter/s)": 1.164364 + }, + { + "epoch": 1.26336, + "grad_norm": 0.6537569983189833, + "learning_rate": 3.3421467901002623e-06, + "loss": 0.3096514046192169, + "memory(GiB)": 77.0, + "step": 3948, + "token_acc": 0.924674605372473, + "train_speed(iter/s)": 1.163256 + }, + { + "epoch": 1.26368, + "grad_norm": 0.6401679135472789, + "learning_rate": 3.341316403420257e-06, + "loss": 0.21542039513587952, + "memory(GiB)": 77.0, + "step": 3949, + "token_acc": 0.9452089704383282, + "train_speed(iter/s)": 1.162209 + }, + { + "epoch": 1.264, + "grad_norm": 0.6525133896921222, + "learning_rate": 3.3404859120530194e-06, + "loss": 0.3568952679634094, + "memory(GiB)": 77.0, + "step": 3950, + "token_acc": 0.879181338028169, + "train_speed(iter/s)": 1.161026 + }, + { + "epoch": 1.26432, + "grad_norm": 0.6813316683133949, + "learning_rate": 3.3396553161018897e-06, + "loss": 0.3022121787071228, + "memory(GiB)": 77.0, + "step": 3951, + "token_acc": 0.9349363507779349, + "train_speed(iter/s)": 1.159958 + }, + { + "epoch": 1.26464, + "grad_norm": 0.6807650857134679, + "learning_rate": 3.338824615670222e-06, + "loss": 0.23641644418239594, + "memory(GiB)": 77.0, + "step": 3952, + "token_acc": 0.8731884057971014, + "train_speed(iter/s)": 1.15891 + }, + { + "epoch": 1.2649599999999999, + "grad_norm": 0.6936063435712402, + "learning_rate": 3.3379938108613824e-06, + "loss": 0.3820550739765167, + "memory(GiB)": 77.0, + "step": 3953, + "token_acc": 0.8333333333333334, + "train_speed(iter/s)": 1.157876 + }, + { + "epoch": 1.26528, + "grad_norm": 0.6565003173824329, + "learning_rate": 3.33716290177875e-06, + "loss": 0.34699147939682007, + "memory(GiB)": 77.0, + "step": 3954, + "token_acc": 0.8895866038723181, + "train_speed(iter/s)": 1.15676 + }, + { + "epoch": 1.2656, + "grad_norm": 0.677285712604112, + "learning_rate": 3.336331888525717e-06, + "loss": 0.3453511595726013, + "memory(GiB)": 77.0, + "step": 3955, + "token_acc": 0.9316091954022988, + "train_speed(iter/s)": 1.155628 + }, + { + "epoch": 1.26592, + "grad_norm": 0.6250409401525516, + "learning_rate": 3.3355007712056897e-06, + "loss": 0.4097975492477417, + "memory(GiB)": 77.0, + "step": 3956, + "token_acc": 0.8037088183073584, + "train_speed(iter/s)": 1.154474 + }, + { + "epoch": 1.26624, + "grad_norm": 0.6569102001366449, + "learning_rate": 3.3346695499220843e-06, + "loss": 0.33327150344848633, + "memory(GiB)": 77.0, + "step": 3957, + "token_acc": 0.8740648379052369, + "train_speed(iter/s)": 1.153379 + }, + { + "epoch": 1.2665600000000001, + "grad_norm": 0.6279149790608823, + "learning_rate": 3.333838224778333e-06, + "loss": 0.39195334911346436, + "memory(GiB)": 77.0, + "step": 3958, + "token_acc": 0.9165687426556992, + "train_speed(iter/s)": 1.152306 + }, + { + "epoch": 1.26688, + "grad_norm": 0.6154013723254175, + "learning_rate": 3.333006795877879e-06, + "loss": 0.32724010944366455, + "memory(GiB)": 77.0, + "step": 3959, + "token_acc": 0.9373101353217344, + "train_speed(iter/s)": 1.151168 + }, + { + "epoch": 1.2671999999999999, + "grad_norm": 0.6848210660897892, + "learning_rate": 3.33217526332418e-06, + "loss": 0.37738704681396484, + "memory(GiB)": 77.0, + "step": 3960, + "token_acc": 
0.8584029712163417, + "train_speed(iter/s)": 1.150097 + }, + { + "epoch": 1.26752, + "grad_norm": 0.6528257105639014, + "learning_rate": 3.3313436272207056e-06, + "loss": 0.31061381101608276, + "memory(GiB)": 77.0, + "step": 3961, + "token_acc": 0.9212541013488881, + "train_speed(iter/s)": 1.149063 + }, + { + "epoch": 1.26784, + "grad_norm": 0.6117175348841205, + "learning_rate": 3.3305118876709388e-06, + "loss": 0.34103283286094666, + "memory(GiB)": 77.0, + "step": 3962, + "token_acc": 0.8419967941378521, + "train_speed(iter/s)": 1.148023 + }, + { + "epoch": 1.26816, + "grad_norm": 0.6629543158714515, + "learning_rate": 3.329680044778374e-06, + "loss": 0.35095274448394775, + "memory(GiB)": 77.0, + "step": 3963, + "token_acc": 0.873898678414097, + "train_speed(iter/s)": 1.146886 + }, + { + "epoch": 1.26848, + "grad_norm": 0.6520518002151605, + "learning_rate": 3.3288480986465216e-06, + "loss": 0.2650361657142639, + "memory(GiB)": 77.0, + "step": 3964, + "token_acc": 0.9099001888319396, + "train_speed(iter/s)": 1.145863 + }, + { + "epoch": 1.2688, + "grad_norm": 0.7116634606842843, + "learning_rate": 3.328016049378901e-06, + "loss": 0.32933682203292847, + "memory(GiB)": 77.0, + "step": 3965, + "token_acc": 0.928897586431833, + "train_speed(iter/s)": 1.144795 + }, + { + "epoch": 1.26912, + "grad_norm": 0.6987217928712974, + "learning_rate": 3.3271838970790473e-06, + "loss": 0.31151440739631653, + "memory(GiB)": 77.0, + "step": 3966, + "token_acc": 0.9358172423171064, + "train_speed(iter/s)": 1.143769 + }, + { + "epoch": 1.26944, + "grad_norm": 0.6147760557402959, + "learning_rate": 3.326351641850507e-06, + "loss": 0.26970937848091125, + "memory(GiB)": 77.0, + "step": 3967, + "token_acc": 0.9408825978351374, + "train_speed(iter/s)": 1.142686 + }, + { + "epoch": 1.26976, + "grad_norm": 0.7296039342015364, + "learning_rate": 3.3255192837968394e-06, + "loss": 0.45443588495254517, + "memory(GiB)": 77.0, + "step": 3968, + "token_acc": 0.8791798877227239, + "train_speed(iter/s)": 1.141682 + }, + { + "epoch": 1.27008, + "grad_norm": 0.6183479373174984, + "learning_rate": 3.324686823021618e-06, + "loss": 0.3025016188621521, + "memory(GiB)": 77.0, + "step": 3969, + "token_acc": 0.9192307692307692, + "train_speed(iter/s)": 1.14065 + }, + { + "epoch": 1.2704, + "grad_norm": 0.6587283974075455, + "learning_rate": 3.323854259628427e-06, + "loss": 0.36988869309425354, + "memory(GiB)": 77.0, + "step": 3970, + "token_acc": 0.8827133479212254, + "train_speed(iter/s)": 1.139606 + }, + { + "epoch": 1.27072, + "grad_norm": 0.6841616380225948, + "learning_rate": 3.3230215937208666e-06, + "loss": 0.32514268159866333, + "memory(GiB)": 77.0, + "step": 3971, + "token_acc": 0.8920932468156693, + "train_speed(iter/s)": 1.138615 + }, + { + "epoch": 1.27104, + "grad_norm": 0.6401966039519809, + "learning_rate": 3.3221888254025454e-06, + "loss": 0.2612246870994568, + "memory(GiB)": 77.0, + "step": 3972, + "token_acc": 0.9524804177545692, + "train_speed(iter/s)": 1.137574 + }, + { + "epoch": 1.27136, + "grad_norm": 0.6283808295425648, + "learning_rate": 3.3213559547770873e-06, + "loss": 0.3781798779964447, + "memory(GiB)": 77.0, + "step": 3973, + "token_acc": 0.8875408306112926, + "train_speed(iter/s)": 1.13652 + }, + { + "epoch": 1.27168, + "grad_norm": 0.6215286632456133, + "learning_rate": 3.3205229819481293e-06, + "loss": 0.3052491247653961, + "memory(GiB)": 77.0, + "step": 3974, + "token_acc": 0.8776508972267537, + "train_speed(iter/s)": 1.135453 + }, + { + "epoch": 1.272, + "grad_norm": 0.6183169085544713, + 
"learning_rate": 3.31968990701932e-06, + "loss": 0.3104320466518402, + "memory(GiB)": 77.0, + "step": 3975, + "token_acc": 0.8843384338433843, + "train_speed(iter/s)": 1.134384 + }, + { + "epoch": 1.2723200000000001, + "grad_norm": 0.6076792315497808, + "learning_rate": 3.3188567300943214e-06, + "loss": 0.31072521209716797, + "memory(GiB)": 77.0, + "step": 3976, + "token_acc": 0.890864636585908, + "train_speed(iter/s)": 1.133403 + }, + { + "epoch": 1.27264, + "grad_norm": 0.7111046673898631, + "learning_rate": 3.3180234512768077e-06, + "loss": 0.3871678411960602, + "memory(GiB)": 77.0, + "step": 3977, + "token_acc": 0.9216485507246377, + "train_speed(iter/s)": 1.132385 + }, + { + "epoch": 1.2729599999999999, + "grad_norm": 0.6053606542036305, + "learning_rate": 3.3171900706704668e-06, + "loss": 0.4236569404602051, + "memory(GiB)": 77.0, + "step": 3978, + "token_acc": 0.8421432003844306, + "train_speed(iter/s)": 1.131311 + }, + { + "epoch": 1.27328, + "grad_norm": 0.6467246039858909, + "learning_rate": 3.316356588378997e-06, + "loss": 0.343875527381897, + "memory(GiB)": 77.0, + "step": 3979, + "token_acc": 0.9335418295543393, + "train_speed(iter/s)": 1.130213 + }, + { + "epoch": 1.2736, + "grad_norm": 0.6271996738650594, + "learning_rate": 3.3155230045061114e-06, + "loss": 0.3758961260318756, + "memory(GiB)": 77.0, + "step": 3980, + "token_acc": 0.808768656716418, + "train_speed(iter/s)": 1.129207 + }, + { + "epoch": 1.27392, + "grad_norm": 0.6490751514557322, + "learning_rate": 3.3146893191555353e-06, + "loss": 0.3778948485851288, + "memory(GiB)": 77.0, + "step": 3981, + "token_acc": 0.9585476550680787, + "train_speed(iter/s)": 1.128207 + }, + { + "epoch": 1.27424, + "grad_norm": 0.6631570507936164, + "learning_rate": 3.313855532431006e-06, + "loss": 0.3700881004333496, + "memory(GiB)": 77.0, + "step": 3982, + "token_acc": 0.8367809577784074, + "train_speed(iter/s)": 1.12725 + }, + { + "epoch": 1.2745600000000001, + "grad_norm": 0.6887042768371968, + "learning_rate": 3.3130216444362745e-06, + "loss": 0.39038175344467163, + "memory(GiB)": 77.0, + "step": 3983, + "token_acc": 0.8995354239256679, + "train_speed(iter/s)": 1.126238 + }, + { + "epoch": 1.27488, + "grad_norm": 0.6419663540970602, + "learning_rate": 3.3121876552751024e-06, + "loss": 0.30908799171447754, + "memory(GiB)": 77.0, + "step": 3984, + "token_acc": 0.9195625759416768, + "train_speed(iter/s)": 1.125218 + }, + { + "epoch": 1.2752, + "grad_norm": 0.6461217333145511, + "learning_rate": 3.311353565051267e-06, + "loss": 0.43647634983062744, + "memory(GiB)": 77.0, + "step": 3985, + "token_acc": 0.8481091134531928, + "train_speed(iter/s)": 1.124136 + }, + { + "epoch": 1.27552, + "grad_norm": 0.6747014717418117, + "learning_rate": 3.310519373868554e-06, + "loss": 0.3206164240837097, + "memory(GiB)": 77.0, + "step": 3986, + "token_acc": 0.9302130044843049, + "train_speed(iter/s)": 1.123161 + }, + { + "epoch": 1.27584, + "grad_norm": 0.6656520825311997, + "learning_rate": 3.3096850818307664e-06, + "loss": 0.36462369561195374, + "memory(GiB)": 77.0, + "step": 3987, + "token_acc": 0.8626386594288412, + "train_speed(iter/s)": 1.122111 + }, + { + "epoch": 1.27616, + "grad_norm": 0.6394864435363005, + "learning_rate": 3.308850689041716e-06, + "loss": 0.2976624369621277, + "memory(GiB)": 77.0, + "step": 3988, + "token_acc": 0.91203007518797, + "train_speed(iter/s)": 1.121143 + }, + { + "epoch": 1.27648, + "grad_norm": 0.6644026047308299, + "learning_rate": 3.3080161956052282e-06, + "loss": 0.2951023578643799, + "memory(GiB)": 77.0, + 
"step": 3989, + "token_acc": 0.8487856388595565, + "train_speed(iter/s)": 1.120199 + }, + { + "epoch": 1.2768, + "grad_norm": 0.6615704981716821, + "learning_rate": 3.3071816016251424e-06, + "loss": 0.3334430456161499, + "memory(GiB)": 77.0, + "step": 3990, + "token_acc": 0.9157051282051282, + "train_speed(iter/s)": 1.119264 + }, + { + "epoch": 1.27712, + "grad_norm": 0.737281125654343, + "learning_rate": 3.306346907205309e-06, + "loss": 0.35917186737060547, + "memory(GiB)": 77.0, + "step": 3991, + "token_acc": 0.9018094731240022, + "train_speed(iter/s)": 1.118249 + }, + { + "epoch": 1.27744, + "grad_norm": 0.6091529875720036, + "learning_rate": 3.3055121124495905e-06, + "loss": 0.28836530447006226, + "memory(GiB)": 77.0, + "step": 3992, + "token_acc": 0.8997105045492142, + "train_speed(iter/s)": 1.117274 + }, + { + "epoch": 1.27776, + "grad_norm": 0.687345267619497, + "learning_rate": 3.3046772174618635e-06, + "loss": 0.3031139373779297, + "memory(GiB)": 77.0, + "step": 3993, + "token_acc": 0.9188206785137318, + "train_speed(iter/s)": 1.116345 + }, + { + "epoch": 1.27808, + "grad_norm": 0.6294309945895665, + "learning_rate": 3.303842222346016e-06, + "loss": 0.35058462619781494, + "memory(GiB)": 77.0, + "step": 3994, + "token_acc": 0.9152783421373425, + "train_speed(iter/s)": 1.115353 + }, + { + "epoch": 1.2784, + "grad_norm": 0.7092376090207998, + "learning_rate": 3.303007127205948e-06, + "loss": 0.3344593644142151, + "memory(GiB)": 77.0, + "step": 3995, + "token_acc": 0.9032690246516613, + "train_speed(iter/s)": 1.114416 + }, + { + "epoch": 1.27872, + "grad_norm": 0.6961029590489842, + "learning_rate": 3.3021719321455738e-06, + "loss": 0.35440847277641296, + "memory(GiB)": 77.0, + "step": 3996, + "token_acc": 0.9416110581506196, + "train_speed(iter/s)": 1.113448 + }, + { + "epoch": 1.27904, + "grad_norm": 0.6492468206925586, + "learning_rate": 3.301336637268817e-06, + "loss": 0.33544284105300903, + "memory(GiB)": 77.0, + "step": 3997, + "token_acc": 0.8632478632478633, + "train_speed(iter/s)": 1.112523 + }, + { + "epoch": 1.27936, + "grad_norm": 0.6776002842588965, + "learning_rate": 3.3005012426796178e-06, + "loss": 0.28290754556655884, + "memory(GiB)": 77.0, + "step": 3998, + "token_acc": 0.8828828828828829, + "train_speed(iter/s)": 1.111603 + }, + { + "epoch": 1.27968, + "grad_norm": 0.6515210286822539, + "learning_rate": 3.2996657484819255e-06, + "loss": 0.3924657702445984, + "memory(GiB)": 77.0, + "step": 3999, + "token_acc": 0.8859464339452302, + "train_speed(iter/s)": 1.110613 + }, + { + "epoch": 1.28, + "grad_norm": 0.6390812286227316, + "learning_rate": 3.298830154779703e-06, + "loss": 0.35645192861557007, + "memory(GiB)": 77.0, + "step": 4000, + "token_acc": 0.841828921461387, + "train_speed(iter/s)": 1.109666 + }, + { + "epoch": 1.2803200000000001, + "grad_norm": 0.661495165096306, + "learning_rate": 3.2979944616769256e-06, + "loss": 0.30253875255584717, + "memory(GiB)": 77.0, + "step": 4001, + "token_acc": 0.8545632698768197, + "train_speed(iter/s)": 1.108693 + }, + { + "epoch": 1.28064, + "grad_norm": 0.666802979421398, + "learning_rate": 3.2971586692775797e-06, + "loss": 0.40206390619277954, + "memory(GiB)": 77.0, + "step": 4002, + "token_acc": 0.8724050632911392, + "train_speed(iter/s)": 1.107712 + }, + { + "epoch": 1.2809599999999999, + "grad_norm": 0.5982935763872026, + "learning_rate": 3.2963227776856664e-06, + "loss": 0.307683527469635, + "memory(GiB)": 77.0, + "step": 4003, + "token_acc": 0.8864994026284349, + "train_speed(iter/s)": 1.106704 + }, + { + "epoch": 
1.28128, + "grad_norm": 0.6485850741401237, + "learning_rate": 3.2954867870051975e-06, + "loss": 0.3266999423503876, + "memory(GiB)": 77.0, + "step": 4004, + "token_acc": 0.9524284763805722, + "train_speed(iter/s)": 1.105752 + }, + { + "epoch": 1.2816, + "grad_norm": 0.6091503409871322, + "learning_rate": 3.2946506973401983e-06, + "loss": 0.32524001598358154, + "memory(GiB)": 77.0, + "step": 4005, + "token_acc": 0.927599189110918, + "train_speed(iter/s)": 1.104771 + }, + { + "epoch": 1.28192, + "grad_norm": 0.6864882579127106, + "learning_rate": 3.2938145087947043e-06, + "loss": 0.36349231004714966, + "memory(GiB)": 77.0, + "step": 4006, + "token_acc": 0.8755760368663594, + "train_speed(iter/s)": 1.103823 + }, + { + "epoch": 1.28224, + "grad_norm": 0.9586361466186848, + "learning_rate": 3.2929782214727657e-06, + "loss": 0.33288636803627014, + "memory(GiB)": 77.0, + "step": 4007, + "token_acc": 0.8987196323046619, + "train_speed(iter/s)": 1.102867 + }, + { + "epoch": 1.28256, + "grad_norm": 0.6217857739087825, + "learning_rate": 3.2921418354784428e-06, + "loss": 0.30541670322418213, + "memory(GiB)": 77.0, + "step": 4008, + "token_acc": 0.8731600375822111, + "train_speed(iter/s)": 1.101943 + }, + { + "epoch": 1.28288, + "grad_norm": 0.6573590725536531, + "learning_rate": 3.2913053509158106e-06, + "loss": 0.36613738536834717, + "memory(GiB)": 77.0, + "step": 4009, + "token_acc": 0.8282407407407407, + "train_speed(iter/s)": 1.101048 + }, + { + "epoch": 1.2832, + "grad_norm": 0.6510191735497518, + "learning_rate": 3.2904687678889547e-06, + "loss": 0.33787351846694946, + "memory(GiB)": 77.0, + "step": 4010, + "token_acc": 0.8519540229885058, + "train_speed(iter/s)": 1.100083 + }, + { + "epoch": 1.28352, + "grad_norm": 0.6816849333865921, + "learning_rate": 3.2896320865019724e-06, + "loss": 0.31807130575180054, + "memory(GiB)": 77.0, + "step": 4011, + "token_acc": 0.9143631436314363, + "train_speed(iter/s)": 1.099084 + }, + { + "epoch": 1.28384, + "grad_norm": 0.6245717308522846, + "learning_rate": 3.288795306858976e-06, + "loss": 0.32444146275520325, + "memory(GiB)": 77.0, + "step": 4012, + "token_acc": 0.8903763576660773, + "train_speed(iter/s)": 1.098036 + }, + { + "epoch": 1.28416, + "grad_norm": 0.6430853362099972, + "learning_rate": 3.287958429064087e-06, + "loss": 0.34661298990249634, + "memory(GiB)": 77.0, + "step": 4013, + "token_acc": 0.9125854214123007, + "train_speed(iter/s)": 1.097054 + }, + { + "epoch": 1.28448, + "grad_norm": 0.6838477119479949, + "learning_rate": 3.2871214532214407e-06, + "loss": 0.43954774737358093, + "memory(GiB)": 77.0, + "step": 4014, + "token_acc": 0.8871103622577927, + "train_speed(iter/s)": 1.096114 + }, + { + "epoch": 1.2848, + "grad_norm": 0.6668640481223094, + "learning_rate": 3.286284379435184e-06, + "loss": 0.38646399974823, + "memory(GiB)": 77.0, + "step": 4015, + "token_acc": 0.8856497175141242, + "train_speed(iter/s)": 1.095206 + }, + { + "epoch": 1.28512, + "grad_norm": 0.6645303476250574, + "learning_rate": 3.2854472078094757e-06, + "loss": 0.32107317447662354, + "memory(GiB)": 77.0, + "step": 4016, + "token_acc": 0.8768745067087609, + "train_speed(iter/s)": 1.094283 + }, + { + "epoch": 1.28544, + "grad_norm": 0.6247517316537582, + "learning_rate": 3.284609938448488e-06, + "loss": 0.28501448035240173, + "memory(GiB)": 77.0, + "step": 4017, + "token_acc": 0.9260396975425331, + "train_speed(iter/s)": 1.093327 + }, + { + "epoch": 1.28576, + "grad_norm": 0.6462638043879676, + "learning_rate": 3.2837725714564046e-06, + "loss": 0.32575470209121704, + 
"memory(GiB)": 77.0, + "step": 4018, + "token_acc": 0.8426966292134831, + "train_speed(iter/s)": 1.092378 + }, + { + "epoch": 1.2860800000000001, + "grad_norm": 0.6976628928824211, + "learning_rate": 3.282935106937421e-06, + "loss": 0.41661977767944336, + "memory(GiB)": 77.0, + "step": 4019, + "token_acc": 0.9085106382978724, + "train_speed(iter/s)": 1.091395 + }, + { + "epoch": 1.2864, + "grad_norm": 0.6480149359272748, + "learning_rate": 3.2820975449957455e-06, + "loss": 0.32194578647613525, + "memory(GiB)": 77.0, + "step": 4020, + "token_acc": 0.9062992125984252, + "train_speed(iter/s)": 1.090459 + }, + { + "epoch": 1.2867199999999999, + "grad_norm": 0.6249047830609541, + "learning_rate": 3.281259885735598e-06, + "loss": 0.32805076241493225, + "memory(GiB)": 77.0, + "step": 4021, + "token_acc": 0.9333154073675719, + "train_speed(iter/s)": 1.089519 + }, + { + "epoch": 1.28704, + "grad_norm": 0.6396497117608053, + "learning_rate": 3.2804221292612102e-06, + "loss": 0.3881882429122925, + "memory(GiB)": 77.0, + "step": 4022, + "token_acc": 0.8668890236506974, + "train_speed(iter/s)": 1.088543 + }, + { + "epoch": 1.28736, + "grad_norm": 0.6580696181882966, + "learning_rate": 3.279584275676827e-06, + "loss": 0.41309207677841187, + "memory(GiB)": 77.0, + "step": 4023, + "token_acc": 0.8645660585959093, + "train_speed(iter/s)": 1.087614 + }, + { + "epoch": 1.28768, + "grad_norm": 0.6839392544796866, + "learning_rate": 3.2787463250867047e-06, + "loss": 0.38371652364730835, + "memory(GiB)": 77.0, + "step": 4024, + "token_acc": 0.818348623853211, + "train_speed(iter/s)": 1.086674 + }, + { + "epoch": 1.288, + "grad_norm": 0.6208536921793054, + "learning_rate": 3.27790827759511e-06, + "loss": 0.293090283870697, + "memory(GiB)": 77.0, + "step": 4025, + "token_acc": 0.885543369890329, + "train_speed(iter/s)": 1.085704 + }, + { + "epoch": 1.2883200000000001, + "grad_norm": 0.6454768005493343, + "learning_rate": 3.277070133306326e-06, + "loss": 0.2933872938156128, + "memory(GiB)": 77.0, + "step": 4026, + "token_acc": 0.8204948646125116, + "train_speed(iter/s)": 1.084797 + }, + { + "epoch": 1.28864, + "grad_norm": 0.6712282761288919, + "learning_rate": 3.2762318923246437e-06, + "loss": 0.3743235766887665, + "memory(GiB)": 77.0, + "step": 4027, + "token_acc": 0.8868972481696541, + "train_speed(iter/s)": 1.083886 + }, + { + "epoch": 1.2889599999999999, + "grad_norm": 0.6391003414739066, + "learning_rate": 3.275393554754369e-06, + "loss": 0.31071412563323975, + "memory(GiB)": 77.0, + "step": 4028, + "token_acc": 0.9470603884011706, + "train_speed(iter/s)": 1.082987 + }, + { + "epoch": 1.28928, + "grad_norm": 0.6415192325281215, + "learning_rate": 3.2745551206998167e-06, + "loss": 0.4297857880592346, + "memory(GiB)": 77.0, + "step": 4029, + "token_acc": 0.8620397799407533, + "train_speed(iter/s)": 1.082043 + }, + { + "epoch": 1.2896, + "grad_norm": 0.7143853616772495, + "learning_rate": 3.273716590265316e-06, + "loss": 0.3963780999183655, + "memory(GiB)": 77.0, + "step": 4030, + "token_acc": 0.8620689655172413, + "train_speed(iter/s)": 1.081136 + }, + { + "epoch": 1.28992, + "grad_norm": 0.6473446683851458, + "learning_rate": 3.272877963555208e-06, + "loss": 0.33881592750549316, + "memory(GiB)": 77.0, + "step": 4031, + "token_acc": 0.9278350515463918, + "train_speed(iter/s)": 1.080167 + }, + { + "epoch": 1.29024, + "grad_norm": 0.6686968135085642, + "learning_rate": 3.2720392406738446e-06, + "loss": 0.30570748448371887, + "memory(GiB)": 77.0, + "step": 4032, + "token_acc": 0.8922594142259415, + 
"train_speed(iter/s)": 1.079297 + }, + { + "epoch": 1.29056, + "grad_norm": 0.6840842121209171, + "learning_rate": 3.27120042172559e-06, + "loss": 0.2903480529785156, + "memory(GiB)": 77.0, + "step": 4033, + "token_acc": 0.9069577080491132, + "train_speed(iter/s)": 1.078429 + }, + { + "epoch": 1.29088, + "grad_norm": 0.6303524746811029, + "learning_rate": 3.270361506814822e-06, + "loss": 0.2947683334350586, + "memory(GiB)": 77.0, + "step": 4034, + "token_acc": 0.9291206150888995, + "train_speed(iter/s)": 1.077552 + }, + { + "epoch": 1.2912, + "grad_norm": 0.63873493007948, + "learning_rate": 3.2695224960459287e-06, + "loss": 0.3171652555465698, + "memory(GiB)": 77.0, + "step": 4035, + "token_acc": 0.9081494822152184, + "train_speed(iter/s)": 1.076675 + }, + { + "epoch": 1.29152, + "grad_norm": 0.6375411085360772, + "learning_rate": 3.2686833895233096e-06, + "loss": 0.2756361663341522, + "memory(GiB)": 77.0, + "step": 4036, + "token_acc": 0.9129227662796567, + "train_speed(iter/s)": 1.075762 + }, + { + "epoch": 1.29184, + "grad_norm": 0.6413594481799152, + "learning_rate": 3.2678441873513774e-06, + "loss": 0.37594085931777954, + "memory(GiB)": 77.0, + "step": 4037, + "token_acc": 0.8812292358803987, + "train_speed(iter/s)": 1.074846 + }, + { + "epoch": 1.29216, + "grad_norm": 0.6406819550570892, + "learning_rate": 3.2670048896345557e-06, + "loss": 0.3317277431488037, + "memory(GiB)": 77.0, + "step": 4038, + "token_acc": 0.9594306049822064, + "train_speed(iter/s)": 1.073833 + }, + { + "epoch": 1.29248, + "grad_norm": 0.6053483282109189, + "learning_rate": 3.266165496477281e-06, + "loss": 0.3227475881576538, + "memory(GiB)": 77.0, + "step": 4039, + "token_acc": 0.9662612789329149, + "train_speed(iter/s)": 1.072933 + }, + { + "epoch": 1.2928, + "grad_norm": 0.6260219063843341, + "learning_rate": 3.2653260079840015e-06, + "loss": 0.38413405418395996, + "memory(GiB)": 77.0, + "step": 4040, + "token_acc": 0.9591775923718713, + "train_speed(iter/s)": 1.07201 + }, + { + "epoch": 1.29312, + "grad_norm": 0.6458851880829019, + "learning_rate": 3.2644864242591765e-06, + "loss": 0.3202788233757019, + "memory(GiB)": 77.0, + "step": 4041, + "token_acc": 0.9300749197288619, + "train_speed(iter/s)": 1.071122 + }, + { + "epoch": 1.29344, + "grad_norm": 0.6737276287579551, + "learning_rate": 3.263646745407278e-06, + "loss": 0.40275079011917114, + "memory(GiB)": 77.0, + "step": 4042, + "token_acc": 0.904065390406539, + "train_speed(iter/s)": 1.070288 + }, + { + "epoch": 1.29376, + "grad_norm": 0.590951845419185, + "learning_rate": 3.26280697153279e-06, + "loss": 0.28600090742111206, + "memory(GiB)": 77.0, + "step": 4043, + "token_acc": 0.8277495176284863, + "train_speed(iter/s)": 1.069401 + }, + { + "epoch": 1.2940800000000001, + "grad_norm": 0.671606371492321, + "learning_rate": 3.2619671027402066e-06, + "loss": 0.304206907749176, + "memory(GiB)": 77.0, + "step": 4044, + "token_acc": 0.9131607335490831, + "train_speed(iter/s)": 1.068536 + }, + { + "epoch": 1.2944, + "grad_norm": 0.6422833262416642, + "learning_rate": 3.2611271391340356e-06, + "loss": 0.4037313759326935, + "memory(GiB)": 77.0, + "step": 4045, + "token_acc": 0.910377358490566, + "train_speed(iter/s)": 1.067675 + }, + { + "epoch": 1.2947199999999999, + "grad_norm": 0.6078027085430114, + "learning_rate": 3.2602870808187955e-06, + "loss": 0.34495311975479126, + "memory(GiB)": 77.0, + "step": 4046, + "token_acc": 0.901305057096248, + "train_speed(iter/s)": 1.066832 + }, + { + "epoch": 1.29504, + "grad_norm": 0.7123058174888216, + "learning_rate": 
3.2594469278990172e-06, + "loss": 0.3836100399494171, + "memory(GiB)": 77.0, + "step": 4047, + "token_acc": 0.8758901322482198, + "train_speed(iter/s)": 1.065984 + }, + { + "epoch": 1.29536, + "grad_norm": 0.6405667591191925, + "learning_rate": 3.2586066804792425e-06, + "loss": 0.3216109275817871, + "memory(GiB)": 77.0, + "step": 4048, + "token_acc": 0.8242548818088387, + "train_speed(iter/s)": 1.065063 + }, + { + "epoch": 1.29568, + "grad_norm": 0.6893057138021282, + "learning_rate": 3.2577663386640273e-06, + "loss": 0.3495556116104126, + "memory(GiB)": 77.0, + "step": 4049, + "token_acc": 0.8904042864101315, + "train_speed(iter/s)": 1.064225 + }, + { + "epoch": 1.296, + "grad_norm": 0.6095605502043226, + "learning_rate": 3.2569259025579358e-06, + "loss": 0.31805384159088135, + "memory(GiB)": 77.0, + "step": 4050, + "token_acc": 0.847576211894053, + "train_speed(iter/s)": 1.063341 + }, + { + "epoch": 1.29632, + "grad_norm": 0.6624928521219626, + "learning_rate": 3.2560853722655473e-06, + "loss": 0.31227678060531616, + "memory(GiB)": 77.0, + "step": 4051, + "token_acc": 0.9135173141149587, + "train_speed(iter/s)": 1.062443 + }, + { + "epoch": 1.29664, + "grad_norm": 0.5840245900979749, + "learning_rate": 3.2552447478914496e-06, + "loss": 0.3310273587703705, + "memory(GiB)": 77.0, + "step": 4052, + "token_acc": 0.889917967716327, + "train_speed(iter/s)": 1.061529 + }, + { + "epoch": 1.29696, + "grad_norm": 0.5856198899752515, + "learning_rate": 3.2544040295402446e-06, + "loss": 0.3081238865852356, + "memory(GiB)": 77.0, + "step": 4053, + "token_acc": 0.9189033189033189, + "train_speed(iter/s)": 1.060688 + }, + { + "epoch": 1.29728, + "grad_norm": 0.6362250937393266, + "learning_rate": 3.2535632173165455e-06, + "loss": 0.36858901381492615, + "memory(GiB)": 77.0, + "step": 4054, + "token_acc": 0.8390461997019374, + "train_speed(iter/s)": 1.059838 + }, + { + "epoch": 1.2976, + "grad_norm": 0.6993772465275089, + "learning_rate": 3.252722311324976e-06, + "loss": 0.41090908646583557, + "memory(GiB)": 77.0, + "step": 4055, + "token_acc": 0.8340877779964574, + "train_speed(iter/s)": 1.058967 + }, + { + "epoch": 1.29792, + "grad_norm": 0.6650833356876975, + "learning_rate": 3.251881311670173e-06, + "loss": 0.29259374737739563, + "memory(GiB)": 77.0, + "step": 4056, + "token_acc": 0.9455938697318008, + "train_speed(iter/s)": 1.05815 + }, + { + "epoch": 1.29824, + "grad_norm": 0.6214575035994044, + "learning_rate": 3.2510402184567836e-06, + "loss": 0.3125288784503937, + "memory(GiB)": 77.0, + "step": 4057, + "token_acc": 0.942916915720263, + "train_speed(iter/s)": 1.057321 + }, + { + "epoch": 1.29856, + "grad_norm": 0.683687305664165, + "learning_rate": 3.2501990317894675e-06, + "loss": 0.3229261040687561, + "memory(GiB)": 77.0, + "step": 4058, + "token_acc": 0.9271829682196853, + "train_speed(iter/s)": 1.056474 + }, + { + "epoch": 1.29888, + "grad_norm": 0.6472527929141181, + "learning_rate": 3.2493577517728963e-06, + "loss": 0.34359976649284363, + "memory(GiB)": 77.0, + "step": 4059, + "token_acc": 0.9443219404630651, + "train_speed(iter/s)": 1.055645 + }, + { + "epoch": 1.2992, + "grad_norm": 0.6806894799218178, + "learning_rate": 3.2485163785117525e-06, + "loss": 0.32895004749298096, + "memory(GiB)": 77.0, + "step": 4060, + "token_acc": 0.8771568374967809, + "train_speed(iter/s)": 1.054813 + }, + { + "epoch": 1.29952, + "grad_norm": 0.6541840658181438, + "learning_rate": 3.24767491211073e-06, + "loss": 0.34322887659072876, + "memory(GiB)": 77.0, + "step": 4061, + "token_acc": 0.8633605908731206, + 
"train_speed(iter/s)": 1.053973 + }, + { + "epoch": 1.29984, + "grad_norm": 0.7164421744905288, + "learning_rate": 3.2468333526745345e-06, + "loss": 0.38829100131988525, + "memory(GiB)": 77.0, + "step": 4062, + "token_acc": 0.9644509374239104, + "train_speed(iter/s)": 1.053138 + }, + { + "epoch": 1.30016, + "grad_norm": 0.6376558492222413, + "learning_rate": 3.245991700307884e-06, + "loss": 0.3036912977695465, + "memory(GiB)": 77.0, + "step": 4063, + "token_acc": 0.907707412862052, + "train_speed(iter/s)": 1.052228 + }, + { + "epoch": 1.30048, + "grad_norm": 0.7352584016423969, + "learning_rate": 3.2451499551155075e-06, + "loss": 0.309939444065094, + "memory(GiB)": 77.0, + "step": 4064, + "token_acc": 0.8904467680608364, + "train_speed(iter/s)": 1.051421 + }, + { + "epoch": 1.3008, + "grad_norm": 0.6609086936020696, + "learning_rate": 3.244308117202146e-06, + "loss": 0.36761337518692017, + "memory(GiB)": 77.0, + "step": 4065, + "token_acc": 0.835054142533367, + "train_speed(iter/s)": 1.050498 + }, + { + "epoch": 1.30112, + "grad_norm": 0.6087234974346504, + "learning_rate": 3.2434661866725504e-06, + "loss": 0.31226637959480286, + "memory(GiB)": 77.0, + "step": 4066, + "token_acc": 0.880621615257829, + "train_speed(iter/s)": 1.049665 + }, + { + "epoch": 1.30144, + "grad_norm": 0.6007883751368149, + "learning_rate": 3.2426241636314853e-06, + "loss": 0.3466009497642517, + "memory(GiB)": 77.0, + "step": 4067, + "token_acc": 0.9554020100502513, + "train_speed(iter/s)": 1.048786 + }, + { + "epoch": 1.30176, + "grad_norm": 0.5785572119738501, + "learning_rate": 3.241782048183726e-06, + "loss": 0.2750988006591797, + "memory(GiB)": 77.0, + "step": 4068, + "token_acc": 0.9222357229647631, + "train_speed(iter/s)": 1.047919 + }, + { + "epoch": 1.3020800000000001, + "grad_norm": 0.6337096948797808, + "learning_rate": 3.2409398404340585e-06, + "loss": 0.3094494342803955, + "memory(GiB)": 77.0, + "step": 4069, + "token_acc": 0.8527397260273972, + "train_speed(iter/s)": 1.047061 + }, + { + "epoch": 1.3024, + "grad_norm": 0.6870035051511398, + "learning_rate": 3.2400975404872816e-06, + "loss": 0.29577773809432983, + "memory(GiB)": 77.0, + "step": 4070, + "token_acc": 0.9506502167389129, + "train_speed(iter/s)": 1.046226 + }, + { + "epoch": 1.3027199999999999, + "grad_norm": 0.6557764590537313, + "learning_rate": 3.2392551484482045e-06, + "loss": 0.30549389123916626, + "memory(GiB)": 77.0, + "step": 4071, + "token_acc": 0.9468041237113402, + "train_speed(iter/s)": 1.045298 + }, + { + "epoch": 1.30304, + "grad_norm": 0.749153774591929, + "learning_rate": 3.238412664421648e-06, + "loss": 0.38415172696113586, + "memory(GiB)": 77.0, + "step": 4072, + "token_acc": 0.8484304932735426, + "train_speed(iter/s)": 1.04448 + }, + { + "epoch": 1.30336, + "grad_norm": 0.689677983825597, + "learning_rate": 3.2375700885124444e-06, + "loss": 0.38671165704727173, + "memory(GiB)": 77.0, + "step": 4073, + "token_acc": 0.8731263383297645, + "train_speed(iter/s)": 1.043596 + }, + { + "epoch": 1.30368, + "grad_norm": 0.6740534280224145, + "learning_rate": 3.236727420825439e-06, + "loss": 0.26512113213539124, + "memory(GiB)": 77.0, + "step": 4074, + "token_acc": 0.9039145907473309, + "train_speed(iter/s)": 1.042771 + }, + { + "epoch": 1.304, + "grad_norm": 0.6365369152946484, + "learning_rate": 3.235884661465486e-06, + "loss": 0.3448706865310669, + "memory(GiB)": 77.0, + "step": 4075, + "token_acc": 0.9510807736063709, + "train_speed(iter/s)": 1.041789 + }, + { + "epoch": 1.30432, + "grad_norm": 0.6405544235096863, + 
"learning_rate": 3.2350418105374528e-06, + "loss": 0.3564179539680481, + "memory(GiB)": 77.0, + "step": 4076, + "token_acc": 0.8329448329448329, + "train_speed(iter/s)": 1.040959 + }, + { + "epoch": 1.30464, + "grad_norm": 0.7228357572095023, + "learning_rate": 3.234198868146218e-06, + "loss": 0.37202560901641846, + "memory(GiB)": 77.0, + "step": 4077, + "token_acc": 0.8386313018829723, + "train_speed(iter/s)": 1.040132 + }, + { + "epoch": 1.30496, + "grad_norm": 0.6411870778665271, + "learning_rate": 3.2333558343966693e-06, + "loss": 0.38111817836761475, + "memory(GiB)": 77.0, + "step": 4078, + "token_acc": 0.9411448535339962, + "train_speed(iter/s)": 1.039311 + }, + { + "epoch": 1.30528, + "grad_norm": 0.6318004913591178, + "learning_rate": 3.2325127093937096e-06, + "loss": 0.31407541036605835, + "memory(GiB)": 77.0, + "step": 4079, + "token_acc": 0.8942189421894219, + "train_speed(iter/s)": 1.038501 + }, + { + "epoch": 1.3056, + "grad_norm": 0.6223999261931326, + "learning_rate": 3.2316694932422503e-06, + "loss": 0.30666083097457886, + "memory(GiB)": 77.0, + "step": 4080, + "token_acc": 0.9062674874090655, + "train_speed(iter/s)": 1.03769 + }, + { + "epoch": 1.30592, + "grad_norm": 0.6822230706823192, + "learning_rate": 3.2308261860472144e-06, + "loss": 0.38563668727874756, + "memory(GiB)": 77.0, + "step": 4081, + "token_acc": 0.856312292358804, + "train_speed(iter/s)": 1.036871 + }, + { + "epoch": 1.30624, + "grad_norm": 0.6970351952283708, + "learning_rate": 3.2299827879135385e-06, + "loss": 0.33684438467025757, + "memory(GiB)": 77.0, + "step": 4082, + "token_acc": 0.8685824991927672, + "train_speed(iter/s)": 1.03605 + }, + { + "epoch": 1.30656, + "grad_norm": 0.6891327508675904, + "learning_rate": 3.2291392989461677e-06, + "loss": 0.35935625433921814, + "memory(GiB)": 77.0, + "step": 4083, + "token_acc": 0.8698861888649646, + "train_speed(iter/s)": 1.035275 + }, + { + "epoch": 1.30688, + "grad_norm": 0.6744056774245721, + "learning_rate": 3.2282957192500597e-06, + "loss": 0.29864755272865295, + "memory(GiB)": 77.0, + "step": 4084, + "token_acc": 0.9125548245614035, + "train_speed(iter/s)": 1.03447 + }, + { + "epoch": 1.3072, + "grad_norm": 0.6473083343210032, + "learning_rate": 3.2274520489301838e-06, + "loss": 0.3308194875717163, + "memory(GiB)": 77.0, + "step": 4085, + "token_acc": 0.9105363578545685, + "train_speed(iter/s)": 1.033618 + }, + { + "epoch": 1.30752, + "grad_norm": 0.6714830766024162, + "learning_rate": 3.22660828809152e-06, + "loss": 0.3528052270412445, + "memory(GiB)": 77.0, + "step": 4086, + "token_acc": 0.9318702915165411, + "train_speed(iter/s)": 1.032718 + }, + { + "epoch": 1.3078400000000001, + "grad_norm": 0.6893109032643476, + "learning_rate": 3.225764436839059e-06, + "loss": 0.3471384644508362, + "memory(GiB)": 77.0, + "step": 4087, + "token_acc": 0.8346273291925466, + "train_speed(iter/s)": 1.031883 + }, + { + "epoch": 1.30816, + "grad_norm": 0.6190719477928194, + "learning_rate": 3.224920495277804e-06, + "loss": 0.28045427799224854, + "memory(GiB)": 77.0, + "step": 4088, + "token_acc": 0.847400052260256, + "train_speed(iter/s)": 1.031112 + }, + { + "epoch": 1.3084799999999999, + "grad_norm": 0.6774644273554071, + "learning_rate": 3.2240764635127692e-06, + "loss": 0.3170716166496277, + "memory(GiB)": 77.0, + "step": 4089, + "token_acc": 0.9142457122856142, + "train_speed(iter/s)": 1.030296 + }, + { + "epoch": 1.3088, + "grad_norm": 0.7132941240859507, + "learning_rate": 3.2232323416489787e-06, + "loss": 0.36161237955093384, + "memory(GiB)": 77.0, + "step": 
4090, + "token_acc": 0.9293286219081273, + "train_speed(iter/s)": 1.029433 + }, + { + "epoch": 1.30912, + "grad_norm": 0.6698953241475335, + "learning_rate": 3.2223881297914696e-06, + "loss": 0.3692941665649414, + "memory(GiB)": 77.0, + "step": 4091, + "token_acc": 0.8508667774875327, + "train_speed(iter/s)": 1.028565 + }, + { + "epoch": 1.30944, + "grad_norm": 0.6805484455545199, + "learning_rate": 3.2215438280452897e-06, + "loss": 0.39271554350852966, + "memory(GiB)": 77.0, + "step": 4092, + "token_acc": 0.8910838227442606, + "train_speed(iter/s)": 1.027791 + }, + { + "epoch": 1.30976, + "grad_norm": 0.6490040934939505, + "learning_rate": 3.2206994365154975e-06, + "loss": 0.3803960978984833, + "memory(GiB)": 77.0, + "step": 4093, + "token_acc": 0.8741104294478528, + "train_speed(iter/s)": 1.027031 + }, + { + "epoch": 1.3100800000000001, + "grad_norm": 0.6115343821049424, + "learning_rate": 3.219854955307162e-06, + "loss": 0.2816506624221802, + "memory(GiB)": 77.0, + "step": 4094, + "token_acc": 0.9034792368125701, + "train_speed(iter/s)": 1.026267 + }, + { + "epoch": 1.3104, + "grad_norm": 0.7159343627983273, + "learning_rate": 3.2190103845253662e-06, + "loss": 0.3496633470058441, + "memory(GiB)": 77.0, + "step": 4095, + "token_acc": 0.8475242336913806, + "train_speed(iter/s)": 1.025511 + }, + { + "epoch": 1.3107199999999999, + "grad_norm": 0.6055468627598566, + "learning_rate": 3.2181657242751994e-06, + "loss": 0.31043508648872375, + "memory(GiB)": 77.0, + "step": 4096, + "token_acc": 0.9604214854793113, + "train_speed(iter/s)": 1.024712 + }, + { + "epoch": 1.31104, + "grad_norm": 0.5840686987966319, + "learning_rate": 3.217320974661768e-06, + "loss": 0.3213293254375458, + "memory(GiB)": 77.0, + "step": 4097, + "token_acc": 0.8973214285714286, + "train_speed(iter/s)": 1.023862 + }, + { + "epoch": 1.31136, + "grad_norm": 0.6874397060960484, + "learning_rate": 3.2164761357901835e-06, + "loss": 0.39831942319869995, + "memory(GiB)": 77.0, + "step": 4098, + "token_acc": 0.8788767812238055, + "train_speed(iter/s)": 1.023047 + }, + { + "epoch": 1.31168, + "grad_norm": 0.6999495659519158, + "learning_rate": 3.2156312077655738e-06, + "loss": 0.37932878732681274, + "memory(GiB)": 77.0, + "step": 4099, + "token_acc": 0.8510758776896942, + "train_speed(iter/s)": 1.022275 + }, + { + "epoch": 1.312, + "grad_norm": 0.6919632679081732, + "learning_rate": 3.2147861906930748e-06, + "loss": 0.32286471128463745, + "memory(GiB)": 77.0, + "step": 4100, + "token_acc": 0.9102103642893792, + "train_speed(iter/s)": 1.021437 + }, + { + "epoch": 1.31232, + "grad_norm": 0.6636697599922309, + "learning_rate": 3.2139410846778337e-06, + "loss": 0.342687726020813, + "memory(GiB)": 77.0, + "step": 4101, + "token_acc": 0.909556313993174, + "train_speed(iter/s)": 1.020638 + }, + { + "epoch": 1.31264, + "grad_norm": 0.7292750245703176, + "learning_rate": 3.2130958898250097e-06, + "loss": 0.34502485394477844, + "memory(GiB)": 77.0, + "step": 4102, + "token_acc": 0.8996311907270811, + "train_speed(iter/s)": 1.019837 + }, + { + "epoch": 1.31296, + "grad_norm": 0.6536806301814355, + "learning_rate": 3.2122506062397725e-06, + "loss": 0.3134687542915344, + "memory(GiB)": 77.0, + "step": 4103, + "token_acc": 0.9730496453900709, + "train_speed(iter/s)": 1.019009 + }, + { + "epoch": 1.31328, + "grad_norm": 0.6588174667556708, + "learning_rate": 3.211405234027303e-06, + "loss": 0.33723509311676025, + "memory(GiB)": 77.0, + "step": 4104, + "token_acc": 0.8900290416263311, + "train_speed(iter/s)": 1.018224 + }, + { + "epoch": 1.3136, 
+ "grad_norm": 0.5955935091378585, + "learning_rate": 3.2105597732927933e-06, + "loss": 0.2559666037559509, + "memory(GiB)": 77.0, + "step": 4105, + "token_acc": 0.9715220949263502, + "train_speed(iter/s)": 1.017434 + }, + { + "epoch": 1.31392, + "grad_norm": 0.7804954975667, + "learning_rate": 3.2097142241414463e-06, + "loss": 0.3878118395805359, + "memory(GiB)": 77.0, + "step": 4106, + "token_acc": 0.8614273812829769, + "train_speed(iter/s)": 1.016628 + }, + { + "epoch": 1.31424, + "grad_norm": 0.6929525252150185, + "learning_rate": 3.208868586678475e-06, + "loss": 0.3829585313796997, + "memory(GiB)": 77.0, + "step": 4107, + "token_acc": 0.8925487045591485, + "train_speed(iter/s)": 1.015873 + }, + { + "epoch": 1.31456, + "grad_norm": 0.6730417055710075, + "learning_rate": 3.2080228610091063e-06, + "loss": 0.3521527051925659, + "memory(GiB)": 77.0, + "step": 4108, + "token_acc": 0.8471936526594182, + "train_speed(iter/s)": 1.015108 + }, + { + "epoch": 1.31488, + "grad_norm": 0.5822358670977419, + "learning_rate": 3.2071770472385743e-06, + "loss": 0.2626686096191406, + "memory(GiB)": 77.0, + "step": 4109, + "token_acc": 0.9066630698678176, + "train_speed(iter/s)": 1.014351 + }, + { + "epoch": 1.3152, + "grad_norm": 0.5780340607590623, + "learning_rate": 3.2063311454721268e-06, + "loss": 0.25447556376457214, + "memory(GiB)": 77.0, + "step": 4110, + "token_acc": 0.9296597257491113, + "train_speed(iter/s)": 1.013558 + }, + { + "epoch": 1.31552, + "grad_norm": 0.707031551587812, + "learning_rate": 3.2054851558150207e-06, + "loss": 0.33979278802871704, + "memory(GiB)": 77.0, + "step": 4111, + "token_acc": 0.8763505402160864, + "train_speed(iter/s)": 1.012803 + }, + { + "epoch": 1.3158400000000001, + "grad_norm": 0.6068235954011982, + "learning_rate": 3.204639078372526e-06, + "loss": 0.31371885538101196, + "memory(GiB)": 77.0, + "step": 4112, + "token_acc": 0.9402255639097744, + "train_speed(iter/s)": 1.011994 + }, + { + "epoch": 1.31616, + "grad_norm": 0.6014687453641769, + "learning_rate": 3.203792913249921e-06, + "loss": 0.305616170167923, + "memory(GiB)": 77.0, + "step": 4113, + "token_acc": 0.9051833122629582, + "train_speed(iter/s)": 1.011213 + }, + { + "epoch": 1.3164799999999999, + "grad_norm": 0.6494239004959889, + "learning_rate": 3.2029466605524973e-06, + "loss": 0.3216465413570404, + "memory(GiB)": 77.0, + "step": 4114, + "token_acc": 0.9165825085841244, + "train_speed(iter/s)": 1.010423 + }, + { + "epoch": 1.3168, + "grad_norm": 0.6645358689559049, + "learning_rate": 3.202100320385556e-06, + "loss": 0.33201801776885986, + "memory(GiB)": 77.0, + "step": 4115, + "token_acc": 0.9213346322454944, + "train_speed(iter/s)": 1.00971 + }, + { + "epoch": 1.31712, + "grad_norm": 0.6695368718030553, + "learning_rate": 3.201253892854409e-06, + "loss": 0.3886834383010864, + "memory(GiB)": 77.0, + "step": 4116, + "token_acc": 0.8977921111386753, + "train_speed(iter/s)": 1.008859 + }, + { + "epoch": 1.31744, + "grad_norm": 0.6456506396024234, + "learning_rate": 3.20040737806438e-06, + "loss": 0.3073853850364685, + "memory(GiB)": 77.0, + "step": 4117, + "token_acc": 0.8899308983218164, + "train_speed(iter/s)": 1.008023 + }, + { + "epoch": 1.31776, + "grad_norm": 0.6035667096270388, + "learning_rate": 3.1995607761208037e-06, + "loss": 0.32308250665664673, + "memory(GiB)": 77.0, + "step": 4118, + "token_acc": 0.9559103002660585, + "train_speed(iter/s)": 1.007235 + }, + { + "epoch": 1.31808, + "grad_norm": 0.6441160535886618, + "learning_rate": 3.198714087129024e-06, + "loss": 0.3521386384963989, + 
"memory(GiB)": 77.0, + "step": 4119, + "token_acc": 0.8748908296943232, + "train_speed(iter/s)": 1.006403 + }, + { + "epoch": 1.3184, + "grad_norm": 0.685394269307322, + "learning_rate": 3.197867311194397e-06, + "loss": 0.3188309073448181, + "memory(GiB)": 77.0, + "step": 4120, + "token_acc": 0.9145183175033921, + "train_speed(iter/s)": 1.005659 + }, + { + "epoch": 1.31872, + "grad_norm": 0.6409163704001505, + "learning_rate": 3.1970204484222892e-06, + "loss": 0.4012495279312134, + "memory(GiB)": 77.0, + "step": 4121, + "token_acc": 0.9386102403343782, + "train_speed(iter/s)": 1.004934 + }, + { + "epoch": 1.31904, + "grad_norm": 0.7928964075776064, + "learning_rate": 3.196173498918079e-06, + "loss": 0.3491044342517853, + "memory(GiB)": 77.0, + "step": 4122, + "token_acc": 0.9557371467483827, + "train_speed(iter/s)": 1.004144 + }, + { + "epoch": 1.31936, + "grad_norm": 0.6777928433419114, + "learning_rate": 3.1953264627871526e-06, + "loss": 0.39362281560897827, + "memory(GiB)": 77.0, + "step": 4123, + "token_acc": 0.8463625154130703, + "train_speed(iter/s)": 1.003411 + }, + { + "epoch": 1.31968, + "grad_norm": 0.640092807994432, + "learning_rate": 3.19447934013491e-06, + "loss": 0.35449838638305664, + "memory(GiB)": 77.0, + "step": 4124, + "token_acc": 0.9307006369426751, + "train_speed(iter/s)": 1.002677 + }, + { + "epoch": 1.32, + "grad_norm": 0.6516892228466665, + "learning_rate": 3.193632131066762e-06, + "loss": 0.25887778401374817, + "memory(GiB)": 77.0, + "step": 4125, + "token_acc": 0.9353172651254305, + "train_speed(iter/s)": 1.001953 + }, + { + "epoch": 1.32032, + "grad_norm": 0.7061733407660804, + "learning_rate": 3.192784835688127e-06, + "loss": 0.3465147316455841, + "memory(GiB)": 77.0, + "step": 4126, + "token_acc": 0.9043839758125473, + "train_speed(iter/s)": 1.001183 + }, + { + "epoch": 1.32064, + "grad_norm": 0.6264978585955114, + "learning_rate": 3.1919374541044375e-06, + "loss": 0.3331223726272583, + "memory(GiB)": 77.0, + "step": 4127, + "token_acc": 0.8808671439936356, + "train_speed(iter/s)": 1.000452 + }, + { + "epoch": 1.32096, + "grad_norm": 0.6590447587501084, + "learning_rate": 3.1910899864211347e-06, + "loss": 0.35588765144348145, + "memory(GiB)": 77.0, + "step": 4128, + "token_acc": 0.9201608271108558, + "train_speed(iter/s)": 0.999705 + }, + { + "epoch": 1.32128, + "grad_norm": 0.6702530212094565, + "learning_rate": 3.190242432743673e-06, + "loss": 0.3840682804584503, + "memory(GiB)": 77.0, + "step": 4129, + "token_acc": 0.8808630393996247, + "train_speed(iter/s)": 0.998942 + }, + { + "epoch": 1.3216, + "grad_norm": 0.6318373350670852, + "learning_rate": 3.189394793177513e-06, + "loss": 0.3336498737335205, + "memory(GiB)": 77.0, + "step": 4130, + "token_acc": 0.9119262025038436, + "train_speed(iter/s)": 0.998203 + }, + { + "epoch": 1.32192, + "grad_norm": 0.6448673898276401, + "learning_rate": 3.188547067828131e-06, + "loss": 0.33221200108528137, + "memory(GiB)": 77.0, + "step": 4131, + "token_acc": 0.8901200369344414, + "train_speed(iter/s)": 0.997439 + }, + { + "epoch": 1.32224, + "grad_norm": 0.6540213075990143, + "learning_rate": 3.1876992568010105e-06, + "loss": 0.39208564162254333, + "memory(GiB)": 77.0, + "step": 4132, + "token_acc": 0.8328367103694875, + "train_speed(iter/s)": 0.996618 + }, + { + "epoch": 1.32256, + "grad_norm": 0.6597436436675127, + "learning_rate": 3.1868513602016477e-06, + "loss": 0.3245825171470642, + "memory(GiB)": 77.0, + "step": 4133, + "token_acc": 0.8986995208761123, + "train_speed(iter/s)": 0.995872 + }, + { + "epoch": 1.32288, 
+ "grad_norm": 0.6270742262475555, + "learning_rate": 3.186003378135548e-06, + "loss": 0.3326013684272766, + "memory(GiB)": 77.0, + "step": 4134, + "token_acc": 0.8837448559670782, + "train_speed(iter/s)": 0.995154 + }, + { + "epoch": 1.3232, + "grad_norm": 0.6228232867167158, + "learning_rate": 3.1851553107082283e-06, + "loss": 0.3062916100025177, + "memory(GiB)": 77.0, + "step": 4135, + "token_acc": 0.8924822695035461, + "train_speed(iter/s)": 0.994452 + }, + { + "epoch": 1.32352, + "grad_norm": 0.666444194679872, + "learning_rate": 3.1843071580252167e-06, + "loss": 0.35508501529693604, + "memory(GiB)": 77.0, + "step": 4136, + "token_acc": 0.9221585482330468, + "train_speed(iter/s)": 0.993706 + }, + { + "epoch": 1.3238400000000001, + "grad_norm": 0.747850379122088, + "learning_rate": 3.183458920192049e-06, + "loss": 0.3750177323818207, + "memory(GiB)": 77.0, + "step": 4137, + "token_acc": 0.9048374306106265, + "train_speed(iter/s)": 0.992998 + }, + { + "epoch": 1.32416, + "grad_norm": 0.6180631951195009, + "learning_rate": 3.182610597314275e-06, + "loss": 0.30248481035232544, + "memory(GiB)": 77.0, + "step": 4138, + "token_acc": 0.9318181818181818, + "train_speed(iter/s)": 0.992276 + }, + { + "epoch": 1.3244799999999999, + "grad_norm": 0.6324412454389926, + "learning_rate": 3.1817621894974537e-06, + "loss": 0.30806964635849, + "memory(GiB)": 77.0, + "step": 4139, + "token_acc": 0.9213410702772404, + "train_speed(iter/s)": 0.991557 + }, + { + "epoch": 1.3248, + "grad_norm": 0.6329414608029956, + "learning_rate": 3.1809136968471554e-06, + "loss": 0.3075336217880249, + "memory(GiB)": 77.0, + "step": 4140, + "token_acc": 0.9149880505291909, + "train_speed(iter/s)": 0.990866 + }, + { + "epoch": 1.32512, + "grad_norm": 0.5794394285554326, + "learning_rate": 3.180065119468959e-06, + "loss": 0.31016284227371216, + "memory(GiB)": 77.0, + "step": 4141, + "token_acc": 0.9284064665127021, + "train_speed(iter/s)": 0.990152 + }, + { + "epoch": 1.32544, + "grad_norm": 0.6337517697362265, + "learning_rate": 3.1792164574684565e-06, + "loss": 0.35266244411468506, + "memory(GiB)": 77.0, + "step": 4142, + "token_acc": 0.8964156527458073, + "train_speed(iter/s)": 0.989423 + }, + { + "epoch": 1.32576, + "grad_norm": 0.6386259007890637, + "learning_rate": 3.1783677109512484e-06, + "loss": 0.3639824092388153, + "memory(GiB)": 77.0, + "step": 4143, + "token_acc": 0.9128722618754614, + "train_speed(iter/s)": 0.988633 + }, + { + "epoch": 1.32608, + "grad_norm": 0.6768895154488447, + "learning_rate": 3.177518880022946e-06, + "loss": 0.37624239921569824, + "memory(GiB)": 77.0, + "step": 4144, + "token_acc": 0.8799026237489856, + "train_speed(iter/s)": 0.987936 + }, + { + "epoch": 1.3264, + "grad_norm": 0.5886376616252531, + "learning_rate": 3.176669964789173e-06, + "loss": 0.2763094902038574, + "memory(GiB)": 77.0, + "step": 4145, + "token_acc": 0.907223113964687, + "train_speed(iter/s)": 0.987243 + }, + { + "epoch": 1.32672, + "grad_norm": 0.6555352532562913, + "learning_rate": 3.1758209653555606e-06, + "loss": 0.31161630153656006, + "memory(GiB)": 77.0, + "step": 4146, + "token_acc": 0.9076445479239426, + "train_speed(iter/s)": 0.986524 + }, + { + "epoch": 1.32704, + "grad_norm": 0.6661468073635444, + "learning_rate": 3.1749718818277524e-06, + "loss": 0.2926533818244934, + "memory(GiB)": 77.0, + "step": 4147, + "token_acc": 0.9392998306041784, + "train_speed(iter/s)": 0.985827 + }, + { + "epoch": 1.32736, + "grad_norm": 0.6826372987135079, + "learning_rate": 3.1741227143114035e-06, + "loss": 0.3379167914390564, + 
"memory(GiB)": 77.0, + "step": 4148, + "token_acc": 0.8971046770601336, + "train_speed(iter/s)": 0.985112 + }, + { + "epoch": 1.32768, + "grad_norm": 0.6459279479048643, + "learning_rate": 3.1732734629121764e-06, + "loss": 0.30784136056900024, + "memory(GiB)": 77.0, + "step": 4149, + "token_acc": 0.943596268023749, + "train_speed(iter/s)": 0.984449 + }, + { + "epoch": 1.328, + "grad_norm": 0.6719067134836549, + "learning_rate": 3.1724241277357475e-06, + "loss": 0.3254055380821228, + "memory(GiB)": 77.0, + "step": 4150, + "token_acc": 0.8896224468743553, + "train_speed(iter/s)": 0.983743 + }, + { + "epoch": 1.32832, + "grad_norm": 0.6434002461780784, + "learning_rate": 3.1715747088878002e-06, + "loss": 0.3184865117073059, + "memory(GiB)": 77.0, + "step": 4151, + "token_acc": 0.9067008656620712, + "train_speed(iter/s)": 0.983053 + }, + { + "epoch": 1.32864, + "grad_norm": 0.6281681827207678, + "learning_rate": 3.1707252064740303e-06, + "loss": 0.3123142719268799, + "memory(GiB)": 77.0, + "step": 4152, + "token_acc": 0.9194324194324194, + "train_speed(iter/s)": 0.982312 + }, + { + "epoch": 1.32896, + "grad_norm": 0.6313444113106754, + "learning_rate": 3.1698756206001436e-06, + "loss": 0.3014337420463562, + "memory(GiB)": 77.0, + "step": 4153, + "token_acc": 0.8530280649926145, + "train_speed(iter/s)": 0.981624 + }, + { + "epoch": 1.32928, + "grad_norm": 0.6756694816984556, + "learning_rate": 3.1690259513718562e-06, + "loss": 0.3676915764808655, + "memory(GiB)": 77.0, + "step": 4154, + "token_acc": 0.8907161803713528, + "train_speed(iter/s)": 0.980877 + }, + { + "epoch": 1.3296000000000001, + "grad_norm": 0.6237230905205587, + "learning_rate": 3.168176198894895e-06, + "loss": 0.29962313175201416, + "memory(GiB)": 77.0, + "step": 4155, + "token_acc": 0.9270664505672609, + "train_speed(iter/s)": 0.980187 + }, + { + "epoch": 1.32992, + "grad_norm": 0.6696094838433666, + "learning_rate": 3.1673263632749975e-06, + "loss": 0.32145601511001587, + "memory(GiB)": 77.0, + "step": 4156, + "token_acc": 0.9133574007220217, + "train_speed(iter/s)": 0.979458 + }, + { + "epoch": 1.3302399999999999, + "grad_norm": 0.6838018347002803, + "learning_rate": 3.1664764446179107e-06, + "loss": 0.3031931519508362, + "memory(GiB)": 77.0, + "step": 4157, + "token_acc": 0.9670195439739414, + "train_speed(iter/s)": 0.978747 + }, + { + "epoch": 1.33056, + "grad_norm": 0.6107864195008362, + "learning_rate": 3.165626443029392e-06, + "loss": 0.3554525673389435, + "memory(GiB)": 77.0, + "step": 4158, + "token_acc": 0.8712977921378567, + "train_speed(iter/s)": 0.978038 + }, + { + "epoch": 1.33088, + "grad_norm": 0.6051795610833969, + "learning_rate": 3.164776358615209e-06, + "loss": 0.2876499593257904, + "memory(GiB)": 77.0, + "step": 4159, + "token_acc": 0.9327956989247311, + "train_speed(iter/s)": 0.977291 + }, + { + "epoch": 1.3312, + "grad_norm": 0.6402650170645123, + "learning_rate": 3.1639261914811403e-06, + "loss": 0.36051762104034424, + "memory(GiB)": 77.0, + "step": 4160, + "token_acc": 0.897381079636558, + "train_speed(iter/s)": 0.976511 + }, + { + "epoch": 1.33152, + "grad_norm": 0.6605504898296297, + "learning_rate": 3.163075941732975e-06, + "loss": 0.3611540198326111, + "memory(GiB)": 77.0, + "step": 4161, + "token_acc": 0.8884043099025141, + "train_speed(iter/s)": 0.975794 + }, + { + "epoch": 1.3318400000000001, + "grad_norm": 0.5933331644515486, + "learning_rate": 3.1622256094765107e-06, + "loss": 0.2560809850692749, + "memory(GiB)": 77.0, + "step": 4162, + "token_acc": 0.9139188794864312, + "train_speed(iter/s)": 
0.975098 + }, + { + "epoch": 1.33216, + "grad_norm": 0.7421005103395163, + "learning_rate": 3.161375194817557e-06, + "loss": 0.3274901509284973, + "memory(GiB)": 77.0, + "step": 4163, + "token_acc": 0.9056681836988002, + "train_speed(iter/s)": 0.974427 + }, + { + "epoch": 1.3324799999999999, + "grad_norm": 0.668395853628466, + "learning_rate": 3.1605246978619337e-06, + "loss": 0.36640435457229614, + "memory(GiB)": 77.0, + "step": 4164, + "token_acc": 0.9459611595834506, + "train_speed(iter/s)": 0.973752 + }, + { + "epoch": 1.3328, + "grad_norm": 0.6139949769055448, + "learning_rate": 3.1596741187154713e-06, + "loss": 0.2934274673461914, + "memory(GiB)": 77.0, + "step": 4165, + "token_acc": 0.8827027027027027, + "train_speed(iter/s)": 0.973024 + }, + { + "epoch": 1.33312, + "grad_norm": 0.5613441395897792, + "learning_rate": 3.158823457484007e-06, + "loss": 0.24867485463619232, + "memory(GiB)": 77.0, + "step": 4166, + "token_acc": 0.9382361390405056, + "train_speed(iter/s)": 0.972273 + }, + { + "epoch": 1.33344, + "grad_norm": 0.9286574979458156, + "learning_rate": 3.157972714273393e-06, + "loss": 0.5208149552345276, + "memory(GiB)": 77.0, + "step": 4167, + "token_acc": 0.8702830188679245, + "train_speed(iter/s)": 0.971484 + }, + { + "epoch": 1.33376, + "grad_norm": 0.6499942702203186, + "learning_rate": 3.1571218891894884e-06, + "loss": 0.32234182953834534, + "memory(GiB)": 77.0, + "step": 4168, + "token_acc": 0.947429906542056, + "train_speed(iter/s)": 0.970797 + }, + { + "epoch": 1.33408, + "grad_norm": 0.6631465625283058, + "learning_rate": 3.1562709823381645e-06, + "loss": 0.3551798462867737, + "memory(GiB)": 77.0, + "step": 4169, + "token_acc": 0.8764970059880239, + "train_speed(iter/s)": 0.970061 + }, + { + "epoch": 1.3344, + "grad_norm": 0.6301159114218751, + "learning_rate": 3.155419993825301e-06, + "loss": 0.3365669250488281, + "memory(GiB)": 77.0, + "step": 4170, + "token_acc": 0.9478743068391867, + "train_speed(iter/s)": 0.969309 + }, + { + "epoch": 1.33472, + "grad_norm": 0.653723633994968, + "learning_rate": 3.1545689237567895e-06, + "loss": 0.4524996280670166, + "memory(GiB)": 77.0, + "step": 4171, + "token_acc": 0.8624090162009862, + "train_speed(iter/s)": 0.968595 + }, + { + "epoch": 1.33504, + "grad_norm": 0.6153677498748135, + "learning_rate": 3.1537177722385305e-06, + "loss": 0.38418805599212646, + "memory(GiB)": 77.0, + "step": 4172, + "token_acc": 0.8408066429418742, + "train_speed(iter/s)": 0.967825 + }, + { + "epoch": 1.33536, + "grad_norm": 0.6300870165611767, + "learning_rate": 3.1528665393764348e-06, + "loss": 0.3438742756843567, + "memory(GiB)": 77.0, + "step": 4173, + "token_acc": 0.96875, + "train_speed(iter/s)": 0.967094 + }, + { + "epoch": 1.33568, + "grad_norm": 0.5910440586368295, + "learning_rate": 3.1520152252764234e-06, + "loss": 0.28216353058815, + "memory(GiB)": 77.0, + "step": 4174, + "token_acc": 0.8587417985333848, + "train_speed(iter/s)": 0.966373 + }, + { + "epoch": 1.336, + "grad_norm": 0.7397081345090017, + "learning_rate": 3.1511638300444293e-06, + "loss": 0.2913859486579895, + "memory(GiB)": 77.0, + "step": 4175, + "token_acc": 0.8808850012863391, + "train_speed(iter/s)": 0.965708 + }, + { + "epoch": 1.33632, + "grad_norm": 0.6081147001446451, + "learning_rate": 3.1503123537863913e-06, + "loss": 0.2793465256690979, + "memory(GiB)": 77.0, + "step": 4176, + "token_acc": 0.9153135148638522, + "train_speed(iter/s)": 0.964988 + }, + { + "epoch": 1.33664, + "grad_norm": 0.6763635236423213, + "learning_rate": 3.1494607966082626e-06, + "loss": 
0.2905661463737488, + "memory(GiB)": 77.0, + "step": 4177, + "token_acc": 0.9257986946066644, + "train_speed(iter/s)": 0.964327 + }, + { + "epoch": 1.33696, + "grad_norm": 0.6185744241319521, + "learning_rate": 3.148609158616004e-06, + "loss": 0.2984127998352051, + "memory(GiB)": 77.0, + "step": 4178, + "token_acc": 0.9105979869745412, + "train_speed(iter/s)": 0.963621 + }, + { + "epoch": 1.33728, + "grad_norm": 0.6131213558556171, + "learning_rate": 3.1477574399155876e-06, + "loss": 0.36760473251342773, + "memory(GiB)": 77.0, + "step": 4179, + "token_acc": 0.9308312655086849, + "train_speed(iter/s)": 0.962924 + }, + { + "epoch": 1.3376000000000001, + "grad_norm": 0.6891880475682963, + "learning_rate": 3.146905640612995e-06, + "loss": 0.451324462890625, + "memory(GiB)": 77.0, + "step": 4180, + "token_acc": 0.8076281287246723, + "train_speed(iter/s)": 0.962242 + }, + { + "epoch": 1.33792, + "grad_norm": 0.6770019076492846, + "learning_rate": 3.146053760814217e-06, + "loss": 0.3481837511062622, + "memory(GiB)": 77.0, + "step": 4181, + "token_acc": 0.871387602029561, + "train_speed(iter/s)": 0.961549 + }, + { + "epoch": 1.3382399999999999, + "grad_norm": 0.7406320347019838, + "learning_rate": 3.145201800625256e-06, + "loss": 0.3817022740840912, + "memory(GiB)": 77.0, + "step": 4182, + "token_acc": 0.8188405797101449, + "train_speed(iter/s)": 0.960845 + }, + { + "epoch": 1.33856, + "grad_norm": 0.634328293882498, + "learning_rate": 3.1443497601521235e-06, + "loss": 0.3312810957431793, + "memory(GiB)": 77.0, + "step": 4183, + "token_acc": 0.9083384426732066, + "train_speed(iter/s)": 0.960131 + }, + { + "epoch": 1.33888, + "grad_norm": 0.676161008254451, + "learning_rate": 3.143497639500841e-06, + "loss": 0.33317720890045166, + "memory(GiB)": 77.0, + "step": 4184, + "token_acc": 0.8576370409792443, + "train_speed(iter/s)": 0.959439 + }, + { + "epoch": 1.3392, + "grad_norm": 0.5999000826363875, + "learning_rate": 3.1426454387774404e-06, + "loss": 0.302845299243927, + "memory(GiB)": 77.0, + "step": 4185, + "token_acc": 0.932632398753894, + "train_speed(iter/s)": 0.958794 + }, + { + "epoch": 1.33952, + "grad_norm": 0.6999003572171836, + "learning_rate": 3.1417931580879628e-06, + "loss": 0.4016847014427185, + "memory(GiB)": 77.0, + "step": 4186, + "token_acc": 0.8086620577470517, + "train_speed(iter/s)": 0.958148 + }, + { + "epoch": 1.33984, + "grad_norm": 0.6358080451020738, + "learning_rate": 3.140940797538461e-06, + "loss": 0.312509149312973, + "memory(GiB)": 77.0, + "step": 4187, + "token_acc": 0.9370919881305638, + "train_speed(iter/s)": 0.957518 + }, + { + "epoch": 1.34016, + "grad_norm": 0.6509271738281891, + "learning_rate": 3.140088357234995e-06, + "loss": 0.4469972550868988, + "memory(GiB)": 77.0, + "step": 4188, + "token_acc": 0.8918522860492379, + "train_speed(iter/s)": 0.956824 + }, + { + "epoch": 1.34048, + "grad_norm": 0.7024569575230404, + "learning_rate": 3.139235837283637e-06, + "loss": 0.3535940647125244, + "memory(GiB)": 77.0, + "step": 4189, + "token_acc": 0.920929018789144, + "train_speed(iter/s)": 0.956135 + }, + { + "epoch": 1.3408, + "grad_norm": 0.6238345177324265, + "learning_rate": 3.1383832377904676e-06, + "loss": 0.30303090810775757, + "memory(GiB)": 77.0, + "step": 4190, + "token_acc": 0.8590361445783132, + "train_speed(iter/s)": 0.955486 + }, + { + "epoch": 1.34112, + "grad_norm": 0.7199645446558495, + "learning_rate": 3.137530558861579e-06, + "loss": 0.39534991979599, + "memory(GiB)": 77.0, + "step": 4191, + "token_acc": 0.9122865751590225, + "train_speed(iter/s)": 
0.954817 + }, + { + "epoch": 1.34144, + "grad_norm": 0.6752119742597875, + "learning_rate": 3.1366778006030717e-06, + "loss": 0.3539559841156006, + "memory(GiB)": 77.0, + "step": 4192, + "token_acc": 0.8922173274596182, + "train_speed(iter/s)": 0.954183 + }, + { + "epoch": 1.34176, + "grad_norm": 0.6145249006207342, + "learning_rate": 3.1358249631210572e-06, + "loss": 0.36910590529441833, + "memory(GiB)": 77.0, + "step": 4193, + "token_acc": 0.8714020427112349, + "train_speed(iter/s)": 0.953521 + }, + { + "epoch": 1.34208, + "grad_norm": 0.7340702411040358, + "learning_rate": 3.1349720465216563e-06, + "loss": 0.3418895900249481, + "memory(GiB)": 77.0, + "step": 4194, + "token_acc": 0.876418152350081, + "train_speed(iter/s)": 0.952893 + }, + { + "epoch": 1.3424, + "grad_norm": 0.6671311597431018, + "learning_rate": 3.134119050910999e-06, + "loss": 0.48809564113616943, + "memory(GiB)": 77.0, + "step": 4195, + "token_acc": 0.888423645320197, + "train_speed(iter/s)": 0.952246 + }, + { + "epoch": 1.34272, + "grad_norm": 0.7416963004881976, + "learning_rate": 3.1332659763952255e-06, + "loss": 0.37740814685821533, + "memory(GiB)": 77.0, + "step": 4196, + "token_acc": 0.8547097156398105, + "train_speed(iter/s)": 0.951538 + }, + { + "epoch": 1.34304, + "grad_norm": 0.6170715726510967, + "learning_rate": 3.1324128230804867e-06, + "loss": 0.33540570735931396, + "memory(GiB)": 77.0, + "step": 4197, + "token_acc": 0.9388011035866566, + "train_speed(iter/s)": 0.950889 + }, + { + "epoch": 1.34336, + "grad_norm": 0.6839104236253873, + "learning_rate": 3.1315595910729434e-06, + "loss": 0.30434277653694153, + "memory(GiB)": 77.0, + "step": 4198, + "token_acc": 0.9162238199437324, + "train_speed(iter/s)": 0.950218 + }, + { + "epoch": 1.34368, + "grad_norm": 0.6525427500076673, + "learning_rate": 3.1307062804787645e-06, + "loss": 0.32010385394096375, + "memory(GiB)": 77.0, + "step": 4199, + "token_acc": 0.9207275223061084, + "train_speed(iter/s)": 0.949588 + }, + { + "epoch": 1.3439999999999999, + "grad_norm": 0.6899966001649613, + "learning_rate": 3.129852891404131e-06, + "loss": 0.35120469331741333, + "memory(GiB)": 77.0, + "step": 4200, + "token_acc": 0.900792283844299, + "train_speed(iter/s)": 0.94893 + }, + { + "epoch": 1.34432, + "grad_norm": 0.6701528056226027, + "learning_rate": 3.1289994239552314e-06, + "loss": 0.3437928855419159, + "memory(GiB)": 77.0, + "step": 4201, + "token_acc": 0.9122617763394463, + "train_speed(iter/s)": 0.948317 + }, + { + "epoch": 1.34464, + "grad_norm": 0.6372605508233894, + "learning_rate": 3.1281458782382644e-06, + "loss": 0.3543868660926819, + "memory(GiB)": 77.0, + "step": 4202, + "token_acc": 0.9347890233107111, + "train_speed(iter/s)": 0.947686 + }, + { + "epoch": 1.34496, + "grad_norm": 0.6510047228924701, + "learning_rate": 3.1272922543594386e-06, + "loss": 0.30395960807800293, + "memory(GiB)": 77.0, + "step": 4203, + "token_acc": 0.9074137341988384, + "train_speed(iter/s)": 0.947017 + }, + { + "epoch": 1.34528, + "grad_norm": 0.7027061935379041, + "learning_rate": 3.126438552424975e-06, + "loss": 0.4022306799888611, + "memory(GiB)": 77.0, + "step": 4204, + "token_acc": 0.8743611584327087, + "train_speed(iter/s)": 0.946352 + }, + { + "epoch": 1.3456000000000001, + "grad_norm": 0.7151598245163909, + "learning_rate": 3.1255847725411003e-06, + "loss": 0.32293009757995605, + "memory(GiB)": 77.0, + "step": 4205, + "token_acc": 0.907427341227126, + "train_speed(iter/s)": 0.945678 + }, + { + "epoch": 1.34592, + "grad_norm": 0.7456896955102528, + "learning_rate": 
3.1247309148140525e-06, + "loss": 0.40462085604667664, + "memory(GiB)": 77.0, + "step": 4206, + "token_acc": 0.8956544366380432, + "train_speed(iter/s)": 0.945026 + }, + { + "epoch": 1.3462399999999999, + "grad_norm": 0.6493716751009992, + "learning_rate": 3.1238769793500807e-06, + "loss": 0.353664755821228, + "memory(GiB)": 77.0, + "step": 4207, + "token_acc": 0.9052029419776627, + "train_speed(iter/s)": 0.944351 + }, + { + "epoch": 1.34656, + "grad_norm": 0.8410996227108951, + "learning_rate": 3.123022966255441e-06, + "loss": 0.3470644950866699, + "memory(GiB)": 77.0, + "step": 4208, + "token_acc": 0.872856298048492, + "train_speed(iter/s)": 0.943691 + }, + { + "epoch": 1.34688, + "grad_norm": 0.7007836109432423, + "learning_rate": 3.1221688756364015e-06, + "loss": 0.31863099336624146, + "memory(GiB)": 77.0, + "step": 4209, + "token_acc": 0.8766504517025713, + "train_speed(iter/s)": 0.943046 + }, + { + "epoch": 1.3472, + "grad_norm": 0.7181016129196065, + "learning_rate": 3.121314707599238e-06, + "loss": 0.31037503480911255, + "memory(GiB)": 77.0, + "step": 4210, + "token_acc": 0.8402976531196337, + "train_speed(iter/s)": 0.942414 + }, + { + "epoch": 1.34752, + "grad_norm": 0.6629964901417426, + "learning_rate": 3.1204604622502368e-06, + "loss": 0.2808183431625366, + "memory(GiB)": 77.0, + "step": 4211, + "token_acc": 0.9040892193308551, + "train_speed(iter/s)": 0.941792 + }, + { + "epoch": 1.34784, + "grad_norm": 0.642208804833117, + "learning_rate": 3.1196061396956945e-06, + "loss": 0.37256109714508057, + "memory(GiB)": 77.0, + "step": 4212, + "token_acc": 0.9406074675952796, + "train_speed(iter/s)": 0.941129 + }, + { + "epoch": 1.34816, + "grad_norm": 0.7310758933832531, + "learning_rate": 3.118751740041917e-06, + "loss": 0.3346406817436218, + "memory(GiB)": 77.0, + "step": 4213, + "token_acc": 0.8952156334231806, + "train_speed(iter/s)": 0.94051 + }, + { + "epoch": 1.34848, + "grad_norm": 0.7284850198712585, + "learning_rate": 3.1178972633952194e-06, + "loss": 0.3641858398914337, + "memory(GiB)": 77.0, + "step": 4214, + "token_acc": 0.926052332195677, + "train_speed(iter/s)": 0.939889 + }, + { + "epoch": 1.3488, + "grad_norm": 0.6799170849419699, + "learning_rate": 3.1170427098619265e-06, + "loss": 0.418149471282959, + "memory(GiB)": 77.0, + "step": 4215, + "token_acc": 0.8973250472845177, + "train_speed(iter/s)": 0.939237 + }, + { + "epoch": 1.34912, + "grad_norm": 0.5814496486194477, + "learning_rate": 3.1161880795483725e-06, + "loss": 0.3119824528694153, + "memory(GiB)": 77.0, + "step": 4216, + "token_acc": 0.8689482470784641, + "train_speed(iter/s)": 0.9386 + }, + { + "epoch": 1.34944, + "grad_norm": 0.6455118554935563, + "learning_rate": 3.115333372560901e-06, + "loss": 0.370633065700531, + "memory(GiB)": 77.0, + "step": 4217, + "token_acc": 0.7672395040243637, + "train_speed(iter/s)": 0.937965 + }, + { + "epoch": 1.34976, + "grad_norm": 0.6409302601711867, + "learning_rate": 3.1144785890058654e-06, + "loss": 0.3903851807117462, + "memory(GiB)": 77.0, + "step": 4218, + "token_acc": 0.8632451386675167, + "train_speed(iter/s)": 0.937262 + }, + { + "epoch": 1.35008, + "grad_norm": 0.7649211926693142, + "learning_rate": 3.1136237289896295e-06, + "loss": 0.4056600332260132, + "memory(GiB)": 77.0, + "step": 4219, + "token_acc": 0.8326890034364262, + "train_speed(iter/s)": 0.936647 + }, + { + "epoch": 1.3504, + "grad_norm": 0.6467277223783852, + "learning_rate": 3.112768792618564e-06, + "loss": 0.3557656407356262, + "memory(GiB)": 77.0, + "step": 4220, + "token_acc": 
0.9418075422626788, + "train_speed(iter/s)": 0.936008 + }, + { + "epoch": 1.35072, + "grad_norm": 0.6570253153122264, + "learning_rate": 3.1119137799990533e-06, + "loss": 0.3686745762825012, + "memory(GiB)": 77.0, + "step": 4221, + "token_acc": 0.8449673719596599, + "train_speed(iter/s)": 0.935375 + }, + { + "epoch": 1.35104, + "grad_norm": 0.736955505734487, + "learning_rate": 3.1110586912374885e-06, + "loss": 0.39653921127319336, + "memory(GiB)": 77.0, + "step": 4222, + "token_acc": 0.8583133493205436, + "train_speed(iter/s)": 0.934765 + }, + { + "epoch": 1.3513600000000001, + "grad_norm": 0.5983185727851538, + "learning_rate": 3.1102035264402693e-06, + "loss": 0.3046887516975403, + "memory(GiB)": 77.0, + "step": 4223, + "token_acc": 0.8558888076079005, + "train_speed(iter/s)": 0.934159 + }, + { + "epoch": 1.35168, + "grad_norm": 0.6667891484386728, + "learning_rate": 3.1093482857138063e-06, + "loss": 0.26876816153526306, + "memory(GiB)": 77.0, + "step": 4224, + "token_acc": 0.9009717723276262, + "train_speed(iter/s)": 0.933577 + }, + { + "epoch": 1.3519999999999999, + "grad_norm": 0.6399834075114266, + "learning_rate": 3.10849296916452e-06, + "loss": 0.28524988889694214, + "memory(GiB)": 77.0, + "step": 4225, + "token_acc": 0.9087528604118993, + "train_speed(iter/s)": 0.932962 + }, + { + "epoch": 1.35232, + "grad_norm": 0.6647984930172223, + "learning_rate": 3.1076375768988392e-06, + "loss": 0.36244451999664307, + "memory(GiB)": 77.0, + "step": 4226, + "token_acc": 0.8950966525223951, + "train_speed(iter/s)": 0.932328 + }, + { + "epoch": 1.35264, + "grad_norm": 0.7294080598178788, + "learning_rate": 3.106782109023203e-06, + "loss": 0.37811267375946045, + "memory(GiB)": 77.0, + "step": 4227, + "token_acc": 0.891494002181025, + "train_speed(iter/s)": 0.931706 + }, + { + "epoch": 1.35296, + "grad_norm": 0.6705874506848172, + "learning_rate": 3.105926565644059e-06, + "loss": 0.3383314609527588, + "memory(GiB)": 77.0, + "step": 4228, + "token_acc": 0.9419680403700589, + "train_speed(iter/s)": 0.931104 + }, + { + "epoch": 1.35328, + "grad_norm": 0.6524143851269577, + "learning_rate": 3.1050709468678666e-06, + "loss": 0.33524882793426514, + "memory(GiB)": 77.0, + "step": 4229, + "token_acc": 0.8543386125362481, + "train_speed(iter/s)": 0.930475 + }, + { + "epoch": 1.3536000000000001, + "grad_norm": 0.6034036833471544, + "learning_rate": 3.10421525280109e-06, + "loss": 0.2958407700061798, + "memory(GiB)": 77.0, + "step": 4230, + "token_acc": 0.9454368301396557, + "train_speed(iter/s)": 0.92989 + }, + { + "epoch": 1.35392, + "grad_norm": 0.6167091612534484, + "learning_rate": 3.1033594835502067e-06, + "loss": 0.30819523334503174, + "memory(GiB)": 77.0, + "step": 4231, + "token_acc": 0.9251115360101976, + "train_speed(iter/s)": 0.9292 + }, + { + "epoch": 1.3542399999999999, + "grad_norm": 0.7324992647801895, + "learning_rate": 3.102503639221703e-06, + "loss": 0.46198031306266785, + "memory(GiB)": 77.0, + "step": 4232, + "token_acc": 0.8670309653916212, + "train_speed(iter/s)": 0.92857 + }, + { + "epoch": 1.35456, + "grad_norm": 0.6803788657550525, + "learning_rate": 3.101647719922073e-06, + "loss": 0.30704668164253235, + "memory(GiB)": 77.0, + "step": 4233, + "token_acc": 0.8753219989696033, + "train_speed(iter/s)": 0.927973 + }, + { + "epoch": 1.35488, + "grad_norm": 0.6076146998673742, + "learning_rate": 3.1007917257578213e-06, + "loss": 0.3218598961830139, + "memory(GiB)": 77.0, + "step": 4234, + "token_acc": 0.9405684754521964, + "train_speed(iter/s)": 0.927367 + }, + { + "epoch": 1.3552, + 
"grad_norm": 0.6031044447717328, + "learning_rate": 3.0999356568354615e-06, + "loss": 0.3273369371891022, + "memory(GiB)": 77.0, + "step": 4235, + "token_acc": 0.8607108549471661, + "train_speed(iter/s)": 0.926752 + }, + { + "epoch": 1.35552, + "grad_norm": 0.6194750584603074, + "learning_rate": 3.099079513261517e-06, + "loss": 0.3300195038318634, + "memory(GiB)": 77.0, + "step": 4236, + "token_acc": 0.878316032295271, + "train_speed(iter/s)": 0.926133 + }, + { + "epoch": 1.35584, + "grad_norm": 0.7204882257522515, + "learning_rate": 3.098223295142519e-06, + "loss": 0.295734167098999, + "memory(GiB)": 77.0, + "step": 4237, + "token_acc": 0.9126577959740703, + "train_speed(iter/s)": 0.925565 + }, + { + "epoch": 1.35616, + "grad_norm": 0.6821603603525515, + "learning_rate": 3.0973670025850094e-06, + "loss": 0.34303271770477295, + "memory(GiB)": 77.0, + "step": 4238, + "token_acc": 0.9592795614722005, + "train_speed(iter/s)": 0.924991 + }, + { + "epoch": 1.35648, + "grad_norm": 0.6666421723381926, + "learning_rate": 3.09651063569554e-06, + "loss": 0.376889705657959, + "memory(GiB)": 77.0, + "step": 4239, + "token_acc": 0.8174077578051088, + "train_speed(iter/s)": 0.924396 + }, + { + "epoch": 1.3568, + "grad_norm": 0.6403805830945855, + "learning_rate": 3.095654194580669e-06, + "loss": 0.357494592666626, + "memory(GiB)": 77.0, + "step": 4240, + "token_acc": 0.923152131403989, + "train_speed(iter/s)": 0.923795 + }, + { + "epoch": 1.35712, + "grad_norm": 0.6686448771502953, + "learning_rate": 3.094797679346967e-06, + "loss": 0.339852511882782, + "memory(GiB)": 77.0, + "step": 4241, + "token_acc": 0.9180094786729858, + "train_speed(iter/s)": 0.923223 + }, + { + "epoch": 1.35744, + "grad_norm": 0.6475701240153825, + "learning_rate": 3.0939410901010116e-06, + "loss": 0.3335336446762085, + "memory(GiB)": 77.0, + "step": 4242, + "token_acc": 0.915958873491283, + "train_speed(iter/s)": 0.922648 + }, + { + "epoch": 1.35776, + "grad_norm": 0.6042453354740668, + "learning_rate": 3.093084426949391e-06, + "loss": 0.3697930872440338, + "memory(GiB)": 77.0, + "step": 4243, + "token_acc": 0.8852492789451998, + "train_speed(iter/s)": 0.922003 + }, + { + "epoch": 1.35808, + "grad_norm": 0.6820231479204787, + "learning_rate": 3.092227689998702e-06, + "loss": 0.37544190883636475, + "memory(GiB)": 77.0, + "step": 4244, + "token_acc": 0.8441988950276244, + "train_speed(iter/s)": 0.921385 + }, + { + "epoch": 1.3584, + "grad_norm": 0.5967279143172654, + "learning_rate": 3.0913708793555506e-06, + "loss": 0.31241148710250854, + "memory(GiB)": 77.0, + "step": 4245, + "token_acc": 0.943289224952741, + "train_speed(iter/s)": 0.920722 + }, + { + "epoch": 1.35872, + "grad_norm": 0.7247827607602102, + "learning_rate": 3.090513995126552e-06, + "loss": 0.3595418334007263, + "memory(GiB)": 77.0, + "step": 4246, + "token_acc": 0.9288692666176109, + "train_speed(iter/s)": 0.920156 + }, + { + "epoch": 1.35904, + "grad_norm": 0.6859314493201345, + "learning_rate": 3.089657037418331e-06, + "loss": 0.3020663261413574, + "memory(GiB)": 77.0, + "step": 4247, + "token_acc": 0.9208882720333103, + "train_speed(iter/s)": 0.919513 + }, + { + "epoch": 1.3593600000000001, + "grad_norm": 0.570421871116229, + "learning_rate": 3.08880000633752e-06, + "loss": 0.3131709098815918, + "memory(GiB)": 77.0, + "step": 4248, + "token_acc": 0.9137001078748651, + "train_speed(iter/s)": 0.918861 + }, + { + "epoch": 1.35968, + "grad_norm": 0.6662778155881166, + "learning_rate": 3.087942901990763e-06, + "loss": 0.3293920159339905, + "memory(GiB)": 77.0, + 
"step": 4249, + "token_acc": 0.8838662256523337, + "train_speed(iter/s)": 0.918248 + }, + { + "epoch": 1.3599999999999999, + "grad_norm": 0.6171343671465115, + "learning_rate": 3.0870857244847107e-06, + "loss": 0.2777538299560547, + "memory(GiB)": 77.0, + "step": 4250, + "token_acc": 0.9039357845675816, + "train_speed(iter/s)": 0.917654 + }, + { + "epoch": 1.36032, + "grad_norm": 0.6014594657806884, + "learning_rate": 3.0862284739260247e-06, + "loss": 0.3285944163799286, + "memory(GiB)": 77.0, + "step": 4251, + "token_acc": 0.80175983436853, + "train_speed(iter/s)": 0.917013 + }, + { + "epoch": 1.36064, + "grad_norm": 0.6507496650302231, + "learning_rate": 3.0853711504213746e-06, + "loss": 0.2936677932739258, + "memory(GiB)": 77.0, + "step": 4252, + "token_acc": 0.9049304081151215, + "train_speed(iter/s)": 0.916444 + }, + { + "epoch": 1.36096, + "grad_norm": 0.5952239781650991, + "learning_rate": 3.0845137540774396e-06, + "loss": 0.3640817403793335, + "memory(GiB)": 77.0, + "step": 4253, + "token_acc": 0.8874862788144896, + "train_speed(iter/s)": 0.915803 + }, + { + "epoch": 1.36128, + "grad_norm": 0.6690939081948358, + "learning_rate": 3.083656285000907e-06, + "loss": 0.331296443939209, + "memory(GiB)": 77.0, + "step": 4254, + "token_acc": 0.9272103658536586, + "train_speed(iter/s)": 0.915226 + }, + { + "epoch": 1.3616, + "grad_norm": 0.6744612524197843, + "learning_rate": 3.082798743298475e-06, + "loss": 0.31121331453323364, + "memory(GiB)": 77.0, + "step": 4255, + "token_acc": 0.9595925297113752, + "train_speed(iter/s)": 0.914659 + }, + { + "epoch": 1.36192, + "grad_norm": 0.6305968997257078, + "learning_rate": 3.08194112907685e-06, + "loss": 0.3135267198085785, + "memory(GiB)": 77.0, + "step": 4256, + "token_acc": 0.9402634054562559, + "train_speed(iter/s)": 0.914091 + }, + { + "epoch": 1.36224, + "grad_norm": 0.655673510540433, + "learning_rate": 3.081083442442746e-06, + "loss": 0.26776963472366333, + "memory(GiB)": 77.0, + "step": 4257, + "token_acc": 0.8982089552238806, + "train_speed(iter/s)": 0.91349 + }, + { + "epoch": 1.36256, + "grad_norm": 0.6596294600704888, + "learning_rate": 3.080225683502889e-06, + "loss": 0.34576094150543213, + "memory(GiB)": 77.0, + "step": 4258, + "token_acc": 0.8703703703703703, + "train_speed(iter/s)": 0.912905 + }, + { + "epoch": 1.36288, + "grad_norm": 0.6157430624659842, + "learning_rate": 3.0793678523640103e-06, + "loss": 0.36320945620536804, + "memory(GiB)": 77.0, + "step": 4259, + "token_acc": 0.9249724163295329, + "train_speed(iter/s)": 0.912272 + }, + { + "epoch": 1.3632, + "grad_norm": 0.6368727091240072, + "learning_rate": 3.0785099491328523e-06, + "loss": 0.32216769456863403, + "memory(GiB)": 77.0, + "step": 4260, + "token_acc": 0.8378854625550661, + "train_speed(iter/s)": 0.911679 + }, + { + "epoch": 1.36352, + "grad_norm": 0.6403483148128805, + "learning_rate": 3.0776519739161677e-06, + "loss": 0.3421768546104431, + "memory(GiB)": 77.0, + "step": 4261, + "token_acc": 0.8834005376344086, + "train_speed(iter/s)": 0.911081 + }, + { + "epoch": 1.36384, + "grad_norm": 0.6726616078820745, + "learning_rate": 3.076793926820715e-06, + "loss": 0.27603158354759216, + "memory(GiB)": 77.0, + "step": 4262, + "token_acc": 0.872941907424666, + "train_speed(iter/s)": 0.910525 + }, + { + "epoch": 1.36416, + "grad_norm": 0.6838007794377523, + "learning_rate": 3.075935807953264e-06, + "loss": 0.28973501920700073, + "memory(GiB)": 77.0, + "step": 4263, + "token_acc": 0.9230535279805353, + "train_speed(iter/s)": 0.909936 + }, + { + "epoch": 1.36448, + 
"grad_norm": 0.6248534384846347, + "learning_rate": 3.0750776174205934e-06, + "loss": 0.2818823456764221, + "memory(GiB)": 77.0, + "step": 4264, + "token_acc": 0.9448773448773449, + "train_speed(iter/s)": 0.909378 + }, + { + "epoch": 1.3648, + "grad_norm": 0.6935513775442861, + "learning_rate": 3.0742193553294896e-06, + "loss": 0.38931018114089966, + "memory(GiB)": 77.0, + "step": 4265, + "token_acc": 0.8699042102918244, + "train_speed(iter/s)": 0.908818 + }, + { + "epoch": 1.3651200000000001, + "grad_norm": 0.6819610804001038, + "learning_rate": 3.0733610217867475e-06, + "loss": 0.3695595860481262, + "memory(GiB)": 77.0, + "step": 4266, + "token_acc": 0.8862385321100917, + "train_speed(iter/s)": 0.908253 + }, + { + "epoch": 1.36544, + "grad_norm": 0.7990138497446485, + "learning_rate": 3.072502616899173e-06, + "loss": 0.3939957320690155, + "memory(GiB)": 77.0, + "step": 4267, + "token_acc": 0.875195822454308, + "train_speed(iter/s)": 0.90766 + }, + { + "epoch": 1.3657599999999999, + "grad_norm": 0.6888923323465583, + "learning_rate": 3.0716441407735787e-06, + "loss": 0.30143848061561584, + "memory(GiB)": 77.0, + "step": 4268, + "token_acc": 0.8809251856082239, + "train_speed(iter/s)": 0.90712 + }, + { + "epoch": 1.36608, + "grad_norm": 0.6652242522890913, + "learning_rate": 3.0707855935167875e-06, + "loss": 0.2839866578578949, + "memory(GiB)": 77.0, + "step": 4269, + "token_acc": 0.8744743481917577, + "train_speed(iter/s)": 0.906517 + }, + { + "epoch": 1.3664, + "grad_norm": 0.6426920637352092, + "learning_rate": 3.0699269752356316e-06, + "loss": 0.2727542519569397, + "memory(GiB)": 77.0, + "step": 4270, + "token_acc": 0.898250162022035, + "train_speed(iter/s)": 0.905975 + }, + { + "epoch": 1.36672, + "grad_norm": 0.684282036584479, + "learning_rate": 3.0690682860369496e-06, + "loss": 0.32881948351860046, + "memory(GiB)": 77.0, + "step": 4271, + "token_acc": 0.9019253910950662, + "train_speed(iter/s)": 0.905391 + }, + { + "epoch": 1.36704, + "grad_norm": 0.6615753903898511, + "learning_rate": 3.0682095260275923e-06, + "loss": 0.3827477991580963, + "memory(GiB)": 77.0, + "step": 4272, + "token_acc": 0.9221311475409836, + "train_speed(iter/s)": 0.904797 + }, + { + "epoch": 1.3673600000000001, + "grad_norm": 0.7188319167804174, + "learning_rate": 3.067350695314416e-06, + "loss": 0.3877338767051697, + "memory(GiB)": 77.0, + "step": 4273, + "token_acc": 0.91415313225058, + "train_speed(iter/s)": 0.904239 + }, + { + "epoch": 1.36768, + "grad_norm": 0.7609675543779562, + "learning_rate": 3.0664917940042875e-06, + "loss": 0.3308347165584564, + "memory(GiB)": 77.0, + "step": 4274, + "token_acc": 0.8873110398534128, + "train_speed(iter/s)": 0.903661 + }, + { + "epoch": 1.3679999999999999, + "grad_norm": 0.6488637283842804, + "learning_rate": 3.0656328222040826e-06, + "loss": 0.3296472430229187, + "memory(GiB)": 77.0, + "step": 4275, + "token_acc": 0.8862713241267263, + "train_speed(iter/s)": 0.903126 + }, + { + "epoch": 1.36832, + "grad_norm": 0.6748720710764897, + "learning_rate": 3.0647737800206854e-06, + "loss": 0.3754917085170746, + "memory(GiB)": 77.0, + "step": 4276, + "token_acc": 0.8869024485036922, + "train_speed(iter/s)": 0.902544 + }, + { + "epoch": 1.36864, + "grad_norm": 0.6891177375818561, + "learning_rate": 3.0639146675609878e-06, + "loss": 0.3759986162185669, + "memory(GiB)": 77.0, + "step": 4277, + "token_acc": 0.9541507467870789, + "train_speed(iter/s)": 0.901947 + }, + { + "epoch": 1.36896, + "grad_norm": 0.6853378843621418, + "learning_rate": 3.0630554849318928e-06, + "loss": 
0.3432043790817261, + "memory(GiB)": 77.0, + "step": 4278, + "token_acc": 0.8786810974075007, + "train_speed(iter/s)": 0.901384 + }, + { + "epoch": 1.36928, + "grad_norm": 0.6202320828018655, + "learning_rate": 3.0621962322403104e-06, + "loss": 0.30370914936065674, + "memory(GiB)": 77.0, + "step": 4279, + "token_acc": 0.8547827523542045, + "train_speed(iter/s)": 0.900822 + }, + { + "epoch": 1.3696, + "grad_norm": 0.6672672524126889, + "learning_rate": 3.06133690959316e-06, + "loss": 0.2894951105117798, + "memory(GiB)": 77.0, + "step": 4280, + "token_acc": 0.8739372618000586, + "train_speed(iter/s)": 0.900277 + }, + { + "epoch": 1.36992, + "grad_norm": 0.6041556360098275, + "learning_rate": 3.0604775170973684e-06, + "loss": 0.31193941831588745, + "memory(GiB)": 77.0, + "step": 4281, + "token_acc": 0.9013035381750466, + "train_speed(iter/s)": 0.899728 + }, + { + "epoch": 1.37024, + "grad_norm": 0.6250799346532778, + "learning_rate": 3.0596180548598726e-06, + "loss": 0.37154093384742737, + "memory(GiB)": 77.0, + "step": 4282, + "token_acc": 0.9063588850174216, + "train_speed(iter/s)": 0.899142 + }, + { + "epoch": 1.37056, + "grad_norm": 0.6488133822589317, + "learning_rate": 3.0587585229876183e-06, + "loss": 0.41182881593704224, + "memory(GiB)": 77.0, + "step": 4283, + "token_acc": 0.8906066994612322, + "train_speed(iter/s)": 0.898552 + }, + { + "epoch": 1.37088, + "grad_norm": 0.6682816890900974, + "learning_rate": 3.057898921587558e-06, + "loss": 0.3666068911552429, + "memory(GiB)": 77.0, + "step": 4284, + "token_acc": 0.9405700424499697, + "train_speed(iter/s)": 0.897961 + }, + { + "epoch": 1.3712, + "grad_norm": 0.6270189962546858, + "learning_rate": 3.0570392507666546e-06, + "loss": 0.3067493438720703, + "memory(GiB)": 77.0, + "step": 4285, + "token_acc": 0.9550335570469799, + "train_speed(iter/s)": 0.8974 + }, + { + "epoch": 1.37152, + "grad_norm": 0.690898786485085, + "learning_rate": 3.0561795106318807e-06, + "loss": 0.37027430534362793, + "memory(GiB)": 77.0, + "step": 4286, + "token_acc": 0.9638310185185185, + "train_speed(iter/s)": 0.896866 + }, + { + "epoch": 1.37184, + "grad_norm": 0.6527813190958659, + "learning_rate": 3.055319701290215e-06, + "loss": 0.2765730321407318, + "memory(GiB)": 77.0, + "step": 4287, + "token_acc": 0.9460815047021943, + "train_speed(iter/s)": 0.896342 + }, + { + "epoch": 1.37216, + "grad_norm": 0.7003232964807127, + "learning_rate": 3.0544598228486456e-06, + "loss": 0.31122100353240967, + "memory(GiB)": 77.0, + "step": 4288, + "token_acc": 0.8556902620258114, + "train_speed(iter/s)": 0.89578 + }, + { + "epoch": 1.37248, + "grad_norm": 0.7325720350315864, + "learning_rate": 3.0535998754141694e-06, + "loss": 0.349945604801178, + "memory(GiB)": 77.0, + "step": 4289, + "token_acc": 0.9155165202843998, + "train_speed(iter/s)": 0.895253 + }, + { + "epoch": 1.3728, + "grad_norm": 0.6742935974303215, + "learning_rate": 3.052739859093793e-06, + "loss": 0.26659637689590454, + "memory(GiB)": 77.0, + "step": 4290, + "token_acc": 0.8745328350240257, + "train_speed(iter/s)": 0.894728 + }, + { + "epoch": 1.3731200000000001, + "grad_norm": 0.6103162639301885, + "learning_rate": 3.0518797739945287e-06, + "loss": 0.2926139831542969, + "memory(GiB)": 77.0, + "step": 4291, + "token_acc": 0.9407865665046399, + "train_speed(iter/s)": 0.894152 + }, + { + "epoch": 1.37344, + "grad_norm": 0.6249849198503761, + "learning_rate": 3.0510196202234012e-06, + "loss": 0.3290163278579712, + "memory(GiB)": 77.0, + "step": 4292, + "token_acc": 0.9303201506591338, + "train_speed(iter/s)": 
0.89359 + }, + { + "epoch": 1.3737599999999999, + "grad_norm": 0.6432983954351104, + "learning_rate": 3.0501593978874394e-06, + "loss": 0.35242992639541626, + "memory(GiB)": 77.0, + "step": 4293, + "token_acc": 0.9070931849791377, + "train_speed(iter/s)": 0.893041 + }, + { + "epoch": 1.37408, + "grad_norm": 0.6853147198089216, + "learning_rate": 3.0492991070936857e-06, + "loss": 0.36351868510246277, + "memory(GiB)": 77.0, + "step": 4294, + "token_acc": 0.8616865453023645, + "train_speed(iter/s)": 0.892453 + }, + { + "epoch": 1.3744, + "grad_norm": 0.6009705075968218, + "learning_rate": 3.0484387479491863e-06, + "loss": 0.339024156332016, + "memory(GiB)": 77.0, + "step": 4295, + "token_acc": 0.8545759463344513, + "train_speed(iter/s)": 0.891929 + }, + { + "epoch": 1.37472, + "grad_norm": 0.7468351806639678, + "learning_rate": 3.0475783205609988e-06, + "loss": 0.3153824210166931, + "memory(GiB)": 77.0, + "step": 4296, + "token_acc": 0.8443456162642948, + "train_speed(iter/s)": 0.891384 + }, + { + "epoch": 1.37504, + "grad_norm": 0.6699655881381665, + "learning_rate": 3.0467178250361884e-06, + "loss": 0.33697107434272766, + "memory(GiB)": 77.0, + "step": 4297, + "token_acc": 0.9472693032015066, + "train_speed(iter/s)": 0.890849 + }, + { + "epoch": 1.3753600000000001, + "grad_norm": 0.659547145250344, + "learning_rate": 3.0458572614818294e-06, + "loss": 0.3630412817001343, + "memory(GiB)": 77.0, + "step": 4298, + "token_acc": 0.8927613941018767, + "train_speed(iter/s)": 0.890319 + }, + { + "epoch": 1.37568, + "grad_norm": 0.6871618174227611, + "learning_rate": 3.0449966300050025e-06, + "loss": 0.35042038559913635, + "memory(GiB)": 77.0, + "step": 4299, + "token_acc": 0.8539741219963032, + "train_speed(iter/s)": 0.889798 + }, + { + "epoch": 1.376, + "grad_norm": 0.6210604396706906, + "learning_rate": 3.0441359307127997e-06, + "loss": 0.2684192657470703, + "memory(GiB)": 77.0, + "step": 4300, + "token_acc": 0.9098548510313216, + "train_speed(iter/s)": 0.889188 + }, + { + "epoch": 1.37632, + "grad_norm": 0.6883877791942746, + "learning_rate": 3.04327516371232e-06, + "loss": 0.3859352469444275, + "memory(GiB)": 77.0, + "step": 4301, + "token_acc": 0.8932210899424015, + "train_speed(iter/s)": 0.888623 + }, + { + "epoch": 1.37664, + "grad_norm": 0.7143041137194588, + "learning_rate": 3.04241432911067e-06, + "loss": 0.3036700487136841, + "memory(GiB)": 77.0, + "step": 4302, + "token_acc": 0.8745844666062255, + "train_speed(iter/s)": 0.888099 + }, + { + "epoch": 1.37696, + "grad_norm": 0.6839769802056538, + "learning_rate": 3.0415534270149666e-06, + "loss": 0.3674298822879791, + "memory(GiB)": 77.0, + "step": 4303, + "token_acc": 0.9007633587786259, + "train_speed(iter/s)": 0.887548 + }, + { + "epoch": 1.37728, + "grad_norm": 0.6590978128347565, + "learning_rate": 3.040692457532334e-06, + "loss": 0.2879508137702942, + "memory(GiB)": 77.0, + "step": 4304, + "token_acc": 0.9490579204466155, + "train_speed(iter/s)": 0.887028 + }, + { + "epoch": 1.3776, + "grad_norm": 0.6008331846930393, + "learning_rate": 3.0398314207699035e-06, + "loss": 0.31876230239868164, + "memory(GiB)": 77.0, + "step": 4305, + "token_acc": 0.905668016194332, + "train_speed(iter/s)": 0.886444 + }, + { + "epoch": 1.37792, + "grad_norm": 0.6364781018087307, + "learning_rate": 3.0389703168348182e-06, + "loss": 0.35511934757232666, + "memory(GiB)": 77.0, + "step": 4306, + "token_acc": 0.8889778489458233, + "train_speed(iter/s)": 0.885887 + }, + { + "epoch": 1.37824, + "grad_norm": 0.6447320104555839, + "learning_rate": 
3.0381091458342273e-06, + "loss": 0.37829822301864624, + "memory(GiB)": 77.0, + "step": 4307, + "token_acc": 0.9001540832049306, + "train_speed(iter/s)": 0.885375 + }, + { + "epoch": 1.37856, + "grad_norm": 0.6288049656807638, + "learning_rate": 3.037247907875287e-06, + "loss": 0.2716740667819977, + "memory(GiB)": 77.0, + "step": 4308, + "token_acc": 0.9605166051660516, + "train_speed(iter/s)": 0.884858 + }, + { + "epoch": 1.37888, + "grad_norm": 0.5826584692926242, + "learning_rate": 3.0363866030651644e-06, + "loss": 0.35796430706977844, + "memory(GiB)": 77.0, + "step": 4309, + "token_acc": 0.8590785907859079, + "train_speed(iter/s)": 0.884292 + }, + { + "epoch": 1.3792, + "grad_norm": 0.6623428350949894, + "learning_rate": 3.0355252315110346e-06, + "loss": 0.31746625900268555, + "memory(GiB)": 77.0, + "step": 4310, + "token_acc": 0.8750981932443048, + "train_speed(iter/s)": 0.883759 + }, + { + "epoch": 1.37952, + "grad_norm": 0.7259051938709907, + "learning_rate": 3.0346637933200795e-06, + "loss": 0.417924165725708, + "memory(GiB)": 77.0, + "step": 4311, + "token_acc": 0.8180722891566266, + "train_speed(iter/s)": 0.883244 + }, + { + "epoch": 1.37984, + "grad_norm": 0.6294511091774139, + "learning_rate": 3.0338022885994904e-06, + "loss": 0.312965452671051, + "memory(GiB)": 77.0, + "step": 4312, + "token_acc": 0.9314013206162876, + "train_speed(iter/s)": 0.882715 + }, + { + "epoch": 1.38016, + "grad_norm": 0.6354880777612829, + "learning_rate": 3.032940717456466e-06, + "loss": 0.3215983510017395, + "memory(GiB)": 77.0, + "step": 4313, + "token_acc": 0.8671904944423151, + "train_speed(iter/s)": 0.882174 + }, + { + "epoch": 1.38048, + "grad_norm": 0.660171694966673, + "learning_rate": 3.0320790799982154e-06, + "loss": 0.3238958418369293, + "memory(GiB)": 77.0, + "step": 4314, + "token_acc": 0.8860103626943006, + "train_speed(iter/s)": 0.881665 + }, + { + "epoch": 1.3808, + "grad_norm": 0.6848367762272087, + "learning_rate": 3.031217376331953e-06, + "loss": 0.39260441064834595, + "memory(GiB)": 77.0, + "step": 4315, + "token_acc": 0.8664323374340949, + "train_speed(iter/s)": 0.881115 + }, + { + "epoch": 1.3811200000000001, + "grad_norm": 0.599250532233092, + "learning_rate": 3.0303556065649036e-06, + "loss": 0.29093384742736816, + "memory(GiB)": 77.0, + "step": 4316, + "token_acc": 0.9149151683970626, + "train_speed(iter/s)": 0.880597 + }, + { + "epoch": 1.38144, + "grad_norm": 0.6278021437870787, + "learning_rate": 3.029493770804299e-06, + "loss": 0.30308997631073, + "memory(GiB)": 77.0, + "step": 4317, + "token_acc": 0.8946085897045385, + "train_speed(iter/s)": 0.88007 + }, + { + "epoch": 1.3817599999999999, + "grad_norm": 0.7177893988019207, + "learning_rate": 3.02863186915738e-06, + "loss": 0.3953600525856018, + "memory(GiB)": 77.0, + "step": 4318, + "token_acc": 0.9626502111075025, + "train_speed(iter/s)": 0.879565 + }, + { + "epoch": 1.38208, + "grad_norm": 0.662203789765646, + "learning_rate": 3.0277699017313956e-06, + "loss": 0.33073532581329346, + "memory(GiB)": 77.0, + "step": 4319, + "token_acc": 0.8588154269972452, + "train_speed(iter/s)": 0.879009 + }, + { + "epoch": 1.3824, + "grad_norm": 0.692090089076806, + "learning_rate": 3.0269078686336018e-06, + "loss": 0.34728148579597473, + "memory(GiB)": 77.0, + "step": 4320, + "token_acc": 0.7772866371982319, + "train_speed(iter/s)": 0.878513 + }, + { + "epoch": 1.38272, + "grad_norm": 0.6355950214362392, + "learning_rate": 3.026045769971264e-06, + "loss": 0.33061689138412476, + "memory(GiB)": 77.0, + "step": 4321, + "token_acc": 
0.874406779661017, + "train_speed(iter/s)": 0.877983 + }, + { + "epoch": 1.38304, + "grad_norm": 0.6259535650696981, + "learning_rate": 3.0251836058516565e-06, + "loss": 0.26714175939559937, + "memory(GiB)": 77.0, + "step": 4322, + "token_acc": 0.8941767068273092, + "train_speed(iter/s)": 0.877469 + }, + { + "epoch": 1.38336, + "grad_norm": 0.6871909864421029, + "learning_rate": 3.02432137638206e-06, + "loss": 0.42674320936203003, + "memory(GiB)": 77.0, + "step": 4323, + "token_acc": 0.9374747882210569, + "train_speed(iter/s)": 0.87688 + }, + { + "epoch": 1.38368, + "grad_norm": 0.7423029048302132, + "learning_rate": 3.0234590816697634e-06, + "loss": 0.3983156085014343, + "memory(GiB)": 77.0, + "step": 4324, + "token_acc": 0.87268875192604, + "train_speed(iter/s)": 0.876349 + }, + { + "epoch": 1.384, + "grad_norm": 0.6512011581962812, + "learning_rate": 3.022596721822064e-06, + "loss": 0.35658860206604004, + "memory(GiB)": 77.0, + "step": 4325, + "token_acc": 0.8648373983739838, + "train_speed(iter/s)": 0.875848 + }, + { + "epoch": 1.38432, + "grad_norm": 0.6543081968651497, + "learning_rate": 3.021734296946269e-06, + "loss": 0.3381490111351013, + "memory(GiB)": 77.0, + "step": 4326, + "token_acc": 0.8676590538336052, + "train_speed(iter/s)": 0.875316 + }, + { + "epoch": 1.38464, + "grad_norm": 0.6277777839193575, + "learning_rate": 3.020871807149691e-06, + "loss": 0.2588089108467102, + "memory(GiB)": 77.0, + "step": 4327, + "token_acc": 0.9149734982332155, + "train_speed(iter/s)": 0.874816 + }, + { + "epoch": 1.38496, + "grad_norm": 0.6835794903224517, + "learning_rate": 3.0200092525396527e-06, + "loss": 0.35493600368499756, + "memory(GiB)": 77.0, + "step": 4328, + "token_acc": 0.8439990890457755, + "train_speed(iter/s)": 0.874304 + }, + { + "epoch": 1.38528, + "grad_norm": 0.6189416814270493, + "learning_rate": 3.0191466332234842e-06, + "loss": 0.3546558618545532, + "memory(GiB)": 77.0, + "step": 4329, + "token_acc": 0.9190569744597249, + "train_speed(iter/s)": 0.873737 + }, + { + "epoch": 1.3856, + "grad_norm": 0.6748009288157257, + "learning_rate": 3.018283949308523e-06, + "loss": 0.3504583239555359, + "memory(GiB)": 77.0, + "step": 4330, + "token_acc": 0.8807453416149068, + "train_speed(iter/s)": 0.873153 + }, + { + "epoch": 1.38592, + "grad_norm": 0.685615109435185, + "learning_rate": 3.0174212009021146e-06, + "loss": 0.32211434841156006, + "memory(GiB)": 77.0, + "step": 4331, + "token_acc": 0.9270405836753306, + "train_speed(iter/s)": 0.872623 + }, + { + "epoch": 1.38624, + "grad_norm": 0.6255213766950786, + "learning_rate": 3.0165583881116147e-06, + "loss": 0.3092009723186493, + "memory(GiB)": 77.0, + "step": 4332, + "token_acc": 0.9020494273658831, + "train_speed(iter/s)": 0.872128 + }, + { + "epoch": 1.38656, + "grad_norm": 0.6516134885670846, + "learning_rate": 3.0156955110443825e-06, + "loss": 0.37470972537994385, + "memory(GiB)": 77.0, + "step": 4333, + "token_acc": 0.8827812158718602, + "train_speed(iter/s)": 0.871595 + }, + { + "epoch": 1.3868800000000001, + "grad_norm": 0.6548480313704654, + "learning_rate": 3.0148325698077914e-06, + "loss": 0.384962260723114, + "memory(GiB)": 77.0, + "step": 4334, + "token_acc": 0.8768592182635766, + "train_speed(iter/s)": 0.871044 + }, + { + "epoch": 1.3872, + "grad_norm": 0.6268738384370176, + "learning_rate": 3.013969564509217e-06, + "loss": 0.3638661205768585, + "memory(GiB)": 77.0, + "step": 4335, + "token_acc": 0.9407258064516129, + "train_speed(iter/s)": 0.870524 + }, + { + "epoch": 1.3875199999999999, + "grad_norm": 
0.6804339030501074, + "learning_rate": 3.0131064952560474e-06, + "loss": 0.41866785287857056, + "memory(GiB)": 77.0, + "step": 4336, + "token_acc": 0.8871193559677983, + "train_speed(iter/s)": 0.869902 + }, + { + "epoch": 1.38784, + "grad_norm": 0.6762750436294294, + "learning_rate": 3.012243362155675e-06, + "loss": 0.33027124404907227, + "memory(GiB)": 77.0, + "step": 4337, + "token_acc": 0.9075873475294605, + "train_speed(iter/s)": 0.86942 + }, + { + "epoch": 1.38816, + "grad_norm": 0.7303665064656562, + "learning_rate": 3.011380165315503e-06, + "loss": 0.3158634603023529, + "memory(GiB)": 77.0, + "step": 4338, + "token_acc": 0.8726877040261154, + "train_speed(iter/s)": 0.868933 + }, + { + "epoch": 1.38848, + "grad_norm": 0.6317069827908747, + "learning_rate": 3.0105169048429394e-06, + "loss": 0.2784913182258606, + "memory(GiB)": 77.0, + "step": 4339, + "token_acc": 0.910773899848255, + "train_speed(iter/s)": 0.868431 + }, + { + "epoch": 1.3888, + "grad_norm": 0.6566201982502525, + "learning_rate": 3.0096535808454036e-06, + "loss": 0.41109156608581543, + "memory(GiB)": 77.0, + "step": 4340, + "token_acc": 0.873882503192848, + "train_speed(iter/s)": 0.867938 + }, + { + "epoch": 1.3891200000000001, + "grad_norm": 0.6016657062926796, + "learning_rate": 3.008790193430321e-06, + "loss": 0.3055785894393921, + "memory(GiB)": 77.0, + "step": 4341, + "token_acc": 0.9381616939364774, + "train_speed(iter/s)": 0.867408 + }, + { + "epoch": 1.38944, + "grad_norm": 0.6962705540558414, + "learning_rate": 3.007926742705124e-06, + "loss": 0.3736867904663086, + "memory(GiB)": 77.0, + "step": 4342, + "token_acc": 0.8862427159868255, + "train_speed(iter/s)": 0.866924 + }, + { + "epoch": 1.3897599999999999, + "grad_norm": 0.652043502797017, + "learning_rate": 3.007063228777256e-06, + "loss": 0.2982235252857208, + "memory(GiB)": 77.0, + "step": 4343, + "token_acc": 0.8956406869220608, + "train_speed(iter/s)": 0.866434 + }, + { + "epoch": 1.39008, + "grad_norm": 0.6775763858716973, + "learning_rate": 3.0061996517541658e-06, + "loss": 0.3420012295246124, + "memory(GiB)": 77.0, + "step": 4344, + "token_acc": 0.9385455786958006, + "train_speed(iter/s)": 0.865943 + }, + { + "epoch": 1.3904, + "grad_norm": 0.6439761259232625, + "learning_rate": 3.00533601174331e-06, + "loss": 0.4103735089302063, + "memory(GiB)": 77.0, + "step": 4345, + "token_acc": 0.8715917035656024, + "train_speed(iter/s)": 0.865443 + }, + { + "epoch": 1.39072, + "grad_norm": 0.6352301749825224, + "learning_rate": 3.004472308852154e-06, + "loss": 0.3472548723220825, + "memory(GiB)": 77.0, + "step": 4346, + "token_acc": 0.8811204234671371, + "train_speed(iter/s)": 0.864964 + }, + { + "epoch": 1.39104, + "grad_norm": 0.6324881951809702, + "learning_rate": 3.0036085431881693e-06, + "loss": 0.266538143157959, + "memory(GiB)": 77.0, + "step": 4347, + "token_acc": 0.9507512520868113, + "train_speed(iter/s)": 0.864474 + }, + { + "epoch": 1.39136, + "grad_norm": 0.6315292337034492, + "learning_rate": 3.0027447148588383e-06, + "loss": 0.4220055341720581, + "memory(GiB)": 77.0, + "step": 4348, + "token_acc": 0.8736979166666666, + "train_speed(iter/s)": 0.863955 + }, + { + "epoch": 1.39168, + "grad_norm": 0.6872517896304224, + "learning_rate": 3.001880823971649e-06, + "loss": 0.36405542492866516, + "memory(GiB)": 77.0, + "step": 4349, + "token_acc": 0.9349537037037037, + "train_speed(iter/s)": 0.863459 + }, + { + "epoch": 1.392, + "grad_norm": 0.6101810023766838, + "learning_rate": 3.001016870634097e-06, + "loss": 0.34146150946617126, + "memory(GiB)": 77.0, 
+ "step": 4350, + "token_acc": 0.9486221391872957, + "train_speed(iter/s)": 0.862913 + }, + { + "epoch": 1.39232, + "grad_norm": 0.703197133039131, + "learning_rate": 3.0001528549536875e-06, + "loss": 0.37216711044311523, + "memory(GiB)": 77.0, + "step": 4351, + "token_acc": 0.9417040358744395, + "train_speed(iter/s)": 0.862402 + }, + { + "epoch": 1.39264, + "grad_norm": 0.692506154907521, + "learning_rate": 2.999288777037932e-06, + "loss": 0.3151783347129822, + "memory(GiB)": 77.0, + "step": 4352, + "token_acc": 0.8955878322828172, + "train_speed(iter/s)": 0.861935 + }, + { + "epoch": 1.39296, + "grad_norm": 0.6767527155960826, + "learning_rate": 2.998424636994349e-06, + "loss": 0.365522176027298, + "memory(GiB)": 77.0, + "step": 4353, + "token_acc": 0.8605683836589698, + "train_speed(iter/s)": 0.861458 + }, + { + "epoch": 1.39328, + "grad_norm": 0.6036042072705092, + "learning_rate": 2.997560434930466e-06, + "loss": 0.31248700618743896, + "memory(GiB)": 77.0, + "step": 4354, + "token_acc": 0.9013468602413853, + "train_speed(iter/s)": 0.860966 + }, + { + "epoch": 1.3936, + "grad_norm": 0.6674024865026706, + "learning_rate": 2.9966961709538183e-06, + "loss": 0.4050583243370056, + "memory(GiB)": 77.0, + "step": 4355, + "token_acc": 0.8935762224352828, + "train_speed(iter/s)": 0.860458 + }, + { + "epoch": 1.39392, + "grad_norm": 0.5923204674482372, + "learning_rate": 2.9958318451719485e-06, + "loss": 0.3515234589576721, + "memory(GiB)": 77.0, + "step": 4356, + "token_acc": 0.9613715277777778, + "train_speed(iter/s)": 0.859896 + }, + { + "epoch": 1.39424, + "grad_norm": 0.663649130515658, + "learning_rate": 2.9949674576924072e-06, + "loss": 0.34206387400627136, + "memory(GiB)": 77.0, + "step": 4357, + "token_acc": 0.8916188289322617, + "train_speed(iter/s)": 0.859412 + }, + { + "epoch": 1.39456, + "grad_norm": 0.6978850401021492, + "learning_rate": 2.994103008622752e-06, + "loss": 0.3966566324234009, + "memory(GiB)": 77.0, + "step": 4358, + "token_acc": 0.8876156897311591, + "train_speed(iter/s)": 0.858934 + }, + { + "epoch": 1.3948800000000001, + "grad_norm": 0.7220756880948529, + "learning_rate": 2.9932384980705486e-06, + "loss": 0.39058083295822144, + "memory(GiB)": 77.0, + "step": 4359, + "token_acc": 0.8518778518778519, + "train_speed(iter/s)": 0.858449 + }, + { + "epoch": 1.3952, + "grad_norm": 0.6865224091589032, + "learning_rate": 2.992373926143371e-06, + "loss": 0.3992018699645996, + "memory(GiB)": 77.0, + "step": 4360, + "token_acc": 0.8794037940379403, + "train_speed(iter/s)": 0.857966 + }, + { + "epoch": 1.3955199999999999, + "grad_norm": 0.6312328020486533, + "learning_rate": 2.991509292948799e-06, + "loss": 0.39429235458374023, + "memory(GiB)": 77.0, + "step": 4361, + "token_acc": 0.8688343286883433, + "train_speed(iter/s)": 0.857442 + }, + { + "epoch": 1.39584, + "grad_norm": 0.5962504860035358, + "learning_rate": 2.9906445985944227e-06, + "loss": 0.2964407205581665, + "memory(GiB)": 77.0, + "step": 4362, + "token_acc": 0.9081657378240853, + "train_speed(iter/s)": 0.85693 + }, + { + "epoch": 1.39616, + "grad_norm": 0.6560833998693284, + "learning_rate": 2.989779843187837e-06, + "loss": 0.34017959237098694, + "memory(GiB)": 77.0, + "step": 4363, + "token_acc": 0.9286412512218963, + "train_speed(iter/s)": 0.856442 + }, + { + "epoch": 1.39648, + "grad_norm": 0.6143304100901719, + "learning_rate": 2.988915026836647e-06, + "loss": 0.3292248249053955, + "memory(GiB)": 77.0, + "step": 4364, + "token_acc": 0.9400456794822992, + "train_speed(iter/s)": 0.855883 + }, + { + "epoch": 
1.3968, + "grad_norm": 0.6799094881486513, + "learning_rate": 2.9880501496484627e-06, + "loss": 0.4244815409183502, + "memory(GiB)": 77.0, + "step": 4365, + "token_acc": 0.8493182886694876, + "train_speed(iter/s)": 0.855358 + }, + { + "epoch": 1.39712, + "grad_norm": 0.6577128636664139, + "learning_rate": 2.9871852117309037e-06, + "loss": 0.3943932354450226, + "memory(GiB)": 77.0, + "step": 4366, + "token_acc": 0.9238975817923186, + "train_speed(iter/s)": 0.85481 + }, + { + "epoch": 1.39744, + "grad_norm": 0.6430788892926145, + "learning_rate": 2.9863202131915964e-06, + "loss": 0.36959874629974365, + "memory(GiB)": 77.0, + "step": 4367, + "token_acc": 0.8367763904653802, + "train_speed(iter/s)": 0.854325 + }, + { + "epoch": 1.39776, + "grad_norm": 0.6613758914203183, + "learning_rate": 2.985455154138175e-06, + "loss": 0.33080652356147766, + "memory(GiB)": 77.0, + "step": 4368, + "token_acc": 0.8634624581539933, + "train_speed(iter/s)": 0.853814 + }, + { + "epoch": 1.39808, + "grad_norm": 0.6651417663372003, + "learning_rate": 2.9845900346782814e-06, + "loss": 0.33011871576309204, + "memory(GiB)": 77.0, + "step": 4369, + "token_acc": 0.9481231547870097, + "train_speed(iter/s)": 0.853297 + }, + { + "epoch": 1.3984, + "grad_norm": 0.6508521605962365, + "learning_rate": 2.983724854919564e-06, + "loss": 0.30315202474594116, + "memory(GiB)": 77.0, + "step": 4370, + "token_acc": 0.8627767527675276, + "train_speed(iter/s)": 0.852801 + }, + { + "epoch": 1.39872, + "grad_norm": 0.6427835938707532, + "learning_rate": 2.9828596149696796e-06, + "loss": 0.3698379397392273, + "memory(GiB)": 77.0, + "step": 4371, + "token_acc": 0.9083236321303841, + "train_speed(iter/s)": 0.852305 + }, + { + "epoch": 1.39904, + "grad_norm": 0.9678488452223776, + "learning_rate": 2.9819943149362928e-06, + "loss": 0.3506050407886505, + "memory(GiB)": 77.0, + "step": 4372, + "token_acc": 0.8794556059624109, + "train_speed(iter/s)": 0.851846 + }, + { + "epoch": 1.39936, + "grad_norm": 0.7084810495419472, + "learning_rate": 2.981128954927075e-06, + "loss": 0.28222811222076416, + "memory(GiB)": 77.0, + "step": 4373, + "token_acc": 0.9316200091785223, + "train_speed(iter/s)": 0.851367 + }, + { + "epoch": 1.39968, + "grad_norm": 0.6885273763784686, + "learning_rate": 2.980263535049705e-06, + "loss": 0.3095555901527405, + "memory(GiB)": 77.0, + "step": 4374, + "token_acc": 0.9367928209129925, + "train_speed(iter/s)": 0.850918 + }, + { + "epoch": 1.4, + "grad_norm": 0.6498657336612653, + "learning_rate": 2.9793980554118694e-06, + "loss": 0.3608527183532715, + "memory(GiB)": 77.0, + "step": 4375, + "token_acc": 0.907267144319345, + "train_speed(iter/s)": 0.850416 + }, + { + "epoch": 1.40032, + "grad_norm": 0.6367877703958096, + "learning_rate": 2.978532516121262e-06, + "loss": 0.3264850080013275, + "memory(GiB)": 77.0, + "step": 4376, + "token_acc": 0.8839350180505415, + "train_speed(iter/s)": 0.849945 + }, + { + "epoch": 1.40064, + "grad_norm": 0.7034814916241945, + "learning_rate": 2.9776669172855844e-06, + "loss": 0.3526976704597473, + "memory(GiB)": 77.0, + "step": 4377, + "token_acc": 0.9555639954424611, + "train_speed(iter/s)": 0.849473 + }, + { + "epoch": 1.40096, + "grad_norm": 0.7170103830333117, + "learning_rate": 2.976801259012545e-06, + "loss": 0.3808807134628296, + "memory(GiB)": 77.0, + "step": 4378, + "token_acc": 0.8263653483992467, + "train_speed(iter/s)": 0.84902 + }, + { + "epoch": 1.40128, + "grad_norm": 0.631767002597509, + "learning_rate": 2.9759355414098604e-06, + "loss": 0.3508889973163605, + "memory(GiB)": 
77.0, + "step": 4379, + "token_acc": 0.8727899956877965, + "train_speed(iter/s)": 0.848552 + }, + { + "epoch": 1.4016, + "grad_norm": 0.6224668911783295, + "learning_rate": 2.9750697645852546e-06, + "loss": 0.3717137575149536, + "memory(GiB)": 77.0, + "step": 4380, + "token_acc": 0.8799595653272682, + "train_speed(iter/s)": 0.848051 + }, + { + "epoch": 1.40192, + "grad_norm": 0.7237895555356081, + "learning_rate": 2.974203928646457e-06, + "loss": 0.39227569103240967, + "memory(GiB)": 77.0, + "step": 4381, + "token_acc": 0.8949348044132397, + "train_speed(iter/s)": 0.847601 + }, + { + "epoch": 1.40224, + "grad_norm": 0.705436450131952, + "learning_rate": 2.9733380337012063e-06, + "loss": 0.3477146029472351, + "memory(GiB)": 77.0, + "step": 4382, + "token_acc": 0.9184952978056427, + "train_speed(iter/s)": 0.847145 + }, + { + "epoch": 1.40256, + "grad_norm": 0.5658624412480471, + "learning_rate": 2.9724720798572485e-06, + "loss": 0.21779581904411316, + "memory(GiB)": 77.0, + "step": 4383, + "token_acc": 0.9524673439767779, + "train_speed(iter/s)": 0.846699 + }, + { + "epoch": 1.4028800000000001, + "grad_norm": 0.6541085669362559, + "learning_rate": 2.9716060672223374e-06, + "loss": 0.322834312915802, + "memory(GiB)": 77.0, + "step": 4384, + "token_acc": 0.8743595999024152, + "train_speed(iter/s)": 0.846226 + }, + { + "epoch": 1.4032, + "grad_norm": 0.6930127960571545, + "learning_rate": 2.9707399959042315e-06, + "loss": 0.4050218164920807, + "memory(GiB)": 77.0, + "step": 4385, + "token_acc": 0.8401735671668776, + "train_speed(iter/s)": 0.845684 + }, + { + "epoch": 1.4035199999999999, + "grad_norm": 0.6837852518056018, + "learning_rate": 2.9698738660107e-06, + "loss": 0.3282737731933594, + "memory(GiB)": 77.0, + "step": 4386, + "token_acc": 0.9401603948180136, + "train_speed(iter/s)": 0.845209 + }, + { + "epoch": 1.40384, + "grad_norm": 0.7686597873021215, + "learning_rate": 2.969007677649517e-06, + "loss": 0.2908940315246582, + "memory(GiB)": 77.0, + "step": 4387, + "token_acc": 0.9245124764101489, + "train_speed(iter/s)": 0.844771 + }, + { + "epoch": 1.40416, + "grad_norm": 0.5893709848954293, + "learning_rate": 2.9681414309284643e-06, + "loss": 0.2679234743118286, + "memory(GiB)": 77.0, + "step": 4388, + "token_acc": 0.8985785631963119, + "train_speed(iter/s)": 0.844297 + }, + { + "epoch": 1.40448, + "grad_norm": 0.6553757215248194, + "learning_rate": 2.967275125955332e-06, + "loss": 0.38989073038101196, + "memory(GiB)": 77.0, + "step": 4389, + "token_acc": 0.9690721649484536, + "train_speed(iter/s)": 0.843806 + }, + { + "epoch": 1.4048, + "grad_norm": 0.7275786108787488, + "learning_rate": 2.9664087628379145e-06, + "loss": 0.38360899686813354, + "memory(GiB)": 77.0, + "step": 4390, + "token_acc": 0.9152097902097902, + "train_speed(iter/s)": 0.843365 + }, + { + "epoch": 1.40512, + "grad_norm": 0.697494069040939, + "learning_rate": 2.965542341684019e-06, + "loss": 0.39419686794281006, + "memory(GiB)": 77.0, + "step": 4391, + "token_acc": 0.926440177252585, + "train_speed(iter/s)": 0.842871 + }, + { + "epoch": 1.40544, + "grad_norm": 0.6530644725923939, + "learning_rate": 2.964675862601455e-06, + "loss": 0.3980907201766968, + "memory(GiB)": 77.0, + "step": 4392, + "token_acc": 0.8425698871996077, + "train_speed(iter/s)": 0.842369 + }, + { + "epoch": 1.40576, + "grad_norm": 0.6605401933427211, + "learning_rate": 2.9638093256980404e-06, + "loss": 0.31082063913345337, + "memory(GiB)": 77.0, + "step": 4393, + "token_acc": 0.9437791084497671, + "train_speed(iter/s)": 0.84187 + }, + { + "epoch": 
1.40608, + "grad_norm": 0.6682129346468689, + "learning_rate": 2.962942731081601e-06, + "loss": 0.39828264713287354, + "memory(GiB)": 77.0, + "step": 4394, + "token_acc": 0.9033775117571612, + "train_speed(iter/s)": 0.841411 + }, + { + "epoch": 1.4064, + "grad_norm": 0.693534268821213, + "learning_rate": 2.9620760788599708e-06, + "loss": 0.36502987146377563, + "memory(GiB)": 77.0, + "step": 4395, + "token_acc": 0.9288150042625746, + "train_speed(iter/s)": 0.840963 + }, + { + "epoch": 1.40672, + "grad_norm": 0.6847987234639491, + "learning_rate": 2.961209369140987e-06, + "loss": 0.3247409462928772, + "memory(GiB)": 77.0, + "step": 4396, + "token_acc": 0.9061472299519352, + "train_speed(iter/s)": 0.840516 + }, + { + "epoch": 1.40704, + "grad_norm": 0.5987726752492568, + "learning_rate": 2.960342602032499e-06, + "loss": 0.3698011636734009, + "memory(GiB)": 77.0, + "step": 4397, + "token_acc": 0.8576142131979695, + "train_speed(iter/s)": 0.840051 + }, + { + "epoch": 1.40736, + "grad_norm": 0.6255438266364343, + "learning_rate": 2.9594757776423588e-06, + "loss": 0.38386625051498413, + "memory(GiB)": 77.0, + "step": 4398, + "token_acc": 0.8656685808039376, + "train_speed(iter/s)": 0.839571 + }, + { + "epoch": 1.40768, + "grad_norm": 0.6346262805222083, + "learning_rate": 2.9586088960784293e-06, + "loss": 0.2856804430484772, + "memory(GiB)": 77.0, + "step": 4399, + "token_acc": 0.921146953405018, + "train_speed(iter/s)": 0.839099 + }, + { + "epoch": 1.408, + "grad_norm": 0.6395036970114529, + "learning_rate": 2.957741957448578e-06, + "loss": 0.3471435606479645, + "memory(GiB)": 77.0, + "step": 4400, + "token_acc": 0.8861144945188795, + "train_speed(iter/s)": 0.838641 + }, + { + "epoch": 1.40832, + "grad_norm": 0.6537569722413763, + "learning_rate": 2.9568749618606813e-06, + "loss": 0.34157562255859375, + "memory(GiB)": 77.0, + "step": 4401, + "token_acc": 0.8668188736681888, + "train_speed(iter/s)": 0.838192 + }, + { + "epoch": 1.4086400000000001, + "grad_norm": 0.7118234644061245, + "learning_rate": 2.956007909422622e-06, + "loss": 0.40095770359039307, + "memory(GiB)": 77.0, + "step": 4402, + "token_acc": 0.8861187407563913, + "train_speed(iter/s)": 0.837732 + }, + { + "epoch": 1.40896, + "grad_norm": 0.6718071308837102, + "learning_rate": 2.9551408002422875e-06, + "loss": 0.3539860248565674, + "memory(GiB)": 77.0, + "step": 4403, + "token_acc": 0.9027652268208928, + "train_speed(iter/s)": 0.837291 + }, + { + "epoch": 1.4092799999999999, + "grad_norm": 0.6849730668440378, + "learning_rate": 2.954273634427577e-06, + "loss": 0.3221147656440735, + "memory(GiB)": 77.0, + "step": 4404, + "token_acc": 0.9474835886214442, + "train_speed(iter/s)": 0.836829 + }, + { + "epoch": 1.4096, + "grad_norm": 0.744579020418985, + "learning_rate": 2.953406412086392e-06, + "loss": 0.4201926589012146, + "memory(GiB)": 77.0, + "step": 4405, + "token_acc": 0.9330985915492958, + "train_speed(iter/s)": 0.836377 + }, + { + "epoch": 1.40992, + "grad_norm": 0.6567647367863844, + "learning_rate": 2.952539133326645e-06, + "loss": 0.30585721135139465, + "memory(GiB)": 77.0, + "step": 4406, + "token_acc": 0.9360649947015189, + "train_speed(iter/s)": 0.835941 + }, + { + "epoch": 1.41024, + "grad_norm": 0.7202507482242065, + "learning_rate": 2.9516717982562525e-06, + "loss": 0.41137397289276123, + "memory(GiB)": 77.0, + "step": 4407, + "token_acc": 0.8966637781629117, + "train_speed(iter/s)": 0.835481 + }, + { + "epoch": 1.41056, + "grad_norm": 0.7024044527616975, + "learning_rate": 2.950804406983141e-06, + "loss": 
0.3223690390586853, + "memory(GiB)": 77.0, + "step": 4408, + "token_acc": 0.9371451743714517, + "train_speed(iter/s)": 0.835055 + }, + { + "epoch": 1.4108800000000001, + "grad_norm": 0.6997790259625205, + "learning_rate": 2.949936959615242e-06, + "loss": 0.42398184537887573, + "memory(GiB)": 77.0, + "step": 4409, + "token_acc": 0.894020618556701, + "train_speed(iter/s)": 0.834581 + }, + { + "epoch": 1.4112, + "grad_norm": 0.6408103322168353, + "learning_rate": 2.9490694562604927e-06, + "loss": 0.33211666345596313, + "memory(GiB)": 77.0, + "step": 4410, + "token_acc": 0.9525413349663197, + "train_speed(iter/s)": 0.834138 + }, + { + "epoch": 1.4115199999999999, + "grad_norm": 0.6678673657078287, + "learning_rate": 2.9482018970268395e-06, + "loss": 0.34680718183517456, + "memory(GiB)": 77.0, + "step": 4411, + "token_acc": 0.858122001370802, + "train_speed(iter/s)": 0.83366 + }, + { + "epoch": 1.41184, + "grad_norm": 0.6471831772681935, + "learning_rate": 2.947334282022236e-06, + "loss": 0.3071349859237671, + "memory(GiB)": 77.0, + "step": 4412, + "token_acc": 0.9235369500213584, + "train_speed(iter/s)": 0.833194 + }, + { + "epoch": 1.41216, + "grad_norm": 0.5441821345474216, + "learning_rate": 2.946466611354641e-06, + "loss": 0.2998839020729065, + "memory(GiB)": 77.0, + "step": 4413, + "token_acc": 0.9156477051213894, + "train_speed(iter/s)": 0.832679 + }, + { + "epoch": 1.41248, + "grad_norm": 0.5959060852735028, + "learning_rate": 2.9455988851320206e-06, + "loss": 0.3464151620864868, + "memory(GiB)": 77.0, + "step": 4414, + "token_acc": 0.9367840475180313, + "train_speed(iter/s)": 0.83219 + }, + { + "epoch": 1.4128, + "grad_norm": 0.6482336550268041, + "learning_rate": 2.9447311034623493e-06, + "loss": 0.3395494222640991, + "memory(GiB)": 77.0, + "step": 4415, + "token_acc": 0.8849315068493151, + "train_speed(iter/s)": 0.831728 + }, + { + "epoch": 1.41312, + "grad_norm": 0.6986542232055711, + "learning_rate": 2.9438632664536076e-06, + "loss": 0.36224064230918884, + "memory(GiB)": 77.0, + "step": 4416, + "token_acc": 0.96671354899203, + "train_speed(iter/s)": 0.831293 + }, + { + "epoch": 1.41344, + "grad_norm": 0.6591057417050593, + "learning_rate": 2.9429953742137818e-06, + "loss": 0.3485783040523529, + "memory(GiB)": 77.0, + "step": 4417, + "token_acc": 0.919065898912348, + "train_speed(iter/s)": 0.83086 + }, + { + "epoch": 1.41376, + "grad_norm": 0.6879490782287883, + "learning_rate": 2.9421274268508665e-06, + "loss": 0.35285577178001404, + "memory(GiB)": 77.0, + "step": 4418, + "token_acc": 0.9085944236849669, + "train_speed(iter/s)": 0.830428 + }, + { + "epoch": 1.41408, + "grad_norm": 0.7076115019856848, + "learning_rate": 2.9412594244728632e-06, + "loss": 0.36737900972366333, + "memory(GiB)": 77.0, + "step": 4419, + "token_acc": 0.9115884115884116, + "train_speed(iter/s)": 0.829999 + }, + { + "epoch": 1.4144, + "grad_norm": 0.6508501676955607, + "learning_rate": 2.9403913671877787e-06, + "loss": 0.35258257389068604, + "memory(GiB)": 77.0, + "step": 4420, + "token_acc": 0.959493670886076, + "train_speed(iter/s)": 0.829566 + }, + { + "epoch": 1.41472, + "grad_norm": 0.6338865198650441, + "learning_rate": 2.9395232551036283e-06, + "loss": 0.37876683473587036, + "memory(GiB)": 77.0, + "step": 4421, + "token_acc": 0.8789390160961233, + "train_speed(iter/s)": 0.829088 + }, + { + "epoch": 1.41504, + "grad_norm": 0.6993989674416363, + "learning_rate": 2.9386550883284333e-06, + "loss": 0.3850798010826111, + "memory(GiB)": 77.0, + "step": 4422, + "token_acc": 0.863747354712681, + 
"train_speed(iter/s)": 0.828593 + }, + { + "epoch": 1.41536, + "grad_norm": 0.6003184053340506, + "learning_rate": 2.9377868669702225e-06, + "loss": 0.32168540358543396, + "memory(GiB)": 77.0, + "step": 4423, + "token_acc": 0.8789526686807654, + "train_speed(iter/s)": 0.828116 + }, + { + "epoch": 1.41568, + "grad_norm": 0.6272033844391803, + "learning_rate": 2.93691859113703e-06, + "loss": 0.3323928117752075, + "memory(GiB)": 77.0, + "step": 4424, + "token_acc": 0.8501007879787429, + "train_speed(iter/s)": 0.827655 + }, + { + "epoch": 1.416, + "grad_norm": 0.6038464711397687, + "learning_rate": 2.9360502609368986e-06, + "loss": 0.35349729657173157, + "memory(GiB)": 77.0, + "step": 4425, + "token_acc": 0.928367783321454, + "train_speed(iter/s)": 0.827162 + }, + { + "epoch": 1.41632, + "grad_norm": 0.6783824557471271, + "learning_rate": 2.9351818764778767e-06, + "loss": 0.3291381597518921, + "memory(GiB)": 77.0, + "step": 4426, + "token_acc": 0.9216589861751152, + "train_speed(iter/s)": 0.826712 + }, + { + "epoch": 1.4166400000000001, + "grad_norm": 0.6312861909039773, + "learning_rate": 2.9343134378680194e-06, + "loss": 0.28751200437545776, + "memory(GiB)": 77.0, + "step": 4427, + "token_acc": 0.9034090909090909, + "train_speed(iter/s)": 0.826287 + }, + { + "epoch": 1.41696, + "grad_norm": 0.6814518606804569, + "learning_rate": 2.9334449452153887e-06, + "loss": 0.3615512549877167, + "memory(GiB)": 77.0, + "step": 4428, + "token_acc": 0.9648197009674582, + "train_speed(iter/s)": 0.825869 + }, + { + "epoch": 1.4172799999999999, + "grad_norm": 0.6448442998859781, + "learning_rate": 2.9325763986280536e-06, + "loss": 0.3505418002605438, + "memory(GiB)": 77.0, + "step": 4429, + "token_acc": 0.9354838709677419, + "train_speed(iter/s)": 0.825417 + }, + { + "epoch": 1.4176, + "grad_norm": 0.6951016910295696, + "learning_rate": 2.9317077982140905e-06, + "loss": 0.3729747533798218, + "memory(GiB)": 77.0, + "step": 4430, + "token_acc": 0.8862736471623405, + "train_speed(iter/s)": 0.824964 + }, + { + "epoch": 1.41792, + "grad_norm": 0.6350883626070439, + "learning_rate": 2.9308391440815804e-06, + "loss": 0.3650910258293152, + "memory(GiB)": 77.0, + "step": 4431, + "token_acc": 0.7869014641867828, + "train_speed(iter/s)": 0.824476 + }, + { + "epoch": 1.41824, + "grad_norm": 0.641043126155337, + "learning_rate": 2.9299704363386127e-06, + "loss": 0.32334861159324646, + "memory(GiB)": 77.0, + "step": 4432, + "token_acc": 0.9344444444444444, + "train_speed(iter/s)": 0.824029 + }, + { + "epoch": 1.41856, + "grad_norm": 0.6326105527202097, + "learning_rate": 2.929101675093283e-06, + "loss": 0.29944291710853577, + "memory(GiB)": 77.0, + "step": 4433, + "token_acc": 0.8694992412746586, + "train_speed(iter/s)": 0.823546 + }, + { + "epoch": 1.41888, + "grad_norm": 0.6714930045022148, + "learning_rate": 2.928232860453694e-06, + "loss": 0.3780369758605957, + "memory(GiB)": 77.0, + "step": 4434, + "token_acc": 0.9115296803652968, + "train_speed(iter/s)": 0.823073 + }, + { + "epoch": 1.4192, + "grad_norm": 0.6681525685744021, + "learning_rate": 2.9273639925279547e-06, + "loss": 0.3263596296310425, + "memory(GiB)": 77.0, + "step": 4435, + "token_acc": 0.9049642978578715, + "train_speed(iter/s)": 0.822624 + }, + { + "epoch": 1.41952, + "grad_norm": 1.299061877060914, + "learning_rate": 2.926495071424179e-06, + "loss": 0.35818976163864136, + "memory(GiB)": 77.0, + "step": 4436, + "token_acc": 0.9062310949788264, + "train_speed(iter/s)": 0.822166 + }, + { + "epoch": 1.41984, + "grad_norm": 0.673911335454822, + 
"learning_rate": 2.9256260972504915e-06, + "loss": 0.36888352036476135, + "memory(GiB)": 77.0, + "step": 4437, + "token_acc": 0.9484404837683005, + "train_speed(iter/s)": 0.821729 + }, + { + "epoch": 1.42016, + "grad_norm": 0.6947218503746649, + "learning_rate": 2.9247570701150184e-06, + "loss": 0.3034849762916565, + "memory(GiB)": 77.0, + "step": 4438, + "token_acc": 0.9375162043038631, + "train_speed(iter/s)": 0.821306 + }, + { + "epoch": 1.42048, + "grad_norm": 0.7172624458916499, + "learning_rate": 2.923887990125898e-06, + "loss": 0.39286690950393677, + "memory(GiB)": 77.0, + "step": 4439, + "token_acc": 0.8543723554301833, + "train_speed(iter/s)": 0.820851 + }, + { + "epoch": 1.4208, + "grad_norm": 0.6523741341465861, + "learning_rate": 2.923018857391269e-06, + "loss": 0.3732038736343384, + "memory(GiB)": 77.0, + "step": 4440, + "token_acc": 0.8645226130653266, + "train_speed(iter/s)": 0.82039 + }, + { + "epoch": 1.42112, + "grad_norm": 0.648216614481341, + "learning_rate": 2.922149672019282e-06, + "loss": 0.3256353437900543, + "memory(GiB)": 77.0, + "step": 4441, + "token_acc": 0.8715940054495913, + "train_speed(iter/s)": 0.819951 + }, + { + "epoch": 1.42144, + "grad_norm": 0.6963631427265888, + "learning_rate": 2.921280434118092e-06, + "loss": 0.308993399143219, + "memory(GiB)": 77.0, + "step": 4442, + "token_acc": 0.9401652154498772, + "train_speed(iter/s)": 0.819545 + }, + { + "epoch": 1.42176, + "grad_norm": 0.6539898850448351, + "learning_rate": 2.9204111437958593e-06, + "loss": 0.3135533034801483, + "memory(GiB)": 77.0, + "step": 4443, + "token_acc": 0.9027932960893855, + "train_speed(iter/s)": 0.819131 + }, + { + "epoch": 1.42208, + "grad_norm": 0.7036208068281279, + "learning_rate": 2.9195418011607534e-06, + "loss": 0.44164222478866577, + "memory(GiB)": 77.0, + "step": 4444, + "token_acc": 0.8446631805598277, + "train_speed(iter/s)": 0.818715 + }, + { + "epoch": 1.4224, + "grad_norm": 0.7063662957329192, + "learning_rate": 2.9186724063209486e-06, + "loss": 0.40583324432373047, + "memory(GiB)": 77.0, + "step": 4445, + "token_acc": 0.8858880778588808, + "train_speed(iter/s)": 0.81829 + }, + { + "epoch": 1.42272, + "grad_norm": 0.6208517884912643, + "learning_rate": 2.917802959384625e-06, + "loss": 0.24115049839019775, + "memory(GiB)": 77.0, + "step": 4446, + "token_acc": 0.8621679827709978, + "train_speed(iter/s)": 0.817892 + }, + { + "epoch": 1.42304, + "grad_norm": 0.6739198194902355, + "learning_rate": 2.916933460459971e-06, + "loss": 0.34610089659690857, + "memory(GiB)": 77.0, + "step": 4447, + "token_acc": 0.8929159802306426, + "train_speed(iter/s)": 0.817469 + }, + { + "epoch": 1.42336, + "grad_norm": 1.2874930684812624, + "learning_rate": 2.9160639096551806e-06, + "loss": 0.3056506812572479, + "memory(GiB)": 77.0, + "step": 4448, + "token_acc": 0.905693950177936, + "train_speed(iter/s)": 0.81703 + }, + { + "epoch": 1.42368, + "grad_norm": 0.8600356882241187, + "learning_rate": 2.9151943070784546e-06, + "loss": 0.33672231435775757, + "memory(GiB)": 77.0, + "step": 4449, + "token_acc": 0.9211523881728583, + "train_speed(iter/s)": 0.816607 + }, + { + "epoch": 1.424, + "grad_norm": 0.6515390662431879, + "learning_rate": 2.914324652837999e-06, + "loss": 0.33851712942123413, + "memory(GiB)": 77.0, + "step": 4450, + "token_acc": 0.9077367718986216, + "train_speed(iter/s)": 0.816162 + }, + { + "epoch": 1.42432, + "grad_norm": 0.6972479922940391, + "learning_rate": 2.913454947042028e-06, + "loss": 0.3269636929035187, + "memory(GiB)": 77.0, + "step": 4451, + "token_acc": 
0.8707964601769912, + "train_speed(iter/s)": 0.815716 + }, + { + "epoch": 1.4246400000000001, + "grad_norm": 0.634977844284515, + "learning_rate": 2.912585189798762e-06, + "loss": 0.268435001373291, + "memory(GiB)": 77.0, + "step": 4452, + "token_acc": 0.9245156980627922, + "train_speed(iter/s)": 0.815312 + }, + { + "epoch": 1.42496, + "grad_norm": 0.6041613300556354, + "learning_rate": 2.911715381216426e-06, + "loss": 0.27273765206336975, + "memory(GiB)": 77.0, + "step": 4453, + "token_acc": 0.9600676818950931, + "train_speed(iter/s)": 0.814915 + }, + { + "epoch": 1.4252799999999999, + "grad_norm": 0.6502483923317759, + "learning_rate": 2.910845521403253e-06, + "loss": 0.20564429461956024, + "memory(GiB)": 77.0, + "step": 4454, + "token_acc": 0.9620689655172414, + "train_speed(iter/s)": 0.814515 + }, + { + "epoch": 1.4256, + "grad_norm": 0.6542571971530433, + "learning_rate": 2.909975610467481e-06, + "loss": 0.3899366557598114, + "memory(GiB)": 77.0, + "step": 4455, + "token_acc": 0.8751247090123047, + "train_speed(iter/s)": 0.814063 + }, + { + "epoch": 1.42592, + "grad_norm": 0.634331185835722, + "learning_rate": 2.9091056485173575e-06, + "loss": 0.32280001044273376, + "memory(GiB)": 77.0, + "step": 4456, + "token_acc": 0.9080177078209543, + "train_speed(iter/s)": 0.813626 + }, + { + "epoch": 1.42624, + "grad_norm": 0.7401462852679375, + "learning_rate": 2.9082356356611322e-06, + "loss": 0.3933318853378296, + "memory(GiB)": 77.0, + "step": 4457, + "token_acc": 0.9210706932052162, + "train_speed(iter/s)": 0.813212 + }, + { + "epoch": 1.42656, + "grad_norm": 0.6618380243237917, + "learning_rate": 2.907365572007064e-06, + "loss": 0.36047518253326416, + "memory(GiB)": 77.0, + "step": 4458, + "token_acc": 0.9164705882352941, + "train_speed(iter/s)": 0.812804 + }, + { + "epoch": 1.42688, + "grad_norm": 0.6733730359621528, + "learning_rate": 2.906495457663418e-06, + "loss": 0.3152789771556854, + "memory(GiB)": 77.0, + "step": 4459, + "token_acc": 0.8984899328859061, + "train_speed(iter/s)": 0.812411 + }, + { + "epoch": 1.4272, + "grad_norm": 0.6710937540882269, + "learning_rate": 2.9056252927384633e-06, + "loss": 0.3241655230522156, + "memory(GiB)": 77.0, + "step": 4460, + "token_acc": 0.9270601336302895, + "train_speed(iter/s)": 0.811996 + }, + { + "epoch": 1.42752, + "grad_norm": 0.649269726708552, + "learning_rate": 2.9047550773404777e-06, + "loss": 0.42713671922683716, + "memory(GiB)": 77.0, + "step": 4461, + "token_acc": 0.9218449711723254, + "train_speed(iter/s)": 0.811526 + }, + { + "epoch": 1.42784, + "grad_norm": 0.6662302784122799, + "learning_rate": 2.903884811577744e-06, + "loss": 0.42384395003318787, + "memory(GiB)": 77.0, + "step": 4462, + "token_acc": 0.9341872791519434, + "train_speed(iter/s)": 0.811088 + }, + { + "epoch": 1.42816, + "grad_norm": 0.6437722357594209, + "learning_rate": 2.9030144955585515e-06, + "loss": 0.36643481254577637, + "memory(GiB)": 77.0, + "step": 4463, + "token_acc": 0.8661785641651414, + "train_speed(iter/s)": 0.81063 + }, + { + "epoch": 1.42848, + "grad_norm": 0.6243288495074081, + "learning_rate": 2.9021441293911963e-06, + "loss": 0.4024650752544403, + "memory(GiB)": 77.0, + "step": 4464, + "token_acc": 0.9023099133782483, + "train_speed(iter/s)": 0.810191 + }, + { + "epoch": 1.4288, + "grad_norm": 0.6594628981725152, + "learning_rate": 2.90127371318398e-06, + "loss": 0.3790937662124634, + "memory(GiB)": 77.0, + "step": 4465, + "token_acc": 0.95, + "train_speed(iter/s)": 0.809782 + }, + { + "epoch": 1.42912, + "grad_norm": 0.6943436134706311, + 
"learning_rate": 2.9004032470452127e-06, + "loss": 0.2949817180633545, + "memory(GiB)": 77.0, + "step": 4466, + "token_acc": 0.9480705097665555, + "train_speed(iter/s)": 0.80937 + }, + { + "epoch": 1.42944, + "grad_norm": 0.6389981057231681, + "learning_rate": 2.899532731083206e-06, + "loss": 0.26717042922973633, + "memory(GiB)": 77.0, + "step": 4467, + "token_acc": 0.9254272397721388, + "train_speed(iter/s)": 0.808955 + }, + { + "epoch": 1.42976, + "grad_norm": 0.6003159298387643, + "learning_rate": 2.8986621654062825e-06, + "loss": 0.22673743963241577, + "memory(GiB)": 77.0, + "step": 4468, + "token_acc": 0.9346991037131882, + "train_speed(iter/s)": 0.808562 + }, + { + "epoch": 1.43008, + "grad_norm": 0.6723848101896058, + "learning_rate": 2.8977915501227678e-06, + "loss": 0.4138174057006836, + "memory(GiB)": 77.0, + "step": 4469, + "token_acc": 0.940271725198667, + "train_speed(iter/s)": 0.808136 + }, + { + "epoch": 1.4304000000000001, + "grad_norm": 0.7083618865106301, + "learning_rate": 2.896920885340995e-06, + "loss": 0.37223172187805176, + "memory(GiB)": 77.0, + "step": 4470, + "token_acc": 0.9100564221705941, + "train_speed(iter/s)": 0.80773 + }, + { + "epoch": 1.43072, + "grad_norm": 0.6566141265296072, + "learning_rate": 2.8960501711693044e-06, + "loss": 0.31571099162101746, + "memory(GiB)": 77.0, + "step": 4471, + "token_acc": 0.9299424184261037, + "train_speed(iter/s)": 0.807296 + }, + { + "epoch": 1.4310399999999999, + "grad_norm": 0.6153113814808621, + "learning_rate": 2.8951794077160395e-06, + "loss": 0.36173075437545776, + "memory(GiB)": 77.0, + "step": 4472, + "token_acc": 0.8688166165619021, + "train_speed(iter/s)": 0.806867 + }, + { + "epoch": 1.43136, + "grad_norm": 0.702601705290435, + "learning_rate": 2.894308595089553e-06, + "loss": 0.3419409394264221, + "memory(GiB)": 77.0, + "step": 4473, + "token_acc": 0.8727272727272727, + "train_speed(iter/s)": 0.80647 + }, + { + "epoch": 1.43168, + "grad_norm": 0.6356127649383946, + "learning_rate": 2.893437733398203e-06, + "loss": 0.39882463216781616, + "memory(GiB)": 77.0, + "step": 4474, + "token_acc": 0.9042407660738714, + "train_speed(iter/s)": 0.806008 + }, + { + "epoch": 1.432, + "grad_norm": 0.6510566657731356, + "learning_rate": 2.892566822750351e-06, + "loss": 0.3385915458202362, + "memory(GiB)": 77.0, + "step": 4475, + "token_acc": 0.8617578579743889, + "train_speed(iter/s)": 0.805585 + }, + { + "epoch": 1.43232, + "grad_norm": 0.6476626285368151, + "learning_rate": 2.8916958632543683e-06, + "loss": 0.3913312256336212, + "memory(GiB)": 77.0, + "step": 4476, + "token_acc": 0.8754325259515571, + "train_speed(iter/s)": 0.805174 + }, + { + "epoch": 1.4326400000000001, + "grad_norm": 0.6622405077490577, + "learning_rate": 2.890824855018631e-06, + "loss": 0.31271177530288696, + "memory(GiB)": 77.0, + "step": 4477, + "token_acc": 0.8344390832328106, + "train_speed(iter/s)": 0.804759 + }, + { + "epoch": 1.43296, + "grad_norm": 0.6264387286370566, + "learning_rate": 2.889953798151519e-06, + "loss": 0.36960336565971375, + "memory(GiB)": 77.0, + "step": 4478, + "token_acc": 0.8946986201888163, + "train_speed(iter/s)": 0.804357 + }, + { + "epoch": 1.4332799999999999, + "grad_norm": 0.6471942643228005, + "learning_rate": 2.8890826927614224e-06, + "loss": 0.39539945125579834, + "memory(GiB)": 77.0, + "step": 4479, + "token_acc": 0.8062840820566086, + "train_speed(iter/s)": 0.803962 + }, + { + "epoch": 1.4336, + "grad_norm": 0.6597548188345564, + "learning_rate": 2.8882115389567333e-06, + "loss": 0.32121777534484863, + 
"memory(GiB)": 77.0, + "step": 4480, + "token_acc": 0.8862367661212704, + "train_speed(iter/s)": 0.803575 + }, + { + "epoch": 1.43392, + "grad_norm": 0.6887029248554696, + "learning_rate": 2.887340336845854e-06, + "loss": 0.36457642912864685, + "memory(GiB)": 77.0, + "step": 4481, + "token_acc": 0.8804428044280442, + "train_speed(iter/s)": 0.803159 + }, + { + "epoch": 1.43424, + "grad_norm": 0.7742643195062213, + "learning_rate": 2.8864690865371884e-06, + "loss": 0.3310312032699585, + "memory(GiB)": 77.0, + "step": 4482, + "token_acc": 0.8830604744403608, + "train_speed(iter/s)": 0.80277 + }, + { + "epoch": 1.43456, + "grad_norm": 0.6084422453640529, + "learning_rate": 2.8855977881391493e-06, + "loss": 0.30526742339134216, + "memory(GiB)": 77.0, + "step": 4483, + "token_acc": 0.9323220536756126, + "train_speed(iter/s)": 0.802381 + }, + { + "epoch": 1.43488, + "grad_norm": 0.589785855451261, + "learning_rate": 2.884726441760155e-06, + "loss": 0.2854624390602112, + "memory(GiB)": 77.0, + "step": 4484, + "token_acc": 0.9383424862705941, + "train_speed(iter/s)": 0.801998 + }, + { + "epoch": 1.4352, + "grad_norm": 0.6522297782775931, + "learning_rate": 2.8838550475086286e-06, + "loss": 0.352199524641037, + "memory(GiB)": 77.0, + "step": 4485, + "token_acc": 0.90715667311412, + "train_speed(iter/s)": 0.801593 + }, + { + "epoch": 1.43552, + "grad_norm": 0.6575454219777694, + "learning_rate": 2.8829836054930006e-06, + "loss": 0.3955366313457489, + "memory(GiB)": 77.0, + "step": 4486, + "token_acc": 0.8569769692277918, + "train_speed(iter/s)": 0.80118 + }, + { + "epoch": 1.43584, + "grad_norm": 0.6294476820944608, + "learning_rate": 2.882112115821706e-06, + "loss": 0.32892942428588867, + "memory(GiB)": 77.0, + "step": 4487, + "token_acc": 0.862282512533176, + "train_speed(iter/s)": 0.800788 + }, + { + "epoch": 1.43616, + "grad_norm": 0.6070184474259671, + "learning_rate": 2.8812405786031883e-06, + "loss": 0.24704822897911072, + "memory(GiB)": 77.0, + "step": 4488, + "token_acc": 0.9483960948396095, + "train_speed(iter/s)": 0.800354 + }, + { + "epoch": 1.43648, + "grad_norm": 0.6386542889606445, + "learning_rate": 2.8803689939458936e-06, + "loss": 0.3327140808105469, + "memory(GiB)": 77.0, + "step": 4489, + "token_acc": 0.8858921161825726, + "train_speed(iter/s)": 0.79996 + }, + { + "epoch": 1.4368, + "grad_norm": 0.6254069148052248, + "learning_rate": 2.879497361958276e-06, + "loss": 0.31638815999031067, + "memory(GiB)": 77.0, + "step": 4490, + "token_acc": 0.9365138993238167, + "train_speed(iter/s)": 0.799558 + }, + { + "epoch": 1.43712, + "grad_norm": 0.6441042782708604, + "learning_rate": 2.8786256827487945e-06, + "loss": 0.2916138768196106, + "memory(GiB)": 77.0, + "step": 4491, + "token_acc": 0.9712601326455417, + "train_speed(iter/s)": 0.799153 + }, + { + "epoch": 1.43744, + "grad_norm": 0.6683231540849952, + "learning_rate": 2.877753956425916e-06, + "loss": 0.2910221517086029, + "memory(GiB)": 77.0, + "step": 4492, + "token_acc": 0.913918952932293, + "train_speed(iter/s)": 0.79878 + }, + { + "epoch": 1.43776, + "grad_norm": 0.6308147893792385, + "learning_rate": 2.87688218309811e-06, + "loss": 0.317259818315506, + "memory(GiB)": 77.0, + "step": 4493, + "token_acc": 0.9496574322311587, + "train_speed(iter/s)": 0.798383 + }, + { + "epoch": 1.43808, + "grad_norm": 0.6792499138205906, + "learning_rate": 2.8760103628738544e-06, + "loss": 0.3030257225036621, + "memory(GiB)": 77.0, + "step": 4494, + "token_acc": 0.9403194172036985, + "train_speed(iter/s)": 0.798 + }, + { + "epoch": 
1.4384000000000001, + "grad_norm": 0.6576217363786413, + "learning_rate": 2.8751384958616318e-06, + "loss": 0.38201236724853516, + "memory(GiB)": 77.0, + "step": 4495, + "token_acc": 0.9233333333333333, + "train_speed(iter/s)": 0.797585 + }, + { + "epoch": 1.43872, + "grad_norm": 0.6529553514498254, + "learning_rate": 2.874266582169931e-06, + "loss": 0.3091839849948883, + "memory(GiB)": 77.0, + "step": 4496, + "token_acc": 0.8854225751559841, + "train_speed(iter/s)": 0.797187 + }, + { + "epoch": 1.4390399999999999, + "grad_norm": 0.6789062244649835, + "learning_rate": 2.8733946219072474e-06, + "loss": 0.33111536502838135, + "memory(GiB)": 77.0, + "step": 4497, + "token_acc": 0.9185973700688791, + "train_speed(iter/s)": 0.796805 + }, + { + "epoch": 1.43936, + "grad_norm": 0.6938792230716844, + "learning_rate": 2.8725226151820795e-06, + "loss": 0.29155540466308594, + "memory(GiB)": 77.0, + "step": 4498, + "token_acc": 0.8708777270421106, + "train_speed(iter/s)": 0.796425 + }, + { + "epoch": 1.43968, + "grad_norm": 0.6595599237376008, + "learning_rate": 2.871650562102935e-06, + "loss": 0.3988974392414093, + "memory(GiB)": 77.0, + "step": 4499, + "token_acc": 0.8948999718230487, + "train_speed(iter/s)": 0.795949 + }, + { + "epoch": 1.44, + "grad_norm": 0.648780223469662, + "learning_rate": 2.870778462778325e-06, + "loss": 0.36671018600463867, + "memory(GiB)": 77.0, + "step": 4500, + "token_acc": 0.8734270910436713, + "train_speed(iter/s)": 0.795552 + }, + { + "epoch": 1.44032, + "grad_norm": 0.7351211737349653, + "learning_rate": 2.869906317316768e-06, + "loss": 0.3857193887233734, + "memory(GiB)": 77.0, + "step": 4501, + "token_acc": 0.8506474820143884, + "train_speed(iter/s)": 0.795161 + }, + { + "epoch": 1.44064, + "grad_norm": 0.6560445678695754, + "learning_rate": 2.8690341258267863e-06, + "loss": 0.35981935262680054, + "memory(GiB)": 77.0, + "step": 4502, + "token_acc": 0.9446196985629163, + "train_speed(iter/s)": 0.794756 + }, + { + "epoch": 1.44096, + "grad_norm": 0.6651564583924584, + "learning_rate": 2.868161888416909e-06, + "loss": 0.37547993659973145, + "memory(GiB)": 77.0, + "step": 4503, + "token_acc": 0.8475705329153606, + "train_speed(iter/s)": 0.794355 + }, + { + "epoch": 1.44128, + "grad_norm": 0.724929206473, + "learning_rate": 2.8672896051956717e-06, + "loss": 0.35948920249938965, + "memory(GiB)": 77.0, + "step": 4504, + "token_acc": 0.9242034943473793, + "train_speed(iter/s)": 0.79398 + }, + { + "epoch": 1.4416, + "grad_norm": 0.6689000694783961, + "learning_rate": 2.866417276271615e-06, + "loss": 0.38741934299468994, + "memory(GiB)": 77.0, + "step": 4505, + "token_acc": 0.8917322834645669, + "train_speed(iter/s)": 0.793558 + }, + { + "epoch": 1.44192, + "grad_norm": 0.688907514439031, + "learning_rate": 2.865544901753284e-06, + "loss": 0.34373098611831665, + "memory(GiB)": 77.0, + "step": 4506, + "token_acc": 0.908013276434329, + "train_speed(iter/s)": 0.793139 + }, + { + "epoch": 1.44224, + "grad_norm": 0.652093421420142, + "learning_rate": 2.864672481749231e-06, + "loss": 0.36953985691070557, + "memory(GiB)": 77.0, + "step": 4507, + "token_acc": 0.8943943943943944, + "train_speed(iter/s)": 0.792743 + }, + { + "epoch": 1.44256, + "grad_norm": 0.6320685804312591, + "learning_rate": 2.8638000163680143e-06, + "loss": 0.3481016755104065, + "memory(GiB)": 77.0, + "step": 4508, + "token_acc": 0.8616529774127311, + "train_speed(iter/s)": 0.79234 + }, + { + "epoch": 1.44288, + "grad_norm": 0.6160550832737524, + "learning_rate": 2.8629275057181966e-06, + "loss": 
0.30131426453590393, + "memory(GiB)": 77.0, + "step": 4509, + "token_acc": 0.9169796402451077, + "train_speed(iter/s)": 0.79193 + }, + { + "epoch": 1.4432, + "grad_norm": 0.626910565164054, + "learning_rate": 2.8620549499083467e-06, + "loss": 0.37158718705177307, + "memory(GiB)": 77.0, + "step": 4510, + "token_acc": 0.8843565164185193, + "train_speed(iter/s)": 0.791558 + }, + { + "epoch": 1.44352, + "grad_norm": 0.6482893898587341, + "learning_rate": 2.8611823490470388e-06, + "loss": 0.3496326804161072, + "memory(GiB)": 77.0, + "step": 4511, + "token_acc": 0.8383894098179813, + "train_speed(iter/s)": 0.791174 + }, + { + "epoch": 1.44384, + "grad_norm": 0.679955789836824, + "learning_rate": 2.860309703242853e-06, + "loss": 0.3829043507575989, + "memory(GiB)": 77.0, + "step": 4512, + "token_acc": 0.9081684424150177, + "train_speed(iter/s)": 0.790779 + }, + { + "epoch": 1.44416, + "grad_norm": 0.6808768789697407, + "learning_rate": 2.8594370126043746e-06, + "loss": 0.38255172967910767, + "memory(GiB)": 77.0, + "step": 4513, + "token_acc": 0.8485794054853013, + "train_speed(iter/s)": 0.79037 + }, + { + "epoch": 1.44448, + "grad_norm": 0.6384632348551839, + "learning_rate": 2.858564277240196e-06, + "loss": 0.3689082860946655, + "memory(GiB)": 77.0, + "step": 4514, + "token_acc": 0.8594802694898941, + "train_speed(iter/s)": 0.789991 + }, + { + "epoch": 1.4447999999999999, + "grad_norm": 0.6462917610806118, + "learning_rate": 2.857691497258913e-06, + "loss": 0.2822076380252838, + "memory(GiB)": 77.0, + "step": 4515, + "token_acc": 0.9496040316774658, + "train_speed(iter/s)": 0.789623 + }, + { + "epoch": 1.44512, + "grad_norm": 0.6666260599773782, + "learning_rate": 2.8568186727691282e-06, + "loss": 0.36719971895217896, + "memory(GiB)": 77.0, + "step": 4516, + "token_acc": 0.8467256637168141, + "train_speed(iter/s)": 0.789257 + }, + { + "epoch": 1.44544, + "grad_norm": 0.6405272091646323, + "learning_rate": 2.855945803879451e-06, + "loss": 0.3279315233230591, + "memory(GiB)": 77.0, + "step": 4517, + "token_acc": 0.9611336032388664, + "train_speed(iter/s)": 0.788867 + }, + { + "epoch": 1.44576, + "grad_norm": 0.7156773438638411, + "learning_rate": 2.8550728906984914e-06, + "loss": 0.3844512104988098, + "memory(GiB)": 77.0, + "step": 4518, + "token_acc": 0.8586302637667746, + "train_speed(iter/s)": 0.788496 + }, + { + "epoch": 1.44608, + "grad_norm": 0.5803717797649448, + "learning_rate": 2.85419993333487e-06, + "loss": 0.31850987672805786, + "memory(GiB)": 77.0, + "step": 4519, + "token_acc": 0.8922710428798306, + "train_speed(iter/s)": 0.7881 + }, + { + "epoch": 1.4464000000000001, + "grad_norm": 0.6254671488320118, + "learning_rate": 2.853326931897212e-06, + "loss": 0.3578682541847229, + "memory(GiB)": 77.0, + "step": 4520, + "token_acc": 0.8537804624036659, + "train_speed(iter/s)": 0.787722 + }, + { + "epoch": 1.44672, + "grad_norm": 0.651386696905358, + "learning_rate": 2.8524538864941463e-06, + "loss": 0.34006232023239136, + "memory(GiB)": 77.0, + "step": 4521, + "token_acc": 0.915615906886518, + "train_speed(iter/s)": 0.787306 + }, + { + "epoch": 1.4470399999999999, + "grad_norm": 0.63405091756191, + "learning_rate": 2.851580797234309e-06, + "loss": 0.3381822407245636, + "memory(GiB)": 77.0, + "step": 4522, + "token_acc": 0.9122946475887652, + "train_speed(iter/s)": 0.786884 + }, + { + "epoch": 1.44736, + "grad_norm": 0.6397381670928933, + "learning_rate": 2.8507076642263403e-06, + "loss": 0.33816856145858765, + "memory(GiB)": 77.0, + "step": 4523, + "token_acc": 0.8967159689145149, + 
"train_speed(iter/s)": 0.786499 + }, + { + "epoch": 1.44768, + "grad_norm": 0.6421037043362912, + "learning_rate": 2.849834487578887e-06, + "loss": 0.3700662851333618, + "memory(GiB)": 77.0, + "step": 4524, + "token_acc": 0.8335891925084433, + "train_speed(iter/s)": 0.786117 + }, + { + "epoch": 1.448, + "grad_norm": 0.6635348142568982, + "learning_rate": 2.8489612674006e-06, + "loss": 0.3051077425479889, + "memory(GiB)": 77.0, + "step": 4525, + "token_acc": 0.8898534997287032, + "train_speed(iter/s)": 0.785742 + }, + { + "epoch": 1.44832, + "grad_norm": 0.6687414359212979, + "learning_rate": 2.8480880038001375e-06, + "loss": 0.2981453537940979, + "memory(GiB)": 77.0, + "step": 4526, + "token_acc": 0.9287298946200776, + "train_speed(iter/s)": 0.785379 + }, + { + "epoch": 1.44864, + "grad_norm": 0.9569474902395833, + "learning_rate": 2.8472146968861608e-06, + "loss": 0.3519737124443054, + "memory(GiB)": 77.0, + "step": 4527, + "token_acc": 0.8771929824561403, + "train_speed(iter/s)": 0.784988 + }, + { + "epoch": 1.44896, + "grad_norm": 0.6336811308583574, + "learning_rate": 2.8463413467673386e-06, + "loss": 0.35483768582344055, + "memory(GiB)": 77.0, + "step": 4528, + "token_acc": 0.9000527426160337, + "train_speed(iter/s)": 0.784606 + }, + { + "epoch": 1.44928, + "grad_norm": 0.5982810941424279, + "learning_rate": 2.8454679535523428e-06, + "loss": 0.20998990535736084, + "memory(GiB)": 77.0, + "step": 4529, + "token_acc": 0.9217904574520414, + "train_speed(iter/s)": 0.784241 + }, + { + "epoch": 1.4496, + "grad_norm": 0.6674564957007728, + "learning_rate": 2.844594517349854e-06, + "loss": 0.35525593161582947, + "memory(GiB)": 77.0, + "step": 4530, + "token_acc": 0.9299318128379968, + "train_speed(iter/s)": 0.783815 + }, + { + "epoch": 1.44992, + "grad_norm": 0.6122222512174024, + "learning_rate": 2.843721038268556e-06, + "loss": 0.3149735629558563, + "memory(GiB)": 77.0, + "step": 4531, + "token_acc": 0.9236329352608422, + "train_speed(iter/s)": 0.783426 + }, + { + "epoch": 1.45024, + "grad_norm": 0.6657481114888427, + "learning_rate": 2.8428475164171363e-06, + "loss": 0.3474307060241699, + "memory(GiB)": 77.0, + "step": 4532, + "token_acc": 0.9064277588168373, + "train_speed(iter/s)": 0.783071 + }, + { + "epoch": 1.45056, + "grad_norm": 0.6386275367808854, + "learning_rate": 2.8419739519042916e-06, + "loss": 0.3255157470703125, + "memory(GiB)": 77.0, + "step": 4533, + "token_acc": 0.8952749378281293, + "train_speed(iter/s)": 0.782704 + }, + { + "epoch": 1.45088, + "grad_norm": 1.1106645943254259, + "learning_rate": 2.8411003448387208e-06, + "loss": 0.3292616009712219, + "memory(GiB)": 77.0, + "step": 4534, + "token_acc": 0.9213780918727915, + "train_speed(iter/s)": 0.782336 + }, + { + "epoch": 1.4512, + "grad_norm": 0.625695672322811, + "learning_rate": 2.840226695329129e-06, + "loss": 0.33256345987319946, + "memory(GiB)": 77.0, + "step": 4535, + "token_acc": 0.9348103962505326, + "train_speed(iter/s)": 0.781967 + }, + { + "epoch": 1.45152, + "grad_norm": 0.6473721788487712, + "learning_rate": 2.839353003484227e-06, + "loss": 0.2909867465496063, + "memory(GiB)": 77.0, + "step": 4536, + "token_acc": 0.9322429906542056, + "train_speed(iter/s)": 0.781612 + }, + { + "epoch": 1.45184, + "grad_norm": 0.6019038053635278, + "learning_rate": 2.83847926941273e-06, + "loss": 0.3716350793838501, + "memory(GiB)": 77.0, + "step": 4537, + "token_acc": 0.8506770543498423, + "train_speed(iter/s)": 0.781198 + }, + { + "epoch": 1.4521600000000001, + "grad_norm": 0.6458277756792047, + "learning_rate": 
2.837605493223361e-06, + "loss": 0.3783963918685913, + "memory(GiB)": 77.0, + "step": 4538, + "token_acc": 0.8844086021505376, + "train_speed(iter/s)": 0.780813 + }, + { + "epoch": 1.45248, + "grad_norm": 0.6828519451835444, + "learning_rate": 2.836731675024844e-06, + "loss": 0.4223407804965973, + "memory(GiB)": 77.0, + "step": 4539, + "token_acc": 0.9011546043368065, + "train_speed(iter/s)": 0.780426 + }, + { + "epoch": 1.4527999999999999, + "grad_norm": 0.6469429904746712, + "learning_rate": 2.835857814925912e-06, + "loss": 0.31077712774276733, + "memory(GiB)": 77.0, + "step": 4540, + "token_acc": 0.8796992481203008, + "train_speed(iter/s)": 0.780016 + }, + { + "epoch": 1.45312, + "grad_norm": 0.5997891013645121, + "learning_rate": 2.8349839130353013e-06, + "loss": 0.2792194187641144, + "memory(GiB)": 77.0, + "step": 4541, + "token_acc": 0.9373644904842207, + "train_speed(iter/s)": 0.779632 + }, + { + "epoch": 1.45344, + "grad_norm": 0.7622663904855831, + "learning_rate": 2.834109969461753e-06, + "loss": 0.4083790183067322, + "memory(GiB)": 77.0, + "step": 4542, + "token_acc": 0.8202881619937694, + "train_speed(iter/s)": 0.779272 + }, + { + "epoch": 1.45376, + "grad_norm": 0.6703315463426306, + "learning_rate": 2.8332359843140158e-06, + "loss": 0.31992751359939575, + "memory(GiB)": 77.0, + "step": 4543, + "token_acc": 0.9067725752508361, + "train_speed(iter/s)": 0.778891 + }, + { + "epoch": 1.45408, + "grad_norm": 0.6627892050330639, + "learning_rate": 2.8323619577008403e-06, + "loss": 0.3812315762042999, + "memory(GiB)": 77.0, + "step": 4544, + "token_acc": 0.8657433578292821, + "train_speed(iter/s)": 0.778492 + }, + { + "epoch": 1.4544000000000001, + "grad_norm": 0.6601436399860959, + "learning_rate": 2.831487889730985e-06, + "loss": 0.39261046051979065, + "memory(GiB)": 77.0, + "step": 4545, + "token_acc": 0.9258160237388724, + "train_speed(iter/s)": 0.778145 + }, + { + "epoch": 1.45472, + "grad_norm": 0.6236972246284397, + "learning_rate": 2.8306137805132127e-06, + "loss": 0.3572177588939667, + "memory(GiB)": 77.0, + "step": 4546, + "token_acc": 0.8972639971009241, + "train_speed(iter/s)": 0.777742 + }, + { + "epoch": 1.45504, + "grad_norm": 0.6250837278855622, + "learning_rate": 2.829739630156291e-06, + "loss": 0.32249540090560913, + "memory(GiB)": 77.0, + "step": 4547, + "token_acc": 0.899415963659961, + "train_speed(iter/s)": 0.777381 + }, + { + "epoch": 1.45536, + "grad_norm": 0.6675875946695047, + "learning_rate": 2.828865438768991e-06, + "loss": 0.3446648120880127, + "memory(GiB)": 77.0, + "step": 4548, + "token_acc": 0.8807053941908713, + "train_speed(iter/s)": 0.777021 + }, + { + "epoch": 1.45568, + "grad_norm": 0.6762768017843583, + "learning_rate": 2.8279912064600934e-06, + "loss": 0.3151264190673828, + "memory(GiB)": 77.0, + "step": 4549, + "token_acc": 0.9072134962187318, + "train_speed(iter/s)": 0.776652 + }, + { + "epoch": 1.456, + "grad_norm": 0.6354267932118138, + "learning_rate": 2.82711693333838e-06, + "loss": 0.3911912441253662, + "memory(GiB)": 77.0, + "step": 4550, + "token_acc": 0.8821859474804826, + "train_speed(iter/s)": 0.776281 + }, + { + "epoch": 1.45632, + "grad_norm": 0.6536638581111143, + "learning_rate": 2.826242619512638e-06, + "loss": 0.36042696237564087, + "memory(GiB)": 77.0, + "step": 4551, + "token_acc": 0.8701594533029613, + "train_speed(iter/s)": 0.77592 + }, + { + "epoch": 1.45664, + "grad_norm": 0.6428986120714112, + "learning_rate": 2.825368265091662e-06, + "loss": 0.3490666151046753, + "memory(GiB)": 77.0, + "step": 4552, + "token_acc": 
0.8632352941176471, + "train_speed(iter/s)": 0.77553 + }, + { + "epoch": 1.45696, + "grad_norm": 0.7193819164105991, + "learning_rate": 2.8244938701842494e-06, + "loss": 0.36251211166381836, + "memory(GiB)": 77.0, + "step": 4553, + "token_acc": 0.8511207761793241, + "train_speed(iter/s)": 0.775176 + }, + { + "epoch": 1.45728, + "grad_norm": 0.7027504063626803, + "learning_rate": 2.823619434899204e-06, + "loss": 0.3558286130428314, + "memory(GiB)": 77.0, + "step": 4554, + "token_acc": 0.8850822767736715, + "train_speed(iter/s)": 0.774813 + }, + { + "epoch": 1.4576, + "grad_norm": 0.6965708658194327, + "learning_rate": 2.822744959345334e-06, + "loss": 0.37352436780929565, + "memory(GiB)": 77.0, + "step": 4555, + "token_acc": 0.8986486486486487, + "train_speed(iter/s)": 0.774455 + }, + { + "epoch": 1.45792, + "grad_norm": 0.700861462772897, + "learning_rate": 2.8218704436314525e-06, + "loss": 0.34365302324295044, + "memory(GiB)": 77.0, + "step": 4556, + "token_acc": 0.9382217090069284, + "train_speed(iter/s)": 0.77404 + }, + { + "epoch": 1.45824, + "grad_norm": 0.6361053930709359, + "learning_rate": 2.820995887866378e-06, + "loss": 0.2871834933757782, + "memory(GiB)": 77.0, + "step": 4557, + "token_acc": 0.850964891885608, + "train_speed(iter/s)": 0.77369 + }, + { + "epoch": 1.45856, + "grad_norm": 0.658246212696411, + "learning_rate": 2.820121292158933e-06, + "loss": 0.3523300588130951, + "memory(GiB)": 77.0, + "step": 4558, + "token_acc": 0.9010939941951328, + "train_speed(iter/s)": 0.773343 + }, + { + "epoch": 1.45888, + "grad_norm": 0.7069084242264171, + "learning_rate": 2.8192466566179473e-06, + "loss": 0.39891308546066284, + "memory(GiB)": 77.0, + "step": 4559, + "token_acc": 0.8648846576345661, + "train_speed(iter/s)": 0.772994 + }, + { + "epoch": 1.4592, + "grad_norm": 0.6785101330579675, + "learning_rate": 2.818371981352253e-06, + "loss": 0.4496411979198456, + "memory(GiB)": 77.0, + "step": 4560, + "token_acc": 0.8212691635940229, + "train_speed(iter/s)": 0.772616 + }, + { + "epoch": 1.45952, + "grad_norm": 0.6613557841940102, + "learning_rate": 2.8174972664706883e-06, + "loss": 0.35022038221359253, + "memory(GiB)": 77.0, + "step": 4561, + "token_acc": 0.9221604447974583, + "train_speed(iter/s)": 0.772277 + }, + { + "epoch": 1.45984, + "grad_norm": 0.6798863244018006, + "learning_rate": 2.816622512082097e-06, + "loss": 0.3040103018283844, + "memory(GiB)": 77.0, + "step": 4562, + "token_acc": 0.8871169480925578, + "train_speed(iter/s)": 0.771927 + }, + { + "epoch": 1.4601600000000001, + "grad_norm": 0.5528463672523809, + "learning_rate": 2.815747718295326e-06, + "loss": 0.32629501819610596, + "memory(GiB)": 77.0, + "step": 4563, + "token_acc": 0.9003698063954753, + "train_speed(iter/s)": 0.771546 + }, + { + "epoch": 1.46048, + "grad_norm": 0.66317567678431, + "learning_rate": 2.8148728852192296e-06, + "loss": 0.35974738001823425, + "memory(GiB)": 77.0, + "step": 4564, + "token_acc": 0.8185920577617328, + "train_speed(iter/s)": 0.771181 + }, + { + "epoch": 1.4607999999999999, + "grad_norm": 0.7510762872043727, + "learning_rate": 2.813998012962665e-06, + "loss": 0.26792770624160767, + "memory(GiB)": 77.0, + "step": 4565, + "token_acc": 0.8776243093922652, + "train_speed(iter/s)": 0.77083 + }, + { + "epoch": 1.46112, + "grad_norm": 0.6291297236688549, + "learning_rate": 2.813123101634494e-06, + "loss": 0.3010227680206299, + "memory(GiB)": 77.0, + "step": 4566, + "token_acc": 0.9315774405954766, + "train_speed(iter/s)": 0.770458 + }, + { + "epoch": 1.46144, + "grad_norm": 
0.6350980169501201, + "learning_rate": 2.812248151343586e-06, + "loss": 0.3118734359741211, + "memory(GiB)": 77.0, + "step": 4567, + "token_acc": 0.9565355329949239, + "train_speed(iter/s)": 0.77009 + }, + { + "epoch": 1.46176, + "grad_norm": 0.6613418400621324, + "learning_rate": 2.8113731621988115e-06, + "loss": 0.3771818280220032, + "memory(GiB)": 77.0, + "step": 4568, + "token_acc": 0.8506787330316742, + "train_speed(iter/s)": 0.769735 + }, + { + "epoch": 1.46208, + "grad_norm": 0.6179874639089661, + "learning_rate": 2.8104981343090474e-06, + "loss": 0.3255486488342285, + "memory(GiB)": 77.0, + "step": 4569, + "token_acc": 0.9017894298793175, + "train_speed(iter/s)": 0.769374 + }, + { + "epoch": 1.4624, + "grad_norm": 0.602182123271432, + "learning_rate": 2.809623067783178e-06, + "loss": 0.34803932905197144, + "memory(GiB)": 77.0, + "step": 4570, + "token_acc": 0.8027027027027027, + "train_speed(iter/s)": 0.769015 + }, + { + "epoch": 1.46272, + "grad_norm": 0.6897683462460186, + "learning_rate": 2.808747962730089e-06, + "loss": 0.3845667243003845, + "memory(GiB)": 77.0, + "step": 4571, + "token_acc": 0.925248508946322, + "train_speed(iter/s)": 0.768657 + }, + { + "epoch": 1.46304, + "grad_norm": 0.6519428921438171, + "learning_rate": 2.807872819258672e-06, + "loss": 0.3360532224178314, + "memory(GiB)": 77.0, + "step": 4572, + "token_acc": 0.9371116082525478, + "train_speed(iter/s)": 0.76831 + }, + { + "epoch": 1.46336, + "grad_norm": 0.724042265687621, + "learning_rate": 2.8069976374778233e-06, + "loss": 0.33269214630126953, + "memory(GiB)": 77.0, + "step": 4573, + "token_acc": 0.9131792629606496, + "train_speed(iter/s)": 0.767932 + }, + { + "epoch": 1.46368, + "grad_norm": 0.6221480441606021, + "learning_rate": 2.8061224174964448e-06, + "loss": 0.3602624237537384, + "memory(GiB)": 77.0, + "step": 4574, + "token_acc": 0.9073345259391771, + "train_speed(iter/s)": 0.767569 + }, + { + "epoch": 1.464, + "grad_norm": 0.6604523065802387, + "learning_rate": 2.8052471594234414e-06, + "loss": 0.3125818371772766, + "memory(GiB)": 77.0, + "step": 4575, + "token_acc": 0.91343669250646, + "train_speed(iter/s)": 0.767205 + }, + { + "epoch": 1.46432, + "grad_norm": 0.6468858558212282, + "learning_rate": 2.804371863367724e-06, + "loss": 0.37284335494041443, + "memory(GiB)": 77.0, + "step": 4576, + "token_acc": 0.9171610635025269, + "train_speed(iter/s)": 0.766842 + }, + { + "epoch": 1.46464, + "grad_norm": 0.6294446826451466, + "learning_rate": 2.8034965294382078e-06, + "loss": 0.3578566908836365, + "memory(GiB)": 77.0, + "step": 4577, + "token_acc": 0.878735913767761, + "train_speed(iter/s)": 0.766459 + }, + { + "epoch": 1.46496, + "grad_norm": 0.6736175839402659, + "learning_rate": 2.802621157743814e-06, + "loss": 0.2735452651977539, + "memory(GiB)": 77.0, + "step": 4578, + "token_acc": 0.9386474552637695, + "train_speed(iter/s)": 0.766129 + }, + { + "epoch": 1.46528, + "grad_norm": 0.5971762893367935, + "learning_rate": 2.801745748393467e-06, + "loss": 0.3502163887023926, + "memory(GiB)": 77.0, + "step": 4579, + "token_acc": 0.851624630765735, + "train_speed(iter/s)": 0.76574 + }, + { + "epoch": 1.4656, + "grad_norm": 0.6558321464353919, + "learning_rate": 2.8008703014960956e-06, + "loss": 0.3225272297859192, + "memory(GiB)": 77.0, + "step": 4580, + "token_acc": 0.9412593984962406, + "train_speed(iter/s)": 0.7654 + }, + { + "epoch": 1.4659200000000001, + "grad_norm": 0.6243300703415585, + "learning_rate": 2.7999948171606356e-06, + "loss": 0.31898877024650574, + "memory(GiB)": 77.0, + "step": 4581, 
+ "token_acc": 0.9043659043659044, + "train_speed(iter/s)": 0.765035 + }, + { + "epoch": 1.46624, + "grad_norm": 0.7152142267911815, + "learning_rate": 2.799119295496024e-06, + "loss": 0.3544265329837799, + "memory(GiB)": 77.0, + "step": 4582, + "token_acc": 0.919080459770115, + "train_speed(iter/s)": 0.764704 + }, + { + "epoch": 1.4665599999999999, + "grad_norm": 0.7010316680977811, + "learning_rate": 2.7982437366112054e-06, + "loss": 0.35953691601753235, + "memory(GiB)": 77.0, + "step": 4583, + "token_acc": 0.9200954084675015, + "train_speed(iter/s)": 0.764372 + }, + { + "epoch": 1.46688, + "grad_norm": 0.6326687009333222, + "learning_rate": 2.7973681406151265e-06, + "loss": 0.3023439049720764, + "memory(GiB)": 77.0, + "step": 4584, + "token_acc": 0.9457236842105263, + "train_speed(iter/s)": 0.764014 + }, + { + "epoch": 1.4672, + "grad_norm": 0.641976002499839, + "learning_rate": 2.7964925076167415e-06, + "loss": 0.3446701169013977, + "memory(GiB)": 77.0, + "step": 4585, + "token_acc": 0.8267496779733792, + "train_speed(iter/s)": 0.763651 + }, + { + "epoch": 1.46752, + "grad_norm": 0.6178102738168327, + "learning_rate": 2.7956168377250076e-06, + "loss": 0.3909762501716614, + "memory(GiB)": 77.0, + "step": 4586, + "token_acc": 0.8279908414424728, + "train_speed(iter/s)": 0.76326 + }, + { + "epoch": 1.46784, + "grad_norm": 0.7015667041861847, + "learning_rate": 2.794741131048886e-06, + "loss": 0.328866571187973, + "memory(GiB)": 77.0, + "step": 4587, + "token_acc": 0.8883259911894273, + "train_speed(iter/s)": 0.762893 + }, + { + "epoch": 1.4681600000000001, + "grad_norm": 0.6198376418548674, + "learning_rate": 2.7938653876973444e-06, + "loss": 0.32093971967697144, + "memory(GiB)": 77.0, + "step": 4588, + "token_acc": 0.8491633278340797, + "train_speed(iter/s)": 0.762563 + }, + { + "epoch": 1.46848, + "grad_norm": 0.660622807761768, + "learning_rate": 2.7929896077793528e-06, + "loss": 0.45427146553993225, + "memory(GiB)": 77.0, + "step": 4589, + "token_acc": 0.8576642335766423, + "train_speed(iter/s)": 0.762217 + }, + { + "epoch": 1.4687999999999999, + "grad_norm": 0.6182055104670663, + "learning_rate": 2.792113791403887e-06, + "loss": 0.36151057481765747, + "memory(GiB)": 77.0, + "step": 4590, + "token_acc": 0.9123190385140671, + "train_speed(iter/s)": 0.761867 + }, + { + "epoch": 1.46912, + "grad_norm": 0.6396312416404188, + "learning_rate": 2.791237938679927e-06, + "loss": 0.3642430007457733, + "memory(GiB)": 77.0, + "step": 4591, + "token_acc": 0.8807005375411826, + "train_speed(iter/s)": 0.761537 + }, + { + "epoch": 1.46944, + "grad_norm": 0.6396582022119568, + "learning_rate": 2.7903620497164585e-06, + "loss": 0.3799160122871399, + "memory(GiB)": 77.0, + "step": 4592, + "token_acc": 0.8968291528632276, + "train_speed(iter/s)": 0.761179 + }, + { + "epoch": 1.46976, + "grad_norm": 0.6835837387526155, + "learning_rate": 2.789486124622469e-06, + "loss": 0.3013668358325958, + "memory(GiB)": 77.0, + "step": 4593, + "token_acc": 0.9058278656889232, + "train_speed(iter/s)": 0.76085 + }, + { + "epoch": 1.47008, + "grad_norm": 0.708934638313599, + "learning_rate": 2.7886101635069534e-06, + "loss": 0.3249543309211731, + "memory(GiB)": 77.0, + "step": 4594, + "token_acc": 0.9411160561451558, + "train_speed(iter/s)": 0.760514 + }, + { + "epoch": 1.4704, + "grad_norm": 0.6659586002073445, + "learning_rate": 2.78773416647891e-06, + "loss": 0.26802510023117065, + "memory(GiB)": 77.0, + "step": 4595, + "token_acc": 0.9538203190596137, + "train_speed(iter/s)": 0.760164 + }, + { + "epoch": 1.47072, + 
"grad_norm": 0.6888364246602621, + "learning_rate": 2.786858133647341e-06, + "loss": 0.3186381459236145, + "memory(GiB)": 77.0, + "step": 4596, + "token_acc": 0.8736706413148566, + "train_speed(iter/s)": 0.759826 + }, + { + "epoch": 1.47104, + "grad_norm": 0.6399753668681046, + "learning_rate": 2.7859820651212533e-06, + "loss": 0.3009354770183563, + "memory(GiB)": 77.0, + "step": 4597, + "token_acc": 0.922920892494929, + "train_speed(iter/s)": 0.759494 + }, + { + "epoch": 1.47136, + "grad_norm": 0.6408175270030929, + "learning_rate": 2.785105961009659e-06, + "loss": 0.2975236773490906, + "memory(GiB)": 77.0, + "step": 4598, + "token_acc": 0.8260400616332819, + "train_speed(iter/s)": 0.75916 + }, + { + "epoch": 1.47168, + "grad_norm": 0.6191814255834243, + "learning_rate": 2.784229821421573e-06, + "loss": 0.3867717981338501, + "memory(GiB)": 77.0, + "step": 4599, + "token_acc": 0.8831379621280433, + "train_speed(iter/s)": 0.758821 + }, + { + "epoch": 1.472, + "grad_norm": 0.6583581124354277, + "learning_rate": 2.7833536464660173e-06, + "loss": 0.4129102826118469, + "memory(GiB)": 77.0, + "step": 4600, + "token_acc": 0.9136416861826698, + "train_speed(iter/s)": 0.758488 + }, + { + "epoch": 1.47232, + "grad_norm": 0.7696451030982492, + "learning_rate": 2.7824774362520147e-06, + "loss": 0.3552277684211731, + "memory(GiB)": 77.0, + "step": 4601, + "token_acc": 0.9230492628550881, + "train_speed(iter/s)": 0.758141 + }, + { + "epoch": 1.47264, + "grad_norm": 0.6648389086018894, + "learning_rate": 2.7816011908885952e-06, + "loss": 0.3684116005897522, + "memory(GiB)": 77.0, + "step": 4602, + "token_acc": 0.8670625856555505, + "train_speed(iter/s)": 0.757762 + }, + { + "epoch": 1.47296, + "grad_norm": 0.6275591541655634, + "learning_rate": 2.780724910484794e-06, + "loss": 0.3787533640861511, + "memory(GiB)": 77.0, + "step": 4603, + "token_acc": 0.9302884615384616, + "train_speed(iter/s)": 0.757401 + }, + { + "epoch": 1.47328, + "grad_norm": 0.6470257627595054, + "learning_rate": 2.779848595149647e-06, + "loss": 0.35422593355178833, + "memory(GiB)": 77.0, + "step": 4604, + "token_acc": 0.9167031045037166, + "train_speed(iter/s)": 0.757077 + }, + { + "epoch": 1.4736, + "grad_norm": 0.6261294928121267, + "learning_rate": 2.7789722449921963e-06, + "loss": 0.29366910457611084, + "memory(GiB)": 77.0, + "step": 4605, + "token_acc": 0.9340443446533819, + "train_speed(iter/s)": 0.756729 + }, + { + "epoch": 1.4739200000000001, + "grad_norm": 0.720774988743547, + "learning_rate": 2.77809586012149e-06, + "loss": 0.30891817808151245, + "memory(GiB)": 77.0, + "step": 4606, + "token_acc": 0.8577898550724637, + "train_speed(iter/s)": 0.756407 + }, + { + "epoch": 1.47424, + "grad_norm": 0.6463730428624685, + "learning_rate": 2.7772194406465776e-06, + "loss": 0.3735139071941376, + "memory(GiB)": 77.0, + "step": 4607, + "token_acc": 0.856687898089172, + "train_speed(iter/s)": 0.756072 + }, + { + "epoch": 1.4745599999999999, + "grad_norm": 0.6333720416576405, + "learning_rate": 2.7763429866765153e-06, + "loss": 0.2954905033111572, + "memory(GiB)": 77.0, + "step": 4608, + "token_acc": 0.9361161524500907, + "train_speed(iter/s)": 0.755745 + }, + { + "epoch": 1.47488, + "grad_norm": 0.7223992910143019, + "learning_rate": 2.7754664983203616e-06, + "loss": 0.3434819281101227, + "memory(GiB)": 77.0, + "step": 4609, + "token_acc": 0.9110802361931226, + "train_speed(iter/s)": 0.755426 + }, + { + "epoch": 1.4752, + "grad_norm": 0.7507078424145578, + "learning_rate": 2.7745899756871807e-06, + "loss": 0.34152114391326904, + 
"memory(GiB)": 77.0, + "step": 4610, + "token_acc": 0.8705978705978706, + "train_speed(iter/s)": 0.75511 + }, + { + "epoch": 1.47552, + "grad_norm": 0.6342595392302299, + "learning_rate": 2.7737134188860413e-06, + "loss": 0.2776348292827606, + "memory(GiB)": 77.0, + "step": 4611, + "token_acc": 0.9452991452991453, + "train_speed(iter/s)": 0.754768 + }, + { + "epoch": 1.47584, + "grad_norm": 0.7107961168855689, + "learning_rate": 2.7728368280260155e-06, + "loss": 0.3529987335205078, + "memory(GiB)": 77.0, + "step": 4612, + "token_acc": 0.9499155880697805, + "train_speed(iter/s)": 0.754423 + }, + { + "epoch": 1.4761600000000001, + "grad_norm": 0.6783665063506045, + "learning_rate": 2.7719602032161785e-06, + "loss": 0.3695982098579407, + "memory(GiB)": 77.0, + "step": 4613, + "token_acc": 0.9398652151373769, + "train_speed(iter/s)": 0.75409 + }, + { + "epoch": 1.47648, + "grad_norm": 0.7014813319886416, + "learning_rate": 2.7710835445656127e-06, + "loss": 0.38585391640663147, + "memory(GiB)": 77.0, + "step": 4614, + "token_acc": 0.8574369531652084, + "train_speed(iter/s)": 0.753747 + }, + { + "epoch": 1.4768, + "grad_norm": 0.712666082815458, + "learning_rate": 2.7702068521834017e-06, + "loss": 0.47458523511886597, + "memory(GiB)": 77.0, + "step": 4615, + "token_acc": 0.8679117911791179, + "train_speed(iter/s)": 0.753423 + }, + { + "epoch": 1.47712, + "grad_norm": 0.6158765004270792, + "learning_rate": 2.769330126178636e-06, + "loss": 0.2975250780582428, + "memory(GiB)": 77.0, + "step": 4616, + "token_acc": 0.8753267119707266, + "train_speed(iter/s)": 0.753057 + }, + { + "epoch": 1.47744, + "grad_norm": 0.7222112570199093, + "learning_rate": 2.768453366660408e-06, + "loss": 0.35452330112457275, + "memory(GiB)": 77.0, + "step": 4617, + "token_acc": 0.9322328410078193, + "train_speed(iter/s)": 0.752737 + }, + { + "epoch": 1.47776, + "grad_norm": 0.6338974579551566, + "learning_rate": 2.7675765737378153e-06, + "loss": 0.3161076605319977, + "memory(GiB)": 77.0, + "step": 4618, + "token_acc": 0.9114240290710879, + "train_speed(iter/s)": 0.752385 + }, + { + "epoch": 1.47808, + "grad_norm": 0.6791221118312981, + "learning_rate": 2.7666997475199602e-06, + "loss": 0.3767821192741394, + "memory(GiB)": 77.0, + "step": 4619, + "token_acc": 0.9415282392026578, + "train_speed(iter/s)": 0.752062 + }, + { + "epoch": 1.4784, + "grad_norm": 0.6261824369432297, + "learning_rate": 2.7658228881159477e-06, + "loss": 0.29215121269226074, + "memory(GiB)": 77.0, + "step": 4620, + "token_acc": 0.9081735620585267, + "train_speed(iter/s)": 0.751737 + }, + { + "epoch": 1.47872, + "grad_norm": 0.6226597519329888, + "learning_rate": 2.7649459956348885e-06, + "loss": 0.30850982666015625, + "memory(GiB)": 77.0, + "step": 4621, + "token_acc": 0.8678893378924464, + "train_speed(iter/s)": 0.75138 + }, + { + "epoch": 1.47904, + "grad_norm": 0.6182996571805368, + "learning_rate": 2.764069070185896e-06, + "loss": 0.2505049705505371, + "memory(GiB)": 77.0, + "step": 4622, + "token_acc": 0.8906285977434952, + "train_speed(iter/s)": 0.751063 + }, + { + "epoch": 1.47936, + "grad_norm": 0.6667367696681854, + "learning_rate": 2.7631921118780885e-06, + "loss": 0.2957471013069153, + "memory(GiB)": 77.0, + "step": 4623, + "token_acc": 0.9262317643820533, + "train_speed(iter/s)": 0.750746 + }, + { + "epoch": 1.47968, + "grad_norm": 0.6687583812569596, + "learning_rate": 2.762315120820589e-06, + "loss": 0.3658207058906555, + "memory(GiB)": 77.0, + "step": 4624, + "token_acc": 0.8532043530834341, + "train_speed(iter/s)": 0.75043 + }, + { + 
"epoch": 1.48, + "grad_norm": 0.6720425537538968, + "learning_rate": 2.7614380971225224e-06, + "loss": 0.2724011540412903, + "memory(GiB)": 77.0, + "step": 4625, + "token_acc": 0.9316389132340053, + "train_speed(iter/s)": 0.75012 + }, + { + "epoch": 1.48032, + "grad_norm": 0.6812388440010702, + "learning_rate": 2.76056104089302e-06, + "loss": 0.32202109694480896, + "memory(GiB)": 77.0, + "step": 4626, + "token_acc": 0.8702749140893471, + "train_speed(iter/s)": 0.749752 + }, + { + "epoch": 1.48064, + "grad_norm": 0.6458135965670556, + "learning_rate": 2.759683952241216e-06, + "loss": 0.2920938730239868, + "memory(GiB)": 77.0, + "step": 4627, + "token_acc": 0.9068870523415978, + "train_speed(iter/s)": 0.749445 + }, + { + "epoch": 1.48096, + "grad_norm": 0.7484558931132411, + "learning_rate": 2.7588068312762497e-06, + "loss": 0.4308229684829712, + "memory(GiB)": 77.0, + "step": 4628, + "token_acc": 0.894091689139495, + "train_speed(iter/s)": 0.74912 + }, + { + "epoch": 1.48128, + "grad_norm": 0.6981163280242005, + "learning_rate": 2.7579296781072625e-06, + "loss": 0.30672135949134827, + "memory(GiB)": 77.0, + "step": 4629, + "token_acc": 0.8911091804228207, + "train_speed(iter/s)": 0.748793 + }, + { + "epoch": 1.4816, + "grad_norm": 0.6077349595279674, + "learning_rate": 2.757052492843401e-06, + "loss": 0.3364139795303345, + "memory(GiB)": 77.0, + "step": 4630, + "token_acc": 0.9149067049415625, + "train_speed(iter/s)": 0.748464 + }, + { + "epoch": 1.4819200000000001, + "grad_norm": 0.6483869201504976, + "learning_rate": 2.7561752755938164e-06, + "loss": 0.322149395942688, + "memory(GiB)": 77.0, + "step": 4631, + "token_acc": 0.9429885057471264, + "train_speed(iter/s)": 0.74814 + }, + { + "epoch": 1.48224, + "grad_norm": 0.6527898253287926, + "learning_rate": 2.7552980264676633e-06, + "loss": 0.26555830240249634, + "memory(GiB)": 77.0, + "step": 4632, + "token_acc": 0.9214285714285714, + "train_speed(iter/s)": 0.747832 + }, + { + "epoch": 1.4825599999999999, + "grad_norm": 0.6361974985121425, + "learning_rate": 2.7544207455740986e-06, + "loss": 0.34661075472831726, + "memory(GiB)": 77.0, + "step": 4633, + "token_acc": 0.9101153742956801, + "train_speed(iter/s)": 0.747439 + }, + { + "epoch": 1.48288, + "grad_norm": 0.6444189975200565, + "learning_rate": 2.7535434330222853e-06, + "loss": 0.3274526000022888, + "memory(GiB)": 77.0, + "step": 4634, + "token_acc": 0.9118858954041205, + "train_speed(iter/s)": 0.747113 + }, + { + "epoch": 1.4832, + "grad_norm": 0.6409711891690328, + "learning_rate": 2.7526660889213907e-06, + "loss": 0.3202553391456604, + "memory(GiB)": 77.0, + "step": 4635, + "token_acc": 0.9152674736431081, + "train_speed(iter/s)": 0.746802 + }, + { + "epoch": 1.48352, + "grad_norm": 0.7371866184765037, + "learning_rate": 2.7517887133805843e-06, + "loss": 0.36142534017562866, + "memory(GiB)": 77.0, + "step": 4636, + "token_acc": 0.8449913644214162, + "train_speed(iter/s)": 0.746489 + }, + { + "epoch": 1.48384, + "grad_norm": 0.7283446753501768, + "learning_rate": 2.7509113065090405e-06, + "loss": 0.360288143157959, + "memory(GiB)": 77.0, + "step": 4637, + "token_acc": 0.9118745332337566, + "train_speed(iter/s)": 0.746174 + }, + { + "epoch": 1.48416, + "grad_norm": 0.6749473303050598, + "learning_rate": 2.750033868415937e-06, + "loss": 0.361852765083313, + "memory(GiB)": 77.0, + "step": 4638, + "token_acc": 0.8890728476821192, + "train_speed(iter/s)": 0.745858 + }, + { + "epoch": 1.48448, + "grad_norm": 0.6373360071738795, + "learning_rate": 2.749156399210457e-06, + "loss": 
0.4055374562740326, + "memory(GiB)": 77.0, + "step": 4639, + "token_acc": 0.8970893970893971, + "train_speed(iter/s)": 0.745512 + }, + { + "epoch": 1.4848, + "grad_norm": 0.7323296963167109, + "learning_rate": 2.748278899001785e-06, + "loss": 0.39061835408210754, + "memory(GiB)": 77.0, + "step": 4640, + "token_acc": 0.8726912928759895, + "train_speed(iter/s)": 0.745188 + }, + { + "epoch": 1.48512, + "grad_norm": 0.7666302312925853, + "learning_rate": 2.7474013678991104e-06, + "loss": 0.4406793415546417, + "memory(GiB)": 77.0, + "step": 4641, + "token_acc": 0.8650168728908887, + "train_speed(iter/s)": 0.744873 + }, + { + "epoch": 1.48544, + "grad_norm": 0.7305702316448591, + "learning_rate": 2.7465238060116273e-06, + "loss": 0.3541054129600525, + "memory(GiB)": 77.0, + "step": 4642, + "token_acc": 0.8805677924620656, + "train_speed(iter/s)": 0.74455 + }, + { + "epoch": 1.48576, + "grad_norm": 0.6536164161938821, + "learning_rate": 2.745646213448533e-06, + "loss": 0.38406407833099365, + "memory(GiB)": 77.0, + "step": 4643, + "token_acc": 0.9173290937996821, + "train_speed(iter/s)": 0.74423 + }, + { + "epoch": 1.48608, + "grad_norm": 0.7197152294957194, + "learning_rate": 2.744768590319029e-06, + "loss": 0.3265560269355774, + "memory(GiB)": 77.0, + "step": 4644, + "token_acc": 0.9611650485436893, + "train_speed(iter/s)": 0.743924 + }, + { + "epoch": 1.4864, + "grad_norm": 0.626812669906154, + "learning_rate": 2.7438909367323204e-06, + "loss": 0.2904762029647827, + "memory(GiB)": 77.0, + "step": 4645, + "token_acc": 0.8968453648151732, + "train_speed(iter/s)": 0.74358 + }, + { + "epoch": 1.48672, + "grad_norm": 0.624019679986348, + "learning_rate": 2.7430132527976162e-06, + "loss": 0.3345952033996582, + "memory(GiB)": 77.0, + "step": 4646, + "token_acc": 0.8528150134048257, + "train_speed(iter/s)": 0.743229 + }, + { + "epoch": 1.48704, + "grad_norm": 0.6917737337396401, + "learning_rate": 2.7421355386241273e-06, + "loss": 0.35429492592811584, + "memory(GiB)": 77.0, + "step": 4647, + "token_acc": 0.9038844621513944, + "train_speed(iter/s)": 0.742924 + }, + { + "epoch": 1.48736, + "grad_norm": 0.6582046134904745, + "learning_rate": 2.741257794321071e-06, + "loss": 0.39522701501846313, + "memory(GiB)": 77.0, + "step": 4648, + "token_acc": 0.9307999061693643, + "train_speed(iter/s)": 0.74262 + }, + { + "epoch": 1.4876800000000001, + "grad_norm": 0.6744443512347799, + "learning_rate": 2.7403800199976677e-06, + "loss": 0.32061246037483215, + "memory(GiB)": 77.0, + "step": 4649, + "token_acc": 0.8945793149318257, + "train_speed(iter/s)": 0.7423 + }, + { + "epoch": 1.488, + "grad_norm": 0.626270949121785, + "learning_rate": 2.7395022157631406e-06, + "loss": 0.3177472949028015, + "memory(GiB)": 77.0, + "step": 4650, + "token_acc": 0.8916152096197595, + "train_speed(iter/s)": 0.741985 + }, + { + "epoch": 1.4883199999999999, + "grad_norm": 0.5962211541273826, + "learning_rate": 2.738624381726717e-06, + "loss": 0.32972952723503113, + "memory(GiB)": 77.0, + "step": 4651, + "token_acc": 0.903702244978338, + "train_speed(iter/s)": 0.741662 + }, + { + "epoch": 1.48864, + "grad_norm": 0.546189206553452, + "learning_rate": 2.7377465179976296e-06, + "loss": 0.2605956196784973, + "memory(GiB)": 77.0, + "step": 4652, + "token_acc": 0.9076393237319975, + "train_speed(iter/s)": 0.741328 + }, + { + "epoch": 1.48896, + "grad_norm": 0.5626118252603889, + "learning_rate": 2.736868624685113e-06, + "loss": 0.23988160490989685, + "memory(GiB)": 77.0, + "step": 4653, + "token_acc": 0.9308176100628931, + 
"train_speed(iter/s)": 0.741007 + }, + { + "epoch": 1.48928, + "grad_norm": 0.6458268603471624, + "learning_rate": 2.735990701898404e-06, + "loss": 0.3431261479854584, + "memory(GiB)": 77.0, + "step": 4654, + "token_acc": 0.844280240831965, + "train_speed(iter/s)": 0.740697 + }, + { + "epoch": 1.4896, + "grad_norm": 0.6275050064156468, + "learning_rate": 2.7351127497467454e-06, + "loss": 0.30654841661453247, + "memory(GiB)": 77.0, + "step": 4655, + "token_acc": 0.9049168474331164, + "train_speed(iter/s)": 0.740383 + }, + { + "epoch": 1.4899200000000001, + "grad_norm": 0.6257877379849571, + "learning_rate": 2.734234768339385e-06, + "loss": 0.37601372599601746, + "memory(GiB)": 77.0, + "step": 4656, + "token_acc": 0.8186284544524053, + "train_speed(iter/s)": 0.740079 + }, + { + "epoch": 1.49024, + "grad_norm": 0.6112387619399474, + "learning_rate": 2.7333567577855702e-06, + "loss": 0.27406421303749084, + "memory(GiB)": 77.0, + "step": 4657, + "token_acc": 0.9190964013659049, + "train_speed(iter/s)": 0.739767 + }, + { + "epoch": 1.4905599999999999, + "grad_norm": 0.6378862441593357, + "learning_rate": 2.7324787181945556e-06, + "loss": 0.31251469254493713, + "memory(GiB)": 77.0, + "step": 4658, + "token_acc": 0.8907849829351536, + "train_speed(iter/s)": 0.73944 + }, + { + "epoch": 1.49088, + "grad_norm": 0.6439538357105294, + "learning_rate": 2.7316006496755964e-06, + "loss": 0.3171713948249817, + "memory(GiB)": 77.0, + "step": 4659, + "token_acc": 0.9455638034641216, + "train_speed(iter/s)": 0.739116 + }, + { + "epoch": 1.4912, + "grad_norm": 0.655989797103642, + "learning_rate": 2.7307225523379547e-06, + "loss": 0.3690810203552246, + "memory(GiB)": 77.0, + "step": 4660, + "token_acc": 0.8166403785488959, + "train_speed(iter/s)": 0.738762 + }, + { + "epoch": 1.49152, + "grad_norm": 0.7079878602908817, + "learning_rate": 2.729844426290894e-06, + "loss": 0.29917237162590027, + "memory(GiB)": 77.0, + "step": 4661, + "token_acc": 0.9064114250987542, + "train_speed(iter/s)": 0.738465 + }, + { + "epoch": 1.49184, + "grad_norm": 0.7078455687772817, + "learning_rate": 2.7289662716436817e-06, + "loss": 0.2872312068939209, + "memory(GiB)": 77.0, + "step": 4662, + "token_acc": 0.9253271747498075, + "train_speed(iter/s)": 0.738157 + }, + { + "epoch": 1.49216, + "grad_norm": 0.6241812276573874, + "learning_rate": 2.7280880885055886e-06, + "loss": 0.3527149558067322, + "memory(GiB)": 77.0, + "step": 4663, + "token_acc": 0.887140902872777, + "train_speed(iter/s)": 0.737844 + }, + { + "epoch": 1.49248, + "grad_norm": 0.6014920986429059, + "learning_rate": 2.7272098769858898e-06, + "loss": 0.34671980142593384, + "memory(GiB)": 77.0, + "step": 4664, + "token_acc": 0.8398667691519344, + "train_speed(iter/s)": 0.737542 + }, + { + "epoch": 1.4928, + "grad_norm": 0.6154692601764065, + "learning_rate": 2.726331637193863e-06, + "loss": 0.2844206392765045, + "memory(GiB)": 77.0, + "step": 4665, + "token_acc": 0.9408362989323843, + "train_speed(iter/s)": 0.737238 + }, + { + "epoch": 1.49312, + "grad_norm": 0.6069977784121305, + "learning_rate": 2.72545336923879e-06, + "loss": 0.33463430404663086, + "memory(GiB)": 77.0, + "step": 4666, + "token_acc": 0.8810141169691732, + "train_speed(iter/s)": 0.736904 + }, + { + "epoch": 1.49344, + "grad_norm": 0.5822317730073145, + "learning_rate": 2.7245750732299566e-06, + "loss": 0.27832043170928955, + "memory(GiB)": 77.0, + "step": 4667, + "token_acc": 0.8881283422459894, + "train_speed(iter/s)": 0.736565 + }, + { + "epoch": 1.49376, + "grad_norm": 0.671514561446218, + 
"learning_rate": 2.7236967492766514e-06, + "loss": 0.34914442896842957, + "memory(GiB)": 77.0, + "step": 4668, + "token_acc": 0.8826570173171362, + "train_speed(iter/s)": 0.736266 + }, + { + "epoch": 1.49408, + "grad_norm": 0.5834523303728688, + "learning_rate": 2.722818397488166e-06, + "loss": 0.3239899277687073, + "memory(GiB)": 77.0, + "step": 4669, + "token_acc": 0.8206318043905051, + "train_speed(iter/s)": 0.735949 + }, + { + "epoch": 1.4944, + "grad_norm": 0.6113873002293552, + "learning_rate": 2.7219400179737964e-06, + "loss": 0.31959062814712524, + "memory(GiB)": 77.0, + "step": 4670, + "token_acc": 0.9319905833115354, + "train_speed(iter/s)": 0.735629 + }, + { + "epoch": 1.49472, + "grad_norm": 0.6631113465326117, + "learning_rate": 2.7210616108428418e-06, + "loss": 0.306679904460907, + "memory(GiB)": 77.0, + "step": 4671, + "token_acc": 0.9479258605472197, + "train_speed(iter/s)": 0.735314 + }, + { + "epoch": 1.49504, + "grad_norm": 0.705731456476666, + "learning_rate": 2.7201831762046044e-06, + "loss": 0.4583074450492859, + "memory(GiB)": 77.0, + "step": 4672, + "token_acc": 0.8709619846510798, + "train_speed(iter/s)": 0.734994 + }, + { + "epoch": 1.49536, + "grad_norm": 0.6536802541245967, + "learning_rate": 2.7193047141683905e-06, + "loss": 0.3010915517807007, + "memory(GiB)": 77.0, + "step": 4673, + "token_acc": 0.8575361321403991, + "train_speed(iter/s)": 0.734692 + }, + { + "epoch": 1.4956800000000001, + "grad_norm": 0.7130065697701662, + "learning_rate": 2.718426224843509e-06, + "loss": 0.4260788857936859, + "memory(GiB)": 77.0, + "step": 4674, + "token_acc": 0.9034118212397886, + "train_speed(iter/s)": 0.734379 + }, + { + "epoch": 1.496, + "grad_norm": 0.6985223697442289, + "learning_rate": 2.717547708339273e-06, + "loss": 0.32283705472946167, + "memory(GiB)": 77.0, + "step": 4675, + "token_acc": 0.8679683867968386, + "train_speed(iter/s)": 0.734072 + }, + { + "epoch": 1.4963199999999999, + "grad_norm": 0.6807113694758136, + "learning_rate": 2.7166691647649988e-06, + "loss": 0.36361372470855713, + "memory(GiB)": 77.0, + "step": 4676, + "token_acc": 0.8684153005464481, + "train_speed(iter/s)": 0.733773 + }, + { + "epoch": 1.49664, + "grad_norm": 0.6194153683101852, + "learning_rate": 2.7157905942300057e-06, + "loss": 0.24782827496528625, + "memory(GiB)": 77.0, + "step": 4677, + "token_acc": 0.9137162339365329, + "train_speed(iter/s)": 0.733475 + }, + { + "epoch": 1.49696, + "grad_norm": 0.703959228412774, + "learning_rate": 2.714911996843617e-06, + "loss": 0.37085193395614624, + "memory(GiB)": 77.0, + "step": 4678, + "token_acc": 0.9253930189181988, + "train_speed(iter/s)": 0.733177 + }, + { + "epoch": 1.49728, + "grad_norm": 0.7122262524154424, + "learning_rate": 2.7140333727151572e-06, + "loss": 0.3010864853858948, + "memory(GiB)": 77.0, + "step": 4679, + "token_acc": 0.8975075704635453, + "train_speed(iter/s)": 0.732856 + }, + { + "epoch": 1.4976, + "grad_norm": 0.618283576783371, + "learning_rate": 2.713154721953958e-06, + "loss": 0.381596177816391, + "memory(GiB)": 77.0, + "step": 4680, + "token_acc": 0.9039311467783205, + "train_speed(iter/s)": 0.732545 + }, + { + "epoch": 1.49792, + "grad_norm": 0.6338557493042457, + "learning_rate": 2.7122760446693515e-06, + "loss": 0.2859265208244324, + "memory(GiB)": 77.0, + "step": 4681, + "token_acc": 0.883540046247635, + "train_speed(iter/s)": 0.73221 + }, + { + "epoch": 1.49824, + "grad_norm": 0.6731317970475563, + "learning_rate": 2.7113973409706727e-06, + "loss": 0.41049283742904663, + "memory(GiB)": 77.0, + "step": 4682, + 
"token_acc": 0.8453760269643986, + "train_speed(iter/s)": 0.731892 + }, + { + "epoch": 1.49856, + "grad_norm": 0.7082854060972655, + "learning_rate": 2.7105186109672626e-06, + "loss": 0.35508859157562256, + "memory(GiB)": 77.0, + "step": 4683, + "token_acc": 0.8405929304446979, + "train_speed(iter/s)": 0.731565 + }, + { + "epoch": 1.49888, + "grad_norm": 0.6720745154218956, + "learning_rate": 2.7096398547684637e-06, + "loss": 0.37825000286102295, + "memory(GiB)": 77.0, + "step": 4684, + "token_acc": 0.9114850036576445, + "train_speed(iter/s)": 0.731266 + }, + { + "epoch": 1.4992, + "grad_norm": 0.634103408547143, + "learning_rate": 2.708761072483621e-06, + "loss": 0.2656673491001129, + "memory(GiB)": 77.0, + "step": 4685, + "token_acc": 0.8852074093399426, + "train_speed(iter/s)": 0.730952 + }, + { + "epoch": 1.49952, + "grad_norm": 0.5969944180014047, + "learning_rate": 2.707882264222085e-06, + "loss": 0.275209903717041, + "memory(GiB)": 77.0, + "step": 4686, + "token_acc": 0.8886526707414297, + "train_speed(iter/s)": 0.730648 + }, + { + "epoch": 1.49984, + "grad_norm": 0.6883359663915645, + "learning_rate": 2.7070034300932063e-06, + "loss": 0.3805559575557709, + "memory(GiB)": 77.0, + "step": 4687, + "token_acc": 0.9341795956746591, + "train_speed(iter/s)": 0.730333 + }, + { + "epoch": 1.5001600000000002, + "grad_norm": 0.5483528534926531, + "learning_rate": 2.7061245702063425e-06, + "loss": 0.23894253373146057, + "memory(GiB)": 77.0, + "step": 4688, + "token_acc": 0.9105128205128206, + "train_speed(iter/s)": 0.730031 + }, + { + "epoch": 1.50048, + "grad_norm": 0.6580333225691745, + "learning_rate": 2.705245684670852e-06, + "loss": 0.2844535708427429, + "memory(GiB)": 77.0, + "step": 4689, + "token_acc": 0.9367671691792295, + "train_speed(iter/s)": 0.729726 + }, + { + "epoch": 1.5008, + "grad_norm": 0.6504663380187428, + "learning_rate": 2.7043667735960956e-06, + "loss": 0.28084927797317505, + "memory(GiB)": 77.0, + "step": 4690, + "token_acc": 0.8772581044295966, + "train_speed(iter/s)": 0.729433 + }, + { + "epoch": 1.50112, + "grad_norm": 0.6500552251941005, + "learning_rate": 2.7034878370914396e-06, + "loss": 0.3015860915184021, + "memory(GiB)": 77.0, + "step": 4691, + "token_acc": 0.9138594802694899, + "train_speed(iter/s)": 0.729085 + }, + { + "epoch": 1.50144, + "grad_norm": 0.6690361982012931, + "learning_rate": 2.702608875266253e-06, + "loss": 0.31606319546699524, + "memory(GiB)": 77.0, + "step": 4692, + "token_acc": 0.9002247191011236, + "train_speed(iter/s)": 0.728793 + }, + { + "epoch": 1.50176, + "grad_norm": 0.6652973334383451, + "learning_rate": 2.701729888229906e-06, + "loss": 0.27856647968292236, + "memory(GiB)": 77.0, + "step": 4693, + "token_acc": 0.9023978385680513, + "train_speed(iter/s)": 0.728491 + }, + { + "epoch": 1.5020799999999999, + "grad_norm": 0.6879760802216153, + "learning_rate": 2.700850876091774e-06, + "loss": 0.30137816071510315, + "memory(GiB)": 77.0, + "step": 4694, + "token_acc": 0.9380812530592266, + "train_speed(iter/s)": 0.728155 + }, + { + "epoch": 1.5024, + "grad_norm": 0.6890055127692157, + "learning_rate": 2.699971838961235e-06, + "loss": 0.405412495136261, + "memory(GiB)": 77.0, + "step": 4695, + "token_acc": 0.9604474630443468, + "train_speed(iter/s)": 0.727864 + }, + { + "epoch": 1.50272, + "grad_norm": 0.6707877389107059, + "learning_rate": 2.69909277694767e-06, + "loss": 0.37596389651298523, + "memory(GiB)": 77.0, + "step": 4696, + "token_acc": 0.8620517928286853, + "train_speed(iter/s)": 0.727546 + }, + { + "epoch": 1.50304, + 
"grad_norm": 0.6131530244399821, + "learning_rate": 2.6982136901604617e-06, + "loss": 0.3316184878349304, + "memory(GiB)": 77.0, + "step": 4697, + "token_acc": 0.8683606557377049, + "train_speed(iter/s)": 0.727227 + }, + { + "epoch": 1.50336, + "grad_norm": 0.6409060266484036, + "learning_rate": 2.697334578708999e-06, + "loss": 0.33426982164382935, + "memory(GiB)": 77.0, + "step": 4698, + "token_acc": 0.9068281938325992, + "train_speed(iter/s)": 0.726939 + }, + { + "epoch": 1.5036800000000001, + "grad_norm": 0.5856120050555865, + "learning_rate": 2.6964554427026696e-06, + "loss": 0.3400508165359497, + "memory(GiB)": 77.0, + "step": 4699, + "token_acc": 0.9126906848425835, + "train_speed(iter/s)": 0.72664 + }, + { + "epoch": 1.504, + "grad_norm": 0.6392580784687554, + "learning_rate": 2.6955762822508698e-06, + "loss": 0.2911606431007385, + "memory(GiB)": 77.0, + "step": 4700, + "token_acc": 0.8668400520156047, + "train_speed(iter/s)": 0.726343 + }, + { + "epoch": 1.5043199999999999, + "grad_norm": 0.6811950036774619, + "learning_rate": 2.694697097462994e-06, + "loss": 0.2663789391517639, + "memory(GiB)": 77.0, + "step": 4701, + "token_acc": 0.9101251422070534, + "train_speed(iter/s)": 0.726059 + }, + { + "epoch": 1.50464, + "grad_norm": 0.6390888183393608, + "learning_rate": 2.6938178884484417e-06, + "loss": 0.36866164207458496, + "memory(GiB)": 77.0, + "step": 4702, + "token_acc": 0.9452290733723734, + "train_speed(iter/s)": 0.725743 + }, + { + "epoch": 1.50496, + "grad_norm": 0.6177990334026949, + "learning_rate": 2.6929386553166165e-06, + "loss": 0.34000450372695923, + "memory(GiB)": 77.0, + "step": 4703, + "token_acc": 0.8754285714285714, + "train_speed(iter/s)": 0.725441 + }, + { + "epoch": 1.50528, + "grad_norm": 0.6579930001854174, + "learning_rate": 2.692059398176922e-06, + "loss": 0.2960076332092285, + "memory(GiB)": 77.0, + "step": 4704, + "token_acc": 0.9322916666666666, + "train_speed(iter/s)": 0.725127 + }, + { + "epoch": 1.5056, + "grad_norm": 0.6516026223572436, + "learning_rate": 2.691180117138767e-06, + "loss": 0.33337876200675964, + "memory(GiB)": 77.0, + "step": 4705, + "token_acc": 0.9164780909277926, + "train_speed(iter/s)": 0.724841 + }, + { + "epoch": 1.5059200000000001, + "grad_norm": 0.6674989416828244, + "learning_rate": 2.6903008123115627e-06, + "loss": 0.36993780732154846, + "memory(GiB)": 77.0, + "step": 4706, + "token_acc": 0.8904421326397919, + "train_speed(iter/s)": 0.724529 + }, + { + "epoch": 1.50624, + "grad_norm": 0.6802616578690227, + "learning_rate": 2.6894214838047234e-06, + "loss": 0.2990211546421051, + "memory(GiB)": 77.0, + "step": 4707, + "token_acc": 0.9226006191950464, + "train_speed(iter/s)": 0.724241 + }, + { + "epoch": 1.50656, + "grad_norm": 0.5788882234381021, + "learning_rate": 2.6885421317276667e-06, + "loss": 0.2792598307132721, + "memory(GiB)": 77.0, + "step": 4708, + "token_acc": 0.927536231884058, + "train_speed(iter/s)": 0.723945 + }, + { + "epoch": 1.50688, + "grad_norm": 0.6683143006464956, + "learning_rate": 2.687662756189812e-06, + "loss": 0.3810393214225769, + "memory(GiB)": 77.0, + "step": 4709, + "token_acc": 0.9196334155798379, + "train_speed(iter/s)": 0.723641 + }, + { + "epoch": 1.5072, + "grad_norm": 0.6418047601490635, + "learning_rate": 2.6867833573005835e-06, + "loss": 0.29885634779930115, + "memory(GiB)": 77.0, + "step": 4710, + "token_acc": 0.9135483870967742, + "train_speed(iter/s)": 0.723338 + }, + { + "epoch": 1.50752, + "grad_norm": 0.6551772000250795, + "learning_rate": 2.6859039351694066e-06, + "loss": 
0.34923815727233887, + "memory(GiB)": 77.0, + "step": 4711, + "token_acc": 0.9327188940092166, + "train_speed(iter/s)": 0.723044 + }, + { + "epoch": 1.5078399999999998, + "grad_norm": 0.6478021782630577, + "learning_rate": 2.6850244899057094e-06, + "loss": 0.3731371760368347, + "memory(GiB)": 77.0, + "step": 4712, + "token_acc": 0.8840619811842834, + "train_speed(iter/s)": 0.722754 + }, + { + "epoch": 1.50816, + "grad_norm": 0.6576270979697844, + "learning_rate": 2.684145021618924e-06, + "loss": 0.36607858538627625, + "memory(GiB)": 77.0, + "step": 4713, + "token_acc": 0.8554794520547945, + "train_speed(iter/s)": 0.722446 + }, + { + "epoch": 1.50848, + "grad_norm": 0.6920611538629898, + "learning_rate": 2.683265530418486e-06, + "loss": 0.36470043659210205, + "memory(GiB)": 77.0, + "step": 4714, + "token_acc": 0.9102983244789539, + "train_speed(iter/s)": 0.722127 + }, + { + "epoch": 1.5088, + "grad_norm": 0.6282572704084697, + "learning_rate": 2.6823860164138303e-06, + "loss": 0.2966056168079376, + "memory(GiB)": 77.0, + "step": 4715, + "token_acc": 0.9346838980090814, + "train_speed(iter/s)": 0.721842 + }, + { + "epoch": 1.50912, + "grad_norm": 0.6399408354749543, + "learning_rate": 2.6815064797144e-06, + "loss": 0.3151938021183014, + "memory(GiB)": 77.0, + "step": 4716, + "token_acc": 0.9518922386144965, + "train_speed(iter/s)": 0.721553 + }, + { + "epoch": 1.5094400000000001, + "grad_norm": 0.6355309589119076, + "learning_rate": 2.680626920429636e-06, + "loss": 0.3694184720516205, + "memory(GiB)": 77.0, + "step": 4717, + "token_acc": 0.9566965666563563, + "train_speed(iter/s)": 0.721236 + }, + { + "epoch": 1.50976, + "grad_norm": 0.7147968093352489, + "learning_rate": 2.6797473386689865e-06, + "loss": 0.3796667456626892, + "memory(GiB)": 77.0, + "step": 4718, + "token_acc": 0.921436004162331, + "train_speed(iter/s)": 0.720955 + }, + { + "epoch": 1.5100799999999999, + "grad_norm": 0.6561416133141352, + "learning_rate": 2.6788677345418977e-06, + "loss": 0.37548205256462097, + "memory(GiB)": 77.0, + "step": 4719, + "token_acc": 0.8898799313893654, + "train_speed(iter/s)": 0.720666 + }, + { + "epoch": 1.5104, + "grad_norm": 0.6804886012643914, + "learning_rate": 2.677988108157823e-06, + "loss": 0.29161596298217773, + "memory(GiB)": 77.0, + "step": 4720, + "token_acc": 0.9320102903344358, + "train_speed(iter/s)": 0.720363 + }, + { + "epoch": 1.51072, + "grad_norm": 0.691254206533967, + "learning_rate": 2.6771084596262153e-06, + "loss": 0.2732883393764496, + "memory(GiB)": 77.0, + "step": 4721, + "token_acc": 0.9493754109138725, + "train_speed(iter/s)": 0.720093 + }, + { + "epoch": 1.51104, + "grad_norm": 0.623659409489367, + "learning_rate": 2.6762287890565327e-06, + "loss": 0.33374857902526855, + "memory(GiB)": 77.0, + "step": 4722, + "token_acc": 0.9156289707750953, + "train_speed(iter/s)": 0.71981 + }, + { + "epoch": 1.51136, + "grad_norm": 0.7598548697823642, + "learning_rate": 2.675349096558234e-06, + "loss": 0.34833067655563354, + "memory(GiB)": 77.0, + "step": 4723, + "token_acc": 0.9219812236969893, + "train_speed(iter/s)": 0.719535 + }, + { + "epoch": 1.5116800000000001, + "grad_norm": 0.602383554948501, + "learning_rate": 2.674469382240782e-06, + "loss": 0.3053348958492279, + "memory(GiB)": 77.0, + "step": 4724, + "token_acc": 0.9540128881077915, + "train_speed(iter/s)": 0.719211 + }, + { + "epoch": 1.512, + "grad_norm": 0.7164528938637292, + "learning_rate": 2.6735896462136425e-06, + "loss": 0.39589452743530273, + "memory(GiB)": 77.0, + "step": 4725, + "token_acc": 
0.8078886310904873, + "train_speed(iter/s)": 0.718918 + }, + { + "epoch": 1.5123199999999999, + "grad_norm": 0.6109559845577345, + "learning_rate": 2.6727098885862825e-06, + "loss": 0.29338228702545166, + "memory(GiB)": 77.0, + "step": 4726, + "token_acc": 0.9105345560277343, + "train_speed(iter/s)": 0.718641 + }, + { + "epoch": 1.51264, + "grad_norm": 0.591640069170593, + "learning_rate": 2.671830109468173e-06, + "loss": 0.31213676929473877, + "memory(GiB)": 77.0, + "step": 4727, + "token_acc": 0.9033270558694287, + "train_speed(iter/s)": 0.71834 + }, + { + "epoch": 1.51296, + "grad_norm": 0.673352413108749, + "learning_rate": 2.6709503089687872e-06, + "loss": 0.38801145553588867, + "memory(GiB)": 77.0, + "step": 4728, + "token_acc": 0.8677276284894339, + "train_speed(iter/s)": 0.71804 + }, + { + "epoch": 1.51328, + "grad_norm": 0.6393329000831102, + "learning_rate": 2.6700704871976008e-06, + "loss": 0.32441121339797974, + "memory(GiB)": 77.0, + "step": 4729, + "token_acc": 0.893740902474527, + "train_speed(iter/s)": 0.71774 + }, + { + "epoch": 1.5135999999999998, + "grad_norm": 0.6262649077731095, + "learning_rate": 2.6691906442640923e-06, + "loss": 0.37837085127830505, + "memory(GiB)": 77.0, + "step": 4730, + "token_acc": 0.8718349484213817, + "train_speed(iter/s)": 0.717436 + }, + { + "epoch": 1.5139200000000002, + "grad_norm": 0.6951113991685148, + "learning_rate": 2.6683107802777443e-06, + "loss": 0.4262896478176117, + "memory(GiB)": 77.0, + "step": 4731, + "token_acc": 0.9397533206831119, + "train_speed(iter/s)": 0.717148 + }, + { + "epoch": 1.51424, + "grad_norm": 0.7166849303063048, + "learning_rate": 2.6674308953480382e-06, + "loss": 0.3764309585094452, + "memory(GiB)": 77.0, + "step": 4732, + "token_acc": 0.876765855124737, + "train_speed(iter/s)": 0.716862 + }, + { + "epoch": 1.51456, + "grad_norm": 0.6154643227248124, + "learning_rate": 2.666550989584462e-06, + "loss": 0.34089961647987366, + "memory(GiB)": 77.0, + "step": 4733, + "token_acc": 0.9515984463698834, + "train_speed(iter/s)": 0.716585 + }, + { + "epoch": 1.51488, + "grad_norm": 0.6869070364933458, + "learning_rate": 2.665671063096505e-06, + "loss": 0.29720231890678406, + "memory(GiB)": 77.0, + "step": 4734, + "token_acc": 0.9515841936632253, + "train_speed(iter/s)": 0.716313 + }, + { + "epoch": 1.5152, + "grad_norm": 0.6619521448492247, + "learning_rate": 2.6647911159936573e-06, + "loss": 0.43073612451553345, + "memory(GiB)": 77.0, + "step": 4735, + "token_acc": 0.8925484699836487, + "train_speed(iter/s)": 0.716018 + }, + { + "epoch": 1.51552, + "grad_norm": 0.6139380649795748, + "learning_rate": 2.6639111483854146e-06, + "loss": 0.34235280752182007, + "memory(GiB)": 77.0, + "step": 4736, + "token_acc": 0.9268370607028754, + "train_speed(iter/s)": 0.715722 + }, + { + "epoch": 1.5158399999999999, + "grad_norm": 0.6750736443509845, + "learning_rate": 2.6630311603812725e-06, + "loss": 0.32904672622680664, + "memory(GiB)": 77.0, + "step": 4737, + "token_acc": 0.9377203290246768, + "train_speed(iter/s)": 0.715442 + }, + { + "epoch": 1.51616, + "grad_norm": 0.6133202817807053, + "learning_rate": 2.6621511520907307e-06, + "loss": 0.31487709283828735, + "memory(GiB)": 77.0, + "step": 4738, + "token_acc": 0.9560106181266591, + "train_speed(iter/s)": 0.715139 + }, + { + "epoch": 1.51648, + "grad_norm": 0.7465957649126232, + "learning_rate": 2.6612711236232915e-06, + "loss": 0.37283802032470703, + "memory(GiB)": 77.0, + "step": 4739, + "token_acc": 0.83875, + "train_speed(iter/s)": 0.714865 + }, + { + "epoch": 1.5168, + 
"grad_norm": 0.6612368262919883, + "learning_rate": 2.6603910750884577e-06, + "loss": 0.3253559470176697, + "memory(GiB)": 77.0, + "step": 4740, + "token_acc": 0.9092680211522405, + "train_speed(iter/s)": 0.714582 + }, + { + "epoch": 1.51712, + "grad_norm": 0.6750668158357804, + "learning_rate": 2.6595110065957373e-06, + "loss": 0.27462655305862427, + "memory(GiB)": 77.0, + "step": 4741, + "token_acc": 0.9309838472834068, + "train_speed(iter/s)": 0.714318 + }, + { + "epoch": 1.5174400000000001, + "grad_norm": 0.7011582815393044, + "learning_rate": 2.6586309182546393e-06, + "loss": 0.2977725863456726, + "memory(GiB)": 77.0, + "step": 4742, + "token_acc": 0.9018385789965722, + "train_speed(iter/s)": 0.714023 + }, + { + "epoch": 1.51776, + "grad_norm": 0.6663249502190671, + "learning_rate": 2.657750810174675e-06, + "loss": 0.32895612716674805, + "memory(GiB)": 77.0, + "step": 4743, + "token_acc": 0.867982212340189, + "train_speed(iter/s)": 0.713717 + }, + { + "epoch": 1.5180799999999999, + "grad_norm": 0.6390334073450004, + "learning_rate": 2.6568706824653596e-06, + "loss": 0.29603898525238037, + "memory(GiB)": 77.0, + "step": 4744, + "token_acc": 0.9174983366600133, + "train_speed(iter/s)": 0.713444 + }, + { + "epoch": 1.5184, + "grad_norm": 0.686574223015513, + "learning_rate": 2.6559905352362093e-06, + "loss": 0.33836841583251953, + "memory(GiB)": 77.0, + "step": 4745, + "token_acc": 0.8763681592039801, + "train_speed(iter/s)": 0.713076 + }, + { + "epoch": 1.51872, + "grad_norm": 0.6367961025401296, + "learning_rate": 2.655110368596742e-06, + "loss": 0.39797985553741455, + "memory(GiB)": 77.0, + "step": 4746, + "token_acc": 0.8876, + "train_speed(iter/s)": 0.71279 + }, + { + "epoch": 1.51904, + "grad_norm": 0.6614297299459857, + "learning_rate": 2.6542301826564805e-06, + "loss": 0.33357882499694824, + "memory(GiB)": 77.0, + "step": 4747, + "token_acc": 0.8614371600343544, + "train_speed(iter/s)": 0.712469 + }, + { + "epoch": 1.51936, + "grad_norm": 0.6238524429392563, + "learning_rate": 2.6533499775249482e-06, + "loss": 0.36464834213256836, + "memory(GiB)": 77.0, + "step": 4748, + "token_acc": 0.8547612319864368, + "train_speed(iter/s)": 0.71216 + }, + { + "epoch": 1.5196800000000001, + "grad_norm": 0.6147776753251571, + "learning_rate": 2.6524697533116716e-06, + "loss": 0.3402598798274994, + "memory(GiB)": 77.0, + "step": 4749, + "token_acc": 0.8739938080495356, + "train_speed(iter/s)": 0.711871 + }, + { + "epoch": 1.52, + "grad_norm": 0.6258109699243517, + "learning_rate": 2.651589510126179e-06, + "loss": 0.4153035283088684, + "memory(GiB)": 77.0, + "step": 4750, + "token_acc": 0.8358437579780444, + "train_speed(iter/s)": 0.71157 + }, + { + "epoch": 1.52032, + "grad_norm": 0.6472686384349262, + "learning_rate": 2.6507092480780015e-06, + "loss": 0.36649858951568604, + "memory(GiB)": 77.0, + "step": 4751, + "token_acc": 0.9378997867803838, + "train_speed(iter/s)": 0.711301 + }, + { + "epoch": 1.52064, + "grad_norm": 0.6125082848911299, + "learning_rate": 2.6498289672766725e-06, + "loss": 0.2913636863231659, + "memory(GiB)": 77.0, + "step": 4752, + "token_acc": 0.9081983308787432, + "train_speed(iter/s)": 0.711004 + }, + { + "epoch": 1.52096, + "grad_norm": 0.631239449000378, + "learning_rate": 2.6489486678317277e-06, + "loss": 0.35211917757987976, + "memory(GiB)": 77.0, + "step": 4753, + "token_acc": 0.9200385356454721, + "train_speed(iter/s)": 0.710678 + }, + { + "epoch": 1.52128, + "grad_norm": 0.6411305310818092, + "learning_rate": 2.648068349852706e-06, + "loss": 0.2921507954597473, + 
"memory(GiB)": 77.0, + "step": 4754, + "token_acc": 0.8977329974811084, + "train_speed(iter/s)": 0.710405 + }, + { + "epoch": 1.5215999999999998, + "grad_norm": 0.6567161779378938, + "learning_rate": 2.647188013449145e-06, + "loss": 0.31651562452316284, + "memory(GiB)": 77.0, + "step": 4755, + "token_acc": 0.9326821541710665, + "train_speed(iter/s)": 0.710136 + }, + { + "epoch": 1.5219200000000002, + "grad_norm": 0.6970010476904884, + "learning_rate": 2.6463076587305896e-06, + "loss": 0.3416633605957031, + "memory(GiB)": 77.0, + "step": 4756, + "token_acc": 0.8971488542942102, + "train_speed(iter/s)": 0.709861 + }, + { + "epoch": 1.52224, + "grad_norm": 0.6622492057734833, + "learning_rate": 2.645427285806585e-06, + "loss": 0.37915608286857605, + "memory(GiB)": 77.0, + "step": 4757, + "token_acc": 0.8619718309859155, + "train_speed(iter/s)": 0.709591 + }, + { + "epoch": 1.52256, + "grad_norm": 0.747297542277395, + "learning_rate": 2.6445468947866767e-06, + "loss": 0.3617280125617981, + "memory(GiB)": 77.0, + "step": 4758, + "token_acc": 0.8417280509655585, + "train_speed(iter/s)": 0.709294 + }, + { + "epoch": 1.52288, + "grad_norm": 0.6572536675276169, + "learning_rate": 2.643666485780415e-06, + "loss": 0.3195324242115021, + "memory(GiB)": 77.0, + "step": 4759, + "token_acc": 0.8909426987060998, + "train_speed(iter/s)": 0.708988 + }, + { + "epoch": 1.5232, + "grad_norm": 0.6756818775447865, + "learning_rate": 2.6427860588973513e-06, + "loss": 0.38988614082336426, + "memory(GiB)": 77.0, + "step": 4760, + "token_acc": 0.8528528528528528, + "train_speed(iter/s)": 0.708707 + }, + { + "epoch": 1.52352, + "grad_norm": 0.6341569815800914, + "learning_rate": 2.6419056142470404e-06, + "loss": 0.33470189571380615, + "memory(GiB)": 77.0, + "step": 4761, + "token_acc": 0.9347222222222222, + "train_speed(iter/s)": 0.708388 + }, + { + "epoch": 1.5238399999999999, + "grad_norm": 0.6426407901737332, + "learning_rate": 2.641025151939037e-06, + "loss": 0.3570539951324463, + "memory(GiB)": 77.0, + "step": 4762, + "token_acc": 0.8888888888888888, + "train_speed(iter/s)": 0.708113 + }, + { + "epoch": 1.52416, + "grad_norm": 0.7092268243666138, + "learning_rate": 2.6401446720828993e-06, + "loss": 0.37220457196235657, + "memory(GiB)": 77.0, + "step": 4763, + "token_acc": 0.8844161958568738, + "train_speed(iter/s)": 0.707853 + }, + { + "epoch": 1.52448, + "grad_norm": 0.5686812491581695, + "learning_rate": 2.639264174788188e-06, + "loss": 0.24341900646686554, + "memory(GiB)": 77.0, + "step": 4764, + "token_acc": 0.9056917324470295, + "train_speed(iter/s)": 0.707571 + }, + { + "epoch": 1.5248, + "grad_norm": 0.7244377964209284, + "learning_rate": 2.6383836601644667e-06, + "loss": 0.2915639877319336, + "memory(GiB)": 77.0, + "step": 4765, + "token_acc": 0.9126869271587071, + "train_speed(iter/s)": 0.707306 + }, + { + "epoch": 1.52512, + "grad_norm": 0.617741379480452, + "learning_rate": 2.6375031283213e-06, + "loss": 0.36844730377197266, + "memory(GiB)": 77.0, + "step": 4766, + "token_acc": 0.8818801753531418, + "train_speed(iter/s)": 0.707013 + }, + { + "epoch": 1.5254400000000001, + "grad_norm": 0.6502583795472656, + "learning_rate": 2.6366225793682537e-06, + "loss": 0.33861690759658813, + "memory(GiB)": 77.0, + "step": 4767, + "token_acc": 0.9014193548387097, + "train_speed(iter/s)": 0.706748 + }, + { + "epoch": 1.52576, + "grad_norm": 0.6085000960326162, + "learning_rate": 2.635742013414898e-06, + "loss": 0.27605652809143066, + "memory(GiB)": 77.0, + "step": 4768, + "token_acc": 0.9250962548127406, + 
"train_speed(iter/s)": 0.706486 + }, + { + "epoch": 1.5260799999999999, + "grad_norm": 0.7130356561849527, + "learning_rate": 2.6348614305708032e-06, + "loss": 0.36268138885498047, + "memory(GiB)": 77.0, + "step": 4769, + "token_acc": 0.8853456669912366, + "train_speed(iter/s)": 0.706188 + }, + { + "epoch": 1.5264, + "grad_norm": 0.6500753146240896, + "learning_rate": 2.6339808309455423e-06, + "loss": 0.3600583076477051, + "memory(GiB)": 77.0, + "step": 4770, + "token_acc": 0.8846907360761265, + "train_speed(iter/s)": 0.705894 + }, + { + "epoch": 1.52672, + "grad_norm": 0.685679145968327, + "learning_rate": 2.6331002146486917e-06, + "loss": 0.4335525631904602, + "memory(GiB)": 77.0, + "step": 4771, + "token_acc": 0.8388821385176185, + "train_speed(iter/s)": 0.705631 + }, + { + "epoch": 1.52704, + "grad_norm": 0.6222648082741666, + "learning_rate": 2.632219581789828e-06, + "loss": 0.3845112919807434, + "memory(GiB)": 77.0, + "step": 4772, + "token_acc": 0.8889771598808341, + "train_speed(iter/s)": 0.705353 + }, + { + "epoch": 1.52736, + "grad_norm": 0.6532272727557439, + "learning_rate": 2.631338932478531e-06, + "loss": 0.4158073365688324, + "memory(GiB)": 77.0, + "step": 4773, + "token_acc": 0.8380539058931019, + "train_speed(iter/s)": 0.705061 + }, + { + "epoch": 1.5276800000000001, + "grad_norm": 0.6224111214850214, + "learning_rate": 2.630458266824382e-06, + "loss": 0.266485333442688, + "memory(GiB)": 77.0, + "step": 4774, + "token_acc": 0.9510140405616224, + "train_speed(iter/s)": 0.704778 + }, + { + "epoch": 1.528, + "grad_norm": 0.7029577337653709, + "learning_rate": 2.6295775849369664e-06, + "loss": 0.2996477782726288, + "memory(GiB)": 77.0, + "step": 4775, + "token_acc": 0.9364016736401674, + "train_speed(iter/s)": 0.704514 + }, + { + "epoch": 1.52832, + "grad_norm": 0.5763812129255445, + "learning_rate": 2.6286968869258666e-06, + "loss": 0.27948611974716187, + "memory(GiB)": 77.0, + "step": 4776, + "token_acc": 0.8732750242954325, + "train_speed(iter/s)": 0.704228 + }, + { + "epoch": 1.52864, + "grad_norm": 0.6119663874346979, + "learning_rate": 2.6278161729006724e-06, + "loss": 0.2686648964881897, + "memory(GiB)": 77.0, + "step": 4777, + "token_acc": 0.9440279860069966, + "train_speed(iter/s)": 0.703964 + }, + { + "epoch": 1.52896, + "grad_norm": 0.64003432803051, + "learning_rate": 2.6269354429709727e-06, + "loss": 0.3267863988876343, + "memory(GiB)": 77.0, + "step": 4778, + "token_acc": 0.9502217841301134, + "train_speed(iter/s)": 0.70367 + }, + { + "epoch": 1.52928, + "grad_norm": 0.6419853801340161, + "learning_rate": 2.626054697246359e-06, + "loss": 0.34756818413734436, + "memory(GiB)": 77.0, + "step": 4779, + "token_acc": 0.9449645390070922, + "train_speed(iter/s)": 0.703412 + }, + { + "epoch": 1.5295999999999998, + "grad_norm": 0.6361829303561355, + "learning_rate": 2.6251739358364254e-06, + "loss": 0.3465234637260437, + "memory(GiB)": 77.0, + "step": 4780, + "token_acc": 0.8897442264713186, + "train_speed(iter/s)": 0.703105 + }, + { + "epoch": 1.52992, + "grad_norm": 0.6656725028285471, + "learning_rate": 2.624293158850766e-06, + "loss": 0.4343118667602539, + "memory(GiB)": 77.0, + "step": 4781, + "token_acc": 0.9721105527638191, + "train_speed(iter/s)": 0.702828 + }, + { + "epoch": 1.53024, + "grad_norm": 0.6184708586881983, + "learning_rate": 2.6234123663989795e-06, + "loss": 0.3752273917198181, + "memory(GiB)": 77.0, + "step": 4782, + "token_acc": 0.8296749683410721, + "train_speed(iter/s)": 0.702548 + }, + { + "epoch": 1.53056, + "grad_norm": 0.6656155499570598, + 
"learning_rate": 2.622531558590665e-06, + "loss": 0.36105823516845703, + "memory(GiB)": 77.0, + "step": 4783, + "token_acc": 0.8474977790938703, + "train_speed(iter/s)": 0.702284 + }, + { + "epoch": 1.53088, + "grad_norm": 0.6264745998442833, + "learning_rate": 2.6216507355354243e-06, + "loss": 0.2404036670923233, + "memory(GiB)": 77.0, + "step": 4784, + "token_acc": 0.8687065368567455, + "train_speed(iter/s)": 0.702025 + }, + { + "epoch": 1.5312000000000001, + "grad_norm": 0.6294518354937201, + "learning_rate": 2.620769897342859e-06, + "loss": 0.3246634900569916, + "memory(GiB)": 77.0, + "step": 4785, + "token_acc": 0.8832391713747646, + "train_speed(iter/s)": 0.701731 + }, + { + "epoch": 1.53152, + "grad_norm": 0.628154063505337, + "learning_rate": 2.6198890441225756e-06, + "loss": 0.24619506299495697, + "memory(GiB)": 77.0, + "step": 4786, + "token_acc": 0.912124060150376, + "train_speed(iter/s)": 0.701476 + }, + { + "epoch": 1.5318399999999999, + "grad_norm": 0.6536108657546605, + "learning_rate": 2.6190081759841802e-06, + "loss": 0.33632907271385193, + "memory(GiB)": 77.0, + "step": 4787, + "token_acc": 0.8600417785735601, + "train_speed(iter/s)": 0.701207 + }, + { + "epoch": 1.53216, + "grad_norm": 0.6885900060738377, + "learning_rate": 2.6181272930372815e-06, + "loss": 0.35317087173461914, + "memory(GiB)": 77.0, + "step": 4788, + "token_acc": 0.9426674713337356, + "train_speed(iter/s)": 0.700939 + }, + { + "epoch": 1.53248, + "grad_norm": 0.6667535333453637, + "learning_rate": 2.61724639539149e-06, + "loss": 0.2960125803947449, + "memory(GiB)": 77.0, + "step": 4789, + "token_acc": 0.9455872594558726, + "train_speed(iter/s)": 0.700671 + }, + { + "epoch": 1.5328, + "grad_norm": 0.664594828050636, + "learning_rate": 2.6163654831564193e-06, + "loss": 0.3385806679725647, + "memory(GiB)": 77.0, + "step": 4790, + "token_acc": 0.9209435766656041, + "train_speed(iter/s)": 0.700367 + }, + { + "epoch": 1.53312, + "grad_norm": 0.6962828759087488, + "learning_rate": 2.6154845564416833e-06, + "loss": 0.3471603989601135, + "memory(GiB)": 77.0, + "step": 4791, + "token_acc": 0.9208211143695014, + "train_speed(iter/s)": 0.700111 + }, + { + "epoch": 1.5334400000000001, + "grad_norm": 0.6923369766713516, + "learning_rate": 2.6146036153568965e-06, + "loss": 0.3609910011291504, + "memory(GiB)": 77.0, + "step": 4792, + "token_acc": 0.9379604497867391, + "train_speed(iter/s)": 0.699839 + }, + { + "epoch": 1.53376, + "grad_norm": 0.6674048149264711, + "learning_rate": 2.613722660011678e-06, + "loss": 0.2944141626358032, + "memory(GiB)": 77.0, + "step": 4793, + "token_acc": 0.875515818431912, + "train_speed(iter/s)": 0.69959 + }, + { + "epoch": 1.5340799999999999, + "grad_norm": 0.781106672328188, + "learning_rate": 2.612841690515648e-06, + "loss": 0.36534416675567627, + "memory(GiB)": 77.0, + "step": 4794, + "token_acc": 0.8712564543889845, + "train_speed(iter/s)": 0.699315 + }, + { + "epoch": 1.5344, + "grad_norm": 0.6872937126639777, + "learning_rate": 2.6119607069784265e-06, + "loss": 0.34938210248947144, + "memory(GiB)": 77.0, + "step": 4795, + "token_acc": 0.9333142530051517, + "train_speed(iter/s)": 0.699015 + }, + { + "epoch": 1.53472, + "grad_norm": 0.6145731721029524, + "learning_rate": 2.6110797095096376e-06, + "loss": 0.27072933316230774, + "memory(GiB)": 77.0, + "step": 4796, + "token_acc": 0.9007744433688286, + "train_speed(iter/s)": 0.698751 + }, + { + "epoch": 1.53504, + "grad_norm": 0.7223539571734467, + "learning_rate": 2.6101986982189052e-06, + "loss": 0.26495105028152466, + 
"memory(GiB)": 77.0, + "step": 4797, + "token_acc": 0.8841935483870967, + "train_speed(iter/s)": 0.698503 + }, + { + "epoch": 1.5353599999999998, + "grad_norm": 0.6677598873553057, + "learning_rate": 2.6093176732158577e-06, + "loss": 0.4217151403427124, + "memory(GiB)": 77.0, + "step": 4798, + "token_acc": 0.8872180451127819, + "train_speed(iter/s)": 0.69818 + }, + { + "epoch": 1.5356800000000002, + "grad_norm": 0.6066832880767971, + "learning_rate": 2.608436634610121e-06, + "loss": 0.3305690288543701, + "memory(GiB)": 77.0, + "step": 4799, + "token_acc": 0.9277079303675049, + "train_speed(iter/s)": 0.697903 + }, + { + "epoch": 1.536, + "grad_norm": 0.6756999210940616, + "learning_rate": 2.6075555825113265e-06, + "loss": 0.31703341007232666, + "memory(GiB)": 77.0, + "step": 4800, + "token_acc": 0.9576185671039354, + "train_speed(iter/s)": 0.697649 + }, + { + "epoch": 1.53632, + "grad_norm": 0.5916971379670732, + "learning_rate": 2.606674517029106e-06, + "loss": 0.31121891736984253, + "memory(GiB)": 77.0, + "step": 4801, + "token_acc": 0.8776500638569604, + "train_speed(iter/s)": 0.697383 + }, + { + "epoch": 1.53664, + "grad_norm": 0.6419403438696887, + "learning_rate": 2.6057934382730917e-06, + "loss": 0.3381812572479248, + "memory(GiB)": 77.0, + "step": 4802, + "token_acc": 0.9168273202974387, + "train_speed(iter/s)": 0.697097 + }, + { + "epoch": 1.53696, + "grad_norm": 0.708100915660331, + "learning_rate": 2.6049123463529196e-06, + "loss": 0.48406535387039185, + "memory(GiB)": 77.0, + "step": 4803, + "token_acc": 0.8245459871118922, + "train_speed(iter/s)": 0.696843 + }, + { + "epoch": 1.53728, + "grad_norm": 0.687404036753763, + "learning_rate": 2.6040312413782257e-06, + "loss": 0.3494476079940796, + "memory(GiB)": 77.0, + "step": 4804, + "token_acc": 0.8814636283961438, + "train_speed(iter/s)": 0.69657 + }, + { + "epoch": 1.5375999999999999, + "grad_norm": 0.622685573111054, + "learning_rate": 2.603150123458649e-06, + "loss": 0.3044223487377167, + "memory(GiB)": 77.0, + "step": 4805, + "token_acc": 0.9215189873417722, + "train_speed(iter/s)": 0.696304 + }, + { + "epoch": 1.53792, + "grad_norm": 0.6334821442827979, + "learning_rate": 2.6022689927038287e-06, + "loss": 0.265593558549881, + "memory(GiB)": 77.0, + "step": 4806, + "token_acc": 0.8819379115710254, + "train_speed(iter/s)": 0.696032 + }, + { + "epoch": 1.53824, + "grad_norm": 0.6343444382761638, + "learning_rate": 2.601387849223406e-06, + "loss": 0.3496648073196411, + "memory(GiB)": 77.0, + "step": 4807, + "token_acc": 0.8624454148471615, + "train_speed(iter/s)": 0.69577 + }, + { + "epoch": 1.53856, + "grad_norm": 0.640858862015374, + "learning_rate": 2.6005066931270244e-06, + "loss": 0.32577988505363464, + "memory(GiB)": 77.0, + "step": 4808, + "token_acc": 0.8727272727272727, + "train_speed(iter/s)": 0.695508 + }, + { + "epoch": 1.53888, + "grad_norm": 0.6255078712258869, + "learning_rate": 2.5996255245243285e-06, + "loss": 0.3722435235977173, + "memory(GiB)": 77.0, + "step": 4809, + "token_acc": 0.8556122448979592, + "train_speed(iter/s)": 0.695239 + }, + { + "epoch": 1.5392000000000001, + "grad_norm": 0.6758130686605123, + "learning_rate": 2.598744343524964e-06, + "loss": 0.3108801245689392, + "memory(GiB)": 77.0, + "step": 4810, + "token_acc": 0.9179138321995465, + "train_speed(iter/s)": 0.694995 + }, + { + "epoch": 1.53952, + "grad_norm": 0.5874902931580136, + "learning_rate": 2.5978631502385796e-06, + "loss": 0.29763099551200867, + "memory(GiB)": 77.0, + "step": 4811, + "token_acc": 0.9311811811811812, + 
"train_speed(iter/s)": 0.694715 + }, + { + "epoch": 1.5398399999999999, + "grad_norm": 0.7137208004592659, + "learning_rate": 2.596981944774823e-06, + "loss": 0.3585655093193054, + "memory(GiB)": 77.0, + "step": 4812, + "token_acc": 0.9095303867403315, + "train_speed(iter/s)": 0.694443 + }, + { + "epoch": 1.54016, + "grad_norm": 0.6806362341628781, + "learning_rate": 2.5961007272433464e-06, + "loss": 0.3099338114261627, + "memory(GiB)": 77.0, + "step": 4813, + "token_acc": 0.9226145755071374, + "train_speed(iter/s)": 0.694172 + }, + { + "epoch": 1.54048, + "grad_norm": 0.6942628597498298, + "learning_rate": 2.595219497753801e-06, + "loss": 0.32419756054878235, + "memory(GiB)": 77.0, + "step": 4814, + "token_acc": 0.8743068391866913, + "train_speed(iter/s)": 0.693895 + }, + { + "epoch": 1.5408, + "grad_norm": 0.6897651205794959, + "learning_rate": 2.594338256415841e-06, + "loss": 0.29877346754074097, + "memory(GiB)": 77.0, + "step": 4815, + "token_acc": 0.9049520766773163, + "train_speed(iter/s)": 0.693635 + }, + { + "epoch": 1.54112, + "grad_norm": 0.6399138228529464, + "learning_rate": 2.5934570033391217e-06, + "loss": 0.3305428624153137, + "memory(GiB)": 77.0, + "step": 4816, + "token_acc": 0.8705963619617776, + "train_speed(iter/s)": 0.693368 + }, + { + "epoch": 1.5414400000000001, + "grad_norm": 0.6388468118692917, + "learning_rate": 2.5925757386332997e-06, + "loss": 0.3298676013946533, + "memory(GiB)": 77.0, + "step": 4817, + "token_acc": 0.8815954773869347, + "train_speed(iter/s)": 0.693093 + }, + { + "epoch": 1.54176, + "grad_norm": 0.5897388991547505, + "learning_rate": 2.5916944624080324e-06, + "loss": 0.3025665879249573, + "memory(GiB)": 77.0, + "step": 4818, + "token_acc": 0.9210692485792464, + "train_speed(iter/s)": 0.692775 + }, + { + "epoch": 1.54208, + "grad_norm": 0.6316928050884978, + "learning_rate": 2.590813174772981e-06, + "loss": 0.34252509474754333, + "memory(GiB)": 77.0, + "step": 4819, + "token_acc": 0.8819734345351044, + "train_speed(iter/s)": 0.692513 + }, + { + "epoch": 1.5424, + "grad_norm": 0.6696121368883609, + "learning_rate": 2.5899318758378046e-06, + "loss": 0.341113418340683, + "memory(GiB)": 77.0, + "step": 4820, + "token_acc": 0.8499478985758944, + "train_speed(iter/s)": 0.692266 + }, + { + "epoch": 1.54272, + "grad_norm": 1.3132329250582209, + "learning_rate": 2.5890505657121654e-06, + "loss": 0.3405185341835022, + "memory(GiB)": 77.0, + "step": 4821, + "token_acc": 0.9244417252982563, + "train_speed(iter/s)": 0.692003 + }, + { + "epoch": 1.54304, + "grad_norm": 0.6363235063190472, + "learning_rate": 2.588169244505729e-06, + "loss": 0.27507284283638, + "memory(GiB)": 77.0, + "step": 4822, + "token_acc": 0.9437991757212439, + "train_speed(iter/s)": 0.691759 + }, + { + "epoch": 1.5433599999999998, + "grad_norm": 0.7425613685595372, + "learning_rate": 2.5872879123281596e-06, + "loss": 0.3620428442955017, + "memory(GiB)": 77.0, + "step": 4823, + "token_acc": 0.8187778037930227, + "train_speed(iter/s)": 0.691519 + }, + { + "epoch": 1.5436800000000002, + "grad_norm": 0.786776843388865, + "learning_rate": 2.5864065692891236e-06, + "loss": 0.402499258518219, + "memory(GiB)": 77.0, + "step": 4824, + "token_acc": 0.8948872657986663, + "train_speed(iter/s)": 0.69125 + }, + { + "epoch": 1.544, + "grad_norm": 0.6479135612676804, + "learning_rate": 2.5855252154982897e-06, + "loss": 0.36612915992736816, + "memory(GiB)": 77.0, + "step": 4825, + "token_acc": 0.9183074989526603, + "train_speed(iter/s)": 0.690989 + }, + { + "epoch": 1.54432, + "grad_norm": 
0.6959810750072357, + "learning_rate": 2.584643851065326e-06, + "loss": 0.27548807859420776, + "memory(GiB)": 77.0, + "step": 4826, + "token_acc": 0.9292506656523393, + "train_speed(iter/s)": 0.690737 + }, + { + "epoch": 1.54464, + "grad_norm": 0.6551893988995954, + "learning_rate": 2.5837624760999037e-06, + "loss": 0.3222540318965912, + "memory(GiB)": 77.0, + "step": 4827, + "token_acc": 0.897069508355534, + "train_speed(iter/s)": 0.69044 + }, + { + "epoch": 1.54496, + "grad_norm": 0.6729421160563703, + "learning_rate": 2.5828810907116937e-06, + "loss": 0.295387864112854, + "memory(GiB)": 77.0, + "step": 4828, + "token_acc": 0.923904052936311, + "train_speed(iter/s)": 0.690193 + }, + { + "epoch": 1.54528, + "grad_norm": 0.6375601922290144, + "learning_rate": 2.5819996950103692e-06, + "loss": 0.35451579093933105, + "memory(GiB)": 77.0, + "step": 4829, + "token_acc": 0.823739934387116, + "train_speed(iter/s)": 0.689947 + }, + { + "epoch": 1.5455999999999999, + "grad_norm": 0.6741779420957089, + "learning_rate": 2.581118289105606e-06, + "loss": 0.3826799988746643, + "memory(GiB)": 77.0, + "step": 4830, + "token_acc": 0.8842794759825328, + "train_speed(iter/s)": 0.689664 + }, + { + "epoch": 1.54592, + "grad_norm": 0.6450725000368063, + "learning_rate": 2.5802368731070782e-06, + "loss": 0.3995567560195923, + "memory(GiB)": 77.0, + "step": 4831, + "token_acc": 0.8414296501384344, + "train_speed(iter/s)": 0.689406 + }, + { + "epoch": 1.54624, + "grad_norm": 0.6483738811829447, + "learning_rate": 2.5793554471244644e-06, + "loss": 0.4214484393596649, + "memory(GiB)": 77.0, + "step": 4832, + "token_acc": 0.8499466002135991, + "train_speed(iter/s)": 0.689126 + }, + { + "epoch": 1.54656, + "grad_norm": 0.6244190478542414, + "learning_rate": 2.5784740112674416e-06, + "loss": 0.38324809074401855, + "memory(GiB)": 77.0, + "step": 4833, + "token_acc": 0.9074635922330098, + "train_speed(iter/s)": 0.688856 + }, + { + "epoch": 1.54688, + "grad_norm": 0.658782420472657, + "learning_rate": 2.5775925656456895e-06, + "loss": 0.3087437152862549, + "memory(GiB)": 77.0, + "step": 4834, + "token_acc": 0.9104530859793079, + "train_speed(iter/s)": 0.688594 + }, + { + "epoch": 1.5472000000000001, + "grad_norm": 0.7625411900181877, + "learning_rate": 2.576711110368888e-06, + "loss": 0.3462868332862854, + "memory(GiB)": 77.0, + "step": 4835, + "token_acc": 0.8970270270270271, + "train_speed(iter/s)": 0.688318 + }, + { + "epoch": 1.54752, + "grad_norm": 0.5985862520527628, + "learning_rate": 2.57582964554672e-06, + "loss": 0.33266937732696533, + "memory(GiB)": 77.0, + "step": 4836, + "token_acc": 0.9194568016857879, + "train_speed(iter/s)": 0.688043 + }, + { + "epoch": 1.5478399999999999, + "grad_norm": 0.6579603679669361, + "learning_rate": 2.5749481712888675e-06, + "loss": 0.36969852447509766, + "memory(GiB)": 77.0, + "step": 4837, + "token_acc": 0.8778335005015045, + "train_speed(iter/s)": 0.687786 + }, + { + "epoch": 1.54816, + "grad_norm": 0.5890956287055482, + "learning_rate": 2.5740666877050156e-06, + "loss": 0.30150508880615234, + "memory(GiB)": 77.0, + "step": 4838, + "token_acc": 0.8943139678615575, + "train_speed(iter/s)": 0.687527 + }, + { + "epoch": 1.54848, + "grad_norm": 0.7028318052667664, + "learning_rate": 2.5731851949048494e-06, + "loss": 0.32663923501968384, + "memory(GiB)": 77.0, + "step": 4839, + "token_acc": 0.902676399026764, + "train_speed(iter/s)": 0.687274 + }, + { + "epoch": 1.5488, + "grad_norm": 0.6156732074688968, + "learning_rate": 2.572303692998056e-06, + "loss": 0.3451542854309082, + 
"memory(GiB)": 77.0, + "step": 4840, + "token_acc": 0.9181403308613805, + "train_speed(iter/s)": 0.68702 + }, + { + "epoch": 1.54912, + "grad_norm": 0.7057815368627309, + "learning_rate": 2.571422182094321e-06, + "loss": 0.42362484335899353, + "memory(GiB)": 77.0, + "step": 4841, + "token_acc": 0.9164563351842504, + "train_speed(iter/s)": 0.686781 + }, + { + "epoch": 1.5494400000000002, + "grad_norm": 0.6588064090363813, + "learning_rate": 2.5705406623033353e-06, + "loss": 0.3663850426673889, + "memory(GiB)": 77.0, + "step": 4842, + "token_acc": 0.8589426321709787, + "train_speed(iter/s)": 0.686521 + }, + { + "epoch": 1.54976, + "grad_norm": 0.6449608271112789, + "learning_rate": 2.5696591337347876e-06, + "loss": 0.39328885078430176, + "memory(GiB)": 77.0, + "step": 4843, + "token_acc": 0.9044332359968145, + "train_speed(iter/s)": 0.686266 + }, + { + "epoch": 1.55008, + "grad_norm": 0.690937287825779, + "learning_rate": 2.568777596498369e-06, + "loss": 0.35112226009368896, + "memory(GiB)": 77.0, + "step": 4844, + "token_acc": 0.8768642447418739, + "train_speed(iter/s)": 0.686024 + }, + { + "epoch": 1.5504, + "grad_norm": 0.6124647671823147, + "learning_rate": 2.5678960507037716e-06, + "loss": 0.37150731682777405, + "memory(GiB)": 77.0, + "step": 4845, + "token_acc": 0.9207920792079208, + "train_speed(iter/s)": 0.685759 + }, + { + "epoch": 1.55072, + "grad_norm": 0.6500038562285982, + "learning_rate": 2.567014496460688e-06, + "loss": 0.29830941557884216, + "memory(GiB)": 77.0, + "step": 4846, + "token_acc": 0.9556198745779064, + "train_speed(iter/s)": 0.685507 + }, + { + "epoch": 1.55104, + "grad_norm": 0.654240515551833, + "learning_rate": 2.566132933878814e-06, + "loss": 0.35341161489486694, + "memory(GiB)": 77.0, + "step": 4847, + "token_acc": 0.8870967741935484, + "train_speed(iter/s)": 0.685262 + }, + { + "epoch": 1.5513599999999999, + "grad_norm": 0.6299037938333505, + "learning_rate": 2.565251363067843e-06, + "loss": 0.33682748675346375, + "memory(GiB)": 77.0, + "step": 4848, + "token_acc": 0.8957886044591247, + "train_speed(iter/s)": 0.684981 + }, + { + "epoch": 1.55168, + "grad_norm": 0.6447617068709165, + "learning_rate": 2.5643697841374722e-06, + "loss": 0.3523980975151062, + "memory(GiB)": 77.0, + "step": 4849, + "token_acc": 0.9346718609529517, + "train_speed(iter/s)": 0.68472 + }, + { + "epoch": 1.552, + "grad_norm": 0.6455778098310324, + "learning_rate": 2.563488197197398e-06, + "loss": 0.3357963263988495, + "memory(GiB)": 77.0, + "step": 4850, + "token_acc": 0.9515588765782015, + "train_speed(iter/s)": 0.684479 + }, + { + "epoch": 1.55232, + "grad_norm": 0.6663552109140959, + "learning_rate": 2.5626066023573186e-06, + "loss": 0.3964843153953552, + "memory(GiB)": 77.0, + "step": 4851, + "token_acc": 0.8951030238158951, + "train_speed(iter/s)": 0.684213 + }, + { + "epoch": 1.55264, + "grad_norm": 0.638734126751612, + "learning_rate": 2.5617249997269344e-06, + "loss": 0.319149374961853, + "memory(GiB)": 77.0, + "step": 4852, + "token_acc": 0.8809155583049799, + "train_speed(iter/s)": 0.683962 + }, + { + "epoch": 1.5529600000000001, + "grad_norm": 0.7116054450652514, + "learning_rate": 2.560843389415944e-06, + "loss": 0.3872937262058258, + "memory(GiB)": 77.0, + "step": 4853, + "token_acc": 0.9613636363636363, + "train_speed(iter/s)": 0.68372 + }, + { + "epoch": 1.55328, + "grad_norm": 0.7178350252113407, + "learning_rate": 2.5599617715340496e-06, + "loss": 0.3785374164581299, + "memory(GiB)": 77.0, + "step": 4854, + "token_acc": 0.9021974620860415, + "train_speed(iter/s)": 
0.683474 + }, + { + "epoch": 1.5535999999999999, + "grad_norm": 0.6121465572231484, + "learning_rate": 2.559080146190953e-06, + "loss": 0.31977736949920654, + "memory(GiB)": 77.0, + "step": 4855, + "token_acc": 0.939540059347181, + "train_speed(iter/s)": 0.683229 + }, + { + "epoch": 1.55392, + "grad_norm": 0.6827435934568595, + "learning_rate": 2.558198513496357e-06, + "loss": 0.3682222366333008, + "memory(GiB)": 77.0, + "step": 4856, + "token_acc": 0.8790553961597881, + "train_speed(iter/s)": 0.682977 + }, + { + "epoch": 1.55424, + "grad_norm": 0.7695030343433767, + "learning_rate": 2.5573168735599665e-06, + "loss": 0.3946138024330139, + "memory(GiB)": 77.0, + "step": 4857, + "token_acc": 0.9025752404591995, + "train_speed(iter/s)": 0.682741 + }, + { + "epoch": 1.55456, + "grad_norm": 0.693446984387124, + "learning_rate": 2.5564352264914844e-06, + "loss": 0.3746175169944763, + "memory(GiB)": 77.0, + "step": 4858, + "token_acc": 0.9031758326878389, + "train_speed(iter/s)": 0.682485 + }, + { + "epoch": 1.55488, + "grad_norm": 0.7046209414141007, + "learning_rate": 2.5555535724006177e-06, + "loss": 0.33637237548828125, + "memory(GiB)": 77.0, + "step": 4859, + "token_acc": 0.9461734693877552, + "train_speed(iter/s)": 0.682248 + }, + { + "epoch": 1.5552000000000001, + "grad_norm": 0.5747597244636281, + "learning_rate": 2.554671911397072e-06, + "loss": 0.2919706404209137, + "memory(GiB)": 77.0, + "step": 4860, + "token_acc": 0.9700619408121128, + "train_speed(iter/s)": 0.681972 + }, + { + "epoch": 1.55552, + "grad_norm": 0.6368352979858308, + "learning_rate": 2.553790243590556e-06, + "loss": 0.3471609354019165, + "memory(GiB)": 77.0, + "step": 4861, + "token_acc": 0.9203230735647239, + "train_speed(iter/s)": 0.681721 + }, + { + "epoch": 1.55584, + "grad_norm": 0.6262602958475411, + "learning_rate": 2.5529085690907767e-06, + "loss": 0.3298421800136566, + "memory(GiB)": 77.0, + "step": 4862, + "token_acc": 0.8717306186374315, + "train_speed(iter/s)": 0.681468 + }, + { + "epoch": 1.55616, + "grad_norm": 0.5905923503036009, + "learning_rate": 2.5520268880074446e-06, + "loss": 0.26914504170417786, + "memory(GiB)": 77.0, + "step": 4863, + "token_acc": 0.8970384721837807, + "train_speed(iter/s)": 0.68121 + }, + { + "epoch": 1.55648, + "grad_norm": 0.5894284158411892, + "learning_rate": 2.5511452004502684e-06, + "loss": 0.27371734380722046, + "memory(GiB)": 77.0, + "step": 4864, + "token_acc": 0.8823529411764706, + "train_speed(iter/s)": 0.680961 + }, + { + "epoch": 1.5568, + "grad_norm": 0.6093798928814175, + "learning_rate": 2.5502635065289593e-06, + "loss": 0.2577652037143707, + "memory(GiB)": 77.0, + "step": 4865, + "token_acc": 0.91306710133116, + "train_speed(iter/s)": 0.680727 + }, + { + "epoch": 1.5571199999999998, + "grad_norm": 0.5865148530373893, + "learning_rate": 2.5493818063532284e-06, + "loss": 0.2784649729728699, + "memory(GiB)": 77.0, + "step": 4866, + "token_acc": 0.9687029792356304, + "train_speed(iter/s)": 0.680479 + }, + { + "epoch": 1.5574400000000002, + "grad_norm": 0.625247717631896, + "learning_rate": 2.5485001000327884e-06, + "loss": 0.3513663411140442, + "memory(GiB)": 77.0, + "step": 4867, + "token_acc": 0.8519163763066202, + "train_speed(iter/s)": 0.680223 + }, + { + "epoch": 1.55776, + "grad_norm": 0.6686662428422545, + "learning_rate": 2.547618387677353e-06, + "loss": 0.3870820701122284, + "memory(GiB)": 77.0, + "step": 4868, + "token_acc": 0.8564422648238965, + "train_speed(iter/s)": 0.679974 + }, + { + "epoch": 1.55808, + "grad_norm": 0.6463805247422582, + 
"learning_rate": 2.546736669396635e-06, + "loss": 0.33903324604034424, + "memory(GiB)": 77.0, + "step": 4869, + "token_acc": 0.9033523537803139, + "train_speed(iter/s)": 0.679678 + }, + { + "epoch": 1.5584, + "grad_norm": 0.7023224766798152, + "learning_rate": 2.545854945300349e-06, + "loss": 0.35150420665740967, + "memory(GiB)": 77.0, + "step": 4870, + "token_acc": 0.880730659025788, + "train_speed(iter/s)": 0.679444 + }, + { + "epoch": 1.55872, + "grad_norm": 0.6600209435589642, + "learning_rate": 2.5449732154982105e-06, + "loss": 0.36687201261520386, + "memory(GiB)": 77.0, + "step": 4871, + "token_acc": 0.9336556540424389, + "train_speed(iter/s)": 0.6792 + }, + { + "epoch": 1.55904, + "grad_norm": 0.6347519802542843, + "learning_rate": 2.5440914800999354e-06, + "loss": 0.32812923192977905, + "memory(GiB)": 77.0, + "step": 4872, + "token_acc": 0.9106382978723404, + "train_speed(iter/s)": 0.678948 + }, + { + "epoch": 1.5593599999999999, + "grad_norm": 0.659722400447523, + "learning_rate": 2.543209739215241e-06, + "loss": 0.30715811252593994, + "memory(GiB)": 77.0, + "step": 4873, + "token_acc": 0.8767708567573645, + "train_speed(iter/s)": 0.678683 + }, + { + "epoch": 1.55968, + "grad_norm": 0.7629895249078313, + "learning_rate": 2.5423279929538443e-06, + "loss": 0.3044736981391907, + "memory(GiB)": 77.0, + "step": 4874, + "token_acc": 0.8904414099714195, + "train_speed(iter/s)": 0.678415 + }, + { + "epoch": 1.56, + "grad_norm": 0.6799081417763699, + "learning_rate": 2.5414462414254624e-06, + "loss": 0.35552120208740234, + "memory(GiB)": 77.0, + "step": 4875, + "token_acc": 0.907563025210084, + "train_speed(iter/s)": 0.678183 + }, + { + "epoch": 1.56032, + "grad_norm": 0.6285532966613054, + "learning_rate": 2.5405644847398157e-06, + "loss": 0.35089653730392456, + "memory(GiB)": 77.0, + "step": 4876, + "token_acc": 0.844992227403953, + "train_speed(iter/s)": 0.677937 + }, + { + "epoch": 1.56064, + "grad_norm": 0.6668201305760999, + "learning_rate": 2.5396827230066214e-06, + "loss": 0.32135847210884094, + "memory(GiB)": 77.0, + "step": 4877, + "token_acc": 0.8411474398519433, + "train_speed(iter/s)": 0.677705 + }, + { + "epoch": 1.5609600000000001, + "grad_norm": 0.6303061221613161, + "learning_rate": 2.538800956335601e-06, + "loss": 0.3250400424003601, + "memory(GiB)": 77.0, + "step": 4878, + "token_acc": 0.8936742934051144, + "train_speed(iter/s)": 0.677433 + }, + { + "epoch": 1.56128, + "grad_norm": 0.6962090345451861, + "learning_rate": 2.5379191848364747e-06, + "loss": 0.3802614212036133, + "memory(GiB)": 77.0, + "step": 4879, + "token_acc": 0.8967122490544079, + "train_speed(iter/s)": 0.677175 + }, + { + "epoch": 1.5615999999999999, + "grad_norm": 0.6923614440764838, + "learning_rate": 2.537037408618964e-06, + "loss": 0.2942928671836853, + "memory(GiB)": 77.0, + "step": 4880, + "token_acc": 0.874266295953043, + "train_speed(iter/s)": 0.676945 + }, + { + "epoch": 1.56192, + "grad_norm": 0.6347754603946685, + "learning_rate": 2.5361556277927903e-06, + "loss": 0.31950414180755615, + "memory(GiB)": 77.0, + "step": 4881, + "token_acc": 0.8997289972899729, + "train_speed(iter/s)": 0.676704 + }, + { + "epoch": 1.56224, + "grad_norm": 0.6237495374622387, + "learning_rate": 2.5352738424676756e-06, + "loss": 0.2970088720321655, + "memory(GiB)": 77.0, + "step": 4882, + "token_acc": 0.9317656938904052, + "train_speed(iter/s)": 0.67646 + }, + { + "epoch": 1.56256, + "grad_norm": 0.593568081274889, + "learning_rate": 2.5343920527533433e-06, + "loss": 0.29059505462646484, + "memory(GiB)": 77.0, + 
"step": 4883, + "token_acc": 0.8930844293593551, + "train_speed(iter/s)": 0.676216 + }, + { + "epoch": 1.56288, + "grad_norm": 0.5862163182623372, + "learning_rate": 2.5335102587595168e-06, + "loss": 0.31719309091567993, + "memory(GiB)": 77.0, + "step": 4884, + "token_acc": 0.8906291478630209, + "train_speed(iter/s)": 0.675895 + }, + { + "epoch": 1.5632000000000001, + "grad_norm": 0.6028509508093215, + "learning_rate": 2.5326284605959195e-06, + "loss": 0.2632906436920166, + "memory(GiB)": 77.0, + "step": 4885, + "token_acc": 0.9092564491654022, + "train_speed(iter/s)": 0.675655 + }, + { + "epoch": 1.56352, + "grad_norm": 0.600077640309019, + "learning_rate": 2.531746658372276e-06, + "loss": 0.2394203245639801, + "memory(GiB)": 77.0, + "step": 4886, + "token_acc": 0.9042291950886767, + "train_speed(iter/s)": 0.675403 + }, + { + "epoch": 1.56384, + "grad_norm": 0.6578177238245843, + "learning_rate": 2.530864852198312e-06, + "loss": 0.3087159991264343, + "memory(GiB)": 77.0, + "step": 4887, + "token_acc": 0.9050328227571116, + "train_speed(iter/s)": 0.675175 + }, + { + "epoch": 1.56416, + "grad_norm": 0.6783428342513061, + "learning_rate": 2.529983042183753e-06, + "loss": 0.32534438371658325, + "memory(GiB)": 77.0, + "step": 4888, + "token_acc": 0.9108191221694157, + "train_speed(iter/s)": 0.674932 + }, + { + "epoch": 1.56448, + "grad_norm": 0.6569466427685775, + "learning_rate": 2.5291012284383237e-06, + "loss": 0.31892263889312744, + "memory(GiB)": 77.0, + "step": 4889, + "token_acc": 0.9555170421721548, + "train_speed(iter/s)": 0.674703 + }, + { + "epoch": 1.5648, + "grad_norm": 0.5656359461540815, + "learning_rate": 2.528219411071752e-06, + "loss": 0.27617669105529785, + "memory(GiB)": 77.0, + "step": 4890, + "token_acc": 0.964451313755796, + "train_speed(iter/s)": 0.674436 + }, + { + "epoch": 1.5651199999999998, + "grad_norm": 0.6544829615761831, + "learning_rate": 2.527337590193765e-06, + "loss": 0.2992941737174988, + "memory(GiB)": 77.0, + "step": 4891, + "token_acc": 0.8196811434854315, + "train_speed(iter/s)": 0.674205 + }, + { + "epoch": 1.5654400000000002, + "grad_norm": 0.6722631364636962, + "learning_rate": 2.5264557659140877e-06, + "loss": 0.29776740074157715, + "memory(GiB)": 77.0, + "step": 4892, + "token_acc": 0.9270946681175191, + "train_speed(iter/s)": 0.673984 + }, + { + "epoch": 1.56576, + "grad_norm": 0.6556013218676526, + "learning_rate": 2.525573938342449e-06, + "loss": 0.32010161876678467, + "memory(GiB)": 77.0, + "step": 4893, + "token_acc": 0.9325874125874126, + "train_speed(iter/s)": 0.673757 + }, + { + "epoch": 1.56608, + "grad_norm": 0.6571876050586072, + "learning_rate": 2.5246921075885778e-06, + "loss": 0.27508068084716797, + "memory(GiB)": 77.0, + "step": 4894, + "token_acc": 0.9185151652331371, + "train_speed(iter/s)": 0.673522 + }, + { + "epoch": 1.5664, + "grad_norm": 0.7179308391534491, + "learning_rate": 2.5238102737622024e-06, + "loss": 0.4071505665779114, + "memory(GiB)": 77.0, + "step": 4895, + "token_acc": 0.8829457364341086, + "train_speed(iter/s)": 0.673297 + }, + { + "epoch": 1.5667200000000001, + "grad_norm": 0.6919776766961004, + "learning_rate": 2.522928436973051e-06, + "loss": 0.33714133501052856, + "memory(GiB)": 77.0, + "step": 4896, + "token_acc": 0.9591598599766628, + "train_speed(iter/s)": 0.673058 + }, + { + "epoch": 1.56704, + "grad_norm": 0.7036399992238398, + "learning_rate": 2.5220465973308546e-06, + "loss": 0.3552595376968384, + "memory(GiB)": 77.0, + "step": 4897, + "token_acc": 0.9340964273326396, + "train_speed(iter/s)": 0.67283 + 
}, + { + "epoch": 1.5673599999999999, + "grad_norm": 0.6818183236023233, + "learning_rate": 2.5211647549453404e-06, + "loss": 0.3764350116252899, + "memory(GiB)": 77.0, + "step": 4898, + "token_acc": 0.9410745233968805, + "train_speed(iter/s)": 0.672591 + }, + { + "epoch": 1.56768, + "grad_norm": 0.6774265908963262, + "learning_rate": 2.5202829099262407e-06, + "loss": 0.3634587824344635, + "memory(GiB)": 77.0, + "step": 4899, + "token_acc": 0.8842832469775475, + "train_speed(iter/s)": 0.672365 + }, + { + "epoch": 1.568, + "grad_norm": 0.5957926976839547, + "learning_rate": 2.519401062383284e-06, + "loss": 0.2989901304244995, + "memory(GiB)": 77.0, + "step": 4900, + "token_acc": 0.9190682984603237, + "train_speed(iter/s)": 0.672119 + }, + { + "epoch": 1.56832, + "grad_norm": 0.700902943150499, + "learning_rate": 2.5185192124262015e-06, + "loss": 0.42251676321029663, + "memory(GiB)": 77.0, + "step": 4901, + "token_acc": 0.8427588842185709, + "train_speed(iter/s)": 0.671878 + }, + { + "epoch": 1.56864, + "grad_norm": 0.6723405893093373, + "learning_rate": 2.517637360164724e-06, + "loss": 0.4013630151748657, + "memory(GiB)": 77.0, + "step": 4902, + "token_acc": 0.8958450308815272, + "train_speed(iter/s)": 0.671607 + }, + { + "epoch": 1.5689600000000001, + "grad_norm": 0.6156610293875022, + "learning_rate": 2.516755505708583e-06, + "loss": 0.2891809046268463, + "memory(GiB)": 77.0, + "step": 4903, + "token_acc": 0.9100251198903859, + "train_speed(iter/s)": 0.671378 + }, + { + "epoch": 1.56928, + "grad_norm": 0.6905239594313161, + "learning_rate": 2.51587364916751e-06, + "loss": 0.29849204421043396, + "memory(GiB)": 77.0, + "step": 4904, + "token_acc": 0.9202429149797571, + "train_speed(iter/s)": 0.671133 + }, + { + "epoch": 1.5695999999999999, + "grad_norm": 0.6947106585799802, + "learning_rate": 2.5149917906512376e-06, + "loss": 0.46380794048309326, + "memory(GiB)": 77.0, + "step": 4905, + "token_acc": 0.8842843481032834, + "train_speed(iter/s)": 0.6709 + }, + { + "epoch": 1.56992, + "grad_norm": 0.6534163744289799, + "learning_rate": 2.5141099302694967e-06, + "loss": 0.3229646384716034, + "memory(GiB)": 77.0, + "step": 4906, + "token_acc": 0.8941723274937433, + "train_speed(iter/s)": 0.670679 + }, + { + "epoch": 1.57024, + "grad_norm": 0.6797501628294246, + "learning_rate": 2.51322806813202e-06, + "loss": 0.3777836263179779, + "memory(GiB)": 77.0, + "step": 4907, + "token_acc": 0.8334135772749157, + "train_speed(iter/s)": 0.670434 + }, + { + "epoch": 1.57056, + "grad_norm": 0.671539927301369, + "learning_rate": 2.5123462043485388e-06, + "loss": 0.34607207775115967, + "memory(GiB)": 77.0, + "step": 4908, + "token_acc": 0.857659160411454, + "train_speed(iter/s)": 0.670213 + }, + { + "epoch": 1.57088, + "grad_norm": 0.644744063925723, + "learning_rate": 2.5114643390287868e-06, + "loss": 0.323859304189682, + "memory(GiB)": 77.0, + "step": 4909, + "token_acc": 0.946607341490545, + "train_speed(iter/s)": 0.669986 + }, + { + "epoch": 1.5712000000000002, + "grad_norm": 0.661153946176565, + "learning_rate": 2.5105824722824966e-06, + "loss": 0.2572493553161621, + "memory(GiB)": 77.0, + "step": 4910, + "token_acc": 0.919312554616953, + "train_speed(iter/s)": 0.669737 + }, + { + "epoch": 1.57152, + "grad_norm": 0.650328080081243, + "learning_rate": 2.509700604219402e-06, + "loss": 0.36625155806541443, + "memory(GiB)": 77.0, + "step": 4911, + "token_acc": 0.844474034620506, + "train_speed(iter/s)": 0.669501 + }, + { + "epoch": 1.57184, + "grad_norm": 0.7097648441403875, + "learning_rate": 
2.508818734949235e-06, + "loss": 0.34597164392471313, + "memory(GiB)": 77.0, + "step": 4912, + "token_acc": 0.8443258971871969, + "train_speed(iter/s)": 0.669269 + }, + { + "epoch": 1.57216, + "grad_norm": 0.6560627951764163, + "learning_rate": 2.507936864581729e-06, + "loss": 0.28402334451675415, + "memory(GiB)": 77.0, + "step": 4913, + "token_acc": 0.9460748349229641, + "train_speed(iter/s)": 0.669015 + }, + { + "epoch": 1.57248, + "grad_norm": 0.6230517259045175, + "learning_rate": 2.5070549932266192e-06, + "loss": 0.32666313648223877, + "memory(GiB)": 77.0, + "step": 4914, + "token_acc": 0.9134419551934827, + "train_speed(iter/s)": 0.66877 + }, + { + "epoch": 1.5728, + "grad_norm": 0.6631891321856089, + "learning_rate": 2.5061731209936367e-06, + "loss": 0.33433806896209717, + "memory(GiB)": 77.0, + "step": 4915, + "token_acc": 0.9043580683156655, + "train_speed(iter/s)": 0.668546 + }, + { + "epoch": 1.5731199999999999, + "grad_norm": 0.6204572072509565, + "learning_rate": 2.505291247992517e-06, + "loss": 0.34804588556289673, + "memory(GiB)": 77.0, + "step": 4916, + "token_acc": 0.9128552097428958, + "train_speed(iter/s)": 0.66832 + }, + { + "epoch": 1.57344, + "grad_norm": 0.6359924143610104, + "learning_rate": 2.504409374332993e-06, + "loss": 0.4053494930267334, + "memory(GiB)": 77.0, + "step": 4917, + "token_acc": 0.8968627450980392, + "train_speed(iter/s)": 0.668076 + }, + { + "epoch": 1.57376, + "grad_norm": 0.6273021016013701, + "learning_rate": 2.503527500124799e-06, + "loss": 0.2816501259803772, + "memory(GiB)": 77.0, + "step": 4918, + "token_acc": 0.9359255202628697, + "train_speed(iter/s)": 0.667857 + }, + { + "epoch": 1.57408, + "grad_norm": 0.6275044595734025, + "learning_rate": 2.5026456254776683e-06, + "loss": 0.29086747765541077, + "memory(GiB)": 77.0, + "step": 4919, + "token_acc": 0.8690437601296597, + "train_speed(iter/s)": 0.667637 + }, + { + "epoch": 1.5744, + "grad_norm": 0.6046311514681831, + "learning_rate": 2.501763750501336e-06, + "loss": 0.26937028765678406, + "memory(GiB)": 77.0, + "step": 4920, + "token_acc": 0.8897367417016406, + "train_speed(iter/s)": 0.667397 + }, + { + "epoch": 1.5747200000000001, + "grad_norm": 0.6854179497286398, + "learning_rate": 2.5008818753055354e-06, + "loss": 0.4002457559108734, + "memory(GiB)": 77.0, + "step": 4921, + "token_acc": 0.8509879253567508, + "train_speed(iter/s)": 0.667166 + }, + { + "epoch": 1.57504, + "grad_norm": 0.6564468845574335, + "learning_rate": 2.5e-06, + "loss": 0.3747050166130066, + "memory(GiB)": 77.0, + "step": 4922, + "token_acc": 0.8800215691561067, + "train_speed(iter/s)": 0.666876 + }, + { + "epoch": 1.5753599999999999, + "grad_norm": 0.6075049139266311, + "learning_rate": 2.499118124694466e-06, + "loss": 0.3430497646331787, + "memory(GiB)": 77.0, + "step": 4923, + "token_acc": 0.8907035175879398, + "train_speed(iter/s)": 0.666631 + }, + { + "epoch": 1.57568, + "grad_norm": 0.6188115263294488, + "learning_rate": 2.498236249498665e-06, + "loss": 0.25220656394958496, + "memory(GiB)": 77.0, + "step": 4924, + "token_acc": 0.9035168903285516, + "train_speed(iter/s)": 0.666381 + }, + { + "epoch": 1.576, + "grad_norm": 0.6178414141912799, + "learning_rate": 2.4973543745223325e-06, + "loss": 0.26732537150382996, + "memory(GiB)": 77.0, + "step": 4925, + "token_acc": 0.9199449793672627, + "train_speed(iter/s)": 0.666158 + }, + { + "epoch": 1.57632, + "grad_norm": 0.6836002033502653, + "learning_rate": 2.4964724998752023e-06, + "loss": 0.3244878053665161, + "memory(GiB)": 77.0, + "step": 4926, + "token_acc": 
0.8882460414129111, + "train_speed(iter/s)": 0.665911 + }, + { + "epoch": 1.57664, + "grad_norm": 0.6327846004169185, + "learning_rate": 2.495590625667008e-06, + "loss": 0.3577786087989807, + "memory(GiB)": 77.0, + "step": 4927, + "token_acc": 0.9203800786369594, + "train_speed(iter/s)": 0.665684 + }, + { + "epoch": 1.5769600000000001, + "grad_norm": 0.6392498247112774, + "learning_rate": 2.4947087520074835e-06, + "loss": 0.322930246591568, + "memory(GiB)": 77.0, + "step": 4928, + "token_acc": 0.8718020022246941, + "train_speed(iter/s)": 0.665446 + }, + { + "epoch": 1.57728, + "grad_norm": 0.61464577616514, + "learning_rate": 2.4938268790063637e-06, + "loss": 0.3077755570411682, + "memory(GiB)": 77.0, + "step": 4929, + "token_acc": 0.8701923076923077, + "train_speed(iter/s)": 0.665204 + }, + { + "epoch": 1.5776, + "grad_norm": 0.6808976327190737, + "learning_rate": 2.4929450067733825e-06, + "loss": 0.34590211510658264, + "memory(GiB)": 77.0, + "step": 4930, + "token_acc": 0.8803611738148984, + "train_speed(iter/s)": 0.66495 + }, + { + "epoch": 1.57792, + "grad_norm": 0.7214828598972137, + "learning_rate": 2.4920631354182717e-06, + "loss": 0.31074953079223633, + "memory(GiB)": 77.0, + "step": 4931, + "token_acc": 0.9352183249821046, + "train_speed(iter/s)": 0.664725 + }, + { + "epoch": 1.57824, + "grad_norm": 0.6585876719496945, + "learning_rate": 2.491181265050766e-06, + "loss": 0.39204907417297363, + "memory(GiB)": 77.0, + "step": 4932, + "token_acc": 0.8875870804306523, + "train_speed(iter/s)": 0.664499 + }, + { + "epoch": 1.57856, + "grad_norm": 0.6571489237878971, + "learning_rate": 2.4902993957805994e-06, + "loss": 0.3455318808555603, + "memory(GiB)": 77.0, + "step": 4933, + "token_acc": 0.9297853309481217, + "train_speed(iter/s)": 0.664279 + }, + { + "epoch": 1.5788799999999998, + "grad_norm": 0.6134676951951366, + "learning_rate": 2.489417527717504e-06, + "loss": 0.29968947172164917, + "memory(GiB)": 77.0, + "step": 4934, + "token_acc": 0.9449474827960884, + "train_speed(iter/s)": 0.664042 + }, + { + "epoch": 1.5792000000000002, + "grad_norm": 0.628351171530423, + "learning_rate": 2.4885356609712136e-06, + "loss": 0.36965420842170715, + "memory(GiB)": 77.0, + "step": 4935, + "token_acc": 0.8758198513336248, + "train_speed(iter/s)": 0.663801 + }, + { + "epoch": 1.57952, + "grad_norm": 0.638648146043753, + "learning_rate": 2.487653795651462e-06, + "loss": 0.3572106659412384, + "memory(GiB)": 77.0, + "step": 4936, + "token_acc": 0.9063444108761329, + "train_speed(iter/s)": 0.663576 + }, + { + "epoch": 1.57984, + "grad_norm": 0.6912480323368229, + "learning_rate": 2.4867719318679813e-06, + "loss": 0.3680154085159302, + "memory(GiB)": 77.0, + "step": 4937, + "token_acc": 0.8728549141965679, + "train_speed(iter/s)": 0.663352 + }, + { + "epoch": 1.58016, + "grad_norm": 1.2546933824503819, + "learning_rate": 2.4858900697305037e-06, + "loss": 0.3460584282875061, + "memory(GiB)": 77.0, + "step": 4938, + "token_acc": 0.94048, + "train_speed(iter/s)": 0.663112 + }, + { + "epoch": 1.58048, + "grad_norm": 0.6604064900232309, + "learning_rate": 2.485008209348763e-06, + "loss": 0.3304581940174103, + "memory(GiB)": 77.0, + "step": 4939, + "token_acc": 0.9561527581329562, + "train_speed(iter/s)": 0.662882 + }, + { + "epoch": 1.5808, + "grad_norm": 0.6660694421551783, + "learning_rate": 2.48412635083249e-06, + "loss": 0.2783695161342621, + "memory(GiB)": 77.0, + "step": 4940, + "token_acc": 0.9211923233973051, + "train_speed(iter/s)": 0.662638 + }, + { + "epoch": 1.5811199999999999, + "grad_norm": 
0.609463618564189, + "learning_rate": 2.483244494291417e-06, + "loss": 0.3530394434928894, + "memory(GiB)": 77.0, + "step": 4941, + "token_acc": 0.8603963324460219, + "train_speed(iter/s)": 0.662398 + }, + { + "epoch": 1.58144, + "grad_norm": 0.6424821601914211, + "learning_rate": 2.4823626398352764e-06, + "loss": 0.3416900634765625, + "memory(GiB)": 77.0, + "step": 4942, + "token_acc": 0.9132756866734486, + "train_speed(iter/s)": 0.662177 + }, + { + "epoch": 1.58176, + "grad_norm": 0.6545025622812731, + "learning_rate": 2.4814807875737993e-06, + "loss": 0.3150036633014679, + "memory(GiB)": 77.0, + "step": 4943, + "token_acc": 0.8793637678165668, + "train_speed(iter/s)": 0.66196 + }, + { + "epoch": 1.58208, + "grad_norm": 0.653860527655524, + "learning_rate": 2.4805989376167178e-06, + "loss": 0.3441842198371887, + "memory(GiB)": 77.0, + "step": 4944, + "token_acc": 0.8696883852691218, + "train_speed(iter/s)": 0.661727 + }, + { + "epoch": 1.5824, + "grad_norm": 0.6963472951330205, + "learning_rate": 2.479717090073761e-06, + "loss": 0.357636034488678, + "memory(GiB)": 77.0, + "step": 4945, + "token_acc": 0.900767136847315, + "train_speed(iter/s)": 0.661514 + }, + { + "epoch": 1.5827200000000001, + "grad_norm": 0.6739731920387356, + "learning_rate": 2.4788352450546605e-06, + "loss": 0.3890877962112427, + "memory(GiB)": 77.0, + "step": 4946, + "token_acc": 0.9175679625840397, + "train_speed(iter/s)": 0.66128 + }, + { + "epoch": 1.58304, + "grad_norm": 0.5910733819768521, + "learning_rate": 2.4779534026691467e-06, + "loss": 0.3151545822620392, + "memory(GiB)": 77.0, + "step": 4947, + "token_acc": 0.8235390407595351, + "train_speed(iter/s)": 0.66105 + }, + { + "epoch": 1.5833599999999999, + "grad_norm": 0.709445771378763, + "learning_rate": 2.4770715630269495e-06, + "loss": 0.33500203490257263, + "memory(GiB)": 77.0, + "step": 4948, + "token_acc": 0.9205808940322214, + "train_speed(iter/s)": 0.660836 + }, + { + "epoch": 1.58368, + "grad_norm": 0.6534389266216277, + "learning_rate": 2.4761897262377985e-06, + "loss": 0.2930193841457367, + "memory(GiB)": 77.0, + "step": 4949, + "token_acc": 0.8982617586912065, + "train_speed(iter/s)": 0.660628 + }, + { + "epoch": 1.584, + "grad_norm": 0.61897920856758, + "learning_rate": 2.475307892411423e-06, + "loss": 0.2993030846118927, + "memory(GiB)": 77.0, + "step": 4950, + "token_acc": 0.9581993569131833, + "train_speed(iter/s)": 0.660392 + }, + { + "epoch": 1.58432, + "grad_norm": 0.6159908469821261, + "learning_rate": 2.4744260616575512e-06, + "loss": 0.2877008318901062, + "memory(GiB)": 77.0, + "step": 4951, + "token_acc": 0.8651949271958667, + "train_speed(iter/s)": 0.660167 + }, + { + "epoch": 1.58464, + "grad_norm": 0.641957682744718, + "learning_rate": 2.473544234085913e-06, + "loss": 0.3741390109062195, + "memory(GiB)": 77.0, + "step": 4952, + "token_acc": 0.8246049661399548, + "train_speed(iter/s)": 0.659938 + }, + { + "epoch": 1.5849600000000001, + "grad_norm": 0.6388821771676909, + "learning_rate": 2.4726624098062364e-06, + "loss": 0.3815426826477051, + "memory(GiB)": 77.0, + "step": 4953, + "token_acc": 0.9590192644483363, + "train_speed(iter/s)": 0.65969 + }, + { + "epoch": 1.58528, + "grad_norm": 0.6890343763522504, + "learning_rate": 2.4717805889282483e-06, + "loss": 0.3699745833873749, + "memory(GiB)": 77.0, + "step": 4954, + "token_acc": 0.8991329479768786, + "train_speed(iter/s)": 0.659473 + }, + { + "epoch": 1.5856, + "grad_norm": 0.6555353657404747, + "learning_rate": 2.4708987715616762e-06, + "loss": 0.3311530649662018, + "memory(GiB)": 
77.0, + "step": 4955, + "token_acc": 0.8765493306891423, + "train_speed(iter/s)": 0.659253 + }, + { + "epoch": 1.58592, + "grad_norm": 0.6436673086933891, + "learning_rate": 2.4700169578162476e-06, + "loss": 0.3281750977039337, + "memory(GiB)": 77.0, + "step": 4956, + "token_acc": 0.883985200845666, + "train_speed(iter/s)": 0.659022 + }, + { + "epoch": 1.58624, + "grad_norm": 0.5859537697173618, + "learning_rate": 2.4691351478016882e-06, + "loss": 0.31061244010925293, + "memory(GiB)": 77.0, + "step": 4957, + "token_acc": 0.9015873015873016, + "train_speed(iter/s)": 0.658791 + }, + { + "epoch": 1.58656, + "grad_norm": 0.6547986248520671, + "learning_rate": 2.4682533416277245e-06, + "loss": 0.3456366956233978, + "memory(GiB)": 77.0, + "step": 4958, + "token_acc": 0.840567612687813, + "train_speed(iter/s)": 0.658573 + }, + { + "epoch": 1.5868799999999998, + "grad_norm": 0.6327810525314829, + "learning_rate": 2.467371539404082e-06, + "loss": 0.3296758532524109, + "memory(GiB)": 77.0, + "step": 4959, + "token_acc": 0.860705240600039, + "train_speed(iter/s)": 0.658346 + }, + { + "epoch": 1.5872000000000002, + "grad_norm": 0.6327821632859746, + "learning_rate": 2.4664897412404845e-06, + "loss": 0.24128282070159912, + "memory(GiB)": 77.0, + "step": 4960, + "token_acc": 0.8797433035714286, + "train_speed(iter/s)": 0.658123 + }, + { + "epoch": 1.58752, + "grad_norm": 0.6382776151446005, + "learning_rate": 2.4656079472466576e-06, + "loss": 0.4285864531993866, + "memory(GiB)": 77.0, + "step": 4961, + "token_acc": 0.8940524921743318, + "train_speed(iter/s)": 0.657902 + }, + { + "epoch": 1.58784, + "grad_norm": 0.6943279254421092, + "learning_rate": 2.4647261575323253e-06, + "loss": 0.3618059754371643, + "memory(GiB)": 77.0, + "step": 4962, + "token_acc": 0.9223419892209701, + "train_speed(iter/s)": 0.657679 + }, + { + "epoch": 1.58816, + "grad_norm": 0.5980674039641749, + "learning_rate": 2.463844372207211e-06, + "loss": 0.283338338136673, + "memory(GiB)": 77.0, + "step": 4963, + "token_acc": 0.931592039800995, + "train_speed(iter/s)": 0.657453 + }, + { + "epoch": 1.5884800000000001, + "grad_norm": 0.6766058778599308, + "learning_rate": 2.462962591381037e-06, + "loss": 0.35652804374694824, + "memory(GiB)": 77.0, + "step": 4964, + "token_acc": 0.8800325335502237, + "train_speed(iter/s)": 0.657232 + }, + { + "epoch": 1.5888, + "grad_norm": 0.7085469100124637, + "learning_rate": 2.4620808151635257e-06, + "loss": 0.37698113918304443, + "memory(GiB)": 77.0, + "step": 4965, + "token_acc": 0.8949866508454465, + "train_speed(iter/s)": 0.657021 + }, + { + "epoch": 1.5891199999999999, + "grad_norm": 0.6528485172105458, + "learning_rate": 2.4611990436643994e-06, + "loss": 0.28907686471939087, + "memory(GiB)": 77.0, + "step": 4966, + "token_acc": 0.9599864360800271, + "train_speed(iter/s)": 0.656797 + }, + { + "epoch": 1.58944, + "grad_norm": 0.6493817063890573, + "learning_rate": 2.4603172769933795e-06, + "loss": 0.3747904300689697, + "memory(GiB)": 77.0, + "step": 4967, + "token_acc": 0.8542468565563254, + "train_speed(iter/s)": 0.656559 + }, + { + "epoch": 1.58976, + "grad_norm": 0.5937026364106831, + "learning_rate": 2.459435515260185e-06, + "loss": 0.3374999165534973, + "memory(GiB)": 77.0, + "step": 4968, + "token_acc": 0.8841039888866838, + "train_speed(iter/s)": 0.656298 + }, + { + "epoch": 1.59008, + "grad_norm": 0.6617374516578713, + "learning_rate": 2.458553758574538e-06, + "loss": 0.36022934317588806, + "memory(GiB)": 77.0, + "step": 4969, + "token_acc": 0.8931818181818182, + "train_speed(iter/s)": 
0.656085 + }, + { + "epoch": 1.5904, + "grad_norm": 0.6670770850236493, + "learning_rate": 2.457672007046156e-06, + "loss": 0.3367556929588318, + "memory(GiB)": 77.0, + "step": 4970, + "token_acc": 0.9235026041666666, + "train_speed(iter/s)": 0.655871 + }, + { + "epoch": 1.5907200000000001, + "grad_norm": 0.6373333051151345, + "learning_rate": 2.4567902607847593e-06, + "loss": 0.3163371682167053, + "memory(GiB)": 77.0, + "step": 4971, + "token_acc": 0.944469525959368, + "train_speed(iter/s)": 0.65565 + }, + { + "epoch": 1.59104, + "grad_norm": 0.6404903899429745, + "learning_rate": 2.455908519900064e-06, + "loss": 0.3573395013809204, + "memory(GiB)": 77.0, + "step": 4972, + "token_acc": 0.9377523553162853, + "train_speed(iter/s)": 0.655418 + }, + { + "epoch": 1.5913599999999999, + "grad_norm": 0.6569950630974636, + "learning_rate": 2.4550267845017908e-06, + "loss": 0.37592822313308716, + "memory(GiB)": 77.0, + "step": 4973, + "token_acc": 0.8993499215422551, + "train_speed(iter/s)": 0.655195 + }, + { + "epoch": 1.59168, + "grad_norm": 0.6051564606911711, + "learning_rate": 2.454145054699652e-06, + "loss": 0.26084089279174805, + "memory(GiB)": 77.0, + "step": 4974, + "token_acc": 0.8851529268786713, + "train_speed(iter/s)": 0.654966 + }, + { + "epoch": 1.592, + "grad_norm": 0.6291352526740115, + "learning_rate": 2.4532633306033658e-06, + "loss": 0.33320534229278564, + "memory(GiB)": 77.0, + "step": 4975, + "token_acc": 0.9292426755113322, + "train_speed(iter/s)": 0.654742 + }, + { + "epoch": 1.59232, + "grad_norm": 0.6410391240947156, + "learning_rate": 2.4523816123226478e-06, + "loss": 0.3113533556461334, + "memory(GiB)": 77.0, + "step": 4976, + "token_acc": 0.9357443820224719, + "train_speed(iter/s)": 0.654533 + }, + { + "epoch": 1.5926399999999998, + "grad_norm": 0.6861754720852515, + "learning_rate": 2.451499899967212e-06, + "loss": 0.34612399339675903, + "memory(GiB)": 77.0, + "step": 4977, + "token_acc": 0.9289996253278382, + "train_speed(iter/s)": 0.654304 + }, + { + "epoch": 1.5929600000000002, + "grad_norm": 0.6275632423942793, + "learning_rate": 2.450618193646772e-06, + "loss": 0.3571428656578064, + "memory(GiB)": 77.0, + "step": 4978, + "token_acc": 0.8409953606073387, + "train_speed(iter/s)": 0.654083 + }, + { + "epoch": 1.59328, + "grad_norm": 0.6534727020482028, + "learning_rate": 2.4497364934710415e-06, + "loss": 0.36178407073020935, + "memory(GiB)": 77.0, + "step": 4979, + "token_acc": 0.941702550513415, + "train_speed(iter/s)": 0.653872 + }, + { + "epoch": 1.5936, + "grad_norm": 0.6883243376708713, + "learning_rate": 2.4488547995497324e-06, + "loss": 0.3717919886112213, + "memory(GiB)": 77.0, + "step": 4980, + "token_acc": 0.8844757633995303, + "train_speed(iter/s)": 0.65364 + }, + { + "epoch": 1.59392, + "grad_norm": 0.6777738482128001, + "learning_rate": 2.4479731119925563e-06, + "loss": 0.35125434398651123, + "memory(GiB)": 77.0, + "step": 4981, + "token_acc": 0.8470588235294118, + "train_speed(iter/s)": 0.653411 + }, + { + "epoch": 1.59424, + "grad_norm": 0.6376391539873865, + "learning_rate": 2.4470914309092237e-06, + "loss": 0.4292029142379761, + "memory(GiB)": 77.0, + "step": 4982, + "token_acc": 0.829998129792407, + "train_speed(iter/s)": 0.653187 + }, + { + "epoch": 1.59456, + "grad_norm": 0.6638704214961526, + "learning_rate": 2.446209756409445e-06, + "loss": 0.3758625090122223, + "memory(GiB)": 77.0, + "step": 4983, + "token_acc": 0.8745630269381041, + "train_speed(iter/s)": 0.652974 + }, + { + "epoch": 1.5948799999999999, + "grad_norm": 0.5739257669192811, + 
"learning_rate": 2.4453280886029286e-06, + "loss": 0.25783079862594604, + "memory(GiB)": 77.0, + "step": 4984, + "token_acc": 0.9050863957167194, + "train_speed(iter/s)": 0.652756 + }, + { + "epoch": 1.5952, + "grad_norm": 0.738762158386702, + "learning_rate": 2.4444464275993828e-06, + "loss": 0.3131095767021179, + "memory(GiB)": 77.0, + "step": 4985, + "token_acc": 0.8825184664087232, + "train_speed(iter/s)": 0.652535 + }, + { + "epoch": 1.59552, + "grad_norm": 0.7845266086457245, + "learning_rate": 2.443564773508516e-06, + "loss": 0.28198960423469543, + "memory(GiB)": 77.0, + "step": 4986, + "token_acc": 0.917939366309551, + "train_speed(iter/s)": 0.652325 + }, + { + "epoch": 1.59584, + "grad_norm": 0.6504193949442505, + "learning_rate": 2.4426831264400352e-06, + "loss": 0.37103205919265747, + "memory(GiB)": 77.0, + "step": 4987, + "token_acc": 0.8966458658346333, + "train_speed(iter/s)": 0.652118 + }, + { + "epoch": 1.59616, + "grad_norm": 0.6940160200332243, + "learning_rate": 2.441801486503644e-06, + "loss": 0.3696920871734619, + "memory(GiB)": 77.0, + "step": 4988, + "token_acc": 0.9192139737991266, + "train_speed(iter/s)": 0.651902 + }, + { + "epoch": 1.5964800000000001, + "grad_norm": 0.6395540049882373, + "learning_rate": 2.440919853809048e-06, + "loss": 0.27638858556747437, + "memory(GiB)": 77.0, + "step": 4989, + "token_acc": 0.9537129537129537, + "train_speed(iter/s)": 0.651678 + }, + { + "epoch": 1.5968, + "grad_norm": 0.6276753074867721, + "learning_rate": 2.4400382284659512e-06, + "loss": 0.39938414096832275, + "memory(GiB)": 77.0, + "step": 4990, + "token_acc": 0.8542698921597202, + "train_speed(iter/s)": 0.651427 + }, + { + "epoch": 1.5971199999999999, + "grad_norm": 0.6491600027623944, + "learning_rate": 2.4391566105840566e-06, + "loss": 0.36827370524406433, + "memory(GiB)": 77.0, + "step": 4991, + "token_acc": 0.8512318382817435, + "train_speed(iter/s)": 0.651206 + }, + { + "epoch": 1.59744, + "grad_norm": 0.6251084193415744, + "learning_rate": 2.4382750002730664e-06, + "loss": 0.30578434467315674, + "memory(GiB)": 77.0, + "step": 4992, + "token_acc": 0.9598599137931034, + "train_speed(iter/s)": 0.650986 + }, + { + "epoch": 1.59776, + "grad_norm": 0.6751429598522813, + "learning_rate": 2.437393397642682e-06, + "loss": 0.3142290711402893, + "memory(GiB)": 77.0, + "step": 4993, + "token_acc": 0.9007601351351351, + "train_speed(iter/s)": 0.650774 + }, + { + "epoch": 1.59808, + "grad_norm": 0.6691678454308955, + "learning_rate": 2.436511802802603e-06, + "loss": 0.3857981264591217, + "memory(GiB)": 77.0, + "step": 4994, + "token_acc": 0.9068615892478189, + "train_speed(iter/s)": 0.650567 + }, + { + "epoch": 1.5984, + "grad_norm": 0.6854209629638409, + "learning_rate": 2.435630215862529e-06, + "loss": 0.2926594614982605, + "memory(GiB)": 77.0, + "step": 4995, + "token_acc": 0.9124044679600235, + "train_speed(iter/s)": 0.65035 + }, + { + "epoch": 1.5987200000000001, + "grad_norm": 0.6315785179125699, + "learning_rate": 2.4347486369321576e-06, + "loss": 0.3170531392097473, + "memory(GiB)": 77.0, + "step": 4996, + "token_acc": 0.8864049436568521, + "train_speed(iter/s)": 0.650129 + }, + { + "epoch": 1.59904, + "grad_norm": 0.7092879457106342, + "learning_rate": 2.4338670661211866e-06, + "loss": 0.43618738651275635, + "memory(GiB)": 77.0, + "step": 4997, + "token_acc": 0.8526437445824906, + "train_speed(iter/s)": 0.649924 + }, + { + "epoch": 1.59936, + "grad_norm": 0.628329406304438, + "learning_rate": 2.432985503539312e-06, + "loss": 0.30367517471313477, + "memory(GiB)": 77.0, + 
"step": 4998, + "token_acc": 0.9720956719817767, + "train_speed(iter/s)": 0.649712 + }, + { + "epoch": 1.59968, + "grad_norm": 0.6163265338835011, + "learning_rate": 2.4321039492962292e-06, + "loss": 0.3677240014076233, + "memory(GiB)": 77.0, + "step": 4999, + "token_acc": 0.9595824411134903, + "train_speed(iter/s)": 0.649484 + }, + { + "epoch": 1.6, + "grad_norm": 0.729181786126463, + "learning_rate": 2.4312224035016317e-06, + "loss": 0.3989602327346802, + "memory(GiB)": 77.0, + "step": 5000, + "token_acc": 0.8467661691542289, + "train_speed(iter/s)": 0.649177 + }, + { + "epoch": 1.60032, + "grad_norm": 0.6545590663077652, + "learning_rate": 2.4303408662652124e-06, + "loss": 0.3506210446357727, + "memory(GiB)": 77.0, + "step": 5001, + "token_acc": 0.8651710854294131, + "train_speed(iter/s)": 0.648975 + }, + { + "epoch": 1.6006399999999998, + "grad_norm": 0.6270720621952524, + "learning_rate": 2.429459337696666e-06, + "loss": 0.3075229525566101, + "memory(GiB)": 77.0, + "step": 5002, + "token_acc": 0.9138451223600402, + "train_speed(iter/s)": 0.648764 + }, + { + "epoch": 1.6009600000000002, + "grad_norm": 0.6898838357726869, + "learning_rate": 2.42857781790568e-06, + "loss": 0.29169511795043945, + "memory(GiB)": 77.0, + "step": 5003, + "token_acc": 0.9547803617571059, + "train_speed(iter/s)": 0.648554 + }, + { + "epoch": 1.60128, + "grad_norm": 0.5913829522436933, + "learning_rate": 2.4276963070019455e-06, + "loss": 0.22381305694580078, + "memory(GiB)": 77.0, + "step": 5004, + "token_acc": 0.930822255046775, + "train_speed(iter/s)": 0.648333 + }, + { + "epoch": 1.6016, + "grad_norm": 0.6401750981816247, + "learning_rate": 2.426814805095151e-06, + "loss": 0.31335389614105225, + "memory(GiB)": 77.0, + "step": 5005, + "token_acc": 0.9292505592841164, + "train_speed(iter/s)": 0.648121 + }, + { + "epoch": 1.60192, + "grad_norm": 0.5984827354066041, + "learning_rate": 2.4259333122949852e-06, + "loss": 0.29868584871292114, + "memory(GiB)": 77.0, + "step": 5006, + "token_acc": 0.8984664198836594, + "train_speed(iter/s)": 0.647889 + }, + { + "epoch": 1.60224, + "grad_norm": 0.6747964883978826, + "learning_rate": 2.4250518287111333e-06, + "loss": 0.2527707517147064, + "memory(GiB)": 77.0, + "step": 5007, + "token_acc": 0.9495560936238903, + "train_speed(iter/s)": 0.647683 + }, + { + "epoch": 1.60256, + "grad_norm": 0.5805974761715599, + "learning_rate": 2.4241703544532806e-06, + "loss": 0.2694268822669983, + "memory(GiB)": 77.0, + "step": 5008, + "token_acc": 0.9222144358794674, + "train_speed(iter/s)": 0.647464 + }, + { + "epoch": 1.6028799999999999, + "grad_norm": 0.6579426264584911, + "learning_rate": 2.4232888896311124e-06, + "loss": 0.3974955976009369, + "memory(GiB)": 77.0, + "step": 5009, + "token_acc": 0.9139626352015733, + "train_speed(iter/s)": 0.647247 + }, + { + "epoch": 1.6032, + "grad_norm": 0.7259908184336709, + "learning_rate": 2.4224074343543113e-06, + "loss": 0.31980305910110474, + "memory(GiB)": 77.0, + "step": 5010, + "token_acc": 0.8537934668071654, + "train_speed(iter/s)": 0.647044 + }, + { + "epoch": 1.60352, + "grad_norm": 0.7611935766614347, + "learning_rate": 2.421525988732559e-06, + "loss": 0.39383843541145325, + "memory(GiB)": 77.0, + "step": 5011, + "token_acc": 0.8982414542580518, + "train_speed(iter/s)": 0.646822 + }, + { + "epoch": 1.60384, + "grad_norm": 0.631020842872211, + "learning_rate": 2.420644552875536e-06, + "loss": 0.3026019036769867, + "memory(GiB)": 77.0, + "step": 5012, + "token_acc": 0.9244570349386213, + "train_speed(iter/s)": 0.646614 + }, + { + 
"epoch": 1.60416, + "grad_norm": 0.6968286646455149, + "learning_rate": 2.4197631268929218e-06, + "loss": 0.3297034800052643, + "memory(GiB)": 77.0, + "step": 5013, + "token_acc": 0.8753375337533753, + "train_speed(iter/s)": 0.646404 + }, + { + "epoch": 1.6044800000000001, + "grad_norm": 0.6961635179258712, + "learning_rate": 2.418881710894394e-06, + "loss": 0.4221850335597992, + "memory(GiB)": 77.0, + "step": 5014, + "token_acc": 0.8978319783197832, + "train_speed(iter/s)": 0.646198 + }, + { + "epoch": 1.6048, + "grad_norm": 0.5961315830632375, + "learning_rate": 2.418000304989631e-06, + "loss": 0.26093801856040955, + "memory(GiB)": 77.0, + "step": 5015, + "token_acc": 0.9071522309711286, + "train_speed(iter/s)": 0.645997 + }, + { + "epoch": 1.6051199999999999, + "grad_norm": 0.6351073022168057, + "learning_rate": 2.4171189092883076e-06, + "loss": 0.3332333564758301, + "memory(GiB)": 77.0, + "step": 5016, + "token_acc": 0.9595715272978577, + "train_speed(iter/s)": 0.645801 + }, + { + "epoch": 1.60544, + "grad_norm": 0.6822179733974856, + "learning_rate": 2.416237523900098e-06, + "loss": 0.34413427114486694, + "memory(GiB)": 77.0, + "step": 5017, + "token_acc": 0.8993409227082085, + "train_speed(iter/s)": 0.645588 + }, + { + "epoch": 1.60576, + "grad_norm": 0.6751473317269979, + "learning_rate": 2.4153561489346754e-06, + "loss": 0.3848549723625183, + "memory(GiB)": 77.0, + "step": 5018, + "token_acc": 0.8460985257101762, + "train_speed(iter/s)": 0.645388 + }, + { + "epoch": 1.60608, + "grad_norm": 0.6934170673737549, + "learning_rate": 2.4144747845017115e-06, + "loss": 0.38668912649154663, + "memory(GiB)": 77.0, + "step": 5019, + "token_acc": 0.8687369519832986, + "train_speed(iter/s)": 0.645178 + }, + { + "epoch": 1.6064, + "grad_norm": 0.6428360206111604, + "learning_rate": 2.413593430710877e-06, + "loss": 0.3722027540206909, + "memory(GiB)": 77.0, + "step": 5020, + "token_acc": 0.8599763173475429, + "train_speed(iter/s)": 0.644965 + }, + { + "epoch": 1.6067200000000001, + "grad_norm": 0.6894122000050181, + "learning_rate": 2.412712087671841e-06, + "loss": 0.259607195854187, + "memory(GiB)": 77.0, + "step": 5021, + "token_acc": 0.9586437194965323, + "train_speed(iter/s)": 0.644771 + }, + { + "epoch": 1.60704, + "grad_norm": 0.6343780428482283, + "learning_rate": 2.4118307554942715e-06, + "loss": 0.34582164883613586, + "memory(GiB)": 77.0, + "step": 5022, + "token_acc": 0.8766038766038766, + "train_speed(iter/s)": 0.644569 + }, + { + "epoch": 1.60736, + "grad_norm": 0.6373392559345014, + "learning_rate": 2.4109494342878354e-06, + "loss": 0.29384833574295044, + "memory(GiB)": 77.0, + "step": 5023, + "token_acc": 0.9146884272997032, + "train_speed(iter/s)": 0.644364 + }, + { + "epoch": 1.60768, + "grad_norm": 0.5934526118046555, + "learning_rate": 2.4100681241621966e-06, + "loss": 0.27614113688468933, + "memory(GiB)": 77.0, + "step": 5024, + "token_acc": 0.9080779944289693, + "train_speed(iter/s)": 0.64416 + }, + { + "epoch": 1.608, + "grad_norm": 0.6734383568636636, + "learning_rate": 2.40918682522702e-06, + "loss": 0.3237091600894928, + "memory(GiB)": 77.0, + "step": 5025, + "token_acc": 0.8784521515735388, + "train_speed(iter/s)": 0.64395 + }, + { + "epoch": 1.60832, + "grad_norm": 0.7006190193954128, + "learning_rate": 2.408305537591968e-06, + "loss": 0.4123079776763916, + "memory(GiB)": 77.0, + "step": 5026, + "token_acc": 0.9467905405405406, + "train_speed(iter/s)": 0.643746 + }, + { + "epoch": 1.6086399999999998, + "grad_norm": 0.7065658347437035, + "learning_rate": 
2.4074242613667007e-06, + "loss": 0.3622466027736664, + "memory(GiB)": 77.0, + "step": 5027, + "token_acc": 0.9169024954978132, + "train_speed(iter/s)": 0.643546 + }, + { + "epoch": 1.60896, + "grad_norm": 0.6589138605127537, + "learning_rate": 2.4065429966608787e-06, + "loss": 0.33939021825790405, + "memory(GiB)": 77.0, + "step": 5028, + "token_acc": 0.9493087557603687, + "train_speed(iter/s)": 0.643343 + }, + { + "epoch": 1.60928, + "grad_norm": 0.6497542808945631, + "learning_rate": 2.405661743584159e-06, + "loss": 0.31775471568107605, + "memory(GiB)": 77.0, + "step": 5029, + "token_acc": 0.875, + "train_speed(iter/s)": 0.643141 + }, + { + "epoch": 1.6096, + "grad_norm": 0.6823390907570969, + "learning_rate": 2.4047805022462003e-06, + "loss": 0.3455643653869629, + "memory(GiB)": 77.0, + "step": 5030, + "token_acc": 0.8917541229385307, + "train_speed(iter/s)": 0.642916 + }, + { + "epoch": 1.60992, + "grad_norm": 0.67951639449115, + "learning_rate": 2.4038992727566544e-06, + "loss": 0.32108068466186523, + "memory(GiB)": 77.0, + "step": 5031, + "token_acc": 0.8729433272394881, + "train_speed(iter/s)": 0.642716 + }, + { + "epoch": 1.6102400000000001, + "grad_norm": 0.5758692085806209, + "learning_rate": 2.403018055225178e-06, + "loss": 0.2856540083885193, + "memory(GiB)": 77.0, + "step": 5032, + "token_acc": 0.9183471074380165, + "train_speed(iter/s)": 0.642512 + }, + { + "epoch": 1.61056, + "grad_norm": 0.7352909802643933, + "learning_rate": 2.4021368497614216e-06, + "loss": 0.3873307704925537, + "memory(GiB)": 77.0, + "step": 5033, + "token_acc": 0.835663478047584, + "train_speed(iter/s)": 0.642319 + }, + { + "epoch": 1.6108799999999999, + "grad_norm": 0.6544956647891083, + "learning_rate": 2.4012556564750367e-06, + "loss": 0.2777089774608612, + "memory(GiB)": 77.0, + "step": 5034, + "token_acc": 0.9032258064516129, + "train_speed(iter/s)": 0.642123 + }, + { + "epoch": 1.6112, + "grad_norm": 0.6112841967264142, + "learning_rate": 2.4003744754756723e-06, + "loss": 0.23993629217147827, + "memory(GiB)": 77.0, + "step": 5035, + "token_acc": 0.884543061301241, + "train_speed(iter/s)": 0.641933 + }, + { + "epoch": 1.61152, + "grad_norm": 0.6627255474733472, + "learning_rate": 2.3994933068729764e-06, + "loss": 0.2812083065509796, + "memory(GiB)": 77.0, + "step": 5036, + "token_acc": 0.9555707450444293, + "train_speed(iter/s)": 0.641726 + }, + { + "epoch": 1.61184, + "grad_norm": 0.6380262367016869, + "learning_rate": 2.398612150776595e-06, + "loss": 0.3554937541484833, + "memory(GiB)": 77.0, + "step": 5037, + "token_acc": 0.8666885568872107, + "train_speed(iter/s)": 0.64152 + }, + { + "epoch": 1.61216, + "grad_norm": 0.7070609410706986, + "learning_rate": 2.3977310072961726e-06, + "loss": 0.3428165316581726, + "memory(GiB)": 77.0, + "step": 5038, + "token_acc": 0.8597612958226769, + "train_speed(iter/s)": 0.641306 + }, + { + "epoch": 1.6124800000000001, + "grad_norm": 0.666862405164252, + "learning_rate": 2.396849876541352e-06, + "loss": 0.2612234652042389, + "memory(GiB)": 77.0, + "step": 5039, + "token_acc": 0.8658269441401971, + "train_speed(iter/s)": 0.641101 + }, + { + "epoch": 1.6128, + "grad_norm": 0.6129021925002915, + "learning_rate": 2.3959687586217747e-06, + "loss": 0.3421458601951599, + "memory(GiB)": 77.0, + "step": 5040, + "token_acc": 0.902246043899949, + "train_speed(iter/s)": 0.640882 + }, + { + "epoch": 1.6131199999999999, + "grad_norm": 0.6823007446796429, + "learning_rate": 2.395087653647081e-06, + "loss": 0.34878623485565186, + "memory(GiB)": 77.0, + "step": 5041, + 
"token_acc": 0.8886370993849142, + "train_speed(iter/s)": 0.640684 + }, + { + "epoch": 1.61344, + "grad_norm": 0.6817121516107325, + "learning_rate": 2.3942065617269083e-06, + "loss": 0.41226160526275635, + "memory(GiB)": 77.0, + "step": 5042, + "token_acc": 0.8786844554762644, + "train_speed(iter/s)": 0.64048 + }, + { + "epoch": 1.61376, + "grad_norm": 0.64441034135909, + "learning_rate": 2.3933254829708945e-06, + "loss": 0.3187774419784546, + "memory(GiB)": 77.0, + "step": 5043, + "token_acc": 0.9028741328047571, + "train_speed(iter/s)": 0.640279 + }, + { + "epoch": 1.61408, + "grad_norm": 0.6176871697417675, + "learning_rate": 2.3924444174886735e-06, + "loss": 0.2524464726448059, + "memory(GiB)": 77.0, + "step": 5044, + "token_acc": 0.9279717189571366, + "train_speed(iter/s)": 0.640091 + }, + { + "epoch": 1.6143999999999998, + "grad_norm": 0.6463658432942487, + "learning_rate": 2.3915633653898805e-06, + "loss": 0.3112378716468811, + "memory(GiB)": 77.0, + "step": 5045, + "token_acc": 0.9451579312738633, + "train_speed(iter/s)": 0.639901 + }, + { + "epoch": 1.6147200000000002, + "grad_norm": 0.6791940769261429, + "learning_rate": 2.390682326784144e-06, + "loss": 0.37995973229408264, + "memory(GiB)": 77.0, + "step": 5046, + "token_acc": 0.8907501820830298, + "train_speed(iter/s)": 0.639698 + }, + { + "epoch": 1.61504, + "grad_norm": 0.6502407408878649, + "learning_rate": 2.389801301781095e-06, + "loss": 0.2602810859680176, + "memory(GiB)": 77.0, + "step": 5047, + "token_acc": 0.8935599284436494, + "train_speed(iter/s)": 0.639508 + }, + { + "epoch": 1.61536, + "grad_norm": 0.6844226162545808, + "learning_rate": 2.388920290490363e-06, + "loss": 0.3620492219924927, + "memory(GiB)": 77.0, + "step": 5048, + "token_acc": 0.8737758433079434, + "train_speed(iter/s)": 0.639317 + }, + { + "epoch": 1.61568, + "grad_norm": 0.6687732975697156, + "learning_rate": 2.388039293021574e-06, + "loss": 0.3564148545265198, + "memory(GiB)": 77.0, + "step": 5049, + "token_acc": 0.818716577540107, + "train_speed(iter/s)": 0.6391 + }, + { + "epoch": 1.616, + "grad_norm": 0.6369622921624876, + "learning_rate": 2.387158309484353e-06, + "loss": 0.3465510606765747, + "memory(GiB)": 77.0, + "step": 5050, + "token_acc": 0.9109178164367127, + "train_speed(iter/s)": 0.638897 + }, + { + "epoch": 1.61632, + "grad_norm": 0.6125152031526415, + "learning_rate": 2.3862773399883223e-06, + "loss": 0.286571741104126, + "memory(GiB)": 77.0, + "step": 5051, + "token_acc": 0.9442671771963412, + "train_speed(iter/s)": 0.638701 + }, + { + "epoch": 1.6166399999999999, + "grad_norm": 0.6329699265153121, + "learning_rate": 2.3853963846431043e-06, + "loss": 0.31858307123184204, + "memory(GiB)": 77.0, + "step": 5052, + "token_acc": 0.9004102946661693, + "train_speed(iter/s)": 0.638494 + }, + { + "epoch": 1.61696, + "grad_norm": 0.6396339999573106, + "learning_rate": 2.384515443558318e-06, + "loss": 0.3259110450744629, + "memory(GiB)": 77.0, + "step": 5053, + "token_acc": 0.9289129741791521, + "train_speed(iter/s)": 0.638289 + }, + { + "epoch": 1.61728, + "grad_norm": 0.6714419416895512, + "learning_rate": 2.383634516843581e-06, + "loss": 0.283677875995636, + "memory(GiB)": 77.0, + "step": 5054, + "token_acc": 0.927329044555635, + "train_speed(iter/s)": 0.6381 + }, + { + "epoch": 1.6176, + "grad_norm": 0.6155585395074309, + "learning_rate": 2.3827536046085103e-06, + "loss": 0.33386820554733276, + "memory(GiB)": 77.0, + "step": 5055, + "token_acc": 0.9282336578581363, + "train_speed(iter/s)": 0.637899 + }, + { + "epoch": 1.61792, + 
"grad_norm": 0.6409636322188818, + "learning_rate": 2.3818727069627193e-06, + "loss": 0.31042176485061646, + "memory(GiB)": 77.0, + "step": 5056, + "token_acc": 0.8962286249707191, + "train_speed(iter/s)": 0.637699 + }, + { + "epoch": 1.6182400000000001, + "grad_norm": 0.6605201421468299, + "learning_rate": 2.380991824015821e-06, + "loss": 0.3236912190914154, + "memory(GiB)": 77.0, + "step": 5057, + "token_acc": 0.9315028048420431, + "train_speed(iter/s)": 0.637509 + }, + { + "epoch": 1.61856, + "grad_norm": 0.6577430848816416, + "learning_rate": 2.380110955877425e-06, + "loss": 0.315612256526947, + "memory(GiB)": 77.0, + "step": 5058, + "token_acc": 0.9285908967020987, + "train_speed(iter/s)": 0.637316 + }, + { + "epoch": 1.6188799999999999, + "grad_norm": 0.6458835238623788, + "learning_rate": 2.3792301026571423e-06, + "loss": 0.3744494915008545, + "memory(GiB)": 77.0, + "step": 5059, + "token_acc": 0.8510773130544994, + "train_speed(iter/s)": 0.637105 + }, + { + "epoch": 1.6192, + "grad_norm": 0.6141044987684222, + "learning_rate": 2.3783492644645774e-06, + "loss": 0.2970215976238251, + "memory(GiB)": 77.0, + "step": 5060, + "token_acc": 0.8697081792026305, + "train_speed(iter/s)": 0.636896 + }, + { + "epoch": 1.61952, + "grad_norm": 0.7026282030029937, + "learning_rate": 2.3774684414093357e-06, + "loss": 0.30569732189178467, + "memory(GiB)": 77.0, + "step": 5061, + "token_acc": 0.8472987872105844, + "train_speed(iter/s)": 0.636695 + }, + { + "epoch": 1.61984, + "grad_norm": 0.6236352259223328, + "learning_rate": 2.3765876336010213e-06, + "loss": 0.29205748438835144, + "memory(GiB)": 77.0, + "step": 5062, + "token_acc": 0.9295665634674922, + "train_speed(iter/s)": 0.636479 + }, + { + "epoch": 1.62016, + "grad_norm": 0.7865620351306388, + "learning_rate": 2.375706841149235e-06, + "loss": 0.3228900134563446, + "memory(GiB)": 77.0, + "step": 5063, + "token_acc": 0.9324939726761318, + "train_speed(iter/s)": 0.636278 + }, + { + "epoch": 1.6204800000000001, + "grad_norm": 0.5955328118580778, + "learning_rate": 2.374826064163576e-06, + "loss": 0.318695604801178, + "memory(GiB)": 77.0, + "step": 5064, + "token_acc": 0.9038624787775892, + "train_speed(iter/s)": 0.636055 + }, + { + "epoch": 1.6208, + "grad_norm": 0.6506468183097499, + "learning_rate": 2.3739453027536417e-06, + "loss": 0.3544495105743408, + "memory(GiB)": 77.0, + "step": 5065, + "token_acc": 0.9455943228858663, + "train_speed(iter/s)": 0.635817 + }, + { + "epoch": 1.62112, + "grad_norm": 0.688399832824953, + "learning_rate": 2.3730645570290277e-06, + "loss": 0.3193522095680237, + "memory(GiB)": 77.0, + "step": 5066, + "token_acc": 0.8778113460892917, + "train_speed(iter/s)": 0.635634 + }, + { + "epoch": 1.62144, + "grad_norm": 0.6090166622445867, + "learning_rate": 2.372183827099328e-06, + "loss": 0.28019022941589355, + "memory(GiB)": 77.0, + "step": 5067, + "token_acc": 0.908933432318733, + "train_speed(iter/s)": 0.635422 + }, + { + "epoch": 1.62176, + "grad_norm": 0.6971657861439182, + "learning_rate": 2.371303113074134e-06, + "loss": 0.3602087199687958, + "memory(GiB)": 77.0, + "step": 5068, + "token_acc": 0.9520282186948854, + "train_speed(iter/s)": 0.635222 + }, + { + "epoch": 1.62208, + "grad_norm": 0.6997471862057153, + "learning_rate": 2.3704224150630344e-06, + "loss": 0.419990211725235, + "memory(GiB)": 77.0, + "step": 5069, + "token_acc": 0.8883111010626272, + "train_speed(iter/s)": 0.635018 + }, + { + "epoch": 1.6223999999999998, + "grad_norm": 0.706427083484973, + "learning_rate": 2.3695417331756175e-06, + "loss": 
0.4451521039009094, + "memory(GiB)": 77.0, + "step": 5070, + "token_acc": 0.8523797403919572, + "train_speed(iter/s)": 0.634812 + }, + { + "epoch": 1.6227200000000002, + "grad_norm": 0.7018734284271199, + "learning_rate": 2.3686610675214693e-06, + "loss": 0.3578057587146759, + "memory(GiB)": 77.0, + "step": 5071, + "token_acc": 0.949898442789438, + "train_speed(iter/s)": 0.634597 + }, + { + "epoch": 1.62304, + "grad_norm": 0.6735971877286238, + "learning_rate": 2.3677804182101725e-06, + "loss": 0.41400301456451416, + "memory(GiB)": 77.0, + "step": 5072, + "token_acc": 0.8718148725949038, + "train_speed(iter/s)": 0.634393 + }, + { + "epoch": 1.62336, + "grad_norm": 0.6731118157392085, + "learning_rate": 2.3668997853513096e-06, + "loss": 0.40028494596481323, + "memory(GiB)": 77.0, + "step": 5073, + "token_acc": 0.9467548834278513, + "train_speed(iter/s)": 0.634143 + }, + { + "epoch": 1.62368, + "grad_norm": 0.6579341845041653, + "learning_rate": 2.3660191690544586e-06, + "loss": 0.3750975430011749, + "memory(GiB)": 77.0, + "step": 5074, + "token_acc": 0.8366235534377128, + "train_speed(iter/s)": 0.633936 + }, + { + "epoch": 1.624, + "grad_norm": 0.6545940386351567, + "learning_rate": 2.3651385694291984e-06, + "loss": 0.40499889850616455, + "memory(GiB)": 77.0, + "step": 5075, + "token_acc": 0.9140873589817761, + "train_speed(iter/s)": 0.63372 + }, + { + "epoch": 1.62432, + "grad_norm": 0.6725115097047734, + "learning_rate": 2.3642579865851035e-06, + "loss": 0.3264042139053345, + "memory(GiB)": 77.0, + "step": 5076, + "token_acc": 0.9191063174114021, + "train_speed(iter/s)": 0.633524 + }, + { + "epoch": 1.6246399999999999, + "grad_norm": 0.6160209432103456, + "learning_rate": 2.363377420631747e-06, + "loss": 0.2868953347206116, + "memory(GiB)": 77.0, + "step": 5077, + "token_acc": 0.909148665819568, + "train_speed(iter/s)": 0.633292 + }, + { + "epoch": 1.62496, + "grad_norm": 0.6492005041791976, + "learning_rate": 2.362496871678701e-06, + "loss": 0.37829074263572693, + "memory(GiB)": 77.0, + "step": 5078, + "token_acc": 0.9141055949566588, + "train_speed(iter/s)": 0.633083 + }, + { + "epoch": 1.62528, + "grad_norm": 0.668115340874208, + "learning_rate": 2.3616163398355337e-06, + "loss": 0.35400646924972534, + "memory(GiB)": 77.0, + "step": 5079, + "token_acc": 0.8777514392143583, + "train_speed(iter/s)": 0.632891 + }, + { + "epoch": 1.6256, + "grad_norm": 0.6690971632856043, + "learning_rate": 2.3607358252118124e-06, + "loss": 0.32906556129455566, + "memory(GiB)": 77.0, + "step": 5080, + "token_acc": 0.790590641779596, + "train_speed(iter/s)": 0.632706 + }, + { + "epoch": 1.62592, + "grad_norm": 0.6905122749314343, + "learning_rate": 2.3598553279171015e-06, + "loss": 0.31879499554634094, + "memory(GiB)": 77.0, + "step": 5081, + "token_acc": 0.9260094743965712, + "train_speed(iter/s)": 0.632513 + }, + { + "epoch": 1.6262400000000001, + "grad_norm": 0.6271735726209955, + "learning_rate": 2.358974848060964e-06, + "loss": 0.23928630352020264, + "memory(GiB)": 77.0, + "step": 5082, + "token_acc": 0.9247367069098382, + "train_speed(iter/s)": 0.632322 + }, + { + "epoch": 1.62656, + "grad_norm": 0.7294016049618232, + "learning_rate": 2.3580943857529604e-06, + "loss": 0.44575032591819763, + "memory(GiB)": 77.0, + "step": 5083, + "token_acc": 0.8630447085455575, + "train_speed(iter/s)": 0.632132 + }, + { + "epoch": 1.6268799999999999, + "grad_norm": 0.6450327548339279, + "learning_rate": 2.3572139411026487e-06, + "loss": 0.29711490869522095, + "memory(GiB)": 77.0, + "step": 5084, + "token_acc": 
0.9090067145172493, + "train_speed(iter/s)": 0.631946 + }, + { + "epoch": 1.6272, + "grad_norm": 0.6579724637954387, + "learning_rate": 2.3563335142195853e-06, + "loss": 0.33458253741264343, + "memory(GiB)": 77.0, + "step": 5085, + "token_acc": 0.9281345565749235, + "train_speed(iter/s)": 0.631751 + }, + { + "epoch": 1.62752, + "grad_norm": 0.6064582296085835, + "learning_rate": 2.3554531052133233e-06, + "loss": 0.29135042428970337, + "memory(GiB)": 77.0, + "step": 5086, + "token_acc": 0.910443864229765, + "train_speed(iter/s)": 0.631537 + }, + { + "epoch": 1.62784, + "grad_norm": 0.6673743397786488, + "learning_rate": 2.3545727141934155e-06, + "loss": 0.294382244348526, + "memory(GiB)": 77.0, + "step": 5087, + "token_acc": 0.9102564102564102, + "train_speed(iter/s)": 0.631346 + }, + { + "epoch": 1.62816, + "grad_norm": 0.6357682902612839, + "learning_rate": 2.3536923412694108e-06, + "loss": 0.38522058725357056, + "memory(GiB)": 77.0, + "step": 5088, + "token_acc": 0.8978149100257069, + "train_speed(iter/s)": 0.631156 + }, + { + "epoch": 1.6284800000000001, + "grad_norm": 0.6668297010207153, + "learning_rate": 2.3528119865508554e-06, + "loss": 0.3444172143936157, + "memory(GiB)": 77.0, + "step": 5089, + "token_acc": 0.8980870712401056, + "train_speed(iter/s)": 0.630958 + }, + { + "epoch": 1.6288, + "grad_norm": 0.6432262495479774, + "learning_rate": 2.3519316501472953e-06, + "loss": 0.3798764944076538, + "memory(GiB)": 77.0, + "step": 5090, + "token_acc": 0.9278714229713838, + "train_speed(iter/s)": 0.630773 + }, + { + "epoch": 1.62912, + "grad_norm": 0.6519201046541391, + "learning_rate": 2.3510513321682727e-06, + "loss": 0.3216099739074707, + "memory(GiB)": 77.0, + "step": 5091, + "token_acc": 0.8969072164948454, + "train_speed(iter/s)": 0.630587 + }, + { + "epoch": 1.62944, + "grad_norm": 0.6667899303365938, + "learning_rate": 2.3501710327233284e-06, + "loss": 0.29019755125045776, + "memory(GiB)": 77.0, + "step": 5092, + "token_acc": 0.8641020684322444, + "train_speed(iter/s)": 0.630407 + }, + { + "epoch": 1.62976, + "grad_norm": 0.6837477674114211, + "learning_rate": 2.3492907519219993e-06, + "loss": 0.3650609254837036, + "memory(GiB)": 77.0, + "step": 5093, + "token_acc": 0.9254555494202098, + "train_speed(iter/s)": 0.630199 + }, + { + "epoch": 1.63008, + "grad_norm": 0.630877268197519, + "learning_rate": 2.3484104898738217e-06, + "loss": 0.30023348331451416, + "memory(GiB)": 77.0, + "step": 5094, + "token_acc": 0.8471094123268036, + "train_speed(iter/s)": 0.630009 + }, + { + "epoch": 1.6303999999999998, + "grad_norm": 0.6576856497320385, + "learning_rate": 2.3475302466883293e-06, + "loss": 0.3745628595352173, + "memory(GiB)": 77.0, + "step": 5095, + "token_acc": 0.896594274432379, + "train_speed(iter/s)": 0.629813 + }, + { + "epoch": 1.63072, + "grad_norm": 0.7379850149077893, + "learning_rate": 2.3466500224750526e-06, + "loss": 0.3709903955459595, + "memory(GiB)": 77.0, + "step": 5096, + "token_acc": 0.9109572690379566, + "train_speed(iter/s)": 0.629631 + }, + { + "epoch": 1.63104, + "grad_norm": 0.6870977689253704, + "learning_rate": 2.3457698173435203e-06, + "loss": 0.30731791257858276, + "memory(GiB)": 77.0, + "step": 5097, + "token_acc": 0.9190848214285714, + "train_speed(iter/s)": 0.629442 + }, + { + "epoch": 1.63136, + "grad_norm": 0.6431188406496685, + "learning_rate": 2.3448896314032584e-06, + "loss": 0.2744068503379822, + "memory(GiB)": 77.0, + "step": 5098, + "token_acc": 0.9217209441290708, + "train_speed(iter/s)": 0.629252 + }, + { + "epoch": 1.63168, + "grad_norm": 
0.6663470619732924, + "learning_rate": 2.344009464763791e-06, + "loss": 0.32715904712677, + "memory(GiB)": 77.0, + "step": 5099, + "token_acc": 0.8917813313499442, + "train_speed(iter/s)": 0.629066 + }, + { + "epoch": 1.6320000000000001, + "grad_norm": 0.6576849716682495, + "learning_rate": 2.3431293175346404e-06, + "loss": 0.36807936429977417, + "memory(GiB)": 77.0, + "step": 5100, + "token_acc": 0.8437719915552427, + "train_speed(iter/s)": 0.628889 + }, + { + "epoch": 1.63232, + "grad_norm": 0.6676256469507359, + "learning_rate": 2.3422491898253246e-06, + "loss": 0.3730875849723816, + "memory(GiB)": 77.0, + "step": 5101, + "token_acc": 0.8942519527338274, + "train_speed(iter/s)": 0.628692 + }, + { + "epoch": 1.6326399999999999, + "grad_norm": 0.7018386522660207, + "learning_rate": 2.341369081745362e-06, + "loss": 0.4271743595600128, + "memory(GiB)": 77.0, + "step": 5102, + "token_acc": 0.8652897657213316, + "train_speed(iter/s)": 0.628511 + }, + { + "epoch": 1.63296, + "grad_norm": 0.646035266501876, + "learning_rate": 2.340488993404264e-06, + "loss": 0.32163816690444946, + "memory(GiB)": 77.0, + "step": 5103, + "token_acc": 0.8820224719101124, + "train_speed(iter/s)": 0.628317 + }, + { + "epoch": 1.63328, + "grad_norm": 0.6703904048709581, + "learning_rate": 2.339608924911543e-06, + "loss": 0.3422986567020416, + "memory(GiB)": 77.0, + "step": 5104, + "token_acc": 0.8947791164658635, + "train_speed(iter/s)": 0.628133 + }, + { + "epoch": 1.6336, + "grad_norm": 0.6219263826396062, + "learning_rate": 2.3387288763767097e-06, + "loss": 0.34240055084228516, + "memory(GiB)": 77.0, + "step": 5105, + "token_acc": 0.9541432019308126, + "train_speed(iter/s)": 0.627935 + }, + { + "epoch": 1.63392, + "grad_norm": 0.6650893789066403, + "learning_rate": 2.3378488479092697e-06, + "loss": 0.31411677598953247, + "memory(GiB)": 77.0, + "step": 5106, + "token_acc": 0.9225319926873857, + "train_speed(iter/s)": 0.627758 + }, + { + "epoch": 1.6342400000000001, + "grad_norm": 0.6511009792888531, + "learning_rate": 2.3369688396187283e-06, + "loss": 0.31013667583465576, + "memory(GiB)": 77.0, + "step": 5107, + "token_acc": 0.8700709219858156, + "train_speed(iter/s)": 0.627584 + }, + { + "epoch": 1.63456, + "grad_norm": 0.6587682954355568, + "learning_rate": 2.3360888516145862e-06, + "loss": 0.3434988856315613, + "memory(GiB)": 77.0, + "step": 5108, + "token_acc": 0.7965783923131006, + "train_speed(iter/s)": 0.627386 + }, + { + "epoch": 1.6348799999999999, + "grad_norm": 0.6066525924032543, + "learning_rate": 2.335208884006343e-06, + "loss": 0.26681673526763916, + "memory(GiB)": 77.0, + "step": 5109, + "token_acc": 0.9205420388050508, + "train_speed(iter/s)": 0.627211 + }, + { + "epoch": 1.6352, + "grad_norm": 0.6571986999991264, + "learning_rate": 2.334328936903496e-06, + "loss": 0.3973240554332733, + "memory(GiB)": 77.0, + "step": 5110, + "token_acc": 0.9049943672549756, + "train_speed(iter/s)": 0.62701 + }, + { + "epoch": 1.63552, + "grad_norm": 0.6672913875792965, + "learning_rate": 2.3334490104155384e-06, + "loss": 0.2764206826686859, + "memory(GiB)": 77.0, + "step": 5111, + "token_acc": 0.8746274587721041, + "train_speed(iter/s)": 0.626835 + }, + { + "epoch": 1.63584, + "grad_norm": 0.6358701041808691, + "learning_rate": 2.3325691046519626e-06, + "loss": 0.26569655537605286, + "memory(GiB)": 77.0, + "step": 5112, + "token_acc": 0.8894208846329229, + "train_speed(iter/s)": 0.626634 + }, + { + "epoch": 1.6361599999999998, + "grad_norm": 0.6881105711833478, + "learning_rate": 2.331689219722257e-06, + "loss": 
0.3256252408027649, + "memory(GiB)": 77.0, + "step": 5113, + "token_acc": 0.890983188996434, + "train_speed(iter/s)": 0.626443 + }, + { + "epoch": 1.6364800000000002, + "grad_norm": 0.6896311311613407, + "learning_rate": 2.330809355735908e-06, + "loss": 0.30629783868789673, + "memory(GiB)": 77.0, + "step": 5114, + "token_acc": 0.8921875, + "train_speed(iter/s)": 0.626237 + }, + { + "epoch": 1.6368, + "grad_norm": 0.5990306500052999, + "learning_rate": 2.3299295128023992e-06, + "loss": 0.3363553285598755, + "memory(GiB)": 77.0, + "step": 5115, + "token_acc": 0.9088669950738916, + "train_speed(iter/s)": 0.62604 + }, + { + "epoch": 1.63712, + "grad_norm": 0.6341047052090452, + "learning_rate": 2.329049691031214e-06, + "loss": 0.25422537326812744, + "memory(GiB)": 77.0, + "step": 5116, + "token_acc": 0.9185287040220007, + "train_speed(iter/s)": 0.625861 + }, + { + "epoch": 1.63744, + "grad_norm": 0.6340593263936323, + "learning_rate": 2.3281698905318285e-06, + "loss": 0.37601298093795776, + "memory(GiB)": 77.0, + "step": 5117, + "token_acc": 0.9112794157424939, + "train_speed(iter/s)": 0.625652 + }, + { + "epoch": 1.63776, + "grad_norm": 0.6725435617090726, + "learning_rate": 2.327290111413719e-06, + "loss": 0.2626552879810333, + "memory(GiB)": 77.0, + "step": 5118, + "token_acc": 0.9217570350034318, + "train_speed(iter/s)": 0.625465 + }, + { + "epoch": 1.63808, + "grad_norm": 0.6484075337623617, + "learning_rate": 2.3264103537863588e-06, + "loss": 0.3369981050491333, + "memory(GiB)": 77.0, + "step": 5119, + "token_acc": 0.9358802502234138, + "train_speed(iter/s)": 0.62529 + }, + { + "epoch": 1.6383999999999999, + "grad_norm": 0.7284707210497386, + "learning_rate": 2.325530617759219e-06, + "loss": 0.3683661222457886, + "memory(GiB)": 77.0, + "step": 5120, + "token_acc": 0.9038314176245211, + "train_speed(iter/s)": 0.625109 + }, + { + "epoch": 1.63872, + "grad_norm": 0.6152861737962898, + "learning_rate": 2.3246509034417665e-06, + "loss": 0.291446715593338, + "memory(GiB)": 77.0, + "step": 5121, + "token_acc": 0.920111042566317, + "train_speed(iter/s)": 0.624926 + }, + { + "epoch": 1.63904, + "grad_norm": 0.6864847444071531, + "learning_rate": 2.3237712109434677e-06, + "loss": 0.35403043031692505, + "memory(GiB)": 77.0, + "step": 5122, + "token_acc": 0.8614709569577935, + "train_speed(iter/s)": 0.62472 + }, + { + "epoch": 1.63936, + "grad_norm": 0.7496095958077731, + "learning_rate": 2.322891540373785e-06, + "loss": 0.369326651096344, + "memory(GiB)": 77.0, + "step": 5123, + "token_acc": 0.8752061572292469, + "train_speed(iter/s)": 0.624546 + }, + { + "epoch": 1.63968, + "grad_norm": 0.746258038416662, + "learning_rate": 2.3220118918421778e-06, + "loss": 0.30424535274505615, + "memory(GiB)": 77.0, + "step": 5124, + "token_acc": 0.8715162966461975, + "train_speed(iter/s)": 0.624367 + }, + { + "epoch": 1.6400000000000001, + "grad_norm": 0.6321430402317176, + "learning_rate": 2.3211322654581027e-06, + "loss": 0.3043535649776459, + "memory(GiB)": 77.0, + "step": 5125, + "token_acc": 0.9427083333333334, + "train_speed(iter/s)": 0.624189 + }, + { + "epoch": 1.64032, + "grad_norm": 0.7286884210693366, + "learning_rate": 2.3202526613310143e-06, + "loss": 0.33698663115501404, + "memory(GiB)": 77.0, + "step": 5126, + "token_acc": 0.8727114210985178, + "train_speed(iter/s)": 0.624007 + }, + { + "epoch": 1.6406399999999999, + "grad_norm": 0.6596860650107984, + "learning_rate": 2.319373079570364e-06, + "loss": 0.36867764592170715, + "memory(GiB)": 77.0, + "step": 5127, + "token_acc": 0.8706414692573862, + 
"train_speed(iter/s)": 0.623822 + }, + { + "epoch": 1.64096, + "grad_norm": 0.6179019806716088, + "learning_rate": 2.3184935202856006e-06, + "loss": 0.308704674243927, + "memory(GiB)": 77.0, + "step": 5128, + "token_acc": 0.8867026340844176, + "train_speed(iter/s)": 0.623627 + }, + { + "epoch": 1.64128, + "grad_norm": 0.6815529081793728, + "learning_rate": 2.31761398358617e-06, + "loss": 0.38918793201446533, + "memory(GiB)": 77.0, + "step": 5129, + "token_acc": 0.8955594145373357, + "train_speed(iter/s)": 0.623453 + }, + { + "epoch": 1.6416, + "grad_norm": 0.6215654402595002, + "learning_rate": 2.3167344695815154e-06, + "loss": 0.3551497459411621, + "memory(GiB)": 77.0, + "step": 5130, + "token_acc": 0.8398085585585585, + "train_speed(iter/s)": 0.623257 + }, + { + "epoch": 1.64192, + "grad_norm": 0.7151117658155436, + "learning_rate": 2.315854978381077e-06, + "loss": 0.37556692957878113, + "memory(GiB)": 77.0, + "step": 5131, + "token_acc": 0.8868686868686869, + "train_speed(iter/s)": 0.623049 + }, + { + "epoch": 1.6422400000000001, + "grad_norm": 0.7214249829533711, + "learning_rate": 2.3149755100942915e-06, + "loss": 0.3943411707878113, + "memory(GiB)": 77.0, + "step": 5132, + "token_acc": 0.8978562421185372, + "train_speed(iter/s)": 0.622862 + }, + { + "epoch": 1.64256, + "grad_norm": 0.6747611474456384, + "learning_rate": 2.3140960648305947e-06, + "loss": 0.4812901020050049, + "memory(GiB)": 77.0, + "step": 5133, + "token_acc": 0.8430907172995781, + "train_speed(iter/s)": 0.622675 + }, + { + "epoch": 1.64288, + "grad_norm": 0.7204897900740311, + "learning_rate": 2.3132166426994173e-06, + "loss": 0.42274656891822815, + "memory(GiB)": 77.0, + "step": 5134, + "token_acc": 0.8518518518518519, + "train_speed(iter/s)": 0.622496 + }, + { + "epoch": 1.6432, + "grad_norm": 0.6201334282981317, + "learning_rate": 2.3123372438101887e-06, + "loss": 0.31117188930511475, + "memory(GiB)": 77.0, + "step": 5135, + "token_acc": 0.9217577706323687, + "train_speed(iter/s)": 0.622314 + }, + { + "epoch": 1.64352, + "grad_norm": 0.6582192382072903, + "learning_rate": 2.311457868272334e-06, + "loss": 0.36946678161621094, + "memory(GiB)": 77.0, + "step": 5136, + "token_acc": 0.861986301369863, + "train_speed(iter/s)": 0.622131 + }, + { + "epoch": 1.64384, + "grad_norm": 0.7352395935422352, + "learning_rate": 2.3105785161952775e-06, + "loss": 0.3550911545753479, + "memory(GiB)": 77.0, + "step": 5137, + "token_acc": 0.8792908336476801, + "train_speed(iter/s)": 0.621944 + }, + { + "epoch": 1.6441599999999998, + "grad_norm": 0.6732133160835369, + "learning_rate": 2.3096991876884377e-06, + "loss": 0.3657287657260895, + "memory(GiB)": 77.0, + "step": 5138, + "token_acc": 0.8065077431703498, + "train_speed(iter/s)": 0.621757 + }, + { + "epoch": 1.6444800000000002, + "grad_norm": 0.6704318504142946, + "learning_rate": 2.3088198828612336e-06, + "loss": 0.3333240747451782, + "memory(GiB)": 77.0, + "step": 5139, + "token_acc": 0.9538973384030418, + "train_speed(iter/s)": 0.621574 + }, + { + "epoch": 1.6448, + "grad_norm": 0.5739114873505187, + "learning_rate": 2.3079406018230786e-06, + "loss": 0.2352769374847412, + "memory(GiB)": 77.0, + "step": 5140, + "token_acc": 0.9359104781281791, + "train_speed(iter/s)": 0.621378 + }, + { + "epoch": 1.64512, + "grad_norm": 0.6262007381574655, + "learning_rate": 2.3070613446833843e-06, + "loss": 0.3861508071422577, + "memory(GiB)": 77.0, + "step": 5141, + "token_acc": 0.8633248482475034, + "train_speed(iter/s)": 0.62116 + }, + { + "epoch": 1.64544, + "grad_norm": 0.6248294505797785, 
+ "learning_rate": 2.3061821115515583e-06, + "loss": 0.2775024175643921, + "memory(GiB)": 77.0, + "step": 5142, + "token_acc": 0.8932725558000629, + "train_speed(iter/s)": 0.620968 + }, + { + "epoch": 1.6457600000000001, + "grad_norm": 0.6387952951503022, + "learning_rate": 2.305302902537006e-06, + "loss": 0.3772600293159485, + "memory(GiB)": 77.0, + "step": 5143, + "token_acc": 0.8564837299660029, + "train_speed(iter/s)": 0.62077 + }, + { + "epoch": 1.64608, + "grad_norm": 0.6522055016322709, + "learning_rate": 2.3044237177491306e-06, + "loss": 0.36378413438796997, + "memory(GiB)": 77.0, + "step": 5144, + "token_acc": 0.8990112994350282, + "train_speed(iter/s)": 0.620579 + }, + { + "epoch": 1.6463999999999999, + "grad_norm": 0.6111761545385124, + "learning_rate": 2.303544557297331e-06, + "loss": 0.2848542630672455, + "memory(GiB)": 77.0, + "step": 5145, + "token_acc": 0.8262322472848789, + "train_speed(iter/s)": 0.620394 + }, + { + "epoch": 1.64672, + "grad_norm": 0.6016158934807958, + "learning_rate": 2.3026654212910026e-06, + "loss": 0.2983163297176361, + "memory(GiB)": 77.0, + "step": 5146, + "token_acc": 0.8816864295125165, + "train_speed(iter/s)": 0.620203 + }, + { + "epoch": 1.64704, + "grad_norm": 0.649564771182049, + "learning_rate": 2.301786309839539e-06, + "loss": 0.32130035758018494, + "memory(GiB)": 77.0, + "step": 5147, + "token_acc": 0.930834059866318, + "train_speed(iter/s)": 0.619997 + }, + { + "epoch": 1.64736, + "grad_norm": 0.6216726845655102, + "learning_rate": 2.3009072230523317e-06, + "loss": 0.37713509798049927, + "memory(GiB)": 77.0, + "step": 5148, + "token_acc": 0.9287290005090786, + "train_speed(iter/s)": 0.619808 + }, + { + "epoch": 1.64768, + "grad_norm": 0.6617068481824072, + "learning_rate": 2.3000281610387657e-06, + "loss": 0.334390252828598, + "memory(GiB)": 77.0, + "step": 5149, + "token_acc": 0.8591362126245847, + "train_speed(iter/s)": 0.619634 + }, + { + "epoch": 1.6480000000000001, + "grad_norm": 0.6636562904058182, + "learning_rate": 2.2991491239082266e-06, + "loss": 0.38882976770401, + "memory(GiB)": 77.0, + "step": 5150, + "token_acc": 0.8600836281762625, + "train_speed(iter/s)": 0.61946 + }, + { + "epoch": 1.64832, + "grad_norm": 0.6370376730279577, + "learning_rate": 2.2982701117700946e-06, + "loss": 0.3439595401287079, + "memory(GiB)": 77.0, + "step": 5151, + "token_acc": 0.8482431558525544, + "train_speed(iter/s)": 0.619281 + }, + { + "epoch": 1.6486399999999999, + "grad_norm": 0.6306317325663797, + "learning_rate": 2.297391124733748e-06, + "loss": 0.29953092336654663, + "memory(GiB)": 77.0, + "step": 5152, + "token_acc": 0.9285266457680251, + "train_speed(iter/s)": 0.619073 + }, + { + "epoch": 1.64896, + "grad_norm": 0.6716347884043274, + "learning_rate": 2.296512162908561e-06, + "loss": 0.40036559104919434, + "memory(GiB)": 77.0, + "step": 5153, + "token_acc": 0.9040705949331056, + "train_speed(iter/s)": 0.618883 + }, + { + "epoch": 1.64928, + "grad_norm": 0.5523124353278531, + "learning_rate": 2.2956332264039053e-06, + "loss": 0.315411239862442, + "memory(GiB)": 77.0, + "step": 5154, + "token_acc": 0.9040325980047773, + "train_speed(iter/s)": 0.618669 + }, + { + "epoch": 1.6496, + "grad_norm": 0.652165799634818, + "learning_rate": 2.2947543153291485e-06, + "loss": 0.321357786655426, + "memory(GiB)": 77.0, + "step": 5155, + "token_acc": 0.941066417212348, + "train_speed(iter/s)": 0.618491 + }, + { + "epoch": 1.64992, + "grad_norm": 0.6511488830409636, + "learning_rate": 2.293875429793658e-06, + "loss": 0.30505266785621643, + "memory(GiB)": 
77.0, + "step": 5156, + "token_acc": 0.8709677419354839, + "train_speed(iter/s)": 0.618283 + }, + { + "epoch": 1.6502400000000002, + "grad_norm": 0.6342815111167449, + "learning_rate": 2.2929965699067937e-06, + "loss": 0.37118208408355713, + "memory(GiB)": 77.0, + "step": 5157, + "token_acc": 0.8984023964053919, + "train_speed(iter/s)": 0.618077 + }, + { + "epoch": 1.65056, + "grad_norm": 0.7367777345771289, + "learning_rate": 2.2921177357779156e-06, + "loss": 0.3170279562473297, + "memory(GiB)": 77.0, + "step": 5158, + "token_acc": 0.8571428571428571, + "train_speed(iter/s)": 0.617895 + }, + { + "epoch": 1.65088, + "grad_norm": 0.6585674710981233, + "learning_rate": 2.291238927516379e-06, + "loss": 0.3150582015514374, + "memory(GiB)": 77.0, + "step": 5159, + "token_acc": 0.9250269687162891, + "train_speed(iter/s)": 0.617713 + }, + { + "epoch": 1.6512, + "grad_norm": 0.6066558823660834, + "learning_rate": 2.2903601452315376e-06, + "loss": 0.38516396284103394, + "memory(GiB)": 77.0, + "step": 5160, + "token_acc": 0.8586080586080586, + "train_speed(iter/s)": 0.617506 + }, + { + "epoch": 1.65152, + "grad_norm": 0.6295608373569462, + "learning_rate": 2.289481389032738e-06, + "loss": 0.35507404804229736, + "memory(GiB)": 77.0, + "step": 5161, + "token_acc": 0.8783850320039389, + "train_speed(iter/s)": 0.617289 + }, + { + "epoch": 1.65184, + "grad_norm": 0.5698769391506997, + "learning_rate": 2.2886026590293277e-06, + "loss": 0.219665065407753, + "memory(GiB)": 77.0, + "step": 5162, + "token_acc": 0.9430176565008026, + "train_speed(iter/s)": 0.617115 + }, + { + "epoch": 1.6521599999999999, + "grad_norm": 0.662052224822367, + "learning_rate": 2.28772395533065e-06, + "loss": 0.34341734647750854, + "memory(GiB)": 77.0, + "step": 5163, + "token_acc": 0.9181208053691275, + "train_speed(iter/s)": 0.616932 + }, + { + "epoch": 1.65248, + "grad_norm": 0.6335443679534265, + "learning_rate": 2.2868452780460426e-06, + "loss": 0.3025810420513153, + "memory(GiB)": 77.0, + "step": 5164, + "token_acc": 0.9596398633964607, + "train_speed(iter/s)": 0.616761 + }, + { + "epoch": 1.6528, + "grad_norm": 0.6432161193561009, + "learning_rate": 2.285966627284843e-06, + "loss": 0.3139094114303589, + "memory(GiB)": 77.0, + "step": 5165, + "token_acc": 0.9342391304347826, + "train_speed(iter/s)": 0.616565 + }, + { + "epoch": 1.65312, + "grad_norm": 0.5898977139117649, + "learning_rate": 2.2850880031563845e-06, + "loss": 0.3491392731666565, + "memory(GiB)": 77.0, + "step": 5166, + "token_acc": 0.9492570787776843, + "train_speed(iter/s)": 0.616378 + }, + { + "epoch": 1.65344, + "grad_norm": 0.690115322174027, + "learning_rate": 2.2842094057699947e-06, + "loss": 0.30811595916748047, + "memory(GiB)": 77.0, + "step": 5167, + "token_acc": 0.9195668986852281, + "train_speed(iter/s)": 0.616199 + }, + { + "epoch": 1.6537600000000001, + "grad_norm": 0.6528450303900106, + "learning_rate": 2.2833308352350016e-06, + "loss": 0.32201582193374634, + "memory(GiB)": 77.0, + "step": 5168, + "token_acc": 0.9570747217806042, + "train_speed(iter/s)": 0.616013 + }, + { + "epoch": 1.65408, + "grad_norm": 0.6799257054389128, + "learning_rate": 2.2824522916607276e-06, + "loss": 0.3758696913719177, + "memory(GiB)": 77.0, + "step": 5169, + "token_acc": 0.9639361154044307, + "train_speed(iter/s)": 0.615825 + }, + { + "epoch": 1.6543999999999999, + "grad_norm": 0.670963447654602, + "learning_rate": 2.2815737751564916e-06, + "loss": 0.3879663050174713, + "memory(GiB)": 77.0, + "step": 5170, + "token_acc": 0.9299496644295302, + "train_speed(iter/s)": 
0.615638 + }, + { + "epoch": 1.65472, + "grad_norm": 0.6448566379340024, + "learning_rate": 2.28069528583161e-06, + "loss": 0.42777734994888306, + "memory(GiB)": 77.0, + "step": 5171, + "token_acc": 0.8703673200461209, + "train_speed(iter/s)": 0.615453 + }, + { + "epoch": 1.65504, + "grad_norm": 0.7097948812364534, + "learning_rate": 2.2798168237953956e-06, + "loss": 0.31418412923812866, + "memory(GiB)": 77.0, + "step": 5172, + "token_acc": 0.9315815567674764, + "train_speed(iter/s)": 0.615279 + }, + { + "epoch": 1.65536, + "grad_norm": 0.6360903771957085, + "learning_rate": 2.278938389157158e-06, + "loss": 0.2887929677963257, + "memory(GiB)": 77.0, + "step": 5173, + "token_acc": 0.9030992546096508, + "train_speed(iter/s)": 0.615103 + }, + { + "epoch": 1.65568, + "grad_norm": 0.6312702753753443, + "learning_rate": 2.2780599820262044e-06, + "loss": 0.31012842059135437, + "memory(GiB)": 77.0, + "step": 5174, + "token_acc": 0.8998099375509095, + "train_speed(iter/s)": 0.614923 + }, + { + "epoch": 1.6560000000000001, + "grad_norm": 0.6467319114372141, + "learning_rate": 2.277181602511835e-06, + "loss": 0.3755604922771454, + "memory(GiB)": 77.0, + "step": 5175, + "token_acc": 0.9513058875608676, + "train_speed(iter/s)": 0.614739 + }, + { + "epoch": 1.65632, + "grad_norm": 0.6821205450732121, + "learning_rate": 2.2763032507233495e-06, + "loss": 0.35360002517700195, + "memory(GiB)": 77.0, + "step": 5176, + "token_acc": 0.8124595818064237, + "train_speed(iter/s)": 0.614561 + }, + { + "epoch": 1.65664, + "grad_norm": 0.6987546605439039, + "learning_rate": 2.2754249267700443e-06, + "loss": 0.34293484687805176, + "memory(GiB)": 77.0, + "step": 5177, + "token_acc": 0.8670846394984326, + "train_speed(iter/s)": 0.61438 + }, + { + "epoch": 1.65696, + "grad_norm": 0.6931959826273288, + "learning_rate": 2.2745466307612102e-06, + "loss": 0.3715955317020416, + "memory(GiB)": 77.0, + "step": 5178, + "token_acc": 0.9073778345576493, + "train_speed(iter/s)": 0.614212 + }, + { + "epoch": 1.65728, + "grad_norm": 0.6566832723068452, + "learning_rate": 2.2736683628061375e-06, + "loss": 0.3287680745124817, + "memory(GiB)": 77.0, + "step": 5179, + "token_acc": 0.8688524590163934, + "train_speed(iter/s)": 0.614029 + }, + { + "epoch": 1.6576, + "grad_norm": 0.7644042458901129, + "learning_rate": 2.272790123014111e-06, + "loss": 0.36679232120513916, + "memory(GiB)": 77.0, + "step": 5180, + "token_acc": 0.9528079282680509, + "train_speed(iter/s)": 0.613851 + }, + { + "epoch": 1.6579199999999998, + "grad_norm": 0.675594127247396, + "learning_rate": 2.271911911494412e-06, + "loss": 0.3451712131500244, + "memory(GiB)": 77.0, + "step": 5181, + "token_acc": 0.9357970560601315, + "train_speed(iter/s)": 0.613665 + }, + { + "epoch": 1.6582400000000002, + "grad_norm": 0.7127575334435635, + "learning_rate": 2.2710337283563187e-06, + "loss": 0.3743429183959961, + "memory(GiB)": 77.0, + "step": 5182, + "token_acc": 0.8481222997673645, + "train_speed(iter/s)": 0.613491 + }, + { + "epoch": 1.65856, + "grad_norm": 0.6571899380606664, + "learning_rate": 2.2701555737091063e-06, + "loss": 0.36545872688293457, + "memory(GiB)": 77.0, + "step": 5183, + "token_acc": 0.873815041078576, + "train_speed(iter/s)": 0.613319 + }, + { + "epoch": 1.65888, + "grad_norm": 0.6316751956645331, + "learning_rate": 2.2692774476620457e-06, + "loss": 0.31366780400276184, + "memory(GiB)": 77.0, + "step": 5184, + "token_acc": 0.9137847053093034, + "train_speed(iter/s)": 0.613134 + }, + { + "epoch": 1.6592, + "grad_norm": 0.6326400917724633, + "learning_rate": 
2.268399350324404e-06, + "loss": 0.33999302983283997, + "memory(GiB)": 77.0, + "step": 5185, + "token_acc": 0.963845050215208, + "train_speed(iter/s)": 0.61295 + }, + { + "epoch": 1.65952, + "grad_norm": 0.6359547131840846, + "learning_rate": 2.2675212818054452e-06, + "loss": 0.32758867740631104, + "memory(GiB)": 77.0, + "step": 5186, + "token_acc": 0.9242581988547631, + "train_speed(iter/s)": 0.612766 + }, + { + "epoch": 1.65984, + "grad_norm": 0.6673108472662369, + "learning_rate": 2.2666432422144306e-06, + "loss": 0.3524402678012848, + "memory(GiB)": 77.0, + "step": 5187, + "token_acc": 0.889819857561793, + "train_speed(iter/s)": 0.612591 + }, + { + "epoch": 1.6601599999999999, + "grad_norm": 0.6560899282290856, + "learning_rate": 2.2657652316606164e-06, + "loss": 0.3531450033187866, + "memory(GiB)": 77.0, + "step": 5188, + "token_acc": 0.8618534482758621, + "train_speed(iter/s)": 0.612404 + }, + { + "epoch": 1.66048, + "grad_norm": 0.6465564092723125, + "learning_rate": 2.2648872502532555e-06, + "loss": 0.3810408115386963, + "memory(GiB)": 77.0, + "step": 5189, + "token_acc": 0.9164077944083592, + "train_speed(iter/s)": 0.612218 + }, + { + "epoch": 1.6608, + "grad_norm": 0.6846141180024155, + "learning_rate": 2.2640092981015977e-06, + "loss": 0.4135975241661072, + "memory(GiB)": 77.0, + "step": 5190, + "token_acc": 0.8137829912023461, + "train_speed(iter/s)": 0.612035 + }, + { + "epoch": 1.66112, + "grad_norm": 0.7259807337726961, + "learning_rate": 2.2631313753148887e-06, + "loss": 0.3560057282447815, + "memory(GiB)": 77.0, + "step": 5191, + "token_acc": 0.8708062093139709, + "train_speed(iter/s)": 0.611866 + }, + { + "epoch": 1.66144, + "grad_norm": 0.575274648323955, + "learning_rate": 2.2622534820023712e-06, + "loss": 0.28467798233032227, + "memory(GiB)": 77.0, + "step": 5192, + "token_acc": 0.9059478464693818, + "train_speed(iter/s)": 0.611679 + }, + { + "epoch": 1.6617600000000001, + "grad_norm": 0.5951987848272127, + "learning_rate": 2.2613756182732837e-06, + "loss": 0.2696995735168457, + "memory(GiB)": 77.0, + "step": 5193, + "token_acc": 0.9121964238057112, + "train_speed(iter/s)": 0.611491 + }, + { + "epoch": 1.66208, + "grad_norm": 0.6576793166865067, + "learning_rate": 2.26049778423686e-06, + "loss": 0.301503986120224, + "memory(GiB)": 77.0, + "step": 5194, + "token_acc": 0.870888733379986, + "train_speed(iter/s)": 0.611314 + }, + { + "epoch": 1.6623999999999999, + "grad_norm": 0.7070394533104711, + "learning_rate": 2.259619980002333e-06, + "loss": 0.3212546706199646, + "memory(GiB)": 77.0, + "step": 5195, + "token_acc": 0.883495145631068, + "train_speed(iter/s)": 0.611146 + }, + { + "epoch": 1.66272, + "grad_norm": 0.710032173489508, + "learning_rate": 2.2587422056789297e-06, + "loss": 0.3894128203392029, + "memory(GiB)": 77.0, + "step": 5196, + "token_acc": 0.9351766513056836, + "train_speed(iter/s)": 0.610978 + }, + { + "epoch": 1.66304, + "grad_norm": 0.6495411137700569, + "learning_rate": 2.2578644613758736e-06, + "loss": 0.2983158826828003, + "memory(GiB)": 77.0, + "step": 5197, + "token_acc": 0.9443307408923455, + "train_speed(iter/s)": 0.610794 + }, + { + "epoch": 1.66336, + "grad_norm": 0.679024094722728, + "learning_rate": 2.256986747202385e-06, + "loss": 0.3856792449951172, + "memory(GiB)": 77.0, + "step": 5198, + "token_acc": 0.7697169552074746, + "train_speed(iter/s)": 0.610617 + }, + { + "epoch": 1.66368, + "grad_norm": 0.6925130186810102, + "learning_rate": 2.25610906326768e-06, + "loss": 0.36430105566978455, + "memory(GiB)": 77.0, + "step": 5199, + 
"token_acc": 0.8364273204903677, + "train_speed(iter/s)": 0.610428 + }, + { + "epoch": 1.6640000000000001, + "grad_norm": 0.6288058677505863, + "learning_rate": 2.2552314096809712e-06, + "loss": 0.3146984577178955, + "memory(GiB)": 77.0, + "step": 5200, + "token_acc": 0.8806993486458691, + "train_speed(iter/s)": 0.610246 + }, + { + "epoch": 1.66432, + "grad_norm": 0.6825054513282356, + "learning_rate": 2.254353786551467e-06, + "loss": 0.4193945527076721, + "memory(GiB)": 77.0, + "step": 5201, + "token_acc": 0.8963327187051834, + "train_speed(iter/s)": 0.610064 + }, + { + "epoch": 1.66464, + "grad_norm": 0.6966846658800265, + "learning_rate": 2.253476193988373e-06, + "loss": 0.38148242235183716, + "memory(GiB)": 77.0, + "step": 5202, + "token_acc": 0.8143478260869565, + "train_speed(iter/s)": 0.609894 + }, + { + "epoch": 1.66496, + "grad_norm": 0.6701952853240387, + "learning_rate": 2.252598632100891e-06, + "loss": 0.2575964331626892, + "memory(GiB)": 77.0, + "step": 5203, + "token_acc": 0.9085854092526691, + "train_speed(iter/s)": 0.60972 + }, + { + "epoch": 1.66528, + "grad_norm": 0.6278488665991916, + "learning_rate": 2.2517211009982163e-06, + "loss": 0.34075257182121277, + "memory(GiB)": 77.0, + "step": 5204, + "token_acc": 0.8762365355023082, + "train_speed(iter/s)": 0.609525 + }, + { + "epoch": 1.6656, + "grad_norm": 0.6785511181383228, + "learning_rate": 2.250843600789544e-06, + "loss": 0.3165028989315033, + "memory(GiB)": 77.0, + "step": 5205, + "token_acc": 0.937130407718643, + "train_speed(iter/s)": 0.609346 + }, + { + "epoch": 1.6659199999999998, + "grad_norm": 0.5952323154629149, + "learning_rate": 2.2499661315840636e-06, + "loss": 0.1981034278869629, + "memory(GiB)": 77.0, + "step": 5206, + "token_acc": 0.9255685733976567, + "train_speed(iter/s)": 0.609183 + }, + { + "epoch": 1.6662400000000002, + "grad_norm": 0.6295744192483533, + "learning_rate": 2.24908869349096e-06, + "loss": 0.31676018238067627, + "memory(GiB)": 77.0, + "step": 5207, + "token_acc": 0.8502415458937198, + "train_speed(iter/s)": 0.609015 + }, + { + "epoch": 1.66656, + "grad_norm": 0.6314160248484169, + "learning_rate": 2.2482112866194166e-06, + "loss": 0.36709821224212646, + "memory(GiB)": 77.0, + "step": 5208, + "token_acc": 0.8684931506849315, + "train_speed(iter/s)": 0.60883 + }, + { + "epoch": 1.66688, + "grad_norm": 0.6537980614625599, + "learning_rate": 2.24733391107861e-06, + "loss": 0.379938542842865, + "memory(GiB)": 77.0, + "step": 5209, + "token_acc": 0.8787004998077662, + "train_speed(iter/s)": 0.608655 + }, + { + "epoch": 1.6672, + "grad_norm": 0.6661032913056204, + "learning_rate": 2.246456566977715e-06, + "loss": 0.32465091347694397, + "memory(GiB)": 77.0, + "step": 5210, + "token_acc": 0.9054015432980852, + "train_speed(iter/s)": 0.608479 + }, + { + "epoch": 1.6675200000000001, + "grad_norm": 0.6524270362497935, + "learning_rate": 2.2455792544259018e-06, + "loss": 0.38859277963638306, + "memory(GiB)": 77.0, + "step": 5211, + "token_acc": 0.9417108251324754, + "train_speed(iter/s)": 0.608313 + }, + { + "epoch": 1.66784, + "grad_norm": 0.7280691473393831, + "learning_rate": 2.2447019735323375e-06, + "loss": 0.366818904876709, + "memory(GiB)": 77.0, + "step": 5212, + "token_acc": 0.8592310136638068, + "train_speed(iter/s)": 0.608126 + }, + { + "epoch": 1.6681599999999999, + "grad_norm": 0.6296389299953222, + "learning_rate": 2.2438247244061836e-06, + "loss": 0.28579866886138916, + "memory(GiB)": 77.0, + "step": 5213, + "token_acc": 0.9662447257383966, + "train_speed(iter/s)": 0.60793 + }, + { + 
"epoch": 1.66848, + "grad_norm": 0.5965926207847063, + "learning_rate": 2.242947507156599e-06, + "loss": 0.2826199531555176, + "memory(GiB)": 77.0, + "step": 5214, + "token_acc": 0.91303074670571, + "train_speed(iter/s)": 0.607761 + }, + { + "epoch": 1.6688, + "grad_norm": 0.7150453643183257, + "learning_rate": 2.242070321892738e-06, + "loss": 0.34559667110443115, + "memory(GiB)": 77.0, + "step": 5215, + "token_acc": 0.9624413145539906, + "train_speed(iter/s)": 0.607592 + }, + { + "epoch": 1.66912, + "grad_norm": 0.6614797141720996, + "learning_rate": 2.2411931687237503e-06, + "loss": 0.3276509642601013, + "memory(GiB)": 77.0, + "step": 5216, + "token_acc": 0.925191527715187, + "train_speed(iter/s)": 0.607412 + }, + { + "epoch": 1.66944, + "grad_norm": 0.7383786853310129, + "learning_rate": 2.2403160477587842e-06, + "loss": 0.3523688316345215, + "memory(GiB)": 77.0, + "step": 5217, + "token_acc": 0.9109937004673847, + "train_speed(iter/s)": 0.607244 + }, + { + "epoch": 1.6697600000000001, + "grad_norm": 0.6975835833958521, + "learning_rate": 2.2394389591069808e-06, + "loss": 0.27295660972595215, + "memory(GiB)": 77.0, + "step": 5218, + "token_acc": 0.8997699638514624, + "train_speed(iter/s)": 0.607079 + }, + { + "epoch": 1.67008, + "grad_norm": 0.6462414527771732, + "learning_rate": 2.2385619028774784e-06, + "loss": 0.3507212698459625, + "memory(GiB)": 77.0, + "step": 5219, + "token_acc": 0.9443892750744787, + "train_speed(iter/s)": 0.606903 + }, + { + "epoch": 1.6703999999999999, + "grad_norm": 0.6892421883646064, + "learning_rate": 2.2376848791794122e-06, + "loss": 0.3351714313030243, + "memory(GiB)": 77.0, + "step": 5220, + "token_acc": 0.9251606978879706, + "train_speed(iter/s)": 0.606714 + }, + { + "epoch": 1.67072, + "grad_norm": 0.6503536565963417, + "learning_rate": 2.2368078881219123e-06, + "loss": 0.3393906354904175, + "memory(GiB)": 77.0, + "step": 5221, + "token_acc": 0.8084469571000997, + "train_speed(iter/s)": 0.606527 + }, + { + "epoch": 1.67104, + "grad_norm": 0.6138519906566652, + "learning_rate": 2.235930929814105e-06, + "loss": 0.37738627195358276, + "memory(GiB)": 77.0, + "step": 5222, + "token_acc": 0.888952985289876, + "train_speed(iter/s)": 0.606354 + }, + { + "epoch": 1.67136, + "grad_norm": 0.6652199433550653, + "learning_rate": 2.2350540043651127e-06, + "loss": 0.3975745737552643, + "memory(GiB)": 77.0, + "step": 5223, + "token_acc": 0.8315487072342648, + "train_speed(iter/s)": 0.606178 + }, + { + "epoch": 1.67168, + "grad_norm": 0.6504840005705083, + "learning_rate": 2.234177111884053e-06, + "loss": 0.3965885639190674, + "memory(GiB)": 77.0, + "step": 5224, + "token_acc": 0.896551724137931, + "train_speed(iter/s)": 0.605996 + }, + { + "epoch": 1.6720000000000002, + "grad_norm": 0.647277018239714, + "learning_rate": 2.2333002524800406e-06, + "loss": 0.31700342893600464, + "memory(GiB)": 77.0, + "step": 5225, + "token_acc": 0.8893871449925261, + "train_speed(iter/s)": 0.605806 + }, + { + "epoch": 1.67232, + "grad_norm": 0.687702990791381, + "learning_rate": 2.232423426262185e-06, + "loss": 0.3128927946090698, + "memory(GiB)": 77.0, + "step": 5226, + "token_acc": 0.9182608695652174, + "train_speed(iter/s)": 0.605625 + }, + { + "epoch": 1.67264, + "grad_norm": 0.6546867668315564, + "learning_rate": 2.2315466333395927e-06, + "loss": 0.340276837348938, + "memory(GiB)": 77.0, + "step": 5227, + "token_acc": 0.8757062146892656, + "train_speed(iter/s)": 0.605446 + }, + { + "epoch": 1.67296, + "grad_norm": 0.606427696905237, + "learning_rate": 2.230669873821364e-06, + 
"loss": 0.29823654890060425, + "memory(GiB)": 77.0, + "step": 5228, + "token_acc": 0.9219600725952813, + "train_speed(iter/s)": 0.605281 + }, + { + "epoch": 1.67328, + "grad_norm": 0.6322983409831574, + "learning_rate": 2.2297931478165983e-06, + "loss": 0.3877940773963928, + "memory(GiB)": 77.0, + "step": 5229, + "token_acc": 0.8672884961544756, + "train_speed(iter/s)": 0.605109 + }, + { + "epoch": 1.6736, + "grad_norm": 0.7336361959939196, + "learning_rate": 2.2289164554343878e-06, + "loss": 0.3639827072620392, + "memory(GiB)": 77.0, + "step": 5230, + "token_acc": 0.9001122334455668, + "train_speed(iter/s)": 0.604946 + }, + { + "epoch": 1.6739199999999999, + "grad_norm": 0.6596277163108618, + "learning_rate": 2.2280397967838227e-06, + "loss": 0.3611827790737152, + "memory(GiB)": 77.0, + "step": 5231, + "token_acc": 0.9127607694391764, + "train_speed(iter/s)": 0.604783 + }, + { + "epoch": 1.67424, + "grad_norm": 0.7010253494122453, + "learning_rate": 2.227163171973986e-06, + "loss": 0.3492082357406616, + "memory(GiB)": 77.0, + "step": 5232, + "token_acc": 0.8763342879458474, + "train_speed(iter/s)": 0.604621 + }, + { + "epoch": 1.67456, + "grad_norm": 0.6953739870691645, + "learning_rate": 2.22628658111396e-06, + "loss": 0.28428375720977783, + "memory(GiB)": 77.0, + "step": 5233, + "token_acc": 0.8828828828828829, + "train_speed(iter/s)": 0.604453 + }, + { + "epoch": 1.67488, + "grad_norm": 0.5654180123687625, + "learning_rate": 2.2254100243128197e-06, + "loss": 0.3343057334423065, + "memory(GiB)": 77.0, + "step": 5234, + "token_acc": 0.8815110276865321, + "train_speed(iter/s)": 0.604203 + }, + { + "epoch": 1.6752, + "grad_norm": 0.6332297023796664, + "learning_rate": 2.2245335016796392e-06, + "loss": 0.3029119372367859, + "memory(GiB)": 77.0, + "step": 5235, + "token_acc": 0.9018105849582173, + "train_speed(iter/s)": 0.604026 + }, + { + "epoch": 1.6755200000000001, + "grad_norm": 0.7101382893071916, + "learning_rate": 2.223657013323486e-06, + "loss": 0.4365425109863281, + "memory(GiB)": 77.0, + "step": 5236, + "token_acc": 0.9467857142857142, + "train_speed(iter/s)": 0.603865 + }, + { + "epoch": 1.67584, + "grad_norm": 0.651861672036374, + "learning_rate": 2.222780559353423e-06, + "loss": 0.2658543586730957, + "memory(GiB)": 77.0, + "step": 5237, + "token_acc": 0.8574561403508771, + "train_speed(iter/s)": 0.603697 + }, + { + "epoch": 1.6761599999999999, + "grad_norm": 0.6384156359504224, + "learning_rate": 2.2219041398785105e-06, + "loss": 0.2801669239997864, + "memory(GiB)": 77.0, + "step": 5238, + "token_acc": 0.8975609756097561, + "train_speed(iter/s)": 0.603538 + }, + { + "epoch": 1.67648, + "grad_norm": 0.6567382809564999, + "learning_rate": 2.221027755007804e-06, + "loss": 0.3383275270462036, + "memory(GiB)": 77.0, + "step": 5239, + "token_acc": 0.9345878136200717, + "train_speed(iter/s)": 0.603379 + }, + { + "epoch": 1.6768, + "grad_norm": 0.6696360206784587, + "learning_rate": 2.220151404850354e-06, + "loss": 0.30115821957588196, + "memory(GiB)": 77.0, + "step": 5240, + "token_acc": 0.9659741041854863, + "train_speed(iter/s)": 0.603212 + }, + { + "epoch": 1.67712, + "grad_norm": 0.6581604078757555, + "learning_rate": 2.2192750895152068e-06, + "loss": 0.2942548394203186, + "memory(GiB)": 77.0, + "step": 5241, + "token_acc": 0.9539301828544289, + "train_speed(iter/s)": 0.603051 + }, + { + "epoch": 1.67744, + "grad_norm": 0.6459398634748426, + "learning_rate": 2.2183988091114048e-06, + "loss": 0.3407158851623535, + "memory(GiB)": 77.0, + "step": 5242, + "token_acc": 
0.7889760147601476, + "train_speed(iter/s)": 0.602882 + }, + { + "epoch": 1.6777600000000001, + "grad_norm": 0.5818742847015063, + "learning_rate": 2.2175225637479857e-06, + "loss": 0.3289037048816681, + "memory(GiB)": 77.0, + "step": 5243, + "token_acc": 0.8967600219659527, + "train_speed(iter/s)": 0.602697 + }, + { + "epoch": 1.67808, + "grad_norm": 0.605893961749133, + "learning_rate": 2.216646353533984e-06, + "loss": 0.3080053925514221, + "memory(GiB)": 77.0, + "step": 5244, + "token_acc": 0.9502173054128803, + "train_speed(iter/s)": 0.602527 + }, + { + "epoch": 1.6784, + "grad_norm": 0.6908129286649046, + "learning_rate": 2.215770178578427e-06, + "loss": 0.3032040596008301, + "memory(GiB)": 77.0, + "step": 5245, + "token_acc": 0.9191374663072777, + "train_speed(iter/s)": 0.602372 + }, + { + "epoch": 1.67872, + "grad_norm": 0.7229182999478942, + "learning_rate": 2.2148940389903424e-06, + "loss": 0.28537291288375854, + "memory(GiB)": 77.0, + "step": 5246, + "token_acc": 0.8840236686390532, + "train_speed(iter/s)": 0.602201 + }, + { + "epoch": 1.67904, + "grad_norm": 0.6918814956684537, + "learning_rate": 2.214017934878748e-06, + "loss": 0.3581412136554718, + "memory(GiB)": 77.0, + "step": 5247, + "token_acc": 0.8644366197183099, + "train_speed(iter/s)": 0.602038 + }, + { + "epoch": 1.67936, + "grad_norm": 0.7213353622718336, + "learning_rate": 2.21314186635266e-06, + "loss": 0.3460131287574768, + "memory(GiB)": 77.0, + "step": 5248, + "token_acc": 0.9173094723850663, + "train_speed(iter/s)": 0.601874 + }, + { + "epoch": 1.6796799999999998, + "grad_norm": 0.6650534408810579, + "learning_rate": 2.212265833521091e-06, + "loss": 0.30011940002441406, + "memory(GiB)": 77.0, + "step": 5249, + "token_acc": 0.9261226460647031, + "train_speed(iter/s)": 0.601718 + }, + { + "epoch": 1.6800000000000002, + "grad_norm": 0.6929811057731599, + "learning_rate": 2.2113898364930474e-06, + "loss": 0.2941359877586365, + "memory(GiB)": 77.0, + "step": 5250, + "token_acc": 0.9123770965876229, + "train_speed(iter/s)": 0.601558 + }, + { + "epoch": 1.68032, + "grad_norm": 0.6204793222316902, + "learning_rate": 2.2105138753775314e-06, + "loss": 0.39010852575302124, + "memory(GiB)": 77.0, + "step": 5251, + "token_acc": 0.8906840838776212, + "train_speed(iter/s)": 0.601371 + }, + { + "epoch": 1.68064, + "grad_norm": 0.6624149862479026, + "learning_rate": 2.2096379502835424e-06, + "loss": 0.293301522731781, + "memory(GiB)": 77.0, + "step": 5252, + "token_acc": 0.9427727028479312, + "train_speed(iter/s)": 0.601207 + }, + { + "epoch": 1.68096, + "grad_norm": 0.6525756385224253, + "learning_rate": 2.2087620613200733e-06, + "loss": 0.3086351156234741, + "memory(GiB)": 77.0, + "step": 5253, + "token_acc": 0.891519250780437, + "train_speed(iter/s)": 0.601047 + }, + { + "epoch": 1.68128, + "grad_norm": 0.5900448428350638, + "learning_rate": 2.2078862085961134e-06, + "loss": 0.31120187044143677, + "memory(GiB)": 77.0, + "step": 5254, + "token_acc": 0.8444444444444444, + "train_speed(iter/s)": 0.600842 + }, + { + "epoch": 1.6816, + "grad_norm": 0.6001122619200471, + "learning_rate": 2.207010392220648e-06, + "loss": 0.26137596368789673, + "memory(GiB)": 77.0, + "step": 5255, + "token_acc": 0.872374474894979, + "train_speed(iter/s)": 0.600667 + }, + { + "epoch": 1.6819199999999999, + "grad_norm": 0.7217940404868745, + "learning_rate": 2.206134612302656e-06, + "loss": 0.37045103311538696, + "memory(GiB)": 77.0, + "step": 5256, + "token_acc": 0.8873131467995401, + "train_speed(iter/s)": 0.600512 + }, + { + "epoch": 1.68224, + 
"grad_norm": 0.6336735512886932, + "learning_rate": 2.205258868951114e-06, + "loss": 0.3542119264602661, + "memory(GiB)": 77.0, + "step": 5257, + "token_acc": 0.9233906204485872, + "train_speed(iter/s)": 0.600341 + }, + { + "epoch": 1.68256, + "grad_norm": 0.6497420778091527, + "learning_rate": 2.2043831622749928e-06, + "loss": 0.34985125064849854, + "memory(GiB)": 77.0, + "step": 5258, + "token_acc": 0.8717404487568223, + "train_speed(iter/s)": 0.60015 + }, + { + "epoch": 1.68288, + "grad_norm": 0.6915085092922567, + "learning_rate": 2.203507492383259e-06, + "loss": 0.3835006356239319, + "memory(GiB)": 77.0, + "step": 5259, + "token_acc": 0.8545454545454545, + "train_speed(iter/s)": 0.599967 + }, + { + "epoch": 1.6832, + "grad_norm": 0.7022794342363723, + "learning_rate": 2.2026318593848743e-06, + "loss": 0.34435755014419556, + "memory(GiB)": 77.0, + "step": 5260, + "token_acc": 0.9230554781154168, + "train_speed(iter/s)": 0.59981 + }, + { + "epoch": 1.6835200000000001, + "grad_norm": 0.6445359970945835, + "learning_rate": 2.2017562633887963e-06, + "loss": 0.35726428031921387, + "memory(GiB)": 77.0, + "step": 5261, + "token_acc": 0.927843137254902, + "train_speed(iter/s)": 0.599655 + }, + { + "epoch": 1.68384, + "grad_norm": 0.6761975141197464, + "learning_rate": 2.2008807045039773e-06, + "loss": 0.3336511254310608, + "memory(GiB)": 77.0, + "step": 5262, + "token_acc": 0.8901418697708258, + "train_speed(iter/s)": 0.599497 + }, + { + "epoch": 1.6841599999999999, + "grad_norm": 0.6053976561207519, + "learning_rate": 2.2000051828393657e-06, + "loss": 0.34530073404312134, + "memory(GiB)": 77.0, + "step": 5263, + "token_acc": 0.9261394838001098, + "train_speed(iter/s)": 0.59932 + }, + { + "epoch": 1.68448, + "grad_norm": 0.6471456464989475, + "learning_rate": 2.1991296985039048e-06, + "loss": 0.2879466116428375, + "memory(GiB)": 77.0, + "step": 5264, + "token_acc": 0.9353637901861253, + "train_speed(iter/s)": 0.599154 + }, + { + "epoch": 1.6848, + "grad_norm": 0.7390774461918803, + "learning_rate": 2.198254251606534e-06, + "loss": 0.3583691120147705, + "memory(GiB)": 77.0, + "step": 5265, + "token_acc": 0.9033189033189033, + "train_speed(iter/s)": 0.59899 + }, + { + "epoch": 1.68512, + "grad_norm": 0.6609155331933161, + "learning_rate": 2.1973788422561864e-06, + "loss": 0.310380220413208, + "memory(GiB)": 77.0, + "step": 5266, + "token_acc": 0.8785522788203753, + "train_speed(iter/s)": 0.59883 + }, + { + "epoch": 1.68544, + "grad_norm": 0.6243830980143019, + "learning_rate": 2.1965034705617926e-06, + "loss": 0.3599774241447449, + "memory(GiB)": 77.0, + "step": 5267, + "token_acc": 0.8246699669966997, + "train_speed(iter/s)": 0.598659 + }, + { + "epoch": 1.6857600000000001, + "grad_norm": 0.642933123861644, + "learning_rate": 2.195628136632277e-06, + "loss": 0.38802027702331543, + "memory(GiB)": 77.0, + "step": 5268, + "token_acc": 0.9297205757832345, + "train_speed(iter/s)": 0.598487 + }, + { + "epoch": 1.68608, + "grad_norm": 0.633765465187104, + "learning_rate": 2.1947528405765594e-06, + "loss": 0.39040642976760864, + "memory(GiB)": 77.0, + "step": 5269, + "token_acc": 0.842789820923657, + "train_speed(iter/s)": 0.598315 + }, + { + "epoch": 1.6864, + "grad_norm": 0.6553914063826315, + "learning_rate": 2.193877582503556e-06, + "loss": 0.34471142292022705, + "memory(GiB)": 77.0, + "step": 5270, + "token_acc": 0.9010547463586137, + "train_speed(iter/s)": 0.598138 + }, + { + "epoch": 1.68672, + "grad_norm": 0.6689212857378656, + "learning_rate": 2.193002362522177e-06, + "loss": 
0.35834619402885437, + "memory(GiB)": 77.0, + "step": 5271, + "token_acc": 0.8410868670234665, + "train_speed(iter/s)": 0.597971 + }, + { + "epoch": 1.68704, + "grad_norm": 0.6028543760512591, + "learning_rate": 2.192127180741328e-06, + "loss": 0.33765894174575806, + "memory(GiB)": 77.0, + "step": 5272, + "token_acc": 0.8557481083719795, + "train_speed(iter/s)": 0.597802 + }, + { + "epoch": 1.68736, + "grad_norm": 0.6263500414496418, + "learning_rate": 2.191252037269911e-06, + "loss": 0.2820510268211365, + "memory(GiB)": 77.0, + "step": 5273, + "token_acc": 0.907659460902055, + "train_speed(iter/s)": 0.597642 + }, + { + "epoch": 1.6876799999999998, + "grad_norm": 0.6549939910558277, + "learning_rate": 2.1903769322168226e-06, + "loss": 0.36196404695510864, + "memory(GiB)": 77.0, + "step": 5274, + "token_acc": 0.939441978440076, + "train_speed(iter/s)": 0.597466 + }, + { + "epoch": 1.688, + "grad_norm": 0.6174135445216434, + "learning_rate": 2.189501865690953e-06, + "loss": 0.2887686491012573, + "memory(GiB)": 77.0, + "step": 5275, + "token_acc": 0.9605396290050591, + "train_speed(iter/s)": 0.597305 + }, + { + "epoch": 1.68832, + "grad_norm": 0.6911317412626582, + "learning_rate": 2.1886268378011898e-06, + "loss": 0.388485312461853, + "memory(GiB)": 77.0, + "step": 5276, + "token_acc": 0.9340277777777778, + "train_speed(iter/s)": 0.597147 + }, + { + "epoch": 1.68864, + "grad_norm": 0.7052150049975136, + "learning_rate": 2.1877518486564152e-06, + "loss": 0.40765833854675293, + "memory(GiB)": 77.0, + "step": 5277, + "token_acc": 0.888560885608856, + "train_speed(iter/s)": 0.596977 + }, + { + "epoch": 1.68896, + "grad_norm": 0.67112771399698, + "learning_rate": 2.1868768983655066e-06, + "loss": 0.29495832324028015, + "memory(GiB)": 77.0, + "step": 5278, + "token_acc": 0.9552556818181818, + "train_speed(iter/s)": 0.596824 + }, + { + "epoch": 1.6892800000000001, + "grad_norm": 0.7119881246057963, + "learning_rate": 2.186001987037336e-06, + "loss": 0.33782464265823364, + "memory(GiB)": 77.0, + "step": 5279, + "token_acc": 0.9242691088411412, + "train_speed(iter/s)": 0.596659 + }, + { + "epoch": 1.6896, + "grad_norm": 0.6607012200940723, + "learning_rate": 2.1851271147807708e-06, + "loss": 0.36501896381378174, + "memory(GiB)": 77.0, + "step": 5280, + "token_acc": 0.9357126515671471, + "train_speed(iter/s)": 0.596496 + }, + { + "epoch": 1.6899199999999999, + "grad_norm": 0.6248440820447917, + "learning_rate": 2.1842522817046742e-06, + "loss": 0.3484588861465454, + "memory(GiB)": 77.0, + "step": 5281, + "token_acc": 0.8616136919315404, + "train_speed(iter/s)": 0.596324 + }, + { + "epoch": 1.69024, + "grad_norm": 0.6235896236055399, + "learning_rate": 2.1833774879179037e-06, + "loss": 0.27175867557525635, + "memory(GiB)": 77.0, + "step": 5282, + "token_acc": 0.8930568499534017, + "train_speed(iter/s)": 0.59617 + }, + { + "epoch": 1.69056, + "grad_norm": 0.6234308726348886, + "learning_rate": 2.182502733529312e-06, + "loss": 0.3190113306045532, + "memory(GiB)": 77.0, + "step": 5283, + "token_acc": 0.9315628192032687, + "train_speed(iter/s)": 0.596006 + }, + { + "epoch": 1.69088, + "grad_norm": 0.7110308104508904, + "learning_rate": 2.181628018647748e-06, + "loss": 0.42237725853919983, + "memory(GiB)": 77.0, + "step": 5284, + "token_acc": 0.822983984265243, + "train_speed(iter/s)": 0.59584 + }, + { + "epoch": 1.6912, + "grad_norm": 0.6545408513077189, + "learning_rate": 2.180753343382053e-06, + "loss": 0.3207717537879944, + "memory(GiB)": 77.0, + "step": 5285, + "token_acc": 0.9466495945369184, + 
"train_speed(iter/s)": 0.59569 + }, + { + "epoch": 1.6915200000000001, + "grad_norm": 0.702288972527904, + "learning_rate": 2.179878707841067e-06, + "loss": 0.3690841794013977, + "memory(GiB)": 77.0, + "step": 5286, + "token_acc": 0.874595759971254, + "train_speed(iter/s)": 0.595531 + }, + { + "epoch": 1.69184, + "grad_norm": 0.6405373416359887, + "learning_rate": 2.1790041121336223e-06, + "loss": 0.3248850107192993, + "memory(GiB)": 77.0, + "step": 5287, + "token_acc": 0.8859840232389252, + "train_speed(iter/s)": 0.595378 + }, + { + "epoch": 1.6921599999999999, + "grad_norm": 0.5986813430693276, + "learning_rate": 2.178129556368548e-06, + "loss": 0.330269992351532, + "memory(GiB)": 77.0, + "step": 5288, + "token_acc": 0.8859791425260718, + "train_speed(iter/s)": 0.595219 + }, + { + "epoch": 1.69248, + "grad_norm": 0.5512771028193145, + "learning_rate": 2.1772550406546674e-06, + "loss": 0.29385530948638916, + "memory(GiB)": 77.0, + "step": 5289, + "token_acc": 0.9428353658536586, + "train_speed(iter/s)": 0.595047 + }, + { + "epoch": 1.6928, + "grad_norm": 0.5765163368648673, + "learning_rate": 2.1763805651007975e-06, + "loss": 0.3230323791503906, + "memory(GiB)": 77.0, + "step": 5290, + "token_acc": 0.9338235294117647, + "train_speed(iter/s)": 0.594871 + }, + { + "epoch": 1.69312, + "grad_norm": 0.679190814547808, + "learning_rate": 2.1755061298157515e-06, + "loss": 0.3365877866744995, + "memory(GiB)": 77.0, + "step": 5291, + "token_acc": 0.876458112407211, + "train_speed(iter/s)": 0.594709 + }, + { + "epoch": 1.6934399999999998, + "grad_norm": 0.6596465122676547, + "learning_rate": 2.174631734908339e-06, + "loss": 0.28610098361968994, + "memory(GiB)": 77.0, + "step": 5292, + "token_acc": 0.9030868628858578, + "train_speed(iter/s)": 0.594544 + }, + { + "epoch": 1.6937600000000002, + "grad_norm": 0.6861803114407664, + "learning_rate": 2.173757380487363e-06, + "loss": 0.43942922353744507, + "memory(GiB)": 77.0, + "step": 5293, + "token_acc": 0.9209464909922022, + "train_speed(iter/s)": 0.594383 + }, + { + "epoch": 1.69408, + "grad_norm": 0.7107778281253725, + "learning_rate": 2.1728830666616214e-06, + "loss": 0.3241244852542877, + "memory(GiB)": 77.0, + "step": 5294, + "token_acc": 0.8864520693314467, + "train_speed(iter/s)": 0.594228 + }, + { + "epoch": 1.6944, + "grad_norm": 0.7019088613142108, + "learning_rate": 2.1720087935399074e-06, + "loss": 0.3723068833351135, + "memory(GiB)": 77.0, + "step": 5295, + "token_acc": 0.9102282704126426, + "train_speed(iter/s)": 0.594072 + }, + { + "epoch": 1.69472, + "grad_norm": 0.6068023204689548, + "learning_rate": 2.1711345612310094e-06, + "loss": 0.2720417082309723, + "memory(GiB)": 77.0, + "step": 5296, + "token_acc": 0.8801208865010074, + "train_speed(iter/s)": 0.593913 + }, + { + "epoch": 1.69504, + "grad_norm": 0.6871386042240067, + "learning_rate": 2.1702603698437104e-06, + "loss": 0.39907991886138916, + "memory(GiB)": 77.0, + "step": 5297, + "token_acc": 0.846646571213263, + "train_speed(iter/s)": 0.593758 + }, + { + "epoch": 1.69536, + "grad_norm": 0.6404349624167857, + "learning_rate": 2.169386219486788e-06, + "loss": 0.2923765778541565, + "memory(GiB)": 77.0, + "step": 5298, + "token_acc": 0.9499822632139057, + "train_speed(iter/s)": 0.593597 + }, + { + "epoch": 1.6956799999999999, + "grad_norm": 0.6524120423371673, + "learning_rate": 2.1685121102690153e-06, + "loss": 0.340131938457489, + "memory(GiB)": 77.0, + "step": 5299, + "token_acc": 0.9100071994240461, + "train_speed(iter/s)": 0.593441 + }, + { + "epoch": 1.696, + "grad_norm": 
0.7021345872253872, + "learning_rate": 2.16763804229916e-06, + "loss": 0.3208022117614746, + "memory(GiB)": 77.0, + "step": 5300, + "token_acc": 0.8929509632224168, + "train_speed(iter/s)": 0.593282 + }, + { + "epoch": 1.69632, + "grad_norm": 0.6726794494874935, + "learning_rate": 2.166764015685985e-06, + "loss": 0.3515181243419647, + "memory(GiB)": 77.0, + "step": 5301, + "token_acc": 0.8952205882352942, + "train_speed(iter/s)": 0.593123 + }, + { + "epoch": 1.69664, + "grad_norm": 0.6039817302797122, + "learning_rate": 2.165890030538247e-06, + "loss": 0.2520756721496582, + "memory(GiB)": 77.0, + "step": 5302, + "token_acc": 0.9367369589345172, + "train_speed(iter/s)": 0.592964 + }, + { + "epoch": 1.69696, + "grad_norm": 0.6483878171123935, + "learning_rate": 2.1650160869647004e-06, + "loss": 0.3644360303878784, + "memory(GiB)": 77.0, + "step": 5303, + "token_acc": 0.9201398225329389, + "train_speed(iter/s)": 0.592789 + }, + { + "epoch": 1.6972800000000001, + "grad_norm": 0.6985824873866714, + "learning_rate": 2.164142185074089e-06, + "loss": 0.28846004605293274, + "memory(GiB)": 77.0, + "step": 5304, + "token_acc": 0.9137792103142627, + "train_speed(iter/s)": 0.592641 + }, + { + "epoch": 1.6976, + "grad_norm": 0.6602617000386484, + "learning_rate": 2.163268324975157e-06, + "loss": 0.3963332772254944, + "memory(GiB)": 77.0, + "step": 5305, + "token_acc": 0.9130516431924882, + "train_speed(iter/s)": 0.592484 + }, + { + "epoch": 1.6979199999999999, + "grad_norm": 0.6428223934267095, + "learning_rate": 2.1623945067766403e-06, + "loss": 0.2825208604335785, + "memory(GiB)": 77.0, + "step": 5306, + "token_acc": 0.8867579908675799, + "train_speed(iter/s)": 0.592329 + }, + { + "epoch": 1.69824, + "grad_norm": 0.6461883412674916, + "learning_rate": 2.1615207305872704e-06, + "loss": 0.3701015114784241, + "memory(GiB)": 77.0, + "step": 5307, + "token_acc": 0.8389610389610389, + "train_speed(iter/s)": 0.592162 + }, + { + "epoch": 1.69856, + "grad_norm": 0.6538455806483322, + "learning_rate": 2.1606469965157735e-06, + "loss": 0.26142582297325134, + "memory(GiB)": 77.0, + "step": 5308, + "token_acc": 0.9182461103253182, + "train_speed(iter/s)": 0.592008 + }, + { + "epoch": 1.69888, + "grad_norm": 0.7069541404730615, + "learning_rate": 2.159773304670872e-06, + "loss": 0.3703899383544922, + "memory(GiB)": 77.0, + "step": 5309, + "token_acc": 0.8958199356913183, + "train_speed(iter/s)": 0.591857 + }, + { + "epoch": 1.6992, + "grad_norm": 0.6826241940753053, + "learning_rate": 2.15889965516128e-06, + "loss": 0.3505704402923584, + "memory(GiB)": 77.0, + "step": 5310, + "token_acc": 0.8726307808946171, + "train_speed(iter/s)": 0.591706 + }, + { + "epoch": 1.6995200000000001, + "grad_norm": 0.6525463911435647, + "learning_rate": 2.158026048095709e-06, + "loss": 0.2956995666027069, + "memory(GiB)": 77.0, + "step": 5311, + "token_acc": 0.9267515923566879, + "train_speed(iter/s)": 0.591547 + }, + { + "epoch": 1.69984, + "grad_norm": 0.6284501765854578, + "learning_rate": 2.157152483582864e-06, + "loss": 0.3094443082809448, + "memory(GiB)": 77.0, + "step": 5312, + "token_acc": 0.8198310916484204, + "train_speed(iter/s)": 0.591401 + }, + { + "epoch": 1.70016, + "grad_norm": 0.6444562791481097, + "learning_rate": 2.156278961731445e-06, + "loss": 0.3137660026550293, + "memory(GiB)": 77.0, + "step": 5313, + "token_acc": 0.9070717839374556, + "train_speed(iter/s)": 0.591242 + }, + { + "epoch": 1.70048, + "grad_norm": 0.6982496294137615, + "learning_rate": 2.1554054826501464e-06, + "loss": 0.30787721276283264, + 
"memory(GiB)": 77.0, + "step": 5314, + "token_acc": 0.9135414327419717, + "train_speed(iter/s)": 0.591088 + }, + { + "epoch": 1.7008, + "grad_norm": 0.6701675544445648, + "learning_rate": 2.154532046447657e-06, + "loss": 0.3013525605201721, + "memory(GiB)": 77.0, + "step": 5315, + "token_acc": 0.9467321496033245, + "train_speed(iter/s)": 0.590925 + }, + { + "epoch": 1.70112, + "grad_norm": 0.6772880928423635, + "learning_rate": 2.1536586532326627e-06, + "loss": 0.3565615713596344, + "memory(GiB)": 77.0, + "step": 5316, + "token_acc": 0.9105478941483414, + "train_speed(iter/s)": 0.590769 + }, + { + "epoch": 1.7014399999999998, + "grad_norm": 0.6307534940791164, + "learning_rate": 2.15278530311384e-06, + "loss": 0.28070759773254395, + "memory(GiB)": 77.0, + "step": 5317, + "token_acc": 0.9311639549436797, + "train_speed(iter/s)": 0.590607 + }, + { + "epoch": 1.7017600000000002, + "grad_norm": 0.6987350036077933, + "learning_rate": 2.151911996199864e-06, + "loss": 0.3876323997974396, + "memory(GiB)": 77.0, + "step": 5318, + "token_acc": 0.8676379417234966, + "train_speed(iter/s)": 0.59044 + }, + { + "epoch": 1.70208, + "grad_norm": 0.7070634397728753, + "learning_rate": 2.151038732599401e-06, + "loss": 0.3639335632324219, + "memory(GiB)": 77.0, + "step": 5319, + "token_acc": 0.8678395194164342, + "train_speed(iter/s)": 0.59028 + }, + { + "epoch": 1.7024, + "grad_norm": 0.6956159396583004, + "learning_rate": 2.150165512421114e-06, + "loss": 0.3948298692703247, + "memory(GiB)": 77.0, + "step": 5320, + "token_acc": 0.916604431167538, + "train_speed(iter/s)": 0.590112 + }, + { + "epoch": 1.70272, + "grad_norm": 0.7173233226758767, + "learning_rate": 2.1492923357736606e-06, + "loss": 0.38616734743118286, + "memory(GiB)": 77.0, + "step": 5321, + "token_acc": 0.8800413650465356, + "train_speed(iter/s)": 0.589957 + }, + { + "epoch": 1.70304, + "grad_norm": 0.7352727398731351, + "learning_rate": 2.148419202765692e-06, + "loss": 0.38064032793045044, + "memory(GiB)": 77.0, + "step": 5322, + "token_acc": 0.8865069356872636, + "train_speed(iter/s)": 0.589811 + }, + { + "epoch": 1.70336, + "grad_norm": 0.6734909195904353, + "learning_rate": 2.1475461135058545e-06, + "loss": 0.34335875511169434, + "memory(GiB)": 77.0, + "step": 5323, + "token_acc": 0.9463806970509383, + "train_speed(iter/s)": 0.58966 + }, + { + "epoch": 1.7036799999999999, + "grad_norm": 0.6546259252354367, + "learning_rate": 2.146673068102789e-06, + "loss": 0.33921343088150024, + "memory(GiB)": 77.0, + "step": 5324, + "token_acc": 0.9119524543253357, + "train_speed(iter/s)": 0.589501 + }, + { + "epoch": 1.704, + "grad_norm": 0.682003147566318, + "learning_rate": 2.1458000666651304e-06, + "loss": 0.3527563214302063, + "memory(GiB)": 77.0, + "step": 5325, + "token_acc": 0.8888888888888888, + "train_speed(iter/s)": 0.589352 + }, + { + "epoch": 1.70432, + "grad_norm": 0.6096462689711076, + "learning_rate": 2.1449271093015094e-06, + "loss": 0.270572304725647, + "memory(GiB)": 77.0, + "step": 5326, + "token_acc": 0.9090196078431373, + "train_speed(iter/s)": 0.589193 + }, + { + "epoch": 1.70464, + "grad_norm": 0.6592352850505535, + "learning_rate": 2.1440541961205504e-06, + "loss": 0.3808806538581848, + "memory(GiB)": 77.0, + "step": 5327, + "token_acc": 0.854726668680157, + "train_speed(iter/s)": 0.589046 + }, + { + "epoch": 1.70496, + "grad_norm": 0.6465096285076603, + "learning_rate": 2.1431813272308717e-06, + "loss": 0.3042720556259155, + "memory(GiB)": 77.0, + "step": 5328, + "token_acc": 0.9258836944127709, + "train_speed(iter/s)": 0.588897 
+ }, + { + "epoch": 1.7052800000000001, + "grad_norm": 0.7125142571207832, + "learning_rate": 2.142308502741087e-06, + "loss": 0.35316529870033264, + "memory(GiB)": 77.0, + "step": 5329, + "token_acc": 0.913718723037101, + "train_speed(iter/s)": 0.588751 + }, + { + "epoch": 1.7056, + "grad_norm": 0.6980876159808694, + "learning_rate": 2.141435722759804e-06, + "loss": 0.33628761768341064, + "memory(GiB)": 77.0, + "step": 5330, + "token_acc": 0.8764721277152577, + "train_speed(iter/s)": 0.588604 + }, + { + "epoch": 1.7059199999999999, + "grad_norm": 0.6059520374463914, + "learning_rate": 2.1405629873956254e-06, + "loss": 0.33039626479148865, + "memory(GiB)": 77.0, + "step": 5331, + "token_acc": 0.8676262387918829, + "train_speed(iter/s)": 0.588438 + }, + { + "epoch": 1.70624, + "grad_norm": 0.7069429763620299, + "learning_rate": 2.1396902967571483e-06, + "loss": 0.3102489113807678, + "memory(GiB)": 77.0, + "step": 5332, + "token_acc": 0.8652833694642305, + "train_speed(iter/s)": 0.588286 + }, + { + "epoch": 1.70656, + "grad_norm": 0.6222497601799261, + "learning_rate": 2.1388176509529625e-06, + "loss": 0.33671531081199646, + "memory(GiB)": 77.0, + "step": 5333, + "token_acc": 0.9353500853866796, + "train_speed(iter/s)": 0.588135 + }, + { + "epoch": 1.70688, + "grad_norm": 0.6630233820078344, + "learning_rate": 2.1379450500916545e-06, + "loss": 0.3158968687057495, + "memory(GiB)": 77.0, + "step": 5334, + "token_acc": 0.8236052886165753, + "train_speed(iter/s)": 0.587966 + }, + { + "epoch": 1.7072, + "grad_norm": 0.6590217969789504, + "learning_rate": 2.1370724942818043e-06, + "loss": 0.37230876088142395, + "memory(GiB)": 77.0, + "step": 5335, + "token_acc": 0.916083916083916, + "train_speed(iter/s)": 0.587815 + }, + { + "epoch": 1.7075200000000001, + "grad_norm": 0.6028814734634426, + "learning_rate": 2.1361999836319866e-06, + "loss": 0.2638697028160095, + "memory(GiB)": 77.0, + "step": 5336, + "token_acc": 0.8629500580720093, + "train_speed(iter/s)": 0.587665 + }, + { + "epoch": 1.70784, + "grad_norm": 0.7678812202294982, + "learning_rate": 2.13532751825077e-06, + "loss": 0.42463839054107666, + "memory(GiB)": 77.0, + "step": 5337, + "token_acc": 0.8926065839179709, + "train_speed(iter/s)": 0.587499 + }, + { + "epoch": 1.70816, + "grad_norm": 0.663512349950165, + "learning_rate": 2.134455098246717e-06, + "loss": 0.3501768410205841, + "memory(GiB)": 77.0, + "step": 5338, + "token_acc": 0.9397435897435897, + "train_speed(iter/s)": 0.587346 + }, + { + "epoch": 1.70848, + "grad_norm": 0.6630766992307743, + "learning_rate": 2.1335827237283864e-06, + "loss": 0.2974296808242798, + "memory(GiB)": 77.0, + "step": 5339, + "token_acc": 0.9018824871648602, + "train_speed(iter/s)": 0.587196 + }, + { + "epoch": 1.7088, + "grad_norm": 0.6319523653872124, + "learning_rate": 2.132710394804329e-06, + "loss": 0.3345257639884949, + "memory(GiB)": 77.0, + "step": 5340, + "token_acc": 0.9170421415319374, + "train_speed(iter/s)": 0.587045 + }, + { + "epoch": 1.70912, + "grad_norm": 0.6861982629567595, + "learning_rate": 2.1318381115830915e-06, + "loss": 0.351291298866272, + "memory(GiB)": 77.0, + "step": 5341, + "token_acc": 0.8980440097799511, + "train_speed(iter/s)": 0.5869 + }, + { + "epoch": 1.7094399999999998, + "grad_norm": 0.6706507063109813, + "learning_rate": 2.130965874173214e-06, + "loss": 0.3550332188606262, + "memory(GiB)": 77.0, + "step": 5342, + "token_acc": 0.9295774647887324, + "train_speed(iter/s)": 0.586758 + }, + { + "epoch": 1.70976, + "grad_norm": 0.6189556803943245, + "learning_rate": 
2.1300936826832323e-06, + "loss": 0.2881452143192291, + "memory(GiB)": 77.0, + "step": 5343, + "token_acc": 0.8771604938271605, + "train_speed(iter/s)": 0.586597 + }, + { + "epoch": 1.71008, + "grad_norm": 0.6727139808713937, + "learning_rate": 2.129221537221675e-06, + "loss": 0.32038795948028564, + "memory(GiB)": 77.0, + "step": 5344, + "token_acc": 0.8854225751559841, + "train_speed(iter/s)": 0.586439 + }, + { + "epoch": 1.7104, + "grad_norm": 0.6730608225368753, + "learning_rate": 2.1283494378970647e-06, + "loss": 0.2765527069568634, + "memory(GiB)": 77.0, + "step": 5345, + "token_acc": 0.9192022500639223, + "train_speed(iter/s)": 0.586287 + }, + { + "epoch": 1.71072, + "grad_norm": 0.6399914952178292, + "learning_rate": 2.1274773848179214e-06, + "loss": 0.28931817412376404, + "memory(GiB)": 77.0, + "step": 5346, + "token_acc": 0.9097093382807668, + "train_speed(iter/s)": 0.586138 + }, + { + "epoch": 1.7110400000000001, + "grad_norm": 0.6349184901149495, + "learning_rate": 2.126605378092754e-06, + "loss": 0.2829344868659973, + "memory(GiB)": 77.0, + "step": 5347, + "token_acc": 0.8933426280934124, + "train_speed(iter/s)": 0.585989 + }, + { + "epoch": 1.71136, + "grad_norm": 0.7432795752683405, + "learning_rate": 2.1257334178300692e-06, + "loss": 0.34440115094184875, + "memory(GiB)": 77.0, + "step": 5348, + "token_acc": 0.8828534031413613, + "train_speed(iter/s)": 0.585829 + }, + { + "epoch": 1.7116799999999999, + "grad_norm": 0.7727211212838715, + "learning_rate": 2.1248615041383686e-06, + "loss": 0.363828182220459, + "memory(GiB)": 77.0, + "step": 5349, + "token_acc": 0.8874686144715819, + "train_speed(iter/s)": 0.585677 + }, + { + "epoch": 1.712, + "grad_norm": 0.745923808021857, + "learning_rate": 2.1239896371261464e-06, + "loss": 0.41400736570358276, + "memory(GiB)": 77.0, + "step": 5350, + "token_acc": 0.9340915718868474, + "train_speed(iter/s)": 0.585524 + }, + { + "epoch": 1.71232, + "grad_norm": 0.6175983958509915, + "learning_rate": 2.1231178169018905e-06, + "loss": 0.3032408058643341, + "memory(GiB)": 77.0, + "step": 5351, + "token_acc": 0.8894811656005686, + "train_speed(iter/s)": 0.585381 + }, + { + "epoch": 1.71264, + "grad_norm": 0.7090760121693913, + "learning_rate": 2.122246043574085e-06, + "loss": 0.3924520015716553, + "memory(GiB)": 77.0, + "step": 5352, + "token_acc": 0.8946030504497458, + "train_speed(iter/s)": 0.585229 + }, + { + "epoch": 1.71296, + "grad_norm": 0.7220549542548653, + "learning_rate": 2.121374317251206e-06, + "loss": 0.30350542068481445, + "memory(GiB)": 77.0, + "step": 5353, + "token_acc": 0.8774193548387097, + "train_speed(iter/s)": 0.585085 + }, + { + "epoch": 1.7132800000000001, + "grad_norm": 0.6280094585823486, + "learning_rate": 2.1205026380417247e-06, + "loss": 0.31076860427856445, + "memory(GiB)": 77.0, + "step": 5354, + "token_acc": 0.854118194509074, + "train_speed(iter/s)": 0.58492 + }, + { + "epoch": 1.7136, + "grad_norm": 0.7781173124998475, + "learning_rate": 2.1196310060541072e-06, + "loss": 0.34473803639411926, + "memory(GiB)": 77.0, + "step": 5355, + "token_acc": 0.8851880877742947, + "train_speed(iter/s)": 0.58478 + }, + { + "epoch": 1.7139199999999999, + "grad_norm": 0.7615739784388352, + "learning_rate": 2.1187594213968125e-06, + "loss": 0.3164230287075043, + "memory(GiB)": 77.0, + "step": 5356, + "token_acc": 0.9264828738512949, + "train_speed(iter/s)": 0.584626 + }, + { + "epoch": 1.71424, + "grad_norm": 0.6828409698696731, + "learning_rate": 2.1178878841782943e-06, + "loss": 0.3579304814338684, + "memory(GiB)": 77.0, + "step": 
5357, + "token_acc": 0.9146666666666666, + "train_speed(iter/s)": 0.584484 + }, + { + "epoch": 1.71456, + "grad_norm": 0.634377794954385, + "learning_rate": 2.117016394507e-06, + "loss": 0.3520524501800537, + "memory(GiB)": 77.0, + "step": 5358, + "token_acc": 0.8532276330690827, + "train_speed(iter/s)": 0.584327 + }, + { + "epoch": 1.71488, + "grad_norm": 0.6450083347049758, + "learning_rate": 2.1161449524913714e-06, + "loss": 0.3183605670928955, + "memory(GiB)": 77.0, + "step": 5359, + "token_acc": 0.8661770705834353, + "train_speed(iter/s)": 0.584174 + }, + { + "epoch": 1.7151999999999998, + "grad_norm": 0.7006914402035923, + "learning_rate": 2.1152735582398453e-06, + "loss": 0.3796583414077759, + "memory(GiB)": 77.0, + "step": 5360, + "token_acc": 0.8572256009267304, + "train_speed(iter/s)": 0.584027 + }, + { + "epoch": 1.7155200000000002, + "grad_norm": 0.6481126217671828, + "learning_rate": 2.1144022118608516e-06, + "loss": 0.4001976251602173, + "memory(GiB)": 77.0, + "step": 5361, + "token_acc": 0.865216304076019, + "train_speed(iter/s)": 0.58388 + }, + { + "epoch": 1.71584, + "grad_norm": 0.6042330142582307, + "learning_rate": 2.113530913462813e-06, + "loss": 0.2758113145828247, + "memory(GiB)": 77.0, + "step": 5362, + "token_acc": 0.9018021331371828, + "train_speed(iter/s)": 0.583735 + }, + { + "epoch": 1.71616, + "grad_norm": 0.60213261149872, + "learning_rate": 2.1126596631541473e-06, + "loss": 0.35804277658462524, + "memory(GiB)": 77.0, + "step": 5363, + "token_acc": 0.868109507954125, + "train_speed(iter/s)": 0.58358 + }, + { + "epoch": 1.71648, + "grad_norm": 0.6396491764727563, + "learning_rate": 2.111788461043267e-06, + "loss": 0.33956682682037354, + "memory(GiB)": 77.0, + "step": 5364, + "token_acc": 0.9408973252804141, + "train_speed(iter/s)": 0.583425 + }, + { + "epoch": 1.7168, + "grad_norm": 0.5734203955128613, + "learning_rate": 2.1109173072385785e-06, + "loss": 0.27733108401298523, + "memory(GiB)": 77.0, + "step": 5365, + "token_acc": 0.9521505376344086, + "train_speed(iter/s)": 0.583286 + }, + { + "epoch": 1.71712, + "grad_norm": 0.6472917058480601, + "learning_rate": 2.1100462018484813e-06, + "loss": 0.2812873125076294, + "memory(GiB)": 77.0, + "step": 5366, + "token_acc": 0.912836227442969, + "train_speed(iter/s)": 0.583143 + }, + { + "epoch": 1.7174399999999999, + "grad_norm": 0.622196377216073, + "learning_rate": 2.1091751449813704e-06, + "loss": 0.38648486137390137, + "memory(GiB)": 77.0, + "step": 5367, + "token_acc": 0.8768812330009066, + "train_speed(iter/s)": 0.582984 + }, + { + "epoch": 1.71776, + "grad_norm": 0.6386791086191806, + "learning_rate": 2.108304136745632e-06, + "loss": 0.33256909251213074, + "memory(GiB)": 77.0, + "step": 5368, + "token_acc": 0.8711451111642362, + "train_speed(iter/s)": 0.58284 + }, + { + "epoch": 1.71808, + "grad_norm": 0.6647033915476794, + "learning_rate": 2.1074331772496497e-06, + "loss": 0.32531970739364624, + "memory(GiB)": 77.0, + "step": 5369, + "token_acc": 0.9383139136394791, + "train_speed(iter/s)": 0.582683 + }, + { + "epoch": 1.7184, + "grad_norm": 0.7058518360293385, + "learning_rate": 2.106562266601798e-06, + "loss": 0.28420427441596985, + "memory(GiB)": 77.0, + "step": 5370, + "token_acc": 0.9286613512379354, + "train_speed(iter/s)": 0.582548 + }, + { + "epoch": 1.71872, + "grad_norm": 0.6245814955835257, + "learning_rate": 2.105691404910447e-06, + "loss": 0.4098966121673584, + "memory(GiB)": 77.0, + "step": 5371, + "token_acc": 0.8374052232518955, + "train_speed(iter/s)": 0.582394 + }, + { + "epoch": 
1.7190400000000001, + "grad_norm": 0.6814750032920153, + "learning_rate": 2.104820592283961e-06, + "loss": 0.3714461326599121, + "memory(GiB)": 77.0, + "step": 5372, + "token_acc": 0.8131433095803642, + "train_speed(iter/s)": 0.582246 + }, + { + "epoch": 1.71936, + "grad_norm": 0.6831173336385192, + "learning_rate": 2.1039498288306964e-06, + "loss": 0.3657996356487274, + "memory(GiB)": 77.0, + "step": 5373, + "token_acc": 0.8975478669801814, + "train_speed(iter/s)": 0.582091 + }, + { + "epoch": 1.7196799999999999, + "grad_norm": 0.6678687741863525, + "learning_rate": 2.1030791146590053e-06, + "loss": 0.30685269832611084, + "memory(GiB)": 77.0, + "step": 5374, + "token_acc": 0.8772538141470181, + "train_speed(iter/s)": 0.581934 + }, + { + "epoch": 1.72, + "grad_norm": 0.5910220470849185, + "learning_rate": 2.1022084498772335e-06, + "loss": 0.29918479919433594, + "memory(GiB)": 77.0, + "step": 5375, + "token_acc": 0.9402453502176494, + "train_speed(iter/s)": 0.581772 + }, + { + "epoch": 1.72032, + "grad_norm": 0.6973467514662478, + "learning_rate": 2.101337834593719e-06, + "loss": 0.36907413601875305, + "memory(GiB)": 77.0, + "step": 5376, + "token_acc": 0.9009287925696594, + "train_speed(iter/s)": 0.581633 + }, + { + "epoch": 1.72064, + "grad_norm": 0.6874910189278473, + "learning_rate": 2.100467268916795e-06, + "loss": 0.3022869825363159, + "memory(GiB)": 77.0, + "step": 5377, + "token_acc": 0.907074089577229, + "train_speed(iter/s)": 0.581491 + }, + { + "epoch": 1.72096, + "grad_norm": 0.6534768610157087, + "learning_rate": 2.0995967529547885e-06, + "loss": 0.32481661438941956, + "memory(GiB)": 77.0, + "step": 5378, + "token_acc": 0.9523058730784391, + "train_speed(iter/s)": 0.581343 + }, + { + "epoch": 1.7212800000000001, + "grad_norm": 0.6678996216757487, + "learning_rate": 2.0987262868160203e-06, + "loss": 0.3539204001426697, + "memory(GiB)": 77.0, + "step": 5379, + "token_acc": 0.862453531598513, + "train_speed(iter/s)": 0.581197 + }, + { + "epoch": 1.7216, + "grad_norm": 0.6409582091124164, + "learning_rate": 2.097855870608805e-06, + "loss": 0.3533562123775482, + "memory(GiB)": 77.0, + "step": 5380, + "token_acc": 0.9550827423167849, + "train_speed(iter/s)": 0.581053 + }, + { + "epoch": 1.72192, + "grad_norm": 0.6931655531118346, + "learning_rate": 2.0969855044414494e-06, + "loss": 0.34662654995918274, + "memory(GiB)": 77.0, + "step": 5381, + "token_acc": 0.8845824411134904, + "train_speed(iter/s)": 0.580908 + }, + { + "epoch": 1.72224, + "grad_norm": 0.6492587402671742, + "learning_rate": 2.096115188422257e-06, + "loss": 0.35963141918182373, + "memory(GiB)": 77.0, + "step": 5382, + "token_acc": 0.9018172640080767, + "train_speed(iter/s)": 0.58076 + }, + { + "epoch": 1.72256, + "grad_norm": 0.7522961006153047, + "learning_rate": 2.095244922659523e-06, + "loss": 0.41053661704063416, + "memory(GiB)": 77.0, + "step": 5383, + "token_acc": 0.897518530454399, + "train_speed(iter/s)": 0.580619 + }, + { + "epoch": 1.72288, + "grad_norm": 0.6915752468221519, + "learning_rate": 2.094374707261537e-06, + "loss": 0.3071290850639343, + "memory(GiB)": 77.0, + "step": 5384, + "token_acc": 0.872058087130696, + "train_speed(iter/s)": 0.58048 + }, + { + "epoch": 1.7231999999999998, + "grad_norm": 0.633546796949557, + "learning_rate": 2.093504542336582e-06, + "loss": 0.26347437500953674, + "memory(GiB)": 77.0, + "step": 5385, + "token_acc": 0.934763948497854, + "train_speed(iter/s)": 0.58034 + }, + { + "epoch": 1.7235200000000002, + "grad_norm": 0.6448426513416471, + "learning_rate": 
2.0926344279929358e-06, + "loss": 0.321247398853302, + "memory(GiB)": 77.0, + "step": 5386, + "token_acc": 0.8694235588972431, + "train_speed(iter/s)": 0.580203 + }, + { + "epoch": 1.72384, + "grad_norm": 0.7024590992895122, + "learning_rate": 2.091764364338868e-06, + "loss": 0.3443640470504761, + "memory(GiB)": 77.0, + "step": 5387, + "token_acc": 0.8979591836734694, + "train_speed(iter/s)": 0.580047 + }, + { + "epoch": 1.72416, + "grad_norm": 0.6224759030029908, + "learning_rate": 2.090894351482643e-06, + "loss": 0.3109826445579529, + "memory(GiB)": 77.0, + "step": 5388, + "token_acc": 0.927120669056153, + "train_speed(iter/s)": 0.579912 + }, + { + "epoch": 1.72448, + "grad_norm": 0.7202162846170866, + "learning_rate": 2.0900243895325196e-06, + "loss": 0.36809128522872925, + "memory(GiB)": 77.0, + "step": 5389, + "token_acc": 0.9311994113318617, + "train_speed(iter/s)": 0.579766 + }, + { + "epoch": 1.7248, + "grad_norm": 0.6061236645710525, + "learning_rate": 2.0891544785967484e-06, + "loss": 0.2954886853694916, + "memory(GiB)": 77.0, + "step": 5390, + "token_acc": 0.8507425742574257, + "train_speed(iter/s)": 0.579604 + }, + { + "epoch": 1.72512, + "grad_norm": 0.6905398588265469, + "learning_rate": 2.0882846187835752e-06, + "loss": 0.30937066674232483, + "memory(GiB)": 77.0, + "step": 5391, + "token_acc": 0.8532588454376164, + "train_speed(iter/s)": 0.579467 + }, + { + "epoch": 1.7254399999999999, + "grad_norm": 0.6855754308395694, + "learning_rate": 2.0874148102012388e-06, + "loss": 0.3544342517852783, + "memory(GiB)": 77.0, + "step": 5392, + "token_acc": 0.9223806911684037, + "train_speed(iter/s)": 0.579327 + }, + { + "epoch": 1.72576, + "grad_norm": 0.6418102991921002, + "learning_rate": 2.0865450529579725e-06, + "loss": 0.37253129482269287, + "memory(GiB)": 77.0, + "step": 5393, + "token_acc": 0.9140584915341201, + "train_speed(iter/s)": 0.579187 + }, + { + "epoch": 1.72608, + "grad_norm": 0.6747407995711991, + "learning_rate": 2.0856753471620017e-06, + "loss": 0.35578328371047974, + "memory(GiB)": 77.0, + "step": 5394, + "token_acc": 0.895093608779858, + "train_speed(iter/s)": 0.579046 + }, + { + "epoch": 1.7264, + "grad_norm": 0.6877375391025881, + "learning_rate": 2.0848056929215467e-06, + "loss": 0.4209662079811096, + "memory(GiB)": 77.0, + "step": 5395, + "token_acc": 0.8186986734049273, + "train_speed(iter/s)": 0.578896 + }, + { + "epoch": 1.72672, + "grad_norm": 0.6890758583332687, + "learning_rate": 2.0839360903448202e-06, + "loss": 0.30100375413894653, + "memory(GiB)": 77.0, + "step": 5396, + "token_acc": 0.9419294411489229, + "train_speed(iter/s)": 0.57875 + }, + { + "epoch": 1.7270400000000001, + "grad_norm": 0.6983345895179753, + "learning_rate": 2.08306653954003e-06, + "loss": 0.33951395750045776, + "memory(GiB)": 77.0, + "step": 5397, + "token_acc": 0.879542395693136, + "train_speed(iter/s)": 0.578594 + }, + { + "epoch": 1.72736, + "grad_norm": 0.6498399812776602, + "learning_rate": 2.0821970406153754e-06, + "loss": 0.284197062253952, + "memory(GiB)": 77.0, + "step": 5398, + "token_acc": 0.9374837872892348, + "train_speed(iter/s)": 0.578452 + }, + { + "epoch": 1.7276799999999999, + "grad_norm": 0.6965463090499643, + "learning_rate": 2.0813275936790523e-06, + "loss": 0.3564695417881012, + "memory(GiB)": 77.0, + "step": 5399, + "token_acc": 0.8527004909983633, + "train_speed(iter/s)": 0.578291 + }, + { + "epoch": 1.728, + "grad_norm": 0.6135894664268357, + "learning_rate": 2.080458198839247e-06, + "loss": 0.23973172903060913, + "memory(GiB)": 77.0, + "step": 5400, + 
"token_acc": 0.9640848117697967, + "train_speed(iter/s)": 0.578141 + }, + { + "epoch": 1.72832, + "grad_norm": 0.7318936740629501, + "learning_rate": 2.0795888562041406e-06, + "loss": 0.40345048904418945, + "memory(GiB)": 77.0, + "step": 5401, + "token_acc": 0.8638102524866106, + "train_speed(iter/s)": 0.577999 + }, + { + "epoch": 1.72864, + "grad_norm": 0.7053760031315681, + "learning_rate": 2.0787195658819085e-06, + "loss": 0.4126572906970978, + "memory(GiB)": 77.0, + "step": 5402, + "token_acc": 0.8837209302325582, + "train_speed(iter/s)": 0.577831 + }, + { + "epoch": 1.72896, + "grad_norm": 0.6907437327026443, + "learning_rate": 2.077850327980718e-06, + "loss": 0.27910056710243225, + "memory(GiB)": 77.0, + "step": 5403, + "token_acc": 0.8512064343163539, + "train_speed(iter/s)": 0.577692 + }, + { + "epoch": 1.7292800000000002, + "grad_norm": 0.7261341511448741, + "learning_rate": 2.0769811426087314e-06, + "loss": 0.3954682946205139, + "memory(GiB)": 77.0, + "step": 5404, + "token_acc": 0.866514360313316, + "train_speed(iter/s)": 0.577548 + }, + { + "epoch": 1.7296, + "grad_norm": 0.7161303987882207, + "learning_rate": 2.0761120098741034e-06, + "loss": 0.3345656394958496, + "memory(GiB)": 77.0, + "step": 5405, + "token_acc": 0.9053580370555834, + "train_speed(iter/s)": 0.577413 + }, + { + "epoch": 1.72992, + "grad_norm": 0.6320323990114441, + "learning_rate": 2.075242929884982e-06, + "loss": 0.39523160457611084, + "memory(GiB)": 77.0, + "step": 5406, + "token_acc": 0.897861739671995, + "train_speed(iter/s)": 0.577252 + }, + { + "epoch": 1.73024, + "grad_norm": 0.7144492575609582, + "learning_rate": 2.0743739027495098e-06, + "loss": 0.3441142141819, + "memory(GiB)": 77.0, + "step": 5407, + "token_acc": 0.8742260061919505, + "train_speed(iter/s)": 0.577103 + }, + { + "epoch": 1.73056, + "grad_norm": 0.6020461319661123, + "learning_rate": 2.0735049285758213e-06, + "loss": 0.31562578678131104, + "memory(GiB)": 77.0, + "step": 5408, + "token_acc": 0.9430128840436075, + "train_speed(iter/s)": 0.576955 + }, + { + "epoch": 1.73088, + "grad_norm": 0.6622047598063301, + "learning_rate": 2.0726360074720465e-06, + "loss": 0.2843044400215149, + "memory(GiB)": 77.0, + "step": 5409, + "token_acc": 0.961042379573572, + "train_speed(iter/s)": 0.576817 + }, + { + "epoch": 1.7311999999999999, + "grad_norm": 0.6505778942479008, + "learning_rate": 2.0717671395463063e-06, + "loss": 0.4100533425807953, + "memory(GiB)": 77.0, + "step": 5410, + "token_acc": 0.9008491508491508, + "train_speed(iter/s)": 0.576662 + }, + { + "epoch": 1.73152, + "grad_norm": 0.6647689428136303, + "learning_rate": 2.0708983249067172e-06, + "loss": 0.3207642436027527, + "memory(GiB)": 77.0, + "step": 5411, + "token_acc": 0.9100361663652803, + "train_speed(iter/s)": 0.576514 + }, + { + "epoch": 1.73184, + "grad_norm": 0.7329803130013641, + "learning_rate": 2.0700295636613877e-06, + "loss": 0.3917211592197418, + "memory(GiB)": 77.0, + "step": 5412, + "token_acc": 0.86243072050673, + "train_speed(iter/s)": 0.576381 + }, + { + "epoch": 1.73216, + "grad_norm": 0.7570063429743814, + "learning_rate": 2.0691608559184205e-06, + "loss": 0.4576661288738251, + "memory(GiB)": 77.0, + "step": 5413, + "token_acc": 0.848569434752268, + "train_speed(iter/s)": 0.57624 + }, + { + "epoch": 1.73248, + "grad_norm": 0.6519922429334819, + "learning_rate": 2.0682922017859104e-06, + "loss": 0.37355491518974304, + "memory(GiB)": 77.0, + "step": 5414, + "token_acc": 0.9313642756680731, + "train_speed(iter/s)": 0.576098 + }, + { + "epoch": 1.7328000000000001, + 
"grad_norm": 0.6105058635517169, + "learning_rate": 2.067423601371946e-06, + "loss": 0.3045344948768616, + "memory(GiB)": 77.0, + "step": 5415, + "token_acc": 0.9542553191489361, + "train_speed(iter/s)": 0.575951 + }, + { + "epoch": 1.73312, + "grad_norm": 0.6136927394177023, + "learning_rate": 2.0665550547846113e-06, + "loss": 0.2759678065776825, + "memory(GiB)": 77.0, + "step": 5416, + "token_acc": 0.8775942397289285, + "train_speed(iter/s)": 0.575816 + }, + { + "epoch": 1.7334399999999999, + "grad_norm": 0.6684835713274611, + "learning_rate": 2.065686562131981e-06, + "loss": 0.3335093557834625, + "memory(GiB)": 77.0, + "step": 5417, + "token_acc": 0.9395747855277882, + "train_speed(iter/s)": 0.575681 + }, + { + "epoch": 1.73376, + "grad_norm": 0.751040267489744, + "learning_rate": 2.064818123522125e-06, + "loss": 0.25296443700790405, + "memory(GiB)": 77.0, + "step": 5418, + "token_acc": 0.9669909659485754, + "train_speed(iter/s)": 0.575551 + }, + { + "epoch": 1.73408, + "grad_norm": 0.7927865131607558, + "learning_rate": 2.0639497390631026e-06, + "loss": 0.30200666189193726, + "memory(GiB)": 77.0, + "step": 5419, + "token_acc": 0.9121552604698672, + "train_speed(iter/s)": 0.575392 + }, + { + "epoch": 1.7344, + "grad_norm": 0.6223190289032645, + "learning_rate": 2.0630814088629712e-06, + "loss": 0.265317440032959, + "memory(GiB)": 77.0, + "step": 5420, + "token_acc": 0.9224308300395256, + "train_speed(iter/s)": 0.575251 + }, + { + "epoch": 1.73472, + "grad_norm": 0.6531420350845545, + "learning_rate": 2.0622131330297783e-06, + "loss": 0.3518480658531189, + "memory(GiB)": 77.0, + "step": 5421, + "token_acc": 0.8450824332006822, + "train_speed(iter/s)": 0.575104 + }, + { + "epoch": 1.7350400000000001, + "grad_norm": 0.6517188570504164, + "learning_rate": 2.061344911671567e-06, + "loss": 0.30526602268218994, + "memory(GiB)": 77.0, + "step": 5422, + "token_acc": 0.8957831325301204, + "train_speed(iter/s)": 0.57496 + }, + { + "epoch": 1.73536, + "grad_norm": 0.6347378819012857, + "learning_rate": 2.0604767448963725e-06, + "loss": 0.34498581290245056, + "memory(GiB)": 77.0, + "step": 5423, + "token_acc": 0.8832834544677213, + "train_speed(iter/s)": 0.574822 + }, + { + "epoch": 1.73568, + "grad_norm": 0.6335801861667805, + "learning_rate": 2.059608632812222e-06, + "loss": 0.2620140016078949, + "memory(GiB)": 77.0, + "step": 5424, + "token_acc": 0.9431263084438242, + "train_speed(iter/s)": 0.574687 + }, + { + "epoch": 1.736, + "grad_norm": 0.6515201952711841, + "learning_rate": 2.0587405755271376e-06, + "loss": 0.28074076771736145, + "memory(GiB)": 77.0, + "step": 5425, + "token_acc": 0.9602319801159901, + "train_speed(iter/s)": 0.574557 + }, + { + "epoch": 1.73632, + "grad_norm": 0.6615189855170741, + "learning_rate": 2.057872573149134e-06, + "loss": 0.3148599863052368, + "memory(GiB)": 77.0, + "step": 5426, + "token_acc": 0.8993879112471308, + "train_speed(iter/s)": 0.57441 + }, + { + "epoch": 1.73664, + "grad_norm": 0.6303115923062867, + "learning_rate": 2.057004625786219e-06, + "loss": 0.3251652717590332, + "memory(GiB)": 77.0, + "step": 5427, + "token_acc": 0.8816063254265502, + "train_speed(iter/s)": 0.574259 + }, + { + "epoch": 1.7369599999999998, + "grad_norm": 0.6904677077176785, + "learning_rate": 2.056136733546393e-06, + "loss": 0.34176602959632874, + "memory(GiB)": 77.0, + "step": 5428, + "token_acc": 0.9307692307692308, + "train_speed(iter/s)": 0.574119 + }, + { + "epoch": 1.7372800000000002, + "grad_norm": 0.7153924469280823, + "learning_rate": 2.055268896537651e-06, + "loss": 
0.3170933425426483, + "memory(GiB)": 77.0, + "step": 5429, + "token_acc": 0.9390804597701149, + "train_speed(iter/s)": 0.573984 + }, + { + "epoch": 1.7376, + "grad_norm": 0.6875475371077072, + "learning_rate": 2.05440111486798e-06, + "loss": 0.36860567331314087, + "memory(GiB)": 77.0, + "step": 5430, + "token_acc": 0.888261515601783, + "train_speed(iter/s)": 0.573844 + }, + { + "epoch": 1.73792, + "grad_norm": 0.6788568797923172, + "learning_rate": 2.0535333886453602e-06, + "loss": 0.330494225025177, + "memory(GiB)": 77.0, + "step": 5431, + "token_acc": 0.9670022371364653, + "train_speed(iter/s)": 0.573713 + }, + { + "epoch": 1.73824, + "grad_norm": 0.6455734859365108, + "learning_rate": 2.052665717977765e-06, + "loss": 0.34984511137008667, + "memory(GiB)": 77.0, + "step": 5432, + "token_acc": 0.8549382716049383, + "train_speed(iter/s)": 0.57357 + }, + { + "epoch": 1.73856, + "grad_norm": 0.5779735020208541, + "learning_rate": 2.0517981029731613e-06, + "loss": 0.2555270493030548, + "memory(GiB)": 77.0, + "step": 5433, + "token_acc": 0.9308739255014327, + "train_speed(iter/s)": 0.573425 + }, + { + "epoch": 1.73888, + "grad_norm": 0.6818252899737482, + "learning_rate": 2.050930543739509e-06, + "loss": 0.31154102087020874, + "memory(GiB)": 77.0, + "step": 5434, + "token_acc": 0.9182024741226963, + "train_speed(iter/s)": 0.57327 + }, + { + "epoch": 1.7391999999999999, + "grad_norm": 0.638365611503498, + "learning_rate": 2.0500630403847595e-06, + "loss": 0.37676942348480225, + "memory(GiB)": 77.0, + "step": 5435, + "token_acc": 0.9172535211267606, + "train_speed(iter/s)": 0.573131 + }, + { + "epoch": 1.73952, + "grad_norm": 0.6799394191296506, + "learning_rate": 2.0491955930168596e-06, + "loss": 0.32378286123275757, + "memory(GiB)": 77.0, + "step": 5436, + "token_acc": 0.9056016597510373, + "train_speed(iter/s)": 0.572987 + }, + { + "epoch": 1.73984, + "grad_norm": 0.6755447491475091, + "learning_rate": 2.048328201743748e-06, + "loss": 0.3239091634750366, + "memory(GiB)": 77.0, + "step": 5437, + "token_acc": 0.9090909090909091, + "train_speed(iter/s)": 0.572838 + }, + { + "epoch": 1.74016, + "grad_norm": 0.6778208776106646, + "learning_rate": 2.0474608666733556e-06, + "loss": 0.4130343198776245, + "memory(GiB)": 77.0, + "step": 5438, + "token_acc": 0.8984627831715211, + "train_speed(iter/s)": 0.572699 + }, + { + "epoch": 1.74048, + "grad_norm": 0.6515283790130717, + "learning_rate": 2.046593587913608e-06, + "loss": 0.2940884232521057, + "memory(GiB)": 77.0, + "step": 5439, + "token_acc": 0.9208963282937365, + "train_speed(iter/s)": 0.572556 + }, + { + "epoch": 1.7408000000000001, + "grad_norm": 1.032218837193921, + "learning_rate": 2.045726365572424e-06, + "loss": 0.324593186378479, + "memory(GiB)": 77.0, + "step": 5440, + "token_acc": 0.895508123606244, + "train_speed(iter/s)": 0.572418 + }, + { + "epoch": 1.74112, + "grad_norm": 0.7064390991588855, + "learning_rate": 2.044859199757713e-06, + "loss": 0.3769545555114746, + "memory(GiB)": 77.0, + "step": 5441, + "token_acc": 0.9172138167285184, + "train_speed(iter/s)": 0.572288 + }, + { + "epoch": 1.7414399999999999, + "grad_norm": 0.6319327923398336, + "learning_rate": 2.0439920905773793e-06, + "loss": 0.31932955980300903, + "memory(GiB)": 77.0, + "step": 5442, + "token_acc": 0.869263487799418, + "train_speed(iter/s)": 0.572141 + }, + { + "epoch": 1.74176, + "grad_norm": 0.6719282720352175, + "learning_rate": 2.0431250381393187e-06, + "loss": 0.4013943076133728, + "memory(GiB)": 77.0, + "step": 5443, + "token_acc": 0.8244150190458915, + 
"train_speed(iter/s)": 0.572 + }, + { + "epoch": 1.74208, + "grad_norm": 0.6905441259856483, + "learning_rate": 2.042258042551422e-06, + "loss": 0.38153591752052307, + "memory(GiB)": 77.0, + "step": 5444, + "token_acc": 0.8991741225051617, + "train_speed(iter/s)": 0.571856 + }, + { + "epoch": 1.7424, + "grad_norm": 0.7120443693687453, + "learning_rate": 2.041391103921571e-06, + "loss": 0.39883342385292053, + "memory(GiB)": 77.0, + "step": 5445, + "token_acc": 0.875085324232082, + "train_speed(iter/s)": 0.571725 + }, + { + "epoch": 1.74272, + "grad_norm": 0.6000989936174158, + "learning_rate": 2.0405242223576416e-06, + "loss": 0.3002015948295593, + "memory(GiB)": 77.0, + "step": 5446, + "token_acc": 0.8725862353523683, + "train_speed(iter/s)": 0.571568 + }, + { + "epoch": 1.7430400000000001, + "grad_norm": 0.662392852793818, + "learning_rate": 2.0396573979675027e-06, + "loss": 0.3533770442008972, + "memory(GiB)": 77.0, + "step": 5447, + "token_acc": 0.8909787530477186, + "train_speed(iter/s)": 0.57142 + }, + { + "epoch": 1.74336, + "grad_norm": 0.6769774236611416, + "learning_rate": 2.0387906308590138e-06, + "loss": 0.3557281196117401, + "memory(GiB)": 77.0, + "step": 5448, + "token_acc": 0.8857243319268636, + "train_speed(iter/s)": 0.571284 + }, + { + "epoch": 1.74368, + "grad_norm": 0.665650272477891, + "learning_rate": 2.0379239211400305e-06, + "loss": 0.3510602116584778, + "memory(GiB)": 77.0, + "step": 5449, + "token_acc": 0.9203020134228188, + "train_speed(iter/s)": 0.571151 + }, + { + "epoch": 1.744, + "grad_norm": 0.6781925931098433, + "learning_rate": 2.0370572689183996e-06, + "loss": 0.3448775112628937, + "memory(GiB)": 77.0, + "step": 5450, + "token_acc": 0.8472792607802875, + "train_speed(iter/s)": 0.571015 + }, + { + "epoch": 1.74432, + "grad_norm": 0.6299238674208274, + "learning_rate": 2.0361906743019604e-06, + "loss": 0.3491496741771698, + "memory(GiB)": 77.0, + "step": 5451, + "token_acc": 0.8730195712954334, + "train_speed(iter/s)": 0.570865 + }, + { + "epoch": 1.74464, + "grad_norm": 0.6843492054493338, + "learning_rate": 2.035324137398546e-06, + "loss": 0.30626267194747925, + "memory(GiB)": 77.0, + "step": 5452, + "token_acc": 0.8837074303405573, + "train_speed(iter/s)": 0.570733 + }, + { + "epoch": 1.7449599999999998, + "grad_norm": 0.6497643703402173, + "learning_rate": 2.0344576583159815e-06, + "loss": 0.37036797404289246, + "memory(GiB)": 77.0, + "step": 5453, + "token_acc": 0.8331323848565227, + "train_speed(iter/s)": 0.570602 + }, + { + "epoch": 1.7452800000000002, + "grad_norm": 0.6127246918134545, + "learning_rate": 2.033591237162086e-06, + "loss": 0.2519470751285553, + "memory(GiB)": 77.0, + "step": 5454, + "token_acc": 0.9338471419396275, + "train_speed(iter/s)": 0.570472 + }, + { + "epoch": 1.7456, + "grad_norm": 0.6240953690047589, + "learning_rate": 2.0327248740446694e-06, + "loss": 0.3268260955810547, + "memory(GiB)": 77.0, + "step": 5455, + "token_acc": 0.9176276771004942, + "train_speed(iter/s)": 0.570336 + }, + { + "epoch": 1.74592, + "grad_norm": 0.639088488457934, + "learning_rate": 2.0318585690715365e-06, + "loss": 0.340182363986969, + "memory(GiB)": 77.0, + "step": 5456, + "token_acc": 0.8983583653510304, + "train_speed(iter/s)": 0.570193 + }, + { + "epoch": 1.74624, + "grad_norm": 0.6727960747310144, + "learning_rate": 2.0309923223504836e-06, + "loss": 0.3912772536277771, + "memory(GiB)": 77.0, + "step": 5457, + "token_acc": 0.8576931307511235, + "train_speed(iter/s)": 0.570058 + }, + { + "epoch": 1.7465600000000001, + "grad_norm": 
0.5877910035656989, + "learning_rate": 2.0301261339893003e-06, + "loss": 0.28509294986724854, + "memory(GiB)": 77.0, + "step": 5458, + "token_acc": 0.9547560643227038, + "train_speed(iter/s)": 0.569909 + }, + { + "epoch": 1.74688, + "grad_norm": 0.6560103413045084, + "learning_rate": 2.0292600040957685e-06, + "loss": 0.34423524141311646, + "memory(GiB)": 77.0, + "step": 5459, + "token_acc": 0.8330317911605066, + "train_speed(iter/s)": 0.569779 + }, + { + "epoch": 1.7471999999999999, + "grad_norm": 0.6066850818709623, + "learning_rate": 2.028393932777663e-06, + "loss": 0.29041314125061035, + "memory(GiB)": 77.0, + "step": 5460, + "token_acc": 0.9475287745429926, + "train_speed(iter/s)": 0.569649 + }, + { + "epoch": 1.74752, + "grad_norm": 0.6022829497591047, + "learning_rate": 2.027527920142752e-06, + "loss": 0.2971116900444031, + "memory(GiB)": 77.0, + "step": 5461, + "token_acc": 0.8908915352429997, + "train_speed(iter/s)": 0.569488 + }, + { + "epoch": 1.74784, + "grad_norm": 0.6483746515197746, + "learning_rate": 2.0266619662987946e-06, + "loss": 0.3444351255893707, + "memory(GiB)": 77.0, + "step": 5462, + "token_acc": 0.8699059561128527, + "train_speed(iter/s)": 0.569352 + }, + { + "epoch": 1.74816, + "grad_norm": 0.6434028768804919, + "learning_rate": 2.0257960713535443e-06, + "loss": 0.3115732967853546, + "memory(GiB)": 77.0, + "step": 5463, + "token_acc": 0.9510668563300142, + "train_speed(iter/s)": 0.569217 + }, + { + "epoch": 1.74848, + "grad_norm": 0.6428527897707658, + "learning_rate": 2.0249302354147467e-06, + "loss": 0.3669087290763855, + "memory(GiB)": 77.0, + "step": 5464, + "token_acc": 0.8834628190899001, + "train_speed(iter/s)": 0.569048 + }, + { + "epoch": 1.7488000000000001, + "grad_norm": 0.62184646374442, + "learning_rate": 2.0240644585901404e-06, + "loss": 0.31041428446769714, + "memory(GiB)": 77.0, + "step": 5465, + "token_acc": 0.9615531023981728, + "train_speed(iter/s)": 0.568914 + }, + { + "epoch": 1.74912, + "grad_norm": 0.632655012832996, + "learning_rate": 2.023198740987456e-06, + "loss": 0.4133935272693634, + "memory(GiB)": 77.0, + "step": 5466, + "token_acc": 0.9143415178571429, + "train_speed(iter/s)": 0.56876 + }, + { + "epoch": 1.7494399999999999, + "grad_norm": 0.6766746424495391, + "learning_rate": 2.0223330827144164e-06, + "loss": 0.4424903392791748, + "memory(GiB)": 77.0, + "step": 5467, + "token_acc": 0.8885784142507859, + "train_speed(iter/s)": 0.568617 + }, + { + "epoch": 1.74976, + "grad_norm": 0.6761298319550015, + "learning_rate": 2.0214674838787386e-06, + "loss": 0.3725707232952118, + "memory(GiB)": 77.0, + "step": 5468, + "token_acc": 0.9523809523809523, + "train_speed(iter/s)": 0.568489 + }, + { + "epoch": 1.75008, + "grad_norm": 0.7152797220052932, + "learning_rate": 2.0206019445881314e-06, + "loss": 0.31280601024627686, + "memory(GiB)": 77.0, + "step": 5469, + "token_acc": 0.8846891262667762, + "train_speed(iter/s)": 0.568341 + }, + { + "epoch": 1.7504, + "grad_norm": 0.678808730426744, + "learning_rate": 2.0197364649502956e-06, + "loss": 0.3683091104030609, + "memory(GiB)": 77.0, + "step": 5470, + "token_acc": 0.9027982326951399, + "train_speed(iter/s)": 0.568209 + }, + { + "epoch": 1.75072, + "grad_norm": 0.6486810633828116, + "learning_rate": 2.0188710450729255e-06, + "loss": 0.3439314365386963, + "memory(GiB)": 77.0, + "step": 5471, + "token_acc": 0.8755080213903743, + "train_speed(iter/s)": 0.56807 + }, + { + "epoch": 1.7510400000000002, + "grad_norm": 0.6406237700898025, + "learning_rate": 2.018005685063707e-06, + "loss": 
0.27756771445274353, + "memory(GiB)": 77.0, + "step": 5472, + "token_acc": 0.9299495399228258, + "train_speed(iter/s)": 0.567944 + }, + { + "epoch": 1.75136, + "grad_norm": 0.7124278345597022, + "learning_rate": 2.01714038503032e-06, + "loss": 0.33614882826805115, + "memory(GiB)": 77.0, + "step": 5473, + "token_acc": 0.9200825715128281, + "train_speed(iter/s)": 0.567815 + }, + { + "epoch": 1.75168, + "grad_norm": 0.644391735006126, + "learning_rate": 2.016275145080436e-06, + "loss": 0.26931798458099365, + "memory(GiB)": 77.0, + "step": 5474, + "token_acc": 0.9301989150090416, + "train_speed(iter/s)": 0.567685 + }, + { + "epoch": 1.752, + "grad_norm": 0.6475783126617387, + "learning_rate": 2.0154099653217186e-06, + "loss": 0.2718438506126404, + "memory(GiB)": 77.0, + "step": 5475, + "token_acc": 0.9673300788584304, + "train_speed(iter/s)": 0.567547 + }, + { + "epoch": 1.75232, + "grad_norm": 0.74924147304275, + "learning_rate": 2.0145448458618263e-06, + "loss": 0.32640719413757324, + "memory(GiB)": 77.0, + "step": 5476, + "token_acc": 0.924908424908425, + "train_speed(iter/s)": 0.56742 + }, + { + "epoch": 1.75264, + "grad_norm": 0.6645780370402127, + "learning_rate": 2.013679786808405e-06, + "loss": 0.2903607487678528, + "memory(GiB)": 77.0, + "step": 5477, + "token_acc": 0.9122915379864114, + "train_speed(iter/s)": 0.567282 + }, + { + "epoch": 1.7529599999999999, + "grad_norm": 0.6578303827150642, + "learning_rate": 2.012814788269097e-06, + "loss": 0.3673662841320038, + "memory(GiB)": 77.0, + "step": 5478, + "token_acc": 0.9111111111111111, + "train_speed(iter/s)": 0.567151 + }, + { + "epoch": 1.75328, + "grad_norm": 0.6364811643760447, + "learning_rate": 2.011949850351538e-06, + "loss": 0.3386451005935669, + "memory(GiB)": 77.0, + "step": 5479, + "token_acc": 0.948309705561614, + "train_speed(iter/s)": 0.567023 + }, + { + "epoch": 1.7536, + "grad_norm": 0.637307715988449, + "learning_rate": 2.011084973163354e-06, + "loss": 0.34675121307373047, + "memory(GiB)": 77.0, + "step": 5480, + "token_acc": 0.9129063561377971, + "train_speed(iter/s)": 0.566879 + }, + { + "epoch": 1.75392, + "grad_norm": 0.6643250663573215, + "learning_rate": 2.0102201568121632e-06, + "loss": 0.3817083239555359, + "memory(GiB)": 77.0, + "step": 5481, + "token_acc": 0.8763545429285913, + "train_speed(iter/s)": 0.566752 + }, + { + "epoch": 1.75424, + "grad_norm": 0.6885469434993577, + "learning_rate": 2.0093554014055777e-06, + "loss": 0.4041348993778229, + "memory(GiB)": 77.0, + "step": 5482, + "token_acc": 0.9011764705882352, + "train_speed(iter/s)": 0.566602 + }, + { + "epoch": 1.7545600000000001, + "grad_norm": 0.6577974093238528, + "learning_rate": 2.0084907070512012e-06, + "loss": 0.34572073817253113, + "memory(GiB)": 77.0, + "step": 5483, + "token_acc": 0.8981553650298779, + "train_speed(iter/s)": 0.566473 + }, + { + "epoch": 1.75488, + "grad_norm": 0.6800658633536958, + "learning_rate": 2.00762607385663e-06, + "loss": 0.3912883698940277, + "memory(GiB)": 77.0, + "step": 5484, + "token_acc": 0.8754709871891485, + "train_speed(iter/s)": 0.566321 + }, + { + "epoch": 1.7551999999999999, + "grad_norm": 0.6401885869872296, + "learning_rate": 2.006761501929452e-06, + "loss": 0.4045589864253998, + "memory(GiB)": 77.0, + "step": 5485, + "token_acc": 0.9062348960850652, + "train_speed(iter/s)": 0.566189 + }, + { + "epoch": 1.75552, + "grad_norm": 0.6877562010362906, + "learning_rate": 2.0058969913772485e-06, + "loss": 0.32247787714004517, + "memory(GiB)": 77.0, + "step": 5486, + "token_acc": 0.8703703703703703, + 
"train_speed(iter/s)": 0.56606 + }, + { + "epoch": 1.75584, + "grad_norm": 0.6171145316258065, + "learning_rate": 2.0050325423075936e-06, + "loss": 0.33116477727890015, + "memory(GiB)": 77.0, + "step": 5487, + "token_acc": 0.8823529411764706, + "train_speed(iter/s)": 0.565927 + }, + { + "epoch": 1.75616, + "grad_norm": 0.607776909690509, + "learning_rate": 2.0041681548280523e-06, + "loss": 0.34779980778694153, + "memory(GiB)": 77.0, + "step": 5488, + "token_acc": 0.8724954462659381, + "train_speed(iter/s)": 0.565789 + }, + { + "epoch": 1.75648, + "grad_norm": 0.5602786200081105, + "learning_rate": 2.003303829046182e-06, + "loss": 0.29276415705680847, + "memory(GiB)": 77.0, + "step": 5489, + "token_acc": 0.8957464553794829, + "train_speed(iter/s)": 0.565649 + }, + { + "epoch": 1.7568000000000001, + "grad_norm": 0.632626470127934, + "learning_rate": 2.0024395650695354e-06, + "loss": 0.2840777635574341, + "memory(GiB)": 77.0, + "step": 5490, + "token_acc": 0.9316644113667117, + "train_speed(iter/s)": 0.565512 + }, + { + "epoch": 1.75712, + "grad_norm": 0.6776511525382287, + "learning_rate": 2.001575363005653e-06, + "loss": 0.3119069039821625, + "memory(GiB)": 77.0, + "step": 5491, + "token_acc": 0.9236111111111112, + "train_speed(iter/s)": 0.565378 + }, + { + "epoch": 1.75744, + "grad_norm": 0.6802909077202411, + "learning_rate": 2.0007112229620694e-06, + "loss": 0.35629552602767944, + "memory(GiB)": 77.0, + "step": 5492, + "token_acc": 0.8894095595126523, + "train_speed(iter/s)": 0.565256 + }, + { + "epoch": 1.75776, + "grad_norm": 0.6203030108209696, + "learning_rate": 1.9998471450463134e-06, + "loss": 0.3582903742790222, + "memory(GiB)": 77.0, + "step": 5493, + "token_acc": 0.9091684434968017, + "train_speed(iter/s)": 0.56512 + }, + { + "epoch": 1.75808, + "grad_norm": 0.7142376533984027, + "learning_rate": 1.9989831293659033e-06, + "loss": 0.3493253290653229, + "memory(GiB)": 77.0, + "step": 5494, + "token_acc": 0.8871369294605809, + "train_speed(iter/s)": 0.564992 + }, + { + "epoch": 1.7584, + "grad_norm": 0.6661454086848031, + "learning_rate": 1.9981191760283515e-06, + "loss": 0.4055398106575012, + "memory(GiB)": 77.0, + "step": 5495, + "token_acc": 0.8610705596107056, + "train_speed(iter/s)": 0.564853 + }, + { + "epoch": 1.7587199999999998, + "grad_norm": 0.7097433490374918, + "learning_rate": 1.997255285141162e-06, + "loss": 0.29895663261413574, + "memory(GiB)": 77.0, + "step": 5496, + "token_acc": 0.8895131086142322, + "train_speed(iter/s)": 0.564729 + }, + { + "epoch": 1.7590400000000002, + "grad_norm": 0.6776942574369214, + "learning_rate": 1.996391456811831e-06, + "loss": 0.3498537540435791, + "memory(GiB)": 77.0, + "step": 5497, + "token_acc": 0.8538439876091242, + "train_speed(iter/s)": 0.564586 + }, + { + "epoch": 1.75936, + "grad_norm": 0.6496045304986672, + "learning_rate": 1.995527691147847e-06, + "loss": 0.31243273615837097, + "memory(GiB)": 77.0, + "step": 5498, + "token_acc": 0.8776239392585976, + "train_speed(iter/s)": 0.564459 + }, + { + "epoch": 1.75968, + "grad_norm": 0.7003403859916617, + "learning_rate": 1.9946639882566907e-06, + "loss": 0.4232310950756073, + "memory(GiB)": 77.0, + "step": 5499, + "token_acc": 0.793552036199095, + "train_speed(iter/s)": 0.564334 + }, + { + "epoch": 1.76, + "grad_norm": 0.702441437605545, + "learning_rate": 1.9938003482458347e-06, + "loss": 0.32481932640075684, + "memory(GiB)": 77.0, + "step": 5500, + "token_acc": 0.8964679911699779, + "train_speed(iter/s)": 0.564206 + }, + { + "epoch": 1.76032, + "grad_norm": 0.7572097969175448, + 
"learning_rate": 1.992936771222744e-06, + "loss": 0.4144580364227295, + "memory(GiB)": 77.0, + "step": 5501, + "token_acc": 0.902638020125102, + "train_speed(iter/s)": 0.564079 + }, + { + "epoch": 1.76064, + "grad_norm": 0.6099696587056593, + "learning_rate": 1.9920732572948757e-06, + "loss": 0.313873291015625, + "memory(GiB)": 77.0, + "step": 5502, + "token_acc": 0.9006501950585175, + "train_speed(iter/s)": 0.563946 + }, + { + "epoch": 1.7609599999999999, + "grad_norm": 0.6626451551363651, + "learning_rate": 1.9912098065696795e-06, + "loss": 0.29488909244537354, + "memory(GiB)": 77.0, + "step": 5503, + "token_acc": 0.8715846994535519, + "train_speed(iter/s)": 0.563819 + }, + { + "epoch": 1.76128, + "grad_norm": 0.6207640060865326, + "learning_rate": 1.9903464191545972e-06, + "loss": 0.2893049120903015, + "memory(GiB)": 77.0, + "step": 5504, + "token_acc": 0.933641975308642, + "train_speed(iter/s)": 0.563684 + }, + { + "epoch": 1.7616, + "grad_norm": 0.627352193695327, + "learning_rate": 1.989483095157062e-06, + "loss": 0.34196174144744873, + "memory(GiB)": 77.0, + "step": 5505, + "token_acc": 0.9473565804274465, + "train_speed(iter/s)": 0.563543 + }, + { + "epoch": 1.76192, + "grad_norm": 0.6979677062832712, + "learning_rate": 1.988619834684499e-06, + "loss": 0.4568280279636383, + "memory(GiB)": 77.0, + "step": 5506, + "token_acc": 0.8654734411085451, + "train_speed(iter/s)": 0.563401 + }, + { + "epoch": 1.76224, + "grad_norm": 0.6357354975111483, + "learning_rate": 1.987756637844326e-06, + "loss": 0.38194504380226135, + "memory(GiB)": 77.0, + "step": 5507, + "token_acc": 0.8770656993147924, + "train_speed(iter/s)": 0.563258 + }, + { + "epoch": 1.7625600000000001, + "grad_norm": 0.6522858039004557, + "learning_rate": 1.9868935047439534e-06, + "loss": 0.3373927175998688, + "memory(GiB)": 77.0, + "step": 5508, + "token_acc": 0.9554140127388535, + "train_speed(iter/s)": 0.56312 + }, + { + "epoch": 1.76288, + "grad_norm": 0.7236964631374407, + "learning_rate": 1.9860304354907833e-06, + "loss": 0.3663256764411926, + "memory(GiB)": 77.0, + "step": 5509, + "token_acc": 0.930627654554035, + "train_speed(iter/s)": 0.56299 + }, + { + "epoch": 1.7631999999999999, + "grad_norm": 0.6593805907608233, + "learning_rate": 1.98516743019221e-06, + "loss": 0.3520326018333435, + "memory(GiB)": 77.0, + "step": 5510, + "token_acc": 0.9241110147441457, + "train_speed(iter/s)": 0.562856 + }, + { + "epoch": 1.76352, + "grad_norm": 0.6870239689103421, + "learning_rate": 1.9843044889556183e-06, + "loss": 0.40116316080093384, + "memory(GiB)": 77.0, + "step": 5511, + "token_acc": 0.9173913043478261, + "train_speed(iter/s)": 0.562727 + }, + { + "epoch": 1.76384, + "grad_norm": 0.6954597293969838, + "learning_rate": 1.9834416118883866e-06, + "loss": 0.3319357633590698, + "memory(GiB)": 77.0, + "step": 5512, + "token_acc": 0.8954205911010068, + "train_speed(iter/s)": 0.562586 + }, + { + "epoch": 1.76416, + "grad_norm": 0.6602440979878146, + "learning_rate": 1.982578799097886e-06, + "loss": 0.3302595019340515, + "memory(GiB)": 77.0, + "step": 5513, + "token_acc": 0.955237242614145, + "train_speed(iter/s)": 0.562456 + }, + { + "epoch": 1.76448, + "grad_norm": 0.6931683475499617, + "learning_rate": 1.9817160506914775e-06, + "loss": 0.2487914115190506, + "memory(GiB)": 77.0, + "step": 5514, + "token_acc": 0.9047969123322919, + "train_speed(iter/s)": 0.562328 + }, + { + "epoch": 1.7648000000000001, + "grad_norm": 0.6280101205288925, + "learning_rate": 1.9808533667765157e-06, + "loss": 0.38609176874160767, + "memory(GiB)": 
77.0, + "step": 5515, + "token_acc": 0.9130531962449709, + "train_speed(iter/s)": 0.562197 + }, + { + "epoch": 1.76512, + "grad_norm": 0.6628884847762153, + "learning_rate": 1.9799907474603473e-06, + "loss": 0.3700448274612427, + "memory(GiB)": 77.0, + "step": 5516, + "token_acc": 0.8290350623007824, + "train_speed(iter/s)": 0.56207 + }, + { + "epoch": 1.76544, + "grad_norm": 0.6662934875434491, + "learning_rate": 1.979128192850309e-06, + "loss": 0.3122379183769226, + "memory(GiB)": 77.0, + "step": 5517, + "token_acc": 0.9315410199556541, + "train_speed(iter/s)": 0.561947 + }, + { + "epoch": 1.76576, + "grad_norm": 0.6000954167177518, + "learning_rate": 1.978265703053731e-06, + "loss": 0.32235047221183777, + "memory(GiB)": 77.0, + "step": 5518, + "token_acc": 0.9358441558441558, + "train_speed(iter/s)": 0.561817 + }, + { + "epoch": 1.76608, + "grad_norm": 0.6349754656841597, + "learning_rate": 1.9774032781779368e-06, + "loss": 0.36585935950279236, + "memory(GiB)": 77.0, + "step": 5519, + "token_acc": 0.8508745923510228, + "train_speed(iter/s)": 0.561687 + }, + { + "epoch": 1.7664, + "grad_norm": 0.6659735125249765, + "learning_rate": 1.9765409183302383e-06, + "loss": 0.3598938584327698, + "memory(GiB)": 77.0, + "step": 5520, + "token_acc": 0.8591210114388922, + "train_speed(iter/s)": 0.561556 + }, + { + "epoch": 1.7667199999999998, + "grad_norm": 0.6805627811636339, + "learning_rate": 1.975678623617941e-06, + "loss": 0.464578241109848, + "memory(GiB)": 77.0, + "step": 5521, + "token_acc": 0.84192037470726, + "train_speed(iter/s)": 0.561418 + }, + { + "epoch": 1.7670400000000002, + "grad_norm": 0.6879433626433104, + "learning_rate": 1.9748163941483444e-06, + "loss": 0.42927461862564087, + "memory(GiB)": 77.0, + "step": 5522, + "token_acc": 0.8746987951807229, + "train_speed(iter/s)": 0.561261 + }, + { + "epoch": 1.76736, + "grad_norm": 0.6895314385222642, + "learning_rate": 1.9739542300287363e-06, + "loss": 0.35542160272598267, + "memory(GiB)": 77.0, + "step": 5523, + "token_acc": 0.8561382947553472, + "train_speed(iter/s)": 0.561123 + }, + { + "epoch": 1.76768, + "grad_norm": 0.664485317696576, + "learning_rate": 1.973092131366399e-06, + "loss": 0.3901570439338684, + "memory(GiB)": 77.0, + "step": 5524, + "token_acc": 0.8465092402464066, + "train_speed(iter/s)": 0.56099 + }, + { + "epoch": 1.768, + "grad_norm": 0.6925482922739541, + "learning_rate": 1.9722300982686056e-06, + "loss": 0.2940865457057953, + "memory(GiB)": 77.0, + "step": 5525, + "token_acc": 0.9235156857661395, + "train_speed(iter/s)": 0.560802 + }, + { + "epoch": 1.7683200000000001, + "grad_norm": 0.5795409058063252, + "learning_rate": 1.971368130842621e-06, + "loss": 0.35688871145248413, + "memory(GiB)": 77.0, + "step": 5526, + "token_acc": 0.9358757062146893, + "train_speed(iter/s)": 0.560579 + }, + { + "epoch": 1.76864, + "grad_norm": 0.6431153519677524, + "learning_rate": 1.9705062291957015e-06, + "loss": 0.3942663073539734, + "memory(GiB)": 77.0, + "step": 5527, + "token_acc": 0.8538165620570966, + "train_speed(iter/s)": 0.560451 + }, + { + "epoch": 1.7689599999999999, + "grad_norm": 0.5956391261513221, + "learning_rate": 1.9696443934350973e-06, + "loss": 0.3154259920120239, + "memory(GiB)": 77.0, + "step": 5528, + "token_acc": 0.9183620911666237, + "train_speed(iter/s)": 0.560322 + }, + { + "epoch": 1.76928, + "grad_norm": 0.7492825715722738, + "learning_rate": 1.968782623668047e-06, + "loss": 0.44489824771881104, + "memory(GiB)": 77.0, + "step": 5529, + "token_acc": 0.8882121103547116, + "train_speed(iter/s)": 
0.560201 + }, + { + "epoch": 1.7696, + "grad_norm": 0.6672492527559274, + "learning_rate": 1.9679209200017846e-06, + "loss": 0.2656043767929077, + "memory(GiB)": 77.0, + "step": 5530, + "token_acc": 0.9446714869537881, + "train_speed(iter/s)": 0.560069 + }, + { + "epoch": 1.76992, + "grad_norm": 0.623745653718874, + "learning_rate": 1.9670592825435335e-06, + "loss": 0.38505104184150696, + "memory(GiB)": 77.0, + "step": 5531, + "token_acc": 0.9385673098247949, + "train_speed(iter/s)": 0.559935 + }, + { + "epoch": 1.77024, + "grad_norm": 0.6065130725121819, + "learning_rate": 1.96619771140051e-06, + "loss": 0.3077125549316406, + "memory(GiB)": 77.0, + "step": 5532, + "token_acc": 0.9242288110212639, + "train_speed(iter/s)": 0.559806 + }, + { + "epoch": 1.7705600000000001, + "grad_norm": 0.7096635254823628, + "learning_rate": 1.9653362066799217e-06, + "loss": 0.33976876735687256, + "memory(GiB)": 77.0, + "step": 5533, + "token_acc": 0.9093009820912767, + "train_speed(iter/s)": 0.559682 + }, + { + "epoch": 1.77088, + "grad_norm": 0.670240168417915, + "learning_rate": 1.9644747684889667e-06, + "loss": 0.3080373704433441, + "memory(GiB)": 77.0, + "step": 5534, + "token_acc": 0.8989547038327527, + "train_speed(iter/s)": 0.559549 + }, + { + "epoch": 1.7711999999999999, + "grad_norm": 0.6678268637457345, + "learning_rate": 1.963613396934836e-06, + "loss": 0.32610541582107544, + "memory(GiB)": 77.0, + "step": 5535, + "token_acc": 0.9367088607594937, + "train_speed(iter/s)": 0.559408 + }, + { + "epoch": 1.77152, + "grad_norm": 0.6787289289005597, + "learning_rate": 1.9627520921247137e-06, + "loss": 0.3860577940940857, + "memory(GiB)": 77.0, + "step": 5536, + "token_acc": 0.8713905386033176, + "train_speed(iter/s)": 0.559281 + }, + { + "epoch": 1.77184, + "grad_norm": 0.5994743360736079, + "learning_rate": 1.961890854165774e-06, + "loss": 0.27974772453308105, + "memory(GiB)": 77.0, + "step": 5537, + "token_acc": 0.9424766222725984, + "train_speed(iter/s)": 0.559155 + }, + { + "epoch": 1.77216, + "grad_norm": 0.6658333167362336, + "learning_rate": 1.961029683165182e-06, + "loss": 0.3595031201839447, + "memory(GiB)": 77.0, + "step": 5538, + "token_acc": 0.8821041015265771, + "train_speed(iter/s)": 0.559019 + }, + { + "epoch": 1.77248, + "grad_norm": 0.7028618055972061, + "learning_rate": 1.960168579230097e-06, + "loss": 0.36082738637924194, + "memory(GiB)": 77.0, + "step": 5539, + "token_acc": 0.8879702356345598, + "train_speed(iter/s)": 0.5589 + }, + { + "epoch": 1.7728000000000002, + "grad_norm": 0.6594200904369322, + "learning_rate": 1.9593075424676673e-06, + "loss": 0.2861073613166809, + "memory(GiB)": 77.0, + "step": 5540, + "token_acc": 0.8864958001292268, + "train_speed(iter/s)": 0.558774 + }, + { + "epoch": 1.77312, + "grad_norm": 0.6626979680379743, + "learning_rate": 1.9584465729850342e-06, + "loss": 0.27652546763420105, + "memory(GiB)": 77.0, + "step": 5541, + "token_acc": 0.8936752136752136, + "train_speed(iter/s)": 0.558654 + }, + { + "epoch": 1.77344, + "grad_norm": 0.6065369113627487, + "learning_rate": 1.957585670889331e-06, + "loss": 0.24283450841903687, + "memory(GiB)": 77.0, + "step": 5542, + "token_acc": 0.9431326903890921, + "train_speed(iter/s)": 0.558526 + }, + { + "epoch": 1.77376, + "grad_norm": 0.7028356503858826, + "learning_rate": 1.956724836287681e-06, + "loss": 0.359344482421875, + "memory(GiB)": 77.0, + "step": 5543, + "token_acc": 0.963821892393321, + "train_speed(iter/s)": 0.5584 + }, + { + "epoch": 1.77408, + "grad_norm": 0.6383424515141144, + "learning_rate": 
1.9558640692872007e-06, + "loss": 0.35357850790023804, + "memory(GiB)": 77.0, + "step": 5544, + "token_acc": 0.8483842408145197, + "train_speed(iter/s)": 0.558261 + }, + { + "epoch": 1.7744, + "grad_norm": 0.6730076336826031, + "learning_rate": 1.9550033699949975e-06, + "loss": 0.3671906292438507, + "memory(GiB)": 77.0, + "step": 5545, + "token_acc": 0.8455473098330241, + "train_speed(iter/s)": 0.558139 + }, + { + "epoch": 1.7747199999999999, + "grad_norm": 0.6333889607789491, + "learning_rate": 1.954142738518171e-06, + "loss": 0.2758379578590393, + "memory(GiB)": 77.0, + "step": 5546, + "token_acc": 0.9561643835616438, + "train_speed(iter/s)": 0.557981 + }, + { + "epoch": 1.77504, + "grad_norm": 0.675184741487565, + "learning_rate": 1.9532821749638125e-06, + "loss": 0.3381524682044983, + "memory(GiB)": 77.0, + "step": 5547, + "token_acc": 0.960352422907489, + "train_speed(iter/s)": 0.557854 + }, + { + "epoch": 1.77536, + "grad_norm": 0.6429631104734759, + "learning_rate": 1.952421679439002e-06, + "loss": 0.29887688159942627, + "memory(GiB)": 77.0, + "step": 5548, + "token_acc": 0.8868103994927077, + "train_speed(iter/s)": 0.557727 + }, + { + "epoch": 1.77568, + "grad_norm": 0.6949956205542388, + "learning_rate": 1.951561252050815e-06, + "loss": 0.3560760021209717, + "memory(GiB)": 77.0, + "step": 5549, + "token_acc": 0.8519611978068325, + "train_speed(iter/s)": 0.557604 + }, + { + "epoch": 1.776, + "grad_norm": 0.6606145881689128, + "learning_rate": 1.9507008929063156e-06, + "loss": 0.34558776021003723, + "memory(GiB)": 77.0, + "step": 5550, + "token_acc": 0.9154057771664375, + "train_speed(iter/s)": 0.557469 + }, + { + "epoch": 1.7763200000000001, + "grad_norm": 0.6392293855427733, + "learning_rate": 1.9498406021125614e-06, + "loss": 0.33673161268234253, + "memory(GiB)": 77.0, + "step": 5551, + "token_acc": 0.9404388714733543, + "train_speed(iter/s)": 0.55734 + }, + { + "epoch": 1.77664, + "grad_norm": 0.5921543907209165, + "learning_rate": 1.9489803797766e-06, + "loss": 0.2695709466934204, + "memory(GiB)": 77.0, + "step": 5552, + "token_acc": 0.9619422572178478, + "train_speed(iter/s)": 0.557212 + }, + { + "epoch": 1.7769599999999999, + "grad_norm": 0.6480588597953775, + "learning_rate": 1.9481202260054718e-06, + "loss": 0.3570890426635742, + "memory(GiB)": 77.0, + "step": 5553, + "token_acc": 0.9493891797556719, + "train_speed(iter/s)": 0.557086 + }, + { + "epoch": 1.77728, + "grad_norm": 0.6114997518376807, + "learning_rate": 1.947260140906208e-06, + "loss": 0.3858465850353241, + "memory(GiB)": 77.0, + "step": 5554, + "token_acc": 0.8840548649807115, + "train_speed(iter/s)": 0.556953 + }, + { + "epoch": 1.7776, + "grad_norm": 0.5950330027543609, + "learning_rate": 1.946400124585831e-06, + "loss": 0.3296034634113312, + "memory(GiB)": 77.0, + "step": 5555, + "token_acc": 0.9119960668633235, + "train_speed(iter/s)": 0.556819 + }, + { + "epoch": 1.77792, + "grad_norm": 0.6200045097370578, + "learning_rate": 1.945540177151355e-06, + "loss": 0.36049002408981323, + "memory(GiB)": 77.0, + "step": 5556, + "token_acc": 0.8818998716302953, + "train_speed(iter/s)": 0.556688 + }, + { + "epoch": 1.77824, + "grad_norm": 0.624391744033914, + "learning_rate": 1.9446802987097858e-06, + "loss": 0.31724321842193604, + "memory(GiB)": 77.0, + "step": 5557, + "token_acc": 0.9356270810210877, + "train_speed(iter/s)": 0.556556 + }, + { + "epoch": 1.7785600000000001, + "grad_norm": 0.7177109858981293, + "learning_rate": 1.9438204893681197e-06, + "loss": 0.23756976425647736, + "memory(GiB)": 77.0, + "step": 
5558, + "token_acc": 0.9021257750221435, + "train_speed(iter/s)": 0.556441 + }, + { + "epoch": 1.77888, + "grad_norm": 0.6370725087236354, + "learning_rate": 1.9429607492333454e-06, + "loss": 0.3021664023399353, + "memory(GiB)": 77.0, + "step": 5559, + "token_acc": 0.8836199967850827, + "train_speed(iter/s)": 0.556305 + }, + { + "epoch": 1.7792, + "grad_norm": 0.7288456030054276, + "learning_rate": 1.9421010784124427e-06, + "loss": 0.2973686754703522, + "memory(GiB)": 77.0, + "step": 5560, + "token_acc": 0.9179296539891335, + "train_speed(iter/s)": 0.556192 + }, + { + "epoch": 1.77952, + "grad_norm": 0.7207293822193538, + "learning_rate": 1.941241477012383e-06, + "loss": 0.29890310764312744, + "memory(GiB)": 77.0, + "step": 5561, + "token_acc": 0.9248251748251748, + "train_speed(iter/s)": 0.556074 + }, + { + "epoch": 1.77984, + "grad_norm": 0.6084821708935111, + "learning_rate": 1.9403819451401286e-06, + "loss": 0.3245670199394226, + "memory(GiB)": 77.0, + "step": 5562, + "token_acc": 0.9425087108013938, + "train_speed(iter/s)": 0.555932 + }, + { + "epoch": 1.78016, + "grad_norm": 0.585291486449604, + "learning_rate": 1.939522482902633e-06, + "loss": 0.3011864721775055, + "memory(GiB)": 77.0, + "step": 5563, + "token_acc": 0.9422203879488238, + "train_speed(iter/s)": 0.555799 + }, + { + "epoch": 1.7804799999999998, + "grad_norm": 0.6534468588817212, + "learning_rate": 1.9386630904068413e-06, + "loss": 0.30841079354286194, + "memory(GiB)": 77.0, + "step": 5564, + "token_acc": 0.9541619797525309, + "train_speed(iter/s)": 0.555671 + }, + { + "epoch": 1.7808000000000002, + "grad_norm": 0.6839741396408291, + "learning_rate": 1.9378037677596904e-06, + "loss": 0.4170231521129608, + "memory(GiB)": 77.0, + "step": 5565, + "token_acc": 0.827906976744186, + "train_speed(iter/s)": 0.555539 + }, + { + "epoch": 1.78112, + "grad_norm": 0.6209232903017231, + "learning_rate": 1.9369445150681076e-06, + "loss": 0.3356586694717407, + "memory(GiB)": 77.0, + "step": 5566, + "token_acc": 0.9288685524126455, + "train_speed(iter/s)": 0.555399 + }, + { + "epoch": 1.78144, + "grad_norm": 0.6685816424755867, + "learning_rate": 1.9360853324390127e-06, + "loss": 0.30064457654953003, + "memory(GiB)": 77.0, + "step": 5567, + "token_acc": 0.9251844046364595, + "train_speed(iter/s)": 0.555285 + }, + { + "epoch": 1.78176, + "grad_norm": 0.6921498340422942, + "learning_rate": 1.9352262199793154e-06, + "loss": 0.42107027769088745, + "memory(GiB)": 77.0, + "step": 5568, + "token_acc": 0.8491010273972602, + "train_speed(iter/s)": 0.555159 + }, + { + "epoch": 1.78208, + "grad_norm": 0.6412389440693305, + "learning_rate": 1.934367177795918e-06, + "loss": 0.27561914920806885, + "memory(GiB)": 77.0, + "step": 5569, + "token_acc": 0.9489997182304875, + "train_speed(iter/s)": 0.555004 + }, + { + "epoch": 1.7824, + "grad_norm": 0.6414234168139118, + "learning_rate": 1.933508205995713e-06, + "loss": 0.2591477036476135, + "memory(GiB)": 77.0, + "step": 5570, + "token_acc": 0.952006294256491, + "train_speed(iter/s)": 0.554883 + }, + { + "epoch": 1.7827199999999999, + "grad_norm": 0.6732533306111961, + "learning_rate": 1.9326493046855843e-06, + "loss": 0.41768866777420044, + "memory(GiB)": 77.0, + "step": 5571, + "token_acc": 0.7817896389324961, + "train_speed(iter/s)": 0.554747 + }, + { + "epoch": 1.78304, + "grad_norm": 0.6067310900669443, + "learning_rate": 1.931790473972408e-06, + "loss": 0.3222310245037079, + "memory(GiB)": 77.0, + "step": 5572, + "token_acc": 0.8768038482095136, + "train_speed(iter/s)": 0.554623 + }, + { + "epoch": 
1.78336, + "grad_norm": 0.6006104836180766, + "learning_rate": 1.93093171396305e-06, + "loss": 0.2996223568916321, + "memory(GiB)": 77.0, + "step": 5573, + "token_acc": 0.8879047882646001, + "train_speed(iter/s)": 0.554499 + }, + { + "epoch": 1.78368, + "grad_norm": 0.678641208715813, + "learning_rate": 1.930073024764369e-06, + "loss": 0.3721780776977539, + "memory(GiB)": 77.0, + "step": 5574, + "token_acc": 0.9129746835443038, + "train_speed(iter/s)": 0.554338 + }, + { + "epoch": 1.784, + "grad_norm": 0.7222277473766564, + "learning_rate": 1.9292144064832124e-06, + "loss": 0.38780733942985535, + "memory(GiB)": 77.0, + "step": 5575, + "token_acc": 0.8784604996623903, + "train_speed(iter/s)": 0.55421 + }, + { + "epoch": 1.7843200000000001, + "grad_norm": 0.6636224995348189, + "learning_rate": 1.928355859226422e-06, + "loss": 0.36413055658340454, + "memory(GiB)": 77.0, + "step": 5576, + "token_acc": 0.9278818852924475, + "train_speed(iter/s)": 0.554075 + }, + { + "epoch": 1.78464, + "grad_norm": 0.6658027860239875, + "learning_rate": 1.927497383100828e-06, + "loss": 0.3559049069881439, + "memory(GiB)": 77.0, + "step": 5577, + "token_acc": 0.8680851063829788, + "train_speed(iter/s)": 0.553955 + }, + { + "epoch": 1.7849599999999999, + "grad_norm": 0.636593618335753, + "learning_rate": 1.9266389782132537e-06, + "loss": 0.31007474660873413, + "memory(GiB)": 77.0, + "step": 5578, + "token_acc": 0.8147292789325122, + "train_speed(iter/s)": 0.553836 + }, + { + "epoch": 1.78528, + "grad_norm": 0.6115232740318759, + "learning_rate": 1.9257806446705116e-06, + "loss": 0.2578410506248474, + "memory(GiB)": 77.0, + "step": 5579, + "token_acc": 0.9697433933358867, + "train_speed(iter/s)": 0.5537 + }, + { + "epoch": 1.7856, + "grad_norm": 0.6041038592807054, + "learning_rate": 1.924922382579407e-06, + "loss": 0.3964940309524536, + "memory(GiB)": 77.0, + "step": 5580, + "token_acc": 0.8535200605601817, + "train_speed(iter/s)": 0.553576 + }, + { + "epoch": 1.78592, + "grad_norm": 0.6265513151187603, + "learning_rate": 1.9240641920467364e-06, + "loss": 0.34898194670677185, + "memory(GiB)": 77.0, + "step": 5581, + "token_acc": 0.8450087565674256, + "train_speed(iter/s)": 0.553458 + }, + { + "epoch": 1.78624, + "grad_norm": 0.6091148213160172, + "learning_rate": 1.9232060731792855e-06, + "loss": 0.35345038771629333, + "memory(GiB)": 77.0, + "step": 5582, + "token_acc": 0.8751759737212577, + "train_speed(iter/s)": 0.553325 + }, + { + "epoch": 1.7865600000000001, + "grad_norm": 0.6460075016581753, + "learning_rate": 1.9223480260838335e-06, + "loss": 0.3739475607872009, + "memory(GiB)": 77.0, + "step": 5583, + "token_acc": 0.9383561643835616, + "train_speed(iter/s)": 0.553193 + }, + { + "epoch": 1.78688, + "grad_norm": 0.5992972294258213, + "learning_rate": 1.921490050867148e-06, + "loss": 0.29689928889274597, + "memory(GiB)": 77.0, + "step": 5584, + "token_acc": 0.8848031239830785, + "train_speed(iter/s)": 0.553023 + }, + { + "epoch": 1.7872, + "grad_norm": 0.6845644890957086, + "learning_rate": 1.9206321476359906e-06, + "loss": 0.3297058343887329, + "memory(GiB)": 77.0, + "step": 5585, + "token_acc": 0.8740216175922475, + "train_speed(iter/s)": 0.552897 + }, + { + "epoch": 1.78752, + "grad_norm": 0.6985200783392237, + "learning_rate": 1.9197743164971116e-06, + "loss": 0.37946316599845886, + "memory(GiB)": 77.0, + "step": 5586, + "token_acc": 0.9278445883441259, + "train_speed(iter/s)": 0.552776 + }, + { + "epoch": 1.78784, + "grad_norm": 0.6725700398733078, + "learning_rate": 1.918916557557254e-06, + "loss": 
0.3518933653831482, + "memory(GiB)": 77.0, + "step": 5587, + "token_acc": 0.9058725531028738, + "train_speed(iter/s)": 0.552661 + }, + { + "epoch": 1.78816, + "grad_norm": 0.7008633446093907, + "learning_rate": 1.9180588709231503e-06, + "loss": 0.3567865192890167, + "memory(GiB)": 77.0, + "step": 5588, + "token_acc": 0.8908296943231441, + "train_speed(iter/s)": 0.552531 + }, + { + "epoch": 1.7884799999999998, + "grad_norm": 0.6904723835751028, + "learning_rate": 1.9172012567015245e-06, + "loss": 0.3205740749835968, + "memory(GiB)": 77.0, + "step": 5589, + "token_acc": 0.9059139784946236, + "train_speed(iter/s)": 0.552415 + }, + { + "epoch": 1.7888, + "grad_norm": 0.6419386922184567, + "learning_rate": 1.916343714999094e-06, + "loss": 0.31251078844070435, + "memory(GiB)": 77.0, + "step": 5590, + "token_acc": 0.9442396313364055, + "train_speed(iter/s)": 0.552289 + }, + { + "epoch": 1.78912, + "grad_norm": 0.6154693330400728, + "learning_rate": 1.9154862459225617e-06, + "loss": 0.34095263481140137, + "memory(GiB)": 77.0, + "step": 5591, + "token_acc": 0.8785491303701501, + "train_speed(iter/s)": 0.552162 + }, + { + "epoch": 1.78944, + "grad_norm": 0.6805433223426816, + "learning_rate": 1.914628849578626e-06, + "loss": 0.29619210958480835, + "memory(GiB)": 77.0, + "step": 5592, + "token_acc": 0.9379956741167988, + "train_speed(iter/s)": 0.552046 + }, + { + "epoch": 1.78976, + "grad_norm": 0.7406936025191269, + "learning_rate": 1.913771526073976e-06, + "loss": 0.25195300579071045, + "memory(GiB)": 77.0, + "step": 5593, + "token_acc": 0.8966998968717772, + "train_speed(iter/s)": 0.55192 + }, + { + "epoch": 1.7900800000000001, + "grad_norm": 0.6376594600025949, + "learning_rate": 1.91291427551529e-06, + "loss": 0.3269922733306885, + "memory(GiB)": 77.0, + "step": 5594, + "token_acc": 0.9305555555555556, + "train_speed(iter/s)": 0.5518 + }, + { + "epoch": 1.7904, + "grad_norm": 0.6380526807301373, + "learning_rate": 1.912057098009238e-06, + "loss": 0.3591967225074768, + "memory(GiB)": 77.0, + "step": 5595, + "token_acc": 0.8901076579321552, + "train_speed(iter/s)": 0.551663 + }, + { + "epoch": 1.7907199999999999, + "grad_norm": 0.6866220837427028, + "learning_rate": 1.9111999936624804e-06, + "loss": 0.4168568551540375, + "memory(GiB)": 77.0, + "step": 5596, + "token_acc": 0.8462776659959759, + "train_speed(iter/s)": 0.551521 + }, + { + "epoch": 1.79104, + "grad_norm": 0.701921199104345, + "learning_rate": 1.91034296258167e-06, + "loss": 0.3583664298057556, + "memory(GiB)": 77.0, + "step": 5597, + "token_acc": 0.8831635710005992, + "train_speed(iter/s)": 0.551405 + }, + { + "epoch": 1.79136, + "grad_norm": 0.6848135954149346, + "learning_rate": 1.9094860048734482e-06, + "loss": 0.35170936584472656, + "memory(GiB)": 77.0, + "step": 5598, + "token_acc": 0.8972972972972973, + "train_speed(iter/s)": 0.551287 + }, + { + "epoch": 1.79168, + "grad_norm": 0.6355418249787327, + "learning_rate": 1.9086291206444498e-06, + "loss": 0.28423434495925903, + "memory(GiB)": 77.0, + "step": 5599, + "token_acc": 0.9418685121107266, + "train_speed(iter/s)": 0.551149 + }, + { + "epoch": 1.792, + "grad_norm": 0.6113596957780699, + "learning_rate": 1.9077723100012984e-06, + "loss": 0.30734705924987793, + "memory(GiB)": 77.0, + "step": 5600, + "token_acc": 0.9352059925093633, + "train_speed(iter/s)": 0.551028 + }, + { + "epoch": 1.7923200000000001, + "grad_norm": 0.6492268196127337, + "learning_rate": 1.9069155730506095e-06, + "loss": 0.3600848317146301, + "memory(GiB)": 77.0, + "step": 5601, + "token_acc": 
0.9173290937996821, + "train_speed(iter/s)": 0.5509 + }, + { + "epoch": 1.79264, + "grad_norm": 0.6506144722088133, + "learning_rate": 1.9060589098989884e-06, + "loss": 0.3741301894187927, + "memory(GiB)": 77.0, + "step": 5602, + "token_acc": 0.8499584372402328, + "train_speed(iter/s)": 0.550779 + }, + { + "epoch": 1.7929599999999999, + "grad_norm": 0.6353389812854787, + "learning_rate": 1.9052023206530335e-06, + "loss": 0.2956951856613159, + "memory(GiB)": 77.0, + "step": 5603, + "token_acc": 0.897165991902834, + "train_speed(iter/s)": 0.550642 + }, + { + "epoch": 1.79328, + "grad_norm": 0.7107296377599298, + "learning_rate": 1.9043458054193312e-06, + "loss": 0.3628067970275879, + "memory(GiB)": 77.0, + "step": 5604, + "token_acc": 0.9067371202113607, + "train_speed(iter/s)": 0.550528 + }, + { + "epoch": 1.7936, + "grad_norm": 0.6487088851789791, + "learning_rate": 1.9034893643044616e-06, + "loss": 0.2500455975532532, + "memory(GiB)": 77.0, + "step": 5605, + "token_acc": 0.9198382492863939, + "train_speed(iter/s)": 0.550402 + }, + { + "epoch": 1.79392, + "grad_norm": 0.6245843529740202, + "learning_rate": 1.9026329974149915e-06, + "loss": 0.3207409083843231, + "memory(GiB)": 77.0, + "step": 5606, + "token_acc": 0.8855475763016158, + "train_speed(iter/s)": 0.550273 + }, + { + "epoch": 1.7942399999999998, + "grad_norm": 0.7108749662540168, + "learning_rate": 1.9017767048574823e-06, + "loss": 0.4463685154914856, + "memory(GiB)": 77.0, + "step": 5607, + "token_acc": 0.8655204898728215, + "train_speed(iter/s)": 0.55014 + }, + { + "epoch": 1.7945600000000002, + "grad_norm": 0.7336444033210573, + "learning_rate": 1.900920486738484e-06, + "loss": 0.31819450855255127, + "memory(GiB)": 77.0, + "step": 5608, + "token_acc": 0.9718172983479106, + "train_speed(iter/s)": 0.550029 + }, + { + "epoch": 1.79488, + "grad_norm": 0.7281090275714197, + "learning_rate": 1.900064343164539e-06, + "loss": 0.3448181748390198, + "memory(GiB)": 77.0, + "step": 5609, + "token_acc": 0.9232954545454546, + "train_speed(iter/s)": 0.54991 + }, + { + "epoch": 1.7952, + "grad_norm": 0.6813157550549613, + "learning_rate": 1.8992082742421791e-06, + "loss": 0.3559873700141907, + "memory(GiB)": 77.0, + "step": 5610, + "token_acc": 0.8659054699946893, + "train_speed(iter/s)": 0.549792 + }, + { + "epoch": 1.79552, + "grad_norm": 0.7251271046370739, + "learning_rate": 1.8983522800779275e-06, + "loss": 0.3181569278240204, + "memory(GiB)": 77.0, + "step": 5611, + "token_acc": 0.8696032672112018, + "train_speed(iter/s)": 0.54968 + }, + { + "epoch": 1.79584, + "grad_norm": 0.6220639209418124, + "learning_rate": 1.8974963607782977e-06, + "loss": 0.2835833430290222, + "memory(GiB)": 77.0, + "step": 5612, + "token_acc": 0.899365367180417, + "train_speed(iter/s)": 0.549553 + }, + { + "epoch": 1.79616, + "grad_norm": 0.6890711876915077, + "learning_rate": 1.8966405164497935e-06, + "loss": 0.24979646503925323, + "memory(GiB)": 77.0, + "step": 5613, + "token_acc": 0.9516666666666667, + "train_speed(iter/s)": 0.549444 + }, + { + "epoch": 1.7964799999999999, + "grad_norm": 0.6202444260458978, + "learning_rate": 1.8957847471989108e-06, + "loss": 0.38931000232696533, + "memory(GiB)": 77.0, + "step": 5614, + "token_acc": 0.9320539419087137, + "train_speed(iter/s)": 0.549297 + }, + { + "epoch": 1.7968, + "grad_norm": 0.6741442909419081, + "learning_rate": 1.8949290531321347e-06, + "loss": 0.3814886808395386, + "memory(GiB)": 77.0, + "step": 5615, + "token_acc": 0.8597410604192355, + "train_speed(iter/s)": 0.549176 + }, + { + "epoch": 1.79712, + 
"grad_norm": 0.6308793984542473, + "learning_rate": 1.8940734343559414e-06, + "loss": 0.30952972173690796, + "memory(GiB)": 77.0, + "step": 5616, + "token_acc": 0.925161611138737, + "train_speed(iter/s)": 0.54906 + }, + { + "epoch": 1.79744, + "grad_norm": 0.5628424564619434, + "learning_rate": 1.8932178909767978e-06, + "loss": 0.2783905863761902, + "memory(GiB)": 77.0, + "step": 5617, + "token_acc": 0.9007364713416587, + "train_speed(iter/s)": 0.548922 + }, + { + "epoch": 1.79776, + "grad_norm": 0.6577265505119223, + "learning_rate": 1.8923624231011616e-06, + "loss": 0.3557913303375244, + "memory(GiB)": 77.0, + "step": 5618, + "token_acc": 0.9032946195780991, + "train_speed(iter/s)": 0.548805 + }, + { + "epoch": 1.7980800000000001, + "grad_norm": 1.057751090215547, + "learning_rate": 1.8915070308354816e-06, + "loss": 0.37845277786254883, + "memory(GiB)": 77.0, + "step": 5619, + "token_acc": 0.8538139450209358, + "train_speed(iter/s)": 0.548658 + }, + { + "epoch": 1.7984, + "grad_norm": 0.6762315731764765, + "learning_rate": 1.8906517142861952e-06, + "loss": 0.3795078992843628, + "memory(GiB)": 77.0, + "step": 5620, + "token_acc": 0.885449904275686, + "train_speed(iter/s)": 0.548546 + }, + { + "epoch": 1.7987199999999999, + "grad_norm": 0.6900615881532729, + "learning_rate": 1.8897964735597324e-06, + "loss": 0.2756153345108032, + "memory(GiB)": 77.0, + "step": 5621, + "token_acc": 0.8899312070043778, + "train_speed(iter/s)": 0.548435 + }, + { + "epoch": 1.79904, + "grad_norm": 0.6387724982599973, + "learning_rate": 1.888941308762513e-06, + "loss": 0.38313400745391846, + "memory(GiB)": 77.0, + "step": 5622, + "token_acc": 0.904232578024797, + "train_speed(iter/s)": 0.548309 + }, + { + "epoch": 1.79936, + "grad_norm": 0.660889476358805, + "learning_rate": 1.8880862200009474e-06, + "loss": 0.31155461072921753, + "memory(GiB)": 77.0, + "step": 5623, + "token_acc": 0.8767570281124498, + "train_speed(iter/s)": 0.548187 + }, + { + "epoch": 1.79968, + "grad_norm": 0.728375489392616, + "learning_rate": 1.8872312073814364e-06, + "loss": 0.3735949993133545, + "memory(GiB)": 77.0, + "step": 5624, + "token_acc": 0.8348382242287434, + "train_speed(iter/s)": 0.548051 + }, + { + "epoch": 1.8, + "grad_norm": 0.6776683659140951, + "learning_rate": 1.8863762710103718e-06, + "loss": 0.33511143922805786, + "memory(GiB)": 77.0, + "step": 5625, + "token_acc": 0.869901547116737, + "train_speed(iter/s)": 0.547932 + }, + { + "epoch": 1.8003200000000001, + "grad_norm": 0.6504154144975156, + "learning_rate": 1.8855214109941352e-06, + "loss": 0.37248390913009644, + "memory(GiB)": 77.0, + "step": 5626, + "token_acc": 0.8775510204081632, + "train_speed(iter/s)": 0.547818 + }, + { + "epoch": 1.80064, + "grad_norm": 0.6530986579267676, + "learning_rate": 1.8846666274391e-06, + "loss": 0.35231253504753113, + "memory(GiB)": 77.0, + "step": 5627, + "token_acc": 0.934570619691727, + "train_speed(iter/s)": 0.547699 + }, + { + "epoch": 1.80096, + "grad_norm": 0.6024732222184407, + "learning_rate": 1.8838119204516283e-06, + "loss": 0.3008323013782501, + "memory(GiB)": 77.0, + "step": 5628, + "token_acc": 0.8991266375545851, + "train_speed(iter/s)": 0.547582 + }, + { + "epoch": 1.80128, + "grad_norm": 0.6858453152551937, + "learning_rate": 1.8829572901380739e-06, + "loss": 0.4334579110145569, + "memory(GiB)": 77.0, + "step": 5629, + "token_acc": 0.8964511424404472, + "train_speed(iter/s)": 0.547461 + }, + { + "epoch": 1.8016, + "grad_norm": 0.7373488193240681, + "learning_rate": 1.8821027366047806e-06, + "loss": 
0.3861675262451172, + "memory(GiB)": 77.0, + "step": 5630, + "token_acc": 0.8975983989326217, + "train_speed(iter/s)": 0.547335 + }, + { + "epoch": 1.80192, + "grad_norm": 0.6393216121075381, + "learning_rate": 1.8812482599580831e-06, + "loss": 0.4029759466648102, + "memory(GiB)": 77.0, + "step": 5631, + "token_acc": 0.8664512654819602, + "train_speed(iter/s)": 0.547219 + }, + { + "epoch": 1.8022399999999998, + "grad_norm": 0.6174482785041201, + "learning_rate": 1.8803938603043057e-06, + "loss": 0.3351455330848694, + "memory(GiB)": 77.0, + "step": 5632, + "token_acc": 0.8914238592633315, + "train_speed(iter/s)": 0.547092 + }, + { + "epoch": 1.8025600000000002, + "grad_norm": 0.6161088180515593, + "learning_rate": 1.8795395377497638e-06, + "loss": 0.3459494709968567, + "memory(GiB)": 77.0, + "step": 5633, + "token_acc": 0.968521897810219, + "train_speed(iter/s)": 0.546975 + }, + { + "epoch": 1.80288, + "grad_norm": 0.6821887660168183, + "learning_rate": 1.8786852924007634e-06, + "loss": 0.24994246661663055, + "memory(GiB)": 77.0, + "step": 5634, + "token_acc": 0.931045859452326, + "train_speed(iter/s)": 0.546858 + }, + { + "epoch": 1.8032, + "grad_norm": 0.7348699137078472, + "learning_rate": 1.8778311243636e-06, + "loss": 0.36565259099006653, + "memory(GiB)": 77.0, + "step": 5635, + "token_acc": 0.8727944193680756, + "train_speed(iter/s)": 0.546733 + }, + { + "epoch": 1.80352, + "grad_norm": 0.6799847860288071, + "learning_rate": 1.8769770337445597e-06, + "loss": 0.3120626211166382, + "memory(GiB)": 77.0, + "step": 5636, + "token_acc": 0.8958293292331347, + "train_speed(iter/s)": 0.54662 + }, + { + "epoch": 1.80384, + "grad_norm": 0.6557239047243025, + "learning_rate": 1.8761230206499201e-06, + "loss": 0.340751588344574, + "memory(GiB)": 77.0, + "step": 5637, + "token_acc": 0.9231989844493812, + "train_speed(iter/s)": 0.546501 + }, + { + "epoch": 1.80416, + "grad_norm": 0.6392580001651176, + "learning_rate": 1.875269085185948e-06, + "loss": 0.263685405254364, + "memory(GiB)": 77.0, + "step": 5638, + "token_acc": 0.9198966408268734, + "train_speed(iter/s)": 0.54639 + }, + { + "epoch": 1.8044799999999999, + "grad_norm": 0.6697419082495114, + "learning_rate": 1.8744152274589005e-06, + "loss": 0.4070330262184143, + "memory(GiB)": 77.0, + "step": 5639, + "token_acc": 0.8121428571428572, + "train_speed(iter/s)": 0.54626 + }, + { + "epoch": 1.8048, + "grad_norm": 0.5854005065162262, + "learning_rate": 1.8735614475750259e-06, + "loss": 0.2728552222251892, + "memory(GiB)": 77.0, + "step": 5640, + "token_acc": 0.9395320571254938, + "train_speed(iter/s)": 0.546122 + }, + { + "epoch": 1.80512, + "grad_norm": 1.0634797957245932, + "learning_rate": 1.8727077456405618e-06, + "loss": 0.3842761218547821, + "memory(GiB)": 77.0, + "step": 5641, + "token_acc": 0.9066967644845748, + "train_speed(iter/s)": 0.546006 + }, + { + "epoch": 1.80544, + "grad_norm": 0.6900853569627994, + "learning_rate": 1.8718541217617365e-06, + "loss": 0.34002459049224854, + "memory(GiB)": 77.0, + "step": 5642, + "token_acc": 0.8998664886515354, + "train_speed(iter/s)": 0.545888 + }, + { + "epoch": 1.80576, + "grad_norm": 0.5786778100584345, + "learning_rate": 1.8710005760447696e-06, + "loss": 0.28281813859939575, + "memory(GiB)": 77.0, + "step": 5643, + "token_acc": 0.9400630914826499, + "train_speed(iter/s)": 0.545771 + }, + { + "epoch": 1.8060800000000001, + "grad_norm": 0.6101494269726112, + "learning_rate": 1.8701471085958694e-06, + "loss": 0.30813026428222656, + "memory(GiB)": 77.0, + "step": 5644, + "token_acc": 
0.9049317943336831, + "train_speed(iter/s)": 0.545655 + }, + { + "epoch": 1.8064, + "grad_norm": 0.6412107418386014, + "learning_rate": 1.869293719521235e-06, + "loss": 0.32633262872695923, + "memory(GiB)": 77.0, + "step": 5645, + "token_acc": 0.9090909090909091, + "train_speed(iter/s)": 0.545546 + }, + { + "epoch": 1.8067199999999999, + "grad_norm": 0.6103530776679594, + "learning_rate": 1.8684404089270566e-06, + "loss": 0.3251298666000366, + "memory(GiB)": 77.0, + "step": 5646, + "token_acc": 0.935467217346412, + "train_speed(iter/s)": 0.545422 + }, + { + "epoch": 1.80704, + "grad_norm": 0.706019446871552, + "learning_rate": 1.8675871769195131e-06, + "loss": 0.3380044400691986, + "memory(GiB)": 77.0, + "step": 5647, + "token_acc": 0.9627848101265822, + "train_speed(iter/s)": 0.545309 + }, + { + "epoch": 1.80736, + "grad_norm": 0.7709298688718994, + "learning_rate": 1.8667340236047753e-06, + "loss": 0.37786251306533813, + "memory(GiB)": 77.0, + "step": 5648, + "token_acc": 0.9276640790402259, + "train_speed(iter/s)": 0.545192 + }, + { + "epoch": 1.80768, + "grad_norm": 0.6042968224104576, + "learning_rate": 1.8658809490890022e-06, + "loss": 0.35637742280960083, + "memory(GiB)": 77.0, + "step": 5649, + "token_acc": 0.8590185105467069, + "train_speed(iter/s)": 0.54506 + }, + { + "epoch": 1.808, + "grad_norm": 0.6216347097800989, + "learning_rate": 1.8650279534783447e-06, + "loss": 0.3689466714859009, + "memory(GiB)": 77.0, + "step": 5650, + "token_acc": 0.8575980392156862, + "train_speed(iter/s)": 0.544942 + }, + { + "epoch": 1.8083200000000001, + "grad_norm": 0.6617518916464984, + "learning_rate": 1.8641750368789432e-06, + "loss": 0.3244919776916504, + "memory(GiB)": 77.0, + "step": 5651, + "token_acc": 0.8774889380530974, + "train_speed(iter/s)": 0.544827 + }, + { + "epoch": 1.80864, + "grad_norm": 0.6293463879410844, + "learning_rate": 1.8633221993969285e-06, + "loss": 0.3518295884132385, + "memory(GiB)": 77.0, + "step": 5652, + "token_acc": 0.8936035465484484, + "train_speed(iter/s)": 0.544702 + }, + { + "epoch": 1.80896, + "grad_norm": 0.6307800273542666, + "learning_rate": 1.8624694411384214e-06, + "loss": 0.3971695899963379, + "memory(GiB)": 77.0, + "step": 5653, + "token_acc": 0.9484392419175028, + "train_speed(iter/s)": 0.544573 + }, + { + "epoch": 1.80928, + "grad_norm": 0.6263306953832537, + "learning_rate": 1.8616167622095328e-06, + "loss": 0.3546515703201294, + "memory(GiB)": 77.0, + "step": 5654, + "token_acc": 0.9199424736337488, + "train_speed(iter/s)": 0.544456 + }, + { + "epoch": 1.8096, + "grad_norm": 0.698324623753708, + "learning_rate": 1.860764162716364e-06, + "loss": 0.35365772247314453, + "memory(GiB)": 77.0, + "step": 5655, + "token_acc": 0.95372460496614, + "train_speed(iter/s)": 0.544328 + }, + { + "epoch": 1.80992, + "grad_norm": 0.7106479962964346, + "learning_rate": 1.8599116427650057e-06, + "loss": 0.34949424862861633, + "memory(GiB)": 77.0, + "step": 5656, + "token_acc": 0.8629946771580652, + "train_speed(iter/s)": 0.544218 + }, + { + "epoch": 1.8102399999999998, + "grad_norm": 0.535611136876963, + "learning_rate": 1.85905920246154e-06, + "loss": 0.18370206654071808, + "memory(GiB)": 77.0, + "step": 5657, + "token_acc": 0.9567959634184799, + "train_speed(iter/s)": 0.544097 + }, + { + "epoch": 1.81056, + "grad_norm": 0.7348395679648976, + "learning_rate": 1.8582068419120374e-06, + "loss": 0.3051111102104187, + "memory(GiB)": 77.0, + "step": 5658, + "token_acc": 0.9066824831949387, + "train_speed(iter/s)": 0.543985 + }, + { + "epoch": 1.81088, + "grad_norm": 
0.6692700424678791, + "learning_rate": 1.8573545612225596e-06, + "loss": 0.3574029803276062, + "memory(GiB)": 77.0, + "step": 5659, + "token_acc": 0.8902609506057781, + "train_speed(iter/s)": 0.543874 + }, + { + "epoch": 1.8112, + "grad_norm": 0.6408333797451216, + "learning_rate": 1.856502360499159e-06, + "loss": 0.3853822946548462, + "memory(GiB)": 77.0, + "step": 5660, + "token_acc": 0.930162552679109, + "train_speed(iter/s)": 0.543752 + }, + { + "epoch": 1.81152, + "grad_norm": 0.5834782451379057, + "learning_rate": 1.8556502398478767e-06, + "loss": 0.285278856754303, + "memory(GiB)": 77.0, + "step": 5661, + "token_acc": 0.9466744799296807, + "train_speed(iter/s)": 0.543625 + }, + { + "epoch": 1.8118400000000001, + "grad_norm": 0.6815039953112141, + "learning_rate": 1.8547981993747455e-06, + "loss": 0.3727370500564575, + "memory(GiB)": 77.0, + "step": 5662, + "token_acc": 0.8901885891620912, + "train_speed(iter/s)": 0.54351 + }, + { + "epoch": 1.81216, + "grad_norm": 0.6381738982909694, + "learning_rate": 1.8539462391857843e-06, + "loss": 0.30523622035980225, + "memory(GiB)": 77.0, + "step": 5663, + "token_acc": 0.8931447225244832, + "train_speed(iter/s)": 0.5434 + }, + { + "epoch": 1.8124799999999999, + "grad_norm": 0.6430855520139674, + "learning_rate": 1.8530943593870065e-06, + "loss": 0.319827675819397, + "memory(GiB)": 77.0, + "step": 5664, + "token_acc": 0.8961501901140685, + "train_speed(iter/s)": 0.543271 + }, + { + "epoch": 1.8128, + "grad_norm": 0.6533622258561381, + "learning_rate": 1.852242560084413e-06, + "loss": 0.2970300614833832, + "memory(GiB)": 77.0, + "step": 5665, + "token_acc": 0.9447346714324993, + "train_speed(iter/s)": 0.543151 + }, + { + "epoch": 1.81312, + "grad_norm": 0.6678958461434715, + "learning_rate": 1.8513908413839966e-06, + "loss": 0.33008670806884766, + "memory(GiB)": 77.0, + "step": 5666, + "token_acc": 0.8036193029490617, + "train_speed(iter/s)": 0.543043 + }, + { + "epoch": 1.81344, + "grad_norm": 0.7344610253573828, + "learning_rate": 1.850539203391738e-06, + "loss": 0.42127296328544617, + "memory(GiB)": 77.0, + "step": 5667, + "token_acc": 0.9563086974275214, + "train_speed(iter/s)": 0.54293 + }, + { + "epoch": 1.81376, + "grad_norm": 0.6625354234601624, + "learning_rate": 1.8496876462136093e-06, + "loss": 0.3470376133918762, + "memory(GiB)": 77.0, + "step": 5668, + "token_acc": 0.8856887698146908, + "train_speed(iter/s)": 0.542814 + }, + { + "epoch": 1.8140800000000001, + "grad_norm": 0.62212634384824, + "learning_rate": 1.8488361699555718e-06, + "loss": 0.35989832878112793, + "memory(GiB)": 77.0, + "step": 5669, + "token_acc": 0.9132743362831859, + "train_speed(iter/s)": 0.542691 + }, + { + "epoch": 1.8144, + "grad_norm": 0.7325283584377842, + "learning_rate": 1.8479847747235768e-06, + "loss": 0.30979016423225403, + "memory(GiB)": 77.0, + "step": 5670, + "token_acc": 0.937862950058072, + "train_speed(iter/s)": 0.542579 + }, + { + "epoch": 1.8147199999999999, + "grad_norm": 0.6026519626676284, + "learning_rate": 1.8471334606235659e-06, + "loss": 0.27745455503463745, + "memory(GiB)": 77.0, + "step": 5671, + "token_acc": 0.9318238367753172, + "train_speed(iter/s)": 0.542469 + }, + { + "epoch": 1.81504, + "grad_norm": 0.6943477398727822, + "learning_rate": 1.8462822277614704e-06, + "loss": 0.36633557081222534, + "memory(GiB)": 77.0, + "step": 5672, + "token_acc": 0.9446351931330472, + "train_speed(iter/s)": 0.542351 + }, + { + "epoch": 1.81536, + "grad_norm": 0.67487810617187, + "learning_rate": 1.8454310762432111e-06, + "loss": 
0.34418928623199463, + "memory(GiB)": 77.0, + "step": 5673, + "token_acc": 0.9012531328320802, + "train_speed(iter/s)": 0.54224 + }, + { + "epoch": 1.81568, + "grad_norm": 0.6159219588978045, + "learning_rate": 1.8445800061746993e-06, + "loss": 0.23995159566402435, + "memory(GiB)": 77.0, + "step": 5674, + "token_acc": 0.8815038603558241, + "train_speed(iter/s)": 0.54213 + }, + { + "epoch": 1.8159999999999998, + "grad_norm": 0.6942158435700118, + "learning_rate": 1.8437290176618361e-06, + "loss": 0.34699881076812744, + "memory(GiB)": 77.0, + "step": 5675, + "token_acc": 0.8670096972095784, + "train_speed(iter/s)": 0.542012 + }, + { + "epoch": 1.8163200000000002, + "grad_norm": 0.5961379425911021, + "learning_rate": 1.8428781108105114e-06, + "loss": 0.2886664569377899, + "memory(GiB)": 77.0, + "step": 5676, + "token_acc": 0.9185667752442996, + "train_speed(iter/s)": 0.541895 + }, + { + "epoch": 1.81664, + "grad_norm": 0.6147220924467212, + "learning_rate": 1.8420272857266082e-06, + "loss": 0.2645905613899231, + "memory(GiB)": 77.0, + "step": 5677, + "token_acc": 0.8947999041456985, + "train_speed(iter/s)": 0.541791 + }, + { + "epoch": 1.81696, + "grad_norm": 0.6862124711868418, + "learning_rate": 1.841176542515994e-06, + "loss": 0.38325241208076477, + "memory(GiB)": 77.0, + "step": 5678, + "token_acc": 0.881791483113069, + "train_speed(iter/s)": 0.541675 + }, + { + "epoch": 1.81728, + "grad_norm": 0.6305953032894158, + "learning_rate": 1.8403258812845304e-06, + "loss": 0.38239574432373047, + "memory(GiB)": 77.0, + "step": 5679, + "token_acc": 0.936618089289845, + "train_speed(iter/s)": 0.541543 + }, + { + "epoch": 1.8176, + "grad_norm": 0.6742462016216059, + "learning_rate": 1.8394753021380667e-06, + "loss": 0.26921364665031433, + "memory(GiB)": 77.0, + "step": 5680, + "token_acc": 0.9465457955611128, + "train_speed(iter/s)": 0.541436 + }, + { + "epoch": 1.81792, + "grad_norm": 0.6526059191589241, + "learning_rate": 1.8386248051824436e-06, + "loss": 0.37040403485298157, + "memory(GiB)": 77.0, + "step": 5681, + "token_acc": 0.828322440087146, + "train_speed(iter/s)": 0.541322 + }, + { + "epoch": 1.8182399999999999, + "grad_norm": 0.6406432764144939, + "learning_rate": 1.8377743905234901e-06, + "loss": 0.32115212082862854, + "memory(GiB)": 77.0, + "step": 5682, + "token_acc": 0.9138409042439752, + "train_speed(iter/s)": 0.541211 + }, + { + "epoch": 1.81856, + "grad_norm": 0.6403348274262205, + "learning_rate": 1.836924058267026e-06, + "loss": 0.3509584069252014, + "memory(GiB)": 77.0, + "step": 5683, + "token_acc": 0.959758551307847, + "train_speed(iter/s)": 0.541099 + }, + { + "epoch": 1.81888, + "grad_norm": 0.6970078704048009, + "learning_rate": 1.8360738085188601e-06, + "loss": 0.3514070510864258, + "memory(GiB)": 77.0, + "step": 5684, + "token_acc": 0.9172071728347959, + "train_speed(iter/s)": 0.540986 + }, + { + "epoch": 1.8192, + "grad_norm": 0.5978454622108899, + "learning_rate": 1.8352236413847915e-06, + "loss": 0.3198935389518738, + "memory(GiB)": 77.0, + "step": 5685, + "token_acc": 0.8576831210191083, + "train_speed(iter/s)": 0.540855 + }, + { + "epoch": 1.81952, + "grad_norm": 0.6604814980989997, + "learning_rate": 1.8343735569706087e-06, + "loss": 0.41264331340789795, + "memory(GiB)": 77.0, + "step": 5686, + "token_acc": 0.908660351826793, + "train_speed(iter/s)": 0.540732 + }, + { + "epoch": 1.8198400000000001, + "grad_norm": 0.6888316478955184, + "learning_rate": 1.8335235553820897e-06, + "loss": 0.35206839442253113, + "memory(GiB)": 77.0, + "step": 5687, + "token_acc": 
0.9205472875660106, + "train_speed(iter/s)": 0.540617 + }, + { + "epoch": 1.82016, + "grad_norm": 0.6653411565646649, + "learning_rate": 1.8326736367250025e-06, + "loss": 0.4164494574069977, + "memory(GiB)": 77.0, + "step": 5688, + "token_acc": 0.7988375031589589, + "train_speed(iter/s)": 0.540492 + }, + { + "epoch": 1.8204799999999999, + "grad_norm": 0.6616092671810565, + "learning_rate": 1.8318238011051047e-06, + "loss": 0.3313421308994293, + "memory(GiB)": 77.0, + "step": 5689, + "token_acc": 0.8870122306348281, + "train_speed(iter/s)": 0.540376 + }, + { + "epoch": 1.8208, + "grad_norm": 0.6428463307253082, + "learning_rate": 1.8309740486281442e-06, + "loss": 0.2967532277107239, + "memory(GiB)": 77.0, + "step": 5690, + "token_acc": 0.9381168311549747, + "train_speed(iter/s)": 0.540262 + }, + { + "epoch": 1.82112, + "grad_norm": 0.6160447628378616, + "learning_rate": 1.830124379399858e-06, + "loss": 0.3276389241218567, + "memory(GiB)": 77.0, + "step": 5691, + "token_acc": 0.9107499287140006, + "train_speed(iter/s)": 0.540151 + }, + { + "epoch": 1.82144, + "grad_norm": 0.7151708388913977, + "learning_rate": 1.8292747935259714e-06, + "loss": 0.31628990173339844, + "memory(GiB)": 77.0, + "step": 5692, + "token_acc": 0.9038461538461539, + "train_speed(iter/s)": 0.540044 + }, + { + "epoch": 1.82176, + "grad_norm": 0.6602157155093745, + "learning_rate": 1.8284252911122013e-06, + "loss": 0.2778887152671814, + "memory(GiB)": 77.0, + "step": 5693, + "token_acc": 0.8874874874874875, + "train_speed(iter/s)": 0.539936 + }, + { + "epoch": 1.8220800000000001, + "grad_norm": 0.6713094909020594, + "learning_rate": 1.8275758722642535e-06, + "loss": 0.3411560654640198, + "memory(GiB)": 77.0, + "step": 5694, + "token_acc": 0.8844246031746031, + "train_speed(iter/s)": 0.539818 + }, + { + "epoch": 1.8224, + "grad_norm": 0.6962040162323625, + "learning_rate": 1.826726537087824e-06, + "loss": 0.3783811330795288, + "memory(GiB)": 77.0, + "step": 5695, + "token_acc": 0.8907185628742516, + "train_speed(iter/s)": 0.539702 + }, + { + "epoch": 1.82272, + "grad_norm": 0.6385883762122312, + "learning_rate": 1.8258772856885974e-06, + "loss": 0.29698824882507324, + "memory(GiB)": 77.0, + "step": 5696, + "token_acc": 0.8765169902912622, + "train_speed(iter/s)": 0.539585 + }, + { + "epoch": 1.82304, + "grad_norm": 0.6609734751268568, + "learning_rate": 1.825028118172248e-06, + "loss": 0.31288018822669983, + "memory(GiB)": 77.0, + "step": 5697, + "token_acc": 0.940399212819792, + "train_speed(iter/s)": 0.539473 + }, + { + "epoch": 1.82336, + "grad_norm": 0.6670330965550926, + "learning_rate": 1.8241790346444405e-06, + "loss": 0.3375834822654724, + "memory(GiB)": 77.0, + "step": 5698, + "token_acc": 0.9032825322391559, + "train_speed(iter/s)": 0.53936 + }, + { + "epoch": 1.82368, + "grad_norm": 0.6467930167868553, + "learning_rate": 1.823330035210828e-06, + "loss": 0.2735075354576111, + "memory(GiB)": 77.0, + "step": 5699, + "token_acc": 0.969327731092437, + "train_speed(iter/s)": 0.539253 + }, + { + "epoch": 1.8239999999999998, + "grad_norm": 0.5879187606274169, + "learning_rate": 1.8224811199770543e-06, + "loss": 0.38514959812164307, + "memory(GiB)": 77.0, + "step": 5700, + "token_acc": 0.9182068423122296, + "train_speed(iter/s)": 0.539128 + }, + { + "epoch": 1.8243200000000002, + "grad_norm": 0.6694434364738189, + "learning_rate": 1.8216322890487522e-06, + "loss": 0.3674173355102539, + "memory(GiB)": 77.0, + "step": 5701, + "token_acc": 0.8859431096382215, + "train_speed(iter/s)": 0.538998 + }, + { + "epoch": 1.82464, + 
"grad_norm": 0.6489520337200443, + "learning_rate": 1.8207835425315439e-06, + "loss": 0.33032605051994324, + "memory(GiB)": 77.0, + "step": 5702, + "token_acc": 0.9313940724478595, + "train_speed(iter/s)": 0.538891 + }, + { + "epoch": 1.82496, + "grad_norm": 0.5987893943266045, + "learning_rate": 1.819934880531041e-06, + "loss": 0.30612754821777344, + "memory(GiB)": 77.0, + "step": 5703, + "token_acc": 0.889280785155651, + "train_speed(iter/s)": 0.538778 + }, + { + "epoch": 1.82528, + "grad_norm": 0.6888990776747717, + "learning_rate": 1.819086303152845e-06, + "loss": 0.335906982421875, + "memory(GiB)": 77.0, + "step": 5704, + "token_acc": 0.8791748526522594, + "train_speed(iter/s)": 0.538666 + }, + { + "epoch": 1.8256000000000001, + "grad_norm": 0.6247940519753581, + "learning_rate": 1.8182378105025467e-06, + "loss": 0.3429589569568634, + "memory(GiB)": 77.0, + "step": 5705, + "token_acc": 0.8865072822203902, + "train_speed(iter/s)": 0.538543 + }, + { + "epoch": 1.82592, + "grad_norm": 0.6928801422791623, + "learning_rate": 1.8173894026857257e-06, + "loss": 0.32550060749053955, + "memory(GiB)": 77.0, + "step": 5706, + "token_acc": 0.8969410050983249, + "train_speed(iter/s)": 0.538424 + }, + { + "epoch": 1.8262399999999999, + "grad_norm": 0.7210679361257356, + "learning_rate": 1.8165410798079518e-06, + "loss": 0.34723567962646484, + "memory(GiB)": 77.0, + "step": 5707, + "token_acc": 0.9157034113335213, + "train_speed(iter/s)": 0.538315 + }, + { + "epoch": 1.82656, + "grad_norm": 0.6545351122491232, + "learning_rate": 1.815692841974785e-06, + "loss": 0.2868522107601166, + "memory(GiB)": 77.0, + "step": 5708, + "token_acc": 0.9643742255266419, + "train_speed(iter/s)": 0.538202 + }, + { + "epoch": 1.82688, + "grad_norm": 0.6485513692868057, + "learning_rate": 1.8148446892917723e-06, + "loss": 0.32352784276008606, + "memory(GiB)": 77.0, + "step": 5709, + "token_acc": 0.8271115258982734, + "train_speed(iter/s)": 0.538097 + }, + { + "epoch": 1.8272, + "grad_norm": 0.6538076617752026, + "learning_rate": 1.8139966218644529e-06, + "loss": 0.35349270701408386, + "memory(GiB)": 77.0, + "step": 5710, + "token_acc": 0.9479649665121072, + "train_speed(iter/s)": 0.537987 + }, + { + "epoch": 1.82752, + "grad_norm": 0.6200852605868887, + "learning_rate": 1.8131486397983531e-06, + "loss": 0.2946379482746124, + "memory(GiB)": 77.0, + "step": 5711, + "token_acc": 0.8858504766683392, + "train_speed(iter/s)": 0.537884 + }, + { + "epoch": 1.8278400000000001, + "grad_norm": 0.7192391003905426, + "learning_rate": 1.8123007431989903e-06, + "loss": 0.2914566993713379, + "memory(GiB)": 77.0, + "step": 5712, + "token_acc": 0.8634737131846958, + "train_speed(iter/s)": 0.537767 + }, + { + "epoch": 1.82816, + "grad_norm": 0.6158911921242854, + "learning_rate": 1.8114529321718698e-06, + "loss": 0.3176073431968689, + "memory(GiB)": 77.0, + "step": 5713, + "token_acc": 0.9662388392857143, + "train_speed(iter/s)": 0.537651 + }, + { + "epoch": 1.8284799999999999, + "grad_norm": 0.6213929970894736, + "learning_rate": 1.8106052068224877e-06, + "loss": 0.33665502071380615, + "memory(GiB)": 77.0, + "step": 5714, + "token_acc": 0.8414004208915248, + "train_speed(iter/s)": 0.537542 + }, + { + "epoch": 1.8288, + "grad_norm": 0.6783303654852537, + "learning_rate": 1.8097575672563278e-06, + "loss": 0.27134430408477783, + "memory(GiB)": 77.0, + "step": 5715, + "token_acc": 0.8885037550548815, + "train_speed(iter/s)": 0.537434 + }, + { + "epoch": 1.82912, + "grad_norm": 0.6822560743796351, + "learning_rate": 1.808910013578865e-06, + 
"loss": 0.2962796688079834, + "memory(GiB)": 77.0, + "step": 5716, + "token_acc": 0.944078947368421, + "train_speed(iter/s)": 0.537328 + }, + { + "epoch": 1.82944, + "grad_norm": 0.6146761518802739, + "learning_rate": 1.8080625458955631e-06, + "loss": 0.3387283384799957, + "memory(GiB)": 77.0, + "step": 5717, + "token_acc": 0.9484652210644888, + "train_speed(iter/s)": 0.537206 + }, + { + "epoch": 1.82976, + "grad_norm": 0.6294105465667453, + "learning_rate": 1.8072151643118735e-06, + "loss": 0.3608749806880951, + "memory(GiB)": 77.0, + "step": 5718, + "token_acc": 0.8951285520974289, + "train_speed(iter/s)": 0.537092 + }, + { + "epoch": 1.8300800000000002, + "grad_norm": 0.5909513656571516, + "learning_rate": 1.8063678689332388e-06, + "loss": 0.31603357195854187, + "memory(GiB)": 77.0, + "step": 5719, + "token_acc": 0.9330197132616488, + "train_speed(iter/s)": 0.536977 + }, + { + "epoch": 1.8304, + "grad_norm": 0.6827494111100909, + "learning_rate": 1.805520659865091e-06, + "loss": 0.3482048511505127, + "memory(GiB)": 77.0, + "step": 5720, + "token_acc": 0.8314809252027636, + "train_speed(iter/s)": 0.536868 + }, + { + "epoch": 1.83072, + "grad_norm": 0.6467610654785724, + "learning_rate": 1.8046735372128487e-06, + "loss": 0.3183683454990387, + "memory(GiB)": 77.0, + "step": 5721, + "token_acc": 0.9170573386224306, + "train_speed(iter/s)": 0.536762 + }, + { + "epoch": 1.83104, + "grad_norm": 0.670687587715667, + "learning_rate": 1.8038265010819222e-06, + "loss": 0.3762764036655426, + "memory(GiB)": 77.0, + "step": 5722, + "token_acc": 0.8430015965939329, + "train_speed(iter/s)": 0.536646 + }, + { + "epoch": 1.83136, + "grad_norm": 0.6492467179128916, + "learning_rate": 1.8029795515777112e-06, + "loss": 0.36258167028427124, + "memory(GiB)": 77.0, + "step": 5723, + "token_acc": 0.9053803339517625, + "train_speed(iter/s)": 0.536533 + }, + { + "epoch": 1.83168, + "grad_norm": 0.670081677036261, + "learning_rate": 1.8021326888056034e-06, + "loss": 0.3249063491821289, + "memory(GiB)": 77.0, + "step": 5724, + "token_acc": 0.9259436215957955, + "train_speed(iter/s)": 0.536415 + }, + { + "epoch": 1.8319999999999999, + "grad_norm": 0.5790190709679924, + "learning_rate": 1.8012859128709766e-06, + "loss": 0.33393752574920654, + "memory(GiB)": 77.0, + "step": 5725, + "token_acc": 0.8341268329841934, + "train_speed(iter/s)": 0.536293 + }, + { + "epoch": 1.83232, + "grad_norm": 0.6153526463268886, + "learning_rate": 1.8004392238791972e-06, + "loss": 0.2806777060031891, + "memory(GiB)": 77.0, + "step": 5726, + "token_acc": 0.9182203389830509, + "train_speed(iter/s)": 0.536126 + }, + { + "epoch": 1.83264, + "grad_norm": 0.6181188374503553, + "learning_rate": 1.7995926219356202e-06, + "loss": 0.3530924320220947, + "memory(GiB)": 77.0, + "step": 5727, + "token_acc": 0.8910925539318023, + "train_speed(iter/s)": 0.536005 + }, + { + "epoch": 1.83296, + "grad_norm": 0.6274464061539398, + "learning_rate": 1.7987461071455917e-06, + "loss": 0.3165718615055084, + "memory(GiB)": 77.0, + "step": 5728, + "token_acc": 0.9089385474860335, + "train_speed(iter/s)": 0.535891 + }, + { + "epoch": 1.83328, + "grad_norm": 0.71593317804798, + "learning_rate": 1.7978996796144449e-06, + "loss": 0.3904079496860504, + "memory(GiB)": 77.0, + "step": 5729, + "token_acc": 0.8791064388961892, + "train_speed(iter/s)": 0.535769 + }, + { + "epoch": 1.8336000000000001, + "grad_norm": 0.70408303990499, + "learning_rate": 1.7970533394475038e-06, + "loss": 0.29161861538887024, + "memory(GiB)": 77.0, + "step": 5730, + "token_acc": 
0.8911917098445595, + "train_speed(iter/s)": 0.53566 + }, + { + "epoch": 1.83392, + "grad_norm": 0.695177970544029, + "learning_rate": 1.7962070867500798e-06, + "loss": 0.4358823299407959, + "memory(GiB)": 77.0, + "step": 5731, + "token_acc": 0.8679886685552408, + "train_speed(iter/s)": 0.535556 + }, + { + "epoch": 1.8342399999999999, + "grad_norm": 0.7068016888253857, + "learning_rate": 1.7953609216274744e-06, + "loss": 0.4123387336730957, + "memory(GiB)": 77.0, + "step": 5732, + "token_acc": 0.8834465498748659, + "train_speed(iter/s)": 0.535452 + }, + { + "epoch": 1.83456, + "grad_norm": 0.6622122430544484, + "learning_rate": 1.7945148441849795e-06, + "loss": 0.349564790725708, + "memory(GiB)": 77.0, + "step": 5733, + "token_acc": 0.8437094682230869, + "train_speed(iter/s)": 0.535344 + }, + { + "epoch": 1.83488, + "grad_norm": 0.6622160833072954, + "learning_rate": 1.7936688545278747e-06, + "loss": 0.3727477788925171, + "memory(GiB)": 77.0, + "step": 5734, + "token_acc": 0.8879568603954464, + "train_speed(iter/s)": 0.535222 + }, + { + "epoch": 1.8352, + "grad_norm": 0.6634984404999865, + "learning_rate": 1.7928229527614268e-06, + "loss": 0.31233930587768555, + "memory(GiB)": 77.0, + "step": 5735, + "token_acc": 0.8553092182030338, + "train_speed(iter/s)": 0.535118 + }, + { + "epoch": 1.83552, + "grad_norm": 0.6409097524382673, + "learning_rate": 1.791977138990895e-06, + "loss": 0.39110562205314636, + "memory(GiB)": 77.0, + "step": 5736, + "token_acc": 0.926919032597266, + "train_speed(iter/s)": 0.534998 + }, + { + "epoch": 1.8358400000000001, + "grad_norm": 0.6824936909180497, + "learning_rate": 1.7911314133215257e-06, + "loss": 0.4196581542491913, + "memory(GiB)": 77.0, + "step": 5737, + "token_acc": 0.8651717286403633, + "train_speed(iter/s)": 0.534885 + }, + { + "epoch": 1.83616, + "grad_norm": 0.6816800619740159, + "learning_rate": 1.7902857758585545e-06, + "loss": 0.3923693299293518, + "memory(GiB)": 77.0, + "step": 5738, + "token_acc": 0.9102105681366707, + "train_speed(iter/s)": 0.534764 + }, + { + "epoch": 1.83648, + "grad_norm": 0.6631432806075497, + "learning_rate": 1.7894402267072075e-06, + "loss": 0.2773424983024597, + "memory(GiB)": 77.0, + "step": 5739, + "token_acc": 0.9252142300701116, + "train_speed(iter/s)": 0.534662 + }, + { + "epoch": 1.8368, + "grad_norm": 0.698449534261523, + "learning_rate": 1.7885947659726976e-06, + "loss": 0.38944098353385925, + "memory(GiB)": 77.0, + "step": 5740, + "token_acc": 0.8240647118301314, + "train_speed(iter/s)": 0.534557 + }, + { + "epoch": 1.83712, + "grad_norm": 0.7368000489059904, + "learning_rate": 1.7877493937602281e-06, + "loss": 0.39701423048973083, + "memory(GiB)": 77.0, + "step": 5741, + "token_acc": 0.8940894089408941, + "train_speed(iter/s)": 0.534452 + }, + { + "epoch": 1.83744, + "grad_norm": 0.5897647656806314, + "learning_rate": 1.7869041101749912e-06, + "loss": 0.35475918650627136, + "memory(GiB)": 77.0, + "step": 5742, + "token_acc": 0.9309590752889721, + "train_speed(iter/s)": 0.534318 + }, + { + "epoch": 1.8377599999999998, + "grad_norm": 0.7256763289543755, + "learning_rate": 1.786058915322167e-06, + "loss": 0.3425423204898834, + "memory(GiB)": 77.0, + "step": 5743, + "token_acc": 0.8884169884169885, + "train_speed(iter/s)": 0.534217 + }, + { + "epoch": 1.8380800000000002, + "grad_norm": 0.6201640321094162, + "learning_rate": 1.785213809306926e-06, + "loss": 0.34731757640838623, + "memory(GiB)": 77.0, + "step": 5744, + "token_acc": 0.8840402059328267, + "train_speed(iter/s)": 0.534095 + }, + { + "epoch": 1.8384, + 
"grad_norm": 0.6597155991340033, + "learning_rate": 1.7843687922344266e-06, + "loss": 0.33412283658981323, + "memory(GiB)": 77.0, + "step": 5745, + "token_acc": 0.8949315575286718, + "train_speed(iter/s)": 0.533991 + }, + { + "epoch": 1.83872, + "grad_norm": 0.6703473174065389, + "learning_rate": 1.7835238642098167e-06, + "loss": 0.3426627516746521, + "memory(GiB)": 77.0, + "step": 5746, + "token_acc": 0.9488966318234611, + "train_speed(iter/s)": 0.533883 + }, + { + "epoch": 1.83904, + "grad_norm": 0.7691717218412423, + "learning_rate": 1.782679025338233e-06, + "loss": 0.33830565214157104, + "memory(GiB)": 77.0, + "step": 5747, + "token_acc": 0.9397745571658616, + "train_speed(iter/s)": 0.533779 + }, + { + "epoch": 1.83936, + "grad_norm": 0.6838714738940996, + "learning_rate": 1.7818342757248014e-06, + "loss": 0.3277372121810913, + "memory(GiB)": 77.0, + "step": 5748, + "token_acc": 0.9378563283922463, + "train_speed(iter/s)": 0.533662 + }, + { + "epoch": 1.83968, + "grad_norm": 0.6624321759953331, + "learning_rate": 1.7809896154746354e-06, + "loss": 0.29158586263656616, + "memory(GiB)": 77.0, + "step": 5749, + "token_acc": 0.92277450154451, + "train_speed(iter/s)": 0.533549 + }, + { + "epoch": 1.8399999999999999, + "grad_norm": 0.6596840662968037, + "learning_rate": 1.7801450446928387e-06, + "loss": 0.3174511790275574, + "memory(GiB)": 77.0, + "step": 5750, + "token_acc": 0.8582591708270024, + "train_speed(iter/s)": 0.533434 + }, + { + "epoch": 1.84032, + "grad_norm": 0.6982546789944237, + "learning_rate": 1.7793005634845034e-06, + "loss": 0.3087129294872284, + "memory(GiB)": 77.0, + "step": 5751, + "token_acc": 0.9092776446422458, + "train_speed(iter/s)": 0.533334 + }, + { + "epoch": 1.84064, + "grad_norm": 0.6029009832075655, + "learning_rate": 1.778456171954711e-06, + "loss": 0.24786871671676636, + "memory(GiB)": 77.0, + "step": 5752, + "token_acc": 0.8746792130025663, + "train_speed(iter/s)": 0.533218 + }, + { + "epoch": 1.84096, + "grad_norm": 0.6411366970589688, + "learning_rate": 1.777611870208531e-06, + "loss": 0.3509453535079956, + "memory(GiB)": 77.0, + "step": 5753, + "token_acc": 0.8681768558951966, + "train_speed(iter/s)": 0.533108 + }, + { + "epoch": 1.84128, + "grad_norm": 0.703235340432249, + "learning_rate": 1.7767676583510224e-06, + "loss": 0.35890018939971924, + "memory(GiB)": 77.0, + "step": 5754, + "token_acc": 0.9368491770238495, + "train_speed(iter/s)": 0.532979 + }, + { + "epoch": 1.8416000000000001, + "grad_norm": 0.6182464483835725, + "learning_rate": 1.7759235364872316e-06, + "loss": 0.3344378173351288, + "memory(GiB)": 77.0, + "step": 5755, + "token_acc": 0.886304391417686, + "train_speed(iter/s)": 0.532862 + }, + { + "epoch": 1.84192, + "grad_norm": 0.6871241595948906, + "learning_rate": 1.7750795047221963e-06, + "loss": 0.37499451637268066, + "memory(GiB)": 77.0, + "step": 5756, + "token_acc": 0.9353324641460234, + "train_speed(iter/s)": 0.532755 + }, + { + "epoch": 1.8422399999999999, + "grad_norm": 0.6691026315366714, + "learning_rate": 1.7742355631609414e-06, + "loss": 0.34217774868011475, + "memory(GiB)": 77.0, + "step": 5757, + "token_acc": 0.9270118527760449, + "train_speed(iter/s)": 0.532647 + }, + { + "epoch": 1.84256, + "grad_norm": 0.6167189702169794, + "learning_rate": 1.7733917119084807e-06, + "loss": 0.2868010401725769, + "memory(GiB)": 77.0, + "step": 5758, + "token_acc": 0.8639063591893781, + "train_speed(iter/s)": 0.532529 + }, + { + "epoch": 1.84288, + "grad_norm": 0.755105977871762, + "learning_rate": 1.7725479510698164e-06, + "loss": 
0.4103390574455261, + "memory(GiB)": 77.0, + "step": 5759, + "token_acc": 0.9222672064777327, + "train_speed(iter/s)": 0.53243 + }, + { + "epoch": 1.8432, + "grad_norm": 0.6777551872737316, + "learning_rate": 1.77170428074994e-06, + "loss": 0.27602142095565796, + "memory(GiB)": 77.0, + "step": 5760, + "token_acc": 0.8760529482551144, + "train_speed(iter/s)": 0.532326 + }, + { + "epoch": 1.84352, + "grad_norm": 0.6209959008289325, + "learning_rate": 1.7708607010538327e-06, + "loss": 0.2864696681499481, + "memory(GiB)": 77.0, + "step": 5761, + "token_acc": 0.9329484902309059, + "train_speed(iter/s)": 0.532218 + }, + { + "epoch": 1.8438400000000001, + "grad_norm": 0.6374624867899555, + "learning_rate": 1.7700172120864617e-06, + "loss": 0.3686255216598511, + "memory(GiB)": 77.0, + "step": 5762, + "token_acc": 0.9550220750551877, + "train_speed(iter/s)": 0.532109 + }, + { + "epoch": 1.84416, + "grad_norm": 0.663792859912785, + "learning_rate": 1.7691738139527858e-06, + "loss": 0.30505502223968506, + "memory(GiB)": 77.0, + "step": 5763, + "token_acc": 0.9328772979905943, + "train_speed(iter/s)": 0.532008 + }, + { + "epoch": 1.84448, + "grad_norm": 0.6967947036074165, + "learning_rate": 1.7683305067577506e-06, + "loss": 0.3043736219406128, + "memory(GiB)": 77.0, + "step": 5764, + "token_acc": 0.9223158666178087, + "train_speed(iter/s)": 0.531905 + }, + { + "epoch": 1.8448, + "grad_norm": 0.7084525791933637, + "learning_rate": 1.7674872906062912e-06, + "loss": 0.31977906823158264, + "memory(GiB)": 77.0, + "step": 5765, + "token_acc": 0.8744360902255639, + "train_speed(iter/s)": 0.531803 + }, + { + "epoch": 1.84512, + "grad_norm": 0.6200615855028976, + "learning_rate": 1.7666441656033314e-06, + "loss": 0.30107420682907104, + "memory(GiB)": 77.0, + "step": 5766, + "token_acc": 0.9686263425664217, + "train_speed(iter/s)": 0.531684 + }, + { + "epoch": 1.84544, + "grad_norm": 0.7137927076872868, + "learning_rate": 1.7658011318537832e-06, + "loss": 0.38832563161849976, + "memory(GiB)": 77.0, + "step": 5767, + "token_acc": 0.8504746835443038, + "train_speed(iter/s)": 0.531549 + }, + { + "epoch": 1.8457599999999998, + "grad_norm": 0.7678607158125498, + "learning_rate": 1.7649581894625478e-06, + "loss": 0.26925766468048096, + "memory(GiB)": 77.0, + "step": 5768, + "token_acc": 0.9122591225912259, + "train_speed(iter/s)": 0.531437 + }, + { + "epoch": 1.8460800000000002, + "grad_norm": 0.6481493766309965, + "learning_rate": 1.7641153385345143e-06, + "loss": 0.3697500228881836, + "memory(GiB)": 77.0, + "step": 5769, + "token_acc": 0.8790199081163859, + "train_speed(iter/s)": 0.531322 + }, + { + "epoch": 1.8464, + "grad_norm": 0.6582332374405635, + "learning_rate": 1.763272579174562e-06, + "loss": 0.3104451596736908, + "memory(GiB)": 77.0, + "step": 5770, + "token_acc": 0.9563286454478165, + "train_speed(iter/s)": 0.531214 + }, + { + "epoch": 1.84672, + "grad_norm": 0.6593237888699363, + "learning_rate": 1.7624299114875562e-06, + "loss": 0.34326648712158203, + "memory(GiB)": 77.0, + "step": 5771, + "token_acc": 0.905201916495551, + "train_speed(iter/s)": 0.531103 + }, + { + "epoch": 1.84704, + "grad_norm": 0.682179040809741, + "learning_rate": 1.7615873355783527e-06, + "loss": 0.3714132010936737, + "memory(GiB)": 77.0, + "step": 5772, + "token_acc": 0.7994534370401514, + "train_speed(iter/s)": 0.530993 + }, + { + "epoch": 1.8473600000000001, + "grad_norm": 0.7176400829699445, + "learning_rate": 1.7607448515517963e-06, + "loss": 0.34786808490753174, + "memory(GiB)": 77.0, + "step": 5773, + "token_acc": 
0.9206865936667653, + "train_speed(iter/s)": 0.530889 + }, + { + "epoch": 1.84768, + "grad_norm": 0.6566498097005764, + "learning_rate": 1.759902459512719e-06, + "loss": 0.38666895031929016, + "memory(GiB)": 77.0, + "step": 5774, + "token_acc": 0.8751225890813992, + "train_speed(iter/s)": 0.530784 + }, + { + "epoch": 1.8479999999999999, + "grad_norm": 0.6149845965264595, + "learning_rate": 1.7590601595659417e-06, + "loss": 0.3021541237831116, + "memory(GiB)": 77.0, + "step": 5775, + "token_acc": 0.8824292758889177, + "train_speed(iter/s)": 0.530682 + }, + { + "epoch": 1.84832, + "grad_norm": 0.6532243788585366, + "learning_rate": 1.7582179518162742e-06, + "loss": 0.25239530205726624, + "memory(GiB)": 77.0, + "step": 5776, + "token_acc": 0.9508248730964467, + "train_speed(iter/s)": 0.530581 + }, + { + "epoch": 1.84864, + "grad_norm": 0.6272850429185506, + "learning_rate": 1.7573758363685156e-06, + "loss": 0.3063963055610657, + "memory(GiB)": 77.0, + "step": 5777, + "token_acc": 0.9280368451352907, + "train_speed(iter/s)": 0.530477 + }, + { + "epoch": 1.84896, + "grad_norm": 0.684776237665025, + "learning_rate": 1.75653381332745e-06, + "loss": 0.33312058448791504, + "memory(GiB)": 77.0, + "step": 5778, + "token_acc": 0.9392466585662211, + "train_speed(iter/s)": 0.530365 + }, + { + "epoch": 1.84928, + "grad_norm": 0.6533734827339516, + "learning_rate": 1.7556918827978548e-06, + "loss": 0.3383854329586029, + "memory(GiB)": 77.0, + "step": 5779, + "token_acc": 0.9020632737276478, + "train_speed(iter/s)": 0.530252 + }, + { + "epoch": 1.8496000000000001, + "grad_norm": 0.6817672511825097, + "learning_rate": 1.754850044884493e-06, + "loss": 0.383637011051178, + "memory(GiB)": 77.0, + "step": 5780, + "token_acc": 0.8739848781853823, + "train_speed(iter/s)": 0.530138 + }, + { + "epoch": 1.84992, + "grad_norm": 0.6517909986507155, + "learning_rate": 1.7540082996921165e-06, + "loss": 0.3257235884666443, + "memory(GiB)": 77.0, + "step": 5781, + "token_acc": 0.9105875674141357, + "train_speed(iter/s)": 0.530034 + }, + { + "epoch": 1.8502399999999999, + "grad_norm": 0.6445035032539681, + "learning_rate": 1.753166647325466e-06, + "loss": 0.35067641735076904, + "memory(GiB)": 77.0, + "step": 5782, + "token_acc": 0.9101633393829401, + "train_speed(iter/s)": 0.529931 + }, + { + "epoch": 1.85056, + "grad_norm": 0.6921574466401482, + "learning_rate": 1.752325087889271e-06, + "loss": 0.39925897121429443, + "memory(GiB)": 77.0, + "step": 5783, + "token_acc": 0.8775811209439528, + "train_speed(iter/s)": 0.529813 + }, + { + "epoch": 1.85088, + "grad_norm": 0.6516033893700426, + "learning_rate": 1.7514836214882485e-06, + "loss": 0.3090965747833252, + "memory(GiB)": 77.0, + "step": 5784, + "token_acc": 0.9353256021409456, + "train_speed(iter/s)": 0.529711 + }, + { + "epoch": 1.8512, + "grad_norm": 0.6509774975665088, + "learning_rate": 1.750642248227104e-06, + "loss": 0.38076239824295044, + "memory(GiB)": 77.0, + "step": 5785, + "token_acc": 0.8597164796219728, + "train_speed(iter/s)": 0.529597 + }, + { + "epoch": 1.85152, + "grad_norm": 0.6832751583343578, + "learning_rate": 1.7498009682105327e-06, + "loss": 0.35229647159576416, + "memory(GiB)": 77.0, + "step": 5786, + "token_acc": 0.8719172633253779, + "train_speed(iter/s)": 0.529494 + }, + { + "epoch": 1.8518400000000002, + "grad_norm": 0.6764511140995321, + "learning_rate": 1.7489597815432169e-06, + "loss": 0.3863452076911926, + "memory(GiB)": 77.0, + "step": 5787, + "token_acc": 0.8950798990748529, + "train_speed(iter/s)": 0.529395 + }, + { + "epoch": 1.85216, 
+ "grad_norm": 0.6697815229863069, + "learning_rate": 1.748118688329828e-06, + "loss": 0.300759881734848, + "memory(GiB)": 77.0, + "step": 5788, + "token_acc": 0.9113197704747, + "train_speed(iter/s)": 0.529296 + }, + { + "epoch": 1.85248, + "grad_norm": 0.6728626682995299, + "learning_rate": 1.747277688675024e-06, + "loss": 0.4044184684753418, + "memory(GiB)": 77.0, + "step": 5789, + "token_acc": 0.9070184426229508, + "train_speed(iter/s)": 0.52918 + }, + { + "epoch": 1.8528, + "grad_norm": 0.6158891398665798, + "learning_rate": 1.7464367826834547e-06, + "loss": 0.26435089111328125, + "memory(GiB)": 77.0, + "step": 5790, + "token_acc": 0.9269599548787366, + "train_speed(iter/s)": 0.529081 + }, + { + "epoch": 1.85312, + "grad_norm": 0.6325299419677142, + "learning_rate": 1.7455959704597554e-06, + "loss": 0.31510424613952637, + "memory(GiB)": 77.0, + "step": 5791, + "token_acc": 0.92806484295846, + "train_speed(iter/s)": 0.528981 + }, + { + "epoch": 1.85344, + "grad_norm": 0.6773008866478889, + "learning_rate": 1.7447552521085514e-06, + "loss": 0.3513510525226593, + "memory(GiB)": 77.0, + "step": 5792, + "token_acc": 0.8631134069030288, + "train_speed(iter/s)": 0.528879 + }, + { + "epoch": 1.8537599999999999, + "grad_norm": 0.6319665601826527, + "learning_rate": 1.7439146277344544e-06, + "loss": 0.34240496158599854, + "memory(GiB)": 77.0, + "step": 5793, + "token_acc": 0.9280430396772025, + "train_speed(iter/s)": 0.528781 + }, + { + "epoch": 1.85408, + "grad_norm": 0.6540022074863874, + "learning_rate": 1.743074097442065e-06, + "loss": 0.3521149158477783, + "memory(GiB)": 77.0, + "step": 5794, + "token_acc": 0.8915049315940184, + "train_speed(iter/s)": 0.528666 + }, + { + "epoch": 1.8544, + "grad_norm": 0.6140630335519282, + "learning_rate": 1.7422336613359736e-06, + "loss": 0.3141978979110718, + "memory(GiB)": 77.0, + "step": 5795, + "token_acc": 0.878072445019405, + "train_speed(iter/s)": 0.528566 + }, + { + "epoch": 1.85472, + "grad_norm": 0.6176417319182304, + "learning_rate": 1.7413933195207577e-06, + "loss": 0.3781595826148987, + "memory(GiB)": 77.0, + "step": 5796, + "token_acc": 0.9267846247712019, + "train_speed(iter/s)": 0.528463 + }, + { + "epoch": 1.85504, + "grad_norm": 0.7083752205744666, + "learning_rate": 1.7405530721009838e-06, + "loss": 0.3574787378311157, + "memory(GiB)": 77.0, + "step": 5797, + "token_acc": 0.9562951082598236, + "train_speed(iter/s)": 0.528351 + }, + { + "epoch": 1.8553600000000001, + "grad_norm": 0.633180308941228, + "learning_rate": 1.7397129191812058e-06, + "loss": 0.32498085498809814, + "memory(GiB)": 77.0, + "step": 5798, + "token_acc": 0.8658816771970133, + "train_speed(iter/s)": 0.528241 + }, + { + "epoch": 1.85568, + "grad_norm": 0.6589895416413943, + "learning_rate": 1.7388728608659656e-06, + "loss": 0.2923594117164612, + "memory(GiB)": 77.0, + "step": 5799, + "token_acc": 0.9546949214468397, + "train_speed(iter/s)": 0.528145 + }, + { + "epoch": 1.8559999999999999, + "grad_norm": 0.7070918898533098, + "learning_rate": 1.7380328972597942e-06, + "loss": 0.37271445989608765, + "memory(GiB)": 77.0, + "step": 5800, + "token_acc": 0.9142002989536622, + "train_speed(iter/s)": 0.528044 + }, + { + "epoch": 1.85632, + "grad_norm": 0.6559051199210332, + "learning_rate": 1.7371930284672106e-06, + "loss": 0.29649460315704346, + "memory(GiB)": 77.0, + "step": 5801, + "token_acc": 0.9104238869634764, + "train_speed(iter/s)": 0.527943 + }, + { + "epoch": 1.85664, + "grad_norm": 0.6294699645845919, + "learning_rate": 1.736353254592722e-06, + "loss": 
0.297624796628952, + "memory(GiB)": 77.0, + "step": 5802, + "token_acc": 0.938821412505623, + "train_speed(iter/s)": 0.527844 + }, + { + "epoch": 1.85696, + "grad_norm": 0.707558358416424, + "learning_rate": 1.7355135757408237e-06, + "loss": 0.3735225200653076, + "memory(GiB)": 77.0, + "step": 5803, + "token_acc": 0.8963527239150508, + "train_speed(iter/s)": 0.527739 + }, + { + "epoch": 1.85728, + "grad_norm": 0.7080590305927023, + "learning_rate": 1.7346739920159987e-06, + "loss": 0.43868574500083923, + "memory(GiB)": 77.0, + "step": 5804, + "token_acc": 0.8645680819912153, + "train_speed(iter/s)": 0.527592 + }, + { + "epoch": 1.8576000000000001, + "grad_norm": 0.6433887901205751, + "learning_rate": 1.7338345035227194e-06, + "loss": 0.3735780119895935, + "memory(GiB)": 77.0, + "step": 5805, + "token_acc": 0.8028067650233897, + "train_speed(iter/s)": 0.527495 + }, + { + "epoch": 1.85792, + "grad_norm": 0.6213797072089103, + "learning_rate": 1.7329951103654458e-06, + "loss": 0.30698534846305847, + "memory(GiB)": 77.0, + "step": 5806, + "token_acc": 0.9081632653061225, + "train_speed(iter/s)": 0.527384 + }, + { + "epoch": 1.85824, + "grad_norm": 0.6573198176876042, + "learning_rate": 1.7321558126486243e-06, + "loss": 0.3511444926261902, + "memory(GiB)": 77.0, + "step": 5807, + "token_acc": 0.9665497707040733, + "train_speed(iter/s)": 0.527272 + }, + { + "epoch": 1.85856, + "grad_norm": 0.6558638447395686, + "learning_rate": 1.7313166104766919e-06, + "loss": 0.3031862676143646, + "memory(GiB)": 77.0, + "step": 5808, + "token_acc": 0.8704809808236403, + "train_speed(iter/s)": 0.527176 + }, + { + "epoch": 1.85888, + "grad_norm": 0.6769970437229933, + "learning_rate": 1.7304775039540725e-06, + "loss": 0.30959951877593994, + "memory(GiB)": 77.0, + "step": 5809, + "token_acc": 0.9280760339070125, + "train_speed(iter/s)": 0.527068 + }, + { + "epoch": 1.8592, + "grad_norm": 0.7231528662688272, + "learning_rate": 1.7296384931851787e-06, + "loss": 0.2832384705543518, + "memory(GiB)": 77.0, + "step": 5810, + "token_acc": 0.9405255878284924, + "train_speed(iter/s)": 0.526974 + }, + { + "epoch": 1.8595199999999998, + "grad_norm": 0.7052518567111108, + "learning_rate": 1.7287995782744105e-06, + "loss": 0.3358381986618042, + "memory(GiB)": 77.0, + "step": 5811, + "token_acc": 0.9399865138233311, + "train_speed(iter/s)": 0.52687 + }, + { + "epoch": 1.8598400000000002, + "grad_norm": 0.6206331188442551, + "learning_rate": 1.727960759326156e-06, + "loss": 0.2625206708908081, + "memory(GiB)": 77.0, + "step": 5812, + "token_acc": 0.928008407777194, + "train_speed(iter/s)": 0.526773 + }, + { + "epoch": 1.86016, + "grad_norm": 0.6771638016670536, + "learning_rate": 1.7271220364447927e-06, + "loss": 0.3874977231025696, + "memory(GiB)": 77.0, + "step": 5813, + "token_acc": 0.8015037593984963, + "train_speed(iter/s)": 0.526673 + }, + { + "epoch": 1.86048, + "grad_norm": 0.7333644237461949, + "learning_rate": 1.7262834097346846e-06, + "loss": 0.3721191883087158, + "memory(GiB)": 77.0, + "step": 5814, + "token_acc": 0.9524092801903629, + "train_speed(iter/s)": 0.526567 + }, + { + "epoch": 1.8608, + "grad_norm": 0.6722492110564927, + "learning_rate": 1.7254448793001837e-06, + "loss": 0.3399973511695862, + "memory(GiB)": 77.0, + "step": 5815, + "token_acc": 0.9302083333333333, + "train_speed(iter/s)": 0.52647 + }, + { + "epoch": 1.86112, + "grad_norm": 0.6478608659925875, + "learning_rate": 1.7246064452456319e-06, + "loss": 0.2845986485481262, + "memory(GiB)": 77.0, + "step": 5816, + "token_acc": 0.9390292819895708, + 
"train_speed(iter/s)": 0.526373 + }, + { + "epoch": 1.86144, + "grad_norm": 0.5998485720117469, + "learning_rate": 1.7237681076753563e-06, + "loss": 0.3303993344306946, + "memory(GiB)": 77.0, + "step": 5817, + "token_acc": 0.8632229901782467, + "train_speed(iter/s)": 0.526264 + }, + { + "epoch": 1.8617599999999999, + "grad_norm": 0.6817640449544996, + "learning_rate": 1.7229298666936742e-06, + "loss": 0.3192458748817444, + "memory(GiB)": 77.0, + "step": 5818, + "token_acc": 0.9617706237424547, + "train_speed(iter/s)": 0.526158 + }, + { + "epoch": 1.86208, + "grad_norm": 0.6780074478378993, + "learning_rate": 1.72209172240489e-06, + "loss": 0.30267050862312317, + "memory(GiB)": 77.0, + "step": 5819, + "token_acc": 0.854143232095988, + "train_speed(iter/s)": 0.52605 + }, + { + "epoch": 1.8624, + "grad_norm": 0.6609414001487613, + "learning_rate": 1.7212536749132968e-06, + "loss": 0.2999529242515564, + "memory(GiB)": 77.0, + "step": 5820, + "token_acc": 0.9255271422162404, + "train_speed(iter/s)": 0.525953 + }, + { + "epoch": 1.86272, + "grad_norm": 0.683727906038203, + "learning_rate": 1.7204157243231741e-06, + "loss": 0.34771567583084106, + "memory(GiB)": 77.0, + "step": 5821, + "token_acc": 0.9598183881952327, + "train_speed(iter/s)": 0.525855 + }, + { + "epoch": 1.86304, + "grad_norm": 0.6549727447668641, + "learning_rate": 1.7195778707387906e-06, + "loss": 0.3327407240867615, + "memory(GiB)": 77.0, + "step": 5822, + "token_acc": 0.8801405975395431, + "train_speed(iter/s)": 0.525752 + }, + { + "epoch": 1.8633600000000001, + "grad_norm": 0.6192326049874575, + "learning_rate": 1.718740114264403e-06, + "loss": 0.3251734972000122, + "memory(GiB)": 77.0, + "step": 5823, + "token_acc": 0.8953835934905348, + "train_speed(iter/s)": 0.525639 + }, + { + "epoch": 1.86368, + "grad_norm": 0.6713848176918211, + "learning_rate": 1.7179024550042553e-06, + "loss": 0.30059507489204407, + "memory(GiB)": 77.0, + "step": 5824, + "token_acc": 0.874014304052815, + "train_speed(iter/s)": 0.525531 + }, + { + "epoch": 1.8639999999999999, + "grad_norm": 0.6657173933818556, + "learning_rate": 1.7170648930625797e-06, + "loss": 0.27679207921028137, + "memory(GiB)": 77.0, + "step": 5825, + "token_acc": 0.9025348089967868, + "train_speed(iter/s)": 0.525418 + }, + { + "epoch": 1.86432, + "grad_norm": 0.6366598493092573, + "learning_rate": 1.7162274285435963e-06, + "loss": 0.3411388397216797, + "memory(GiB)": 77.0, + "step": 5826, + "token_acc": 0.8731343283582089, + "train_speed(iter/s)": 0.525319 + }, + { + "epoch": 1.86464, + "grad_norm": 0.6822410550296469, + "learning_rate": 1.7153900615515129e-06, + "loss": 0.3978847861289978, + "memory(GiB)": 77.0, + "step": 5827, + "token_acc": 0.8608165429480382, + "train_speed(iter/s)": 0.525199 + }, + { + "epoch": 1.86496, + "grad_norm": 0.6275092840731364, + "learning_rate": 1.7145527921905254e-06, + "loss": 0.4162920117378235, + "memory(GiB)": 77.0, + "step": 5828, + "token_acc": 0.899046878039292, + "train_speed(iter/s)": 0.525088 + }, + { + "epoch": 1.86528, + "grad_norm": 0.6509537004825711, + "learning_rate": 1.7137156205648167e-06, + "loss": 0.34567275643348694, + "memory(GiB)": 77.0, + "step": 5829, + "token_acc": 0.9009108473640629, + "train_speed(iter/s)": 0.524978 + }, + { + "epoch": 1.8656000000000001, + "grad_norm": 0.652998836666183, + "learning_rate": 1.7128785467785597e-06, + "loss": 0.36428719758987427, + "memory(GiB)": 77.0, + "step": 5830, + "token_acc": 0.9450497394599716, + "train_speed(iter/s)": 0.524868 + }, + { + "epoch": 1.86592, + "grad_norm": 
0.6842354947501672, + "learning_rate": 1.7120415709359135e-06, + "loss": 0.41291123628616333, + "memory(GiB)": 77.0, + "step": 5831, + "token_acc": 0.8680107526881721, + "train_speed(iter/s)": 0.524769 + }, + { + "epoch": 1.86624, + "grad_norm": 0.6715112588060258, + "learning_rate": 1.7112046931410242e-06, + "loss": 0.3517717123031616, + "memory(GiB)": 77.0, + "step": 5832, + "token_acc": 0.8804582792909641, + "train_speed(iter/s)": 0.524659 + }, + { + "epoch": 1.86656, + "grad_norm": 0.6104387737728858, + "learning_rate": 1.7103679134980271e-06, + "loss": 0.33268141746520996, + "memory(GiB)": 77.0, + "step": 5833, + "token_acc": 0.9559880239520958, + "train_speed(iter/s)": 0.52456 + }, + { + "epoch": 1.86688, + "grad_norm": 0.7492249643559791, + "learning_rate": 1.7095312321110457e-06, + "loss": 0.3937787413597107, + "memory(GiB)": 77.0, + "step": 5834, + "token_acc": 0.8765801729873586, + "train_speed(iter/s)": 0.524458 + }, + { + "epoch": 1.8672, + "grad_norm": 0.6029839991333279, + "learning_rate": 1.70869464908419e-06, + "loss": 0.33661240339279175, + "memory(GiB)": 77.0, + "step": 5835, + "token_acc": 0.9206251915415262, + "train_speed(iter/s)": 0.52435 + }, + { + "epoch": 1.8675199999999998, + "grad_norm": 0.6220440103181244, + "learning_rate": 1.7078581645215578e-06, + "loss": 0.36762794852256775, + "memory(GiB)": 77.0, + "step": 5836, + "token_acc": 0.9080097936341378, + "train_speed(iter/s)": 0.524242 + }, + { + "epoch": 1.86784, + "grad_norm": 0.6378166518067205, + "learning_rate": 1.7070217785272354e-06, + "loss": 0.38569387793540955, + "memory(GiB)": 77.0, + "step": 5837, + "token_acc": 0.9114518147684606, + "train_speed(iter/s)": 0.524138 + }, + { + "epoch": 1.86816, + "grad_norm": 0.6489226095691119, + "learning_rate": 1.7061854912052967e-06, + "loss": 0.30347108840942383, + "memory(GiB)": 77.0, + "step": 5838, + "token_acc": 0.9414389291689905, + "train_speed(iter/s)": 0.524029 + }, + { + "epoch": 1.86848, + "grad_norm": 0.5590818505169177, + "learning_rate": 1.7053493026598026e-06, + "loss": 0.27857884764671326, + "memory(GiB)": 77.0, + "step": 5839, + "token_acc": 0.9004149377593361, + "train_speed(iter/s)": 0.523916 + }, + { + "epoch": 1.8688, + "grad_norm": 0.6569924358839545, + "learning_rate": 1.7045132129948027e-06, + "loss": 0.406017541885376, + "memory(GiB)": 77.0, + "step": 5840, + "token_acc": 0.8801791713325868, + "train_speed(iter/s)": 0.523813 + }, + { + "epoch": 1.8691200000000001, + "grad_norm": 0.6651248389382584, + "learning_rate": 1.7036772223143342e-06, + "loss": 0.33159565925598145, + "memory(GiB)": 77.0, + "step": 5841, + "token_acc": 0.8580034423407917, + "train_speed(iter/s)": 0.523708 + }, + { + "epoch": 1.86944, + "grad_norm": 0.7204906323279876, + "learning_rate": 1.7028413307224209e-06, + "loss": 0.40231215953826904, + "memory(GiB)": 77.0, + "step": 5842, + "token_acc": 0.8743202416918429, + "train_speed(iter/s)": 0.523614 + }, + { + "epoch": 1.8697599999999999, + "grad_norm": 0.633026850038325, + "learning_rate": 1.7020055383230755e-06, + "loss": 0.3822057247161865, + "memory(GiB)": 77.0, + "step": 5843, + "token_acc": 0.9036059391939665, + "train_speed(iter/s)": 0.523505 + }, + { + "epoch": 1.87008, + "grad_norm": 0.6078615040519939, + "learning_rate": 1.7011698452202977e-06, + "loss": 0.30184149742126465, + "memory(GiB)": 77.0, + "step": 5844, + "token_acc": 0.8804607693979569, + "train_speed(iter/s)": 0.523389 + }, + { + "epoch": 1.8704, + "grad_norm": 0.6299280087611977, + "learning_rate": 1.7003342515180749e-06, + "loss": 
0.27756381034851074, + "memory(GiB)": 77.0, + "step": 5845, + "token_acc": 0.8844028899277518, + "train_speed(iter/s)": 0.523295 + }, + { + "epoch": 1.87072, + "grad_norm": 0.6426670654258233, + "learning_rate": 1.699498757320382e-06, + "loss": 0.3024557828903198, + "memory(GiB)": 77.0, + "step": 5846, + "token_acc": 0.8872921949845026, + "train_speed(iter/s)": 0.523197 + }, + { + "epoch": 1.87104, + "grad_norm": 0.6400741141013032, + "learning_rate": 1.6986633627311828e-06, + "loss": 0.37078070640563965, + "memory(GiB)": 77.0, + "step": 5847, + "token_acc": 0.9392699811202014, + "train_speed(iter/s)": 0.523079 + }, + { + "epoch": 1.8713600000000001, + "grad_norm": 0.6317038806137935, + "learning_rate": 1.6978280678544268e-06, + "loss": 0.3410934507846832, + "memory(GiB)": 77.0, + "step": 5848, + "token_acc": 0.9122672787554196, + "train_speed(iter/s)": 0.522965 + }, + { + "epoch": 1.87168, + "grad_norm": 0.6334762649202214, + "learning_rate": 1.6969928727940531e-06, + "loss": 0.2977510094642639, + "memory(GiB)": 77.0, + "step": 5849, + "token_acc": 0.8917152858809801, + "train_speed(iter/s)": 0.522868 + }, + { + "epoch": 1.8719999999999999, + "grad_norm": 0.6349599257102642, + "learning_rate": 1.6961577776539857e-06, + "loss": 0.34385591745376587, + "memory(GiB)": 77.0, + "step": 5850, + "token_acc": 0.8607242339832869, + "train_speed(iter/s)": 0.522767 + }, + { + "epoch": 1.87232, + "grad_norm": 0.6075416747710776, + "learning_rate": 1.6953227825381378e-06, + "loss": 0.3035307228565216, + "memory(GiB)": 77.0, + "step": 5851, + "token_acc": 0.945, + "train_speed(iter/s)": 0.522657 + }, + { + "epoch": 1.87264, + "grad_norm": 0.6481378894628008, + "learning_rate": 1.69448788755041e-06, + "loss": 0.31343644857406616, + "memory(GiB)": 77.0, + "step": 5852, + "token_acc": 0.8938439166262724, + "train_speed(iter/s)": 0.522556 + }, + { + "epoch": 1.87296, + "grad_norm": 0.6253649609098438, + "learning_rate": 1.6936530927946917e-06, + "loss": 0.2762243151664734, + "memory(GiB)": 77.0, + "step": 5853, + "token_acc": 0.916012084592145, + "train_speed(iter/s)": 0.522451 + }, + { + "epoch": 1.8732799999999998, + "grad_norm": 0.6688833642600852, + "learning_rate": 1.692818398374858e-06, + "loss": 0.34475675225257874, + "memory(GiB)": 77.0, + "step": 5854, + "token_acc": 0.9258620689655173, + "train_speed(iter/s)": 0.522351 + }, + { + "epoch": 1.8736000000000002, + "grad_norm": 0.7230058193925898, + "learning_rate": 1.6919838043947724e-06, + "loss": 0.2561693787574768, + "memory(GiB)": 77.0, + "step": 5855, + "token_acc": 0.9582389122693429, + "train_speed(iter/s)": 0.522258 + }, + { + "epoch": 1.87392, + "grad_norm": 0.6908659465296997, + "learning_rate": 1.691149310958285e-06, + "loss": 0.3066670298576355, + "memory(GiB)": 77.0, + "step": 5856, + "token_acc": 0.9135832521908471, + "train_speed(iter/s)": 0.522168 + }, + { + "epoch": 1.87424, + "grad_norm": 0.6343014746717573, + "learning_rate": 1.6903149181692347e-06, + "loss": 0.3357009291648865, + "memory(GiB)": 77.0, + "step": 5857, + "token_acc": 0.9082649472450176, + "train_speed(iter/s)": 0.522065 + }, + { + "epoch": 1.87456, + "grad_norm": 0.6202707943601802, + "learning_rate": 1.6894806261314465e-06, + "loss": 0.3005986213684082, + "memory(GiB)": 77.0, + "step": 5858, + "token_acc": 0.8775044382449911, + "train_speed(iter/s)": 0.521972 + }, + { + "epoch": 1.87488, + "grad_norm": 0.6464648210930288, + "learning_rate": 1.6886464349487342e-06, + "loss": 0.3135419487953186, + "memory(GiB)": 77.0, + "step": 5859, + "token_acc": 0.8542099792099792, 
+ "train_speed(iter/s)": 0.521878 + }, + { + "epoch": 1.8752, + "grad_norm": 0.6544532902718023, + "learning_rate": 1.6878123447248978e-06, + "loss": 0.2837235927581787, + "memory(GiB)": 77.0, + "step": 5860, + "token_acc": 0.8947753014249178, + "train_speed(iter/s)": 0.521776 + }, + { + "epoch": 1.8755199999999999, + "grad_norm": 0.6826687877915505, + "learning_rate": 1.6869783555637265e-06, + "loss": 0.3441990315914154, + "memory(GiB)": 77.0, + "step": 5861, + "token_acc": 0.9108096085409253, + "train_speed(iter/s)": 0.521676 + }, + { + "epoch": 1.87584, + "grad_norm": 2.147996364009238, + "learning_rate": 1.6861444675689945e-06, + "loss": 0.4111419916152954, + "memory(GiB)": 77.0, + "step": 5862, + "token_acc": 0.8948758584257792, + "train_speed(iter/s)": 0.521571 + }, + { + "epoch": 1.87616, + "grad_norm": 0.6548425589854339, + "learning_rate": 1.6853106808444658e-06, + "loss": 0.3682023286819458, + "memory(GiB)": 77.0, + "step": 5863, + "token_acc": 0.9112992270887008, + "train_speed(iter/s)": 0.521474 + }, + { + "epoch": 1.87648, + "grad_norm": 0.6509993845703685, + "learning_rate": 1.6844769954938897e-06, + "loss": 0.35954582691192627, + "memory(GiB)": 77.0, + "step": 5864, + "token_acc": 0.8730064973419964, + "train_speed(iter/s)": 0.521356 + }, + { + "epoch": 1.8768, + "grad_norm": 0.6638504757249257, + "learning_rate": 1.6836434116210042e-06, + "loss": 0.2793375849723816, + "memory(GiB)": 77.0, + "step": 5865, + "token_acc": 0.9595219737856592, + "train_speed(iter/s)": 0.52126 + }, + { + "epoch": 1.8771200000000001, + "grad_norm": 0.6827792913803656, + "learning_rate": 1.6828099293295347e-06, + "loss": 0.32003816962242126, + "memory(GiB)": 77.0, + "step": 5866, + "token_acc": 0.9483714483714484, + "train_speed(iter/s)": 0.521154 + }, + { + "epoch": 1.87744, + "grad_norm": 0.7090839360880985, + "learning_rate": 1.6819765487231931e-06, + "loss": 0.3083997368812561, + "memory(GiB)": 77.0, + "step": 5867, + "token_acc": 0.9500657030223391, + "train_speed(iter/s)": 0.521061 + }, + { + "epoch": 1.8777599999999999, + "grad_norm": 0.6658871040294626, + "learning_rate": 1.6811432699056796e-06, + "loss": 0.3336179852485657, + "memory(GiB)": 77.0, + "step": 5868, + "token_acc": 0.8809423643247791, + "train_speed(iter/s)": 0.520952 + }, + { + "epoch": 1.87808, + "grad_norm": 0.6904603997545504, + "learning_rate": 1.6803100929806804e-06, + "loss": 0.34649619460105896, + "memory(GiB)": 77.0, + "step": 5869, + "token_acc": 0.91259591989519, + "train_speed(iter/s)": 0.520842 + }, + { + "epoch": 1.8784, + "grad_norm": 0.666748069639519, + "learning_rate": 1.6794770180518713e-06, + "loss": 0.3578915596008301, + "memory(GiB)": 77.0, + "step": 5870, + "token_acc": 0.8818539836413208, + "train_speed(iter/s)": 0.520726 + }, + { + "epoch": 1.87872, + "grad_norm": 0.6620340973624862, + "learning_rate": 1.6786440452229134e-06, + "loss": 0.31854647397994995, + "memory(GiB)": 77.0, + "step": 5871, + "token_acc": 0.8512110726643599, + "train_speed(iter/s)": 0.520634 + }, + { + "epoch": 1.87904, + "grad_norm": 0.69950058461037, + "learning_rate": 1.6778111745974557e-06, + "loss": 0.35315731167793274, + "memory(GiB)": 77.0, + "step": 5872, + "token_acc": 0.8889883616830797, + "train_speed(iter/s)": 0.520528 + }, + { + "epoch": 1.8793600000000001, + "grad_norm": 0.6946275523894783, + "learning_rate": 1.6769784062791342e-06, + "loss": 0.3733116388320923, + "memory(GiB)": 77.0, + "step": 5873, + "token_acc": 0.9098298196596393, + "train_speed(iter/s)": 0.520429 + }, + { + "epoch": 1.87968, + "grad_norm": 
0.6192743544357564, + "learning_rate": 1.676145740371573e-06, + "loss": 0.33758747577667236, + "memory(GiB)": 77.0, + "step": 5874, + "token_acc": 0.905244856563315, + "train_speed(iter/s)": 0.520327 + }, + { + "epoch": 1.88, + "grad_norm": 0.6272601005810868, + "learning_rate": 1.6753131769783826e-06, + "loss": 0.31202948093414307, + "memory(GiB)": 77.0, + "step": 5875, + "token_acc": 0.9140207923574037, + "train_speed(iter/s)": 0.520236 + }, + { + "epoch": 1.88032, + "grad_norm": 0.5935876635113023, + "learning_rate": 1.6744807162031612e-06, + "loss": 0.3187175989151001, + "memory(GiB)": 77.0, + "step": 5876, + "token_acc": 0.8986071589036992, + "train_speed(iter/s)": 0.520128 + }, + { + "epoch": 1.88064, + "grad_norm": 0.6461749609712192, + "learning_rate": 1.673648358149494e-06, + "loss": 0.31421327590942383, + "memory(GiB)": 77.0, + "step": 5877, + "token_acc": 0.9497746297488732, + "train_speed(iter/s)": 0.520037 + }, + { + "epoch": 1.88096, + "grad_norm": 0.7824593770845313, + "learning_rate": 1.6728161029209538e-06, + "loss": 0.41602063179016113, + "memory(GiB)": 77.0, + "step": 5878, + "token_acc": 0.8472341645170536, + "train_speed(iter/s)": 0.519934 + }, + { + "epoch": 1.8812799999999998, + "grad_norm": 0.7349795338133199, + "learning_rate": 1.6719839506210999e-06, + "loss": 0.3712366819381714, + "memory(GiB)": 77.0, + "step": 5879, + "token_acc": 0.8714524207011686, + "train_speed(iter/s)": 0.519842 + }, + { + "epoch": 1.8816000000000002, + "grad_norm": 0.6346222865663239, + "learning_rate": 1.6711519013534794e-06, + "loss": 0.4225757420063019, + "memory(GiB)": 77.0, + "step": 5880, + "token_acc": 0.8913520097442144, + "train_speed(iter/s)": 0.519742 + }, + { + "epoch": 1.88192, + "grad_norm": 0.7795534100964677, + "learning_rate": 1.6703199552216266e-06, + "loss": 0.331820011138916, + "memory(GiB)": 77.0, + "step": 5881, + "token_acc": 0.8730385164051355, + "train_speed(iter/s)": 0.519653 + }, + { + "epoch": 1.88224, + "grad_norm": 0.7096485154202627, + "learning_rate": 1.6694881123290623e-06, + "loss": 0.33351290225982666, + "memory(GiB)": 77.0, + "step": 5882, + "token_acc": 0.9352798053527981, + "train_speed(iter/s)": 0.519556 + }, + { + "epoch": 1.88256, + "grad_norm": 0.6973808457347908, + "learning_rate": 1.6686563727792954e-06, + "loss": 0.38989609479904175, + "memory(GiB)": 77.0, + "step": 5883, + "token_acc": 0.8520359848484849, + "train_speed(iter/s)": 0.519455 + }, + { + "epoch": 1.88288, + "grad_norm": 0.6679629186804522, + "learning_rate": 1.667824736675821e-06, + "loss": 0.364469975233078, + "memory(GiB)": 77.0, + "step": 5884, + "token_acc": 0.9021767637031209, + "train_speed(iter/s)": 0.519339 + }, + { + "epoch": 1.8832, + "grad_norm": 0.625173725682246, + "learning_rate": 1.666993204122122e-06, + "loss": 0.2513069212436676, + "memory(GiB)": 77.0, + "step": 5885, + "token_acc": 0.935249621785174, + "train_speed(iter/s)": 0.519245 + }, + { + "epoch": 1.8835199999999999, + "grad_norm": 0.6384657770046884, + "learning_rate": 1.666161775221668e-06, + "loss": 0.29344087839126587, + "memory(GiB)": 77.0, + "step": 5886, + "token_acc": 0.8968687158862396, + "train_speed(iter/s)": 0.519151 + }, + { + "epoch": 1.88384, + "grad_norm": 0.7194752140527009, + "learning_rate": 1.6653304500779165e-06, + "loss": 0.4319307506084442, + "memory(GiB)": 77.0, + "step": 5887, + "token_acc": 0.8947368421052632, + "train_speed(iter/s)": 0.519052 + }, + { + "epoch": 1.88416, + "grad_norm": 0.6673096481571265, + "learning_rate": 1.6644992287943114e-06, + "loss": 0.2972384989261627, + 
"memory(GiB)": 77.0, + "step": 5888, + "token_acc": 0.9102200141944642, + "train_speed(iter/s)": 0.51896 + }, + { + "epoch": 1.88448, + "grad_norm": 0.7169850639370068, + "learning_rate": 1.663668111474283e-06, + "loss": 0.2854718267917633, + "memory(GiB)": 77.0, + "step": 5889, + "token_acc": 0.8824553765781454, + "train_speed(iter/s)": 0.518871 + }, + { + "epoch": 1.8848, + "grad_norm": 0.6655786909253523, + "learning_rate": 1.6628370982212502e-06, + "loss": 0.3355163633823395, + "memory(GiB)": 77.0, + "step": 5890, + "token_acc": 0.8245853215178368, + "train_speed(iter/s)": 0.518781 + }, + { + "epoch": 1.8851200000000001, + "grad_norm": 0.6771910528264398, + "learning_rate": 1.662006189138618e-06, + "loss": 0.3675715923309326, + "memory(GiB)": 77.0, + "step": 5891, + "token_acc": 0.9374836515825268, + "train_speed(iter/s)": 0.518681 + }, + { + "epoch": 1.88544, + "grad_norm": 0.6918098587733119, + "learning_rate": 1.661175384329779e-06, + "loss": 0.40482211112976074, + "memory(GiB)": 77.0, + "step": 5892, + "token_acc": 0.8961218836565097, + "train_speed(iter/s)": 0.518583 + }, + { + "epoch": 1.8857599999999999, + "grad_norm": 0.668931294773122, + "learning_rate": 1.6603446838981114e-06, + "loss": 0.3494011163711548, + "memory(GiB)": 77.0, + "step": 5893, + "token_acc": 0.8863341968911918, + "train_speed(iter/s)": 0.51848 + }, + { + "epoch": 1.88608, + "grad_norm": 0.6529550399943267, + "learning_rate": 1.6595140879469821e-06, + "loss": 0.3014255464076996, + "memory(GiB)": 77.0, + "step": 5894, + "token_acc": 0.8864468864468864, + "train_speed(iter/s)": 0.518379 + }, + { + "epoch": 1.8864, + "grad_norm": 0.6741806161143225, + "learning_rate": 1.6586835965797444e-06, + "loss": 0.3417585492134094, + "memory(GiB)": 77.0, + "step": 5895, + "token_acc": 0.8753552578156719, + "train_speed(iter/s)": 0.518277 + }, + { + "epoch": 1.88672, + "grad_norm": 0.6916599154619972, + "learning_rate": 1.6578532098997383e-06, + "loss": 0.37241485714912415, + "memory(GiB)": 77.0, + "step": 5896, + "token_acc": 0.9405624818788054, + "train_speed(iter/s)": 0.518169 + }, + { + "epoch": 1.88704, + "grad_norm": 0.7052623127773551, + "learning_rate": 1.6570229280102912e-06, + "loss": 0.4033910632133484, + "memory(GiB)": 77.0, + "step": 5897, + "token_acc": 0.8754252813399633, + "train_speed(iter/s)": 0.518077 + }, + { + "epoch": 1.8873600000000001, + "grad_norm": 0.66230818104701, + "learning_rate": 1.6561927510147172e-06, + "loss": 0.3837793469429016, + "memory(GiB)": 77.0, + "step": 5898, + "token_acc": 0.9180595930232558, + "train_speed(iter/s)": 0.517968 + }, + { + "epoch": 1.88768, + "grad_norm": 0.6840187977154725, + "learning_rate": 1.6553626790163173e-06, + "loss": 0.3388713300228119, + "memory(GiB)": 77.0, + "step": 5899, + "token_acc": 0.9330143540669856, + "train_speed(iter/s)": 0.517875 + }, + { + "epoch": 1.888, + "grad_norm": 0.6681391234045971, + "learning_rate": 1.6545327121183801e-06, + "loss": 0.3097202777862549, + "memory(GiB)": 77.0, + "step": 5900, + "token_acc": 0.9142291620848911, + "train_speed(iter/s)": 0.517783 + }, + { + "epoch": 1.88832, + "grad_norm": 0.6001640109422361, + "learning_rate": 1.6537028504241802e-06, + "loss": 0.2703160047531128, + "memory(GiB)": 77.0, + "step": 5901, + "token_acc": 0.9162532827545958, + "train_speed(iter/s)": 0.517689 + }, + { + "epoch": 1.88864, + "grad_norm": 0.6185863428574917, + "learning_rate": 1.6528730940369788e-06, + "loss": 0.3273877203464508, + "memory(GiB)": 77.0, + "step": 5902, + "token_acc": 0.94921875, + "train_speed(iter/s)": 0.517581 + 
}, + { + "epoch": 1.88896, + "grad_norm": 0.6429185847878507, + "learning_rate": 1.652043443060026e-06, + "loss": 0.30993905663490295, + "memory(GiB)": 77.0, + "step": 5903, + "token_acc": 0.9367283950617284, + "train_speed(iter/s)": 0.517488 + }, + { + "epoch": 1.8892799999999998, + "grad_norm": 0.7083680396291181, + "learning_rate": 1.651213897596557e-06, + "loss": 0.320132315158844, + "memory(GiB)": 77.0, + "step": 5904, + "token_acc": 0.9046887009992314, + "train_speed(iter/s)": 0.517401 + }, + { + "epoch": 1.8896, + "grad_norm": 0.7267189048039214, + "learning_rate": 1.6503844577497937e-06, + "loss": 0.31987065076828003, + "memory(GiB)": 77.0, + "step": 5905, + "token_acc": 0.9404580152671755, + "train_speed(iter/s)": 0.517309 + }, + { + "epoch": 1.88992, + "grad_norm": 0.633479969971108, + "learning_rate": 1.6495551236229474e-06, + "loss": 0.33324551582336426, + "memory(GiB)": 77.0, + "step": 5906, + "token_acc": 0.8777260475373683, + "train_speed(iter/s)": 0.517211 + }, + { + "epoch": 1.89024, + "grad_norm": 0.6447033133048178, + "learning_rate": 1.648725895319212e-06, + "loss": 0.42279672622680664, + "memory(GiB)": 77.0, + "step": 5907, + "token_acc": 0.8632495164410058, + "train_speed(iter/s)": 0.517108 + }, + { + "epoch": 1.89056, + "grad_norm": 0.7000125200927043, + "learning_rate": 1.6478967729417715e-06, + "loss": 0.4337184429168701, + "memory(GiB)": 77.0, + "step": 5908, + "token_acc": 0.8695407156945618, + "train_speed(iter/s)": 0.51701 + }, + { + "epoch": 1.8908800000000001, + "grad_norm": 0.7206046476209054, + "learning_rate": 1.6470677565937953e-06, + "loss": 0.34817638993263245, + "memory(GiB)": 77.0, + "step": 5909, + "token_acc": 0.8859231013663806, + "train_speed(iter/s)": 0.516919 + }, + { + "epoch": 1.8912, + "grad_norm": 0.6969583653476212, + "learning_rate": 1.6462388463784409e-06, + "loss": 0.4069809317588806, + "memory(GiB)": 77.0, + "step": 5910, + "token_acc": 0.9033989266547406, + "train_speed(iter/s)": 0.516819 + }, + { + "epoch": 1.8915199999999999, + "grad_norm": 0.6213400857894843, + "learning_rate": 1.6454100423988517e-06, + "loss": 0.3010268807411194, + "memory(GiB)": 77.0, + "step": 5911, + "token_acc": 0.9324522760646109, + "train_speed(iter/s)": 0.516724 + }, + { + "epoch": 1.89184, + "grad_norm": 0.6467276588675215, + "learning_rate": 1.6445813447581576e-06, + "loss": 0.3376759886741638, + "memory(GiB)": 77.0, + "step": 5912, + "token_acc": 0.9003601440576231, + "train_speed(iter/s)": 0.516515 + }, + { + "epoch": 1.89216, + "grad_norm": 0.6775879751509951, + "learning_rate": 1.6437527535594763e-06, + "loss": 0.38838914036750793, + "memory(GiB)": 77.0, + "step": 5913, + "token_acc": 0.8718139074486513, + "train_speed(iter/s)": 0.516417 + }, + { + "epoch": 1.89248, + "grad_norm": 0.668919229259021, + "learning_rate": 1.642924268905911e-06, + "loss": 0.2468084692955017, + "memory(GiB)": 77.0, + "step": 5914, + "token_acc": 0.9471399387129724, + "train_speed(iter/s)": 0.516322 + }, + { + "epoch": 1.8928, + "grad_norm": 0.6775460437200187, + "learning_rate": 1.6420958909005521e-06, + "loss": 0.34707897901535034, + "memory(GiB)": 77.0, + "step": 5915, + "token_acc": 0.8669254227890213, + "train_speed(iter/s)": 0.51623 + }, + { + "epoch": 1.8931200000000001, + "grad_norm": 0.6945373968799662, + "learning_rate": 1.6412676196464774e-06, + "loss": 0.3863135576248169, + "memory(GiB)": 77.0, + "step": 5916, + "token_acc": 0.8734402852049911, + "train_speed(iter/s)": 0.516135 + }, + { + "epoch": 1.89344, + "grad_norm": 0.6497222816275213, + "learning_rate": 
1.6404394552467507e-06, + "loss": 0.37439143657684326, + "memory(GiB)": 77.0, + "step": 5917, + "token_acc": 0.8071487946799667, + "train_speed(iter/s)": 0.516033 + }, + { + "epoch": 1.8937599999999999, + "grad_norm": 0.6570971985735083, + "learning_rate": 1.6396113978044226e-06, + "loss": 0.35956549644470215, + "memory(GiB)": 77.0, + "step": 5918, + "token_acc": 0.9030265596046942, + "train_speed(iter/s)": 0.515941 + }, + { + "epoch": 1.89408, + "grad_norm": 0.6523166949286667, + "learning_rate": 1.63878344742253e-06, + "loss": 0.3437078595161438, + "memory(GiB)": 77.0, + "step": 5919, + "token_acc": 0.9140357550399392, + "train_speed(iter/s)": 0.515844 + }, + { + "epoch": 1.8944, + "grad_norm": 1.9243163065745181, + "learning_rate": 1.6379556042040978e-06, + "loss": 0.3002139925956726, + "memory(GiB)": 77.0, + "step": 5920, + "token_acc": 0.8573844419391207, + "train_speed(iter/s)": 0.51575 + }, + { + "epoch": 1.89472, + "grad_norm": 0.645572862829291, + "learning_rate": 1.6371278682521376e-06, + "loss": 0.4028485119342804, + "memory(GiB)": 77.0, + "step": 5921, + "token_acc": 0.8493124152624443, + "train_speed(iter/s)": 0.515645 + }, + { + "epoch": 1.8950399999999998, + "grad_norm": 0.6315698707653418, + "learning_rate": 1.6363002396696448e-06, + "loss": 0.315553218126297, + "memory(GiB)": 77.0, + "step": 5922, + "token_acc": 0.9589430894308943, + "train_speed(iter/s)": 0.515536 + }, + { + "epoch": 1.8953600000000002, + "grad_norm": 0.652773757554498, + "learning_rate": 1.6354727185596042e-06, + "loss": 0.28209009766578674, + "memory(GiB)": 77.0, + "step": 5923, + "token_acc": 0.9276111685625646, + "train_speed(iter/s)": 0.515445 + }, + { + "epoch": 1.89568, + "grad_norm": 0.6513220924730924, + "learning_rate": 1.634645305024986e-06, + "loss": 0.367863267660141, + "memory(GiB)": 77.0, + "step": 5924, + "token_acc": 0.8760072356520309, + "train_speed(iter/s)": 0.515347 + }, + { + "epoch": 1.896, + "grad_norm": 0.7386646630021446, + "learning_rate": 1.6338179991687477e-06, + "loss": 0.3695542514324188, + "memory(GiB)": 77.0, + "step": 5925, + "token_acc": 0.8609978035770317, + "train_speed(iter/s)": 0.515247 + }, + { + "epoch": 1.89632, + "grad_norm": 0.6755033458895403, + "learning_rate": 1.632990801093834e-06, + "loss": 0.28110504150390625, + "memory(GiB)": 77.0, + "step": 5926, + "token_acc": 0.95625, + "train_speed(iter/s)": 0.51516 + }, + { + "epoch": 1.89664, + "grad_norm": 0.6008742739584583, + "learning_rate": 1.6321637109031746e-06, + "loss": 0.266912043094635, + "memory(GiB)": 77.0, + "step": 5927, + "token_acc": 0.8987237546315356, + "train_speed(iter/s)": 0.515064 + }, + { + "epoch": 1.89696, + "grad_norm": 0.7027648228237798, + "learning_rate": 1.6313367286996867e-06, + "loss": 0.41369694471359253, + "memory(GiB)": 77.0, + "step": 5928, + "token_acc": 0.8590758233341843, + "train_speed(iter/s)": 0.514973 + }, + { + "epoch": 1.8972799999999999, + "grad_norm": 0.7238625163532255, + "learning_rate": 1.6305098545862741e-06, + "loss": 0.34576648473739624, + "memory(GiB)": 77.0, + "step": 5929, + "token_acc": 0.9245361684386074, + "train_speed(iter/s)": 0.514885 + }, + { + "epoch": 1.8976, + "grad_norm": 0.7330162961116141, + "learning_rate": 1.6296830886658267e-06, + "loss": 0.3533361554145813, + "memory(GiB)": 77.0, + "step": 5930, + "token_acc": 0.9027812578301178, + "train_speed(iter/s)": 0.514792 + }, + { + "epoch": 1.89792, + "grad_norm": 0.672353110484006, + "learning_rate": 1.6288564310412209e-06, + "loss": 0.38197940587997437, + "memory(GiB)": 77.0, + "step": 5931, + 
"token_acc": 0.851109329056402, + "train_speed(iter/s)": 0.514686 + }, + { + "epoch": 1.89824, + "grad_norm": 0.6601608074171741, + "learning_rate": 1.62802988181532e-06, + "loss": 0.3677070140838623, + "memory(GiB)": 77.0, + "step": 5932, + "token_acc": 0.8498999332888593, + "train_speed(iter/s)": 0.514596 + }, + { + "epoch": 1.89856, + "grad_norm": 0.6871154687622754, + "learning_rate": 1.6272034410909743e-06, + "loss": 0.3679848313331604, + "memory(GiB)": 77.0, + "step": 5933, + "token_acc": 0.8975656784767414, + "train_speed(iter/s)": 0.514505 + }, + { + "epoch": 1.8988800000000001, + "grad_norm": 0.7361442371757949, + "learning_rate": 1.6263771089710193e-06, + "loss": 0.35345959663391113, + "memory(GiB)": 77.0, + "step": 5934, + "token_acc": 0.9078356920952709, + "train_speed(iter/s)": 0.514397 + }, + { + "epoch": 1.8992, + "grad_norm": 0.6563869362937259, + "learning_rate": 1.6255508855582785e-06, + "loss": 0.3209652900695801, + "memory(GiB)": 77.0, + "step": 5935, + "token_acc": 0.9082286593181236, + "train_speed(iter/s)": 0.514297 + }, + { + "epoch": 1.8995199999999999, + "grad_norm": 0.7576338270865205, + "learning_rate": 1.6247247709555603e-06, + "loss": 0.3570002317428589, + "memory(GiB)": 77.0, + "step": 5936, + "token_acc": 0.8583019414662417, + "train_speed(iter/s)": 0.514199 + }, + { + "epoch": 1.89984, + "grad_norm": 0.6087416510382371, + "learning_rate": 1.6238987652656608e-06, + "loss": 0.2900485396385193, + "memory(GiB)": 77.0, + "step": 5937, + "token_acc": 0.9631560653458464, + "train_speed(iter/s)": 0.514104 + }, + { + "epoch": 1.90016, + "grad_norm": 0.5996487501761596, + "learning_rate": 1.6230728685913614e-06, + "loss": 0.22095973789691925, + "memory(GiB)": 77.0, + "step": 5938, + "token_acc": 0.9088228065863848, + "train_speed(iter/s)": 0.514011 + }, + { + "epoch": 1.90048, + "grad_norm": 0.6482543246104737, + "learning_rate": 1.6222470810354313e-06, + "loss": 0.4072072505950928, + "memory(GiB)": 77.0, + "step": 5939, + "token_acc": 0.9655846695346109, + "train_speed(iter/s)": 0.513911 + }, + { + "epoch": 1.9008, + "grad_norm": 0.6154700144202129, + "learning_rate": 1.621421402700625e-06, + "loss": 0.3138895034790039, + "memory(GiB)": 77.0, + "step": 5940, + "token_acc": 0.9266589057043073, + "train_speed(iter/s)": 0.513812 + }, + { + "epoch": 1.9011200000000001, + "grad_norm": 0.665963436804234, + "learning_rate": 1.6205958336896843e-06, + "loss": 0.2789718508720398, + "memory(GiB)": 77.0, + "step": 5941, + "token_acc": 0.9548997772828508, + "train_speed(iter/s)": 0.513716 + }, + { + "epoch": 1.90144, + "grad_norm": 0.6031752112619476, + "learning_rate": 1.6197703741053362e-06, + "loss": 0.24898162484169006, + "memory(GiB)": 77.0, + "step": 5942, + "token_acc": 0.8479857556198531, + "train_speed(iter/s)": 0.513624 + }, + { + "epoch": 1.90176, + "grad_norm": 0.6413279859563817, + "learning_rate": 1.6189450240502957e-06, + "loss": 0.36575382947921753, + "memory(GiB)": 77.0, + "step": 5943, + "token_acc": 0.8743279569892473, + "train_speed(iter/s)": 0.513523 + }, + { + "epoch": 1.90208, + "grad_norm": 0.6543071919157867, + "learning_rate": 1.618119783627263e-06, + "loss": 0.30369001626968384, + "memory(GiB)": 77.0, + "step": 5944, + "token_acc": 0.8864742134284788, + "train_speed(iter/s)": 0.51343 + }, + { + "epoch": 1.9024, + "grad_norm": 0.7530228655622826, + "learning_rate": 1.617294652938925e-06, + "loss": 0.4000316262245178, + "memory(GiB)": 77.0, + "step": 5945, + "token_acc": 0.8642895156998404, + "train_speed(iter/s)": 0.513338 + }, + { + "epoch": 1.90272, + 
"grad_norm": 0.5871014695720572, + "learning_rate": 1.6164696320879547e-06, + "loss": 0.27104994654655457, + "memory(GiB)": 77.0, + "step": 5946, + "token_acc": 0.9553289633406167, + "train_speed(iter/s)": 0.513234 + }, + { + "epoch": 1.9030399999999998, + "grad_norm": 0.6744895356603305, + "learning_rate": 1.6156447211770118e-06, + "loss": 0.3522566556930542, + "memory(GiB)": 77.0, + "step": 5947, + "token_acc": 0.8713708195516354, + "train_speed(iter/s)": 0.513145 + }, + { + "epoch": 1.9033600000000002, + "grad_norm": 0.6307397092330622, + "learning_rate": 1.6148199203087423e-06, + "loss": 0.27828389406204224, + "memory(GiB)": 77.0, + "step": 5948, + "token_acc": 0.9106305367378843, + "train_speed(iter/s)": 0.513047 + }, + { + "epoch": 1.90368, + "grad_norm": 0.7011029741340518, + "learning_rate": 1.6139952295857776e-06, + "loss": 0.3437468409538269, + "memory(GiB)": 77.0, + "step": 5949, + "token_acc": 0.8704581358609794, + "train_speed(iter/s)": 0.512957 + }, + { + "epoch": 1.904, + "grad_norm": 0.6269432467479733, + "learning_rate": 1.6131706491107375e-06, + "loss": 0.3309120535850525, + "memory(GiB)": 77.0, + "step": 5950, + "token_acc": 0.9502705488276217, + "train_speed(iter/s)": 0.512865 + }, + { + "epoch": 1.90432, + "grad_norm": 0.6946216859368972, + "learning_rate": 1.6123461789862255e-06, + "loss": 0.36909884214401245, + "memory(GiB)": 77.0, + "step": 5951, + "token_acc": 0.925904652498564, + "train_speed(iter/s)": 0.512779 + }, + { + "epoch": 1.90464, + "grad_norm": 0.6116571637006912, + "learning_rate": 1.611521819314833e-06, + "loss": 0.2557186782360077, + "memory(GiB)": 77.0, + "step": 5952, + "token_acc": 0.908207566673558, + "train_speed(iter/s)": 0.512678 + }, + { + "epoch": 1.90496, + "grad_norm": 0.5929003791825738, + "learning_rate": 1.610697570199137e-06, + "loss": 0.23494921624660492, + "memory(GiB)": 77.0, + "step": 5953, + "token_acc": 0.8690440515433023, + "train_speed(iter/s)": 0.512586 + }, + { + "epoch": 1.9052799999999999, + "grad_norm": 0.6876383094836458, + "learning_rate": 1.6098734317417018e-06, + "loss": 0.3224334120750427, + "memory(GiB)": 77.0, + "step": 5954, + "token_acc": 0.8867977528089888, + "train_speed(iter/s)": 0.512487 + }, + { + "epoch": 1.9056, + "grad_norm": 0.6792055509978094, + "learning_rate": 1.609049404045076e-06, + "loss": 0.43957000970840454, + "memory(GiB)": 77.0, + "step": 5955, + "token_acc": 0.8732859319451498, + "train_speed(iter/s)": 0.512389 + }, + { + "epoch": 1.90592, + "grad_norm": 0.6815309210386317, + "learning_rate": 1.6082254872117967e-06, + "loss": 0.341222882270813, + "memory(GiB)": 77.0, + "step": 5956, + "token_acc": 0.9339837398373984, + "train_speed(iter/s)": 0.512303 + }, + { + "epoch": 1.90624, + "grad_norm": 0.6732511344342902, + "learning_rate": 1.6074016813443855e-06, + "loss": 0.37363868951797485, + "memory(GiB)": 77.0, + "step": 5957, + "token_acc": 0.872894007251013, + "train_speed(iter/s)": 0.512216 + }, + { + "epoch": 1.90656, + "grad_norm": 0.6291479632006134, + "learning_rate": 1.6065779865453507e-06, + "loss": 0.32502609491348267, + "memory(GiB)": 77.0, + "step": 5958, + "token_acc": 0.9240992321323095, + "train_speed(iter/s)": 0.512127 + }, + { + "epoch": 1.9068800000000001, + "grad_norm": 0.6804279155260972, + "learning_rate": 1.6057544029171863e-06, + "loss": 0.3015654981136322, + "memory(GiB)": 77.0, + "step": 5959, + "token_acc": 0.8767820773930753, + "train_speed(iter/s)": 0.512027 + }, + { + "epoch": 1.9072, + "grad_norm": 0.6493506712750184, + "learning_rate": 1.6049309305623742e-06, + 
"loss": 0.3611510992050171, + "memory(GiB)": 77.0, + "step": 5960, + "token_acc": 0.9004566210045662, + "train_speed(iter/s)": 0.511937 + }, + { + "epoch": 1.9075199999999999, + "grad_norm": 0.6664656062523124, + "learning_rate": 1.6041075695833807e-06, + "loss": 0.41068345308303833, + "memory(GiB)": 77.0, + "step": 5961, + "token_acc": 0.8601047577477084, + "train_speed(iter/s)": 0.511787 + }, + { + "epoch": 1.90784, + "grad_norm": 0.612381978780901, + "learning_rate": 1.6032843200826586e-06, + "loss": 0.290660560131073, + "memory(GiB)": 77.0, + "step": 5962, + "token_acc": 0.9001141552511416, + "train_speed(iter/s)": 0.511685 + }, + { + "epoch": 1.90816, + "grad_norm": 0.7148255916015493, + "learning_rate": 1.6024611821626474e-06, + "loss": 0.4783347249031067, + "memory(GiB)": 77.0, + "step": 5963, + "token_acc": 0.8702005730659026, + "train_speed(iter/s)": 0.511592 + }, + { + "epoch": 1.90848, + "grad_norm": 0.6729805688955991, + "learning_rate": 1.601638155925773e-06, + "loss": 0.3557427227497101, + "memory(GiB)": 77.0, + "step": 5964, + "token_acc": 0.9270017681232634, + "train_speed(iter/s)": 0.511502 + }, + { + "epoch": 1.9088, + "grad_norm": 0.6630258786956381, + "learning_rate": 1.6008152414744444e-06, + "loss": 0.2800973057746887, + "memory(GiB)": 77.0, + "step": 5965, + "token_acc": 0.9118153634171546, + "train_speed(iter/s)": 0.511416 + }, + { + "epoch": 1.9091200000000002, + "grad_norm": 0.599445504862183, + "learning_rate": 1.5999924389110608e-06, + "loss": 0.3158845603466034, + "memory(GiB)": 77.0, + "step": 5966, + "token_acc": 0.9696758452422447, + "train_speed(iter/s)": 0.511308 + }, + { + "epoch": 1.90944, + "grad_norm": 0.6230890589520167, + "learning_rate": 1.5991697483380056e-06, + "loss": 0.32080239057540894, + "memory(GiB)": 77.0, + "step": 5967, + "token_acc": 0.9551201011378002, + "train_speed(iter/s)": 0.511219 + }, + { + "epoch": 1.90976, + "grad_norm": 0.6352274962697073, + "learning_rate": 1.598347169857648e-06, + "loss": 0.31844595074653625, + "memory(GiB)": 77.0, + "step": 5968, + "token_acc": 0.8349708576186511, + "train_speed(iter/s)": 0.511118 + }, + { + "epoch": 1.91008, + "grad_norm": 0.6652926245478032, + "learning_rate": 1.5975247035723436e-06, + "loss": 0.37599509954452515, + "memory(GiB)": 77.0, + "step": 5969, + "token_acc": 0.9166031987814166, + "train_speed(iter/s)": 0.511025 + }, + { + "epoch": 1.9104, + "grad_norm": 0.666154055946349, + "learning_rate": 1.5967023495844348e-06, + "loss": 0.35340002179145813, + "memory(GiB)": 77.0, + "step": 5970, + "token_acc": 0.9179875947622329, + "train_speed(iter/s)": 0.510936 + }, + { + "epoch": 1.91072, + "grad_norm": 0.6615449216559584, + "learning_rate": 1.5958801079962482e-06, + "loss": 0.25658994913101196, + "memory(GiB)": 77.0, + "step": 5971, + "token_acc": 0.8754285714285714, + "train_speed(iter/s)": 0.510844 + }, + { + "epoch": 1.9110399999999998, + "grad_norm": 0.6652644541707318, + "learning_rate": 1.5950579789100984e-06, + "loss": 0.2878289520740509, + "memory(GiB)": 77.0, + "step": 5972, + "token_acc": 0.8583627797408716, + "train_speed(iter/s)": 0.510757 + }, + { + "epoch": 1.91136, + "grad_norm": 0.6605008494132465, + "learning_rate": 1.5942359624282843e-06, + "loss": 0.28103071451187134, + "memory(GiB)": 77.0, + "step": 5973, + "token_acc": 0.8708364174685418, + "train_speed(iter/s)": 0.51066 + }, + { + "epoch": 1.91168, + "grad_norm": 0.6414177836611806, + "learning_rate": 1.593414058653092e-06, + "loss": 0.36052989959716797, + "memory(GiB)": 77.0, + "step": 5974, + "token_acc": 
0.7996055226824458, + "train_speed(iter/s)": 0.51056 + }, + { + "epoch": 1.912, + "grad_norm": 0.730103833562978, + "learning_rate": 1.5925922676867938e-06, + "loss": 0.3823741674423218, + "memory(GiB)": 77.0, + "step": 5975, + "token_acc": 0.9077526987242395, + "train_speed(iter/s)": 0.510477 + }, + { + "epoch": 1.91232, + "grad_norm": 0.6377614271406548, + "learning_rate": 1.591770589631646e-06, + "loss": 0.3626890480518341, + "memory(GiB)": 77.0, + "step": 5976, + "token_acc": 0.852467024914509, + "train_speed(iter/s)": 0.510381 + }, + { + "epoch": 1.9126400000000001, + "grad_norm": 0.6322270887131287, + "learning_rate": 1.5909490245898929e-06, + "loss": 0.35779643058776855, + "memory(GiB)": 77.0, + "step": 5977, + "token_acc": 0.9019873532068654, + "train_speed(iter/s)": 0.510293 + }, + { + "epoch": 1.91296, + "grad_norm": 0.5694866936018242, + "learning_rate": 1.5901275726637654e-06, + "loss": 0.271369069814682, + "memory(GiB)": 77.0, + "step": 5978, + "token_acc": 0.9264458169567659, + "train_speed(iter/s)": 0.510197 + }, + { + "epoch": 1.9132799999999999, + "grad_norm": 0.6663876424030114, + "learning_rate": 1.5893062339554765e-06, + "loss": 0.3597261905670166, + "memory(GiB)": 77.0, + "step": 5979, + "token_acc": 0.9114538703227649, + "train_speed(iter/s)": 0.510097 + }, + { + "epoch": 1.9136, + "grad_norm": 0.6755425529815399, + "learning_rate": 1.5884850085672287e-06, + "loss": 0.3089495301246643, + "memory(GiB)": 77.0, + "step": 5980, + "token_acc": 0.8814262023217247, + "train_speed(iter/s)": 0.509999 + }, + { + "epoch": 1.91392, + "grad_norm": 0.6767291219489062, + "learning_rate": 1.5876638966012093e-06, + "loss": 0.3565731644630432, + "memory(GiB)": 77.0, + "step": 5981, + "token_acc": 0.8735032238256064, + "train_speed(iter/s)": 0.509907 + }, + { + "epoch": 1.91424, + "grad_norm": 0.7582281429105914, + "learning_rate": 1.5868428981595905e-06, + "loss": 0.35441499948501587, + "memory(GiB)": 77.0, + "step": 5982, + "token_acc": 0.8852071005917159, + "train_speed(iter/s)": 0.509823 + }, + { + "epoch": 1.91456, + "grad_norm": 0.6434144114681071, + "learning_rate": 1.5860220133445326e-06, + "loss": 0.34200024604797363, + "memory(GiB)": 77.0, + "step": 5983, + "token_acc": 0.9137931034482759, + "train_speed(iter/s)": 0.509723 + }, + { + "epoch": 1.9148800000000001, + "grad_norm": 0.6177266993414485, + "learning_rate": 1.5852012422581802e-06, + "loss": 0.30665868520736694, + "memory(GiB)": 77.0, + "step": 5984, + "token_acc": 0.905628581058308, + "train_speed(iter/s)": 0.509633 + }, + { + "epoch": 1.9152, + "grad_norm": 0.6518912318925961, + "learning_rate": 1.5843805850026637e-06, + "loss": 0.3176689147949219, + "memory(GiB)": 77.0, + "step": 5985, + "token_acc": 0.9284970496355432, + "train_speed(iter/s)": 0.50954 + }, + { + "epoch": 1.91552, + "grad_norm": 0.6846342118415863, + "learning_rate": 1.5835600416800997e-06, + "loss": 0.3665493130683899, + "memory(GiB)": 77.0, + "step": 5986, + "token_acc": 0.8663294797687862, + "train_speed(iter/s)": 0.509447 + }, + { + "epoch": 1.91584, + "grad_norm": 0.6677900720339002, + "learning_rate": 1.582739612392591e-06, + "loss": 0.29917752742767334, + "memory(GiB)": 77.0, + "step": 5987, + "token_acc": 0.9019715224534501, + "train_speed(iter/s)": 0.50934 + }, + { + "epoch": 1.91616, + "grad_norm": 0.6564468406955941, + "learning_rate": 1.5819192972422248e-06, + "loss": 0.2638316750526428, + "memory(GiB)": 77.0, + "step": 5988, + "token_acc": 0.9267166762839008, + "train_speed(iter/s)": 0.509252 + }, + { + "epoch": 1.91648, + "grad_norm": 
0.6934277681476981, + "learning_rate": 1.5810990963310763e-06, + "loss": 0.4057156443595886, + "memory(GiB)": 77.0, + "step": 5989, + "token_acc": 0.9114914425427872, + "train_speed(iter/s)": 0.509161 + }, + { + "epoch": 1.9167999999999998, + "grad_norm": 0.6197981780958186, + "learning_rate": 1.5802790097612045e-06, + "loss": 0.3312840163707733, + "memory(GiB)": 77.0, + "step": 5990, + "token_acc": 0.8727099690697121, + "train_speed(iter/s)": 0.509067 + }, + { + "epoch": 1.9171200000000002, + "grad_norm": 0.6910084850814536, + "learning_rate": 1.5794590376346551e-06, + "loss": 0.3774956464767456, + "memory(GiB)": 77.0, + "step": 5991, + "token_acc": 0.8518421740266527, + "train_speed(iter/s)": 0.508974 + }, + { + "epoch": 1.91744, + "grad_norm": 0.6154742945869549, + "learning_rate": 1.5786391800534595e-06, + "loss": 0.2835196554660797, + "memory(GiB)": 77.0, + "step": 5992, + "token_acc": 0.9457459926017263, + "train_speed(iter/s)": 0.508856 + }, + { + "epoch": 1.91776, + "grad_norm": 0.7640441156106822, + "learning_rate": 1.5778194371196354e-06, + "loss": 0.3603259027004242, + "memory(GiB)": 77.0, + "step": 5993, + "token_acc": 0.9017589017589017, + "train_speed(iter/s)": 0.508772 + }, + { + "epoch": 1.91808, + "grad_norm": 0.7139471636791453, + "learning_rate": 1.5769998089351846e-06, + "loss": 0.3356371223926544, + "memory(GiB)": 77.0, + "step": 5994, + "token_acc": 0.9150881776616591, + "train_speed(iter/s)": 0.508688 + }, + { + "epoch": 1.9184, + "grad_norm": 0.6477422699853482, + "learning_rate": 1.5761802956020956e-06, + "loss": 0.307454377412796, + "memory(GiB)": 77.0, + "step": 5995, + "token_acc": 0.8654347826086957, + "train_speed(iter/s)": 0.508601 + }, + { + "epoch": 1.91872, + "grad_norm": 0.6658687230138055, + "learning_rate": 1.5753608972223427e-06, + "loss": 0.3014109134674072, + "memory(GiB)": 77.0, + "step": 5996, + "token_acc": 0.9146877748460862, + "train_speed(iter/s)": 0.508514 + }, + { + "epoch": 1.9190399999999999, + "grad_norm": 0.6782267142199249, + "learning_rate": 1.5745416138978865e-06, + "loss": 0.33770084381103516, + "memory(GiB)": 77.0, + "step": 5997, + "token_acc": 0.8967377343950681, + "train_speed(iter/s)": 0.508421 + }, + { + "epoch": 1.91936, + "grad_norm": 0.644855263107187, + "learning_rate": 1.5737224457306719e-06, + "loss": 0.3757537305355072, + "memory(GiB)": 77.0, + "step": 5998, + "token_acc": 0.9089565474430978, + "train_speed(iter/s)": 0.508334 + }, + { + "epoch": 1.91968, + "grad_norm": 0.6625417527598869, + "learning_rate": 1.5729033928226297e-06, + "loss": 0.34622418880462646, + "memory(GiB)": 77.0, + "step": 5999, + "token_acc": 0.9107009229234223, + "train_speed(iter/s)": 0.508245 + }, + { + "epoch": 1.92, + "grad_norm": 0.6201249549181229, + "learning_rate": 1.5720844552756783e-06, + "loss": 0.34150704741477966, + "memory(GiB)": 77.0, + "step": 6000, + "token_acc": 0.8648085438916384, + "train_speed(iter/s)": 0.508149 + }, + { + "epoch": 1.92032, + "grad_norm": 0.6108367725768369, + "learning_rate": 1.5712656331917198e-06, + "loss": 0.2775886654853821, + "memory(GiB)": 77.0, + "step": 6001, + "token_acc": 0.8806256860592755, + "train_speed(iter/s)": 0.508054 + }, + { + "epoch": 1.9206400000000001, + "grad_norm": 0.6321244657007665, + "learning_rate": 1.5704469266726418e-06, + "loss": 0.3345143795013428, + "memory(GiB)": 77.0, + "step": 6002, + "token_acc": 0.851773549447968, + "train_speed(iter/s)": 0.507967 + }, + { + "epoch": 1.92096, + "grad_norm": 0.6781590969519536, + "learning_rate": 1.5696283358203185e-06, + "loss": 
0.30894213914871216, + "memory(GiB)": 77.0, + "step": 6003, + "token_acc": 0.8943089430894309, + "train_speed(iter/s)": 0.507868 + }, + { + "epoch": 1.9212799999999999, + "grad_norm": 0.6518520408935241, + "learning_rate": 1.5688098607366092e-06, + "loss": 0.35126563906669617, + "memory(GiB)": 77.0, + "step": 6004, + "token_acc": 0.9153868443929296, + "train_speed(iter/s)": 0.507783 + }, + { + "epoch": 1.9216, + "grad_norm": 0.709911271400463, + "learning_rate": 1.5679915015233588e-06, + "loss": 0.4022287130355835, + "memory(GiB)": 77.0, + "step": 6005, + "token_acc": 0.9227150537634409, + "train_speed(iter/s)": 0.507697 + }, + { + "epoch": 1.92192, + "grad_norm": 0.6449644994723969, + "learning_rate": 1.5671732582823984e-06, + "loss": 0.37432074546813965, + "memory(GiB)": 77.0, + "step": 6006, + "token_acc": 0.9493618773157678, + "train_speed(iter/s)": 0.507603 + }, + { + "epoch": 1.92224, + "grad_norm": 0.6233871138846523, + "learning_rate": 1.5663551311155445e-06, + "loss": 0.3329485058784485, + "memory(GiB)": 77.0, + "step": 6007, + "token_acc": 0.8682170542635659, + "train_speed(iter/s)": 0.507504 + }, + { + "epoch": 1.92256, + "grad_norm": 0.6325810573350451, + "learning_rate": 1.5655371201245973e-06, + "loss": 0.3238900303840637, + "memory(GiB)": 77.0, + "step": 6008, + "token_acc": 0.9624629568653277, + "train_speed(iter/s)": 0.507418 + }, + { + "epoch": 1.9228800000000001, + "grad_norm": 0.6773416198000894, + "learning_rate": 1.5647192254113452e-06, + "loss": 0.3825588822364807, + "memory(GiB)": 77.0, + "step": 6009, + "token_acc": 0.9438543247344461, + "train_speed(iter/s)": 0.507333 + }, + { + "epoch": 1.9232, + "grad_norm": 0.6703061658755304, + "learning_rate": 1.563901447077561e-06, + "loss": 0.29052239656448364, + "memory(GiB)": 77.0, + "step": 6010, + "token_acc": 0.941285081240768, + "train_speed(iter/s)": 0.507243 + }, + { + "epoch": 1.92352, + "grad_norm": 0.6665218399237242, + "learning_rate": 1.5630837852250025e-06, + "loss": 0.33912393450737, + "memory(GiB)": 77.0, + "step": 6011, + "token_acc": 0.8646373056994818, + "train_speed(iter/s)": 0.507161 + }, + { + "epoch": 1.92384, + "grad_norm": 0.6956216884967555, + "learning_rate": 1.5622662399554143e-06, + "loss": 0.3282374441623688, + "memory(GiB)": 77.0, + "step": 6012, + "token_acc": 0.8476621417797888, + "train_speed(iter/s)": 0.507053 + }, + { + "epoch": 1.92416, + "grad_norm": 0.6808195257716987, + "learning_rate": 1.5614488113705252e-06, + "loss": 0.3502233028411865, + "memory(GiB)": 77.0, + "step": 6013, + "token_acc": 0.932557205941389, + "train_speed(iter/s)": 0.506955 + }, + { + "epoch": 1.92448, + "grad_norm": 0.6141862022864907, + "learning_rate": 1.5606314995720496e-06, + "loss": 0.3568389117717743, + "memory(GiB)": 77.0, + "step": 6014, + "token_acc": 0.9047025848645281, + "train_speed(iter/s)": 0.506862 + }, + { + "epoch": 1.9247999999999998, + "grad_norm": 0.6406084104891235, + "learning_rate": 1.5598143046616887e-06, + "loss": 0.28451400995254517, + "memory(GiB)": 77.0, + "step": 6015, + "token_acc": 0.8406797663303239, + "train_speed(iter/s)": 0.506779 + }, + { + "epoch": 1.9251200000000002, + "grad_norm": 0.6985728744385904, + "learning_rate": 1.5589972267411268e-06, + "loss": 0.27204570174217224, + "memory(GiB)": 77.0, + "step": 6016, + "token_acc": 0.929368029739777, + "train_speed(iter/s)": 0.50669 + }, + { + "epoch": 1.92544, + "grad_norm": 0.6457560727686998, + "learning_rate": 1.558180265912037e-06, + "loss": 0.3037589192390442, + "memory(GiB)": 77.0, + "step": 6017, + "token_acc": 
0.8139379793264422, + "train_speed(iter/s)": 0.5066 + }, + { + "epoch": 1.92576, + "grad_norm": 0.6750204447967563, + "learning_rate": 1.5573634222760753e-06, + "loss": 0.3920173943042755, + "memory(GiB)": 77.0, + "step": 6018, + "token_acc": 0.9204452891664404, + "train_speed(iter/s)": 0.506513 + }, + { + "epoch": 1.92608, + "grad_norm": 0.7185371330374538, + "learning_rate": 1.5565466959348829e-06, + "loss": 0.43290263414382935, + "memory(GiB)": 77.0, + "step": 6019, + "token_acc": 0.8964285714285715, + "train_speed(iter/s)": 0.50643 + }, + { + "epoch": 1.9264000000000001, + "grad_norm": 0.6259264276863947, + "learning_rate": 1.5557300869900876e-06, + "loss": 0.38361382484436035, + "memory(GiB)": 77.0, + "step": 6020, + "token_acc": 0.9140260950605779, + "train_speed(iter/s)": 0.506332 + }, + { + "epoch": 1.92672, + "grad_norm": 0.6233462782913133, + "learning_rate": 1.5549135955433037e-06, + "loss": 0.2670917510986328, + "memory(GiB)": 77.0, + "step": 6021, + "token_acc": 0.9267709691438505, + "train_speed(iter/s)": 0.506233 + }, + { + "epoch": 1.9270399999999999, + "grad_norm": 0.6670439138974816, + "learning_rate": 1.5540972216961265e-06, + "loss": 0.43336671590805054, + "memory(GiB)": 77.0, + "step": 6022, + "token_acc": 0.8590054578532443, + "train_speed(iter/s)": 0.506142 + }, + { + "epoch": 1.92736, + "grad_norm": 0.6502547051381914, + "learning_rate": 1.5532809655501417e-06, + "loss": 0.25833165645599365, + "memory(GiB)": 77.0, + "step": 6023, + "token_acc": 0.9441624365482234, + "train_speed(iter/s)": 0.506056 + }, + { + "epoch": 1.92768, + "grad_norm": 0.6736134117629745, + "learning_rate": 1.5524648272069174e-06, + "loss": 0.25392889976501465, + "memory(GiB)": 77.0, + "step": 6024, + "token_acc": 0.9134977016297534, + "train_speed(iter/s)": 0.505977 + }, + { + "epoch": 1.928, + "grad_norm": 0.7102353382004523, + "learning_rate": 1.551648806768008e-06, + "loss": 0.3703678846359253, + "memory(GiB)": 77.0, + "step": 6025, + "token_acc": 0.7874672385036932, + "train_speed(iter/s)": 0.50589 + }, + { + "epoch": 1.92832, + "grad_norm": 0.624303040186037, + "learning_rate": 1.5508329043349532e-06, + "loss": 0.3342549204826355, + "memory(GiB)": 77.0, + "step": 6026, + "token_acc": 0.9275410935927542, + "train_speed(iter/s)": 0.505783 + }, + { + "epoch": 1.9286400000000001, + "grad_norm": 0.6338162432736079, + "learning_rate": 1.5500171200092778e-06, + "loss": 0.31441906094551086, + "memory(GiB)": 77.0, + "step": 6027, + "token_acc": 0.9539678057046033, + "train_speed(iter/s)": 0.505701 + }, + { + "epoch": 1.92896, + "grad_norm": 0.6589548378465694, + "learning_rate": 1.5492014538924924e-06, + "loss": 0.3427368402481079, + "memory(GiB)": 77.0, + "step": 6028, + "token_acc": 0.9709431174587541, + "train_speed(iter/s)": 0.505618 + }, + { + "epoch": 1.9292799999999999, + "grad_norm": 0.7075024615367042, + "learning_rate": 1.548385906086092e-06, + "loss": 0.4246635138988495, + "memory(GiB)": 77.0, + "step": 6029, + "token_acc": 0.8001252152136484, + "train_speed(iter/s)": 0.505513 + }, + { + "epoch": 1.9296, + "grad_norm": 0.6378575286111863, + "learning_rate": 1.5475704766915576e-06, + "loss": 0.33271312713623047, + "memory(GiB)": 77.0, + "step": 6030, + "token_acc": 0.9590876176683563, + "train_speed(iter/s)": 0.505427 + }, + { + "epoch": 1.92992, + "grad_norm": 0.6604933246690321, + "learning_rate": 1.5467551658103552e-06, + "loss": 0.2808837294578552, + "memory(GiB)": 77.0, + "step": 6031, + "token_acc": 0.8721728081321474, + "train_speed(iter/s)": 0.505345 + }, + { + "epoch": 1.93024, 
+ "grad_norm": 0.7099671409916389, + "learning_rate": 1.545939973543936e-06, + "loss": 0.37854301929473877, + "memory(GiB)": 77.0, + "step": 6032, + "token_acc": 0.8879012345679013, + "train_speed(iter/s)": 0.505257 + }, + { + "epoch": 1.93056, + "grad_norm": 0.622417535430133, + "learning_rate": 1.545124899993736e-06, + "loss": 0.26680928468704224, + "memory(GiB)": 77.0, + "step": 6033, + "token_acc": 0.8933884297520661, + "train_speed(iter/s)": 0.505167 + }, + { + "epoch": 1.9308800000000002, + "grad_norm": 0.6971113602639952, + "learning_rate": 1.5443099452611781e-06, + "loss": 0.33935546875, + "memory(GiB)": 77.0, + "step": 6034, + "token_acc": 0.8949362728212195, + "train_speed(iter/s)": 0.50508 + }, + { + "epoch": 1.9312, + "grad_norm": 0.6338463939749787, + "learning_rate": 1.543495109447669e-06, + "loss": 0.2807585597038269, + "memory(GiB)": 77.0, + "step": 6035, + "token_acc": 0.8563579277864992, + "train_speed(iter/s)": 0.504988 + }, + { + "epoch": 1.93152, + "grad_norm": 0.6786680053007716, + "learning_rate": 1.5426803926546016e-06, + "loss": 0.38433700799942017, + "memory(GiB)": 77.0, + "step": 6036, + "token_acc": 0.90112, + "train_speed(iter/s)": 0.504898 + }, + { + "epoch": 1.93184, + "grad_norm": 0.7252184395889003, + "learning_rate": 1.5418657949833515e-06, + "loss": 0.40412241220474243, + "memory(GiB)": 77.0, + "step": 6037, + "token_acc": 0.8964646464646465, + "train_speed(iter/s)": 0.50481 + }, + { + "epoch": 1.93216, + "grad_norm": 0.6661880258451147, + "learning_rate": 1.5410513165352816e-06, + "loss": 0.2931104898452759, + "memory(GiB)": 77.0, + "step": 6038, + "token_acc": 0.9016342892383595, + "train_speed(iter/s)": 0.50473 + }, + { + "epoch": 1.93248, + "grad_norm": 0.667795843482572, + "learning_rate": 1.5402369574117398e-06, + "loss": 0.3530421257019043, + "memory(GiB)": 77.0, + "step": 6039, + "token_acc": 0.8874560375146542, + "train_speed(iter/s)": 0.504634 + }, + { + "epoch": 1.9327999999999999, + "grad_norm": 0.6963298143279294, + "learning_rate": 1.5394227177140597e-06, + "loss": 0.321307510137558, + "memory(GiB)": 77.0, + "step": 6040, + "token_acc": 0.9536112742219612, + "train_speed(iter/s)": 0.50455 + }, + { + "epoch": 1.93312, + "grad_norm": 0.6539369127298421, + "learning_rate": 1.5386085975435592e-06, + "loss": 0.3067625164985657, + "memory(GiB)": 77.0, + "step": 6041, + "token_acc": 0.9632536567962897, + "train_speed(iter/s)": 0.504458 + }, + { + "epoch": 1.93344, + "grad_norm": 0.6994954950984197, + "learning_rate": 1.5377945970015407e-06, + "loss": 0.46714290976524353, + "memory(GiB)": 77.0, + "step": 6042, + "token_acc": 0.899883855981417, + "train_speed(iter/s)": 0.504355 + }, + { + "epoch": 1.93376, + "grad_norm": 0.6318263619416001, + "learning_rate": 1.536980716189293e-06, + "loss": 0.4030616879463196, + "memory(GiB)": 77.0, + "step": 6043, + "token_acc": 0.9244040150564617, + "train_speed(iter/s)": 0.504269 + }, + { + "epoch": 1.93408, + "grad_norm": 0.6812477221636873, + "learning_rate": 1.5361669552080894e-06, + "loss": 0.3695197105407715, + "memory(GiB)": 77.0, + "step": 6044, + "token_acc": 0.8422949002217295, + "train_speed(iter/s)": 0.504171 + }, + { + "epoch": 1.9344000000000001, + "grad_norm": 0.6797214277100759, + "learning_rate": 1.5353533141591882e-06, + "loss": 0.3345184326171875, + "memory(GiB)": 77.0, + "step": 6045, + "token_acc": 0.892995665069587, + "train_speed(iter/s)": 0.504087 + }, + { + "epoch": 1.93472, + "grad_norm": 0.6784745090940381, + "learning_rate": 1.5345397931438325e-06, + "loss": 0.3020884692668915, + 
"memory(GiB)": 77.0, + "step": 6046, + "token_acc": 0.9, + "train_speed(iter/s)": 0.503991 + }, + { + "epoch": 1.9350399999999999, + "grad_norm": 0.5809655903610983, + "learning_rate": 1.5337263922632517e-06, + "loss": 0.2703609764575958, + "memory(GiB)": 77.0, + "step": 6047, + "token_acc": 0.8734256926952141, + "train_speed(iter/s)": 0.503904 + }, + { + "epoch": 1.93536, + "grad_norm": 0.6828094461638324, + "learning_rate": 1.5329131116186589e-06, + "loss": 0.36262375116348267, + "memory(GiB)": 77.0, + "step": 6048, + "token_acc": 0.8962542565266742, + "train_speed(iter/s)": 0.503811 + }, + { + "epoch": 1.93568, + "grad_norm": 0.6593920537319119, + "learning_rate": 1.5320999513112527e-06, + "loss": 0.3847728967666626, + "memory(GiB)": 77.0, + "step": 6049, + "token_acc": 0.9362648221343873, + "train_speed(iter/s)": 0.503725 + }, + { + "epoch": 1.936, + "grad_norm": 0.6866893346955668, + "learning_rate": 1.531286911442218e-06, + "loss": 0.3226884603500366, + "memory(GiB)": 77.0, + "step": 6050, + "token_acc": 0.9379498634896997, + "train_speed(iter/s)": 0.503634 + }, + { + "epoch": 1.93632, + "grad_norm": 0.6763768257139209, + "learning_rate": 1.5304739921127218e-06, + "loss": 0.2920513153076172, + "memory(GiB)": 77.0, + "step": 6051, + "token_acc": 0.8582621082621082, + "train_speed(iter/s)": 0.503553 + }, + { + "epoch": 1.9366400000000001, + "grad_norm": 0.6828380291066422, + "learning_rate": 1.5296611934239187e-06, + "loss": 0.3402014970779419, + "memory(GiB)": 77.0, + "step": 6052, + "token_acc": 0.898302055406613, + "train_speed(iter/s)": 0.503467 + }, + { + "epoch": 1.93696, + "grad_norm": 0.7096444967240099, + "learning_rate": 1.5288485154769467e-06, + "loss": 0.32954490184783936, + "memory(GiB)": 77.0, + "step": 6053, + "token_acc": 0.86368843069874, + "train_speed(iter/s)": 0.503385 + }, + { + "epoch": 1.93728, + "grad_norm": 0.7058099591347378, + "learning_rate": 1.5280359583729304e-06, + "loss": 0.3349267840385437, + "memory(GiB)": 77.0, + "step": 6054, + "token_acc": 0.8865584122688318, + "train_speed(iter/s)": 0.503303 + }, + { + "epoch": 1.9376, + "grad_norm": 0.6947516609492177, + "learning_rate": 1.5272235222129777e-06, + "loss": 0.4195808470249176, + "memory(GiB)": 77.0, + "step": 6055, + "token_acc": 0.8077673058173546, + "train_speed(iter/s)": 0.503212 + }, + { + "epoch": 1.93792, + "grad_norm": 0.6904500164851969, + "learning_rate": 1.5264112070981823e-06, + "loss": 0.35145190358161926, + "memory(GiB)": 77.0, + "step": 6056, + "token_acc": 0.9555125725338491, + "train_speed(iter/s)": 0.503128 + }, + { + "epoch": 1.93824, + "grad_norm": 0.6467781779771915, + "learning_rate": 1.5255990131296233e-06, + "loss": 0.24750961363315582, + "memory(GiB)": 77.0, + "step": 6057, + "token_acc": 0.943824027072758, + "train_speed(iter/s)": 0.503048 + }, + { + "epoch": 1.9385599999999998, + "grad_norm": 0.6770019803864581, + "learning_rate": 1.524786940408364e-06, + "loss": 0.3618018329143524, + "memory(GiB)": 77.0, + "step": 6058, + "token_acc": 0.9459598558929491, + "train_speed(iter/s)": 0.50296 + }, + { + "epoch": 1.9388800000000002, + "grad_norm": 0.6899390040000751, + "learning_rate": 1.5239749890354525e-06, + "loss": 0.3443523645401001, + "memory(GiB)": 77.0, + "step": 6059, + "token_acc": 0.8862124340445057, + "train_speed(iter/s)": 0.502876 + }, + { + "epoch": 1.9392, + "grad_norm": 0.6096772403520763, + "learning_rate": 1.5231631591119216e-06, + "loss": 0.24699553847312927, + "memory(GiB)": 77.0, + "step": 6060, + "token_acc": 0.9511480214948705, + "train_speed(iter/s)": 
0.502792 + }, + { + "epoch": 1.93952, + "grad_norm": 0.671255561763556, + "learning_rate": 1.5223514507387907e-06, + "loss": 0.31052735447883606, + "memory(GiB)": 77.0, + "step": 6061, + "token_acc": 0.8884681583476765, + "train_speed(iter/s)": 0.502704 + }, + { + "epoch": 1.93984, + "grad_norm": 0.5851601513032322, + "learning_rate": 1.5215398640170617e-06, + "loss": 0.28930819034576416, + "memory(GiB)": 77.0, + "step": 6062, + "token_acc": 0.968497576736672, + "train_speed(iter/s)": 0.502622 + }, + { + "epoch": 1.94016, + "grad_norm": 0.6581713510760857, + "learning_rate": 1.5207283990477228e-06, + "loss": 0.24589353799819946, + "memory(GiB)": 77.0, + "step": 6063, + "token_acc": 0.9215060625398851, + "train_speed(iter/s)": 0.502538 + }, + { + "epoch": 1.94048, + "grad_norm": 0.6784424778792858, + "learning_rate": 1.5199170559317479e-06, + "loss": 0.30959317088127136, + "memory(GiB)": 77.0, + "step": 6064, + "token_acc": 0.9576368876080692, + "train_speed(iter/s)": 0.502457 + }, + { + "epoch": 1.9407999999999999, + "grad_norm": 0.6283354402691355, + "learning_rate": 1.519105834770092e-06, + "loss": 0.2987596392631531, + "memory(GiB)": 77.0, + "step": 6065, + "token_acc": 0.9039707592623359, + "train_speed(iter/s)": 0.502371 + }, + { + "epoch": 1.94112, + "grad_norm": 0.6505463017146614, + "learning_rate": 1.5182947356636999e-06, + "loss": 0.2851889729499817, + "memory(GiB)": 77.0, + "step": 6066, + "token_acc": 0.9297746144721234, + "train_speed(iter/s)": 0.50229 + }, + { + "epoch": 1.94144, + "grad_norm": 0.6374289748798516, + "learning_rate": 1.5174837587134974e-06, + "loss": 0.3228454291820526, + "memory(GiB)": 77.0, + "step": 6067, + "token_acc": 0.8730453220249139, + "train_speed(iter/s)": 0.502206 + }, + { + "epoch": 1.94176, + "grad_norm": 0.7023929829815275, + "learning_rate": 1.516672904020397e-06, + "loss": 0.4094271659851074, + "memory(GiB)": 77.0, + "step": 6068, + "token_acc": 0.8403180743606276, + "train_speed(iter/s)": 0.50212 + }, + { + "epoch": 1.94208, + "grad_norm": 0.6585344142111688, + "learning_rate": 1.5158621716852959e-06, + "loss": 0.3023792803287506, + "memory(GiB)": 77.0, + "step": 6069, + "token_acc": 0.8712262890729362, + "train_speed(iter/s)": 0.502031 + }, + { + "epoch": 1.9424000000000001, + "grad_norm": 0.6437207410782874, + "learning_rate": 1.515051561809075e-06, + "loss": 0.3827216923236847, + "memory(GiB)": 77.0, + "step": 6070, + "token_acc": 0.8564701450367301, + "train_speed(iter/s)": 0.501946 + }, + { + "epoch": 1.94272, + "grad_norm": 0.6851749727854854, + "learning_rate": 1.514241074492601e-06, + "loss": 0.3642471432685852, + "memory(GiB)": 77.0, + "step": 6071, + "token_acc": 0.9154686601495112, + "train_speed(iter/s)": 0.501855 + }, + { + "epoch": 1.9430399999999999, + "grad_norm": 0.5977493055835531, + "learning_rate": 1.513430709836725e-06, + "loss": 0.29443973302841187, + "memory(GiB)": 77.0, + "step": 6072, + "token_acc": 0.8825247079964061, + "train_speed(iter/s)": 0.501771 + }, + { + "epoch": 1.94336, + "grad_norm": 0.6524610233451023, + "learning_rate": 1.512620467942282e-06, + "loss": 0.3336578905582428, + "memory(GiB)": 77.0, + "step": 6073, + "token_acc": 0.9275600921962462, + "train_speed(iter/s)": 0.501648 + }, + { + "epoch": 1.94368, + "grad_norm": 0.644687050738859, + "learning_rate": 1.511810348910094e-06, + "loss": 0.3220376968383789, + "memory(GiB)": 77.0, + "step": 6074, + "token_acc": 0.9579789894947474, + "train_speed(iter/s)": 0.501567 + }, + { + "epoch": 1.944, + "grad_norm": 0.5941087796282042, + "learning_rate": 
1.5110003528409657e-06, + "loss": 0.3537965416908264, + "memory(GiB)": 77.0, + "step": 6075, + "token_acc": 0.8968493741907639, + "train_speed(iter/s)": 0.501479 + }, + { + "epoch": 1.94432, + "grad_norm": 0.6755720464637949, + "learning_rate": 1.5101904798356863e-06, + "loss": 0.37080568075180054, + "memory(GiB)": 77.0, + "step": 6076, + "token_acc": 0.8885303608096216, + "train_speed(iter/s)": 0.501391 + }, + { + "epoch": 1.9446400000000001, + "grad_norm": 0.6943861481721626, + "learning_rate": 1.509380729995032e-06, + "loss": 0.2690538763999939, + "memory(GiB)": 77.0, + "step": 6077, + "token_acc": 0.9666666666666667, + "train_speed(iter/s)": 0.50131 + }, + { + "epoch": 1.94496, + "grad_norm": 0.5876825541699544, + "learning_rate": 1.5085711034197602e-06, + "loss": 0.26952025294303894, + "memory(GiB)": 77.0, + "step": 6078, + "token_acc": 0.8691425238187828, + "train_speed(iter/s)": 0.501229 + }, + { + "epoch": 1.94528, + "grad_norm": 0.6478948055540451, + "learning_rate": 1.507761600210617e-06, + "loss": 0.307394802570343, + "memory(GiB)": 77.0, + "step": 6079, + "token_acc": 0.913128355295266, + "train_speed(iter/s)": 0.501144 + }, + { + "epoch": 1.9456, + "grad_norm": 0.6950402662665172, + "learning_rate": 1.506952220468329e-06, + "loss": 0.27547651529312134, + "memory(GiB)": 77.0, + "step": 6080, + "token_acc": 0.9471468662301216, + "train_speed(iter/s)": 0.501057 + }, + { + "epoch": 1.94592, + "grad_norm": 0.7256910315030373, + "learning_rate": 1.5061429642936107e-06, + "loss": 0.31844791769981384, + "memory(GiB)": 77.0, + "step": 6081, + "token_acc": 0.9392983839180133, + "train_speed(iter/s)": 0.500975 + }, + { + "epoch": 1.94624, + "grad_norm": 0.6531057018908032, + "learning_rate": 1.5053338317871596e-06, + "loss": 0.3420600891113281, + "memory(GiB)": 77.0, + "step": 6082, + "token_acc": 0.8337412587412587, + "train_speed(iter/s)": 0.500892 + }, + { + "epoch": 1.9465599999999998, + "grad_norm": 0.6744643950153133, + "learning_rate": 1.504524823049658e-06, + "loss": 0.2870028018951416, + "memory(GiB)": 77.0, + "step": 6083, + "token_acc": 0.8617614269788183, + "train_speed(iter/s)": 0.500808 + }, + { + "epoch": 1.9468800000000002, + "grad_norm": 0.6552257290663662, + "learning_rate": 1.5037159381817734e-06, + "loss": 0.39541250467300415, + "memory(GiB)": 77.0, + "step": 6084, + "token_acc": 0.927267458707621, + "train_speed(iter/s)": 0.500716 + }, + { + "epoch": 1.9472, + "grad_norm": 0.6756909177732693, + "learning_rate": 1.5029071772841575e-06, + "loss": 0.2820843756198883, + "memory(GiB)": 77.0, + "step": 6085, + "token_acc": 0.8726600415992605, + "train_speed(iter/s)": 0.500631 + }, + { + "epoch": 1.94752, + "grad_norm": 0.6720161532336698, + "learning_rate": 1.5020985404574463e-06, + "loss": 0.3009946942329407, + "memory(GiB)": 77.0, + "step": 6086, + "token_acc": 0.8666548295454546, + "train_speed(iter/s)": 0.500548 + }, + { + "epoch": 1.94784, + "grad_norm": 0.6547794691566544, + "learning_rate": 1.5012900278022608e-06, + "loss": 0.3260718584060669, + "memory(GiB)": 77.0, + "step": 6087, + "token_acc": 0.9112616973217167, + "train_speed(iter/s)": 0.500464 + }, + { + "epoch": 1.9481600000000001, + "grad_norm": 0.687084767526686, + "learning_rate": 1.5004816394192062e-06, + "loss": 0.32392793893814087, + "memory(GiB)": 77.0, + "step": 6088, + "token_acc": 0.907914998460117, + "train_speed(iter/s)": 0.500382 + }, + { + "epoch": 1.94848, + "grad_norm": 0.6674016750329406, + "learning_rate": 1.4996733754088718e-06, + "loss": 0.26604583859443665, + "memory(GiB)": 77.0, + 
"step": 6089, + "token_acc": 0.8991628614916286, + "train_speed(iter/s)": 0.500302 + }, + { + "epoch": 1.9487999999999999, + "grad_norm": 0.7361482379477633, + "learning_rate": 1.4988652358718336e-06, + "loss": 0.3266485333442688, + "memory(GiB)": 77.0, + "step": 6090, + "token_acc": 0.844110275689223, + "train_speed(iter/s)": 0.500215 + }, + { + "epoch": 1.94912, + "grad_norm": 0.7122447380954479, + "learning_rate": 1.4980572209086497e-06, + "loss": 0.4044904112815857, + "memory(GiB)": 77.0, + "step": 6091, + "token_acc": 0.8836729429971627, + "train_speed(iter/s)": 0.500123 + }, + { + "epoch": 1.94944, + "grad_norm": 0.68812267468194, + "learning_rate": 1.4972493306198632e-06, + "loss": 0.42181462049484253, + "memory(GiB)": 77.0, + "step": 6092, + "token_acc": 0.906856403622251, + "train_speed(iter/s)": 0.500042 + }, + { + "epoch": 1.94976, + "grad_norm": 0.6534877258641516, + "learning_rate": 1.4964415651060037e-06, + "loss": 0.3482051491737366, + "memory(GiB)": 77.0, + "step": 6093, + "token_acc": 0.9493670886075949, + "train_speed(iter/s)": 0.499955 + }, + { + "epoch": 1.95008, + "grad_norm": 0.6454484271420845, + "learning_rate": 1.4956339244675814e-06, + "loss": 0.34241271018981934, + "memory(GiB)": 77.0, + "step": 6094, + "token_acc": 0.9289012003693444, + "train_speed(iter/s)": 0.499873 + }, + { + "epoch": 1.9504000000000001, + "grad_norm": 0.6874196078121498, + "learning_rate": 1.494826408805094e-06, + "loss": 0.2808082401752472, + "memory(GiB)": 77.0, + "step": 6095, + "token_acc": 0.9156792504822265, + "train_speed(iter/s)": 0.499795 + }, + { + "epoch": 1.95072, + "grad_norm": 0.6514567906175884, + "learning_rate": 1.494019018219022e-06, + "loss": 0.2554033100605011, + "memory(GiB)": 77.0, + "step": 6096, + "token_acc": 0.9019762845849802, + "train_speed(iter/s)": 0.499704 + }, + { + "epoch": 1.9510399999999999, + "grad_norm": 0.6346510766023502, + "learning_rate": 1.4932117528098328e-06, + "loss": 0.3046174645423889, + "memory(GiB)": 77.0, + "step": 6097, + "token_acc": 0.932027972027972, + "train_speed(iter/s)": 0.499626 + }, + { + "epoch": 1.95136, + "grad_norm": 0.6302648789186236, + "learning_rate": 1.4924046126779757e-06, + "loss": 0.3329831659793854, + "memory(GiB)": 77.0, + "step": 6098, + "token_acc": 0.9453667370962874, + "train_speed(iter/s)": 0.499538 + }, + { + "epoch": 1.95168, + "grad_norm": 0.6893038088201816, + "learning_rate": 1.4915975979238856e-06, + "loss": 0.43125808238983154, + "memory(GiB)": 77.0, + "step": 6099, + "token_acc": 0.8751217137293087, + "train_speed(iter/s)": 0.499455 + }, + { + "epoch": 1.952, + "grad_norm": 0.6115702231217179, + "learning_rate": 1.4907907086479812e-06, + "loss": 0.3837818503379822, + "memory(GiB)": 77.0, + "step": 6100, + "token_acc": 0.8974820143884892, + "train_speed(iter/s)": 0.499361 + }, + { + "epoch": 1.95232, + "grad_norm": 0.6541273172613181, + "learning_rate": 1.4899839449506659e-06, + "loss": 0.31926968693733215, + "memory(GiB)": 77.0, + "step": 6101, + "token_acc": 0.8909519599174771, + "train_speed(iter/s)": 0.499281 + }, + { + "epoch": 1.9526400000000002, + "grad_norm": 0.6661904953900442, + "learning_rate": 1.4891773069323274e-06, + "loss": 0.3526824712753296, + "memory(GiB)": 77.0, + "step": 6102, + "token_acc": 0.9423739430003132, + "train_speed(iter/s)": 0.499197 + }, + { + "epoch": 1.95296, + "grad_norm": 0.6603217547714749, + "learning_rate": 1.488370794693338e-06, + "loss": 0.2926448583602905, + "memory(GiB)": 77.0, + "step": 6103, + "token_acc": 0.8354157690650582, + "train_speed(iter/s)": 0.49911 + }, 
+ { + "epoch": 1.95328, + "grad_norm": 0.6297922840560644, + "learning_rate": 1.487564408334054e-06, + "loss": 0.36844706535339355, + "memory(GiB)": 77.0, + "step": 6104, + "token_acc": 0.8811207834602829, + "train_speed(iter/s)": 0.49902 + }, + { + "epoch": 1.9536, + "grad_norm": 0.657509631374229, + "learning_rate": 1.486758147954816e-06, + "loss": 0.3002452254295349, + "memory(GiB)": 77.0, + "step": 6105, + "token_acc": 0.9102015882712279, + "train_speed(iter/s)": 0.498943 + }, + { + "epoch": 1.95392, + "grad_norm": 0.615736303091523, + "learning_rate": 1.485952013655949e-06, + "loss": 0.38913261890411377, + "memory(GiB)": 77.0, + "step": 6106, + "token_acc": 0.8548490687219011, + "train_speed(iter/s)": 0.498849 + }, + { + "epoch": 1.95424, + "grad_norm": 0.6601705390014104, + "learning_rate": 1.4851460055377637e-06, + "loss": 0.2964267134666443, + "memory(GiB)": 77.0, + "step": 6107, + "token_acc": 0.8949263502454992, + "train_speed(iter/s)": 0.498771 + }, + { + "epoch": 1.9545599999999999, + "grad_norm": 0.6014094516689207, + "learning_rate": 1.4843401237005537e-06, + "loss": 0.2632162272930145, + "memory(GiB)": 77.0, + "step": 6108, + "token_acc": 0.9398606527319399, + "train_speed(iter/s)": 0.498694 + }, + { + "epoch": 1.95488, + "grad_norm": 0.6630808240243813, + "learning_rate": 1.4835343682445952e-06, + "loss": 0.31942158937454224, + "memory(GiB)": 77.0, + "step": 6109, + "token_acc": 0.8676635514018691, + "train_speed(iter/s)": 0.498604 + }, + { + "epoch": 1.9552, + "grad_norm": 0.7165848326530455, + "learning_rate": 1.4827287392701514e-06, + "loss": 0.37141695618629456, + "memory(GiB)": 77.0, + "step": 6110, + "token_acc": 0.8985700033255737, + "train_speed(iter/s)": 0.498511 + }, + { + "epoch": 1.95552, + "grad_norm": 0.6083374960699577, + "learning_rate": 1.4819232368774691e-06, + "loss": 0.26834914088249207, + "memory(GiB)": 77.0, + "step": 6111, + "token_acc": 0.9385159010600707, + "train_speed(iter/s)": 0.498425 + }, + { + "epoch": 1.95584, + "grad_norm": 0.6976346601593579, + "learning_rate": 1.4811178611667784e-06, + "loss": 0.3218190670013428, + "memory(GiB)": 77.0, + "step": 6112, + "token_acc": 0.9077649077649078, + "train_speed(iter/s)": 0.498336 + }, + { + "epoch": 1.9561600000000001, + "grad_norm": 0.6584076368343119, + "learning_rate": 1.4803126122382954e-06, + "loss": 0.35007405281066895, + "memory(GiB)": 77.0, + "step": 6113, + "token_acc": 0.8930081300813009, + "train_speed(iter/s)": 0.498258 + }, + { + "epoch": 1.95648, + "grad_norm": 0.6793302272112121, + "learning_rate": 1.4795074901922191e-06, + "loss": 0.34730130434036255, + "memory(GiB)": 77.0, + "step": 6114, + "token_acc": 0.848643006263048, + "train_speed(iter/s)": 0.498173 + }, + { + "epoch": 1.9567999999999999, + "grad_norm": 0.7125752528788022, + "learning_rate": 1.4787024951287327e-06, + "loss": 0.2612687945365906, + "memory(GiB)": 77.0, + "step": 6115, + "token_acc": 0.8959578207381371, + "train_speed(iter/s)": 0.498098 + }, + { + "epoch": 1.95712, + "grad_norm": 0.6323225109940287, + "learning_rate": 1.4778976271480038e-06, + "loss": 0.27026107907295227, + "memory(GiB)": 77.0, + "step": 6116, + "token_acc": 0.9053821577783239, + "train_speed(iter/s)": 0.498003 + }, + { + "epoch": 1.95744, + "grad_norm": 0.6308663311245524, + "learning_rate": 1.4770928863501844e-06, + "loss": 0.3441425561904907, + "memory(GiB)": 77.0, + "step": 6117, + "token_acc": 0.8651102464332037, + "train_speed(iter/s)": 0.497917 + }, + { + "epoch": 1.95776, + "grad_norm": 0.6556969131187231, + "learning_rate": 
1.4762882728354106e-06, + "loss": 0.2724141776561737, + "memory(GiB)": 77.0, + "step": 6118, + "token_acc": 0.9330885352917176, + "train_speed(iter/s)": 0.497832 + }, + { + "epoch": 1.95808, + "grad_norm": 0.6880755172081827, + "learning_rate": 1.4754837867038024e-06, + "loss": 0.34172946214675903, + "memory(GiB)": 77.0, + "step": 6119, + "token_acc": 0.9342379958246346, + "train_speed(iter/s)": 0.497741 + }, + { + "epoch": 1.9584000000000001, + "grad_norm": 0.7247568836056317, + "learning_rate": 1.474679428055464e-06, + "loss": 0.45243844389915466, + "memory(GiB)": 77.0, + "step": 6120, + "token_acc": 0.893841642228739, + "train_speed(iter/s)": 0.497648 + }, + { + "epoch": 1.95872, + "grad_norm": 0.6880009685195677, + "learning_rate": 1.4738751969904845e-06, + "loss": 0.38797372579574585, + "memory(GiB)": 77.0, + "step": 6121, + "token_acc": 0.9108229690394284, + "train_speed(iter/s)": 0.497563 + }, + { + "epoch": 1.95904, + "grad_norm": 0.6203503235982606, + "learning_rate": 1.4730710936089364e-06, + "loss": 0.2575225830078125, + "memory(GiB)": 77.0, + "step": 6122, + "token_acc": 0.8891882788817784, + "train_speed(iter/s)": 0.497484 + }, + { + "epoch": 1.95936, + "grad_norm": 0.6748314587626005, + "learning_rate": 1.4722671180108761e-06, + "loss": 0.2528756260871887, + "memory(GiB)": 77.0, + "step": 6123, + "token_acc": 0.9421081376297106, + "train_speed(iter/s)": 0.497399 + }, + { + "epoch": 1.95968, + "grad_norm": 0.6831264770340943, + "learning_rate": 1.4714632702963438e-06, + "loss": 0.33375677466392517, + "memory(GiB)": 77.0, + "step": 6124, + "token_acc": 0.9359587780640413, + "train_speed(iter/s)": 0.49732 + }, + { + "epoch": 1.96, + "grad_norm": 0.6580407598584488, + "learning_rate": 1.4706595505653655e-06, + "loss": 0.34414970874786377, + "memory(GiB)": 77.0, + "step": 6125, + "token_acc": 0.8612502019059926, + "train_speed(iter/s)": 0.497234 + }, + { + "epoch": 1.9603199999999998, + "grad_norm": 0.6964517514925649, + "learning_rate": 1.4698559589179495e-06, + "loss": 0.36748045682907104, + "memory(GiB)": 77.0, + "step": 6126, + "token_acc": 0.856794425087108, + "train_speed(iter/s)": 0.497154 + }, + { + "epoch": 1.9606400000000002, + "grad_norm": 0.659019099465111, + "learning_rate": 1.469052495454089e-06, + "loss": 0.32038140296936035, + "memory(GiB)": 77.0, + "step": 6127, + "token_acc": 0.8729311265349706, + "train_speed(iter/s)": 0.497073 + }, + { + "epoch": 1.96096, + "grad_norm": 0.6748538527182637, + "learning_rate": 1.4682491602737617e-06, + "loss": 0.3626265525817871, + "memory(GiB)": 77.0, + "step": 6128, + "token_acc": 0.9328605200945627, + "train_speed(iter/s)": 0.496993 + }, + { + "epoch": 1.96128, + "grad_norm": 0.6450169995750058, + "learning_rate": 1.4674459534769275e-06, + "loss": 0.31171733140945435, + "memory(GiB)": 77.0, + "step": 6129, + "token_acc": 0.9084057971014493, + "train_speed(iter/s)": 0.496903 + }, + { + "epoch": 1.9616, + "grad_norm": 0.7152298874353891, + "learning_rate": 1.4666428751635325e-06, + "loss": 0.27280616760253906, + "memory(GiB)": 77.0, + "step": 6130, + "token_acc": 0.917312661498708, + "train_speed(iter/s)": 0.496816 + }, + { + "epoch": 1.96192, + "grad_norm": 0.5724695267917647, + "learning_rate": 1.4658399254335058e-06, + "loss": 0.3027648329734802, + "memory(GiB)": 77.0, + "step": 6131, + "token_acc": 0.966658310353472, + "train_speed(iter/s)": 0.496704 + }, + { + "epoch": 1.96224, + "grad_norm": 0.6853959890996483, + "learning_rate": 1.4650371043867604e-06, + "loss": 0.29221177101135254, + "memory(GiB)": 77.0, + "step": 6132, 
+ "token_acc": 0.9323734729493892, + "train_speed(iter/s)": 0.496623 + }, + { + "epoch": 1.9625599999999999, + "grad_norm": 0.6722690427432759, + "learning_rate": 1.4642344121231933e-06, + "loss": 0.428528755903244, + "memory(GiB)": 77.0, + "step": 6133, + "token_acc": 0.921772754312909, + "train_speed(iter/s)": 0.496535 + }, + { + "epoch": 1.96288, + "grad_norm": 0.638549308536956, + "learning_rate": 1.4634318487426856e-06, + "loss": 0.37777599692344666, + "memory(GiB)": 77.0, + "step": 6134, + "token_acc": 0.8741119636260302, + "train_speed(iter/s)": 0.496451 + }, + { + "epoch": 1.9632, + "grad_norm": 0.630693441436697, + "learning_rate": 1.462629414345103e-06, + "loss": 0.22622519731521606, + "memory(GiB)": 77.0, + "step": 6135, + "token_acc": 0.9409566517189836, + "train_speed(iter/s)": 0.49637 + }, + { + "epoch": 1.96352, + "grad_norm": 0.6936060447269499, + "learning_rate": 1.4618271090302942e-06, + "loss": 0.34432482719421387, + "memory(GiB)": 77.0, + "step": 6136, + "token_acc": 0.9076544943820225, + "train_speed(iter/s)": 0.496289 + }, + { + "epoch": 1.96384, + "grad_norm": 0.716818182070456, + "learning_rate": 1.461024932898092e-06, + "loss": 0.3308866322040558, + "memory(GiB)": 77.0, + "step": 6137, + "token_acc": 0.9162711220808809, + "train_speed(iter/s)": 0.496203 + }, + { + "epoch": 1.9641600000000001, + "grad_norm": 0.682898773857019, + "learning_rate": 1.460222886048313e-06, + "loss": 0.41483670473098755, + "memory(GiB)": 77.0, + "step": 6138, + "token_acc": 0.8639293387443304, + "train_speed(iter/s)": 0.49611 + }, + { + "epoch": 1.96448, + "grad_norm": 0.6139091685581068, + "learning_rate": 1.4594209685807586e-06, + "loss": 0.2894442081451416, + "memory(GiB)": 77.0, + "step": 6139, + "token_acc": 0.9085746734809768, + "train_speed(iter/s)": 0.496032 + }, + { + "epoch": 1.9647999999999999, + "grad_norm": 0.6706126177028278, + "learning_rate": 1.4586191805952132e-06, + "loss": 0.3400452733039856, + "memory(GiB)": 77.0, + "step": 6140, + "token_acc": 0.9357347523536635, + "train_speed(iter/s)": 0.495953 + }, + { + "epoch": 1.96512, + "grad_norm": 0.6513412528609213, + "learning_rate": 1.4578175221914458e-06, + "loss": 0.3367159366607666, + "memory(GiB)": 77.0, + "step": 6141, + "token_acc": 0.9259818731117825, + "train_speed(iter/s)": 0.495874 + }, + { + "epoch": 1.96544, + "grad_norm": 0.7241425815388828, + "learning_rate": 1.4570159934692085e-06, + "loss": 0.3670831322669983, + "memory(GiB)": 77.0, + "step": 6142, + "token_acc": 0.8603486646884273, + "train_speed(iter/s)": 0.49579 + }, + { + "epoch": 1.96576, + "grad_norm": 0.6329138759014943, + "learning_rate": 1.4562145945282374e-06, + "loss": 0.2916443645954132, + "memory(GiB)": 77.0, + "step": 6143, + "token_acc": 0.8815952002823363, + "train_speed(iter/s)": 0.495707 + }, + { + "epoch": 1.96608, + "grad_norm": 0.6798941650212899, + "learning_rate": 1.4554133254682534e-06, + "loss": 0.3567621111869812, + "memory(GiB)": 77.0, + "step": 6144, + "token_acc": 0.8357941834451902, + "train_speed(iter/s)": 0.495615 + }, + { + "epoch": 1.9664000000000001, + "grad_norm": 0.6660583844702695, + "learning_rate": 1.45461218638896e-06, + "loss": 0.33459198474884033, + "memory(GiB)": 77.0, + "step": 6145, + "token_acc": 0.902409997024695, + "train_speed(iter/s)": 0.495532 + }, + { + "epoch": 1.96672, + "grad_norm": 0.6781978271557764, + "learning_rate": 1.4538111773900455e-06, + "loss": 0.3397834002971649, + "memory(GiB)": 77.0, + "step": 6146, + "token_acc": 0.9036805011746281, + "train_speed(iter/s)": 0.495454 + }, + { + "epoch": 
1.96704, + "grad_norm": 0.6688919930509392, + "learning_rate": 1.4530102985711808e-06, + "loss": 0.27728456258773804, + "memory(GiB)": 77.0, + "step": 6147, + "token_acc": 0.9208972845336482, + "train_speed(iter/s)": 0.495368 + }, + { + "epoch": 1.96736, + "grad_norm": 0.6529162782809687, + "learning_rate": 1.452209550032022e-06, + "loss": 0.3436664342880249, + "memory(GiB)": 77.0, + "step": 6148, + "token_acc": 0.9180182181737392, + "train_speed(iter/s)": 0.495289 + }, + { + "epoch": 1.96768, + "grad_norm": 0.6289461485118846, + "learning_rate": 1.4514089318722086e-06, + "loss": 0.3414791226387024, + "memory(GiB)": 77.0, + "step": 6149, + "token_acc": 0.9230482578721818, + "train_speed(iter/s)": 0.495197 + }, + { + "epoch": 1.968, + "grad_norm": 0.6318716559268863, + "learning_rate": 1.4506084441913632e-06, + "loss": 0.3231266438961029, + "memory(GiB)": 77.0, + "step": 6150, + "token_acc": 0.9073066918893355, + "train_speed(iter/s)": 0.495112 + }, + { + "epoch": 1.9683199999999998, + "grad_norm": 0.6345849206464674, + "learning_rate": 1.4498080870890923e-06, + "loss": 0.30227941274642944, + "memory(GiB)": 77.0, + "step": 6151, + "token_acc": 0.941717791411043, + "train_speed(iter/s)": 0.495036 + }, + { + "epoch": 1.96864, + "grad_norm": 0.6036941108022849, + "learning_rate": 1.4490078606649873e-06, + "loss": 0.37779006361961365, + "memory(GiB)": 77.0, + "step": 6152, + "token_acc": 0.8754863813229572, + "train_speed(iter/s)": 0.494949 + }, + { + "epoch": 1.96896, + "grad_norm": 0.6296841088903781, + "learning_rate": 1.448207765018622e-06, + "loss": 0.3653739094734192, + "memory(GiB)": 77.0, + "step": 6153, + "token_acc": 0.8847744023009168, + "train_speed(iter/s)": 0.494864 + }, + { + "epoch": 1.96928, + "grad_norm": 0.6331322978536343, + "learning_rate": 1.4474078002495546e-06, + "loss": 0.3337702453136444, + "memory(GiB)": 77.0, + "step": 6154, + "token_acc": 0.952508524111057, + "train_speed(iter/s)": 0.494785 + }, + { + "epoch": 1.9696, + "grad_norm": 0.6476446307427521, + "learning_rate": 1.4466079664573264e-06, + "loss": 0.3606744408607483, + "memory(GiB)": 77.0, + "step": 6155, + "token_acc": 0.8330206378986866, + "train_speed(iter/s)": 0.494699 + }, + { + "epoch": 1.9699200000000001, + "grad_norm": 0.6822709530798926, + "learning_rate": 1.445808263741463e-06, + "loss": 0.2839420437812805, + "memory(GiB)": 77.0, + "step": 6156, + "token_acc": 0.9675135360266556, + "train_speed(iter/s)": 0.494624 + }, + { + "epoch": 1.97024, + "grad_norm": 0.6701540465015802, + "learning_rate": 1.4450086922014739e-06, + "loss": 0.33290109038352966, + "memory(GiB)": 77.0, + "step": 6157, + "token_acc": 0.884308876064853, + "train_speed(iter/s)": 0.494547 + }, + { + "epoch": 1.9705599999999999, + "grad_norm": 0.6085062542124331, + "learning_rate": 1.4442092519368518e-06, + "loss": 0.2800004184246063, + "memory(GiB)": 77.0, + "step": 6158, + "token_acc": 0.8954122553737568, + "train_speed(iter/s)": 0.494472 + }, + { + "epoch": 1.97088, + "grad_norm": 0.6717683747928157, + "learning_rate": 1.4434099430470732e-06, + "loss": 0.2733840048313141, + "memory(GiB)": 77.0, + "step": 6159, + "token_acc": 0.8984034833091437, + "train_speed(iter/s)": 0.494388 + }, + { + "epoch": 1.9712, + "grad_norm": 0.6758230146716436, + "learning_rate": 1.4426107656315979e-06, + "loss": 0.4019157588481903, + "memory(GiB)": 77.0, + "step": 6160, + "token_acc": 0.9378388146006505, + "train_speed(iter/s)": 0.494304 + }, + { + "epoch": 1.97152, + "grad_norm": 0.691774599877012, + "learning_rate": 1.4418117197898696e-06, + "loss": 
0.36370226740837097, + "memory(GiB)": 77.0, + "step": 6161, + "token_acc": 0.9179170344218888, + "train_speed(iter/s)": 0.494221 + }, + { + "epoch": 1.97184, + "grad_norm": 0.7057601152260785, + "learning_rate": 1.4410128056213163e-06, + "loss": 0.37282243371009827, + "memory(GiB)": 77.0, + "step": 6162, + "token_acc": 0.8767295597484277, + "train_speed(iter/s)": 0.494143 + }, + { + "epoch": 1.9721600000000001, + "grad_norm": 0.6594390026562404, + "learning_rate": 1.4402140232253486e-06, + "loss": 0.34878072142601013, + "memory(GiB)": 77.0, + "step": 6163, + "token_acc": 0.8376050420168067, + "train_speed(iter/s)": 0.494061 + }, + { + "epoch": 1.97248, + "grad_norm": 0.6663053295470879, + "learning_rate": 1.439415372701361e-06, + "loss": 0.38500314950942993, + "memory(GiB)": 77.0, + "step": 6164, + "token_acc": 0.7999142183143899, + "train_speed(iter/s)": 0.49398 + }, + { + "epoch": 1.9727999999999999, + "grad_norm": 0.6557119449726049, + "learning_rate": 1.4386168541487322e-06, + "loss": 0.3142036199569702, + "memory(GiB)": 77.0, + "step": 6165, + "token_acc": 0.9041970802919708, + "train_speed(iter/s)": 0.493888 + }, + { + "epoch": 1.97312, + "grad_norm": 0.6703101216297059, + "learning_rate": 1.4378184676668233e-06, + "loss": 0.3659299612045288, + "memory(GiB)": 77.0, + "step": 6166, + "token_acc": 0.9505119453924915, + "train_speed(iter/s)": 0.493811 + }, + { + "epoch": 1.97344, + "grad_norm": 0.6565961507146222, + "learning_rate": 1.4370202133549802e-06, + "loss": 0.31276875734329224, + "memory(GiB)": 77.0, + "step": 6167, + "token_acc": 0.9022145527753811, + "train_speed(iter/s)": 0.493728 + }, + { + "epoch": 1.97376, + "grad_norm": 0.6878590477726254, + "learning_rate": 1.4362220913125313e-06, + "loss": 0.35007160902023315, + "memory(GiB)": 77.0, + "step": 6168, + "token_acc": 0.8567597765363129, + "train_speed(iter/s)": 0.493641 + }, + { + "epoch": 1.9740799999999998, + "grad_norm": 0.7284423113837982, + "learning_rate": 1.4354241016387898e-06, + "loss": 0.3119981586933136, + "memory(GiB)": 77.0, + "step": 6169, + "token_acc": 0.8834723670490093, + "train_speed(iter/s)": 0.493559 + }, + { + "epoch": 1.9744000000000002, + "grad_norm": 0.6478370527806383, + "learning_rate": 1.434626244433051e-06, + "loss": 0.3520175814628601, + "memory(GiB)": 77.0, + "step": 6170, + "token_acc": 0.8445414847161572, + "train_speed(iter/s)": 0.493479 + }, + { + "epoch": 1.97472, + "grad_norm": 0.6754769449502908, + "learning_rate": 1.4338285197945946e-06, + "loss": 0.38263267278671265, + "memory(GiB)": 77.0, + "step": 6171, + "token_acc": 0.8549382716049383, + "train_speed(iter/s)": 0.493404 + }, + { + "epoch": 1.97504, + "grad_norm": 0.6871358754455702, + "learning_rate": 1.4330309278226837e-06, + "loss": 0.3814120292663574, + "memory(GiB)": 77.0, + "step": 6172, + "token_acc": 0.8832093173730184, + "train_speed(iter/s)": 0.493321 + }, + { + "epoch": 1.97536, + "grad_norm": 0.6476006688964218, + "learning_rate": 1.4322334686165645e-06, + "loss": 0.31091099977493286, + "memory(GiB)": 77.0, + "step": 6173, + "token_acc": 0.9060825716987079, + "train_speed(iter/s)": 0.493241 + }, + { + "epoch": 1.97568, + "grad_norm": 0.6609511875141101, + "learning_rate": 1.4314361422754674e-06, + "loss": 0.3165445327758789, + "memory(GiB)": 77.0, + "step": 6174, + "token_acc": 0.9272908366533864, + "train_speed(iter/s)": 0.493168 + }, + { + "epoch": 1.976, + "grad_norm": 0.6438541974488107, + "learning_rate": 1.4306389488986056e-06, + "loss": 0.2953343987464905, + "memory(GiB)": 77.0, + "step": 6175, + "token_acc": 
0.9688172043010753, + "train_speed(iter/s)": 0.49309 + }, + { + "epoch": 1.9763199999999999, + "grad_norm": 0.6728973190127209, + "learning_rate": 1.4298418885851756e-06, + "loss": 0.4668581783771515, + "memory(GiB)": 77.0, + "step": 6176, + "token_acc": 0.8550242904627973, + "train_speed(iter/s)": 0.493011 + }, + { + "epoch": 1.97664, + "grad_norm": 0.6550628360992128, + "learning_rate": 1.4290449614343582e-06, + "loss": 0.27539438009262085, + "memory(GiB)": 77.0, + "step": 6177, + "token_acc": 0.8823397579560736, + "train_speed(iter/s)": 0.492935 + }, + { + "epoch": 1.97696, + "grad_norm": 0.634801743965742, + "learning_rate": 1.4282481675453176e-06, + "loss": 0.331947386264801, + "memory(GiB)": 77.0, + "step": 6178, + "token_acc": 0.8771929824561403, + "train_speed(iter/s)": 0.492861 + }, + { + "epoch": 1.97728, + "grad_norm": 0.6394764604536459, + "learning_rate": 1.4274515070172008e-06, + "loss": 0.35051408410072327, + "memory(GiB)": 77.0, + "step": 6179, + "token_acc": 0.9157921679037625, + "train_speed(iter/s)": 0.492785 + }, + { + "epoch": 1.9776, + "grad_norm": 0.6116510037341529, + "learning_rate": 1.4266549799491371e-06, + "loss": 0.346442848443985, + "memory(GiB)": 77.0, + "step": 6180, + "token_acc": 0.9191246431969553, + "train_speed(iter/s)": 0.492699 + }, + { + "epoch": 1.9779200000000001, + "grad_norm": 0.6410519864010799, + "learning_rate": 1.4258585864402406e-06, + "loss": 0.341161847114563, + "memory(GiB)": 77.0, + "step": 6181, + "token_acc": 0.8881691101261507, + "train_speed(iter/s)": 0.492609 + }, + { + "epoch": 1.97824, + "grad_norm": 0.6785657839034878, + "learning_rate": 1.4250623265896102e-06, + "loss": 0.3302689790725708, + "memory(GiB)": 77.0, + "step": 6182, + "token_acc": 0.8757976298997265, + "train_speed(iter/s)": 0.492536 + }, + { + "epoch": 1.9785599999999999, + "grad_norm": 0.6015983576343867, + "learning_rate": 1.4242662004963262e-06, + "loss": 0.2860756516456604, + "memory(GiB)": 77.0, + "step": 6183, + "token_acc": 0.9139288623404833, + "train_speed(iter/s)": 0.492453 + }, + { + "epoch": 1.97888, + "grad_norm": 0.7051768815540522, + "learning_rate": 1.423470208259452e-06, + "loss": 0.3102530837059021, + "memory(GiB)": 77.0, + "step": 6184, + "token_acc": 0.9267241379310345, + "train_speed(iter/s)": 0.49238 + }, + { + "epoch": 1.9792, + "grad_norm": 0.7479706694616594, + "learning_rate": 1.4226743499780357e-06, + "loss": 0.37537911534309387, + "memory(GiB)": 77.0, + "step": 6185, + "token_acc": 0.8722516003339827, + "train_speed(iter/s)": 0.492302 + }, + { + "epoch": 1.97952, + "grad_norm": 0.6088481862006249, + "learning_rate": 1.4218786257511075e-06, + "loss": 0.31943970918655396, + "memory(GiB)": 77.0, + "step": 6186, + "token_acc": 0.883130081300813, + "train_speed(iter/s)": 0.492215 + }, + { + "epoch": 1.97984, + "grad_norm": 0.7134223237831429, + "learning_rate": 1.421083035677682e-06, + "loss": 0.3924112319946289, + "memory(GiB)": 77.0, + "step": 6187, + "token_acc": 0.9018764659890539, + "train_speed(iter/s)": 0.492133 + }, + { + "epoch": 1.9801600000000001, + "grad_norm": 0.6553219426852632, + "learning_rate": 1.4202875798567562e-06, + "loss": 0.3115628957748413, + "memory(GiB)": 77.0, + "step": 6188, + "token_acc": 0.8947655398037078, + "train_speed(iter/s)": 0.492045 + }, + { + "epoch": 1.98048, + "grad_norm": 0.6443117986174169, + "learning_rate": 1.4194922583873105e-06, + "loss": 0.2569299340248108, + "memory(GiB)": 77.0, + "step": 6189, + "token_acc": 0.9343501326259946, + "train_speed(iter/s)": 0.491973 + }, + { + "epoch": 1.9808, + 
"grad_norm": 0.6231608195803154, + "learning_rate": 1.4186970713683095e-06, + "loss": 0.3536187410354614, + "memory(GiB)": 77.0, + "step": 6190, + "token_acc": 0.8452914798206278, + "train_speed(iter/s)": 0.491893 + }, + { + "epoch": 1.98112, + "grad_norm": 0.6014706002662821, + "learning_rate": 1.4179020188987002e-06, + "loss": 0.27080798149108887, + "memory(GiB)": 77.0, + "step": 6191, + "token_acc": 0.8876623376623377, + "train_speed(iter/s)": 0.49182 + }, + { + "epoch": 1.98144, + "grad_norm": 0.6345239861239662, + "learning_rate": 1.4171071010774135e-06, + "loss": 0.37191277742385864, + "memory(GiB)": 77.0, + "step": 6192, + "token_acc": 0.959168793292016, + "train_speed(iter/s)": 0.491734 + }, + { + "epoch": 1.98176, + "grad_norm": 0.622884878753365, + "learning_rate": 1.416312318003362e-06, + "loss": 0.3039146661758423, + "memory(GiB)": 77.0, + "step": 6193, + "token_acc": 0.9296924042686755, + "train_speed(iter/s)": 0.491661 + }, + { + "epoch": 1.9820799999999998, + "grad_norm": 0.6798229606737402, + "learning_rate": 1.4155176697754448e-06, + "loss": 0.3627026081085205, + "memory(GiB)": 77.0, + "step": 6194, + "token_acc": 0.9488235294117647, + "train_speed(iter/s)": 0.491584 + }, + { + "epoch": 1.9824000000000002, + "grad_norm": 0.6304222101743455, + "learning_rate": 1.4147231564925392e-06, + "loss": 0.3129785656929016, + "memory(GiB)": 77.0, + "step": 6195, + "token_acc": 0.9243073407597829, + "train_speed(iter/s)": 0.491501 + }, + { + "epoch": 1.98272, + "grad_norm": 0.6971080205590826, + "learning_rate": 1.4139287782535105e-06, + "loss": 0.3947831690311432, + "memory(GiB)": 77.0, + "step": 6196, + "token_acc": 0.8430891238670695, + "train_speed(iter/s)": 0.491425 + }, + { + "epoch": 1.98304, + "grad_norm": 0.6358800895796982, + "learning_rate": 1.4131345351572035e-06, + "loss": 0.2658223509788513, + "memory(GiB)": 77.0, + "step": 6197, + "token_acc": 0.9347206385404789, + "train_speed(iter/s)": 0.491349 + }, + { + "epoch": 1.98336, + "grad_norm": 0.6633820058645746, + "learning_rate": 1.4123404273024505e-06, + "loss": 0.3151981830596924, + "memory(GiB)": 77.0, + "step": 6198, + "token_acc": 0.8796128251663642, + "train_speed(iter/s)": 0.491263 + }, + { + "epoch": 1.98368, + "grad_norm": 0.6419813897785169, + "learning_rate": 1.411546454788063e-06, + "loss": 0.38058170676231384, + "memory(GiB)": 77.0, + "step": 6199, + "token_acc": 0.9068880412833744, + "train_speed(iter/s)": 0.491186 + }, + { + "epoch": 1.984, + "grad_norm": 0.6076398789595148, + "learning_rate": 1.4107526177128376e-06, + "loss": 0.3448725938796997, + "memory(GiB)": 77.0, + "step": 6200, + "token_acc": 0.8998298596341983, + "train_speed(iter/s)": 0.491107 + }, + { + "epoch": 1.9843199999999999, + "grad_norm": 0.6551790222375065, + "learning_rate": 1.4099589161755532e-06, + "loss": 0.2736186981201172, + "memory(GiB)": 77.0, + "step": 6201, + "token_acc": 0.8951048951048951, + "train_speed(iter/s)": 0.491032 + }, + { + "epoch": 1.98464, + "grad_norm": 0.6398903111390416, + "learning_rate": 1.4091653502749722e-06, + "loss": 0.34731680154800415, + "memory(GiB)": 77.0, + "step": 6202, + "token_acc": 0.93515731874145, + "train_speed(iter/s)": 0.490956 + }, + { + "epoch": 1.98496, + "grad_norm": 0.6615273048434458, + "learning_rate": 1.4083719201098404e-06, + "loss": 0.3056912124156952, + "memory(GiB)": 77.0, + "step": 6203, + "token_acc": 0.9277411247251021, + "train_speed(iter/s)": 0.490878 + }, + { + "epoch": 1.98528, + "grad_norm": 0.6960572268811616, + "learning_rate": 1.407578625778886e-06, + "loss": 
0.3637605905532837, + "memory(GiB)": 77.0, + "step": 6204, + "token_acc": 0.9145154464663563, + "train_speed(iter/s)": 0.490805 + }, + { + "epoch": 1.9856, + "grad_norm": 0.6947275188560189, + "learning_rate": 1.4067854673808213e-06, + "loss": 0.36626139283180237, + "memory(GiB)": 77.0, + "step": 6205, + "token_acc": 0.905532666274279, + "train_speed(iter/s)": 0.490727 + }, + { + "epoch": 1.9859200000000001, + "grad_norm": 0.6309074761091893, + "learning_rate": 1.4059924450143402e-06, + "loss": 0.323355495929718, + "memory(GiB)": 77.0, + "step": 6206, + "token_acc": 0.874821319175005, + "train_speed(iter/s)": 0.490636 + }, + { + "epoch": 1.98624, + "grad_norm": 0.6715779421485633, + "learning_rate": 1.4051995587781215e-06, + "loss": 0.3860977292060852, + "memory(GiB)": 77.0, + "step": 6207, + "token_acc": 0.846095526914329, + "train_speed(iter/s)": 0.490557 + }, + { + "epoch": 1.9865599999999999, + "grad_norm": 0.6991340040098337, + "learning_rate": 1.4044068087708269e-06, + "loss": 0.4408336281776428, + "memory(GiB)": 77.0, + "step": 6208, + "token_acc": 0.9070938215102975, + "train_speed(iter/s)": 0.490474 + }, + { + "epoch": 1.98688, + "grad_norm": 0.6724006927821253, + "learning_rate": 1.4036141950910981e-06, + "loss": 0.34312230348587036, + "memory(GiB)": 77.0, + "step": 6209, + "token_acc": 0.9055468924036534, + "train_speed(iter/s)": 0.490389 + }, + { + "epoch": 1.9872, + "grad_norm": 0.7080687930666669, + "learning_rate": 1.402821717837563e-06, + "loss": 0.38494133949279785, + "memory(GiB)": 77.0, + "step": 6210, + "token_acc": 0.8931688233523924, + "train_speed(iter/s)": 0.490316 + }, + { + "epoch": 1.98752, + "grad_norm": 0.6775292010293178, + "learning_rate": 1.4020293771088323e-06, + "loss": 0.3242555856704712, + "memory(GiB)": 77.0, + "step": 6211, + "token_acc": 0.907972177635099, + "train_speed(iter/s)": 0.490245 + }, + { + "epoch": 1.98784, + "grad_norm": 0.6743562020563878, + "learning_rate": 1.4012371730034985e-06, + "loss": 0.35410869121551514, + "memory(GiB)": 77.0, + "step": 6212, + "token_acc": 0.8885257806826434, + "train_speed(iter/s)": 0.490172 + }, + { + "epoch": 1.9881600000000001, + "grad_norm": 0.6544096825371857, + "learning_rate": 1.4004451056201378e-06, + "loss": 0.39585787057876587, + "memory(GiB)": 77.0, + "step": 6213, + "token_acc": 0.8814516129032258, + "train_speed(iter/s)": 0.490086 + }, + { + "epoch": 1.98848, + "grad_norm": 0.6550167638107588, + "learning_rate": 1.3996531750573083e-06, + "loss": 0.32186949253082275, + "memory(GiB)": 77.0, + "step": 6214, + "token_acc": 0.890463607975018, + "train_speed(iter/s)": 0.490006 + }, + { + "epoch": 1.9888, + "grad_norm": 0.6905991912456926, + "learning_rate": 1.398861381413554e-06, + "loss": 0.26942047476768494, + "memory(GiB)": 77.0, + "step": 6215, + "token_acc": 0.9508960573476702, + "train_speed(iter/s)": 0.489935 + }, + { + "epoch": 1.98912, + "grad_norm": 0.5955815940489455, + "learning_rate": 1.398069724787399e-06, + "loss": 0.24962522089481354, + "memory(GiB)": 77.0, + "step": 6216, + "token_acc": 0.934647762516615, + "train_speed(iter/s)": 0.489859 + }, + { + "epoch": 1.98944, + "grad_norm": 0.5893183977234575, + "learning_rate": 1.397278205277351e-06, + "loss": 0.2877993583679199, + "memory(GiB)": 77.0, + "step": 6217, + "token_acc": 0.8732168330955777, + "train_speed(iter/s)": 0.489764 + }, + { + "epoch": 1.98976, + "grad_norm": 0.6703783571354879, + "learning_rate": 1.3964868229819013e-06, + "loss": 0.3144659996032715, + "memory(GiB)": 77.0, + "step": 6218, + "token_acc": 0.927584919746174, + 
"train_speed(iter/s)": 0.489693 + }, + { + "epoch": 1.9900799999999998, + "grad_norm": 0.6691303909412214, + "learning_rate": 1.395695577999523e-06, + "loss": 0.3120879530906677, + "memory(GiB)": 77.0, + "step": 6219, + "token_acc": 0.9471246622925511, + "train_speed(iter/s)": 0.489612 + }, + { + "epoch": 1.9904, + "grad_norm": 0.6558209381829216, + "learning_rate": 1.3949044704286729e-06, + "loss": 0.32325389981269836, + "memory(GiB)": 77.0, + "step": 6220, + "token_acc": 0.9149432955303536, + "train_speed(iter/s)": 0.489535 + }, + { + "epoch": 1.99072, + "grad_norm": 0.6363810966372094, + "learning_rate": 1.3941135003677914e-06, + "loss": 0.3586939573287964, + "memory(GiB)": 77.0, + "step": 6221, + "token_acc": 0.9121495327102803, + "train_speed(iter/s)": 0.489456 + }, + { + "epoch": 1.99104, + "grad_norm": 0.6533034987277567, + "learning_rate": 1.3933226679153017e-06, + "loss": 0.38008826971054077, + "memory(GiB)": 77.0, + "step": 6222, + "token_acc": 0.9168952276467361, + "train_speed(iter/s)": 0.48938 + }, + { + "epoch": 1.99136, + "grad_norm": 0.7082170759834953, + "learning_rate": 1.3925319731696065e-06, + "loss": 0.4023202657699585, + "memory(GiB)": 77.0, + "step": 6223, + "token_acc": 0.8725818932164044, + "train_speed(iter/s)": 0.489292 + }, + { + "epoch": 1.9916800000000001, + "grad_norm": 0.689231941955759, + "learning_rate": 1.391741416229096e-06, + "loss": 0.3916324973106384, + "memory(GiB)": 77.0, + "step": 6224, + "token_acc": 0.8497062993720883, + "train_speed(iter/s)": 0.489219 + }, + { + "epoch": 1.992, + "grad_norm": 0.6906095770125976, + "learning_rate": 1.3909509971921402e-06, + "loss": 0.3288300037384033, + "memory(GiB)": 77.0, + "step": 6225, + "token_acc": 0.8863495985176034, + "train_speed(iter/s)": 0.489142 + }, + { + "epoch": 1.9923199999999999, + "grad_norm": 0.6412740888619631, + "learning_rate": 1.3901607161570946e-06, + "loss": 0.3570561707019806, + "memory(GiB)": 77.0, + "step": 6226, + "token_acc": 0.8936675461741425, + "train_speed(iter/s)": 0.489063 + }, + { + "epoch": 1.99264, + "grad_norm": 0.6564359056354431, + "learning_rate": 1.3893705732222943e-06, + "loss": 0.2990040183067322, + "memory(GiB)": 77.0, + "step": 6227, + "token_acc": 0.9163029525032093, + "train_speed(iter/s)": 0.488991 + }, + { + "epoch": 1.99296, + "grad_norm": 0.6490720960663316, + "learning_rate": 1.3885805684860599e-06, + "loss": 0.3686794638633728, + "memory(GiB)": 77.0, + "step": 6228, + "token_acc": 0.9162671780121444, + "train_speed(iter/s)": 0.48891 + }, + { + "epoch": 1.99328, + "grad_norm": 0.707433163505199, + "learning_rate": 1.3877907020466936e-06, + "loss": 0.4103863835334778, + "memory(GiB)": 77.0, + "step": 6229, + "token_acc": 0.8418249189439555, + "train_speed(iter/s)": 0.488838 + }, + { + "epoch": 1.9936, + "grad_norm": 0.639064069989334, + "learning_rate": 1.3870009740024803e-06, + "loss": 0.31012529134750366, + "memory(GiB)": 77.0, + "step": 6230, + "token_acc": 0.959013209013209, + "train_speed(iter/s)": 0.488766 + }, + { + "epoch": 1.9939200000000001, + "grad_norm": 0.6035828240918374, + "learning_rate": 1.386211384451687e-06, + "loss": 0.32281672954559326, + "memory(GiB)": 77.0, + "step": 6231, + "token_acc": 0.9324165029469548, + "train_speed(iter/s)": 0.488695 + }, + { + "epoch": 1.99424, + "grad_norm": 0.6611670437510239, + "learning_rate": 1.3854219334925668e-06, + "loss": 0.2891325354576111, + "memory(GiB)": 77.0, + "step": 6232, + "token_acc": 0.8625860373647984, + "train_speed(iter/s)": 0.48862 + }, + { + "epoch": 1.9945599999999999, + "grad_norm": 
0.6403094787206112, + "learning_rate": 1.384632621223352e-06, + "loss": 0.35651183128356934, + "memory(GiB)": 77.0, + "step": 6233, + "token_acc": 0.8669043374925728, + "train_speed(iter/s)": 0.488547 + }, + { + "epoch": 1.99488, + "grad_norm": 0.7251928138229143, + "learning_rate": 1.3838434477422585e-06, + "loss": 0.3658021092414856, + "memory(GiB)": 77.0, + "step": 6234, + "token_acc": 0.8776185226019846, + "train_speed(iter/s)": 0.488475 + }, + { + "epoch": 1.9952, + "grad_norm": 0.6406720375806741, + "learning_rate": 1.3830544131474854e-06, + "loss": 0.3538901209831238, + "memory(GiB)": 77.0, + "step": 6235, + "token_acc": 0.9181937172774869, + "train_speed(iter/s)": 0.488401 + }, + { + "epoch": 1.99552, + "grad_norm": 0.674332428429209, + "learning_rate": 1.3822655175372148e-06, + "loss": 0.3063405752182007, + "memory(GiB)": 77.0, + "step": 6236, + "token_acc": 0.9696767744085305, + "train_speed(iter/s)": 0.488321 + }, + { + "epoch": 1.9958399999999998, + "grad_norm": 0.7287636348313371, + "learning_rate": 1.3814767610096118e-06, + "loss": 0.3549909293651581, + "memory(GiB)": 77.0, + "step": 6237, + "token_acc": 0.8766976411722659, + "train_speed(iter/s)": 0.48824 + }, + { + "epoch": 1.9961600000000002, + "grad_norm": 0.7489780066351677, + "learning_rate": 1.3806881436628211e-06, + "loss": 0.33412590622901917, + "memory(GiB)": 77.0, + "step": 6238, + "token_acc": 0.8697060865540385, + "train_speed(iter/s)": 0.488161 + }, + { + "epoch": 1.99648, + "grad_norm": 0.6297133863042959, + "learning_rate": 1.3798996655949742e-06, + "loss": 0.30386292934417725, + "memory(GiB)": 77.0, + "step": 6239, + "token_acc": 0.9483960948396095, + "train_speed(iter/s)": 0.488076 + }, + { + "epoch": 1.9968, + "grad_norm": 0.6804164713677914, + "learning_rate": 1.3791113269041827e-06, + "loss": 0.4344213306903839, + "memory(GiB)": 77.0, + "step": 6240, + "token_acc": 0.9181969949916527, + "train_speed(iter/s)": 0.488 + }, + { + "epoch": 1.99712, + "grad_norm": 0.723278559904203, + "learning_rate": 1.3783231276885422e-06, + "loss": 0.3871975839138031, + "memory(GiB)": 77.0, + "step": 6241, + "token_acc": 0.8600493218249076, + "train_speed(iter/s)": 0.487924 + }, + { + "epoch": 1.99744, + "grad_norm": 1.0351794460256831, + "learning_rate": 1.3775350680461301e-06, + "loss": 0.2772420048713684, + "memory(GiB)": 77.0, + "step": 6242, + "token_acc": 0.8953836930455635, + "train_speed(iter/s)": 0.487853 + }, + { + "epoch": 1.99776, + "grad_norm": 0.6908286111947363, + "learning_rate": 1.3767471480750071e-06, + "loss": 0.3954794406890869, + "memory(GiB)": 77.0, + "step": 6243, + "token_acc": 0.9441398217957505, + "train_speed(iter/s)": 0.487773 + }, + { + "epoch": 1.9980799999999999, + "grad_norm": 0.7072319895703507, + "learning_rate": 1.375959367873216e-06, + "loss": 0.34198257327079773, + "memory(GiB)": 77.0, + "step": 6244, + "token_acc": 0.9428129829984544, + "train_speed(iter/s)": 0.487702 + }, + { + "epoch": 1.9984, + "grad_norm": 0.6721993572234851, + "learning_rate": 1.3751717275387822e-06, + "loss": 0.36377573013305664, + "memory(GiB)": 77.0, + "step": 6245, + "token_acc": 0.9177737881508079, + "train_speed(iter/s)": 0.487628 + }, + { + "epoch": 1.99872, + "grad_norm": 0.7279365328562669, + "learning_rate": 1.3743842271697145e-06, + "loss": 0.29929542541503906, + "memory(GiB)": 77.0, + "step": 6246, + "token_acc": 0.8284605433376455, + "train_speed(iter/s)": 0.487557 + }, + { + "epoch": 1.99904, + "grad_norm": 0.6782404041487161, + "learning_rate": 1.3735968668640025e-06, + "loss": 0.37168416380882263, + 
"memory(GiB)": 77.0, + "step": 6247, + "token_acc": 0.9238438105787871, + "train_speed(iter/s)": 0.487481 + }, + { + "epoch": 1.99936, + "grad_norm": 0.6731925110788946, + "learning_rate": 1.37280964671962e-06, + "loss": 0.3807450532913208, + "memory(GiB)": 77.0, + "step": 6248, + "token_acc": 0.8717622080679406, + "train_speed(iter/s)": 0.487404 + }, + { + "epoch": 1.9996800000000001, + "grad_norm": 0.7398878637027084, + "learning_rate": 1.3720225668345238e-06, + "loss": 0.3801354169845581, + "memory(GiB)": 77.0, + "step": 6249, + "token_acc": 0.9034199726402189, + "train_speed(iter/s)": 0.487327 + }, + { + "epoch": 2.0, + "grad_norm": 0.6658311657869425, + "learning_rate": 1.3712356273066516e-06, + "loss": 0.3215499222278595, + "memory(GiB)": 77.0, + "step": 6250, + "token_acc": 0.9176470588235294, + "train_speed(iter/s)": 0.487232 + }, + { + "epoch": 2.00032, + "grad_norm": 0.6641419559759604, + "learning_rate": 1.3704488282339256e-06, + "loss": 0.35416534543037415, + "memory(GiB)": 77.0, + "step": 6251, + "token_acc": 0.9381212723658051, + "train_speed(iter/s)": 0.485888 + }, + { + "epoch": 2.00064, + "grad_norm": 0.638913669433665, + "learning_rate": 1.369662169714247e-06, + "loss": 0.33075159788131714, + "memory(GiB)": 77.0, + "step": 6252, + "token_acc": 0.8704663212435233, + "train_speed(iter/s)": 0.485806 + }, + { + "epoch": 2.00096, + "grad_norm": 0.6233847051341221, + "learning_rate": 1.368875651845503e-06, + "loss": 0.30727750062942505, + "memory(GiB)": 77.0, + "step": 6253, + "token_acc": 0.904247660187185, + "train_speed(iter/s)": 0.485731 + }, + { + "epoch": 2.00128, + "grad_norm": 0.6021007812702077, + "learning_rate": 1.368089274725562e-06, + "loss": 0.32075488567352295, + "memory(GiB)": 77.0, + "step": 6254, + "token_acc": 0.9399327246516098, + "train_speed(iter/s)": 0.485659 + }, + { + "epoch": 2.0016, + "grad_norm": 0.7051761395574246, + "learning_rate": 1.3673030384522752e-06, + "loss": 0.37023624777793884, + "memory(GiB)": 77.0, + "step": 6255, + "token_acc": 0.8925071134998419, + "train_speed(iter/s)": 0.485585 + }, + { + "epoch": 2.00192, + "grad_norm": 0.6219655814562975, + "learning_rate": 1.3665169431234757e-06, + "loss": 0.3034754991531372, + "memory(GiB)": 77.0, + "step": 6256, + "token_acc": 0.8899122807017544, + "train_speed(iter/s)": 0.485514 + }, + { + "epoch": 2.00224, + "grad_norm": 0.6135519542971858, + "learning_rate": 1.3657309888369796e-06, + "loss": 0.2762676477432251, + "memory(GiB)": 77.0, + "step": 6257, + "token_acc": 0.9266339447171273, + "train_speed(iter/s)": 0.485435 + }, + { + "epoch": 2.00256, + "grad_norm": 0.6146385063625316, + "learning_rate": 1.3649451756905857e-06, + "loss": 0.2932034730911255, + "memory(GiB)": 77.0, + "step": 6258, + "token_acc": 0.9592301324503312, + "train_speed(iter/s)": 0.485327 + }, + { + "epoch": 2.00288, + "grad_norm": 0.6652799466060941, + "learning_rate": 1.364159503782074e-06, + "loss": 0.34401726722717285, + "memory(GiB)": 77.0, + "step": 6259, + "token_acc": 0.8624661246612466, + "train_speed(iter/s)": 0.485244 + }, + { + "epoch": 2.0032, + "grad_norm": 0.6552502724320395, + "learning_rate": 1.3633739732092083e-06, + "loss": 0.32274946570396423, + "memory(GiB)": 77.0, + "step": 6260, + "token_acc": 0.9643660915228808, + "train_speed(iter/s)": 0.485168 + }, + { + "epoch": 2.00352, + "grad_norm": 0.6170705315598688, + "learning_rate": 1.3625885840697343e-06, + "loss": 0.23819832503795624, + "memory(GiB)": 77.0, + "step": 6261, + "token_acc": 0.9387056627255757, + "train_speed(iter/s)": 0.485084 + }, + { + 
"epoch": 2.00384, + "grad_norm": 0.6335385337496, + "learning_rate": 1.3618033364613798e-06, + "loss": 0.3825988471508026, + "memory(GiB)": 77.0, + "step": 6262, + "token_acc": 0.9053079947575361, + "train_speed(iter/s)": 0.484999 + }, + { + "epoch": 2.00416, + "grad_norm": 0.6337024656547701, + "learning_rate": 1.3610182304818553e-06, + "loss": 0.27554988861083984, + "memory(GiB)": 77.0, + "step": 6263, + "token_acc": 0.955336617405583, + "train_speed(iter/s)": 0.484919 + }, + { + "epoch": 2.00448, + "grad_norm": 0.6697183148612654, + "learning_rate": 1.3602332662288536e-06, + "loss": 0.27629971504211426, + "memory(GiB)": 77.0, + "step": 6264, + "token_acc": 0.9449400968646444, + "train_speed(iter/s)": 0.484847 + }, + { + "epoch": 2.0048, + "grad_norm": 0.6283311130261205, + "learning_rate": 1.3594484438000488e-06, + "loss": 0.3472454249858856, + "memory(GiB)": 77.0, + "step": 6265, + "token_acc": 0.9263786242183059, + "train_speed(iter/s)": 0.484766 + }, + { + "epoch": 2.00512, + "grad_norm": 0.6225044460342064, + "learning_rate": 1.3586637632931015e-06, + "loss": 0.32653355598449707, + "memory(GiB)": 77.0, + "step": 6266, + "token_acc": 0.9333694474539544, + "train_speed(iter/s)": 0.484695 + }, + { + "epoch": 2.00544, + "grad_norm": 0.6491492734998456, + "learning_rate": 1.3578792248056488e-06, + "loss": 0.2924652695655823, + "memory(GiB)": 77.0, + "step": 6267, + "token_acc": 0.9116679321690712, + "train_speed(iter/s)": 0.484624 + }, + { + "epoch": 2.00576, + "grad_norm": 0.6371408251528571, + "learning_rate": 1.357094828435314e-06, + "loss": 0.35255298018455505, + "memory(GiB)": 77.0, + "step": 6268, + "token_acc": 0.8303631472763954, + "train_speed(iter/s)": 0.484536 + }, + { + "epoch": 2.00608, + "grad_norm": 0.6522973544588254, + "learning_rate": 1.3563105742797011e-06, + "loss": 0.3472060263156891, + "memory(GiB)": 77.0, + "step": 6269, + "token_acc": 0.8892631578947369, + "train_speed(iter/s)": 0.484462 + }, + { + "epoch": 2.0064, + "grad_norm": 0.6676551760307874, + "learning_rate": 1.3555264624363973e-06, + "loss": 0.30531468987464905, + "memory(GiB)": 77.0, + "step": 6270, + "token_acc": 0.9240131578947368, + "train_speed(iter/s)": 0.484386 + }, + { + "epoch": 2.00672, + "grad_norm": 0.6470696086520049, + "learning_rate": 1.3547424930029712e-06, + "loss": 0.27221906185150146, + "memory(GiB)": 77.0, + "step": 6271, + "token_acc": 0.9446273570787189, + "train_speed(iter/s)": 0.484306 + }, + { + "epoch": 2.00704, + "grad_norm": 0.6431852809245004, + "learning_rate": 1.3539586660769748e-06, + "loss": 0.24540698528289795, + "memory(GiB)": 77.0, + "step": 6272, + "token_acc": 0.927536231884058, + "train_speed(iter/s)": 0.484223 + }, + { + "epoch": 2.00736, + "grad_norm": 0.7317224386219349, + "learning_rate": 1.353174981755942e-06, + "loss": 0.2916277050971985, + "memory(GiB)": 77.0, + "step": 6273, + "token_acc": 0.9722742600224803, + "train_speed(iter/s)": 0.484151 + }, + { + "epoch": 2.00768, + "grad_norm": 0.691297286642262, + "learning_rate": 1.3523914401373877e-06, + "loss": 0.2534326910972595, + "memory(GiB)": 77.0, + "step": 6274, + "token_acc": 0.8772744870305846, + "train_speed(iter/s)": 0.484075 + }, + { + "epoch": 2.008, + "grad_norm": 0.6282734284297579, + "learning_rate": 1.3516080413188109e-06, + "loss": 0.25862953066825867, + "memory(GiB)": 77.0, + "step": 6275, + "token_acc": 0.8987281017518598, + "train_speed(iter/s)": 0.483996 + }, + { + "epoch": 2.00832, + "grad_norm": 0.6911847415075545, + "learning_rate": 1.350824785397692e-06, + "loss": 0.343835711479187, + 
"memory(GiB)": 77.0, + "step": 6276, + "token_acc": 0.8819762122598354, + "train_speed(iter/s)": 0.483928 + }, + { + "epoch": 2.00864, + "grad_norm": 0.6439868207240642, + "learning_rate": 1.3500416724714926e-06, + "loss": 0.333029180765152, + "memory(GiB)": 77.0, + "step": 6277, + "token_acc": 0.8761697860962567, + "train_speed(iter/s)": 0.483845 + }, + { + "epoch": 2.00896, + "grad_norm": 0.7617077901339268, + "learning_rate": 1.3492587026376587e-06, + "loss": 0.3162164092063904, + "memory(GiB)": 77.0, + "step": 6278, + "token_acc": 0.8496062992125984, + "train_speed(iter/s)": 0.483775 + }, + { + "epoch": 2.00928, + "grad_norm": 0.7039280493509195, + "learning_rate": 1.3484758759936172e-06, + "loss": 0.3006083071231842, + "memory(GiB)": 77.0, + "step": 6279, + "token_acc": 0.9070701660417783, + "train_speed(iter/s)": 0.483702 + }, + { + "epoch": 2.0096, + "grad_norm": 0.7603105359417718, + "learning_rate": 1.3476931926367764e-06, + "loss": 0.2915211319923401, + "memory(GiB)": 77.0, + "step": 6280, + "token_acc": 0.889168765743073, + "train_speed(iter/s)": 0.483626 + }, + { + "epoch": 2.00992, + "grad_norm": 0.6758065421130672, + "learning_rate": 1.3469106526645288e-06, + "loss": 0.2539146840572357, + "memory(GiB)": 77.0, + "step": 6281, + "token_acc": 0.8873820754716981, + "train_speed(iter/s)": 0.48355 + }, + { + "epoch": 2.01024, + "grad_norm": 0.6383930903658979, + "learning_rate": 1.3461282561742469e-06, + "loss": 0.2834252715110779, + "memory(GiB)": 77.0, + "step": 6282, + "token_acc": 0.896551724137931, + "train_speed(iter/s)": 0.483477 + }, + { + "epoch": 2.01056, + "grad_norm": 0.73254528516398, + "learning_rate": 1.345346003263287e-06, + "loss": 0.3742280900478363, + "memory(GiB)": 77.0, + "step": 6283, + "token_acc": 0.8372848327379019, + "train_speed(iter/s)": 0.483397 + }, + { + "epoch": 2.0108800000000002, + "grad_norm": 0.7307756366543594, + "learning_rate": 1.3445638940289869e-06, + "loss": 0.36641359329223633, + "memory(GiB)": 77.0, + "step": 6284, + "token_acc": 0.8625753999475478, + "train_speed(iter/s)": 0.483324 + }, + { + "epoch": 2.0112, + "grad_norm": 0.6339533859862216, + "learning_rate": 1.343781928568666e-06, + "loss": 0.30423808097839355, + "memory(GiB)": 77.0, + "step": 6285, + "token_acc": 0.8886373924852875, + "train_speed(iter/s)": 0.483234 + }, + { + "epoch": 2.01152, + "grad_norm": 0.7313759621361561, + "learning_rate": 1.3430001069796273e-06, + "loss": 0.25667938590049744, + "memory(GiB)": 77.0, + "step": 6286, + "token_acc": 0.9586868314275175, + "train_speed(iter/s)": 0.483163 + }, + { + "epoch": 2.01184, + "grad_norm": 0.6592799015963545, + "learning_rate": 1.342218429359154e-06, + "loss": 0.3219464421272278, + "memory(GiB)": 77.0, + "step": 6287, + "token_acc": 0.9012219959266803, + "train_speed(iter/s)": 0.483077 + }, + { + "epoch": 2.01216, + "grad_norm": 0.6593568481271037, + "learning_rate": 1.3414368958045124e-06, + "loss": 0.3329623341560364, + "memory(GiB)": 77.0, + "step": 6288, + "token_acc": 0.9139320667818077, + "train_speed(iter/s)": 0.482996 + }, + { + "epoch": 2.01248, + "grad_norm": 0.6054461319930274, + "learning_rate": 1.3406555064129511e-06, + "loss": 0.24121907353401184, + "memory(GiB)": 77.0, + "step": 6289, + "token_acc": 0.8906485671191554, + "train_speed(iter/s)": 0.482921 + }, + { + "epoch": 2.0128, + "grad_norm": 0.6705634916168037, + "learning_rate": 1.3398742612817007e-06, + "loss": 0.281712144613266, + "memory(GiB)": 77.0, + "step": 6290, + "token_acc": 0.8817405232247731, + "train_speed(iter/s)": 0.482852 + }, + { + 
"epoch": 2.01312, + "grad_norm": 0.712768841947193, + "learning_rate": 1.3390931605079733e-06, + "loss": 0.3107398748397827, + "memory(GiB)": 77.0, + "step": 6291, + "token_acc": 0.9253731343283582, + "train_speed(iter/s)": 0.482779 + }, + { + "epoch": 2.01344, + "grad_norm": 0.6819805110823932, + "learning_rate": 1.3383122041889633e-06, + "loss": 0.33366483449935913, + "memory(GiB)": 77.0, + "step": 6292, + "token_acc": 0.9080664294187426, + "train_speed(iter/s)": 0.482698 + }, + { + "epoch": 2.01376, + "grad_norm": 0.6990597432101183, + "learning_rate": 1.337531392421847e-06, + "loss": 0.2920764088630676, + "memory(GiB)": 77.0, + "step": 6293, + "token_acc": 0.9071965628356605, + "train_speed(iter/s)": 0.482616 + }, + { + "epoch": 2.01408, + "grad_norm": 0.6436551774573781, + "learning_rate": 1.3367507253037832e-06, + "loss": 0.27340495586395264, + "memory(GiB)": 77.0, + "step": 6294, + "token_acc": 0.888671109368875, + "train_speed(iter/s)": 0.482541 + }, + { + "epoch": 2.0144, + "grad_norm": 0.7075170750661208, + "learning_rate": 1.3359702029319123e-06, + "loss": 0.3433653712272644, + "memory(GiB)": 77.0, + "step": 6295, + "token_acc": 0.9198429198429199, + "train_speed(iter/s)": 0.482462 + }, + { + "epoch": 2.01472, + "grad_norm": 0.6520826406229528, + "learning_rate": 1.3351898254033569e-06, + "loss": 0.29712557792663574, + "memory(GiB)": 77.0, + "step": 6296, + "token_acc": 0.9036261864200535, + "train_speed(iter/s)": 0.482388 + }, + { + "epoch": 2.01504, + "grad_norm": 0.6743697571043016, + "learning_rate": 1.3344095928152213e-06, + "loss": 0.2984038293361664, + "memory(GiB)": 77.0, + "step": 6297, + "token_acc": 0.9168877099911583, + "train_speed(iter/s)": 0.482316 + }, + { + "epoch": 2.01536, + "grad_norm": 0.6566962387093068, + "learning_rate": 1.3336295052645915e-06, + "loss": 0.27132296562194824, + "memory(GiB)": 77.0, + "step": 6298, + "token_acc": 0.9423290203327171, + "train_speed(iter/s)": 0.482249 + }, + { + "epoch": 2.01568, + "grad_norm": 0.7321499164798794, + "learning_rate": 1.3328495628485366e-06, + "loss": 0.27358710765838623, + "memory(GiB)": 77.0, + "step": 6299, + "token_acc": 0.919827320382362, + "train_speed(iter/s)": 0.482177 + }, + { + "epoch": 2.016, + "grad_norm": 0.6937681120844676, + "learning_rate": 1.3320697656641065e-06, + "loss": 0.3667391240596771, + "memory(GiB)": 77.0, + "step": 6300, + "token_acc": 0.8944409743910057, + "train_speed(iter/s)": 0.482096 + }, + { + "epoch": 2.01632, + "grad_norm": 0.6963277694523687, + "learning_rate": 1.3312901138083334e-06, + "loss": 0.30431389808654785, + "memory(GiB)": 77.0, + "step": 6301, + "token_acc": 0.8350566223535204, + "train_speed(iter/s)": 0.482015 + }, + { + "epoch": 2.01664, + "grad_norm": 0.6488750235341135, + "learning_rate": 1.3305106073782318e-06, + "loss": 0.3024393916130066, + "memory(GiB)": 77.0, + "step": 6302, + "token_acc": 0.8628608923884514, + "train_speed(iter/s)": 0.48194 + }, + { + "epoch": 2.01696, + "grad_norm": 0.6674720893951139, + "learning_rate": 1.3297312464707972e-06, + "loss": 0.272877961397171, + "memory(GiB)": 77.0, + "step": 6303, + "token_acc": 0.9145531400966184, + "train_speed(iter/s)": 0.48187 + }, + { + "epoch": 2.01728, + "grad_norm": 0.6874755001807715, + "learning_rate": 1.328952031183008e-06, + "loss": 0.29961252212524414, + "memory(GiB)": 77.0, + "step": 6304, + "token_acc": 0.9343563512361467, + "train_speed(iter/s)": 0.481798 + }, + { + "epoch": 2.0176, + "grad_norm": 0.7468638392423593, + "learning_rate": 1.328172961611824e-06, + "loss": 0.3920004665851593, + 
"memory(GiB)": 77.0, + "step": 6305, + "token_acc": 0.8942757009345794, + "train_speed(iter/s)": 0.481721 + }, + { + "epoch": 2.01792, + "grad_norm": 0.6941909322885776, + "learning_rate": 1.3273940378541866e-06, + "loss": 0.3227153420448303, + "memory(GiB)": 77.0, + "step": 6306, + "token_acc": 0.8898114901256733, + "train_speed(iter/s)": 0.481644 + }, + { + "epoch": 2.01824, + "grad_norm": 0.698291429053942, + "learning_rate": 1.3266152600070198e-06, + "loss": 0.3204294443130493, + "memory(GiB)": 77.0, + "step": 6307, + "token_acc": 0.8721377912867274, + "train_speed(iter/s)": 0.481567 + }, + { + "epoch": 2.01856, + "grad_norm": 0.7017348988841097, + "learning_rate": 1.3258366281672282e-06, + "loss": 0.29079121351242065, + "memory(GiB)": 77.0, + "step": 6308, + "token_acc": 0.9561360275998029, + "train_speed(iter/s)": 0.481499 + }, + { + "epoch": 2.01888, + "grad_norm": 0.7774941960841261, + "learning_rate": 1.3250581424317012e-06, + "loss": 0.32662510871887207, + "memory(GiB)": 77.0, + "step": 6309, + "token_acc": 0.9188262195121951, + "train_speed(iter/s)": 0.481424 + }, + { + "epoch": 2.0192, + "grad_norm": 0.7564675449954376, + "learning_rate": 1.3242798028973047e-06, + "loss": 0.3766101002693176, + "memory(GiB)": 77.0, + "step": 6310, + "token_acc": 0.9033665835411472, + "train_speed(iter/s)": 0.481349 + }, + { + "epoch": 2.01952, + "grad_norm": 0.7034235462150221, + "learning_rate": 1.32350160966089e-06, + "loss": 0.28203511238098145, + "memory(GiB)": 77.0, + "step": 6311, + "token_acc": 0.9278650378126818, + "train_speed(iter/s)": 0.481272 + }, + { + "epoch": 2.01984, + "grad_norm": 0.7123783069942137, + "learning_rate": 1.322723562819292e-06, + "loss": 0.3183731138706207, + "memory(GiB)": 77.0, + "step": 6312, + "token_acc": 0.8700458344567269, + "train_speed(iter/s)": 0.481202 + }, + { + "epoch": 2.02016, + "grad_norm": 0.6855027677177672, + "learning_rate": 1.3219456624693238e-06, + "loss": 0.3012785315513611, + "memory(GiB)": 77.0, + "step": 6313, + "token_acc": 0.890116763969975, + "train_speed(iter/s)": 0.481126 + }, + { + "epoch": 2.02048, + "grad_norm": 0.6819824959978578, + "learning_rate": 1.3211679087077816e-06, + "loss": 0.3888840675354004, + "memory(GiB)": 77.0, + "step": 6314, + "token_acc": 0.9103498542274052, + "train_speed(iter/s)": 0.481048 + }, + { + "epoch": 2.0208, + "grad_norm": 0.726210665802114, + "learning_rate": 1.3203903016314438e-06, + "loss": 0.3592088520526886, + "memory(GiB)": 77.0, + "step": 6315, + "token_acc": 0.8686599038974907, + "train_speed(iter/s)": 0.480973 + }, + { + "epoch": 2.02112, + "grad_norm": 0.6571295631648123, + "learning_rate": 1.3196128413370695e-06, + "loss": 0.27768653631210327, + "memory(GiB)": 77.0, + "step": 6316, + "token_acc": 0.9497860199714693, + "train_speed(iter/s)": 0.480905 + }, + { + "epoch": 2.02144, + "grad_norm": 0.6232398195339178, + "learning_rate": 1.3188355279214005e-06, + "loss": 0.25978219509124756, + "memory(GiB)": 77.0, + "step": 6317, + "token_acc": 0.9080536912751678, + "train_speed(iter/s)": 0.480836 + }, + { + "epoch": 2.02176, + "grad_norm": 0.734516006615045, + "learning_rate": 1.3180583614811598e-06, + "loss": 0.294796884059906, + "memory(GiB)": 77.0, + "step": 6318, + "token_acc": 0.9500705218617772, + "train_speed(iter/s)": 0.480762 + }, + { + "epoch": 2.02208, + "grad_norm": 0.7427390482081438, + "learning_rate": 1.3172813421130526e-06, + "loss": 0.39000117778778076, + "memory(GiB)": 77.0, + "step": 6319, + "token_acc": 0.846862127453356, + "train_speed(iter/s)": 0.480683 + }, + { + "epoch": 
2.0224, + "grad_norm": 0.70333922973558, + "learning_rate": 1.3165044699137652e-06, + "loss": 0.3192846179008484, + "memory(GiB)": 77.0, + "step": 6320, + "token_acc": 0.9184301654482493, + "train_speed(iter/s)": 0.480607 + }, + { + "epoch": 2.02272, + "grad_norm": 0.7004681001077331, + "learning_rate": 1.315727744979966e-06, + "loss": 0.35214197635650635, + "memory(GiB)": 77.0, + "step": 6321, + "token_acc": 0.9158841940532081, + "train_speed(iter/s)": 0.480531 + }, + { + "epoch": 2.02304, + "grad_norm": 0.7147815360139363, + "learning_rate": 1.314951167408305e-06, + "loss": 0.26825788617134094, + "memory(GiB)": 77.0, + "step": 6322, + "token_acc": 0.8916666666666667, + "train_speed(iter/s)": 0.480464 + }, + { + "epoch": 2.02336, + "grad_norm": 0.6909300197750715, + "learning_rate": 1.3141747372954144e-06, + "loss": 0.3233186900615692, + "memory(GiB)": 77.0, + "step": 6323, + "token_acc": 0.8552087250846183, + "train_speed(iter/s)": 0.480388 + }, + { + "epoch": 2.02368, + "grad_norm": 0.6409426410364374, + "learning_rate": 1.3133984547379058e-06, + "loss": 0.2427642047405243, + "memory(GiB)": 77.0, + "step": 6324, + "token_acc": 0.8450704225352113, + "train_speed(iter/s)": 0.480316 + }, + { + "epoch": 2.024, + "grad_norm": 0.6624733147737559, + "learning_rate": 1.3126223198323752e-06, + "loss": 0.33277836441993713, + "memory(GiB)": 77.0, + "step": 6325, + "token_acc": 0.9293989862418537, + "train_speed(iter/s)": 0.480238 + }, + { + "epoch": 2.02432, + "grad_norm": 0.715854095452988, + "learning_rate": 1.3118463326753989e-06, + "loss": 0.31738874316215515, + "memory(GiB)": 77.0, + "step": 6326, + "token_acc": 0.914572864321608, + "train_speed(iter/s)": 0.480156 + }, + { + "epoch": 2.02464, + "grad_norm": 0.7484658818763328, + "learning_rate": 1.3110704933635351e-06, + "loss": 0.3013559579849243, + "memory(GiB)": 77.0, + "step": 6327, + "token_acc": 0.904565630944832, + "train_speed(iter/s)": 0.480083 + }, + { + "epoch": 2.02496, + "grad_norm": 0.6610764463570362, + "learning_rate": 1.3102948019933226e-06, + "loss": 0.36140644550323486, + "memory(GiB)": 77.0, + "step": 6328, + "token_acc": 0.9278810408921933, + "train_speed(iter/s)": 0.479998 + }, + { + "epoch": 2.02528, + "grad_norm": 0.7226798521323687, + "learning_rate": 1.3095192586612849e-06, + "loss": 0.28934913873672485, + "memory(GiB)": 77.0, + "step": 6329, + "token_acc": 0.9563124768604221, + "train_speed(iter/s)": 0.47993 + }, + { + "epoch": 2.0256, + "grad_norm": 0.7454259452540538, + "learning_rate": 1.3087438634639232e-06, + "loss": 0.34084558486938477, + "memory(GiB)": 77.0, + "step": 6330, + "token_acc": 0.8817513582614254, + "train_speed(iter/s)": 0.479858 + }, + { + "epoch": 2.02592, + "grad_norm": 0.6733631048541981, + "learning_rate": 1.3079686164977228e-06, + "loss": 0.2818094491958618, + "memory(GiB)": 77.0, + "step": 6331, + "token_acc": 0.9138062547673532, + "train_speed(iter/s)": 0.479788 + }, + { + "epoch": 2.02624, + "grad_norm": 0.6537708314814422, + "learning_rate": 1.3071935178591494e-06, + "loss": 0.2562366724014282, + "memory(GiB)": 77.0, + "step": 6332, + "token_acc": 0.9050240880935995, + "train_speed(iter/s)": 0.47972 + }, + { + "epoch": 2.02656, + "grad_norm": 0.7338703815442246, + "learning_rate": 1.3064185676446505e-06, + "loss": 0.32776084542274475, + "memory(GiB)": 77.0, + "step": 6333, + "token_acc": 0.9343474779823859, + "train_speed(iter/s)": 0.479652 + }, + { + "epoch": 2.02688, + "grad_norm": 0.6829046241357661, + "learning_rate": 1.3056437659506554e-06, + "loss": 0.2243414968252182, + 
"memory(GiB)": 77.0, + "step": 6334, + "token_acc": 0.8858819693511575, + "train_speed(iter/s)": 0.479581 + }, + { + "epoch": 2.0272, + "grad_norm": 0.7247771593098058, + "learning_rate": 1.3048691128735747e-06, + "loss": 0.331038236618042, + "memory(GiB)": 77.0, + "step": 6335, + "token_acc": 0.8427607680332122, + "train_speed(iter/s)": 0.479503 + }, + { + "epoch": 2.02752, + "grad_norm": 0.6239745320174128, + "learning_rate": 1.3040946085098006e-06, + "loss": 0.32337623834609985, + "memory(GiB)": 77.0, + "step": 6336, + "token_acc": 0.8675213675213675, + "train_speed(iter/s)": 0.479428 + }, + { + "epoch": 2.02784, + "grad_norm": 0.6526981298817863, + "learning_rate": 1.3033202529557073e-06, + "loss": 0.19059595465660095, + "memory(GiB)": 77.0, + "step": 6337, + "token_acc": 0.964812030075188, + "train_speed(iter/s)": 0.479364 + }, + { + "epoch": 2.02816, + "grad_norm": 0.7025830675993463, + "learning_rate": 1.3025460463076483e-06, + "loss": 0.31899476051330566, + "memory(GiB)": 77.0, + "step": 6338, + "token_acc": 0.8659966499162479, + "train_speed(iter/s)": 0.47929 + }, + { + "epoch": 2.02848, + "grad_norm": 0.7205662358332183, + "learning_rate": 1.3017719886619611e-06, + "loss": 0.3052526116371155, + "memory(GiB)": 77.0, + "step": 6339, + "token_acc": 0.9038045159294773, + "train_speed(iter/s)": 0.479216 + }, + { + "epoch": 2.0288, + "grad_norm": 0.7555970172378059, + "learning_rate": 1.3009980801149641e-06, + "loss": 0.3112773299217224, + "memory(GiB)": 77.0, + "step": 6340, + "token_acc": 0.9372530792470369, + "train_speed(iter/s)": 0.479146 + }, + { + "epoch": 2.02912, + "grad_norm": 0.9802202001495842, + "learning_rate": 1.3002243207629562e-06, + "loss": 0.3531525135040283, + "memory(GiB)": 77.0, + "step": 6341, + "token_acc": 0.954368932038835, + "train_speed(iter/s)": 0.479076 + }, + { + "epoch": 2.02944, + "grad_norm": 0.6684205871005177, + "learning_rate": 1.2994507107022186e-06, + "loss": 0.34261614084243774, + "memory(GiB)": 77.0, + "step": 6342, + "token_acc": 0.9535036018336608, + "train_speed(iter/s)": 0.478998 + }, + { + "epoch": 2.02976, + "grad_norm": 0.7113561906360865, + "learning_rate": 1.2986772500290134e-06, + "loss": 0.26058655977249146, + "memory(GiB)": 77.0, + "step": 6343, + "token_acc": 0.9122881355932203, + "train_speed(iter/s)": 0.47893 + }, + { + "epoch": 2.03008, + "grad_norm": 0.6861047354486864, + "learning_rate": 1.2979039388395848e-06, + "loss": 0.29333439469337463, + "memory(GiB)": 77.0, + "step": 6344, + "token_acc": 0.9221380471380471, + "train_speed(iter/s)": 0.478861 + }, + { + "epoch": 2.0304, + "grad_norm": 0.7281259934981017, + "learning_rate": 1.2971307772301566e-06, + "loss": 0.34404391050338745, + "memory(GiB)": 77.0, + "step": 6345, + "token_acc": 0.9089595375722543, + "train_speed(iter/s)": 0.478787 + }, + { + "epoch": 2.03072, + "grad_norm": 0.7349091837864274, + "learning_rate": 1.2963577652969378e-06, + "loss": 0.44748276472091675, + "memory(GiB)": 77.0, + "step": 6346, + "token_acc": 0.8481653310839309, + "train_speed(iter/s)": 0.478706 + }, + { + "epoch": 2.03104, + "grad_norm": 0.7398645861875466, + "learning_rate": 1.2955849031361151e-06, + "loss": 0.3676271438598633, + "memory(GiB)": 77.0, + "step": 6347, + "token_acc": 0.8611431870669746, + "train_speed(iter/s)": 0.478629 + }, + { + "epoch": 2.03136, + "grad_norm": 0.6793992168406675, + "learning_rate": 1.2948121908438577e-06, + "loss": 0.25301340222358704, + "memory(GiB)": 77.0, + "step": 6348, + "token_acc": 0.9588583183337619, + "train_speed(iter/s)": 0.47856 + }, + { + 
"epoch": 2.03168, + "grad_norm": 0.6636298951888899, + "learning_rate": 1.294039628516316e-06, + "loss": 0.2722693085670471, + "memory(GiB)": 77.0, + "step": 6349, + "token_acc": 0.909670920080591, + "train_speed(iter/s)": 0.478493 + }, + { + "epoch": 2.032, + "grad_norm": 0.6489327246167346, + "learning_rate": 1.2932672162496224e-06, + "loss": 0.31790903210639954, + "memory(GiB)": 77.0, + "step": 6350, + "token_acc": 0.8986175115207373, + "train_speed(iter/s)": 0.478414 + }, + { + "epoch": 2.03232, + "grad_norm": 0.6616814898502948, + "learning_rate": 1.29249495413989e-06, + "loss": 0.2465367615222931, + "memory(GiB)": 77.0, + "step": 6351, + "token_acc": 0.959792477302205, + "train_speed(iter/s)": 0.478341 + }, + { + "epoch": 2.03264, + "grad_norm": 0.7288717027606078, + "learning_rate": 1.2917228422832145e-06, + "loss": 0.3178557753562927, + "memory(GiB)": 77.0, + "step": 6352, + "token_acc": 0.9014869888475836, + "train_speed(iter/s)": 0.478277 + }, + { + "epoch": 2.03296, + "grad_norm": 0.6391284605630156, + "learning_rate": 1.29095088077567e-06, + "loss": 0.2815361022949219, + "memory(GiB)": 77.0, + "step": 6353, + "token_acc": 0.9615500150195254, + "train_speed(iter/s)": 0.47821 + }, + { + "epoch": 2.03328, + "grad_norm": 0.6729894147791484, + "learning_rate": 1.2901790697133141e-06, + "loss": 0.31838253140449524, + "memory(GiB)": 77.0, + "step": 6354, + "token_acc": 0.8967912669533576, + "train_speed(iter/s)": 0.47813 + }, + { + "epoch": 2.0336, + "grad_norm": 0.6477244287274572, + "learning_rate": 1.289407409192186e-06, + "loss": 0.257421612739563, + "memory(GiB)": 77.0, + "step": 6355, + "token_acc": 0.8940225238232746, + "train_speed(iter/s)": 0.478057 + }, + { + "epoch": 2.03392, + "grad_norm": 0.6939271076336452, + "learning_rate": 1.2886358993083049e-06, + "loss": 0.36720120906829834, + "memory(GiB)": 77.0, + "step": 6356, + "token_acc": 0.8823170731707317, + "train_speed(iter/s)": 0.477982 + }, + { + "epoch": 2.03424, + "grad_norm": 0.7702829922269913, + "learning_rate": 1.2878645401576723e-06, + "loss": 0.32178831100463867, + "memory(GiB)": 77.0, + "step": 6357, + "token_acc": 0.8855981416957027, + "train_speed(iter/s)": 0.477913 + }, + { + "epoch": 2.03456, + "grad_norm": 0.7107628194075691, + "learning_rate": 1.2870933318362705e-06, + "loss": 0.36561667919158936, + "memory(GiB)": 77.0, + "step": 6358, + "token_acc": 0.9253931080628973, + "train_speed(iter/s)": 0.477834 + }, + { + "epoch": 2.03488, + "grad_norm": 0.7179876888017364, + "learning_rate": 1.2863222744400622e-06, + "loss": 0.3397262394428253, + "memory(GiB)": 77.0, + "step": 6359, + "token_acc": 0.8946360153256705, + "train_speed(iter/s)": 0.477759 + }, + { + "epoch": 2.0352, + "grad_norm": 0.7614321120660853, + "learning_rate": 1.2855513680649933e-06, + "loss": 0.31905341148376465, + "memory(GiB)": 77.0, + "step": 6360, + "token_acc": 0.8617397454031117, + "train_speed(iter/s)": 0.477687 + }, + { + "epoch": 2.03552, + "grad_norm": 0.726422364933869, + "learning_rate": 1.284780612806988e-06, + "loss": 0.311653733253479, + "memory(GiB)": 77.0, + "step": 6361, + "token_acc": 0.8870192307692307, + "train_speed(iter/s)": 0.477615 + }, + { + "epoch": 2.03584, + "grad_norm": 0.6314733811208383, + "learning_rate": 1.284010008761955e-06, + "loss": 0.30044254660606384, + "memory(GiB)": 77.0, + "step": 6362, + "token_acc": 0.9499185667752443, + "train_speed(iter/s)": 0.477545 + }, + { + "epoch": 2.03616, + "grad_norm": 0.8491836827184126, + "learning_rate": 1.2832395560257826e-06, + "loss": 0.3141372799873352, + 
"memory(GiB)": 77.0, + "step": 6363, + "token_acc": 0.9071221254744363, + "train_speed(iter/s)": 0.477456 + }, + { + "epoch": 2.03648, + "grad_norm": 0.6626938025737386, + "learning_rate": 1.2824692546943399e-06, + "loss": 0.2874166965484619, + "memory(GiB)": 77.0, + "step": 6364, + "token_acc": 0.929932735426009, + "train_speed(iter/s)": 0.477388 + }, + { + "epoch": 2.0368, + "grad_norm": 0.8178581895032965, + "learning_rate": 1.2816991048634771e-06, + "loss": 0.26143983006477356, + "memory(GiB)": 77.0, + "step": 6365, + "token_acc": 0.9396958573675931, + "train_speed(iter/s)": 0.477321 + }, + { + "epoch": 2.03712, + "grad_norm": 0.7690977087784759, + "learning_rate": 1.2809291066290274e-06, + "loss": 0.38396334648132324, + "memory(GiB)": 77.0, + "step": 6366, + "token_acc": 0.9221144823271817, + "train_speed(iter/s)": 0.477257 + }, + { + "epoch": 2.03744, + "grad_norm": 0.7140302199293299, + "learning_rate": 1.2801592600868019e-06, + "loss": 0.26810505986213684, + "memory(GiB)": 77.0, + "step": 6367, + "token_acc": 0.8778752667773299, + "train_speed(iter/s)": 0.477191 + }, + { + "epoch": 2.03776, + "grad_norm": 0.6819469314803048, + "learning_rate": 1.2793895653325955e-06, + "loss": 0.34270960092544556, + "memory(GiB)": 77.0, + "step": 6368, + "token_acc": 0.9278280542986426, + "train_speed(iter/s)": 0.47712 + }, + { + "epoch": 2.03808, + "grad_norm": 0.7556147314813234, + "learning_rate": 1.278620022462183e-06, + "loss": 0.27022767066955566, + "memory(GiB)": 77.0, + "step": 6369, + "token_acc": 0.9309102815979043, + "train_speed(iter/s)": 0.477053 + }, + { + "epoch": 2.0384, + "grad_norm": 0.7055704713398672, + "learning_rate": 1.2778506315713208e-06, + "loss": 0.253045916557312, + "memory(GiB)": 77.0, + "step": 6370, + "token_acc": 0.9394235466536395, + "train_speed(iter/s)": 0.476987 + }, + { + "epoch": 2.03872, + "grad_norm": 0.7162284757196506, + "learning_rate": 1.2770813927557468e-06, + "loss": 0.3063619136810303, + "memory(GiB)": 77.0, + "step": 6371, + "token_acc": 0.907612231620039, + "train_speed(iter/s)": 0.476919 + }, + { + "epoch": 2.03904, + "grad_norm": 0.7117872802401528, + "learning_rate": 1.2763123061111787e-06, + "loss": 0.3254435658454895, + "memory(GiB)": 77.0, + "step": 6372, + "token_acc": 0.9150169000482858, + "train_speed(iter/s)": 0.476847 + }, + { + "epoch": 2.03936, + "grad_norm": 0.6692303667844917, + "learning_rate": 1.2755433717333162e-06, + "loss": 0.34764623641967773, + "memory(GiB)": 77.0, + "step": 6373, + "token_acc": 0.9118807540552389, + "train_speed(iter/s)": 0.47677 + }, + { + "epoch": 2.03968, + "grad_norm": 0.6676382360801816, + "learning_rate": 1.2747745897178402e-06, + "loss": 0.28303736448287964, + "memory(GiB)": 77.0, + "step": 6374, + "token_acc": 0.8958240781874722, + "train_speed(iter/s)": 0.476699 + }, + { + "epoch": 2.04, + "grad_norm": 0.6850123728949203, + "learning_rate": 1.2740059601604122e-06, + "loss": 0.3082212805747986, + "memory(GiB)": 77.0, + "step": 6375, + "token_acc": 0.9291652810109744, + "train_speed(iter/s)": 0.476622 + }, + { + "epoch": 2.04032, + "grad_norm": 0.6339530475306239, + "learning_rate": 1.273237483156674e-06, + "loss": 0.27580130100250244, + "memory(GiB)": 77.0, + "step": 6376, + "token_acc": 0.9303689225506964, + "train_speed(iter/s)": 0.476555 + }, + { + "epoch": 2.04064, + "grad_norm": 0.715282317151192, + "learning_rate": 1.2724691588022503e-06, + "loss": 0.33635520935058594, + "memory(GiB)": 77.0, + "step": 6377, + "token_acc": 0.9097283085013146, + "train_speed(iter/s)": 0.476478 + }, + { + "epoch": 
2.04096, + "grad_norm": 0.7291745633814745, + "learning_rate": 1.2717009871927444e-06, + "loss": 0.2883354425430298, + "memory(GiB)": 77.0, + "step": 6378, + "token_acc": 0.9169104739613809, + "train_speed(iter/s)": 0.476412 + }, + { + "epoch": 2.04128, + "grad_norm": 0.724463536483351, + "learning_rate": 1.2709329684237436e-06, + "loss": 0.3835449814796448, + "memory(GiB)": 77.0, + "step": 6379, + "token_acc": 0.896561286744315, + "train_speed(iter/s)": 0.476342 + }, + { + "epoch": 2.0416, + "grad_norm": 0.7475942971966761, + "learning_rate": 1.2701651025908146e-06, + "loss": 0.32463639974594116, + "memory(GiB)": 77.0, + "step": 6380, + "token_acc": 0.8504500346180476, + "train_speed(iter/s)": 0.476276 + }, + { + "epoch": 2.04192, + "grad_norm": 0.7385336649073955, + "learning_rate": 1.2693973897895033e-06, + "loss": 0.315484881401062, + "memory(GiB)": 77.0, + "step": 6381, + "token_acc": 0.9067540322580645, + "train_speed(iter/s)": 0.476201 + }, + { + "epoch": 2.04224, + "grad_norm": 0.7093754214107406, + "learning_rate": 1.2686298301153394e-06, + "loss": 0.32450371980667114, + "memory(GiB)": 77.0, + "step": 6382, + "token_acc": 0.919744942832014, + "train_speed(iter/s)": 0.476131 + }, + { + "epoch": 2.04256, + "grad_norm": 0.7037051077901938, + "learning_rate": 1.2678624236638321e-06, + "loss": 0.3376803696155548, + "memory(GiB)": 77.0, + "step": 6383, + "token_acc": 0.8748110831234257, + "train_speed(iter/s)": 0.47606 + }, + { + "epoch": 2.04288, + "grad_norm": 0.6700933816071503, + "learning_rate": 1.2670951705304713e-06, + "loss": 0.33496779203414917, + "memory(GiB)": 77.0, + "step": 6384, + "token_acc": 0.8956573000589507, + "train_speed(iter/s)": 0.475983 + }, + { + "epoch": 2.0432, + "grad_norm": 0.7479993630730526, + "learning_rate": 1.266328070810729e-06, + "loss": 0.38534069061279297, + "memory(GiB)": 77.0, + "step": 6385, + "token_acc": 0.8514234875444839, + "train_speed(iter/s)": 0.475914 + }, + { + "epoch": 2.04352, + "grad_norm": 0.791028674329407, + "learning_rate": 1.265561124600057e-06, + "loss": 0.39407116174697876, + "memory(GiB)": 77.0, + "step": 6386, + "token_acc": 0.8376047904191617, + "train_speed(iter/s)": 0.475849 + }, + { + "epoch": 2.04384, + "grad_norm": 0.688181022592752, + "learning_rate": 1.2647943319938894e-06, + "loss": 0.3264777958393097, + "memory(GiB)": 77.0, + "step": 6387, + "token_acc": 0.9536527886881383, + "train_speed(iter/s)": 0.475778 + }, + { + "epoch": 2.04416, + "grad_norm": 0.6408994509663568, + "learning_rate": 1.2640276930876392e-06, + "loss": 0.2632952928543091, + "memory(GiB)": 77.0, + "step": 6388, + "token_acc": 0.9191558441558442, + "train_speed(iter/s)": 0.475704 + }, + { + "epoch": 2.04448, + "grad_norm": 0.7663411090180655, + "learning_rate": 1.2632612079767014e-06, + "loss": 0.29350024461746216, + "memory(GiB)": 77.0, + "step": 6389, + "token_acc": 0.8719898605830165, + "train_speed(iter/s)": 0.475638 + }, + { + "epoch": 2.0448, + "grad_norm": 0.7965266766235142, + "learning_rate": 1.2624948767564528e-06, + "loss": 0.3126998543739319, + "memory(GiB)": 77.0, + "step": 6390, + "token_acc": 0.857066808623902, + "train_speed(iter/s)": 0.475573 + }, + { + "epoch": 2.04512, + "grad_norm": 0.6896295060786629, + "learning_rate": 1.2617286995222485e-06, + "loss": 0.24616509675979614, + "memory(GiB)": 77.0, + "step": 6391, + "token_acc": 0.9601865136298422, + "train_speed(iter/s)": 0.475504 + }, + { + "epoch": 2.04544, + "grad_norm": 0.7156353454350711, + "learning_rate": 1.260962676369427e-06, + "loss": 0.29785531759262085, + 
"memory(GiB)": 77.0, + "step": 6392, + "token_acc": 0.900049850448654, + "train_speed(iter/s)": 0.475429 + }, + { + "epoch": 2.04576, + "grad_norm": 0.7066993825460093, + "learning_rate": 1.2601968073933066e-06, + "loss": 0.3561857044696808, + "memory(GiB)": 77.0, + "step": 6393, + "token_acc": 0.8906851424172441, + "train_speed(iter/s)": 0.475355 + }, + { + "epoch": 2.04608, + "grad_norm": 0.7235950433073847, + "learning_rate": 1.259431092689186e-06, + "loss": 0.34663301706314087, + "memory(GiB)": 77.0, + "step": 6394, + "token_acc": 0.8642378415535316, + "train_speed(iter/s)": 0.475283 + }, + { + "epoch": 2.0464, + "grad_norm": 0.6851270120206758, + "learning_rate": 1.258665532352345e-06, + "loss": 0.35816270112991333, + "memory(GiB)": 77.0, + "step": 6395, + "token_acc": 0.8780387309435517, + "train_speed(iter/s)": 0.475209 + }, + { + "epoch": 2.04672, + "grad_norm": 0.6556132348102841, + "learning_rate": 1.257900126478045e-06, + "loss": 0.3401359021663666, + "memory(GiB)": 77.0, + "step": 6396, + "token_acc": 0.8943650918223199, + "train_speed(iter/s)": 0.475133 + }, + { + "epoch": 2.04704, + "grad_norm": 0.6519476198553387, + "learning_rate": 1.2571348751615266e-06, + "loss": 0.3024353086948395, + "memory(GiB)": 77.0, + "step": 6397, + "token_acc": 0.9545706371191136, + "train_speed(iter/s)": 0.475051 + }, + { + "epoch": 2.04736, + "grad_norm": 0.7291018379562326, + "learning_rate": 1.2563697784980128e-06, + "loss": 0.2979575991630554, + "memory(GiB)": 77.0, + "step": 6398, + "token_acc": 0.9242508324084351, + "train_speed(iter/s)": 0.474987 + }, + { + "epoch": 2.04768, + "grad_norm": 0.6088096836987933, + "learning_rate": 1.2556048365827062e-06, + "loss": 0.2571676969528198, + "memory(GiB)": 77.0, + "step": 6399, + "token_acc": 0.9032075471698113, + "train_speed(iter/s)": 0.474917 + }, + { + "epoch": 2.048, + "grad_norm": 0.7070079660939993, + "learning_rate": 1.2548400495107909e-06, + "loss": 0.24614188075065613, + "memory(GiB)": 77.0, + "step": 6400, + "token_acc": 0.9044662049199904, + "train_speed(iter/s)": 0.474851 + }, + { + "epoch": 2.04832, + "grad_norm": 0.7065080719967192, + "learning_rate": 1.2540754173774307e-06, + "loss": 0.23769435286521912, + "memory(GiB)": 77.0, + "step": 6401, + "token_acc": 0.894140625, + "train_speed(iter/s)": 0.474774 + }, + { + "epoch": 2.04864, + "grad_norm": 0.6985050304228786, + "learning_rate": 1.2533109402777716e-06, + "loss": 0.3377070128917694, + "memory(GiB)": 77.0, + "step": 6402, + "token_acc": 0.966317577129918, + "train_speed(iter/s)": 0.4747 + }, + { + "epoch": 2.04896, + "grad_norm": 0.6294210672833846, + "learning_rate": 1.2525466183069391e-06, + "loss": 0.3001401126384735, + "memory(GiB)": 77.0, + "step": 6403, + "token_acc": 0.9125379170879676, + "train_speed(iter/s)": 0.474624 + }, + { + "epoch": 2.04928, + "grad_norm": 0.7003649107554833, + "learning_rate": 1.2517824515600396e-06, + "loss": 0.30816781520843506, + "memory(GiB)": 77.0, + "step": 6404, + "token_acc": 0.960185799601858, + "train_speed(iter/s)": 0.474551 + }, + { + "epoch": 2.0496, + "grad_norm": 0.7761375442916221, + "learning_rate": 1.2510184401321612e-06, + "loss": 0.35633382201194763, + "memory(GiB)": 77.0, + "step": 6405, + "token_acc": 0.9060942889996167, + "train_speed(iter/s)": 0.474472 + }, + { + "epoch": 2.04992, + "grad_norm": 0.6345033664459868, + "learning_rate": 1.2502545841183709e-06, + "loss": 0.32585862278938293, + "memory(GiB)": 77.0, + "step": 6406, + "token_acc": 0.9374149659863945, + "train_speed(iter/s)": 0.474395 + }, + { + "epoch": 2.05024, + 
"grad_norm": 0.7219101848235628, + "learning_rate": 1.2494908836137179e-06, + "loss": 0.2753370702266693, + "memory(GiB)": 77.0, + "step": 6407, + "token_acc": 0.9617165791911083, + "train_speed(iter/s)": 0.474309 + }, + { + "epoch": 2.05056, + "grad_norm": 0.7038922007734345, + "learning_rate": 1.2487273387132312e-06, + "loss": 0.2642561197280884, + "memory(GiB)": 77.0, + "step": 6408, + "token_acc": 0.9626959247648903, + "train_speed(iter/s)": 0.474235 + }, + { + "epoch": 2.05088, + "grad_norm": 0.6862415371307478, + "learning_rate": 1.247963949511921e-06, + "loss": 0.3930172324180603, + "memory(GiB)": 77.0, + "step": 6409, + "token_acc": 0.9362259755387303, + "train_speed(iter/s)": 0.474161 + }, + { + "epoch": 2.0512, + "grad_norm": 0.6720511456983045, + "learning_rate": 1.2472007161047777e-06, + "loss": 0.23810595273971558, + "memory(GiB)": 77.0, + "step": 6410, + "token_acc": 0.9514118544899517, + "train_speed(iter/s)": 0.474062 + }, + { + "epoch": 2.05152, + "grad_norm": 0.6677137917522139, + "learning_rate": 1.2464376385867723e-06, + "loss": 0.24806614220142365, + "memory(GiB)": 77.0, + "step": 6411, + "token_acc": 0.9513947590870668, + "train_speed(iter/s)": 0.473997 + }, + { + "epoch": 2.05184, + "grad_norm": 0.822707812740526, + "learning_rate": 1.2456747170528568e-06, + "loss": 0.3656221032142639, + "memory(GiB)": 77.0, + "step": 6412, + "token_acc": 0.8031971580817051, + "train_speed(iter/s)": 0.473931 + }, + { + "epoch": 2.05216, + "grad_norm": 0.7029242848523726, + "learning_rate": 1.2449119515979637e-06, + "loss": 0.30460038781166077, + "memory(GiB)": 77.0, + "step": 6413, + "token_acc": 0.8276440962506995, + "train_speed(iter/s)": 0.473862 + }, + { + "epoch": 2.05248, + "grad_norm": 0.6412569043149641, + "learning_rate": 1.2441493423170056e-06, + "loss": 0.26473355293273926, + "memory(GiB)": 77.0, + "step": 6414, + "token_acc": 0.9184388291218414, + "train_speed(iter/s)": 0.473797 + }, + { + "epoch": 2.0528, + "grad_norm": 0.6532260963032243, + "learning_rate": 1.243386889304876e-06, + "loss": 0.2984153926372528, + "memory(GiB)": 77.0, + "step": 6415, + "token_acc": 0.8930282064928153, + "train_speed(iter/s)": 0.473725 + }, + { + "epoch": 2.05312, + "grad_norm": 0.6931732350012403, + "learning_rate": 1.2426245926564489e-06, + "loss": 0.33972617983818054, + "memory(GiB)": 77.0, + "step": 6416, + "token_acc": 0.9169559412550067, + "train_speed(iter/s)": 0.473648 + }, + { + "epoch": 2.05344, + "grad_norm": 0.6486494307338581, + "learning_rate": 1.2418624524665793e-06, + "loss": 0.26833003759384155, + "memory(GiB)": 77.0, + "step": 6417, + "token_acc": 0.9336789803413265, + "train_speed(iter/s)": 0.473579 + }, + { + "epoch": 2.05376, + "grad_norm": 0.6908528537460398, + "learning_rate": 1.241100468830102e-06, + "loss": 0.3870032727718353, + "memory(GiB)": 77.0, + "step": 6418, + "token_acc": 0.9269458352298588, + "train_speed(iter/s)": 0.473511 + }, + { + "epoch": 2.05408, + "grad_norm": 0.6288784998185222, + "learning_rate": 1.240338641841833e-06, + "loss": 0.24596458673477173, + "memory(GiB)": 77.0, + "step": 6419, + "token_acc": 0.9394949976179133, + "train_speed(iter/s)": 0.473444 + }, + { + "epoch": 2.0544, + "grad_norm": 0.7277272898550344, + "learning_rate": 1.2395769715965678e-06, + "loss": 0.2854454219341278, + "memory(GiB)": 77.0, + "step": 6420, + "token_acc": 0.917215102182196, + "train_speed(iter/s)": 0.47338 + }, + { + "epoch": 2.05472, + "grad_norm": 0.7632912942404323, + "learning_rate": 1.2388154581890832e-06, + "loss": 0.2853459119796753, + "memory(GiB)": 
77.0, + "step": 6421, + "token_acc": 0.919179229480737, + "train_speed(iter/s)": 0.473316 + }, + { + "epoch": 2.05504, + "grad_norm": 0.7464426673114839, + "learning_rate": 1.238054101714137e-06, + "loss": 0.3317272663116455, + "memory(GiB)": 77.0, + "step": 6422, + "token_acc": 0.8647002854424358, + "train_speed(iter/s)": 0.473249 + }, + { + "epoch": 2.05536, + "grad_norm": 0.7759076674677085, + "learning_rate": 1.2372929022664663e-06, + "loss": 0.26688534021377563, + "memory(GiB)": 77.0, + "step": 6423, + "token_acc": 0.9339143064633261, + "train_speed(iter/s)": 0.473186 + }, + { + "epoch": 2.05568, + "grad_norm": 0.7161583510516335, + "learning_rate": 1.236531859940789e-06, + "loss": 0.3327023983001709, + "memory(GiB)": 77.0, + "step": 6424, + "token_acc": 0.9335122242098987, + "train_speed(iter/s)": 0.473117 + }, + { + "epoch": 2.056, + "grad_norm": 0.716902653550518, + "learning_rate": 1.235770974831804e-06, + "loss": 0.3103593587875366, + "memory(GiB)": 77.0, + "step": 6425, + "token_acc": 0.8963075366717248, + "train_speed(iter/s)": 0.473054 + }, + { + "epoch": 2.05632, + "grad_norm": 0.6717950888410654, + "learning_rate": 1.2350102470341899e-06, + "loss": 0.23876360058784485, + "memory(GiB)": 77.0, + "step": 6426, + "token_acc": 0.8678775814879324, + "train_speed(iter/s)": 0.472989 + }, + { + "epoch": 2.05664, + "grad_norm": 0.7279255089352437, + "learning_rate": 1.2342496766426062e-06, + "loss": 0.35895830392837524, + "memory(GiB)": 77.0, + "step": 6427, + "token_acc": 0.9478802022559315, + "train_speed(iter/s)": 0.472922 + }, + { + "epoch": 2.05696, + "grad_norm": 0.6758133782194415, + "learning_rate": 1.2334892637516929e-06, + "loss": 0.33617186546325684, + "memory(GiB)": 77.0, + "step": 6428, + "token_acc": 0.9098428453267163, + "train_speed(iter/s)": 0.472839 + }, + { + "epoch": 2.05728, + "grad_norm": 0.8305263462264799, + "learning_rate": 1.2327290084560703e-06, + "loss": 0.35162660479545593, + "memory(GiB)": 77.0, + "step": 6429, + "token_acc": 0.9212317666126418, + "train_speed(iter/s)": 0.472776 + }, + { + "epoch": 2.0576, + "grad_norm": 1.379620596931194, + "learning_rate": 1.2319689108503386e-06, + "loss": 0.21797850728034973, + "memory(GiB)": 77.0, + "step": 6430, + "token_acc": 0.9682119205298013, + "train_speed(iter/s)": 0.472714 + }, + { + "epoch": 2.05792, + "grad_norm": 0.6536616113553144, + "learning_rate": 1.2312089710290786e-06, + "loss": 0.2798069417476654, + "memory(GiB)": 77.0, + "step": 6431, + "token_acc": 0.9144132318193752, + "train_speed(iter/s)": 0.472643 + }, + { + "epoch": 2.05824, + "grad_norm": 0.7683362529470031, + "learning_rate": 1.2304491890868521e-06, + "loss": 0.2869718670845032, + "memory(GiB)": 77.0, + "step": 6432, + "token_acc": 0.9555099247091033, + "train_speed(iter/s)": 0.472578 + }, + { + "epoch": 2.05856, + "grad_norm": 0.6690846737859663, + "learning_rate": 1.2296895651182005e-06, + "loss": 0.242498517036438, + "memory(GiB)": 77.0, + "step": 6433, + "token_acc": 0.9129052765416402, + "train_speed(iter/s)": 0.472517 + }, + { + "epoch": 2.05888, + "grad_norm": 0.7210161912987517, + "learning_rate": 1.2289300992176462e-06, + "loss": 0.24628865718841553, + "memory(GiB)": 77.0, + "step": 6434, + "token_acc": 0.9324324324324325, + "train_speed(iter/s)": 0.472455 + }, + { + "epoch": 2.0592, + "grad_norm": 0.6875697836737233, + "learning_rate": 1.2281707914796913e-06, + "loss": 0.3560788035392761, + "memory(GiB)": 77.0, + "step": 6435, + "token_acc": 0.8245578778135049, + "train_speed(iter/s)": 0.472388 + }, + { + "epoch": 2.05952, + 
"grad_norm": 0.6965140099799734, + "learning_rate": 1.2274116419988184e-06, + "loss": 0.27846381068229675, + "memory(GiB)": 77.0, + "step": 6436, + "token_acc": 0.9539425202652911, + "train_speed(iter/s)": 0.472317 + }, + { + "epoch": 2.05984, + "grad_norm": 0.7608861343802598, + "learning_rate": 1.2266526508694904e-06, + "loss": 0.3094162940979004, + "memory(GiB)": 77.0, + "step": 6437, + "token_acc": 0.9045438534695315, + "train_speed(iter/s)": 0.472255 + }, + { + "epoch": 2.06016, + "grad_norm": 0.7232103893043453, + "learning_rate": 1.2258938181861516e-06, + "loss": 0.35680630803108215, + "memory(GiB)": 77.0, + "step": 6438, + "token_acc": 0.9004248539564524, + "train_speed(iter/s)": 0.472193 + }, + { + "epoch": 2.06048, + "grad_norm": 0.7305905572482368, + "learning_rate": 1.2251351440432238e-06, + "loss": 0.3375553488731384, + "memory(GiB)": 77.0, + "step": 6439, + "token_acc": 0.8418410041841005, + "train_speed(iter/s)": 0.472128 + }, + { + "epoch": 2.0608, + "grad_norm": 0.7697019277374921, + "learning_rate": 1.224376628535112e-06, + "loss": 0.394290953874588, + "memory(GiB)": 77.0, + "step": 6440, + "token_acc": 0.8329417293233082, + "train_speed(iter/s)": 0.472055 + }, + { + "epoch": 2.06112, + "grad_norm": 0.7693877325214581, + "learning_rate": 1.2236182717561987e-06, + "loss": 0.34729376435279846, + "memory(GiB)": 77.0, + "step": 6441, + "token_acc": 0.8883297387970285, + "train_speed(iter/s)": 0.471991 + }, + { + "epoch": 2.06144, + "grad_norm": 0.6987511180912209, + "learning_rate": 1.2228600738008507e-06, + "loss": 0.2982039451599121, + "memory(GiB)": 77.0, + "step": 6442, + "token_acc": 0.9555027173913043, + "train_speed(iter/s)": 0.471929 + }, + { + "epoch": 2.06176, + "grad_norm": 0.71650682691757, + "learning_rate": 1.2221020347634114e-06, + "loss": 0.30986708402633667, + "memory(GiB)": 77.0, + "step": 6443, + "token_acc": 0.9255264509501797, + "train_speed(iter/s)": 0.47186 + }, + { + "epoch": 2.06208, + "grad_norm": 0.7159241832473652, + "learning_rate": 1.2213441547382057e-06, + "loss": 0.30876439809799194, + "memory(GiB)": 77.0, + "step": 6444, + "token_acc": 0.9359673024523161, + "train_speed(iter/s)": 0.471787 + }, + { + "epoch": 2.0624, + "grad_norm": 0.720431944403943, + "learning_rate": 1.2205864338195387e-06, + "loss": 0.3203968405723572, + "memory(GiB)": 77.0, + "step": 6445, + "token_acc": 0.9088643645425418, + "train_speed(iter/s)": 0.471725 + }, + { + "epoch": 2.06272, + "grad_norm": 0.7031241861363186, + "learning_rate": 1.219828872101695e-06, + "loss": 0.3167697787284851, + "memory(GiB)": 77.0, + "step": 6446, + "token_acc": 0.9419784400760939, + "train_speed(iter/s)": 0.471655 + }, + { + "epoch": 2.06304, + "grad_norm": 0.7325561524752764, + "learning_rate": 1.219071469678941e-06, + "loss": 0.28954529762268066, + "memory(GiB)": 77.0, + "step": 6447, + "token_acc": 0.9102233676975945, + "train_speed(iter/s)": 0.471593 + }, + { + "epoch": 2.06336, + "grad_norm": 0.6673485511560794, + "learning_rate": 1.2183142266455217e-06, + "loss": 0.2564908266067505, + "memory(GiB)": 77.0, + "step": 6448, + "token_acc": 0.9518633540372671, + "train_speed(iter/s)": 0.471527 + }, + { + "epoch": 2.06368, + "grad_norm": 0.7049158543475529, + "learning_rate": 1.2175571430956631e-06, + "loss": 0.29572397470474243, + "memory(GiB)": 77.0, + "step": 6449, + "token_acc": 0.8723342097575226, + "train_speed(iter/s)": 0.471461 + }, + { + "epoch": 2.064, + "grad_norm": 0.668098418971888, + "learning_rate": 1.2168002191235709e-06, + "loss": 0.2805784046649933, + "memory(GiB)": 77.0, + 
"step": 6450, + "token_acc": 0.8872970391595033, + "train_speed(iter/s)": 0.471396 + }, + { + "epoch": 2.06432, + "grad_norm": 0.7726733292518985, + "learning_rate": 1.216043454823431e-06, + "loss": 0.28955531120300293, + "memory(GiB)": 77.0, + "step": 6451, + "token_acc": 0.8251533742331288, + "train_speed(iter/s)": 0.471327 + }, + { + "epoch": 2.06464, + "grad_norm": 0.7405786902022111, + "learning_rate": 1.215286850289411e-06, + "loss": 0.2656901180744171, + "memory(GiB)": 77.0, + "step": 6452, + "token_acc": 0.9246305418719212, + "train_speed(iter/s)": 0.471265 + }, + { + "epoch": 2.06496, + "grad_norm": 0.6905793746647426, + "learning_rate": 1.2145304056156551e-06, + "loss": 0.3192884027957916, + "memory(GiB)": 77.0, + "step": 6453, + "token_acc": 0.9046467902229385, + "train_speed(iter/s)": 0.471192 + }, + { + "epoch": 2.06528, + "grad_norm": 0.7764800735144068, + "learning_rate": 1.2137741208962905e-06, + "loss": 0.30235183238983154, + "memory(GiB)": 77.0, + "step": 6454, + "token_acc": 0.9334898278560251, + "train_speed(iter/s)": 0.471128 + }, + { + "epoch": 2.0656, + "grad_norm": 0.7067348209223553, + "learning_rate": 1.213017996225424e-06, + "loss": 0.31283390522003174, + "memory(GiB)": 77.0, + "step": 6455, + "token_acc": 0.9546807806814422, + "train_speed(iter/s)": 0.471057 + }, + { + "epoch": 2.06592, + "grad_norm": 0.7277867047347144, + "learning_rate": 1.212262031697142e-06, + "loss": 0.3490220606327057, + "memory(GiB)": 77.0, + "step": 6456, + "token_acc": 0.8509364742228229, + "train_speed(iter/s)": 0.470987 + }, + { + "epoch": 2.06624, + "grad_norm": 0.6626408446895574, + "learning_rate": 1.2115062274055115e-06, + "loss": 0.25988462567329407, + "memory(GiB)": 77.0, + "step": 6457, + "token_acc": 0.9206019719771665, + "train_speed(iter/s)": 0.470919 + }, + { + "epoch": 2.06656, + "grad_norm": 0.8106275782325921, + "learning_rate": 1.2107505834445776e-06, + "loss": 0.34161582589149475, + "memory(GiB)": 77.0, + "step": 6458, + "token_acc": 0.9190421892816419, + "train_speed(iter/s)": 0.470854 + }, + { + "epoch": 2.06688, + "grad_norm": 0.7221899140068501, + "learning_rate": 1.2099950999083697e-06, + "loss": 0.323128879070282, + "memory(GiB)": 77.0, + "step": 6459, + "token_acc": 0.9273801250868658, + "train_speed(iter/s)": 0.470783 + }, + { + "epoch": 2.0672, + "grad_norm": 0.7234894452330041, + "learning_rate": 1.2092397768908935e-06, + "loss": 0.38392373919487, + "memory(GiB)": 77.0, + "step": 6460, + "token_acc": 0.9456282145481264, + "train_speed(iter/s)": 0.470716 + }, + { + "epoch": 2.06752, + "grad_norm": 0.647772569525249, + "learning_rate": 1.2084846144861353e-06, + "loss": 0.3037400245666504, + "memory(GiB)": 77.0, + "step": 6461, + "token_acc": 0.9417798724701968, + "train_speed(iter/s)": 0.47065 + }, + { + "epoch": 2.06784, + "grad_norm": 0.7421508393018614, + "learning_rate": 1.2077296127880625e-06, + "loss": 0.36238422989845276, + "memory(GiB)": 77.0, + "step": 6462, + "token_acc": 0.8760305343511451, + "train_speed(iter/s)": 0.470582 + }, + { + "epoch": 2.0681599999999998, + "grad_norm": 0.7990673232364526, + "learning_rate": 1.2069747718906222e-06, + "loss": 0.32919570803642273, + "memory(GiB)": 77.0, + "step": 6463, + "token_acc": 0.8751076658053403, + "train_speed(iter/s)": 0.470507 + }, + { + "epoch": 2.06848, + "grad_norm": 0.6943379287255258, + "learning_rate": 1.2062200918877404e-06, + "loss": 0.29082000255584717, + "memory(GiB)": 77.0, + "step": 6464, + "token_acc": 0.9368758915834522, + "train_speed(iter/s)": 0.470442 + }, + { + "epoch": 2.0688, + 
"grad_norm": 0.7001413596079493, + "learning_rate": 1.2054655728733246e-06, + "loss": 0.3410773277282715, + "memory(GiB)": 77.0, + "step": 6465, + "token_acc": 0.9326424870466321, + "train_speed(iter/s)": 0.470363 + }, + { + "epoch": 2.06912, + "grad_norm": 0.7429148075396003, + "learning_rate": 1.2047112149412619e-06, + "loss": 0.30144333839416504, + "memory(GiB)": 77.0, + "step": 6466, + "token_acc": 0.8900030385900942, + "train_speed(iter/s)": 0.470301 + }, + { + "epoch": 2.06944, + "grad_norm": 0.718569731897352, + "learning_rate": 1.203957018185419e-06, + "loss": 0.32253962755203247, + "memory(GiB)": 77.0, + "step": 6467, + "token_acc": 0.8874868559411146, + "train_speed(iter/s)": 0.470241 + }, + { + "epoch": 2.06976, + "grad_norm": 0.6358509120716644, + "learning_rate": 1.2032029826996414e-06, + "loss": 0.2594336271286011, + "memory(GiB)": 77.0, + "step": 6468, + "token_acc": 0.9129662522202486, + "train_speed(iter/s)": 0.47016 + }, + { + "epoch": 2.07008, + "grad_norm": 0.8400022085652068, + "learning_rate": 1.2024491085777565e-06, + "loss": 0.31628847122192383, + "memory(GiB)": 77.0, + "step": 6469, + "token_acc": 0.922003659652333, + "train_speed(iter/s)": 0.470097 + }, + { + "epoch": 2.0704, + "grad_norm": 0.7494369410478148, + "learning_rate": 1.201695395913571e-06, + "loss": 0.2875061631202698, + "memory(GiB)": 77.0, + "step": 6470, + "token_acc": 0.8402126328955597, + "train_speed(iter/s)": 0.470035 + }, + { + "epoch": 2.07072, + "grad_norm": 0.6905459876997785, + "learning_rate": 1.2009418448008713e-06, + "loss": 0.2748047709465027, + "memory(GiB)": 77.0, + "step": 6471, + "token_acc": 0.909799751905015, + "train_speed(iter/s)": 0.469972 + }, + { + "epoch": 2.07104, + "grad_norm": 0.6859633497627455, + "learning_rate": 1.2001884553334236e-06, + "loss": 0.3084254264831543, + "memory(GiB)": 77.0, + "step": 6472, + "token_acc": 0.8979261179520415, + "train_speed(iter/s)": 0.469891 + }, + { + "epoch": 2.07136, + "grad_norm": 0.7036901451274501, + "learning_rate": 1.1994352276049741e-06, + "loss": 0.32633334398269653, + "memory(GiB)": 77.0, + "step": 6473, + "token_acc": 0.9568062827225131, + "train_speed(iter/s)": 0.469821 + }, + { + "epoch": 2.07168, + "grad_norm": 0.6993372863406918, + "learning_rate": 1.1986821617092495e-06, + "loss": 0.3542531728744507, + "memory(GiB)": 77.0, + "step": 6474, + "token_acc": 0.8807692307692307, + "train_speed(iter/s)": 0.469748 + }, + { + "epoch": 2.072, + "grad_norm": 0.7043135061614704, + "learning_rate": 1.1979292577399544e-06, + "loss": 0.31862789392471313, + "memory(GiB)": 77.0, + "step": 6475, + "token_acc": 0.8671875, + "train_speed(iter/s)": 0.469685 + }, + { + "epoch": 2.07232, + "grad_norm": 0.6724980014504722, + "learning_rate": 1.1971765157907764e-06, + "loss": 0.2823199927806854, + "memory(GiB)": 77.0, + "step": 6476, + "token_acc": 0.8864280195724955, + "train_speed(iter/s)": 0.469617 + }, + { + "epoch": 2.07264, + "grad_norm": 0.6792535955811495, + "learning_rate": 1.1964239359553803e-06, + "loss": 0.23710300028324127, + "memory(GiB)": 77.0, + "step": 6477, + "token_acc": 0.9403063919704173, + "train_speed(iter/s)": 0.46955 + }, + { + "epoch": 2.07296, + "grad_norm": 0.6950439102587119, + "learning_rate": 1.1956715183274118e-06, + "loss": 0.34585121273994446, + "memory(GiB)": 77.0, + "step": 6478, + "token_acc": 0.9132915002852253, + "train_speed(iter/s)": 0.469486 + }, + { + "epoch": 2.07328, + "grad_norm": 0.7178569495338571, + "learning_rate": 1.1949192630004962e-06, + "loss": 0.3402740955352783, + "memory(GiB)": 77.0, + 
"step": 6479, + "token_acc": 0.9130990415335464, + "train_speed(iter/s)": 0.469411 + }, + { + "epoch": 2.0736, + "grad_norm": 0.7436029167624317, + "learning_rate": 1.1941671700682385e-06, + "loss": 0.41820797324180603, + "memory(GiB)": 77.0, + "step": 6480, + "token_acc": 0.8389494549058474, + "train_speed(iter/s)": 0.469349 + }, + { + "epoch": 2.07392, + "grad_norm": 0.6694541594606146, + "learning_rate": 1.1934152396242245e-06, + "loss": 0.2871382236480713, + "memory(GiB)": 77.0, + "step": 6481, + "token_acc": 0.9020039100684262, + "train_speed(iter/s)": 0.469284 + }, + { + "epoch": 2.07424, + "grad_norm": 0.7263542591643817, + "learning_rate": 1.1926634717620175e-06, + "loss": 0.23772652447223663, + "memory(GiB)": 77.0, + "step": 6482, + "token_acc": 0.950402576489533, + "train_speed(iter/s)": 0.469224 + }, + { + "epoch": 2.07456, + "grad_norm": 0.7278587285046302, + "learning_rate": 1.1919118665751623e-06, + "loss": 0.31506842374801636, + "memory(GiB)": 77.0, + "step": 6483, + "token_acc": 0.9272882805816938, + "train_speed(iter/s)": 0.469158 + }, + { + "epoch": 2.07488, + "grad_norm": 0.6545676453444585, + "learning_rate": 1.1911604241571834e-06, + "loss": 0.240620419383049, + "memory(GiB)": 77.0, + "step": 6484, + "token_acc": 0.9138785625774474, + "train_speed(iter/s)": 0.469094 + }, + { + "epoch": 2.0752, + "grad_norm": 0.7335710473699293, + "learning_rate": 1.1904091446015844e-06, + "loss": 0.2591930031776428, + "memory(GiB)": 77.0, + "step": 6485, + "token_acc": 0.930419921875, + "train_speed(iter/s)": 0.469028 + }, + { + "epoch": 2.07552, + "grad_norm": 0.704311433722728, + "learning_rate": 1.1896580280018497e-06, + "loss": 0.3245595693588257, + "memory(GiB)": 77.0, + "step": 6486, + "token_acc": 0.8734956619087602, + "train_speed(iter/s)": 0.468958 + }, + { + "epoch": 2.07584, + "grad_norm": 0.7352994007153757, + "learning_rate": 1.1889070744514425e-06, + "loss": 0.32086649537086487, + "memory(GiB)": 77.0, + "step": 6487, + "token_acc": 0.9164611590628853, + "train_speed(iter/s)": 0.468886 + }, + { + "epoch": 2.07616, + "grad_norm": 0.7357214465868117, + "learning_rate": 1.1881562840438055e-06, + "loss": 0.34716975688934326, + "memory(GiB)": 77.0, + "step": 6488, + "token_acc": 0.9461747807680677, + "train_speed(iter/s)": 0.468823 + }, + { + "epoch": 2.07648, + "grad_norm": 0.7086802126738599, + "learning_rate": 1.1874056568723622e-06, + "loss": 0.32358235120773315, + "memory(GiB)": 77.0, + "step": 6489, + "token_acc": 0.8702330508474576, + "train_speed(iter/s)": 0.46876 + }, + { + "epoch": 2.0768, + "grad_norm": 0.7228133667461032, + "learning_rate": 1.1866551930305146e-06, + "loss": 0.2852606475353241, + "memory(GiB)": 77.0, + "step": 6490, + "token_acc": 0.8747528015820699, + "train_speed(iter/s)": 0.468695 + }, + { + "epoch": 2.07712, + "grad_norm": 0.7168112308314956, + "learning_rate": 1.1859048926116451e-06, + "loss": 0.29858994483947754, + "memory(GiB)": 77.0, + "step": 6491, + "token_acc": 0.9135802469135802, + "train_speed(iter/s)": 0.468634 + }, + { + "epoch": 2.07744, + "grad_norm": 0.7105598107156338, + "learning_rate": 1.1851547557091146e-06, + "loss": 0.29086506366729736, + "memory(GiB)": 77.0, + "step": 6492, + "token_acc": 0.9362005126744517, + "train_speed(iter/s)": 0.468569 + }, + { + "epoch": 2.07776, + "grad_norm": 0.7072128135724818, + "learning_rate": 1.1844047824162663e-06, + "loss": 0.2802824378013611, + "memory(GiB)": 77.0, + "step": 6493, + "token_acc": 0.9437472955430549, + "train_speed(iter/s)": 0.468496 + }, + { + "epoch": 2.07808, + "grad_norm": 
0.6725104313043413, + "learning_rate": 1.183654972826421e-06, + "loss": 0.3106282949447632, + "memory(GiB)": 77.0, + "step": 6494, + "token_acc": 0.875, + "train_speed(iter/s)": 0.468426 + }, + { + "epoch": 2.0784, + "grad_norm": 0.7038921993531897, + "learning_rate": 1.1829053270328802e-06, + "loss": 0.37401247024536133, + "memory(GiB)": 77.0, + "step": 6495, + "token_acc": 0.7987863302459278, + "train_speed(iter/s)": 0.468365 + }, + { + "epoch": 2.07872, + "grad_norm": 0.7882160966005297, + "learning_rate": 1.1821558451289221e-06, + "loss": 0.3012027144432068, + "memory(GiB)": 77.0, + "step": 6496, + "token_acc": 0.9182992913714048, + "train_speed(iter/s)": 0.468301 + }, + { + "epoch": 2.07904, + "grad_norm": 0.6814501086191862, + "learning_rate": 1.181406527207808e-06, + "loss": 0.30127793550491333, + "memory(GiB)": 77.0, + "step": 6497, + "token_acc": 0.9187206572769953, + "train_speed(iter/s)": 0.468232 + }, + { + "epoch": 2.07936, + "grad_norm": 0.787227756805786, + "learning_rate": 1.1806573733627775e-06, + "loss": 0.2747187316417694, + "memory(GiB)": 77.0, + "step": 6498, + "token_acc": 0.9408536585365853, + "train_speed(iter/s)": 0.468168 + }, + { + "epoch": 2.07968, + "grad_norm": 0.6891731537937094, + "learning_rate": 1.1799083836870495e-06, + "loss": 0.2706025242805481, + "memory(GiB)": 77.0, + "step": 6499, + "token_acc": 0.9235746057420138, + "train_speed(iter/s)": 0.468098 + }, + { + "epoch": 2.08, + "grad_norm": 0.6498909869668387, + "learning_rate": 1.179159558273823e-06, + "loss": 0.2746020555496216, + "memory(GiB)": 77.0, + "step": 6500, + "token_acc": 0.913923433308949, + "train_speed(iter/s)": 0.468018 + }, + { + "epoch": 2.08032, + "grad_norm": 0.6802040420751668, + "learning_rate": 1.1784108972162764e-06, + "loss": 0.33788740634918213, + "memory(GiB)": 77.0, + "step": 6501, + "token_acc": 0.9271440466278101, + "train_speed(iter/s)": 0.467949 + }, + { + "epoch": 2.08064, + "grad_norm": 0.7187650780203559, + "learning_rate": 1.1776624006075674e-06, + "loss": 0.3340682089328766, + "memory(GiB)": 77.0, + "step": 6502, + "token_acc": 0.9389615537059056, + "train_speed(iter/s)": 0.467889 + }, + { + "epoch": 2.08096, + "grad_norm": 0.6612517446542452, + "learning_rate": 1.1769140685408336e-06, + "loss": 0.30543428659439087, + "memory(GiB)": 77.0, + "step": 6503, + "token_acc": 0.9028647315499905, + "train_speed(iter/s)": 0.467822 + }, + { + "epoch": 2.08128, + "grad_norm": 0.8593264854768976, + "learning_rate": 1.176165901109192e-06, + "loss": 0.33701109886169434, + "memory(GiB)": 77.0, + "step": 6504, + "token_acc": 0.9122211445198836, + "train_speed(iter/s)": 0.467759 + }, + { + "epoch": 2.0816, + "grad_norm": 0.6854237387062415, + "learning_rate": 1.1754178984057389e-06, + "loss": 0.3363315165042877, + "memory(GiB)": 77.0, + "step": 6505, + "token_acc": 0.8941085868309588, + "train_speed(iter/s)": 0.467695 + }, + { + "epoch": 2.08192, + "grad_norm": 0.80196042361267, + "learning_rate": 1.17467006052355e-06, + "loss": 0.3013722002506256, + "memory(GiB)": 77.0, + "step": 6506, + "token_acc": 0.9571209800918836, + "train_speed(iter/s)": 0.467632 + }, + { + "epoch": 2.08224, + "grad_norm": 0.7366989719485593, + "learning_rate": 1.173922387555681e-06, + "loss": 0.3132396340370178, + "memory(GiB)": 77.0, + "step": 6507, + "token_acc": 0.8970775095298602, + "train_speed(iter/s)": 0.467569 + }, + { + "epoch": 2.08256, + "grad_norm": 0.8169010059306674, + "learning_rate": 1.173174879595166e-06, + "loss": 0.35111910104751587, + "memory(GiB)": 77.0, + "step": 6508, + "token_acc": 
0.8746376811594203, + "train_speed(iter/s)": 0.467505 + }, + { + "epoch": 2.08288, + "grad_norm": 0.7136935030954695, + "learning_rate": 1.1724275367350217e-06, + "loss": 0.3475993871688843, + "memory(GiB)": 77.0, + "step": 6509, + "token_acc": 0.894559730790802, + "train_speed(iter/s)": 0.467444 + }, + { + "epoch": 2.0832, + "grad_norm": 0.7137818016544945, + "learning_rate": 1.1716803590682407e-06, + "loss": 0.29440420866012573, + "memory(GiB)": 77.0, + "step": 6510, + "token_acc": 0.94975404075896, + "train_speed(iter/s)": 0.467385 + }, + { + "epoch": 2.08352, + "grad_norm": 0.7897901337640698, + "learning_rate": 1.1709333466877955e-06, + "loss": 0.32654306292533875, + "memory(GiB)": 77.0, + "step": 6511, + "token_acc": 0.9039641340254837, + "train_speed(iter/s)": 0.467326 + }, + { + "epoch": 2.08384, + "grad_norm": 0.6524453321560509, + "learning_rate": 1.1701864996866397e-06, + "loss": 0.2550208568572998, + "memory(GiB)": 77.0, + "step": 6512, + "token_acc": 0.8338469440164356, + "train_speed(iter/s)": 0.467257 + }, + { + "epoch": 2.08416, + "grad_norm": 0.7822245729444602, + "learning_rate": 1.1694398181577048e-06, + "loss": 0.35225731134414673, + "memory(GiB)": 77.0, + "step": 6513, + "token_acc": 0.9224704336399474, + "train_speed(iter/s)": 0.467192 + }, + { + "epoch": 2.08448, + "grad_norm": 0.7141569287108378, + "learning_rate": 1.1686933021939032e-06, + "loss": 0.26153770089149475, + "memory(GiB)": 77.0, + "step": 6514, + "token_acc": 0.9219330855018587, + "train_speed(iter/s)": 0.467133 + }, + { + "epoch": 2.0848, + "grad_norm": 0.7210988121424857, + "learning_rate": 1.167946951888125e-06, + "loss": 0.2741261124610901, + "memory(GiB)": 77.0, + "step": 6515, + "token_acc": 0.9028094153378892, + "train_speed(iter/s)": 0.467073 + }, + { + "epoch": 2.08512, + "grad_norm": 0.7809357047834847, + "learning_rate": 1.1672007673332411e-06, + "loss": 0.2926490902900696, + "memory(GiB)": 77.0, + "step": 6516, + "token_acc": 0.9243466299862448, + "train_speed(iter/s)": 0.467012 + }, + { + "epoch": 2.08544, + "grad_norm": 0.6961985867306654, + "learning_rate": 1.1664547486221012e-06, + "loss": 0.32792550325393677, + "memory(GiB)": 77.0, + "step": 6517, + "token_acc": 0.8765466816647919, + "train_speed(iter/s)": 0.466952 + }, + { + "epoch": 2.08576, + "grad_norm": 0.636866048003604, + "learning_rate": 1.165708895847534e-06, + "loss": 0.3207111656665802, + "memory(GiB)": 77.0, + "step": 6518, + "token_acc": 0.9016489988221437, + "train_speed(iter/s)": 0.466881 + }, + { + "epoch": 2.08608, + "grad_norm": 0.7506562988552283, + "learning_rate": 1.1649632091023486e-06, + "loss": 0.2866784632205963, + "memory(GiB)": 77.0, + "step": 6519, + "token_acc": 0.873973468098547, + "train_speed(iter/s)": 0.466821 + }, + { + "epoch": 2.0864, + "grad_norm": 0.7926251290613253, + "learning_rate": 1.1642176884793321e-06, + "loss": 0.2916308045387268, + "memory(GiB)": 77.0, + "step": 6520, + "token_acc": 0.9109610070617132, + "train_speed(iter/s)": 0.466749 + }, + { + "epoch": 2.08672, + "grad_norm": 0.7652245268511009, + "learning_rate": 1.163472334071252e-06, + "loss": 0.32505035400390625, + "memory(GiB)": 77.0, + "step": 6521, + "token_acc": 0.8947368421052632, + "train_speed(iter/s)": 0.466688 + }, + { + "epoch": 2.08704, + "grad_norm": 0.6908761509721987, + "learning_rate": 1.1627271459708548e-06, + "loss": 0.26965272426605225, + "memory(GiB)": 77.0, + "step": 6522, + "token_acc": 0.9400802075961312, + "train_speed(iter/s)": 0.466617 + }, + { + "epoch": 2.08736, + "grad_norm": 0.6437890188245378, + 
"learning_rate": 1.161982124270866e-06, + "loss": 0.33045944571495056, + "memory(GiB)": 77.0, + "step": 6523, + "token_acc": 0.8763080786940143, + "train_speed(iter/s)": 0.466547 + }, + { + "epoch": 2.08768, + "grad_norm": 0.731479015082844, + "learning_rate": 1.1612372690639906e-06, + "loss": 0.3419252932071686, + "memory(GiB)": 77.0, + "step": 6524, + "token_acc": 0.8895027624309392, + "train_speed(iter/s)": 0.46648 + }, + { + "epoch": 2.088, + "grad_norm": 0.7440486057223699, + "learning_rate": 1.1604925804429128e-06, + "loss": 0.38466066122055054, + "memory(GiB)": 77.0, + "step": 6525, + "token_acc": 0.8491683754841649, + "train_speed(iter/s)": 0.466409 + }, + { + "epoch": 2.08832, + "grad_norm": 0.6641055115459733, + "learning_rate": 1.1597480585002968e-06, + "loss": 0.2585703134536743, + "memory(GiB)": 77.0, + "step": 6526, + "token_acc": 0.9452997052079921, + "train_speed(iter/s)": 0.466348 + }, + { + "epoch": 2.08864, + "grad_norm": 0.7090575648407076, + "learning_rate": 1.159003703328785e-06, + "loss": 0.3372510075569153, + "memory(GiB)": 77.0, + "step": 6527, + "token_acc": 0.8753327417923691, + "train_speed(iter/s)": 0.466288 + }, + { + "epoch": 2.08896, + "grad_norm": 0.7119259868815948, + "learning_rate": 1.158259515020999e-06, + "loss": 0.28889524936676025, + "memory(GiB)": 77.0, + "step": 6528, + "token_acc": 0.8755910165484634, + "train_speed(iter/s)": 0.466228 + }, + { + "epoch": 2.08928, + "grad_norm": 0.7131452010861338, + "learning_rate": 1.157515493669541e-06, + "loss": 0.3236254155635834, + "memory(GiB)": 77.0, + "step": 6529, + "token_acc": 0.8835946924004825, + "train_speed(iter/s)": 0.466166 + }, + { + "epoch": 2.0896, + "grad_norm": 0.6854330458867105, + "learning_rate": 1.1567716393669912e-06, + "loss": 0.28724247217178345, + "memory(GiB)": 77.0, + "step": 6530, + "token_acc": 0.9381956649091974, + "train_speed(iter/s)": 0.466095 + }, + { + "epoch": 2.08992, + "grad_norm": 0.659714265949395, + "learning_rate": 1.1560279522059093e-06, + "loss": 0.30959463119506836, + "memory(GiB)": 77.0, + "step": 6531, + "token_acc": 0.8795415752255548, + "train_speed(iter/s)": 0.466035 + }, + { + "epoch": 2.09024, + "grad_norm": 0.6631829142383076, + "learning_rate": 1.1552844322788346e-06, + "loss": 0.253915011882782, + "memory(GiB)": 77.0, + "step": 6532, + "token_acc": 0.9036283799399122, + "train_speed(iter/s)": 0.465973 + }, + { + "epoch": 2.09056, + "grad_norm": 0.7164091058953126, + "learning_rate": 1.1545410796782845e-06, + "loss": 0.33138948678970337, + "memory(GiB)": 77.0, + "step": 6533, + "token_acc": 0.9129878438899552, + "train_speed(iter/s)": 0.46591 + }, + { + "epoch": 2.09088, + "grad_norm": 0.7350565482809911, + "learning_rate": 1.1537978944967571e-06, + "loss": 0.27137887477874756, + "memory(GiB)": 77.0, + "step": 6534, + "token_acc": 0.9450915141430949, + "train_speed(iter/s)": 0.465843 + }, + { + "epoch": 2.0912, + "grad_norm": 0.6450723981078627, + "learning_rate": 1.1530548768267285e-06, + "loss": 0.2637596130371094, + "memory(GiB)": 77.0, + "step": 6535, + "token_acc": 0.9003800217155266, + "train_speed(iter/s)": 0.465781 + }, + { + "epoch": 2.09152, + "grad_norm": 0.721610312353985, + "learning_rate": 1.1523120267606544e-06, + "loss": 0.3289123773574829, + "memory(GiB)": 77.0, + "step": 6536, + "token_acc": 0.90721940214326, + "train_speed(iter/s)": 0.465714 + }, + { + "epoch": 2.09184, + "grad_norm": 0.6957420516109278, + "learning_rate": 1.1515693443909697e-06, + "loss": 0.32420697808265686, + "memory(GiB)": 77.0, + "step": 6537, + "token_acc": 
0.9435483870967742, + "train_speed(iter/s)": 0.465644 + }, + { + "epoch": 2.09216, + "grad_norm": 0.6805618988464107, + "learning_rate": 1.1508268298100886e-06, + "loss": 0.41981151700019836, + "memory(GiB)": 77.0, + "step": 6538, + "token_acc": 0.9379345866597991, + "train_speed(iter/s)": 0.465578 + }, + { + "epoch": 2.09248, + "grad_norm": 0.729700314510941, + "learning_rate": 1.1500844831104035e-06, + "loss": 0.3133394420146942, + "memory(GiB)": 77.0, + "step": 6539, + "token_acc": 0.9001349527665317, + "train_speed(iter/s)": 0.465511 + }, + { + "epoch": 2.0928, + "grad_norm": 0.7426220840740886, + "learning_rate": 1.149342304384287e-06, + "loss": 0.36235135793685913, + "memory(GiB)": 77.0, + "step": 6540, + "token_acc": 0.8545760808534532, + "train_speed(iter/s)": 0.465448 + }, + { + "epoch": 2.09312, + "grad_norm": 0.7362015632760794, + "learning_rate": 1.1486002937240906e-06, + "loss": 0.2999619245529175, + "memory(GiB)": 77.0, + "step": 6541, + "token_acc": 0.9408602150537635, + "train_speed(iter/s)": 0.465389 + }, + { + "epoch": 2.09344, + "grad_norm": 0.6976385051733893, + "learning_rate": 1.1478584512221443e-06, + "loss": 0.2897375822067261, + "memory(GiB)": 77.0, + "step": 6542, + "token_acc": 0.9089974293059125, + "train_speed(iter/s)": 0.465328 + }, + { + "epoch": 2.09376, + "grad_norm": 0.6960578040705969, + "learning_rate": 1.1471167769707574e-06, + "loss": 0.32226037979125977, + "memory(GiB)": 77.0, + "step": 6543, + "token_acc": 0.8891705069124424, + "train_speed(iter/s)": 0.465264 + }, + { + "epoch": 2.09408, + "grad_norm": 0.6333836451646989, + "learning_rate": 1.1463752710622187e-06, + "loss": 0.2464894950389862, + "memory(GiB)": 77.0, + "step": 6544, + "token_acc": 0.927098674521355, + "train_speed(iter/s)": 0.465203 + }, + { + "epoch": 2.0944, + "grad_norm": 0.6829875249255988, + "learning_rate": 1.1456339335887955e-06, + "loss": 0.3351391851902008, + "memory(GiB)": 77.0, + "step": 6545, + "token_acc": 0.8376088491409743, + "train_speed(iter/s)": 0.465132 + }, + { + "epoch": 2.09472, + "grad_norm": 0.7121529696451253, + "learning_rate": 1.144892764642735e-06, + "loss": 0.305975079536438, + "memory(GiB)": 77.0, + "step": 6546, + "token_acc": 0.9014296463506396, + "train_speed(iter/s)": 0.465069 + }, + { + "epoch": 2.09504, + "grad_norm": 0.7799233844486866, + "learning_rate": 1.1441517643162617e-06, + "loss": 0.2779988646507263, + "memory(GiB)": 77.0, + "step": 6547, + "token_acc": 0.9239956568946797, + "train_speed(iter/s)": 0.465009 + }, + { + "epoch": 2.09536, + "grad_norm": 0.7346558471157495, + "learning_rate": 1.1434109327015813e-06, + "loss": 0.2667543590068817, + "memory(GiB)": 77.0, + "step": 6548, + "token_acc": 0.9265064437478231, + "train_speed(iter/s)": 0.464944 + }, + { + "epoch": 2.09568, + "grad_norm": 0.7441284240225634, + "learning_rate": 1.1426702698908768e-06, + "loss": 0.287833034992218, + "memory(GiB)": 77.0, + "step": 6549, + "token_acc": 0.9240963855421687, + "train_speed(iter/s)": 0.464882 + }, + { + "epoch": 2.096, + "grad_norm": 0.8030025917225985, + "learning_rate": 1.1419297759763113e-06, + "loss": 0.2674480080604553, + "memory(GiB)": 77.0, + "step": 6550, + "token_acc": 0.9388297872340425, + "train_speed(iter/s)": 0.464818 + }, + { + "epoch": 2.09632, + "grad_norm": 0.6617816334644592, + "learning_rate": 1.141189451050026e-06, + "loss": 0.22493749856948853, + "memory(GiB)": 77.0, + "step": 6551, + "token_acc": 0.9309989701338826, + "train_speed(iter/s)": 0.464754 + }, + { + "epoch": 2.09664, + "grad_norm": 0.7421785630993529, + 
"learning_rate": 1.1404492952041416e-06, + "loss": 0.24537362158298492, + "memory(GiB)": 77.0, + "step": 6552, + "token_acc": 0.9527921704087507, + "train_speed(iter/s)": 0.464695 + }, + { + "epoch": 2.09696, + "grad_norm": 0.7441089570200027, + "learning_rate": 1.1397093085307586e-06, + "loss": 0.36974918842315674, + "memory(GiB)": 77.0, + "step": 6553, + "token_acc": 0.8791083166619034, + "train_speed(iter/s)": 0.464634 + }, + { + "epoch": 2.09728, + "grad_norm": 0.7352751376929222, + "learning_rate": 1.1389694911219533e-06, + "loss": 0.3283521234989166, + "memory(GiB)": 77.0, + "step": 6554, + "token_acc": 0.9548435923309788, + "train_speed(iter/s)": 0.46457 + }, + { + "epoch": 2.0976, + "grad_norm": 0.7523426159071702, + "learning_rate": 1.138229843069785e-06, + "loss": 0.256580650806427, + "memory(GiB)": 77.0, + "step": 6555, + "token_acc": 0.9658333333333333, + "train_speed(iter/s)": 0.464505 + }, + { + "epoch": 2.09792, + "grad_norm": 0.6949054270774747, + "learning_rate": 1.1374903644662894e-06, + "loss": 0.2505316734313965, + "memory(GiB)": 77.0, + "step": 6556, + "token_acc": 0.9637726420986883, + "train_speed(iter/s)": 0.464446 + }, + { + "epoch": 2.09824, + "grad_norm": 2.056430977598926, + "learning_rate": 1.1367510554034821e-06, + "loss": 0.24527853727340698, + "memory(GiB)": 77.0, + "step": 6557, + "token_acc": 0.9584021129085507, + "train_speed(iter/s)": 0.464382 + }, + { + "epoch": 2.09856, + "grad_norm": 0.7305632948966911, + "learning_rate": 1.1360119159733575e-06, + "loss": 0.3054274320602417, + "memory(GiB)": 77.0, + "step": 6558, + "token_acc": 0.8677379480840544, + "train_speed(iter/s)": 0.464312 + }, + { + "epoch": 2.09888, + "grad_norm": 0.7369700056403584, + "learning_rate": 1.1352729462678878e-06, + "loss": 0.33610010147094727, + "memory(GiB)": 77.0, + "step": 6559, + "token_acc": 0.8916163927605575, + "train_speed(iter/s)": 0.46425 + }, + { + "epoch": 2.0992, + "grad_norm": 0.7511010585032587, + "learning_rate": 1.1345341463790255e-06, + "loss": 0.2907593846321106, + "memory(GiB)": 77.0, + "step": 6560, + "token_acc": 0.8888654130572575, + "train_speed(iter/s)": 0.464188 + }, + { + "epoch": 2.09952, + "grad_norm": 0.7341012944774424, + "learning_rate": 1.1337955163987017e-06, + "loss": 0.2872815430164337, + "memory(GiB)": 77.0, + "step": 6561, + "token_acc": 0.8875154511742892, + "train_speed(iter/s)": 0.464127 + }, + { + "epoch": 2.09984, + "grad_norm": 0.7566543921432484, + "learning_rate": 1.1330570564188253e-06, + "loss": 0.3437923789024353, + "memory(GiB)": 77.0, + "step": 6562, + "token_acc": 0.8879922530664945, + "train_speed(iter/s)": 0.464066 + }, + { + "epoch": 2.10016, + "grad_norm": 0.7454662603268176, + "learning_rate": 1.1323187665312858e-06, + "loss": 0.2791711688041687, + "memory(GiB)": 77.0, + "step": 6563, + "token_acc": 0.883839741866093, + "train_speed(iter/s)": 0.464006 + }, + { + "epoch": 2.10048, + "grad_norm": 0.752328044722462, + "learning_rate": 1.1315806468279497e-06, + "loss": 0.45906147360801697, + "memory(GiB)": 77.0, + "step": 6564, + "token_acc": 0.8339389534883721, + "train_speed(iter/s)": 0.46394 + }, + { + "epoch": 2.1008, + "grad_norm": 0.7587514135818177, + "learning_rate": 1.130842697400664e-06, + "loss": 0.2317083477973938, + "memory(GiB)": 77.0, + "step": 6565, + "token_acc": 0.9434523809523809, + "train_speed(iter/s)": 0.463883 + }, + { + "epoch": 2.10112, + "grad_norm": 0.785725874734793, + "learning_rate": 1.1301049183412529e-06, + "loss": 0.35244420170783997, + "memory(GiB)": 77.0, + "step": 6566, + "token_acc": 
0.9063681302704274, + "train_speed(iter/s)": 0.463819 + }, + { + "epoch": 2.10144, + "grad_norm": 0.7860729103122193, + "learning_rate": 1.1293673097415217e-06, + "loss": 0.37874680757522583, + "memory(GiB)": 77.0, + "step": 6567, + "token_acc": 0.8563829787234043, + "train_speed(iter/s)": 0.463745 + }, + { + "epoch": 2.10176, + "grad_norm": 0.7054883817330936, + "learning_rate": 1.1286298716932509e-06, + "loss": 0.3242982029914856, + "memory(GiB)": 77.0, + "step": 6568, + "token_acc": 0.8625769569041337, + "train_speed(iter/s)": 0.463679 + }, + { + "epoch": 2.10208, + "grad_norm": 0.681619432302776, + "learning_rate": 1.1278926042882026e-06, + "loss": 0.23722583055496216, + "memory(GiB)": 77.0, + "step": 6569, + "token_acc": 0.899837574445046, + "train_speed(iter/s)": 0.463621 + }, + { + "epoch": 2.1024, + "grad_norm": 0.7266843459636748, + "learning_rate": 1.1271555076181177e-06, + "loss": 0.3161764144897461, + "memory(GiB)": 77.0, + "step": 6570, + "token_acc": 0.9292899408284023, + "train_speed(iter/s)": 0.46356 + }, + { + "epoch": 2.10272, + "grad_norm": 0.6817858054988535, + "learning_rate": 1.1264185817747134e-06, + "loss": 0.32791459560394287, + "memory(GiB)": 77.0, + "step": 6571, + "token_acc": 0.8938413611310807, + "train_speed(iter/s)": 0.463474 + }, + { + "epoch": 2.10304, + "grad_norm": 0.7332425017452571, + "learning_rate": 1.1256818268496895e-06, + "loss": 0.33106672763824463, + "memory(GiB)": 77.0, + "step": 6572, + "token_acc": 0.8920086393088553, + "train_speed(iter/s)": 0.463411 + }, + { + "epoch": 2.10336, + "grad_norm": 0.7653903767103756, + "learning_rate": 1.1249452429347212e-06, + "loss": 0.3750876188278198, + "memory(GiB)": 77.0, + "step": 6573, + "token_acc": 0.9349250936329588, + "train_speed(iter/s)": 0.46335 + }, + { + "epoch": 2.1036799999999998, + "grad_norm": 0.6427321096750565, + "learning_rate": 1.1242088301214641e-06, + "loss": 0.34149742126464844, + "memory(GiB)": 77.0, + "step": 6574, + "token_acc": 0.9145129224652088, + "train_speed(iter/s)": 0.463274 + }, + { + "epoch": 2.104, + "grad_norm": 0.6602243889917288, + "learning_rate": 1.1234725885015513e-06, + "loss": 0.3324717581272125, + "memory(GiB)": 77.0, + "step": 6575, + "token_acc": 0.9473684210526315, + "train_speed(iter/s)": 0.463198 + }, + { + "epoch": 2.10432, + "grad_norm": 0.7798378282691879, + "learning_rate": 1.122736518166596e-06, + "loss": 0.27770814299583435, + "memory(GiB)": 77.0, + "step": 6576, + "token_acc": 0.8767724510465902, + "train_speed(iter/s)": 0.46313 + }, + { + "epoch": 2.10464, + "grad_norm": 0.6862335874556201, + "learning_rate": 1.1220006192081889e-06, + "loss": 0.2747132182121277, + "memory(GiB)": 77.0, + "step": 6577, + "token_acc": 0.9131409358363687, + "train_speed(iter/s)": 0.463072 + }, + { + "epoch": 2.10496, + "grad_norm": 0.6528359402875658, + "learning_rate": 1.1212648917179e-06, + "loss": 0.31431061029434204, + "memory(GiB)": 77.0, + "step": 6578, + "token_acc": 0.9042492917847026, + "train_speed(iter/s)": 0.463008 + }, + { + "epoch": 2.10528, + "grad_norm": 0.7154787149893204, + "learning_rate": 1.1205293357872779e-06, + "loss": 0.29770705103874207, + "memory(GiB)": 77.0, + "step": 6579, + "token_acc": 0.8658536585365854, + "train_speed(iter/s)": 0.462944 + }, + { + "epoch": 2.1056, + "grad_norm": 0.7121200485579758, + "learning_rate": 1.1197939515078499e-06, + "loss": 0.30247411131858826, + "memory(GiB)": 77.0, + "step": 6580, + "token_acc": 0.9141283214517174, + "train_speed(iter/s)": 0.462884 + }, + { + "epoch": 2.10592, + "grad_norm": 0.685048508871217, 
+ "learning_rate": 1.1190587389711214e-06, + "loss": 0.30727410316467285, + "memory(GiB)": 77.0, + "step": 6581, + "token_acc": 0.9554020100502513, + "train_speed(iter/s)": 0.462822 + }, + { + "epoch": 2.10624, + "grad_norm": 0.6817760578030447, + "learning_rate": 1.118323698268578e-06, + "loss": 0.23562605679035187, + "memory(GiB)": 77.0, + "step": 6582, + "token_acc": 0.946016030174446, + "train_speed(iter/s)": 0.46276 + }, + { + "epoch": 2.10656, + "grad_norm": 0.7130485209131008, + "learning_rate": 1.1175888294916812e-06, + "loss": 0.28575399518013, + "memory(GiB)": 77.0, + "step": 6583, + "token_acc": 0.9024685382381413, + "train_speed(iter/s)": 0.462699 + }, + { + "epoch": 2.10688, + "grad_norm": 0.5976595568010334, + "learning_rate": 1.116854132731873e-06, + "loss": 0.28966641426086426, + "memory(GiB)": 77.0, + "step": 6584, + "token_acc": 0.8917874396135266, + "train_speed(iter/s)": 0.462611 + }, + { + "epoch": 2.1072, + "grad_norm": 0.7424053092438664, + "learning_rate": 1.1161196080805742e-06, + "loss": 0.2695184648036957, + "memory(GiB)": 77.0, + "step": 6585, + "token_acc": 0.9393764434180139, + "train_speed(iter/s)": 0.462544 + }, + { + "epoch": 2.10752, + "grad_norm": 0.8803281133234366, + "learning_rate": 1.1153852556291834e-06, + "loss": 0.31732457876205444, + "memory(GiB)": 77.0, + "step": 6586, + "token_acc": 0.8701113331430202, + "train_speed(iter/s)": 0.462486 + }, + { + "epoch": 2.10784, + "grad_norm": 0.7375726867630893, + "learning_rate": 1.114651075469078e-06, + "loss": 0.38473302125930786, + "memory(GiB)": 77.0, + "step": 6587, + "token_acc": 0.8084025854108957, + "train_speed(iter/s)": 0.462422 + }, + { + "epoch": 2.10816, + "grad_norm": 0.8607833719254108, + "learning_rate": 1.113917067691613e-06, + "loss": 0.35673272609710693, + "memory(GiB)": 77.0, + "step": 6588, + "token_acc": 0.8458161865569273, + "train_speed(iter/s)": 0.462353 + }, + { + "epoch": 2.10848, + "grad_norm": 0.7396485261860188, + "learning_rate": 1.113183232388125e-06, + "loss": 0.2812823951244354, + "memory(GiB)": 77.0, + "step": 6589, + "token_acc": 0.8685173886516169, + "train_speed(iter/s)": 0.462296 + }, + { + "epoch": 2.1088, + "grad_norm": 0.7071595456924962, + "learning_rate": 1.1124495696499263e-06, + "loss": 0.2889666259288788, + "memory(GiB)": 77.0, + "step": 6590, + "token_acc": 0.940045766590389, + "train_speed(iter/s)": 0.462235 + }, + { + "epoch": 2.10912, + "grad_norm": 0.606860501412567, + "learning_rate": 1.111716079568308e-06, + "loss": 0.27059102058410645, + "memory(GiB)": 77.0, + "step": 6591, + "token_acc": 0.9300162752848175, + "train_speed(iter/s)": 0.462171 + }, + { + "epoch": 2.10944, + "grad_norm": 0.7267292933803866, + "learning_rate": 1.1109827622345404e-06, + "loss": 0.3547346591949463, + "memory(GiB)": 77.0, + "step": 6592, + "token_acc": 0.9367369589345172, + "train_speed(iter/s)": 0.462112 + }, + { + "epoch": 2.10976, + "grad_norm": 0.7393960299852803, + "learning_rate": 1.1102496177398726e-06, + "loss": 0.4198017120361328, + "memory(GiB)": 77.0, + "step": 6593, + "token_acc": 0.8700719917723688, + "train_speed(iter/s)": 0.462045 + }, + { + "epoch": 2.11008, + "grad_norm": 0.6954399837381613, + "learning_rate": 1.1095166461755308e-06, + "loss": 0.2732653319835663, + "memory(GiB)": 77.0, + "step": 6594, + "token_acc": 0.9361032420522506, + "train_speed(iter/s)": 0.461988 + }, + { + "epoch": 2.1104, + "grad_norm": 0.6902077441907856, + "learning_rate": 1.1087838476327212e-06, + "loss": 0.32703500986099243, + "memory(GiB)": 77.0, + "step": 6595, + "token_acc": 
0.9052523171987642, + "train_speed(iter/s)": 0.461921 + }, + { + "epoch": 2.11072, + "grad_norm": 0.7013713720562971, + "learning_rate": 1.108051222202629e-06, + "loss": 0.29340746998786926, + "memory(GiB)": 77.0, + "step": 6596, + "token_acc": 0.8937329700272479, + "train_speed(iter/s)": 0.461863 + }, + { + "epoch": 2.11104, + "grad_norm": 0.6454429331346375, + "learning_rate": 1.1073187699764142e-06, + "loss": 0.2719828188419342, + "memory(GiB)": 77.0, + "step": 6597, + "token_acc": 0.9391604675876727, + "train_speed(iter/s)": 0.461804 + }, + { + "epoch": 2.11136, + "grad_norm": 0.7097815295457132, + "learning_rate": 1.1065864910452192e-06, + "loss": 0.406586229801178, + "memory(GiB)": 77.0, + "step": 6598, + "token_acc": 0.797275204359673, + "train_speed(iter/s)": 0.461741 + }, + { + "epoch": 2.11168, + "grad_norm": 0.7525823378973393, + "learning_rate": 1.1058543855001635e-06, + "loss": 0.3113897740840912, + "memory(GiB)": 77.0, + "step": 6599, + "token_acc": 0.8786026200873363, + "train_speed(iter/s)": 0.461677 + }, + { + "epoch": 2.112, + "grad_norm": 0.6861281614108315, + "learning_rate": 1.1051224534323445e-06, + "loss": 0.3353115916252136, + "memory(GiB)": 77.0, + "step": 6600, + "token_acc": 0.9551130610867364, + "train_speed(iter/s)": 0.46162 + }, + { + "epoch": 2.11232, + "grad_norm": 0.7620032912853132, + "learning_rate": 1.1043906949328387e-06, + "loss": 0.36113840341567993, + "memory(GiB)": 77.0, + "step": 6601, + "token_acc": 0.905972797161443, + "train_speed(iter/s)": 0.461552 + }, + { + "epoch": 2.11264, + "grad_norm": 0.7597131382439705, + "learning_rate": 1.1036591100927008e-06, + "loss": 0.429425448179245, + "memory(GiB)": 77.0, + "step": 6602, + "token_acc": 0.869449715370019, + "train_speed(iter/s)": 0.46149 + }, + { + "epoch": 2.11296, + "grad_norm": 0.6954889377167187, + "learning_rate": 1.1029276990029636e-06, + "loss": 0.3558781147003174, + "memory(GiB)": 77.0, + "step": 6603, + "token_acc": 0.9277566539923955, + "train_speed(iter/s)": 0.461422 + }, + { + "epoch": 2.11328, + "grad_norm": 0.6498583869966611, + "learning_rate": 1.1021964617546388e-06, + "loss": 0.32292619347572327, + "memory(GiB)": 77.0, + "step": 6604, + "token_acc": 0.9595709570957096, + "train_speed(iter/s)": 0.461364 + }, + { + "epoch": 2.1136, + "grad_norm": 0.7772059261390538, + "learning_rate": 1.101465398438715e-06, + "loss": 0.27242934703826904, + "memory(GiB)": 77.0, + "step": 6605, + "token_acc": 0.9292771710868435, + "train_speed(iter/s)": 0.461303 + }, + { + "epoch": 2.11392, + "grad_norm": 0.7468340092734276, + "learning_rate": 1.1007345091461623e-06, + "loss": 0.27828502655029297, + "memory(GiB)": 77.0, + "step": 6606, + "token_acc": 0.9740148486579098, + "train_speed(iter/s)": 0.461241 + }, + { + "epoch": 2.11424, + "grad_norm": 0.7423474471482281, + "learning_rate": 1.1000037939679267e-06, + "loss": 0.2665828466415405, + "memory(GiB)": 77.0, + "step": 6607, + "token_acc": 0.9594556280124752, + "train_speed(iter/s)": 0.461185 + }, + { + "epoch": 2.11456, + "grad_norm": 0.7599012121181759, + "learning_rate": 1.0992732529949323e-06, + "loss": 0.28658196330070496, + "memory(GiB)": 77.0, + "step": 6608, + "token_acc": 0.8641277232662564, + "train_speed(iter/s)": 0.461129 + }, + { + "epoch": 2.11488, + "grad_norm": 0.6802343700608459, + "learning_rate": 1.0985428863180828e-06, + "loss": 0.3237529397010803, + "memory(GiB)": 77.0, + "step": 6609, + "token_acc": 0.9026946107784432, + "train_speed(iter/s)": 0.461064 + }, + { + "epoch": 2.1152, + "grad_norm": 0.7308152335921666, + 
"learning_rate": 1.0978126940282602e-06, + "loss": 0.32050639390945435, + "memory(GiB)": 77.0, + "step": 6610, + "token_acc": 0.884756269371654, + "train_speed(iter/s)": 0.461002 + }, + { + "epoch": 2.11552, + "grad_norm": 0.6932175461631159, + "learning_rate": 1.0970826762163226e-06, + "loss": 0.3693649172782898, + "memory(GiB)": 77.0, + "step": 6611, + "token_acc": 0.9250313676286073, + "train_speed(iter/s)": 0.460943 + }, + { + "epoch": 2.11584, + "grad_norm": 0.7255527712298097, + "learning_rate": 1.0963528329731091e-06, + "loss": 0.31258606910705566, + "memory(GiB)": 77.0, + "step": 6612, + "token_acc": 0.9448689956331878, + "train_speed(iter/s)": 0.460881 + }, + { + "epoch": 2.11616, + "grad_norm": 0.7120431143343524, + "learning_rate": 1.0956231643894358e-06, + "loss": 0.2974836826324463, + "memory(GiB)": 77.0, + "step": 6613, + "token_acc": 0.9245556764599202, + "train_speed(iter/s)": 0.460821 + }, + { + "epoch": 2.11648, + "grad_norm": 0.7069047740901904, + "learning_rate": 1.0948936705560973e-06, + "loss": 0.2890322208404541, + "memory(GiB)": 77.0, + "step": 6614, + "token_acc": 0.9641116526362428, + "train_speed(iter/s)": 0.460761 + }, + { + "epoch": 2.1168, + "grad_norm": 0.7192240327398672, + "learning_rate": 1.0941643515638667e-06, + "loss": 0.39933454990386963, + "memory(GiB)": 77.0, + "step": 6615, + "token_acc": 0.8858796296296296, + "train_speed(iter/s)": 0.46069 + }, + { + "epoch": 2.11712, + "grad_norm": 0.6362408860332167, + "learning_rate": 1.093435207503495e-06, + "loss": 0.23312559723854065, + "memory(GiB)": 77.0, + "step": 6616, + "token_acc": 0.8624161073825504, + "train_speed(iter/s)": 0.460631 + }, + { + "epoch": 2.11744, + "grad_norm": 0.699756207998331, + "learning_rate": 1.0927062384657111e-06, + "loss": 0.24149198830127716, + "memory(GiB)": 77.0, + "step": 6617, + "token_acc": 0.947565543071161, + "train_speed(iter/s)": 0.460576 + }, + { + "epoch": 2.11776, + "grad_norm": 0.767583596849872, + "learning_rate": 1.0919774445412231e-06, + "loss": 0.2856700122356415, + "memory(GiB)": 77.0, + "step": 6618, + "token_acc": 0.8870234347674012, + "train_speed(iter/s)": 0.460517 + }, + { + "epoch": 2.11808, + "grad_norm": 0.7196586026175711, + "learning_rate": 1.091248825820717e-06, + "loss": 0.37919968366622925, + "memory(GiB)": 77.0, + "step": 6619, + "token_acc": 0.8647724845118564, + "train_speed(iter/s)": 0.460446 + }, + { + "epoch": 2.1184, + "grad_norm": 0.6397835905323748, + "learning_rate": 1.090520382394856e-06, + "loss": 0.3203662633895874, + "memory(GiB)": 77.0, + "step": 6620, + "token_acc": 0.9463831754102149, + "train_speed(iter/s)": 0.460384 + }, + { + "epoch": 2.11872, + "grad_norm": 0.6798045753262447, + "learning_rate": 1.0897921143542824e-06, + "loss": 0.316472589969635, + "memory(GiB)": 77.0, + "step": 6621, + "token_acc": 0.9592376919004765, + "train_speed(iter/s)": 0.460329 + }, + { + "epoch": 2.11904, + "grad_norm": 0.7929900934009616, + "learning_rate": 1.089064021789616e-06, + "loss": 0.3164336681365967, + "memory(GiB)": 77.0, + "step": 6622, + "token_acc": 0.8755958055290753, + "train_speed(iter/s)": 0.460267 + }, + { + "epoch": 2.11936, + "grad_norm": 0.6631269958187357, + "learning_rate": 1.088336104791457e-06, + "loss": 0.29060235619544983, + "memory(GiB)": 77.0, + "step": 6623, + "token_acc": 0.9358849341784495, + "train_speed(iter/s)": 0.460204 + }, + { + "epoch": 2.11968, + "grad_norm": 0.7854516458827904, + "learning_rate": 1.0876083634503809e-06, + "loss": 0.30079442262649536, + "memory(GiB)": 77.0, + "step": 6624, + "token_acc": 
0.91318093385214, + "train_speed(iter/s)": 0.46014 + }, + { + "epoch": 2.12, + "grad_norm": 0.8296441045580323, + "learning_rate": 1.0868807978569437e-06, + "loss": 0.36025407910346985, + "memory(GiB)": 77.0, + "step": 6625, + "token_acc": 0.8231081403676295, + "train_speed(iter/s)": 0.460074 + }, + { + "epoch": 2.12032, + "grad_norm": 0.7252070816339198, + "learning_rate": 1.0861534081016764e-06, + "loss": 0.25334110856056213, + "memory(GiB)": 77.0, + "step": 6626, + "token_acc": 0.9106239460370995, + "train_speed(iter/s)": 0.460019 + }, + { + "epoch": 2.12064, + "grad_norm": 0.6947314583106666, + "learning_rate": 1.0854261942750906e-06, + "loss": 0.331642210483551, + "memory(GiB)": 77.0, + "step": 6627, + "token_acc": 0.8845651606163489, + "train_speed(iter/s)": 0.459956 + }, + { + "epoch": 2.12096, + "grad_norm": 0.689466775521997, + "learning_rate": 1.084699156467676e-06, + "loss": 0.2788483798503876, + "memory(GiB)": 77.0, + "step": 6628, + "token_acc": 0.9288849868305531, + "train_speed(iter/s)": 0.459885 + }, + { + "epoch": 2.12128, + "grad_norm": 0.6412499657519585, + "learning_rate": 1.0839722947698994e-06, + "loss": 0.2558538615703583, + "memory(GiB)": 77.0, + "step": 6629, + "token_acc": 0.9184168012924071, + "train_speed(iter/s)": 0.45982 + }, + { + "epoch": 2.1216, + "grad_norm": 0.6553788538489224, + "learning_rate": 1.0832456092722063e-06, + "loss": 0.26833096146583557, + "memory(GiB)": 77.0, + "step": 6630, + "token_acc": 0.8734207389749702, + "train_speed(iter/s)": 0.459761 + }, + { + "epoch": 2.12192, + "grad_norm": 0.7667313260936783, + "learning_rate": 1.0825191000650204e-06, + "loss": 0.3184695839881897, + "memory(GiB)": 77.0, + "step": 6631, + "token_acc": 0.8495548961424332, + "train_speed(iter/s)": 0.459702 + }, + { + "epoch": 2.12224, + "grad_norm": 0.9327993814835308, + "learning_rate": 1.0817927672387427e-06, + "loss": 0.29395759105682373, + "memory(GiB)": 77.0, + "step": 6632, + "token_acc": 0.9419709854927464, + "train_speed(iter/s)": 0.459645 + }, + { + "epoch": 2.12256, + "grad_norm": 0.7485423233019731, + "learning_rate": 1.0810666108837528e-06, + "loss": 0.3073633909225464, + "memory(GiB)": 77.0, + "step": 6633, + "token_acc": 0.918664047151277, + "train_speed(iter/s)": 0.459586 + }, + { + "epoch": 2.12288, + "grad_norm": 0.6976389826430195, + "learning_rate": 1.0803406310904082e-06, + "loss": 0.22497126460075378, + "memory(GiB)": 77.0, + "step": 6634, + "token_acc": 0.9338235294117647, + "train_speed(iter/s)": 0.459527 + }, + { + "epoch": 2.1232, + "grad_norm": 0.770284074703972, + "learning_rate": 1.079614827949045e-06, + "loss": 0.3073483407497406, + "memory(GiB)": 77.0, + "step": 6635, + "token_acc": 0.8801418439716312, + "train_speed(iter/s)": 0.459463 + }, + { + "epoch": 2.12352, + "grad_norm": 0.7019651208834033, + "learning_rate": 1.0788892015499764e-06, + "loss": 0.2739887833595276, + "memory(GiB)": 77.0, + "step": 6636, + "token_acc": 0.9499279538904899, + "train_speed(iter/s)": 0.459405 + }, + { + "epoch": 2.12384, + "grad_norm": 0.728989950442054, + "learning_rate": 1.078163751983494e-06, + "loss": 0.35592401027679443, + "memory(GiB)": 77.0, + "step": 6637, + "token_acc": 0.9061016949152543, + "train_speed(iter/s)": 0.459348 + }, + { + "epoch": 2.12416, + "grad_norm": 1.111075727998316, + "learning_rate": 1.0774384793398677e-06, + "loss": 0.25431180000305176, + "memory(GiB)": 77.0, + "step": 6638, + "token_acc": 0.9472541110766367, + "train_speed(iter/s)": 0.459283 + }, + { + "epoch": 2.12448, + "grad_norm": 0.7412685571453554, + 
"learning_rate": 1.0767133837093447e-06, + "loss": 0.30152010917663574, + "memory(GiB)": 77.0, + "step": 6639, + "token_acc": 0.9517439082656474, + "train_speed(iter/s)": 0.459222 + }, + { + "epoch": 2.1248, + "grad_norm": 0.6691536252060204, + "learning_rate": 1.0759884651821505e-06, + "loss": 0.35270455479621887, + "memory(GiB)": 77.0, + "step": 6640, + "token_acc": 0.8824470211915234, + "train_speed(iter/s)": 0.459152 + }, + { + "epoch": 2.12512, + "grad_norm": 6.122198398872372, + "learning_rate": 1.0752637238484891e-06, + "loss": 0.19883573055267334, + "memory(GiB)": 77.0, + "step": 6641, + "token_acc": 0.9462450592885375, + "train_speed(iter/s)": 0.459096 + }, + { + "epoch": 2.12544, + "grad_norm": 0.7392977480053737, + "learning_rate": 1.074539159798542e-06, + "loss": 0.3208944797515869, + "memory(GiB)": 77.0, + "step": 6642, + "token_acc": 0.8723464477780922, + "train_speed(iter/s)": 0.459039 + }, + { + "epoch": 2.12576, + "grad_norm": 0.7084813552470451, + "learning_rate": 1.0738147731224682e-06, + "loss": 0.27272748947143555, + "memory(GiB)": 77.0, + "step": 6643, + "token_acc": 0.901794784964443, + "train_speed(iter/s)": 0.45898 + }, + { + "epoch": 2.12608, + "grad_norm": 0.6552192746057471, + "learning_rate": 1.0730905639104055e-06, + "loss": 0.3537224531173706, + "memory(GiB)": 77.0, + "step": 6644, + "token_acc": 0.8914312397851973, + "train_speed(iter/s)": 0.458915 + }, + { + "epoch": 2.1264, + "grad_norm": 0.6374004306177822, + "learning_rate": 1.0723665322524688e-06, + "loss": 0.2822992205619812, + "memory(GiB)": 77.0, + "step": 6645, + "token_acc": 0.9196157735085946, + "train_speed(iter/s)": 0.458853 + }, + { + "epoch": 2.12672, + "grad_norm": 0.6902295995900707, + "learning_rate": 1.0716426782387513e-06, + "loss": 0.34453630447387695, + "memory(GiB)": 77.0, + "step": 6646, + "token_acc": 0.8194063926940639, + "train_speed(iter/s)": 0.458792 + }, + { + "epoch": 2.12704, + "grad_norm": 0.6496316362283416, + "learning_rate": 1.0709190019593243e-06, + "loss": 0.30570536851882935, + "memory(GiB)": 77.0, + "step": 6647, + "token_acc": 0.9162767791184437, + "train_speed(iter/s)": 0.458724 + }, + { + "epoch": 2.12736, + "grad_norm": 0.7388288509127611, + "learning_rate": 1.0701955035042366e-06, + "loss": 0.28786301612854004, + "memory(GiB)": 77.0, + "step": 6648, + "token_acc": 0.9192982456140351, + "train_speed(iter/s)": 0.458666 + }, + { + "epoch": 2.12768, + "grad_norm": 0.6641749135507814, + "learning_rate": 1.0694721829635152e-06, + "loss": 0.2687237858772278, + "memory(GiB)": 77.0, + "step": 6649, + "token_acc": 0.9281200631911533, + "train_speed(iter/s)": 0.458602 + }, + { + "epoch": 2.128, + "grad_norm": 0.6367121391055711, + "learning_rate": 1.0687490404271647e-06, + "loss": 0.32915806770324707, + "memory(GiB)": 77.0, + "step": 6650, + "token_acc": 0.897607122982749, + "train_speed(iter/s)": 0.458538 + }, + { + "epoch": 2.12832, + "grad_norm": 0.7269957417991525, + "learning_rate": 1.0680260759851677e-06, + "loss": 0.3746379315853119, + "memory(GiB)": 77.0, + "step": 6651, + "token_acc": 0.8633030214139044, + "train_speed(iter/s)": 0.458476 + }, + { + "epoch": 2.12864, + "grad_norm": 0.7771937291588225, + "learning_rate": 1.0673032897274843e-06, + "loss": 0.40768465399742126, + "memory(GiB)": 77.0, + "step": 6652, + "token_acc": 0.9078168180023687, + "train_speed(iter/s)": 0.458413 + }, + { + "epoch": 2.12896, + "grad_norm": 0.6429736212328128, + "learning_rate": 1.066580681744053e-06, + "loss": 0.2500952482223511, + "memory(GiB)": 77.0, + "step": 6653, + "token_acc": 
0.8944800243976823, + "train_speed(iter/s)": 0.45835 + }, + { + "epoch": 2.12928, + "grad_norm": 0.7247962629072593, + "learning_rate": 1.0658582521247896e-06, + "loss": 0.2542790174484253, + "memory(GiB)": 77.0, + "step": 6654, + "token_acc": 0.9645324735145094, + "train_speed(iter/s)": 0.45829 + }, + { + "epoch": 2.1296, + "grad_norm": 0.6434952007949224, + "learning_rate": 1.0651360009595883e-06, + "loss": 0.2805558443069458, + "memory(GiB)": 77.0, + "step": 6655, + "token_acc": 0.9410371773620649, + "train_speed(iter/s)": 0.458225 + }, + { + "epoch": 2.12992, + "grad_norm": 0.9263501572659614, + "learning_rate": 1.0644139283383203e-06, + "loss": 0.3321051299571991, + "memory(GiB)": 77.0, + "step": 6656, + "token_acc": 0.9401993355481728, + "train_speed(iter/s)": 0.458158 + }, + { + "epoch": 2.13024, + "grad_norm": 0.6704889403453382, + "learning_rate": 1.0636920343508353e-06, + "loss": 0.22786477208137512, + "memory(GiB)": 77.0, + "step": 6657, + "token_acc": 0.9277081468218442, + "train_speed(iter/s)": 0.458099 + }, + { + "epoch": 2.13056, + "grad_norm": 0.7544866117256409, + "learning_rate": 1.0629703190869607e-06, + "loss": 0.33601686358451843, + "memory(GiB)": 77.0, + "step": 6658, + "token_acc": 0.8004596740493105, + "train_speed(iter/s)": 0.458039 + }, + { + "epoch": 2.13088, + "grad_norm": 0.7957313578077795, + "learning_rate": 1.0622487826365007e-06, + "loss": 0.38230520486831665, + "memory(GiB)": 77.0, + "step": 6659, + "token_acc": 0.9238788355625491, + "train_speed(iter/s)": 0.457981 + }, + { + "epoch": 2.1312, + "grad_norm": 0.7066456250223294, + "learning_rate": 1.0615274250892387e-06, + "loss": 0.34677010774612427, + "memory(GiB)": 77.0, + "step": 6660, + "token_acc": 0.9280839895013123, + "train_speed(iter/s)": 0.457924 + }, + { + "epoch": 2.13152, + "grad_norm": 0.7061595487043368, + "learning_rate": 1.0608062465349348e-06, + "loss": 0.27854228019714355, + "memory(GiB)": 77.0, + "step": 6661, + "token_acc": 0.926829268292683, + "train_speed(iter/s)": 0.457867 + }, + { + "epoch": 2.13184, + "grad_norm": 0.7208725225972928, + "learning_rate": 1.0600852470633271e-06, + "loss": 0.26821964979171753, + "memory(GiB)": 77.0, + "step": 6662, + "token_acc": 0.9490196078431372, + "train_speed(iter/s)": 0.457812 + }, + { + "epoch": 2.13216, + "grad_norm": 0.7082046504043347, + "learning_rate": 1.059364426764132e-06, + "loss": 0.3359079957008362, + "memory(GiB)": 77.0, + "step": 6663, + "token_acc": 0.9450402144772118, + "train_speed(iter/s)": 0.457756 + }, + { + "epoch": 2.13248, + "grad_norm": 0.7185197789390828, + "learning_rate": 1.0586437857270423e-06, + "loss": 0.2680734395980835, + "memory(GiB)": 77.0, + "step": 6664, + "token_acc": 0.8835555555555555, + "train_speed(iter/s)": 0.457697 + }, + { + "epoch": 2.1328, + "grad_norm": 0.6952514748408829, + "learning_rate": 1.05792332404173e-06, + "loss": 0.32253390550613403, + "memory(GiB)": 77.0, + "step": 6665, + "token_acc": 0.8336300692383778, + "train_speed(iter/s)": 0.457634 + }, + { + "epoch": 2.13312, + "grad_norm": 0.6851561201923393, + "learning_rate": 1.0572030417978436e-06, + "loss": 0.2936636507511139, + "memory(GiB)": 77.0, + "step": 6666, + "token_acc": 0.9450407900386432, + "train_speed(iter/s)": 0.457572 + }, + { + "epoch": 2.1334400000000002, + "grad_norm": 0.7075679961766589, + "learning_rate": 1.0564829390850097e-06, + "loss": 0.24004918336868286, + "memory(GiB)": 77.0, + "step": 6667, + "token_acc": 0.8789986091794159, + "train_speed(iter/s)": 0.457512 + }, + { + "epoch": 2.13376, + "grad_norm": 
0.7008207847927599, + "learning_rate": 1.055763015992833e-06, + "loss": 0.27291563153266907, + "memory(GiB)": 77.0, + "step": 6668, + "token_acc": 0.9366347177848775, + "train_speed(iter/s)": 0.457458 + }, + { + "epoch": 2.13408, + "grad_norm": 0.7862528446760144, + "learning_rate": 1.055043272610895e-06, + "loss": 0.27753621339797974, + "memory(GiB)": 77.0, + "step": 6669, + "token_acc": 0.9188297404311483, + "train_speed(iter/s)": 0.4574 + }, + { + "epoch": 2.1344, + "grad_norm": 0.769055401605527, + "learning_rate": 1.0543237090287556e-06, + "loss": 0.288467675447464, + "memory(GiB)": 77.0, + "step": 6670, + "token_acc": 0.947941567065073, + "train_speed(iter/s)": 0.457345 + }, + { + "epoch": 2.13472, + "grad_norm": 0.7712650018995426, + "learning_rate": 1.0536043253359518e-06, + "loss": 0.25790148973464966, + "memory(GiB)": 77.0, + "step": 6671, + "token_acc": 0.9333333333333333, + "train_speed(iter/s)": 0.457292 + }, + { + "epoch": 2.13504, + "grad_norm": 0.7860506309166774, + "learning_rate": 1.0528851216219988e-06, + "loss": 0.2834208607673645, + "memory(GiB)": 77.0, + "step": 6672, + "token_acc": 0.9352094240837696, + "train_speed(iter/s)": 0.457236 + }, + { + "epoch": 2.13536, + "grad_norm": 0.7625177466563441, + "learning_rate": 1.0521660979763884e-06, + "loss": 0.31580960750579834, + "memory(GiB)": 77.0, + "step": 6673, + "token_acc": 0.9215442092154421, + "train_speed(iter/s)": 0.457178 + }, + { + "epoch": 2.13568, + "grad_norm": 0.7085687400000774, + "learning_rate": 1.051447254488591e-06, + "loss": 0.2827872633934021, + "memory(GiB)": 77.0, + "step": 6674, + "token_acc": 0.9509933774834437, + "train_speed(iter/s)": 0.45712 + }, + { + "epoch": 2.136, + "grad_norm": 0.665801837022622, + "learning_rate": 1.0507285912480542e-06, + "loss": 0.2824689447879791, + "memory(GiB)": 77.0, + "step": 6675, + "token_acc": 0.8735719725818736, + "train_speed(iter/s)": 0.457063 + }, + { + "epoch": 2.13632, + "grad_norm": 0.7192700829635931, + "learning_rate": 1.050010108344203e-06, + "loss": 0.2764231264591217, + "memory(GiB)": 77.0, + "step": 6676, + "token_acc": 0.9181725196045005, + "train_speed(iter/s)": 0.457005 + }, + { + "epoch": 2.13664, + "grad_norm": 0.7169566417494684, + "learning_rate": 1.0492918058664402e-06, + "loss": 0.304720938205719, + "memory(GiB)": 77.0, + "step": 6677, + "token_acc": 0.9322570433681545, + "train_speed(iter/s)": 0.456948 + }, + { + "epoch": 2.13696, + "grad_norm": 0.6821249092138076, + "learning_rate": 1.0485736839041464e-06, + "loss": 0.3343053460121155, + "memory(GiB)": 77.0, + "step": 6678, + "token_acc": 0.8869082986730046, + "train_speed(iter/s)": 0.456893 + }, + { + "epoch": 2.13728, + "grad_norm": 0.6253921735499807, + "learning_rate": 1.047855742546679e-06, + "loss": 0.25687628984451294, + "memory(GiB)": 77.0, + "step": 6679, + "token_acc": 0.9239635620003718, + "train_speed(iter/s)": 0.45683 + }, + { + "epoch": 2.1376, + "grad_norm": 0.7024534563530789, + "learning_rate": 1.0471379818833735e-06, + "loss": 0.28913795948028564, + "memory(GiB)": 77.0, + "step": 6680, + "token_acc": 0.8775841102553709, + "train_speed(iter/s)": 0.456766 + }, + { + "epoch": 2.13792, + "grad_norm": 0.7527968001258176, + "learning_rate": 1.0464204020035427e-06, + "loss": 0.31916889548301697, + "memory(GiB)": 77.0, + "step": 6681, + "token_acc": 0.8805547693178601, + "train_speed(iter/s)": 0.456702 + }, + { + "epoch": 2.13824, + "grad_norm": 0.7136166047516678, + "learning_rate": 1.045703002996478e-06, + "loss": 0.31588122248649597, + "memory(GiB)": 77.0, + "step": 6682, + 
"token_acc": 0.9562064965197216, + "train_speed(iter/s)": 0.456611 + }, + { + "epoch": 2.13856, + "grad_norm": 0.7109607548222571, + "learning_rate": 1.0449857849514453e-06, + "loss": 0.37056541442871094, + "memory(GiB)": 77.0, + "step": 6683, + "token_acc": 0.9008751562779068, + "train_speed(iter/s)": 0.456546 + }, + { + "epoch": 2.13888, + "grad_norm": 0.6409731569934344, + "learning_rate": 1.044268747957691e-06, + "loss": 0.28933507204055786, + "memory(GiB)": 77.0, + "step": 6684, + "token_acc": 0.9231846369273855, + "train_speed(iter/s)": 0.456482 + }, + { + "epoch": 2.1391999999999998, + "grad_norm": 0.6951660102156328, + "learning_rate": 1.043551892104437e-06, + "loss": 0.3252728581428528, + "memory(GiB)": 77.0, + "step": 6685, + "token_acc": 0.9157427937915743, + "train_speed(iter/s)": 0.45642 + }, + { + "epoch": 2.13952, + "grad_norm": 0.7140468416454946, + "learning_rate": 1.0428352174808853e-06, + "loss": 0.29098546504974365, + "memory(GiB)": 77.0, + "step": 6686, + "token_acc": 0.9232824427480916, + "train_speed(iter/s)": 0.456367 + }, + { + "epoch": 2.13984, + "grad_norm": 0.6926624513617157, + "learning_rate": 1.0421187241762128e-06, + "loss": 0.28009310364723206, + "memory(GiB)": 77.0, + "step": 6687, + "token_acc": 0.9011693700490381, + "train_speed(iter/s)": 0.456309 + }, + { + "epoch": 2.14016, + "grad_norm": 0.7821661098776064, + "learning_rate": 1.0414024122795746e-06, + "loss": 0.2915915846824646, + "memory(GiB)": 77.0, + "step": 6688, + "token_acc": 0.9431245500359972, + "train_speed(iter/s)": 0.456255 + }, + { + "epoch": 2.14048, + "grad_norm": 0.6839343597034315, + "learning_rate": 1.0406862818801034e-06, + "loss": 0.28137582540512085, + "memory(GiB)": 77.0, + "step": 6689, + "token_acc": 0.9151910531220876, + "train_speed(iter/s)": 0.45619 + }, + { + "epoch": 2.1408, + "grad_norm": 0.6651222697726143, + "learning_rate": 1.0399703330669089e-06, + "loss": 0.2746019959449768, + "memory(GiB)": 77.0, + "step": 6690, + "token_acc": 0.9144847465769878, + "train_speed(iter/s)": 0.456125 + }, + { + "epoch": 2.14112, + "grad_norm": 0.7347018444012606, + "learning_rate": 1.0392545659290789e-06, + "loss": 0.2843945324420929, + "memory(GiB)": 77.0, + "step": 6691, + "token_acc": 0.945730247406225, + "train_speed(iter/s)": 0.456065 + }, + { + "epoch": 2.1414400000000002, + "grad_norm": 0.7143463632816579, + "learning_rate": 1.0385389805556778e-06, + "loss": 0.23226886987686157, + "memory(GiB)": 77.0, + "step": 6692, + "token_acc": 0.9531854980640619, + "train_speed(iter/s)": 0.456011 + }, + { + "epoch": 2.14176, + "grad_norm": 0.732199517799932, + "learning_rate": 1.0378235770357483e-06, + "loss": 0.3052058517932892, + "memory(GiB)": 77.0, + "step": 6693, + "token_acc": 0.9095041322314049, + "train_speed(iter/s)": 0.45595 + }, + { + "epoch": 2.14208, + "grad_norm": 0.7326343231087474, + "learning_rate": 1.0371083554583095e-06, + "loss": 0.3181288242340088, + "memory(GiB)": 77.0, + "step": 6694, + "token_acc": 0.8669932177844762, + "train_speed(iter/s)": 0.455897 + }, + { + "epoch": 2.1424, + "grad_norm": 0.6819549248275228, + "learning_rate": 1.0363933159123584e-06, + "loss": 0.324481725692749, + "memory(GiB)": 77.0, + "step": 6695, + "token_acc": 0.948051948051948, + "train_speed(iter/s)": 0.455834 + }, + { + "epoch": 2.14272, + "grad_norm": 0.7013286863606711, + "learning_rate": 1.0356784584868695e-06, + "loss": 0.30825740098953247, + "memory(GiB)": 77.0, + "step": 6696, + "token_acc": 0.8982988165680473, + "train_speed(iter/s)": 0.455774 + }, + { + "epoch": 2.14304, + 
"grad_norm": 0.6966400727300895, + "learning_rate": 1.0349637832707954e-06, + "loss": 0.31722402572631836, + "memory(GiB)": 77.0, + "step": 6697, + "token_acc": 0.9028239202657807, + "train_speed(iter/s)": 0.455717 + }, + { + "epoch": 2.14336, + "grad_norm": 0.7599232833762842, + "learning_rate": 1.0342492903530627e-06, + "loss": 0.32604116201400757, + "memory(GiB)": 77.0, + "step": 6698, + "token_acc": 0.9330677290836653, + "train_speed(iter/s)": 0.45566 + }, + { + "epoch": 2.14368, + "grad_norm": 0.706198018883697, + "learning_rate": 1.0335349798225788e-06, + "loss": 0.39907968044281006, + "memory(GiB)": 77.0, + "step": 6699, + "token_acc": 0.8252411040904556, + "train_speed(iter/s)": 0.4556 + }, + { + "epoch": 2.144, + "grad_norm": 0.7105912780335977, + "learning_rate": 1.0328208517682274e-06, + "loss": 0.28863525390625, + "memory(GiB)": 77.0, + "step": 6700, + "token_acc": 0.9414201183431953, + "train_speed(iter/s)": 0.455545 + }, + { + "epoch": 2.14432, + "grad_norm": 0.7313862307623635, + "learning_rate": 1.0321069062788692e-06, + "loss": 0.3891780376434326, + "memory(GiB)": 77.0, + "step": 6701, + "token_acc": 0.9158259773013872, + "train_speed(iter/s)": 0.45549 + }, + { + "epoch": 2.14464, + "grad_norm": 0.8066399781293769, + "learning_rate": 1.0313931434433413e-06, + "loss": 0.26467519998550415, + "memory(GiB)": 77.0, + "step": 6702, + "token_acc": 0.9175858480749219, + "train_speed(iter/s)": 0.455422 + }, + { + "epoch": 2.14496, + "grad_norm": 0.6863610453250224, + "learning_rate": 1.0306795633504612e-06, + "loss": 0.3751182556152344, + "memory(GiB)": 77.0, + "step": 6703, + "token_acc": 0.855465884079237, + "train_speed(iter/s)": 0.455363 + }, + { + "epoch": 2.14528, + "grad_norm": 0.6968958620074822, + "learning_rate": 1.0299661660890206e-06, + "loss": 0.3414996564388275, + "memory(GiB)": 77.0, + "step": 6704, + "token_acc": 0.8973412112259971, + "train_speed(iter/s)": 0.455304 + }, + { + "epoch": 2.1456, + "grad_norm": 0.7005322766330426, + "learning_rate": 1.0292529517477894e-06, + "loss": 0.27409183979034424, + "memory(GiB)": 77.0, + "step": 6705, + "token_acc": 0.962356067316209, + "train_speed(iter/s)": 0.455248 + }, + { + "epoch": 2.14592, + "grad_norm": 0.7869585724993428, + "learning_rate": 1.0285399204155147e-06, + "loss": 0.34282028675079346, + "memory(GiB)": 77.0, + "step": 6706, + "token_acc": 0.8762319504927802, + "train_speed(iter/s)": 0.455188 + }, + { + "epoch": 2.14624, + "grad_norm": 0.7172652192113478, + "learning_rate": 1.0278270721809208e-06, + "loss": 0.38835200667381287, + "memory(GiB)": 77.0, + "step": 6707, + "token_acc": 0.9120879120879121, + "train_speed(iter/s)": 0.455131 + }, + { + "epoch": 2.14656, + "grad_norm": 0.7581605817787677, + "learning_rate": 1.0271144071327095e-06, + "loss": 0.3121344745159149, + "memory(GiB)": 77.0, + "step": 6708, + "token_acc": 0.9487341772151898, + "train_speed(iter/s)": 0.455078 + }, + { + "epoch": 2.14688, + "grad_norm": 0.7364127381860669, + "learning_rate": 1.0264019253595595e-06, + "loss": 0.27185678482055664, + "memory(GiB)": 77.0, + "step": 6709, + "token_acc": 0.9087635054021609, + "train_speed(iter/s)": 0.455009 + }, + { + "epoch": 2.1471999999999998, + "grad_norm": 0.6995227016809047, + "learning_rate": 1.0256896269501264e-06, + "loss": 0.332319438457489, + "memory(GiB)": 77.0, + "step": 6710, + "token_acc": 0.8968464485705865, + "train_speed(iter/s)": 0.454943 + }, + { + "epoch": 2.14752, + "grad_norm": 0.7492870474445739, + "learning_rate": 1.0249775119930453e-06, + "loss": 0.3329814672470093, + 
"memory(GiB)": 77.0, + "step": 6711, + "token_acc": 0.8341389728096676, + "train_speed(iter/s)": 0.454889 + }, + { + "epoch": 2.14784, + "grad_norm": 0.6777817115716694, + "learning_rate": 1.0242655805769236e-06, + "loss": 0.29245492815971375, + "memory(GiB)": 77.0, + "step": 6712, + "token_acc": 0.8491029272898961, + "train_speed(iter/s)": 0.454826 + }, + { + "epoch": 2.14816, + "grad_norm": 0.7292964826419451, + "learning_rate": 1.0235538327903504e-06, + "loss": 0.3514801561832428, + "memory(GiB)": 77.0, + "step": 6713, + "token_acc": 0.9538063562453807, + "train_speed(iter/s)": 0.454764 + }, + { + "epoch": 2.14848, + "grad_norm": 0.785687274901947, + "learning_rate": 1.0228422687218904e-06, + "loss": 0.33750075101852417, + "memory(GiB)": 77.0, + "step": 6714, + "token_acc": 0.9298569450853715, + "train_speed(iter/s)": 0.454693 + }, + { + "epoch": 2.1488, + "grad_norm": 0.7747052476969919, + "learning_rate": 1.0221308884600847e-06, + "loss": 0.3130114674568176, + "memory(GiB)": 77.0, + "step": 6715, + "token_acc": 0.8325227963525836, + "train_speed(iter/s)": 0.454639 + }, + { + "epoch": 2.14912, + "grad_norm": 0.7320513180402347, + "learning_rate": 1.0214196920934533e-06, + "loss": 0.37537312507629395, + "memory(GiB)": 77.0, + "step": 6716, + "token_acc": 0.8734550820881756, + "train_speed(iter/s)": 0.454572 + }, + { + "epoch": 2.14944, + "grad_norm": 0.7234068920592863, + "learning_rate": 1.0207086797104913e-06, + "loss": 0.3455793857574463, + "memory(GiB)": 77.0, + "step": 6717, + "token_acc": 0.8908967698215495, + "train_speed(iter/s)": 0.454507 + }, + { + "epoch": 2.14976, + "grad_norm": 0.6687215389167182, + "learning_rate": 1.0199978513996713e-06, + "loss": 0.3470308780670166, + "memory(GiB)": 77.0, + "step": 6718, + "token_acc": 0.8830188679245283, + "train_speed(iter/s)": 0.454449 + }, + { + "epoch": 2.15008, + "grad_norm": 0.6927335450032617, + "learning_rate": 1.019287207249446e-06, + "loss": 0.2606683373451233, + "memory(GiB)": 77.0, + "step": 6719, + "token_acc": 0.8909323116219668, + "train_speed(iter/s)": 0.454395 + }, + { + "epoch": 2.1504, + "grad_norm": 0.6753574747638694, + "learning_rate": 1.0185767473482406e-06, + "loss": 0.24635997414588928, + "memory(GiB)": 77.0, + "step": 6720, + "token_acc": 0.9713949843260188, + "train_speed(iter/s)": 0.454335 + }, + { + "epoch": 2.15072, + "grad_norm": 0.7307479019651878, + "learning_rate": 1.0178664717844601e-06, + "loss": 0.32588547468185425, + "memory(GiB)": 77.0, + "step": 6721, + "token_acc": 0.9284134881149807, + "train_speed(iter/s)": 0.454279 + }, + { + "epoch": 2.15104, + "grad_norm": 0.6415695172048863, + "learning_rate": 1.0171563806464862e-06, + "loss": 0.33122023940086365, + "memory(GiB)": 77.0, + "step": 6722, + "token_acc": 0.9086214638527167, + "train_speed(iter/s)": 0.454223 + }, + { + "epoch": 2.15136, + "grad_norm": 0.7583809773455067, + "learning_rate": 1.0164464740226773e-06, + "loss": 0.3597433567047119, + "memory(GiB)": 77.0, + "step": 6723, + "token_acc": 0.8707550280250577, + "train_speed(iter/s)": 0.454167 + }, + { + "epoch": 2.15168, + "grad_norm": 0.7277713253595829, + "learning_rate": 1.015736752001369e-06, + "loss": 0.3742882311344147, + "memory(GiB)": 77.0, + "step": 6724, + "token_acc": 0.9356748224151539, + "train_speed(iter/s)": 0.454104 + }, + { + "epoch": 2.152, + "grad_norm": 1.0439628299492865, + "learning_rate": 1.0150272146708745e-06, + "loss": 0.29539692401885986, + "memory(GiB)": 77.0, + "step": 6725, + "token_acc": 0.8774973711882229, + "train_speed(iter/s)": 0.454051 + }, + { + 
"epoch": 2.15232, + "grad_norm": 0.7905693713159294, + "learning_rate": 1.0143178621194818e-06, + "loss": 0.33889105916023254, + "memory(GiB)": 77.0, + "step": 6726, + "token_acc": 0.9001151410477836, + "train_speed(iter/s)": 0.453986 + }, + { + "epoch": 2.15264, + "grad_norm": 0.6499632727272052, + "learning_rate": 1.0136086944354587e-06, + "loss": 0.3307637572288513, + "memory(GiB)": 77.0, + "step": 6727, + "token_acc": 0.9207692307692308, + "train_speed(iter/s)": 0.45393 + }, + { + "epoch": 2.15296, + "grad_norm": 0.7371194734424515, + "learning_rate": 1.0128997117070482e-06, + "loss": 0.32914412021636963, + "memory(GiB)": 77.0, + "step": 6728, + "token_acc": 0.8812949640287769, + "train_speed(iter/s)": 0.453868 + }, + { + "epoch": 2.15328, + "grad_norm": 0.7616667169317749, + "learning_rate": 1.0121909140224715e-06, + "loss": 0.2608674466609955, + "memory(GiB)": 77.0, + "step": 6729, + "token_acc": 0.8827686175031708, + "train_speed(iter/s)": 0.453814 + }, + { + "epoch": 2.1536, + "grad_norm": 0.6926196103553619, + "learning_rate": 1.0114823014699257e-06, + "loss": 0.34735751152038574, + "memory(GiB)": 77.0, + "step": 6730, + "token_acc": 0.8490524940100196, + "train_speed(iter/s)": 0.453752 + }, + { + "epoch": 2.15392, + "grad_norm": 0.6988844998540007, + "learning_rate": 1.010773874137586e-06, + "loss": 0.33066731691360474, + "memory(GiB)": 77.0, + "step": 6731, + "token_acc": 0.8659663865546219, + "train_speed(iter/s)": 0.453692 + }, + { + "epoch": 2.15424, + "grad_norm": 0.7678791253488538, + "learning_rate": 1.010065632113603e-06, + "loss": 0.32382312417030334, + "memory(GiB)": 77.0, + "step": 6732, + "token_acc": 0.9079487179487179, + "train_speed(iter/s)": 0.453634 + }, + { + "epoch": 2.15456, + "grad_norm": 0.7267801614715234, + "learning_rate": 1.0093575754861056e-06, + "loss": 0.3332335948944092, + "memory(GiB)": 77.0, + "step": 6733, + "token_acc": 0.854153041203401, + "train_speed(iter/s)": 0.453576 + }, + { + "epoch": 2.15488, + "grad_norm": 0.736147054403647, + "learning_rate": 1.0086497043431992e-06, + "loss": 0.30077695846557617, + "memory(GiB)": 77.0, + "step": 6734, + "token_acc": 0.8959160521837777, + "train_speed(iter/s)": 0.453515 + }, + { + "epoch": 2.1552, + "grad_norm": 0.6994378202827295, + "learning_rate": 1.0079420187729653e-06, + "loss": 0.3138714134693146, + "memory(GiB)": 77.0, + "step": 6735, + "token_acc": 0.9204161979752531, + "train_speed(iter/s)": 0.453455 + }, + { + "epoch": 2.15552, + "grad_norm": 0.6989068639453357, + "learning_rate": 1.0072345188634647e-06, + "loss": 0.3689345121383667, + "memory(GiB)": 77.0, + "step": 6736, + "token_acc": 0.8706377858002406, + "train_speed(iter/s)": 0.453392 + }, + { + "epoch": 2.15584, + "grad_norm": 0.7213539069873223, + "learning_rate": 1.0065272047027324e-06, + "loss": 0.25739312171936035, + "memory(GiB)": 77.0, + "step": 6737, + "token_acc": 0.9134723788049606, + "train_speed(iter/s)": 0.453338 + }, + { + "epoch": 2.15616, + "grad_norm": 0.7049149018151064, + "learning_rate": 1.0058200763787819e-06, + "loss": 0.31911033391952515, + "memory(GiB)": 77.0, + "step": 6738, + "token_acc": 0.905118300338001, + "train_speed(iter/s)": 0.453275 + }, + { + "epoch": 2.15648, + "grad_norm": 0.6671725977261805, + "learning_rate": 1.0051131339796025e-06, + "loss": 0.2630133628845215, + "memory(GiB)": 77.0, + "step": 6739, + "token_acc": 0.866364270760642, + "train_speed(iter/s)": 0.45322 + }, + { + "epoch": 2.1568, + "grad_norm": 0.7887706716249029, + "learning_rate": 1.004406377593162e-06, + "loss": 0.3008832335472107, + 
"memory(GiB)": 77.0, + "step": 6740, + "token_acc": 0.8885379061371841, + "train_speed(iter/s)": 0.453167 + }, + { + "epoch": 2.15712, + "grad_norm": 0.7523375168792586, + "learning_rate": 1.0036998073074025e-06, + "loss": 0.3283740282058716, + "memory(GiB)": 77.0, + "step": 6741, + "token_acc": 0.8762987012987012, + "train_speed(iter/s)": 0.453109 + }, + { + "epoch": 2.15744, + "grad_norm": 0.7631443613883929, + "learning_rate": 1.0029934232102447e-06, + "loss": 0.39837461709976196, + "memory(GiB)": 77.0, + "step": 6742, + "token_acc": 0.9046454767726161, + "train_speed(iter/s)": 0.453054 + }, + { + "epoch": 2.15776, + "grad_norm": 0.7188624931704709, + "learning_rate": 1.0022872253895865e-06, + "loss": 0.313676118850708, + "memory(GiB)": 77.0, + "step": 6743, + "token_acc": 0.9377049180327869, + "train_speed(iter/s)": 0.453002 + }, + { + "epoch": 2.15808, + "grad_norm": 0.6861924646558316, + "learning_rate": 1.0015812139333018e-06, + "loss": 0.33414584398269653, + "memory(GiB)": 77.0, + "step": 6744, + "token_acc": 0.9054662379421222, + "train_speed(iter/s)": 0.452934 + }, + { + "epoch": 2.1584, + "grad_norm": 0.6676789579068666, + "learning_rate": 1.000875388929241e-06, + "loss": 0.38006749749183655, + "memory(GiB)": 77.0, + "step": 6745, + "token_acc": 0.8513853904282116, + "train_speed(iter/s)": 0.45287 + }, + { + "epoch": 2.15872, + "grad_norm": 0.7614010651279783, + "learning_rate": 1.0001697504652321e-06, + "loss": 0.24766966700553894, + "memory(GiB)": 77.0, + "step": 6746, + "token_acc": 0.9272943980929678, + "train_speed(iter/s)": 0.452818 + }, + { + "epoch": 2.15904, + "grad_norm": 0.7596470200258818, + "learning_rate": 9.994642986290797e-07, + "loss": 0.27257224917411804, + "memory(GiB)": 77.0, + "step": 6747, + "token_acc": 0.9569154774972558, + "train_speed(iter/s)": 0.452761 + }, + { + "epoch": 2.15936, + "grad_norm": 0.67576385768785, + "learning_rate": 9.987590335085654e-07, + "loss": 0.36414986848831177, + "memory(GiB)": 77.0, + "step": 6748, + "token_acc": 0.8528535411414164, + "train_speed(iter/s)": 0.452693 + }, + { + "epoch": 2.15968, + "grad_norm": 0.7500034948810164, + "learning_rate": 9.980539551914461e-07, + "loss": 0.30905723571777344, + "memory(GiB)": 77.0, + "step": 6749, + "token_acc": 0.9715817694369974, + "train_speed(iter/s)": 0.452639 + }, + { + "epoch": 2.16, + "grad_norm": 0.7122391518239832, + "learning_rate": 9.973490637654575e-07, + "loss": 0.3015301823616028, + "memory(GiB)": 77.0, + "step": 6750, + "token_acc": 0.9571428571428572, + "train_speed(iter/s)": 0.452586 + }, + { + "epoch": 2.16032, + "grad_norm": 0.657481490941943, + "learning_rate": 9.966443593183105e-07, + "loss": 0.24077066779136658, + "memory(GiB)": 77.0, + "step": 6751, + "token_acc": 0.9610591900311527, + "train_speed(iter/s)": 0.45253 + }, + { + "epoch": 2.16064, + "grad_norm": 0.7716617538081569, + "learning_rate": 9.95939841937693e-07, + "loss": 0.35151076316833496, + "memory(GiB)": 77.0, + "step": 6752, + "token_acc": 0.8947888367498148, + "train_speed(iter/s)": 0.452472 + }, + { + "epoch": 2.16096, + "grad_norm": 0.7602628605051651, + "learning_rate": 9.952355117112715e-07, + "loss": 0.24833643436431885, + "memory(GiB)": 77.0, + "step": 6753, + "token_acc": 0.9573189059212504, + "train_speed(iter/s)": 0.452416 + }, + { + "epoch": 2.16128, + "grad_norm": 0.7450462157874026, + "learning_rate": 9.94531368726688e-07, + "loss": 0.33980461955070496, + "memory(GiB)": 77.0, + "step": 6754, + "token_acc": 0.8169304886441845, + "train_speed(iter/s)": 0.452356 + }, + { + "epoch": 2.1616, 
+ "grad_norm": 0.7156122674217446, + "learning_rate": 9.938274130715585e-07, + "loss": 0.3051651418209076, + "memory(GiB)": 77.0, + "step": 6755, + "token_acc": 0.9005160550458715, + "train_speed(iter/s)": 0.452295 + }, + { + "epoch": 2.16192, + "grad_norm": 0.8262020020796292, + "learning_rate": 9.931236448334794e-07, + "loss": 0.2733903229236603, + "memory(GiB)": 77.0, + "step": 6756, + "token_acc": 0.9082813891362422, + "train_speed(iter/s)": 0.45224 + }, + { + "epoch": 2.16224, + "grad_norm": 0.749710779557655, + "learning_rate": 9.92420064100022e-07, + "loss": 0.33805403113365173, + "memory(GiB)": 77.0, + "step": 6757, + "token_acc": 0.9275229357798165, + "train_speed(iter/s)": 0.452186 + }, + { + "epoch": 2.16256, + "grad_norm": 0.7685117954125226, + "learning_rate": 9.917166709587353e-07, + "loss": 0.3344612717628479, + "memory(GiB)": 77.0, + "step": 6758, + "token_acc": 0.8791208791208791, + "train_speed(iter/s)": 0.452133 + }, + { + "epoch": 2.16288, + "grad_norm": 0.6921029700680886, + "learning_rate": 9.91013465497144e-07, + "loss": 0.22251951694488525, + "memory(GiB)": 77.0, + "step": 6759, + "token_acc": 0.9493769470404985, + "train_speed(iter/s)": 0.45208 + }, + { + "epoch": 2.1632, + "grad_norm": 0.644903150917239, + "learning_rate": 9.903104478027498e-07, + "loss": 0.33846691250801086, + "memory(GiB)": 77.0, + "step": 6760, + "token_acc": 0.904289034439045, + "train_speed(iter/s)": 0.45201 + }, + { + "epoch": 2.16352, + "grad_norm": 0.6805584865000845, + "learning_rate": 9.896076179630312e-07, + "loss": 0.2758142352104187, + "memory(GiB)": 77.0, + "step": 6761, + "token_acc": 0.9430262671106179, + "train_speed(iter/s)": 0.45196 + }, + { + "epoch": 2.16384, + "grad_norm": 0.6650751678681478, + "learning_rate": 9.889049760654435e-07, + "loss": 0.2624501585960388, + "memory(GiB)": 77.0, + "step": 6762, + "token_acc": 0.8675777568331763, + "train_speed(iter/s)": 0.451899 + }, + { + "epoch": 2.16416, + "grad_norm": 0.7937336725874494, + "learning_rate": 9.882025221974175e-07, + "loss": 0.2948540449142456, + "memory(GiB)": 77.0, + "step": 6763, + "token_acc": 0.8793843951324266, + "train_speed(iter/s)": 0.451848 + }, + { + "epoch": 2.16448, + "grad_norm": 0.7023737512084346, + "learning_rate": 9.87500256446362e-07, + "loss": 0.28241071105003357, + "memory(GiB)": 77.0, + "step": 6764, + "token_acc": 0.9608843537414966, + "train_speed(iter/s)": 0.451795 + }, + { + "epoch": 2.1648, + "grad_norm": 0.7031496364091804, + "learning_rate": 9.867981788996616e-07, + "loss": 0.3103916645050049, + "memory(GiB)": 77.0, + "step": 6765, + "token_acc": 0.8879358567965691, + "train_speed(iter/s)": 0.451739 + }, + { + "epoch": 2.16512, + "grad_norm": 0.7249458321410698, + "learning_rate": 9.86096289644678e-07, + "loss": 0.34202346205711365, + "memory(GiB)": 77.0, + "step": 6766, + "token_acc": 0.8613057912209516, + "train_speed(iter/s)": 0.451678 + }, + { + "epoch": 2.16544, + "grad_norm": 0.7112983901141117, + "learning_rate": 9.853945887687484e-07, + "loss": 0.3014904260635376, + "memory(GiB)": 77.0, + "step": 6767, + "token_acc": 0.936485532815808, + "train_speed(iter/s)": 0.451627 + }, + { + "epoch": 2.16576, + "grad_norm": 0.7128995338565955, + "learning_rate": 9.84693076359188e-07, + "loss": 0.2225891798734665, + "memory(GiB)": 77.0, + "step": 6768, + "token_acc": 0.9752611324903794, + "train_speed(iter/s)": 0.451562 + }, + { + "epoch": 2.16608, + "grad_norm": 0.7324832798752278, + "learning_rate": 9.839917525032877e-07, + "loss": 0.3075079917907715, + "memory(GiB)": 77.0, + "step": 6769, + 
"token_acc": 0.8812842210054753, + "train_speed(iter/s)": 0.451503 + }, + { + "epoch": 2.1664, + "grad_norm": 0.7143171810510663, + "learning_rate": 9.83290617288315e-07, + "loss": 0.26804760098457336, + "memory(GiB)": 77.0, + "step": 6770, + "token_acc": 0.8729201331114809, + "train_speed(iter/s)": 0.451446 + }, + { + "epoch": 2.16672, + "grad_norm": 0.7199420917437082, + "learning_rate": 9.825896708015137e-07, + "loss": 0.3443790078163147, + "memory(GiB)": 77.0, + "step": 6771, + "token_acc": 0.8520655005582434, + "train_speed(iter/s)": 0.451391 + }, + { + "epoch": 2.16704, + "grad_norm": 0.6551822827000364, + "learning_rate": 9.818889131301049e-07, + "loss": 0.26192376017570496, + "memory(GiB)": 77.0, + "step": 6772, + "token_acc": 0.9714918350401328, + "train_speed(iter/s)": 0.451336 + }, + { + "epoch": 2.16736, + "grad_norm": 0.7312851097771278, + "learning_rate": 9.811883443612855e-07, + "loss": 0.31506237387657166, + "memory(GiB)": 77.0, + "step": 6773, + "token_acc": 0.893900563813429, + "train_speed(iter/s)": 0.451285 + }, + { + "epoch": 2.16768, + "grad_norm": 0.6787704903107633, + "learning_rate": 9.804879645822294e-07, + "loss": 0.2638581395149231, + "memory(GiB)": 77.0, + "step": 6774, + "token_acc": 0.9395073277206112, + "train_speed(iter/s)": 0.451221 + }, + { + "epoch": 2.168, + "grad_norm": 0.6950132109078988, + "learning_rate": 9.797877738800862e-07, + "loss": 0.27462542057037354, + "memory(GiB)": 77.0, + "step": 6775, + "token_acc": 0.9190567240280434, + "train_speed(iter/s)": 0.45117 + }, + { + "epoch": 2.16832, + "grad_norm": 0.6729284701690808, + "learning_rate": 9.790877723419832e-07, + "loss": 0.2916317284107208, + "memory(GiB)": 77.0, + "step": 6776, + "token_acc": 0.9486166007905138, + "train_speed(iter/s)": 0.451118 + }, + { + "epoch": 2.16864, + "grad_norm": 0.6862848809192855, + "learning_rate": 9.783879600550228e-07, + "loss": 0.24978429079055786, + "memory(GiB)": 77.0, + "step": 6777, + "token_acc": 0.9211126310989513, + "train_speed(iter/s)": 0.451059 + }, + { + "epoch": 2.16896, + "grad_norm": 0.736333306719917, + "learning_rate": 9.776883371062848e-07, + "loss": 0.28171277046203613, + "memory(GiB)": 77.0, + "step": 6778, + "token_acc": 0.9219165927240461, + "train_speed(iter/s)": 0.451005 + }, + { + "epoch": 2.16928, + "grad_norm": 0.8031584744563043, + "learning_rate": 9.76988903582825e-07, + "loss": 0.3460717797279358, + "memory(GiB)": 77.0, + "step": 6779, + "token_acc": 0.8242781155015197, + "train_speed(iter/s)": 0.450946 + }, + { + "epoch": 2.1696, + "grad_norm": 0.6394158835296005, + "learning_rate": 9.762896595716758e-07, + "loss": 0.32245200872421265, + "memory(GiB)": 77.0, + "step": 6780, + "token_acc": 0.9086986537797722, + "train_speed(iter/s)": 0.450892 + }, + { + "epoch": 2.16992, + "grad_norm": 0.6861709912925823, + "learning_rate": 9.755906051598463e-07, + "loss": 0.25502070784568787, + "memory(GiB)": 77.0, + "step": 6781, + "token_acc": 0.9552824267782427, + "train_speed(iter/s)": 0.450833 + }, + { + "epoch": 2.17024, + "grad_norm": 0.7613415589100561, + "learning_rate": 9.748917404343213e-07, + "loss": 0.2993633449077606, + "memory(GiB)": 77.0, + "step": 6782, + "token_acc": 0.8947123223852783, + "train_speed(iter/s)": 0.450775 + }, + { + "epoch": 2.17056, + "grad_norm": 0.7310963161467543, + "learning_rate": 9.741930654820624e-07, + "loss": 0.3046858012676239, + "memory(GiB)": 77.0, + "step": 6783, + "token_acc": 0.9716285924834193, + "train_speed(iter/s)": 0.450722 + }, + { + "epoch": 2.17088, + "grad_norm": 0.6603312524828877, + 
"learning_rate": 9.734945803900078e-07, + "loss": 0.27684980630874634, + "memory(GiB)": 77.0, + "step": 6784, + "token_acc": 0.886515697375193, + "train_speed(iter/s)": 0.450669 + }, + { + "epoch": 2.1712, + "grad_norm": 0.7347022407335051, + "learning_rate": 9.72796285245072e-07, + "loss": 0.35624879598617554, + "memory(GiB)": 77.0, + "step": 6785, + "token_acc": 0.8962678375411636, + "train_speed(iter/s)": 0.450605 + }, + { + "epoch": 2.17152, + "grad_norm": 0.7587258422375754, + "learning_rate": 9.720981801341454e-07, + "loss": 0.39405927062034607, + "memory(GiB)": 77.0, + "step": 6786, + "token_acc": 0.8968609865470852, + "train_speed(iter/s)": 0.450549 + }, + { + "epoch": 2.17184, + "grad_norm": 0.7176528963604838, + "learning_rate": 9.714002651440948e-07, + "loss": 0.33590027689933777, + "memory(GiB)": 77.0, + "step": 6787, + "token_acc": 0.8421889914094813, + "train_speed(iter/s)": 0.450495 + }, + { + "epoch": 2.17216, + "grad_norm": 0.7063634746643657, + "learning_rate": 9.707025403617642e-07, + "loss": 0.2653755843639374, + "memory(GiB)": 77.0, + "step": 6788, + "token_acc": 0.9672645739910314, + "train_speed(iter/s)": 0.450439 + }, + { + "epoch": 2.17248, + "grad_norm": 0.8342673769658251, + "learning_rate": 9.700050058739731e-07, + "loss": 0.263800710439682, + "memory(GiB)": 77.0, + "step": 6789, + "token_acc": 0.9436363636363636, + "train_speed(iter/s)": 0.45039 + }, + { + "epoch": 2.1728, + "grad_norm": 0.6660076943757482, + "learning_rate": 9.693076617675177e-07, + "loss": 0.2951371669769287, + "memory(GiB)": 77.0, + "step": 6790, + "token_acc": 0.897822445561139, + "train_speed(iter/s)": 0.450333 + }, + { + "epoch": 2.17312, + "grad_norm": 0.719871219957531, + "learning_rate": 9.6861050812917e-07, + "loss": 0.38446617126464844, + "memory(GiB)": 77.0, + "step": 6791, + "token_acc": 0.8656042496679947, + "train_speed(iter/s)": 0.450273 + }, + { + "epoch": 2.17344, + "grad_norm": 0.6372435019112732, + "learning_rate": 9.67913545045679e-07, + "loss": 0.2027004510164261, + "memory(GiB)": 77.0, + "step": 6792, + "token_acc": 0.9573552425665102, + "train_speed(iter/s)": 0.45022 + }, + { + "epoch": 2.17376, + "grad_norm": 0.7074646501973697, + "learning_rate": 9.672167726037696e-07, + "loss": 0.28152191638946533, + "memory(GiB)": 77.0, + "step": 6793, + "token_acc": 0.9341317365269461, + "train_speed(iter/s)": 0.450168 + }, + { + "epoch": 2.17408, + "grad_norm": 0.6339061299330198, + "learning_rate": 9.66520190890143e-07, + "loss": 0.2173091471195221, + "memory(GiB)": 77.0, + "step": 6794, + "token_acc": 0.9657439446366782, + "train_speed(iter/s)": 0.450107 + }, + { + "epoch": 2.1744, + "grad_norm": 0.710439296106914, + "learning_rate": 9.658237999914768e-07, + "loss": 0.2662120759487152, + "memory(GiB)": 77.0, + "step": 6795, + "token_acc": 0.931986704167732, + "train_speed(iter/s)": 0.450055 + }, + { + "epoch": 2.1747199999999998, + "grad_norm": 0.7132445625880782, + "learning_rate": 9.651275999944249e-07, + "loss": 0.2954667806625366, + "memory(GiB)": 77.0, + "step": 6796, + "token_acc": 0.8088467614533965, + "train_speed(iter/s)": 0.450002 + }, + { + "epoch": 2.17504, + "grad_norm": 0.7895263910233518, + "learning_rate": 9.644315909856178e-07, + "loss": 0.2987993061542511, + "memory(GiB)": 77.0, + "step": 6797, + "token_acc": 0.8432089265101962, + "train_speed(iter/s)": 0.449917 + }, + { + "epoch": 2.17536, + "grad_norm": 0.7775065408827799, + "learning_rate": 9.63735773051659e-07, + "loss": 0.2776971459388733, + "memory(GiB)": 77.0, + "step": 6798, + "token_acc": 
0.9587995930824008, + "train_speed(iter/s)": 0.44986 + }, + { + "epoch": 2.17568, + "grad_norm": 0.73802313114322, + "learning_rate": 9.630401462791345e-07, + "loss": 0.2969047725200653, + "memory(GiB)": 77.0, + "step": 6799, + "token_acc": 0.9283933024581403, + "train_speed(iter/s)": 0.449803 + }, + { + "epoch": 2.176, + "grad_norm": 0.7647564115538631, + "learning_rate": 9.623447107546014e-07, + "loss": 0.364830881357193, + "memory(GiB)": 77.0, + "step": 6800, + "token_acc": 0.9078275666477595, + "train_speed(iter/s)": 0.449748 + }, + { + "epoch": 2.17632, + "grad_norm": 0.7452073178627405, + "learning_rate": 9.616494665645946e-07, + "loss": 0.269601047039032, + "memory(GiB)": 77.0, + "step": 6801, + "token_acc": 0.9277684092186622, + "train_speed(iter/s)": 0.449692 + }, + { + "epoch": 2.17664, + "grad_norm": 0.6739662496064458, + "learning_rate": 9.609544137956254e-07, + "loss": 0.34707123041152954, + "memory(GiB)": 77.0, + "step": 6802, + "token_acc": 0.9259863945578232, + "train_speed(iter/s)": 0.449623 + }, + { + "epoch": 2.1769600000000002, + "grad_norm": 0.66668793837141, + "learning_rate": 9.602595525341807e-07, + "loss": 0.2843116819858551, + "memory(GiB)": 77.0, + "step": 6803, + "token_acc": 0.9400056069526213, + "train_speed(iter/s)": 0.449566 + }, + { + "epoch": 2.17728, + "grad_norm": 0.6863245605510886, + "learning_rate": 9.595648828667243e-07, + "loss": 0.31895220279693604, + "memory(GiB)": 77.0, + "step": 6804, + "token_acc": 0.9288214702450408, + "train_speed(iter/s)": 0.449513 + }, + { + "epoch": 2.1776, + "grad_norm": 0.7114843169704976, + "learning_rate": 9.588704048796956e-07, + "loss": 0.2535121738910675, + "memory(GiB)": 77.0, + "step": 6805, + "token_acc": 0.9380714879467996, + "train_speed(iter/s)": 0.449457 + }, + { + "epoch": 2.17792, + "grad_norm": 0.7199949458866617, + "learning_rate": 9.581761186595106e-07, + "loss": 0.3160568177700043, + "memory(GiB)": 77.0, + "step": 6806, + "token_acc": 0.9186931957749939, + "train_speed(iter/s)": 0.4494 + }, + { + "epoch": 2.17824, + "grad_norm": 0.7762701049408044, + "learning_rate": 9.57482024292561e-07, + "loss": 0.2803371548652649, + "memory(GiB)": 77.0, + "step": 6807, + "token_acc": 0.9175137961080453, + "train_speed(iter/s)": 0.449352 + }, + { + "epoch": 2.17856, + "grad_norm": 0.671694422212943, + "learning_rate": 9.567881218652144e-07, + "loss": 0.25670093297958374, + "memory(GiB)": 77.0, + "step": 6808, + "token_acc": 0.9248232229251954, + "train_speed(iter/s)": 0.449296 + }, + { + "epoch": 2.17888, + "grad_norm": 0.8257198449602269, + "learning_rate": 9.560944114638158e-07, + "loss": 0.3807249665260315, + "memory(GiB)": 77.0, + "step": 6809, + "token_acc": 0.886994775914215, + "train_speed(iter/s)": 0.449245 + }, + { + "epoch": 2.1792, + "grad_norm": 0.6947974941863359, + "learning_rate": 9.554008931746846e-07, + "loss": 0.29270806908607483, + "memory(GiB)": 77.0, + "step": 6810, + "token_acc": 0.8381971465629053, + "train_speed(iter/s)": 0.449193 + }, + { + "epoch": 2.17952, + "grad_norm": 0.774196965406823, + "learning_rate": 9.547075670841186e-07, + "loss": 0.28679874539375305, + "memory(GiB)": 77.0, + "step": 6811, + "token_acc": 0.9017885158456228, + "train_speed(iter/s)": 0.449141 + }, + { + "epoch": 2.17984, + "grad_norm": 0.6578062483123737, + "learning_rate": 9.54014433278388e-07, + "loss": 0.3268728256225586, + "memory(GiB)": 77.0, + "step": 6812, + "token_acc": 0.928923426838514, + "train_speed(iter/s)": 0.449082 + }, + { + "epoch": 2.18016, + "grad_norm": 0.7685502814658642, + "learning_rate": 
9.533214918437422e-07, + "loss": 0.3080410957336426, + "memory(GiB)": 77.0, + "step": 6813, + "token_acc": 0.9093412898783567, + "train_speed(iter/s)": 0.449029 + }, + { + "epoch": 2.18048, + "grad_norm": 0.7267414375957599, + "learning_rate": 9.52628742866406e-07, + "loss": 0.3087390959262848, + "memory(GiB)": 77.0, + "step": 6814, + "token_acc": 0.9306540583136328, + "train_speed(iter/s)": 0.448977 + }, + { + "epoch": 2.1808, + "grad_norm": 0.7188357598812782, + "learning_rate": 9.519361864325793e-07, + "loss": 0.24197810888290405, + "memory(GiB)": 77.0, + "step": 6815, + "token_acc": 0.9481183777858969, + "train_speed(iter/s)": 0.448927 + }, + { + "epoch": 2.18112, + "grad_norm": 0.7459081217260436, + "learning_rate": 9.512438226284398e-07, + "loss": 0.3278675079345703, + "memory(GiB)": 77.0, + "step": 6816, + "token_acc": 0.9683766690091357, + "train_speed(iter/s)": 0.448873 + }, + { + "epoch": 2.18144, + "grad_norm": 0.687270259824592, + "learning_rate": 9.505516515401397e-07, + "loss": 0.24328425526618958, + "memory(GiB)": 77.0, + "step": 6817, + "token_acc": 0.9039451114922813, + "train_speed(iter/s)": 0.448812 + }, + { + "epoch": 2.18176, + "grad_norm": 0.7964810723548338, + "learning_rate": 9.498596732538079e-07, + "loss": 0.30435559153556824, + "memory(GiB)": 77.0, + "step": 6818, + "token_acc": 0.8915226998794696, + "train_speed(iter/s)": 0.448755 + }, + { + "epoch": 2.18208, + "grad_norm": 0.6274319258878429, + "learning_rate": 9.491678878555488e-07, + "loss": 0.2511773705482483, + "memory(GiB)": 77.0, + "step": 6819, + "token_acc": 0.9270376356499653, + "train_speed(iter/s)": 0.448692 + }, + { + "epoch": 2.1824, + "grad_norm": 0.7614049501337539, + "learning_rate": 9.48476295431443e-07, + "loss": 0.3819582760334015, + "memory(GiB)": 77.0, + "step": 6820, + "token_acc": 0.8427827152926755, + "train_speed(iter/s)": 0.448634 + }, + { + "epoch": 2.1827199999999998, + "grad_norm": 0.7366321932023613, + "learning_rate": 9.477848960675473e-07, + "loss": 0.32010212540626526, + "memory(GiB)": 77.0, + "step": 6821, + "token_acc": 0.9041604754829123, + "train_speed(iter/s)": 0.44857 + }, + { + "epoch": 2.18304, + "grad_norm": 0.7287801324851895, + "learning_rate": 9.470936898498945e-07, + "loss": 0.3355240821838379, + "memory(GiB)": 77.0, + "step": 6822, + "token_acc": 0.9342795433508176, + "train_speed(iter/s)": 0.448519 + }, + { + "epoch": 2.18336, + "grad_norm": 0.8023225698390052, + "learning_rate": 9.46402676864493e-07, + "loss": 0.32868438959121704, + "memory(GiB)": 77.0, + "step": 6823, + "token_acc": 0.9326874043855176, + "train_speed(iter/s)": 0.448468 + }, + { + "epoch": 2.18368, + "grad_norm": 0.6698431369037572, + "learning_rate": 9.457118571973276e-07, + "loss": 0.3595837950706482, + "memory(GiB)": 77.0, + "step": 6824, + "token_acc": 0.9199749921850578, + "train_speed(iter/s)": 0.448409 + }, + { + "epoch": 2.184, + "grad_norm": 0.7313630252709485, + "learning_rate": 9.450212309343584e-07, + "loss": 0.30466893315315247, + "memory(GiB)": 77.0, + "step": 6825, + "token_acc": 0.8909010600706714, + "train_speed(iter/s)": 0.448347 + }, + { + "epoch": 2.18432, + "grad_norm": 0.6391400898705252, + "learning_rate": 9.443307981615235e-07, + "loss": 0.2848191559314728, + "memory(GiB)": 77.0, + "step": 6826, + "token_acc": 0.886762360446571, + "train_speed(iter/s)": 0.448283 + }, + { + "epoch": 2.18464, + "grad_norm": 0.7063927181315448, + "learning_rate": 9.436405589647326e-07, + "loss": 0.3040202260017395, + "memory(GiB)": 77.0, + "step": 6827, + "token_acc": 0.9012127337038909, + 
"train_speed(iter/s)": 0.448232 + }, + { + "epoch": 2.1849600000000002, + "grad_norm": 0.7509770262528014, + "learning_rate": 9.429505134298759e-07, + "loss": 0.29376187920570374, + "memory(GiB)": 77.0, + "step": 6828, + "token_acc": 0.9136363636363637, + "train_speed(iter/s)": 0.448182 + }, + { + "epoch": 2.18528, + "grad_norm": 0.7725959433134543, + "learning_rate": 9.422606616428165e-07, + "loss": 0.31306207180023193, + "memory(GiB)": 77.0, + "step": 6829, + "token_acc": 0.8670749279538905, + "train_speed(iter/s)": 0.44812 + }, + { + "epoch": 2.1856, + "grad_norm": 0.7380714953696209, + "learning_rate": 9.415710036893952e-07, + "loss": 0.2528364658355713, + "memory(GiB)": 77.0, + "step": 6830, + "token_acc": 0.92625, + "train_speed(iter/s)": 0.44806 + }, + { + "epoch": 2.18592, + "grad_norm": 0.6571524831040336, + "learning_rate": 9.408815396554278e-07, + "loss": 0.2576684355735779, + "memory(GiB)": 77.0, + "step": 6831, + "token_acc": 0.9003037834852251, + "train_speed(iter/s)": 0.447996 + }, + { + "epoch": 2.18624, + "grad_norm": 0.7195410537676576, + "learning_rate": 9.401922696267052e-07, + "loss": 0.3471038341522217, + "memory(GiB)": 77.0, + "step": 6832, + "token_acc": 0.8643373493975903, + "train_speed(iter/s)": 0.44794 + }, + { + "epoch": 2.18656, + "grad_norm": 0.6998825437556624, + "learning_rate": 9.39503193688997e-07, + "loss": 0.30948740243911743, + "memory(GiB)": 77.0, + "step": 6833, + "token_acc": 0.9007854485324515, + "train_speed(iter/s)": 0.447886 + }, + { + "epoch": 2.18688, + "grad_norm": 0.6599765864704015, + "learning_rate": 9.388143119280458e-07, + "loss": 0.2868746519088745, + "memory(GiB)": 77.0, + "step": 6834, + "token_acc": 0.9072292894818492, + "train_speed(iter/s)": 0.447831 + }, + { + "epoch": 2.1872, + "grad_norm": 0.7846082325143666, + "learning_rate": 9.381256244295708e-07, + "loss": 0.29818153381347656, + "memory(GiB)": 77.0, + "step": 6835, + "token_acc": 0.9012903225806451, + "train_speed(iter/s)": 0.447779 + }, + { + "epoch": 2.18752, + "grad_norm": 0.7413091246847153, + "learning_rate": 9.374371312792674e-07, + "loss": 0.29104557633399963, + "memory(GiB)": 77.0, + "step": 6836, + "token_acc": 0.9264931087289433, + "train_speed(iter/s)": 0.447723 + }, + { + "epoch": 2.18784, + "grad_norm": 0.7052677747883329, + "learning_rate": 9.367488325628066e-07, + "loss": 0.32230350375175476, + "memory(GiB)": 77.0, + "step": 6837, + "token_acc": 0.9095400988217408, + "train_speed(iter/s)": 0.447671 + }, + { + "epoch": 2.18816, + "grad_norm": 0.7079366947107975, + "learning_rate": 9.360607283658354e-07, + "loss": 0.2779572308063507, + "memory(GiB)": 77.0, + "step": 6838, + "token_acc": 0.9248391248391249, + "train_speed(iter/s)": 0.447617 + }, + { + "epoch": 2.18848, + "grad_norm": 0.7372261927364262, + "learning_rate": 9.353728187739761e-07, + "loss": 0.30007612705230713, + "memory(GiB)": 77.0, + "step": 6839, + "token_acc": 0.9043353636689359, + "train_speed(iter/s)": 0.447565 + }, + { + "epoch": 2.1888, + "grad_norm": 0.6554315981155306, + "learning_rate": 9.346851038728283e-07, + "loss": 0.32873615622520447, + "memory(GiB)": 77.0, + "step": 6840, + "token_acc": 0.8762151652624757, + "train_speed(iter/s)": 0.4475 + }, + { + "epoch": 2.18912, + "grad_norm": 0.7275731727190073, + "learning_rate": 9.339975837479643e-07, + "loss": 0.32437413930892944, + "memory(GiB)": 77.0, + "step": 6841, + "token_acc": 0.8869953337390951, + "train_speed(iter/s)": 0.447451 + }, + { + "epoch": 2.18944, + "grad_norm": 0.8509251311092455, + "learning_rate": 9.333102584849346e-07, 
+ "loss": 0.3512055277824402, + "memory(GiB)": 77.0, + "step": 6842, + "token_acc": 0.9314481576692374, + "train_speed(iter/s)": 0.447398 + }, + { + "epoch": 2.18976, + "grad_norm": 0.7217739237001696, + "learning_rate": 9.326231281692655e-07, + "loss": 0.3186652660369873, + "memory(GiB)": 77.0, + "step": 6843, + "token_acc": 0.8617554858934169, + "train_speed(iter/s)": 0.447347 + }, + { + "epoch": 2.19008, + "grad_norm": 0.7003406910863392, + "learning_rate": 9.319361928864581e-07, + "loss": 0.2980138957500458, + "memory(GiB)": 77.0, + "step": 6844, + "token_acc": 0.8489425981873112, + "train_speed(iter/s)": 0.447292 + }, + { + "epoch": 2.1904, + "grad_norm": 0.7447847964617738, + "learning_rate": 9.312494527219895e-07, + "loss": 0.27174726128578186, + "memory(GiB)": 77.0, + "step": 6845, + "token_acc": 0.877341070957531, + "train_speed(iter/s)": 0.447239 + }, + { + "epoch": 2.19072, + "grad_norm": 0.731085561119316, + "learning_rate": 9.305629077613132e-07, + "loss": 0.24272257089614868, + "memory(GiB)": 77.0, + "step": 6846, + "token_acc": 0.9010568234032776, + "train_speed(iter/s)": 0.447173 + }, + { + "epoch": 2.19104, + "grad_norm": 0.8041693501897129, + "learning_rate": 9.298765580898569e-07, + "loss": 0.30144965648651123, + "memory(GiB)": 77.0, + "step": 6847, + "token_acc": 0.912559081701553, + "train_speed(iter/s)": 0.447126 + }, + { + "epoch": 2.19136, + "grad_norm": 0.7225088983729261, + "learning_rate": 9.291904037930255e-07, + "loss": 0.35978198051452637, + "memory(GiB)": 77.0, + "step": 6848, + "token_acc": 0.8457886044591247, + "train_speed(iter/s)": 0.447075 + }, + { + "epoch": 2.19168, + "grad_norm": 0.747908507761823, + "learning_rate": 9.28504444956198e-07, + "loss": 0.3321917653083801, + "memory(GiB)": 77.0, + "step": 6849, + "token_acc": 0.8964894166236448, + "train_speed(iter/s)": 0.447018 + }, + { + "epoch": 2.192, + "grad_norm": 0.7054533138876882, + "learning_rate": 9.278186816647322e-07, + "loss": 0.3132672607898712, + "memory(GiB)": 77.0, + "step": 6850, + "token_acc": 0.9313990103463787, + "train_speed(iter/s)": 0.446964 + }, + { + "epoch": 2.19232, + "grad_norm": 0.6764403245637165, + "learning_rate": 9.271331140039577e-07, + "loss": 0.29565081000328064, + "memory(GiB)": 77.0, + "step": 6851, + "token_acc": 0.9232245681381958, + "train_speed(iter/s)": 0.446905 + }, + { + "epoch": 2.19264, + "grad_norm": 0.7038537268967711, + "learning_rate": 9.264477420591822e-07, + "loss": 0.2483142614364624, + "memory(GiB)": 77.0, + "step": 6852, + "token_acc": 0.9454342984409799, + "train_speed(iter/s)": 0.446854 + }, + { + "epoch": 2.19296, + "grad_norm": 0.7065471821436321, + "learning_rate": 9.257625659156883e-07, + "loss": 0.3605177402496338, + "memory(GiB)": 77.0, + "step": 6853, + "token_acc": 0.8642678205420178, + "train_speed(iter/s)": 0.4468 + }, + { + "epoch": 2.19328, + "grad_norm": 0.6329823928646802, + "learning_rate": 9.250775856587338e-07, + "loss": 0.21119961142539978, + "memory(GiB)": 77.0, + "step": 6854, + "token_acc": 0.9626329405001437, + "train_speed(iter/s)": 0.446745 + }, + { + "epoch": 2.1936, + "grad_norm": 0.6815926740212391, + "learning_rate": 9.24392801373554e-07, + "loss": 0.262401282787323, + "memory(GiB)": 77.0, + "step": 6855, + "token_acc": 0.9487112046291426, + "train_speed(iter/s)": 0.446693 + }, + { + "epoch": 2.19392, + "grad_norm": 0.7381559731521234, + "learning_rate": 9.237082131453564e-07, + "loss": 0.32736867666244507, + "memory(GiB)": 77.0, + "step": 6856, + "token_acc": 0.9100303951367781, + "train_speed(iter/s)": 0.446639 + }, + 
{ + "epoch": 2.19424, + "grad_norm": 0.6970294364921892, + "learning_rate": 9.230238210593274e-07, + "loss": 0.3422171175479889, + "memory(GiB)": 77.0, + "step": 6857, + "token_acc": 0.859704641350211, + "train_speed(iter/s)": 0.446585 + }, + { + "epoch": 2.19456, + "grad_norm": 0.7302515423321436, + "learning_rate": 9.22339625200627e-07, + "loss": 0.3450465500354767, + "memory(GiB)": 77.0, + "step": 6858, + "token_acc": 0.8866825522901773, + "train_speed(iter/s)": 0.446536 + }, + { + "epoch": 2.19488, + "grad_norm": 0.7267150020290744, + "learning_rate": 9.216556256543921e-07, + "loss": 0.35176903009414673, + "memory(GiB)": 77.0, + "step": 6859, + "token_acc": 0.9130971993410214, + "train_speed(iter/s)": 0.446484 + }, + { + "epoch": 2.1952, + "grad_norm": 0.7629960135234607, + "learning_rate": 9.209718225057346e-07, + "loss": 0.25223061442375183, + "memory(GiB)": 77.0, + "step": 6860, + "token_acc": 0.9552083333333333, + "train_speed(iter/s)": 0.446436 + }, + { + "epoch": 2.19552, + "grad_norm": 0.6719631169578595, + "learning_rate": 9.202882158397416e-07, + "loss": 0.24844907224178314, + "memory(GiB)": 77.0, + "step": 6861, + "token_acc": 0.9555619266055045, + "train_speed(iter/s)": 0.446383 + }, + { + "epoch": 2.19584, + "grad_norm": 0.6317851485188627, + "learning_rate": 9.196048057414761e-07, + "loss": 0.2359265387058258, + "memory(GiB)": 77.0, + "step": 6862, + "token_acc": 0.9574468085106383, + "train_speed(iter/s)": 0.446332 + }, + { + "epoch": 2.19616, + "grad_norm": 0.6631772343758262, + "learning_rate": 9.189215922959768e-07, + "loss": 0.2775424122810364, + "memory(GiB)": 77.0, + "step": 6863, + "token_acc": 0.8958910433979687, + "train_speed(iter/s)": 0.446273 + }, + { + "epoch": 2.19648, + "grad_norm": 0.6782092091703755, + "learning_rate": 9.182385755882578e-07, + "loss": 0.26813632249832153, + "memory(GiB)": 77.0, + "step": 6864, + "token_acc": 0.898041185334003, + "train_speed(iter/s)": 0.446218 + }, + { + "epoch": 2.1968, + "grad_norm": 0.7127361977387984, + "learning_rate": 9.175557557033087e-07, + "loss": 0.28329676389694214, + "memory(GiB)": 77.0, + "step": 6865, + "token_acc": 0.8583268583268583, + "train_speed(iter/s)": 0.446169 + }, + { + "epoch": 2.19712, + "grad_norm": 0.7676315447116604, + "learning_rate": 9.168731327260935e-07, + "loss": 0.32781851291656494, + "memory(GiB)": 77.0, + "step": 6866, + "token_acc": 0.8703643985811028, + "train_speed(iter/s)": 0.446118 + }, + { + "epoch": 2.19744, + "grad_norm": 0.7056416179699635, + "learning_rate": 9.161907067415549e-07, + "loss": 0.4062647819519043, + "memory(GiB)": 77.0, + "step": 6867, + "token_acc": 0.8962053571428571, + "train_speed(iter/s)": 0.446065 + }, + { + "epoch": 2.19776, + "grad_norm": 0.7063963731650085, + "learning_rate": 9.155084778346077e-07, + "loss": 0.27645137906074524, + "memory(GiB)": 77.0, + "step": 6868, + "token_acc": 0.9431859043509528, + "train_speed(iter/s)": 0.446014 + }, + { + "epoch": 2.19808, + "grad_norm": 0.6777417590178663, + "learning_rate": 9.148264460901443e-07, + "loss": 0.33724039793014526, + "memory(GiB)": 77.0, + "step": 6869, + "token_acc": 0.9143775569842197, + "train_speed(iter/s)": 0.445956 + }, + { + "epoch": 2.1984, + "grad_norm": 0.7040475369632008, + "learning_rate": 9.141446115930302e-07, + "loss": 0.3403949439525604, + "memory(GiB)": 77.0, + "step": 6870, + "token_acc": 0.8756256256256256, + "train_speed(iter/s)": 0.445907 + }, + { + "epoch": 2.19872, + "grad_norm": 0.7289475142210652, + "learning_rate": 9.134629744281089e-07, + "loss": 0.30665016174316406, + 
"memory(GiB)": 77.0, + "step": 6871, + "token_acc": 0.9252685660906118, + "train_speed(iter/s)": 0.445843 + }, + { + "epoch": 2.19904, + "grad_norm": 0.7238821615087332, + "learning_rate": 9.12781534680198e-07, + "loss": 0.3057876527309418, + "memory(GiB)": 77.0, + "step": 6872, + "token_acc": 0.863073317710077, + "train_speed(iter/s)": 0.445794 + }, + { + "epoch": 2.19936, + "grad_norm": 0.7123729903297558, + "learning_rate": 9.121002924340911e-07, + "loss": 0.2576475143432617, + "memory(GiB)": 77.0, + "step": 6873, + "token_acc": 0.8745003996802558, + "train_speed(iter/s)": 0.445746 + }, + { + "epoch": 2.19968, + "grad_norm": 0.6997400269334503, + "learning_rate": 9.114192477745568e-07, + "loss": 0.2618011236190796, + "memory(GiB)": 77.0, + "step": 6874, + "token_acc": 0.960098004900245, + "train_speed(iter/s)": 0.445691 + }, + { + "epoch": 2.2, + "grad_norm": 0.7770804489939191, + "learning_rate": 9.107384007863393e-07, + "loss": 0.34591546654701233, + "memory(GiB)": 77.0, + "step": 6875, + "token_acc": 0.9145842798727851, + "train_speed(iter/s)": 0.445636 + }, + { + "epoch": 2.20032, + "grad_norm": 0.695261240348369, + "learning_rate": 9.100577515541584e-07, + "loss": 0.2981776297092438, + "memory(GiB)": 77.0, + "step": 6876, + "token_acc": 0.8917981072555206, + "train_speed(iter/s)": 0.445582 + }, + { + "epoch": 2.20064, + "grad_norm": 0.768813322461266, + "learning_rate": 9.093773001627087e-07, + "loss": 0.32990604639053345, + "memory(GiB)": 77.0, + "step": 6877, + "token_acc": 0.9464015560838557, + "train_speed(iter/s)": 0.445527 + }, + { + "epoch": 2.20096, + "grad_norm": 0.663308311168454, + "learning_rate": 9.08697046696661e-07, + "loss": 0.2810009717941284, + "memory(GiB)": 77.0, + "step": 6878, + "token_acc": 0.9157810839532412, + "train_speed(iter/s)": 0.445475 + }, + { + "epoch": 2.20128, + "grad_norm": 0.7013299107041594, + "learning_rate": 9.080169912406608e-07, + "loss": 0.23774532973766327, + "memory(GiB)": 77.0, + "step": 6879, + "token_acc": 0.9196428571428571, + "train_speed(iter/s)": 0.445426 + }, + { + "epoch": 2.2016, + "grad_norm": 0.7383214049104645, + "learning_rate": 9.073371338793294e-07, + "loss": 0.3462015390396118, + "memory(GiB)": 77.0, + "step": 6880, + "token_acc": 0.8690308988764045, + "train_speed(iter/s)": 0.445375 + }, + { + "epoch": 2.20192, + "grad_norm": 0.7514949741599958, + "learning_rate": 9.066574746972628e-07, + "loss": 0.2827412486076355, + "memory(GiB)": 77.0, + "step": 6881, + "token_acc": 0.9429439567859554, + "train_speed(iter/s)": 0.445326 + }, + { + "epoch": 2.20224, + "grad_norm": 0.7406220068177358, + "learning_rate": 9.059780137790325e-07, + "loss": 0.2996991276741028, + "memory(GiB)": 77.0, + "step": 6882, + "token_acc": 0.8924256505576208, + "train_speed(iter/s)": 0.445272 + }, + { + "epoch": 2.20256, + "grad_norm": 0.7545745442605399, + "learning_rate": 9.052987512091879e-07, + "loss": 0.38935378193855286, + "memory(GiB)": 77.0, + "step": 6883, + "token_acc": 0.8645395491020252, + "train_speed(iter/s)": 0.445219 + }, + { + "epoch": 2.20288, + "grad_norm": 0.7075163514966387, + "learning_rate": 9.04619687072249e-07, + "loss": 0.33215683698654175, + "memory(GiB)": 77.0, + "step": 6884, + "token_acc": 0.9114707282246549, + "train_speed(iter/s)": 0.445164 + }, + { + "epoch": 2.2032, + "grad_norm": 0.8640538083794526, + "learning_rate": 9.039408214527143e-07, + "loss": 0.3056490421295166, + "memory(GiB)": 77.0, + "step": 6885, + "token_acc": 0.9139841688654353, + "train_speed(iter/s)": 0.445115 + }, + { + "epoch": 2.20352, + 
"grad_norm": 0.7669106873534679, + "learning_rate": 9.032621544350567e-07, + "loss": 0.24883447587490082, + "memory(GiB)": 77.0, + "step": 6886, + "token_acc": 0.9648464163822525, + "train_speed(iter/s)": 0.445068 + }, + { + "epoch": 2.20384, + "grad_norm": 0.697087419184514, + "learning_rate": 9.025836861037249e-07, + "loss": 0.3119775056838989, + "memory(GiB)": 77.0, + "step": 6887, + "token_acc": 0.9382845188284519, + "train_speed(iter/s)": 0.445016 + }, + { + "epoch": 2.20416, + "grad_norm": 0.7621643854132705, + "learning_rate": 9.019054165431424e-07, + "loss": 0.2958308458328247, + "memory(GiB)": 77.0, + "step": 6888, + "token_acc": 0.9330649219929542, + "train_speed(iter/s)": 0.444968 + }, + { + "epoch": 2.20448, + "grad_norm": 0.7126400835044451, + "learning_rate": 9.01227345837708e-07, + "loss": 0.3089081645011902, + "memory(GiB)": 77.0, + "step": 6889, + "token_acc": 0.8495575221238938, + "train_speed(iter/s)": 0.444917 + }, + { + "epoch": 2.2048, + "grad_norm": 0.713218270641941, + "learning_rate": 9.005494740717957e-07, + "loss": 0.27604377269744873, + "memory(GiB)": 77.0, + "step": 6890, + "token_acc": 0.95848899958489, + "train_speed(iter/s)": 0.444865 + }, + { + "epoch": 2.20512, + "grad_norm": 0.6730835431873277, + "learning_rate": 8.99871801329755e-07, + "loss": 0.31236329674720764, + "memory(GiB)": 77.0, + "step": 6891, + "token_acc": 0.878022910479423, + "train_speed(iter/s)": 0.444804 + }, + { + "epoch": 2.20544, + "grad_norm": 0.9663415954700546, + "learning_rate": 8.991943276959106e-07, + "loss": 0.27520114183425903, + "memory(GiB)": 77.0, + "step": 6892, + "token_acc": 0.8989710009354537, + "train_speed(iter/s)": 0.444753 + }, + { + "epoch": 2.20576, + "grad_norm": 0.7367845513965923, + "learning_rate": 8.985170532545623e-07, + "loss": 0.2468278706073761, + "memory(GiB)": 77.0, + "step": 6893, + "token_acc": 0.912751677852349, + "train_speed(iter/s)": 0.444705 + }, + { + "epoch": 2.20608, + "grad_norm": 0.7344521858249453, + "learning_rate": 8.978399780899852e-07, + "loss": 0.3639585077762604, + "memory(GiB)": 77.0, + "step": 6894, + "token_acc": 0.8410075839653305, + "train_speed(iter/s)": 0.44465 + }, + { + "epoch": 2.2064, + "grad_norm": 0.7434049665302107, + "learning_rate": 8.971631022864296e-07, + "loss": 0.3723274767398834, + "memory(GiB)": 77.0, + "step": 6895, + "token_acc": 0.9235993208828522, + "train_speed(iter/s)": 0.444593 + }, + { + "epoch": 2.20672, + "grad_norm": 0.6788195089579071, + "learning_rate": 8.964864259281206e-07, + "loss": 0.31177887320518494, + "memory(GiB)": 77.0, + "step": 6896, + "token_acc": 0.9534883720930233, + "train_speed(iter/s)": 0.444537 + }, + { + "epoch": 2.20704, + "grad_norm": 0.7358168869652528, + "learning_rate": 8.958099490992594e-07, + "loss": 0.31920087337493896, + "memory(GiB)": 77.0, + "step": 6897, + "token_acc": 0.8921609290750726, + "train_speed(iter/s)": 0.44449 + }, + { + "epoch": 2.20736, + "grad_norm": 0.674582219721645, + "learning_rate": 8.951336718840212e-07, + "loss": 0.34761351346969604, + "memory(GiB)": 77.0, + "step": 6898, + "token_acc": 0.8544423440453687, + "train_speed(iter/s)": 0.444436 + }, + { + "epoch": 2.20768, + "grad_norm": 0.7262051317068196, + "learning_rate": 8.944575943665573e-07, + "loss": 0.3376178443431854, + "memory(GiB)": 77.0, + "step": 6899, + "token_acc": 0.8557353976073188, + "train_speed(iter/s)": 0.444387 + }, + { + "epoch": 2.208, + "grad_norm": 0.7073985721854678, + "learning_rate": 8.93781716630994e-07, + "loss": 0.2965901494026184, + "memory(GiB)": 77.0, + "step": 6900, + 
"token_acc": 0.9433908859326352, + "train_speed(iter/s)": 0.444336 + }, + { + "epoch": 2.20832, + "grad_norm": 0.7457665820707494, + "learning_rate": 8.931060387614321e-07, + "loss": 0.26083409786224365, + "memory(GiB)": 77.0, + "step": 6901, + "token_acc": 0.9541666666666667, + "train_speed(iter/s)": 0.444286 + }, + { + "epoch": 2.20864, + "grad_norm": 0.9148820464379862, + "learning_rate": 8.924305608419484e-07, + "loss": 0.3440086245536804, + "memory(GiB)": 77.0, + "step": 6902, + "token_acc": 0.9422013274336283, + "train_speed(iter/s)": 0.444233 + }, + { + "epoch": 2.20896, + "grad_norm": 0.6757452965638908, + "learning_rate": 8.917552829565943e-07, + "loss": 0.30662304162979126, + "memory(GiB)": 77.0, + "step": 6903, + "token_acc": 0.9085714285714286, + "train_speed(iter/s)": 0.44418 + }, + { + "epoch": 2.20928, + "grad_norm": 0.7347020598037363, + "learning_rate": 8.910802051893963e-07, + "loss": 0.2338818609714508, + "memory(GiB)": 77.0, + "step": 6904, + "token_acc": 0.8699743370402053, + "train_speed(iter/s)": 0.444128 + }, + { + "epoch": 2.2096, + "grad_norm": 0.7232786551162638, + "learning_rate": 8.90405327624356e-07, + "loss": 0.2899092733860016, + "memory(GiB)": 77.0, + "step": 6905, + "token_acc": 0.9068690983151269, + "train_speed(iter/s)": 0.444076 + }, + { + "epoch": 2.20992, + "grad_norm": 0.7797973138868242, + "learning_rate": 8.897306503454506e-07, + "loss": 0.3108852803707123, + "memory(GiB)": 77.0, + "step": 6906, + "token_acc": 0.9502392344497608, + "train_speed(iter/s)": 0.444026 + }, + { + "epoch": 2.21024, + "grad_norm": 0.6641579259582858, + "learning_rate": 8.890561734366315e-07, + "loss": 0.31232258677482605, + "memory(GiB)": 77.0, + "step": 6907, + "token_acc": 0.9310249307479225, + "train_speed(iter/s)": 0.443972 + }, + { + "epoch": 2.21056, + "grad_norm": 0.6182078236470504, + "learning_rate": 8.883818969818262e-07, + "loss": 0.27052628993988037, + "memory(GiB)": 77.0, + "step": 6908, + "token_acc": 0.9511043987162545, + "train_speed(iter/s)": 0.443903 + }, + { + "epoch": 2.21088, + "grad_norm": 0.9552806354587846, + "learning_rate": 8.877078210649362e-07, + "loss": 0.36381322145462036, + "memory(GiB)": 77.0, + "step": 6909, + "token_acc": 0.8937764744216357, + "train_speed(iter/s)": 0.443855 + }, + { + "epoch": 2.2112, + "grad_norm": 0.7417884186292317, + "learning_rate": 8.870339457698391e-07, + "loss": 0.3171144425868988, + "memory(GiB)": 77.0, + "step": 6910, + "token_acc": 0.8950953678474114, + "train_speed(iter/s)": 0.443804 + }, + { + "epoch": 2.21152, + "grad_norm": 0.6575290834836408, + "learning_rate": 8.863602711803864e-07, + "loss": 0.2847270965576172, + "memory(GiB)": 77.0, + "step": 6911, + "token_acc": 0.8503937007874016, + "train_speed(iter/s)": 0.443751 + }, + { + "epoch": 2.21184, + "grad_norm": 0.6907258766666481, + "learning_rate": 8.856867973804057e-07, + "loss": 0.3149591088294983, + "memory(GiB)": 77.0, + "step": 6912, + "token_acc": 0.930848190167477, + "train_speed(iter/s)": 0.443692 + }, + { + "epoch": 2.21216, + "grad_norm": 0.7865803260352657, + "learning_rate": 8.850135244536986e-07, + "loss": 0.30306941270828247, + "memory(GiB)": 77.0, + "step": 6913, + "token_acc": 0.9227373068432672, + "train_speed(iter/s)": 0.443645 + }, + { + "epoch": 2.2124800000000002, + "grad_norm": 0.7274244512011222, + "learning_rate": 8.843404524840426e-07, + "loss": 0.2666372060775757, + "memory(GiB)": 77.0, + "step": 6914, + "token_acc": 0.9192139737991266, + "train_speed(iter/s)": 0.443592 + }, + { + "epoch": 2.2128, + "grad_norm": 
0.7456383689136282, + "learning_rate": 8.836675815551901e-07, + "loss": 0.30165746808052063, + "memory(GiB)": 77.0, + "step": 6915, + "token_acc": 0.8951981707317073, + "train_speed(iter/s)": 0.443546 + }, + { + "epoch": 2.21312, + "grad_norm": 0.6517316678662108, + "learning_rate": 8.829949117508676e-07, + "loss": 0.28206902742385864, + "memory(GiB)": 77.0, + "step": 6916, + "token_acc": 0.9286528379302484, + "train_speed(iter/s)": 0.443484 + }, + { + "epoch": 2.21344, + "grad_norm": 0.7192591897640841, + "learning_rate": 8.823224431547775e-07, + "loss": 0.24107497930526733, + "memory(GiB)": 77.0, + "step": 6917, + "token_acc": 0.9526315789473684, + "train_speed(iter/s)": 0.443437 + }, + { + "epoch": 2.21376, + "grad_norm": 0.7806697549085828, + "learning_rate": 8.816501758505971e-07, + "loss": 0.33621060848236084, + "memory(GiB)": 77.0, + "step": 6918, + "token_acc": 0.9145559963381141, + "train_speed(iter/s)": 0.443384 + }, + { + "epoch": 2.21408, + "grad_norm": 0.7616192142132004, + "learning_rate": 8.80978109921978e-07, + "loss": 0.316300630569458, + "memory(GiB)": 77.0, + "step": 6919, + "token_acc": 0.8625059780009565, + "train_speed(iter/s)": 0.443329 + }, + { + "epoch": 2.2144, + "grad_norm": 0.6536108372089021, + "learning_rate": 8.803062454525471e-07, + "loss": 0.2313477098941803, + "memory(GiB)": 77.0, + "step": 6920, + "token_acc": 0.9596358118361153, + "train_speed(iter/s)": 0.443282 + }, + { + "epoch": 2.21472, + "grad_norm": 0.699564694325038, + "learning_rate": 8.796345825259068e-07, + "loss": 0.3175666034221649, + "memory(GiB)": 77.0, + "step": 6921, + "token_acc": 0.933809623870561, + "train_speed(iter/s)": 0.443232 + }, + { + "epoch": 2.21504, + "grad_norm": 0.7600189708966483, + "learning_rate": 8.789631212256333e-07, + "loss": 0.3256096839904785, + "memory(GiB)": 77.0, + "step": 6922, + "token_acc": 0.9136513157894737, + "train_speed(iter/s)": 0.443182 + }, + { + "epoch": 2.21536, + "grad_norm": 0.7794431929290083, + "learning_rate": 8.782918616352787e-07, + "loss": 0.3325820565223694, + "memory(GiB)": 77.0, + "step": 6923, + "token_acc": 0.7918072289156627, + "train_speed(iter/s)": 0.443134 + }, + { + "epoch": 2.21568, + "grad_norm": 0.690386039663325, + "learning_rate": 8.776208038383693e-07, + "loss": 0.27564698457717896, + "memory(GiB)": 77.0, + "step": 6924, + "token_acc": 0.9154228855721394, + "train_speed(iter/s)": 0.443085 + }, + { + "epoch": 2.216, + "grad_norm": 0.8243907508612816, + "learning_rate": 8.769499479184071e-07, + "loss": 0.29403847455978394, + "memory(GiB)": 77.0, + "step": 6925, + "token_acc": 0.8817204301075269, + "train_speed(iter/s)": 0.443038 + }, + { + "epoch": 2.21632, + "grad_norm": 0.7849127018231566, + "learning_rate": 8.762792939588688e-07, + "loss": 0.36012962460517883, + "memory(GiB)": 77.0, + "step": 6926, + "token_acc": 0.8960943257184967, + "train_speed(iter/s)": 0.442977 + }, + { + "epoch": 2.21664, + "grad_norm": 0.7231349096685613, + "learning_rate": 8.756088420432043e-07, + "loss": 0.33586385846138, + "memory(GiB)": 77.0, + "step": 6927, + "token_acc": 0.888617638083613, + "train_speed(iter/s)": 0.442924 + }, + { + "epoch": 2.21696, + "grad_norm": 0.722152170582094, + "learning_rate": 8.749385922548395e-07, + "loss": 0.3445046544075012, + "memory(GiB)": 77.0, + "step": 6928, + "token_acc": 0.9517396184062851, + "train_speed(iter/s)": 0.442877 + }, + { + "epoch": 2.21728, + "grad_norm": 0.6471737167295228, + "learning_rate": 8.742685446771773e-07, + "loss": 0.2046692967414856, + "memory(GiB)": 77.0, + "step": 6929, + 
"token_acc": 0.9293311273930512, + "train_speed(iter/s)": 0.442826 + }, + { + "epoch": 2.2176, + "grad_norm": 0.6857227041226511, + "learning_rate": 8.735986993935921e-07, + "loss": 0.3349118232727051, + "memory(GiB)": 77.0, + "step": 6930, + "token_acc": 0.9186238835593781, + "train_speed(iter/s)": 0.442773 + }, + { + "epoch": 2.21792, + "grad_norm": 0.7454026630641007, + "learning_rate": 8.729290564874355e-07, + "loss": 0.3254181444644928, + "memory(GiB)": 77.0, + "step": 6931, + "token_acc": 0.9004854368932039, + "train_speed(iter/s)": 0.44272 + }, + { + "epoch": 2.2182399999999998, + "grad_norm": 0.6889712847124828, + "learning_rate": 8.722596160420318e-07, + "loss": 0.29535308480262756, + "memory(GiB)": 77.0, + "step": 6932, + "token_acc": 0.9320282762370854, + "train_speed(iter/s)": 0.442672 + }, + { + "epoch": 2.21856, + "grad_norm": 0.6659959175745397, + "learning_rate": 8.71590378140682e-07, + "loss": 0.2506338357925415, + "memory(GiB)": 77.0, + "step": 6933, + "token_acc": 0.918873375060183, + "train_speed(iter/s)": 0.442618 + }, + { + "epoch": 2.21888, + "grad_norm": 0.7114373512985813, + "learning_rate": 8.70921342866661e-07, + "loss": 0.26504945755004883, + "memory(GiB)": 77.0, + "step": 6934, + "token_acc": 0.9173212071103762, + "train_speed(iter/s)": 0.442568 + }, + { + "epoch": 2.2192, + "grad_norm": 0.6421280559487059, + "learning_rate": 8.702525103032186e-07, + "loss": 0.29736366868019104, + "memory(GiB)": 77.0, + "step": 6935, + "token_acc": 0.9328554360812426, + "train_speed(iter/s)": 0.442508 + }, + { + "epoch": 2.21952, + "grad_norm": 0.8012964572883965, + "learning_rate": 8.695838805335794e-07, + "loss": 0.2994788587093353, + "memory(GiB)": 77.0, + "step": 6936, + "token_acc": 0.9174776177500973, + "train_speed(iter/s)": 0.442458 + }, + { + "epoch": 2.21984, + "grad_norm": 0.785944256539849, + "learning_rate": 8.689154536409427e-07, + "loss": 0.3221973776817322, + "memory(GiB)": 77.0, + "step": 6937, + "token_acc": 0.912211868563021, + "train_speed(iter/s)": 0.442408 + }, + { + "epoch": 2.22016, + "grad_norm": 0.7133886868972348, + "learning_rate": 8.682472297084829e-07, + "loss": 0.34618887305259705, + "memory(GiB)": 77.0, + "step": 6938, + "token_acc": 0.8770176787086856, + "train_speed(iter/s)": 0.442357 + }, + { + "epoch": 2.2204800000000002, + "grad_norm": 0.6955637780632875, + "learning_rate": 8.675792088193486e-07, + "loss": 0.27964508533477783, + "memory(GiB)": 77.0, + "step": 6939, + "token_acc": 0.906989543203082, + "train_speed(iter/s)": 0.442304 + }, + { + "epoch": 2.2208, + "grad_norm": 0.676175376103291, + "learning_rate": 8.669113910566635e-07, + "loss": 0.28420180082321167, + "memory(GiB)": 77.0, + "step": 6940, + "token_acc": 0.8939584644430459, + "train_speed(iter/s)": 0.442256 + }, + { + "epoch": 2.22112, + "grad_norm": 0.7192905736312083, + "learning_rate": 8.662437765035267e-07, + "loss": 0.28489530086517334, + "memory(GiB)": 77.0, + "step": 6941, + "token_acc": 0.9488250652741514, + "train_speed(iter/s)": 0.442201 + }, + { + "epoch": 2.22144, + "grad_norm": 0.643159772628648, + "learning_rate": 8.655763652430097e-07, + "loss": 0.3283725380897522, + "memory(GiB)": 77.0, + "step": 6942, + "token_acc": 0.938457330415755, + "train_speed(iter/s)": 0.442149 + }, + { + "epoch": 2.22176, + "grad_norm": 0.7229161471067258, + "learning_rate": 8.649091573581608e-07, + "loss": 0.2914060056209564, + "memory(GiB)": 77.0, + "step": 6943, + "token_acc": 0.937236892460773, + "train_speed(iter/s)": 0.442099 + }, + { + "epoch": 2.22208, + "grad_norm": 
0.7265906686454309, + "learning_rate": 8.642421529320028e-07, + "loss": 0.3232962489128113, + "memory(GiB)": 77.0, + "step": 6944, + "token_acc": 0.9366920782234007, + "train_speed(iter/s)": 0.44204 + }, + { + "epoch": 2.2224, + "grad_norm": 0.7136957143426412, + "learning_rate": 8.63575352047532e-07, + "loss": 0.3058525323867798, + "memory(GiB)": 77.0, + "step": 6945, + "token_acc": 0.8732642715450738, + "train_speed(iter/s)": 0.441993 + }, + { + "epoch": 2.22272, + "grad_norm": 0.6040586716555596, + "learning_rate": 8.629087547877216e-07, + "loss": 0.22172990441322327, + "memory(GiB)": 77.0, + "step": 6946, + "token_acc": 0.8878595806923452, + "train_speed(iter/s)": 0.441933 + }, + { + "epoch": 2.22304, + "grad_norm": 0.6955083602643277, + "learning_rate": 8.622423612355177e-07, + "loss": 0.2935275435447693, + "memory(GiB)": 77.0, + "step": 6947, + "token_acc": 0.9080779944289693, + "train_speed(iter/s)": 0.441883 + }, + { + "epoch": 2.22336, + "grad_norm": 0.7838229544241775, + "learning_rate": 8.615761714738407e-07, + "loss": 0.2588614225387573, + "memory(GiB)": 77.0, + "step": 6948, + "token_acc": 0.8827385287691187, + "train_speed(iter/s)": 0.44183 + }, + { + "epoch": 2.22368, + "grad_norm": 0.6961271309594166, + "learning_rate": 8.60910185585587e-07, + "loss": 0.22571006417274475, + "memory(GiB)": 77.0, + "step": 6949, + "token_acc": 0.8813499111900532, + "train_speed(iter/s)": 0.441784 + }, + { + "epoch": 2.224, + "grad_norm": 0.8166117783275184, + "learning_rate": 8.602444036536267e-07, + "loss": 0.2564283609390259, + "memory(GiB)": 77.0, + "step": 6950, + "token_acc": 0.8980769230769231, + "train_speed(iter/s)": 0.441716 + }, + { + "epoch": 2.22432, + "grad_norm": 0.7006515084370518, + "learning_rate": 8.595788257608048e-07, + "loss": 0.32634830474853516, + "memory(GiB)": 77.0, + "step": 6951, + "token_acc": 0.944280442804428, + "train_speed(iter/s)": 0.441663 + }, + { + "epoch": 2.22464, + "grad_norm": 0.7099126465642064, + "learning_rate": 8.589134519899409e-07, + "loss": 0.2565026581287384, + "memory(GiB)": 77.0, + "step": 6952, + "token_acc": 0.9206984357948345, + "train_speed(iter/s)": 0.441618 + }, + { + "epoch": 2.22496, + "grad_norm": 0.7644590009642276, + "learning_rate": 8.582482824238295e-07, + "loss": 0.35149145126342773, + "memory(GiB)": 77.0, + "step": 6953, + "token_acc": 0.9116928446771378, + "train_speed(iter/s)": 0.441558 + }, + { + "epoch": 2.22528, + "grad_norm": 0.6638085717974836, + "learning_rate": 8.575833171452391e-07, + "loss": 0.2069031000137329, + "memory(GiB)": 77.0, + "step": 6954, + "token_acc": 0.937375745526839, + "train_speed(iter/s)": 0.441507 + }, + { + "epoch": 2.2256, + "grad_norm": 0.6974318974137571, + "learning_rate": 8.569185562369139e-07, + "loss": 0.3655393123626709, + "memory(GiB)": 77.0, + "step": 6955, + "token_acc": 0.9195263897250652, + "train_speed(iter/s)": 0.441449 + }, + { + "epoch": 2.22592, + "grad_norm": 0.655968786735949, + "learning_rate": 8.562539997815705e-07, + "loss": 0.25456511974334717, + "memory(GiB)": 77.0, + "step": 6956, + "token_acc": 0.9010215664018161, + "train_speed(iter/s)": 0.441401 + }, + { + "epoch": 2.2262399999999998, + "grad_norm": 0.7159296443303887, + "learning_rate": 8.555896478619022e-07, + "loss": 0.2758767604827881, + "memory(GiB)": 77.0, + "step": 6957, + "token_acc": 0.9236071223434807, + "train_speed(iter/s)": 0.441355 + }, + { + "epoch": 2.22656, + "grad_norm": 0.7323065438139494, + "learning_rate": 8.549255005605759e-07, + "loss": 0.3150811791419983, + "memory(GiB)": 77.0, + "step": 6958, + 
"token_acc": 0.9324324324324325, + "train_speed(iter/s)": 0.441309 + }, + { + "epoch": 2.22688, + "grad_norm": 0.9560962014669427, + "learning_rate": 8.542615579602331e-07, + "loss": 0.3266060948371887, + "memory(GiB)": 77.0, + "step": 6959, + "token_acc": 0.8908920627487708, + "train_speed(iter/s)": 0.441259 + }, + { + "epoch": 2.2272, + "grad_norm": 0.7101270890626239, + "learning_rate": 8.535978201434902e-07, + "loss": 0.38879770040512085, + "memory(GiB)": 77.0, + "step": 6960, + "token_acc": 0.8831092928112215, + "train_speed(iter/s)": 0.441209 + }, + { + "epoch": 2.22752, + "grad_norm": 0.6906662483771034, + "learning_rate": 8.52934287192938e-07, + "loss": 0.2997027039527893, + "memory(GiB)": 77.0, + "step": 6961, + "token_acc": 0.8845099383139137, + "train_speed(iter/s)": 0.441152 + }, + { + "epoch": 2.22784, + "grad_norm": 0.7719762678038805, + "learning_rate": 8.522709591911405e-07, + "loss": 0.2899912893772125, + "memory(GiB)": 77.0, + "step": 6962, + "token_acc": 0.967438737831487, + "train_speed(iter/s)": 0.441107 + }, + { + "epoch": 2.22816, + "grad_norm": 0.7208475059454492, + "learning_rate": 8.516078362206389e-07, + "loss": 0.32550984621047974, + "memory(GiB)": 77.0, + "step": 6963, + "token_acc": 0.8257403189066059, + "train_speed(iter/s)": 0.441057 + }, + { + "epoch": 2.22848, + "grad_norm": 0.6868096617314741, + "learning_rate": 8.50944918363947e-07, + "loss": 0.3181605041027069, + "memory(GiB)": 77.0, + "step": 6964, + "token_acc": 0.8878598247809762, + "train_speed(iter/s)": 0.441006 + }, + { + "epoch": 2.2288, + "grad_norm": 0.7222215218207803, + "learning_rate": 8.502822057035532e-07, + "loss": 0.27290165424346924, + "memory(GiB)": 77.0, + "step": 6965, + "token_acc": 0.9324644549763034, + "train_speed(iter/s)": 0.44096 + }, + { + "epoch": 2.22912, + "grad_norm": 0.6852754267989646, + "learning_rate": 8.496196983219205e-07, + "loss": 0.2735602855682373, + "memory(GiB)": 77.0, + "step": 6966, + "token_acc": 0.8833688699360341, + "train_speed(iter/s)": 0.44091 + }, + { + "epoch": 2.22944, + "grad_norm": 0.6597946088310063, + "learning_rate": 8.489573963014863e-07, + "loss": 0.32822245359420776, + "memory(GiB)": 77.0, + "step": 6967, + "token_acc": 0.944840409048652, + "train_speed(iter/s)": 0.44086 + }, + { + "epoch": 2.22976, + "grad_norm": 0.7833137136481374, + "learning_rate": 8.482952997246632e-07, + "loss": 0.3626452088356018, + "memory(GiB)": 77.0, + "step": 6968, + "token_acc": 0.931740614334471, + "train_speed(iter/s)": 0.440802 + }, + { + "epoch": 2.23008, + "grad_norm": 0.6869071079789966, + "learning_rate": 8.476334086738378e-07, + "loss": 0.27085819840431213, + "memory(GiB)": 77.0, + "step": 6969, + "token_acc": 0.967012987012987, + "train_speed(iter/s)": 0.440752 + }, + { + "epoch": 2.2304, + "grad_norm": 0.6493884806282736, + "learning_rate": 8.469717232313698e-07, + "loss": 0.2797114849090576, + "memory(GiB)": 77.0, + "step": 6970, + "token_acc": 0.93033381712627, + "train_speed(iter/s)": 0.440697 + }, + { + "epoch": 2.23072, + "grad_norm": 0.7159309825128175, + "learning_rate": 8.46310243479595e-07, + "loss": 0.27970385551452637, + "memory(GiB)": 77.0, + "step": 6971, + "token_acc": 0.907859078590786, + "train_speed(iter/s)": 0.440648 + }, + { + "epoch": 2.23104, + "grad_norm": 0.7523422765290064, + "learning_rate": 8.45648969500823e-07, + "loss": 0.3442903161048889, + "memory(GiB)": 77.0, + "step": 6972, + "token_acc": 0.8641625324828727, + "train_speed(iter/s)": 0.440595 + }, + { + "epoch": 2.23136, + "grad_norm": 0.6798320540433828, + "learning_rate": 
8.449879013773383e-07, + "loss": 0.27787333726882935, + "memory(GiB)": 77.0, + "step": 6973, + "token_acc": 0.9453237410071943, + "train_speed(iter/s)": 0.440543 + }, + { + "epoch": 2.23168, + "grad_norm": 0.756527219704735, + "learning_rate": 8.443270391913991e-07, + "loss": 0.31745487451553345, + "memory(GiB)": 77.0, + "step": 6974, + "token_acc": 0.9346607384496424, + "train_speed(iter/s)": 0.440495 + }, + { + "epoch": 2.232, + "grad_norm": 0.6493815990039541, + "learning_rate": 8.436663830252381e-07, + "loss": 0.2770085334777832, + "memory(GiB)": 77.0, + "step": 6975, + "token_acc": 0.9108573376512699, + "train_speed(iter/s)": 0.44044 + }, + { + "epoch": 2.23232, + "grad_norm": 0.7780910565647384, + "learning_rate": 8.430059329610627e-07, + "loss": 0.41101354360580444, + "memory(GiB)": 77.0, + "step": 6976, + "token_acc": 0.9031531531531531, + "train_speed(iter/s)": 0.440389 + }, + { + "epoch": 2.23264, + "grad_norm": 0.7623110074421546, + "learning_rate": 8.423456890810544e-07, + "loss": 0.294757604598999, + "memory(GiB)": 77.0, + "step": 6977, + "token_acc": 0.8600301010535368, + "train_speed(iter/s)": 0.440342 + }, + { + "epoch": 2.23296, + "grad_norm": 0.7462218308609184, + "learning_rate": 8.416856514673691e-07, + "loss": 0.3173014521598816, + "memory(GiB)": 77.0, + "step": 6978, + "token_acc": 0.8675213675213675, + "train_speed(iter/s)": 0.440289 + }, + { + "epoch": 2.23328, + "grad_norm": 0.7939015151016426, + "learning_rate": 8.41025820202136e-07, + "loss": 0.32295647263526917, + "memory(GiB)": 77.0, + "step": 6979, + "token_acc": 0.8566300675675675, + "train_speed(iter/s)": 0.440244 + }, + { + "epoch": 2.2336, + "grad_norm": 0.7508851403722034, + "learning_rate": 8.403661953674616e-07, + "loss": 0.3032088875770569, + "memory(GiB)": 77.0, + "step": 6980, + "token_acc": 0.9467162329615861, + "train_speed(iter/s)": 0.440193 + }, + { + "epoch": 2.23392, + "grad_norm": 0.758466251829851, + "learning_rate": 8.397067770454239e-07, + "loss": 0.28166794776916504, + "memory(GiB)": 77.0, + "step": 6981, + "token_acc": 0.9592760180995475, + "train_speed(iter/s)": 0.440127 + }, + { + "epoch": 2.23424, + "grad_norm": 0.7283889982207535, + "learning_rate": 8.39047565318076e-07, + "loss": 0.3532133102416992, + "memory(GiB)": 77.0, + "step": 6982, + "token_acc": 0.9050039093041439, + "train_speed(iter/s)": 0.440078 + }, + { + "epoch": 2.23456, + "grad_norm": 0.7546360259202083, + "learning_rate": 8.383885602674455e-07, + "loss": 0.4246371388435364, + "memory(GiB)": 77.0, + "step": 6983, + "token_acc": 0.8384738405300434, + "train_speed(iter/s)": 0.440029 + }, + { + "epoch": 2.23488, + "grad_norm": 0.6679079531073521, + "learning_rate": 8.377297619755348e-07, + "loss": 0.31142657995224, + "memory(GiB)": 77.0, + "step": 6984, + "token_acc": 0.9276572064552031, + "train_speed(iter/s)": 0.439971 + }, + { + "epoch": 2.2352, + "grad_norm": 0.7048888577745344, + "learning_rate": 8.370711705243182e-07, + "loss": 0.3429127335548401, + "memory(GiB)": 77.0, + "step": 6985, + "token_acc": 0.8920953575909661, + "train_speed(iter/s)": 0.439922 + }, + { + "epoch": 2.23552, + "grad_norm": 0.7409470296649026, + "learning_rate": 8.364127859957471e-07, + "loss": 0.33332040905952454, + "memory(GiB)": 77.0, + "step": 6986, + "token_acc": 0.8545051698670606, + "train_speed(iter/s)": 0.439868 + }, + { + "epoch": 2.23584, + "grad_norm": 0.6280280683110067, + "learning_rate": 8.357546084717458e-07, + "loss": 0.29015040397644043, + "memory(GiB)": 77.0, + "step": 6987, + "token_acc": 0.8559536197413409, + 
"train_speed(iter/s)": 0.439809 + }, + { + "epoch": 2.23616, + "grad_norm": 0.7314747404418717, + "learning_rate": 8.35096638034213e-07, + "loss": 0.27244168519973755, + "memory(GiB)": 77.0, + "step": 6988, + "token_acc": 0.9282418089524689, + "train_speed(iter/s)": 0.439757 + }, + { + "epoch": 2.23648, + "grad_norm": 0.6503442342608605, + "learning_rate": 8.34438874765022e-07, + "loss": 0.26675522327423096, + "memory(GiB)": 77.0, + "step": 6989, + "token_acc": 0.9254058797718298, + "train_speed(iter/s)": 0.439706 + }, + { + "epoch": 2.2368, + "grad_norm": 0.7286442132277551, + "learning_rate": 8.337813187460198e-07, + "loss": 0.3145098090171814, + "memory(GiB)": 77.0, + "step": 6990, + "token_acc": 0.949885452462772, + "train_speed(iter/s)": 0.439658 + }, + { + "epoch": 2.23712, + "grad_norm": 0.6566506753555612, + "learning_rate": 8.331239700590276e-07, + "loss": 0.2874494194984436, + "memory(GiB)": 77.0, + "step": 6991, + "token_acc": 0.8836020448289422, + "train_speed(iter/s)": 0.439606 + }, + { + "epoch": 2.23744, + "grad_norm": 0.706000976392652, + "learning_rate": 8.324668287858417e-07, + "loss": 0.28429967164993286, + "memory(GiB)": 77.0, + "step": 6992, + "token_acc": 0.9375987361769352, + "train_speed(iter/s)": 0.439556 + }, + { + "epoch": 2.23776, + "grad_norm": 0.7549860395428918, + "learning_rate": 8.318098950082315e-07, + "loss": 0.3359370529651642, + "memory(GiB)": 77.0, + "step": 6993, + "token_acc": 0.9014084507042254, + "train_speed(iter/s)": 0.439502 + }, + { + "epoch": 2.23808, + "grad_norm": 0.6628251182067362, + "learning_rate": 8.311531688079413e-07, + "loss": 0.20255900919437408, + "memory(GiB)": 77.0, + "step": 6994, + "token_acc": 0.9489981785063752, + "train_speed(iter/s)": 0.439457 + }, + { + "epoch": 2.2384, + "grad_norm": 0.7210338647635074, + "learning_rate": 8.304966502666889e-07, + "loss": 0.28819406032562256, + "memory(GiB)": 77.0, + "step": 6995, + "token_acc": 0.9566539923954372, + "train_speed(iter/s)": 0.439406 + }, + { + "epoch": 2.23872, + "grad_norm": 0.7369169911304716, + "learning_rate": 8.298403394661658e-07, + "loss": 0.31234902143478394, + "memory(GiB)": 77.0, + "step": 6996, + "token_acc": 0.864167916041979, + "train_speed(iter/s)": 0.439357 + }, + { + "epoch": 2.23904, + "grad_norm": 0.7218094518677534, + "learning_rate": 8.291842364880406e-07, + "loss": 0.3579627275466919, + "memory(GiB)": 77.0, + "step": 6997, + "token_acc": 0.930327868852459, + "train_speed(iter/s)": 0.439311 + }, + { + "epoch": 2.23936, + "grad_norm": 0.7754465725108154, + "learning_rate": 8.285283414139536e-07, + "loss": 0.28557825088500977, + "memory(GiB)": 77.0, + "step": 6998, + "token_acc": 0.9376524803469775, + "train_speed(iter/s)": 0.439262 + }, + { + "epoch": 2.23968, + "grad_norm": 0.7203011517865018, + "learning_rate": 8.278726543255181e-07, + "loss": 0.3451882004737854, + "memory(GiB)": 77.0, + "step": 6999, + "token_acc": 0.9224235560588901, + "train_speed(iter/s)": 0.439212 + }, + { + "epoch": 2.24, + "grad_norm": 0.7585064231506936, + "learning_rate": 8.272171753043234e-07, + "loss": 0.30060750246047974, + "memory(GiB)": 77.0, + "step": 7000, + "token_acc": 0.9244663382594417, + "train_speed(iter/s)": 0.439156 + }, + { + "epoch": 2.24032, + "grad_norm": 0.7203174011301424, + "learning_rate": 8.265619044319332e-07, + "loss": 0.3419111669063568, + "memory(GiB)": 77.0, + "step": 7001, + "token_acc": 0.9194115765909817, + "train_speed(iter/s)": 0.439095 + }, + { + "epoch": 2.24064, + "grad_norm": 0.6833389994572526, + "learning_rate": 8.259068417898839e-07, + 
"loss": 0.2924104928970337, + "memory(GiB)": 77.0, + "step": 7002, + "token_acc": 0.8928785158587672, + "train_speed(iter/s)": 0.439045 + }, + { + "epoch": 2.24096, + "grad_norm": 0.7101782276010269, + "learning_rate": 8.252519874596871e-07, + "loss": 0.3840867578983307, + "memory(GiB)": 77.0, + "step": 7003, + "token_acc": 0.8589041095890411, + "train_speed(iter/s)": 0.438999 + }, + { + "epoch": 2.24128, + "grad_norm": 0.7257310784020149, + "learning_rate": 8.245973415228278e-07, + "loss": 0.3459042012691498, + "memory(GiB)": 77.0, + "step": 7004, + "token_acc": 0.902324120603015, + "train_speed(iter/s)": 0.438949 + }, + { + "epoch": 2.2416, + "grad_norm": 0.7367992043784598, + "learning_rate": 8.239429040607655e-07, + "loss": 0.3497753143310547, + "memory(GiB)": 77.0, + "step": 7005, + "token_acc": 0.8779461279461279, + "train_speed(iter/s)": 0.438902 + }, + { + "epoch": 2.24192, + "grad_norm": 0.7590835948973043, + "learning_rate": 8.232886751549335e-07, + "loss": 0.3262965679168701, + "memory(GiB)": 77.0, + "step": 7006, + "token_acc": 0.9377873951852854, + "train_speed(iter/s)": 0.438853 + }, + { + "epoch": 2.24224, + "grad_norm": 0.670253158205919, + "learning_rate": 8.226346548867392e-07, + "loss": 0.261566162109375, + "memory(GiB)": 77.0, + "step": 7007, + "token_acc": 0.9298607529654461, + "train_speed(iter/s)": 0.438806 + }, + { + "epoch": 2.24256, + "grad_norm": 0.7588209257805063, + "learning_rate": 8.219808433375645e-07, + "loss": 0.245916485786438, + "memory(GiB)": 77.0, + "step": 7008, + "token_acc": 0.8889871738168952, + "train_speed(iter/s)": 0.438758 + }, + { + "epoch": 2.24288, + "grad_norm": 0.7046382225153847, + "learning_rate": 8.213272405887642e-07, + "loss": 0.25912752747535706, + "memory(GiB)": 77.0, + "step": 7009, + "token_acc": 0.8719101123595505, + "train_speed(iter/s)": 0.438713 + }, + { + "epoch": 2.2432, + "grad_norm": 0.7726780627452259, + "learning_rate": 8.206738467216685e-07, + "loss": 0.33217909932136536, + "memory(GiB)": 77.0, + "step": 7010, + "token_acc": 0.9016438356164383, + "train_speed(iter/s)": 0.438665 + }, + { + "epoch": 2.24352, + "grad_norm": 0.6945481193483809, + "learning_rate": 8.200206618175804e-07, + "loss": 0.2611098885536194, + "memory(GiB)": 77.0, + "step": 7011, + "token_acc": 0.9173411822418445, + "train_speed(iter/s)": 0.438619 + }, + { + "epoch": 2.24384, + "grad_norm": 0.6796136333405369, + "learning_rate": 8.193676859577776e-07, + "loss": 0.2638724446296692, + "memory(GiB)": 77.0, + "step": 7012, + "token_acc": 0.8982262285548125, + "train_speed(iter/s)": 0.438559 + }, + { + "epoch": 2.24416, + "grad_norm": 0.7525196263971013, + "learning_rate": 8.187149192235119e-07, + "loss": 0.29053962230682373, + "memory(GiB)": 77.0, + "step": 7013, + "token_acc": 0.8632369095569389, + "train_speed(iter/s)": 0.438504 + }, + { + "epoch": 2.24448, + "grad_norm": 0.7891550141235971, + "learning_rate": 8.180623616960085e-07, + "loss": 0.3404170274734497, + "memory(GiB)": 77.0, + "step": 7014, + "token_acc": 0.8802844725404978, + "train_speed(iter/s)": 0.438459 + }, + { + "epoch": 2.2448, + "grad_norm": 0.716174152852761, + "learning_rate": 8.174100134564667e-07, + "loss": 0.2495344877243042, + "memory(GiB)": 77.0, + "step": 7015, + "token_acc": 0.9321608040201005, + "train_speed(iter/s)": 0.438409 + }, + { + "epoch": 2.24512, + "grad_norm": 0.715951479892342, + "learning_rate": 8.167578745860604e-07, + "loss": 0.24025095999240875, + "memory(GiB)": 77.0, + "step": 7016, + "token_acc": 0.9656, + "train_speed(iter/s)": 0.438358 + }, + { + "epoch": 
2.24544, + "grad_norm": 0.7217019316268721, + "learning_rate": 8.161059451659367e-07, + "loss": 0.3234235644340515, + "memory(GiB)": 77.0, + "step": 7017, + "token_acc": 0.908475164987843, + "train_speed(iter/s)": 0.43831 + }, + { + "epoch": 2.24576, + "grad_norm": 0.7765325865756697, + "learning_rate": 8.154542252772169e-07, + "loss": 0.299052357673645, + "memory(GiB)": 77.0, + "step": 7018, + "token_acc": 0.9600290697674418, + "train_speed(iter/s)": 0.438253 + }, + { + "epoch": 2.24608, + "grad_norm": 0.6735718361733413, + "learning_rate": 8.14802715000996e-07, + "loss": 0.29054856300354004, + "memory(GiB)": 77.0, + "step": 7019, + "token_acc": 0.8508684368137353, + "train_speed(iter/s)": 0.438202 + }, + { + "epoch": 2.2464, + "grad_norm": 0.7118485149881028, + "learning_rate": 8.141514144183438e-07, + "loss": 0.37884506583213806, + "memory(GiB)": 77.0, + "step": 7020, + "token_acc": 0.8690423162583519, + "train_speed(iter/s)": 0.438151 + }, + { + "epoch": 2.24672, + "grad_norm": 0.6976463194203908, + "learning_rate": 8.135003236103028e-07, + "loss": 0.3342128098011017, + "memory(GiB)": 77.0, + "step": 7021, + "token_acc": 0.8924145700337964, + "train_speed(iter/s)": 0.438103 + }, + { + "epoch": 2.24704, + "grad_norm": 0.7449877641756597, + "learning_rate": 8.1284944265789e-07, + "loss": 0.33184802532196045, + "memory(GiB)": 77.0, + "step": 7022, + "token_acc": 0.8643109540636043, + "train_speed(iter/s)": 0.43805 + }, + { + "epoch": 2.24736, + "grad_norm": 0.7256983113172154, + "learning_rate": 8.121987716420967e-07, + "loss": 0.2816491723060608, + "memory(GiB)": 77.0, + "step": 7023, + "token_acc": 0.9474058280028429, + "train_speed(iter/s)": 0.438001 + }, + { + "epoch": 2.24768, + "grad_norm": 0.697871755044188, + "learning_rate": 8.11548310643887e-07, + "loss": 0.3158554136753082, + "memory(GiB)": 77.0, + "step": 7024, + "token_acc": 0.8836843759823955, + "train_speed(iter/s)": 0.437951 + }, + { + "epoch": 2.248, + "grad_norm": 0.7311505067834435, + "learning_rate": 8.108980597442001e-07, + "loss": 0.26440364122390747, + "memory(GiB)": 77.0, + "step": 7025, + "token_acc": 0.9498553519768563, + "train_speed(iter/s)": 0.437901 + }, + { + "epoch": 2.24832, + "grad_norm": 0.7618864022414157, + "learning_rate": 8.102480190239479e-07, + "loss": 0.23146238923072815, + "memory(GiB)": 77.0, + "step": 7026, + "token_acc": 0.9262788365095286, + "train_speed(iter/s)": 0.437853 + }, + { + "epoch": 2.24864, + "grad_norm": 0.750650230449869, + "learning_rate": 8.095981885640169e-07, + "loss": 0.363067626953125, + "memory(GiB)": 77.0, + "step": 7027, + "token_acc": 0.9235555555555556, + "train_speed(iter/s)": 0.437804 + }, + { + "epoch": 2.24896, + "grad_norm": 0.7310091479943281, + "learning_rate": 8.089485684452672e-07, + "loss": 0.3411691188812256, + "memory(GiB)": 77.0, + "step": 7028, + "token_acc": 0.8393491677576211, + "train_speed(iter/s)": 0.437755 + }, + { + "epoch": 2.24928, + "grad_norm": 0.7151937214313585, + "learning_rate": 8.082991587485331e-07, + "loss": 0.27963048219680786, + "memory(GiB)": 77.0, + "step": 7029, + "token_acc": 0.961436170212766, + "train_speed(iter/s)": 0.437711 + }, + { + "epoch": 2.2496, + "grad_norm": 0.7183259580774153, + "learning_rate": 8.076499595546217e-07, + "loss": 0.23343175649642944, + "memory(GiB)": 77.0, + "step": 7030, + "token_acc": 0.9438242707958229, + "train_speed(iter/s)": 0.437666 + }, + { + "epoch": 2.24992, + "grad_norm": 0.7356182546834885, + "learning_rate": 8.070009709443152e-07, + "loss": 0.2885283827781677, + "memory(GiB)": 77.0, + 
"step": 7031, + "token_acc": 0.9266279425252216, + "train_speed(iter/s)": 0.437609 + }, + { + "epoch": 2.25024, + "grad_norm": 0.7073539761858411, + "learning_rate": 8.063521929983686e-07, + "loss": 0.3496605157852173, + "memory(GiB)": 77.0, + "step": 7032, + "token_acc": 0.9383236689509752, + "train_speed(iter/s)": 0.437561 + }, + { + "epoch": 2.25056, + "grad_norm": 0.689286782277004, + "learning_rate": 8.057036257975113e-07, + "loss": 0.32512593269348145, + "memory(GiB)": 77.0, + "step": 7033, + "token_acc": 0.937457969065232, + "train_speed(iter/s)": 0.437515 + }, + { + "epoch": 2.25088, + "grad_norm": 0.7870155700192352, + "learning_rate": 8.050552694224459e-07, + "loss": 0.3956805467605591, + "memory(GiB)": 77.0, + "step": 7034, + "token_acc": 0.8189982728842833, + "train_speed(iter/s)": 0.437465 + }, + { + "epoch": 2.2512, + "grad_norm": 0.7200664342660379, + "learning_rate": 8.044071239538495e-07, + "loss": 0.24009865522384644, + "memory(GiB)": 77.0, + "step": 7035, + "token_acc": 0.907008547008547, + "train_speed(iter/s)": 0.437418 + }, + { + "epoch": 2.25152, + "grad_norm": 0.7046954420582325, + "learning_rate": 8.037591894723723e-07, + "loss": 0.2771630585193634, + "memory(GiB)": 77.0, + "step": 7036, + "token_acc": 0.9078326517772884, + "train_speed(iter/s)": 0.437371 + }, + { + "epoch": 2.25184, + "grad_norm": 0.7630976588585441, + "learning_rate": 8.031114660586387e-07, + "loss": 0.2997162938117981, + "memory(GiB)": 77.0, + "step": 7037, + "token_acc": 0.8886576482830385, + "train_speed(iter/s)": 0.437323 + }, + { + "epoch": 2.25216, + "grad_norm": 0.7141625406072692, + "learning_rate": 8.024639537932466e-07, + "loss": 0.34398671984672546, + "memory(GiB)": 77.0, + "step": 7038, + "token_acc": 0.8290115032170013, + "train_speed(iter/s)": 0.437265 + }, + { + "epoch": 2.25248, + "grad_norm": 0.6797654936810331, + "learning_rate": 8.018166527567672e-07, + "loss": 0.246562197804451, + "memory(GiB)": 77.0, + "step": 7039, + "token_acc": 0.9526381387619444, + "train_speed(iter/s)": 0.437217 + }, + { + "epoch": 2.2528, + "grad_norm": 0.7269133605619852, + "learning_rate": 8.011695630297467e-07, + "loss": 0.31956589221954346, + "memory(GiB)": 77.0, + "step": 7040, + "token_acc": 0.9276891982932854, + "train_speed(iter/s)": 0.437158 + }, + { + "epoch": 2.25312, + "grad_norm": 0.7555853878286324, + "learning_rate": 8.005226846927044e-07, + "loss": 0.28446164727211, + "memory(GiB)": 77.0, + "step": 7041, + "token_acc": 0.9367936375052324, + "train_speed(iter/s)": 0.43711 + }, + { + "epoch": 2.25344, + "grad_norm": 0.7283780050450727, + "learning_rate": 7.998760178261308e-07, + "loss": 0.34544187784194946, + "memory(GiB)": 77.0, + "step": 7042, + "token_acc": 0.8530197755211117, + "train_speed(iter/s)": 0.437055 + }, + { + "epoch": 2.2537599999999998, + "grad_norm": 0.8215707685120333, + "learning_rate": 7.992295625104951e-07, + "loss": 0.349143922328949, + "memory(GiB)": 77.0, + "step": 7043, + "token_acc": 0.9559169698266639, + "train_speed(iter/s)": 0.436997 + }, + { + "epoch": 2.25408, + "grad_norm": 0.7443903311532949, + "learning_rate": 7.985833188262363e-07, + "loss": 0.251301109790802, + "memory(GiB)": 77.0, + "step": 7044, + "token_acc": 0.9150080688542227, + "train_speed(iter/s)": 0.436946 + }, + { + "epoch": 2.2544, + "grad_norm": 0.7415963156043426, + "learning_rate": 7.979372868537683e-07, + "loss": 0.31142014265060425, + "memory(GiB)": 77.0, + "step": 7045, + "token_acc": 0.9274406332453826, + "train_speed(iter/s)": 0.4369 + }, + { + "epoch": 2.25472, + "grad_norm": 
0.7234940361373318, + "learning_rate": 7.972914666734783e-07, + "loss": 0.27931854128837585, + "memory(GiB)": 77.0, + "step": 7046, + "token_acc": 0.8669012626785345, + "train_speed(iter/s)": 0.436851 + }, + { + "epoch": 2.25504, + "grad_norm": 0.8029743082355175, + "learning_rate": 7.96645858365728e-07, + "loss": 0.29531538486480713, + "memory(GiB)": 77.0, + "step": 7047, + "token_acc": 0.9661434287473069, + "train_speed(iter/s)": 0.436804 + }, + { + "epoch": 2.25536, + "grad_norm": 0.6988845297949047, + "learning_rate": 7.960004620108516e-07, + "loss": 0.36422601342201233, + "memory(GiB)": 77.0, + "step": 7048, + "token_acc": 0.8720103425985779, + "train_speed(iter/s)": 0.436755 + }, + { + "epoch": 2.25568, + "grad_norm": 0.6807730745192719, + "learning_rate": 7.95355277689158e-07, + "loss": 0.32007262110710144, + "memory(GiB)": 77.0, + "step": 7049, + "token_acc": 0.9006166122597026, + "train_speed(iter/s)": 0.436706 + }, + { + "epoch": 2.2560000000000002, + "grad_norm": 0.6919284522988796, + "learning_rate": 7.947103054809288e-07, + "loss": 0.2732784152030945, + "memory(GiB)": 77.0, + "step": 7050, + "token_acc": 0.9168093956447272, + "train_speed(iter/s)": 0.436661 + }, + { + "epoch": 2.25632, + "grad_norm": 0.6875518914262196, + "learning_rate": 7.940655454664198e-07, + "loss": 0.27277129888534546, + "memory(GiB)": 77.0, + "step": 7051, + "token_acc": 0.9517884914463453, + "train_speed(iter/s)": 0.436605 + }, + { + "epoch": 2.25664, + "grad_norm": 0.6710356667866644, + "learning_rate": 7.9342099772586e-07, + "loss": 0.33956876397132874, + "memory(GiB)": 77.0, + "step": 7052, + "token_acc": 0.8798076923076923, + "train_speed(iter/s)": 0.436553 + }, + { + "epoch": 2.25696, + "grad_norm": 0.6877516842479464, + "learning_rate": 7.927766623394523e-07, + "loss": 0.3114514946937561, + "memory(GiB)": 77.0, + "step": 7053, + "token_acc": 0.9296709930576517, + "train_speed(iter/s)": 0.436509 + }, + { + "epoch": 2.25728, + "grad_norm": 0.7343293471543052, + "learning_rate": 7.921325393873732e-07, + "loss": 0.19780752062797546, + "memory(GiB)": 77.0, + "step": 7054, + "token_acc": 0.9716144293317563, + "train_speed(iter/s)": 0.436466 + }, + { + "epoch": 2.2576, + "grad_norm": 0.6991918067237846, + "learning_rate": 7.914886289497725e-07, + "loss": 0.3609503507614136, + "memory(GiB)": 77.0, + "step": 7055, + "token_acc": 0.9204525366802192, + "train_speed(iter/s)": 0.436412 + }, + { + "epoch": 2.25792, + "grad_norm": 0.6743631740529262, + "learning_rate": 7.908449311067743e-07, + "loss": 0.2852099537849426, + "memory(GiB)": 77.0, + "step": 7056, + "token_acc": 0.881207400194742, + "train_speed(iter/s)": 0.436365 + }, + { + "epoch": 2.25824, + "grad_norm": 0.7540423609982119, + "learning_rate": 7.902014459384744e-07, + "loss": 0.29982250928878784, + "memory(GiB)": 77.0, + "step": 7057, + "token_acc": 0.9276077657124054, + "train_speed(iter/s)": 0.436321 + }, + { + "epoch": 2.25856, + "grad_norm": 0.6926470353403882, + "learning_rate": 7.895581735249438e-07, + "loss": 0.362514853477478, + "memory(GiB)": 77.0, + "step": 7058, + "token_acc": 0.8577810871183916, + "train_speed(iter/s)": 0.436269 + }, + { + "epoch": 2.25888, + "grad_norm": 0.6872852848541285, + "learning_rate": 7.889151139462262e-07, + "loss": 0.29026710987091064, + "memory(GiB)": 77.0, + "step": 7059, + "token_acc": 0.9334875650665124, + "train_speed(iter/s)": 0.436224 + }, + { + "epoch": 2.2592, + "grad_norm": 0.6840809617624147, + "learning_rate": 7.882722672823403e-07, + "loss": 0.2725260853767395, + "memory(GiB)": 77.0, + "step": 
7060, + "token_acc": 0.8863409770687937, + "train_speed(iter/s)": 0.436169 + }, + { + "epoch": 2.25952, + "grad_norm": 0.7355354263317953, + "learning_rate": 7.876296336132772e-07, + "loss": 0.27062445878982544, + "memory(GiB)": 77.0, + "step": 7061, + "token_acc": 0.9452991452991453, + "train_speed(iter/s)": 0.436116 + }, + { + "epoch": 2.25984, + "grad_norm": 0.7566378342140968, + "learning_rate": 7.869872130190007e-07, + "loss": 0.2437649965286255, + "memory(GiB)": 77.0, + "step": 7062, + "token_acc": 0.9586696803358088, + "train_speed(iter/s)": 0.436068 + }, + { + "epoch": 2.26016, + "grad_norm": 0.7500272998422434, + "learning_rate": 7.863450055794492e-07, + "loss": 0.35784587264060974, + "memory(GiB)": 77.0, + "step": 7063, + "token_acc": 0.9183808891838089, + "train_speed(iter/s)": 0.436019 + }, + { + "epoch": 2.26048, + "grad_norm": 0.6756561072807622, + "learning_rate": 7.857030113745342e-07, + "loss": 0.27608048915863037, + "memory(GiB)": 77.0, + "step": 7064, + "token_acc": 0.9242553191489362, + "train_speed(iter/s)": 0.43597 + }, + { + "epoch": 2.2608, + "grad_norm": 0.7856868399575053, + "learning_rate": 7.850612304841409e-07, + "loss": 0.3229117691516876, + "memory(GiB)": 77.0, + "step": 7065, + "token_acc": 0.8866483348609838, + "train_speed(iter/s)": 0.435927 + }, + { + "epoch": 2.26112, + "grad_norm": 0.7006154755255963, + "learning_rate": 7.844196629881276e-07, + "loss": 0.2688787579536438, + "memory(GiB)": 77.0, + "step": 7066, + "token_acc": 0.9067081297201244, + "train_speed(iter/s)": 0.435878 + }, + { + "epoch": 2.26144, + "grad_norm": 0.7006847826352773, + "learning_rate": 7.837783089663267e-07, + "loss": 0.3117731213569641, + "memory(GiB)": 77.0, + "step": 7067, + "token_acc": 0.9193758127438232, + "train_speed(iter/s)": 0.435823 + }, + { + "epoch": 2.2617599999999998, + "grad_norm": 0.7494264799205657, + "learning_rate": 7.831371684985428e-07, + "loss": 0.26432743668556213, + "memory(GiB)": 77.0, + "step": 7068, + "token_acc": 0.9089403973509934, + "train_speed(iter/s)": 0.435781 + }, + { + "epoch": 2.26208, + "grad_norm": 0.7445659806171541, + "learning_rate": 7.824962416645556e-07, + "loss": 0.3282568156719208, + "memory(GiB)": 77.0, + "step": 7069, + "token_acc": 0.8893261190359075, + "train_speed(iter/s)": 0.435731 + }, + { + "epoch": 2.2624, + "grad_norm": 0.8014835150624312, + "learning_rate": 7.818555285441176e-07, + "loss": 0.3704776167869568, + "memory(GiB)": 77.0, + "step": 7070, + "token_acc": 0.897606785822478, + "train_speed(iter/s)": 0.435687 + }, + { + "epoch": 2.26272, + "grad_norm": 0.8548059464142174, + "learning_rate": 7.812150292169526e-07, + "loss": 0.22967864573001862, + "memory(GiB)": 77.0, + "step": 7071, + "token_acc": 0.8900106269925611, + "train_speed(iter/s)": 0.435639 + }, + { + "epoch": 2.26304, + "grad_norm": 0.6992988817677414, + "learning_rate": 7.805747437627612e-07, + "loss": 0.3279535174369812, + "memory(GiB)": 77.0, + "step": 7072, + "token_acc": 0.8851159007726718, + "train_speed(iter/s)": 0.435594 + }, + { + "epoch": 2.26336, + "grad_norm": 0.7310361842415302, + "learning_rate": 7.79934672261215e-07, + "loss": 0.3031453490257263, + "memory(GiB)": 77.0, + "step": 7073, + "token_acc": 0.8390778871978514, + "train_speed(iter/s)": 0.435551 + }, + { + "epoch": 2.26368, + "grad_norm": 0.6379500114155043, + "learning_rate": 7.792948147919605e-07, + "loss": 0.286711722612381, + "memory(GiB)": 77.0, + "step": 7074, + "token_acc": 0.9650884744141559, + "train_speed(iter/s)": 0.435486 + }, + { + "epoch": 2.2640000000000002, + 
"grad_norm": 0.8073952387345323, + "learning_rate": 7.786551714346155e-07, + "loss": 0.33788859844207764, + "memory(GiB)": 77.0, + "step": 7075, + "token_acc": 0.9007572738142686, + "train_speed(iter/s)": 0.435436 + }, + { + "epoch": 2.26432, + "grad_norm": 0.7144178026599596, + "learning_rate": 7.780157422687743e-07, + "loss": 0.34424012899398804, + "memory(GiB)": 77.0, + "step": 7076, + "token_acc": 0.9357191702758149, + "train_speed(iter/s)": 0.435389 + }, + { + "epoch": 2.26464, + "grad_norm": 0.634331111675855, + "learning_rate": 7.773765273740023e-07, + "loss": 0.3130614459514618, + "memory(GiB)": 77.0, + "step": 7077, + "token_acc": 0.9034534913032518, + "train_speed(iter/s)": 0.435342 + }, + { + "epoch": 2.26496, + "grad_norm": 0.7129720366062663, + "learning_rate": 7.767375268298379e-07, + "loss": 0.2724285423755646, + "memory(GiB)": 77.0, + "step": 7078, + "token_acc": 0.9369627507163324, + "train_speed(iter/s)": 0.43529 + }, + { + "epoch": 2.26528, + "grad_norm": 0.6506812037754064, + "learning_rate": 7.760987407157947e-07, + "loss": 0.3018769323825836, + "memory(GiB)": 77.0, + "step": 7079, + "token_acc": 0.9296187683284457, + "train_speed(iter/s)": 0.435239 + }, + { + "epoch": 2.2656, + "grad_norm": 0.7074503270174827, + "learning_rate": 7.754601691113575e-07, + "loss": 0.31703072786331177, + "memory(GiB)": 77.0, + "step": 7080, + "token_acc": 0.8935866983372922, + "train_speed(iter/s)": 0.43519 + }, + { + "epoch": 2.26592, + "grad_norm": 0.6956356113944715, + "learning_rate": 7.748218120959863e-07, + "loss": 0.2838556468486786, + "memory(GiB)": 77.0, + "step": 7081, + "token_acc": 0.8647530385311611, + "train_speed(iter/s)": 0.43514 + }, + { + "epoch": 2.26624, + "grad_norm": 0.645179765451888, + "learning_rate": 7.741836697491132e-07, + "loss": 0.3494287431240082, + "memory(GiB)": 77.0, + "step": 7082, + "token_acc": 0.880569306930693, + "train_speed(iter/s)": 0.435094 + }, + { + "epoch": 2.26656, + "grad_norm": 0.7653886373301202, + "learning_rate": 7.735457421501438e-07, + "loss": 0.26976707577705383, + "memory(GiB)": 77.0, + "step": 7083, + "token_acc": 0.9480836236933798, + "train_speed(iter/s)": 0.435051 + }, + { + "epoch": 2.26688, + "grad_norm": 0.6673182460699366, + "learning_rate": 7.729080293784583e-07, + "loss": 0.3161613643169403, + "memory(GiB)": 77.0, + "step": 7084, + "token_acc": 0.8907128401169327, + "train_speed(iter/s)": 0.434997 + }, + { + "epoch": 2.2672, + "grad_norm": 0.6880896922607381, + "learning_rate": 7.722705315134071e-07, + "loss": 0.2576984763145447, + "memory(GiB)": 77.0, + "step": 7085, + "token_acc": 0.9095890410958904, + "train_speed(iter/s)": 0.434945 + }, + { + "epoch": 2.26752, + "grad_norm": 0.7086307885181188, + "learning_rate": 7.716332486343165e-07, + "loss": 0.31589365005493164, + "memory(GiB)": 77.0, + "step": 7086, + "token_acc": 0.8966030250433921, + "train_speed(iter/s)": 0.434896 + }, + { + "epoch": 2.26784, + "grad_norm": 0.6990738442541801, + "learning_rate": 7.709961808204855e-07, + "loss": 0.2876584529876709, + "memory(GiB)": 77.0, + "step": 7087, + "token_acc": 0.9248381128584644, + "train_speed(iter/s)": 0.434853 + }, + { + "epoch": 2.26816, + "grad_norm": 0.7213529346525172, + "learning_rate": 7.703593281511862e-07, + "loss": 0.32020771503448486, + "memory(GiB)": 77.0, + "step": 7088, + "token_acc": 0.918979744936234, + "train_speed(iter/s)": 0.434803 + }, + { + "epoch": 2.26848, + "grad_norm": 0.6744019805291784, + "learning_rate": 7.697226907056634e-07, + "loss": 0.2623611092567444, + "memory(GiB)": 77.0, + "step": 
7089, + "token_acc": 0.9103043246129204, + "train_speed(iter/s)": 0.434754 + }, + { + "epoch": 2.2688, + "grad_norm": 0.8224133818716094, + "learning_rate": 7.690862685631362e-07, + "loss": 0.3277227580547333, + "memory(GiB)": 77.0, + "step": 7090, + "token_acc": 0.8859721082854799, + "train_speed(iter/s)": 0.434709 + }, + { + "epoch": 2.26912, + "grad_norm": 0.7050029712803334, + "learning_rate": 7.68450061802796e-07, + "loss": 0.27743762731552124, + "memory(GiB)": 77.0, + "step": 7091, + "token_acc": 0.9097372488408038, + "train_speed(iter/s)": 0.434665 + }, + { + "epoch": 2.26944, + "grad_norm": 0.8180459541281907, + "learning_rate": 7.678140705038067e-07, + "loss": 0.2601090371608734, + "memory(GiB)": 77.0, + "step": 7092, + "token_acc": 0.8988033298647242, + "train_speed(iter/s)": 0.434624 + }, + { + "epoch": 2.2697599999999998, + "grad_norm": 0.7486307462944086, + "learning_rate": 7.671782947453085e-07, + "loss": 0.27859389781951904, + "memory(GiB)": 77.0, + "step": 7093, + "token_acc": 0.9191551347414421, + "train_speed(iter/s)": 0.434575 + }, + { + "epoch": 2.27008, + "grad_norm": 0.6573588822619089, + "learning_rate": 7.665427346064114e-07, + "loss": 0.24895772337913513, + "memory(GiB)": 77.0, + "step": 7094, + "token_acc": 0.9254223320972393, + "train_speed(iter/s)": 0.434526 + }, + { + "epoch": 2.2704, + "grad_norm": 0.78591183327193, + "learning_rate": 7.659073901662001e-07, + "loss": 0.30462372303009033, + "memory(GiB)": 77.0, + "step": 7095, + "token_acc": 0.9418432725480532, + "train_speed(iter/s)": 0.434483 + }, + { + "epoch": 2.27072, + "grad_norm": 0.7463494035109469, + "learning_rate": 7.652722615037317e-07, + "loss": 0.2851334810256958, + "memory(GiB)": 77.0, + "step": 7096, + "token_acc": 0.9119394921663966, + "train_speed(iter/s)": 0.434438 + }, + { + "epoch": 2.27104, + "grad_norm": 0.764808151043894, + "learning_rate": 7.646373486980376e-07, + "loss": 0.25173482298851013, + "memory(GiB)": 77.0, + "step": 7097, + "token_acc": 0.8570781426953568, + "train_speed(iter/s)": 0.434392 + }, + { + "epoch": 2.27136, + "grad_norm": 0.7627638201248478, + "learning_rate": 7.640026518281216e-07, + "loss": 0.24844560027122498, + "memory(GiB)": 77.0, + "step": 7098, + "token_acc": 0.9579349904397706, + "train_speed(iter/s)": 0.43435 + }, + { + "epoch": 2.27168, + "grad_norm": 0.8053239219733611, + "learning_rate": 7.633681709729609e-07, + "loss": 0.3412363827228546, + "memory(GiB)": 77.0, + "step": 7099, + "token_acc": 0.8723332773391568, + "train_speed(iter/s)": 0.434296 + }, + { + "epoch": 2.2720000000000002, + "grad_norm": 0.7762249622801152, + "learning_rate": 7.62733906211505e-07, + "loss": 0.2667122781276703, + "memory(GiB)": 77.0, + "step": 7100, + "token_acc": 0.9248366013071896, + "train_speed(iter/s)": 0.434253 + }, + { + "epoch": 2.27232, + "grad_norm": 0.7493620144787367, + "learning_rate": 7.620998576226771e-07, + "loss": 0.31598901748657227, + "memory(GiB)": 77.0, + "step": 7101, + "token_acc": 0.8853156774651217, + "train_speed(iter/s)": 0.434208 + }, + { + "epoch": 2.27264, + "grad_norm": 0.6778016011425553, + "learning_rate": 7.614660252853737e-07, + "loss": 0.24092896282672882, + "memory(GiB)": 77.0, + "step": 7102, + "token_acc": 0.9782934131736527, + "train_speed(iter/s)": 0.434166 + }, + { + "epoch": 2.27296, + "grad_norm": 0.7244929726427248, + "learning_rate": 7.608324092784646e-07, + "loss": 0.3104015588760376, + "memory(GiB)": 77.0, + "step": 7103, + "token_acc": 0.8905597326649958, + "train_speed(iter/s)": 0.434114 + }, + { + "epoch": 2.27328, + 
"grad_norm": 0.7332720134035249, + "learning_rate": 7.601990096807921e-07, + "loss": 0.31729355454444885, + "memory(GiB)": 77.0, + "step": 7104, + "token_acc": 0.9519413521585663, + "train_speed(iter/s)": 0.434068 + }, + { + "epoch": 2.2736, + "grad_norm": 0.7092889845053352, + "learning_rate": 7.595658265711717e-07, + "loss": 0.23648333549499512, + "memory(GiB)": 77.0, + "step": 7105, + "token_acc": 0.9146341463414634, + "train_speed(iter/s)": 0.434025 + }, + { + "epoch": 2.27392, + "grad_norm": 0.794520550545461, + "learning_rate": 7.589328600283922e-07, + "loss": 0.40436747670173645, + "memory(GiB)": 77.0, + "step": 7106, + "token_acc": 0.8596260205425336, + "train_speed(iter/s)": 0.433976 + }, + { + "epoch": 2.27424, + "grad_norm": 0.7485305756555067, + "learning_rate": 7.583001101312154e-07, + "loss": 0.3191211223602295, + "memory(GiB)": 77.0, + "step": 7107, + "token_acc": 0.8809187991134394, + "train_speed(iter/s)": 0.433926 + }, + { + "epoch": 2.27456, + "grad_norm": 0.7502744491760958, + "learning_rate": 7.576675769583758e-07, + "loss": 0.3094015121459961, + "memory(GiB)": 77.0, + "step": 7108, + "token_acc": 0.8346431939641622, + "train_speed(iter/s)": 0.43388 + }, + { + "epoch": 2.27488, + "grad_norm": 0.7157539286289775, + "learning_rate": 7.570352605885806e-07, + "loss": 0.3324131965637207, + "memory(GiB)": 77.0, + "step": 7109, + "token_acc": 0.8592973152137885, + "train_speed(iter/s)": 0.433827 + }, + { + "epoch": 2.2752, + "grad_norm": 0.7355680370434688, + "learning_rate": 7.564031611005118e-07, + "loss": 0.259281724691391, + "memory(GiB)": 77.0, + "step": 7110, + "token_acc": 0.9706477732793523, + "train_speed(iter/s)": 0.433784 + }, + { + "epoch": 2.27552, + "grad_norm": 0.6587750079629461, + "learning_rate": 7.55771278572823e-07, + "loss": 0.2812725901603699, + "memory(GiB)": 77.0, + "step": 7111, + "token_acc": 0.8947694212318168, + "train_speed(iter/s)": 0.433718 + }, + { + "epoch": 2.27584, + "grad_norm": 0.7617507631132979, + "learning_rate": 7.551396130841406e-07, + "loss": 0.316991925239563, + "memory(GiB)": 77.0, + "step": 7112, + "token_acc": 0.917563665003938, + "train_speed(iter/s)": 0.433674 + }, + { + "epoch": 2.27616, + "grad_norm": 0.7313093161188935, + "learning_rate": 7.545081647130656e-07, + "loss": 0.3038349151611328, + "memory(GiB)": 77.0, + "step": 7113, + "token_acc": 0.9001947419668939, + "train_speed(iter/s)": 0.433632 + }, + { + "epoch": 2.27648, + "grad_norm": 0.6642972714752506, + "learning_rate": 7.538769335381688e-07, + "loss": 0.25331348180770874, + "memory(GiB)": 77.0, + "step": 7114, + "token_acc": 0.9315960912052117, + "train_speed(iter/s)": 0.433591 + }, + { + "epoch": 2.2768, + "grad_norm": 0.775318791006697, + "learning_rate": 7.532459196379968e-07, + "loss": 0.36531516909599304, + "memory(GiB)": 77.0, + "step": 7115, + "token_acc": 0.9618482407799915, + "train_speed(iter/s)": 0.433549 + }, + { + "epoch": 2.27712, + "grad_norm": 0.6935419841866802, + "learning_rate": 7.526151230910686e-07, + "loss": 0.33471423387527466, + "memory(GiB)": 77.0, + "step": 7116, + "token_acc": 0.8874698795180723, + "train_speed(iter/s)": 0.433491 + }, + { + "epoch": 2.27744, + "grad_norm": 0.756655434857498, + "learning_rate": 7.519845439758758e-07, + "loss": 0.32921892404556274, + "memory(GiB)": 77.0, + "step": 7117, + "token_acc": 0.9616538846282094, + "train_speed(iter/s)": 0.433444 + }, + { + "epoch": 2.27776, + "grad_norm": 0.7247449612351217, + "learning_rate": 7.513541823708828e-07, + "loss": 0.27966636419296265, + "memory(GiB)": 77.0, + "step": 
7118, + "token_acc": 0.8598130841121495, + "train_speed(iter/s)": 0.433399 + }, + { + "epoch": 2.27808, + "grad_norm": 0.6679631869887052, + "learning_rate": 7.507240383545273e-07, + "loss": 0.29748064279556274, + "memory(GiB)": 77.0, + "step": 7119, + "token_acc": 0.8212529399187514, + "train_speed(iter/s)": 0.433354 + }, + { + "epoch": 2.2784, + "grad_norm": 0.6700658296111726, + "learning_rate": 7.500941120052199e-07, + "loss": 0.2520289719104767, + "memory(GiB)": 77.0, + "step": 7120, + "token_acc": 0.9772563176895307, + "train_speed(iter/s)": 0.43331 + }, + { + "epoch": 2.27872, + "grad_norm": 0.7165043894276772, + "learning_rate": 7.494644034013437e-07, + "loss": 0.2876600921154022, + "memory(GiB)": 77.0, + "step": 7121, + "token_acc": 0.8843914610479623, + "train_speed(iter/s)": 0.433266 + }, + { + "epoch": 2.27904, + "grad_norm": 0.6280834915320904, + "learning_rate": 7.488349126212557e-07, + "loss": 0.26205524802207947, + "memory(GiB)": 77.0, + "step": 7122, + "token_acc": 0.9347767976476877, + "train_speed(iter/s)": 0.433218 + }, + { + "epoch": 2.27936, + "grad_norm": 0.7461266959317964, + "learning_rate": 7.482056397432843e-07, + "loss": 0.27335283160209656, + "memory(GiB)": 77.0, + "step": 7123, + "token_acc": 0.9310344827586207, + "train_speed(iter/s)": 0.433176 + }, + { + "epoch": 2.27968, + "grad_norm": 0.6489065414720008, + "learning_rate": 7.475765848457319e-07, + "loss": 0.27709418535232544, + "memory(GiB)": 77.0, + "step": 7124, + "token_acc": 0.851106979888457, + "train_speed(iter/s)": 0.433123 + }, + { + "epoch": 2.2800000000000002, + "grad_norm": 0.7295838817140888, + "learning_rate": 7.469477480068738e-07, + "loss": 0.32622838020324707, + "memory(GiB)": 77.0, + "step": 7125, + "token_acc": 0.8801546391752577, + "train_speed(iter/s)": 0.433078 + }, + { + "epoch": 2.28032, + "grad_norm": 0.7346358226238744, + "learning_rate": 7.463191293049568e-07, + "loss": 0.2994651198387146, + "memory(GiB)": 77.0, + "step": 7126, + "token_acc": 0.8566279535442531, + "train_speed(iter/s)": 0.433035 + }, + { + "epoch": 2.28064, + "grad_norm": 0.7540512678799549, + "learning_rate": 7.456907288182036e-07, + "loss": 0.3004564046859741, + "memory(GiB)": 77.0, + "step": 7127, + "token_acc": 0.8991359879789632, + "train_speed(iter/s)": 0.432991 + }, + { + "epoch": 2.28096, + "grad_norm": 0.7658045980863345, + "learning_rate": 7.45062546624806e-07, + "loss": 0.30104225873947144, + "memory(GiB)": 77.0, + "step": 7128, + "token_acc": 0.9421248449772633, + "train_speed(iter/s)": 0.432934 + }, + { + "epoch": 2.2812799999999998, + "grad_norm": 0.6800314196061438, + "learning_rate": 7.444345828029306e-07, + "loss": 0.3064882159233093, + "memory(GiB)": 77.0, + "step": 7129, + "token_acc": 0.9078341013824884, + "train_speed(iter/s)": 0.432888 + }, + { + "epoch": 2.2816, + "grad_norm": 0.7873623730163763, + "learning_rate": 7.43806837430717e-07, + "loss": 0.41186973452568054, + "memory(GiB)": 77.0, + "step": 7130, + "token_acc": 0.8923250564334085, + "train_speed(iter/s)": 0.432842 + }, + { + "epoch": 2.28192, + "grad_norm": 0.7543670392064429, + "learning_rate": 7.43179310586277e-07, + "loss": 0.32371842861175537, + "memory(GiB)": 77.0, + "step": 7131, + "token_acc": 0.823442574549749, + "train_speed(iter/s)": 0.432797 + }, + { + "epoch": 2.28224, + "grad_norm": 0.7819829266085095, + "learning_rate": 7.425520023476956e-07, + "loss": 0.33251914381980896, + "memory(GiB)": 77.0, + "step": 7132, + "token_acc": 0.876993166287016, + "train_speed(iter/s)": 0.432748 + }, + { + "epoch": 2.28256, + 
"grad_norm": 0.6668963168722529, + "learning_rate": 7.419249127930304e-07, + "loss": 0.28236597776412964, + "memory(GiB)": 77.0, + "step": 7133, + "token_acc": 0.9047769693135084, + "train_speed(iter/s)": 0.432701 + }, + { + "epoch": 2.28288, + "grad_norm": 0.7866998600211786, + "learning_rate": 7.412980420003116e-07, + "loss": 0.3035544753074646, + "memory(GiB)": 77.0, + "step": 7134, + "token_acc": 0.9198208286674132, + "train_speed(iter/s)": 0.43265 + }, + { + "epoch": 2.2832, + "grad_norm": 0.7003359231200453, + "learning_rate": 7.406713900475426e-07, + "loss": 0.27486979961395264, + "memory(GiB)": 77.0, + "step": 7135, + "token_acc": 0.9611197511664075, + "train_speed(iter/s)": 0.432605 + }, + { + "epoch": 2.28352, + "grad_norm": 0.7015914965546083, + "learning_rate": 7.400449570126994e-07, + "loss": 0.28132519125938416, + "memory(GiB)": 77.0, + "step": 7136, + "token_acc": 0.9037735849056604, + "train_speed(iter/s)": 0.432559 + }, + { + "epoch": 2.28384, + "grad_norm": 0.6628375007037745, + "learning_rate": 7.394187429737304e-07, + "loss": 0.29857000708580017, + "memory(GiB)": 77.0, + "step": 7137, + "token_acc": 0.9533213644524237, + "train_speed(iter/s)": 0.432511 + }, + { + "epoch": 2.28416, + "grad_norm": 0.7174954818688201, + "learning_rate": 7.387927480085578e-07, + "loss": 0.33504438400268555, + "memory(GiB)": 77.0, + "step": 7138, + "token_acc": 0.8608449819381184, + "train_speed(iter/s)": 0.432467 + }, + { + "epoch": 2.28448, + "grad_norm": 0.7339001410921027, + "learning_rate": 7.381669721950749e-07, + "loss": 0.3370920419692993, + "memory(GiB)": 77.0, + "step": 7139, + "token_acc": 0.8896522112494633, + "train_speed(iter/s)": 0.432422 + }, + { + "epoch": 2.2848, + "grad_norm": 0.6711212637247221, + "learning_rate": 7.37541415611149e-07, + "loss": 0.338834673166275, + "memory(GiB)": 77.0, + "step": 7140, + "token_acc": 0.8467576153989909, + "train_speed(iter/s)": 0.432377 + }, + { + "epoch": 2.28512, + "grad_norm": 0.7103738177344994, + "learning_rate": 7.369160783346202e-07, + "loss": 0.26126259565353394, + "memory(GiB)": 77.0, + "step": 7141, + "token_acc": 0.9027713625866051, + "train_speed(iter/s)": 0.432336 + }, + { + "epoch": 2.28544, + "grad_norm": 0.7535375503479438, + "learning_rate": 7.362909604433003e-07, + "loss": 0.31034451723098755, + "memory(GiB)": 77.0, + "step": 7142, + "token_acc": 0.9354407836153161, + "train_speed(iter/s)": 0.432294 + }, + { + "epoch": 2.28576, + "grad_norm": 0.725085574646857, + "learning_rate": 7.356660620149747e-07, + "loss": 0.3038851022720337, + "memory(GiB)": 77.0, + "step": 7143, + "token_acc": 0.9258641239570917, + "train_speed(iter/s)": 0.43225 + }, + { + "epoch": 2.28608, + "grad_norm": 0.7956998422317948, + "learning_rate": 7.350413831274009e-07, + "loss": 0.38275349140167236, + "memory(GiB)": 77.0, + "step": 7144, + "token_acc": 0.9360877985797289, + "train_speed(iter/s)": 0.432202 + }, + { + "epoch": 2.2864, + "grad_norm": 0.7640235509507163, + "learning_rate": 7.344169238583096e-07, + "loss": 0.33793729543685913, + "memory(GiB)": 77.0, + "step": 7145, + "token_acc": 0.9138020833333333, + "train_speed(iter/s)": 0.432152 + }, + { + "epoch": 2.28672, + "grad_norm": 0.8766967995729587, + "learning_rate": 7.33792684285404e-07, + "loss": 0.3282832205295563, + "memory(GiB)": 77.0, + "step": 7146, + "token_acc": 0.9240855762594893, + "train_speed(iter/s)": 0.432103 + }, + { + "epoch": 2.28704, + "grad_norm": 0.7811806819679552, + "learning_rate": 7.331686644863595e-07, + "loss": 0.28270918130874634, + "memory(GiB)": 77.0, + "step": 
7147, + "token_acc": 0.8748414376321353, + "train_speed(iter/s)": 0.43206 + }, + { + "epoch": 2.28736, + "grad_norm": 0.7141931438621903, + "learning_rate": 7.32544864538825e-07, + "loss": 0.3754962086677551, + "memory(GiB)": 77.0, + "step": 7148, + "token_acc": 0.9485924112607099, + "train_speed(iter/s)": 0.43201 + }, + { + "epoch": 2.28768, + "grad_norm": 0.7623830243624217, + "learning_rate": 7.31921284520421e-07, + "loss": 0.3578658103942871, + "memory(GiB)": 77.0, + "step": 7149, + "token_acc": 0.9337111454576679, + "train_speed(iter/s)": 0.431957 + }, + { + "epoch": 2.288, + "grad_norm": 0.7001889866876972, + "learning_rate": 7.312979245087417e-07, + "loss": 0.25102508068084717, + "memory(GiB)": 77.0, + "step": 7150, + "token_acc": 0.9653013305504826, + "train_speed(iter/s)": 0.431915 + }, + { + "epoch": 2.28832, + "grad_norm": 0.6434840368072102, + "learning_rate": 7.306747845813536e-07, + "loss": 0.2874789834022522, + "memory(GiB)": 77.0, + "step": 7151, + "token_acc": 0.876596831885539, + "train_speed(iter/s)": 0.431868 + }, + { + "epoch": 2.28864, + "grad_norm": 0.6950379887895113, + "learning_rate": 7.30051864815795e-07, + "loss": 0.322986900806427, + "memory(GiB)": 77.0, + "step": 7152, + "token_acc": 0.8905003625815808, + "train_speed(iter/s)": 0.431818 + }, + { + "epoch": 2.28896, + "grad_norm": 0.6983322656142761, + "learning_rate": 7.29429165289578e-07, + "loss": 0.2657158374786377, + "memory(GiB)": 77.0, + "step": 7153, + "token_acc": 0.9195561719833565, + "train_speed(iter/s)": 0.431774 + }, + { + "epoch": 2.2892799999999998, + "grad_norm": 0.8443407158098494, + "learning_rate": 7.288066860801865e-07, + "loss": 0.30213862657546997, + "memory(GiB)": 77.0, + "step": 7154, + "token_acc": 0.9519230769230769, + "train_speed(iter/s)": 0.431732 + }, + { + "epoch": 2.2896, + "grad_norm": 0.6998132147090438, + "learning_rate": 7.281844272650773e-07, + "loss": 0.3892125189304352, + "memory(GiB)": 77.0, + "step": 7155, + "token_acc": 0.8916083916083916, + "train_speed(iter/s)": 0.431683 + }, + { + "epoch": 2.28992, + "grad_norm": 0.699202121707163, + "learning_rate": 7.275623889216796e-07, + "loss": 0.2276475727558136, + "memory(GiB)": 77.0, + "step": 7156, + "token_acc": 0.9175401816911251, + "train_speed(iter/s)": 0.431629 + }, + { + "epoch": 2.29024, + "grad_norm": 0.729307241583717, + "learning_rate": 7.269405711273955e-07, + "loss": 0.3702465891838074, + "memory(GiB)": 77.0, + "step": 7157, + "token_acc": 0.8434200360545969, + "train_speed(iter/s)": 0.431583 + }, + { + "epoch": 2.29056, + "grad_norm": 0.7379491136040305, + "learning_rate": 7.263189739595994e-07, + "loss": 0.28466254472732544, + "memory(GiB)": 77.0, + "step": 7158, + "token_acc": 0.8615384615384616, + "train_speed(iter/s)": 0.431537 + }, + { + "epoch": 2.29088, + "grad_norm": 0.735609348714969, + "learning_rate": 7.256975974956382e-07, + "loss": 0.31666937470436096, + "memory(GiB)": 77.0, + "step": 7159, + "token_acc": 0.8867214236824094, + "train_speed(iter/s)": 0.431486 + }, + { + "epoch": 2.2912, + "grad_norm": 0.6969419361457204, + "learning_rate": 7.250764418128314e-07, + "loss": 0.37868183851242065, + "memory(GiB)": 77.0, + "step": 7160, + "token_acc": 0.9234324758842444, + "train_speed(iter/s)": 0.43143 + }, + { + "epoch": 2.2915200000000002, + "grad_norm": 0.6774532619830783, + "learning_rate": 7.244555069884712e-07, + "loss": 0.2662107050418854, + "memory(GiB)": 77.0, + "step": 7161, + "token_acc": 0.8973641240961046, + "train_speed(iter/s)": 0.431382 + }, + { + "epoch": 2.29184, + "grad_norm": 
0.7100101078812034, + "learning_rate": 7.238347930998218e-07, + "loss": 0.2794052064418793, + "memory(GiB)": 77.0, + "step": 7162, + "token_acc": 0.9098693105002253, + "train_speed(iter/s)": 0.431339 + }, + { + "epoch": 2.29216, + "grad_norm": 0.8001754701857349, + "learning_rate": 7.23214300224121e-07, + "loss": 0.3784180283546448, + "memory(GiB)": 77.0, + "step": 7163, + "token_acc": 0.9028651292802237, + "train_speed(iter/s)": 0.431291 + }, + { + "epoch": 2.29248, + "grad_norm": 0.7553453551883096, + "learning_rate": 7.225940284385774e-07, + "loss": 0.2731419801712036, + "memory(GiB)": 77.0, + "step": 7164, + "token_acc": 0.9095853161114887, + "train_speed(iter/s)": 0.431244 + }, + { + "epoch": 2.2928, + "grad_norm": 0.8104742094193075, + "learning_rate": 7.219739778203738e-07, + "loss": 0.29947149753570557, + "memory(GiB)": 77.0, + "step": 7165, + "token_acc": 0.9013975155279503, + "train_speed(iter/s)": 0.431199 + }, + { + "epoch": 2.29312, + "grad_norm": 0.7964952389753227, + "learning_rate": 7.213541484466646e-07, + "loss": 0.4244093894958496, + "memory(GiB)": 77.0, + "step": 7166, + "token_acc": 0.9, + "train_speed(iter/s)": 0.431151 + }, + { + "epoch": 2.29344, + "grad_norm": 0.698616357571513, + "learning_rate": 7.207345403945767e-07, + "loss": 0.2822749614715576, + "memory(GiB)": 77.0, + "step": 7167, + "token_acc": 0.9479238357421427, + "train_speed(iter/s)": 0.431106 + }, + { + "epoch": 2.29376, + "grad_norm": 0.7521287077407177, + "learning_rate": 7.201151537412096e-07, + "loss": 0.3558032810688019, + "memory(GiB)": 77.0, + "step": 7168, + "token_acc": 0.9030288800187838, + "train_speed(iter/s)": 0.431063 + }, + { + "epoch": 2.29408, + "grad_norm": 0.6855340489197252, + "learning_rate": 7.194959885636352e-07, + "loss": 0.30564752221107483, + "memory(GiB)": 77.0, + "step": 7169, + "token_acc": 0.9417322834645669, + "train_speed(iter/s)": 0.431018 + }, + { + "epoch": 2.2944, + "grad_norm": 0.8879608024893824, + "learning_rate": 7.18877044938898e-07, + "loss": 0.27640777826309204, + "memory(GiB)": 77.0, + "step": 7170, + "token_acc": 0.8462316641375822, + "train_speed(iter/s)": 0.430975 + }, + { + "epoch": 2.29472, + "grad_norm": 0.7098328461937535, + "learning_rate": 7.182583229440151e-07, + "loss": 0.2569010853767395, + "memory(GiB)": 77.0, + "step": 7171, + "token_acc": 0.8973727422003284, + "train_speed(iter/s)": 0.430933 + }, + { + "epoch": 2.29504, + "grad_norm": 0.6961738144272775, + "learning_rate": 7.176398226559741e-07, + "loss": 0.26872336864471436, + "memory(GiB)": 77.0, + "step": 7172, + "token_acc": 0.9393044619422573, + "train_speed(iter/s)": 0.43089 + }, + { + "epoch": 2.29536, + "grad_norm": 0.7209067397797783, + "learning_rate": 7.170215441517386e-07, + "loss": 0.365989625453949, + "memory(GiB)": 77.0, + "step": 7173, + "token_acc": 0.9123954925481643, + "train_speed(iter/s)": 0.430842 + }, + { + "epoch": 2.29568, + "grad_norm": 0.7441146645042028, + "learning_rate": 7.164034875082418e-07, + "loss": 0.307734876871109, + "memory(GiB)": 77.0, + "step": 7174, + "token_acc": 0.9001554001554002, + "train_speed(iter/s)": 0.430797 + }, + { + "epoch": 2.296, + "grad_norm": 0.8039214366490646, + "learning_rate": 7.157856528023901e-07, + "loss": 0.3838435709476471, + "memory(GiB)": 77.0, + "step": 7175, + "token_acc": 0.8238428238428238, + "train_speed(iter/s)": 0.430751 + }, + { + "epoch": 2.29632, + "grad_norm": 0.7501582503098896, + "learning_rate": 7.151680401110625e-07, + "loss": 0.25544407963752747, + "memory(GiB)": 77.0, + "step": 7176, + "token_acc": 
0.9389650597080937, + "train_speed(iter/s)": 0.430703 + }, + { + "epoch": 2.29664, + "grad_norm": 0.7078794617836568, + "learning_rate": 7.145506495111102e-07, + "loss": 0.2548503875732422, + "memory(GiB)": 77.0, + "step": 7177, + "token_acc": 0.9180914512922466, + "train_speed(iter/s)": 0.430657 + }, + { + "epoch": 2.29696, + "grad_norm": 0.6990194878530682, + "learning_rate": 7.139334810793566e-07, + "loss": 0.26236122846603394, + "memory(GiB)": 77.0, + "step": 7178, + "token_acc": 0.9621113143454403, + "train_speed(iter/s)": 0.430611 + }, + { + "epoch": 2.2972799999999998, + "grad_norm": 0.6880099992565352, + "learning_rate": 7.133165348925978e-07, + "loss": 0.2412215769290924, + "memory(GiB)": 77.0, + "step": 7179, + "token_acc": 0.8909290216497671, + "train_speed(iter/s)": 0.430566 + }, + { + "epoch": 2.2976, + "grad_norm": 0.7420240461126814, + "learning_rate": 7.126998110276015e-07, + "loss": 0.32481980323791504, + "memory(GiB)": 77.0, + "step": 7180, + "token_acc": 0.8238583410997204, + "train_speed(iter/s)": 0.430522 + }, + { + "epoch": 2.29792, + "grad_norm": 0.6826380819383574, + "learning_rate": 7.120833095611091e-07, + "loss": 0.3490506708621979, + "memory(GiB)": 77.0, + "step": 7181, + "token_acc": 0.9005018546803404, + "train_speed(iter/s)": 0.430476 + }, + { + "epoch": 2.29824, + "grad_norm": 0.8257485483562874, + "learning_rate": 7.114670305698329e-07, + "loss": 0.26562315225601196, + "memory(GiB)": 77.0, + "step": 7182, + "token_acc": 0.9248466257668712, + "train_speed(iter/s)": 0.430429 + }, + { + "epoch": 2.29856, + "grad_norm": 0.7743554644236786, + "learning_rate": 7.108509741304584e-07, + "loss": 0.2663164734840393, + "memory(GiB)": 77.0, + "step": 7183, + "token_acc": 0.9342417061611374, + "train_speed(iter/s)": 0.430387 + }, + { + "epoch": 2.29888, + "grad_norm": 0.7367816673723959, + "learning_rate": 7.102351403196431e-07, + "loss": 0.33050987124443054, + "memory(GiB)": 77.0, + "step": 7184, + "token_acc": 0.926217556138816, + "train_speed(iter/s)": 0.430337 + }, + { + "epoch": 2.2992, + "grad_norm": 0.7489717592426419, + "learning_rate": 7.096195292140173e-07, + "loss": 0.3286319375038147, + "memory(GiB)": 77.0, + "step": 7185, + "token_acc": 0.9517006802721089, + "train_speed(iter/s)": 0.430286 + }, + { + "epoch": 2.2995200000000002, + "grad_norm": 0.7048321114973986, + "learning_rate": 7.090041408901824e-07, + "loss": 0.2882639169692993, + "memory(GiB)": 77.0, + "step": 7186, + "token_acc": 0.8716433941997852, + "train_speed(iter/s)": 0.430243 + }, + { + "epoch": 2.29984, + "grad_norm": 0.6697396291974914, + "learning_rate": 7.083889754247125e-07, + "loss": 0.31534838676452637, + "memory(GiB)": 77.0, + "step": 7187, + "token_acc": 0.9111475409836065, + "train_speed(iter/s)": 0.430199 + }, + { + "epoch": 2.30016, + "grad_norm": 0.7231710252708078, + "learning_rate": 7.077740328941551e-07, + "loss": 0.2906668186187744, + "memory(GiB)": 77.0, + "step": 7188, + "token_acc": 0.9000287273771904, + "train_speed(iter/s)": 0.430155 + }, + { + "epoch": 2.30048, + "grad_norm": 0.6369175286482537, + "learning_rate": 7.07159313375028e-07, + "loss": 0.29093602299690247, + "memory(GiB)": 77.0, + "step": 7189, + "token_acc": 0.9375560203167015, + "train_speed(iter/s)": 0.430104 + }, + { + "epoch": 2.3008, + "grad_norm": 0.732291699710038, + "learning_rate": 7.065448169438241e-07, + "loss": 0.27297133207321167, + "memory(GiB)": 77.0, + "step": 7190, + "token_acc": 0.928271085827356, + "train_speed(iter/s)": 0.430062 + }, + { + "epoch": 2.30112, + "grad_norm": 
0.7329026611816034, + "learning_rate": 7.059305436770056e-07, + "loss": 0.37380337715148926, + "memory(GiB)": 77.0, + "step": 7191, + "token_acc": 0.8239316239316239, + "train_speed(iter/s)": 0.43002 + }, + { + "epoch": 2.30144, + "grad_norm": 0.6896892929830063, + "learning_rate": 7.053164936510087e-07, + "loss": 0.2779454290866852, + "memory(GiB)": 77.0, + "step": 7192, + "token_acc": 0.8522698127725058, + "train_speed(iter/s)": 0.429977 + }, + { + "epoch": 2.30176, + "grad_norm": 0.7278266043675271, + "learning_rate": 7.047026669422411e-07, + "loss": 0.31772106885910034, + "memory(GiB)": 77.0, + "step": 7193, + "token_acc": 0.903862660944206, + "train_speed(iter/s)": 0.429908 + }, + { + "epoch": 2.30208, + "grad_norm": 0.6743391652808781, + "learning_rate": 7.040890636270828e-07, + "loss": 0.29706674814224243, + "memory(GiB)": 77.0, + "step": 7194, + "token_acc": 0.975925925925926, + "train_speed(iter/s)": 0.429865 + }, + { + "epoch": 2.3024, + "grad_norm": 0.6963843743382113, + "learning_rate": 7.034756837818863e-07, + "loss": 0.28987520933151245, + "memory(GiB)": 77.0, + "step": 7195, + "token_acc": 0.8903699673558215, + "train_speed(iter/s)": 0.42982 + }, + { + "epoch": 2.30272, + "grad_norm": 0.645220391006776, + "learning_rate": 7.028625274829756e-07, + "loss": 0.21671552956104279, + "memory(GiB)": 77.0, + "step": 7196, + "token_acc": 0.9606211869107044, + "train_speed(iter/s)": 0.42977 + }, + { + "epoch": 2.30304, + "grad_norm": 0.6663549408046782, + "learning_rate": 7.022495948066479e-07, + "loss": 0.2935033440589905, + "memory(GiB)": 77.0, + "step": 7197, + "token_acc": 0.8744088281660536, + "train_speed(iter/s)": 0.429726 + }, + { + "epoch": 2.30336, + "grad_norm": 0.6801452008808805, + "learning_rate": 7.016368858291716e-07, + "loss": 0.30832523107528687, + "memory(GiB)": 77.0, + "step": 7198, + "token_acc": 0.9338605319913731, + "train_speed(iter/s)": 0.429677 + }, + { + "epoch": 2.30368, + "grad_norm": 0.6339168559064254, + "learning_rate": 7.010244006267889e-07, + "loss": 0.2550479769706726, + "memory(GiB)": 77.0, + "step": 7199, + "token_acc": 0.9318915429309231, + "train_speed(iter/s)": 0.429629 + }, + { + "epoch": 2.304, + "grad_norm": 0.684639368673143, + "learning_rate": 7.004121392757113e-07, + "loss": 0.3437058925628662, + "memory(GiB)": 77.0, + "step": 7200, + "token_acc": 0.9165654110976104, + "train_speed(iter/s)": 0.429585 + }, + { + "epoch": 2.30432, + "grad_norm": 0.845217826218364, + "learning_rate": 6.998001018521245e-07, + "loss": 0.31033259630203247, + "memory(GiB)": 77.0, + "step": 7201, + "token_acc": 0.8949932341001353, + "train_speed(iter/s)": 0.429543 + }, + { + "epoch": 2.30464, + "grad_norm": 0.6934135870013934, + "learning_rate": 6.991882884321863e-07, + "loss": 0.36627885699272156, + "memory(GiB)": 77.0, + "step": 7202, + "token_acc": 0.9418103448275862, + "train_speed(iter/s)": 0.429501 + }, + { + "epoch": 2.30496, + "grad_norm": 0.7176967488764826, + "learning_rate": 6.985766990920261e-07, + "loss": 0.2679932713508606, + "memory(GiB)": 77.0, + "step": 7203, + "token_acc": 0.9462686567164179, + "train_speed(iter/s)": 0.429454 + }, + { + "epoch": 2.3052799999999998, + "grad_norm": 0.8422607950199014, + "learning_rate": 6.979653339077461e-07, + "loss": 0.3788309097290039, + "memory(GiB)": 77.0, + "step": 7204, + "token_acc": 0.8729309905780493, + "train_speed(iter/s)": 0.429415 + }, + { + "epoch": 2.3056, + "grad_norm": 0.7394951764158959, + "learning_rate": 6.973541929554192e-07, + "loss": 0.25358644127845764, + "memory(GiB)": 77.0, + "step": 7205, 
+ "token_acc": 0.9663865546218487, + "train_speed(iter/s)": 0.429364 + }, + { + "epoch": 2.30592, + "grad_norm": 0.700262165668372, + "learning_rate": 6.967432763110913e-07, + "loss": 0.27519991993904114, + "memory(GiB)": 77.0, + "step": 7206, + "token_acc": 0.9425252216447569, + "train_speed(iter/s)": 0.429318 + }, + { + "epoch": 2.30624, + "grad_norm": 0.6118322323006778, + "learning_rate": 6.961325840507812e-07, + "loss": 0.2478257715702057, + "memory(GiB)": 77.0, + "step": 7207, + "token_acc": 0.8762841530054645, + "train_speed(iter/s)": 0.429275 + }, + { + "epoch": 2.30656, + "grad_norm": 0.7758575506304463, + "learning_rate": 6.955221162504791e-07, + "loss": 0.35642993450164795, + "memory(GiB)": 77.0, + "step": 7208, + "token_acc": 0.8794705062703205, + "train_speed(iter/s)": 0.429235 + }, + { + "epoch": 2.30688, + "grad_norm": 0.735366285736517, + "learning_rate": 6.949118729861464e-07, + "loss": 0.2666056752204895, + "memory(GiB)": 77.0, + "step": 7209, + "token_acc": 0.8767988767988768, + "train_speed(iter/s)": 0.429193 + }, + { + "epoch": 2.3072, + "grad_norm": 0.7012082572327921, + "learning_rate": 6.943018543337173e-07, + "loss": 0.27521812915802, + "memory(GiB)": 77.0, + "step": 7210, + "token_acc": 0.9191605130198213, + "train_speed(iter/s)": 0.429143 + }, + { + "epoch": 2.3075200000000002, + "grad_norm": 0.7170754513975, + "learning_rate": 6.936920603690985e-07, + "loss": 0.29268521070480347, + "memory(GiB)": 77.0, + "step": 7211, + "token_acc": 0.8865588681152097, + "train_speed(iter/s)": 0.429099 + }, + { + "epoch": 2.30784, + "grad_norm": 0.6853072586592662, + "learning_rate": 6.93082491168168e-07, + "loss": 0.3186447024345398, + "memory(GiB)": 77.0, + "step": 7212, + "token_acc": 0.8657699698051057, + "train_speed(iter/s)": 0.429057 + }, + { + "epoch": 2.30816, + "grad_norm": 0.7853558642489251, + "learning_rate": 6.92473146806776e-07, + "loss": 0.3826756477355957, + "memory(GiB)": 77.0, + "step": 7213, + "token_acc": 0.825561729886446, + "train_speed(iter/s)": 0.429014 + }, + { + "epoch": 2.30848, + "grad_norm": 0.7015693527226529, + "learning_rate": 6.918640273607461e-07, + "loss": 0.2815248668193817, + "memory(GiB)": 77.0, + "step": 7214, + "token_acc": 0.9209134615384615, + "train_speed(iter/s)": 0.428974 + }, + { + "epoch": 2.3088, + "grad_norm": 0.7881457869891937, + "learning_rate": 6.912551329058703e-07, + "loss": 0.31120824813842773, + "memory(GiB)": 77.0, + "step": 7215, + "token_acc": 0.8656483790523691, + "train_speed(iter/s)": 0.428924 + }, + { + "epoch": 2.30912, + "grad_norm": 0.6799215614207688, + "learning_rate": 6.906464635179167e-07, + "loss": 0.3225153982639313, + "memory(GiB)": 77.0, + "step": 7216, + "token_acc": 0.9570167947623114, + "train_speed(iter/s)": 0.428878 + }, + { + "epoch": 2.30944, + "grad_norm": 0.7295414729148554, + "learning_rate": 6.900380192726224e-07, + "loss": 0.19873172044754028, + "memory(GiB)": 77.0, + "step": 7217, + "token_acc": 0.941812865497076, + "train_speed(iter/s)": 0.428838 + }, + { + "epoch": 2.30976, + "grad_norm": 0.6941221819309288, + "learning_rate": 6.89429800245699e-07, + "loss": 0.26336944103240967, + "memory(GiB)": 77.0, + "step": 7218, + "token_acc": 0.8916806253489671, + "train_speed(iter/s)": 0.428795 + }, + { + "epoch": 2.31008, + "grad_norm": 0.6525826884209852, + "learning_rate": 6.888218065128282e-07, + "loss": 0.195905864238739, + "memory(GiB)": 77.0, + "step": 7219, + "token_acc": 0.971335857220119, + "train_speed(iter/s)": 0.428748 + }, + { + "epoch": 2.3104, + "grad_norm": 0.7285717174948062, + 
"learning_rate": 6.882140381496644e-07, + "loss": 0.2562780976295471, + "memory(GiB)": 77.0, + "step": 7220, + "token_acc": 0.8882268827454719, + "train_speed(iter/s)": 0.428704 + }, + { + "epoch": 2.31072, + "grad_norm": 0.6400293305463308, + "learning_rate": 6.876064952318334e-07, + "loss": 0.30104202032089233, + "memory(GiB)": 77.0, + "step": 7221, + "token_acc": 0.9172430240877654, + "train_speed(iter/s)": 0.428651 + }, + { + "epoch": 2.31104, + "grad_norm": 0.7415281418813254, + "learning_rate": 6.869991778349342e-07, + "loss": 0.2761085629463196, + "memory(GiB)": 77.0, + "step": 7222, + "token_acc": 0.8985102420856611, + "train_speed(iter/s)": 0.428606 + }, + { + "epoch": 2.31136, + "grad_norm": 0.7475946018476003, + "learning_rate": 6.863920860345352e-07, + "loss": 0.4075564742088318, + "memory(GiB)": 77.0, + "step": 7223, + "token_acc": 0.9019607843137255, + "train_speed(iter/s)": 0.428562 + }, + { + "epoch": 2.31168, + "grad_norm": 0.7011391166792278, + "learning_rate": 6.857852199061806e-07, + "loss": 0.2697700262069702, + "memory(GiB)": 77.0, + "step": 7224, + "token_acc": 0.9510445899594637, + "train_speed(iter/s)": 0.428522 + }, + { + "epoch": 2.312, + "grad_norm": 0.7401338301627773, + "learning_rate": 6.851785795253838e-07, + "loss": 0.33727148175239563, + "memory(GiB)": 77.0, + "step": 7225, + "token_acc": 0.9106663412252868, + "train_speed(iter/s)": 0.428478 + }, + { + "epoch": 2.31232, + "grad_norm": 0.697300739706855, + "learning_rate": 6.845721649676301e-07, + "loss": 0.2789509892463684, + "memory(GiB)": 77.0, + "step": 7226, + "token_acc": 0.9093647316538883, + "train_speed(iter/s)": 0.428434 + }, + { + "epoch": 2.31264, + "grad_norm": 0.7439158102352841, + "learning_rate": 6.839659763083776e-07, + "loss": 0.23712937533855438, + "memory(GiB)": 77.0, + "step": 7227, + "token_acc": 0.8917910447761194, + "train_speed(iter/s)": 0.428394 + }, + { + "epoch": 2.31296, + "grad_norm": 0.7529347469356168, + "learning_rate": 6.833600136230565e-07, + "loss": 0.2362164407968521, + "memory(GiB)": 77.0, + "step": 7228, + "token_acc": 0.9293419633225458, + "train_speed(iter/s)": 0.428354 + }, + { + "epoch": 2.31328, + "grad_norm": 0.764209297552805, + "learning_rate": 6.827542769870668e-07, + "loss": 0.36904680728912354, + "memory(GiB)": 77.0, + "step": 7229, + "token_acc": 0.9187836797536567, + "train_speed(iter/s)": 0.428309 + }, + { + "epoch": 2.3136, + "grad_norm": 0.6923351175037472, + "learning_rate": 6.821487664757831e-07, + "loss": 0.3361387252807617, + "memory(GiB)": 77.0, + "step": 7230, + "token_acc": 0.8850885088508851, + "train_speed(iter/s)": 0.428262 + }, + { + "epoch": 2.31392, + "grad_norm": 0.6966424108952455, + "learning_rate": 6.815434821645503e-07, + "loss": 0.2876509428024292, + "memory(GiB)": 77.0, + "step": 7231, + "token_acc": 0.8623442367601246, + "train_speed(iter/s)": 0.428216 + }, + { + "epoch": 2.31424, + "grad_norm": 0.6921590237774733, + "learning_rate": 6.809384241286856e-07, + "loss": 0.29976364970207214, + "memory(GiB)": 77.0, + "step": 7232, + "token_acc": 0.8985024958402662, + "train_speed(iter/s)": 0.428163 + }, + { + "epoch": 2.31456, + "grad_norm": 0.6891099913753805, + "learning_rate": 6.803335924434781e-07, + "loss": 0.2947518229484558, + "memory(GiB)": 77.0, + "step": 7233, + "token_acc": 0.9070594210259014, + "train_speed(iter/s)": 0.42811 + }, + { + "epoch": 2.31488, + "grad_norm": 0.6903367194558976, + "learning_rate": 6.797289871841886e-07, + "loss": 0.3381298780441284, + "memory(GiB)": 77.0, + "step": 7234, + "token_acc": 
0.841188336695397, + "train_speed(iter/s)": 0.428069 + }, + { + "epoch": 2.3152, + "grad_norm": 0.7031760365858124, + "learning_rate": 6.791246084260494e-07, + "loss": 0.2682584524154663, + "memory(GiB)": 77.0, + "step": 7235, + "token_acc": 0.9171296296296296, + "train_speed(iter/s)": 0.428016 + }, + { + "epoch": 2.3155200000000002, + "grad_norm": 0.7748714091032765, + "learning_rate": 6.785204562442655e-07, + "loss": 0.37311112880706787, + "memory(GiB)": 77.0, + "step": 7236, + "token_acc": 0.8891252326508907, + "train_speed(iter/s)": 0.427974 + }, + { + "epoch": 2.31584, + "grad_norm": 0.7101406952233743, + "learning_rate": 6.779165307140129e-07, + "loss": 0.25724008679389954, + "memory(GiB)": 77.0, + "step": 7237, + "token_acc": 0.9186540346292061, + "train_speed(iter/s)": 0.427925 + }, + { + "epoch": 2.31616, + "grad_norm": 0.7566881211262123, + "learning_rate": 6.773128319104394e-07, + "loss": 0.3281799554824829, + "memory(GiB)": 77.0, + "step": 7238, + "token_acc": 0.9216032266196118, + "train_speed(iter/s)": 0.427879 + }, + { + "epoch": 2.31648, + "grad_norm": 0.6687655713201025, + "learning_rate": 6.767093599086644e-07, + "loss": 0.2806682288646698, + "memory(GiB)": 77.0, + "step": 7239, + "token_acc": 0.9493670886075949, + "train_speed(iter/s)": 0.427827 + }, + { + "epoch": 2.3168, + "grad_norm": 0.6618643315720701, + "learning_rate": 6.761061147837808e-07, + "loss": 0.27787938714027405, + "memory(GiB)": 77.0, + "step": 7240, + "token_acc": 0.9436060365369341, + "train_speed(iter/s)": 0.427778 + }, + { + "epoch": 2.31712, + "grad_norm": 0.7185376568703444, + "learning_rate": 6.755030966108517e-07, + "loss": 0.26986753940582275, + "memory(GiB)": 77.0, + "step": 7241, + "token_acc": 0.917093142272262, + "train_speed(iter/s)": 0.427738 + }, + { + "epoch": 2.31744, + "grad_norm": 0.6835710459332713, + "learning_rate": 6.749003054649125e-07, + "loss": 0.27138274908065796, + "memory(GiB)": 77.0, + "step": 7242, + "token_acc": 0.9006313477902828, + "train_speed(iter/s)": 0.427698 + }, + { + "epoch": 2.31776, + "grad_norm": 0.7873803221045677, + "learning_rate": 6.742977414209686e-07, + "loss": 0.2956780195236206, + "memory(GiB)": 77.0, + "step": 7243, + "token_acc": 0.9219835754895768, + "train_speed(iter/s)": 0.427653 + }, + { + "epoch": 2.31808, + "grad_norm": 0.7487428037031821, + "learning_rate": 6.736954045539997e-07, + "loss": 0.3649539053440094, + "memory(GiB)": 77.0, + "step": 7244, + "token_acc": 0.8499031633311814, + "train_speed(iter/s)": 0.42761 + }, + { + "epoch": 2.3184, + "grad_norm": 0.782238676246106, + "learning_rate": 6.730932949389562e-07, + "loss": 0.34205079078674316, + "memory(GiB)": 77.0, + "step": 7245, + "token_acc": 0.8656387665198237, + "train_speed(iter/s)": 0.427569 + }, + { + "epoch": 2.31872, + "grad_norm": 0.7151680691543687, + "learning_rate": 6.724914126507601e-07, + "loss": 0.2512330710887909, + "memory(GiB)": 77.0, + "step": 7246, + "token_acc": 0.8428538120774073, + "train_speed(iter/s)": 0.427524 + }, + { + "epoch": 2.31904, + "grad_norm": 0.7095962018146451, + "learning_rate": 6.718897577643052e-07, + "loss": 0.28166788816452026, + "memory(GiB)": 77.0, + "step": 7247, + "token_acc": 0.8611872146118722, + "train_speed(iter/s)": 0.427481 + }, + { + "epoch": 2.31936, + "grad_norm": 0.7402522034573643, + "learning_rate": 6.712883303544568e-07, + "loss": 0.27703163027763367, + "memory(GiB)": 77.0, + "step": 7248, + "token_acc": 0.863262508567512, + "train_speed(iter/s)": 0.427442 + }, + { + "epoch": 2.31968, + "grad_norm": 0.6963074823559077, + 
"learning_rate": 6.706871304960521e-07, + "loss": 0.33586403727531433, + "memory(GiB)": 77.0, + "step": 7249, + "token_acc": 0.9286592865928659, + "train_speed(iter/s)": 0.4274 + }, + { + "epoch": 2.32, + "grad_norm": 0.6859741609556577, + "learning_rate": 6.700861582639003e-07, + "loss": 0.3078482747077942, + "memory(GiB)": 77.0, + "step": 7250, + "token_acc": 0.9280852994555354, + "train_speed(iter/s)": 0.42735 + }, + { + "epoch": 2.32032, + "grad_norm": 0.7431734412696923, + "learning_rate": 6.694854137327822e-07, + "loss": 0.3425975441932678, + "memory(GiB)": 77.0, + "step": 7251, + "token_acc": 0.905492923962581, + "train_speed(iter/s)": 0.427305 + }, + { + "epoch": 2.32064, + "grad_norm": 0.7334989632497364, + "learning_rate": 6.688848969774491e-07, + "loss": 0.32832300662994385, + "memory(GiB)": 77.0, + "step": 7252, + "token_acc": 0.9156540385989993, + "train_speed(iter/s)": 0.427265 + }, + { + "epoch": 2.32096, + "grad_norm": 0.7782090839340516, + "learning_rate": 6.68284608072626e-07, + "loss": 0.3402315080165863, + "memory(GiB)": 77.0, + "step": 7253, + "token_acc": 0.9027093596059114, + "train_speed(iter/s)": 0.427217 + }, + { + "epoch": 2.32128, + "grad_norm": 0.6394964590220759, + "learning_rate": 6.676845470930076e-07, + "loss": 0.2850295901298523, + "memory(GiB)": 77.0, + "step": 7254, + "token_acc": 0.8761498765986089, + "train_speed(iter/s)": 0.427172 + }, + { + "epoch": 2.3216, + "grad_norm": 0.720254016971378, + "learning_rate": 6.670847141132616e-07, + "loss": 0.3086493909358978, + "memory(GiB)": 77.0, + "step": 7255, + "token_acc": 0.9105600445806631, + "train_speed(iter/s)": 0.427122 + }, + { + "epoch": 2.32192, + "grad_norm": 0.811833935474171, + "learning_rate": 6.664851092080258e-07, + "loss": 0.3296142816543579, + "memory(GiB)": 77.0, + "step": 7256, + "token_acc": 0.9194056731202161, + "train_speed(iter/s)": 0.427083 + }, + { + "epoch": 2.32224, + "grad_norm": 0.7709982659546241, + "learning_rate": 6.658857324519127e-07, + "loss": 0.30925220251083374, + "memory(GiB)": 77.0, + "step": 7257, + "token_acc": 0.9206762028608583, + "train_speed(iter/s)": 0.427044 + }, + { + "epoch": 2.32256, + "grad_norm": 0.7319112352987199, + "learning_rate": 6.652865839195025e-07, + "loss": 0.2990841269493103, + "memory(GiB)": 77.0, + "step": 7258, + "token_acc": 0.9554195804195804, + "train_speed(iter/s)": 0.426999 + }, + { + "epoch": 2.32288, + "grad_norm": 0.7204540516108365, + "learning_rate": 6.646876636853494e-07, + "loss": 0.3868609070777893, + "memory(GiB)": 77.0, + "step": 7259, + "token_acc": 0.9007001909611713, + "train_speed(iter/s)": 0.426957 + }, + { + "epoch": 2.3232, + "grad_norm": 0.7567773594724744, + "learning_rate": 6.640889718239788e-07, + "loss": 0.37352707982063293, + "memory(GiB)": 77.0, + "step": 7260, + "token_acc": 0.9388367729831144, + "train_speed(iter/s)": 0.426917 + }, + { + "epoch": 2.32352, + "grad_norm": 0.6620699871182077, + "learning_rate": 6.634905084098869e-07, + "loss": 0.2882887125015259, + "memory(GiB)": 77.0, + "step": 7261, + "token_acc": 0.8761752350470094, + "train_speed(iter/s)": 0.426874 + }, + { + "epoch": 2.32384, + "grad_norm": 0.7958992934446198, + "learning_rate": 6.62892273517543e-07, + "loss": 0.27469348907470703, + "memory(GiB)": 77.0, + "step": 7262, + "token_acc": 0.9536271808999082, + "train_speed(iter/s)": 0.426833 + }, + { + "epoch": 2.32416, + "grad_norm": 0.706800108063189, + "learning_rate": 6.622942672213864e-07, + "loss": 0.31015142798423767, + "memory(GiB)": 77.0, + "step": 7263, + "token_acc": 0.9372406639004149, + 
"train_speed(iter/s)": 0.426792 + }, + { + "epoch": 2.32448, + "grad_norm": 0.7338125941777983, + "learning_rate": 6.616964895958288e-07, + "loss": 0.2577506899833679, + "memory(GiB)": 77.0, + "step": 7264, + "token_acc": 0.897334089619966, + "train_speed(iter/s)": 0.426752 + }, + { + "epoch": 2.3247999999999998, + "grad_norm": 0.7192771279107859, + "learning_rate": 6.610989407152534e-07, + "loss": 0.35418009757995605, + "memory(GiB)": 77.0, + "step": 7265, + "token_acc": 0.9082087595069499, + "train_speed(iter/s)": 0.426709 + }, + { + "epoch": 2.32512, + "grad_norm": 0.7339619360251475, + "learning_rate": 6.605016206540146e-07, + "loss": 0.2860598564147949, + "memory(GiB)": 77.0, + "step": 7266, + "token_acc": 0.9092746009294808, + "train_speed(iter/s)": 0.426661 + }, + { + "epoch": 2.32544, + "grad_norm": 0.715751580535017, + "learning_rate": 6.599045294864387e-07, + "loss": 0.20026522874832153, + "memory(GiB)": 77.0, + "step": 7267, + "token_acc": 0.9476102941176471, + "train_speed(iter/s)": 0.426616 + }, + { + "epoch": 2.32576, + "grad_norm": 0.7149923012637922, + "learning_rate": 6.593076672868229e-07, + "loss": 0.3128669857978821, + "memory(GiB)": 77.0, + "step": 7268, + "token_acc": 0.8992146596858639, + "train_speed(iter/s)": 0.426576 + }, + { + "epoch": 2.32608, + "grad_norm": 0.6931285456088695, + "learning_rate": 6.58711034129437e-07, + "loss": 0.32487165927886963, + "memory(GiB)": 77.0, + "step": 7269, + "token_acc": 0.8988741044012283, + "train_speed(iter/s)": 0.42653 + }, + { + "epoch": 2.3264, + "grad_norm": 0.7312555383496397, + "learning_rate": 6.581146300885211e-07, + "loss": 0.3358401656150818, + "memory(GiB)": 77.0, + "step": 7270, + "token_acc": 0.8736059479553904, + "train_speed(iter/s)": 0.426492 + }, + { + "epoch": 2.32672, + "grad_norm": 0.6745225490081137, + "learning_rate": 6.575184552382877e-07, + "loss": 0.24213901162147522, + "memory(GiB)": 77.0, + "step": 7271, + "token_acc": 0.9705035971223022, + "train_speed(iter/s)": 0.426452 + }, + { + "epoch": 2.32704, + "grad_norm": 0.6268927914532172, + "learning_rate": 6.5692250965292e-07, + "loss": 0.2223791927099228, + "memory(GiB)": 77.0, + "step": 7272, + "token_acc": 0.9532742155525239, + "train_speed(iter/s)": 0.42641 + }, + { + "epoch": 2.32736, + "grad_norm": 0.7279217025121806, + "learning_rate": 6.563267934065737e-07, + "loss": 0.25525665283203125, + "memory(GiB)": 77.0, + "step": 7273, + "token_acc": 0.9000672947510094, + "train_speed(iter/s)": 0.426372 + }, + { + "epoch": 2.32768, + "grad_norm": 0.7466987639287302, + "learning_rate": 6.557313065733748e-07, + "loss": 0.28657954931259155, + "memory(GiB)": 77.0, + "step": 7274, + "token_acc": 0.8847834723877632, + "train_speed(iter/s)": 0.426331 + }, + { + "epoch": 2.328, + "grad_norm": 1.391554546506411, + "learning_rate": 6.551360492274217e-07, + "loss": 0.30703043937683105, + "memory(GiB)": 77.0, + "step": 7275, + "token_acc": 0.9412935323383085, + "train_speed(iter/s)": 0.426284 + }, + { + "epoch": 2.32832, + "grad_norm": 0.6907841634309717, + "learning_rate": 6.545410214427833e-07, + "loss": 0.3254343867301941, + "memory(GiB)": 77.0, + "step": 7276, + "token_acc": 0.904746317512275, + "train_speed(iter/s)": 0.426237 + }, + { + "epoch": 2.32864, + "grad_norm": 0.6868662216666007, + "learning_rate": 6.539462232935012e-07, + "loss": 0.2614477574825287, + "memory(GiB)": 77.0, + "step": 7277, + "token_acc": 0.8857395925597874, + "train_speed(iter/s)": 0.426194 + }, + { + "epoch": 2.32896, + "grad_norm": 0.6391418802113673, + "learning_rate": 
6.533516548535873e-07, + "loss": 0.24282322824001312, + "memory(GiB)": 77.0, + "step": 7278, + "token_acc": 0.9594511570755683, + "train_speed(iter/s)": 0.426156 + }, + { + "epoch": 2.32928, + "grad_norm": 0.6864763807854855, + "learning_rate": 6.527573161970255e-07, + "loss": 0.23378989100456238, + "memory(GiB)": 77.0, + "step": 7279, + "token_acc": 0.9151975683890577, + "train_speed(iter/s)": 0.426113 + }, + { + "epoch": 2.3296, + "grad_norm": 0.670615680934535, + "learning_rate": 6.521632073977704e-07, + "loss": 0.23229506611824036, + "memory(GiB)": 77.0, + "step": 7280, + "token_acc": 0.9277795375356351, + "train_speed(iter/s)": 0.426073 + }, + { + "epoch": 2.32992, + "grad_norm": 0.7573549776119036, + "learning_rate": 6.515693285297495e-07, + "loss": 0.27594929933547974, + "memory(GiB)": 77.0, + "step": 7281, + "token_acc": 0.9381520119225037, + "train_speed(iter/s)": 0.426032 + }, + { + "epoch": 2.33024, + "grad_norm": 0.6789435873134309, + "learning_rate": 6.509756796668598e-07, + "loss": 0.35853248834609985, + "memory(GiB)": 77.0, + "step": 7282, + "token_acc": 0.842737978410206, + "train_speed(iter/s)": 0.425984 + }, + { + "epoch": 2.33056, + "grad_norm": 0.8013972342073248, + "learning_rate": 6.50382260882971e-07, + "loss": 0.300415575504303, + "memory(GiB)": 77.0, + "step": 7283, + "token_acc": 0.8710801393728222, + "train_speed(iter/s)": 0.425941 + }, + { + "epoch": 2.33088, + "grad_norm": 0.6355189068575151, + "learning_rate": 6.49789072251924e-07, + "loss": 0.2768017053604126, + "memory(GiB)": 77.0, + "step": 7284, + "token_acc": 0.8705501618122977, + "train_speed(iter/s)": 0.425891 + }, + { + "epoch": 2.3312, + "grad_norm": 0.7552303962916286, + "learning_rate": 6.491961138475305e-07, + "loss": 0.2747057378292084, + "memory(GiB)": 77.0, + "step": 7285, + "token_acc": 0.8556222707423581, + "train_speed(iter/s)": 0.425852 + }, + { + "epoch": 2.33152, + "grad_norm": 0.752507428535076, + "learning_rate": 6.486033857435739e-07, + "loss": 0.2866218686103821, + "memory(GiB)": 77.0, + "step": 7286, + "token_acc": 0.9447811447811448, + "train_speed(iter/s)": 0.42581 + }, + { + "epoch": 2.33184, + "grad_norm": 0.6442826002026474, + "learning_rate": 6.480108880138092e-07, + "loss": 0.2930188775062561, + "memory(GiB)": 77.0, + "step": 7287, + "token_acc": 0.9446308724832215, + "train_speed(iter/s)": 0.425768 + }, + { + "epoch": 2.33216, + "grad_norm": 0.7916784241371029, + "learning_rate": 6.474186207319622e-07, + "loss": 0.33883607387542725, + "memory(GiB)": 77.0, + "step": 7288, + "token_acc": 0.9613277473412827, + "train_speed(iter/s)": 0.425724 + }, + { + "epoch": 2.33248, + "grad_norm": 0.729458545551691, + "learning_rate": 6.468265839717302e-07, + "loss": 0.35702812671661377, + "memory(GiB)": 77.0, + "step": 7289, + "token_acc": 0.8845671267252195, + "train_speed(iter/s)": 0.425681 + }, + { + "epoch": 2.3327999999999998, + "grad_norm": 0.6891438219348633, + "learning_rate": 6.46234777806782e-07, + "loss": 0.27553755044937134, + "memory(GiB)": 77.0, + "step": 7290, + "token_acc": 0.8815331010452961, + "train_speed(iter/s)": 0.425641 + }, + { + "epoch": 2.33312, + "grad_norm": 0.6638181297534754, + "learning_rate": 6.456432023107581e-07, + "loss": 0.2754989564418793, + "memory(GiB)": 77.0, + "step": 7291, + "token_acc": 0.9292800814042228, + "train_speed(iter/s)": 0.425598 + }, + { + "epoch": 2.33344, + "grad_norm": 0.7852535373409113, + "learning_rate": 6.450518575572693e-07, + "loss": 0.25486254692077637, + "memory(GiB)": 77.0, + "step": 7292, + "token_acc": 0.9378930817610063, + 
"train_speed(iter/s)": 0.42556 + }, + { + "epoch": 2.33376, + "grad_norm": 0.7624541851894957, + "learning_rate": 6.444607436198979e-07, + "loss": 0.38037392497062683, + "memory(GiB)": 77.0, + "step": 7293, + "token_acc": 0.8358916478555305, + "train_speed(iter/s)": 0.42552 + }, + { + "epoch": 2.33408, + "grad_norm": 0.7048860815355261, + "learning_rate": 6.438698605721985e-07, + "loss": 0.2851341664791107, + "memory(GiB)": 77.0, + "step": 7294, + "token_acc": 0.884450784593438, + "train_speed(iter/s)": 0.425472 + }, + { + "epoch": 2.3344, + "grad_norm": 0.6785279843268829, + "learning_rate": 6.432792084876957e-07, + "loss": 0.3130965232849121, + "memory(GiB)": 77.0, + "step": 7295, + "token_acc": 0.8528974739970282, + "train_speed(iter/s)": 0.425426 + }, + { + "epoch": 2.33472, + "grad_norm": 0.7833312014839987, + "learning_rate": 6.426887874398863e-07, + "loss": 0.3650027811527252, + "memory(GiB)": 77.0, + "step": 7296, + "token_acc": 0.8499491353001017, + "train_speed(iter/s)": 0.425384 + }, + { + "epoch": 2.3350400000000002, + "grad_norm": 0.7026657243528593, + "learning_rate": 6.420985975022376e-07, + "loss": 0.32682088017463684, + "memory(GiB)": 77.0, + "step": 7297, + "token_acc": 0.9365171811298777, + "train_speed(iter/s)": 0.425337 + }, + { + "epoch": 2.33536, + "grad_norm": 0.7555140198026249, + "learning_rate": 6.415086387481889e-07, + "loss": 0.29886430501937866, + "memory(GiB)": 77.0, + "step": 7298, + "token_acc": 0.9279411764705883, + "train_speed(iter/s)": 0.425293 + }, + { + "epoch": 2.33568, + "grad_norm": 0.675960646088449, + "learning_rate": 6.409189112511499e-07, + "loss": 0.3192295730113983, + "memory(GiB)": 77.0, + "step": 7299, + "token_acc": 0.8741704273958056, + "train_speed(iter/s)": 0.425253 + }, + { + "epoch": 2.336, + "grad_norm": 0.7045397292895131, + "learning_rate": 6.40329415084503e-07, + "loss": 0.3442401885986328, + "memory(GiB)": 77.0, + "step": 7300, + "token_acc": 0.9054447680932108, + "train_speed(iter/s)": 0.425205 + }, + { + "epoch": 2.33632, + "grad_norm": 0.6703643755601643, + "learning_rate": 6.397401503215992e-07, + "loss": 0.26340198516845703, + "memory(GiB)": 77.0, + "step": 7301, + "token_acc": 0.9192835028748342, + "train_speed(iter/s)": 0.425159 + }, + { + "epoch": 2.33664, + "grad_norm": 0.742250733695117, + "learning_rate": 6.391511170357622e-07, + "loss": 0.34126392006874084, + "memory(GiB)": 77.0, + "step": 7302, + "token_acc": 0.9285198555956679, + "train_speed(iter/s)": 0.425118 + }, + { + "epoch": 2.33696, + "grad_norm": 0.6738533406392234, + "learning_rate": 6.385623153002884e-07, + "loss": 0.31030893325805664, + "memory(GiB)": 77.0, + "step": 7303, + "token_acc": 0.9511426319936959, + "train_speed(iter/s)": 0.425071 + }, + { + "epoch": 2.33728, + "grad_norm": 0.7624168825390794, + "learning_rate": 6.379737451884435e-07, + "loss": 0.3446323275566101, + "memory(GiB)": 77.0, + "step": 7304, + "token_acc": 0.9288379530916845, + "train_speed(iter/s)": 0.42503 + }, + { + "epoch": 2.3376, + "grad_norm": 0.690849881297257, + "learning_rate": 6.373854067734647e-07, + "loss": 0.2726516127586365, + "memory(GiB)": 77.0, + "step": 7305, + "token_acc": 0.9434961742201294, + "train_speed(iter/s)": 0.424988 + }, + { + "epoch": 2.33792, + "grad_norm": 0.6887842661569634, + "learning_rate": 6.367973001285605e-07, + "loss": 0.3547106385231018, + "memory(GiB)": 77.0, + "step": 7306, + "token_acc": 0.8550646551724138, + "train_speed(iter/s)": 0.424939 + }, + { + "epoch": 2.33824, + "grad_norm": 0.6565787123815815, + "learning_rate": 
6.362094253269105e-07, + "loss": 0.2475699484348297, + "memory(GiB)": 77.0, + "step": 7307, + "token_acc": 0.906734291349676, + "train_speed(iter/s)": 0.424887 + }, + { + "epoch": 2.33856, + "grad_norm": 0.7261008098775228, + "learning_rate": 6.356217824416655e-07, + "loss": 0.31940168142318726, + "memory(GiB)": 77.0, + "step": 7308, + "token_acc": 0.9423807513718869, + "train_speed(iter/s)": 0.424841 + }, + { + "epoch": 2.33888, + "grad_norm": 0.6994293887344036, + "learning_rate": 6.350343715459478e-07, + "loss": 0.3021487593650818, + "memory(GiB)": 77.0, + "step": 7309, + "token_acc": 0.8684016695310582, + "train_speed(iter/s)": 0.424801 + }, + { + "epoch": 2.3392, + "grad_norm": 0.7347710346053584, + "learning_rate": 6.344471927128501e-07, + "loss": 0.28984585404396057, + "memory(GiB)": 77.0, + "step": 7310, + "token_acc": 0.8449848024316109, + "train_speed(iter/s)": 0.424761 + }, + { + "epoch": 2.33952, + "grad_norm": 0.7841850836706307, + "learning_rate": 6.338602460154366e-07, + "loss": 0.3745746612548828, + "memory(GiB)": 77.0, + "step": 7311, + "token_acc": 0.8555929667040778, + "train_speed(iter/s)": 0.424715 + }, + { + "epoch": 2.33984, + "grad_norm": 0.7469465693277343, + "learning_rate": 6.33273531526743e-07, + "loss": 0.4453338384628296, + "memory(GiB)": 77.0, + "step": 7312, + "token_acc": 0.8213891951488423, + "train_speed(iter/s)": 0.424672 + }, + { + "epoch": 2.34016, + "grad_norm": 0.7203481432282126, + "learning_rate": 6.326870493197756e-07, + "loss": 0.2800230383872986, + "memory(GiB)": 77.0, + "step": 7313, + "token_acc": 0.9408945686900958, + "train_speed(iter/s)": 0.424631 + }, + { + "epoch": 2.34048, + "grad_norm": 0.7319436537363002, + "learning_rate": 6.321007994675124e-07, + "loss": 0.30726730823516846, + "memory(GiB)": 77.0, + "step": 7314, + "token_acc": 0.901671974522293, + "train_speed(iter/s)": 0.424591 + }, + { + "epoch": 2.3407999999999998, + "grad_norm": 0.7166308142570397, + "learning_rate": 6.31514782042901e-07, + "loss": 0.28555363416671753, + "memory(GiB)": 77.0, + "step": 7315, + "token_acc": 0.9456159822419534, + "train_speed(iter/s)": 0.424547 + }, + { + "epoch": 2.34112, + "grad_norm": 0.7053325303531833, + "learning_rate": 6.309289971188615e-07, + "loss": 0.255902498960495, + "memory(GiB)": 77.0, + "step": 7316, + "token_acc": 0.9632789317507419, + "train_speed(iter/s)": 0.424496 + }, + { + "epoch": 2.34144, + "grad_norm": 0.6765752553114065, + "learning_rate": 6.303434447682846e-07, + "loss": 0.28725409507751465, + "memory(GiB)": 77.0, + "step": 7317, + "token_acc": 0.9062200956937799, + "train_speed(iter/s)": 0.424448 + }, + { + "epoch": 2.34176, + "grad_norm": 0.781285117942506, + "learning_rate": 6.297581250640325e-07, + "loss": 0.2857213318347931, + "memory(GiB)": 77.0, + "step": 7318, + "token_acc": 0.9133646742780389, + "train_speed(iter/s)": 0.424408 + }, + { + "epoch": 2.34208, + "grad_norm": 0.7639975530858609, + "learning_rate": 6.291730380789371e-07, + "loss": 0.35608044266700745, + "memory(GiB)": 77.0, + "step": 7319, + "token_acc": 0.9007109004739337, + "train_speed(iter/s)": 0.42437 + }, + { + "epoch": 2.3424, + "grad_norm": 0.7617429510644025, + "learning_rate": 6.285881838858038e-07, + "loss": 0.30107495188713074, + "memory(GiB)": 77.0, + "step": 7320, + "token_acc": 0.8880866425992779, + "train_speed(iter/s)": 0.42433 + }, + { + "epoch": 2.34272, + "grad_norm": 0.7633008210083458, + "learning_rate": 6.280035625574071e-07, + "loss": 0.2769657373428345, + "memory(GiB)": 77.0, + "step": 7321, + "token_acc": 0.9490990990990991, + 
"train_speed(iter/s)": 0.424289 + }, + { + "epoch": 2.3430400000000002, + "grad_norm": 0.7033709539254128, + "learning_rate": 6.274191741664926e-07, + "loss": 0.28270626068115234, + "memory(GiB)": 77.0, + "step": 7322, + "token_acc": 0.9261106074342702, + "train_speed(iter/s)": 0.424241 + }, + { + "epoch": 2.34336, + "grad_norm": 0.756918887640566, + "learning_rate": 6.268350187857772e-07, + "loss": 0.30805304646492004, + "memory(GiB)": 77.0, + "step": 7323, + "token_acc": 0.9300758213984835, + "train_speed(iter/s)": 0.424193 + }, + { + "epoch": 2.34368, + "grad_norm": 0.6586712991370803, + "learning_rate": 6.262510964879495e-07, + "loss": 0.3179069459438324, + "memory(GiB)": 77.0, + "step": 7324, + "token_acc": 0.8868909512761021, + "train_speed(iter/s)": 0.424148 + }, + { + "epoch": 2.344, + "grad_norm": 0.853615411104734, + "learning_rate": 6.256674073456681e-07, + "loss": 0.2822341024875641, + "memory(GiB)": 77.0, + "step": 7325, + "token_acc": 0.9117291414752116, + "train_speed(iter/s)": 0.424102 + }, + { + "epoch": 2.34432, + "grad_norm": 0.765577159283577, + "learning_rate": 6.250839514315629e-07, + "loss": 0.2646973431110382, + "memory(GiB)": 77.0, + "step": 7326, + "token_acc": 0.896797153024911, + "train_speed(iter/s)": 0.424064 + }, + { + "epoch": 2.34464, + "grad_norm": 0.753706131653836, + "learning_rate": 6.24500728818235e-07, + "loss": 0.36236897110939026, + "memory(GiB)": 77.0, + "step": 7327, + "token_acc": 0.8488140101972955, + "train_speed(iter/s)": 0.42402 + }, + { + "epoch": 2.34496, + "grad_norm": 0.7346145989417079, + "learning_rate": 6.239177395782567e-07, + "loss": 0.3134596049785614, + "memory(GiB)": 77.0, + "step": 7328, + "token_acc": 0.882718065991506, + "train_speed(iter/s)": 0.423975 + }, + { + "epoch": 2.34528, + "grad_norm": 0.7473551836867139, + "learning_rate": 6.233349837841709e-07, + "loss": 0.33430325984954834, + "memory(GiB)": 77.0, + "step": 7329, + "token_acc": 0.9063640048642075, + "train_speed(iter/s)": 0.42393 + }, + { + "epoch": 2.3456, + "grad_norm": 0.7794878798849454, + "learning_rate": 6.227524615084904e-07, + "loss": 0.28808242082595825, + "memory(GiB)": 77.0, + "step": 7330, + "token_acc": 0.893223819301848, + "train_speed(iter/s)": 0.423889 + }, + { + "epoch": 2.34592, + "grad_norm": 0.682803108554531, + "learning_rate": 6.221701728237008e-07, + "loss": 0.3240036368370056, + "memory(GiB)": 77.0, + "step": 7331, + "token_acc": 0.9042951859325454, + "train_speed(iter/s)": 0.423847 + }, + { + "epoch": 2.34624, + "grad_norm": 0.9205830695030511, + "learning_rate": 6.215881178022578e-07, + "loss": 0.2654793858528137, + "memory(GiB)": 77.0, + "step": 7332, + "token_acc": 0.9280958721704394, + "train_speed(iter/s)": 0.423805 + }, + { + "epoch": 2.34656, + "grad_norm": 0.7113846718654293, + "learning_rate": 6.210062965165877e-07, + "loss": 0.25114452838897705, + "memory(GiB)": 77.0, + "step": 7333, + "token_acc": 0.9055029318899414, + "train_speed(iter/s)": 0.423763 + }, + { + "epoch": 2.34688, + "grad_norm": 0.7740660185666611, + "learning_rate": 6.204247090390885e-07, + "loss": 0.3336244523525238, + "memory(GiB)": 77.0, + "step": 7334, + "token_acc": 0.8760829614071934, + "train_speed(iter/s)": 0.423718 + }, + { + "epoch": 2.3472, + "grad_norm": 0.7447706346384498, + "learning_rate": 6.198433554421285e-07, + "loss": 0.26227864623069763, + "memory(GiB)": 77.0, + "step": 7335, + "token_acc": 0.9513707571801566, + "train_speed(iter/s)": 0.423675 + }, + { + "epoch": 2.34752, + "grad_norm": 0.709306985008759, + "learning_rate": 
6.192622357980462e-07, + "loss": 0.2593643367290497, + "memory(GiB)": 77.0, + "step": 7336, + "token_acc": 0.9288009888751545, + "train_speed(iter/s)": 0.423634 + }, + { + "epoch": 2.34784, + "grad_norm": 0.795076514601858, + "learning_rate": 6.186813501791538e-07, + "loss": 0.354871928691864, + "memory(GiB)": 77.0, + "step": 7337, + "token_acc": 0.8867583212735166, + "train_speed(iter/s)": 0.423596 + }, + { + "epoch": 2.34816, + "grad_norm": 0.8863359670928519, + "learning_rate": 6.181006986577312e-07, + "loss": 0.31536203622817993, + "memory(GiB)": 77.0, + "step": 7338, + "token_acc": 0.8735275883446993, + "train_speed(iter/s)": 0.423554 + }, + { + "epoch": 2.34848, + "grad_norm": 0.7429337789202842, + "learning_rate": 6.175202813060305e-07, + "loss": 0.3000786602497101, + "memory(GiB)": 77.0, + "step": 7339, + "token_acc": 0.8512702893436839, + "train_speed(iter/s)": 0.423514 + }, + { + "epoch": 2.3487999999999998, + "grad_norm": 0.7522616600218881, + "learning_rate": 6.169400981962747e-07, + "loss": 0.29229357838630676, + "memory(GiB)": 77.0, + "step": 7340, + "token_acc": 0.9134101771155468, + "train_speed(iter/s)": 0.423467 + }, + { + "epoch": 2.34912, + "grad_norm": 0.7098575539271136, + "learning_rate": 6.163601494006574e-07, + "loss": 0.31625989079475403, + "memory(GiB)": 77.0, + "step": 7341, + "token_acc": 0.9082139725226542, + "train_speed(iter/s)": 0.423422 + }, + { + "epoch": 2.34944, + "grad_norm": 0.7102603730915116, + "learning_rate": 6.157804349913435e-07, + "loss": 0.33693718910217285, + "memory(GiB)": 77.0, + "step": 7342, + "token_acc": 0.875, + "train_speed(iter/s)": 0.423376 + }, + { + "epoch": 2.34976, + "grad_norm": 0.7412669973276874, + "learning_rate": 6.152009550404686e-07, + "loss": 0.3047869801521301, + "memory(GiB)": 77.0, + "step": 7343, + "token_acc": 0.9210766423357665, + "train_speed(iter/s)": 0.423338 + }, + { + "epoch": 2.35008, + "grad_norm": 0.7217860399343091, + "learning_rate": 6.146217096201376e-07, + "loss": 0.34344154596328735, + "memory(GiB)": 77.0, + "step": 7344, + "token_acc": 0.8522763451130213, + "train_speed(iter/s)": 0.423295 + }, + { + "epoch": 2.3504, + "grad_norm": 0.7600426380394258, + "learning_rate": 6.140426988024287e-07, + "loss": 0.27927860617637634, + "memory(GiB)": 77.0, + "step": 7345, + "token_acc": 0.9555388093443858, + "train_speed(iter/s)": 0.42325 + }, + { + "epoch": 2.35072, + "grad_norm": 0.6576333920158975, + "learning_rate": 6.134639226593894e-07, + "loss": 0.21400108933448792, + "memory(GiB)": 77.0, + "step": 7346, + "token_acc": 0.956482320942883, + "train_speed(iter/s)": 0.423211 + }, + { + "epoch": 2.3510400000000002, + "grad_norm": 0.6962840042216376, + "learning_rate": 6.128853812630384e-07, + "loss": 0.267677903175354, + "memory(GiB)": 77.0, + "step": 7347, + "token_acc": 0.9057948559033158, + "train_speed(iter/s)": 0.42317 + }, + { + "epoch": 2.35136, + "grad_norm": 0.6992109991198054, + "learning_rate": 6.123070746853652e-07, + "loss": 0.3020058274269104, + "memory(GiB)": 77.0, + "step": 7348, + "token_acc": 0.9620123203285421, + "train_speed(iter/s)": 0.423128 + }, + { + "epoch": 2.35168, + "grad_norm": 0.7208629188305041, + "learning_rate": 6.117290029983299e-07, + "loss": 0.35130441188812256, + "memory(GiB)": 77.0, + "step": 7349, + "token_acc": 0.9293055123014637, + "train_speed(iter/s)": 0.423082 + }, + { + "epoch": 2.352, + "grad_norm": 0.7444376622425186, + "learning_rate": 6.111511662738637e-07, + "loss": 0.2920183539390564, + "memory(GiB)": 77.0, + "step": 7350, + "token_acc": 0.8607764390896921, + 
"train_speed(iter/s)": 0.423039 + }, + { + "epoch": 2.35232, + "grad_norm": 0.7438354746403514, + "learning_rate": 6.105735645838681e-07, + "loss": 0.27398163080215454, + "memory(GiB)": 77.0, + "step": 7351, + "token_acc": 0.9182584269662921, + "train_speed(iter/s)": 0.423001 + }, + { + "epoch": 2.35264, + "grad_norm": 0.7556987094129421, + "learning_rate": 6.099961980002158e-07, + "loss": 0.3121117353439331, + "memory(GiB)": 77.0, + "step": 7352, + "token_acc": 0.9106302916274694, + "train_speed(iter/s)": 0.422958 + }, + { + "epoch": 2.35296, + "grad_norm": 0.7029638065154101, + "learning_rate": 6.094190665947491e-07, + "loss": 0.3236364722251892, + "memory(GiB)": 77.0, + "step": 7353, + "token_acc": 0.9027826699542092, + "train_speed(iter/s)": 0.422913 + }, + { + "epoch": 2.35328, + "grad_norm": 0.6870067538239755, + "learning_rate": 6.088421704392839e-07, + "loss": 0.28204768896102905, + "memory(GiB)": 77.0, + "step": 7354, + "token_acc": 0.85828025477707, + "train_speed(iter/s)": 0.422864 + }, + { + "epoch": 2.3536, + "grad_norm": 0.7896098663666726, + "learning_rate": 6.082655096056037e-07, + "loss": 0.2720426023006439, + "memory(GiB)": 77.0, + "step": 7355, + "token_acc": 0.9532273152478953, + "train_speed(iter/s)": 0.422825 + }, + { + "epoch": 2.35392, + "grad_norm": 0.8037277772662391, + "learning_rate": 6.076890841654642e-07, + "loss": 0.36498045921325684, + "memory(GiB)": 77.0, + "step": 7356, + "token_acc": 0.8590489223400616, + "train_speed(iter/s)": 0.422787 + }, + { + "epoch": 2.35424, + "grad_norm": 0.7504053155279076, + "learning_rate": 6.071128941905921e-07, + "loss": 0.3414846956729889, + "memory(GiB)": 77.0, + "step": 7357, + "token_acc": 0.8581030619865572, + "train_speed(iter/s)": 0.422748 + }, + { + "epoch": 2.35456, + "grad_norm": 0.6915937251985155, + "learning_rate": 6.065369397526832e-07, + "loss": 0.3391936123371124, + "memory(GiB)": 77.0, + "step": 7358, + "token_acc": 0.8948926720947447, + "train_speed(iter/s)": 0.422705 + }, + { + "epoch": 2.35488, + "grad_norm": 0.7401811957207352, + "learning_rate": 6.059612209234053e-07, + "loss": 0.27294063568115234, + "memory(GiB)": 77.0, + "step": 7359, + "token_acc": 0.9627293577981652, + "train_speed(iter/s)": 0.422665 + }, + { + "epoch": 2.3552, + "grad_norm": 0.7116155852542017, + "learning_rate": 6.053857377743966e-07, + "loss": 0.2702578008174896, + "memory(GiB)": 77.0, + "step": 7360, + "token_acc": 0.9186529630439536, + "train_speed(iter/s)": 0.422627 + }, + { + "epoch": 2.35552, + "grad_norm": 0.7555543214542706, + "learning_rate": 6.048104903772664e-07, + "loss": 0.3022947311401367, + "memory(GiB)": 77.0, + "step": 7361, + "token_acc": 0.9409993979530403, + "train_speed(iter/s)": 0.422584 + }, + { + "epoch": 2.35584, + "grad_norm": 0.6925798806872161, + "learning_rate": 6.042354788035943e-07, + "loss": 0.36230266094207764, + "memory(GiB)": 77.0, + "step": 7362, + "token_acc": 0.93048128342246, + "train_speed(iter/s)": 0.422543 + }, + { + "epoch": 2.35616, + "grad_norm": 0.7356642825779295, + "learning_rate": 6.036607031249297e-07, + "loss": 0.3914942443370819, + "memory(GiB)": 77.0, + "step": 7363, + "token_acc": 0.845687984496124, + "train_speed(iter/s)": 0.422498 + }, + { + "epoch": 2.35648, + "grad_norm": 0.7660341957723413, + "learning_rate": 6.030861634127944e-07, + "loss": 0.3926534950733185, + "memory(GiB)": 77.0, + "step": 7364, + "token_acc": 0.8974226804123712, + "train_speed(iter/s)": 0.422457 + }, + { + "epoch": 2.3568, + "grad_norm": 0.7695090012881252, + "learning_rate": 6.025118597386795e-07, + 
"loss": 0.35473889112472534, + "memory(GiB)": 77.0, + "step": 7365, + "token_acc": 0.9082404458598726, + "train_speed(iter/s)": 0.422417 + }, + { + "epoch": 2.35712, + "grad_norm": 0.7681183986607779, + "learning_rate": 6.019377921740472e-07, + "loss": 0.30517783761024475, + "memory(GiB)": 77.0, + "step": 7366, + "token_acc": 0.9075050709939148, + "train_speed(iter/s)": 0.422379 + }, + { + "epoch": 2.35744, + "grad_norm": 0.6778395630543219, + "learning_rate": 6.013639607903299e-07, + "loss": 0.2585076093673706, + "memory(GiB)": 77.0, + "step": 7367, + "token_acc": 0.9608315757758361, + "train_speed(iter/s)": 0.422337 + }, + { + "epoch": 2.35776, + "grad_norm": 0.6715800675064437, + "learning_rate": 6.007903656589314e-07, + "loss": 0.23014622926712036, + "memory(GiB)": 77.0, + "step": 7368, + "token_acc": 0.9210105532459226, + "train_speed(iter/s)": 0.422295 + }, + { + "epoch": 2.35808, + "grad_norm": 0.7121730700831297, + "learning_rate": 6.002170068512256e-07, + "loss": 0.29383915662765503, + "memory(GiB)": 77.0, + "step": 7369, + "token_acc": 0.8953316953316953, + "train_speed(iter/s)": 0.42225 + }, + { + "epoch": 2.3584, + "grad_norm": 0.6865566386445965, + "learning_rate": 5.996438844385563e-07, + "loss": 0.2620294690132141, + "memory(GiB)": 77.0, + "step": 7370, + "token_acc": 0.8955934894799523, + "train_speed(iter/s)": 0.422207 + }, + { + "epoch": 2.35872, + "grad_norm": 0.7472861654428675, + "learning_rate": 5.990709984922402e-07, + "loss": 0.2605242133140564, + "memory(GiB)": 77.0, + "step": 7371, + "token_acc": 0.8780938308090137, + "train_speed(iter/s)": 0.422166 + }, + { + "epoch": 2.3590400000000002, + "grad_norm": 0.6923401206484835, + "learning_rate": 5.984983490835625e-07, + "loss": 0.3039708435535431, + "memory(GiB)": 77.0, + "step": 7372, + "token_acc": 0.8277501131733817, + "train_speed(iter/s)": 0.422115 + }, + { + "epoch": 2.35936, + "grad_norm": 0.706204205396765, + "learning_rate": 5.979259362837786e-07, + "loss": 0.3464176058769226, + "memory(GiB)": 77.0, + "step": 7373, + "token_acc": 0.8997140972069496, + "train_speed(iter/s)": 0.42207 + }, + { + "epoch": 2.35968, + "grad_norm": 0.7590109814783014, + "learning_rate": 5.973537601641161e-07, + "loss": 0.35178542137145996, + "memory(GiB)": 77.0, + "step": 7374, + "token_acc": 0.9281961471103327, + "train_speed(iter/s)": 0.422023 + }, + { + "epoch": 2.36, + "grad_norm": 0.6492268892042693, + "learning_rate": 5.967818207957717e-07, + "loss": 0.29118412733078003, + "memory(GiB)": 77.0, + "step": 7375, + "token_acc": 0.8939795531995456, + "train_speed(iter/s)": 0.421975 + }, + { + "epoch": 2.3603199999999998, + "grad_norm": 0.6940605528032181, + "learning_rate": 5.962101182499142e-07, + "loss": 0.3155616819858551, + "memory(GiB)": 77.0, + "step": 7376, + "token_acc": 0.9056338028169014, + "train_speed(iter/s)": 0.421934 + }, + { + "epoch": 2.36064, + "grad_norm": 0.7784952731828145, + "learning_rate": 5.956386525976815e-07, + "loss": 0.34916195273399353, + "memory(GiB)": 77.0, + "step": 7377, + "token_acc": 0.9168900804289544, + "train_speed(iter/s)": 0.421886 + }, + { + "epoch": 2.36096, + "grad_norm": 0.7285628926628879, + "learning_rate": 5.950674239101828e-07, + "loss": 0.3732794523239136, + "memory(GiB)": 77.0, + "step": 7378, + "token_acc": 0.8782161234991424, + "train_speed(iter/s)": 0.421845 + }, + { + "epoch": 2.36128, + "grad_norm": 0.7388713940213686, + "learning_rate": 5.944964322584976e-07, + "loss": 0.2682355046272278, + "memory(GiB)": 77.0, + "step": 7379, + "token_acc": 0.9086453636541855, + 
"train_speed(iter/s)": 0.421808 + }, + { + "epoch": 2.3616, + "grad_norm": 0.7982461002108708, + "learning_rate": 5.939256777136757e-07, + "loss": 0.3034934103488922, + "memory(GiB)": 77.0, + "step": 7380, + "token_acc": 0.8554526748971193, + "train_speed(iter/s)": 0.421768 + }, + { + "epoch": 2.36192, + "grad_norm": 0.7339812520667118, + "learning_rate": 5.933551603467378e-07, + "loss": 0.3477911949157715, + "memory(GiB)": 77.0, + "step": 7381, + "token_acc": 0.9228464419475655, + "train_speed(iter/s)": 0.421724 + }, + { + "epoch": 2.36224, + "grad_norm": 0.7180591472433301, + "learning_rate": 5.927848802286748e-07, + "loss": 0.23751281201839447, + "memory(GiB)": 77.0, + "step": 7382, + "token_acc": 0.9551743220807969, + "train_speed(iter/s)": 0.421682 + }, + { + "epoch": 2.36256, + "grad_norm": 0.7171041240443393, + "learning_rate": 5.922148374304484e-07, + "loss": 0.2499302625656128, + "memory(GiB)": 77.0, + "step": 7383, + "token_acc": 0.8888888888888888, + "train_speed(iter/s)": 0.421643 + }, + { + "epoch": 2.36288, + "grad_norm": 0.6591835278058444, + "learning_rate": 5.916450320229902e-07, + "loss": 0.33871597051620483, + "memory(GiB)": 77.0, + "step": 7384, + "token_acc": 0.8921786331112755, + "train_speed(iter/s)": 0.4216 + }, + { + "epoch": 2.3632, + "grad_norm": 0.6935916611409971, + "learning_rate": 5.910754640772029e-07, + "loss": 0.2485114336013794, + "memory(GiB)": 77.0, + "step": 7385, + "token_acc": 0.956687898089172, + "train_speed(iter/s)": 0.421562 + }, + { + "epoch": 2.36352, + "grad_norm": 0.7505629733087081, + "learning_rate": 5.90506133663959e-07, + "loss": 0.31227290630340576, + "memory(GiB)": 77.0, + "step": 7386, + "token_acc": 0.8549141965678627, + "train_speed(iter/s)": 0.421522 + }, + { + "epoch": 2.36384, + "grad_norm": 0.8155378912673785, + "learning_rate": 5.899370408541022e-07, + "loss": 0.31615734100341797, + "memory(GiB)": 77.0, + "step": 7387, + "token_acc": 0.9612034837688044, + "train_speed(iter/s)": 0.421481 + }, + { + "epoch": 2.36416, + "grad_norm": 0.725134333086124, + "learning_rate": 5.893681857184457e-07, + "loss": 0.3306483328342438, + "memory(GiB)": 77.0, + "step": 7388, + "token_acc": 0.8950887710398894, + "train_speed(iter/s)": 0.421443 + }, + { + "epoch": 2.36448, + "grad_norm": 0.7195778762733924, + "learning_rate": 5.887995683277744e-07, + "loss": 0.3321485221385956, + "memory(GiB)": 77.0, + "step": 7389, + "token_acc": 0.9367904265111997, + "train_speed(iter/s)": 0.421403 + }, + { + "epoch": 2.3648, + "grad_norm": 0.6917693546619953, + "learning_rate": 5.882311887528422e-07, + "loss": 0.31640854477882385, + "memory(GiB)": 77.0, + "step": 7390, + "token_acc": 0.8927120669056153, + "train_speed(iter/s)": 0.421362 + }, + { + "epoch": 2.36512, + "grad_norm": 0.7545044003977054, + "learning_rate": 5.876630470643746e-07, + "loss": 0.284255713224411, + "memory(GiB)": 77.0, + "step": 7391, + "token_acc": 0.8916846864556713, + "train_speed(iter/s)": 0.421325 + }, + { + "epoch": 2.36544, + "grad_norm": 0.8224212027153516, + "learning_rate": 5.870951433330663e-07, + "loss": 0.2676137685775757, + "memory(GiB)": 77.0, + "step": 7392, + "token_acc": 0.9119625621526762, + "train_speed(iter/s)": 0.421283 + }, + { + "epoch": 2.36576, + "grad_norm": 0.6282208115153485, + "learning_rate": 5.865274776295838e-07, + "loss": 0.2197226732969284, + "memory(GiB)": 77.0, + "step": 7393, + "token_acc": 0.8900871959614503, + "train_speed(iter/s)": 0.421246 + }, + { + "epoch": 2.36608, + "grad_norm": 0.6411496730512205, + "learning_rate": 5.859600500245627e-07, + 
"loss": 0.27774813771247864, + "memory(GiB)": 77.0, + "step": 7394, + "token_acc": 0.9113924050632911, + "train_speed(iter/s)": 0.421201 + }, + { + "epoch": 2.3664, + "grad_norm": 0.7335174704550642, + "learning_rate": 5.8539286058861e-07, + "loss": 0.24329660832881927, + "memory(GiB)": 77.0, + "step": 7395, + "token_acc": 0.9123466065614826, + "train_speed(iter/s)": 0.421157 + }, + { + "epoch": 2.36672, + "grad_norm": 0.7251198141280303, + "learning_rate": 5.84825909392302e-07, + "loss": 0.3620013892650604, + "memory(GiB)": 77.0, + "step": 7396, + "token_acc": 0.9142106798949519, + "train_speed(iter/s)": 0.421116 + }, + { + "epoch": 2.36704, + "grad_norm": 0.6851698850785469, + "learning_rate": 5.842591965061864e-07, + "loss": 0.30555975437164307, + "memory(GiB)": 77.0, + "step": 7397, + "token_acc": 0.9113888099971599, + "train_speed(iter/s)": 0.421078 + }, + { + "epoch": 2.36736, + "grad_norm": 0.739165784826479, + "learning_rate": 5.836927220007807e-07, + "loss": 0.407481849193573, + "memory(GiB)": 77.0, + "step": 7398, + "token_acc": 0.8779360800924143, + "train_speed(iter/s)": 0.421027 + }, + { + "epoch": 2.36768, + "grad_norm": 0.7037332952390958, + "learning_rate": 5.83126485946573e-07, + "loss": 0.29421132802963257, + "memory(GiB)": 77.0, + "step": 7399, + "token_acc": 0.9410739191073919, + "train_speed(iter/s)": 0.420987 + }, + { + "epoch": 2.368, + "grad_norm": 0.6313539942093317, + "learning_rate": 5.82560488414021e-07, + "loss": 0.2658388614654541, + "memory(GiB)": 77.0, + "step": 7400, + "token_acc": 0.938259360277709, + "train_speed(iter/s)": 0.420949 + }, + { + "epoch": 2.3683199999999998, + "grad_norm": 0.8112537672308703, + "learning_rate": 5.819947294735537e-07, + "loss": 0.2952252924442291, + "memory(GiB)": 77.0, + "step": 7401, + "token_acc": 0.8692008992438177, + "train_speed(iter/s)": 0.420904 + }, + { + "epoch": 2.36864, + "grad_norm": 0.6599167381362727, + "learning_rate": 5.814292091955701e-07, + "loss": 0.2681121826171875, + "memory(GiB)": 77.0, + "step": 7402, + "token_acc": 0.9318005683285973, + "train_speed(iter/s)": 0.420864 + }, + { + "epoch": 2.36896, + "grad_norm": 0.7706865670235036, + "learning_rate": 5.808639276504391e-07, + "loss": 0.330768346786499, + "memory(GiB)": 77.0, + "step": 7403, + "token_acc": 0.9399836132732486, + "train_speed(iter/s)": 0.420822 + }, + { + "epoch": 2.36928, + "grad_norm": 0.7153557031574538, + "learning_rate": 5.802988849085001e-07, + "loss": 0.27948859333992004, + "memory(GiB)": 77.0, + "step": 7404, + "token_acc": 0.9433221099887766, + "train_speed(iter/s)": 0.420774 + }, + { + "epoch": 2.3696, + "grad_norm": 0.8028217220930897, + "learning_rate": 5.797340810400632e-07, + "loss": 0.2657395601272583, + "memory(GiB)": 77.0, + "step": 7405, + "token_acc": 0.9561838981221671, + "train_speed(iter/s)": 0.420735 + }, + { + "epoch": 2.36992, + "grad_norm": 0.7150534265800398, + "learning_rate": 5.791695161154082e-07, + "loss": 0.30680593848228455, + "memory(GiB)": 77.0, + "step": 7406, + "token_acc": 0.965840220385675, + "train_speed(iter/s)": 0.420696 + }, + { + "epoch": 2.37024, + "grad_norm": 0.7055861397728532, + "learning_rate": 5.786051902047854e-07, + "loss": 0.279861718416214, + "memory(GiB)": 77.0, + "step": 7407, + "token_acc": 0.9352226720647774, + "train_speed(iter/s)": 0.420658 + }, + { + "epoch": 2.3705600000000002, + "grad_norm": 0.6817986608170937, + "learning_rate": 5.780411033784156e-07, + "loss": 0.2388574182987213, + "memory(GiB)": 77.0, + "step": 7408, + "token_acc": 0.8889179984280848, + 
"train_speed(iter/s)": 0.420616 + }, + { + "epoch": 2.37088, + "grad_norm": 0.8416565461463279, + "learning_rate": 5.774772557064895e-07, + "loss": 0.4309021234512329, + "memory(GiB)": 77.0, + "step": 7409, + "token_acc": 0.9562894609033511, + "train_speed(iter/s)": 0.420576 + }, + { + "epoch": 2.3712, + "grad_norm": 0.7053952065493097, + "learning_rate": 5.76913647259168e-07, + "loss": 0.3137297034263611, + "memory(GiB)": 77.0, + "step": 7410, + "token_acc": 0.9463154384794932, + "train_speed(iter/s)": 0.420534 + }, + { + "epoch": 2.37152, + "grad_norm": 0.756025969298257, + "learning_rate": 5.763502781065827e-07, + "loss": 0.31972038745880127, + "memory(GiB)": 77.0, + "step": 7411, + "token_acc": 0.9403353057199211, + "train_speed(iter/s)": 0.420496 + }, + { + "epoch": 2.37184, + "grad_norm": 0.718323967207399, + "learning_rate": 5.757871483188349e-07, + "loss": 0.2614045739173889, + "memory(GiB)": 77.0, + "step": 7412, + "token_acc": 0.9444839857651246, + "train_speed(iter/s)": 0.420458 + }, + { + "epoch": 2.37216, + "grad_norm": 0.7904033031860075, + "learning_rate": 5.752242579659961e-07, + "loss": 0.3399757742881775, + "memory(GiB)": 77.0, + "step": 7413, + "token_acc": 0.8965340179717587, + "train_speed(iter/s)": 0.420416 + }, + { + "epoch": 2.37248, + "grad_norm": 0.7120427635612491, + "learning_rate": 5.746616071181088e-07, + "loss": 0.26787522435188293, + "memory(GiB)": 77.0, + "step": 7414, + "token_acc": 0.9244090159428258, + "train_speed(iter/s)": 0.420375 + }, + { + "epoch": 2.3728, + "grad_norm": 0.6911379025772451, + "learning_rate": 5.740991958451858e-07, + "loss": 0.2524358034133911, + "memory(GiB)": 77.0, + "step": 7415, + "token_acc": 0.8836161187698833, + "train_speed(iter/s)": 0.420332 + }, + { + "epoch": 2.37312, + "grad_norm": 0.7512813721420206, + "learning_rate": 5.735370242172067e-07, + "loss": 0.2600811719894409, + "memory(GiB)": 77.0, + "step": 7416, + "token_acc": 0.95250190500381, + "train_speed(iter/s)": 0.420294 + }, + { + "epoch": 2.37344, + "grad_norm": 0.6867468535459994, + "learning_rate": 5.729750923041267e-07, + "loss": 0.22256243228912354, + "memory(GiB)": 77.0, + "step": 7417, + "token_acc": 0.8910095429432446, + "train_speed(iter/s)": 0.420258 + }, + { + "epoch": 2.37376, + "grad_norm": 0.905541201980142, + "learning_rate": 5.724134001758675e-07, + "loss": 0.3335968852043152, + "memory(GiB)": 77.0, + "step": 7418, + "token_acc": 0.916243654822335, + "train_speed(iter/s)": 0.420216 + }, + { + "epoch": 2.37408, + "grad_norm": 0.6918867950285816, + "learning_rate": 5.718519479023221e-07, + "loss": 0.2580912113189697, + "memory(GiB)": 77.0, + "step": 7419, + "token_acc": 0.9141119837851532, + "train_speed(iter/s)": 0.420176 + }, + { + "epoch": 2.3744, + "grad_norm": 0.7251318957686749, + "learning_rate": 5.712907355533534e-07, + "loss": 0.31670212745666504, + "memory(GiB)": 77.0, + "step": 7420, + "token_acc": 0.9531093279839519, + "train_speed(iter/s)": 0.420133 + }, + { + "epoch": 2.37472, + "grad_norm": 0.7238976806681784, + "learning_rate": 5.707297631987943e-07, + "loss": 0.3870544731616974, + "memory(GiB)": 77.0, + "step": 7421, + "token_acc": 0.8965026433509556, + "train_speed(iter/s)": 0.420094 + }, + { + "epoch": 2.37504, + "grad_norm": 0.7329575350460155, + "learning_rate": 5.701690309084487e-07, + "loss": 0.3283689022064209, + "memory(GiB)": 77.0, + "step": 7422, + "token_acc": 0.9158455392809587, + "train_speed(iter/s)": 0.420052 + }, + { + "epoch": 2.37536, + "grad_norm": 0.7684889913846926, + "learning_rate": 5.696085387520894e-07, + 
"loss": 0.3249148726463318, + "memory(GiB)": 77.0, + "step": 7423, + "token_acc": 0.9275862068965517, + "train_speed(iter/s)": 0.420014 + }, + { + "epoch": 2.37568, + "grad_norm": 0.7289156996335577, + "learning_rate": 5.690482867994604e-07, + "loss": 0.3485022485256195, + "memory(GiB)": 77.0, + "step": 7424, + "token_acc": 0.90625, + "train_speed(iter/s)": 0.419975 + }, + { + "epoch": 2.376, + "grad_norm": 0.6884459899221519, + "learning_rate": 5.684882751202747e-07, + "loss": 0.259756863117218, + "memory(GiB)": 77.0, + "step": 7425, + "token_acc": 0.873046875, + "train_speed(iter/s)": 0.419937 + }, + { + "epoch": 2.3763199999999998, + "grad_norm": 0.7338986045058578, + "learning_rate": 5.679285037842169e-07, + "loss": 0.3107150197029114, + "memory(GiB)": 77.0, + "step": 7426, + "token_acc": 0.8952504879635654, + "train_speed(iter/s)": 0.419901 + }, + { + "epoch": 2.37664, + "grad_norm": 0.6850903003492731, + "learning_rate": 5.673689728609402e-07, + "loss": 0.2599097192287445, + "memory(GiB)": 77.0, + "step": 7427, + "token_acc": 0.9275929549902152, + "train_speed(iter/s)": 0.419857 + }, + { + "epoch": 2.37696, + "grad_norm": 0.8465575642384545, + "learning_rate": 5.668096824200686e-07, + "loss": 0.37584221363067627, + "memory(GiB)": 77.0, + "step": 7428, + "token_acc": 0.8900774621910734, + "train_speed(iter/s)": 0.419819 + }, + { + "epoch": 2.37728, + "grad_norm": 0.8085000019054573, + "learning_rate": 5.66250632531197e-07, + "loss": 0.36536267399787903, + "memory(GiB)": 77.0, + "step": 7429, + "token_acc": 0.8958333333333334, + "train_speed(iter/s)": 0.419782 + }, + { + "epoch": 2.3776, + "grad_norm": 0.69883629961893, + "learning_rate": 5.656918232638877e-07, + "loss": 0.29146382212638855, + "memory(GiB)": 77.0, + "step": 7430, + "token_acc": 0.9070657065706571, + "train_speed(iter/s)": 0.419739 + }, + { + "epoch": 2.37792, + "grad_norm": 0.8069609804510683, + "learning_rate": 5.651332546876761e-07, + "loss": 0.3205907940864563, + "memory(GiB)": 77.0, + "step": 7431, + "token_acc": 0.9005969374513366, + "train_speed(iter/s)": 0.419699 + }, + { + "epoch": 2.37824, + "grad_norm": 0.7511951497751037, + "learning_rate": 5.645749268720652e-07, + "loss": 0.30570968985557556, + "memory(GiB)": 77.0, + "step": 7432, + "token_acc": 0.9669952850407201, + "train_speed(iter/s)": 0.41966 + }, + { + "epoch": 2.3785600000000002, + "grad_norm": 0.8831267726170188, + "learning_rate": 5.640168398865306e-07, + "loss": 0.42898884415626526, + "memory(GiB)": 77.0, + "step": 7433, + "token_acc": 0.8476190476190476, + "train_speed(iter/s)": 0.419617 + }, + { + "epoch": 2.37888, + "grad_norm": 0.7243503568004532, + "learning_rate": 5.634589938005161e-07, + "loss": 0.3541997969150543, + "memory(GiB)": 77.0, + "step": 7434, + "token_acc": 0.9041176470588236, + "train_speed(iter/s)": 0.419578 + }, + { + "epoch": 2.3792, + "grad_norm": 0.6645695962788131, + "learning_rate": 5.629013886834358e-07, + "loss": 0.279363751411438, + "memory(GiB)": 77.0, + "step": 7435, + "token_acc": 0.9010021999511122, + "train_speed(iter/s)": 0.419536 + }, + { + "epoch": 2.37952, + "grad_norm": 0.7243226176360945, + "learning_rate": 5.623440246046741e-07, + "loss": 0.25056448578834534, + "memory(GiB)": 77.0, + "step": 7436, + "token_acc": 0.9176314918954681, + "train_speed(iter/s)": 0.419498 + }, + { + "epoch": 2.37984, + "grad_norm": 0.7315637440089361, + "learning_rate": 5.617869016335853e-07, + "loss": 0.3587563633918762, + "memory(GiB)": 77.0, + "step": 7437, + "token_acc": 0.884765625, + "train_speed(iter/s)": 0.419457 + }, + { 
+ "epoch": 2.38016, + "grad_norm": 0.642027366784993, + "learning_rate": 5.612300198394935e-07, + "loss": 0.3075825870037079, + "memory(GiB)": 77.0, + "step": 7438, + "token_acc": 0.903169694626981, + "train_speed(iter/s)": 0.419412 + }, + { + "epoch": 2.38048, + "grad_norm": 0.7025814275090531, + "learning_rate": 5.606733792916932e-07, + "loss": 0.2695752680301666, + "memory(GiB)": 77.0, + "step": 7439, + "token_acc": 0.9311456196894228, + "train_speed(iter/s)": 0.419376 + }, + { + "epoch": 2.3808, + "grad_norm": 0.6938654933971529, + "learning_rate": 5.601169800594486e-07, + "loss": 0.2961221933364868, + "memory(GiB)": 77.0, + "step": 7440, + "token_acc": 0.9381633594831564, + "train_speed(iter/s)": 0.419338 + }, + { + "epoch": 2.38112, + "grad_norm": 0.7451954591177204, + "learning_rate": 5.595608222119939e-07, + "loss": 0.3168271481990814, + "memory(GiB)": 77.0, + "step": 7441, + "token_acc": 0.8832028898254064, + "train_speed(iter/s)": 0.419298 + }, + { + "epoch": 2.38144, + "grad_norm": 0.6172170688044205, + "learning_rate": 5.590049058185332e-07, + "loss": 0.32465651631355286, + "memory(GiB)": 77.0, + "step": 7442, + "token_acc": 0.8519540229885058, + "train_speed(iter/s)": 0.419254 + }, + { + "epoch": 2.38176, + "grad_norm": 0.6979322876904467, + "learning_rate": 5.584492309482412e-07, + "loss": 0.21763236820697784, + "memory(GiB)": 77.0, + "step": 7443, + "token_acc": 0.9058823529411765, + "train_speed(iter/s)": 0.419217 + }, + { + "epoch": 2.38208, + "grad_norm": 0.7135034478862703, + "learning_rate": 5.578937976702611e-07, + "loss": 0.31534963846206665, + "memory(GiB)": 77.0, + "step": 7444, + "token_acc": 0.906415343915344, + "train_speed(iter/s)": 0.419176 + }, + { + "epoch": 2.3824, + "grad_norm": 0.7326983443381928, + "learning_rate": 5.573386060537075e-07, + "loss": 0.28908270597457886, + "memory(GiB)": 77.0, + "step": 7445, + "token_acc": 0.9429078014184397, + "train_speed(iter/s)": 0.41914 + }, + { + "epoch": 2.38272, + "grad_norm": 0.7762824959195864, + "learning_rate": 5.56783656167664e-07, + "loss": 0.3714301288127899, + "memory(GiB)": 77.0, + "step": 7446, + "token_acc": 0.9374429223744293, + "train_speed(iter/s)": 0.419102 + }, + { + "epoch": 2.38304, + "grad_norm": 0.6837257074343431, + "learning_rate": 5.562289480811848e-07, + "loss": 0.31574785709381104, + "memory(GiB)": 77.0, + "step": 7447, + "token_acc": 0.9500496195831954, + "train_speed(iter/s)": 0.419061 + }, + { + "epoch": 2.38336, + "grad_norm": 0.7724162014190528, + "learning_rate": 5.556744818632937e-07, + "loss": 0.3142387270927429, + "memory(GiB)": 77.0, + "step": 7448, + "token_acc": 0.9606323620582765, + "train_speed(iter/s)": 0.419013 + }, + { + "epoch": 2.38368, + "grad_norm": 0.7146684445943952, + "learning_rate": 5.551202575829833e-07, + "loss": 0.30649271607398987, + "memory(GiB)": 77.0, + "step": 7449, + "token_acc": 0.9092953523238381, + "train_speed(iter/s)": 0.418973 + }, + { + "epoch": 2.384, + "grad_norm": 0.694295190223078, + "learning_rate": 5.545662753092193e-07, + "loss": 0.18214648962020874, + "memory(GiB)": 77.0, + "step": 7450, + "token_acc": 0.9673088149445418, + "train_speed(iter/s)": 0.418939 + }, + { + "epoch": 2.3843199999999998, + "grad_norm": 0.7139771196811021, + "learning_rate": 5.540125351109341e-07, + "loss": 0.3476520776748657, + "memory(GiB)": 77.0, + "step": 7451, + "token_acc": 0.877197388247112, + "train_speed(iter/s)": 0.418898 + }, + { + "epoch": 2.38464, + "grad_norm": 0.6833014814023659, + "learning_rate": 5.534590370570309e-07, + "loss": 0.3269605040550232, + 
"memory(GiB)": 77.0, + "step": 7452, + "token_acc": 0.9393156875403487, + "train_speed(iter/s)": 0.418856 + }, + { + "epoch": 2.38496, + "grad_norm": 0.8693132098129921, + "learning_rate": 5.529057812163832e-07, + "loss": 0.31742435693740845, + "memory(GiB)": 77.0, + "step": 7453, + "token_acc": 0.9464720194647201, + "train_speed(iter/s)": 0.418815 + }, + { + "epoch": 2.38528, + "grad_norm": 0.7602927404297847, + "learning_rate": 5.523527676578338e-07, + "loss": 0.3637724816799164, + "memory(GiB)": 77.0, + "step": 7454, + "token_acc": 0.8802278518962674, + "train_speed(iter/s)": 0.418776 + }, + { + "epoch": 2.3856, + "grad_norm": 0.6958654245201202, + "learning_rate": 5.51799996450196e-07, + "loss": 0.25309956073760986, + "memory(GiB)": 77.0, + "step": 7455, + "token_acc": 0.9167309175019275, + "train_speed(iter/s)": 0.41873 + }, + { + "epoch": 2.38592, + "grad_norm": 0.6400002503525493, + "learning_rate": 5.512474676622525e-07, + "loss": 0.2728348970413208, + "memory(GiB)": 77.0, + "step": 7456, + "token_acc": 0.9545073960368406, + "train_speed(iter/s)": 0.418691 + }, + { + "epoch": 2.38624, + "grad_norm": 0.6868654846008231, + "learning_rate": 5.506951813627556e-07, + "loss": 0.2902372479438782, + "memory(GiB)": 77.0, + "step": 7457, + "token_acc": 0.8768154922001076, + "train_speed(iter/s)": 0.41865 + }, + { + "epoch": 2.3865600000000002, + "grad_norm": 0.7321062845186546, + "learning_rate": 5.501431376204289e-07, + "loss": 0.309007465839386, + "memory(GiB)": 77.0, + "step": 7458, + "token_acc": 0.9641744548286605, + "train_speed(iter/s)": 0.41861 + }, + { + "epoch": 2.38688, + "grad_norm": 0.6110519751689824, + "learning_rate": 5.495913365039629e-07, + "loss": 0.24788574874401093, + "memory(GiB)": 77.0, + "step": 7459, + "token_acc": 0.9397484644632934, + "train_speed(iter/s)": 0.418568 + }, + { + "epoch": 2.3872, + "grad_norm": 0.8343689967762754, + "learning_rate": 5.490397780820209e-07, + "loss": 0.2990323305130005, + "memory(GiB)": 77.0, + "step": 7460, + "token_acc": 0.9389993145990404, + "train_speed(iter/s)": 0.418526 + }, + { + "epoch": 2.38752, + "grad_norm": 0.675376486133213, + "learning_rate": 5.48488462423234e-07, + "loss": 0.2981470227241516, + "memory(GiB)": 77.0, + "step": 7461, + "token_acc": 0.9408491947291362, + "train_speed(iter/s)": 0.418486 + }, + { + "epoch": 2.38784, + "grad_norm": 0.6856483227498532, + "learning_rate": 5.479373895962045e-07, + "loss": 0.24100346863269806, + "memory(GiB)": 77.0, + "step": 7462, + "token_acc": 0.9137764932562621, + "train_speed(iter/s)": 0.418449 + }, + { + "epoch": 2.38816, + "grad_norm": 0.737328427256785, + "learning_rate": 5.473865596695035e-07, + "loss": 0.2924054265022278, + "memory(GiB)": 77.0, + "step": 7463, + "token_acc": 0.8938339134758826, + "train_speed(iter/s)": 0.418414 + }, + { + "epoch": 2.38848, + "grad_norm": 0.7013715128492981, + "learning_rate": 5.468359727116726e-07, + "loss": 0.339455246925354, + "memory(GiB)": 77.0, + "step": 7464, + "token_acc": 0.8782188841201717, + "train_speed(iter/s)": 0.418369 + }, + { + "epoch": 2.3888, + "grad_norm": 0.6791835761477977, + "learning_rate": 5.462856287912224e-07, + "loss": 0.29249879717826843, + "memory(GiB)": 77.0, + "step": 7465, + "token_acc": 0.9442208165612421, + "train_speed(iter/s)": 0.418329 + }, + { + "epoch": 2.38912, + "grad_norm": 0.6540900784971323, + "learning_rate": 5.45735527976633e-07, + "loss": 0.28988662362098694, + "memory(GiB)": 77.0, + "step": 7466, + "token_acc": 0.8713355048859935, + "train_speed(iter/s)": 0.418289 + }, + { + "epoch": 2.38944, 
+ "grad_norm": 0.7226351525735047, + "learning_rate": 5.451856703363568e-07, + "loss": 0.2629552185535431, + "memory(GiB)": 77.0, + "step": 7467, + "token_acc": 0.965958605664488, + "train_speed(iter/s)": 0.418252 + }, + { + "epoch": 2.38976, + "grad_norm": 0.7852972591120175, + "learning_rate": 5.446360559388125e-07, + "loss": 0.22069543600082397, + "memory(GiB)": 77.0, + "step": 7468, + "token_acc": 0.9173134328358209, + "train_speed(iter/s)": 0.41821 + }, + { + "epoch": 2.39008, + "grad_norm": 0.8290722925551008, + "learning_rate": 5.440866848523907e-07, + "loss": 0.29186463356018066, + "memory(GiB)": 77.0, + "step": 7469, + "token_acc": 0.9120117597648048, + "train_speed(iter/s)": 0.418176 + }, + { + "epoch": 2.3904, + "grad_norm": 0.749749061190044, + "learning_rate": 5.435375571454513e-07, + "loss": 0.28124821186065674, + "memory(GiB)": 77.0, + "step": 7470, + "token_acc": 0.9668752300331248, + "train_speed(iter/s)": 0.418142 + }, + { + "epoch": 2.39072, + "grad_norm": 0.7355169447058828, + "learning_rate": 5.429886728863229e-07, + "loss": 0.3151564598083496, + "memory(GiB)": 77.0, + "step": 7471, + "token_acc": 0.8680107526881721, + "train_speed(iter/s)": 0.418105 + }, + { + "epoch": 2.39104, + "grad_norm": 0.852256628185498, + "learning_rate": 5.424400321433059e-07, + "loss": 0.334623783826828, + "memory(GiB)": 77.0, + "step": 7472, + "token_acc": 0.8737201365187713, + "train_speed(iter/s)": 0.418054 + }, + { + "epoch": 2.39136, + "grad_norm": 0.6658687795778018, + "learning_rate": 5.418916349846678e-07, + "loss": 0.3567931056022644, + "memory(GiB)": 77.0, + "step": 7473, + "token_acc": 0.8473202614379085, + "train_speed(iter/s)": 0.418014 + }, + { + "epoch": 2.39168, + "grad_norm": 0.7469841212175479, + "learning_rate": 5.413434814786475e-07, + "loss": 0.34550800919532776, + "memory(GiB)": 77.0, + "step": 7474, + "token_acc": 0.8864059590316573, + "train_speed(iter/s)": 0.417976 + }, + { + "epoch": 2.392, + "grad_norm": 0.7743036973453661, + "learning_rate": 5.407955716934532e-07, + "loss": 0.30005937814712524, + "memory(GiB)": 77.0, + "step": 7475, + "token_acc": 0.9401579586877278, + "train_speed(iter/s)": 0.417939 + }, + { + "epoch": 2.39232, + "grad_norm": 0.7199303499466622, + "learning_rate": 5.402479056972632e-07, + "loss": 0.2750754952430725, + "memory(GiB)": 77.0, + "step": 7476, + "token_acc": 0.9013195639701663, + "train_speed(iter/s)": 0.417903 + }, + { + "epoch": 2.39264, + "grad_norm": 0.7375551275391211, + "learning_rate": 5.397004835582242e-07, + "loss": 0.33259016275405884, + "memory(GiB)": 77.0, + "step": 7477, + "token_acc": 0.9242837653478854, + "train_speed(iter/s)": 0.417866 + }, + { + "epoch": 2.39296, + "grad_norm": 0.6439016840147864, + "learning_rate": 5.391533053444545e-07, + "loss": 0.27812203764915466, + "memory(GiB)": 77.0, + "step": 7478, + "token_acc": 0.9518943170488534, + "train_speed(iter/s)": 0.41783 + }, + { + "epoch": 2.39328, + "grad_norm": 0.7128836952790946, + "learning_rate": 5.3860637112404e-07, + "loss": 0.3303201198577881, + "memory(GiB)": 77.0, + "step": 7479, + "token_acc": 0.8693481276005548, + "train_speed(iter/s)": 0.417792 + }, + { + "epoch": 2.3936, + "grad_norm": 0.6766893855268737, + "learning_rate": 5.380596809650376e-07, + "loss": 0.22959576547145844, + "memory(GiB)": 77.0, + "step": 7480, + "token_acc": 0.8876181004475385, + "train_speed(iter/s)": 0.417757 + }, + { + "epoch": 2.39392, + "grad_norm": 0.7252890865089658, + "learning_rate": 5.375132349354733e-07, + "loss": 0.3721050024032593, + "memory(GiB)": 77.0, + "step": 
7481, + "token_acc": 0.919057686499547, + "train_speed(iter/s)": 0.417718 + }, + { + "epoch": 2.39424, + "grad_norm": 0.6721046164069033, + "learning_rate": 5.369670331033427e-07, + "loss": 0.313443660736084, + "memory(GiB)": 77.0, + "step": 7482, + "token_acc": 0.9198140615920977, + "train_speed(iter/s)": 0.417681 + }, + { + "epoch": 2.3945600000000002, + "grad_norm": 0.7412729248929586, + "learning_rate": 5.364210755366109e-07, + "loss": 0.3567822575569153, + "memory(GiB)": 77.0, + "step": 7483, + "token_acc": 0.891190006574622, + "train_speed(iter/s)": 0.417642 + }, + { + "epoch": 2.39488, + "grad_norm": 0.7200291925385103, + "learning_rate": 5.358753623032137e-07, + "loss": 0.2674052119255066, + "memory(GiB)": 77.0, + "step": 7484, + "token_acc": 0.946955997588909, + "train_speed(iter/s)": 0.417607 + }, + { + "epoch": 2.3952, + "grad_norm": 0.6737468633994605, + "learning_rate": 5.353298934710555e-07, + "loss": 0.29327285289764404, + "memory(GiB)": 77.0, + "step": 7485, + "token_acc": 0.8979391560353287, + "train_speed(iter/s)": 0.417568 + }, + { + "epoch": 2.39552, + "grad_norm": 0.6921587324894275, + "learning_rate": 5.3478466910801e-07, + "loss": 0.344099760055542, + "memory(GiB)": 77.0, + "step": 7486, + "token_acc": 0.8519541206457094, + "train_speed(iter/s)": 0.417525 + }, + { + "epoch": 2.39584, + "grad_norm": 0.8149926003764568, + "learning_rate": 5.342396892819216e-07, + "loss": 0.3863435387611389, + "memory(GiB)": 77.0, + "step": 7487, + "token_acc": 0.9006781013163143, + "train_speed(iter/s)": 0.41749 + }, + { + "epoch": 2.39616, + "grad_norm": 0.6575331511258821, + "learning_rate": 5.336949540606023e-07, + "loss": 0.30971211194992065, + "memory(GiB)": 77.0, + "step": 7488, + "token_acc": 0.9476145930776426, + "train_speed(iter/s)": 0.417451 + }, + { + "epoch": 2.39648, + "grad_norm": 0.6782453897604677, + "learning_rate": 5.331504635118357e-07, + "loss": 0.22124865651130676, + "memory(GiB)": 77.0, + "step": 7489, + "token_acc": 0.8953136810279667, + "train_speed(iter/s)": 0.417411 + }, + { + "epoch": 2.3968, + "grad_norm": 0.7226426959548442, + "learning_rate": 5.326062177033741e-07, + "loss": 0.2961702346801758, + "memory(GiB)": 77.0, + "step": 7490, + "token_acc": 0.9140960562280359, + "train_speed(iter/s)": 0.417366 + }, + { + "epoch": 2.39712, + "grad_norm": 1.0327973322303146, + "learning_rate": 5.320622167029399e-07, + "loss": 0.2636941075325012, + "memory(GiB)": 77.0, + "step": 7491, + "token_acc": 0.9131165919282511, + "train_speed(iter/s)": 0.417327 + }, + { + "epoch": 2.39744, + "grad_norm": 0.7822980995315304, + "learning_rate": 5.315184605782239e-07, + "loss": 0.29581332206726074, + "memory(GiB)": 77.0, + "step": 7492, + "token_acc": 0.9392616117506947, + "train_speed(iter/s)": 0.417291 + }, + { + "epoch": 2.39776, + "grad_norm": 0.6991030395917219, + "learning_rate": 5.309749493968874e-07, + "loss": 0.2898987829685211, + "memory(GiB)": 77.0, + "step": 7493, + "token_acc": 0.8654120330767038, + "train_speed(iter/s)": 0.417251 + }, + { + "epoch": 2.39808, + "grad_norm": 0.7876466049400765, + "learning_rate": 5.304316832265613e-07, + "loss": 0.3809705376625061, + "memory(GiB)": 77.0, + "step": 7494, + "token_acc": 0.913961875321999, + "train_speed(iter/s)": 0.417214 + }, + { + "epoch": 2.3984, + "grad_norm": 0.7272772063584846, + "learning_rate": 5.298886621348451e-07, + "loss": 0.21191255748271942, + "memory(GiB)": 77.0, + "step": 7495, + "token_acc": 0.8825814806820634, + "train_speed(iter/s)": 0.41718 + }, + { + "epoch": 2.39872, + "grad_norm": 
0.7782835006941599, + "learning_rate": 5.293458861893087e-07, + "loss": 0.33208775520324707, + "memory(GiB)": 77.0, + "step": 7496, + "token_acc": 0.8471223021582733, + "train_speed(iter/s)": 0.417139 + }, + { + "epoch": 2.39904, + "grad_norm": 0.7445304462533195, + "learning_rate": 5.288033554574907e-07, + "loss": 0.33839088678359985, + "memory(GiB)": 77.0, + "step": 7497, + "token_acc": 0.8255329576084293, + "train_speed(iter/s)": 0.417102 + }, + { + "epoch": 2.39936, + "grad_norm": 0.722157950767124, + "learning_rate": 5.282610700069002e-07, + "loss": 0.32667288184165955, + "memory(GiB)": 77.0, + "step": 7498, + "token_acc": 0.8378618362635412, + "train_speed(iter/s)": 0.417061 + }, + { + "epoch": 2.39968, + "grad_norm": 0.7215780941074125, + "learning_rate": 5.27719029905015e-07, + "loss": 0.3260902762413025, + "memory(GiB)": 77.0, + "step": 7499, + "token_acc": 0.8472344161545216, + "train_speed(iter/s)": 0.417026 + }, + { + "epoch": 2.4, + "grad_norm": 0.7507603737292287, + "learning_rate": 5.271772352192817e-07, + "loss": 0.3286835551261902, + "memory(GiB)": 77.0, + "step": 7500, + "token_acc": 0.9569767441860465, + "train_speed(iter/s)": 0.416987 + }, + { + "epoch": 2.40032, + "grad_norm": 0.7315628051307502, + "learning_rate": 5.266356860171195e-07, + "loss": 0.33588069677352905, + "memory(GiB)": 77.0, + "step": 7501, + "token_acc": 0.9156175569921742, + "train_speed(iter/s)": 0.416948 + }, + { + "epoch": 2.40064, + "grad_norm": 0.6943880818206931, + "learning_rate": 5.260943823659129e-07, + "loss": 0.25746721029281616, + "memory(GiB)": 77.0, + "step": 7502, + "token_acc": 0.926123595505618, + "train_speed(iter/s)": 0.416911 + }, + { + "epoch": 2.40096, + "grad_norm": 0.690660668798778, + "learning_rate": 5.255533243330183e-07, + "loss": 0.24486534297466278, + "memory(GiB)": 77.0, + "step": 7503, + "token_acc": 0.9015099519560741, + "train_speed(iter/s)": 0.416872 + }, + { + "epoch": 2.40128, + "grad_norm": 0.715233263829924, + "learning_rate": 5.250125119857608e-07, + "loss": 0.2569446563720703, + "memory(GiB)": 77.0, + "step": 7504, + "token_acc": 0.9358565737051793, + "train_speed(iter/s)": 0.416834 + }, + { + "epoch": 2.4016, + "grad_norm": 0.7303173700025564, + "learning_rate": 5.244719453914354e-07, + "loss": 0.2987438440322876, + "memory(GiB)": 77.0, + "step": 7505, + "token_acc": 0.8867250892567349, + "train_speed(iter/s)": 0.41679 + }, + { + "epoch": 2.40192, + "grad_norm": 0.7294488856875219, + "learning_rate": 5.23931624617306e-07, + "loss": 0.34406378865242004, + "memory(GiB)": 77.0, + "step": 7506, + "token_acc": 0.9240314003545201, + "train_speed(iter/s)": 0.41675 + }, + { + "epoch": 2.40224, + "grad_norm": 0.7237084124480053, + "learning_rate": 5.233915497306063e-07, + "loss": 0.34508824348449707, + "memory(GiB)": 77.0, + "step": 7507, + "token_acc": 0.8240073868882734, + "train_speed(iter/s)": 0.41671 + }, + { + "epoch": 2.40256, + "grad_norm": 0.7273204090341807, + "learning_rate": 5.228517207985392e-07, + "loss": 0.3596709966659546, + "memory(GiB)": 77.0, + "step": 7508, + "token_acc": 0.8443281629601943, + "train_speed(iter/s)": 0.416672 + }, + { + "epoch": 2.40288, + "grad_norm": 0.7194147191151086, + "learning_rate": 5.22312137888277e-07, + "loss": 0.3243284821510315, + "memory(GiB)": 77.0, + "step": 7509, + "token_acc": 0.9276807980049875, + "train_speed(iter/s)": 0.416632 + }, + { + "epoch": 2.4032, + "grad_norm": 0.7184604261582066, + "learning_rate": 5.217728010669618e-07, + "loss": 0.3049904704093933, + "memory(GiB)": 77.0, + "step": 7510, + "token_acc": 
0.9394697348674337, + "train_speed(iter/s)": 0.416594 + }, + { + "epoch": 2.40352, + "grad_norm": 0.7156244117419036, + "learning_rate": 5.21233710401704e-07, + "loss": 0.33978480100631714, + "memory(GiB)": 77.0, + "step": 7511, + "token_acc": 0.8856192425793245, + "train_speed(iter/s)": 0.416553 + }, + { + "epoch": 2.4038399999999998, + "grad_norm": 0.7052328449574733, + "learning_rate": 5.206948659595845e-07, + "loss": 0.32853448390960693, + "memory(GiB)": 77.0, + "step": 7512, + "token_acc": 0.934596577017115, + "train_speed(iter/s)": 0.416515 + }, + { + "epoch": 2.40416, + "grad_norm": 0.6811715729186628, + "learning_rate": 5.201562678076533e-07, + "loss": 0.31468313932418823, + "memory(GiB)": 77.0, + "step": 7513, + "token_acc": 0.9293653639297239, + "train_speed(iter/s)": 0.416472 + }, + { + "epoch": 2.40448, + "grad_norm": 0.6462163430437587, + "learning_rate": 5.196179160129295e-07, + "loss": 0.3018306493759155, + "memory(GiB)": 77.0, + "step": 7514, + "token_acc": 0.8533445874657101, + "train_speed(iter/s)": 0.416431 + }, + { + "epoch": 2.4048, + "grad_norm": 0.7178800806432972, + "learning_rate": 5.190798106424014e-07, + "loss": 0.2934058904647827, + "memory(GiB)": 77.0, + "step": 7515, + "token_acc": 0.9359618946811326, + "train_speed(iter/s)": 0.416385 + }, + { + "epoch": 2.40512, + "grad_norm": 0.6691449241524903, + "learning_rate": 5.185419517630269e-07, + "loss": 0.26688510179519653, + "memory(GiB)": 77.0, + "step": 7516, + "token_acc": 0.9280035335689046, + "train_speed(iter/s)": 0.416346 + }, + { + "epoch": 2.40544, + "grad_norm": 0.6816458450679824, + "learning_rate": 5.180043394417336e-07, + "loss": 0.276925265789032, + "memory(GiB)": 77.0, + "step": 7517, + "token_acc": 0.929541047188106, + "train_speed(iter/s)": 0.416309 + }, + { + "epoch": 2.40576, + "grad_norm": 0.7638221199749251, + "learning_rate": 5.174669737454174e-07, + "loss": 0.2766355872154236, + "memory(GiB)": 77.0, + "step": 7518, + "token_acc": 0.9053653275815584, + "train_speed(iter/s)": 0.416272 + }, + { + "epoch": 2.40608, + "grad_norm": 0.8234553369111662, + "learning_rate": 5.169298547409449e-07, + "loss": 0.3789288401603699, + "memory(GiB)": 77.0, + "step": 7519, + "token_acc": 0.8994614003590664, + "train_speed(iter/s)": 0.416236 + }, + { + "epoch": 2.4064, + "grad_norm": 0.7744402493349715, + "learning_rate": 5.163929824951508e-07, + "loss": 0.34645912051200867, + "memory(GiB)": 77.0, + "step": 7520, + "token_acc": 0.8933845245126993, + "train_speed(iter/s)": 0.416199 + }, + { + "epoch": 2.40672, + "grad_norm": 0.7769468077844871, + "learning_rate": 5.158563570748396e-07, + "loss": 0.27027708292007446, + "memory(GiB)": 77.0, + "step": 7521, + "token_acc": 0.937793427230047, + "train_speed(iter/s)": 0.416164 + }, + { + "epoch": 2.40704, + "grad_norm": 0.7866376355537785, + "learning_rate": 5.153199785467847e-07, + "loss": 0.33199572563171387, + "memory(GiB)": 77.0, + "step": 7522, + "token_acc": 0.8477226376614548, + "train_speed(iter/s)": 0.416129 + }, + { + "epoch": 2.40736, + "grad_norm": 0.7327453018092513, + "learning_rate": 5.147838469777297e-07, + "loss": 0.24751348793506622, + "memory(GiB)": 77.0, + "step": 7523, + "token_acc": 0.915402124430956, + "train_speed(iter/s)": 0.416089 + }, + { + "epoch": 2.40768, + "grad_norm": 0.7392634068373356, + "learning_rate": 5.142479624343866e-07, + "loss": 0.3987630009651184, + "memory(GiB)": 77.0, + "step": 7524, + "token_acc": 0.9400224215246636, + "train_speed(iter/s)": 0.416048 + }, + { + "epoch": 2.408, + "grad_norm": 0.6902045052919235, + 
"learning_rate": 5.137123249834367e-07, + "loss": 0.32130423188209534, + "memory(GiB)": 77.0, + "step": 7525, + "token_acc": 0.8823232323232323, + "train_speed(iter/s)": 0.416006 + }, + { + "epoch": 2.40832, + "grad_norm": 0.761205063021808, + "learning_rate": 5.131769346915313e-07, + "loss": 0.2981227934360504, + "memory(GiB)": 77.0, + "step": 7526, + "token_acc": 0.9231621721462874, + "train_speed(iter/s)": 0.415969 + }, + { + "epoch": 2.40864, + "grad_norm": 0.7209573427089274, + "learning_rate": 5.126417916252898e-07, + "loss": 0.27756980061531067, + "memory(GiB)": 77.0, + "step": 7527, + "token_acc": 0.9007421150278293, + "train_speed(iter/s)": 0.415932 + }, + { + "epoch": 2.40896, + "grad_norm": 0.6752587754568136, + "learning_rate": 5.121068958513023e-07, + "loss": 0.2409687042236328, + "memory(GiB)": 77.0, + "step": 7528, + "token_acc": 0.8914810887239307, + "train_speed(iter/s)": 0.415894 + }, + { + "epoch": 2.40928, + "grad_norm": 0.7057636354563268, + "learning_rate": 5.115722474361265e-07, + "loss": 0.35191214084625244, + "memory(GiB)": 77.0, + "step": 7529, + "token_acc": 0.8878081502599363, + "train_speed(iter/s)": 0.41585 + }, + { + "epoch": 2.4096, + "grad_norm": 0.7373370826652977, + "learning_rate": 5.110378464462904e-07, + "loss": 0.24067920446395874, + "memory(GiB)": 77.0, + "step": 7530, + "token_acc": 0.9414089746570565, + "train_speed(iter/s)": 0.415813 + }, + { + "epoch": 2.40992, + "grad_norm": 0.7836700152697755, + "learning_rate": 5.105036929482913e-07, + "loss": 0.3588927388191223, + "memory(GiB)": 77.0, + "step": 7531, + "token_acc": 0.861948142957253, + "train_speed(iter/s)": 0.415771 + }, + { + "epoch": 2.41024, + "grad_norm": 0.7048346505816631, + "learning_rate": 5.099697870085946e-07, + "loss": 0.368771493434906, + "memory(GiB)": 77.0, + "step": 7532, + "token_acc": 0.9231096006796942, + "train_speed(iter/s)": 0.415726 + }, + { + "epoch": 2.41056, + "grad_norm": 0.6632258069408148, + "learning_rate": 5.094361286936362e-07, + "loss": 0.29916661977767944, + "memory(GiB)": 77.0, + "step": 7533, + "token_acc": 0.8677165354330708, + "train_speed(iter/s)": 0.415685 + }, + { + "epoch": 2.41088, + "grad_norm": 0.7043832287342258, + "learning_rate": 5.089027180698205e-07, + "loss": 0.30507683753967285, + "memory(GiB)": 77.0, + "step": 7534, + "token_acc": 0.8790534618755478, + "train_speed(iter/s)": 0.415646 + }, + { + "epoch": 2.4112, + "grad_norm": 0.7186511872659944, + "learning_rate": 5.083695552035211e-07, + "loss": 0.3554905652999878, + "memory(GiB)": 77.0, + "step": 7535, + "token_acc": 0.9223057644110275, + "train_speed(iter/s)": 0.415605 + }, + { + "epoch": 2.41152, + "grad_norm": 0.7581774890711249, + "learning_rate": 5.07836640161081e-07, + "loss": 0.33850574493408203, + "memory(GiB)": 77.0, + "step": 7536, + "token_acc": 0.9378385349497034, + "train_speed(iter/s)": 0.415566 + }, + { + "epoch": 2.4118399999999998, + "grad_norm": 0.7308346448959254, + "learning_rate": 5.073039730088122e-07, + "loss": 0.2842726409435272, + "memory(GiB)": 77.0, + "step": 7537, + "token_acc": 0.9235630454408666, + "train_speed(iter/s)": 0.415528 + }, + { + "epoch": 2.41216, + "grad_norm": 0.6272073125961511, + "learning_rate": 5.067715538129958e-07, + "loss": 0.29794198274612427, + "memory(GiB)": 77.0, + "step": 7538, + "token_acc": 0.890702947845805, + "train_speed(iter/s)": 0.415481 + }, + { + "epoch": 2.41248, + "grad_norm": 0.6949911876121359, + "learning_rate": 5.062393826398821e-07, + "loss": 0.316308856010437, + "memory(GiB)": 77.0, + "step": 7539, + "token_acc": 
0.899843912591051, + "train_speed(iter/s)": 0.415445 + }, + { + "epoch": 2.4128, + "grad_norm": 0.7496295566466601, + "learning_rate": 5.057074595556907e-07, + "loss": 0.2841019928455353, + "memory(GiB)": 77.0, + "step": 7540, + "token_acc": 0.9591782986568343, + "train_speed(iter/s)": 0.415409 + }, + { + "epoch": 2.41312, + "grad_norm": 0.7036933562865377, + "learning_rate": 5.051757846266103e-07, + "loss": 0.2799491882324219, + "memory(GiB)": 77.0, + "step": 7541, + "token_acc": 0.9543524416135881, + "train_speed(iter/s)": 0.415368 + }, + { + "epoch": 2.41344, + "grad_norm": 0.7017067573196574, + "learning_rate": 5.046443579187982e-07, + "loss": 0.3552981913089752, + "memory(GiB)": 77.0, + "step": 7542, + "token_acc": 0.9707692307692307, + "train_speed(iter/s)": 0.415333 + }, + { + "epoch": 2.41376, + "grad_norm": 0.7140906254896993, + "learning_rate": 5.041131794983817e-07, + "loss": 0.3815602958202362, + "memory(GiB)": 77.0, + "step": 7543, + "token_acc": 0.9181363516714925, + "train_speed(iter/s)": 0.415294 + }, + { + "epoch": 2.4140800000000002, + "grad_norm": 0.8543694428769733, + "learning_rate": 5.035822494314571e-07, + "loss": 0.34556347131729126, + "memory(GiB)": 77.0, + "step": 7544, + "token_acc": 0.9545113721569608, + "train_speed(iter/s)": 0.415258 + }, + { + "epoch": 2.4144, + "grad_norm": 0.6946892100211574, + "learning_rate": 5.030515677840883e-07, + "loss": 0.2043875902891159, + "memory(GiB)": 77.0, + "step": 7545, + "token_acc": 0.9623873173764377, + "train_speed(iter/s)": 0.415218 + }, + { + "epoch": 2.41472, + "grad_norm": 0.7316950805256949, + "learning_rate": 5.025211346223093e-07, + "loss": 0.3083495497703552, + "memory(GiB)": 77.0, + "step": 7546, + "token_acc": 0.8614072494669509, + "train_speed(iter/s)": 0.415178 + }, + { + "epoch": 2.41504, + "grad_norm": 0.6330800944205757, + "learning_rate": 5.019909500121248e-07, + "loss": 0.343381404876709, + "memory(GiB)": 77.0, + "step": 7547, + "token_acc": 0.8680699673880818, + "train_speed(iter/s)": 0.415125 + }, + { + "epoch": 2.41536, + "grad_norm": 0.7064363563188993, + "learning_rate": 5.014610140195064e-07, + "loss": 0.2299412190914154, + "memory(GiB)": 77.0, + "step": 7548, + "token_acc": 0.9642982149107455, + "train_speed(iter/s)": 0.415071 + }, + { + "epoch": 2.41568, + "grad_norm": 0.755598108583767, + "learning_rate": 5.009313267103952e-07, + "loss": 0.3306156396865845, + "memory(GiB)": 77.0, + "step": 7549, + "token_acc": 0.8877279635258358, + "train_speed(iter/s)": 0.415031 + }, + { + "epoch": 2.416, + "grad_norm": 0.7892026594626455, + "learning_rate": 5.004018881507016e-07, + "loss": 0.31287628412246704, + "memory(GiB)": 77.0, + "step": 7550, + "token_acc": 0.9109947643979057, + "train_speed(iter/s)": 0.414998 + }, + { + "epoch": 2.41632, + "grad_norm": 0.7304749490640895, + "learning_rate": 4.998726984063051e-07, + "loss": 0.2686823010444641, + "memory(GiB)": 77.0, + "step": 7551, + "token_acc": 0.9089118660758247, + "train_speed(iter/s)": 0.414957 + }, + { + "epoch": 2.41664, + "grad_norm": 0.8459611465130907, + "learning_rate": 4.993437575430546e-07, + "loss": 0.28933900594711304, + "memory(GiB)": 77.0, + "step": 7552, + "token_acc": 0.916232186305179, + "train_speed(iter/s)": 0.414924 + }, + { + "epoch": 2.41696, + "grad_norm": 0.687618685146155, + "learning_rate": 4.988150656267671e-07, + "loss": 0.2827607989311218, + "memory(GiB)": 77.0, + "step": 7553, + "token_acc": 0.9641076769690927, + "train_speed(iter/s)": 0.414888 + }, + { + "epoch": 2.41728, + "grad_norm": 0.6762884707622651, + 
"learning_rate": 4.982866227232292e-07, + "loss": 0.2809639573097229, + "memory(GiB)": 77.0, + "step": 7554, + "token_acc": 0.954954954954955, + "train_speed(iter/s)": 0.414852 + }, + { + "epoch": 2.4176, + "grad_norm": 0.7194297439831456, + "learning_rate": 4.977584288981968e-07, + "loss": 0.29877200722694397, + "memory(GiB)": 77.0, + "step": 7555, + "token_acc": 0.8486572598998634, + "train_speed(iter/s)": 0.414817 + }, + { + "epoch": 2.41792, + "grad_norm": 0.7522915778079746, + "learning_rate": 4.972304842173942e-07, + "loss": 0.31582096219062805, + "memory(GiB)": 77.0, + "step": 7556, + "token_acc": 0.8672794710530559, + "train_speed(iter/s)": 0.414782 + }, + { + "epoch": 2.41824, + "grad_norm": 0.6811392632358316, + "learning_rate": 4.967027887465148e-07, + "loss": 0.2583407759666443, + "memory(GiB)": 77.0, + "step": 7557, + "token_acc": 0.8826979472140762, + "train_speed(iter/s)": 0.414745 + }, + { + "epoch": 2.41856, + "grad_norm": 0.7531583010731202, + "learning_rate": 4.96175342551222e-07, + "loss": 0.3464934825897217, + "memory(GiB)": 77.0, + "step": 7558, + "token_acc": 0.9495327102803738, + "train_speed(iter/s)": 0.414706 + }, + { + "epoch": 2.41888, + "grad_norm": 0.7526432603586982, + "learning_rate": 4.956481456971459e-07, + "loss": 0.3708306849002838, + "memory(GiB)": 77.0, + "step": 7559, + "token_acc": 0.948060486522025, + "train_speed(iter/s)": 0.414666 + }, + { + "epoch": 2.4192, + "grad_norm": 1.1577468485334719, + "learning_rate": 4.951211982498882e-07, + "loss": 0.4048463702201843, + "memory(GiB)": 77.0, + "step": 7560, + "token_acc": 0.9178832116788321, + "train_speed(iter/s)": 0.414614 + }, + { + "epoch": 2.41952, + "grad_norm": 0.6606979677808839, + "learning_rate": 4.945945002750177e-07, + "loss": 0.24175956845283508, + "memory(GiB)": 77.0, + "step": 7561, + "token_acc": 0.9602344077019673, + "train_speed(iter/s)": 0.414579 + }, + { + "epoch": 2.4198399999999998, + "grad_norm": 0.8440656441705308, + "learning_rate": 4.940680518380728e-07, + "loss": 0.2780475616455078, + "memory(GiB)": 77.0, + "step": 7562, + "token_acc": 0.9381153305203939, + "train_speed(iter/s)": 0.414535 + }, + { + "epoch": 2.42016, + "grad_norm": 0.7048566345402748, + "learning_rate": 4.935418530045608e-07, + "loss": 0.24908801913261414, + "memory(GiB)": 77.0, + "step": 7563, + "token_acc": 0.9579313006561173, + "train_speed(iter/s)": 0.414496 + }, + { + "epoch": 2.42048, + "grad_norm": 0.7477992794386233, + "learning_rate": 4.93015903839959e-07, + "loss": 0.23617824912071228, + "memory(GiB)": 77.0, + "step": 7564, + "token_acc": 0.9512605042016806, + "train_speed(iter/s)": 0.41446 + }, + { + "epoch": 2.4208, + "grad_norm": 0.7292711562078189, + "learning_rate": 4.924902044097118e-07, + "loss": 0.25839781761169434, + "memory(GiB)": 77.0, + "step": 7565, + "token_acc": 0.8747050495516753, + "train_speed(iter/s)": 0.414426 + }, + { + "epoch": 2.42112, + "grad_norm": 0.7327915407016999, + "learning_rate": 4.919647547792336e-07, + "loss": 0.3974050283432007, + "memory(GiB)": 77.0, + "step": 7566, + "token_acc": 0.8487673503211104, + "train_speed(iter/s)": 0.414387 + }, + { + "epoch": 2.42144, + "grad_norm": 0.7191729114973103, + "learning_rate": 4.914395550139075e-07, + "loss": 0.33715271949768066, + "memory(GiB)": 77.0, + "step": 7567, + "token_acc": 0.9569135454801464, + "train_speed(iter/s)": 0.414345 + }, + { + "epoch": 2.42176, + "grad_norm": 0.7561432481933628, + "learning_rate": 4.909146051790856e-07, + "loss": 0.23320722579956055, + "memory(GiB)": 77.0, + "step": 7568, + "token_acc": 
0.959395656279509, + "train_speed(iter/s)": 0.414312 + }, + { + "epoch": 2.4220800000000002, + "grad_norm": 0.7708550732672187, + "learning_rate": 4.903899053400885e-07, + "loss": 0.31793010234832764, + "memory(GiB)": 77.0, + "step": 7569, + "token_acc": 0.8917322834645669, + "train_speed(iter/s)": 0.41427 + }, + { + "epoch": 2.4224, + "grad_norm": 0.7657393992939704, + "learning_rate": 4.898654555622062e-07, + "loss": 0.3324423134326935, + "memory(GiB)": 77.0, + "step": 7570, + "token_acc": 0.9206637282862328, + "train_speed(iter/s)": 0.414225 + }, + { + "epoch": 2.42272, + "grad_norm": 0.6617308069311495, + "learning_rate": 4.893412559106972e-07, + "loss": 0.2672451138496399, + "memory(GiB)": 77.0, + "step": 7571, + "token_acc": 0.9386371744559401, + "train_speed(iter/s)": 0.414183 + }, + { + "epoch": 2.42304, + "grad_norm": 0.8274237909822172, + "learning_rate": 4.888173064507895e-07, + "loss": 0.3141009211540222, + "memory(GiB)": 77.0, + "step": 7572, + "token_acc": 0.8912543153049483, + "train_speed(iter/s)": 0.414149 + }, + { + "epoch": 2.42336, + "grad_norm": 0.7011910981401909, + "learning_rate": 4.882936072476796e-07, + "loss": 0.2870284616947174, + "memory(GiB)": 77.0, + "step": 7573, + "token_acc": 0.8669871794871795, + "train_speed(iter/s)": 0.414109 + }, + { + "epoch": 2.42368, + "grad_norm": 0.7114161769904503, + "learning_rate": 4.877701583665318e-07, + "loss": 0.3711289167404175, + "memory(GiB)": 77.0, + "step": 7574, + "token_acc": 0.8798029556650246, + "train_speed(iter/s)": 0.414063 + }, + { + "epoch": 2.424, + "grad_norm": 0.7918830566232253, + "learning_rate": 4.872469598724807e-07, + "loss": 0.388461709022522, + "memory(GiB)": 77.0, + "step": 7575, + "token_acc": 0.9168169522091975, + "train_speed(iter/s)": 0.414025 + }, + { + "epoch": 2.42432, + "grad_norm": 0.5940125831385974, + "learning_rate": 4.867240118306296e-07, + "loss": 0.2635729908943176, + "memory(GiB)": 77.0, + "step": 7576, + "token_acc": 0.8956682027649769, + "train_speed(iter/s)": 0.41399 + }, + { + "epoch": 2.42464, + "grad_norm": 0.7469863606285493, + "learning_rate": 4.862013143060502e-07, + "loss": 0.21317268908023834, + "memory(GiB)": 77.0, + "step": 7577, + "token_acc": 0.9230055658627088, + "train_speed(iter/s)": 0.413956 + }, + { + "epoch": 2.42496, + "grad_norm": 0.6771424332236045, + "learning_rate": 4.85678867363783e-07, + "loss": 0.2774522304534912, + "memory(GiB)": 77.0, + "step": 7578, + "token_acc": 0.9668560606060606, + "train_speed(iter/s)": 0.41392 + }, + { + "epoch": 2.42528, + "grad_norm": 0.6766252047824094, + "learning_rate": 4.851566710688374e-07, + "loss": 0.2827974855899811, + "memory(GiB)": 77.0, + "step": 7579, + "token_acc": 0.9216406489133762, + "train_speed(iter/s)": 0.413883 + }, + { + "epoch": 2.4256, + "grad_norm": 0.6381283153646664, + "learning_rate": 4.84634725486191e-07, + "loss": 0.26405274868011475, + "memory(GiB)": 77.0, + "step": 7580, + "token_acc": 0.9187981127390117, + "train_speed(iter/s)": 0.413849 + }, + { + "epoch": 2.42592, + "grad_norm": 0.7558095801065631, + "learning_rate": 4.841130306807926e-07, + "loss": 0.3323264718055725, + "memory(GiB)": 77.0, + "step": 7581, + "token_acc": 0.9014948256036796, + "train_speed(iter/s)": 0.413814 + }, + { + "epoch": 2.42624, + "grad_norm": 0.713585059680579, + "learning_rate": 4.835915867175566e-07, + "loss": 0.29745814204216003, + "memory(GiB)": 77.0, + "step": 7582, + "token_acc": 0.9161807580174927, + "train_speed(iter/s)": 0.413777 + }, + { + "epoch": 2.42656, + "grad_norm": 0.8259918906531705, + 
"learning_rate": 4.830703936613687e-07, + "loss": 0.3602250814437866, + "memory(GiB)": 77.0, + "step": 7583, + "token_acc": 0.925256472887152, + "train_speed(iter/s)": 0.413738 + }, + { + "epoch": 2.42688, + "grad_norm": 0.687869208176192, + "learning_rate": 4.825494515770818e-07, + "loss": 0.3204861283302307, + "memory(GiB)": 77.0, + "step": 7584, + "token_acc": 0.8957442476074119, + "train_speed(iter/s)": 0.413701 + }, + { + "epoch": 2.4272, + "grad_norm": 0.6821800836114258, + "learning_rate": 4.820287605295179e-07, + "loss": 0.28043895959854126, + "memory(GiB)": 77.0, + "step": 7585, + "token_acc": 0.9218815623687526, + "train_speed(iter/s)": 0.413661 + }, + { + "epoch": 2.42752, + "grad_norm": 0.7444646503501016, + "learning_rate": 4.815083205834681e-07, + "loss": 0.3123072385787964, + "memory(GiB)": 77.0, + "step": 7586, + "token_acc": 0.8620053311461965, + "train_speed(iter/s)": 0.413612 + }, + { + "epoch": 2.4278399999999998, + "grad_norm": 0.8031065030863223, + "learning_rate": 4.80988131803693e-07, + "loss": 0.31919166445732117, + "memory(GiB)": 77.0, + "step": 7587, + "token_acc": 0.9443123938879456, + "train_speed(iter/s)": 0.413576 + }, + { + "epoch": 2.42816, + "grad_norm": 0.7060310044706258, + "learning_rate": 4.804681942549197e-07, + "loss": 0.3019292950630188, + "memory(GiB)": 77.0, + "step": 7588, + "token_acc": 0.84993564993565, + "train_speed(iter/s)": 0.413541 + }, + { + "epoch": 2.42848, + "grad_norm": 0.6877458554305887, + "learning_rate": 4.799485080018459e-07, + "loss": 0.3562075197696686, + "memory(GiB)": 77.0, + "step": 7589, + "token_acc": 0.9274895262649049, + "train_speed(iter/s)": 0.413499 + }, + { + "epoch": 2.4288, + "grad_norm": 0.716059607219422, + "learning_rate": 4.794290731091375e-07, + "loss": 0.30842912197113037, + "memory(GiB)": 77.0, + "step": 7590, + "token_acc": 0.9106529209621993, + "train_speed(iter/s)": 0.413459 + }, + { + "epoch": 2.42912, + "grad_norm": 0.7485756231522418, + "learning_rate": 4.789098896414296e-07, + "loss": 0.3547125458717346, + "memory(GiB)": 77.0, + "step": 7591, + "token_acc": 0.8970142795326699, + "train_speed(iter/s)": 0.413424 + }, + { + "epoch": 2.42944, + "grad_norm": 0.6484433912048764, + "learning_rate": 4.78390957663325e-07, + "loss": 0.28441333770751953, + "memory(GiB)": 77.0, + "step": 7592, + "token_acc": 0.9096130592503023, + "train_speed(iter/s)": 0.413389 + }, + { + "epoch": 2.42976, + "grad_norm": 0.6737830329153459, + "learning_rate": 4.778722772393959e-07, + "loss": 0.2605074644088745, + "memory(GiB)": 77.0, + "step": 7593, + "token_acc": 0.9229785874081176, + "train_speed(iter/s)": 0.413351 + }, + { + "epoch": 2.4300800000000002, + "grad_norm": 0.6698043469918238, + "learning_rate": 4.773538484341835e-07, + "loss": 0.34186798334121704, + "memory(GiB)": 77.0, + "step": 7594, + "token_acc": 0.946555452579549, + "train_speed(iter/s)": 0.413308 + }, + { + "epoch": 2.4304, + "grad_norm": 0.7503719761437737, + "learning_rate": 4.768356713121972e-07, + "loss": 0.3323623836040497, + "memory(GiB)": 77.0, + "step": 7595, + "token_acc": 0.9229131175468483, + "train_speed(iter/s)": 0.413272 + }, + { + "epoch": 2.43072, + "grad_norm": 0.7020107535100806, + "learning_rate": 4.7631774593791396e-07, + "loss": 0.21828347444534302, + "memory(GiB)": 77.0, + "step": 7596, + "token_acc": 0.9552346570397112, + "train_speed(iter/s)": 0.413238 + }, + { + "epoch": 2.43104, + "grad_norm": 0.6750869882713825, + "learning_rate": 4.758000723757822e-07, + "loss": 0.23505963385105133, + "memory(GiB)": 77.0, + "step": 7597, + 
"token_acc": 0.925254190711734, + "train_speed(iter/s)": 0.413204 + }, + { + "epoch": 2.43136, + "grad_norm": 0.6607862875579796, + "learning_rate": 4.7528265069021696e-07, + "loss": 0.3225734829902649, + "memory(GiB)": 77.0, + "step": 7598, + "token_acc": 0.9078394773681755, + "train_speed(iter/s)": 0.413164 + }, + { + "epoch": 2.43168, + "grad_norm": 0.7254463260036179, + "learning_rate": 4.747654809456023e-07, + "loss": 0.3156927227973938, + "memory(GiB)": 77.0, + "step": 7599, + "token_acc": 0.9114754098360656, + "train_speed(iter/s)": 0.413126 + }, + { + "epoch": 2.432, + "grad_norm": 0.77275107633552, + "learning_rate": 4.742485632062907e-07, + "loss": 0.3688800036907196, + "memory(GiB)": 77.0, + "step": 7600, + "token_acc": 0.8840728391895358, + "train_speed(iter/s)": 0.413093 + }, + { + "epoch": 2.43232, + "grad_norm": 0.6611212555066658, + "learning_rate": 4.737318975366048e-07, + "loss": 0.23793473839759827, + "memory(GiB)": 77.0, + "step": 7601, + "token_acc": 0.9341546304163126, + "train_speed(iter/s)": 0.413056 + }, + { + "epoch": 2.43264, + "grad_norm": 0.6989389215634263, + "learning_rate": 4.732154840008329e-07, + "loss": 0.28432512283325195, + "memory(GiB)": 77.0, + "step": 7602, + "token_acc": 0.9484282907662083, + "train_speed(iter/s)": 0.413018 + }, + { + "epoch": 2.43296, + "grad_norm": 0.7788252693069022, + "learning_rate": 4.726993226632348e-07, + "loss": 0.2433902472257614, + "memory(GiB)": 77.0, + "step": 7603, + "token_acc": 0.9156050955414012, + "train_speed(iter/s)": 0.412985 + }, + { + "epoch": 2.43328, + "grad_norm": 0.6754107310814256, + "learning_rate": 4.7218341358803735e-07, + "loss": 0.30300694704055786, + "memory(GiB)": 77.0, + "step": 7604, + "token_acc": 0.9075706901793859, + "train_speed(iter/s)": 0.412945 + }, + { + "epoch": 2.4336, + "grad_norm": 0.6406578590680907, + "learning_rate": 4.716677568394371e-07, + "loss": 0.28860658407211304, + "memory(GiB)": 77.0, + "step": 7605, + "token_acc": 0.897839619187111, + "train_speed(iter/s)": 0.412893 + }, + { + "epoch": 2.43392, + "grad_norm": 0.6755460191887084, + "learning_rate": 4.711523524815978e-07, + "loss": 0.3145601749420166, + "memory(GiB)": 77.0, + "step": 7606, + "token_acc": 0.918563579277865, + "train_speed(iter/s)": 0.412851 + }, + { + "epoch": 2.43424, + "grad_norm": 0.7376479220978043, + "learning_rate": 4.7063720057865304e-07, + "loss": 0.30509424209594727, + "memory(GiB)": 77.0, + "step": 7607, + "token_acc": 0.8564131668558457, + "train_speed(iter/s)": 0.412815 + }, + { + "epoch": 2.43456, + "grad_norm": 0.7277223225151692, + "learning_rate": 4.701223011947045e-07, + "loss": 0.25637054443359375, + "memory(GiB)": 77.0, + "step": 7608, + "token_acc": 0.9306093694963015, + "train_speed(iter/s)": 0.412776 + }, + { + "epoch": 2.43488, + "grad_norm": 0.6963983051303432, + "learning_rate": 4.696076543938227e-07, + "loss": 0.2668500542640686, + "memory(GiB)": 77.0, + "step": 7609, + "token_acc": 0.9180565627266135, + "train_speed(iter/s)": 0.412736 + }, + { + "epoch": 2.4352, + "grad_norm": 0.7709514224448737, + "learning_rate": 4.690932602400461e-07, + "loss": 0.3197547197341919, + "memory(GiB)": 77.0, + "step": 7610, + "token_acc": 0.8944413250982594, + "train_speed(iter/s)": 0.412697 + }, + { + "epoch": 2.43552, + "grad_norm": 0.8047249775144714, + "learning_rate": 4.685791187973823e-07, + "loss": 0.31666240096092224, + "memory(GiB)": 77.0, + "step": 7611, + "token_acc": 0.9373549883990719, + "train_speed(iter/s)": 0.412659 + }, + { + "epoch": 2.43584, + "grad_norm": 0.7074884098114078, + 
"learning_rate": 4.680652301298072e-07, + "loss": 0.3583431839942932, + "memory(GiB)": 77.0, + "step": 7612, + "token_acc": 0.8920034767492394, + "train_speed(iter/s)": 0.412623 + }, + { + "epoch": 2.43616, + "grad_norm": 0.7400821416234207, + "learning_rate": 4.6755159430126483e-07, + "loss": 0.286140501499176, + "memory(GiB)": 77.0, + "step": 7613, + "token_acc": 0.897203947368421, + "train_speed(iter/s)": 0.412585 + }, + { + "epoch": 2.43648, + "grad_norm": 0.7445781250425039, + "learning_rate": 4.670382113756694e-07, + "loss": 0.2831425368785858, + "memory(GiB)": 77.0, + "step": 7614, + "token_acc": 0.9220532319391636, + "train_speed(iter/s)": 0.412549 + }, + { + "epoch": 2.4368, + "grad_norm": 1.1224334010450627, + "learning_rate": 4.665250814169023e-07, + "loss": 0.3071702718734741, + "memory(GiB)": 77.0, + "step": 7615, + "token_acc": 0.9117379435850773, + "train_speed(iter/s)": 0.412511 + }, + { + "epoch": 2.43712, + "grad_norm": 0.7555273212939896, + "learning_rate": 4.6601220448881353e-07, + "loss": 0.29722970724105835, + "memory(GiB)": 77.0, + "step": 7616, + "token_acc": 0.9181840252488468, + "train_speed(iter/s)": 0.412474 + }, + { + "epoch": 2.43744, + "grad_norm": 0.8523992547239171, + "learning_rate": 4.6549958065522095e-07, + "loss": 0.3099057078361511, + "memory(GiB)": 77.0, + "step": 7617, + "token_acc": 0.8680851063829788, + "train_speed(iter/s)": 0.412436 + }, + { + "epoch": 2.43776, + "grad_norm": 1.055981706515623, + "learning_rate": 4.6498720997991243e-07, + "loss": 0.37475815415382385, + "memory(GiB)": 77.0, + "step": 7618, + "token_acc": 0.9018095520617028, + "train_speed(iter/s)": 0.412396 + }, + { + "epoch": 2.4380800000000002, + "grad_norm": 0.728093571518188, + "learning_rate": 4.6447509252664334e-07, + "loss": 0.35785043239593506, + "memory(GiB)": 77.0, + "step": 7619, + "token_acc": 0.8980972515856237, + "train_speed(iter/s)": 0.412356 + }, + { + "epoch": 2.4384, + "grad_norm": 0.7491333391243022, + "learning_rate": 4.639632283591378e-07, + "loss": 0.36637231707572937, + "memory(GiB)": 77.0, + "step": 7620, + "token_acc": 0.8932374100719425, + "train_speed(iter/s)": 0.412319 + }, + { + "epoch": 2.43872, + "grad_norm": 0.8073201131693074, + "learning_rate": 4.634516175410886e-07, + "loss": 0.35232025384902954, + "memory(GiB)": 77.0, + "step": 7621, + "token_acc": 0.9442544459644323, + "train_speed(iter/s)": 0.412284 + }, + { + "epoch": 2.43904, + "grad_norm": 0.7755127320413592, + "learning_rate": 4.629402601361568e-07, + "loss": 0.34466081857681274, + "memory(GiB)": 77.0, + "step": 7622, + "token_acc": 0.8813174332765474, + "train_speed(iter/s)": 0.412246 + }, + { + "epoch": 2.4393599999999998, + "grad_norm": 0.6939854208325654, + "learning_rate": 4.624291562079719e-07, + "loss": 0.3148881196975708, + "memory(GiB)": 77.0, + "step": 7623, + "token_acc": 0.9137785291631445, + "train_speed(iter/s)": 0.41221 + }, + { + "epoch": 2.43968, + "grad_norm": 0.7463169947020704, + "learning_rate": 4.619183058201318e-07, + "loss": 0.3184869885444641, + "memory(GiB)": 77.0, + "step": 7624, + "token_acc": 0.874455732946299, + "train_speed(iter/s)": 0.41217 + }, + { + "epoch": 2.44, + "grad_norm": 0.7347571564698469, + "learning_rate": 4.6140770903620346e-07, + "loss": 0.3333052396774292, + "memory(GiB)": 77.0, + "step": 7625, + "token_acc": 0.9306625577812019, + "train_speed(iter/s)": 0.412131 + }, + { + "epoch": 2.44032, + "grad_norm": 0.7935755746082066, + "learning_rate": 4.6089736591972105e-07, + "loss": 0.3128449022769928, + "memory(GiB)": 77.0, + "step": 7626, + 
"token_acc": 0.8843951324266285, + "train_speed(iter/s)": 0.412097 + }, + { + "epoch": 2.44064, + "grad_norm": 0.7369743967054646, + "learning_rate": 4.603872765341885e-07, + "loss": 0.3482598662376404, + "memory(GiB)": 77.0, + "step": 7627, + "token_acc": 0.8897146254458977, + "train_speed(iter/s)": 0.412059 + }, + { + "epoch": 2.44096, + "grad_norm": 0.7677419669148016, + "learning_rate": 4.598774409430773e-07, + "loss": 0.3764567971229553, + "memory(GiB)": 77.0, + "step": 7628, + "token_acc": 0.9363265306122449, + "train_speed(iter/s)": 0.412024 + }, + { + "epoch": 2.44128, + "grad_norm": 0.7228261919702503, + "learning_rate": 4.5936785920982804e-07, + "loss": 0.35960468649864197, + "memory(GiB)": 77.0, + "step": 7629, + "token_acc": 0.8327507394460877, + "train_speed(iter/s)": 0.411986 + }, + { + "epoch": 2.4416, + "grad_norm": 0.7338654797694614, + "learning_rate": 4.588585313978486e-07, + "loss": 0.2903548777103424, + "memory(GiB)": 77.0, + "step": 7630, + "token_acc": 0.9344729344729344, + "train_speed(iter/s)": 0.411949 + }, + { + "epoch": 2.44192, + "grad_norm": 0.7535148440725882, + "learning_rate": 4.583494575705166e-07, + "loss": 0.26490193605422974, + "memory(GiB)": 77.0, + "step": 7631, + "token_acc": 0.8869028274293143, + "train_speed(iter/s)": 0.411914 + }, + { + "epoch": 2.44224, + "grad_norm": 0.7337525153937259, + "learning_rate": 4.5784063779117733e-07, + "loss": 0.3304031789302826, + "memory(GiB)": 77.0, + "step": 7632, + "token_acc": 0.8780741253896779, + "train_speed(iter/s)": 0.411875 + }, + { + "epoch": 2.44256, + "grad_norm": 0.6959209255635987, + "learning_rate": 4.5733207212314443e-07, + "loss": 0.2636338472366333, + "memory(GiB)": 77.0, + "step": 7633, + "token_acc": 0.8704015388314499, + "train_speed(iter/s)": 0.411837 + }, + { + "epoch": 2.44288, + "grad_norm": 1.1537042494189045, + "learning_rate": 4.568237606297002e-07, + "loss": 0.3155444264411926, + "memory(GiB)": 77.0, + "step": 7634, + "token_acc": 0.8906359189378057, + "train_speed(iter/s)": 0.411801 + }, + { + "epoch": 2.4432, + "grad_norm": 0.7473055686635356, + "learning_rate": 4.563157033740953e-07, + "loss": 0.2822931408882141, + "memory(GiB)": 77.0, + "step": 7635, + "token_acc": 0.9272349272349273, + "train_speed(iter/s)": 0.411764 + }, + { + "epoch": 2.44352, + "grad_norm": 0.703588228507305, + "learning_rate": 4.5580790041954813e-07, + "loss": 0.36502766609191895, + "memory(GiB)": 77.0, + "step": 7636, + "token_acc": 0.906050276949297, + "train_speed(iter/s)": 0.411729 + }, + { + "epoch": 2.44384, + "grad_norm": 0.6600757674716823, + "learning_rate": 4.553003518292465e-07, + "loss": 0.25349485874176025, + "memory(GiB)": 77.0, + "step": 7637, + "token_acc": 0.8898168103448276, + "train_speed(iter/s)": 0.411661 + }, + { + "epoch": 2.44416, + "grad_norm": 0.6838091902120395, + "learning_rate": 4.5479305766634593e-07, + "loss": 0.26094022393226624, + "memory(GiB)": 77.0, + "step": 7638, + "token_acc": 0.9693218514531755, + "train_speed(iter/s)": 0.411625 + }, + { + "epoch": 2.44448, + "grad_norm": 0.7080385261120296, + "learning_rate": 4.542860179939704e-07, + "loss": 0.31476640701293945, + "memory(GiB)": 77.0, + "step": 7639, + "token_acc": 0.9141630901287554, + "train_speed(iter/s)": 0.411586 + }, + { + "epoch": 2.4448, + "grad_norm": 0.6496307463448484, + "learning_rate": 4.5377923287521193e-07, + "loss": 0.2653732895851135, + "memory(GiB)": 77.0, + "step": 7640, + "token_acc": 0.960098979276214, + "train_speed(iter/s)": 0.411544 + }, + { + "epoch": 2.44512, + "grad_norm": 0.7734351095529381, + 
"learning_rate": 4.532727023731312e-07, + "loss": 0.33341994881629944, + "memory(GiB)": 77.0, + "step": 7641, + "token_acc": 0.9044214985438178, + "train_speed(iter/s)": 0.411507 + }, + { + "epoch": 2.44544, + "grad_norm": 0.7498295093038941, + "learning_rate": 4.5276642655075724e-07, + "loss": 0.30003175139427185, + "memory(GiB)": 77.0, + "step": 7642, + "token_acc": 0.95, + "train_speed(iter/s)": 0.411473 + }, + { + "epoch": 2.44576, + "grad_norm": 0.7388000485848573, + "learning_rate": 4.5226040547108747e-07, + "loss": 0.35176026821136475, + "memory(GiB)": 77.0, + "step": 7643, + "token_acc": 0.9202436998061478, + "train_speed(iter/s)": 0.411441 + }, + { + "epoch": 2.44608, + "grad_norm": 0.7564411836472302, + "learning_rate": 4.517546391970873e-07, + "loss": 0.3711373805999756, + "memory(GiB)": 77.0, + "step": 7644, + "token_acc": 0.9115863389722311, + "train_speed(iter/s)": 0.411406 + }, + { + "epoch": 2.4464, + "grad_norm": 0.6899755315542155, + "learning_rate": 4.512491277916903e-07, + "loss": 0.3070293962955475, + "memory(GiB)": 77.0, + "step": 7645, + "token_acc": 0.8568904593639576, + "train_speed(iter/s)": 0.411372 + }, + { + "epoch": 2.44672, + "grad_norm": 0.7263957816556303, + "learning_rate": 4.507438713177992e-07, + "loss": 0.33433401584625244, + "memory(GiB)": 77.0, + "step": 7646, + "token_acc": 0.8586144733453516, + "train_speed(iter/s)": 0.411334 + }, + { + "epoch": 2.44704, + "grad_norm": 0.7442124348966339, + "learning_rate": 4.502388698382837e-07, + "loss": 0.27783656120300293, + "memory(GiB)": 77.0, + "step": 7647, + "token_acc": 0.8955777460770328, + "train_speed(iter/s)": 0.411295 + }, + { + "epoch": 2.4473599999999998, + "grad_norm": 0.6177529331528591, + "learning_rate": 4.49734123415983e-07, + "loss": 0.2127286046743393, + "memory(GiB)": 77.0, + "step": 7648, + "token_acc": 0.8898731146756045, + "train_speed(iter/s)": 0.411257 + }, + { + "epoch": 2.44768, + "grad_norm": 0.7571150701721104, + "learning_rate": 4.4922963211370353e-07, + "loss": 0.2682623267173767, + "memory(GiB)": 77.0, + "step": 7649, + "token_acc": 0.9608778625954199, + "train_speed(iter/s)": 0.411221 + }, + { + "epoch": 2.448, + "grad_norm": 0.7281475786974229, + "learning_rate": 4.4872539599422117e-07, + "loss": 0.30598604679107666, + "memory(GiB)": 77.0, + "step": 7650, + "token_acc": 0.8796886058032555, + "train_speed(iter/s)": 0.411183 + }, + { + "epoch": 2.44832, + "grad_norm": 0.6437240407294053, + "learning_rate": 4.4822141512027876e-07, + "loss": 0.2546346187591553, + "memory(GiB)": 77.0, + "step": 7651, + "token_acc": 0.9257923215666065, + "train_speed(iter/s)": 0.411144 + }, + { + "epoch": 2.44864, + "grad_norm": 0.7501883178254479, + "learning_rate": 4.4771768955458844e-07, + "loss": 0.2873365879058838, + "memory(GiB)": 77.0, + "step": 7652, + "token_acc": 0.9436101746858719, + "train_speed(iter/s)": 0.411108 + }, + { + "epoch": 2.44896, + "grad_norm": 0.6775498743115802, + "learning_rate": 4.472142193598297e-07, + "loss": 0.2996160089969635, + "memory(GiB)": 77.0, + "step": 7653, + "token_acc": 0.91015625, + "train_speed(iter/s)": 0.411068 + }, + { + "epoch": 2.44928, + "grad_norm": 0.6741510897147339, + "learning_rate": 4.4671100459865086e-07, + "loss": 0.2742520272731781, + "memory(GiB)": 77.0, + "step": 7654, + "token_acc": 0.9115937595594983, + "train_speed(iter/s)": 0.411034 + }, + { + "epoch": 2.4496, + "grad_norm": 0.6293551258429463, + "learning_rate": 4.462080453336681e-07, + "loss": 0.31620195508003235, + "memory(GiB)": 77.0, + "step": 7655, + "token_acc": 
0.895933014354067, + "train_speed(iter/s)": 0.410992 + }, + { + "epoch": 2.44992, + "grad_norm": 0.7616553986625906, + "learning_rate": 4.4570534162746647e-07, + "loss": 0.29968905448913574, + "memory(GiB)": 77.0, + "step": 7656, + "token_acc": 0.9256530475552579, + "train_speed(iter/s)": 0.410958 + }, + { + "epoch": 2.45024, + "grad_norm": 0.6459402297949824, + "learning_rate": 4.452028935425981e-07, + "loss": 0.25578099489212036, + "memory(GiB)": 77.0, + "step": 7657, + "token_acc": 0.9327981651376147, + "train_speed(iter/s)": 0.410922 + }, + { + "epoch": 2.45056, + "grad_norm": 0.7592989003428358, + "learning_rate": 4.4470070114158437e-07, + "loss": 0.32322609424591064, + "memory(GiB)": 77.0, + "step": 7658, + "token_acc": 0.855360541340664, + "train_speed(iter/s)": 0.410884 + }, + { + "epoch": 2.45088, + "grad_norm": 0.6768297153245326, + "learning_rate": 4.4419876448691417e-07, + "loss": 0.2798707187175751, + "memory(GiB)": 77.0, + "step": 7659, + "token_acc": 0.9250686543742644, + "train_speed(iter/s)": 0.410849 + }, + { + "epoch": 2.4512, + "grad_norm": 0.7104121282110633, + "learning_rate": 4.4369708364104506e-07, + "loss": 0.38813742995262146, + "memory(GiB)": 77.0, + "step": 7660, + "token_acc": 0.8652445369406868, + "train_speed(iter/s)": 0.410814 + }, + { + "epoch": 2.45152, + "grad_norm": 0.7542809194420277, + "learning_rate": 4.4319565866640214e-07, + "loss": 0.30143070220947266, + "memory(GiB)": 77.0, + "step": 7661, + "token_acc": 0.8707926167209555, + "train_speed(iter/s)": 0.410771 + }, + { + "epoch": 2.45184, + "grad_norm": 0.6730631075440184, + "learning_rate": 4.426944896253796e-07, + "loss": 0.27537184953689575, + "memory(GiB)": 77.0, + "step": 7662, + "token_acc": 0.8777110007839038, + "train_speed(iter/s)": 0.410732 + }, + { + "epoch": 2.45216, + "grad_norm": 0.715031483046496, + "learning_rate": 4.4219357658033866e-07, + "loss": 0.30200570821762085, + "memory(GiB)": 77.0, + "step": 7663, + "token_acc": 0.8747161241483724, + "train_speed(iter/s)": 0.410698 + }, + { + "epoch": 2.45248, + "grad_norm": 0.7088636580458246, + "learning_rate": 4.4169291959360956e-07, + "loss": 0.3151225745677948, + "memory(GiB)": 77.0, + "step": 7664, + "token_acc": 0.908359133126935, + "train_speed(iter/s)": 0.410663 + }, + { + "epoch": 2.4528, + "grad_norm": 0.6764352100097236, + "learning_rate": 4.411925187274904e-07, + "loss": 0.2619190216064453, + "memory(GiB)": 77.0, + "step": 7665, + "token_acc": 0.9724437998549673, + "train_speed(iter/s)": 0.410626 + }, + { + "epoch": 2.45312, + "grad_norm": 0.7273800521886818, + "learning_rate": 4.406923740442473e-07, + "loss": 0.29404985904693604, + "memory(GiB)": 77.0, + "step": 7666, + "token_acc": 0.9634228187919464, + "train_speed(iter/s)": 0.410592 + }, + { + "epoch": 2.45344, + "grad_norm": 0.7473713706006087, + "learning_rate": 4.401924856061146e-07, + "loss": 0.3175530433654785, + "memory(GiB)": 77.0, + "step": 7667, + "token_acc": 0.8684475806451613, + "train_speed(iter/s)": 0.410546 + }, + { + "epoch": 2.45376, + "grad_norm": 0.7495303883832093, + "learning_rate": 4.396928534752945e-07, + "loss": 0.3234666883945465, + "memory(GiB)": 77.0, + "step": 7668, + "token_acc": 0.9143052236258109, + "train_speed(iter/s)": 0.41051 + }, + { + "epoch": 2.45408, + "grad_norm": 0.7226599854079631, + "learning_rate": 4.391934777139581e-07, + "loss": 0.28671795129776, + "memory(GiB)": 77.0, + "step": 7669, + "token_acc": 0.9295644114921223, + "train_speed(iter/s)": 0.410476 + }, + { + "epoch": 2.4544, + "grad_norm": 0.7226087804728013, + 
"learning_rate": 4.3869435838424374e-07, + "loss": 0.3455315828323364, + "memory(GiB)": 77.0, + "step": 7670, + "token_acc": 0.9082706766917293, + "train_speed(iter/s)": 0.410437 + }, + { + "epoch": 2.45472, + "grad_norm": 0.7581081672326512, + "learning_rate": 4.381954955482581e-07, + "loss": 0.25106993317604065, + "memory(GiB)": 77.0, + "step": 7671, + "token_acc": 0.9370958259847149, + "train_speed(iter/s)": 0.410401 + }, + { + "epoch": 2.45504, + "grad_norm": 0.7188736130950975, + "learning_rate": 4.376968892680761e-07, + "loss": 0.3098592758178711, + "memory(GiB)": 77.0, + "step": 7672, + "token_acc": 0.9305993690851735, + "train_speed(iter/s)": 0.410367 + }, + { + "epoch": 2.4553599999999998, + "grad_norm": 1.0041688386247496, + "learning_rate": 4.3719853960574097e-07, + "loss": 0.3451231122016907, + "memory(GiB)": 77.0, + "step": 7673, + "token_acc": 0.883126110124334, + "train_speed(iter/s)": 0.410328 + }, + { + "epoch": 2.45568, + "grad_norm": 0.7949691562497017, + "learning_rate": 4.36700446623263e-07, + "loss": 0.357343465089798, + "memory(GiB)": 77.0, + "step": 7674, + "token_acc": 0.8633436680181255, + "train_speed(iter/s)": 0.410288 + }, + { + "epoch": 2.456, + "grad_norm": 0.7030236507306054, + "learning_rate": 4.362026103826214e-07, + "loss": 0.24242356419563293, + "memory(GiB)": 77.0, + "step": 7675, + "token_acc": 0.8533776867963152, + "train_speed(iter/s)": 0.410255 + }, + { + "epoch": 2.45632, + "grad_norm": 0.6849387844167494, + "learning_rate": 4.3570503094576305e-07, + "loss": 0.3679955005645752, + "memory(GiB)": 77.0, + "step": 7676, + "token_acc": 0.8718450401095676, + "train_speed(iter/s)": 0.410218 + }, + { + "epoch": 2.45664, + "grad_norm": 0.7374588600511041, + "learning_rate": 4.352077083746037e-07, + "loss": 0.29561173915863037, + "memory(GiB)": 77.0, + "step": 7677, + "token_acc": 0.8728059629718682, + "train_speed(iter/s)": 0.410183 + }, + { + "epoch": 2.45696, + "grad_norm": 0.699049425263619, + "learning_rate": 4.347106427310266e-07, + "loss": 0.31606096029281616, + "memory(GiB)": 77.0, + "step": 7678, + "token_acc": 0.8925081433224755, + "train_speed(iter/s)": 0.410137 + }, + { + "epoch": 2.45728, + "grad_norm": 0.723538625517655, + "learning_rate": 4.342138340768823e-07, + "loss": 0.325777143239975, + "memory(GiB)": 77.0, + "step": 7679, + "token_acc": 0.9479117785077429, + "train_speed(iter/s)": 0.410103 + }, + { + "epoch": 2.4576000000000002, + "grad_norm": 0.7555475765907371, + "learning_rate": 4.3371728247399046e-07, + "loss": 0.2820972204208374, + "memory(GiB)": 77.0, + "step": 7680, + "token_acc": 0.916058394160584, + "train_speed(iter/s)": 0.410069 + }, + { + "epoch": 2.45792, + "grad_norm": 0.6637035550590293, + "learning_rate": 4.3322098798413814e-07, + "loss": 0.3091801106929779, + "memory(GiB)": 77.0, + "step": 7681, + "token_acc": 0.9262159934047816, + "train_speed(iter/s)": 0.410033 + }, + { + "epoch": 2.45824, + "grad_norm": 0.7206619263476188, + "learning_rate": 4.327249506690806e-07, + "loss": 0.2712412178516388, + "memory(GiB)": 77.0, + "step": 7682, + "token_acc": 0.9567584881486226, + "train_speed(iter/s)": 0.409994 + }, + { + "epoch": 2.45856, + "grad_norm": 0.7625612264660591, + "learning_rate": 4.3222917059054127e-07, + "loss": 0.34410998225212097, + "memory(GiB)": 77.0, + "step": 7683, + "token_acc": 0.9168017287952458, + "train_speed(iter/s)": 0.409958 + }, + { + "epoch": 2.45888, + "grad_norm": 0.6874337340115633, + "learning_rate": 4.3173364781021106e-07, + "loss": 0.26875603199005127, + "memory(GiB)": 77.0, + "step": 7684, + 
"token_acc": 0.9516975848792439, + "train_speed(iter/s)": 0.40992 + }, + { + "epoch": 2.4592, + "grad_norm": 0.7342870751078843, + "learning_rate": 4.3123838238974924e-07, + "loss": 0.3111917972564697, + "memory(GiB)": 77.0, + "step": 7685, + "token_acc": 0.88064, + "train_speed(iter/s)": 0.409884 + }, + { + "epoch": 2.45952, + "grad_norm": 0.6581770622814572, + "learning_rate": 4.3074337439078295e-07, + "loss": 0.24321524798870087, + "memory(GiB)": 77.0, + "step": 7686, + "token_acc": 0.9348314606741573, + "train_speed(iter/s)": 0.40985 + }, + { + "epoch": 2.45984, + "grad_norm": 0.6839982535556154, + "learning_rate": 4.302486238749079e-07, + "loss": 0.23484152555465698, + "memory(GiB)": 77.0, + "step": 7687, + "token_acc": 0.9268849961919269, + "train_speed(iter/s)": 0.409816 + }, + { + "epoch": 2.46016, + "grad_norm": 0.8055136366226975, + "learning_rate": 4.2975413090368706e-07, + "loss": 0.23110312223434448, + "memory(GiB)": 77.0, + "step": 7688, + "token_acc": 0.9481765834932822, + "train_speed(iter/s)": 0.409779 + }, + { + "epoch": 2.46048, + "grad_norm": 0.7255072051711795, + "learning_rate": 4.2925989553865065e-07, + "loss": 0.3222942054271698, + "memory(GiB)": 77.0, + "step": 7689, + "token_acc": 0.8517501715854495, + "train_speed(iter/s)": 0.409743 + }, + { + "epoch": 2.4608, + "grad_norm": 0.679715885950861, + "learning_rate": 4.287659178412984e-07, + "loss": 0.30327108502388, + "memory(GiB)": 77.0, + "step": 7690, + "token_acc": 0.9144994656216602, + "train_speed(iter/s)": 0.409702 + }, + { + "epoch": 2.46112, + "grad_norm": 0.7130570002660385, + "learning_rate": 4.282721978730972e-07, + "loss": 0.3665868043899536, + "memory(GiB)": 77.0, + "step": 7691, + "token_acc": 0.9234323432343234, + "train_speed(iter/s)": 0.409668 + }, + { + "epoch": 2.46144, + "grad_norm": 0.7020850761839754, + "learning_rate": 4.2777873569548155e-07, + "loss": 0.2782079577445984, + "memory(GiB)": 77.0, + "step": 7692, + "token_acc": 0.9538622129436326, + "train_speed(iter/s)": 0.409634 + }, + { + "epoch": 2.46176, + "grad_norm": 0.7332850643296913, + "learning_rate": 4.272855313698543e-07, + "loss": 0.2590205669403076, + "memory(GiB)": 77.0, + "step": 7693, + "token_acc": 0.9300164024056862, + "train_speed(iter/s)": 0.409599 + }, + { + "epoch": 2.46208, + "grad_norm": 0.6982573495608997, + "learning_rate": 4.2679258495758687e-07, + "loss": 0.3162294030189514, + "memory(GiB)": 77.0, + "step": 7694, + "token_acc": 0.8428136109700356, + "train_speed(iter/s)": 0.409561 + }, + { + "epoch": 2.4624, + "grad_norm": 0.8892408765895928, + "learning_rate": 4.262998965200174e-07, + "loss": 0.33883827924728394, + "memory(GiB)": 77.0, + "step": 7695, + "token_acc": 0.8856209150326797, + "train_speed(iter/s)": 0.409525 + }, + { + "epoch": 2.46272, + "grad_norm": 0.7772163871022553, + "learning_rate": 4.2580746611845273e-07, + "loss": 0.4619014263153076, + "memory(GiB)": 77.0, + "step": 7696, + "token_acc": 0.8965344048216977, + "train_speed(iter/s)": 0.409484 + }, + { + "epoch": 2.46304, + "grad_norm": 0.7343456510914946, + "learning_rate": 4.2531529381416693e-07, + "loss": 0.2422061562538147, + "memory(GiB)": 77.0, + "step": 7697, + "token_acc": 0.9009779951100244, + "train_speed(iter/s)": 0.409444 + }, + { + "epoch": 2.4633599999999998, + "grad_norm": 0.7045173248901486, + "learning_rate": 4.248233796684023e-07, + "loss": 0.306294322013855, + "memory(GiB)": 77.0, + "step": 7698, + "token_acc": 0.9276712328767124, + "train_speed(iter/s)": 0.409407 + }, + { + "epoch": 2.46368, + "grad_norm": 0.6448616455063375, + 
"learning_rate": 4.2433172374236915e-07, + "loss": 0.23622199892997742, + "memory(GiB)": 77.0, + "step": 7699, + "token_acc": 0.9008308764406325, + "train_speed(iter/s)": 0.409372 + }, + { + "epoch": 2.464, + "grad_norm": 0.699640428216141, + "learning_rate": 4.2384032609724566e-07, + "loss": 0.3286135792732239, + "memory(GiB)": 77.0, + "step": 7700, + "token_acc": 0.9254575707154742, + "train_speed(iter/s)": 0.409328 + }, + { + "epoch": 2.46432, + "grad_norm": 0.7637529880306635, + "learning_rate": 4.233491867941777e-07, + "loss": 0.2911044657230377, + "memory(GiB)": 77.0, + "step": 7701, + "token_acc": 0.9051459293394777, + "train_speed(iter/s)": 0.409294 + }, + { + "epoch": 2.46464, + "grad_norm": 0.7192499118252261, + "learning_rate": 4.2285830589427924e-07, + "loss": 0.256476491689682, + "memory(GiB)": 77.0, + "step": 7702, + "token_acc": 0.8968031407739765, + "train_speed(iter/s)": 0.409257 + }, + { + "epoch": 2.46496, + "grad_norm": 0.7625206431789961, + "learning_rate": 4.223676834586313e-07, + "loss": 0.325825572013855, + "memory(GiB)": 77.0, + "step": 7703, + "token_acc": 0.8910110390748204, + "train_speed(iter/s)": 0.409214 + }, + { + "epoch": 2.46528, + "grad_norm": 0.7668968022254021, + "learning_rate": 4.2187731954828335e-07, + "loss": 0.3136866092681885, + "memory(GiB)": 77.0, + "step": 7704, + "token_acc": 0.9215399610136452, + "train_speed(iter/s)": 0.409178 + }, + { + "epoch": 2.4656000000000002, + "grad_norm": 0.7630599457721376, + "learning_rate": 4.213872142242531e-07, + "loss": 0.3210263252258301, + "memory(GiB)": 77.0, + "step": 7705, + "token_acc": 0.9180879173672832, + "train_speed(iter/s)": 0.409147 + }, + { + "epoch": 2.46592, + "grad_norm": 0.8124488747914331, + "learning_rate": 4.208973675475253e-07, + "loss": 0.3078575134277344, + "memory(GiB)": 77.0, + "step": 7706, + "token_acc": 0.9165801315334026, + "train_speed(iter/s)": 0.409113 + }, + { + "epoch": 2.46624, + "grad_norm": 0.7392671542164451, + "learning_rate": 4.2040777957905314e-07, + "loss": 0.2825448215007782, + "memory(GiB)": 77.0, + "step": 7707, + "token_acc": 0.9213955443463641, + "train_speed(iter/s)": 0.40907 + }, + { + "epoch": 2.46656, + "grad_norm": 0.713448419584757, + "learning_rate": 4.199184503797574e-07, + "loss": 0.30232393741607666, + "memory(GiB)": 77.0, + "step": 7708, + "token_acc": 0.8752783964365256, + "train_speed(iter/s)": 0.409034 + }, + { + "epoch": 2.46688, + "grad_norm": 0.6837020697525729, + "learning_rate": 4.1942938001052647e-07, + "loss": 0.29010969400405884, + "memory(GiB)": 77.0, + "step": 7709, + "token_acc": 0.9166000532907008, + "train_speed(iter/s)": 0.409002 + }, + { + "epoch": 2.4672, + "grad_norm": 0.8466919814624398, + "learning_rate": 4.189405685322162e-07, + "loss": 0.34900158643722534, + "memory(GiB)": 77.0, + "step": 7710, + "token_acc": 0.95625, + "train_speed(iter/s)": 0.408965 + }, + { + "epoch": 2.46752, + "grad_norm": 0.7665428764711845, + "learning_rate": 4.1845201600565156e-07, + "loss": 0.36165738105773926, + "memory(GiB)": 77.0, + "step": 7711, + "token_acc": 0.909121395036888, + "train_speed(iter/s)": 0.40893 + }, + { + "epoch": 2.46784, + "grad_norm": 0.6854734408644786, + "learning_rate": 4.1796372249162414e-07, + "loss": 0.3657244145870209, + "memory(GiB)": 77.0, + "step": 7712, + "token_acc": 0.9097164303586321, + "train_speed(iter/s)": 0.408892 + }, + { + "epoch": 2.46816, + "grad_norm": 0.6912774679589092, + "learning_rate": 4.174756880508934e-07, + "loss": 0.34124672412872314, + "memory(GiB)": 77.0, + "step": 7713, + "token_acc": 
0.8351570773032121, + "train_speed(iter/s)": 0.408859 + }, + { + "epoch": 2.46848, + "grad_norm": 0.6962960287621289, + "learning_rate": 4.1698791274418724e-07, + "loss": 0.27319660782814026, + "memory(GiB)": 77.0, + "step": 7714, + "token_acc": 0.9250139899272524, + "train_speed(iter/s)": 0.408825 + }, + { + "epoch": 2.4688, + "grad_norm": 0.7998170852855567, + "learning_rate": 4.165003966322001e-07, + "loss": 0.3199734687805176, + "memory(GiB)": 77.0, + "step": 7715, + "token_acc": 0.9138998311761396, + "train_speed(iter/s)": 0.408793 + }, + { + "epoch": 2.46912, + "grad_norm": 0.6510061440283472, + "learning_rate": 4.160131397755962e-07, + "loss": 0.25616389513015747, + "memory(GiB)": 77.0, + "step": 7716, + "token_acc": 0.8861418347430059, + "train_speed(iter/s)": 0.408754 + }, + { + "epoch": 2.46944, + "grad_norm": 0.6707107749714276, + "learning_rate": 4.155261422350043e-07, + "loss": 0.30779895186424255, + "memory(GiB)": 77.0, + "step": 7717, + "token_acc": 0.9298611111111111, + "train_speed(iter/s)": 0.408721 + }, + { + "epoch": 2.46976, + "grad_norm": 0.7866146295926083, + "learning_rate": 4.150394040710243e-07, + "loss": 0.27875107526779175, + "memory(GiB)": 77.0, + "step": 7718, + "token_acc": 0.871261378413524, + "train_speed(iter/s)": 0.408689 + }, + { + "epoch": 2.47008, + "grad_norm": 0.7404897339054606, + "learning_rate": 4.1455292534422126e-07, + "loss": 0.2798263132572174, + "memory(GiB)": 77.0, + "step": 7719, + "token_acc": 0.88470066518847, + "train_speed(iter/s)": 0.408655 + }, + { + "epoch": 2.4704, + "grad_norm": 0.7081156417463101, + "learning_rate": 4.140667061151299e-07, + "loss": 0.2816838026046753, + "memory(GiB)": 77.0, + "step": 7720, + "token_acc": 0.9353632478632479, + "train_speed(iter/s)": 0.408618 + }, + { + "epoch": 2.47072, + "grad_norm": 0.7771432362415338, + "learning_rate": 4.135807464442515e-07, + "loss": 0.35069456696510315, + "memory(GiB)": 77.0, + "step": 7721, + "token_acc": 0.8604184580108913, + "train_speed(iter/s)": 0.408582 + }, + { + "epoch": 2.47104, + "grad_norm": 0.7762426088372397, + "learning_rate": 4.1309504639205496e-07, + "loss": 0.2641333043575287, + "memory(GiB)": 77.0, + "step": 7722, + "token_acc": 0.9245994344957588, + "train_speed(iter/s)": 0.40855 + }, + { + "epoch": 2.47136, + "grad_norm": 0.7046545787082182, + "learning_rate": 4.126096060189777e-07, + "loss": 0.24654580652713776, + "memory(GiB)": 77.0, + "step": 7723, + "token_acc": 0.9243151652075685, + "train_speed(iter/s)": 0.40852 + }, + { + "epoch": 2.47168, + "grad_norm": 0.7464929528848699, + "learning_rate": 4.1212442538542394e-07, + "loss": 0.40546029806137085, + "memory(GiB)": 77.0, + "step": 7724, + "token_acc": 0.8836245188036719, + "train_speed(iter/s)": 0.408481 + }, + { + "epoch": 2.472, + "grad_norm": 0.7458630931059199, + "learning_rate": 4.116395045517665e-07, + "loss": 0.34057825803756714, + "memory(GiB)": 77.0, + "step": 7725, + "token_acc": 0.8978679504814305, + "train_speed(iter/s)": 0.408444 + }, + { + "epoch": 2.47232, + "grad_norm": 0.6872204467709194, + "learning_rate": 4.1115484357834496e-07, + "loss": 0.222429096698761, + "memory(GiB)": 77.0, + "step": 7726, + "token_acc": 0.9615384615384616, + "train_speed(iter/s)": 0.408412 + }, + { + "epoch": 2.47264, + "grad_norm": 0.7292151624818096, + "learning_rate": 4.106704425254665e-07, + "loss": 0.3022392988204956, + "memory(GiB)": 77.0, + "step": 7727, + "token_acc": 0.899318859088974, + "train_speed(iter/s)": 0.408377 + }, + { + "epoch": 2.47296, + "grad_norm": 0.6586366418023525, + "learning_rate": 
4.1018630145340744e-07, + "loss": 0.2555122971534729, + "memory(GiB)": 77.0, + "step": 7728, + "token_acc": 0.966715976331361, + "train_speed(iter/s)": 0.408344 + }, + { + "epoch": 2.47328, + "grad_norm": 0.7892579883296199, + "learning_rate": 4.0970242042241063e-07, + "loss": 0.37433987855911255, + "memory(GiB)": 77.0, + "step": 7729, + "token_acc": 0.8984394352241764, + "train_speed(iter/s)": 0.408306 + }, + { + "epoch": 2.4736000000000002, + "grad_norm": 0.7660396200741706, + "learning_rate": 4.092187994926861e-07, + "loss": 0.3610135316848755, + "memory(GiB)": 77.0, + "step": 7730, + "token_acc": 0.9449981543004798, + "train_speed(iter/s)": 0.408264 + }, + { + "epoch": 2.47392, + "grad_norm": 0.7558896451162249, + "learning_rate": 4.0873543872441303e-07, + "loss": 0.3690139949321747, + "memory(GiB)": 77.0, + "step": 7731, + "token_acc": 0.8986556359875905, + "train_speed(iter/s)": 0.408229 + }, + { + "epoch": 2.47424, + "grad_norm": 0.7007711467191541, + "learning_rate": 4.082523381777359e-07, + "loss": 0.3305196762084961, + "memory(GiB)": 77.0, + "step": 7732, + "token_acc": 0.8852119958634953, + "train_speed(iter/s)": 0.408188 + }, + { + "epoch": 2.47456, + "grad_norm": 0.7895954520780399, + "learning_rate": 4.0776949791276914e-07, + "loss": 0.34464240074157715, + "memory(GiB)": 77.0, + "step": 7733, + "token_acc": 0.831627250264737, + "train_speed(iter/s)": 0.408144 + }, + { + "epoch": 2.47488, + "grad_norm": 0.7563692795931771, + "learning_rate": 4.072869179895933e-07, + "loss": 0.310009241104126, + "memory(GiB)": 77.0, + "step": 7734, + "token_acc": 0.913730255164034, + "train_speed(iter/s)": 0.408112 + }, + { + "epoch": 2.4752, + "grad_norm": 0.7294125230610855, + "learning_rate": 4.068045984682575e-07, + "loss": 0.23303401470184326, + "memory(GiB)": 77.0, + "step": 7735, + "token_acc": 0.9731285988483686, + "train_speed(iter/s)": 0.408079 + }, + { + "epoch": 2.47552, + "grad_norm": 0.7026420768075726, + "learning_rate": 4.0632253940877805e-07, + "loss": 0.3452553153038025, + "memory(GiB)": 77.0, + "step": 7736, + "token_acc": 0.8864741206748642, + "train_speed(iter/s)": 0.408047 + }, + { + "epoch": 2.47584, + "grad_norm": 0.7430945116550718, + "learning_rate": 4.0584074087113847e-07, + "loss": 0.34180575609207153, + "memory(GiB)": 77.0, + "step": 7737, + "token_acc": 0.8801157503100455, + "train_speed(iter/s)": 0.408012 + }, + { + "epoch": 2.47616, + "grad_norm": 0.6408528444304674, + "learning_rate": 4.0535920291529056e-07, + "loss": 0.2600710988044739, + "memory(GiB)": 77.0, + "step": 7738, + "token_acc": 0.9536855838225701, + "train_speed(iter/s)": 0.40797 + }, + { + "epoch": 2.47648, + "grad_norm": 0.7304074684823526, + "learning_rate": 4.0487792560115315e-07, + "loss": 0.3282839059829712, + "memory(GiB)": 77.0, + "step": 7739, + "token_acc": 0.8548812664907651, + "train_speed(iter/s)": 0.40793 + }, + { + "epoch": 2.4768, + "grad_norm": 0.7093969369978402, + "learning_rate": 4.0439690898861324e-07, + "loss": 0.32274043560028076, + "memory(GiB)": 77.0, + "step": 7740, + "token_acc": 0.9660048134777377, + "train_speed(iter/s)": 0.407897 + }, + { + "epoch": 2.47712, + "grad_norm": 0.7419664833253163, + "learning_rate": 4.039161531375244e-07, + "loss": 0.2730860114097595, + "memory(GiB)": 77.0, + "step": 7741, + "token_acc": 0.8574777308954524, + "train_speed(iter/s)": 0.407866 + }, + { + "epoch": 2.47744, + "grad_norm": 0.6989482827231931, + "learning_rate": 4.0343565810770896e-07, + "loss": 0.2622404098510742, + "memory(GiB)": 77.0, + "step": 7742, + "token_acc": 
0.9262779347546879, + "train_speed(iter/s)": 0.407827 + }, + { + "epoch": 2.47776, + "grad_norm": 1.0966105662502872, + "learning_rate": 4.029554239589556e-07, + "loss": 0.26379650831222534, + "memory(GiB)": 77.0, + "step": 7743, + "token_acc": 0.9614692113791862, + "train_speed(iter/s)": 0.407795 + }, + { + "epoch": 2.47808, + "grad_norm": 0.8269354288657226, + "learning_rate": 4.024754507510212e-07, + "loss": 0.3608385920524597, + "memory(GiB)": 77.0, + "step": 7744, + "token_acc": 0.881815213842638, + "train_speed(iter/s)": 0.407763 + }, + { + "epoch": 2.4784, + "grad_norm": 0.6967960646601696, + "learning_rate": 4.019957385436313e-07, + "loss": 0.2508692145347595, + "memory(GiB)": 77.0, + "step": 7745, + "token_acc": 0.932684722587431, + "train_speed(iter/s)": 0.407728 + }, + { + "epoch": 2.47872, + "grad_norm": 0.7091441917847157, + "learning_rate": 4.015162873964762e-07, + "loss": 0.3125118017196655, + "memory(GiB)": 77.0, + "step": 7746, + "token_acc": 0.8550412630068174, + "train_speed(iter/s)": 0.407689 + }, + { + "epoch": 2.47904, + "grad_norm": 0.6936580874970816, + "learning_rate": 4.01037097369216e-07, + "loss": 0.26634976267814636, + "memory(GiB)": 77.0, + "step": 7747, + "token_acc": 0.9175014731879788, + "train_speed(iter/s)": 0.407653 + }, + { + "epoch": 2.47936, + "grad_norm": 0.7721589411722376, + "learning_rate": 4.005581685214774e-07, + "loss": 0.36718279123306274, + "memory(GiB)": 77.0, + "step": 7748, + "token_acc": 0.9028514588859416, + "train_speed(iter/s)": 0.407619 + }, + { + "epoch": 2.47968, + "grad_norm": 0.6638448306489406, + "learning_rate": 4.00079500912855e-07, + "loss": 0.2641375660896301, + "memory(GiB)": 77.0, + "step": 7749, + "token_acc": 0.8879475266466248, + "train_speed(iter/s)": 0.407588 + }, + { + "epoch": 2.48, + "grad_norm": 0.7011643889997482, + "learning_rate": 3.996010946029102e-07, + "loss": 0.2831626534461975, + "memory(GiB)": 77.0, + "step": 7750, + "token_acc": 0.9422667882102704, + "train_speed(iter/s)": 0.407555 + }, + { + "epoch": 2.48032, + "grad_norm": 0.8084377541589607, + "learning_rate": 3.991229496511728e-07, + "loss": 0.31713271141052246, + "memory(GiB)": 77.0, + "step": 7751, + "token_acc": 0.8714799281006591, + "train_speed(iter/s)": 0.407522 + }, + { + "epoch": 2.48064, + "grad_norm": 0.6758298810128591, + "learning_rate": 3.986450661171398e-07, + "loss": 0.22427314519882202, + "memory(GiB)": 77.0, + "step": 7752, + "token_acc": 0.9490952955367913, + "train_speed(iter/s)": 0.407484 + }, + { + "epoch": 2.48096, + "grad_norm": 0.7567946727188637, + "learning_rate": 3.981674440602751e-07, + "loss": 0.35309648513793945, + "memory(GiB)": 77.0, + "step": 7753, + "token_acc": 0.8537360890302067, + "train_speed(iter/s)": 0.407449 + }, + { + "epoch": 2.48128, + "grad_norm": 0.7392432156512626, + "learning_rate": 3.9769008354001076e-07, + "loss": 0.2811676561832428, + "memory(GiB)": 77.0, + "step": 7754, + "token_acc": 0.9257933667382486, + "train_speed(iter/s)": 0.407414 + }, + { + "epoch": 2.4816, + "grad_norm": 0.6919698462101299, + "learning_rate": 3.9721298461574575e-07, + "loss": 0.23626470565795898, + "memory(GiB)": 77.0, + "step": 7755, + "token_acc": 0.8852560148056755, + "train_speed(iter/s)": 0.407375 + }, + { + "epoch": 2.48192, + "grad_norm": 0.705668695014707, + "learning_rate": 3.96736147346847e-07, + "loss": 0.2667505145072937, + "memory(GiB)": 77.0, + "step": 7756, + "token_acc": 0.9465463380770566, + "train_speed(iter/s)": 0.407343 + }, + { + "epoch": 2.48224, + "grad_norm": 0.7628796137201745, + "learning_rate": 
3.9625957179264846e-07, + "loss": 0.2609021067619324, + "memory(GiB)": 77.0, + "step": 7757, + "token_acc": 0.9405356332274172, + "train_speed(iter/s)": 0.407311 + }, + { + "epoch": 2.48256, + "grad_norm": 0.7424341685043758, + "learning_rate": 3.9578325801245183e-07, + "loss": 0.3396444320678711, + "memory(GiB)": 77.0, + "step": 7758, + "token_acc": 0.935163996948894, + "train_speed(iter/s)": 0.407278 + }, + { + "epoch": 2.4828799999999998, + "grad_norm": 0.8353227757883587, + "learning_rate": 3.953072060655258e-07, + "loss": 0.22557516396045685, + "memory(GiB)": 77.0, + "step": 7759, + "token_acc": 0.9222654081066074, + "train_speed(iter/s)": 0.407244 + }, + { + "epoch": 2.4832, + "grad_norm": 0.7212280133510277, + "learning_rate": 3.948314160111072e-07, + "loss": 0.26963233947753906, + "memory(GiB)": 77.0, + "step": 7760, + "token_acc": 0.8726880394574599, + "train_speed(iter/s)": 0.407214 + }, + { + "epoch": 2.48352, + "grad_norm": 0.780486268482888, + "learning_rate": 3.9435588790839974e-07, + "loss": 0.3203272819519043, + "memory(GiB)": 77.0, + "step": 7761, + "token_acc": 0.8954026535020055, + "train_speed(iter/s)": 0.407175 + }, + { + "epoch": 2.48384, + "grad_norm": 0.7344906822576989, + "learning_rate": 3.9388062181657444e-07, + "loss": 0.38041990995407104, + "memory(GiB)": 77.0, + "step": 7762, + "token_acc": 0.8849703361898484, + "train_speed(iter/s)": 0.407131 + }, + { + "epoch": 2.48416, + "grad_norm": 0.7275032744185327, + "learning_rate": 3.934056177947701e-07, + "loss": 0.30345264077186584, + "memory(GiB)": 77.0, + "step": 7763, + "token_acc": 0.9402369912416281, + "train_speed(iter/s)": 0.407098 + }, + { + "epoch": 2.48448, + "grad_norm": 0.6765901527744492, + "learning_rate": 3.929308759020928e-07, + "loss": 0.2349887490272522, + "memory(GiB)": 77.0, + "step": 7764, + "token_acc": 0.9099292960344297, + "train_speed(iter/s)": 0.407066 + }, + { + "epoch": 2.4848, + "grad_norm": 0.7029937387338749, + "learning_rate": 3.924563961976155e-07, + "loss": 0.3352825343608856, + "memory(GiB)": 77.0, + "step": 7765, + "token_acc": 0.9083073976538185, + "train_speed(iter/s)": 0.407032 + }, + { + "epoch": 2.48512, + "grad_norm": 0.7321870302753597, + "learning_rate": 3.9198217874037965e-07, + "loss": 0.3455114960670471, + "memory(GiB)": 77.0, + "step": 7766, + "token_acc": 0.909245122985581, + "train_speed(iter/s)": 0.406999 + }, + { + "epoch": 2.48544, + "grad_norm": 0.7407439709730352, + "learning_rate": 3.9150822358939274e-07, + "loss": 0.2589331269264221, + "memory(GiB)": 77.0, + "step": 7767, + "token_acc": 0.8419584753641153, + "train_speed(iter/s)": 0.406968 + }, + { + "epoch": 2.48576, + "grad_norm": 0.7205213575288603, + "learning_rate": 3.910345308036306e-07, + "loss": 0.23370787501335144, + "memory(GiB)": 77.0, + "step": 7768, + "token_acc": 0.9574660633484163, + "train_speed(iter/s)": 0.406936 + }, + { + "epoch": 2.48608, + "grad_norm": 0.7088524348203209, + "learning_rate": 3.9056110044203594e-07, + "loss": 0.27556493878364563, + "memory(GiB)": 77.0, + "step": 7769, + "token_acc": 0.9397377710539586, + "train_speed(iter/s)": 0.406902 + }, + { + "epoch": 2.4864, + "grad_norm": 0.7771828039510071, + "learning_rate": 3.900879325635193e-07, + "loss": 0.3234988749027252, + "memory(GiB)": 77.0, + "step": 7770, + "token_acc": 0.8598505072076882, + "train_speed(iter/s)": 0.406867 + }, + { + "epoch": 2.48672, + "grad_norm": 0.7989276775439701, + "learning_rate": 3.8961502722695774e-07, + "loss": 0.3222135901451111, + "memory(GiB)": 77.0, + "step": 7771, + "token_acc": 
0.8209441128594682, + "train_speed(iter/s)": 0.406835 + }, + { + "epoch": 2.48704, + "grad_norm": 0.7121925048224368, + "learning_rate": 3.8914238449119623e-07, + "loss": 0.31165432929992676, + "memory(GiB)": 77.0, + "step": 7772, + "token_acc": 0.9267807401512137, + "train_speed(iter/s)": 0.406796 + }, + { + "epoch": 2.48736, + "grad_norm": 0.6649940423495647, + "learning_rate": 3.886700044150474e-07, + "loss": 0.3177582323551178, + "memory(GiB)": 77.0, + "step": 7773, + "token_acc": 0.9103405185879413, + "train_speed(iter/s)": 0.40676 + }, + { + "epoch": 2.48768, + "grad_norm": 0.7041017542892847, + "learning_rate": 3.8819788705729e-07, + "loss": 0.29063716530799866, + "memory(GiB)": 77.0, + "step": 7774, + "token_acc": 0.9711538461538461, + "train_speed(iter/s)": 0.406725 + }, + { + "epoch": 2.488, + "grad_norm": 0.6735756424187931, + "learning_rate": 3.8772603247667155e-07, + "loss": 0.280782014131546, + "memory(GiB)": 77.0, + "step": 7775, + "token_acc": 0.9701876124389617, + "train_speed(iter/s)": 0.406695 + }, + { + "epoch": 2.48832, + "grad_norm": 0.7598982571301144, + "learning_rate": 3.872544407319059e-07, + "loss": 0.28861260414123535, + "memory(GiB)": 77.0, + "step": 7776, + "token_acc": 0.8839736127279783, + "train_speed(iter/s)": 0.406661 + }, + { + "epoch": 2.48864, + "grad_norm": 0.7638426624325887, + "learning_rate": 3.867831118816745e-07, + "loss": 0.29766565561294556, + "memory(GiB)": 77.0, + "step": 7777, + "token_acc": 0.9278942115768463, + "train_speed(iter/s)": 0.406622 + }, + { + "epoch": 2.48896, + "grad_norm": 0.701149115577949, + "learning_rate": 3.8631204598462564e-07, + "loss": 0.28187721967697144, + "memory(GiB)": 77.0, + "step": 7778, + "token_acc": 0.9298998569384835, + "train_speed(iter/s)": 0.406587 + }, + { + "epoch": 2.48928, + "grad_norm": 0.760705863350503, + "learning_rate": 3.8584124309937605e-07, + "loss": 0.29752498865127563, + "memory(GiB)": 77.0, + "step": 7779, + "token_acc": 0.9572014351614556, + "train_speed(iter/s)": 0.406557 + }, + { + "epoch": 2.4896, + "grad_norm": 0.7019893279155872, + "learning_rate": 3.8537070328450833e-07, + "loss": 0.3640035390853882, + "memory(GiB)": 77.0, + "step": 7780, + "token_acc": 0.9006993006993007, + "train_speed(iter/s)": 0.406523 + }, + { + "epoch": 2.48992, + "grad_norm": 0.7129279780441675, + "learning_rate": 3.8490042659857343e-07, + "loss": 0.3754693269729614, + "memory(GiB)": 77.0, + "step": 7781, + "token_acc": 0.8414884868421053, + "train_speed(iter/s)": 0.40649 + }, + { + "epoch": 2.49024, + "grad_norm": 0.6738094463185168, + "learning_rate": 3.8443041310008867e-07, + "loss": 0.2790416479110718, + "memory(GiB)": 77.0, + "step": 7782, + "token_acc": 0.9257319685789098, + "train_speed(iter/s)": 0.40645 + }, + { + "epoch": 2.49056, + "grad_norm": 0.7012502050669622, + "learning_rate": 3.839606628475395e-07, + "loss": 0.3443412780761719, + "memory(GiB)": 77.0, + "step": 7783, + "token_acc": 0.8594722798695523, + "train_speed(iter/s)": 0.406415 + }, + { + "epoch": 2.4908799999999998, + "grad_norm": 0.6803069539561188, + "learning_rate": 3.8349117589937813e-07, + "loss": 0.2669760584831238, + "memory(GiB)": 77.0, + "step": 7784, + "token_acc": 0.8675263774912075, + "train_speed(iter/s)": 0.406382 + }, + { + "epoch": 2.4912, + "grad_norm": 0.7612887104818234, + "learning_rate": 3.8302195231402384e-07, + "loss": 0.2930690050125122, + "memory(GiB)": 77.0, + "step": 7785, + "token_acc": 0.9071390230810521, + "train_speed(iter/s)": 0.40635 + }, + { + "epoch": 2.49152, + "grad_norm": 0.7654570873294274, + 
"learning_rate": 3.825529921498636e-07, + "loss": 0.3721054494380951, + "memory(GiB)": 77.0, + "step": 7786, + "token_acc": 0.9213813372520205, + "train_speed(iter/s)": 0.406317 + }, + { + "epoch": 2.49184, + "grad_norm": 0.7685148365957888, + "learning_rate": 3.820842954652512e-07, + "loss": 0.24476604163646698, + "memory(GiB)": 77.0, + "step": 7787, + "token_acc": 0.8921475875118259, + "train_speed(iter/s)": 0.406282 + }, + { + "epoch": 2.49216, + "grad_norm": 0.7076666518900072, + "learning_rate": 3.816158623185087e-07, + "loss": 0.269769549369812, + "memory(GiB)": 77.0, + "step": 7788, + "token_acc": 0.8701812191103789, + "train_speed(iter/s)": 0.406247 + }, + { + "epoch": 2.49248, + "grad_norm": 0.7191800108134858, + "learning_rate": 3.811476927679228e-07, + "loss": 0.29104506969451904, + "memory(GiB)": 77.0, + "step": 7789, + "token_acc": 0.8997715591090806, + "train_speed(iter/s)": 0.406217 + }, + { + "epoch": 2.4928, + "grad_norm": 0.7196778169571042, + "learning_rate": 3.806797868717493e-07, + "loss": 0.2770211696624756, + "memory(GiB)": 77.0, + "step": 7790, + "token_acc": 0.8940340909090909, + "train_speed(iter/s)": 0.406181 + }, + { + "epoch": 2.4931200000000002, + "grad_norm": 0.7609192848904364, + "learning_rate": 3.802121446882126e-07, + "loss": 0.281100332736969, + "memory(GiB)": 77.0, + "step": 7791, + "token_acc": 0.9074973031283711, + "train_speed(iter/s)": 0.406147 + }, + { + "epoch": 2.49344, + "grad_norm": 0.7600685203997565, + "learning_rate": 3.7974476627550135e-07, + "loss": 0.3183240592479706, + "memory(GiB)": 77.0, + "step": 7792, + "token_acc": 0.8785987517616267, + "train_speed(iter/s)": 0.406113 + }, + { + "epoch": 2.49376, + "grad_norm": 0.7279932397304459, + "learning_rate": 3.792776516917732e-07, + "loss": 0.29821866750717163, + "memory(GiB)": 77.0, + "step": 7793, + "token_acc": 0.9508000955337951, + "train_speed(iter/s)": 0.406079 + }, + { + "epoch": 2.49408, + "grad_norm": 0.8108282638587408, + "learning_rate": 3.788108009951519e-07, + "loss": 0.3758211135864258, + "memory(GiB)": 77.0, + "step": 7794, + "token_acc": 0.8288272157564907, + "train_speed(iter/s)": 0.406048 + }, + { + "epoch": 2.4944, + "grad_norm": 0.769671371973866, + "learning_rate": 3.783442142437296e-07, + "loss": 0.4089016914367676, + "memory(GiB)": 77.0, + "step": 7795, + "token_acc": 0.8709379128137384, + "train_speed(iter/s)": 0.40601 + }, + { + "epoch": 2.49472, + "grad_norm": 0.7097012104883712, + "learning_rate": 3.7787789149556443e-07, + "loss": 0.2946886122226715, + "memory(GiB)": 77.0, + "step": 7796, + "token_acc": 0.9395719681074276, + "train_speed(iter/s)": 0.405972 + }, + { + "epoch": 2.49504, + "grad_norm": 0.804972756377302, + "learning_rate": 3.774118328086826e-07, + "loss": 0.3309824466705322, + "memory(GiB)": 77.0, + "step": 7797, + "token_acc": 0.9637397795947387, + "train_speed(iter/s)": 0.405936 + }, + { + "epoch": 2.49536, + "grad_norm": 0.7688769197825642, + "learning_rate": 3.7694603824107667e-07, + "loss": 0.3529861271381378, + "memory(GiB)": 77.0, + "step": 7798, + "token_acc": 0.8663188783827844, + "train_speed(iter/s)": 0.405903 + }, + { + "epoch": 2.49568, + "grad_norm": 0.7994898394651827, + "learning_rate": 3.7648050785070667e-07, + "loss": 0.3459723889827728, + "memory(GiB)": 77.0, + "step": 7799, + "token_acc": 0.8734263354882613, + "train_speed(iter/s)": 0.405868 + }, + { + "epoch": 2.496, + "grad_norm": 0.759830263959762, + "learning_rate": 3.760152416955001e-07, + "loss": 0.3159812390804291, + "memory(GiB)": 77.0, + "step": 7800, + "token_acc": 
0.9219006007646096, + "train_speed(iter/s)": 0.405833 + }, + { + "epoch": 2.49632, + "grad_norm": 0.7162122279313136, + "learning_rate": 3.7555023983335074e-07, + "loss": 0.26942703127861023, + "memory(GiB)": 77.0, + "step": 7801, + "token_acc": 0.8820257049683483, + "train_speed(iter/s)": 0.405797 + }, + { + "epoch": 2.49664, + "grad_norm": 0.6573662850984694, + "learning_rate": 3.750855023221206e-07, + "loss": 0.28256529569625854, + "memory(GiB)": 77.0, + "step": 7802, + "token_acc": 0.8979802143446002, + "train_speed(iter/s)": 0.405759 + }, + { + "epoch": 2.49696, + "grad_norm": 0.694257138864967, + "learning_rate": 3.746210292196387e-07, + "loss": 0.2779994010925293, + "memory(GiB)": 77.0, + "step": 7803, + "token_acc": 0.9020929407591345, + "train_speed(iter/s)": 0.405715 + }, + { + "epoch": 2.49728, + "grad_norm": 0.6425368789470682, + "learning_rate": 3.74156820583699e-07, + "loss": 0.24209606647491455, + "memory(GiB)": 77.0, + "step": 7804, + "token_acc": 0.9502933004013584, + "train_speed(iter/s)": 0.405684 + }, + { + "epoch": 2.4976, + "grad_norm": 0.8379673231109662, + "learning_rate": 3.7369287647206513e-07, + "loss": 0.278849333524704, + "memory(GiB)": 77.0, + "step": 7805, + "token_acc": 0.8337214718211458, + "train_speed(iter/s)": 0.405654 + }, + { + "epoch": 2.49792, + "grad_norm": 0.7348114619553209, + "learning_rate": 3.7322919694246646e-07, + "loss": 0.31945163011550903, + "memory(GiB)": 77.0, + "step": 7806, + "token_acc": 0.8055747523612071, + "train_speed(iter/s)": 0.405611 + }, + { + "epoch": 2.49824, + "grad_norm": 0.712761143159106, + "learning_rate": 3.7276578205260066e-07, + "loss": 0.37673890590667725, + "memory(GiB)": 77.0, + "step": 7807, + "token_acc": 0.9630746043707611, + "train_speed(iter/s)": 0.40557 + }, + { + "epoch": 2.49856, + "grad_norm": 0.7147092556331248, + "learning_rate": 3.723026318601314e-07, + "loss": 0.2724180221557617, + "memory(GiB)": 77.0, + "step": 7808, + "token_acc": 0.9147557328015952, + "train_speed(iter/s)": 0.405538 + }, + { + "epoch": 2.4988799999999998, + "grad_norm": 0.7725150480189434, + "learning_rate": 3.718397464226897e-07, + "loss": 0.3513672947883606, + "memory(GiB)": 77.0, + "step": 7809, + "token_acc": 0.9022257551669316, + "train_speed(iter/s)": 0.405507 + }, + { + "epoch": 2.4992, + "grad_norm": 0.6515812808495774, + "learning_rate": 3.713771257978732e-07, + "loss": 0.24899233877658844, + "memory(GiB)": 77.0, + "step": 7810, + "token_acc": 0.9434140207481924, + "train_speed(iter/s)": 0.405472 + }, + { + "epoch": 2.49952, + "grad_norm": 0.717466520047063, + "learning_rate": 3.709147700432475e-07, + "loss": 0.3270392417907715, + "memory(GiB)": 77.0, + "step": 7811, + "token_acc": 0.93141075604053, + "train_speed(iter/s)": 0.405436 + }, + { + "epoch": 2.49984, + "grad_norm": 0.6649126346748834, + "learning_rate": 3.7045267921634457e-07, + "loss": 0.22723284363746643, + "memory(GiB)": 77.0, + "step": 7812, + "token_acc": 0.9417808219178082, + "train_speed(iter/s)": 0.405405 + }, + { + "epoch": 2.50016, + "grad_norm": 0.7673915061308217, + "learning_rate": 3.699908533746635e-07, + "loss": 0.31221646070480347, + "memory(GiB)": 77.0, + "step": 7813, + "token_acc": 0.8916938110749185, + "train_speed(iter/s)": 0.405375 + }, + { + "epoch": 2.50048, + "grad_norm": 0.7363448268239381, + "learning_rate": 3.695292925756708e-07, + "loss": 0.37246692180633545, + "memory(GiB)": 77.0, + "step": 7814, + "token_acc": 0.8831037957675513, + "train_speed(iter/s)": 0.405342 + }, + { + "epoch": 2.5008, + "grad_norm": 0.7419329949381791, + 
"learning_rate": 3.6906799687679905e-07, + "loss": 0.30342209339141846, + "memory(GiB)": 77.0, + "step": 7815, + "token_acc": 0.951530612244898, + "train_speed(iter/s)": 0.405304 + }, + { + "epoch": 2.5011200000000002, + "grad_norm": 0.6744529357609627, + "learning_rate": 3.686069663354494e-07, + "loss": 0.3577004671096802, + "memory(GiB)": 77.0, + "step": 7816, + "token_acc": 0.888189738625363, + "train_speed(iter/s)": 0.405267 + }, + { + "epoch": 2.50144, + "grad_norm": 0.7127553798436013, + "learning_rate": 3.6814620100898893e-07, + "loss": 0.3604864478111267, + "memory(GiB)": 77.0, + "step": 7817, + "token_acc": 0.8717126363053239, + "train_speed(iter/s)": 0.405229 + }, + { + "epoch": 2.50176, + "grad_norm": 0.746985731644698, + "learning_rate": 3.676857009547513e-07, + "loss": 0.4335390329360962, + "memory(GiB)": 77.0, + "step": 7818, + "token_acc": 0.8233341843247384, + "train_speed(iter/s)": 0.405195 + }, + { + "epoch": 2.50208, + "grad_norm": 0.7539007909799268, + "learning_rate": 3.6722546623003785e-07, + "loss": 0.34369635581970215, + "memory(GiB)": 77.0, + "step": 7819, + "token_acc": 0.9518498367791077, + "train_speed(iter/s)": 0.405157 + }, + { + "epoch": 2.5023999999999997, + "grad_norm": 0.7540311927194162, + "learning_rate": 3.667654968921169e-07, + "loss": 0.3351466953754425, + "memory(GiB)": 77.0, + "step": 7820, + "token_acc": 0.8934736842105263, + "train_speed(iter/s)": 0.405117 + }, + { + "epoch": 2.50272, + "grad_norm": 0.725653524500604, + "learning_rate": 3.663057929982242e-07, + "loss": 0.35289961099624634, + "memory(GiB)": 77.0, + "step": 7821, + "token_acc": 0.8680913780397936, + "train_speed(iter/s)": 0.405084 + }, + { + "epoch": 2.50304, + "grad_norm": 0.650671040779971, + "learning_rate": 3.658463546055613e-07, + "loss": 0.26437658071517944, + "memory(GiB)": 77.0, + "step": 7822, + "token_acc": 0.9585152838427947, + "train_speed(iter/s)": 0.405049 + }, + { + "epoch": 2.50336, + "grad_norm": 0.7241270268781241, + "learning_rate": 3.653871817712967e-07, + "loss": 0.3616824448108673, + "memory(GiB)": 77.0, + "step": 7823, + "token_acc": 0.8612054329371817, + "train_speed(iter/s)": 0.405014 + }, + { + "epoch": 2.50368, + "grad_norm": 0.7423615098674279, + "learning_rate": 3.6492827455256815e-07, + "loss": 0.38230687379837036, + "memory(GiB)": 77.0, + "step": 7824, + "token_acc": 0.9141599413059428, + "train_speed(iter/s)": 0.404974 + }, + { + "epoch": 2.504, + "grad_norm": 0.7443511472480263, + "learning_rate": 3.6446963300647787e-07, + "loss": 0.3875437378883362, + "memory(GiB)": 77.0, + "step": 7825, + "token_acc": 0.9642147117296223, + "train_speed(iter/s)": 0.404937 + }, + { + "epoch": 2.50432, + "grad_norm": 0.7144654781235403, + "learning_rate": 3.6401125719009584e-07, + "loss": 0.26422780752182007, + "memory(GiB)": 77.0, + "step": 7826, + "token_acc": 0.9320902394106814, + "train_speed(iter/s)": 0.404904 + }, + { + "epoch": 2.50464, + "grad_norm": 0.7161872767868757, + "learning_rate": 3.6355314716045893e-07, + "loss": 0.3415130078792572, + "memory(GiB)": 77.0, + "step": 7827, + "token_acc": 0.9121338912133892, + "train_speed(iter/s)": 0.40487 + }, + { + "epoch": 2.50496, + "grad_norm": 0.7968388088904281, + "learning_rate": 3.6309530297457125e-07, + "loss": 0.3173517882823944, + "memory(GiB)": 77.0, + "step": 7828, + "token_acc": 0.9053921568627451, + "train_speed(iter/s)": 0.404839 + }, + { + "epoch": 2.50528, + "grad_norm": 0.6964439342795767, + "learning_rate": 3.6263772468940305e-07, + "loss": 0.24287870526313782, + "memory(GiB)": 77.0, + "step": 7829, 
+ "token_acc": 0.9494441503440975, + "train_speed(iter/s)": 0.404808 + }, + { + "epoch": 2.5056000000000003, + "grad_norm": 0.7654570004922309, + "learning_rate": 3.6218041236189243e-07, + "loss": 0.28799402713775635, + "memory(GiB)": 77.0, + "step": 7830, + "token_acc": 0.9099557522123893, + "train_speed(iter/s)": 0.404776 + }, + { + "epoch": 2.50592, + "grad_norm": 0.778126723395621, + "learning_rate": 3.6172336604894444e-07, + "loss": 0.3201725482940674, + "memory(GiB)": 77.0, + "step": 7831, + "token_acc": 0.9409464215877982, + "train_speed(iter/s)": 0.404742 + }, + { + "epoch": 2.50624, + "grad_norm": 0.7770245775447787, + "learning_rate": 3.6126658580742934e-07, + "loss": 0.3383793830871582, + "memory(GiB)": 77.0, + "step": 7832, + "token_acc": 0.8894117647058823, + "train_speed(iter/s)": 0.40471 + }, + { + "epoch": 2.50656, + "grad_norm": 0.8027065904616151, + "learning_rate": 3.608100716941865e-07, + "loss": 0.3796238899230957, + "memory(GiB)": 77.0, + "step": 7833, + "token_acc": 0.8599088838268792, + "train_speed(iter/s)": 0.404674 + }, + { + "epoch": 2.5068799999999998, + "grad_norm": 0.748987438746825, + "learning_rate": 3.6035382376602035e-07, + "loss": 0.36195966601371765, + "memory(GiB)": 77.0, + "step": 7834, + "token_acc": 0.926981008513425, + "train_speed(iter/s)": 0.404643 + }, + { + "epoch": 2.5072, + "grad_norm": 0.7101814856603743, + "learning_rate": 3.598978420797039e-07, + "loss": 0.318752646446228, + "memory(GiB)": 77.0, + "step": 7835, + "token_acc": 0.8692387904066736, + "train_speed(iter/s)": 0.404612 + }, + { + "epoch": 2.50752, + "grad_norm": 0.709663241905773, + "learning_rate": 3.5944212669197566e-07, + "loss": 0.2690744698047638, + "memory(GiB)": 77.0, + "step": 7836, + "token_acc": 0.8718399648274346, + "train_speed(iter/s)": 0.404578 + }, + { + "epoch": 2.50784, + "grad_norm": 0.6891052009143482, + "learning_rate": 3.589866776595416e-07, + "loss": 0.2574140429496765, + "memory(GiB)": 77.0, + "step": 7837, + "token_acc": 0.9418427357874102, + "train_speed(iter/s)": 0.404539 + }, + { + "epoch": 2.50816, + "grad_norm": 0.678669548155976, + "learning_rate": 3.5853149503907447e-07, + "loss": 0.32714030146598816, + "memory(GiB)": 77.0, + "step": 7838, + "token_acc": 0.8935185185185185, + "train_speed(iter/s)": 0.404504 + }, + { + "epoch": 2.50848, + "grad_norm": 0.7347733744770879, + "learning_rate": 3.58076578887214e-07, + "loss": 0.24915015697479248, + "memory(GiB)": 77.0, + "step": 7839, + "token_acc": 0.9085342518178339, + "train_speed(iter/s)": 0.404473 + }, + { + "epoch": 2.5088, + "grad_norm": 0.7849332104794964, + "learning_rate": 3.5762192926056566e-07, + "loss": 0.2889407277107239, + "memory(GiB)": 77.0, + "step": 7840, + "token_acc": 0.9426310583580614, + "train_speed(iter/s)": 0.404437 + }, + { + "epoch": 2.5091200000000002, + "grad_norm": 0.844945118892546, + "learning_rate": 3.5716754621570425e-07, + "loss": 0.23778752982616425, + "memory(GiB)": 77.0, + "step": 7841, + "token_acc": 0.94100054377379, + "train_speed(iter/s)": 0.404408 + }, + { + "epoch": 2.50944, + "grad_norm": 0.7317862677685589, + "learning_rate": 3.56713429809169e-07, + "loss": 0.3321603834629059, + "memory(GiB)": 77.0, + "step": 7842, + "token_acc": 0.9298565840938723, + "train_speed(iter/s)": 0.404374 + }, + { + "epoch": 2.50976, + "grad_norm": 0.6575617063931775, + "learning_rate": 3.562595800974669e-07, + "loss": 0.343472421169281, + "memory(GiB)": 77.0, + "step": 7843, + "token_acc": 0.8944033790918691, + "train_speed(iter/s)": 0.404343 + }, + { + "epoch": 2.51008, + 
"grad_norm": 0.7436216299354058, + "learning_rate": 3.55805997137072e-07, + "loss": 0.33195531368255615, + "memory(GiB)": 77.0, + "step": 7844, + "token_acc": 0.8682590494951983, + "train_speed(iter/s)": 0.404311 + }, + { + "epoch": 2.5103999999999997, + "grad_norm": 0.8467845566452554, + "learning_rate": 3.5535268098442433e-07, + "loss": 0.30875372886657715, + "memory(GiB)": 77.0, + "step": 7845, + "token_acc": 0.8272604588394062, + "train_speed(iter/s)": 0.404279 + }, + { + "epoch": 2.51072, + "grad_norm": 0.7592226825925197, + "learning_rate": 3.548996316959322e-07, + "loss": 0.3419470191001892, + "memory(GiB)": 77.0, + "step": 7846, + "token_acc": 0.8961408961408961, + "train_speed(iter/s)": 0.40424 + }, + { + "epoch": 2.51104, + "grad_norm": 0.8038741254711486, + "learning_rate": 3.544468493279682e-07, + "loss": 0.32632845640182495, + "memory(GiB)": 77.0, + "step": 7847, + "token_acc": 0.914956982131039, + "train_speed(iter/s)": 0.40421 + }, + { + "epoch": 2.51136, + "grad_norm": 0.7757458619513947, + "learning_rate": 3.53994333936874e-07, + "loss": 0.3522000312805176, + "memory(GiB)": 77.0, + "step": 7848, + "token_acc": 0.8639233616372741, + "train_speed(iter/s)": 0.404178 + }, + { + "epoch": 2.51168, + "grad_norm": 0.6880519550271706, + "learning_rate": 3.535420855789576e-07, + "loss": 0.22094404697418213, + "memory(GiB)": 77.0, + "step": 7849, + "token_acc": 0.9155025553662691, + "train_speed(iter/s)": 0.404142 + }, + { + "epoch": 2.512, + "grad_norm": 0.7510313340692907, + "learning_rate": 3.5309010431049284e-07, + "loss": 0.3003043234348297, + "memory(GiB)": 77.0, + "step": 7850, + "token_acc": 0.8876217628890473, + "train_speed(iter/s)": 0.404112 + }, + { + "epoch": 2.51232, + "grad_norm": 0.7572729208322577, + "learning_rate": 3.5263839018772145e-07, + "loss": 0.3062629699707031, + "memory(GiB)": 77.0, + "step": 7851, + "token_acc": 0.9113392440503967, + "train_speed(iter/s)": 0.404077 + }, + { + "epoch": 2.51264, + "grad_norm": 0.6984826887010894, + "learning_rate": 3.521869432668512e-07, + "loss": 0.3325369656085968, + "memory(GiB)": 77.0, + "step": 7852, + "token_acc": 0.8927715654952076, + "train_speed(iter/s)": 0.404046 + }, + { + "epoch": 2.51296, + "grad_norm": 0.8018555843493321, + "learning_rate": 3.517357636040569e-07, + "loss": 0.3354831337928772, + "memory(GiB)": 77.0, + "step": 7853, + "token_acc": 0.8882135306553911, + "train_speed(iter/s)": 0.404015 + }, + { + "epoch": 2.51328, + "grad_norm": 0.7555064845286631, + "learning_rate": 3.512848512554798e-07, + "loss": 0.31954681873321533, + "memory(GiB)": 77.0, + "step": 7854, + "token_acc": 0.9148989898989899, + "train_speed(iter/s)": 0.403984 + }, + { + "epoch": 2.5136, + "grad_norm": 0.7041753755732701, + "learning_rate": 3.508342062772285e-07, + "loss": 0.3161236047744751, + "memory(GiB)": 77.0, + "step": 7855, + "token_acc": 0.8749205003179987, + "train_speed(iter/s)": 0.403953 + }, + { + "epoch": 2.51392, + "grad_norm": 0.7155342490041788, + "learning_rate": 3.5038382872537774e-07, + "loss": 0.259648859500885, + "memory(GiB)": 77.0, + "step": 7856, + "token_acc": 0.8480263157894737, + "train_speed(iter/s)": 0.403923 + }, + { + "epoch": 2.51424, + "grad_norm": 0.7105273779362877, + "learning_rate": 3.499337186559687e-07, + "loss": 0.3119957447052002, + "memory(GiB)": 77.0, + "step": 7857, + "token_acc": 0.8572752548656163, + "train_speed(iter/s)": 0.403886 + }, + { + "epoch": 2.51456, + "grad_norm": 0.7256389290699591, + "learning_rate": 3.4948387612501106e-07, + "loss": 0.29244139790534973, + "memory(GiB)": 
77.0, + "step": 7858, + "token_acc": 0.8136704119850188, + "train_speed(iter/s)": 0.403856 + }, + { + "epoch": 2.51488, + "grad_norm": 0.6950403192557821, + "learning_rate": 3.490343011884789e-07, + "loss": 0.3019716739654541, + "memory(GiB)": 77.0, + "step": 7859, + "token_acc": 0.9530744336569579, + "train_speed(iter/s)": 0.403823 + }, + { + "epoch": 2.5152, + "grad_norm": 0.7693301185957478, + "learning_rate": 3.4858499390231505e-07, + "loss": 0.3038254678249359, + "memory(GiB)": 77.0, + "step": 7860, + "token_acc": 0.9061413673232909, + "train_speed(iter/s)": 0.403791 + }, + { + "epoch": 2.51552, + "grad_norm": 0.7045343008031996, + "learning_rate": 3.4813595432242635e-07, + "loss": 0.27418118715286255, + "memory(GiB)": 77.0, + "step": 7861, + "token_acc": 0.9234436343241728, + "train_speed(iter/s)": 0.403759 + }, + { + "epoch": 2.51584, + "grad_norm": 0.7334203022346776, + "learning_rate": 3.47687182504689e-07, + "loss": 0.304535448551178, + "memory(GiB)": 77.0, + "step": 7862, + "token_acc": 0.9236698160119343, + "train_speed(iter/s)": 0.403729 + }, + { + "epoch": 2.51616, + "grad_norm": 0.7324161517462499, + "learning_rate": 3.4723867850494477e-07, + "loss": 0.2369145303964615, + "memory(GiB)": 77.0, + "step": 7863, + "token_acc": 0.9366151866151866, + "train_speed(iter/s)": 0.403698 + }, + { + "epoch": 2.51648, + "grad_norm": 0.7286542085279918, + "learning_rate": 3.467904423790022e-07, + "loss": 0.28597623109817505, + "memory(GiB)": 77.0, + "step": 7864, + "token_acc": 0.9147661829071643, + "train_speed(iter/s)": 0.403668 + }, + { + "epoch": 2.5168, + "grad_norm": 0.7668490171025497, + "learning_rate": 3.4634247418263655e-07, + "loss": 0.3120729327201843, + "memory(GiB)": 77.0, + "step": 7865, + "token_acc": 0.8827425009738995, + "train_speed(iter/s)": 0.403638 + }, + { + "epoch": 2.5171200000000002, + "grad_norm": 0.7377006485486493, + "learning_rate": 3.4589477397158955e-07, + "loss": 0.25771939754486084, + "memory(GiB)": 77.0, + "step": 7866, + "token_acc": 0.9632392115077251, + "train_speed(iter/s)": 0.403605 + }, + { + "epoch": 2.51744, + "grad_norm": 0.7469655283469955, + "learning_rate": 3.454473418015697e-07, + "loss": 0.22493207454681396, + "memory(GiB)": 77.0, + "step": 7867, + "token_acc": 0.9466618287373004, + "train_speed(iter/s)": 0.403575 + }, + { + "epoch": 2.51776, + "grad_norm": 0.7305164743811523, + "learning_rate": 3.4500017772825244e-07, + "loss": 0.28406158089637756, + "memory(GiB)": 77.0, + "step": 7868, + "token_acc": 0.9348725017229497, + "train_speed(iter/s)": 0.403539 + }, + { + "epoch": 2.51808, + "grad_norm": 0.7356972951826923, + "learning_rate": 3.4455328180727914e-07, + "loss": 0.27670982480049133, + "memory(GiB)": 77.0, + "step": 7869, + "token_acc": 0.864570489108097, + "train_speed(iter/s)": 0.403507 + }, + { + "epoch": 2.5183999999999997, + "grad_norm": 0.6941439942961013, + "learning_rate": 3.441066540942589e-07, + "loss": 0.3148338198661804, + "memory(GiB)": 77.0, + "step": 7870, + "token_acc": 0.8803687879469305, + "train_speed(iter/s)": 0.403474 + }, + { + "epoch": 2.51872, + "grad_norm": 0.6919841626697546, + "learning_rate": 3.4366029464476593e-07, + "loss": 0.329802930355072, + "memory(GiB)": 77.0, + "step": 7871, + "token_acc": 0.9011827546737886, + "train_speed(iter/s)": 0.403437 + }, + { + "epoch": 2.51904, + "grad_norm": 0.7667808482309165, + "learning_rate": 3.4321420351434267e-07, + "loss": 0.2778036296367645, + "memory(GiB)": 77.0, + "step": 7872, + "token_acc": 0.9457723867285052, + "train_speed(iter/s)": 0.403406 + }, + { + 
"epoch": 2.51936, + "grad_norm": 0.937890671148804, + "learning_rate": 3.4276838075849724e-07, + "loss": 0.26793360710144043, + "memory(GiB)": 77.0, + "step": 7873, + "token_acc": 0.9502946451447605, + "train_speed(iter/s)": 0.403375 + }, + { + "epoch": 2.51968, + "grad_norm": 0.7734998062084131, + "learning_rate": 3.423228264327041e-07, + "loss": 0.31902825832366943, + "memory(GiB)": 77.0, + "step": 7874, + "token_acc": 0.8630907726931732, + "train_speed(iter/s)": 0.403346 + }, + { + "epoch": 2.52, + "grad_norm": 0.717439589052634, + "learning_rate": 3.418775405924055e-07, + "loss": 0.3305261731147766, + "memory(GiB)": 77.0, + "step": 7875, + "token_acc": 0.8897855660594881, + "train_speed(iter/s)": 0.403315 + }, + { + "epoch": 2.52032, + "grad_norm": 0.7238977376846524, + "learning_rate": 3.414325232930088e-07, + "loss": 0.257093608379364, + "memory(GiB)": 77.0, + "step": 7876, + "token_acc": 0.9539641943734015, + "train_speed(iter/s)": 0.403283 + }, + { + "epoch": 2.52064, + "grad_norm": 0.6580371875042431, + "learning_rate": 3.409877745898893e-07, + "loss": 0.3102168142795563, + "memory(GiB)": 77.0, + "step": 7877, + "token_acc": 0.9360675512665863, + "train_speed(iter/s)": 0.403252 + }, + { + "epoch": 2.52096, + "grad_norm": 0.737367331213871, + "learning_rate": 3.405432945383877e-07, + "loss": 0.28497573733329773, + "memory(GiB)": 77.0, + "step": 7878, + "token_acc": 0.9504950495049505, + "train_speed(iter/s)": 0.403208 + }, + { + "epoch": 2.52128, + "grad_norm": 0.6584172053479951, + "learning_rate": 3.4009908319381225e-07, + "loss": 0.28823181986808777, + "memory(GiB)": 77.0, + "step": 7879, + "token_acc": 0.8983475212819229, + "train_speed(iter/s)": 0.403179 + }, + { + "epoch": 2.5216, + "grad_norm": 0.7066620526213264, + "learning_rate": 3.3965514061143736e-07, + "loss": 0.27750810980796814, + "memory(GiB)": 77.0, + "step": 7880, + "token_acc": 0.9030779305828421, + "train_speed(iter/s)": 0.403144 + }, + { + "epoch": 2.52192, + "grad_norm": 0.7585835433464589, + "learning_rate": 3.392114668465038e-07, + "loss": 0.2921960949897766, + "memory(GiB)": 77.0, + "step": 7881, + "token_acc": 0.8855732073851439, + "train_speed(iter/s)": 0.403107 + }, + { + "epoch": 2.52224, + "grad_norm": 0.6568031438343226, + "learning_rate": 3.3876806195421884e-07, + "loss": 0.28701967000961304, + "memory(GiB)": 77.0, + "step": 7882, + "token_acc": 0.9666666666666667, + "train_speed(iter/s)": 0.403076 + }, + { + "epoch": 2.52256, + "grad_norm": 1.468321522560255, + "learning_rate": 3.3832492598975726e-07, + "loss": 0.3274395167827606, + "memory(GiB)": 77.0, + "step": 7883, + "token_acc": 0.9250989259468626, + "train_speed(iter/s)": 0.403039 + }, + { + "epoch": 2.52288, + "grad_norm": 0.7791650618794121, + "learning_rate": 3.378820590082588e-07, + "loss": 0.3640115261077881, + "memory(GiB)": 77.0, + "step": 7884, + "token_acc": 0.8550241008527995, + "train_speed(iter/s)": 0.403002 + }, + { + "epoch": 2.5232, + "grad_norm": 0.6765473092610719, + "learning_rate": 3.37439461064831e-07, + "loss": 0.2891201078891754, + "memory(GiB)": 77.0, + "step": 7885, + "token_acc": 0.9372056514913658, + "train_speed(iter/s)": 0.402963 + }, + { + "epoch": 2.52352, + "grad_norm": 0.8329172945557957, + "learning_rate": 3.3699713221454766e-07, + "loss": 0.3012772798538208, + "memory(GiB)": 77.0, + "step": 7886, + "token_acc": 0.870539419087137, + "train_speed(iter/s)": 0.402934 + }, + { + "epoch": 2.52384, + "grad_norm": 0.7773673031461154, + "learning_rate": 3.3655507251244864e-07, + "loss": 0.34905263781547546, + 
"memory(GiB)": 77.0, + "step": 7887, + "token_acc": 0.9690066225165563, + "train_speed(iter/s)": 0.402904 + }, + { + "epoch": 2.52416, + "grad_norm": 0.7358447041689069, + "learning_rate": 3.361132820135404e-07, + "loss": 0.3418402075767517, + "memory(GiB)": 77.0, + "step": 7888, + "token_acc": 0.9153623188405797, + "train_speed(iter/s)": 0.402867 + }, + { + "epoch": 2.52448, + "grad_norm": 0.7885665693491654, + "learning_rate": 3.356717607727969e-07, + "loss": 0.24304735660552979, + "memory(GiB)": 77.0, + "step": 7889, + "token_acc": 0.9396419437340153, + "train_speed(iter/s)": 0.402838 + }, + { + "epoch": 2.5248, + "grad_norm": 0.7547741813802025, + "learning_rate": 3.3523050884515683e-07, + "loss": 0.4008939564228058, + "memory(GiB)": 77.0, + "step": 7890, + "token_acc": 0.8575288626873004, + "train_speed(iter/s)": 0.402803 + }, + { + "epoch": 2.5251200000000003, + "grad_norm": 0.7705924542261905, + "learning_rate": 3.347895262855269e-07, + "loss": 0.32082778215408325, + "memory(GiB)": 77.0, + "step": 7891, + "token_acc": 0.9026465028355387, + "train_speed(iter/s)": 0.40277 + }, + { + "epoch": 2.52544, + "grad_norm": 0.7134708715889562, + "learning_rate": 3.3434881314877986e-07, + "loss": 0.2740676701068878, + "memory(GiB)": 77.0, + "step": 7892, + "token_acc": 0.9460500963391136, + "train_speed(iter/s)": 0.40274 + }, + { + "epoch": 2.52576, + "grad_norm": 0.6937401036400678, + "learning_rate": 3.3390836948975436e-07, + "loss": 0.2374538779258728, + "memory(GiB)": 77.0, + "step": 7893, + "token_acc": 0.9674932975871313, + "train_speed(iter/s)": 0.402709 + }, + { + "epoch": 2.52608, + "grad_norm": 0.7244875019107894, + "learning_rate": 3.3346819536325627e-07, + "loss": 0.3523375391960144, + "memory(GiB)": 77.0, + "step": 7894, + "token_acc": 0.9011850152905199, + "train_speed(iter/s)": 0.40268 + }, + { + "epoch": 2.5263999999999998, + "grad_norm": 0.7424744255595206, + "learning_rate": 3.3302829082405755e-07, + "loss": 0.26099032163619995, + "memory(GiB)": 77.0, + "step": 7895, + "token_acc": 0.9431186075722232, + "train_speed(iter/s)": 0.402642 + }, + { + "epoch": 2.52672, + "grad_norm": 0.7427678115005539, + "learning_rate": 3.3258865592689675e-07, + "loss": 0.2782902121543884, + "memory(GiB)": 77.0, + "step": 7896, + "token_acc": 0.8823229750382068, + "train_speed(iter/s)": 0.402611 + }, + { + "epoch": 2.52704, + "grad_norm": 1.1601998025490077, + "learning_rate": 3.3214929072647897e-07, + "loss": 0.28422918915748596, + "memory(GiB)": 77.0, + "step": 7897, + "token_acc": 0.9282051282051282, + "train_speed(iter/s)": 0.402575 + }, + { + "epoch": 2.52736, + "grad_norm": 0.7501707857676276, + "learning_rate": 3.317101952774751e-07, + "loss": 0.2522730231285095, + "memory(GiB)": 77.0, + "step": 7898, + "token_acc": 0.8982993197278911, + "train_speed(iter/s)": 0.402545 + }, + { + "epoch": 2.52768, + "grad_norm": 0.7285134279390335, + "learning_rate": 3.312713696345232e-07, + "loss": 0.3348669111728668, + "memory(GiB)": 77.0, + "step": 7899, + "token_acc": 0.9145024803034725, + "train_speed(iter/s)": 0.402513 + }, + { + "epoch": 2.528, + "grad_norm": 0.7522327500675405, + "learning_rate": 3.3083281385222763e-07, + "loss": 0.3631001114845276, + "memory(GiB)": 77.0, + "step": 7900, + "token_acc": 0.842687074829932, + "train_speed(iter/s)": 0.402485 + }, + { + "epoch": 2.52832, + "grad_norm": 0.7311853128037749, + "learning_rate": 3.303945279851589e-07, + "loss": 0.27697116136550903, + "memory(GiB)": 77.0, + "step": 7901, + "token_acc": 0.8818265967294046, + "train_speed(iter/s)": 0.402455 + }, 
+ { + "epoch": 2.52864, + "grad_norm": 0.7203712137656614, + "learning_rate": 3.2995651208785396e-07, + "loss": 0.31830063462257385, + "memory(GiB)": 77.0, + "step": 7902, + "token_acc": 0.9048404840484049, + "train_speed(iter/s)": 0.402419 + }, + { + "epoch": 2.52896, + "grad_norm": 0.7479874854823105, + "learning_rate": 3.2951876621481663e-07, + "loss": 0.3508049547672272, + "memory(GiB)": 77.0, + "step": 7903, + "token_acc": 0.9158653846153846, + "train_speed(iter/s)": 0.402388 + }, + { + "epoch": 2.52928, + "grad_norm": 0.7592428077548631, + "learning_rate": 3.2908129042051676e-07, + "loss": 0.2847839593887329, + "memory(GiB)": 77.0, + "step": 7904, + "token_acc": 0.9362532523850824, + "train_speed(iter/s)": 0.402355 + }, + { + "epoch": 2.5296, + "grad_norm": 0.763772324912493, + "learning_rate": 3.2864408475939024e-07, + "loss": 0.29199960827827454, + "memory(GiB)": 77.0, + "step": 7905, + "token_acc": 0.9285203716940672, + "train_speed(iter/s)": 0.402325 + }, + { + "epoch": 2.5299199999999997, + "grad_norm": 1.0376636422625494, + "learning_rate": 3.282071492858402e-07, + "loss": 0.41062086820602417, + "memory(GiB)": 77.0, + "step": 7906, + "token_acc": 0.8855799373040752, + "train_speed(iter/s)": 0.402292 + }, + { + "epoch": 2.53024, + "grad_norm": 0.6906261171309943, + "learning_rate": 3.2777048405423535e-07, + "loss": 0.2511630356311798, + "memory(GiB)": 77.0, + "step": 7907, + "token_acc": 0.9607308426506681, + "train_speed(iter/s)": 0.40226 + }, + { + "epoch": 2.53056, + "grad_norm": 0.7469954875724636, + "learning_rate": 3.273340891189111e-07, + "loss": 0.4141421318054199, + "memory(GiB)": 77.0, + "step": 7908, + "token_acc": 0.8835016835016835, + "train_speed(iter/s)": 0.402225 + }, + { + "epoch": 2.53088, + "grad_norm": 0.7221067455029829, + "learning_rate": 3.2689796453416965e-07, + "loss": 0.3017079830169678, + "memory(GiB)": 77.0, + "step": 7909, + "token_acc": 0.9460847240051348, + "train_speed(iter/s)": 0.402192 + }, + { + "epoch": 2.5312, + "grad_norm": 0.7909784080337128, + "learning_rate": 3.2646211035427836e-07, + "loss": 0.2946508228778839, + "memory(GiB)": 77.0, + "step": 7910, + "token_acc": 0.9088714544357273, + "train_speed(iter/s)": 0.402163 + }, + { + "epoch": 2.53152, + "grad_norm": 0.8010869994885739, + "learning_rate": 3.260265266334725e-07, + "loss": 0.2913532257080078, + "memory(GiB)": 77.0, + "step": 7911, + "token_acc": 0.8858709195115874, + "train_speed(iter/s)": 0.402129 + }, + { + "epoch": 2.53184, + "grad_norm": 0.7579750650204199, + "learning_rate": 3.255912134259526e-07, + "loss": 0.3511117696762085, + "memory(GiB)": 77.0, + "step": 7912, + "token_acc": 0.898359161349134, + "train_speed(iter/s)": 0.402098 + }, + { + "epoch": 2.53216, + "grad_norm": 0.6444669980306801, + "learning_rate": 3.251561707858858e-07, + "loss": 0.25831517577171326, + "memory(GiB)": 77.0, + "step": 7913, + "token_acc": 0.9602905569007264, + "train_speed(iter/s)": 0.402045 + }, + { + "epoch": 2.53248, + "grad_norm": 0.6955812122492181, + "learning_rate": 3.247213987674058e-07, + "loss": 0.31082969903945923, + "memory(GiB)": 77.0, + "step": 7914, + "token_acc": 0.935326842837274, + "train_speed(iter/s)": 0.402011 + }, + { + "epoch": 2.5328, + "grad_norm": 0.7301063528148602, + "learning_rate": 3.2428689742461187e-07, + "loss": 0.28985488414764404, + "memory(GiB)": 77.0, + "step": 7915, + "token_acc": 0.9450704225352112, + "train_speed(iter/s)": 0.401984 + }, + { + "epoch": 2.5331200000000003, + "grad_norm": 0.7067498597341466, + "learning_rate": 3.2385266681157103e-07, + 
"loss": 0.29300451278686523, + "memory(GiB)": 77.0, + "step": 7916, + "token_acc": 0.87573385518591, + "train_speed(iter/s)": 0.401952 + }, + { + "epoch": 2.53344, + "grad_norm": 0.7227962236564178, + "learning_rate": 3.23418706982315e-07, + "loss": 0.23386763036251068, + "memory(GiB)": 77.0, + "step": 7917, + "token_acc": 0.9153798641136504, + "train_speed(iter/s)": 0.401925 + }, + { + "epoch": 2.53376, + "grad_norm": 0.7563661217949985, + "learning_rate": 3.2298501799084335e-07, + "loss": 0.31390854716300964, + "memory(GiB)": 77.0, + "step": 7918, + "token_acc": 0.9084117321527393, + "train_speed(iter/s)": 0.401886 + }, + { + "epoch": 2.53408, + "grad_norm": 0.713288139794961, + "learning_rate": 3.2255159989112023e-07, + "loss": 0.2732228636741638, + "memory(GiB)": 77.0, + "step": 7919, + "token_acc": 0.930699481865285, + "train_speed(iter/s)": 0.401855 + }, + { + "epoch": 2.5343999999999998, + "grad_norm": 0.8172003668593231, + "learning_rate": 3.2211845273707687e-07, + "loss": 0.3251434564590454, + "memory(GiB)": 77.0, + "step": 7920, + "token_acc": 0.9128086419753086, + "train_speed(iter/s)": 0.401825 + }, + { + "epoch": 2.53472, + "grad_norm": 0.7017262058765062, + "learning_rate": 3.2168557658261214e-07, + "loss": 0.31451743841171265, + "memory(GiB)": 77.0, + "step": 7921, + "token_acc": 0.8933570317707176, + "train_speed(iter/s)": 0.401783 + }, + { + "epoch": 2.53504, + "grad_norm": 0.6921237840615614, + "learning_rate": 3.21252971481589e-07, + "loss": 0.2023639678955078, + "memory(GiB)": 77.0, + "step": 7922, + "token_acc": 0.9617409617409617, + "train_speed(iter/s)": 0.401753 + }, + { + "epoch": 2.53536, + "grad_norm": 0.7937952527207878, + "learning_rate": 3.208206374878381e-07, + "loss": 0.36005669832229614, + "memory(GiB)": 77.0, + "step": 7923, + "token_acc": 0.8527188490612045, + "train_speed(iter/s)": 0.401715 + }, + { + "epoch": 2.53568, + "grad_norm": 0.7566520148099889, + "learning_rate": 3.203885746551558e-07, + "loss": 0.2904384136199951, + "memory(GiB)": 77.0, + "step": 7924, + "token_acc": 0.8759460348798948, + "train_speed(iter/s)": 0.401686 + }, + { + "epoch": 2.536, + "grad_norm": 0.697367518286642, + "learning_rate": 3.1995678303730486e-07, + "loss": 0.29588472843170166, + "memory(GiB)": 77.0, + "step": 7925, + "token_acc": 0.8578786722624953, + "train_speed(iter/s)": 0.401657 + }, + { + "epoch": 2.53632, + "grad_norm": 0.7353154232115886, + "learning_rate": 3.195252626880141e-07, + "loss": 0.26371824741363525, + "memory(GiB)": 77.0, + "step": 7926, + "token_acc": 0.9440130772374336, + "train_speed(iter/s)": 0.401628 + }, + { + "epoch": 2.5366400000000002, + "grad_norm": 0.9139281496679353, + "learning_rate": 3.190940136609788e-07, + "loss": 0.28055596351623535, + "memory(GiB)": 77.0, + "step": 7927, + "token_acc": 0.9082011474856564, + "train_speed(iter/s)": 0.401598 + }, + { + "epoch": 2.53696, + "grad_norm": 0.6885697471774407, + "learning_rate": 3.186630360098603e-07, + "loss": 0.26286762952804565, + "memory(GiB)": 77.0, + "step": 7928, + "token_acc": 0.9540903540903541, + "train_speed(iter/s)": 0.401567 + }, + { + "epoch": 2.53728, + "grad_norm": 0.7968287637928431, + "learning_rate": 3.182323297882864e-07, + "loss": 0.3992048501968384, + "memory(GiB)": 77.0, + "step": 7929, + "token_acc": 0.8291910331384016, + "train_speed(iter/s)": 0.401535 + }, + { + "epoch": 2.5376, + "grad_norm": 0.7697709689303032, + "learning_rate": 3.1780189504985127e-07, + "loss": 0.39603787660598755, + "memory(GiB)": 77.0, + "step": 7930, + "token_acc": 0.8442728442728443, + 
"train_speed(iter/s)": 0.401498 + }, + { + "epoch": 2.5379199999999997, + "grad_norm": 0.6526548708857042, + "learning_rate": 3.1737173184811484e-07, + "loss": 0.2143481820821762, + "memory(GiB)": 77.0, + "step": 7931, + "token_acc": 0.9371859296482412, + "train_speed(iter/s)": 0.401467 + }, + { + "epoch": 2.53824, + "grad_norm": 0.6814063840600193, + "learning_rate": 3.169418402366037e-07, + "loss": 0.21589480340480804, + "memory(GiB)": 77.0, + "step": 7932, + "token_acc": 0.9665653495440729, + "train_speed(iter/s)": 0.401439 + }, + { + "epoch": 2.53856, + "grad_norm": 0.7903818723699099, + "learning_rate": 3.165122202688098e-07, + "loss": 0.3471445143222809, + "memory(GiB)": 77.0, + "step": 7933, + "token_acc": 0.8153179190751445, + "train_speed(iter/s)": 0.401407 + }, + { + "epoch": 2.53888, + "grad_norm": 0.7090139631511625, + "learning_rate": 3.160828719981923e-07, + "loss": 0.32884353399276733, + "memory(GiB)": 77.0, + "step": 7934, + "token_acc": 0.9208255159474672, + "train_speed(iter/s)": 0.401378 + }, + { + "epoch": 2.5392, + "grad_norm": 0.6902311827587275, + "learning_rate": 3.1565379547817604e-07, + "loss": 0.34290170669555664, + "memory(GiB)": 77.0, + "step": 7935, + "token_acc": 0.9031657355679702, + "train_speed(iter/s)": 0.401338 + }, + { + "epoch": 2.53952, + "grad_norm": 0.8641636827052583, + "learning_rate": 3.1522499076215213e-07, + "loss": 0.378255158662796, + "memory(GiB)": 77.0, + "step": 7936, + "token_acc": 0.8440212825539065, + "train_speed(iter/s)": 0.401305 + }, + { + "epoch": 2.53984, + "grad_norm": 0.8763370851876191, + "learning_rate": 3.1479645790347737e-07, + "loss": 0.3235682249069214, + "memory(GiB)": 77.0, + "step": 7937, + "token_acc": 0.8981132075471698, + "train_speed(iter/s)": 0.401274 + }, + { + "epoch": 2.54016, + "grad_norm": 0.78224758423811, + "learning_rate": 3.1436819695547624e-07, + "loss": 0.35101455450057983, + "memory(GiB)": 77.0, + "step": 7938, + "token_acc": 0.8876380628717078, + "train_speed(iter/s)": 0.401245 + }, + { + "epoch": 2.54048, + "grad_norm": 0.6999860381942069, + "learning_rate": 3.139402079714382e-07, + "loss": 0.30207061767578125, + "memory(GiB)": 77.0, + "step": 7939, + "token_acc": 0.9430485762144054, + "train_speed(iter/s)": 0.401214 + }, + { + "epoch": 2.5408, + "grad_norm": 0.7372801221235956, + "learning_rate": 3.135124910046186e-07, + "loss": 0.36658087372779846, + "memory(GiB)": 77.0, + "step": 7940, + "token_acc": 0.9125287734297929, + "train_speed(iter/s)": 0.401178 + }, + { + "epoch": 2.5411200000000003, + "grad_norm": 0.7407633694459973, + "learning_rate": 3.1308504610823994e-07, + "loss": 0.2838343381881714, + "memory(GiB)": 77.0, + "step": 7941, + "token_acc": 0.9536512170723574, + "train_speed(iter/s)": 0.401144 + }, + { + "epoch": 2.54144, + "grad_norm": 0.7574246534967927, + "learning_rate": 3.126578733354896e-07, + "loss": 0.3270879089832306, + "memory(GiB)": 77.0, + "step": 7942, + "token_acc": 0.9256880733944954, + "train_speed(iter/s)": 0.401104 + }, + { + "epoch": 2.54176, + "grad_norm": 0.7609545700862256, + "learning_rate": 3.122309727395223e-07, + "loss": 0.28332263231277466, + "memory(GiB)": 77.0, + "step": 7943, + "token_acc": 0.9643131235610131, + "train_speed(iter/s)": 0.401075 + }, + { + "epoch": 2.54208, + "grad_norm": 0.800414309868123, + "learning_rate": 3.1180434437345863e-07, + "loss": 0.26872479915618896, + "memory(GiB)": 77.0, + "step": 7944, + "token_acc": 0.8868511035125893, + "train_speed(iter/s)": 0.401047 + }, + { + "epoch": 2.5423999999999998, + "grad_norm": 0.7186870151956478, 
+ "learning_rate": 3.113779882903845e-07, + "loss": 0.2673051059246063, + "memory(GiB)": 77.0, + "step": 7945, + "token_acc": 0.9491315136476427, + "train_speed(iter/s)": 0.401017 + }, + { + "epoch": 2.54272, + "grad_norm": 0.6398204341754903, + "learning_rate": 3.1095190454335346e-07, + "loss": 0.25983357429504395, + "memory(GiB)": 77.0, + "step": 7946, + "token_acc": 0.9104404964711609, + "train_speed(iter/s)": 0.400978 + }, + { + "epoch": 2.54304, + "grad_norm": 0.6777341010778318, + "learning_rate": 3.105260931853829e-07, + "loss": 0.2820758521556854, + "memory(GiB)": 77.0, + "step": 7947, + "token_acc": 0.9225352112676056, + "train_speed(iter/s)": 0.40095 + }, + { + "epoch": 2.54336, + "grad_norm": 0.6435897660769385, + "learning_rate": 3.101005542694585e-07, + "loss": 0.3548731803894043, + "memory(GiB)": 77.0, + "step": 7948, + "token_acc": 0.9141630901287554, + "train_speed(iter/s)": 0.400915 + }, + { + "epoch": 2.54368, + "grad_norm": 0.7307714338595114, + "learning_rate": 3.0967528784853093e-07, + "loss": 0.3183737099170685, + "memory(GiB)": 77.0, + "step": 7949, + "token_acc": 0.9343490304709141, + "train_speed(iter/s)": 0.400884 + }, + { + "epoch": 2.544, + "grad_norm": 0.7143092287664465, + "learning_rate": 3.0925029397551736e-07, + "loss": 0.31918954849243164, + "memory(GiB)": 77.0, + "step": 7950, + "token_acc": 0.9032066508313539, + "train_speed(iter/s)": 0.400854 + }, + { + "epoch": 2.54432, + "grad_norm": 0.6609610456093008, + "learning_rate": 3.088255727033007e-07, + "loss": 0.25933486223220825, + "memory(GiB)": 77.0, + "step": 7951, + "token_acc": 0.9300092908021059, + "train_speed(iter/s)": 0.400822 + }, + { + "epoch": 2.5446400000000002, + "grad_norm": 0.7111180850629141, + "learning_rate": 3.0840112408473065e-07, + "loss": 0.35048386454582214, + "memory(GiB)": 77.0, + "step": 7952, + "token_acc": 0.9237813089146243, + "train_speed(iter/s)": 0.400785 + }, + { + "epoch": 2.54496, + "grad_norm": 0.6485989573871468, + "learning_rate": 3.0797694817262137e-07, + "loss": 0.23531845211982727, + "memory(GiB)": 77.0, + "step": 7953, + "token_acc": 0.9443298969072165, + "train_speed(iter/s)": 0.400753 + }, + { + "epoch": 2.54528, + "grad_norm": 0.7615408837801468, + "learning_rate": 3.075530450197559e-07, + "loss": 0.326619416475296, + "memory(GiB)": 77.0, + "step": 7954, + "token_acc": 0.8318444561219128, + "train_speed(iter/s)": 0.400721 + }, + { + "epoch": 2.5456, + "grad_norm": 0.7484170461360344, + "learning_rate": 3.071294146788803e-07, + "loss": 0.3588491678237915, + "memory(GiB)": 77.0, + "step": 7955, + "token_acc": 0.9024563060935286, + "train_speed(iter/s)": 0.400688 + }, + { + "epoch": 2.5459199999999997, + "grad_norm": 0.7760866100901741, + "learning_rate": 3.067060572027086e-07, + "loss": 0.3476162850856781, + "memory(GiB)": 77.0, + "step": 7956, + "token_acc": 0.9503514938488576, + "train_speed(iter/s)": 0.400658 + }, + { + "epoch": 2.54624, + "grad_norm": 0.7664837650668239, + "learning_rate": 3.062829726439201e-07, + "loss": 0.35169389843940735, + "memory(GiB)": 77.0, + "step": 7957, + "token_acc": 0.9319438536792854, + "train_speed(iter/s)": 0.400628 + }, + { + "epoch": 2.54656, + "grad_norm": 0.7645113902469539, + "learning_rate": 3.0586016105516007e-07, + "loss": 0.29427284002304077, + "memory(GiB)": 77.0, + "step": 7958, + "token_acc": 0.8958333333333334, + "train_speed(iter/s)": 0.400597 + }, + { + "epoch": 2.54688, + "grad_norm": 0.7763087611646472, + "learning_rate": 3.054376224890407e-07, + "loss": 0.3008822798728943, + "memory(GiB)": 77.0, + "step": 
7959, + "token_acc": 0.9501661129568106, + "train_speed(iter/s)": 0.400568 + }, + { + "epoch": 2.5472, + "grad_norm": 0.7531178145688888, + "learning_rate": 3.050153569981393e-07, + "loss": 0.23727479577064514, + "memory(GiB)": 77.0, + "step": 7960, + "token_acc": 0.8991052144399877, + "train_speed(iter/s)": 0.40054 + }, + { + "epoch": 2.54752, + "grad_norm": 0.7481707010405293, + "learning_rate": 3.0459336463499967e-07, + "loss": 0.3188910484313965, + "memory(GiB)": 77.0, + "step": 7961, + "token_acc": 0.9516420998189811, + "train_speed(iter/s)": 0.400508 + }, + { + "epoch": 2.54784, + "grad_norm": 0.7492247551869317, + "learning_rate": 3.04171645452131e-07, + "loss": 0.2354331910610199, + "memory(GiB)": 77.0, + "step": 7962, + "token_acc": 0.912987431517886, + "train_speed(iter/s)": 0.400479 + }, + { + "epoch": 2.54816, + "grad_norm": 0.6920722017416584, + "learning_rate": 3.0375019950200867e-07, + "loss": 0.32585829496383667, + "memory(GiB)": 77.0, + "step": 7963, + "token_acc": 0.883793410507569, + "train_speed(iter/s)": 0.400448 + }, + { + "epoch": 2.54848, + "grad_norm": 0.6896752151674339, + "learning_rate": 3.0332902683707503e-07, + "loss": 0.3160707354545593, + "memory(GiB)": 77.0, + "step": 7964, + "token_acc": 0.8770851624231782, + "train_speed(iter/s)": 0.400417 + }, + { + "epoch": 2.5488, + "grad_norm": 0.7080918596729228, + "learning_rate": 3.029081275097376e-07, + "loss": 0.27071237564086914, + "memory(GiB)": 77.0, + "step": 7965, + "token_acc": 0.8859324956885932, + "train_speed(iter/s)": 0.400387 + }, + { + "epoch": 2.5491200000000003, + "grad_norm": 0.7537278036270845, + "learning_rate": 3.024875015723697e-07, + "loss": 0.28572878241539, + "memory(GiB)": 77.0, + "step": 7966, + "token_acc": 0.9205340114431023, + "train_speed(iter/s)": 0.400357 + }, + { + "epoch": 2.54944, + "grad_norm": 0.7001224177930867, + "learning_rate": 3.020671490773111e-07, + "loss": 0.33907413482666016, + "memory(GiB)": 77.0, + "step": 7967, + "token_acc": 0.9057667525773195, + "train_speed(iter/s)": 0.400321 + }, + { + "epoch": 2.54976, + "grad_norm": 0.7333573551641497, + "learning_rate": 3.0164707007686725e-07, + "loss": 0.28414851427078247, + "memory(GiB)": 77.0, + "step": 7968, + "token_acc": 0.8693867077238902, + "train_speed(iter/s)": 0.400286 + }, + { + "epoch": 2.55008, + "grad_norm": 0.7273571447943139, + "learning_rate": 3.0122726462331e-07, + "loss": 0.2557297945022583, + "memory(GiB)": 77.0, + "step": 7969, + "token_acc": 0.9581881533101045, + "train_speed(iter/s)": 0.400257 + }, + { + "epoch": 2.5504, + "grad_norm": 0.712685074694981, + "learning_rate": 3.0080773276887604e-07, + "loss": 0.29216474294662476, + "memory(GiB)": 77.0, + "step": 7970, + "token_acc": 0.9512437810945273, + "train_speed(iter/s)": 0.400226 + }, + { + "epoch": 2.55072, + "grad_norm": 0.721614130727579, + "learning_rate": 3.003884745657701e-07, + "loss": 0.3264220058917999, + "memory(GiB)": 77.0, + "step": 7971, + "token_acc": 0.8957528957528957, + "train_speed(iter/s)": 0.400196 + }, + { + "epoch": 2.55104, + "grad_norm": 0.7178268717103351, + "learning_rate": 2.9996949006616096e-07, + "loss": 0.2922949492931366, + "memory(GiB)": 77.0, + "step": 7972, + "token_acc": 0.9234210979177422, + "train_speed(iter/s)": 0.400163 + }, + { + "epoch": 2.55136, + "grad_norm": 0.7530563194089607, + "learning_rate": 2.9955077932218357e-07, + "loss": 0.3044029772281647, + "memory(GiB)": 77.0, + "step": 7973, + "token_acc": 0.8564137461721674, + "train_speed(iter/s)": 0.400133 + }, + { + "epoch": 2.55168, + "grad_norm": 
0.7851526657181576, + "learning_rate": 2.991323423859399e-07, + "loss": 0.3136865496635437, + "memory(GiB)": 77.0, + "step": 7974, + "token_acc": 0.8274122807017544, + "train_speed(iter/s)": 0.400105 + }, + { + "epoch": 2.552, + "grad_norm": 0.835759554948008, + "learning_rate": 2.987141793094975e-07, + "loss": 0.33591118454933167, + "memory(GiB)": 77.0, + "step": 7975, + "token_acc": 0.8868046571798189, + "train_speed(iter/s)": 0.400075 + }, + { + "epoch": 2.55232, + "grad_norm": 0.6989345868444697, + "learning_rate": 2.982962901448883e-07, + "loss": 0.39762693643569946, + "memory(GiB)": 77.0, + "step": 7976, + "token_acc": 0.9666207845836201, + "train_speed(iter/s)": 0.400043 + }, + { + "epoch": 2.5526400000000002, + "grad_norm": 0.7805727477748203, + "learning_rate": 2.978786749441118e-07, + "loss": 0.23349323868751526, + "memory(GiB)": 77.0, + "step": 7977, + "token_acc": 0.9631728045325779, + "train_speed(iter/s)": 0.400013 + }, + { + "epoch": 2.55296, + "grad_norm": 0.7075319123821885, + "learning_rate": 2.974613337591331e-07, + "loss": 0.3378932476043701, + "memory(GiB)": 77.0, + "step": 7978, + "token_acc": 0.8935737871475743, + "train_speed(iter/s)": 0.399984 + }, + { + "epoch": 2.55328, + "grad_norm": 0.7592846430742101, + "learning_rate": 2.9704426664188324e-07, + "loss": 0.28217852115631104, + "memory(GiB)": 77.0, + "step": 7979, + "token_acc": 0.9282581453634086, + "train_speed(iter/s)": 0.399949 + }, + { + "epoch": 2.5536, + "grad_norm": 0.6479475767773062, + "learning_rate": 2.966274736442587e-07, + "loss": 0.2553636431694031, + "memory(GiB)": 77.0, + "step": 7980, + "token_acc": 0.8735456215554195, + "train_speed(iter/s)": 0.399913 + }, + { + "epoch": 2.5539199999999997, + "grad_norm": 0.7167818803646884, + "learning_rate": 2.962109548181222e-07, + "loss": 0.35802796483039856, + "memory(GiB)": 77.0, + "step": 7981, + "token_acc": 0.8268477409245234, + "train_speed(iter/s)": 0.399879 + }, + { + "epoch": 2.55424, + "grad_norm": 0.7819327148808565, + "learning_rate": 2.957947102153025e-07, + "loss": 0.31765520572662354, + "memory(GiB)": 77.0, + "step": 7982, + "token_acc": 0.9036929761042722, + "train_speed(iter/s)": 0.399846 + }, + { + "epoch": 2.55456, + "grad_norm": 0.7151700826407371, + "learning_rate": 2.953787398875937e-07, + "loss": 0.34184491634368896, + "memory(GiB)": 77.0, + "step": 7983, + "token_acc": 0.9616116751269036, + "train_speed(iter/s)": 0.399814 + }, + { + "epoch": 2.55488, + "grad_norm": 0.6661301562352945, + "learning_rate": 2.9496304388675653e-07, + "loss": 0.24036619067192078, + "memory(GiB)": 77.0, + "step": 7984, + "token_acc": 0.9426615318784767, + "train_speed(iter/s)": 0.399782 + }, + { + "epoch": 2.5552, + "grad_norm": 0.724795929911663, + "learning_rate": 2.945476222645166e-07, + "loss": 0.32398849725723267, + "memory(GiB)": 77.0, + "step": 7985, + "token_acc": 0.876425855513308, + "train_speed(iter/s)": 0.399748 + }, + { + "epoch": 2.55552, + "grad_norm": 0.741860985019808, + "learning_rate": 2.941324750725663e-07, + "loss": 0.34030312299728394, + "memory(GiB)": 77.0, + "step": 7986, + "token_acc": 0.9118190212373037, + "train_speed(iter/s)": 0.399713 + }, + { + "epoch": 2.55584, + "grad_norm": 0.7187782381707977, + "learning_rate": 2.937176023625629e-07, + "loss": 0.27470242977142334, + "memory(GiB)": 77.0, + "step": 7987, + "token_acc": 0.9632243684992571, + "train_speed(iter/s)": 0.399684 + }, + { + "epoch": 2.55616, + "grad_norm": 0.7300530417161928, + "learning_rate": 2.933030041861312e-07, + "loss": 0.3167608976364136, + "memory(GiB)": 
77.0, + "step": 7988, + "token_acc": 0.9131882770870338, + "train_speed(iter/s)": 0.399655 + }, + { + "epoch": 2.55648, + "grad_norm": 0.7265345861505247, + "learning_rate": 2.9288868059486055e-07, + "loss": 0.32283204793930054, + "memory(GiB)": 77.0, + "step": 7989, + "token_acc": 0.9120450606585788, + "train_speed(iter/s)": 0.399622 + }, + { + "epoch": 2.5568, + "grad_norm": 0.7365367840340165, + "learning_rate": 2.9247463164030544e-07, + "loss": 0.2990606129169464, + "memory(GiB)": 77.0, + "step": 7990, + "token_acc": 0.9140667761357416, + "train_speed(iter/s)": 0.399584 + }, + { + "epoch": 2.55712, + "grad_norm": 0.7764569314093384, + "learning_rate": 2.920608573739878e-07, + "loss": 0.33270692825317383, + "memory(GiB)": 77.0, + "step": 7991, + "token_acc": 0.890295358649789, + "train_speed(iter/s)": 0.39955 + }, + { + "epoch": 2.55744, + "grad_norm": 0.6813249051148038, + "learning_rate": 2.9164735784739414e-07, + "loss": 0.30071496963500977, + "memory(GiB)": 77.0, + "step": 7992, + "token_acc": 0.8869118228548833, + "train_speed(iter/s)": 0.399518 + }, + { + "epoch": 2.55776, + "grad_norm": 0.7357827054004759, + "learning_rate": 2.912341331119778e-07, + "loss": 0.30350685119628906, + "memory(GiB)": 77.0, + "step": 7993, + "token_acc": 0.8857405703771849, + "train_speed(iter/s)": 0.399485 + }, + { + "epoch": 2.55808, + "grad_norm": 0.7333043482713973, + "learning_rate": 2.908211832191571e-07, + "loss": 0.3101872205734253, + "memory(GiB)": 77.0, + "step": 7994, + "token_acc": 0.8308839657819698, + "train_speed(iter/s)": 0.399454 + }, + { + "epoch": 2.5584, + "grad_norm": 0.7468647751378542, + "learning_rate": 2.9040850822031677e-07, + "loss": 0.37394940853118896, + "memory(GiB)": 77.0, + "step": 7995, + "token_acc": 0.8731813773035888, + "train_speed(iter/s)": 0.399419 + }, + { + "epoch": 2.55872, + "grad_norm": 0.6671608799845995, + "learning_rate": 2.899961081668068e-07, + "loss": 0.2776375114917755, + "memory(GiB)": 77.0, + "step": 7996, + "token_acc": 0.9411933448078026, + "train_speed(iter/s)": 0.399386 + }, + { + "epoch": 2.55904, + "grad_norm": 0.76032437753203, + "learning_rate": 2.895839831099431e-07, + "loss": 0.3303549289703369, + "memory(GiB)": 77.0, + "step": 7997, + "token_acc": 0.940512375162831, + "train_speed(iter/s)": 0.399355 + }, + { + "epoch": 2.55936, + "grad_norm": 0.7649982839960588, + "learning_rate": 2.8917213310100804e-07, + "loss": 0.378534734249115, + "memory(GiB)": 77.0, + "step": 7998, + "token_acc": 0.9114307342922029, + "train_speed(iter/s)": 0.399321 + }, + { + "epoch": 2.55968, + "grad_norm": 0.7591393011373643, + "learning_rate": 2.8876055819124893e-07, + "loss": 0.30931153893470764, + "memory(GiB)": 77.0, + "step": 7999, + "token_acc": 0.9507042253521126, + "train_speed(iter/s)": 0.399284 + }, + { + "epoch": 2.56, + "grad_norm": 0.7348761872627076, + "learning_rate": 2.8834925843187893e-07, + "loss": 0.29914674162864685, + "memory(GiB)": 77.0, + "step": 8000, + "token_acc": 0.8944693572496263, + "train_speed(iter/s)": 0.399254 + }, + { + "epoch": 2.56032, + "grad_norm": 0.751259437058413, + "learning_rate": 2.8793823387407714e-07, + "loss": 0.2463258057832718, + "memory(GiB)": 77.0, + "step": 8001, + "token_acc": 0.9290502793296089, + "train_speed(iter/s)": 0.399221 + }, + { + "epoch": 2.5606400000000002, + "grad_norm": 0.7639272490570662, + "learning_rate": 2.8752748456898905e-07, + "loss": 0.3719363212585449, + "memory(GiB)": 77.0, + "step": 8002, + "token_acc": 0.8028386300524529, + "train_speed(iter/s)": 0.399191 + }, + { + "epoch": 2.56096, + 
"grad_norm": 0.6821427014863609, + "learning_rate": 2.8711701056772457e-07, + "loss": 0.2691485285758972, + "memory(GiB)": 77.0, + "step": 8003, + "token_acc": 0.9681876022541356, + "train_speed(iter/s)": 0.39916 + }, + { + "epoch": 2.56128, + "grad_norm": 0.7391598105552171, + "learning_rate": 2.8670681192136033e-07, + "loss": 0.23429614305496216, + "memory(GiB)": 77.0, + "step": 8004, + "token_acc": 0.9311780553514573, + "train_speed(iter/s)": 0.399134 + }, + { + "epoch": 2.5616, + "grad_norm": 0.7234874008068193, + "learning_rate": 2.8629688868093837e-07, + "loss": 0.32664358615875244, + "memory(GiB)": 77.0, + "step": 8005, + "token_acc": 0.8746397694524496, + "train_speed(iter/s)": 0.399103 + }, + { + "epoch": 2.5619199999999998, + "grad_norm": 0.7515716932705909, + "learning_rate": 2.858872408974667e-07, + "loss": 0.33857083320617676, + "memory(GiB)": 77.0, + "step": 8006, + "token_acc": 0.9422565837158734, + "train_speed(iter/s)": 0.399072 + }, + { + "epoch": 2.56224, + "grad_norm": 0.8070229502622523, + "learning_rate": 2.8547786862191894e-07, + "loss": 0.29929518699645996, + "memory(GiB)": 77.0, + "step": 8007, + "token_acc": 0.8684715412492006, + "train_speed(iter/s)": 0.399042 + }, + { + "epoch": 2.56256, + "grad_norm": 0.7289469457364457, + "learning_rate": 2.850687719052342e-07, + "loss": 0.2894602417945862, + "memory(GiB)": 77.0, + "step": 8008, + "token_acc": 0.8934217236104028, + "train_speed(iter/s)": 0.39901 + }, + { + "epoch": 2.56288, + "grad_norm": 0.6955444209301482, + "learning_rate": 2.846599507983172e-07, + "loss": 0.26694121956825256, + "memory(GiB)": 77.0, + "step": 8009, + "token_acc": 0.9087365283191928, + "train_speed(iter/s)": 0.398972 + }, + { + "epoch": 2.5632, + "grad_norm": 0.743203300253664, + "learning_rate": 2.84251405352039e-07, + "loss": 0.2990429997444153, + "memory(GiB)": 77.0, + "step": 8010, + "token_acc": 0.9493150684931507, + "train_speed(iter/s)": 0.398939 + }, + { + "epoch": 2.56352, + "grad_norm": 0.6627934082089306, + "learning_rate": 2.838431356172361e-07, + "loss": 0.3262258768081665, + "memory(GiB)": 77.0, + "step": 8011, + "token_acc": 0.8179581795817958, + "train_speed(iter/s)": 0.398907 + }, + { + "epoch": 2.56384, + "grad_norm": 0.7359469299838491, + "learning_rate": 2.8343514164471034e-07, + "loss": 0.300197571516037, + "memory(GiB)": 77.0, + "step": 8012, + "token_acc": 0.9015617605300521, + "train_speed(iter/s)": 0.398872 + }, + { + "epoch": 2.56416, + "grad_norm": 0.6776014069235844, + "learning_rate": 2.8302742348522953e-07, + "loss": 0.2658846378326416, + "memory(GiB)": 77.0, + "step": 8013, + "token_acc": 0.860748959778086, + "train_speed(iter/s)": 0.398838 + }, + { + "epoch": 2.56448, + "grad_norm": 0.7228465775837055, + "learning_rate": 2.826199811895269e-07, + "loss": 0.3040560483932495, + "memory(GiB)": 77.0, + "step": 8014, + "token_acc": 0.8963393773520356, + "train_speed(iter/s)": 0.39881 + }, + { + "epoch": 2.5648, + "grad_norm": 1.8901991757404746, + "learning_rate": 2.8221281480830223e-07, + "loss": 0.3337239623069763, + "memory(GiB)": 77.0, + "step": 8015, + "token_acc": 0.8937795562022554, + "train_speed(iter/s)": 0.398778 + }, + { + "epoch": 2.56512, + "grad_norm": 0.7841782474172974, + "learning_rate": 2.818059243922197e-07, + "loss": 0.2782812714576721, + "memory(GiB)": 77.0, + "step": 8016, + "token_acc": 0.9069221744231521, + "train_speed(iter/s)": 0.398751 + }, + { + "epoch": 2.56544, + "grad_norm": 0.7345085552945863, + "learning_rate": 2.813993099919099e-07, + "loss": 0.264200896024704, + "memory(GiB)": 
77.0, + "step": 8017, + "token_acc": 0.9295439074200136, + "train_speed(iter/s)": 0.39872 + }, + { + "epoch": 2.56576, + "grad_norm": 0.7675687490762638, + "learning_rate": 2.80992971657969e-07, + "loss": 0.29284751415252686, + "memory(GiB)": 77.0, + "step": 8018, + "token_acc": 0.9287937743190662, + "train_speed(iter/s)": 0.398686 + }, + { + "epoch": 2.56608, + "grad_norm": 0.6988095391604562, + "learning_rate": 2.8058690944095885e-07, + "loss": 0.27283617854118347, + "memory(GiB)": 77.0, + "step": 8019, + "token_acc": 0.9557404326123128, + "train_speed(iter/s)": 0.398656 + }, + { + "epoch": 2.5664, + "grad_norm": 0.7072719921360711, + "learning_rate": 2.8018112339140673e-07, + "loss": 0.3067803382873535, + "memory(GiB)": 77.0, + "step": 8020, + "token_acc": 0.9062314540059347, + "train_speed(iter/s)": 0.398625 + }, + { + "epoch": 2.56672, + "grad_norm": 0.7292545273248987, + "learning_rate": 2.7977561355980567e-07, + "loss": 0.25420889258384705, + "memory(GiB)": 77.0, + "step": 8021, + "token_acc": 0.9459586466165414, + "train_speed(iter/s)": 0.398592 + }, + { + "epoch": 2.56704, + "grad_norm": 0.69905363320212, + "learning_rate": 2.7937037999661435e-07, + "loss": 0.2824142277240753, + "memory(GiB)": 77.0, + "step": 8022, + "token_acc": 0.8797892720306514, + "train_speed(iter/s)": 0.398562 + }, + { + "epoch": 2.56736, + "grad_norm": 0.7081638318256246, + "learning_rate": 2.7896542275225703e-07, + "loss": 0.30384668707847595, + "memory(GiB)": 77.0, + "step": 8023, + "token_acc": 0.9144219308700834, + "train_speed(iter/s)": 0.398533 + }, + { + "epoch": 2.56768, + "grad_norm": 0.6940253546409221, + "learning_rate": 2.7856074187712386e-07, + "loss": 0.27131980657577515, + "memory(GiB)": 77.0, + "step": 8024, + "token_acc": 0.8753036437246964, + "train_speed(iter/s)": 0.398503 + }, + { + "epoch": 2.568, + "grad_norm": 0.745297107633284, + "learning_rate": 2.781563374215701e-07, + "loss": 0.35206079483032227, + "memory(GiB)": 77.0, + "step": 8025, + "token_acc": 0.8914209115281502, + "train_speed(iter/s)": 0.398468 + }, + { + "epoch": 2.56832, + "grad_norm": 0.7205950262650295, + "learning_rate": 2.777522094359167e-07, + "loss": 0.2986917197704315, + "memory(GiB)": 77.0, + "step": 8026, + "token_acc": 0.9139477977161501, + "train_speed(iter/s)": 0.398435 + }, + { + "epoch": 2.5686400000000003, + "grad_norm": 0.7952553457499231, + "learning_rate": 2.7734835797045084e-07, + "loss": 0.2659420371055603, + "memory(GiB)": 77.0, + "step": 8027, + "token_acc": 0.8696548171045853, + "train_speed(iter/s)": 0.398405 + }, + { + "epoch": 2.56896, + "grad_norm": 0.7336652292044341, + "learning_rate": 2.769447830754246e-07, + "loss": 0.27729350328445435, + "memory(GiB)": 77.0, + "step": 8028, + "token_acc": 0.9268730372364289, + "train_speed(iter/s)": 0.398377 + }, + { + "epoch": 2.56928, + "grad_norm": 0.795137466650159, + "learning_rate": 2.7654148480105557e-07, + "loss": 0.23634429275989532, + "memory(GiB)": 77.0, + "step": 8029, + "token_acc": 0.9179544815959539, + "train_speed(iter/s)": 0.39835 + }, + { + "epoch": 2.5696, + "grad_norm": 0.8060856402175824, + "learning_rate": 2.761384631975278e-07, + "loss": 0.34645289182662964, + "memory(GiB)": 77.0, + "step": 8030, + "token_acc": 0.8778303060462802, + "train_speed(iter/s)": 0.398319 + }, + { + "epoch": 2.5699199999999998, + "grad_norm": 0.7092493913429601, + "learning_rate": 2.7573571831499004e-07, + "loss": 0.26031380891799927, + "memory(GiB)": 77.0, + "step": 8031, + "token_acc": 0.8459086078639745, + "train_speed(iter/s)": 0.398285 + }, + { + 
"epoch": 2.57024, + "grad_norm": 0.7263004853939385, + "learning_rate": 2.7533325020355716e-07, + "loss": 0.3209135830402374, + "memory(GiB)": 77.0, + "step": 8032, + "token_acc": 0.8686176836861769, + "train_speed(iter/s)": 0.398256 + }, + { + "epoch": 2.57056, + "grad_norm": 0.7395882443286875, + "learning_rate": 2.7493105891330837e-07, + "loss": 0.3202364444732666, + "memory(GiB)": 77.0, + "step": 8033, + "token_acc": 0.9568473609129815, + "train_speed(iter/s)": 0.398228 + }, + { + "epoch": 2.57088, + "grad_norm": 0.7357266311626129, + "learning_rate": 2.7452914449429045e-07, + "loss": 0.2717084586620331, + "memory(GiB)": 77.0, + "step": 8034, + "token_acc": 0.9543672014260249, + "train_speed(iter/s)": 0.398197 + }, + { + "epoch": 2.5712, + "grad_norm": 0.7273284255434854, + "learning_rate": 2.7412750699651466e-07, + "loss": 0.3205256462097168, + "memory(GiB)": 77.0, + "step": 8035, + "token_acc": 0.9399855386840202, + "train_speed(iter/s)": 0.39817 + }, + { + "epoch": 2.57152, + "grad_norm": 0.717585425762535, + "learning_rate": 2.737261464699573e-07, + "loss": 0.3234030604362488, + "memory(GiB)": 77.0, + "step": 8036, + "token_acc": 0.8645994832041344, + "train_speed(iter/s)": 0.398138 + }, + { + "epoch": 2.57184, + "grad_norm": 0.69879215188562, + "learning_rate": 2.7332506296456063e-07, + "loss": 0.34685075283050537, + "memory(GiB)": 77.0, + "step": 8037, + "token_acc": 0.918853591160221, + "train_speed(iter/s)": 0.398107 + }, + { + "epoch": 2.5721600000000002, + "grad_norm": 0.7752441211039168, + "learning_rate": 2.729242565302331e-07, + "loss": 0.3624974489212036, + "memory(GiB)": 77.0, + "step": 8038, + "token_acc": 0.8890170077832228, + "train_speed(iter/s)": 0.398077 + }, + { + "epoch": 2.57248, + "grad_norm": 0.6680387301666572, + "learning_rate": 2.7252372721684793e-07, + "loss": 0.28570181131362915, + "memory(GiB)": 77.0, + "step": 8039, + "token_acc": 0.925725860904794, + "train_speed(iter/s)": 0.398037 + }, + { + "epoch": 2.5728, + "grad_norm": 0.9058647759684059, + "learning_rate": 2.7212347507424406e-07, + "loss": 0.34221935272216797, + "memory(GiB)": 77.0, + "step": 8040, + "token_acc": 0.8878986866791745, + "train_speed(iter/s)": 0.398005 + }, + { + "epoch": 2.57312, + "grad_norm": 0.7538915126220047, + "learning_rate": 2.717235001522256e-07, + "loss": 0.23128840327262878, + "memory(GiB)": 77.0, + "step": 8041, + "token_acc": 0.8878005342831701, + "train_speed(iter/s)": 0.397975 + }, + { + "epoch": 2.5734399999999997, + "grad_norm": 0.6790161377947765, + "learning_rate": 2.713238025005627e-07, + "loss": 0.32692384719848633, + "memory(GiB)": 77.0, + "step": 8042, + "token_acc": 0.9037682964808471, + "train_speed(iter/s)": 0.397947 + }, + { + "epoch": 2.57376, + "grad_norm": 0.7121903815473809, + "learning_rate": 2.709243821689911e-07, + "loss": 0.313313364982605, + "memory(GiB)": 77.0, + "step": 8043, + "token_acc": 0.9322820037105751, + "train_speed(iter/s)": 0.397914 + }, + { + "epoch": 2.57408, + "grad_norm": 0.6974365428545887, + "learning_rate": 2.7052523920721106e-07, + "loss": 0.34164488315582275, + "memory(GiB)": 77.0, + "step": 8044, + "token_acc": 0.9587020648967551, + "train_speed(iter/s)": 0.397882 + }, + { + "epoch": 2.5744, + "grad_norm": 0.7651911765316237, + "learning_rate": 2.7012637366488986e-07, + "loss": 0.30299729108810425, + "memory(GiB)": 77.0, + "step": 8045, + "token_acc": 0.8753726893261777, + "train_speed(iter/s)": 0.397852 + }, + { + "epoch": 2.57472, + "grad_norm": 0.7381497521055509, + "learning_rate": 2.697277855916586e-07, + "loss": 
0.30701082944869995, + "memory(GiB)": 77.0, + "step": 8046, + "token_acc": 0.871218487394958, + "train_speed(iter/s)": 0.397821 + }, + { + "epoch": 2.57504, + "grad_norm": 0.6646554471352316, + "learning_rate": 2.693294750371156e-07, + "loss": 0.2551254630088806, + "memory(GiB)": 77.0, + "step": 8047, + "token_acc": 0.9076517150395779, + "train_speed(iter/s)": 0.39779 + }, + { + "epoch": 2.57536, + "grad_norm": 0.6928319903143443, + "learning_rate": 2.6893144205082267e-07, + "loss": 0.2783322334289551, + "memory(GiB)": 77.0, + "step": 8048, + "token_acc": 0.9218248018847719, + "train_speed(iter/s)": 0.397757 + }, + { + "epoch": 2.57568, + "grad_norm": 0.7654547912140609, + "learning_rate": 2.6853368668230824e-07, + "loss": 0.3315243124961853, + "memory(GiB)": 77.0, + "step": 8049, + "token_acc": 0.9241417497231451, + "train_speed(iter/s)": 0.397727 + }, + { + "epoch": 2.576, + "grad_norm": 0.7165239592359567, + "learning_rate": 2.6813620898106604e-07, + "loss": 0.324127197265625, + "memory(GiB)": 77.0, + "step": 8050, + "token_acc": 0.8395997574287447, + "train_speed(iter/s)": 0.397695 + }, + { + "epoch": 2.57632, + "grad_norm": 0.6630957847053318, + "learning_rate": 2.6773900899655617e-07, + "loss": 0.34468987584114075, + "memory(GiB)": 77.0, + "step": 8051, + "token_acc": 0.8980301274623407, + "train_speed(iter/s)": 0.397665 + }, + { + "epoch": 2.5766400000000003, + "grad_norm": 0.740051746689111, + "learning_rate": 2.673420867782026e-07, + "loss": 0.29056763648986816, + "memory(GiB)": 77.0, + "step": 8052, + "token_acc": 0.8922972051806408, + "train_speed(iter/s)": 0.397633 + }, + { + "epoch": 2.57696, + "grad_norm": 0.7259275991733943, + "learning_rate": 2.6694544237539534e-07, + "loss": 0.32549867033958435, + "memory(GiB)": 77.0, + "step": 8053, + "token_acc": 0.9352085967130215, + "train_speed(iter/s)": 0.397605 + }, + { + "epoch": 2.57728, + "grad_norm": 0.798742931955625, + "learning_rate": 2.6654907583749027e-07, + "loss": 0.3139360249042511, + "memory(GiB)": 77.0, + "step": 8054, + "token_acc": 0.8457978075517661, + "train_speed(iter/s)": 0.397573 + }, + { + "epoch": 2.5776, + "grad_norm": 0.7664718410711387, + "learning_rate": 2.66152987213808e-07, + "loss": 0.291207879781723, + "memory(GiB)": 77.0, + "step": 8055, + "token_acc": 0.9121653358384217, + "train_speed(iter/s)": 0.397542 + }, + { + "epoch": 2.5779199999999998, + "grad_norm": 0.8289582966440909, + "learning_rate": 2.657571765536351e-07, + "loss": 0.3114515542984009, + "memory(GiB)": 77.0, + "step": 8056, + "token_acc": 0.8643546070182639, + "train_speed(iter/s)": 0.397502 + }, + { + "epoch": 2.57824, + "grad_norm": 0.7484902202931043, + "learning_rate": 2.653616439062234e-07, + "loss": 0.2425340861082077, + "memory(GiB)": 77.0, + "step": 8057, + "token_acc": 0.8984848484848484, + "train_speed(iter/s)": 0.397474 + }, + { + "epoch": 2.57856, + "grad_norm": 0.7656160304629642, + "learning_rate": 2.649663893207896e-07, + "loss": 0.23004955053329468, + "memory(GiB)": 77.0, + "step": 8058, + "token_acc": 0.9104565537555228, + "train_speed(iter/s)": 0.397446 + }, + { + "epoch": 2.57888, + "grad_norm": 0.6917132153514572, + "learning_rate": 2.645714128465168e-07, + "loss": 0.24352332949638367, + "memory(GiB)": 77.0, + "step": 8059, + "token_acc": 0.8914792316532589, + "train_speed(iter/s)": 0.397413 + }, + { + "epoch": 2.5792, + "grad_norm": 0.8543212727639716, + "learning_rate": 2.64176714532553e-07, + "loss": 0.26232314109802246, + "memory(GiB)": 77.0, + "step": 8060, + "token_acc": 0.8643084409507767, + 
"train_speed(iter/s)": 0.397385 + }, + { + "epoch": 2.57952, + "grad_norm": 0.688944602440945, + "learning_rate": 2.6378229442801163e-07, + "loss": 0.31121712923049927, + "memory(GiB)": 77.0, + "step": 8061, + "token_acc": 0.8826395039858281, + "train_speed(iter/s)": 0.39735 + }, + { + "epoch": 2.57984, + "grad_norm": 0.7099010503851779, + "learning_rate": 2.6338815258197034e-07, + "loss": 0.28701990842819214, + "memory(GiB)": 77.0, + "step": 8062, + "token_acc": 0.9338397451604998, + "train_speed(iter/s)": 0.397322 + }, + { + "epoch": 2.5801600000000002, + "grad_norm": 0.6811350416062921, + "learning_rate": 2.6299428904347444e-07, + "loss": 0.28617650270462036, + "memory(GiB)": 77.0, + "step": 8063, + "token_acc": 0.897092112136514, + "train_speed(iter/s)": 0.397291 + }, + { + "epoch": 2.58048, + "grad_norm": 0.675887633816322, + "learning_rate": 2.6260070386153303e-07, + "loss": 0.2943607270717621, + "memory(GiB)": 77.0, + "step": 8064, + "token_acc": 0.9448410123296561, + "train_speed(iter/s)": 0.397255 + }, + { + "epoch": 2.5808, + "grad_norm": 0.7251127197923326, + "learning_rate": 2.622073970851208e-07, + "loss": 0.27062493562698364, + "memory(GiB)": 77.0, + "step": 8065, + "token_acc": 0.9288142796430089, + "train_speed(iter/s)": 0.397229 + }, + { + "epoch": 2.58112, + "grad_norm": 0.7331584037451101, + "learning_rate": 2.61814368763178e-07, + "loss": 0.29392534494400024, + "memory(GiB)": 77.0, + "step": 8066, + "token_acc": 0.8113298127700432, + "train_speed(iter/s)": 0.397186 + }, + { + "epoch": 2.5814399999999997, + "grad_norm": 0.7017053417732324, + "learning_rate": 2.6142161894461004e-07, + "loss": 0.2833748757839203, + "memory(GiB)": 77.0, + "step": 8067, + "token_acc": 0.9045904590459046, + "train_speed(iter/s)": 0.397153 + }, + { + "epoch": 2.58176, + "grad_norm": 0.7118625147747889, + "learning_rate": 2.6102914767828835e-07, + "loss": 0.2862091660499573, + "memory(GiB)": 77.0, + "step": 8068, + "token_acc": 0.9475285171102662, + "train_speed(iter/s)": 0.397117 + }, + { + "epoch": 2.58208, + "grad_norm": 0.7328702514944766, + "learning_rate": 2.6063695501304913e-07, + "loss": 0.3078591823577881, + "memory(GiB)": 77.0, + "step": 8069, + "token_acc": 0.9491817398794143, + "train_speed(iter/s)": 0.397085 + }, + { + "epoch": 2.5824, + "grad_norm": 0.7791181842511982, + "learning_rate": 2.602450409976934e-07, + "loss": 0.33830726146698, + "memory(GiB)": 77.0, + "step": 8070, + "token_acc": 0.8685567010309279, + "train_speed(iter/s)": 0.397057 + }, + { + "epoch": 2.58272, + "grad_norm": 0.7835091041904487, + "learning_rate": 2.598534056809887e-07, + "loss": 0.29914218187332153, + "memory(GiB)": 77.0, + "step": 8071, + "token_acc": 0.8953541858325667, + "train_speed(iter/s)": 0.397022 + }, + { + "epoch": 2.58304, + "grad_norm": 0.8218086248345522, + "learning_rate": 2.594620491116667e-07, + "loss": 0.29072248935699463, + "memory(GiB)": 77.0, + "step": 8072, + "token_acc": 0.905616224648986, + "train_speed(iter/s)": 0.396987 + }, + { + "epoch": 2.58336, + "grad_norm": 0.6890665873360319, + "learning_rate": 2.5907097133842543e-07, + "loss": 0.3368615508079529, + "memory(GiB)": 77.0, + "step": 8073, + "token_acc": 0.9393095768374164, + "train_speed(iter/s)": 0.396951 + }, + { + "epoch": 2.58368, + "grad_norm": 0.8134010491124684, + "learning_rate": 2.5868017240992723e-07, + "loss": 0.29967257380485535, + "memory(GiB)": 77.0, + "step": 8074, + "token_acc": 0.9655172413793104, + "train_speed(iter/s)": 0.396922 + }, + { + "epoch": 2.584, + "grad_norm": 0.759819711806619, + 
"learning_rate": 2.5828965237480046e-07, + "loss": 0.308521032333374, + "memory(GiB)": 77.0, + "step": 8075, + "token_acc": 0.9265027322404371, + "train_speed(iter/s)": 0.396888 + }, + { + "epoch": 2.58432, + "grad_norm": 0.7525748982341334, + "learning_rate": 2.5789941128163907e-07, + "loss": 0.28895851969718933, + "memory(GiB)": 77.0, + "step": 8076, + "token_acc": 0.8611615245009074, + "train_speed(iter/s)": 0.396855 + }, + { + "epoch": 2.5846400000000003, + "grad_norm": 0.7598219212453411, + "learning_rate": 2.5750944917900087e-07, + "loss": 0.34947469830513, + "memory(GiB)": 77.0, + "step": 8077, + "token_acc": 0.8668353265869365, + "train_speed(iter/s)": 0.396823 + }, + { + "epoch": 2.58496, + "grad_norm": 0.7383850734446824, + "learning_rate": 2.5711976611541037e-07, + "loss": 0.3227618932723999, + "memory(GiB)": 77.0, + "step": 8078, + "token_acc": 0.9348914858096828, + "train_speed(iter/s)": 0.396793 + }, + { + "epoch": 2.58528, + "grad_norm": 0.7646440007621816, + "learning_rate": 2.5673036213935686e-07, + "loss": 0.3569636940956116, + "memory(GiB)": 77.0, + "step": 8079, + "token_acc": 0.9426481909160893, + "train_speed(iter/s)": 0.396763 + }, + { + "epoch": 2.5856, + "grad_norm": 0.7243860570932952, + "learning_rate": 2.563412372992949e-07, + "loss": 0.3094874322414398, + "memory(GiB)": 77.0, + "step": 8080, + "token_acc": 0.9528012279355333, + "train_speed(iter/s)": 0.396732 + }, + { + "epoch": 2.5859199999999998, + "grad_norm": 0.6616240263205299, + "learning_rate": 2.5595239164364435e-07, + "loss": 0.22526976466178894, + "memory(GiB)": 77.0, + "step": 8081, + "token_acc": 0.94679186228482, + "train_speed(iter/s)": 0.396698 + }, + { + "epoch": 2.58624, + "grad_norm": 0.7156252093063474, + "learning_rate": 2.555638252207901e-07, + "loss": 0.3132299780845642, + "memory(GiB)": 77.0, + "step": 8082, + "token_acc": 0.8713202497769849, + "train_speed(iter/s)": 0.396665 + }, + { + "epoch": 2.58656, + "grad_norm": 0.6860536735616366, + "learning_rate": 2.551755380790827e-07, + "loss": 0.2662315368652344, + "memory(GiB)": 77.0, + "step": 8083, + "token_acc": 0.8874706954936181, + "train_speed(iter/s)": 0.396636 + }, + { + "epoch": 2.58688, + "grad_norm": 0.7217871582180044, + "learning_rate": 2.5478753026683704e-07, + "loss": 0.3006991446018219, + "memory(GiB)": 77.0, + "step": 8084, + "token_acc": 0.8668730650154799, + "train_speed(iter/s)": 0.396599 + }, + { + "epoch": 2.5872, + "grad_norm": 0.778944239627371, + "learning_rate": 2.543998018323354e-07, + "loss": 0.33067384362220764, + "memory(GiB)": 77.0, + "step": 8085, + "token_acc": 0.9158878504672897, + "train_speed(iter/s)": 0.396569 + }, + { + "epoch": 2.58752, + "grad_norm": 0.7987330261660264, + "learning_rate": 2.540123528238228e-07, + "loss": 0.30488213896751404, + "memory(GiB)": 77.0, + "step": 8086, + "token_acc": 0.9097909790979097, + "train_speed(iter/s)": 0.396536 + }, + { + "epoch": 2.58784, + "grad_norm": 0.7040981085175342, + "learning_rate": 2.536251832895112e-07, + "loss": 0.38719871640205383, + "memory(GiB)": 77.0, + "step": 8087, + "token_acc": 0.8669628244096329, + "train_speed(iter/s)": 0.396505 + }, + { + "epoch": 2.5881600000000002, + "grad_norm": 0.7314095368005108, + "learning_rate": 2.5323829327757647e-07, + "loss": 0.3358456790447235, + "memory(GiB)": 77.0, + "step": 8088, + "token_acc": 0.797742902486334, + "train_speed(iter/s)": 0.396476 + }, + { + "epoch": 2.58848, + "grad_norm": 0.7643648204869508, + "learning_rate": 2.5285168283616075e-07, + "loss": 0.3640691339969635, + "memory(GiB)": 77.0, + 
"step": 8089, + "token_acc": 0.8436132983377078, + "train_speed(iter/s)": 0.396448 + }, + { + "epoch": 2.5888, + "grad_norm": 0.7212515873557321, + "learning_rate": 2.524653520133713e-07, + "loss": 0.2841723561286926, + "memory(GiB)": 77.0, + "step": 8090, + "token_acc": 0.8713310580204778, + "train_speed(iter/s)": 0.396415 + }, + { + "epoch": 2.58912, + "grad_norm": 0.7910696327297942, + "learning_rate": 2.520793008572794e-07, + "loss": 0.2870582938194275, + "memory(GiB)": 77.0, + "step": 8091, + "token_acc": 0.9414017617770969, + "train_speed(iter/s)": 0.396386 + }, + { + "epoch": 2.5894399999999997, + "grad_norm": 0.7206299330981526, + "learning_rate": 2.5169352941592313e-07, + "loss": 0.34604090452194214, + "memory(GiB)": 77.0, + "step": 8092, + "token_acc": 0.9041147602869007, + "train_speed(iter/s)": 0.396357 + }, + { + "epoch": 2.58976, + "grad_norm": 0.6997274665775058, + "learning_rate": 2.513080377373045e-07, + "loss": 0.2715184688568115, + "memory(GiB)": 77.0, + "step": 8093, + "token_acc": 0.9335443037974683, + "train_speed(iter/s)": 0.396326 + }, + { + "epoch": 2.59008, + "grad_norm": 0.8078712486883022, + "learning_rate": 2.5092282586939187e-07, + "loss": 0.3058478832244873, + "memory(GiB)": 77.0, + "step": 8094, + "token_acc": 0.8952574919988362, + "train_speed(iter/s)": 0.396297 + }, + { + "epoch": 2.5904, + "grad_norm": 0.689131911947727, + "learning_rate": 2.5053789386011775e-07, + "loss": 0.2497454136610031, + "memory(GiB)": 77.0, + "step": 8095, + "token_acc": 0.9553571428571429, + "train_speed(iter/s)": 0.396266 + }, + { + "epoch": 2.59072, + "grad_norm": 0.6701536948403273, + "learning_rate": 2.5015324175738044e-07, + "loss": 0.28664976358413696, + "memory(GiB)": 77.0, + "step": 8096, + "token_acc": 0.8698614047451256, + "train_speed(iter/s)": 0.396229 + }, + { + "epoch": 2.59104, + "grad_norm": 0.7004297117497984, + "learning_rate": 2.4976886960904334e-07, + "loss": 0.2549686133861542, + "memory(GiB)": 77.0, + "step": 8097, + "token_acc": 0.9077402348430386, + "train_speed(iter/s)": 0.396199 + }, + { + "epoch": 2.59136, + "grad_norm": 0.7488126997834388, + "learning_rate": 2.493847774629346e-07, + "loss": 0.3149053752422333, + "memory(GiB)": 77.0, + "step": 8098, + "token_acc": 0.8377801494130203, + "train_speed(iter/s)": 0.396169 + }, + { + "epoch": 2.59168, + "grad_norm": 0.9152230356037333, + "learning_rate": 2.4900096536684793e-07, + "loss": 0.3354090452194214, + "memory(GiB)": 77.0, + "step": 8099, + "token_acc": 0.9286349488661627, + "train_speed(iter/s)": 0.396119 + }, + { + "epoch": 2.592, + "grad_norm": 0.7148458077107388, + "learning_rate": 2.486174333685423e-07, + "loss": 0.35837656259536743, + "memory(GiB)": 77.0, + "step": 8100, + "token_acc": 0.8451504313065432, + "train_speed(iter/s)": 0.39609 + }, + { + "epoch": 2.59232, + "grad_norm": 0.9386756127054944, + "learning_rate": 2.4823418151574075e-07, + "loss": 0.3243387043476105, + "memory(GiB)": 77.0, + "step": 8101, + "token_acc": 0.8658940397350994, + "train_speed(iter/s)": 0.396061 + }, + { + "epoch": 2.59264, + "grad_norm": 0.7524922224511745, + "learning_rate": 2.4785120985613366e-07, + "loss": 0.3347482681274414, + "memory(GiB)": 77.0, + "step": 8102, + "token_acc": 0.8982128982128982, + "train_speed(iter/s)": 0.396033 + }, + { + "epoch": 2.59296, + "grad_norm": 0.7423609175054435, + "learning_rate": 2.474685184373743e-07, + "loss": 0.2892594337463379, + "memory(GiB)": 77.0, + "step": 8103, + "token_acc": 0.8941543700340522, + "train_speed(iter/s)": 0.396 + }, + { + "epoch": 2.59328, + 
"grad_norm": 0.7770145008730124, + "learning_rate": 2.4708610730708294e-07, + "loss": 0.39435476064682007, + "memory(GiB)": 77.0, + "step": 8104, + "token_acc": 0.9027692307692308, + "train_speed(iter/s)": 0.395971 + }, + { + "epoch": 2.5936, + "grad_norm": 0.6999844388667499, + "learning_rate": 2.467039765128429e-07, + "loss": 0.28331780433654785, + "memory(GiB)": 77.0, + "step": 8105, + "token_acc": 0.9334800684764001, + "train_speed(iter/s)": 0.395936 + }, + { + "epoch": 2.59392, + "grad_norm": 0.7297384949452662, + "learning_rate": 2.4632212610220393e-07, + "loss": 0.34713393449783325, + "memory(GiB)": 77.0, + "step": 8106, + "token_acc": 0.9251955117307038, + "train_speed(iter/s)": 0.395904 + }, + { + "epoch": 2.59424, + "grad_norm": 0.7085899933459033, + "learning_rate": 2.45940556122681e-07, + "loss": 0.2643698453903198, + "memory(GiB)": 77.0, + "step": 8107, + "token_acc": 0.9645264278112806, + "train_speed(iter/s)": 0.395877 + }, + { + "epoch": 2.59456, + "grad_norm": 0.7790258285451498, + "learning_rate": 2.4555926662175376e-07, + "loss": 0.3173367977142334, + "memory(GiB)": 77.0, + "step": 8108, + "token_acc": 0.9060838747784997, + "train_speed(iter/s)": 0.395841 + }, + { + "epoch": 2.59488, + "grad_norm": 0.7091792739287875, + "learning_rate": 2.4517825764686726e-07, + "loss": 0.3398495316505432, + "memory(GiB)": 77.0, + "step": 8109, + "token_acc": 0.9251152073732719, + "train_speed(iter/s)": 0.39581 + }, + { + "epoch": 2.5952, + "grad_norm": 0.7827055904058658, + "learning_rate": 2.4479752924543103e-07, + "loss": 0.3572588264942169, + "memory(GiB)": 77.0, + "step": 8110, + "token_acc": 0.8354883081155433, + "train_speed(iter/s)": 0.39578 + }, + { + "epoch": 2.59552, + "grad_norm": 0.8572328486492938, + "learning_rate": 2.444170814648203e-07, + "loss": 0.29112643003463745, + "memory(GiB)": 77.0, + "step": 8111, + "token_acc": 0.9569331158238172, + "train_speed(iter/s)": 0.395749 + }, + { + "epoch": 2.59584, + "grad_norm": 0.7324645641454174, + "learning_rate": 2.440369143523755e-07, + "loss": 0.3457079827785492, + "memory(GiB)": 77.0, + "step": 8112, + "token_acc": 0.9095744680851063, + "train_speed(iter/s)": 0.39572 + }, + { + "epoch": 2.5961600000000002, + "grad_norm": 0.7478127218324409, + "learning_rate": 2.436570279554015e-07, + "loss": 0.26620858907699585, + "memory(GiB)": 77.0, + "step": 8113, + "token_acc": 0.8850653819683414, + "train_speed(iter/s)": 0.395692 + }, + { + "epoch": 2.59648, + "grad_norm": 0.7642820522970704, + "learning_rate": 2.432774223211687e-07, + "loss": 0.2405390441417694, + "memory(GiB)": 77.0, + "step": 8114, + "token_acc": 0.909202958093673, + "train_speed(iter/s)": 0.395665 + }, + { + "epoch": 2.5968, + "grad_norm": 0.7938579794741379, + "learning_rate": 2.4289809749691253e-07, + "loss": 0.3767911195755005, + "memory(GiB)": 77.0, + "step": 8115, + "token_acc": 0.8933367768595041, + "train_speed(iter/s)": 0.395635 + }, + { + "epoch": 2.59712, + "grad_norm": 0.7635368458834764, + "learning_rate": 2.42519053529833e-07, + "loss": 0.3517727851867676, + "memory(GiB)": 77.0, + "step": 8116, + "token_acc": 0.9226152874289324, + "train_speed(iter/s)": 0.395599 + }, + { + "epoch": 2.5974399999999997, + "grad_norm": 0.7312776213757097, + "learning_rate": 2.4214029046709554e-07, + "loss": 0.36334753036499023, + "memory(GiB)": 77.0, + "step": 8117, + "token_acc": 0.9537777777777777, + "train_speed(iter/s)": 0.395572 + }, + { + "epoch": 2.59776, + "grad_norm": 0.7151330103690048, + "learning_rate": 2.4176180835583124e-07, + "loss": 0.330707848072052, + 
"memory(GiB)": 77.0, + "step": 8118, + "token_acc": 0.9438934542363275, + "train_speed(iter/s)": 0.39554 + }, + { + "epoch": 2.59808, + "grad_norm": 0.6574783006108627, + "learning_rate": 2.413836072431361e-07, + "loss": 0.29850757122039795, + "memory(GiB)": 77.0, + "step": 8119, + "token_acc": 0.9309971879731775, + "train_speed(iter/s)": 0.395506 + }, + { + "epoch": 2.5984, + "grad_norm": 0.7214179944410463, + "learning_rate": 2.4100568717606907e-07, + "loss": 0.3081623613834381, + "memory(GiB)": 77.0, + "step": 8120, + "token_acc": 0.8835963923337091, + "train_speed(iter/s)": 0.395474 + }, + { + "epoch": 2.59872, + "grad_norm": 0.771321978676822, + "learning_rate": 2.406280482016568e-07, + "loss": 0.36343973875045776, + "memory(GiB)": 77.0, + "step": 8121, + "token_acc": 0.8866653838753127, + "train_speed(iter/s)": 0.39544 + }, + { + "epoch": 2.59904, + "grad_norm": 0.7634389067773389, + "learning_rate": 2.402506903668894e-07, + "loss": 0.29214945435523987, + "memory(GiB)": 77.0, + "step": 8122, + "token_acc": 0.898406374501992, + "train_speed(iter/s)": 0.395409 + }, + { + "epoch": 2.59936, + "grad_norm": 0.7609988935955765, + "learning_rate": 2.3987361371872325e-07, + "loss": 0.31079405546188354, + "memory(GiB)": 77.0, + "step": 8123, + "token_acc": 0.8647588276021232, + "train_speed(iter/s)": 0.395381 + }, + { + "epoch": 2.59968, + "grad_norm": 0.7346917652653636, + "learning_rate": 2.394968183040783e-07, + "loss": 0.3068259656429291, + "memory(GiB)": 77.0, + "step": 8124, + "token_acc": 0.8472045743329097, + "train_speed(iter/s)": 0.395353 + }, + { + "epoch": 2.6, + "grad_norm": 0.6892921580335258, + "learning_rate": 2.3912030416984035e-07, + "loss": 0.3144369125366211, + "memory(GiB)": 77.0, + "step": 8125, + "token_acc": 0.8774313882227551, + "train_speed(iter/s)": 0.395324 + }, + { + "epoch": 2.60032, + "grad_norm": 0.7164921631235768, + "learning_rate": 2.3874407136286026e-07, + "loss": 0.3048197031021118, + "memory(GiB)": 77.0, + "step": 8126, + "token_acc": 0.9424028268551237, + "train_speed(iter/s)": 0.395294 + }, + { + "epoch": 2.60064, + "grad_norm": 0.7398737418286214, + "learning_rate": 2.383681199299534e-07, + "loss": 0.30327990651130676, + "memory(GiB)": 77.0, + "step": 8127, + "token_acc": 0.9382334774552192, + "train_speed(iter/s)": 0.395265 + }, + { + "epoch": 2.60096, + "grad_norm": 0.7240028170629342, + "learning_rate": 2.3799244991790067e-07, + "loss": 0.3332672119140625, + "memory(GiB)": 77.0, + "step": 8128, + "token_acc": 0.906902935731288, + "train_speed(iter/s)": 0.395233 + }, + { + "epoch": 2.60128, + "grad_norm": 0.7308878751677989, + "learning_rate": 2.376170613734477e-07, + "loss": 0.2645277976989746, + "memory(GiB)": 77.0, + "step": 8129, + "token_acc": 0.8865400053518866, + "train_speed(iter/s)": 0.395207 + }, + { + "epoch": 2.6016, + "grad_norm": 0.673668358376012, + "learning_rate": 2.3724195434330467e-07, + "loss": 0.247841015458107, + "memory(GiB)": 77.0, + "step": 8130, + "token_acc": 0.9194289862085652, + "train_speed(iter/s)": 0.395178 + }, + { + "epoch": 2.60192, + "grad_norm": 0.6770310865769745, + "learning_rate": 2.368671288741478e-07, + "loss": 0.29879578948020935, + "memory(GiB)": 77.0, + "step": 8131, + "token_acc": 0.882090065314541, + "train_speed(iter/s)": 0.395148 + }, + { + "epoch": 2.60224, + "grad_norm": 0.7407657611852236, + "learning_rate": 2.3649258501261706e-07, + "loss": 0.40991806983947754, + "memory(GiB)": 77.0, + "step": 8132, + "token_acc": 0.9270177447052089, + "train_speed(iter/s)": 0.395119 + }, + { + "epoch": 2.60256, + 
"grad_norm": 0.7946445180124421, + "learning_rate": 2.3611832280531822e-07, + "loss": 0.3051074147224426, + "memory(GiB)": 77.0, + "step": 8133, + "token_acc": 0.8784933171324423, + "train_speed(iter/s)": 0.395088 + }, + { + "epoch": 2.60288, + "grad_norm": 0.7406691427469457, + "learning_rate": 2.357443422988215e-07, + "loss": 0.26167982816696167, + "memory(GiB)": 77.0, + "step": 8134, + "token_acc": 0.9497319034852547, + "train_speed(iter/s)": 0.395059 + }, + { + "epoch": 2.6032, + "grad_norm": 0.737371687948812, + "learning_rate": 2.3537064353966242e-07, + "loss": 0.29410985112190247, + "memory(GiB)": 77.0, + "step": 8135, + "token_acc": 0.967939651107968, + "train_speed(iter/s)": 0.395032 + }, + { + "epoch": 2.60352, + "grad_norm": 0.7391447623353693, + "learning_rate": 2.3499722657434138e-07, + "loss": 0.27937573194503784, + "memory(GiB)": 77.0, + "step": 8136, + "token_acc": 0.9390862944162437, + "train_speed(iter/s)": 0.395005 + }, + { + "epoch": 2.60384, + "grad_norm": 0.7596686158351654, + "learning_rate": 2.3462409144932364e-07, + "loss": 0.2651224136352539, + "memory(GiB)": 77.0, + "step": 8137, + "token_acc": 0.884311377245509, + "train_speed(iter/s)": 0.394976 + }, + { + "epoch": 2.6041600000000003, + "grad_norm": 0.6638688356415346, + "learning_rate": 2.3425123821103901e-07, + "loss": 0.2413093000650406, + "memory(GiB)": 77.0, + "step": 8138, + "token_acc": 0.9145855953673543, + "train_speed(iter/s)": 0.394949 + }, + { + "epoch": 2.60448, + "grad_norm": 0.7386720239715099, + "learning_rate": 2.338786669058832e-07, + "loss": 0.27905046939849854, + "memory(GiB)": 77.0, + "step": 8139, + "token_acc": 0.9194302247941242, + "train_speed(iter/s)": 0.394921 + }, + { + "epoch": 2.6048, + "grad_norm": 0.7260833273704496, + "learning_rate": 2.33506377580216e-07, + "loss": 0.2834111452102661, + "memory(GiB)": 77.0, + "step": 8140, + "token_acc": 0.9251058287202866, + "train_speed(iter/s)": 0.394891 + }, + { + "epoch": 2.60512, + "grad_norm": 0.8352893772503834, + "learning_rate": 2.3313437028036208e-07, + "loss": 0.2934821546077728, + "memory(GiB)": 77.0, + "step": 8141, + "token_acc": 0.9092578986039677, + "train_speed(iter/s)": 0.394861 + }, + { + "epoch": 2.6054399999999998, + "grad_norm": 0.7005316608797874, + "learning_rate": 2.3276264505261188e-07, + "loss": 0.2814090847969055, + "memory(GiB)": 77.0, + "step": 8142, + "token_acc": 0.9428861101723556, + "train_speed(iter/s)": 0.394834 + }, + { + "epoch": 2.60576, + "grad_norm": 0.8360003230982604, + "learning_rate": 2.3239120194321951e-07, + "loss": 0.3787994682788849, + "memory(GiB)": 77.0, + "step": 8143, + "token_acc": 0.8711323763955343, + "train_speed(iter/s)": 0.394808 + }, + { + "epoch": 2.60608, + "grad_norm": 0.738639735936817, + "learning_rate": 2.3202004099840498e-07, + "loss": 0.30480265617370605, + "memory(GiB)": 77.0, + "step": 8144, + "token_acc": 0.9600893997445722, + "train_speed(iter/s)": 0.394776 + }, + { + "epoch": 2.6064, + "grad_norm": 0.6488263434368521, + "learning_rate": 2.3164916226435296e-07, + "loss": 0.27564042806625366, + "memory(GiB)": 77.0, + "step": 8145, + "token_acc": 0.9332518337408313, + "train_speed(iter/s)": 0.394746 + }, + { + "epoch": 2.60672, + "grad_norm": 0.7418751816771559, + "learning_rate": 2.312785657872124e-07, + "loss": 0.3403778076171875, + "memory(GiB)": 77.0, + "step": 8146, + "token_acc": 0.8902802623732856, + "train_speed(iter/s)": 0.394711 + }, + { + "epoch": 2.60704, + "grad_norm": 0.7120524825274652, + "learning_rate": 2.309082516130981e-07, + "loss": 0.28861600160598755, + 
"memory(GiB)": 77.0, + "step": 8147, + "token_acc": 0.8821641448876663, + "train_speed(iter/s)": 0.394679 + }, + { + "epoch": 2.60736, + "grad_norm": 0.7548303404489913, + "learning_rate": 2.3053821978808898e-07, + "loss": 0.346574068069458, + "memory(GiB)": 77.0, + "step": 8148, + "token_acc": 0.8811188811188811, + "train_speed(iter/s)": 0.394652 + }, + { + "epoch": 2.60768, + "grad_norm": 0.7611409019938795, + "learning_rate": 2.301684703582291e-07, + "loss": 0.35428833961486816, + "memory(GiB)": 77.0, + "step": 8149, + "token_acc": 0.8763866877971473, + "train_speed(iter/s)": 0.394625 + }, + { + "epoch": 2.608, + "grad_norm": 0.7303456521718735, + "learning_rate": 2.2979900336952743e-07, + "loss": 0.3126709461212158, + "memory(GiB)": 77.0, + "step": 8150, + "token_acc": 0.8947586492645535, + "train_speed(iter/s)": 0.394596 + }, + { + "epoch": 2.60832, + "grad_norm": 0.7070210191368567, + "learning_rate": 2.294298188679578e-07, + "loss": 0.3092508912086487, + "memory(GiB)": 77.0, + "step": 8151, + "token_acc": 0.9167537443399513, + "train_speed(iter/s)": 0.394569 + }, + { + "epoch": 2.60864, + "grad_norm": 0.8102278620077003, + "learning_rate": 2.290609168994584e-07, + "loss": 0.3591754734516144, + "memory(GiB)": 77.0, + "step": 8152, + "token_acc": 0.9250112258643916, + "train_speed(iter/s)": 0.394542 + }, + { + "epoch": 2.6089599999999997, + "grad_norm": 0.7483547188849632, + "learning_rate": 2.2869229750993317e-07, + "loss": 0.35844850540161133, + "memory(GiB)": 77.0, + "step": 8153, + "token_acc": 0.8717277486910995, + "train_speed(iter/s)": 0.394515 + }, + { + "epoch": 2.60928, + "grad_norm": 0.6703273413278904, + "learning_rate": 2.283239607452503e-07, + "loss": 0.29247725009918213, + "memory(GiB)": 77.0, + "step": 8154, + "token_acc": 0.8899317406143344, + "train_speed(iter/s)": 0.394484 + }, + { + "epoch": 2.6096, + "grad_norm": 0.7758828741962313, + "learning_rate": 2.2795590665124267e-07, + "loss": 0.27963802218437195, + "memory(GiB)": 77.0, + "step": 8155, + "token_acc": 0.8989808153477218, + "train_speed(iter/s)": 0.394458 + }, + { + "epoch": 2.60992, + "grad_norm": 0.6833504692049963, + "learning_rate": 2.275881352737086e-07, + "loss": 0.26573073863983154, + "memory(GiB)": 77.0, + "step": 8156, + "token_acc": 0.9242610837438424, + "train_speed(iter/s)": 0.394426 + }, + { + "epoch": 2.61024, + "grad_norm": 0.7608577974895978, + "learning_rate": 2.2722064665841036e-07, + "loss": 0.4079968333244324, + "memory(GiB)": 77.0, + "step": 8157, + "token_acc": 0.881036217303823, + "train_speed(iter/s)": 0.394397 + }, + { + "epoch": 2.61056, + "grad_norm": 0.8254913446052793, + "learning_rate": 2.2685344085107613e-07, + "loss": 0.371115505695343, + "memory(GiB)": 77.0, + "step": 8158, + "token_acc": 0.9386232621227535, + "train_speed(iter/s)": 0.394368 + }, + { + "epoch": 2.61088, + "grad_norm": 0.6548003310665464, + "learning_rate": 2.2648651789739766e-07, + "loss": 0.2647952735424042, + "memory(GiB)": 77.0, + "step": 8159, + "token_acc": 0.9281129653401797, + "train_speed(iter/s)": 0.394337 + }, + { + "epoch": 2.6112, + "grad_norm": 0.7731223020543683, + "learning_rate": 2.2611987784303264e-07, + "loss": 0.3831683099269867, + "memory(GiB)": 77.0, + "step": 8160, + "token_acc": 0.8592726451321436, + "train_speed(iter/s)": 0.394306 + }, + { + "epoch": 2.61152, + "grad_norm": 0.6401863783443867, + "learning_rate": 2.2575352073360285e-07, + "loss": 0.24151061475276947, + "memory(GiB)": 77.0, + "step": 8161, + "token_acc": 0.9214501510574018, + "train_speed(iter/s)": 0.394274 + }, + { + 
"epoch": 2.61184, + "grad_norm": 0.6830951270637804, + "learning_rate": 2.2538744661469574e-07, + "loss": 0.2946963608264923, + "memory(GiB)": 77.0, + "step": 8162, + "token_acc": 0.9086308687095868, + "train_speed(iter/s)": 0.394241 + }, + { + "epoch": 2.6121600000000003, + "grad_norm": 0.7076658461824125, + "learning_rate": 2.2502165553186129e-07, + "loss": 0.28596511483192444, + "memory(GiB)": 77.0, + "step": 8163, + "token_acc": 0.9035267349260523, + "train_speed(iter/s)": 0.394215 + }, + { + "epoch": 2.61248, + "grad_norm": 0.7238638941946969, + "learning_rate": 2.246561475306172e-07, + "loss": 0.34104493260383606, + "memory(GiB)": 77.0, + "step": 8164, + "token_acc": 0.9218806509945751, + "train_speed(iter/s)": 0.394188 + }, + { + "epoch": 2.6128, + "grad_norm": 0.7901402563966848, + "learning_rate": 2.242909226564441e-07, + "loss": 0.3226346969604492, + "memory(GiB)": 77.0, + "step": 8165, + "token_acc": 0.9197416974169742, + "train_speed(iter/s)": 0.394159 + }, + { + "epoch": 2.61312, + "grad_norm": 0.8953008682924438, + "learning_rate": 2.2392598095478835e-07, + "loss": 0.35136014223098755, + "memory(GiB)": 77.0, + "step": 8166, + "token_acc": 0.8985107556536128, + "train_speed(iter/s)": 0.394127 + }, + { + "epoch": 2.6134399999999998, + "grad_norm": 0.7556789949757349, + "learning_rate": 2.2356132247106004e-07, + "loss": 0.2358391284942627, + "memory(GiB)": 77.0, + "step": 8167, + "token_acc": 0.9417857142857143, + "train_speed(iter/s)": 0.394099 + }, + { + "epoch": 2.61376, + "grad_norm": 0.6898386349080166, + "learning_rate": 2.2319694725063486e-07, + "loss": 0.27639421820640564, + "memory(GiB)": 77.0, + "step": 8168, + "token_acc": 0.8571970415323346, + "train_speed(iter/s)": 0.394073 + }, + { + "epoch": 2.61408, + "grad_norm": 0.6788451262865125, + "learning_rate": 2.2283285533885312e-07, + "loss": 0.31982266902923584, + "memory(GiB)": 77.0, + "step": 8169, + "token_acc": 0.8586876382403539, + "train_speed(iter/s)": 0.394043 + }, + { + "epoch": 2.6144, + "grad_norm": 0.776996904657247, + "learning_rate": 2.224690467810195e-07, + "loss": 0.3083280920982361, + "memory(GiB)": 77.0, + "step": 8170, + "token_acc": 0.9232954545454546, + "train_speed(iter/s)": 0.394014 + }, + { + "epoch": 2.61472, + "grad_norm": 0.6801151218867472, + "learning_rate": 2.2210552162240357e-07, + "loss": 0.33555760979652405, + "memory(GiB)": 77.0, + "step": 8171, + "token_acc": 0.8755112474437627, + "train_speed(iter/s)": 0.393984 + }, + { + "epoch": 2.61504, + "grad_norm": 0.7624379657476552, + "learning_rate": 2.2174227990824027e-07, + "loss": 0.2584116458892822, + "memory(GiB)": 77.0, + "step": 8172, + "token_acc": 0.9161904761904762, + "train_speed(iter/s)": 0.393957 + }, + { + "epoch": 2.61536, + "grad_norm": 0.6706841222308008, + "learning_rate": 2.2137932168372816e-07, + "loss": 0.2364514321088791, + "memory(GiB)": 77.0, + "step": 8173, + "token_acc": 0.8788480635551142, + "train_speed(iter/s)": 0.393925 + }, + { + "epoch": 2.6156800000000002, + "grad_norm": 0.6892937932562435, + "learning_rate": 2.2101664699403113e-07, + "loss": 0.3437042832374573, + "memory(GiB)": 77.0, + "step": 8174, + "token_acc": 0.8907746597210553, + "train_speed(iter/s)": 0.39389 + }, + { + "epoch": 2.616, + "grad_norm": 0.7237047254981608, + "learning_rate": 2.2065425588427835e-07, + "loss": 0.3241693377494812, + "memory(GiB)": 77.0, + "step": 8175, + "token_acc": 0.9026874115983027, + "train_speed(iter/s)": 0.393861 + }, + { + "epoch": 2.61632, + "grad_norm": 0.7377852461913905, + "learning_rate": 
2.2029214839956292e-07, + "loss": 0.41260913014411926, + "memory(GiB)": 77.0, + "step": 8176, + "token_acc": 0.9390801096558026, + "train_speed(iter/s)": 0.393832 + }, + { + "epoch": 2.61664, + "grad_norm": 0.7358723351011921, + "learning_rate": 2.1993032458494213e-07, + "loss": 0.30639350414276123, + "memory(GiB)": 77.0, + "step": 8177, + "token_acc": 0.9224137931034483, + "train_speed(iter/s)": 0.393796 + }, + { + "epoch": 2.6169599999999997, + "grad_norm": 0.6985637078284904, + "learning_rate": 2.1956878448543912e-07, + "loss": 0.2771756649017334, + "memory(GiB)": 77.0, + "step": 8178, + "token_acc": 0.9066527342389411, + "train_speed(iter/s)": 0.393767 + }, + { + "epoch": 2.61728, + "grad_norm": 0.7227384718803432, + "learning_rate": 2.1920752814604123e-07, + "loss": 0.32218697667121887, + "memory(GiB)": 77.0, + "step": 8179, + "token_acc": 0.8714839961202716, + "train_speed(iter/s)": 0.393739 + }, + { + "epoch": 2.6176, + "grad_norm": 0.6273146208488948, + "learning_rate": 2.1884655561170033e-07, + "loss": 0.33374452590942383, + "memory(GiB)": 77.0, + "step": 8180, + "token_acc": 0.8648967551622418, + "train_speed(iter/s)": 0.393708 + }, + { + "epoch": 2.61792, + "grad_norm": 0.7680066139454307, + "learning_rate": 2.1848586692733403e-07, + "loss": 0.31791025400161743, + "memory(GiB)": 77.0, + "step": 8181, + "token_acc": 0.9045454545454545, + "train_speed(iter/s)": 0.393679 + }, + { + "epoch": 2.61824, + "grad_norm": 0.7621979881312898, + "learning_rate": 2.1812546213782315e-07, + "loss": 0.3432536721229553, + "memory(GiB)": 77.0, + "step": 8182, + "token_acc": 0.9364161849710982, + "train_speed(iter/s)": 0.393651 + }, + { + "epoch": 2.61856, + "grad_norm": 0.793629970963521, + "learning_rate": 2.1776534128801374e-07, + "loss": 0.3019747734069824, + "memory(GiB)": 77.0, + "step": 8183, + "token_acc": 0.8771420273017717, + "train_speed(iter/s)": 0.393622 + }, + { + "epoch": 2.61888, + "grad_norm": 0.7669188893262131, + "learning_rate": 2.174055044227169e-07, + "loss": 0.327018678188324, + "memory(GiB)": 77.0, + "step": 8184, + "token_acc": 0.9067340067340067, + "train_speed(iter/s)": 0.393595 + }, + { + "epoch": 2.6192, + "grad_norm": 0.817792287813377, + "learning_rate": 2.1704595158670794e-07, + "loss": 0.3728786110877991, + "memory(GiB)": 77.0, + "step": 8185, + "token_acc": 0.9561965811965812, + "train_speed(iter/s)": 0.393562 + }, + { + "epoch": 2.61952, + "grad_norm": 0.7350749810315765, + "learning_rate": 2.166866828247269e-07, + "loss": 0.3008727431297302, + "memory(GiB)": 77.0, + "step": 8186, + "token_acc": 0.8682193396226415, + "train_speed(iter/s)": 0.393526 + }, + { + "epoch": 2.61984, + "grad_norm": 0.8169133925075388, + "learning_rate": 2.1632769818147881e-07, + "loss": 0.25862833857536316, + "memory(GiB)": 77.0, + "step": 8187, + "token_acc": 0.9315295416896742, + "train_speed(iter/s)": 0.393499 + }, + { + "epoch": 2.6201600000000003, + "grad_norm": 0.752296776015592, + "learning_rate": 2.159689977016327e-07, + "loss": 0.3295991122722626, + "memory(GiB)": 77.0, + "step": 8188, + "token_acc": 0.9212442091330245, + "train_speed(iter/s)": 0.393471 + }, + { + "epoch": 2.62048, + "grad_norm": 0.727591805596202, + "learning_rate": 2.1561058142982287e-07, + "loss": 0.3097565770149231, + "memory(GiB)": 77.0, + "step": 8189, + "token_acc": 0.8594812389826241, + "train_speed(iter/s)": 0.393443 + }, + { + "epoch": 2.6208, + "grad_norm": 0.7266573858804597, + "learning_rate": 2.152524494106484e-07, + "loss": 0.358285129070282, + "memory(GiB)": 77.0, + "step": 8190, + "token_acc": 
0.879623248334482, + "train_speed(iter/s)": 0.393413 + }, + { + "epoch": 2.62112, + "grad_norm": 0.7550275176158845, + "learning_rate": 2.148946016886716e-07, + "loss": 0.31982356309890747, + "memory(GiB)": 77.0, + "step": 8191, + "token_acc": 0.9539982803095443, + "train_speed(iter/s)": 0.393386 + }, + { + "epoch": 2.6214399999999998, + "grad_norm": 0.7884069205743747, + "learning_rate": 2.145370383084211e-07, + "loss": 0.298009991645813, + "memory(GiB)": 77.0, + "step": 8192, + "token_acc": 0.8677409546974765, + "train_speed(iter/s)": 0.393358 + }, + { + "epoch": 2.62176, + "grad_norm": 0.7297978305391895, + "learning_rate": 2.1417975931438933e-07, + "loss": 0.3514444828033447, + "memory(GiB)": 77.0, + "step": 8193, + "token_acc": 0.9158163265306123, + "train_speed(iter/s)": 0.39333 + }, + { + "epoch": 2.62208, + "grad_norm": 0.6826319893274515, + "learning_rate": 2.1382276475103326e-07, + "loss": 0.2666308283805847, + "memory(GiB)": 77.0, + "step": 8194, + "token_acc": 0.8800959232613909, + "train_speed(iter/s)": 0.393302 + }, + { + "epoch": 2.6224, + "grad_norm": 0.7901060255548938, + "learning_rate": 2.134660546627751e-07, + "loss": 0.39008936285972595, + "memory(GiB)": 77.0, + "step": 8195, + "token_acc": 0.9104868913857678, + "train_speed(iter/s)": 0.393272 + }, + { + "epoch": 2.62272, + "grad_norm": 0.7629735810223153, + "learning_rate": 2.13109629094001e-07, + "loss": 0.34849244356155396, + "memory(GiB)": 77.0, + "step": 8196, + "token_acc": 0.9057387057387057, + "train_speed(iter/s)": 0.393244 + }, + { + "epoch": 2.62304, + "grad_norm": 0.784861800736684, + "learning_rate": 2.1275348808906138e-07, + "loss": 0.33210110664367676, + "memory(GiB)": 77.0, + "step": 8197, + "token_acc": 0.9365190094175095, + "train_speed(iter/s)": 0.393216 + }, + { + "epoch": 2.62336, + "grad_norm": 0.7391480069808825, + "learning_rate": 2.1239763169227297e-07, + "loss": 0.3357047140598297, + "memory(GiB)": 77.0, + "step": 8198, + "token_acc": 0.8417376490630324, + "train_speed(iter/s)": 0.393186 + }, + { + "epoch": 2.6236800000000002, + "grad_norm": 0.6844899904446918, + "learning_rate": 2.1204205994791504e-07, + "loss": 0.31380191445350647, + "memory(GiB)": 77.0, + "step": 8199, + "token_acc": 0.8886916835699797, + "train_speed(iter/s)": 0.393156 + }, + { + "epoch": 2.624, + "grad_norm": 0.6733869585158461, + "learning_rate": 2.116867729002328e-07, + "loss": 0.28164470195770264, + "memory(GiB)": 77.0, + "step": 8200, + "token_acc": 0.954084863837872, + "train_speed(iter/s)": 0.393126 + }, + { + "epoch": 2.62432, + "grad_norm": 0.7746649781734873, + "learning_rate": 2.1133177059343535e-07, + "loss": 0.3186023533344269, + "memory(GiB)": 77.0, + "step": 8201, + "token_acc": 0.9036321031048623, + "train_speed(iter/s)": 0.393101 + }, + { + "epoch": 2.62464, + "grad_norm": 0.7344989552226495, + "learning_rate": 2.1097705307169647e-07, + "loss": 0.35576674342155457, + "memory(GiB)": 77.0, + "step": 8202, + "token_acc": 0.9136896256073164, + "train_speed(iter/s)": 0.393074 + }, + { + "epoch": 2.6249599999999997, + "grad_norm": 0.6839137096579182, + "learning_rate": 2.1062262037915505e-07, + "loss": 0.30193883180618286, + "memory(GiB)": 77.0, + "step": 8203, + "token_acc": 0.8342384887839434, + "train_speed(iter/s)": 0.393046 + }, + { + "epoch": 2.62528, + "grad_norm": 0.6930209229755424, + "learning_rate": 2.1026847255991356e-07, + "loss": 0.30212390422821045, + "memory(GiB)": 77.0, + "step": 8204, + "token_acc": 0.9340563991323211, + "train_speed(iter/s)": 0.393012 + }, + { + "epoch": 2.6256, + "grad_norm": 
0.7870980766122732, + "learning_rate": 2.0991460965804012e-07, + "loss": 0.34544116258621216, + "memory(GiB)": 77.0, + "step": 8205, + "token_acc": 0.8940013633265167, + "train_speed(iter/s)": 0.392985 + }, + { + "epoch": 2.62592, + "grad_norm": 0.7844211447035258, + "learning_rate": 2.0956103171756614e-07, + "loss": 0.32880979776382446, + "memory(GiB)": 77.0, + "step": 8206, + "token_acc": 0.9097270818754374, + "train_speed(iter/s)": 0.392955 + }, + { + "epoch": 2.62624, + "grad_norm": 0.7620671750387625, + "learning_rate": 2.092077387824884e-07, + "loss": 0.3496764004230499, + "memory(GiB)": 77.0, + "step": 8207, + "token_acc": 0.9426032660902978, + "train_speed(iter/s)": 0.392922 + }, + { + "epoch": 2.62656, + "grad_norm": 0.6973394840807935, + "learning_rate": 2.088547308967684e-07, + "loss": 0.3392418920993805, + "memory(GiB)": 77.0, + "step": 8208, + "token_acc": 0.8573562359970127, + "train_speed(iter/s)": 0.39289 + }, + { + "epoch": 2.62688, + "grad_norm": 0.7476552220736673, + "learning_rate": 2.085020081043318e-07, + "loss": 0.340779185295105, + "memory(GiB)": 77.0, + "step": 8209, + "token_acc": 0.9513996889580093, + "train_speed(iter/s)": 0.392862 + }, + { + "epoch": 2.6272, + "grad_norm": 0.7645524034456989, + "learning_rate": 2.0814957044906852e-07, + "loss": 0.24685420095920563, + "memory(GiB)": 77.0, + "step": 8210, + "token_acc": 0.9007338740826574, + "train_speed(iter/s)": 0.392833 + }, + { + "epoch": 2.62752, + "grad_norm": 0.7224176374798511, + "learning_rate": 2.0779741797483378e-07, + "loss": 0.2717045545578003, + "memory(GiB)": 77.0, + "step": 8211, + "token_acc": 0.9630340854536726, + "train_speed(iter/s)": 0.392806 + }, + { + "epoch": 2.62784, + "grad_norm": 0.7886295323996604, + "learning_rate": 2.0744555072544663e-07, + "loss": 0.3569974899291992, + "memory(GiB)": 77.0, + "step": 8212, + "token_acc": 0.9027655838454784, + "train_speed(iter/s)": 0.392778 + }, + { + "epoch": 2.6281600000000003, + "grad_norm": 0.6647906727900912, + "learning_rate": 2.0709396874469046e-07, + "loss": 0.29362839460372925, + "memory(GiB)": 77.0, + "step": 8213, + "token_acc": 0.9231372549019607, + "train_speed(iter/s)": 0.392746 + }, + { + "epoch": 2.62848, + "grad_norm": 0.7298766130893032, + "learning_rate": 2.0674267207631382e-07, + "loss": 0.32378584146499634, + "memory(GiB)": 77.0, + "step": 8214, + "token_acc": 0.8610385842761822, + "train_speed(iter/s)": 0.392717 + }, + { + "epoch": 2.6288, + "grad_norm": 0.761317639306641, + "learning_rate": 2.0639166076402983e-07, + "loss": 0.30469608306884766, + "memory(GiB)": 77.0, + "step": 8215, + "token_acc": 0.8919428725410941, + "train_speed(iter/s)": 0.392686 + }, + { + "epoch": 2.62912, + "grad_norm": 0.7661397650420626, + "learning_rate": 2.0604093485151548e-07, + "loss": 0.3471422791481018, + "memory(GiB)": 77.0, + "step": 8216, + "token_acc": 0.8933333333333333, + "train_speed(iter/s)": 0.392657 + }, + { + "epoch": 2.6294399999999998, + "grad_norm": 0.7154537855626724, + "learning_rate": 2.0569049438241256e-07, + "loss": 0.3227437436580658, + "memory(GiB)": 77.0, + "step": 8217, + "token_acc": 0.8975095785440613, + "train_speed(iter/s)": 0.392625 + }, + { + "epoch": 2.62976, + "grad_norm": 0.7185151312605149, + "learning_rate": 2.0534033940032754e-07, + "loss": 0.30491507053375244, + "memory(GiB)": 77.0, + "step": 8218, + "token_acc": 0.8813905930470347, + "train_speed(iter/s)": 0.392592 + }, + { + "epoch": 2.63008, + "grad_norm": 0.7382658128550709, + "learning_rate": 2.0499046994883116e-07, + "loss": 0.33815842866897583, + 
"memory(GiB)": 77.0, + "step": 8219, + "token_acc": 0.8915853336850436, + "train_speed(iter/s)": 0.392563 + }, + { + "epoch": 2.6304, + "grad_norm": 0.6702281245729447, + "learning_rate": 2.046408860714577e-07, + "loss": 0.3042662739753723, + "memory(GiB)": 77.0, + "step": 8220, + "token_acc": 0.8942905596382137, + "train_speed(iter/s)": 0.392531 + }, + { + "epoch": 2.63072, + "grad_norm": 0.8165073712324618, + "learning_rate": 2.0429158781170773e-07, + "loss": 0.30572009086608887, + "memory(GiB)": 77.0, + "step": 8221, + "token_acc": 0.8853599516031458, + "train_speed(iter/s)": 0.392505 + }, + { + "epoch": 2.63104, + "grad_norm": 0.6641055069129568, + "learning_rate": 2.0394257521304473e-07, + "loss": 0.32746416330337524, + "memory(GiB)": 77.0, + "step": 8222, + "token_acc": 0.8740091781393409, + "train_speed(iter/s)": 0.39247 + }, + { + "epoch": 2.63136, + "grad_norm": 0.7643848976879404, + "learning_rate": 2.035938483188979e-07, + "loss": 0.28198906779289246, + "memory(GiB)": 77.0, + "step": 8223, + "token_acc": 0.9091897770527461, + "train_speed(iter/s)": 0.392442 + }, + { + "epoch": 2.6316800000000002, + "grad_norm": 0.7341179988994193, + "learning_rate": 2.0324540717266e-07, + "loss": 0.32586824893951416, + "memory(GiB)": 77.0, + "step": 8224, + "token_acc": 0.8331629523614803, + "train_speed(iter/s)": 0.392414 + }, + { + "epoch": 2.632, + "grad_norm": 0.793662998655528, + "learning_rate": 2.0289725181768805e-07, + "loss": 0.3594666123390198, + "memory(GiB)": 77.0, + "step": 8225, + "token_acc": 0.8485321673953778, + "train_speed(iter/s)": 0.392383 + }, + { + "epoch": 2.63232, + "grad_norm": 0.7004969140334218, + "learning_rate": 2.025493822973046e-07, + "loss": 0.28706032037734985, + "memory(GiB)": 77.0, + "step": 8226, + "token_acc": 0.8519835841313269, + "train_speed(iter/s)": 0.392356 + }, + { + "epoch": 2.63264, + "grad_norm": 0.7300021244594831, + "learning_rate": 2.0220179865479584e-07, + "loss": 0.2945469319820404, + "memory(GiB)": 77.0, + "step": 8227, + "token_acc": 0.9376854599406528, + "train_speed(iter/s)": 0.392325 + }, + { + "epoch": 2.6329599999999997, + "grad_norm": 0.8443527242976381, + "learning_rate": 2.018545009334122e-07, + "loss": 0.3966277837753296, + "memory(GiB)": 77.0, + "step": 8228, + "token_acc": 0.8804166666666666, + "train_speed(iter/s)": 0.392298 + }, + { + "epoch": 2.63328, + "grad_norm": 0.7729254717257428, + "learning_rate": 2.0150748917636907e-07, + "loss": 0.31448447704315186, + "memory(GiB)": 77.0, + "step": 8229, + "token_acc": 0.9479505530253741, + "train_speed(iter/s)": 0.39227 + }, + { + "epoch": 2.6336, + "grad_norm": 0.735723911472292, + "learning_rate": 2.0116076342684616e-07, + "loss": 0.34708714485168457, + "memory(GiB)": 77.0, + "step": 8230, + "token_acc": 0.9301247771836008, + "train_speed(iter/s)": 0.392236 + }, + { + "epoch": 2.63392, + "grad_norm": 0.7055116016058971, + "learning_rate": 2.0081432372798664e-07, + "loss": 0.25179436802864075, + "memory(GiB)": 77.0, + "step": 8231, + "token_acc": 0.8889721026025089, + "train_speed(iter/s)": 0.392206 + }, + { + "epoch": 2.63424, + "grad_norm": 0.7202937977816357, + "learning_rate": 2.0046817012290027e-07, + "loss": 0.300061970949173, + "memory(GiB)": 77.0, + "step": 8232, + "token_acc": 0.9390654205607477, + "train_speed(iter/s)": 0.392179 + }, + { + "epoch": 2.63456, + "grad_norm": 0.6534579807659554, + "learning_rate": 2.001223026546592e-07, + "loss": 0.26223763823509216, + "memory(GiB)": 77.0, + "step": 8233, + "token_acc": 0.864720194647202, + "train_speed(iter/s)": 0.392153 + }, 
+ { + "epoch": 2.63488, + "grad_norm": 0.7370929338925366, + "learning_rate": 1.99776721366301e-07, + "loss": 0.2809169292449951, + "memory(GiB)": 77.0, + "step": 8234, + "token_acc": 0.9026198714780029, + "train_speed(iter/s)": 0.392126 + }, + { + "epoch": 2.6352, + "grad_norm": 0.6939297658679348, + "learning_rate": 1.9943142630082679e-07, + "loss": 0.24291113018989563, + "memory(GiB)": 77.0, + "step": 8235, + "token_acc": 0.9122020297380222, + "train_speed(iter/s)": 0.3921 + }, + { + "epoch": 2.63552, + "grad_norm": 0.6734203034812397, + "learning_rate": 1.9908641750120222e-07, + "loss": 0.3144662082195282, + "memory(GiB)": 77.0, + "step": 8236, + "token_acc": 0.9272058823529412, + "train_speed(iter/s)": 0.392072 + }, + { + "epoch": 2.63584, + "grad_norm": 0.6993822185864137, + "learning_rate": 1.987416950103585e-07, + "loss": 0.35526013374328613, + "memory(GiB)": 77.0, + "step": 8237, + "token_acc": 0.8353071798667654, + "train_speed(iter/s)": 0.392041 + }, + { + "epoch": 2.63616, + "grad_norm": 0.7038985196349153, + "learning_rate": 1.9839725887118965e-07, + "loss": 0.23145122826099396, + "memory(GiB)": 77.0, + "step": 8238, + "token_acc": 0.892589723728376, + "train_speed(iter/s)": 0.392016 + }, + { + "epoch": 2.63648, + "grad_norm": 0.7113780833746881, + "learning_rate": 1.9805310912655528e-07, + "loss": 0.36607056856155396, + "memory(GiB)": 77.0, + "step": 8239, + "token_acc": 0.9165439371624938, + "train_speed(iter/s)": 0.391989 + }, + { + "epoch": 2.6368, + "grad_norm": 0.6742889658379282, + "learning_rate": 1.9770924581927864e-07, + "loss": 0.29061368107795715, + "memory(GiB)": 77.0, + "step": 8240, + "token_acc": 0.9184508816120907, + "train_speed(iter/s)": 0.391958 + }, + { + "epoch": 2.63712, + "grad_norm": 0.7911605582765464, + "learning_rate": 1.9736566899214743e-07, + "loss": 0.24429886043071747, + "memory(GiB)": 77.0, + "step": 8241, + "token_acc": 0.9510844485463775, + "train_speed(iter/s)": 0.391932 + }, + { + "epoch": 2.63744, + "grad_norm": 0.6931415240850912, + "learning_rate": 1.9702237868791413e-07, + "loss": 0.2658781409263611, + "memory(GiB)": 77.0, + "step": 8242, + "token_acc": 0.9697136563876652, + "train_speed(iter/s)": 0.391904 + }, + { + "epoch": 2.63776, + "grad_norm": 0.696925356655092, + "learning_rate": 1.9667937494929512e-07, + "loss": 0.35757148265838623, + "memory(GiB)": 77.0, + "step": 8243, + "token_acc": 0.8981366459627329, + "train_speed(iter/s)": 0.391873 + }, + { + "epoch": 2.63808, + "grad_norm": 0.7037350569588582, + "learning_rate": 1.9633665781897127e-07, + "loss": 0.32819539308547974, + "memory(GiB)": 77.0, + "step": 8244, + "token_acc": 0.9010840108401084, + "train_speed(iter/s)": 0.391846 + }, + { + "epoch": 2.6384, + "grad_norm": 0.6685216906096337, + "learning_rate": 1.959942273395876e-07, + "loss": 0.3139991760253906, + "memory(GiB)": 77.0, + "step": 8245, + "token_acc": 0.9255828808687321, + "train_speed(iter/s)": 0.391817 + }, + { + "epoch": 2.63872, + "grad_norm": 0.7519295955017384, + "learning_rate": 1.9565208355375398e-07, + "loss": 0.28775548934936523, + "memory(GiB)": 77.0, + "step": 8246, + "token_acc": 0.9093484419263456, + "train_speed(iter/s)": 0.391788 + }, + { + "epoch": 2.63904, + "grad_norm": 0.6517770554707225, + "learning_rate": 1.9531022650404407e-07, + "loss": 0.29942744970321655, + "memory(GiB)": 77.0, + "step": 8247, + "token_acc": 0.8939190198715076, + "train_speed(iter/s)": 0.391757 + }, + { + "epoch": 2.63936, + "grad_norm": 0.7437356512169018, + "learning_rate": 1.949686562329961e-07, + "loss": 
0.32383936643600464, + "memory(GiB)": 77.0, + "step": 8248, + "token_acc": 0.8891232298885207, + "train_speed(iter/s)": 0.391726 + }, + { + "epoch": 2.6396800000000002, + "grad_norm": 0.7426034281628519, + "learning_rate": 1.9462737278311244e-07, + "loss": 0.3114856481552124, + "memory(GiB)": 77.0, + "step": 8249, + "token_acc": 0.9037236648701616, + "train_speed(iter/s)": 0.391701 + }, + { + "epoch": 2.64, + "grad_norm": 0.7454651353442644, + "learning_rate": 1.9428637619685997e-07, + "loss": 0.2979574501514435, + "memory(GiB)": 77.0, + "step": 8250, + "token_acc": 0.9397082658022691, + "train_speed(iter/s)": 0.391671 + }, + { + "epoch": 2.64032, + "grad_norm": 0.7523693909151944, + "learning_rate": 1.9394566651667002e-07, + "loss": 0.2894052267074585, + "memory(GiB)": 77.0, + "step": 8251, + "token_acc": 0.8776899429073343, + "train_speed(iter/s)": 0.391642 + }, + { + "epoch": 2.64064, + "grad_norm": 0.7304709977073204, + "learning_rate": 1.9360524378493784e-07, + "loss": 0.32358357310295105, + "memory(GiB)": 77.0, + "step": 8252, + "token_acc": 0.9361921097770154, + "train_speed(iter/s)": 0.391606 + }, + { + "epoch": 2.6409599999999998, + "grad_norm": 0.7582088433261344, + "learning_rate": 1.9326510804402287e-07, + "loss": 0.3250972032546997, + "memory(GiB)": 77.0, + "step": 8253, + "token_acc": 0.9662288930581614, + "train_speed(iter/s)": 0.391578 + }, + { + "epoch": 2.64128, + "grad_norm": 0.7769658159464552, + "learning_rate": 1.9292525933624934e-07, + "loss": 0.25656312704086304, + "memory(GiB)": 77.0, + "step": 8254, + "token_acc": 0.9073475544063779, + "train_speed(iter/s)": 0.391553 + }, + { + "epoch": 2.6416, + "grad_norm": 0.7382987279627675, + "learning_rate": 1.9258569770390562e-07, + "loss": 0.308563768863678, + "memory(GiB)": 77.0, + "step": 8255, + "token_acc": 0.8997685185185185, + "train_speed(iter/s)": 0.391526 + }, + { + "epoch": 2.64192, + "grad_norm": 0.7079772987581202, + "learning_rate": 1.922464231892443e-07, + "loss": 0.3168256878852844, + "memory(GiB)": 77.0, + "step": 8256, + "token_acc": 0.8814363143631436, + "train_speed(iter/s)": 0.391495 + }, + { + "epoch": 2.64224, + "grad_norm": 0.7581424584556501, + "learning_rate": 1.919074358344819e-07, + "loss": 0.23603999614715576, + "memory(GiB)": 77.0, + "step": 8257, + "token_acc": 0.8681902123730378, + "train_speed(iter/s)": 0.391469 + }, + { + "epoch": 2.64256, + "grad_norm": 0.7230745696790086, + "learning_rate": 1.9156873568179967e-07, + "loss": 0.31495746970176697, + "memory(GiB)": 77.0, + "step": 8258, + "token_acc": 0.8666069829901522, + "train_speed(iter/s)": 0.391438 + }, + { + "epoch": 2.64288, + "grad_norm": 0.7611574849764964, + "learning_rate": 1.9123032277334307e-07, + "loss": 0.31593820452690125, + "memory(GiB)": 77.0, + "step": 8259, + "token_acc": 0.8901295221080839, + "train_speed(iter/s)": 0.391412 + }, + { + "epoch": 2.6432, + "grad_norm": 0.6469154448294271, + "learning_rate": 1.9089219715122176e-07, + "loss": 0.3062821924686432, + "memory(GiB)": 77.0, + "step": 8260, + "token_acc": 0.91809161916825, + "train_speed(iter/s)": 0.391379 + }, + { + "epoch": 2.64352, + "grad_norm": 0.7158094480123424, + "learning_rate": 1.905543588575093e-07, + "loss": 0.2865774631500244, + "memory(GiB)": 77.0, + "step": 8261, + "token_acc": 0.9264349232012935, + "train_speed(iter/s)": 0.391349 + }, + { + "epoch": 2.64384, + "grad_norm": 0.9192177353734331, + "learning_rate": 1.9021680793424397e-07, + "loss": 0.305583655834198, + "memory(GiB)": 77.0, + "step": 8262, + "token_acc": 0.8932308349348708, + 
"train_speed(iter/s)": 0.391324 + }, + { + "epoch": 2.64416, + "grad_norm": 0.7437220977619077, + "learning_rate": 1.8987954442342803e-07, + "loss": 0.30461955070495605, + "memory(GiB)": 77.0, + "step": 8263, + "token_acc": 0.8924137931034483, + "train_speed(iter/s)": 0.391298 + }, + { + "epoch": 2.64448, + "grad_norm": 0.7198071588045876, + "learning_rate": 1.895425683670285e-07, + "loss": 0.2713533639907837, + "memory(GiB)": 77.0, + "step": 8264, + "token_acc": 0.9388095238095238, + "train_speed(iter/s)": 0.391269 + }, + { + "epoch": 2.6448, + "grad_norm": 0.7343051908331217, + "learning_rate": 1.892058798069757e-07, + "loss": 0.26787227392196655, + "memory(GiB)": 77.0, + "step": 8265, + "token_acc": 0.9576336863859468, + "train_speed(iter/s)": 0.391242 + }, + { + "epoch": 2.64512, + "grad_norm": 0.753724428659691, + "learning_rate": 1.8886947878516498e-07, + "loss": 0.261379599571228, + "memory(GiB)": 77.0, + "step": 8266, + "token_acc": 0.8647959183673469, + "train_speed(iter/s)": 0.391218 + }, + { + "epoch": 2.64544, + "grad_norm": 0.7458771037589396, + "learning_rate": 1.885333653434554e-07, + "loss": 0.3750127851963043, + "memory(GiB)": 77.0, + "step": 8267, + "token_acc": 0.9231940818102699, + "train_speed(iter/s)": 0.39119 + }, + { + "epoch": 2.64576, + "grad_norm": 0.6974729587763872, + "learning_rate": 1.8819753952367041e-07, + "loss": 0.3557877540588379, + "memory(GiB)": 77.0, + "step": 8268, + "token_acc": 0.8799475753604193, + "train_speed(iter/s)": 0.39116 + }, + { + "epoch": 2.64608, + "grad_norm": 0.8353872832325449, + "learning_rate": 1.8786200136759803e-07, + "loss": 0.25902172923088074, + "memory(GiB)": 77.0, + "step": 8269, + "token_acc": 0.8925143953934741, + "train_speed(iter/s)": 0.391131 + }, + { + "epoch": 2.6464, + "grad_norm": 0.698044805987148, + "learning_rate": 1.8752675091699006e-07, + "loss": 0.30117619037628174, + "memory(GiB)": 77.0, + "step": 8270, + "token_acc": 0.9245450510430537, + "train_speed(iter/s)": 0.391105 + }, + { + "epoch": 2.64672, + "grad_norm": 0.7281875578477721, + "learning_rate": 1.8719178821356236e-07, + "loss": 0.39360547065734863, + "memory(GiB)": 77.0, + "step": 8271, + "token_acc": 0.8686511240632806, + "train_speed(iter/s)": 0.391072 + }, + { + "epoch": 2.64704, + "grad_norm": 0.6930227644045112, + "learning_rate": 1.8685711329899549e-07, + "loss": 0.29349666833877563, + "memory(GiB)": 77.0, + "step": 8272, + "token_acc": 0.941286989196806, + "train_speed(iter/s)": 0.391044 + }, + { + "epoch": 2.64736, + "grad_norm": 0.7821525870803606, + "learning_rate": 1.8652272621493384e-07, + "loss": 0.28288084268569946, + "memory(GiB)": 77.0, + "step": 8273, + "token_acc": 0.9317194289261328, + "train_speed(iter/s)": 0.391018 + }, + { + "epoch": 2.6476800000000003, + "grad_norm": 0.7945142299583828, + "learning_rate": 1.8618862700298613e-07, + "loss": 0.3581192195415497, + "memory(GiB)": 77.0, + "step": 8274, + "token_acc": 0.9450956199876619, + "train_speed(iter/s)": 0.390992 + }, + { + "epoch": 2.648, + "grad_norm": 0.8138319963103046, + "learning_rate": 1.858548157047252e-07, + "loss": 0.3507939875125885, + "memory(GiB)": 77.0, + "step": 8275, + "token_acc": 0.8911857594028136, + "train_speed(iter/s)": 0.390965 + }, + { + "epoch": 2.64832, + "grad_norm": 0.8025218381865155, + "learning_rate": 1.8552129236168782e-07, + "loss": 0.31508851051330566, + "memory(GiB)": 77.0, + "step": 8276, + "token_acc": 0.9576242909576242, + "train_speed(iter/s)": 0.390938 + }, + { + "epoch": 2.64864, + "grad_norm": 0.7450994923791506, + "learning_rate": 
1.851880570153755e-07, + "loss": 0.37152916193008423, + "memory(GiB)": 77.0, + "step": 8277, + "token_acc": 0.9100765306122449, + "train_speed(iter/s)": 0.390909 + }, + { + "epoch": 2.6489599999999998, + "grad_norm": 0.6873490929928984, + "learning_rate": 1.8485510970725345e-07, + "loss": 0.3029119670391083, + "memory(GiB)": 77.0, + "step": 8278, + "token_acc": 0.9592, + "train_speed(iter/s)": 0.390882 + }, + { + "epoch": 2.64928, + "grad_norm": 0.793601320160345, + "learning_rate": 1.845224504787513e-07, + "loss": 0.307328462600708, + "memory(GiB)": 77.0, + "step": 8279, + "token_acc": 0.8975497159090909, + "train_speed(iter/s)": 0.390851 + }, + { + "epoch": 2.6496, + "grad_norm": 0.7535801132747936, + "learning_rate": 1.8419007937126254e-07, + "loss": 0.34868139028549194, + "memory(GiB)": 77.0, + "step": 8280, + "token_acc": 0.8759655705142353, + "train_speed(iter/s)": 0.39082 + }, + { + "epoch": 2.64992, + "grad_norm": 0.7151235106801256, + "learning_rate": 1.8385799642614526e-07, + "loss": 0.401345431804657, + "memory(GiB)": 77.0, + "step": 8281, + "token_acc": 0.9076040172166427, + "train_speed(iter/s)": 0.390789 + }, + { + "epoch": 2.65024, + "grad_norm": 0.7110437059882777, + "learning_rate": 1.8352620168472112e-07, + "loss": 0.3378289043903351, + "memory(GiB)": 77.0, + "step": 8282, + "token_acc": 0.9214493520232743, + "train_speed(iter/s)": 0.390764 + }, + { + "epoch": 2.65056, + "grad_norm": 0.7103890009837603, + "learning_rate": 1.8319469518827655e-07, + "loss": 0.28874000906944275, + "memory(GiB)": 77.0, + "step": 8283, + "token_acc": 0.876026040192471, + "train_speed(iter/s)": 0.390735 + }, + { + "epoch": 2.65088, + "grad_norm": 0.7694716371098046, + "learning_rate": 1.8286347697806155e-07, + "loss": 0.2645527124404907, + "memory(GiB)": 77.0, + "step": 8284, + "token_acc": 0.9470588235294117, + "train_speed(iter/s)": 0.390709 + }, + { + "epoch": 2.6512000000000002, + "grad_norm": 0.7830704256471486, + "learning_rate": 1.8253254709529045e-07, + "loss": 0.29670971632003784, + "memory(GiB)": 77.0, + "step": 8285, + "token_acc": 0.9220603185360895, + "train_speed(iter/s)": 0.390677 + }, + { + "epoch": 2.65152, + "grad_norm": 0.686850840197858, + "learning_rate": 1.8220190558114193e-07, + "loss": 0.3185647130012512, + "memory(GiB)": 77.0, + "step": 8286, + "token_acc": 0.9192018964836033, + "train_speed(iter/s)": 0.390648 + }, + { + "epoch": 2.65184, + "grad_norm": 0.7576406060388018, + "learning_rate": 1.8187155247675836e-07, + "loss": 0.3305199146270752, + "memory(GiB)": 77.0, + "step": 8287, + "token_acc": 0.9512652296157451, + "train_speed(iter/s)": 0.390623 + }, + { + "epoch": 2.65216, + "grad_norm": 0.6476854315793404, + "learning_rate": 1.8154148782324659e-07, + "loss": 0.27415454387664795, + "memory(GiB)": 77.0, + "step": 8288, + "token_acc": 0.9165126826810012, + "train_speed(iter/s)": 0.390591 + }, + { + "epoch": 2.6524799999999997, + "grad_norm": 0.667307157552261, + "learning_rate": 1.812117116616774e-07, + "loss": 0.31214767694473267, + "memory(GiB)": 77.0, + "step": 8289, + "token_acc": 0.8821277453153334, + "train_speed(iter/s)": 0.390563 + }, + { + "epoch": 2.6528, + "grad_norm": 0.7224866599070888, + "learning_rate": 1.808822240330857e-07, + "loss": 0.32941684126853943, + "memory(GiB)": 77.0, + "step": 8290, + "token_acc": 0.8976711362032463, + "train_speed(iter/s)": 0.390536 + }, + { + "epoch": 2.65312, + "grad_norm": 0.7661103226287438, + "learning_rate": 1.8055302497847094e-07, + "loss": 0.3057553470134735, + "memory(GiB)": 77.0, + "step": 8291, + "token_acc": 
0.9166666666666666, + "train_speed(iter/s)": 0.39051 + }, + { + "epoch": 2.65344, + "grad_norm": 0.702002120657817, + "learning_rate": 1.8022411453879562e-07, + "loss": 0.2613741457462311, + "memory(GiB)": 77.0, + "step": 8292, + "token_acc": 0.9326506498621504, + "train_speed(iter/s)": 0.39048 + }, + { + "epoch": 2.65376, + "grad_norm": 0.7032925851637407, + "learning_rate": 1.79895492754987e-07, + "loss": 0.31940412521362305, + "memory(GiB)": 77.0, + "step": 8293, + "token_acc": 0.8848246674727932, + "train_speed(iter/s)": 0.390451 + }, + { + "epoch": 2.65408, + "grad_norm": 0.6356278744164835, + "learning_rate": 1.7956715966793604e-07, + "loss": 0.2915850877761841, + "memory(GiB)": 77.0, + "step": 8294, + "token_acc": 0.8822441430332922, + "train_speed(iter/s)": 0.39042 + }, + { + "epoch": 2.6544, + "grad_norm": 0.6705726734819097, + "learning_rate": 1.7923911531849913e-07, + "loss": 0.2545706033706665, + "memory(GiB)": 77.0, + "step": 8295, + "token_acc": 0.9610299234516354, + "train_speed(iter/s)": 0.390393 + }, + { + "epoch": 2.65472, + "grad_norm": 0.6826894442090264, + "learning_rate": 1.7891135974749507e-07, + "loss": 0.32064467668533325, + "memory(GiB)": 77.0, + "step": 8296, + "token_acc": 0.8854130052724077, + "train_speed(iter/s)": 0.390363 + }, + { + "epoch": 2.65504, + "grad_norm": 0.6985509096241064, + "learning_rate": 1.7858389299570733e-07, + "loss": 0.3199300169944763, + "memory(GiB)": 77.0, + "step": 8297, + "token_acc": 0.855223315669947, + "train_speed(iter/s)": 0.390335 + }, + { + "epoch": 2.65536, + "grad_norm": 0.7342683400701684, + "learning_rate": 1.782567151038836e-07, + "loss": 0.3035092055797577, + "memory(GiB)": 77.0, + "step": 8298, + "token_acc": 0.9627118644067797, + "train_speed(iter/s)": 0.390305 + }, + { + "epoch": 2.6556800000000003, + "grad_norm": 0.7529255712495968, + "learning_rate": 1.7792982611273546e-07, + "loss": 0.3524467349052429, + "memory(GiB)": 77.0, + "step": 8299, + "token_acc": 0.9400241109101869, + "train_speed(iter/s)": 0.390272 + }, + { + "epoch": 2.656, + "grad_norm": 0.7425150367403608, + "learning_rate": 1.7760322606293873e-07, + "loss": 0.2991190254688263, + "memory(GiB)": 77.0, + "step": 8300, + "token_acc": 0.9166942148760331, + "train_speed(iter/s)": 0.390246 + }, + { + "epoch": 2.65632, + "grad_norm": 0.651334883270686, + "learning_rate": 1.7727691499513283e-07, + "loss": 0.24677275121212006, + "memory(GiB)": 77.0, + "step": 8301, + "token_acc": 0.9391943248036484, + "train_speed(iter/s)": 0.390212 + }, + { + "epoch": 2.65664, + "grad_norm": 0.6953878939615828, + "learning_rate": 1.7695089294992142e-07, + "loss": 0.26430702209472656, + "memory(GiB)": 77.0, + "step": 8302, + "token_acc": 0.9571619812583668, + "train_speed(iter/s)": 0.390183 + }, + { + "epoch": 2.6569599999999998, + "grad_norm": 0.7219050291302878, + "learning_rate": 1.7662515996787284e-07, + "loss": 0.34057044982910156, + "memory(GiB)": 77.0, + "step": 8303, + "token_acc": 0.8714613618974751, + "train_speed(iter/s)": 0.390153 + }, + { + "epoch": 2.65728, + "grad_norm": 0.7474333799207616, + "learning_rate": 1.762997160895183e-07, + "loss": 0.31459593772888184, + "memory(GiB)": 77.0, + "step": 8304, + "token_acc": 0.8990328820116054, + "train_speed(iter/s)": 0.390123 + }, + { + "epoch": 2.6576, + "grad_norm": 0.763603694812164, + "learning_rate": 1.7597456135535458e-07, + "loss": 0.2992628812789917, + "memory(GiB)": 77.0, + "step": 8305, + "token_acc": 0.9110357422384555, + "train_speed(iter/s)": 0.390095 + }, + { + "epoch": 2.65792, + "grad_norm": 
0.6780113101958645, + "learning_rate": 1.756496958058404e-07, + "loss": 0.2819777727127075, + "memory(GiB)": 77.0, + "step": 8306, + "token_acc": 0.8806278397356464, + "train_speed(iter/s)": 0.390062 + }, + { + "epoch": 2.65824, + "grad_norm": 0.76759515457283, + "learning_rate": 1.7532511948139984e-07, + "loss": 0.24614271521568298, + "memory(GiB)": 77.0, + "step": 8307, + "token_acc": 0.9362084456424079, + "train_speed(iter/s)": 0.390036 + }, + { + "epoch": 2.65856, + "grad_norm": 0.6787918377388446, + "learning_rate": 1.7500083242242144e-07, + "loss": 0.23796194791793823, + "memory(GiB)": 77.0, + "step": 8308, + "token_acc": 0.969921875, + "train_speed(iter/s)": 0.390007 + }, + { + "epoch": 2.65888, + "grad_norm": 0.6804761325605421, + "learning_rate": 1.746768346692565e-07, + "loss": 0.26744017004966736, + "memory(GiB)": 77.0, + "step": 8309, + "token_acc": 0.8708333333333333, + "train_speed(iter/s)": 0.389974 + }, + { + "epoch": 2.6592000000000002, + "grad_norm": 0.7389415673745149, + "learning_rate": 1.743531262622214e-07, + "loss": 0.19879506528377533, + "memory(GiB)": 77.0, + "step": 8310, + "token_acc": 0.9482363719651855, + "train_speed(iter/s)": 0.389948 + }, + { + "epoch": 2.65952, + "grad_norm": 0.6746720603558587, + "learning_rate": 1.740297072415953e-07, + "loss": 0.35064759850502014, + "memory(GiB)": 77.0, + "step": 8311, + "token_acc": 0.9016763985684686, + "train_speed(iter/s)": 0.389919 + }, + { + "epoch": 2.65984, + "grad_norm": 0.8619992857311103, + "learning_rate": 1.7370657764762295e-07, + "loss": 0.3298761248588562, + "memory(GiB)": 77.0, + "step": 8312, + "token_acc": 0.870954356846473, + "train_speed(iter/s)": 0.389894 + }, + { + "epoch": 2.66016, + "grad_norm": 0.7616478971924554, + "learning_rate": 1.7338373752051163e-07, + "loss": 0.24010147154331207, + "memory(GiB)": 77.0, + "step": 8313, + "token_acc": 0.955820895522388, + "train_speed(iter/s)": 0.389869 + }, + { + "epoch": 2.6604799999999997, + "grad_norm": 0.795200515467388, + "learning_rate": 1.7306118690043367e-07, + "loss": 0.4127449691295624, + "memory(GiB)": 77.0, + "step": 8314, + "token_acc": 0.8557779799818016, + "train_speed(iter/s)": 0.389841 + }, + { + "epoch": 2.6608, + "grad_norm": 0.813526956802831, + "learning_rate": 1.727389258275247e-07, + "loss": 0.3152651786804199, + "memory(GiB)": 77.0, + "step": 8315, + "token_acc": 0.9693424768051634, + "train_speed(iter/s)": 0.389816 + }, + { + "epoch": 2.66112, + "grad_norm": 0.7456296204810973, + "learning_rate": 1.7241695434188438e-07, + "loss": 0.24510693550109863, + "memory(GiB)": 77.0, + "step": 8316, + "token_acc": 0.9141782271337879, + "train_speed(iter/s)": 0.389788 + }, + { + "epoch": 2.66144, + "grad_norm": 0.7119014780851665, + "learning_rate": 1.7209527248357644e-07, + "loss": 0.2828027009963989, + "memory(GiB)": 77.0, + "step": 8317, + "token_acc": 0.9248466257668712, + "train_speed(iter/s)": 0.38976 + }, + { + "epoch": 2.66176, + "grad_norm": 0.6687606890319608, + "learning_rate": 1.7177388029262887e-07, + "loss": 0.24720457196235657, + "memory(GiB)": 77.0, + "step": 8318, + "token_acc": 0.9644654088050314, + "train_speed(iter/s)": 0.389736 + }, + { + "epoch": 2.66208, + "grad_norm": 0.6836110023625824, + "learning_rate": 1.714527778090333e-07, + "loss": 0.3015371561050415, + "memory(GiB)": 77.0, + "step": 8319, + "token_acc": 0.9555436529191216, + "train_speed(iter/s)": 0.389706 + }, + { + "epoch": 2.6624, + "grad_norm": 0.677554544828429, + "learning_rate": 1.7113196507274555e-07, + "loss": 0.2918495535850525, + "memory(GiB)": 77.0, + 
"step": 8320, + "token_acc": 0.9090280976508521, + "train_speed(iter/s)": 0.38968 + }, + { + "epoch": 2.66272, + "grad_norm": 0.7084355987871442, + "learning_rate": 1.7081144212368451e-07, + "loss": 0.2673630714416504, + "memory(GiB)": 77.0, + "step": 8321, + "token_acc": 0.9249871991807476, + "train_speed(iter/s)": 0.389654 + }, + { + "epoch": 2.66304, + "grad_norm": 0.7010727902476456, + "learning_rate": 1.704912090017341e-07, + "loss": 0.21331779658794403, + "memory(GiB)": 77.0, + "step": 8322, + "token_acc": 0.9507971412864211, + "train_speed(iter/s)": 0.389625 + }, + { + "epoch": 2.66336, + "grad_norm": 0.8001459151535878, + "learning_rate": 1.7017126574674192e-07, + "loss": 0.2717702388763428, + "memory(GiB)": 77.0, + "step": 8323, + "token_acc": 0.8932645301466594, + "train_speed(iter/s)": 0.3896 + }, + { + "epoch": 2.6636800000000003, + "grad_norm": 0.7239625645654969, + "learning_rate": 1.698516123985189e-07, + "loss": 0.3770061433315277, + "memory(GiB)": 77.0, + "step": 8324, + "token_acc": 0.8674104826154645, + "train_speed(iter/s)": 0.389573 + }, + { + "epoch": 2.664, + "grad_norm": 0.7899614355256932, + "learning_rate": 1.6953224899684095e-07, + "loss": 0.3936925530433655, + "memory(GiB)": 77.0, + "step": 8325, + "token_acc": 0.9063339731285989, + "train_speed(iter/s)": 0.389544 + }, + { + "epoch": 2.66432, + "grad_norm": 0.6516891819183195, + "learning_rate": 1.6921317558144716e-07, + "loss": 0.2850383520126343, + "memory(GiB)": 77.0, + "step": 8326, + "token_acc": 0.9453597497393118, + "train_speed(iter/s)": 0.389514 + }, + { + "epoch": 2.66464, + "grad_norm": 0.8096396386715049, + "learning_rate": 1.6889439219203997e-07, + "loss": 0.2595321238040924, + "memory(GiB)": 77.0, + "step": 8327, + "token_acc": 0.8995719459993414, + "train_speed(iter/s)": 0.389483 + }, + { + "epoch": 2.6649599999999998, + "grad_norm": 0.740656409655529, + "learning_rate": 1.685758988682873e-07, + "loss": 0.31469500064849854, + "memory(GiB)": 77.0, + "step": 8328, + "token_acc": 0.846095717884131, + "train_speed(iter/s)": 0.389455 + }, + { + "epoch": 2.66528, + "grad_norm": 0.6620020033210887, + "learning_rate": 1.6825769564982025e-07, + "loss": 0.23544242978096008, + "memory(GiB)": 77.0, + "step": 8329, + "token_acc": 0.8733757257395631, + "train_speed(iter/s)": 0.389425 + }, + { + "epoch": 2.6656, + "grad_norm": 0.7050681345803378, + "learning_rate": 1.67939782576233e-07, + "loss": 0.23169678449630737, + "memory(GiB)": 77.0, + "step": 8330, + "token_acc": 0.9646856158484065, + "train_speed(iter/s)": 0.389401 + }, + { + "epoch": 2.66592, + "grad_norm": 0.7185886966664018, + "learning_rate": 1.6762215968708474e-07, + "loss": 0.32508498430252075, + "memory(GiB)": 77.0, + "step": 8331, + "token_acc": 0.9158094206821873, + "train_speed(iter/s)": 0.389375 + }, + { + "epoch": 2.66624, + "grad_norm": 0.648087046373587, + "learning_rate": 1.6730482702189772e-07, + "loss": 0.24757789075374603, + "memory(GiB)": 77.0, + "step": 8332, + "token_acc": 0.9603148573302722, + "train_speed(iter/s)": 0.389345 + }, + { + "epoch": 2.66656, + "grad_norm": 0.7144198249716596, + "learning_rate": 1.6698778462015925e-07, + "loss": 0.31390380859375, + "memory(GiB)": 77.0, + "step": 8333, + "token_acc": 0.875072129255626, + "train_speed(iter/s)": 0.389317 + }, + { + "epoch": 2.66688, + "grad_norm": 0.7723810863344789, + "learning_rate": 1.6667103252131917e-07, + "loss": 0.242484450340271, + "memory(GiB)": 77.0, + "step": 8334, + "token_acc": 0.9504876219054764, + "train_speed(iter/s)": 0.389288 + }, + { + "epoch": 
2.6672000000000002, + "grad_norm": 0.6791242212387602, + "learning_rate": 1.6635457076479178e-07, + "loss": 0.21657070517539978, + "memory(GiB)": 77.0, + "step": 8335, + "token_acc": 0.9187031212194532, + "train_speed(iter/s)": 0.389262 + }, + { + "epoch": 2.66752, + "grad_norm": 0.7078843189632531, + "learning_rate": 1.6603839938995532e-07, + "loss": 0.2428026795387268, + "memory(GiB)": 77.0, + "step": 8336, + "token_acc": 0.943104514533086, + "train_speed(iter/s)": 0.389236 + }, + { + "epoch": 2.66784, + "grad_norm": 0.7244322553252327, + "learning_rate": 1.6572251843615188e-07, + "loss": 0.32905709743499756, + "memory(GiB)": 77.0, + "step": 8337, + "token_acc": 0.908793146888385, + "train_speed(iter/s)": 0.38921 + }, + { + "epoch": 2.66816, + "grad_norm": 0.7592599339530682, + "learning_rate": 1.654069279426873e-07, + "loss": 0.3220115303993225, + "memory(GiB)": 77.0, + "step": 8338, + "token_acc": 0.9131008980454305, + "train_speed(iter/s)": 0.389185 + }, + { + "epoch": 2.6684799999999997, + "grad_norm": 0.7654464115677657, + "learning_rate": 1.6509162794883154e-07, + "loss": 0.32288098335266113, + "memory(GiB)": 77.0, + "step": 8339, + "token_acc": 0.8825803334138681, + "train_speed(iter/s)": 0.389159 + }, + { + "epoch": 2.6688, + "grad_norm": 0.7131068779432915, + "learning_rate": 1.6477661849381794e-07, + "loss": 0.291257381439209, + "memory(GiB)": 77.0, + "step": 8340, + "token_acc": 0.9598865478119936, + "train_speed(iter/s)": 0.389134 + }, + { + "epoch": 2.66912, + "grad_norm": 0.7032568501127469, + "learning_rate": 1.64461899616844e-07, + "loss": 0.2997780740261078, + "memory(GiB)": 77.0, + "step": 8341, + "token_acc": 0.9465020576131687, + "train_speed(iter/s)": 0.389109 + }, + { + "epoch": 2.66944, + "grad_norm": 0.6569142161642316, + "learning_rate": 1.6414747135707122e-07, + "loss": 0.3466072380542755, + "memory(GiB)": 77.0, + "step": 8342, + "token_acc": 0.9182093571188152, + "train_speed(iter/s)": 0.389073 + }, + { + "epoch": 2.66976, + "grad_norm": 0.6813852724392182, + "learning_rate": 1.6383333375362464e-07, + "loss": 0.3261154294013977, + "memory(GiB)": 77.0, + "step": 8343, + "token_acc": 0.8336996336996337, + "train_speed(iter/s)": 0.38904 + }, + { + "epoch": 2.67008, + "grad_norm": 0.7683533284316103, + "learning_rate": 1.6351948684559306e-07, + "loss": 0.3104892373085022, + "memory(GiB)": 77.0, + "step": 8344, + "token_acc": 0.860413737698333, + "train_speed(iter/s)": 0.38901 + }, + { + "epoch": 2.6704, + "grad_norm": 0.8391690196879787, + "learning_rate": 1.632059306720296e-07, + "loss": 0.3369613289833069, + "memory(GiB)": 77.0, + "step": 8345, + "token_acc": 0.9152887306029001, + "train_speed(iter/s)": 0.388985 + }, + { + "epoch": 2.67072, + "grad_norm": 0.7130031220367125, + "learning_rate": 1.6289266527195092e-07, + "loss": 0.26510563492774963, + "memory(GiB)": 77.0, + "step": 8346, + "token_acc": 0.950885208452313, + "train_speed(iter/s)": 0.388961 + }, + { + "epoch": 2.67104, + "grad_norm": 0.6776779749163426, + "learning_rate": 1.6257969068433717e-07, + "loss": 0.2589621841907501, + "memory(GiB)": 77.0, + "step": 8347, + "token_acc": 0.9236914600550964, + "train_speed(iter/s)": 0.388933 + }, + { + "epoch": 2.67136, + "grad_norm": 0.7737069522825752, + "learning_rate": 1.6226700694813303e-07, + "loss": 0.27638307213783264, + "memory(GiB)": 77.0, + "step": 8348, + "token_acc": 0.8681041725007, + "train_speed(iter/s)": 0.388907 + }, + { + "epoch": 2.6716800000000003, + "grad_norm": 0.7063998676994592, + "learning_rate": 1.61954614102246e-07, + "loss": 
0.2272045910358429, + "memory(GiB)": 77.0, + "step": 8349, + "token_acc": 0.9321727334388424, + "train_speed(iter/s)": 0.388882 + }, + { + "epoch": 2.672, + "grad_norm": 0.7941083436496131, + "learning_rate": 1.6164251218554833e-07, + "loss": 0.32727640867233276, + "memory(GiB)": 77.0, + "step": 8350, + "token_acc": 0.8972972972972973, + "train_speed(iter/s)": 0.388851 + }, + { + "epoch": 2.67232, + "grad_norm": 0.9649482727858917, + "learning_rate": 1.6133070123687534e-07, + "loss": 0.3308938145637512, + "memory(GiB)": 77.0, + "step": 8351, + "token_acc": 0.9039039039039038, + "train_speed(iter/s)": 0.388823 + }, + { + "epoch": 2.67264, + "grad_norm": 0.7572849557037309, + "learning_rate": 1.610191812950268e-07, + "loss": 0.32228365540504456, + "memory(GiB)": 77.0, + "step": 8352, + "token_acc": 0.9142370717173867, + "train_speed(iter/s)": 0.388794 + }, + { + "epoch": 2.67296, + "grad_norm": 0.7469631179050559, + "learning_rate": 1.607079523987662e-07, + "loss": 0.29048511385917664, + "memory(GiB)": 77.0, + "step": 8353, + "token_acc": 0.9233526330300524, + "train_speed(iter/s)": 0.388765 + }, + { + "epoch": 2.67328, + "grad_norm": 0.6567581990572683, + "learning_rate": 1.6039701458682e-07, + "loss": 0.23982444405555725, + "memory(GiB)": 77.0, + "step": 8354, + "token_acc": 0.9533083645443196, + "train_speed(iter/s)": 0.38874 + }, + { + "epoch": 2.6736, + "grad_norm": 0.6742891721811927, + "learning_rate": 1.6008636789787923e-07, + "loss": 0.2723290026187897, + "memory(GiB)": 77.0, + "step": 8355, + "token_acc": 0.898076370944588, + "train_speed(iter/s)": 0.388714 + }, + { + "epoch": 2.67392, + "grad_norm": 0.6769088327484989, + "learning_rate": 1.5977601237059854e-07, + "loss": 0.31509464979171753, + "memory(GiB)": 77.0, + "step": 8356, + "token_acc": 0.9390035991665088, + "train_speed(iter/s)": 0.388684 + }, + { + "epoch": 2.67424, + "grad_norm": 0.7343808142000743, + "learning_rate": 1.5946594804359644e-07, + "loss": 0.40209078788757324, + "memory(GiB)": 77.0, + "step": 8357, + "token_acc": 0.9448178280863105, + "train_speed(iter/s)": 0.388655 + }, + { + "epoch": 2.67456, + "grad_norm": 0.7771531937952099, + "learning_rate": 1.5915617495545488e-07, + "loss": 0.2871140241622925, + "memory(GiB)": 77.0, + "step": 8358, + "token_acc": 0.8561540776406861, + "train_speed(iter/s)": 0.388628 + }, + { + "epoch": 2.67488, + "grad_norm": 0.7243374696723321, + "learning_rate": 1.5884669314471967e-07, + "loss": 0.3156958222389221, + "memory(GiB)": 77.0, + "step": 8359, + "token_acc": 0.8503774879890186, + "train_speed(iter/s)": 0.388601 + }, + { + "epoch": 2.6752000000000002, + "grad_norm": 0.7243737042629909, + "learning_rate": 1.585375026499006e-07, + "loss": 0.23813989758491516, + "memory(GiB)": 77.0, + "step": 8360, + "token_acc": 0.95004095004095, + "train_speed(iter/s)": 0.388576 + }, + { + "epoch": 2.67552, + "grad_norm": 0.6767762323933036, + "learning_rate": 1.5822860350947073e-07, + "loss": 0.2458275854587555, + "memory(GiB)": 77.0, + "step": 8361, + "token_acc": 0.9456289978678039, + "train_speed(iter/s)": 0.388546 + }, + { + "epoch": 2.67584, + "grad_norm": 0.7131152491352184, + "learning_rate": 1.5791999576186795e-07, + "loss": 0.26207029819488525, + "memory(GiB)": 77.0, + "step": 8362, + "token_acc": 0.9568909209666885, + "train_speed(iter/s)": 0.38852 + }, + { + "epoch": 2.67616, + "grad_norm": 0.753577653744562, + "learning_rate": 1.5761167944549295e-07, + "loss": 0.3235446810722351, + "memory(GiB)": 77.0, + "step": 8363, + "token_acc": 0.9040797259420741, + "train_speed(iter/s)": 
0.388493 + }, + { + "epoch": 2.6764799999999997, + "grad_norm": 0.7437055231132272, + "learning_rate": 1.5730365459870944e-07, + "loss": 0.2667214870452881, + "memory(GiB)": 77.0, + "step": 8364, + "token_acc": 0.9098639455782312, + "train_speed(iter/s)": 0.388468 + }, + { + "epoch": 2.6768, + "grad_norm": 0.7007110893579256, + "learning_rate": 1.569959212598468e-07, + "loss": 0.294657438993454, + "memory(GiB)": 77.0, + "step": 8365, + "token_acc": 0.8946700507614214, + "train_speed(iter/s)": 0.38844 + }, + { + "epoch": 2.67712, + "grad_norm": 0.6745185060248854, + "learning_rate": 1.566884794671966e-07, + "loss": 0.3231689929962158, + "memory(GiB)": 77.0, + "step": 8366, + "token_acc": 0.8417462482946794, + "train_speed(iter/s)": 0.388407 + }, + { + "epoch": 2.67744, + "grad_norm": 0.7453447033298104, + "learning_rate": 1.5638132925901466e-07, + "loss": 0.32390862703323364, + "memory(GiB)": 77.0, + "step": 8367, + "token_acc": 0.897009165460685, + "train_speed(iter/s)": 0.388381 + }, + { + "epoch": 2.67776, + "grad_norm": 0.727898620060236, + "learning_rate": 1.5607447067352067e-07, + "loss": 0.32645779848098755, + "memory(GiB)": 77.0, + "step": 8368, + "token_acc": 0.9339517625231911, + "train_speed(iter/s)": 0.388353 + }, + { + "epoch": 2.67808, + "grad_norm": 0.6991805698367061, + "learning_rate": 1.5576790374889766e-07, + "loss": 0.28532254695892334, + "memory(GiB)": 77.0, + "step": 8369, + "token_acc": 0.9084410228210064, + "train_speed(iter/s)": 0.388329 + }, + { + "epoch": 2.6784, + "grad_norm": 0.7918766626764461, + "learning_rate": 1.5546162852329266e-07, + "loss": 0.32307344675064087, + "memory(GiB)": 77.0, + "step": 8370, + "token_acc": 0.917433831990794, + "train_speed(iter/s)": 0.388302 + }, + { + "epoch": 2.67872, + "grad_norm": 0.7066853828947395, + "learning_rate": 1.5515564503481655e-07, + "loss": 0.2743019461631775, + "memory(GiB)": 77.0, + "step": 8371, + "token_acc": 0.9402304368698992, + "train_speed(iter/s)": 0.388273 + }, + { + "epoch": 2.67904, + "grad_norm": 0.6681944919235961, + "learning_rate": 1.5484995332154334e-07, + "loss": 0.2556476891040802, + "memory(GiB)": 77.0, + "step": 8372, + "token_acc": 0.933786078098472, + "train_speed(iter/s)": 0.388247 + }, + { + "epoch": 2.67936, + "grad_norm": 0.7404623433950682, + "learning_rate": 1.545445534215112e-07, + "loss": 0.30142277479171753, + "memory(GiB)": 77.0, + "step": 8373, + "token_acc": 0.9250712250712251, + "train_speed(iter/s)": 0.388219 + }, + { + "epoch": 2.67968, + "grad_norm": 0.7261250746489296, + "learning_rate": 1.5423944537272166e-07, + "loss": 0.2507474720478058, + "memory(GiB)": 77.0, + "step": 8374, + "token_acc": 0.9412955465587044, + "train_speed(iter/s)": 0.388193 + }, + { + "epoch": 2.68, + "grad_norm": 0.6619424184473127, + "learning_rate": 1.539346292131405e-07, + "loss": 0.2416161745786667, + "memory(GiB)": 77.0, + "step": 8375, + "token_acc": 0.9620656678355116, + "train_speed(iter/s)": 0.388167 + }, + { + "epoch": 2.68032, + "grad_norm": 0.7446072763784206, + "learning_rate": 1.5363010498069624e-07, + "loss": 0.3368012011051178, + "memory(GiB)": 77.0, + "step": 8376, + "token_acc": 0.9036183157220621, + "train_speed(iter/s)": 0.388143 + }, + { + "epoch": 2.68064, + "grad_norm": 0.8250011187585267, + "learning_rate": 1.5332587271328247e-07, + "loss": 0.2738511562347412, + "memory(GiB)": 77.0, + "step": 8377, + "token_acc": 0.9407969639468691, + "train_speed(iter/s)": 0.388114 + }, + { + "epoch": 2.68096, + "grad_norm": 0.7116103372621291, + "learning_rate": 1.5302193244875474e-07, + 
"loss": 0.3121381998062134, + "memory(GiB)": 77.0, + "step": 8378, + "token_acc": 0.926605504587156, + "train_speed(iter/s)": 0.38809 + }, + { + "epoch": 2.68128, + "grad_norm": 0.6415599748719999, + "learning_rate": 1.5271828422493363e-07, + "loss": 0.32568657398223877, + "memory(GiB)": 77.0, + "step": 8379, + "token_acc": 0.9018478521718263, + "train_speed(iter/s)": 0.388066 + }, + { + "epoch": 2.6816, + "grad_norm": 0.7555805879998558, + "learning_rate": 1.524149280796028e-07, + "loss": 0.297416627407074, + "memory(GiB)": 77.0, + "step": 8380, + "token_acc": 0.8569463548830811, + "train_speed(iter/s)": 0.38804 + }, + { + "epoch": 2.68192, + "grad_norm": 0.7744658114344888, + "learning_rate": 1.521118640505098e-07, + "loss": 0.36332088708877563, + "memory(GiB)": 77.0, + "step": 8381, + "token_acc": 0.9304407713498623, + "train_speed(iter/s)": 0.388014 + }, + { + "epoch": 2.68224, + "grad_norm": 1.0653998086675922, + "learning_rate": 1.5180909217536538e-07, + "loss": 0.33961325883865356, + "memory(GiB)": 77.0, + "step": 8382, + "token_acc": 0.8866711772665764, + "train_speed(iter/s)": 0.387987 + }, + { + "epoch": 2.68256, + "grad_norm": 0.7778779239428527, + "learning_rate": 1.5150661249184435e-07, + "loss": 0.2877591550350189, + "memory(GiB)": 77.0, + "step": 8383, + "token_acc": 0.9007580978635424, + "train_speed(iter/s)": 0.387963 + }, + { + "epoch": 2.68288, + "grad_norm": 0.72669900321053, + "learning_rate": 1.512044250375855e-07, + "loss": 0.3069957494735718, + "memory(GiB)": 77.0, + "step": 8384, + "token_acc": 0.8981826654240447, + "train_speed(iter/s)": 0.387935 + }, + { + "epoch": 2.6832000000000003, + "grad_norm": 0.7531489380152214, + "learning_rate": 1.509025298501901e-07, + "loss": 0.2848142981529236, + "memory(GiB)": 77.0, + "step": 8385, + "token_acc": 0.9341623994147769, + "train_speed(iter/s)": 0.387908 + }, + { + "epoch": 2.68352, + "grad_norm": 0.8214268964553267, + "learning_rate": 1.50600926967224e-07, + "loss": 0.2600971758365631, + "memory(GiB)": 77.0, + "step": 8386, + "token_acc": 0.9373401534526854, + "train_speed(iter/s)": 0.387881 + }, + { + "epoch": 2.68384, + "grad_norm": 0.8464420488605308, + "learning_rate": 1.5029961642621687e-07, + "loss": 0.3217431604862213, + "memory(GiB)": 77.0, + "step": 8387, + "token_acc": 0.8592853992059991, + "train_speed(iter/s)": 0.387857 + }, + { + "epoch": 2.68416, + "grad_norm": 0.7120877871597413, + "learning_rate": 1.4999859826466123e-07, + "loss": 0.26472577452659607, + "memory(GiB)": 77.0, + "step": 8388, + "token_acc": 0.942794547224927, + "train_speed(iter/s)": 0.387833 + }, + { + "epoch": 2.6844799999999998, + "grad_norm": 0.7407266978674761, + "learning_rate": 1.496978725200135e-07, + "loss": 0.26526764035224915, + "memory(GiB)": 77.0, + "step": 8389, + "token_acc": 0.932509792106056, + "train_speed(iter/s)": 0.387805 + }, + { + "epoch": 2.6848, + "grad_norm": 0.708550770953494, + "learning_rate": 1.4939743922969374e-07, + "loss": 0.2413066029548645, + "memory(GiB)": 77.0, + "step": 8390, + "token_acc": 0.8947589098532495, + "train_speed(iter/s)": 0.387776 + }, + { + "epoch": 2.68512, + "grad_norm": 0.7263870475434456, + "learning_rate": 1.4909729843108595e-07, + "loss": 0.2919106185436249, + "memory(GiB)": 77.0, + "step": 8391, + "token_acc": 0.955719557195572, + "train_speed(iter/s)": 0.387749 + }, + { + "epoch": 2.68544, + "grad_norm": 0.707936045326002, + "learning_rate": 1.4879745016153746e-07, + "loss": 0.3337888717651367, + "memory(GiB)": 77.0, + "step": 8392, + "token_acc": 0.8628753653999468, + 
"train_speed(iter/s)": 0.387725 + }, + { + "epoch": 2.68576, + "grad_norm": 0.6813696801993834, + "learning_rate": 1.4849789445835899e-07, + "loss": 0.3336453139781952, + "memory(GiB)": 77.0, + "step": 8393, + "token_acc": 0.9034611786716558, + "train_speed(iter/s)": 0.387695 + }, + { + "epoch": 2.68608, + "grad_norm": 0.6837947955541747, + "learning_rate": 1.481986313588249e-07, + "loss": 0.24608144164085388, + "memory(GiB)": 77.0, + "step": 8394, + "token_acc": 0.94302655323537, + "train_speed(iter/s)": 0.387669 + }, + { + "epoch": 2.6864, + "grad_norm": 0.7193867797031634, + "learning_rate": 1.4789966090017344e-07, + "loss": 0.2956007719039917, + "memory(GiB)": 77.0, + "step": 8395, + "token_acc": 0.9028892455858748, + "train_speed(iter/s)": 0.387644 + }, + { + "epoch": 2.68672, + "grad_norm": 0.6741178048069963, + "learning_rate": 1.4760098311960652e-07, + "loss": 0.2754673361778259, + "memory(GiB)": 77.0, + "step": 8396, + "token_acc": 0.9488103821196827, + "train_speed(iter/s)": 0.387619 + }, + { + "epoch": 2.68704, + "grad_norm": 0.7432810941072581, + "learning_rate": 1.4730259805428915e-07, + "loss": 0.2930816411972046, + "memory(GiB)": 77.0, + "step": 8397, + "token_acc": 0.8798782175477442, + "train_speed(iter/s)": 0.38759 + }, + { + "epoch": 2.68736, + "grad_norm": 0.690231146005292, + "learning_rate": 1.470045057413505e-07, + "loss": 0.26468396186828613, + "memory(GiB)": 77.0, + "step": 8398, + "token_acc": 0.8837209302325582, + "train_speed(iter/s)": 0.387567 + }, + { + "epoch": 2.68768, + "grad_norm": 0.8316999360506352, + "learning_rate": 1.467067062178823e-07, + "loss": 0.3686768412590027, + "memory(GiB)": 77.0, + "step": 8399, + "token_acc": 0.8106508875739645, + "train_speed(iter/s)": 0.387542 + }, + { + "epoch": 2.6879999999999997, + "grad_norm": 0.7148665068864334, + "learning_rate": 1.4640919952094128e-07, + "loss": 0.3368852436542511, + "memory(GiB)": 77.0, + "step": 8400, + "token_acc": 0.8563218390804598, + "train_speed(iter/s)": 0.387517 + }, + { + "epoch": 2.68832, + "grad_norm": 0.6901286936816654, + "learning_rate": 1.4611198568754676e-07, + "loss": 0.24576842784881592, + "memory(GiB)": 77.0, + "step": 8401, + "token_acc": 0.9158544509421702, + "train_speed(iter/s)": 0.387493 + }, + { + "epoch": 2.68864, + "grad_norm": 0.7538891895800929, + "learning_rate": 1.458150647546816e-07, + "loss": 0.31485220789909363, + "memory(GiB)": 77.0, + "step": 8402, + "token_acc": 0.8996062992125984, + "train_speed(iter/s)": 0.387467 + }, + { + "epoch": 2.68896, + "grad_norm": 0.7659290538422464, + "learning_rate": 1.455184367592924e-07, + "loss": 0.31866228580474854, + "memory(GiB)": 77.0, + "step": 8403, + "token_acc": 0.9372727272727273, + "train_speed(iter/s)": 0.387441 + }, + { + "epoch": 2.68928, + "grad_norm": 0.6862833452443541, + "learning_rate": 1.4522210173828988e-07, + "loss": 0.29171663522720337, + "memory(GiB)": 77.0, + "step": 8404, + "token_acc": 0.9426211693110194, + "train_speed(iter/s)": 0.387416 + }, + { + "epoch": 2.6896, + "grad_norm": 0.7319386101298253, + "learning_rate": 1.449260597285476e-07, + "loss": 0.3385604918003082, + "memory(GiB)": 77.0, + "step": 8405, + "token_acc": 0.9, + "train_speed(iter/s)": 0.38739 + }, + { + "epoch": 2.68992, + "grad_norm": 0.7456106097038954, + "learning_rate": 1.4463031076690281e-07, + "loss": 0.3315584659576416, + "memory(GiB)": 77.0, + "step": 8406, + "token_acc": 0.8764790764790765, + "train_speed(iter/s)": 0.387366 + }, + { + "epoch": 2.69024, + "grad_norm": 0.7721846787664701, + "learning_rate": 
1.4433485489015574e-07, + "loss": 0.2752486765384674, + "memory(GiB)": 77.0, + "step": 8407, + "token_acc": 0.9376141205112599, + "train_speed(iter/s)": 0.387344 + }, + { + "epoch": 2.69056, + "grad_norm": 0.7087113182024347, + "learning_rate": 1.4403969213507174e-07, + "loss": 0.26810672879219055, + "memory(GiB)": 77.0, + "step": 8408, + "token_acc": 0.9177526788300029, + "train_speed(iter/s)": 0.387314 + }, + { + "epoch": 2.69088, + "grad_norm": 0.6928049167432416, + "learning_rate": 1.4374482253837807e-07, + "loss": 0.2707950472831726, + "memory(GiB)": 77.0, + "step": 8409, + "token_acc": 0.9029259896729777, + "train_speed(iter/s)": 0.387288 + }, + { + "epoch": 2.6912000000000003, + "grad_norm": 0.6347160884861678, + "learning_rate": 1.4345024613676652e-07, + "loss": 0.2365293949842453, + "memory(GiB)": 77.0, + "step": 8410, + "token_acc": 0.9606376057254392, + "train_speed(iter/s)": 0.387262 + }, + { + "epoch": 2.69152, + "grad_norm": 0.7017164966661047, + "learning_rate": 1.431559629668916e-07, + "loss": 0.23754598200321198, + "memory(GiB)": 77.0, + "step": 8411, + "token_acc": 0.8961293483586478, + "train_speed(iter/s)": 0.387234 + }, + { + "epoch": 2.69184, + "grad_norm": 0.7091643095218502, + "learning_rate": 1.4286197306537213e-07, + "loss": 0.34804075956344604, + "memory(GiB)": 77.0, + "step": 8412, + "token_acc": 0.9069506726457399, + "train_speed(iter/s)": 0.387202 + }, + { + "epoch": 2.69216, + "grad_norm": 0.7101652970048802, + "learning_rate": 1.4256827646878968e-07, + "loss": 0.2975931167602539, + "memory(GiB)": 77.0, + "step": 8413, + "token_acc": 0.8607322325915291, + "train_speed(iter/s)": 0.387174 + }, + { + "epoch": 2.6924799999999998, + "grad_norm": 0.6791793419532338, + "learning_rate": 1.4227487321369028e-07, + "loss": 0.2735055088996887, + "memory(GiB)": 77.0, + "step": 8414, + "token_acc": 0.9162575801328328, + "train_speed(iter/s)": 0.387148 + }, + { + "epoch": 2.6928, + "grad_norm": 0.7252942184023292, + "learning_rate": 1.419817633365822e-07, + "loss": 0.2917102575302124, + "memory(GiB)": 77.0, + "step": 8415, + "token_acc": 0.950347881087919, + "train_speed(iter/s)": 0.387118 + }, + { + "epoch": 2.69312, + "grad_norm": 0.7019540429729608, + "learning_rate": 1.4168894687393798e-07, + "loss": 0.26595577597618103, + "memory(GiB)": 77.0, + "step": 8416, + "token_acc": 0.8770491803278688, + "train_speed(iter/s)": 0.387094 + }, + { + "epoch": 2.69344, + "grad_norm": 0.7023919393744408, + "learning_rate": 1.41396423862194e-07, + "loss": 0.2692003846168518, + "memory(GiB)": 77.0, + "step": 8417, + "token_acc": 0.8986335403726708, + "train_speed(iter/s)": 0.387067 + }, + { + "epoch": 2.69376, + "grad_norm": 0.7546346963330787, + "learning_rate": 1.4110419433774947e-07, + "loss": 0.3609519898891449, + "memory(GiB)": 77.0, + "step": 8418, + "token_acc": 0.9053286448471733, + "train_speed(iter/s)": 0.387039 + }, + { + "epoch": 2.69408, + "grad_norm": 0.8290555420498336, + "learning_rate": 1.4081225833696698e-07, + "loss": 0.2972118854522705, + "memory(GiB)": 77.0, + "step": 8419, + "token_acc": 0.9659908436886854, + "train_speed(iter/s)": 0.387014 + }, + { + "epoch": 2.6944, + "grad_norm": 0.704085071458583, + "learning_rate": 1.4052061589617388e-07, + "loss": 0.31959062814712524, + "memory(GiB)": 77.0, + "step": 8420, + "token_acc": 0.9316788321167884, + "train_speed(iter/s)": 0.386985 + }, + { + "epoch": 2.6947200000000002, + "grad_norm": 0.7136115402950025, + "learning_rate": 1.4022926705165863e-07, + "loss": 0.26673442125320435, + "memory(GiB)": 77.0, + "step": 8421, 
+ "token_acc": 0.933932584269663, + "train_speed(iter/s)": 0.386959 + }, + { + "epoch": 2.69504, + "grad_norm": 0.7086872173299653, + "learning_rate": 1.3993821183967526e-07, + "loss": 0.26884979009628296, + "memory(GiB)": 77.0, + "step": 8422, + "token_acc": 0.9353662578720583, + "train_speed(iter/s)": 0.386934 + }, + { + "epoch": 2.69536, + "grad_norm": 0.7085418016298904, + "learning_rate": 1.3964745029644038e-07, + "loss": 0.35636574029922485, + "memory(GiB)": 77.0, + "step": 8423, + "token_acc": 0.8725540679711637, + "train_speed(iter/s)": 0.386905 + }, + { + "epoch": 2.69568, + "grad_norm": 0.7121418003277575, + "learning_rate": 1.3935698245813422e-07, + "loss": 0.3426216244697571, + "memory(GiB)": 77.0, + "step": 8424, + "token_acc": 0.8443413066761871, + "train_speed(iter/s)": 0.386877 + }, + { + "epoch": 2.6959999999999997, + "grad_norm": 0.7715324670207265, + "learning_rate": 1.3906680836090065e-07, + "loss": 0.24150939285755157, + "memory(GiB)": 77.0, + "step": 8425, + "token_acc": 0.948019801980198, + "train_speed(iter/s)": 0.386853 + }, + { + "epoch": 2.69632, + "grad_norm": 0.7272494347984553, + "learning_rate": 1.3877692804084687e-07, + "loss": 0.3267644941806793, + "memory(GiB)": 77.0, + "step": 8426, + "token_acc": 0.9349930843706777, + "train_speed(iter/s)": 0.386828 + }, + { + "epoch": 2.69664, + "grad_norm": 0.6679770053160602, + "learning_rate": 1.384873415340432e-07, + "loss": 0.2794435918331146, + "memory(GiB)": 77.0, + "step": 8427, + "token_acc": 0.9116294349540078, + "train_speed(iter/s)": 0.386804 + }, + { + "epoch": 2.69696, + "grad_norm": 0.7647100973877123, + "learning_rate": 1.3819804887652387e-07, + "loss": 0.33386752009391785, + "memory(GiB)": 77.0, + "step": 8428, + "token_acc": 0.9149937264742786, + "train_speed(iter/s)": 0.386776 + }, + { + "epoch": 2.69728, + "grad_norm": 0.7894253325185546, + "learning_rate": 1.3790905010428595e-07, + "loss": 0.2809128165245056, + "memory(GiB)": 77.0, + "step": 8429, + "token_acc": 0.8842897460018815, + "train_speed(iter/s)": 0.386752 + }, + { + "epoch": 2.6976, + "grad_norm": 0.7141881859713384, + "learning_rate": 1.3762034525329093e-07, + "loss": 0.27201682329177856, + "memory(GiB)": 77.0, + "step": 8430, + "token_acc": 0.9580495777717243, + "train_speed(iter/s)": 0.386729 + }, + { + "epoch": 2.69792, + "grad_norm": 0.8001354615368164, + "learning_rate": 1.3733193435946257e-07, + "loss": 0.3415378928184509, + "memory(GiB)": 77.0, + "step": 8431, + "token_acc": 0.8802047781569966, + "train_speed(iter/s)": 0.3867 + }, + { + "epoch": 2.69824, + "grad_norm": 0.6909973741350743, + "learning_rate": 1.3704381745868885e-07, + "loss": 0.31210243701934814, + "memory(GiB)": 77.0, + "step": 8432, + "token_acc": 0.8764388066713648, + "train_speed(iter/s)": 0.386674 + }, + { + "epoch": 2.69856, + "grad_norm": 0.6955508931258647, + "learning_rate": 1.3675599458682103e-07, + "loss": 0.2654351592063904, + "memory(GiB)": 77.0, + "step": 8433, + "token_acc": 0.8583959899749374, + "train_speed(iter/s)": 0.386648 + }, + { + "epoch": 2.69888, + "grad_norm": 0.8048320745407234, + "learning_rate": 1.364684657796733e-07, + "loss": 0.286732017993927, + "memory(GiB)": 77.0, + "step": 8434, + "token_acc": 0.8856358645928637, + "train_speed(iter/s)": 0.386625 + }, + { + "epoch": 2.6992000000000003, + "grad_norm": 0.6953395534665229, + "learning_rate": 1.3618123107302428e-07, + "loss": 0.3376232981681824, + "memory(GiB)": 77.0, + "step": 8435, + "token_acc": 0.8680908782638183, + "train_speed(iter/s)": 0.3866 + }, + { + "epoch": 2.69952, + 
"grad_norm": 0.7339994787890884, + "learning_rate": 1.3589429050261477e-07, + "loss": 0.36334413290023804, + "memory(GiB)": 77.0, + "step": 8436, + "token_acc": 0.8887802971071149, + "train_speed(iter/s)": 0.38657 + }, + { + "epoch": 2.69984, + "grad_norm": 0.7109486826470832, + "learning_rate": 1.3560764410414957e-07, + "loss": 0.337226927280426, + "memory(GiB)": 77.0, + "step": 8437, + "token_acc": 0.9225852272727273, + "train_speed(iter/s)": 0.38654 + }, + { + "epoch": 2.70016, + "grad_norm": 0.6504412478346331, + "learning_rate": 1.353212919132968e-07, + "loss": 0.36263614892959595, + "memory(GiB)": 77.0, + "step": 8438, + "token_acc": 0.8333795397837538, + "train_speed(iter/s)": 0.386507 + }, + { + "epoch": 2.7004799999999998, + "grad_norm": 0.6951289978726954, + "learning_rate": 1.3503523396568851e-07, + "loss": 0.23577189445495605, + "memory(GiB)": 77.0, + "step": 8439, + "token_acc": 0.9394459426329983, + "train_speed(iter/s)": 0.386482 + }, + { + "epoch": 2.7008, + "grad_norm": 0.71921088327438, + "learning_rate": 1.3474947029691903e-07, + "loss": 0.3991163372993469, + "memory(GiB)": 77.0, + "step": 8440, + "token_acc": 0.8509848701113332, + "train_speed(iter/s)": 0.386454 + }, + { + "epoch": 2.70112, + "grad_norm": 0.7063148989475976, + "learning_rate": 1.3446400094254685e-07, + "loss": 0.32791000604629517, + "memory(GiB)": 77.0, + "step": 8441, + "token_acc": 0.8371681415929203, + "train_speed(iter/s)": 0.386424 + }, + { + "epoch": 2.70144, + "grad_norm": 0.7581406834926856, + "learning_rate": 1.3417882593809412e-07, + "loss": 0.3525458574295044, + "memory(GiB)": 77.0, + "step": 8442, + "token_acc": 0.8285113098369279, + "train_speed(iter/s)": 0.386396 + }, + { + "epoch": 2.70176, + "grad_norm": 0.7291160976961978, + "learning_rate": 1.3389394531904577e-07, + "loss": 0.2877195179462433, + "memory(GiB)": 77.0, + "step": 8443, + "token_acc": 0.8882725832012678, + "train_speed(iter/s)": 0.386372 + }, + { + "epoch": 2.70208, + "grad_norm": 0.7183646306195581, + "learning_rate": 1.336093591208501e-07, + "loss": 0.3452446758747101, + "memory(GiB)": 77.0, + "step": 8444, + "token_acc": 0.9188564828183656, + "train_speed(iter/s)": 0.386344 + }, + { + "epoch": 2.7024, + "grad_norm": 0.8018233507066853, + "learning_rate": 1.3332506737891883e-07, + "loss": 0.30450761318206787, + "memory(GiB)": 77.0, + "step": 8445, + "token_acc": 0.8753196930946292, + "train_speed(iter/s)": 0.386319 + }, + { + "epoch": 2.7027200000000002, + "grad_norm": 0.7598331891047602, + "learning_rate": 1.3304107012862722e-07, + "loss": 0.2991259694099426, + "memory(GiB)": 77.0, + "step": 8446, + "token_acc": 0.9443736730360934, + "train_speed(iter/s)": 0.38629 + }, + { + "epoch": 2.70304, + "grad_norm": 0.7129113247436577, + "learning_rate": 1.3275736740531398e-07, + "loss": 0.23978358507156372, + "memory(GiB)": 77.0, + "step": 8447, + "token_acc": 0.8550028264556246, + "train_speed(iter/s)": 0.386264 + }, + { + "epoch": 2.70336, + "grad_norm": 0.6898933179414133, + "learning_rate": 1.3247395924428087e-07, + "loss": 0.2931159436702728, + "memory(GiB)": 77.0, + "step": 8448, + "token_acc": 0.9216032266196118, + "train_speed(iter/s)": 0.386238 + }, + { + "epoch": 2.70368, + "grad_norm": 0.6630302402518804, + "learning_rate": 1.3219084568079327e-07, + "loss": 0.32640162110328674, + "memory(GiB)": 77.0, + "step": 8449, + "token_acc": 0.8392007611798288, + "train_speed(iter/s)": 0.386198 + }, + { + "epoch": 2.7039999999999997, + "grad_norm": 0.7893223797995218, + "learning_rate": 1.3190802675007942e-07, + "loss": 
0.38153910636901855, + "memory(GiB)": 77.0, + "step": 8450, + "token_acc": 0.9087313659872224, + "train_speed(iter/s)": 0.386174 + }, + { + "epoch": 2.70432, + "grad_norm": 0.7296415658503344, + "learning_rate": 1.3162550248733113e-07, + "loss": 0.34508830308914185, + "memory(GiB)": 77.0, + "step": 8451, + "token_acc": 0.8771840972398075, + "train_speed(iter/s)": 0.386147 + }, + { + "epoch": 2.70464, + "grad_norm": 0.7853964284981159, + "learning_rate": 1.313432729277042e-07, + "loss": 0.33283066749572754, + "memory(GiB)": 77.0, + "step": 8452, + "token_acc": 0.8764295676429568, + "train_speed(iter/s)": 0.386124 + }, + { + "epoch": 2.70496, + "grad_norm": 0.7521208424291052, + "learning_rate": 1.3106133810631666e-07, + "loss": 0.362188458442688, + "memory(GiB)": 77.0, + "step": 8453, + "token_acc": 0.9051937345424568, + "train_speed(iter/s)": 0.3861 + }, + { + "epoch": 2.70528, + "grad_norm": 0.6878841079056067, + "learning_rate": 1.307796980582507e-07, + "loss": 0.24377819895744324, + "memory(GiB)": 77.0, + "step": 8454, + "token_acc": 0.9314720812182741, + "train_speed(iter/s)": 0.386074 + }, + { + "epoch": 2.7056, + "grad_norm": 0.7805209145955812, + "learning_rate": 1.304983528185516e-07, + "loss": 0.3281453847885132, + "memory(GiB)": 77.0, + "step": 8455, + "token_acc": 0.8796421794524262, + "train_speed(iter/s)": 0.386049 + }, + { + "epoch": 2.70592, + "grad_norm": 0.7380376885846075, + "learning_rate": 1.3021730242222752e-07, + "loss": 0.2880699634552002, + "memory(GiB)": 77.0, + "step": 8456, + "token_acc": 0.9017958626960673, + "train_speed(iter/s)": 0.386021 + }, + { + "epoch": 2.70624, + "grad_norm": 0.6287101515354014, + "learning_rate": 1.2993654690425074e-07, + "loss": 0.24724692106246948, + "memory(GiB)": 77.0, + "step": 8457, + "token_acc": 0.9353562005277045, + "train_speed(iter/s)": 0.385979 + }, + { + "epoch": 2.70656, + "grad_norm": 0.7051093657394717, + "learning_rate": 1.2965608629955606e-07, + "loss": 0.31088292598724365, + "memory(GiB)": 77.0, + "step": 8458, + "token_acc": 0.9052287581699346, + "train_speed(iter/s)": 0.385951 + }, + { + "epoch": 2.70688, + "grad_norm": 0.7685090038912806, + "learning_rate": 1.2937592064304227e-07, + "loss": 0.3346248269081116, + "memory(GiB)": 77.0, + "step": 8459, + "token_acc": 0.9125117591721543, + "train_speed(iter/s)": 0.385928 + }, + { + "epoch": 2.7072000000000003, + "grad_norm": 0.6901079493886968, + "learning_rate": 1.2909604996957093e-07, + "loss": 0.29986572265625, + "memory(GiB)": 77.0, + "step": 8460, + "token_acc": 0.948051948051948, + "train_speed(iter/s)": 0.385902 + }, + { + "epoch": 2.70752, + "grad_norm": 0.7753569817142296, + "learning_rate": 1.2881647431396722e-07, + "loss": 0.25270211696624756, + "memory(GiB)": 77.0, + "step": 8461, + "token_acc": 0.9591272280270436, + "train_speed(iter/s)": 0.385875 + }, + { + "epoch": 2.70784, + "grad_norm": 0.7506820484181727, + "learning_rate": 1.285371937110194e-07, + "loss": 0.2868890166282654, + "memory(GiB)": 77.0, + "step": 8462, + "token_acc": 0.8572723153602175, + "train_speed(iter/s)": 0.385849 + }, + { + "epoch": 2.70816, + "grad_norm": 0.7931082088577222, + "learning_rate": 1.2825820819547945e-07, + "loss": 0.42138800024986267, + "memory(GiB)": 77.0, + "step": 8463, + "token_acc": 0.8468197214904027, + "train_speed(iter/s)": 0.38582 + }, + { + "epoch": 2.7084799999999998, + "grad_norm": 0.643371045106599, + "learning_rate": 1.279795178020615e-07, + "loss": 0.24952921271324158, + "memory(GiB)": 77.0, + "step": 8464, + "token_acc": 0.9004761904761904, + 
"train_speed(iter/s)": 0.385795 + }, + { + "epoch": 2.7088, + "grad_norm": 0.8420494008241666, + "learning_rate": 1.2770112256544426e-07, + "loss": 0.29786112904548645, + "memory(GiB)": 77.0, + "step": 8465, + "token_acc": 0.9481850117096019, + "train_speed(iter/s)": 0.385768 + }, + { + "epoch": 2.70912, + "grad_norm": 0.671832359057465, + "learning_rate": 1.2742302252026912e-07, + "loss": 0.28152069449424744, + "memory(GiB)": 77.0, + "step": 8466, + "token_acc": 0.8710221285563752, + "train_speed(iter/s)": 0.385741 + }, + { + "epoch": 2.70944, + "grad_norm": 1.0094437414167574, + "learning_rate": 1.2714521770114064e-07, + "loss": 0.3693629205226898, + "memory(GiB)": 77.0, + "step": 8467, + "token_acc": 0.9049217002237137, + "train_speed(iter/s)": 0.385717 + }, + { + "epoch": 2.70976, + "grad_norm": 0.7145800691840831, + "learning_rate": 1.26867708142627e-07, + "loss": 0.30011630058288574, + "memory(GiB)": 77.0, + "step": 8468, + "token_acc": 0.8963393773520356, + "train_speed(iter/s)": 0.385682 + }, + { + "epoch": 2.71008, + "grad_norm": 0.7101488358418734, + "learning_rate": 1.265904938792595e-07, + "loss": 0.30675506591796875, + "memory(GiB)": 77.0, + "step": 8469, + "token_acc": 0.8907154433896736, + "train_speed(iter/s)": 0.385655 + }, + { + "epoch": 2.7104, + "grad_norm": 0.6470498466703879, + "learning_rate": 1.2631357494553215e-07, + "loss": 0.26501721143722534, + "memory(GiB)": 77.0, + "step": 8470, + "token_acc": 0.8841582018586557, + "train_speed(iter/s)": 0.385624 + }, + { + "epoch": 2.7107200000000002, + "grad_norm": 0.6853438487965913, + "learning_rate": 1.2603695137590355e-07, + "loss": 0.2250005006790161, + "memory(GiB)": 77.0, + "step": 8471, + "token_acc": 0.9121803499327052, + "train_speed(iter/s)": 0.385601 + }, + { + "epoch": 2.71104, + "grad_norm": 0.7138342724667142, + "learning_rate": 1.2576062320479392e-07, + "loss": 0.32733476161956787, + "memory(GiB)": 77.0, + "step": 8472, + "token_acc": 0.859331216140482, + "train_speed(iter/s)": 0.385577 + }, + { + "epoch": 2.71136, + "grad_norm": 0.8067103597558517, + "learning_rate": 1.25484590466588e-07, + "loss": 0.25296515226364136, + "memory(GiB)": 77.0, + "step": 8473, + "token_acc": 0.8932291666666666, + "train_speed(iter/s)": 0.385551 + }, + { + "epoch": 2.71168, + "grad_norm": 0.8290714728732572, + "learning_rate": 1.2520885319563276e-07, + "loss": 0.3563823103904724, + "memory(GiB)": 77.0, + "step": 8474, + "token_acc": 0.9277489925158319, + "train_speed(iter/s)": 0.385525 + }, + { + "epoch": 2.7119999999999997, + "grad_norm": 0.7452963874880312, + "learning_rate": 1.249334114262396e-07, + "loss": 0.31270208954811096, + "memory(GiB)": 77.0, + "step": 8475, + "token_acc": 0.897672935926044, + "train_speed(iter/s)": 0.385495 + }, + { + "epoch": 2.71232, + "grad_norm": 0.7630577668308471, + "learning_rate": 1.2465826519268198e-07, + "loss": 0.35136157274246216, + "memory(GiB)": 77.0, + "step": 8476, + "token_acc": 0.8992980561555075, + "train_speed(iter/s)": 0.385467 + }, + { + "epoch": 2.71264, + "grad_norm": 0.7115009249595341, + "learning_rate": 1.243834145291975e-07, + "loss": 0.31302133202552795, + "memory(GiB)": 77.0, + "step": 8477, + "token_acc": 0.8782207756983997, + "train_speed(iter/s)": 0.385441 + }, + { + "epoch": 2.71296, + "grad_norm": 0.7247190671289343, + "learning_rate": 1.241088594699866e-07, + "loss": 0.3695123791694641, + "memory(GiB)": 77.0, + "step": 8478, + "token_acc": 0.847015685475657, + "train_speed(iter/s)": 0.385412 + }, + { + "epoch": 2.71328, + "grad_norm": 0.6838680924147413, + 
"learning_rate": 1.2383460004921217e-07, + "loss": 0.29509758949279785, + "memory(GiB)": 77.0, + "step": 8479, + "token_acc": 0.8815139879319802, + "train_speed(iter/s)": 0.385381 + }, + { + "epoch": 2.7136, + "grad_norm": 0.647651564081862, + "learning_rate": 1.235606363010014e-07, + "loss": 0.25914615392684937, + "memory(GiB)": 77.0, + "step": 8480, + "token_acc": 0.8747563352826511, + "train_speed(iter/s)": 0.385352 + }, + { + "epoch": 2.71392, + "grad_norm": 0.7627252889705933, + "learning_rate": 1.2328696825944426e-07, + "loss": 0.37988215684890747, + "memory(GiB)": 77.0, + "step": 8481, + "token_acc": 0.8903726708074534, + "train_speed(iter/s)": 0.385325 + }, + { + "epoch": 2.71424, + "grad_norm": 0.7063998345309185, + "learning_rate": 1.230135959585943e-07, + "loss": 0.28691673278808594, + "memory(GiB)": 77.0, + "step": 8482, + "token_acc": 0.9521118381915527, + "train_speed(iter/s)": 0.385297 + }, + { + "epoch": 2.71456, + "grad_norm": 0.6956029693167688, + "learning_rate": 1.2274051943246795e-07, + "loss": 0.36774742603302, + "memory(GiB)": 77.0, + "step": 8483, + "token_acc": 0.9104215456674473, + "train_speed(iter/s)": 0.385267 + }, + { + "epoch": 2.71488, + "grad_norm": 0.758890329123868, + "learning_rate": 1.2246773871504442e-07, + "loss": 0.3061351776123047, + "memory(GiB)": 77.0, + "step": 8484, + "token_acc": 0.9472598027975235, + "train_speed(iter/s)": 0.385232 + }, + { + "epoch": 2.7152, + "grad_norm": 0.7521656462728464, + "learning_rate": 1.221952538402668e-07, + "loss": 0.2466621994972229, + "memory(GiB)": 77.0, + "step": 8485, + "token_acc": 0.9116365899191039, + "train_speed(iter/s)": 0.385206 + }, + { + "epoch": 2.71552, + "grad_norm": 0.6996347871642995, + "learning_rate": 1.2192306484204108e-07, + "loss": 0.3628804683685303, + "memory(GiB)": 77.0, + "step": 8486, + "token_acc": 0.8475810646783423, + "train_speed(iter/s)": 0.38517 + }, + { + "epoch": 2.71584, + "grad_norm": 0.6635026948282102, + "learning_rate": 1.216511717542365e-07, + "loss": 0.32938826084136963, + "memory(GiB)": 77.0, + "step": 8487, + "token_acc": 0.9209995969367191, + "train_speed(iter/s)": 0.385141 + }, + { + "epoch": 2.71616, + "grad_norm": 0.6781786398742128, + "learning_rate": 1.213795746106855e-07, + "loss": 0.311684250831604, + "memory(GiB)": 77.0, + "step": 8488, + "token_acc": 0.8861979778979544, + "train_speed(iter/s)": 0.385116 + }, + { + "epoch": 2.71648, + "grad_norm": 0.8153493464571945, + "learning_rate": 1.211082734451835e-07, + "loss": 0.32359689474105835, + "memory(GiB)": 77.0, + "step": 8489, + "token_acc": 0.896361631753032, + "train_speed(iter/s)": 0.385088 + }, + { + "epoch": 2.7168, + "grad_norm": 0.7670651590915016, + "learning_rate": 1.2083726829148906e-07, + "loss": 0.3271786570549011, + "memory(GiB)": 77.0, + "step": 8490, + "token_acc": 0.9508965517241379, + "train_speed(iter/s)": 0.385061 + }, + { + "epoch": 2.71712, + "grad_norm": 0.6642813976833158, + "learning_rate": 1.205665591833241e-07, + "loss": 0.3349185883998871, + "memory(GiB)": 77.0, + "step": 8491, + "token_acc": 0.9420157862780814, + "train_speed(iter/s)": 0.385034 + }, + { + "epoch": 2.71744, + "grad_norm": 0.7001785737748834, + "learning_rate": 1.2029614615437473e-07, + "loss": 0.2580801844596863, + "memory(GiB)": 77.0, + "step": 8492, + "token_acc": 0.9535508637236084, + "train_speed(iter/s)": 0.385009 + }, + { + "epoch": 2.71776, + "grad_norm": 0.7342999888777585, + "learning_rate": 1.2002602923828764e-07, + "loss": 0.3423873782157898, + "memory(GiB)": 77.0, + "step": 8493, + "token_acc": 
0.920162932790224, + "train_speed(iter/s)": 0.38498 + }, + { + "epoch": 2.71808, + "grad_norm": 0.7007182585853029, + "learning_rate": 1.1975620846867486e-07, + "loss": 0.2447746992111206, + "memory(GiB)": 77.0, + "step": 8494, + "token_acc": 0.9186471663619744, + "train_speed(iter/s)": 0.384953 + }, + { + "epoch": 2.7184, + "grad_norm": 0.7139716506944207, + "learning_rate": 1.1948668387911117e-07, + "loss": 0.2981432378292084, + "memory(GiB)": 77.0, + "step": 8495, + "token_acc": 0.9447603574329814, + "train_speed(iter/s)": 0.384924 + }, + { + "epoch": 2.7187200000000002, + "grad_norm": 0.7566590824429634, + "learning_rate": 1.1921745550313364e-07, + "loss": 0.33526721596717834, + "memory(GiB)": 77.0, + "step": 8496, + "token_acc": 0.8457465584186374, + "train_speed(iter/s)": 0.3849 + }, + { + "epoch": 2.71904, + "grad_norm": 0.6806538165147564, + "learning_rate": 1.1894852337424351e-07, + "loss": 0.30852383375167847, + "memory(GiB)": 77.0, + "step": 8497, + "token_acc": 0.8964265773311, + "train_speed(iter/s)": 0.384876 + }, + { + "epoch": 2.71936, + "grad_norm": 0.7210994640600296, + "learning_rate": 1.1867988752590459e-07, + "loss": 0.3433394134044647, + "memory(GiB)": 77.0, + "step": 8498, + "token_acc": 0.9204961939667324, + "train_speed(iter/s)": 0.384846 + }, + { + "epoch": 2.71968, + "grad_norm": 0.6937631554287702, + "learning_rate": 1.1841154799154376e-07, + "loss": 0.3855738639831543, + "memory(GiB)": 77.0, + "step": 8499, + "token_acc": 0.8836646963997851, + "train_speed(iter/s)": 0.384818 + }, + { + "epoch": 2.7199999999999998, + "grad_norm": 0.7748932074197276, + "learning_rate": 1.181435048045515e-07, + "loss": 0.3930155336856842, + "memory(GiB)": 77.0, + "step": 8500, + "token_acc": 0.8640075376884422, + "train_speed(iter/s)": 0.384791 + }, + { + "epoch": 2.72032, + "grad_norm": 0.7825492722509223, + "learning_rate": 1.1787575799828116e-07, + "loss": 0.3709595799446106, + "memory(GiB)": 77.0, + "step": 8501, + "token_acc": 0.9077658303464755, + "train_speed(iter/s)": 0.384765 + }, + { + "epoch": 2.72064, + "grad_norm": 0.652313407674877, + "learning_rate": 1.1760830760604885e-07, + "loss": 0.24320557713508606, + "memory(GiB)": 77.0, + "step": 8502, + "token_acc": 0.9493738342659206, + "train_speed(iter/s)": 0.384735 + }, + { + "epoch": 2.72096, + "grad_norm": 0.7264489115748511, + "learning_rate": 1.1734115366113435e-07, + "loss": 0.35834771394729614, + "memory(GiB)": 77.0, + "step": 8503, + "token_acc": 0.8829249319331, + "train_speed(iter/s)": 0.384707 + }, + { + "epoch": 2.72128, + "grad_norm": 0.656176446217992, + "learning_rate": 1.1707429619678052e-07, + "loss": 0.2754318118095398, + "memory(GiB)": 77.0, + "step": 8504, + "token_acc": 0.9131979695431472, + "train_speed(iter/s)": 0.384679 + }, + { + "epoch": 2.7216, + "grad_norm": 0.7852287997580639, + "learning_rate": 1.1680773524619271e-07, + "loss": 0.3514186441898346, + "memory(GiB)": 77.0, + "step": 8505, + "token_acc": 0.8744578863273226, + "train_speed(iter/s)": 0.384642 + }, + { + "epoch": 2.72192, + "grad_norm": 0.757082248364925, + "learning_rate": 1.1654147084254025e-07, + "loss": 0.2881942689418793, + "memory(GiB)": 77.0, + "step": 8506, + "token_acc": 0.9033781329458772, + "train_speed(iter/s)": 0.384615 + }, + { + "epoch": 2.72224, + "grad_norm": 0.7023877761991995, + "learning_rate": 1.1627550301895468e-07, + "loss": 0.28210484981536865, + "memory(GiB)": 77.0, + "step": 8507, + "token_acc": 0.9161603888213852, + "train_speed(iter/s)": 0.384591 + }, + { + "epoch": 2.72256, + "grad_norm": 
0.7139470084808536, + "learning_rate": 1.1600983180853148e-07, + "loss": 0.32891011238098145, + "memory(GiB)": 77.0, + "step": 8508, + "token_acc": 0.8483848149021457, + "train_speed(iter/s)": 0.384561 + }, + { + "epoch": 2.72288, + "grad_norm": 0.7124729418804915, + "learning_rate": 1.1574445724432865e-07, + "loss": 0.30478957295417786, + "memory(GiB)": 77.0, + "step": 8509, + "token_acc": 0.8888450926290895, + "train_speed(iter/s)": 0.384535 + }, + { + "epoch": 2.7232, + "grad_norm": 0.7768987151624566, + "learning_rate": 1.1547937935936754e-07, + "loss": 0.2692122161388397, + "memory(GiB)": 77.0, + "step": 8510, + "token_acc": 0.9201409277745156, + "train_speed(iter/s)": 0.38451 + }, + { + "epoch": 2.72352, + "grad_norm": 0.813169548189058, + "learning_rate": 1.1521459818663233e-07, + "loss": 0.31546664237976074, + "memory(GiB)": 77.0, + "step": 8511, + "token_acc": 0.9545823195458232, + "train_speed(iter/s)": 0.384486 + }, + { + "epoch": 2.72384, + "grad_norm": 0.721876479842747, + "learning_rate": 1.1495011375907055e-07, + "loss": 0.25787848234176636, + "memory(GiB)": 77.0, + "step": 8512, + "token_acc": 0.9164658634538153, + "train_speed(iter/s)": 0.384461 + }, + { + "epoch": 2.72416, + "grad_norm": 0.7289621288053931, + "learning_rate": 1.1468592610959284e-07, + "loss": 0.3306220471858978, + "memory(GiB)": 77.0, + "step": 8513, + "token_acc": 0.8645307330663417, + "train_speed(iter/s)": 0.384435 + }, + { + "epoch": 2.72448, + "grad_norm": 0.7351406431099277, + "learning_rate": 1.1442203527107231e-07, + "loss": 0.32770687341690063, + "memory(GiB)": 77.0, + "step": 8514, + "token_acc": 0.9480427046263346, + "train_speed(iter/s)": 0.384406 + }, + { + "epoch": 2.7248, + "grad_norm": 0.7585565089513601, + "learning_rate": 1.1415844127634607e-07, + "loss": 0.26917433738708496, + "memory(GiB)": 77.0, + "step": 8515, + "token_acc": 0.9453870625662778, + "train_speed(iter/s)": 0.38438 + }, + { + "epoch": 2.72512, + "grad_norm": 0.701850818404909, + "learning_rate": 1.1389514415821368e-07, + "loss": 0.25934040546417236, + "memory(GiB)": 77.0, + "step": 8516, + "token_acc": 0.9534754915535862, + "train_speed(iter/s)": 0.384353 + }, + { + "epoch": 2.72544, + "grad_norm": 0.7514991844421332, + "learning_rate": 1.1363214394943784e-07, + "loss": 0.2423456311225891, + "memory(GiB)": 77.0, + "step": 8517, + "token_acc": 0.9606408916753745, + "train_speed(iter/s)": 0.384328 + }, + { + "epoch": 2.72576, + "grad_norm": 0.6908799863739525, + "learning_rate": 1.133694406827443e-07, + "loss": 0.37480658292770386, + "memory(GiB)": 77.0, + "step": 8518, + "token_acc": 0.8705240692435381, + "train_speed(iter/s)": 0.384301 + }, + { + "epoch": 2.72608, + "grad_norm": 0.707776179163363, + "learning_rate": 1.1310703439082194e-07, + "loss": 0.2951931953430176, + "memory(GiB)": 77.0, + "step": 8519, + "token_acc": 0.9483438485804416, + "train_speed(iter/s)": 0.384272 + }, + { + "epoch": 2.7264, + "grad_norm": 0.6921411843194389, + "learning_rate": 1.1284492510632295e-07, + "loss": 0.25181397795677185, + "memory(GiB)": 77.0, + "step": 8520, + "token_acc": 0.8962347534028637, + "train_speed(iter/s)": 0.384246 + }, + { + "epoch": 2.7267200000000003, + "grad_norm": 0.7567730436779917, + "learning_rate": 1.1258311286186208e-07, + "loss": 0.261448472738266, + "memory(GiB)": 77.0, + "step": 8521, + "token_acc": 0.927038626609442, + "train_speed(iter/s)": 0.384218 + }, + { + "epoch": 2.72704, + "grad_norm": 0.7787317997712583, + "learning_rate": 1.1232159769001716e-07, + "loss": 0.23568770289421082, + "memory(GiB)": 77.0, 
+ "step": 8522, + "token_acc": 0.9298100743187449, + "train_speed(iter/s)": 0.384196 + }, + { + "epoch": 2.72736, + "grad_norm": 0.7967711450918366, + "learning_rate": 1.1206037962332939e-07, + "loss": 0.3624156713485718, + "memory(GiB)": 77.0, + "step": 8523, + "token_acc": 0.917184265010352, + "train_speed(iter/s)": 0.384173 + }, + { + "epoch": 2.72768, + "grad_norm": 0.7313181461443442, + "learning_rate": 1.1179945869430303e-07, + "loss": 0.3194598853588104, + "memory(GiB)": 77.0, + "step": 8524, + "token_acc": 0.8505142310452045, + "train_speed(iter/s)": 0.38415 + }, + { + "epoch": 2.7279999999999998, + "grad_norm": 0.7145730936805131, + "learning_rate": 1.1153883493540463e-07, + "loss": 0.303696870803833, + "memory(GiB)": 77.0, + "step": 8525, + "token_acc": 0.9172432881262109, + "train_speed(iter/s)": 0.384126 + }, + { + "epoch": 2.72832, + "grad_norm": 0.7428175403089521, + "learning_rate": 1.112785083790649e-07, + "loss": 0.3061353266239166, + "memory(GiB)": 77.0, + "step": 8526, + "token_acc": 0.8976593625498008, + "train_speed(iter/s)": 0.384103 + }, + { + "epoch": 2.72864, + "grad_norm": 0.7019113191959082, + "learning_rate": 1.1101847905767654e-07, + "loss": 0.2516758441925049, + "memory(GiB)": 77.0, + "step": 8527, + "token_acc": 0.8743740474635314, + "train_speed(iter/s)": 0.384076 + }, + { + "epoch": 2.72896, + "grad_norm": 0.7329024874745294, + "learning_rate": 1.1075874700359591e-07, + "loss": 0.26124870777130127, + "memory(GiB)": 77.0, + "step": 8528, + "token_acc": 0.886378170965679, + "train_speed(iter/s)": 0.384044 + }, + { + "epoch": 2.72928, + "grad_norm": 0.6963294677323973, + "learning_rate": 1.1049931224914212e-07, + "loss": 0.23955625295639038, + "memory(GiB)": 77.0, + "step": 8529, + "token_acc": 0.9602150537634409, + "train_speed(iter/s)": 0.384019 + }, + { + "epoch": 2.7296, + "grad_norm": 0.7545486132613053, + "learning_rate": 1.1024017482659716e-07, + "loss": 0.3412248194217682, + "memory(GiB)": 77.0, + "step": 8530, + "token_acc": 0.9069037656903766, + "train_speed(iter/s)": 0.383993 + }, + { + "epoch": 2.72992, + "grad_norm": 0.7132804368926768, + "learning_rate": 1.0998133476820633e-07, + "loss": 0.3238963782787323, + "memory(GiB)": 77.0, + "step": 8531, + "token_acc": 0.837653920933247, + "train_speed(iter/s)": 0.383968 + }, + { + "epoch": 2.7302400000000002, + "grad_norm": 0.7068983579066946, + "learning_rate": 1.0972279210617776e-07, + "loss": 0.34455180168151855, + "memory(GiB)": 77.0, + "step": 8532, + "token_acc": 0.8595956798670729, + "train_speed(iter/s)": 0.383937 + }, + { + "epoch": 2.73056, + "grad_norm": 0.6986537096041697, + "learning_rate": 1.0946454687268238e-07, + "loss": 0.21986636519432068, + "memory(GiB)": 77.0, + "step": 8533, + "token_acc": 0.9159120310478654, + "train_speed(iter/s)": 0.383915 + }, + { + "epoch": 2.73088, + "grad_norm": 0.8165421969050938, + "learning_rate": 1.0920659909985476e-07, + "loss": 0.31522199511528015, + "memory(GiB)": 77.0, + "step": 8534, + "token_acc": 0.9366301450992005, + "train_speed(iter/s)": 0.383891 + }, + { + "epoch": 2.7312, + "grad_norm": 0.6730397565359546, + "learning_rate": 1.0894894881979229e-07, + "loss": 0.2572120428085327, + "memory(GiB)": 77.0, + "step": 8535, + "token_acc": 0.9242463282659108, + "train_speed(iter/s)": 0.383867 + }, + { + "epoch": 2.7315199999999997, + "grad_norm": 0.7439526236230646, + "learning_rate": 1.0869159606455404e-07, + "loss": 0.33357805013656616, + "memory(GiB)": 77.0, + "step": 8536, + "token_acc": 0.9086314152410575, + "train_speed(iter/s)": 0.383842 + }, + { 
+ "epoch": 2.73184, + "grad_norm": 0.7426364665922026, + "learning_rate": 1.0843454086616356e-07, + "loss": 0.283153772354126, + "memory(GiB)": 77.0, + "step": 8537, + "token_acc": 0.9143846395971041, + "train_speed(iter/s)": 0.38382 + }, + { + "epoch": 2.73216, + "grad_norm": 0.7219314654700641, + "learning_rate": 1.0817778325660721e-07, + "loss": 0.35040581226348877, + "memory(GiB)": 77.0, + "step": 8538, + "token_acc": 0.8905833938513678, + "train_speed(iter/s)": 0.383795 + }, + { + "epoch": 2.73248, + "grad_norm": 0.7048476830638367, + "learning_rate": 1.0792132326783362e-07, + "loss": 0.32347339391708374, + "memory(GiB)": 77.0, + "step": 8539, + "token_acc": 0.8899486426999267, + "train_speed(iter/s)": 0.38377 + }, + { + "epoch": 2.7328, + "grad_norm": 0.6858255252321148, + "learning_rate": 1.0766516093175528e-07, + "loss": 0.24843862652778625, + "memory(GiB)": 77.0, + "step": 8540, + "token_acc": 0.9189303904923599, + "train_speed(iter/s)": 0.383745 + }, + { + "epoch": 2.73312, + "grad_norm": 0.7789200013395159, + "learning_rate": 1.0740929628024671e-07, + "loss": 0.360016405582428, + "memory(GiB)": 77.0, + "step": 8541, + "token_acc": 0.8566750629722922, + "train_speed(iter/s)": 0.383712 + }, + { + "epoch": 2.73344, + "grad_norm": 0.7043232742919359, + "learning_rate": 1.0715372934514606e-07, + "loss": 0.23390430212020874, + "memory(GiB)": 77.0, + "step": 8542, + "token_acc": 0.9589905362776026, + "train_speed(iter/s)": 0.38369 + }, + { + "epoch": 2.73376, + "grad_norm": 0.6789757862323972, + "learning_rate": 1.0689846015825401e-07, + "loss": 0.29797986149787903, + "memory(GiB)": 77.0, + "step": 8543, + "token_acc": 0.8681490761039774, + "train_speed(iter/s)": 0.383661 + }, + { + "epoch": 2.73408, + "grad_norm": 0.7243888376502696, + "learning_rate": 1.0664348875133456e-07, + "loss": 0.2758384346961975, + "memory(GiB)": 77.0, + "step": 8544, + "token_acc": 0.9198158914728682, + "train_speed(iter/s)": 0.383633 + }, + { + "epoch": 2.7344, + "grad_norm": 0.828972739637715, + "learning_rate": 1.0638881515611432e-07, + "loss": 0.3168466091156006, + "memory(GiB)": 77.0, + "step": 8545, + "token_acc": 0.8441253263707572, + "train_speed(iter/s)": 0.383608 + }, + { + "epoch": 2.7347200000000003, + "grad_norm": 0.7154634029239179, + "learning_rate": 1.061344394042832e-07, + "loss": 0.30286675691604614, + "memory(GiB)": 77.0, + "step": 8546, + "token_acc": 0.931464908148846, + "train_speed(iter/s)": 0.383583 + }, + { + "epoch": 2.73504, + "grad_norm": 0.7349248629043886, + "learning_rate": 1.0588036152749393e-07, + "loss": 0.3007190525531769, + "memory(GiB)": 77.0, + "step": 8547, + "token_acc": 0.9149305555555556, + "train_speed(iter/s)": 0.383556 + }, + { + "epoch": 2.73536, + "grad_norm": 0.7101391340480002, + "learning_rate": 1.056265815573615e-07, + "loss": 0.32806527614593506, + "memory(GiB)": 77.0, + "step": 8548, + "token_acc": 0.9231927710843374, + "train_speed(iter/s)": 0.383529 + }, + { + "epoch": 2.73568, + "grad_norm": 0.6505087925302417, + "learning_rate": 1.053730995254651e-07, + "loss": 0.2577624022960663, + "memory(GiB)": 77.0, + "step": 8549, + "token_acc": 0.8771532184950136, + "train_speed(iter/s)": 0.383505 + }, + { + "epoch": 2.7359999999999998, + "grad_norm": 0.7831968077129956, + "learning_rate": 1.0511991546334588e-07, + "loss": 0.39441365003585815, + "memory(GiB)": 77.0, + "step": 8550, + "token_acc": 0.85657104736491, + "train_speed(iter/s)": 0.383479 + }, + { + "epoch": 2.73632, + "grad_norm": 0.639303589471783, + "learning_rate": 1.0486702940250837e-07, + "loss": 
0.2607225179672241, + "memory(GiB)": 77.0, + "step": 8551, + "token_acc": 0.9294889314020224, + "train_speed(iter/s)": 0.38345 + }, + { + "epoch": 2.73664, + "grad_norm": 0.7897835398167236, + "learning_rate": 1.0461444137441934e-07, + "loss": 0.3680303394794464, + "memory(GiB)": 77.0, + "step": 8552, + "token_acc": 0.9287510477787091, + "train_speed(iter/s)": 0.383424 + }, + { + "epoch": 2.73696, + "grad_norm": 0.8067500563878973, + "learning_rate": 1.0436215141050949e-07, + "loss": 0.30841028690338135, + "memory(GiB)": 77.0, + "step": 8553, + "token_acc": 0.8641933287950987, + "train_speed(iter/s)": 0.383399 + }, + { + "epoch": 2.73728, + "grad_norm": 0.6752875655692053, + "learning_rate": 1.0411015954217146e-07, + "loss": 0.3155326843261719, + "memory(GiB)": 77.0, + "step": 8554, + "token_acc": 0.9553027224705404, + "train_speed(iter/s)": 0.383373 + }, + { + "epoch": 2.7376, + "grad_norm": 0.6663004162213808, + "learning_rate": 1.0385846580076186e-07, + "loss": 0.3418082594871521, + "memory(GiB)": 77.0, + "step": 8555, + "token_acc": 0.9503956177723676, + "train_speed(iter/s)": 0.383346 + }, + { + "epoch": 2.73792, + "grad_norm": 0.7788935908118096, + "learning_rate": 1.0360707021759952e-07, + "loss": 0.27244821190834045, + "memory(GiB)": 77.0, + "step": 8556, + "token_acc": 0.8894695584008362, + "train_speed(iter/s)": 0.383324 + }, + { + "epoch": 2.7382400000000002, + "grad_norm": 0.6841678588671714, + "learning_rate": 1.0335597282396581e-07, + "loss": 0.23180001974105835, + "memory(GiB)": 77.0, + "step": 8557, + "token_acc": 0.8921610169491525, + "train_speed(iter/s)": 0.383299 + }, + { + "epoch": 2.73856, + "grad_norm": 0.8055378743932778, + "learning_rate": 1.0310517365110601e-07, + "loss": 0.32696571946144104, + "memory(GiB)": 77.0, + "step": 8558, + "token_acc": 0.9343065693430657, + "train_speed(iter/s)": 0.383275 + }, + { + "epoch": 2.73888, + "grad_norm": 0.6972272001811542, + "learning_rate": 1.0285467273022737e-07, + "loss": 0.25246763229370117, + "memory(GiB)": 77.0, + "step": 8559, + "token_acc": 0.9104660670482421, + "train_speed(iter/s)": 0.383252 + }, + { + "epoch": 2.7392, + "grad_norm": 0.659155322070469, + "learning_rate": 1.0260447009250024e-07, + "loss": 0.2936546206474304, + "memory(GiB)": 77.0, + "step": 8560, + "token_acc": 0.8830504533829342, + "train_speed(iter/s)": 0.383229 + }, + { + "epoch": 2.7395199999999997, + "grad_norm": 0.6824287873367383, + "learning_rate": 1.0235456576905834e-07, + "loss": 0.32846200466156006, + "memory(GiB)": 77.0, + "step": 8561, + "token_acc": 0.9454390451832907, + "train_speed(iter/s)": 0.383206 + }, + { + "epoch": 2.73984, + "grad_norm": 0.8287672643511508, + "learning_rate": 1.0210495979099788e-07, + "loss": 0.2969398498535156, + "memory(GiB)": 77.0, + "step": 8562, + "token_acc": 0.9212279866730129, + "train_speed(iter/s)": 0.38318 + }, + { + "epoch": 2.74016, + "grad_norm": 0.7148050517291675, + "learning_rate": 1.0185565218937792e-07, + "loss": 0.347622811794281, + "memory(GiB)": 77.0, + "step": 8563, + "token_acc": 0.87354507792464, + "train_speed(iter/s)": 0.383155 + }, + { + "epoch": 2.74048, + "grad_norm": 0.7772959480683498, + "learning_rate": 1.0160664299522061e-07, + "loss": 0.32979607582092285, + "memory(GiB)": 77.0, + "step": 8564, + "token_acc": 0.913556920170052, + "train_speed(iter/s)": 0.383131 + }, + { + "epoch": 2.7408, + "grad_norm": 0.6707143199582151, + "learning_rate": 1.0135793223951057e-07, + "loss": 0.290684312582016, + "memory(GiB)": 77.0, + "step": 8565, + "token_acc": 0.9177073326929235, + 
"train_speed(iter/s)": 0.3831 + }, + { + "epoch": 2.74112, + "grad_norm": 0.7333610217438166, + "learning_rate": 1.0110951995319529e-07, + "loss": 0.3280867636203766, + "memory(GiB)": 77.0, + "step": 8566, + "token_acc": 0.9173310952580781, + "train_speed(iter/s)": 0.383076 + }, + { + "epoch": 2.74144, + "grad_norm": 0.8062300201205002, + "learning_rate": 1.0086140616718615e-07, + "loss": 0.2592145800590515, + "memory(GiB)": 77.0, + "step": 8567, + "token_acc": 0.9026082130965594, + "train_speed(iter/s)": 0.383054 + }, + { + "epoch": 2.74176, + "grad_norm": 0.9070794969362619, + "learning_rate": 1.0061359091235595e-07, + "loss": 0.26530104875564575, + "memory(GiB)": 77.0, + "step": 8568, + "token_acc": 0.9413180860668071, + "train_speed(iter/s)": 0.38303 + }, + { + "epoch": 2.74208, + "grad_norm": 0.7361320960677747, + "learning_rate": 1.0036607421954114e-07, + "loss": 0.2534412145614624, + "memory(GiB)": 77.0, + "step": 8569, + "token_acc": 0.9453924914675768, + "train_speed(iter/s)": 0.383004 + }, + { + "epoch": 2.7424, + "grad_norm": 0.7170251077678276, + "learning_rate": 1.0011885611954125e-07, + "loss": 0.23498137295246124, + "memory(GiB)": 77.0, + "step": 8570, + "token_acc": 0.9244904777814902, + "train_speed(iter/s)": 0.382982 + }, + { + "epoch": 2.7427200000000003, + "grad_norm": 0.7505023816820144, + "learning_rate": 9.987193664311751e-08, + "loss": 0.3335076570510864, + "memory(GiB)": 77.0, + "step": 8571, + "token_acc": 0.9105158164719294, + "train_speed(iter/s)": 0.382955 + }, + { + "epoch": 2.74304, + "grad_norm": 0.7491978462219764, + "learning_rate": 9.96253158209956e-08, + "loss": 0.2806464433670044, + "memory(GiB)": 77.0, + "step": 8572, + "token_acc": 0.9589074208363548, + "train_speed(iter/s)": 0.382931 + }, + { + "epoch": 2.74336, + "grad_norm": 0.8416336419811835, + "learning_rate": 9.937899368386267e-08, + "loss": 0.32348090410232544, + "memory(GiB)": 77.0, + "step": 8573, + "token_acc": 0.9212184873949579, + "train_speed(iter/s)": 0.382907 + }, + { + "epoch": 2.74368, + "grad_norm": 0.7055457025881939, + "learning_rate": 9.913297026236973e-08, + "loss": 0.3160310387611389, + "memory(GiB)": 77.0, + "step": 8574, + "token_acc": 0.838472485768501, + "train_speed(iter/s)": 0.382884 + }, + { + "epoch": 2.7439999999999998, + "grad_norm": 0.7314429362492019, + "learning_rate": 9.888724558712954e-08, + "loss": 0.3782891035079956, + "memory(GiB)": 77.0, + "step": 8575, + "token_acc": 0.9119973632168754, + "train_speed(iter/s)": 0.38286 + }, + { + "epoch": 2.74432, + "grad_norm": 0.7068342543153446, + "learning_rate": 9.864181968871873e-08, + "loss": 0.3289150893688202, + "memory(GiB)": 77.0, + "step": 8576, + "token_acc": 0.8848823756266873, + "train_speed(iter/s)": 0.38283 + }, + { + "epoch": 2.74464, + "grad_norm": 0.6452915575602292, + "learning_rate": 9.83966925976762e-08, + "loss": 0.176749587059021, + "memory(GiB)": 77.0, + "step": 8577, + "token_acc": 0.929399727148704, + "train_speed(iter/s)": 0.382807 + }, + { + "epoch": 2.74496, + "grad_norm": 0.7006427065022569, + "learning_rate": 9.815186434450397e-08, + "loss": 0.28062039613723755, + "memory(GiB)": 77.0, + "step": 8578, + "token_acc": 0.8598688133571855, + "train_speed(iter/s)": 0.382782 + }, + { + "epoch": 2.74528, + "grad_norm": 0.6840211396071244, + "learning_rate": 9.790733495966597e-08, + "loss": 0.2645212709903717, + "memory(GiB)": 77.0, + "step": 8579, + "token_acc": 0.8988673139158576, + "train_speed(iter/s)": 0.382756 + }, + { + "epoch": 2.7456, + "grad_norm": 0.641916550095898, + "learning_rate": 
9.766310447359012e-08, + "loss": 0.3419393301010132, + "memory(GiB)": 77.0, + "step": 8580, + "token_acc": 0.9511450381679389, + "train_speed(iter/s)": 0.38273 + }, + { + "epoch": 2.74592, + "grad_norm": 0.7492545666036657, + "learning_rate": 9.741917291666653e-08, + "loss": 0.2807430922985077, + "memory(GiB)": 77.0, + "step": 8581, + "token_acc": 0.9141616566466266, + "train_speed(iter/s)": 0.382703 + }, + { + "epoch": 2.7462400000000002, + "grad_norm": 0.7343735765071578, + "learning_rate": 9.717554031924842e-08, + "loss": 0.2638736665248871, + "memory(GiB)": 77.0, + "step": 8582, + "token_acc": 0.926530612244898, + "train_speed(iter/s)": 0.382678 + }, + { + "epoch": 2.74656, + "grad_norm": 0.7456025275655678, + "learning_rate": 9.693220671165126e-08, + "loss": 0.3661622703075409, + "memory(GiB)": 77.0, + "step": 8583, + "token_acc": 0.9473407056345445, + "train_speed(iter/s)": 0.382652 + }, + { + "epoch": 2.74688, + "grad_norm": 0.689616165746984, + "learning_rate": 9.668917212415419e-08, + "loss": 0.338050901889801, + "memory(GiB)": 77.0, + "step": 8584, + "token_acc": 0.8295088998498821, + "train_speed(iter/s)": 0.382623 + }, + { + "epoch": 2.7472, + "grad_norm": 0.7272143588420485, + "learning_rate": 9.644643658699854e-08, + "loss": 0.281160831451416, + "memory(GiB)": 77.0, + "step": 8585, + "token_acc": 0.9236504622725917, + "train_speed(iter/s)": 0.382594 + }, + { + "epoch": 2.7475199999999997, + "grad_norm": 0.666246564292385, + "learning_rate": 9.62040001303885e-08, + "loss": 0.2712739408016205, + "memory(GiB)": 77.0, + "step": 8586, + "token_acc": 0.9388145315487572, + "train_speed(iter/s)": 0.382569 + }, + { + "epoch": 2.74784, + "grad_norm": 0.7136452838221161, + "learning_rate": 9.596186278449077e-08, + "loss": 0.36241328716278076, + "memory(GiB)": 77.0, + "step": 8587, + "token_acc": 0.8737488626023658, + "train_speed(iter/s)": 0.382542 + }, + { + "epoch": 2.74816, + "grad_norm": 0.7561639581956318, + "learning_rate": 9.572002457943513e-08, + "loss": 0.25739574432373047, + "memory(GiB)": 77.0, + "step": 8588, + "token_acc": 0.9613003095975232, + "train_speed(iter/s)": 0.38252 + }, + { + "epoch": 2.74848, + "grad_norm": 0.7234652681122653, + "learning_rate": 9.547848554531474e-08, + "loss": 0.29728055000305176, + "memory(GiB)": 77.0, + "step": 8589, + "token_acc": 0.9325681492109039, + "train_speed(iter/s)": 0.382493 + }, + { + "epoch": 2.7488, + "grad_norm": 0.6957966539511312, + "learning_rate": 9.523724571218473e-08, + "loss": 0.3195066452026367, + "memory(GiB)": 77.0, + "step": 8590, + "token_acc": 0.8548387096774194, + "train_speed(iter/s)": 0.382471 + }, + { + "epoch": 2.74912, + "grad_norm": 0.8035208161718634, + "learning_rate": 9.499630511006303e-08, + "loss": 0.36882394552230835, + "memory(GiB)": 77.0, + "step": 8591, + "token_acc": 0.8242242242242243, + "train_speed(iter/s)": 0.382445 + }, + { + "epoch": 2.74944, + "grad_norm": 0.6762597638698444, + "learning_rate": 9.475566376893036e-08, + "loss": 0.22372321784496307, + "memory(GiB)": 77.0, + "step": 8592, + "token_acc": 0.9614636045153756, + "train_speed(iter/s)": 0.382424 + }, + { + "epoch": 2.74976, + "grad_norm": 0.6461186972872637, + "learning_rate": 9.451532171873113e-08, + "loss": 0.2855011224746704, + "memory(GiB)": 77.0, + "step": 8593, + "token_acc": 0.9479692062649323, + "train_speed(iter/s)": 0.382395 + }, + { + "epoch": 2.75008, + "grad_norm": 0.6631095420067681, + "learning_rate": 9.427527898937083e-08, + "loss": 0.2052839696407318, + "memory(GiB)": 77.0, + "step": 8594, + "token_acc": 
0.9265569917743831, + "train_speed(iter/s)": 0.382371 + }, + { + "epoch": 2.7504, + "grad_norm": 0.800997192826819, + "learning_rate": 9.403553561071894e-08, + "loss": 0.24611707031726837, + "memory(GiB)": 77.0, + "step": 8595, + "token_acc": 0.9408307210031348, + "train_speed(iter/s)": 0.382344 + }, + { + "epoch": 2.7507200000000003, + "grad_norm": 0.7833710724616446, + "learning_rate": 9.379609161260738e-08, + "loss": 0.31036376953125, + "memory(GiB)": 77.0, + "step": 8596, + "token_acc": 0.8843660876328753, + "train_speed(iter/s)": 0.382323 + }, + { + "epoch": 2.75104, + "grad_norm": 0.7541350222289169, + "learning_rate": 9.355694702483098e-08, + "loss": 0.4220927059650421, + "memory(GiB)": 77.0, + "step": 8597, + "token_acc": 0.8660595290981786, + "train_speed(iter/s)": 0.382298 + }, + { + "epoch": 2.75136, + "grad_norm": 0.6982774338861808, + "learning_rate": 9.331810187714674e-08, + "loss": 0.2928312420845032, + "memory(GiB)": 77.0, + "step": 8598, + "token_acc": 0.8892487046632125, + "train_speed(iter/s)": 0.382272 + }, + { + "epoch": 2.75168, + "grad_norm": 0.7323498445678389, + "learning_rate": 9.307955619927505e-08, + "loss": 0.3219744563102722, + "memory(GiB)": 77.0, + "step": 8599, + "token_acc": 0.8437643810400368, + "train_speed(iter/s)": 0.382249 + }, + { + "epoch": 2.752, + "grad_norm": 0.7733078727581691, + "learning_rate": 9.284131002089886e-08, + "loss": 0.27356451749801636, + "memory(GiB)": 77.0, + "step": 8600, + "token_acc": 0.9325874928284567, + "train_speed(iter/s)": 0.382225 + }, + { + "epoch": 2.75232, + "grad_norm": 0.7560239278727733, + "learning_rate": 9.260336337166386e-08, + "loss": 0.34779924154281616, + "memory(GiB)": 77.0, + "step": 8601, + "token_acc": 0.8499031633311814, + "train_speed(iter/s)": 0.382199 + }, + { + "epoch": 2.75264, + "grad_norm": 0.7630413510495058, + "learning_rate": 9.236571628117808e-08, + "loss": 0.30635666847229004, + "memory(GiB)": 77.0, + "step": 8602, + "token_acc": 0.8725361366622865, + "train_speed(iter/s)": 0.382173 + }, + { + "epoch": 2.75296, + "grad_norm": 0.6908207269372576, + "learning_rate": 9.212836877901255e-08, + "loss": 0.2628541588783264, + "memory(GiB)": 77.0, + "step": 8603, + "token_acc": 0.9623549391452024, + "train_speed(iter/s)": 0.382151 + }, + { + "epoch": 2.75328, + "grad_norm": 0.712924737543602, + "learning_rate": 9.189132089470115e-08, + "loss": 0.32641616463661194, + "memory(GiB)": 77.0, + "step": 8604, + "token_acc": 0.8435326842837274, + "train_speed(iter/s)": 0.382116 + }, + { + "epoch": 2.7536, + "grad_norm": 0.7599675076863657, + "learning_rate": 9.16545726577403e-08, + "loss": 0.31462061405181885, + "memory(GiB)": 77.0, + "step": 8605, + "token_acc": 0.9356897398134512, + "train_speed(iter/s)": 0.382093 + }, + { + "epoch": 2.75392, + "grad_norm": 0.6966759227054896, + "learning_rate": 9.141812409758949e-08, + "loss": 0.19208115339279175, + "memory(GiB)": 77.0, + "step": 8606, + "token_acc": 0.942285929837797, + "train_speed(iter/s)": 0.382071 + }, + { + "epoch": 2.7542400000000002, + "grad_norm": 0.6272498126364688, + "learning_rate": 9.1181975243671e-08, + "loss": 0.2695957124233246, + "memory(GiB)": 77.0, + "step": 8607, + "token_acc": 0.8888688760806917, + "train_speed(iter/s)": 0.382045 + }, + { + "epoch": 2.75456, + "grad_norm": 0.7691192386538513, + "learning_rate": 9.094612612536857e-08, + "loss": 0.3624827265739441, + "memory(GiB)": 77.0, + "step": 8608, + "token_acc": 0.8893175789727514, + "train_speed(iter/s)": 0.382021 + }, + { + "epoch": 2.75488, + "grad_norm": 0.726768373818495, + 
"learning_rate": 9.071057677202983e-08, + "loss": 0.3442375659942627, + "memory(GiB)": 77.0, + "step": 8609, + "token_acc": 0.9470198675496688, + "train_speed(iter/s)": 0.381996 + }, + { + "epoch": 2.7552, + "grad_norm": 0.757721134062687, + "learning_rate": 9.047532721296493e-08, + "loss": 0.25843819975852966, + "memory(GiB)": 77.0, + "step": 8610, + "token_acc": 0.9373937677053824, + "train_speed(iter/s)": 0.381947 + }, + { + "epoch": 2.7555199999999997, + "grad_norm": 0.6776320836442805, + "learning_rate": 9.024037747744657e-08, + "loss": 0.2703903913497925, + "memory(GiB)": 77.0, + "step": 8611, + "token_acc": 0.9159814086521273, + "train_speed(iter/s)": 0.38192 + }, + { + "epoch": 2.75584, + "grad_norm": 0.7518771515233738, + "learning_rate": 9.000572759470999e-08, + "loss": 0.3433109223842621, + "memory(GiB)": 77.0, + "step": 8612, + "token_acc": 0.8882421420256111, + "train_speed(iter/s)": 0.381898 + }, + { + "epoch": 2.75616, + "grad_norm": 0.6780644276389532, + "learning_rate": 8.977137759395349e-08, + "loss": 0.3810996115207672, + "memory(GiB)": 77.0, + "step": 8613, + "token_acc": 0.8594905505341003, + "train_speed(iter/s)": 0.381871 + }, + { + "epoch": 2.75648, + "grad_norm": 0.9646644390700674, + "learning_rate": 8.953732750433818e-08, + "loss": 0.29363811016082764, + "memory(GiB)": 77.0, + "step": 8614, + "token_acc": 0.8880153738644304, + "train_speed(iter/s)": 0.381846 + }, + { + "epoch": 2.7568, + "grad_norm": 0.7893252925368461, + "learning_rate": 8.930357735498691e-08, + "loss": 0.31862741708755493, + "memory(GiB)": 77.0, + "step": 8615, + "token_acc": 0.9301953250080052, + "train_speed(iter/s)": 0.381824 + }, + { + "epoch": 2.75712, + "grad_norm": 0.7497103757412262, + "learning_rate": 8.907012717498609e-08, + "loss": 0.2878560721874237, + "memory(GiB)": 77.0, + "step": 8616, + "token_acc": 0.9077628186765969, + "train_speed(iter/s)": 0.381783 + }, + { + "epoch": 2.75744, + "grad_norm": 0.7855733976339315, + "learning_rate": 8.883697699338445e-08, + "loss": 0.3579648733139038, + "memory(GiB)": 77.0, + "step": 8617, + "token_acc": 0.8906955736224029, + "train_speed(iter/s)": 0.381759 + }, + { + "epoch": 2.75776, + "grad_norm": 0.6907994237483914, + "learning_rate": 8.860412683919379e-08, + "loss": 0.3643833100795746, + "memory(GiB)": 77.0, + "step": 8618, + "token_acc": 0.9319235225955967, + "train_speed(iter/s)": 0.381733 + }, + { + "epoch": 2.75808, + "grad_norm": 0.7063560245753956, + "learning_rate": 8.837157674138813e-08, + "loss": 0.25346192717552185, + "memory(GiB)": 77.0, + "step": 8619, + "token_acc": 0.915406162464986, + "train_speed(iter/s)": 0.381709 + }, + { + "epoch": 2.7584, + "grad_norm": 0.784470106090287, + "learning_rate": 8.813932672890408e-08, + "loss": 0.29300570487976074, + "memory(GiB)": 77.0, + "step": 8620, + "token_acc": 0.8384737678855326, + "train_speed(iter/s)": 0.381685 + }, + { + "epoch": 2.75872, + "grad_norm": 0.7931152019087033, + "learning_rate": 8.790737683064127e-08, + "loss": 0.3141534924507141, + "memory(GiB)": 77.0, + "step": 8621, + "token_acc": 0.8935108153078203, + "train_speed(iter/s)": 0.381662 + }, + { + "epoch": 2.75904, + "grad_norm": 0.7278326775772046, + "learning_rate": 8.76757270754619e-08, + "loss": 0.3618714213371277, + "memory(GiB)": 77.0, + "step": 8622, + "token_acc": 0.8583019414662417, + "train_speed(iter/s)": 0.381636 + }, + { + "epoch": 2.75936, + "grad_norm": 0.7564281313159664, + "learning_rate": 8.744437749219098e-08, + "loss": 0.4209253787994385, + "memory(GiB)": 77.0, + "step": 8623, + "token_acc": 
0.9449388209121246, + "train_speed(iter/s)": 0.38161 + }, + { + "epoch": 2.75968, + "grad_norm": 0.6886142962574916, + "learning_rate": 8.721332810961547e-08, + "loss": 0.2975091338157654, + "memory(GiB)": 77.0, + "step": 8624, + "token_acc": 0.874323279195669, + "train_speed(iter/s)": 0.381585 + }, + { + "epoch": 2.76, + "grad_norm": 0.6568913599809741, + "learning_rate": 8.698257895648571e-08, + "loss": 0.36798179149627686, + "memory(GiB)": 77.0, + "step": 8625, + "token_acc": 0.8728461081402258, + "train_speed(iter/s)": 0.381556 + }, + { + "epoch": 2.76032, + "grad_norm": 0.6939215646776725, + "learning_rate": 8.675213006151429e-08, + "loss": 0.3515787124633789, + "memory(GiB)": 77.0, + "step": 8626, + "token_acc": 0.8449296062028158, + "train_speed(iter/s)": 0.381529 + }, + { + "epoch": 2.76064, + "grad_norm": 0.7458502842056829, + "learning_rate": 8.652198145337687e-08, + "loss": 0.3125171363353729, + "memory(GiB)": 77.0, + "step": 8627, + "token_acc": 0.8520012911555842, + "train_speed(iter/s)": 0.381505 + }, + { + "epoch": 2.76096, + "grad_norm": 0.649342569074369, + "learning_rate": 8.629213316071112e-08, + "loss": 0.2985667586326599, + "memory(GiB)": 77.0, + "step": 8628, + "token_acc": 0.901093675701379, + "train_speed(iter/s)": 0.38148 + }, + { + "epoch": 2.76128, + "grad_norm": 0.6404832494322974, + "learning_rate": 8.606258521211803e-08, + "loss": 0.26272910833358765, + "memory(GiB)": 77.0, + "step": 8629, + "token_acc": 0.9077717658632563, + "train_speed(iter/s)": 0.381448 + }, + { + "epoch": 2.7616, + "grad_norm": 0.8490189570197283, + "learning_rate": 8.583333763616031e-08, + "loss": 0.291023313999176, + "memory(GiB)": 77.0, + "step": 8630, + "token_acc": 0.9584915897992403, + "train_speed(iter/s)": 0.381424 + }, + { + "epoch": 2.76192, + "grad_norm": 0.742515882938702, + "learning_rate": 8.56043904613646e-08, + "loss": 0.29913192987442017, + "memory(GiB)": 77.0, + "step": 8631, + "token_acc": 0.8727366787377134, + "train_speed(iter/s)": 0.381397 + }, + { + "epoch": 2.7622400000000003, + "grad_norm": 0.726581141337341, + "learning_rate": 8.537574371621865e-08, + "loss": 0.30637702345848083, + "memory(GiB)": 77.0, + "step": 8632, + "token_acc": 0.8968717772430388, + "train_speed(iter/s)": 0.381376 + }, + { + "epoch": 2.76256, + "grad_norm": 0.7027780422417378, + "learning_rate": 8.514739742917416e-08, + "loss": 0.2940831780433655, + "memory(GiB)": 77.0, + "step": 8633, + "token_acc": 0.9251600196947316, + "train_speed(iter/s)": 0.381353 + }, + { + "epoch": 2.76288, + "grad_norm": 0.7489175146023107, + "learning_rate": 8.49193516286445e-08, + "loss": 0.3180577754974365, + "memory(GiB)": 77.0, + "step": 8634, + "token_acc": 0.9580888516345348, + "train_speed(iter/s)": 0.381329 + }, + { + "epoch": 2.7632, + "grad_norm": 0.7142115515510983, + "learning_rate": 8.469160634300617e-08, + "loss": 0.33989787101745605, + "memory(GiB)": 77.0, + "step": 8635, + "token_acc": 0.970336189848385, + "train_speed(iter/s)": 0.381307 + }, + { + "epoch": 2.7635199999999998, + "grad_norm": 0.7853038651976282, + "learning_rate": 8.446416160059812e-08, + "loss": 0.36887767910957336, + "memory(GiB)": 77.0, + "step": 8636, + "token_acc": 0.9192100538599641, + "train_speed(iter/s)": 0.381284 + }, + { + "epoch": 2.76384, + "grad_norm": 0.7706888388363191, + "learning_rate": 8.42370174297219e-08, + "loss": 0.34378135204315186, + "memory(GiB)": 77.0, + "step": 8637, + "token_acc": 0.915152908714647, + "train_speed(iter/s)": 0.381261 + }, + { + "epoch": 2.76416, + "grad_norm": 0.6719362931830221, + 
"learning_rate": 8.401017385864157e-08, + "loss": 0.27824652194976807, + "memory(GiB)": 77.0, + "step": 8638, + "token_acc": 0.9359016937894388, + "train_speed(iter/s)": 0.381235 + }, + { + "epoch": 2.76448, + "grad_norm": 0.7083631293706159, + "learning_rate": 8.3783630915584e-08, + "loss": 0.24577701091766357, + "memory(GiB)": 77.0, + "step": 8639, + "token_acc": 0.9680715197956578, + "train_speed(iter/s)": 0.381211 + }, + { + "epoch": 2.7648, + "grad_norm": 0.7328026482385327, + "learning_rate": 8.35573886287383e-08, + "loss": 0.2933014929294586, + "memory(GiB)": 77.0, + "step": 8640, + "token_acc": 0.9236413043478261, + "train_speed(iter/s)": 0.381186 + }, + { + "epoch": 2.76512, + "grad_norm": 0.6885311581565257, + "learning_rate": 8.333144702625695e-08, + "loss": 0.21517717838287354, + "memory(GiB)": 77.0, + "step": 8641, + "token_acc": 0.942394578313253, + "train_speed(iter/s)": 0.381163 + }, + { + "epoch": 2.76544, + "grad_norm": 0.7630445272375002, + "learning_rate": 8.310580613625385e-08, + "loss": 0.30215948820114136, + "memory(GiB)": 77.0, + "step": 8642, + "token_acc": 0.9062814914343299, + "train_speed(iter/s)": 0.381137 + }, + { + "epoch": 2.76576, + "grad_norm": 0.7995927452674704, + "learning_rate": 8.288046598680627e-08, + "loss": 0.33619171380996704, + "memory(GiB)": 77.0, + "step": 8643, + "token_acc": 0.9011709601873537, + "train_speed(iter/s)": 0.381115 + }, + { + "epoch": 2.76608, + "grad_norm": 0.7109476433364236, + "learning_rate": 8.2655426605954e-08, + "loss": 0.28763335943222046, + "memory(GiB)": 77.0, + "step": 8644, + "token_acc": 0.9247434435575826, + "train_speed(iter/s)": 0.38109 + }, + { + "epoch": 2.7664, + "grad_norm": 0.8190673160097077, + "learning_rate": 8.243068802169906e-08, + "loss": 0.42516541481018066, + "memory(GiB)": 77.0, + "step": 8645, + "token_acc": 0.8066049537152865, + "train_speed(iter/s)": 0.381065 + }, + { + "epoch": 2.76672, + "grad_norm": 0.8152730228290984, + "learning_rate": 8.220625026200662e-08, + "loss": 0.27573299407958984, + "memory(GiB)": 77.0, + "step": 8646, + "token_acc": 0.9014292755051749, + "train_speed(iter/s)": 0.381043 + }, + { + "epoch": 2.76704, + "grad_norm": 0.7689296321039973, + "learning_rate": 8.198211335480405e-08, + "loss": 0.2875119745731354, + "memory(GiB)": 77.0, + "step": 8647, + "token_acc": 0.9635083226632523, + "train_speed(iter/s)": 0.381021 + }, + { + "epoch": 2.76736, + "grad_norm": 0.7437035947801904, + "learning_rate": 8.175827732798097e-08, + "loss": 0.3150236904621124, + "memory(GiB)": 77.0, + "step": 8648, + "token_acc": 0.8954703832752613, + "train_speed(iter/s)": 0.380998 + }, + { + "epoch": 2.76768, + "grad_norm": 0.7302168922068728, + "learning_rate": 8.153474220938984e-08, + "loss": 0.2609986662864685, + "memory(GiB)": 77.0, + "step": 8649, + "token_acc": 0.9202551834130781, + "train_speed(iter/s)": 0.380975 + }, + { + "epoch": 2.768, + "grad_norm": 0.8361511729490964, + "learning_rate": 8.131150802684645e-08, + "loss": 0.26980653405189514, + "memory(GiB)": 77.0, + "step": 8650, + "token_acc": 0.9014619883040935, + "train_speed(iter/s)": 0.380951 + }, + { + "epoch": 2.76832, + "grad_norm": 0.7442086313575255, + "learning_rate": 8.108857480812749e-08, + "loss": 0.3388967514038086, + "memory(GiB)": 77.0, + "step": 8651, + "token_acc": 0.9278732726659926, + "train_speed(iter/s)": 0.380924 + }, + { + "epoch": 2.76864, + "grad_norm": 0.6965584786871739, + "learning_rate": 8.086594258097352e-08, + "loss": 0.23909007012844086, + "memory(GiB)": 77.0, + "step": 8652, + "token_acc": 
0.9432624113475178, + "train_speed(iter/s)": 0.380902 + }, + { + "epoch": 2.76896, + "grad_norm": 0.7337449319329622, + "learning_rate": 8.064361137308741e-08, + "loss": 0.3026151657104492, + "memory(GiB)": 77.0, + "step": 8653, + "token_acc": 0.9207971384772611, + "train_speed(iter/s)": 0.380875 + }, + { + "epoch": 2.76928, + "grad_norm": 0.6822717443814484, + "learning_rate": 8.042158121213422e-08, + "loss": 0.31250980496406555, + "memory(GiB)": 77.0, + "step": 8654, + "token_acc": 0.9359286293592863, + "train_speed(iter/s)": 0.380847 + }, + { + "epoch": 2.7696, + "grad_norm": 0.7746215980471075, + "learning_rate": 8.019985212574216e-08, + "loss": 0.28735411167144775, + "memory(GiB)": 77.0, + "step": 8655, + "token_acc": 0.9497621878715814, + "train_speed(iter/s)": 0.380826 + }, + { + "epoch": 2.76992, + "grad_norm": 0.6513072698806552, + "learning_rate": 7.99784241415008e-08, + "loss": 0.21386593580245972, + "memory(GiB)": 77.0, + "step": 8656, + "token_acc": 0.9587471352177235, + "train_speed(iter/s)": 0.3808 + }, + { + "epoch": 2.7702400000000003, + "grad_norm": 0.6930496655345384, + "learning_rate": 7.975729728696368e-08, + "loss": 0.30433738231658936, + "memory(GiB)": 77.0, + "step": 8657, + "token_acc": 0.8919372900335947, + "train_speed(iter/s)": 0.380772 + }, + { + "epoch": 2.77056, + "grad_norm": 0.7372911746099707, + "learning_rate": 7.9536471589646e-08, + "loss": 0.31618520617485046, + "memory(GiB)": 77.0, + "step": 8658, + "token_acc": 0.9695678636640293, + "train_speed(iter/s)": 0.380749 + }, + { + "epoch": 2.77088, + "grad_norm": 0.7101487173860654, + "learning_rate": 7.931594707702577e-08, + "loss": 0.3523734211921692, + "memory(GiB)": 77.0, + "step": 8659, + "token_acc": 0.8687565308254963, + "train_speed(iter/s)": 0.380722 + }, + { + "epoch": 2.7712, + "grad_norm": 0.7853373459812892, + "learning_rate": 7.909572377654329e-08, + "loss": 0.2785452604293823, + "memory(GiB)": 77.0, + "step": 8660, + "token_acc": 0.9394852135815991, + "train_speed(iter/s)": 0.380699 + }, + { + "epoch": 2.7715199999999998, + "grad_norm": 0.7173201702394963, + "learning_rate": 7.887580171560132e-08, + "loss": 0.255728542804718, + "memory(GiB)": 77.0, + "step": 8661, + "token_acc": 0.9429530201342282, + "train_speed(iter/s)": 0.380675 + }, + { + "epoch": 2.77184, + "grad_norm": 0.6742283342922553, + "learning_rate": 7.865618092156579e-08, + "loss": 0.24322189390659332, + "memory(GiB)": 77.0, + "step": 8662, + "token_acc": 0.8992824873770927, + "train_speed(iter/s)": 0.380652 + }, + { + "epoch": 2.77216, + "grad_norm": 0.662988683528311, + "learning_rate": 7.843686142176455e-08, + "loss": 0.22598318755626678, + "memory(GiB)": 77.0, + "step": 8663, + "token_acc": 0.8896478121664888, + "train_speed(iter/s)": 0.380629 + }, + { + "epoch": 2.77248, + "grad_norm": 0.6873577472831539, + "learning_rate": 7.821784324348824e-08, + "loss": 0.33569782972335815, + "memory(GiB)": 77.0, + "step": 8664, + "token_acc": 0.8740157480314961, + "train_speed(iter/s)": 0.380606 + }, + { + "epoch": 2.7728, + "grad_norm": 0.8266350677526428, + "learning_rate": 7.799912641398954e-08, + "loss": 0.3396875560283661, + "memory(GiB)": 77.0, + "step": 8665, + "token_acc": 0.9494753206373883, + "train_speed(iter/s)": 0.380585 + }, + { + "epoch": 2.77312, + "grad_norm": 0.682038691795817, + "learning_rate": 7.77807109604839e-08, + "loss": 0.348958820104599, + "memory(GiB)": 77.0, + "step": 8666, + "token_acc": 0.8920437740313517, + "train_speed(iter/s)": 0.380561 + }, + { + "epoch": 2.77344, + "grad_norm": 0.7585982050607619, + 
"learning_rate": 7.756259691014984e-08, + "loss": 0.2939075231552124, + "memory(GiB)": 77.0, + "step": 8667, + "token_acc": 0.9509735744089013, + "train_speed(iter/s)": 0.380534 + }, + { + "epoch": 2.7737600000000002, + "grad_norm": 0.7483044199214307, + "learning_rate": 7.734478429012704e-08, + "loss": 0.28582271933555603, + "memory(GiB)": 77.0, + "step": 8668, + "token_acc": 0.9501140250855188, + "train_speed(iter/s)": 0.380513 + }, + { + "epoch": 2.77408, + "grad_norm": 0.7382183233101639, + "learning_rate": 7.712727312751939e-08, + "loss": 0.2865055799484253, + "memory(GiB)": 77.0, + "step": 8669, + "token_acc": 0.9056603773584906, + "train_speed(iter/s)": 0.38049 + }, + { + "epoch": 2.7744, + "grad_norm": 0.7370150324475808, + "learning_rate": 7.691006344939217e-08, + "loss": 0.36121296882629395, + "memory(GiB)": 77.0, + "step": 8670, + "token_acc": 0.9035738368172623, + "train_speed(iter/s)": 0.380464 + }, + { + "epoch": 2.77472, + "grad_norm": 0.7622573950940549, + "learning_rate": 7.669315528277322e-08, + "loss": 0.3249049782752991, + "memory(GiB)": 77.0, + "step": 8671, + "token_acc": 0.8932140978432404, + "train_speed(iter/s)": 0.380439 + }, + { + "epoch": 2.7750399999999997, + "grad_norm": 0.6927115929113774, + "learning_rate": 7.647654865465287e-08, + "loss": 0.31167855858802795, + "memory(GiB)": 77.0, + "step": 8672, + "token_acc": 0.9294496487119438, + "train_speed(iter/s)": 0.380411 + }, + { + "epoch": 2.77536, + "grad_norm": 0.7344683824714663, + "learning_rate": 7.62602435919843e-08, + "loss": 0.24603800475597382, + "memory(GiB)": 77.0, + "step": 8673, + "token_acc": 0.9085825431558473, + "train_speed(iter/s)": 0.380388 + }, + { + "epoch": 2.77568, + "grad_norm": 0.7602388360079594, + "learning_rate": 7.604424012168266e-08, + "loss": 0.2740634083747864, + "memory(GiB)": 77.0, + "step": 8674, + "token_acc": 0.8450130851991858, + "train_speed(iter/s)": 0.380366 + }, + { + "epoch": 2.776, + "grad_norm": 0.6067256951912177, + "learning_rate": 7.582853827062614e-08, + "loss": 0.22890213131904602, + "memory(GiB)": 77.0, + "step": 8675, + "token_acc": 0.9055710306406686, + "train_speed(iter/s)": 0.380339 + }, + { + "epoch": 2.77632, + "grad_norm": 0.6850397245892667, + "learning_rate": 7.561313806565495e-08, + "loss": 0.3058319091796875, + "memory(GiB)": 77.0, + "step": 8676, + "token_acc": 0.8851534767174776, + "train_speed(iter/s)": 0.38031 + }, + { + "epoch": 2.77664, + "grad_norm": 0.9037727713181803, + "learning_rate": 7.539803953357183e-08, + "loss": 0.2971982955932617, + "memory(GiB)": 77.0, + "step": 8677, + "token_acc": 0.942294402211472, + "train_speed(iter/s)": 0.380286 + }, + { + "epoch": 2.77696, + "grad_norm": 0.77405018507981, + "learning_rate": 7.51832427011423e-08, + "loss": 0.3326225280761719, + "memory(GiB)": 77.0, + "step": 8678, + "token_acc": 0.8890274314214464, + "train_speed(iter/s)": 0.380265 + }, + { + "epoch": 2.77728, + "grad_norm": 0.6791654691183343, + "learning_rate": 7.496874759509387e-08, + "loss": 0.289174348115921, + "memory(GiB)": 77.0, + "step": 8679, + "token_acc": 0.9572049481778669, + "train_speed(iter/s)": 0.380242 + }, + { + "epoch": 2.7776, + "grad_norm": 0.7145802620499907, + "learning_rate": 7.475455424211686e-08, + "loss": 0.26380443572998047, + "memory(GiB)": 77.0, + "step": 8680, + "token_acc": 0.9290805416963649, + "train_speed(iter/s)": 0.380218 + }, + { + "epoch": 2.77792, + "grad_norm": 0.7277094641972243, + "learning_rate": 7.454066266886384e-08, + "loss": 0.3661805987358093, + "memory(GiB)": 77.0, + "step": 8681, + 
"token_acc": 0.895549266786725, + "train_speed(iter/s)": 0.38019 + }, + { + "epoch": 2.7782400000000003, + "grad_norm": 0.7594293317884678, + "learning_rate": 7.432707290194962e-08, + "loss": 0.2763228118419647, + "memory(GiB)": 77.0, + "step": 8682, + "token_acc": 0.9354587869362364, + "train_speed(iter/s)": 0.380167 + }, + { + "epoch": 2.77856, + "grad_norm": 0.6983615587738706, + "learning_rate": 7.41137849679524e-08, + "loss": 0.4269765615463257, + "memory(GiB)": 77.0, + "step": 8683, + "token_acc": 0.9118150684931506, + "train_speed(iter/s)": 0.380141 + }, + { + "epoch": 2.77888, + "grad_norm": 0.7198069005607981, + "learning_rate": 7.390079889341123e-08, + "loss": 0.3267989456653595, + "memory(GiB)": 77.0, + "step": 8684, + "token_acc": 0.9609494640122511, + "train_speed(iter/s)": 0.380117 + }, + { + "epoch": 2.7792, + "grad_norm": 0.7680930922916787, + "learning_rate": 7.368811470482962e-08, + "loss": 0.3415033221244812, + "memory(GiB)": 77.0, + "step": 8685, + "token_acc": 0.9374207188160677, + "train_speed(iter/s)": 0.380091 + }, + { + "epoch": 2.7795199999999998, + "grad_norm": 0.777405523548366, + "learning_rate": 7.347573242867167e-08, + "loss": 0.33557194471359253, + "memory(GiB)": 77.0, + "step": 8686, + "token_acc": 0.8822525597269625, + "train_speed(iter/s)": 0.380068 + }, + { + "epoch": 2.77984, + "grad_norm": 0.7355339174327945, + "learning_rate": 7.326365209136515e-08, + "loss": 0.305484414100647, + "memory(GiB)": 77.0, + "step": 8687, + "token_acc": 0.8937360178970917, + "train_speed(iter/s)": 0.380044 + }, + { + "epoch": 2.78016, + "grad_norm": 0.6136081222564975, + "learning_rate": 7.305187371929923e-08, + "loss": 0.22212108969688416, + "memory(GiB)": 77.0, + "step": 8688, + "token_acc": 0.9438463776549316, + "train_speed(iter/s)": 0.38002 + }, + { + "epoch": 2.78048, + "grad_norm": 0.9080490298863813, + "learning_rate": 7.28403973388267e-08, + "loss": 0.3088254928588867, + "memory(GiB)": 77.0, + "step": 8689, + "token_acc": 0.9130258899676376, + "train_speed(iter/s)": 0.379999 + }, + { + "epoch": 2.7808, + "grad_norm": 0.7073367898979405, + "learning_rate": 7.262922297626151e-08, + "loss": 0.28461527824401855, + "memory(GiB)": 77.0, + "step": 8690, + "token_acc": 0.9175150633700395, + "train_speed(iter/s)": 0.379975 + }, + { + "epoch": 2.78112, + "grad_norm": 0.7085132611713423, + "learning_rate": 7.241835065788127e-08, + "loss": 0.3210218548774719, + "memory(GiB)": 77.0, + "step": 8691, + "token_acc": 0.9471484025082114, + "train_speed(iter/s)": 0.379951 + }, + { + "epoch": 2.78144, + "grad_norm": 0.7666877389161125, + "learning_rate": 7.220778040992471e-08, + "loss": 0.3626354932785034, + "memory(GiB)": 77.0, + "step": 8692, + "token_acc": 0.8839479392624728, + "train_speed(iter/s)": 0.379928 + }, + { + "epoch": 2.7817600000000002, + "grad_norm": 0.8020101788455289, + "learning_rate": 7.199751225859447e-08, + "loss": 0.2649807333946228, + "memory(GiB)": 77.0, + "step": 8693, + "token_acc": 0.9332559489262914, + "train_speed(iter/s)": 0.379906 + }, + { + "epoch": 2.78208, + "grad_norm": 0.7416840622310137, + "learning_rate": 7.178754623005407e-08, + "loss": 0.36415427923202515, + "memory(GiB)": 77.0, + "step": 8694, + "token_acc": 0.900592325521504, + "train_speed(iter/s)": 0.379877 + }, + { + "epoch": 2.7824, + "grad_norm": 0.7161311933807663, + "learning_rate": 7.157788235043012e-08, + "loss": 0.2786153554916382, + "memory(GiB)": 77.0, + "step": 8695, + "token_acc": 0.9135554998747181, + "train_speed(iter/s)": 0.379839 + }, + { + "epoch": 2.78272, + "grad_norm": 
0.7451304272163749, + "learning_rate": 7.136852064581201e-08, + "loss": 0.29388779401779175, + "memory(GiB)": 77.0, + "step": 8696, + "token_acc": 0.9129501550202719, + "train_speed(iter/s)": 0.379816 + }, + { + "epoch": 2.7830399999999997, + "grad_norm": 0.6810405501354371, + "learning_rate": 7.115946114225086e-08, + "loss": 0.22588559985160828, + "memory(GiB)": 77.0, + "step": 8697, + "token_acc": 0.9576045063741476, + "train_speed(iter/s)": 0.379791 + }, + { + "epoch": 2.78336, + "grad_norm": 0.6502247615079337, + "learning_rate": 7.095070386576086e-08, + "loss": 0.35051512718200684, + "memory(GiB)": 77.0, + "step": 8698, + "token_acc": 0.8688036433701174, + "train_speed(iter/s)": 0.379763 + }, + { + "epoch": 2.78368, + "grad_norm": 0.6725462905227666, + "learning_rate": 7.074224884231818e-08, + "loss": 0.320797324180603, + "memory(GiB)": 77.0, + "step": 8699, + "token_acc": 0.9085165529441496, + "train_speed(iter/s)": 0.379738 + }, + { + "epoch": 2.784, + "grad_norm": 0.7051641185510338, + "learning_rate": 7.053409609786122e-08, + "loss": 0.33237144351005554, + "memory(GiB)": 77.0, + "step": 8700, + "token_acc": 0.9176192353173039, + "train_speed(iter/s)": 0.379712 + }, + { + "epoch": 2.78432, + "grad_norm": 0.7440182288585918, + "learning_rate": 7.032624565829065e-08, + "loss": 0.24385058879852295, + "memory(GiB)": 77.0, + "step": 8701, + "token_acc": 0.9341670599339311, + "train_speed(iter/s)": 0.379688 + }, + { + "epoch": 2.78464, + "grad_norm": 0.7677315984951127, + "learning_rate": 7.011869754947054e-08, + "loss": 0.3238465189933777, + "memory(GiB)": 77.0, + "step": 8702, + "token_acc": 0.8458868448455398, + "train_speed(iter/s)": 0.379665 + }, + { + "epoch": 2.78496, + "grad_norm": 0.7392084702848469, + "learning_rate": 6.991145179722658e-08, + "loss": 0.3062781095504761, + "memory(GiB)": 77.0, + "step": 8703, + "token_acc": 0.9179304192685103, + "train_speed(iter/s)": 0.379639 + }, + { + "epoch": 2.78528, + "grad_norm": 0.774840873374646, + "learning_rate": 6.97045084273465e-08, + "loss": 0.3060630261898041, + "memory(GiB)": 77.0, + "step": 8704, + "token_acc": 0.9334489486234554, + "train_speed(iter/s)": 0.379613 + }, + { + "epoch": 2.7856, + "grad_norm": 0.7608190826000888, + "learning_rate": 6.949786746558135e-08, + "loss": 0.30677109956741333, + "memory(GiB)": 77.0, + "step": 8705, + "token_acc": 0.8589478044739023, + "train_speed(iter/s)": 0.379588 + }, + { + "epoch": 2.78592, + "grad_norm": 0.7228148720638613, + "learning_rate": 6.929152893764335e-08, + "loss": 0.2755729854106903, + "memory(GiB)": 77.0, + "step": 8706, + "token_acc": 0.881686149486362, + "train_speed(iter/s)": 0.379565 + }, + { + "epoch": 2.7862400000000003, + "grad_norm": 0.6588596096453484, + "learning_rate": 6.908549286920807e-08, + "loss": 0.19982479512691498, + "memory(GiB)": 77.0, + "step": 8707, + "token_acc": 0.9171369171369171, + "train_speed(iter/s)": 0.379544 + }, + { + "epoch": 2.78656, + "grad_norm": 0.7837705545419116, + "learning_rate": 6.887975928591362e-08, + "loss": 0.29861414432525635, + "memory(GiB)": 77.0, + "step": 8708, + "token_acc": 0.9046563192904656, + "train_speed(iter/s)": 0.379523 + }, + { + "epoch": 2.78688, + "grad_norm": 0.7680263507545877, + "learning_rate": 6.867432821335923e-08, + "loss": 0.28119146823883057, + "memory(GiB)": 77.0, + "step": 8709, + "token_acc": 0.9076393237319975, + "train_speed(iter/s)": 0.379497 + }, + { + "epoch": 2.7872, + "grad_norm": 0.7103567687804725, + "learning_rate": 6.846919967710724e-08, + "loss": 0.27477532625198364, + "memory(GiB)": 77.0, 
+ "step": 8710, + "token_acc": 0.9623980134799575, + "train_speed(iter/s)": 0.379475 + }, + { + "epoch": 2.7875199999999998, + "grad_norm": 0.6921289497142357, + "learning_rate": 6.82643737026828e-08, + "loss": 0.29716068506240845, + "memory(GiB)": 77.0, + "step": 8711, + "token_acc": 0.8741484403011832, + "train_speed(iter/s)": 0.379449 + }, + { + "epoch": 2.78784, + "grad_norm": 0.6889304383514303, + "learning_rate": 6.805985031557271e-08, + "loss": 0.23579086363315582, + "memory(GiB)": 77.0, + "step": 8712, + "token_acc": 0.9329680953915566, + "train_speed(iter/s)": 0.379428 + }, + { + "epoch": 2.78816, + "grad_norm": 0.7286793958430707, + "learning_rate": 6.785562954122638e-08, + "loss": 0.28407180309295654, + "memory(GiB)": 77.0, + "step": 8713, + "token_acc": 0.920125786163522, + "train_speed(iter/s)": 0.379405 + }, + { + "epoch": 2.78848, + "grad_norm": 0.7702936317194028, + "learning_rate": 6.765171140505567e-08, + "loss": 0.33073702454566956, + "memory(GiB)": 77.0, + "step": 8714, + "token_acc": 0.9318681318681319, + "train_speed(iter/s)": 0.379379 + }, + { + "epoch": 2.7888, + "grad_norm": 0.7369575153232015, + "learning_rate": 6.744809593243446e-08, + "loss": 0.296356201171875, + "memory(GiB)": 77.0, + "step": 8715, + "token_acc": 0.9043940125543216, + "train_speed(iter/s)": 0.379356 + }, + { + "epoch": 2.78912, + "grad_norm": 0.7599551750530595, + "learning_rate": 6.724478314869915e-08, + "loss": 0.25625550746917725, + "memory(GiB)": 77.0, + "step": 8716, + "token_acc": 0.9261102603369066, + "train_speed(iter/s)": 0.379332 + }, + { + "epoch": 2.78944, + "grad_norm": 0.7460448735298448, + "learning_rate": 6.704177307914866e-08, + "loss": 0.3702077865600586, + "memory(GiB)": 77.0, + "step": 8717, + "token_acc": 0.8801652892561983, + "train_speed(iter/s)": 0.379309 + }, + { + "epoch": 2.7897600000000002, + "grad_norm": 0.7998276449415502, + "learning_rate": 6.683906574904364e-08, + "loss": 0.3480842709541321, + "memory(GiB)": 77.0, + "step": 8718, + "token_acc": 0.9081893793985925, + "train_speed(iter/s)": 0.379284 + }, + { + "epoch": 2.79008, + "grad_norm": 0.7236944613921972, + "learning_rate": 6.663666118360835e-08, + "loss": 0.31701308488845825, + "memory(GiB)": 77.0, + "step": 8719, + "token_acc": 0.8779559662951889, + "train_speed(iter/s)": 0.379258 + }, + { + "epoch": 2.7904, + "grad_norm": 0.7222036824714638, + "learning_rate": 6.643455940802762e-08, + "loss": 0.2805303931236267, + "memory(GiB)": 77.0, + "step": 8720, + "token_acc": 0.9538167938931298, + "train_speed(iter/s)": 0.379236 + }, + { + "epoch": 2.79072, + "grad_norm": 0.6947622794071818, + "learning_rate": 6.623276044745025e-08, + "loss": 0.41840532422065735, + "memory(GiB)": 77.0, + "step": 8721, + "token_acc": 0.8832895888013998, + "train_speed(iter/s)": 0.379213 + }, + { + "epoch": 2.7910399999999997, + "grad_norm": 0.7328354904383079, + "learning_rate": 6.603126432698642e-08, + "loss": 0.31828010082244873, + "memory(GiB)": 77.0, + "step": 8722, + "token_acc": 0.9398158544801257, + "train_speed(iter/s)": 0.379186 + }, + { + "epoch": 2.79136, + "grad_norm": 0.7556061965758765, + "learning_rate": 6.583007107170857e-08, + "loss": 0.3508182168006897, + "memory(GiB)": 77.0, + "step": 8723, + "token_acc": 0.9065155807365439, + "train_speed(iter/s)": 0.379162 + }, + { + "epoch": 2.79168, + "grad_norm": 0.7048089168345689, + "learning_rate": 6.562918070665169e-08, + "loss": 0.26101210713386536, + "memory(GiB)": 77.0, + "step": 8724, + "token_acc": 0.8957621326042379, + "train_speed(iter/s)": 0.379136 + }, + { + 
"epoch": 2.792, + "grad_norm": 0.7426297372564337, + "learning_rate": 6.542859325681327e-08, + "loss": 0.3040131628513336, + "memory(GiB)": 77.0, + "step": 8725, + "token_acc": 0.9464381360471344, + "train_speed(iter/s)": 0.379112 + }, + { + "epoch": 2.79232, + "grad_norm": 0.747184183795214, + "learning_rate": 6.522830874715308e-08, + "loss": 0.38658854365348816, + "memory(GiB)": 77.0, + "step": 8726, + "token_acc": 0.8901190146692499, + "train_speed(iter/s)": 0.379087 + }, + { + "epoch": 2.79264, + "grad_norm": 0.7369198571161298, + "learning_rate": 6.502832720259284e-08, + "loss": 0.3179304599761963, + "memory(GiB)": 77.0, + "step": 8727, + "token_acc": 0.9154334038054969, + "train_speed(iter/s)": 0.379059 + }, + { + "epoch": 2.79296, + "grad_norm": 0.7051204941668184, + "learning_rate": 6.48286486480168e-08, + "loss": 0.234177827835083, + "memory(GiB)": 77.0, + "step": 8728, + "token_acc": 0.9331958762886597, + "train_speed(iter/s)": 0.379039 + }, + { + "epoch": 2.79328, + "grad_norm": 0.7290917390804146, + "learning_rate": 6.46292731082715e-08, + "loss": 0.3497750759124756, + "memory(GiB)": 77.0, + "step": 8729, + "token_acc": 0.9480275516593613, + "train_speed(iter/s)": 0.379013 + }, + { + "epoch": 2.7936, + "grad_norm": 0.638845392856349, + "learning_rate": 6.443020060816569e-08, + "loss": 0.2933509349822998, + "memory(GiB)": 77.0, + "step": 8730, + "token_acc": 0.8957070707070707, + "train_speed(iter/s)": 0.378976 + }, + { + "epoch": 2.79392, + "grad_norm": 0.6894654285795593, + "learning_rate": 6.423143117247065e-08, + "loss": 0.28776103258132935, + "memory(GiB)": 77.0, + "step": 8731, + "token_acc": 0.9521875, + "train_speed(iter/s)": 0.378951 + }, + { + "epoch": 2.79424, + "grad_norm": 0.766562341193291, + "learning_rate": 6.40329648259197e-08, + "loss": 0.3039444088935852, + "memory(GiB)": 77.0, + "step": 8732, + "token_acc": 0.9491571506253399, + "train_speed(iter/s)": 0.378926 + }, + { + "epoch": 2.79456, + "grad_norm": 0.6848103858816307, + "learning_rate": 6.383480159320888e-08, + "loss": 0.2720380127429962, + "memory(GiB)": 77.0, + "step": 8733, + "token_acc": 0.9269216205982582, + "train_speed(iter/s)": 0.378884 + }, + { + "epoch": 2.79488, + "grad_norm": 0.6982990687055493, + "learning_rate": 6.363694149899541e-08, + "loss": 0.2699205279350281, + "memory(GiB)": 77.0, + "step": 8734, + "token_acc": 0.9019387468389998, + "train_speed(iter/s)": 0.378863 + }, + { + "epoch": 2.7952, + "grad_norm": 0.7120937684263189, + "learning_rate": 6.343938456790017e-08, + "loss": 0.26295486092567444, + "memory(GiB)": 77.0, + "step": 8735, + "token_acc": 0.8992015968063872, + "train_speed(iter/s)": 0.37884 + }, + { + "epoch": 2.79552, + "grad_norm": 0.6704869558045484, + "learning_rate": 6.32421308245057e-08, + "loss": 0.28210026025772095, + "memory(GiB)": 77.0, + "step": 8736, + "token_acc": 0.943610399121201, + "train_speed(iter/s)": 0.378812 + }, + { + "epoch": 2.79584, + "grad_norm": 0.6904987911522615, + "learning_rate": 6.304518029335627e-08, + "loss": 0.29150426387786865, + "memory(GiB)": 77.0, + "step": 8737, + "token_acc": 0.9167327517842981, + "train_speed(iter/s)": 0.378788 + }, + { + "epoch": 2.79616, + "grad_norm": 0.7710034648636523, + "learning_rate": 6.284853299895948e-08, + "loss": 0.31502774357795715, + "memory(GiB)": 77.0, + "step": 8738, + "token_acc": 0.9437125748502994, + "train_speed(iter/s)": 0.378767 + }, + { + "epoch": 2.79648, + "grad_norm": 0.7809108679353743, + "learning_rate": 6.265218896578468e-08, + "loss": 0.3135162889957428, + "memory(GiB)": 77.0, + 
"step": 8739, + "token_acc": 0.8870041039671682, + "train_speed(iter/s)": 0.378743 + }, + { + "epoch": 2.7968, + "grad_norm": 0.738880145362511, + "learning_rate": 6.245614821826313e-08, + "loss": 0.4287949502468109, + "memory(GiB)": 77.0, + "step": 8740, + "token_acc": 0.8020176544766708, + "train_speed(iter/s)": 0.378715 + }, + { + "epoch": 2.79712, + "grad_norm": 0.8210749523699966, + "learning_rate": 6.226041078078893e-08, + "loss": 0.3065287470817566, + "memory(GiB)": 77.0, + "step": 8741, + "token_acc": 0.9652398735995403, + "train_speed(iter/s)": 0.378691 + }, + { + "epoch": 2.79744, + "grad_norm": 0.7380922182004557, + "learning_rate": 6.206497667771788e-08, + "loss": 0.2558039724826813, + "memory(GiB)": 77.0, + "step": 8742, + "token_acc": 0.8864658273381295, + "train_speed(iter/s)": 0.37867 + }, + { + "epoch": 2.7977600000000002, + "grad_norm": 0.736675635959072, + "learning_rate": 6.186984593336886e-08, + "loss": 0.3443447947502136, + "memory(GiB)": 77.0, + "step": 8743, + "token_acc": 0.8609779482262704, + "train_speed(iter/s)": 0.378645 + }, + { + "epoch": 2.79808, + "grad_norm": 0.7242335032882202, + "learning_rate": 6.167501857202218e-08, + "loss": 0.34583422541618347, + "memory(GiB)": 77.0, + "step": 8744, + "token_acc": 0.8876187126275061, + "train_speed(iter/s)": 0.378613 + }, + { + "epoch": 2.7984, + "grad_norm": 0.808898834229451, + "learning_rate": 6.148049461792093e-08, + "loss": 0.2624685764312744, + "memory(GiB)": 77.0, + "step": 8745, + "token_acc": 0.9266709928617781, + "train_speed(iter/s)": 0.378592 + }, + { + "epoch": 2.79872, + "grad_norm": 0.7032426521596369, + "learning_rate": 6.128627409527021e-08, + "loss": 0.27802759408950806, + "memory(GiB)": 77.0, + "step": 8746, + "token_acc": 0.9236227045075125, + "train_speed(iter/s)": 0.378569 + }, + { + "epoch": 2.7990399999999998, + "grad_norm": 0.7705485125089305, + "learning_rate": 6.109235702823707e-08, + "loss": 0.32641369104385376, + "memory(GiB)": 77.0, + "step": 8747, + "token_acc": 0.909710391822828, + "train_speed(iter/s)": 0.37854 + }, + { + "epoch": 2.79936, + "grad_norm": 0.7824290073686095, + "learning_rate": 6.089874344095164e-08, + "loss": 0.3110728859901428, + "memory(GiB)": 77.0, + "step": 8748, + "token_acc": 0.9451219512195121, + "train_speed(iter/s)": 0.378519 + }, + { + "epoch": 2.79968, + "grad_norm": 0.698148379978733, + "learning_rate": 6.070543335750523e-08, + "loss": 0.2982749342918396, + "memory(GiB)": 77.0, + "step": 8749, + "token_acc": 0.8855030652722683, + "train_speed(iter/s)": 0.378492 + }, + { + "epoch": 2.8, + "grad_norm": 0.7610604282393647, + "learning_rate": 6.051242680195247e-08, + "loss": 0.38587209582328796, + "memory(GiB)": 77.0, + "step": 8750, + "token_acc": 0.8968761761385021, + "train_speed(iter/s)": 0.37847 + }, + { + "epoch": 2.80032, + "grad_norm": 0.6931731070158021, + "learning_rate": 6.031972379830914e-08, + "loss": 0.2609790563583374, + "memory(GiB)": 77.0, + "step": 8751, + "token_acc": 0.9140251845419018, + "train_speed(iter/s)": 0.378444 + }, + { + "epoch": 2.80064, + "grad_norm": 0.7512225956129589, + "learning_rate": 6.01273243705544e-08, + "loss": 0.28314125537872314, + "memory(GiB)": 77.0, + "step": 8752, + "token_acc": 0.9445529090199141, + "train_speed(iter/s)": 0.378423 + }, + { + "epoch": 2.80096, + "grad_norm": 0.803450461673814, + "learning_rate": 5.993522854262829e-08, + "loss": 0.28601714968681335, + "memory(GiB)": 77.0, + "step": 8753, + "token_acc": 0.8463573463573464, + "train_speed(iter/s)": 0.378402 + }, + { + "epoch": 2.80128, + "grad_norm": 
0.7022073319544541, + "learning_rate": 5.974343633843444e-08, + "loss": 0.2995031774044037, + "memory(GiB)": 77.0, + "step": 8754, + "token_acc": 0.8489182251558489, + "train_speed(iter/s)": 0.37838 + }, + { + "epoch": 2.8016, + "grad_norm": 0.714822806911406, + "learning_rate": 5.955194778183765e-08, + "loss": 0.32874828577041626, + "memory(GiB)": 77.0, + "step": 8755, + "token_acc": 0.8264659270998416, + "train_speed(iter/s)": 0.378353 + }, + { + "epoch": 2.80192, + "grad_norm": 0.7513473484853066, + "learning_rate": 5.936076289666581e-08, + "loss": 0.29366448521614075, + "memory(GiB)": 77.0, + "step": 8756, + "token_acc": 0.8983945458544095, + "train_speed(iter/s)": 0.378327 + }, + { + "epoch": 2.80224, + "grad_norm": 0.7075510451479436, + "learning_rate": 5.9169881706707955e-08, + "loss": 0.3300280272960663, + "memory(GiB)": 77.0, + "step": 8757, + "token_acc": 0.9111769059955589, + "train_speed(iter/s)": 0.378302 + }, + { + "epoch": 2.80256, + "grad_norm": 0.7728388052317053, + "learning_rate": 5.8979304235716464e-08, + "loss": 0.3389008641242981, + "memory(GiB)": 77.0, + "step": 8758, + "token_acc": 0.8866474543707973, + "train_speed(iter/s)": 0.37828 + }, + { + "epoch": 2.80288, + "grad_norm": 0.7170654280914891, + "learning_rate": 5.878903050740514e-08, + "loss": 0.3111526370048523, + "memory(GiB)": 77.0, + "step": 8759, + "token_acc": 0.8943264318365152, + "train_speed(iter/s)": 0.37826 + }, + { + "epoch": 2.8032, + "grad_norm": 0.6607337148111155, + "learning_rate": 5.859906054545006e-08, + "loss": 0.2744569778442383, + "memory(GiB)": 77.0, + "step": 8760, + "token_acc": 0.9333496691987258, + "train_speed(iter/s)": 0.378234 + }, + { + "epoch": 2.80352, + "grad_norm": 0.669006091788199, + "learning_rate": 5.840939437349008e-08, + "loss": 0.2439461648464203, + "memory(GiB)": 77.0, + "step": 8761, + "token_acc": 0.8917211820437627, + "train_speed(iter/s)": 0.37821 + }, + { + "epoch": 2.80384, + "grad_norm": 0.724073532868759, + "learning_rate": 5.822003201512577e-08, + "loss": 0.2905500531196594, + "memory(GiB)": 77.0, + "step": 8762, + "token_acc": 0.9062576687116565, + "train_speed(iter/s)": 0.378188 + }, + { + "epoch": 2.80416, + "grad_norm": 0.8188917763080411, + "learning_rate": 5.803097349391995e-08, + "loss": 0.3067835569381714, + "memory(GiB)": 77.0, + "step": 8763, + "token_acc": 0.8876033057851239, + "train_speed(iter/s)": 0.378166 + }, + { + "epoch": 2.80448, + "grad_norm": 0.7124029790077477, + "learning_rate": 5.784221883339741e-08, + "loss": 0.27009010314941406, + "memory(GiB)": 77.0, + "step": 8764, + "token_acc": 0.8518518518518519, + "train_speed(iter/s)": 0.378141 + }, + { + "epoch": 2.8048, + "grad_norm": 0.7850144059696758, + "learning_rate": 5.7653768057045757e-08, + "loss": 0.3075641393661499, + "memory(GiB)": 77.0, + "step": 8765, + "token_acc": 0.9210969449121963, + "train_speed(iter/s)": 0.378118 + }, + { + "epoch": 2.80512, + "grad_norm": 0.697118561629582, + "learning_rate": 5.746562118831428e-08, + "loss": 0.3491590619087219, + "memory(GiB)": 77.0, + "step": 8766, + "token_acc": 0.9249174917491749, + "train_speed(iter/s)": 0.378094 + }, + { + "epoch": 2.80544, + "grad_norm": 0.6959509158084526, + "learning_rate": 5.7277778250614815e-08, + "loss": 0.20719996094703674, + "memory(GiB)": 77.0, + "step": 8767, + "token_acc": 0.9533799533799534, + "train_speed(iter/s)": 0.378074 + }, + { + "epoch": 2.8057600000000003, + "grad_norm": 0.714244516788371, + "learning_rate": 5.709023926732088e-08, + "loss": 0.3164524435997009, + "memory(GiB)": 77.0, + "step": 8768, 
+ "token_acc": 0.9480563002680965, + "train_speed(iter/s)": 0.37805 + }, + { + "epoch": 2.80608, + "grad_norm": 0.7162392750712693, + "learning_rate": 5.690300426176826e-08, + "loss": 0.24238000810146332, + "memory(GiB)": 77.0, + "step": 8769, + "token_acc": 0.9521575984990619, + "train_speed(iter/s)": 0.378029 + }, + { + "epoch": 2.8064, + "grad_norm": 0.6481480187670287, + "learning_rate": 5.671607325725553e-08, + "loss": 0.20430682599544525, + "memory(GiB)": 77.0, + "step": 8770, + "token_acc": 0.9247954688483323, + "train_speed(iter/s)": 0.378004 + }, + { + "epoch": 2.80672, + "grad_norm": 0.7484835447073825, + "learning_rate": 5.652944627704271e-08, + "loss": 0.31963375210762024, + "memory(GiB)": 77.0, + "step": 8771, + "token_acc": 0.9143741403026134, + "train_speed(iter/s)": 0.377982 + }, + { + "epoch": 2.8070399999999998, + "grad_norm": 0.8023875730492857, + "learning_rate": 5.63431233443526e-08, + "loss": 0.33172667026519775, + "memory(GiB)": 77.0, + "step": 8772, + "token_acc": 0.9431129839346852, + "train_speed(iter/s)": 0.377958 + }, + { + "epoch": 2.80736, + "grad_norm": 0.719694402424515, + "learning_rate": 5.615710448236972e-08, + "loss": 0.3213046193122864, + "memory(GiB)": 77.0, + "step": 8773, + "token_acc": 0.8980916030534352, + "train_speed(iter/s)": 0.377933 + }, + { + "epoch": 2.80768, + "grad_norm": 0.7905635691557654, + "learning_rate": 5.597138971424082e-08, + "loss": 0.21380284428596497, + "memory(GiB)": 77.0, + "step": 8774, + "token_acc": 0.9726775956284153, + "train_speed(iter/s)": 0.377912 + }, + { + "epoch": 2.808, + "grad_norm": 0.7101910621178287, + "learning_rate": 5.578597906307465e-08, + "loss": 0.28572529554367065, + "memory(GiB)": 77.0, + "step": 8775, + "token_acc": 0.9582441113490364, + "train_speed(iter/s)": 0.377891 + }, + { + "epoch": 2.80832, + "grad_norm": 0.6875452660571543, + "learning_rate": 5.560087255194302e-08, + "loss": 0.29108133912086487, + "memory(GiB)": 77.0, + "step": 8776, + "token_acc": 0.9230192451887028, + "train_speed(iter/s)": 0.377867 + }, + { + "epoch": 2.80864, + "grad_norm": 0.8127177928372902, + "learning_rate": 5.541607020387863e-08, + "loss": 0.27656349539756775, + "memory(GiB)": 77.0, + "step": 8777, + "token_acc": 0.9298245614035088, + "train_speed(iter/s)": 0.377847 + }, + { + "epoch": 2.80896, + "grad_norm": 0.7040579313520886, + "learning_rate": 5.5231572041876955e-08, + "loss": 0.2555381953716278, + "memory(GiB)": 77.0, + "step": 8778, + "token_acc": 0.9551070840197694, + "train_speed(iter/s)": 0.377823 + }, + { + "epoch": 2.80928, + "grad_norm": 0.7583174948959623, + "learning_rate": 5.504737808889604e-08, + "loss": 0.3739853501319885, + "memory(GiB)": 77.0, + "step": 8779, + "token_acc": 0.8917565485362096, + "train_speed(iter/s)": 0.377797 + }, + { + "epoch": 2.8096, + "grad_norm": 0.7435817580829048, + "learning_rate": 5.486348836785532e-08, + "loss": 0.298606276512146, + "memory(GiB)": 77.0, + "step": 8780, + "token_acc": 0.8793056685652292, + "train_speed(iter/s)": 0.377774 + }, + { + "epoch": 2.80992, + "grad_norm": 0.7529106483555578, + "learning_rate": 5.467990290163622e-08, + "loss": 0.26324760913848877, + "memory(GiB)": 77.0, + "step": 8781, + "token_acc": 0.9409900990099009, + "train_speed(iter/s)": 0.377752 + }, + { + "epoch": 2.81024, + "grad_norm": 0.740980949120022, + "learning_rate": 5.449662171308351e-08, + "loss": 0.26175457239151, + "memory(GiB)": 77.0, + "step": 8782, + "token_acc": 0.9327891156462585, + "train_speed(iter/s)": 0.377731 + }, + { + "epoch": 2.8105599999999997, + "grad_norm": 
0.6953762824048728, + "learning_rate": 5.431364482500312e-08, + "loss": 0.2896493971347809, + "memory(GiB)": 77.0, + "step": 8783, + "token_acc": 0.9497418244406196, + "train_speed(iter/s)": 0.37771 + }, + { + "epoch": 2.81088, + "grad_norm": 0.7237767804836901, + "learning_rate": 5.4130972260163226e-08, + "loss": 0.2962026596069336, + "memory(GiB)": 77.0, + "step": 8784, + "token_acc": 0.9218130311614731, + "train_speed(iter/s)": 0.377689 + }, + { + "epoch": 2.8112, + "grad_norm": 0.7500783432309729, + "learning_rate": 5.394860404129454e-08, + "loss": 0.23914362490177155, + "memory(GiB)": 77.0, + "step": 8785, + "token_acc": 0.933293803735547, + "train_speed(iter/s)": 0.377669 + }, + { + "epoch": 2.81152, + "grad_norm": 0.7236606486527793, + "learning_rate": 5.3766540191089455e-08, + "loss": 0.2548258304595947, + "memory(GiB)": 77.0, + "step": 8786, + "token_acc": 0.8888018794048551, + "train_speed(iter/s)": 0.377649 + }, + { + "epoch": 2.81184, + "grad_norm": 0.7497336540460062, + "learning_rate": 5.3584780732202354e-08, + "loss": 0.3098858594894409, + "memory(GiB)": 77.0, + "step": 8787, + "token_acc": 0.9322169059011164, + "train_speed(iter/s)": 0.377629 + }, + { + "epoch": 2.81216, + "grad_norm": 0.6999913696465084, + "learning_rate": 5.340332568725071e-08, + "loss": 0.27857884764671326, + "memory(GiB)": 77.0, + "step": 8788, + "token_acc": 0.9649737302977233, + "train_speed(iter/s)": 0.377607 + }, + { + "epoch": 2.81248, + "grad_norm": 0.696491398373573, + "learning_rate": 5.3222175078812834e-08, + "loss": 0.26863235235214233, + "memory(GiB)": 77.0, + "step": 8789, + "token_acc": 0.8695978016487634, + "train_speed(iter/s)": 0.377583 + }, + { + "epoch": 2.8128, + "grad_norm": 0.7424277085275538, + "learning_rate": 5.304132892942987e-08, + "loss": 0.3289077877998352, + "memory(GiB)": 77.0, + "step": 8790, + "token_acc": 0.9090233019335647, + "train_speed(iter/s)": 0.37756 + }, + { + "epoch": 2.81312, + "grad_norm": 0.7185471923391524, + "learning_rate": 5.2860787261605485e-08, + "loss": 0.23408374190330505, + "memory(GiB)": 77.0, + "step": 8791, + "token_acc": 0.8764064697609001, + "train_speed(iter/s)": 0.377536 + }, + { + "epoch": 2.81344, + "grad_norm": 0.775464013935614, + "learning_rate": 5.2680550097804204e-08, + "loss": 0.36452949047088623, + "memory(GiB)": 77.0, + "step": 8792, + "token_acc": 0.8575803981623277, + "train_speed(iter/s)": 0.377514 + }, + { + "epoch": 2.8137600000000003, + "grad_norm": 0.6902283021065637, + "learning_rate": 5.2500617460454206e-08, + "loss": 0.23378822207450867, + "memory(GiB)": 77.0, + "step": 8793, + "token_acc": 0.8964015585606234, + "train_speed(iter/s)": 0.377491 + }, + { + "epoch": 2.81408, + "grad_norm": 0.6727186910126178, + "learning_rate": 5.232098937194452e-08, + "loss": 0.3647726774215698, + "memory(GiB)": 77.0, + "step": 8794, + "token_acc": 0.9077351362099821, + "train_speed(iter/s)": 0.377462 + }, + { + "epoch": 2.8144, + "grad_norm": 0.7229470792194825, + "learning_rate": 5.214166585462671e-08, + "loss": 0.3518230617046356, + "memory(GiB)": 77.0, + "step": 8795, + "token_acc": 0.8563402889245586, + "train_speed(iter/s)": 0.377442 + }, + { + "epoch": 2.81472, + "grad_norm": 0.7076366604398937, + "learning_rate": 5.19626469308146e-08, + "loss": 0.3043349087238312, + "memory(GiB)": 77.0, + "step": 8796, + "token_acc": 0.9433870180575891, + "train_speed(iter/s)": 0.377417 + }, + { + "epoch": 2.8150399999999998, + "grad_norm": 0.7080049268089059, + "learning_rate": 5.178393262278397e-08, + "loss": 0.2843298316001892, + "memory(GiB)": 
77.0, + "step": 8797, + "token_acc": 0.9401769911504425, + "train_speed(iter/s)": 0.377396 + }, + { + "epoch": 2.81536, + "grad_norm": 0.7444725489721471, + "learning_rate": 5.16055229527726e-08, + "loss": 0.32792919874191284, + "memory(GiB)": 77.0, + "step": 8798, + "token_acc": 0.917919544900447, + "train_speed(iter/s)": 0.377371 + }, + { + "epoch": 2.81568, + "grad_norm": 0.7303996390777857, + "learning_rate": 5.142741794298078e-08, + "loss": 0.2893381714820862, + "memory(GiB)": 77.0, + "step": 8799, + "token_acc": 0.9552012148823082, + "train_speed(iter/s)": 0.37735 + }, + { + "epoch": 2.816, + "grad_norm": 0.6957958940399506, + "learning_rate": 5.124961761557051e-08, + "loss": 0.28219443559646606, + "memory(GiB)": 77.0, + "step": 8800, + "token_acc": 0.8496099004573581, + "train_speed(iter/s)": 0.377329 + }, + { + "epoch": 2.81632, + "grad_norm": 0.7719806256605403, + "learning_rate": 5.107212199266548e-08, + "loss": 0.31054598093032837, + "memory(GiB)": 77.0, + "step": 8801, + "token_acc": 0.942899517982944, + "train_speed(iter/s)": 0.377304 + }, + { + "epoch": 2.81664, + "grad_norm": 0.7648167586111548, + "learning_rate": 5.089493109635274e-08, + "loss": 0.3535987436771393, + "memory(GiB)": 77.0, + "step": 8802, + "token_acc": 0.8868450390189521, + "train_speed(iter/s)": 0.37728 + }, + { + "epoch": 2.81696, + "grad_norm": 0.6938822819322309, + "learning_rate": 5.071804494867994e-08, + "loss": 0.27105119824409485, + "memory(GiB)": 77.0, + "step": 8803, + "token_acc": 0.8745910577971646, + "train_speed(iter/s)": 0.37725 + }, + { + "epoch": 2.8172800000000002, + "grad_norm": 0.6830753523916987, + "learning_rate": 5.054146357165779e-08, + "loss": 0.35083460807800293, + "memory(GiB)": 77.0, + "step": 8804, + "token_acc": 0.960170146945089, + "train_speed(iter/s)": 0.377223 + }, + { + "epoch": 2.8176, + "grad_norm": 0.786902720710779, + "learning_rate": 5.036518698725901e-08, + "loss": 0.29907429218292236, + "memory(GiB)": 77.0, + "step": 8805, + "token_acc": 0.952076677316294, + "train_speed(iter/s)": 0.3772 + }, + { + "epoch": 2.81792, + "grad_norm": 0.7191762994577615, + "learning_rate": 5.018921521741743e-08, + "loss": 0.3087778687477112, + "memory(GiB)": 77.0, + "step": 8806, + "token_acc": 0.9285101515584787, + "train_speed(iter/s)": 0.377176 + }, + { + "epoch": 2.81824, + "grad_norm": 0.7165851398476907, + "learning_rate": 5.0013548284030544e-08, + "loss": 0.29405638575553894, + "memory(GiB)": 77.0, + "step": 8807, + "token_acc": 0.8942128189172371, + "train_speed(iter/s)": 0.377154 + }, + { + "epoch": 2.8185599999999997, + "grad_norm": 0.7491698053826761, + "learning_rate": 4.9838186208956694e-08, + "loss": 0.3462330102920532, + "memory(GiB)": 77.0, + "step": 8808, + "token_acc": 0.9120253164556962, + "train_speed(iter/s)": 0.377133 + }, + { + "epoch": 2.81888, + "grad_norm": 0.7397868741596798, + "learning_rate": 4.966312901401649e-08, + "loss": 0.3501535654067993, + "memory(GiB)": 77.0, + "step": 8809, + "token_acc": 0.9457601222307105, + "train_speed(iter/s)": 0.377108 + }, + { + "epoch": 2.8192, + "grad_norm": 0.7933103743372695, + "learning_rate": 4.948837672099277e-08, + "loss": 0.3290877938270569, + "memory(GiB)": 77.0, + "step": 8810, + "token_acc": 0.9421199442119944, + "train_speed(iter/s)": 0.377084 + }, + { + "epoch": 2.81952, + "grad_norm": 0.6939639341854492, + "learning_rate": 4.9313929351630655e-08, + "loss": 0.3274519443511963, + "memory(GiB)": 77.0, + "step": 8811, + "token_acc": 0.8938486663037561, + "train_speed(iter/s)": 0.377061 + }, + { + "epoch": 2.81984, + 
"grad_norm": 0.6705413253676261, + "learning_rate": 4.913978692763693e-08, + "loss": 0.27995625138282776, + "memory(GiB)": 77.0, + "step": 8812, + "token_acc": 0.9211941904249596, + "train_speed(iter/s)": 0.377038 + }, + { + "epoch": 2.82016, + "grad_norm": 0.7426448694080782, + "learning_rate": 4.8965949470680674e-08, + "loss": 0.3331575393676758, + "memory(GiB)": 77.0, + "step": 8813, + "token_acc": 0.9231527093596059, + "train_speed(iter/s)": 0.377016 + }, + { + "epoch": 2.82048, + "grad_norm": 0.8237572744447511, + "learning_rate": 4.87924170023929e-08, + "loss": 0.3241432011127472, + "memory(GiB)": 77.0, + "step": 8814, + "token_acc": 0.9037286063569682, + "train_speed(iter/s)": 0.37699 + }, + { + "epoch": 2.8208, + "grad_norm": 0.7113444883645337, + "learning_rate": 4.861918954436662e-08, + "loss": 0.26939594745635986, + "memory(GiB)": 77.0, + "step": 8815, + "token_acc": 0.905850445563735, + "train_speed(iter/s)": 0.376967 + }, + { + "epoch": 2.82112, + "grad_norm": 0.7099896469570278, + "learning_rate": 4.8446267118157084e-08, + "loss": 0.3001909852027893, + "memory(GiB)": 77.0, + "step": 8816, + "token_acc": 0.8404483663248271, + "train_speed(iter/s)": 0.376942 + }, + { + "epoch": 2.82144, + "grad_norm": 0.7323771855626092, + "learning_rate": 4.8273649745281814e-08, + "loss": 0.30616265535354614, + "memory(GiB)": 77.0, + "step": 8817, + "token_acc": 0.9224850384724993, + "train_speed(iter/s)": 0.376921 + }, + { + "epoch": 2.8217600000000003, + "grad_norm": 0.7032179019457023, + "learning_rate": 4.810133744721945e-08, + "loss": 0.2637154459953308, + "memory(GiB)": 77.0, + "step": 8818, + "token_acc": 0.9552201257861636, + "train_speed(iter/s)": 0.376894 + }, + { + "epoch": 2.82208, + "grad_norm": 0.6982447835270724, + "learning_rate": 4.792933024541174e-08, + "loss": 0.2354787141084671, + "memory(GiB)": 77.0, + "step": 8819, + "token_acc": 0.941270157276528, + "train_speed(iter/s)": 0.37687 + }, + { + "epoch": 2.8224, + "grad_norm": 0.7429439640025001, + "learning_rate": 4.7757628161261835e-08, + "loss": 0.2955120801925659, + "memory(GiB)": 77.0, + "step": 8820, + "token_acc": 0.9256788947117676, + "train_speed(iter/s)": 0.376849 + }, + { + "epoch": 2.82272, + "grad_norm": 0.710041219241593, + "learning_rate": 4.7586231216134874e-08, + "loss": 0.3694155812263489, + "memory(GiB)": 77.0, + "step": 8821, + "token_acc": 0.9012513601741022, + "train_speed(iter/s)": 0.376826 + }, + { + "epoch": 2.8230399999999998, + "grad_norm": 0.8985927699664328, + "learning_rate": 4.741513943135878e-08, + "loss": 0.32935822010040283, + "memory(GiB)": 77.0, + "step": 8822, + "token_acc": 0.9272629310344828, + "train_speed(iter/s)": 0.376804 + }, + { + "epoch": 2.82336, + "grad_norm": 0.6945355661544557, + "learning_rate": 4.724435282822237e-08, + "loss": 0.3071962296962738, + "memory(GiB)": 77.0, + "step": 8823, + "token_acc": 0.9060614012070323, + "train_speed(iter/s)": 0.376779 + }, + { + "epoch": 2.82368, + "grad_norm": 0.6898873345178016, + "learning_rate": 4.707387142797726e-08, + "loss": 0.24782595038414001, + "memory(GiB)": 77.0, + "step": 8824, + "token_acc": 0.9084249084249084, + "train_speed(iter/s)": 0.376758 + }, + { + "epoch": 2.824, + "grad_norm": 0.7225205896623389, + "learning_rate": 4.6903695251836745e-08, + "loss": 0.34855058789253235, + "memory(GiB)": 77.0, + "step": 8825, + "token_acc": 0.9116298457690705, + "train_speed(iter/s)": 0.376734 + }, + { + "epoch": 2.82432, + "grad_norm": 0.7503994051960974, + "learning_rate": 4.6733824320976674e-08, + "loss": 0.32138729095458984, + 
"memory(GiB)": 77.0, + "step": 8826, + "token_acc": 0.929664508534432, + "train_speed(iter/s)": 0.376711 + }, + { + "epoch": 2.82464, + "grad_norm": 0.7219887361613766, + "learning_rate": 4.656425865653458e-08, + "loss": 0.28152257204055786, + "memory(GiB)": 77.0, + "step": 8827, + "token_acc": 0.8624885215794307, + "train_speed(iter/s)": 0.376687 + }, + { + "epoch": 2.82496, + "grad_norm": 0.7567689658210901, + "learning_rate": 4.639499827960941e-08, + "loss": 0.37103337049484253, + "memory(GiB)": 77.0, + "step": 8828, + "token_acc": 0.7881073885067672, + "train_speed(iter/s)": 0.376663 + }, + { + "epoch": 2.8252800000000002, + "grad_norm": 0.6997978951947891, + "learning_rate": 4.622604321126323e-08, + "loss": 0.2852240204811096, + "memory(GiB)": 77.0, + "step": 8829, + "token_acc": 0.9447513812154696, + "train_speed(iter/s)": 0.37664 + }, + { + "epoch": 2.8256, + "grad_norm": 0.6883034828096323, + "learning_rate": 4.60573934725192e-08, + "loss": 0.33041149377822876, + "memory(GiB)": 77.0, + "step": 8830, + "token_acc": 0.917035022101326, + "train_speed(iter/s)": 0.376618 + }, + { + "epoch": 2.82592, + "grad_norm": 0.6854577102583415, + "learning_rate": 4.588904908436303e-08, + "loss": 0.2834482192993164, + "memory(GiB)": 77.0, + "step": 8831, + "token_acc": 0.899286733238231, + "train_speed(iter/s)": 0.376581 + }, + { + "epoch": 2.82624, + "grad_norm": 0.7150526758537084, + "learning_rate": 4.5721010067742154e-08, + "loss": 0.29520031809806824, + "memory(GiB)": 77.0, + "step": 8832, + "token_acc": 0.9249368459040058, + "train_speed(iter/s)": 0.376556 + }, + { + "epoch": 2.8265599999999997, + "grad_norm": 0.6682118715424439, + "learning_rate": 4.55532764435665e-08, + "loss": 0.2831534743309021, + "memory(GiB)": 77.0, + "step": 8833, + "token_acc": 0.9068567961165048, + "train_speed(iter/s)": 0.37653 + }, + { + "epoch": 2.82688, + "grad_norm": 0.8355977390440469, + "learning_rate": 4.538584823270714e-08, + "loss": 0.3886529803276062, + "memory(GiB)": 77.0, + "step": 8834, + "token_acc": 0.9046199701937406, + "train_speed(iter/s)": 0.37651 + }, + { + "epoch": 2.8272, + "grad_norm": 0.6987714477342376, + "learning_rate": 4.5218725455997703e-08, + "loss": 0.2814595699310303, + "memory(GiB)": 77.0, + "step": 8835, + "token_acc": 0.9524281466798811, + "train_speed(iter/s)": 0.376486 + }, + { + "epoch": 2.82752, + "grad_norm": 0.7310976096206947, + "learning_rate": 4.505190813423377e-08, + "loss": 0.31830519437789917, + "memory(GiB)": 77.0, + "step": 8836, + "token_acc": 0.8967668746454907, + "train_speed(iter/s)": 0.376463 + }, + { + "epoch": 2.82784, + "grad_norm": 0.7382212970838619, + "learning_rate": 4.488539628817318e-08, + "loss": 0.37233686447143555, + "memory(GiB)": 77.0, + "step": 8837, + "token_acc": 0.8678386763185109, + "train_speed(iter/s)": 0.376439 + }, + { + "epoch": 2.82816, + "grad_norm": 0.6784295011847273, + "learning_rate": 4.471918993853519e-08, + "loss": 0.3733503222465515, + "memory(GiB)": 77.0, + "step": 8838, + "token_acc": 0.8581400123177992, + "train_speed(iter/s)": 0.376412 + }, + { + "epoch": 2.82848, + "grad_norm": 0.6692861842392641, + "learning_rate": 4.455328910600104e-08, + "loss": 0.2835652232170105, + "memory(GiB)": 77.0, + "step": 8839, + "token_acc": 0.9175888862726631, + "train_speed(iter/s)": 0.37639 + }, + { + "epoch": 2.8288, + "grad_norm": 0.6536102604454161, + "learning_rate": 4.438769381121422e-08, + "loss": 0.3045653998851776, + "memory(GiB)": 77.0, + "step": 8840, + "token_acc": 0.9274047186932849, + "train_speed(iter/s)": 0.376367 + }, + { + 
"epoch": 2.82912, + "grad_norm": 0.7068392486266879, + "learning_rate": 4.4222404074780734e-08, + "loss": 0.25631511211395264, + "memory(GiB)": 77.0, + "step": 8841, + "token_acc": 0.9656556645851918, + "train_speed(iter/s)": 0.376344 + }, + { + "epoch": 2.82944, + "grad_norm": 0.7024991871487307, + "learning_rate": 4.4057419917267477e-08, + "loss": 0.3414153456687927, + "memory(GiB)": 77.0, + "step": 8842, + "token_acc": 0.871913270427625, + "train_speed(iter/s)": 0.376322 + }, + { + "epoch": 2.8297600000000003, + "grad_norm": 0.7405398463851706, + "learning_rate": 4.389274135920413e-08, + "loss": 0.2556094527244568, + "memory(GiB)": 77.0, + "step": 8843, + "token_acc": 0.8589413447782547, + "train_speed(iter/s)": 0.376302 + }, + { + "epoch": 2.83008, + "grad_norm": 0.7195735623085426, + "learning_rate": 4.372836842108208e-08, + "loss": 0.19856694340705872, + "memory(GiB)": 77.0, + "step": 8844, + "token_acc": 0.9623693379790941, + "train_speed(iter/s)": 0.376238 + }, + { + "epoch": 2.8304, + "grad_norm": 0.6916350285406777, + "learning_rate": 4.356430112335469e-08, + "loss": 0.302081823348999, + "memory(GiB)": 77.0, + "step": 8845, + "token_acc": 0.8930689020650174, + "train_speed(iter/s)": 0.376216 + }, + { + "epoch": 2.83072, + "grad_norm": 0.7786738514824411, + "learning_rate": 4.340053948643702e-08, + "loss": 0.32292741537094116, + "memory(GiB)": 77.0, + "step": 8846, + "token_acc": 0.9001331557922769, + "train_speed(iter/s)": 0.376188 + }, + { + "epoch": 2.83104, + "grad_norm": 0.7018977024904329, + "learning_rate": 4.3237083530706655e-08, + "loss": 0.24241560697555542, + "memory(GiB)": 77.0, + "step": 8847, + "token_acc": 0.9586238958623896, + "train_speed(iter/s)": 0.376164 + }, + { + "epoch": 2.83136, + "grad_norm": 0.7184488643578955, + "learning_rate": 4.307393327650261e-08, + "loss": 0.2281564176082611, + "memory(GiB)": 77.0, + "step": 8848, + "token_acc": 0.973701955495617, + "train_speed(iter/s)": 0.376144 + }, + { + "epoch": 2.83168, + "grad_norm": 0.6611772521213639, + "learning_rate": 4.291108874412642e-08, + "loss": 0.3085069954395294, + "memory(GiB)": 77.0, + "step": 8849, + "token_acc": 0.9370134465675867, + "train_speed(iter/s)": 0.376121 + }, + { + "epoch": 2.832, + "grad_norm": 0.741102183113431, + "learning_rate": 4.2748549953841313e-08, + "loss": 0.25401046872138977, + "memory(GiB)": 77.0, + "step": 8850, + "token_acc": 0.9388570002495633, + "train_speed(iter/s)": 0.376101 + }, + { + "epoch": 2.83232, + "grad_norm": 0.7924026564989698, + "learning_rate": 4.258631692587223e-08, + "loss": 0.2638222575187683, + "memory(GiB)": 77.0, + "step": 8851, + "token_acc": 0.8982689115484942, + "train_speed(iter/s)": 0.376075 + }, + { + "epoch": 2.83264, + "grad_norm": 0.7431010197600308, + "learning_rate": 4.242438968040607e-08, + "loss": 0.2922322452068329, + "memory(GiB)": 77.0, + "step": 8852, + "token_acc": 0.9331826985151711, + "train_speed(iter/s)": 0.376053 + }, + { + "epoch": 2.83296, + "grad_norm": 0.6549096229194974, + "learning_rate": 4.226276823759229e-08, + "loss": 0.26488932967185974, + "memory(GiB)": 77.0, + "step": 8853, + "token_acc": 0.8649866830567506, + "train_speed(iter/s)": 0.376032 + }, + { + "epoch": 2.8332800000000002, + "grad_norm": 0.6957473303641533, + "learning_rate": 4.2101452617541446e-08, + "loss": 0.3525998592376709, + "memory(GiB)": 77.0, + "step": 8854, + "token_acc": 0.876844130853111, + "train_speed(iter/s)": 0.37601 + }, + { + "epoch": 2.8336, + "grad_norm": 0.7897365045217488, + "learning_rate": 4.194044284032639e-08, + "loss": 
0.36711201071739197, + "memory(GiB)": 77.0, + "step": 8855, + "token_acc": 0.855499790882476, + "train_speed(iter/s)": 0.375987 + }, + { + "epoch": 2.83392, + "grad_norm": 0.7248100235502759, + "learning_rate": 4.177973892598275e-08, + "loss": 0.24796274304389954, + "memory(GiB)": 77.0, + "step": 8856, + "token_acc": 0.8842105263157894, + "train_speed(iter/s)": 0.375965 + }, + { + "epoch": 2.83424, + "grad_norm": 0.7724802481403297, + "learning_rate": 4.1619340894506477e-08, + "loss": 0.3096676170825958, + "memory(GiB)": 77.0, + "step": 8857, + "token_acc": 0.9092465753424658, + "train_speed(iter/s)": 0.375945 + }, + { + "epoch": 2.8345599999999997, + "grad_norm": 0.6433213301414735, + "learning_rate": 4.1459248765856884e-08, + "loss": 0.2989892363548279, + "memory(GiB)": 77.0, + "step": 8858, + "token_acc": 0.8623294346978557, + "train_speed(iter/s)": 0.375922 + }, + { + "epoch": 2.83488, + "grad_norm": 0.711474891277522, + "learning_rate": 4.129946255995443e-08, + "loss": 0.3196195960044861, + "memory(GiB)": 77.0, + "step": 8859, + "token_acc": 0.9284353741496598, + "train_speed(iter/s)": 0.3759 + }, + { + "epoch": 2.8352, + "grad_norm": 0.7436049980891023, + "learning_rate": 4.113998229668181e-08, + "loss": 0.3195651173591614, + "memory(GiB)": 77.0, + "step": 8860, + "token_acc": 0.9501915708812261, + "train_speed(iter/s)": 0.37587 + }, + { + "epoch": 2.83552, + "grad_norm": 0.7707055460576652, + "learning_rate": 4.098080799588344e-08, + "loss": 0.277517706155777, + "memory(GiB)": 77.0, + "step": 8861, + "token_acc": 0.9304347826086956, + "train_speed(iter/s)": 0.375848 + }, + { + "epoch": 2.83584, + "grad_norm": 0.7342922748782911, + "learning_rate": 4.082193967736597e-08, + "loss": 0.2962326109409332, + "memory(GiB)": 77.0, + "step": 8862, + "token_acc": 0.929192546583851, + "train_speed(iter/s)": 0.375827 + }, + { + "epoch": 2.83616, + "grad_norm": 0.7310924207119213, + "learning_rate": 4.066337736089776e-08, + "loss": 0.29847851395606995, + "memory(GiB)": 77.0, + "step": 8863, + "token_acc": 0.9420505200594353, + "train_speed(iter/s)": 0.375804 + }, + { + "epoch": 2.83648, + "grad_norm": 0.7547879903603646, + "learning_rate": 4.050512106620913e-08, + "loss": 0.37416261434555054, + "memory(GiB)": 77.0, + "step": 8864, + "token_acc": 0.9124381188118812, + "train_speed(iter/s)": 0.375779 + }, + { + "epoch": 2.8368, + "grad_norm": 0.7581974511267261, + "learning_rate": 4.034717081299211e-08, + "loss": 0.3836356997489929, + "memory(GiB)": 77.0, + "step": 8865, + "token_acc": 0.929858934169279, + "train_speed(iter/s)": 0.37575 + }, + { + "epoch": 2.83712, + "grad_norm": 0.6815176584304249, + "learning_rate": 4.018952662090153e-08, + "loss": 0.2744297385215759, + "memory(GiB)": 77.0, + "step": 8866, + "token_acc": 0.931453272951832, + "train_speed(iter/s)": 0.375727 + }, + { + "epoch": 2.83744, + "grad_norm": 0.7193168344820118, + "learning_rate": 4.003218850955282e-08, + "loss": 0.2672787606716156, + "memory(GiB)": 77.0, + "step": 8867, + "token_acc": 0.9126819126819127, + "train_speed(iter/s)": 0.375703 + }, + { + "epoch": 2.83776, + "grad_norm": 0.6789447331234606, + "learning_rate": 3.9875156498524194e-08, + "loss": 0.3194861114025116, + "memory(GiB)": 77.0, + "step": 8868, + "token_acc": 0.911965811965812, + "train_speed(iter/s)": 0.375682 + }, + { + "epoch": 2.83808, + "grad_norm": 0.7613359833612838, + "learning_rate": 3.971843060735558e-08, + "loss": 0.3076885938644409, + "memory(GiB)": 77.0, + "step": 8869, + "token_acc": 0.8971141781681304, + "train_speed(iter/s)": 0.375662 + }, 
+ { + "epoch": 2.8384, + "grad_norm": 0.69134364389869, + "learning_rate": 3.9562010855548617e-08, + "loss": 0.36844348907470703, + "memory(GiB)": 77.0, + "step": 8870, + "token_acc": 0.9273892151628403, + "train_speed(iter/s)": 0.375637 + }, + { + "epoch": 2.83872, + "grad_norm": 0.7825251647770181, + "learning_rate": 3.940589726256716e-08, + "loss": 0.3064737021923065, + "memory(GiB)": 77.0, + "step": 8871, + "token_acc": 0.8451459201905897, + "train_speed(iter/s)": 0.375613 + }, + { + "epoch": 2.83904, + "grad_norm": 0.7430694677385111, + "learning_rate": 3.925008984783707e-08, + "loss": 0.25018200278282166, + "memory(GiB)": 77.0, + "step": 8872, + "token_acc": 0.9316056639059578, + "train_speed(iter/s)": 0.375593 + }, + { + "epoch": 2.83936, + "grad_norm": 0.7745374180759941, + "learning_rate": 3.9094588630745613e-08, + "loss": 0.29349011182785034, + "memory(GiB)": 77.0, + "step": 8873, + "token_acc": 0.8715069860279441, + "train_speed(iter/s)": 0.375573 + }, + { + "epoch": 2.83968, + "grad_norm": 0.6988413618930445, + "learning_rate": 3.8939393630642305e-08, + "loss": 0.3446754813194275, + "memory(GiB)": 77.0, + "step": 8874, + "token_acc": 0.8414855072463768, + "train_speed(iter/s)": 0.375549 + }, + { + "epoch": 2.84, + "grad_norm": 0.9367331161791791, + "learning_rate": 3.878450486683838e-08, + "loss": 0.3400956392288208, + "memory(GiB)": 77.0, + "step": 8875, + "token_acc": 0.9221073859339551, + "train_speed(iter/s)": 0.375528 + }, + { + "epoch": 2.84032, + "grad_norm": 0.7273878416433511, + "learning_rate": 3.862992235860702e-08, + "loss": 0.3450890779495239, + "memory(GiB)": 77.0, + "step": 8876, + "token_acc": 0.8302186177715092, + "train_speed(iter/s)": 0.375508 + }, + { + "epoch": 2.84064, + "grad_norm": 0.7167011171989465, + "learning_rate": 3.8475646125183396e-08, + "loss": 0.3225651681423187, + "memory(GiB)": 77.0, + "step": 8877, + "token_acc": 0.8544885585761783, + "train_speed(iter/s)": 0.375484 + }, + { + "epoch": 2.84096, + "grad_norm": 0.7329652188320197, + "learning_rate": 3.8321676185764655e-08, + "loss": 0.32316291332244873, + "memory(GiB)": 77.0, + "step": 8878, + "token_acc": 0.9090909090909091, + "train_speed(iter/s)": 0.375462 + }, + { + "epoch": 2.8412800000000002, + "grad_norm": 0.7503044415102833, + "learning_rate": 3.816801255950964e-08, + "loss": 0.3219977617263794, + "memory(GiB)": 77.0, + "step": 8879, + "token_acc": 0.9222817764165391, + "train_speed(iter/s)": 0.375441 + }, + { + "epoch": 2.8416, + "grad_norm": 0.7109262260960253, + "learning_rate": 3.801465526553916e-08, + "loss": 0.230158269405365, + "memory(GiB)": 77.0, + "step": 8880, + "token_acc": 0.9136533665835411, + "train_speed(iter/s)": 0.375417 + }, + { + "epoch": 2.84192, + "grad_norm": 0.7399688492062083, + "learning_rate": 3.786160432293545e-08, + "loss": 0.2975960969924927, + "memory(GiB)": 77.0, + "step": 8881, + "token_acc": 0.9176604202157865, + "train_speed(iter/s)": 0.37539 + }, + { + "epoch": 2.84224, + "grad_norm": 0.7805700218629272, + "learning_rate": 3.770885975074357e-08, + "loss": 0.23366093635559082, + "memory(GiB)": 77.0, + "step": 8882, + "token_acc": 0.9321953532479849, + "train_speed(iter/s)": 0.375369 + }, + { + "epoch": 2.8425599999999998, + "grad_norm": 0.655104325673206, + "learning_rate": 3.755642156796968e-08, + "loss": 0.2732256054878235, + "memory(GiB)": 77.0, + "step": 8883, + "token_acc": 0.9557091653865848, + "train_speed(iter/s)": 0.375344 + }, + { + "epoch": 2.84288, + "grad_norm": 0.7586414520754396, + "learning_rate": 3.7404289793582235e-08, + "loss": 
0.25086015462875366, + "memory(GiB)": 77.0, + "step": 8884, + "token_acc": 0.9322990126939351, + "train_speed(iter/s)": 0.375322 + }, + { + "epoch": 2.8432, + "grad_norm": 0.6611616037733127, + "learning_rate": 3.725246444651109e-08, + "loss": 0.27400970458984375, + "memory(GiB)": 77.0, + "step": 8885, + "token_acc": 0.9557949479940565, + "train_speed(iter/s)": 0.375297 + }, + { + "epoch": 2.84352, + "grad_norm": 0.7231626711142954, + "learning_rate": 3.7100945545648634e-08, + "loss": 0.31886911392211914, + "memory(GiB)": 77.0, + "step": 8886, + "token_acc": 0.9041095890410958, + "train_speed(iter/s)": 0.375277 + }, + { + "epoch": 2.84384, + "grad_norm": 0.7490119757124896, + "learning_rate": 3.6949733109848395e-08, + "loss": 0.3417612314224243, + "memory(GiB)": 77.0, + "step": 8887, + "token_acc": 0.9564056939501779, + "train_speed(iter/s)": 0.375251 + }, + { + "epoch": 2.84416, + "grad_norm": 0.6304052504080807, + "learning_rate": 3.679882715792643e-08, + "loss": 0.2725124955177307, + "memory(GiB)": 77.0, + "step": 8888, + "token_acc": 0.9044075558099599, + "train_speed(iter/s)": 0.375226 + }, + { + "epoch": 2.84448, + "grad_norm": 0.6860075122520545, + "learning_rate": 3.6648227708660223e-08, + "loss": 0.26912665367126465, + "memory(GiB)": 77.0, + "step": 8889, + "token_acc": 0.9326923076923077, + "train_speed(iter/s)": 0.375203 + }, + { + "epoch": 2.8448, + "grad_norm": 0.7247458036846157, + "learning_rate": 3.6497934780789504e-08, + "loss": 0.276123583316803, + "memory(GiB)": 77.0, + "step": 8890, + "token_acc": 0.8981447799199709, + "train_speed(iter/s)": 0.375182 + }, + { + "epoch": 2.84512, + "grad_norm": 0.7739800032256531, + "learning_rate": 3.634794839301514e-08, + "loss": 0.3459535241127014, + "memory(GiB)": 77.0, + "step": 8891, + "token_acc": 0.8929247067726069, + "train_speed(iter/s)": 0.375152 + }, + { + "epoch": 2.84544, + "grad_norm": 0.6578987720742099, + "learning_rate": 3.619826856400083e-08, + "loss": 0.3261811137199402, + "memory(GiB)": 77.0, + "step": 8892, + "token_acc": 0.8588681032441392, + "train_speed(iter/s)": 0.375126 + }, + { + "epoch": 2.84576, + "grad_norm": 0.7556025439401717, + "learning_rate": 3.604889531237138e-08, + "loss": 0.3757469058036804, + "memory(GiB)": 77.0, + "step": 8893, + "token_acc": 0.9373959373959374, + "train_speed(iter/s)": 0.375105 + }, + { + "epoch": 2.84608, + "grad_norm": 0.7855189708138146, + "learning_rate": 3.589982865671388e-08, + "loss": 0.236005961894989, + "memory(GiB)": 77.0, + "step": 8894, + "token_acc": 0.8702201622247973, + "train_speed(iter/s)": 0.375086 + }, + { + "epoch": 2.8464, + "grad_norm": 0.6975119224154633, + "learning_rate": 3.575106861557681e-08, + "loss": 0.23426273465156555, + "memory(GiB)": 77.0, + "step": 8895, + "token_acc": 0.9528712871287128, + "train_speed(iter/s)": 0.375065 + }, + { + "epoch": 2.84672, + "grad_norm": 0.7379317325317216, + "learning_rate": 3.560261520747066e-08, + "loss": 0.27277496457099915, + "memory(GiB)": 77.0, + "step": 8896, + "token_acc": 0.9360720055388876, + "train_speed(iter/s)": 0.375041 + }, + { + "epoch": 2.84704, + "grad_norm": 0.7387669457662102, + "learning_rate": 3.5454468450868416e-08, + "loss": 0.28192389011383057, + "memory(GiB)": 77.0, + "step": 8897, + "token_acc": 0.8886067914947635, + "train_speed(iter/s)": 0.375019 + }, + { + "epoch": 2.84736, + "grad_norm": 0.8904944925344613, + "learning_rate": 3.530662836420423e-08, + "loss": 0.3631647229194641, + "memory(GiB)": 77.0, + "step": 8898, + "token_acc": 0.910958904109589, + "train_speed(iter/s)": 0.374992 + }, 
+ { + "epoch": 2.84768, + "grad_norm": 0.7295096655029677, + "learning_rate": 3.515909496587394e-08, + "loss": 0.3761805295944214, + "memory(GiB)": 77.0, + "step": 8899, + "token_acc": 0.931073192635833, + "train_speed(iter/s)": 0.374968 + }, + { + "epoch": 2.848, + "grad_norm": 0.83655305963823, + "learning_rate": 3.501186827423564e-08, + "loss": 0.33032000064849854, + "memory(GiB)": 77.0, + "step": 8900, + "token_acc": 0.9582637729549248, + "train_speed(iter/s)": 0.374941 + }, + { + "epoch": 2.84832, + "grad_norm": 0.655952559120774, + "learning_rate": 3.486494830760939e-08, + "loss": 0.3082590699195862, + "memory(GiB)": 77.0, + "step": 8901, + "token_acc": 0.9035008976660682, + "train_speed(iter/s)": 0.374915 + }, + { + "epoch": 2.84864, + "grad_norm": 0.7544879884965723, + "learning_rate": 3.471833508427641e-08, + "loss": 0.2862483859062195, + "memory(GiB)": 77.0, + "step": 8902, + "token_acc": 0.9088419405320813, + "train_speed(iter/s)": 0.374893 + }, + { + "epoch": 2.84896, + "grad_norm": 0.7212847317238255, + "learning_rate": 3.457202862248044e-08, + "loss": 0.3479539453983307, + "memory(GiB)": 77.0, + "step": 8903, + "token_acc": 0.9062798852406758, + "train_speed(iter/s)": 0.374865 + }, + { + "epoch": 2.8492800000000003, + "grad_norm": 1.0035633621021358, + "learning_rate": 3.4426028940426624e-08, + "loss": 0.2422887086868286, + "memory(GiB)": 77.0, + "step": 8904, + "token_acc": 0.9218978102189781, + "train_speed(iter/s)": 0.374845 + }, + { + "epoch": 2.8496, + "grad_norm": 0.6787222048671512, + "learning_rate": 3.4280336056282396e-08, + "loss": 0.3565594553947449, + "memory(GiB)": 77.0, + "step": 8905, + "token_acc": 0.940225035161744, + "train_speed(iter/s)": 0.374824 + }, + { + "epoch": 2.84992, + "grad_norm": 0.7419517793156706, + "learning_rate": 3.413494998817629e-08, + "loss": 0.3170446455478668, + "memory(GiB)": 77.0, + "step": 8906, + "token_acc": 0.9074021953278919, + "train_speed(iter/s)": 0.3748 + }, + { + "epoch": 2.85024, + "grad_norm": 0.7455756983415514, + "learning_rate": 3.398987075419941e-08, + "loss": 0.2737947702407837, + "memory(GiB)": 77.0, + "step": 8907, + "token_acc": 0.9438399438399439, + "train_speed(iter/s)": 0.374781 + }, + { + "epoch": 2.8505599999999998, + "grad_norm": 0.6991660077142916, + "learning_rate": 3.384509837240424e-08, + "loss": 0.3456258177757263, + "memory(GiB)": 77.0, + "step": 8908, + "token_acc": 0.9350163627863488, + "train_speed(iter/s)": 0.374757 + }, + { + "epoch": 2.85088, + "grad_norm": 0.7030808259629813, + "learning_rate": 3.370063286080555e-08, + "loss": 0.2947349548339844, + "memory(GiB)": 77.0, + "step": 8909, + "token_acc": 0.9561369757599076, + "train_speed(iter/s)": 0.374737 + }, + { + "epoch": 2.8512, + "grad_norm": 0.6228039274132424, + "learning_rate": 3.355647423737868e-08, + "loss": 0.26783403754234314, + "memory(GiB)": 77.0, + "step": 8910, + "token_acc": 0.946524064171123, + "train_speed(iter/s)": 0.374711 + }, + { + "epoch": 2.85152, + "grad_norm": 0.7163561340578989, + "learning_rate": 3.341262252006205e-08, + "loss": 0.26109200716018677, + "memory(GiB)": 77.0, + "step": 8911, + "token_acc": 0.9167107350608144, + "train_speed(iter/s)": 0.374692 + }, + { + "epoch": 2.85184, + "grad_norm": 2.6486622256577848, + "learning_rate": 3.326907772675608e-08, + "loss": 0.29477760195732117, + "memory(GiB)": 77.0, + "step": 8912, + "token_acc": 0.9250585480093677, + "train_speed(iter/s)": 0.37467 + }, + { + "epoch": 2.85216, + "grad_norm": 0.7450655713391143, + "learning_rate": 3.312583987532175e-08, + "loss": 
0.38165485858917236, + "memory(GiB)": 77.0, + "step": 8913, + "token_acc": 0.8653782072642453, + "train_speed(iter/s)": 0.374642 + }, + { + "epoch": 2.85248, + "grad_norm": 0.7571733242752099, + "learning_rate": 3.298290898358286e-08, + "loss": 0.26057711243629456, + "memory(GiB)": 77.0, + "step": 8914, + "token_acc": 0.9599570968895245, + "train_speed(iter/s)": 0.37462 + }, + { + "epoch": 2.8528000000000002, + "grad_norm": 0.6444276317809255, + "learning_rate": 3.284028506932463e-08, + "loss": 0.329934686422348, + "memory(GiB)": 77.0, + "step": 8915, + "token_acc": 0.9586070959264126, + "train_speed(iter/s)": 0.374598 + }, + { + "epoch": 2.85312, + "grad_norm": 0.7271687369517092, + "learning_rate": 3.2697968150294246e-08, + "loss": 0.3483092784881592, + "memory(GiB)": 77.0, + "step": 8916, + "token_acc": 0.930783242258652, + "train_speed(iter/s)": 0.374575 + }, + { + "epoch": 2.85344, + "grad_norm": 0.7679168720049987, + "learning_rate": 3.2555958244200324e-08, + "loss": 0.25502902269363403, + "memory(GiB)": 77.0, + "step": 8917, + "token_acc": 0.8842302878598248, + "train_speed(iter/s)": 0.374555 + }, + { + "epoch": 2.85376, + "grad_norm": 0.6749283089813356, + "learning_rate": 3.2414255368713455e-08, + "loss": 0.2743542492389679, + "memory(GiB)": 77.0, + "step": 8918, + "token_acc": 0.9089987325728771, + "train_speed(iter/s)": 0.374532 + }, + { + "epoch": 2.8540799999999997, + "grad_norm": 0.7095921037546015, + "learning_rate": 3.227285954146647e-08, + "loss": 0.3382253050804138, + "memory(GiB)": 77.0, + "step": 8919, + "token_acc": 0.8605846450369419, + "train_speed(iter/s)": 0.374508 + }, + { + "epoch": 2.8544, + "grad_norm": 0.7067080716186417, + "learning_rate": 3.213177078005336e-08, + "loss": 0.3010298013687134, + "memory(GiB)": 77.0, + "step": 8920, + "token_acc": 0.9059777102330294, + "train_speed(iter/s)": 0.374481 + }, + { + "epoch": 2.85472, + "grad_norm": 0.7600931148571526, + "learning_rate": 3.199098910203036e-08, + "loss": 0.3720007538795471, + "memory(GiB)": 77.0, + "step": 8921, + "token_acc": 0.8986601245518022, + "train_speed(iter/s)": 0.374458 + }, + { + "epoch": 2.85504, + "grad_norm": 0.7122357546057332, + "learning_rate": 3.185051452491511e-08, + "loss": 0.32163119316101074, + "memory(GiB)": 77.0, + "step": 8922, + "token_acc": 0.9327956989247311, + "train_speed(iter/s)": 0.374438 + }, + { + "epoch": 2.85536, + "grad_norm": 0.7372992693142338, + "learning_rate": 3.171034706618753e-08, + "loss": 0.2835473120212555, + "memory(GiB)": 77.0, + "step": 8923, + "token_acc": 0.9486373165618449, + "train_speed(iter/s)": 0.374416 + }, + { + "epoch": 2.85568, + "grad_norm": 0.736777235989294, + "learning_rate": 3.157048674328866e-08, + "loss": 0.35514095425605774, + "memory(GiB)": 77.0, + "step": 8924, + "token_acc": 0.9119846596356663, + "train_speed(iter/s)": 0.374396 + }, + { + "epoch": 2.856, + "grad_norm": 0.7873011691904046, + "learning_rate": 3.1430933573622094e-08, + "loss": 0.23536017537117004, + "memory(GiB)": 77.0, + "step": 8925, + "token_acc": 0.9242649242649242, + "train_speed(iter/s)": 0.374376 + }, + { + "epoch": 2.85632, + "grad_norm": 0.7132938690394258, + "learning_rate": 3.129168757455225e-08, + "loss": 0.29521191120147705, + "memory(GiB)": 77.0, + "step": 8926, + "token_acc": 0.8652350981118104, + "train_speed(iter/s)": 0.374355 + }, + { + "epoch": 2.85664, + "grad_norm": 0.7653666874503162, + "learning_rate": 3.1152748763406404e-08, + "loss": 0.35506099462509155, + "memory(GiB)": 77.0, + "step": 8927, + "token_acc": 0.9043683589138135, + 
"train_speed(iter/s)": 0.374333 + }, + { + "epoch": 2.85696, + "grad_norm": 0.7729049691837866, + "learning_rate": 3.101411715747238e-08, + "loss": 0.34666740894317627, + "memory(GiB)": 77.0, + "step": 8928, + "token_acc": 0.9194787838289342, + "train_speed(iter/s)": 0.374312 + }, + { + "epoch": 2.8572800000000003, + "grad_norm": 0.7140733792323176, + "learning_rate": 3.087579277400138e-08, + "loss": 0.2674674689769745, + "memory(GiB)": 77.0, + "step": 8929, + "token_acc": 0.9507612568837058, + "train_speed(iter/s)": 0.37429 + }, + { + "epoch": 2.8576, + "grad_norm": 0.7871258822064569, + "learning_rate": 3.073777563020519e-08, + "loss": 0.32827049493789673, + "memory(GiB)": 77.0, + "step": 8930, + "token_acc": 0.9057396928051739, + "train_speed(iter/s)": 0.374268 + }, + { + "epoch": 2.85792, + "grad_norm": 0.6797944309146726, + "learning_rate": 3.06000657432573e-08, + "loss": 0.23210187256336212, + "memory(GiB)": 77.0, + "step": 8931, + "token_acc": 0.8996231586159644, + "train_speed(iter/s)": 0.374249 + }, + { + "epoch": 2.85824, + "grad_norm": 0.7040293106790786, + "learning_rate": 3.0462663130293726e-08, + "loss": 0.28745079040527344, + "memory(GiB)": 77.0, + "step": 8932, + "token_acc": 0.9112364243943192, + "train_speed(iter/s)": 0.374228 + }, + { + "epoch": 2.8585599999999998, + "grad_norm": 0.715045015122342, + "learning_rate": 3.0325567808411614e-08, + "loss": 0.2632957100868225, + "memory(GiB)": 77.0, + "step": 8933, + "token_acc": 0.9551111111111111, + "train_speed(iter/s)": 0.374207 + }, + { + "epoch": 2.85888, + "grad_norm": 0.7248401807006222, + "learning_rate": 3.018877979467011e-08, + "loss": 0.3139210343360901, + "memory(GiB)": 77.0, + "step": 8934, + "token_acc": 0.8774956864678334, + "train_speed(iter/s)": 0.374187 + }, + { + "epoch": 2.8592, + "grad_norm": 0.6799791546044577, + "learning_rate": 3.005229910609003e-08, + "loss": 0.27800774574279785, + "memory(GiB)": 77.0, + "step": 8935, + "token_acc": 0.9079467817331895, + "train_speed(iter/s)": 0.374164 + }, + { + "epoch": 2.85952, + "grad_norm": 0.6297226715776182, + "learning_rate": 2.991612575965419e-08, + "loss": 0.30641603469848633, + "memory(GiB)": 77.0, + "step": 8936, + "token_acc": 0.924119241192412, + "train_speed(iter/s)": 0.374143 + }, + { + "epoch": 2.85984, + "grad_norm": 0.7164311493484617, + "learning_rate": 2.9780259772307362e-08, + "loss": 0.2664029896259308, + "memory(GiB)": 77.0, + "step": 8937, + "token_acc": 0.9573505654281098, + "train_speed(iter/s)": 0.374124 + }, + { + "epoch": 2.86016, + "grad_norm": 0.7603699540200183, + "learning_rate": 2.9644701160954914e-08, + "loss": 0.3514708876609802, + "memory(GiB)": 77.0, + "step": 8938, + "token_acc": 0.8299897119341564, + "train_speed(iter/s)": 0.374099 + }, + { + "epoch": 2.86048, + "grad_norm": 0.6924380038272069, + "learning_rate": 2.9509449942465017e-08, + "loss": 0.33399370312690735, + "memory(GiB)": 77.0, + "step": 8939, + "token_acc": 0.922679253764891, + "train_speed(iter/s)": 0.37408 + }, + { + "epoch": 2.8608000000000002, + "grad_norm": 0.7184866768823632, + "learning_rate": 2.9374506133667548e-08, + "loss": 0.220953106880188, + "memory(GiB)": 77.0, + "step": 8940, + "token_acc": 0.9734513274336283, + "train_speed(iter/s)": 0.374059 + }, + { + "epoch": 2.86112, + "grad_norm": 0.7830980939962534, + "learning_rate": 2.9239869751354068e-08, + "loss": 0.2831544578075409, + "memory(GiB)": 77.0, + "step": 8941, + "token_acc": 0.90625, + "train_speed(iter/s)": 0.374036 + }, + { + "epoch": 2.86144, + "grad_norm": 0.7877741527096074, + 
"learning_rate": 2.9105540812277288e-08, + "loss": 0.2872973680496216, + "memory(GiB)": 77.0, + "step": 8942, + "token_acc": 0.892088071618679, + "train_speed(iter/s)": 0.374017 + }, + { + "epoch": 2.86176, + "grad_norm": 0.6700796991899544, + "learning_rate": 2.8971519333152453e-08, + "loss": 0.260777086019516, + "memory(GiB)": 77.0, + "step": 8943, + "token_acc": 0.9329029282914457, + "train_speed(iter/s)": 0.373996 + }, + { + "epoch": 2.8620799999999997, + "grad_norm": 0.6543290077432257, + "learning_rate": 2.8837805330655944e-08, + "loss": 0.3486465811729431, + "memory(GiB)": 77.0, + "step": 8944, + "token_acc": 0.9069668649107901, + "train_speed(iter/s)": 0.373975 + }, + { + "epoch": 2.8624, + "grad_norm": 0.6975689118016918, + "learning_rate": 2.8704398821426395e-08, + "loss": 0.2719601094722748, + "memory(GiB)": 77.0, + "step": 8945, + "token_acc": 0.9569699686239355, + "train_speed(iter/s)": 0.373951 + }, + { + "epoch": 2.86272, + "grad_norm": 0.6947101617340328, + "learning_rate": 2.857129982206386e-08, + "loss": 0.31720489263534546, + "memory(GiB)": 77.0, + "step": 8946, + "token_acc": 0.9265658747300216, + "train_speed(iter/s)": 0.373928 + }, + { + "epoch": 2.86304, + "grad_norm": 0.7988005693051318, + "learning_rate": 2.843850834913009e-08, + "loss": 0.3161793649196625, + "memory(GiB)": 77.0, + "step": 8947, + "token_acc": 0.9273182957393483, + "train_speed(iter/s)": 0.373903 + }, + { + "epoch": 2.86336, + "grad_norm": 0.8333845331996452, + "learning_rate": 2.8306024419148814e-08, + "loss": 0.32689395546913147, + "memory(GiB)": 77.0, + "step": 8948, + "token_acc": 0.9261213720316622, + "train_speed(iter/s)": 0.373882 + }, + { + "epoch": 2.86368, + "grad_norm": 0.7754186111035551, + "learning_rate": 2.8173848048605456e-08, + "loss": 0.3193206191062927, + "memory(GiB)": 77.0, + "step": 8949, + "token_acc": 0.963452566096423, + "train_speed(iter/s)": 0.37386 + }, + { + "epoch": 2.864, + "grad_norm": 0.8080433892809423, + "learning_rate": 2.804197925394686e-08, + "loss": 0.22479504346847534, + "memory(GiB)": 77.0, + "step": 8950, + "token_acc": 0.9251592356687898, + "train_speed(iter/s)": 0.373836 + }, + { + "epoch": 2.86432, + "grad_norm": 0.7011102514997446, + "learning_rate": 2.7910418051581845e-08, + "loss": 0.3543543815612793, + "memory(GiB)": 77.0, + "step": 8951, + "token_acc": 0.8870814694320185, + "train_speed(iter/s)": 0.373812 + }, + { + "epoch": 2.86464, + "grad_norm": 0.6566059412212346, + "learning_rate": 2.7779164457881202e-08, + "loss": 0.2614983022212982, + "memory(GiB)": 77.0, + "step": 8952, + "token_acc": 0.9714946070878274, + "train_speed(iter/s)": 0.373793 + }, + { + "epoch": 2.86496, + "grad_norm": 0.7188301027377093, + "learning_rate": 2.7648218489176592e-08, + "loss": 0.2991558909416199, + "memory(GiB)": 77.0, + "step": 8953, + "token_acc": 0.8739263803680981, + "train_speed(iter/s)": 0.373771 + }, + { + "epoch": 2.8652800000000003, + "grad_norm": 0.6981286333219581, + "learning_rate": 2.751758016176248e-08, + "loss": 0.28794100880622864, + "memory(GiB)": 77.0, + "step": 8954, + "token_acc": 0.9446380178087496, + "train_speed(iter/s)": 0.373746 + }, + { + "epoch": 2.8656, + "grad_norm": 0.7228240660871, + "learning_rate": 2.7387249491894475e-08, + "loss": 0.4147053360939026, + "memory(GiB)": 77.0, + "step": 8955, + "token_acc": 0.87409200968523, + "train_speed(iter/s)": 0.373725 + }, + { + "epoch": 2.86592, + "grad_norm": 0.6865500642898503, + "learning_rate": 2.7257226495789603e-08, + "loss": 0.28789272904396057, + "memory(GiB)": 77.0, + "step": 8956, + 
"token_acc": 0.8669334667333667, + "train_speed(iter/s)": 0.373704 + }, + { + "epoch": 2.86624, + "grad_norm": 0.7429920025544217, + "learning_rate": 2.712751118962714e-08, + "loss": 0.29960897564888, + "memory(GiB)": 77.0, + "step": 8957, + "token_acc": 0.8960138648180243, + "train_speed(iter/s)": 0.37368 + }, + { + "epoch": 2.8665599999999998, + "grad_norm": 0.679140040506737, + "learning_rate": 2.6998103589548075e-08, + "loss": 0.32621514797210693, + "memory(GiB)": 77.0, + "step": 8958, + "token_acc": 0.9568457000931387, + "train_speed(iter/s)": 0.373657 + }, + { + "epoch": 2.86688, + "grad_norm": 0.7564824474766234, + "learning_rate": 2.6869003711655074e-08, + "loss": 0.3264716863632202, + "memory(GiB)": 77.0, + "step": 8959, + "token_acc": 0.8877040261153427, + "train_speed(iter/s)": 0.373634 + }, + { + "epoch": 2.8672, + "grad_norm": 0.739116402780837, + "learning_rate": 2.6740211572011955e-08, + "loss": 0.29062432050704956, + "memory(GiB)": 77.0, + "step": 8960, + "token_acc": 0.9147235176548968, + "train_speed(iter/s)": 0.373609 + }, + { + "epoch": 2.86752, + "grad_norm": 0.8118771458896599, + "learning_rate": 2.6611727186644788e-08, + "loss": 0.3135915994644165, + "memory(GiB)": 77.0, + "step": 8961, + "token_acc": 0.9598393574297188, + "train_speed(iter/s)": 0.37359 + }, + { + "epoch": 2.86784, + "grad_norm": 0.6828792987896124, + "learning_rate": 2.6483550571541337e-08, + "loss": 0.3182862401008606, + "memory(GiB)": 77.0, + "step": 8962, + "token_acc": 0.9723872771758126, + "train_speed(iter/s)": 0.373569 + }, + { + "epoch": 2.86816, + "grad_norm": 0.6827540127604033, + "learning_rate": 2.6355681742650796e-08, + "loss": 0.29913821816444397, + "memory(GiB)": 77.0, + "step": 8963, + "token_acc": 0.9244791666666666, + "train_speed(iter/s)": 0.37355 + }, + { + "epoch": 2.86848, + "grad_norm": 0.691734963231157, + "learning_rate": 2.6228120715884596e-08, + "loss": 0.2578310966491699, + "memory(GiB)": 77.0, + "step": 8964, + "token_acc": 0.8486707566462167, + "train_speed(iter/s)": 0.373526 + }, + { + "epoch": 2.8688000000000002, + "grad_norm": 0.753138072210965, + "learning_rate": 2.6100867507115324e-08, + "loss": 0.26625216007232666, + "memory(GiB)": 77.0, + "step": 8965, + "token_acc": 0.9068730325288562, + "train_speed(iter/s)": 0.373504 + }, + { + "epoch": 2.86912, + "grad_norm": 0.779584873990453, + "learning_rate": 2.5973922132177255e-08, + "loss": 0.2726030647754669, + "memory(GiB)": 77.0, + "step": 8966, + "token_acc": 0.936327853669831, + "train_speed(iter/s)": 0.373483 + }, + { + "epoch": 2.86944, + "grad_norm": 0.7430139661559623, + "learning_rate": 2.584728460686664e-08, + "loss": 0.28719478845596313, + "memory(GiB)": 77.0, + "step": 8967, + "token_acc": 0.9150171549221431, + "train_speed(iter/s)": 0.373463 + }, + { + "epoch": 2.86976, + "grad_norm": 0.7303135721026218, + "learning_rate": 2.5720954946941156e-08, + "loss": 0.27763521671295166, + "memory(GiB)": 77.0, + "step": 8968, + "token_acc": 0.9493564633463906, + "train_speed(iter/s)": 0.373439 + }, + { + "epoch": 2.8700799999999997, + "grad_norm": 0.7012119895294774, + "learning_rate": 2.5594933168120728e-08, + "loss": 0.29524534940719604, + "memory(GiB)": 77.0, + "step": 8969, + "token_acc": 0.9147375658950263, + "train_speed(iter/s)": 0.373415 + }, + { + "epoch": 2.8704, + "grad_norm": 0.6978786521851702, + "learning_rate": 2.5469219286086144e-08, + "loss": 0.3240591585636139, + "memory(GiB)": 77.0, + "step": 8970, + "token_acc": 0.8817733990147784, + "train_speed(iter/s)": 0.373393 + }, + { + "epoch": 2.87072, + 
"grad_norm": 0.7268440452310204, + "learning_rate": 2.5343813316480727e-08, + "loss": 0.2705664038658142, + "memory(GiB)": 77.0, + "step": 8971, + "token_acc": 0.9181318681318681, + "train_speed(iter/s)": 0.37337 + }, + { + "epoch": 2.87104, + "grad_norm": 0.6628239113694512, + "learning_rate": 2.5218715274908657e-08, + "loss": 0.270906537771225, + "memory(GiB)": 77.0, + "step": 8972, + "token_acc": 0.8821898821898821, + "train_speed(iter/s)": 0.373349 + }, + { + "epoch": 2.87136, + "grad_norm": 0.6567577439478457, + "learning_rate": 2.509392517693665e-08, + "loss": 0.2704901695251465, + "memory(GiB)": 77.0, + "step": 8973, + "token_acc": 0.9157058672815783, + "train_speed(iter/s)": 0.373324 + }, + { + "epoch": 2.87168, + "grad_norm": 0.7566487514640007, + "learning_rate": 2.4969443038092288e-08, + "loss": 0.299686461687088, + "memory(GiB)": 77.0, + "step": 8974, + "token_acc": 0.8611036339165545, + "train_speed(iter/s)": 0.373303 + }, + { + "epoch": 2.872, + "grad_norm": 0.7411700829135044, + "learning_rate": 2.4845268873865404e-08, + "loss": 0.3073410391807556, + "memory(GiB)": 77.0, + "step": 8975, + "token_acc": 0.8926961226330027, + "train_speed(iter/s)": 0.373283 + }, + { + "epoch": 2.87232, + "grad_norm": 0.7219368353140715, + "learning_rate": 2.4721402699707242e-08, + "loss": 0.2537820339202881, + "memory(GiB)": 77.0, + "step": 8976, + "token_acc": 0.8871391076115486, + "train_speed(iter/s)": 0.373262 + }, + { + "epoch": 2.87264, + "grad_norm": 0.6949570300399125, + "learning_rate": 2.4597844531031034e-08, + "loss": 0.26978376507759094, + "memory(GiB)": 77.0, + "step": 8977, + "token_acc": 0.9559613319011815, + "train_speed(iter/s)": 0.373241 + }, + { + "epoch": 2.87296, + "grad_norm": 0.7435518061893054, + "learning_rate": 2.4474594383211147e-08, + "loss": 0.3405592739582062, + "memory(GiB)": 77.0, + "step": 8978, + "token_acc": 0.9191402251791198, + "train_speed(iter/s)": 0.37322 + }, + { + "epoch": 2.87328, + "grad_norm": 0.7838994225331846, + "learning_rate": 2.4351652271583926e-08, + "loss": 0.3731797933578491, + "memory(GiB)": 77.0, + "step": 8979, + "token_acc": 0.8634795465746673, + "train_speed(iter/s)": 0.373196 + }, + { + "epoch": 2.8736, + "grad_norm": 0.6539771538259651, + "learning_rate": 2.4229018211447687e-08, + "loss": 0.3401296138763428, + "memory(GiB)": 77.0, + "step": 8980, + "token_acc": 0.8626387813064808, + "train_speed(iter/s)": 0.373169 + }, + { + "epoch": 2.87392, + "grad_norm": 0.8021924390850784, + "learning_rate": 2.410669221806189e-08, + "loss": 0.33973684906959534, + "memory(GiB)": 77.0, + "step": 8981, + "token_acc": 0.9166200335758254, + "train_speed(iter/s)": 0.373143 + }, + { + "epoch": 2.87424, + "grad_norm": 0.6719079384249901, + "learning_rate": 2.398467430664797e-08, + "loss": 0.28742316365242004, + "memory(GiB)": 77.0, + "step": 8982, + "token_acc": 0.8773321895775252, + "train_speed(iter/s)": 0.37312 + }, + { + "epoch": 2.87456, + "grad_norm": 0.7438535438680424, + "learning_rate": 2.3862964492388784e-08, + "loss": 0.32606276869773865, + "memory(GiB)": 77.0, + "step": 8983, + "token_acc": 0.8788968824940048, + "train_speed(iter/s)": 0.3731 + }, + { + "epoch": 2.87488, + "grad_norm": 0.7348838034261848, + "learning_rate": 2.374156279042916e-08, + "loss": 0.32686710357666016, + "memory(GiB)": 77.0, + "step": 8984, + "token_acc": 0.8639023149325872, + "train_speed(iter/s)": 0.37308 + }, + { + "epoch": 2.8752, + "grad_norm": 0.7645905963426151, + "learning_rate": 2.3620469215875353e-08, + "loss": 0.2990000247955322, + "memory(GiB)": 77.0, + 
"step": 8985, + "token_acc": 0.882729736388273, + "train_speed(iter/s)": 0.373059 + }, + { + "epoch": 2.87552, + "grad_norm": 0.6665933984554547, + "learning_rate": 2.34996837837953e-08, + "loss": 0.308451771736145, + "memory(GiB)": 77.0, + "step": 8986, + "token_acc": 0.9333333333333333, + "train_speed(iter/s)": 0.373039 + }, + { + "epoch": 2.87584, + "grad_norm": 0.6921843719104959, + "learning_rate": 2.3379206509218933e-08, + "loss": 0.2714043855667114, + "memory(GiB)": 77.0, + "step": 8987, + "token_acc": 0.9199190517490604, + "train_speed(iter/s)": 0.373017 + }, + { + "epoch": 2.87616, + "grad_norm": 0.725754293712637, + "learning_rate": 2.3259037407137307e-08, + "loss": 0.26748594641685486, + "memory(GiB)": 77.0, + "step": 8988, + "token_acc": 0.9539438856537851, + "train_speed(iter/s)": 0.372995 + }, + { + "epoch": 2.87648, + "grad_norm": 0.7267407645414818, + "learning_rate": 2.313917649250347e-08, + "loss": 0.242276132106781, + "memory(GiB)": 77.0, + "step": 8989, + "token_acc": 0.8530666666666666, + "train_speed(iter/s)": 0.372975 + }, + { + "epoch": 2.8768000000000002, + "grad_norm": 0.7962624478425121, + "learning_rate": 2.301962378023187e-08, + "loss": 0.2995522618293762, + "memory(GiB)": 77.0, + "step": 8990, + "token_acc": 0.8832799145299145, + "train_speed(iter/s)": 0.372954 + }, + { + "epoch": 2.87712, + "grad_norm": 0.7185801349261732, + "learning_rate": 2.290037928519895e-08, + "loss": 0.3648620843887329, + "memory(GiB)": 77.0, + "step": 8991, + "token_acc": 0.8223468006959211, + "train_speed(iter/s)": 0.372928 + }, + { + "epoch": 2.87744, + "grad_norm": 0.6572179977240761, + "learning_rate": 2.2781443022242555e-08, + "loss": 0.2855796217918396, + "memory(GiB)": 77.0, + "step": 8992, + "token_acc": 0.8318735719725818, + "train_speed(iter/s)": 0.372903 + }, + { + "epoch": 2.87776, + "grad_norm": 0.7169146590228146, + "learning_rate": 2.266281500616224e-08, + "loss": 0.4013986587524414, + "memory(GiB)": 77.0, + "step": 8993, + "token_acc": 0.8881030563273554, + "train_speed(iter/s)": 0.372877 + }, + { + "epoch": 2.8780799999999997, + "grad_norm": 0.6787043968587916, + "learning_rate": 2.2544495251719255e-08, + "loss": 0.2964688241481781, + "memory(GiB)": 77.0, + "step": 8994, + "token_acc": 0.910564342901777, + "train_speed(iter/s)": 0.372855 + }, + { + "epoch": 2.8784, + "grad_norm": 0.7647392788260318, + "learning_rate": 2.2426483773636543e-08, + "loss": 0.2776896357536316, + "memory(GiB)": 77.0, + "step": 8995, + "token_acc": 0.86810551558753, + "train_speed(iter/s)": 0.372836 + }, + { + "epoch": 2.87872, + "grad_norm": 0.7285956953902686, + "learning_rate": 2.2308780586598467e-08, + "loss": 0.34767091274261475, + "memory(GiB)": 77.0, + "step": 8996, + "token_acc": 0.9182948490230906, + "train_speed(iter/s)": 0.37281 + }, + { + "epoch": 2.87904, + "grad_norm": 0.67431470725514, + "learning_rate": 2.2191385705250812e-08, + "loss": 0.26074784994125366, + "memory(GiB)": 77.0, + "step": 8997, + "token_acc": 0.88930183169631, + "train_speed(iter/s)": 0.37279 + }, + { + "epoch": 2.87936, + "grad_norm": 0.7449091907270913, + "learning_rate": 2.2074299144201892e-08, + "loss": 0.2751113772392273, + "memory(GiB)": 77.0, + "step": 8998, + "token_acc": 0.9371850856567013, + "train_speed(iter/s)": 0.372771 + }, + { + "epoch": 2.87968, + "grad_norm": 0.6908874342963479, + "learning_rate": 2.1957520918020892e-08, + "loss": 0.33383411169052124, + "memory(GiB)": 77.0, + "step": 8999, + "token_acc": 0.8860244233378561, + "train_speed(iter/s)": 0.372745 + }, + { + "epoch": 2.88, + 
"grad_norm": 0.7346434386421741, + "learning_rate": 2.1841051041238682e-08, + "loss": 0.3026975989341736, + "memory(GiB)": 77.0, + "step": 9000, + "token_acc": 0.9213417026284818, + "train_speed(iter/s)": 0.372722 + }, + { + "epoch": 2.88032, + "grad_norm": 0.7309041388614098, + "learning_rate": 2.1724889528348115e-08, + "loss": 0.2817738652229309, + "memory(GiB)": 77.0, + "step": 9001, + "token_acc": 0.9177335875783166, + "train_speed(iter/s)": 0.372702 + }, + { + "epoch": 2.88064, + "grad_norm": 0.7517422128073578, + "learning_rate": 2.1609036393803183e-08, + "loss": 0.25156527757644653, + "memory(GiB)": 77.0, + "step": 9002, + "token_acc": 0.9331926863572433, + "train_speed(iter/s)": 0.372681 + }, + { + "epoch": 2.88096, + "grad_norm": 0.7111569404504661, + "learning_rate": 2.149349165202014e-08, + "loss": 0.3393073081970215, + "memory(GiB)": 77.0, + "step": 9003, + "token_acc": 0.9081095313322801, + "train_speed(iter/s)": 0.372659 + }, + { + "epoch": 2.88128, + "grad_norm": 0.6935273201891603, + "learning_rate": 2.137825531737664e-08, + "loss": 0.32699891924858093, + "memory(GiB)": 77.0, + "step": 9004, + "token_acc": 0.8582198443579766, + "train_speed(iter/s)": 0.372639 + }, + { + "epoch": 2.8816, + "grad_norm": 0.7653472601077742, + "learning_rate": 2.126332740421122e-08, + "loss": 0.28606119751930237, + "memory(GiB)": 77.0, + "step": 9005, + "token_acc": 0.8946315438479493, + "train_speed(iter/s)": 0.372618 + }, + { + "epoch": 2.88192, + "grad_norm": 0.7256043242801787, + "learning_rate": 2.1148707926825217e-08, + "loss": 0.3401833176612854, + "memory(GiB)": 77.0, + "step": 9006, + "token_acc": 0.9360354654844838, + "train_speed(iter/s)": 0.372598 + }, + { + "epoch": 2.88224, + "grad_norm": 0.6905090384429964, + "learning_rate": 2.1034396899481114e-08, + "loss": 0.34488171339035034, + "memory(GiB)": 77.0, + "step": 9007, + "token_acc": 0.9063097514340345, + "train_speed(iter/s)": 0.372576 + }, + { + "epoch": 2.88256, + "grad_norm": 0.7099921122577297, + "learning_rate": 2.092039433640225e-08, + "loss": 0.3095886707305908, + "memory(GiB)": 77.0, + "step": 9008, + "token_acc": 0.9288702928870293, + "train_speed(iter/s)": 0.372549 + }, + { + "epoch": 2.88288, + "grad_norm": 0.8122772646500963, + "learning_rate": 2.0806700251775057e-08, + "loss": 0.34773877263069153, + "memory(GiB)": 77.0, + "step": 9009, + "token_acc": 0.8974906567004806, + "train_speed(iter/s)": 0.372528 + }, + { + "epoch": 2.8832, + "grad_norm": 0.7282614543307365, + "learning_rate": 2.0693314659746276e-08, + "loss": 0.35552680492401123, + "memory(GiB)": 77.0, + "step": 9010, + "token_acc": 0.9347934295669488, + "train_speed(iter/s)": 0.372509 + }, + { + "epoch": 2.88352, + "grad_norm": 0.6237863160147736, + "learning_rate": 2.0580237574424898e-08, + "loss": 0.27689671516418457, + "memory(GiB)": 77.0, + "step": 9011, + "token_acc": 0.8657058630854897, + "train_speed(iter/s)": 0.37248 + }, + { + "epoch": 2.88384, + "grad_norm": 0.7129299366153693, + "learning_rate": 2.0467469009881613e-08, + "loss": 0.3111921548843384, + "memory(GiB)": 77.0, + "step": 9012, + "token_acc": 0.844246308163834, + "train_speed(iter/s)": 0.372458 + }, + { + "epoch": 2.88416, + "grad_norm": 0.6280805776735109, + "learning_rate": 2.035500898014825e-08, + "loss": 0.2815280556678772, + "memory(GiB)": 77.0, + "step": 9013, + "token_acc": 0.9678886047172492, + "train_speed(iter/s)": 0.372432 + }, + { + "epoch": 2.88448, + "grad_norm": 0.7703620508802811, + "learning_rate": 2.0242857499218616e-08, + "loss": 0.24248868227005005, + "memory(GiB)": 
77.0, + "step": 9014, + "token_acc": 0.9486944571690334, + "train_speed(iter/s)": 0.37241 + }, + { + "epoch": 2.8848000000000003, + "grad_norm": 0.719441967218677, + "learning_rate": 2.0131014581047937e-08, + "loss": 0.3000001311302185, + "memory(GiB)": 77.0, + "step": 9015, + "token_acc": 0.892354124748491, + "train_speed(iter/s)": 0.372388 + }, + { + "epoch": 2.88512, + "grad_norm": 0.7667045613865866, + "learning_rate": 2.0019480239553414e-08, + "loss": 0.2985367178916931, + "memory(GiB)": 77.0, + "step": 9016, + "token_acc": 0.8953004164187983, + "train_speed(iter/s)": 0.372368 + }, + { + "epoch": 2.88544, + "grad_norm": 0.6911159834738955, + "learning_rate": 1.990825448861311e-08, + "loss": 0.25875288248062134, + "memory(GiB)": 77.0, + "step": 9017, + "token_acc": 0.9552683896620279, + "train_speed(iter/s)": 0.372349 + }, + { + "epoch": 2.88576, + "grad_norm": 0.7076763094547093, + "learning_rate": 1.9797337342067625e-08, + "loss": 0.2802925407886505, + "memory(GiB)": 77.0, + "step": 9018, + "token_acc": 0.8738631870304469, + "train_speed(iter/s)": 0.372329 + }, + { + "epoch": 2.8860799999999998, + "grad_norm": 0.7068154680547719, + "learning_rate": 1.9686728813718138e-08, + "loss": 0.3206325173377991, + "memory(GiB)": 77.0, + "step": 9019, + "token_acc": 0.9131159247798143, + "train_speed(iter/s)": 0.372304 + }, + { + "epoch": 2.8864, + "grad_norm": 0.708516992037563, + "learning_rate": 1.957642891732836e-08, + "loss": 0.28517860174179077, + "memory(GiB)": 77.0, + "step": 9020, + "token_acc": 0.8874160480735243, + "train_speed(iter/s)": 0.372283 + }, + { + "epoch": 2.88672, + "grad_norm": 0.7857314445131877, + "learning_rate": 1.946643766662315e-08, + "loss": 0.36962229013442993, + "memory(GiB)": 77.0, + "step": 9021, + "token_acc": 0.871896722939424, + "train_speed(iter/s)": 0.372261 + }, + { + "epoch": 2.88704, + "grad_norm": 0.7423350090551103, + "learning_rate": 1.935675507528878e-08, + "loss": 0.3207760751247406, + "memory(GiB)": 77.0, + "step": 9022, + "token_acc": 0.8721584984358707, + "train_speed(iter/s)": 0.372243 + }, + { + "epoch": 2.88736, + "grad_norm": 0.7112090744194061, + "learning_rate": 1.9247381156973776e-08, + "loss": 0.25035083293914795, + "memory(GiB)": 77.0, + "step": 9023, + "token_acc": 0.9036544850498339, + "train_speed(iter/s)": 0.372223 + }, + { + "epoch": 2.88768, + "grad_norm": 0.7104224671213305, + "learning_rate": 1.9138315925287532e-08, + "loss": 0.29242074489593506, + "memory(GiB)": 77.0, + "step": 9024, + "token_acc": 0.8567882849342269, + "train_speed(iter/s)": 0.372199 + }, + { + "epoch": 2.888, + "grad_norm": 0.7005820645036217, + "learning_rate": 1.9029559393801132e-08, + "loss": 0.23250551521778107, + "memory(GiB)": 77.0, + "step": 9025, + "token_acc": 0.9631961259079903, + "train_speed(iter/s)": 0.372181 + }, + { + "epoch": 2.88832, + "grad_norm": 0.7411018608843745, + "learning_rate": 1.8921111576047646e-08, + "loss": 0.3261237144470215, + "memory(GiB)": 77.0, + "step": 9026, + "token_acc": 0.946656649135988, + "train_speed(iter/s)": 0.372162 + }, + { + "epoch": 2.88864, + "grad_norm": 0.7618642698460095, + "learning_rate": 1.8812972485521554e-08, + "loss": 0.3321933150291443, + "memory(GiB)": 77.0, + "step": 9027, + "token_acc": 0.9735381988903116, + "train_speed(iter/s)": 0.372139 + }, + { + "epoch": 2.88896, + "grad_norm": 0.6957028325630208, + "learning_rate": 1.8705142135679035e-08, + "loss": 0.3715957701206207, + "memory(GiB)": 77.0, + "step": 9028, + "token_acc": 0.8533941814033086, + "train_speed(iter/s)": 0.372115 + }, + { + 
"epoch": 2.88928, + "grad_norm": 0.770865651579625, + "learning_rate": 1.859762053993741e-08, + "loss": 0.34184345602989197, + "memory(GiB)": 77.0, + "step": 9029, + "token_acc": 0.8524719322434509, + "train_speed(iter/s)": 0.372093 + }, + { + "epoch": 2.8895999999999997, + "grad_norm": 0.7545133508946277, + "learning_rate": 1.849040771167626e-08, + "loss": 0.31996703147888184, + "memory(GiB)": 77.0, + "step": 9030, + "token_acc": 0.9168440006547717, + "train_speed(iter/s)": 0.372073 + }, + { + "epoch": 2.88992, + "grad_norm": 0.920204984445389, + "learning_rate": 1.8383503664235748e-08, + "loss": 0.27843862771987915, + "memory(GiB)": 77.0, + "step": 9031, + "token_acc": 0.9360152043078872, + "train_speed(iter/s)": 0.372054 + }, + { + "epoch": 2.89024, + "grad_norm": 0.7109416523806813, + "learning_rate": 1.8276908410918836e-08, + "loss": 0.21961119771003723, + "memory(GiB)": 77.0, + "step": 9032, + "token_acc": 0.9471935007385525, + "train_speed(iter/s)": 0.372033 + }, + { + "epoch": 2.89056, + "grad_norm": 0.7498892584564643, + "learning_rate": 1.8170621964989365e-08, + "loss": 0.30758482217788696, + "memory(GiB)": 77.0, + "step": 9033, + "token_acc": 0.8730964467005076, + "train_speed(iter/s)": 0.372013 + }, + { + "epoch": 2.89088, + "grad_norm": 0.8123353771638469, + "learning_rate": 1.806464433967231e-08, + "loss": 0.314689576625824, + "memory(GiB)": 77.0, + "step": 9034, + "token_acc": 0.9595698924731183, + "train_speed(iter/s)": 0.371987 + }, + { + "epoch": 2.8912, + "grad_norm": 0.6985464660973103, + "learning_rate": 1.7958975548155454e-08, + "loss": 0.30742377042770386, + "memory(GiB)": 77.0, + "step": 9035, + "token_acc": 0.8913773796192609, + "train_speed(iter/s)": 0.371966 + }, + { + "epoch": 2.89152, + "grad_norm": 0.7136223629419552, + "learning_rate": 1.7853615603586892e-08, + "loss": 0.3090452551841736, + "memory(GiB)": 77.0, + "step": 9036, + "token_acc": 0.9029404733444896, + "train_speed(iter/s)": 0.371944 + }, + { + "epoch": 2.89184, + "grad_norm": 0.694916886306606, + "learning_rate": 1.7748564519076973e-08, + "loss": 0.27796897292137146, + "memory(GiB)": 77.0, + "step": 9037, + "token_acc": 0.8924583741429971, + "train_speed(iter/s)": 0.371924 + }, + { + "epoch": 2.89216, + "grad_norm": 0.7770438731251602, + "learning_rate": 1.764382230769801e-08, + "loss": 0.27133578062057495, + "memory(GiB)": 77.0, + "step": 9038, + "token_acc": 0.8543633211327297, + "train_speed(iter/s)": 0.371903 + }, + { + "epoch": 2.89248, + "grad_norm": 0.7934870084342442, + "learning_rate": 1.753938898248264e-08, + "loss": 0.2613781690597534, + "memory(GiB)": 77.0, + "step": 9039, + "token_acc": 0.9173658784642766, + "train_speed(iter/s)": 0.371882 + }, + { + "epoch": 2.8928000000000003, + "grad_norm": 0.7369635939997787, + "learning_rate": 1.7435264556426023e-08, + "loss": 0.3337996006011963, + "memory(GiB)": 77.0, + "step": 9040, + "token_acc": 0.9063118539696503, + "train_speed(iter/s)": 0.371861 + }, + { + "epoch": 2.89312, + "grad_norm": 0.7041269102533045, + "learning_rate": 1.7331449042484462e-08, + "loss": 0.2815530598163605, + "memory(GiB)": 77.0, + "step": 9041, + "token_acc": 0.9344660194174758, + "train_speed(iter/s)": 0.37184 + }, + { + "epoch": 2.89344, + "grad_norm": 0.730413890646214, + "learning_rate": 1.7227942453576507e-08, + "loss": 0.35290002822875977, + "memory(GiB)": 77.0, + "step": 9042, + "token_acc": 0.9657931034482758, + "train_speed(iter/s)": 0.37182 + }, + { + "epoch": 2.89376, + "grad_norm": 1.2763566185353747, + "learning_rate": 1.7124744802581307e-08, + "loss": 
0.24367991089820862, + "memory(GiB)": 77.0, + "step": 9043, + "token_acc": 0.9576964095271951, + "train_speed(iter/s)": 0.3718 + }, + { + "epoch": 2.8940799999999998, + "grad_norm": 0.687234213847542, + "learning_rate": 1.702185610234025e-08, + "loss": 0.36006075143814087, + "memory(GiB)": 77.0, + "step": 9044, + "token_acc": 0.9254477301124532, + "train_speed(iter/s)": 0.371777 + }, + { + "epoch": 2.8944, + "grad_norm": 0.6968872389621954, + "learning_rate": 1.6919276365656155e-08, + "loss": 0.2974131107330322, + "memory(GiB)": 77.0, + "step": 9045, + "token_acc": 0.8948339483394834, + "train_speed(iter/s)": 0.371756 + }, + { + "epoch": 2.89472, + "grad_norm": 0.7550958314847575, + "learning_rate": 1.68170056052927e-08, + "loss": 0.3441196382045746, + "memory(GiB)": 77.0, + "step": 9046, + "token_acc": 0.854895792371215, + "train_speed(iter/s)": 0.371735 + }, + { + "epoch": 2.89504, + "grad_norm": 0.7366140760313543, + "learning_rate": 1.6715043833976373e-08, + "loss": 0.33846503496170044, + "memory(GiB)": 77.0, + "step": 9047, + "token_acc": 0.9465008675534992, + "train_speed(iter/s)": 0.371714 + }, + { + "epoch": 2.89536, + "grad_norm": 0.6795411641389523, + "learning_rate": 1.6613391064394247e-08, + "loss": 0.3846178948879242, + "memory(GiB)": 77.0, + "step": 9048, + "token_acc": 0.8635610079575596, + "train_speed(iter/s)": 0.37169 + }, + { + "epoch": 2.89568, + "grad_norm": 0.7617614238927728, + "learning_rate": 1.6512047309195366e-08, + "loss": 0.3229562044143677, + "memory(GiB)": 77.0, + "step": 9049, + "token_acc": 0.8787330316742081, + "train_speed(iter/s)": 0.371669 + }, + { + "epoch": 2.896, + "grad_norm": 0.703633060319948, + "learning_rate": 1.6411012580989928e-08, + "loss": 0.2731649577617645, + "memory(GiB)": 77.0, + "step": 9050, + "token_acc": 0.9052325581395348, + "train_speed(iter/s)": 0.371649 + }, + { + "epoch": 2.8963200000000002, + "grad_norm": 0.6534217225957593, + "learning_rate": 1.6310286892350368e-08, + "loss": 0.31836754083633423, + "memory(GiB)": 77.0, + "step": 9051, + "token_acc": 0.9245441795231416, + "train_speed(iter/s)": 0.371624 + }, + { + "epoch": 2.89664, + "grad_norm": 0.69487122186469, + "learning_rate": 1.620987025581e-08, + "loss": 0.23808856308460236, + "memory(GiB)": 77.0, + "step": 9052, + "token_acc": 0.893739230327398, + "train_speed(iter/s)": 0.371604 + }, + { + "epoch": 2.89696, + "grad_norm": 0.7221278689569636, + "learning_rate": 1.610976268386383e-08, + "loss": 0.35088205337524414, + "memory(GiB)": 77.0, + "step": 9053, + "token_acc": 0.9485190409026798, + "train_speed(iter/s)": 0.371578 + }, + { + "epoch": 2.89728, + "grad_norm": 0.6498220450118889, + "learning_rate": 1.600996418896855e-08, + "loss": 0.2253338098526001, + "memory(GiB)": 77.0, + "step": 9054, + "token_acc": 0.9059490084985836, + "train_speed(iter/s)": 0.371557 + }, + { + "epoch": 2.8975999999999997, + "grad_norm": 0.6735130736853581, + "learning_rate": 1.5910474783542574e-08, + "loss": 0.27871131896972656, + "memory(GiB)": 77.0, + "step": 9055, + "token_acc": 0.9246789309267616, + "train_speed(iter/s)": 0.371533 + }, + { + "epoch": 2.89792, + "grad_norm": 0.8307156995066682, + "learning_rate": 1.581129447996571e-08, + "loss": 0.2894759178161621, + "memory(GiB)": 77.0, + "step": 9056, + "token_acc": 0.917963224893918, + "train_speed(iter/s)": 0.371514 + }, + { + "epoch": 2.89824, + "grad_norm": 0.7789275019955303, + "learning_rate": 1.5712423290578647e-08, + "loss": 0.32204729318618774, + "memory(GiB)": 77.0, + "step": 9057, + "token_acc": 0.8807291666666667, + 
"train_speed(iter/s)": 0.371495 + }, + { + "epoch": 2.89856, + "grad_norm": 0.7179891473636231, + "learning_rate": 1.5613861227684602e-08, + "loss": 0.30379804968833923, + "memory(GiB)": 77.0, + "step": 9058, + "token_acc": 0.9515640766902119, + "train_speed(iter/s)": 0.371471 + }, + { + "epoch": 2.89888, + "grad_norm": 0.7995107902076051, + "learning_rate": 1.5515608303547924e-08, + "loss": 0.3842611014842987, + "memory(GiB)": 77.0, + "step": 9059, + "token_acc": 0.8715023676280672, + "train_speed(iter/s)": 0.371451 + }, + { + "epoch": 2.8992, + "grad_norm": 0.6925468478800242, + "learning_rate": 1.5417664530394672e-08, + "loss": 0.2669164538383484, + "memory(GiB)": 77.0, + "step": 9060, + "token_acc": 0.8924384525205158, + "train_speed(iter/s)": 0.371429 + }, + { + "epoch": 2.89952, + "grad_norm": 0.7264680044480091, + "learning_rate": 1.5320029920411762e-08, + "loss": 0.2811959981918335, + "memory(GiB)": 77.0, + "step": 9061, + "token_acc": 0.9316738633477267, + "train_speed(iter/s)": 0.371411 + }, + { + "epoch": 2.89984, + "grad_norm": 0.7335716222598208, + "learning_rate": 1.5222704485748363e-08, + "loss": 0.26324263215065, + "memory(GiB)": 77.0, + "step": 9062, + "token_acc": 0.9553128103277061, + "train_speed(iter/s)": 0.371391 + }, + { + "epoch": 2.90016, + "grad_norm": 0.7421504394946365, + "learning_rate": 1.5125688238515067e-08, + "loss": 0.3471965491771698, + "memory(GiB)": 77.0, + "step": 9063, + "token_acc": 0.8313077297034517, + "train_speed(iter/s)": 0.37137 + }, + { + "epoch": 2.90048, + "grad_norm": 0.687750115288777, + "learning_rate": 1.502898119078361e-08, + "loss": 0.3175849914550781, + "memory(GiB)": 77.0, + "step": 9064, + "token_acc": 0.8804223744292238, + "train_speed(iter/s)": 0.371344 + }, + { + "epoch": 2.9008000000000003, + "grad_norm": 0.7186001588978943, + "learning_rate": 1.4932583354587693e-08, + "loss": 0.3545909523963928, + "memory(GiB)": 77.0, + "step": 9065, + "token_acc": 0.8953771289537713, + "train_speed(iter/s)": 0.371321 + }, + { + "epoch": 2.90112, + "grad_norm": 0.8133380779411113, + "learning_rate": 1.4836494741922447e-08, + "loss": 0.3223455250263214, + "memory(GiB)": 77.0, + "step": 9066, + "token_acc": 0.8953726920446775, + "train_speed(iter/s)": 0.371299 + }, + { + "epoch": 2.90144, + "grad_norm": 0.7752980190496795, + "learning_rate": 1.4740715364744141e-08, + "loss": 0.32584983110427856, + "memory(GiB)": 77.0, + "step": 9067, + "token_acc": 0.9539495798319327, + "train_speed(iter/s)": 0.37128 + }, + { + "epoch": 2.90176, + "grad_norm": 0.7239702125391121, + "learning_rate": 1.4645245234971018e-08, + "loss": 0.3265604078769684, + "memory(GiB)": 77.0, + "step": 9068, + "token_acc": 0.916896551724138, + "train_speed(iter/s)": 0.371259 + }, + { + "epoch": 2.9020799999999998, + "grad_norm": 0.7781414257501501, + "learning_rate": 1.4550084364482742e-08, + "loss": 0.3306674361228943, + "memory(GiB)": 77.0, + "step": 9069, + "token_acc": 0.9026548672566371, + "train_speed(iter/s)": 0.371239 + }, + { + "epoch": 2.9024, + "grad_norm": 0.6799133416186107, + "learning_rate": 1.4455232765120397e-08, + "loss": 0.23319703340530396, + "memory(GiB)": 77.0, + "step": 9070, + "token_acc": 0.9513686662783926, + "train_speed(iter/s)": 0.371219 + }, + { + "epoch": 2.90272, + "grad_norm": 0.7205762188262572, + "learning_rate": 1.4360690448686487e-08, + "loss": 0.2962512671947479, + "memory(GiB)": 77.0, + "step": 9071, + "token_acc": 0.8790322580645161, + "train_speed(iter/s)": 0.371196 + }, + { + "epoch": 2.90304, + "grad_norm": 0.80027695447528, + 
"learning_rate": 1.4266457426945212e-08, + "loss": 0.2740810811519623, + "memory(GiB)": 77.0, + "step": 9072, + "token_acc": 0.9435933147632312, + "train_speed(iter/s)": 0.371178 + }, + { + "epoch": 2.90336, + "grad_norm": 0.7499698382093684, + "learning_rate": 1.4172533711622194e-08, + "loss": 0.36324524879455566, + "memory(GiB)": 77.0, + "step": 9073, + "token_acc": 0.8678471575023299, + "train_speed(iter/s)": 0.371154 + }, + { + "epoch": 2.90368, + "grad_norm": 0.6361578621969987, + "learning_rate": 1.4078919314404749e-08, + "loss": 0.28675368428230286, + "memory(GiB)": 77.0, + "step": 9074, + "token_acc": 0.921476343819668, + "train_speed(iter/s)": 0.371133 + }, + { + "epoch": 2.904, + "grad_norm": 0.781753962742139, + "learning_rate": 1.3985614246941614e-08, + "loss": 0.2602759301662445, + "memory(GiB)": 77.0, + "step": 9075, + "token_acc": 0.9520631067961165, + "train_speed(iter/s)": 0.371114 + }, + { + "epoch": 2.9043200000000002, + "grad_norm": 0.7341158442148968, + "learning_rate": 1.3892618520842672e-08, + "loss": 0.32110798358917236, + "memory(GiB)": 77.0, + "step": 9076, + "token_acc": 0.8640832851359167, + "train_speed(iter/s)": 0.371095 + }, + { + "epoch": 2.90464, + "grad_norm": 0.7917838790098898, + "learning_rate": 1.3799932147680051e-08, + "loss": 0.2724943459033966, + "memory(GiB)": 77.0, + "step": 9077, + "token_acc": 0.8880197612186085, + "train_speed(iter/s)": 0.37107 + }, + { + "epoch": 2.90496, + "grad_norm": 0.768076640193533, + "learning_rate": 1.370755513898675e-08, + "loss": 0.24479730427265167, + "memory(GiB)": 77.0, + "step": 9078, + "token_acc": 0.897742363877822, + "train_speed(iter/s)": 0.37105 + }, + { + "epoch": 2.90528, + "grad_norm": 0.7139336466864794, + "learning_rate": 1.3615487506257464e-08, + "loss": 0.27546799182891846, + "memory(GiB)": 77.0, + "step": 9079, + "token_acc": 0.9339920948616601, + "train_speed(iter/s)": 0.371029 + }, + { + "epoch": 2.9055999999999997, + "grad_norm": 0.7948254488778685, + "learning_rate": 1.3523729260948304e-08, + "loss": 0.26695236563682556, + "memory(GiB)": 77.0, + "step": 9080, + "token_acc": 0.8889711324944486, + "train_speed(iter/s)": 0.371006 + }, + { + "epoch": 2.90592, + "grad_norm": 0.6727512399263984, + "learning_rate": 1.343228041447736e-08, + "loss": 0.29951801896095276, + "memory(GiB)": 77.0, + "step": 9081, + "token_acc": 0.8986822207820263, + "train_speed(iter/s)": 0.370985 + }, + { + "epoch": 2.90624, + "grad_norm": 0.6758426603611476, + "learning_rate": 1.3341140978223311e-08, + "loss": 0.2701188325881958, + "memory(GiB)": 77.0, + "step": 9082, + "token_acc": 0.9605263157894737, + "train_speed(iter/s)": 0.370962 + }, + { + "epoch": 2.90656, + "grad_norm": 0.7340217122809263, + "learning_rate": 1.3250310963527358e-08, + "loss": 0.2839905917644501, + "memory(GiB)": 77.0, + "step": 9083, + "token_acc": 0.9090720365372639, + "train_speed(iter/s)": 0.37094 + }, + { + "epoch": 2.90688, + "grad_norm": 0.7086699211881216, + "learning_rate": 1.3159790381691573e-08, + "loss": 0.32186001539230347, + "memory(GiB)": 77.0, + "step": 9084, + "token_acc": 0.8777709359605911, + "train_speed(iter/s)": 0.370921 + }, + { + "epoch": 2.9072, + "grad_norm": 0.7554855222979722, + "learning_rate": 1.3069579243979725e-08, + "loss": 0.36593571305274963, + "memory(GiB)": 77.0, + "step": 9085, + "token_acc": 0.9361283061339336, + "train_speed(iter/s)": 0.370902 + }, + { + "epoch": 2.90752, + "grad_norm": 0.7144556265293286, + "learning_rate": 1.2979677561616721e-08, + "loss": 0.2726309895515442, + "memory(GiB)": 77.0, + "step": 
9086, + "token_acc": 0.8638533511552415, + "train_speed(iter/s)": 0.370883 + }, + { + "epoch": 2.90784, + "grad_norm": 0.6498363125758722, + "learning_rate": 1.2890085345789727e-08, + "loss": 0.25313442945480347, + "memory(GiB)": 77.0, + "step": 9087, + "token_acc": 0.9374193548387096, + "train_speed(iter/s)": 0.370859 + }, + { + "epoch": 2.90816, + "grad_norm": 0.6984857732793487, + "learning_rate": 1.280080260764649e-08, + "loss": 0.33404073119163513, + "memory(GiB)": 77.0, + "step": 9088, + "token_acc": 0.8606496404661542, + "train_speed(iter/s)": 0.37084 + }, + { + "epoch": 2.90848, + "grad_norm": 0.7208880033819708, + "learning_rate": 1.2711829358297011e-08, + "loss": 0.35190749168395996, + "memory(GiB)": 77.0, + "step": 9089, + "token_acc": 0.928428285141712, + "train_speed(iter/s)": 0.370818 + }, + { + "epoch": 2.9088000000000003, + "grad_norm": 0.8212919691330254, + "learning_rate": 1.262316560881216e-08, + "loss": 0.3645915389060974, + "memory(GiB)": 77.0, + "step": 9090, + "token_acc": 0.9048723897911833, + "train_speed(iter/s)": 0.370796 + }, + { + "epoch": 2.90912, + "grad_norm": 0.7385350147304215, + "learning_rate": 1.2534811370225054e-08, + "loss": 0.3403594493865967, + "memory(GiB)": 77.0, + "step": 9091, + "token_acc": 0.8296323941033775, + "train_speed(iter/s)": 0.370772 + }, + { + "epoch": 2.90944, + "grad_norm": 0.7390949679617927, + "learning_rate": 1.2446766653529396e-08, + "loss": 0.37862539291381836, + "memory(GiB)": 77.0, + "step": 9092, + "token_acc": 0.8370755242159955, + "train_speed(iter/s)": 0.370751 + }, + { + "epoch": 2.90976, + "grad_norm": 0.7740354159012551, + "learning_rate": 1.2359031469681427e-08, + "loss": 0.3460977077484131, + "memory(GiB)": 77.0, + "step": 9093, + "token_acc": 0.923513986013986, + "train_speed(iter/s)": 0.37073 + }, + { + "epoch": 2.91008, + "grad_norm": 0.6840458989040672, + "learning_rate": 1.227160582959741e-08, + "loss": 0.2623731195926666, + "memory(GiB)": 77.0, + "step": 9094, + "token_acc": 0.9654640764724021, + "train_speed(iter/s)": 0.370709 + }, + { + "epoch": 2.9104, + "grad_norm": 0.8105464735620274, + "learning_rate": 1.2184489744156702e-08, + "loss": 0.35649099946022034, + "memory(GiB)": 77.0, + "step": 9095, + "token_acc": 0.9304862842892768, + "train_speed(iter/s)": 0.37069 + }, + { + "epoch": 2.91072, + "grad_norm": 0.7006904276289356, + "learning_rate": 1.2097683224199242e-08, + "loss": 0.2961099147796631, + "memory(GiB)": 77.0, + "step": 9096, + "token_acc": 0.9138068635275339, + "train_speed(iter/s)": 0.370668 + }, + { + "epoch": 2.91104, + "grad_norm": 0.7410624901783349, + "learning_rate": 1.2011186280526111e-08, + "loss": 0.3479321002960205, + "memory(GiB)": 77.0, + "step": 9097, + "token_acc": 0.9279251170046802, + "train_speed(iter/s)": 0.370647 + }, + { + "epoch": 2.91136, + "grad_norm": 0.7667922177135088, + "learning_rate": 1.1924998923900921e-08, + "loss": 0.3167038559913635, + "memory(GiB)": 77.0, + "step": 9098, + "token_acc": 0.8764616166751398, + "train_speed(iter/s)": 0.370627 + }, + { + "epoch": 2.91168, + "grad_norm": 0.7528544076114099, + "learning_rate": 1.1839121165047596e-08, + "loss": 0.3423294723033905, + "memory(GiB)": 77.0, + "step": 9099, + "token_acc": 0.9194715319282794, + "train_speed(iter/s)": 0.370603 + }, + { + "epoch": 2.912, + "grad_norm": 0.7418980182174734, + "learning_rate": 1.1753553014652863e-08, + "loss": 0.2608336806297302, + "memory(GiB)": 77.0, + "step": 9100, + "token_acc": 0.9484262419416003, + "train_speed(iter/s)": 0.370578 + }, + { + "epoch": 2.9123200000000002, + 
"grad_norm": 0.6971743028225679, + "learning_rate": 1.1668294483363762e-08, + "loss": 0.3187609016895294, + "memory(GiB)": 77.0, + "step": 9101, + "token_acc": 0.9225161987041036, + "train_speed(iter/s)": 0.370558 + }, + { + "epoch": 2.91264, + "grad_norm": 0.6763279615532966, + "learning_rate": 1.1583345581789307e-08, + "loss": 0.31431299448013306, + "memory(GiB)": 77.0, + "step": 9102, + "token_acc": 0.9229473684210526, + "train_speed(iter/s)": 0.370532 + }, + { + "epoch": 2.91296, + "grad_norm": 0.763086346394773, + "learning_rate": 1.1498706320499931e-08, + "loss": 0.36606666445732117, + "memory(GiB)": 77.0, + "step": 9103, + "token_acc": 0.8923352221890845, + "train_speed(iter/s)": 0.370512 + }, + { + "epoch": 2.91328, + "grad_norm": 0.7096560081170883, + "learning_rate": 1.1414376710027209e-08, + "loss": 0.25826317071914673, + "memory(GiB)": 77.0, + "step": 9104, + "token_acc": 0.9351851851851852, + "train_speed(iter/s)": 0.370492 + }, + { + "epoch": 2.9135999999999997, + "grad_norm": 0.7738867496999877, + "learning_rate": 1.1330356760865247e-08, + "loss": 0.3297611474990845, + "memory(GiB)": 77.0, + "step": 9105, + "token_acc": 0.9141847551741544, + "train_speed(iter/s)": 0.370473 + }, + { + "epoch": 2.91392, + "grad_norm": 0.7437845572024603, + "learning_rate": 1.1246646483468183e-08, + "loss": 0.40918809175491333, + "memory(GiB)": 77.0, + "step": 9106, + "token_acc": 0.9104084321475626, + "train_speed(iter/s)": 0.370451 + }, + { + "epoch": 2.91424, + "grad_norm": 0.7555665519208483, + "learning_rate": 1.1163245888252683e-08, + "loss": 0.35111165046691895, + "memory(GiB)": 77.0, + "step": 9107, + "token_acc": 0.929317626139142, + "train_speed(iter/s)": 0.37043 + }, + { + "epoch": 2.91456, + "grad_norm": 0.6945314344895949, + "learning_rate": 1.1080154985596281e-08, + "loss": 0.2991568148136139, + "memory(GiB)": 77.0, + "step": 9108, + "token_acc": 0.9158530915853091, + "train_speed(iter/s)": 0.370409 + }, + { + "epoch": 2.91488, + "grad_norm": 0.7414293266035454, + "learning_rate": 1.0997373785837928e-08, + "loss": 0.31649935245513916, + "memory(GiB)": 77.0, + "step": 9109, + "token_acc": 0.9310941828254847, + "train_speed(iter/s)": 0.370389 + }, + { + "epoch": 2.9152, + "grad_norm": 0.7278107057891112, + "learning_rate": 1.0914902299279106e-08, + "loss": 0.3255000114440918, + "memory(GiB)": 77.0, + "step": 9110, + "token_acc": 0.8021425448185395, + "train_speed(iter/s)": 0.370358 + }, + { + "epoch": 2.91552, + "grad_norm": 0.7345343128074214, + "learning_rate": 1.0832740536181608e-08, + "loss": 0.30252528190612793, + "memory(GiB)": 77.0, + "step": 9111, + "token_acc": 0.9041977852176153, + "train_speed(iter/s)": 0.370335 + }, + { + "epoch": 2.91584, + "grad_norm": 0.6879509320252131, + "learning_rate": 1.0750888506768643e-08, + "loss": 0.33802443742752075, + "memory(GiB)": 77.0, + "step": 9112, + "token_acc": 0.9458312531203196, + "train_speed(iter/s)": 0.370312 + }, + { + "epoch": 2.91616, + "grad_norm": 0.7344494811130122, + "learning_rate": 1.0669346221225674e-08, + "loss": 0.2456211894750595, + "memory(GiB)": 77.0, + "step": 9113, + "token_acc": 0.9623800383877159, + "train_speed(iter/s)": 0.370291 + }, + { + "epoch": 2.91648, + "grad_norm": 0.7965320288113222, + "learning_rate": 1.0588113689699309e-08, + "loss": 0.2947825789451599, + "memory(GiB)": 77.0, + "step": 9114, + "token_acc": 0.8852956038403234, + "train_speed(iter/s)": 0.370272 + }, + { + "epoch": 2.9168, + "grad_norm": 0.7350603767266309, + "learning_rate": 1.0507190922297294e-08, + "loss": 0.24634060263633728, + 
"memory(GiB)": 77.0, + "step": 9115, + "token_acc": 0.901103230890465, + "train_speed(iter/s)": 0.370251 + }, + { + "epoch": 2.91712, + "grad_norm": 0.7324209417402727, + "learning_rate": 1.04265779290888e-08, + "loss": 0.2857329249382019, + "memory(GiB)": 77.0, + "step": 9116, + "token_acc": 0.9505283381364072, + "train_speed(iter/s)": 0.370229 + }, + { + "epoch": 2.91744, + "grad_norm": 0.7547672635386666, + "learning_rate": 1.034627472010552e-08, + "loss": 0.30531466007232666, + "memory(GiB)": 77.0, + "step": 9117, + "token_acc": 0.9645293315143247, + "train_speed(iter/s)": 0.37021 + }, + { + "epoch": 2.91776, + "grad_norm": 0.7341905804893765, + "learning_rate": 1.026628130533891e-08, + "loss": 0.4283413290977478, + "memory(GiB)": 77.0, + "step": 9118, + "token_acc": 0.8032282859338971, + "train_speed(iter/s)": 0.370186 + }, + { + "epoch": 2.91808, + "grad_norm": 0.7090378518368816, + "learning_rate": 1.0186597694743505e-08, + "loss": 0.28940749168395996, + "memory(GiB)": 77.0, + "step": 9119, + "token_acc": 0.9145280071412631, + "train_speed(iter/s)": 0.370162 + }, + { + "epoch": 2.9184, + "grad_norm": 0.7501938129391131, + "learning_rate": 1.0107223898233876e-08, + "loss": 0.2819303870201111, + "memory(GiB)": 77.0, + "step": 9120, + "token_acc": 0.9394261424017003, + "train_speed(iter/s)": 0.370138 + }, + { + "epoch": 2.91872, + "grad_norm": 0.71784441235411, + "learning_rate": 1.002815992568712e-08, + "loss": 0.3132855296134949, + "memory(GiB)": 77.0, + "step": 9121, + "token_acc": 0.9657664511220997, + "train_speed(iter/s)": 0.370117 + }, + { + "epoch": 2.91904, + "grad_norm": 0.7158560876568408, + "learning_rate": 9.949405786941479e-09, + "loss": 0.2607344090938568, + "memory(GiB)": 77.0, + "step": 9122, + "token_acc": 0.9351927809680065, + "train_speed(iter/s)": 0.370097 + }, + { + "epoch": 2.91936, + "grad_norm": 0.7230527261368932, + "learning_rate": 9.870961491796337e-09, + "loss": 0.3255852460861206, + "memory(GiB)": 77.0, + "step": 9123, + "token_acc": 0.925869432580842, + "train_speed(iter/s)": 0.370077 + }, + { + "epoch": 2.91968, + "grad_norm": 0.7164505495329212, + "learning_rate": 9.792827050012776e-09, + "loss": 0.2796318233013153, + "memory(GiB)": 77.0, + "step": 9124, + "token_acc": 0.9400597269624573, + "train_speed(iter/s)": 0.370058 + }, + { + "epoch": 2.92, + "grad_norm": 0.7105358380369385, + "learning_rate": 9.715002471313018e-09, + "loss": 0.31541532278060913, + "memory(GiB)": 77.0, + "step": 9125, + "token_acc": 0.87391874180865, + "train_speed(iter/s)": 0.370038 + }, + { + "epoch": 2.9203200000000002, + "grad_norm": 0.6942129793286218, + "learning_rate": 9.637487765381537e-09, + "loss": 0.3013293445110321, + "memory(GiB)": 77.0, + "step": 9126, + "token_acc": 0.9563636363636364, + "train_speed(iter/s)": 0.370017 + }, + { + "epoch": 2.92064, + "grad_norm": 0.8154045616503904, + "learning_rate": 9.56028294186312e-09, + "loss": 0.26003873348236084, + "memory(GiB)": 77.0, + "step": 9127, + "token_acc": 0.9258823529411765, + "train_speed(iter/s)": 0.369998 + }, + { + "epoch": 2.92096, + "grad_norm": 0.7321188308909045, + "learning_rate": 9.483388010364802e-09, + "loss": 0.33987271785736084, + "memory(GiB)": 77.0, + "step": 9128, + "token_acc": 0.914396887159533, + "train_speed(iter/s)": 0.369979 + }, + { + "epoch": 2.92128, + "grad_norm": 0.7047951965921957, + "learning_rate": 9.406802980455043e-09, + "loss": 0.31911328434944153, + "memory(GiB)": 77.0, + "step": 9129, + "token_acc": 0.8839399338254008, + "train_speed(iter/s)": 0.369956 + }, + { + "epoch": 
2.9215999999999998, + "grad_norm": 0.7044676457358974, + "learning_rate": 9.330527861663164e-09, + "loss": 0.3035579323768616, + "memory(GiB)": 77.0, + "step": 9130, + "token_acc": 0.9505102040816327, + "train_speed(iter/s)": 0.369935 + }, + { + "epoch": 2.92192, + "grad_norm": 0.7194088181451491, + "learning_rate": 9.25456266348046e-09, + "loss": 0.33598342537879944, + "memory(GiB)": 77.0, + "step": 9131, + "token_acc": 0.9742736077481841, + "train_speed(iter/s)": 0.369913 + }, + { + "epoch": 2.92224, + "grad_norm": 0.6817076639961029, + "learning_rate": 9.178907395359648e-09, + "loss": 0.2945846915245056, + "memory(GiB)": 77.0, + "step": 9132, + "token_acc": 0.9272365805168986, + "train_speed(iter/s)": 0.369893 + }, + { + "epoch": 2.92256, + "grad_norm": 0.7930797971278876, + "learning_rate": 9.103562066714311e-09, + "loss": 0.39467066526412964, + "memory(GiB)": 77.0, + "step": 9133, + "token_acc": 0.8742196107234668, + "train_speed(iter/s)": 0.369872 + }, + { + "epoch": 2.92288, + "grad_norm": 0.691101892700171, + "learning_rate": 9.02852668692028e-09, + "loss": 0.2798277735710144, + "memory(GiB)": 77.0, + "step": 9134, + "token_acc": 0.8552531162554058, + "train_speed(iter/s)": 0.369852 + }, + { + "epoch": 2.9232, + "grad_norm": 1.542282718636782, + "learning_rate": 8.953801265313977e-09, + "loss": 0.2559569478034973, + "memory(GiB)": 77.0, + "step": 9135, + "token_acc": 0.9039256198347108, + "train_speed(iter/s)": 0.36983 + }, + { + "epoch": 2.92352, + "grad_norm": 0.731180272360801, + "learning_rate": 8.879385811194075e-09, + "loss": 0.3653903603553772, + "memory(GiB)": 77.0, + "step": 9136, + "token_acc": 0.9296413980935089, + "train_speed(iter/s)": 0.369802 + }, + { + "epoch": 2.92384, + "grad_norm": 0.7973653945870541, + "learning_rate": 8.805280333819832e-09, + "loss": 0.3116019368171692, + "memory(GiB)": 77.0, + "step": 9137, + "token_acc": 0.8677982309347358, + "train_speed(iter/s)": 0.369781 + }, + { + "epoch": 2.92416, + "grad_norm": 0.6761252755905537, + "learning_rate": 8.73148484241304e-09, + "loss": 0.39909663796424866, + "memory(GiB)": 77.0, + "step": 9138, + "token_acc": 0.9014487228364468, + "train_speed(iter/s)": 0.369759 + }, + { + "epoch": 2.92448, + "grad_norm": 0.775105663274635, + "learning_rate": 8.657999346155798e-09, + "loss": 0.23569630086421967, + "memory(GiB)": 77.0, + "step": 9139, + "token_acc": 0.8710801393728222, + "train_speed(iter/s)": 0.369739 + }, + { + "epoch": 2.9248, + "grad_norm": 0.7621445609387401, + "learning_rate": 8.584823854192181e-09, + "loss": 0.2957698702812195, + "memory(GiB)": 77.0, + "step": 9140, + "token_acc": 0.8978349120433018, + "train_speed(iter/s)": 0.369719 + }, + { + "epoch": 2.92512, + "grad_norm": 0.6953236056825177, + "learning_rate": 8.511958375627683e-09, + "loss": 0.24241727590560913, + "memory(GiB)": 77.0, + "step": 9141, + "token_acc": 0.94136460554371, + "train_speed(iter/s)": 0.369701 + }, + { + "epoch": 2.92544, + "grad_norm": 0.7571966229739737, + "learning_rate": 8.439402919528938e-09, + "loss": 0.41427648067474365, + "memory(GiB)": 77.0, + "step": 9142, + "token_acc": 0.8631296449215524, + "train_speed(iter/s)": 0.369682 + }, + { + "epoch": 2.92576, + "grad_norm": 0.811563873734001, + "learning_rate": 8.36715749492456e-09, + "loss": 0.32297569513320923, + "memory(GiB)": 77.0, + "step": 9143, + "token_acc": 0.8988047808764941, + "train_speed(iter/s)": 0.369664 + }, + { + "epoch": 2.92608, + "grad_norm": 0.7282568065776656, + "learning_rate": 8.295222110804025e-09, + "loss": 0.26808711886405945, + "memory(GiB)": 
77.0, + "step": 9144, + "token_acc": 0.9062658334740753, + "train_speed(iter/s)": 0.369644 + }, + { + "epoch": 2.9264, + "grad_norm": 0.7125309764481865, + "learning_rate": 8.223596776118504e-09, + "loss": 0.2982272803783417, + "memory(GiB)": 77.0, + "step": 9145, + "token_acc": 0.9253532391362304, + "train_speed(iter/s)": 0.369623 + }, + { + "epoch": 2.92672, + "grad_norm": 0.7118361384326137, + "learning_rate": 8.152281499780312e-09, + "loss": 0.2957252562046051, + "memory(GiB)": 77.0, + "step": 9146, + "token_acc": 0.8923933209647495, + "train_speed(iter/s)": 0.3696 + }, + { + "epoch": 2.92704, + "grad_norm": 0.7090958989642193, + "learning_rate": 8.081276290663742e-09, + "loss": 0.3194488286972046, + "memory(GiB)": 77.0, + "step": 9147, + "token_acc": 0.9564186184405574, + "train_speed(iter/s)": 0.369577 + }, + { + "epoch": 2.92736, + "grad_norm": 0.6813535498782197, + "learning_rate": 8.010581157604224e-09, + "loss": 0.3066102862358093, + "memory(GiB)": 77.0, + "step": 9148, + "token_acc": 0.9064339781328847, + "train_speed(iter/s)": 0.369556 + }, + { + "epoch": 2.92768, + "grad_norm": 0.7079923525575295, + "learning_rate": 7.940196109398057e-09, + "loss": 0.3305760324001312, + "memory(GiB)": 77.0, + "step": 9149, + "token_acc": 0.8752293577981651, + "train_speed(iter/s)": 0.369534 + }, + { + "epoch": 2.928, + "grad_norm": 0.7246645366790255, + "learning_rate": 7.870121154803789e-09, + "loss": 0.35167181491851807, + "memory(GiB)": 77.0, + "step": 9150, + "token_acc": 0.8746067415730338, + "train_speed(iter/s)": 0.369513 + }, + { + "epoch": 2.9283200000000003, + "grad_norm": 0.7409722370031057, + "learning_rate": 7.800356302540834e-09, + "loss": 0.3418147563934326, + "memory(GiB)": 77.0, + "step": 9151, + "token_acc": 0.8863882863340564, + "train_speed(iter/s)": 0.369494 + }, + { + "epoch": 2.92864, + "grad_norm": 0.6959219760517253, + "learning_rate": 7.730901561290583e-09, + "loss": 0.30648887157440186, + "memory(GiB)": 77.0, + "step": 9152, + "token_acc": 0.8994914747233024, + "train_speed(iter/s)": 0.369471 + }, + { + "epoch": 2.92896, + "grad_norm": 0.6861169853023363, + "learning_rate": 7.66175693969501e-09, + "loss": 0.3162272572517395, + "memory(GiB)": 77.0, + "step": 9153, + "token_acc": 0.932806324110672, + "train_speed(iter/s)": 0.369447 + }, + { + "epoch": 2.92928, + "grad_norm": 0.7693807283110717, + "learning_rate": 7.592922446358342e-09, + "loss": 0.28796401619911194, + "memory(GiB)": 77.0, + "step": 9154, + "token_acc": 0.8971816283924844, + "train_speed(iter/s)": 0.369426 + }, + { + "epoch": 2.9295999999999998, + "grad_norm": 0.7896108096077524, + "learning_rate": 7.524398089845396e-09, + "loss": 0.3416770100593567, + "memory(GiB)": 77.0, + "step": 9155, + "token_acc": 0.911158117398202, + "train_speed(iter/s)": 0.369407 + }, + { + "epoch": 2.92992, + "grad_norm": 0.7166059666690883, + "learning_rate": 7.456183878683243e-09, + "loss": 0.2842450439929962, + "memory(GiB)": 77.0, + "step": 9156, + "token_acc": 0.8816936488169365, + "train_speed(iter/s)": 0.369386 + }, + { + "epoch": 2.93024, + "grad_norm": 0.6777682720150116, + "learning_rate": 7.388279821360089e-09, + "loss": 0.30780282616615295, + "memory(GiB)": 77.0, + "step": 9157, + "token_acc": 0.8844488844488845, + "train_speed(iter/s)": 0.369363 + }, + { + "epoch": 2.93056, + "grad_norm": 0.9065514506409916, + "learning_rate": 7.320685926324733e-09, + "loss": 0.28011974692344666, + "memory(GiB)": 77.0, + "step": 9158, + "token_acc": 0.9076865109269028, + "train_speed(iter/s)": 0.369341 + }, + { + "epoch": 2.93088, 
+ "grad_norm": 0.8116631042338289, + "learning_rate": 7.253402201988779e-09, + "loss": 0.339947372674942, + "memory(GiB)": 77.0, + "step": 9159, + "token_acc": 0.9445048966267682, + "train_speed(iter/s)": 0.36932 + }, + { + "epoch": 2.9312, + "grad_norm": 0.7091376858789153, + "learning_rate": 7.186428656724143e-09, + "loss": 0.3288813829421997, + "memory(GiB)": 77.0, + "step": 9160, + "token_acc": 0.8683068017366136, + "train_speed(iter/s)": 0.3693 + }, + { + "epoch": 2.93152, + "grad_norm": 0.641560896005483, + "learning_rate": 7.119765298864712e-09, + "loss": 0.23942044377326965, + "memory(GiB)": 77.0, + "step": 9161, + "token_acc": 0.8752399232245681, + "train_speed(iter/s)": 0.369281 + }, + { + "epoch": 2.9318400000000002, + "grad_norm": 0.7187120450305656, + "learning_rate": 7.053412136705518e-09, + "loss": 0.29333460330963135, + "memory(GiB)": 77.0, + "step": 9162, + "token_acc": 0.8875291375291375, + "train_speed(iter/s)": 0.36926 + }, + { + "epoch": 2.93216, + "grad_norm": 0.8419534603152597, + "learning_rate": 6.987369178502734e-09, + "loss": 0.3746543526649475, + "memory(GiB)": 77.0, + "step": 9163, + "token_acc": 0.8437288898896644, + "train_speed(iter/s)": 0.369241 + }, + { + "epoch": 2.93248, + "grad_norm": 0.7844732861692295, + "learning_rate": 6.921636432474787e-09, + "loss": 0.32513946294784546, + "memory(GiB)": 77.0, + "step": 9164, + "token_acc": 0.966268446943078, + "train_speed(iter/s)": 0.369219 + }, + { + "epoch": 2.9328, + "grad_norm": 0.77868179509391, + "learning_rate": 6.856213906800691e-09, + "loss": 0.3052266240119934, + "memory(GiB)": 77.0, + "step": 9165, + "token_acc": 0.9457236842105263, + "train_speed(iter/s)": 0.369201 + }, + { + "epoch": 2.9331199999999997, + "grad_norm": 0.6580643466573721, + "learning_rate": 6.791101609621153e-09, + "loss": 0.2889590859413147, + "memory(GiB)": 77.0, + "step": 9166, + "token_acc": 0.9267734553775744, + "train_speed(iter/s)": 0.369179 + }, + { + "epoch": 2.93344, + "grad_norm": 0.7133912848169032, + "learning_rate": 6.726299549038584e-09, + "loss": 0.34924769401550293, + "memory(GiB)": 77.0, + "step": 9167, + "token_acc": 0.9045158391372725, + "train_speed(iter/s)": 0.369159 + }, + { + "epoch": 2.93376, + "grad_norm": 0.731749604630232, + "learning_rate": 6.661807733115977e-09, + "loss": 0.343478798866272, + "memory(GiB)": 77.0, + "step": 9168, + "token_acc": 0.8922888616891065, + "train_speed(iter/s)": 0.369138 + }, + { + "epoch": 2.93408, + "grad_norm": 0.7210201342706134, + "learning_rate": 6.597626169878302e-09, + "loss": 0.22543124854564667, + "memory(GiB)": 77.0, + "step": 9169, + "token_acc": 0.9573748308525034, + "train_speed(iter/s)": 0.369118 + }, + { + "epoch": 2.9344, + "grad_norm": 0.6960530027148941, + "learning_rate": 6.5337548673122256e-09, + "loss": 0.29002031683921814, + "memory(GiB)": 77.0, + "step": 9170, + "token_acc": 0.896896551724138, + "train_speed(iter/s)": 0.369098 + }, + { + "epoch": 2.93472, + "grad_norm": 0.7076955249853666, + "learning_rate": 6.470193833365002e-09, + "loss": 0.279178261756897, + "memory(GiB)": 77.0, + "step": 9171, + "token_acc": 0.9116952155936208, + "train_speed(iter/s)": 0.369075 + }, + { + "epoch": 2.93504, + "grad_norm": 0.7094715630354251, + "learning_rate": 6.40694307594586e-09, + "loss": 0.3023797571659088, + "memory(GiB)": 77.0, + "step": 9172, + "token_acc": 0.9042224510813595, + "train_speed(iter/s)": 0.369058 + }, + { + "epoch": 2.93536, + "grad_norm": 0.7007840567984077, + "learning_rate": 6.344002602925448e-09, + "loss": 0.26724597811698914, + "memory(GiB)": 
77.0, + "step": 9173, + "token_acc": 0.8699602498580352, + "train_speed(iter/s)": 0.369037 + }, + { + "epoch": 2.93568, + "grad_norm": 0.7444748389618052, + "learning_rate": 6.281372422135279e-09, + "loss": 0.40055933594703674, + "memory(GiB)": 77.0, + "step": 9174, + "token_acc": 0.8768913342503438, + "train_speed(iter/s)": 0.369017 + }, + { + "epoch": 2.936, + "grad_norm": 0.7196150108203558, + "learning_rate": 6.219052541368842e-09, + "loss": 0.3401404023170471, + "memory(GiB)": 77.0, + "step": 9175, + "token_acc": 0.8968696918223732, + "train_speed(iter/s)": 0.368998 + }, + { + "epoch": 2.9363200000000003, + "grad_norm": 0.6905678758656444, + "learning_rate": 6.157042968380766e-09, + "loss": 0.27820464968681335, + "memory(GiB)": 77.0, + "step": 9176, + "token_acc": 0.9403095062638173, + "train_speed(iter/s)": 0.36898 + }, + { + "epoch": 2.93664, + "grad_norm": 0.7841354092007173, + "learning_rate": 6.095343710886825e-09, + "loss": 0.3790501356124878, + "memory(GiB)": 77.0, + "step": 9177, + "token_acc": 0.8820238384821212, + "train_speed(iter/s)": 0.368959 + }, + { + "epoch": 2.93696, + "grad_norm": 0.7313040854775116, + "learning_rate": 6.033954776564488e-09, + "loss": 0.33058255910873413, + "memory(GiB)": 77.0, + "step": 9178, + "token_acc": 0.9500918554807104, + "train_speed(iter/s)": 0.368941 + }, + { + "epoch": 2.93728, + "grad_norm": 0.7868941193505635, + "learning_rate": 5.972876173052922e-09, + "loss": 0.39141830801963806, + "memory(GiB)": 77.0, + "step": 9179, + "token_acc": 0.8734486649116209, + "train_speed(iter/s)": 0.36892 + }, + { + "epoch": 2.9375999999999998, + "grad_norm": 0.9648393817179118, + "learning_rate": 5.912107907951881e-09, + "loss": 0.3438611626625061, + "memory(GiB)": 77.0, + "step": 9180, + "token_acc": 0.9060579455662863, + "train_speed(iter/s)": 0.368894 + }, + { + "epoch": 2.93792, + "grad_norm": 0.7036929879849768, + "learning_rate": 5.8516499888230935e-09, + "loss": 0.279007226228714, + "memory(GiB)": 77.0, + "step": 9181, + "token_acc": 0.9266832917705735, + "train_speed(iter/s)": 0.368871 + }, + { + "epoch": 2.93824, + "grad_norm": 0.755626403997069, + "learning_rate": 5.791502423189432e-09, + "loss": 0.23830285668373108, + "memory(GiB)": 77.0, + "step": 9182, + "token_acc": 0.8332703213610586, + "train_speed(iter/s)": 0.36885 + }, + { + "epoch": 2.93856, + "grad_norm": 0.7407927193167251, + "learning_rate": 5.731665218535187e-09, + "loss": 0.35066694021224976, + "memory(GiB)": 77.0, + "step": 9183, + "token_acc": 0.9535342157138834, + "train_speed(iter/s)": 0.368831 + }, + { + "epoch": 2.93888, + "grad_norm": 0.7601722972442714, + "learning_rate": 5.672138382306347e-09, + "loss": 0.3137689530849457, + "memory(GiB)": 77.0, + "step": 9184, + "token_acc": 0.9088035019455253, + "train_speed(iter/s)": 0.368813 + }, + { + "epoch": 2.9392, + "grad_norm": 0.7461196087151583, + "learning_rate": 5.612921921909487e-09, + "loss": 0.2697591781616211, + "memory(GiB)": 77.0, + "step": 9185, + "token_acc": 0.9117328128388635, + "train_speed(iter/s)": 0.368783 + }, + { + "epoch": 2.93952, + "grad_norm": 0.7780218616598826, + "learning_rate": 5.554015844713434e-09, + "loss": 0.2846243977546692, + "memory(GiB)": 77.0, + "step": 9186, + "token_acc": 0.9234777838727373, + "train_speed(iter/s)": 0.368763 + }, + { + "epoch": 2.9398400000000002, + "grad_norm": 0.6270681650221687, + "learning_rate": 5.495420158047881e-09, + "loss": 0.27923524379730225, + "memory(GiB)": 77.0, + "step": 9187, + "token_acc": 0.9273122959738846, + "train_speed(iter/s)": 0.368741 + }, + { + 
"epoch": 2.94016, + "grad_norm": 0.69384592537868, + "learning_rate": 5.437134869204219e-09, + "loss": 0.26788461208343506, + "memory(GiB)": 77.0, + "step": 9188, + "token_acc": 0.9037037037037037, + "train_speed(iter/s)": 0.368719 + }, + { + "epoch": 2.94048, + "grad_norm": 0.7348538069098369, + "learning_rate": 5.379159985434701e-09, + "loss": 0.35700520873069763, + "memory(GiB)": 77.0, + "step": 9189, + "token_acc": 0.9295810196870268, + "train_speed(iter/s)": 0.368698 + }, + { + "epoch": 2.9408, + "grad_norm": 0.7265668501890691, + "learning_rate": 5.321495513953834e-09, + "loss": 0.27159231901168823, + "memory(GiB)": 77.0, + "step": 9190, + "token_acc": 0.9329052969502408, + "train_speed(iter/s)": 0.368679 + }, + { + "epoch": 2.9411199999999997, + "grad_norm": 0.7469202326022912, + "learning_rate": 5.264141461936434e-09, + "loss": 0.3865172863006592, + "memory(GiB)": 77.0, + "step": 9191, + "token_acc": 0.8907546024681368, + "train_speed(iter/s)": 0.368659 + }, + { + "epoch": 2.94144, + "grad_norm": 0.6880283976062301, + "learning_rate": 5.20709783651957e-09, + "loss": 0.27478039264678955, + "memory(GiB)": 77.0, + "step": 9192, + "token_acc": 0.8862149039476462, + "train_speed(iter/s)": 0.36864 + }, + { + "epoch": 2.94176, + "grad_norm": 0.6995082733504051, + "learning_rate": 5.150364644801176e-09, + "loss": 0.2856408953666687, + "memory(GiB)": 77.0, + "step": 9193, + "token_acc": 0.8972121212121212, + "train_speed(iter/s)": 0.368619 + }, + { + "epoch": 2.94208, + "grad_norm": 0.6786337408177899, + "learning_rate": 5.0939418938406035e-09, + "loss": 0.28396183252334595, + "memory(GiB)": 77.0, + "step": 9194, + "token_acc": 0.9153548387096774, + "train_speed(iter/s)": 0.368601 + }, + { + "epoch": 2.9424, + "grad_norm": 0.7698487746533659, + "learning_rate": 5.0378295906589046e-09, + "loss": 0.31944936513900757, + "memory(GiB)": 77.0, + "step": 9195, + "token_acc": 0.929070929070929, + "train_speed(iter/s)": 0.368582 + }, + { + "epoch": 2.94272, + "grad_norm": 0.6730543265859442, + "learning_rate": 4.982027742238272e-09, + "loss": 0.28425872325897217, + "memory(GiB)": 77.0, + "step": 9196, + "token_acc": 0.9679633867276888, + "train_speed(iter/s)": 0.36856 + }, + { + "epoch": 2.94304, + "grad_norm": 0.7485545341982126, + "learning_rate": 4.926536355522038e-09, + "loss": 0.3892514407634735, + "memory(GiB)": 77.0, + "step": 9197, + "token_acc": 0.9011215381094072, + "train_speed(iter/s)": 0.368517 + }, + { + "epoch": 2.94336, + "grad_norm": 0.7469596121350969, + "learning_rate": 4.871355437415237e-09, + "loss": 0.21555295586585999, + "memory(GiB)": 77.0, + "step": 9198, + "token_acc": 0.9581802769143826, + "train_speed(iter/s)": 0.368496 + }, + { + "epoch": 2.94368, + "grad_norm": 0.742624425006481, + "learning_rate": 4.816484994784598e-09, + "loss": 0.3633200526237488, + "memory(GiB)": 77.0, + "step": 9199, + "token_acc": 0.8932704672096013, + "train_speed(iter/s)": 0.368476 + }, + { + "epoch": 2.944, + "grad_norm": 0.715740544130929, + "learning_rate": 4.76192503445716e-09, + "loss": 0.3047802448272705, + "memory(GiB)": 77.0, + "step": 9200, + "token_acc": 0.9227977565160013, + "train_speed(iter/s)": 0.368453 + }, + { + "epoch": 2.9443200000000003, + "grad_norm": 0.7970366781251458, + "learning_rate": 4.707675563222214e-09, + "loss": 0.357144296169281, + "memory(GiB)": 77.0, + "step": 9201, + "token_acc": 0.8901790033865505, + "train_speed(iter/s)": 0.368433 + }, + { + "epoch": 2.94464, + "grad_norm": 0.8516364037261556, + "learning_rate": 4.653736587830193e-09, + "loss": 
0.3155766427516937, + "memory(GiB)": 77.0, + "step": 9202, + "token_acc": 0.9029230356663985, + "train_speed(iter/s)": 0.368414 + }, + { + "epoch": 2.94496, + "grad_norm": 0.6546439223172315, + "learning_rate": 4.600108114992952e-09, + "loss": 0.23416215181350708, + "memory(GiB)": 77.0, + "step": 9203, + "token_acc": 0.9585928911689263, + "train_speed(iter/s)": 0.368396 + }, + { + "epoch": 2.94528, + "grad_norm": 0.7680631986868375, + "learning_rate": 4.546790151383485e-09, + "loss": 0.3074779212474823, + "memory(GiB)": 77.0, + "step": 9204, + "token_acc": 0.9262687519948931, + "train_speed(iter/s)": 0.368378 + }, + { + "epoch": 2.9455999999999998, + "grad_norm": 0.6692321014983389, + "learning_rate": 4.493782703636207e-09, + "loss": 0.2424287647008896, + "memory(GiB)": 77.0, + "step": 9205, + "token_acc": 0.9447852760736196, + "train_speed(iter/s)": 0.36836 + }, + { + "epoch": 2.94592, + "grad_norm": 0.7523540751923743, + "learning_rate": 4.441085778347231e-09, + "loss": 0.26187318563461304, + "memory(GiB)": 77.0, + "step": 9206, + "token_acc": 0.9127016963177492, + "train_speed(iter/s)": 0.368342 + }, + { + "epoch": 2.94624, + "grad_norm": 0.6975362555079196, + "learning_rate": 4.388699382073813e-09, + "loss": 0.2698945701122284, + "memory(GiB)": 77.0, + "step": 9207, + "token_acc": 0.9688622754491018, + "train_speed(iter/s)": 0.368323 + }, + { + "epoch": 2.94656, + "grad_norm": 0.7627200671429761, + "learning_rate": 4.336623521334349e-09, + "loss": 0.2845701575279236, + "memory(GiB)": 77.0, + "step": 9208, + "token_acc": 0.9161920260374288, + "train_speed(iter/s)": 0.368305 + }, + { + "epoch": 2.94688, + "grad_norm": 0.69781212704614, + "learning_rate": 4.284858202608655e-09, + "loss": 0.25818389654159546, + "memory(GiB)": 77.0, + "step": 9209, + "token_acc": 0.872189077558513, + "train_speed(iter/s)": 0.368282 + }, + { + "epoch": 2.9472, + "grad_norm": 0.7032716242043416, + "learning_rate": 4.233403432338246e-09, + "loss": 0.2992146909236908, + "memory(GiB)": 77.0, + "step": 9210, + "token_acc": 0.8552948914711686, + "train_speed(iter/s)": 0.368261 + }, + { + "epoch": 2.94752, + "grad_norm": 0.7553018236309015, + "learning_rate": 4.182259216925777e-09, + "loss": 0.29111361503601074, + "memory(GiB)": 77.0, + "step": 9211, + "token_acc": 0.9521109010712036, + "train_speed(iter/s)": 0.368239 + }, + { + "epoch": 2.9478400000000002, + "grad_norm": 0.729186572173753, + "learning_rate": 4.131425562735048e-09, + "loss": 0.32787132263183594, + "memory(GiB)": 77.0, + "step": 9212, + "token_acc": 0.9506363285769379, + "train_speed(iter/s)": 0.368214 + }, + { + "epoch": 2.94816, + "grad_norm": 0.7516841181058087, + "learning_rate": 4.080902476091553e-09, + "loss": 0.30661237239837646, + "memory(GiB)": 77.0, + "step": 9213, + "token_acc": 0.8952153110047847, + "train_speed(iter/s)": 0.368194 + }, + { + "epoch": 2.94848, + "grad_norm": 0.6678447962498175, + "learning_rate": 4.030689963281931e-09, + "loss": 0.26290419697761536, + "memory(GiB)": 77.0, + "step": 9214, + "token_acc": 0.9147442326980942, + "train_speed(iter/s)": 0.368175 + }, + { + "epoch": 2.9488, + "grad_norm": 0.7505185250851915, + "learning_rate": 3.980788030554516e-09, + "loss": 0.3556656241416931, + "memory(GiB)": 77.0, + "step": 9215, + "token_acc": 0.9078993978037548, + "train_speed(iter/s)": 0.368155 + }, + { + "epoch": 2.9491199999999997, + "grad_norm": 0.6968837325613736, + "learning_rate": 3.93119668411851e-09, + "loss": 0.3380703330039978, + "memory(GiB)": 77.0, + "step": 9216, + "token_acc": 0.8430735930735931, + 
"train_speed(iter/s)": 0.368134 + }, + { + "epoch": 2.94944, + "grad_norm": 0.7194480207784646, + "learning_rate": 3.881915930144808e-09, + "loss": 0.3483690917491913, + "memory(GiB)": 77.0, + "step": 9217, + "token_acc": 0.9116597263533611, + "train_speed(iter/s)": 0.36811 + }, + { + "epoch": 2.94976, + "grad_norm": 0.7098766598517972, + "learning_rate": 3.8329457747654504e-09, + "loss": 0.3255244195461273, + "memory(GiB)": 77.0, + "step": 9218, + "token_acc": 0.8389302232053086, + "train_speed(iter/s)": 0.368091 + }, + { + "epoch": 2.95008, + "grad_norm": 0.7318466735570903, + "learning_rate": 3.784286224073897e-09, + "loss": 0.3048110902309418, + "memory(GiB)": 77.0, + "step": 9219, + "token_acc": 0.890253462241965, + "train_speed(iter/s)": 0.368071 + }, + { + "epoch": 2.9504, + "grad_norm": 0.7319907327706107, + "learning_rate": 3.7359372841250245e-09, + "loss": 0.3309815526008606, + "memory(GiB)": 77.0, + "step": 9220, + "token_acc": 0.906989853438557, + "train_speed(iter/s)": 0.368051 + }, + { + "epoch": 2.95072, + "grad_norm": 0.7471512089024722, + "learning_rate": 3.6878989609354097e-09, + "loss": 0.3717395067214966, + "memory(GiB)": 77.0, + "step": 9221, + "token_acc": 0.8773935410117176, + "train_speed(iter/s)": 0.368029 + }, + { + "epoch": 2.95104, + "grad_norm": 0.6967542088102036, + "learning_rate": 3.640171260481662e-09, + "loss": 0.3051544427871704, + "memory(GiB)": 77.0, + "step": 9222, + "token_acc": 0.9354199683042789, + "train_speed(iter/s)": 0.36801 + }, + { + "epoch": 2.95136, + "grad_norm": 0.8034763671839904, + "learning_rate": 3.5927541887037508e-09, + "loss": 0.30077189207077026, + "memory(GiB)": 77.0, + "step": 9223, + "token_acc": 0.8563902885936784, + "train_speed(iter/s)": 0.367991 + }, + { + "epoch": 2.95168, + "grad_norm": 0.7613281736030865, + "learning_rate": 3.5456477515011245e-09, + "loss": 0.3218039870262146, + "memory(GiB)": 77.0, + "step": 9224, + "token_acc": 0.9002356637863315, + "train_speed(iter/s)": 0.367969 + }, + { + "epoch": 2.952, + "grad_norm": 1.1306904244721159, + "learning_rate": 3.4988519547357604e-09, + "loss": 0.30509620904922485, + "memory(GiB)": 77.0, + "step": 9225, + "token_acc": 0.8921772239529249, + "train_speed(iter/s)": 0.367946 + }, + { + "epoch": 2.9523200000000003, + "grad_norm": 0.7860727369297378, + "learning_rate": 3.4523668042302227e-09, + "loss": 0.33977824449539185, + "memory(GiB)": 77.0, + "step": 9226, + "token_acc": 0.9255663430420712, + "train_speed(iter/s)": 0.367924 + }, + { + "epoch": 2.95264, + "grad_norm": 0.7221791759618075, + "learning_rate": 3.4061923057690518e-09, + "loss": 0.3204267621040344, + "memory(GiB)": 77.0, + "step": 9227, + "token_acc": 0.886586284853052, + "train_speed(iter/s)": 0.367896 + }, + { + "epoch": 2.95296, + "grad_norm": 0.7396716781846921, + "learning_rate": 3.3603284650979285e-09, + "loss": 0.2905598282814026, + "memory(GiB)": 77.0, + "step": 9228, + "token_acc": 0.9534005037783375, + "train_speed(iter/s)": 0.367875 + }, + { + "epoch": 2.95328, + "grad_norm": 0.7469687058417771, + "learning_rate": 3.3147752879236773e-09, + "loss": 0.31394660472869873, + "memory(GiB)": 77.0, + "step": 9229, + "token_acc": 0.9577464788732394, + "train_speed(iter/s)": 0.367854 + }, + { + "epoch": 2.9536, + "grad_norm": 0.7788731289543258, + "learning_rate": 3.269532779914819e-09, + "loss": 0.327108234167099, + "memory(GiB)": 77.0, + "step": 9230, + "token_acc": 0.9069651741293532, + "train_speed(iter/s)": 0.367836 + }, + { + "epoch": 2.95392, + "grad_norm": 0.7756251527422099, + "learning_rate": 
3.2246009467004623e-09, + "loss": 0.2922450602054596, + "memory(GiB)": 77.0, + "step": 9231, + "token_acc": 0.9270909534259937, + "train_speed(iter/s)": 0.367816 + }, + { + "epoch": 2.95424, + "grad_norm": 0.7667944323090058, + "learning_rate": 3.1799797938722456e-09, + "loss": 0.355202317237854, + "memory(GiB)": 77.0, + "step": 9232, + "token_acc": 0.9224832214765101, + "train_speed(iter/s)": 0.367796 + }, + { + "epoch": 2.95456, + "grad_norm": 0.837525366081377, + "learning_rate": 3.1356693269818383e-09, + "loss": 0.3477572202682495, + "memory(GiB)": 77.0, + "step": 9233, + "token_acc": 0.8494088363410081, + "train_speed(iter/s)": 0.367778 + }, + { + "epoch": 2.95488, + "grad_norm": 0.756202771818834, + "learning_rate": 3.0916695515434415e-09, + "loss": 0.32284849882125854, + "memory(GiB)": 77.0, + "step": 9234, + "token_acc": 0.9113535629048756, + "train_speed(iter/s)": 0.367757 + }, + { + "epoch": 2.9552, + "grad_norm": 0.6797303990170481, + "learning_rate": 3.0479804730321193e-09, + "loss": 0.30812638998031616, + "memory(GiB)": 77.0, + "step": 9235, + "token_acc": 0.8745755517826825, + "train_speed(iter/s)": 0.367738 + }, + { + "epoch": 2.95552, + "grad_norm": 0.707900114919707, + "learning_rate": 3.0046020968835242e-09, + "loss": 0.24193553626537323, + "memory(GiB)": 77.0, + "step": 9236, + "token_acc": 0.9494584837545126, + "train_speed(iter/s)": 0.367718 + }, + { + "epoch": 2.9558400000000002, + "grad_norm": 0.7016780545113809, + "learning_rate": 2.9615344284961156e-09, + "loss": 0.34263134002685547, + "memory(GiB)": 77.0, + "step": 9237, + "token_acc": 0.8491745472030774, + "train_speed(iter/s)": 0.367697 + }, + { + "epoch": 2.95616, + "grad_norm": 0.7223170679525895, + "learning_rate": 2.9187774732286624e-09, + "loss": 0.3812520205974579, + "memory(GiB)": 77.0, + "step": 9238, + "token_acc": 0.9506131919124958, + "train_speed(iter/s)": 0.367673 + }, + { + "epoch": 2.95648, + "grad_norm": 0.7345880832040427, + "learning_rate": 2.876331236401353e-09, + "loss": 0.2970160245895386, + "memory(GiB)": 77.0, + "step": 9239, + "token_acc": 0.8989973098557105, + "train_speed(iter/s)": 0.367652 + }, + { + "epoch": 2.9568, + "grad_norm": 0.6912530804619459, + "learning_rate": 2.834195723295796e-09, + "loss": 0.28494691848754883, + "memory(GiB)": 77.0, + "step": 9240, + "token_acc": 0.9371592539454806, + "train_speed(iter/s)": 0.367629 + }, + { + "epoch": 2.9571199999999997, + "grad_norm": 0.9263902340890949, + "learning_rate": 2.792370939155575e-09, + "loss": 0.4109848141670227, + "memory(GiB)": 77.0, + "step": 9241, + "token_acc": 0.859277108433735, + "train_speed(iter/s)": 0.367611 + }, + { + "epoch": 2.95744, + "grad_norm": 0.7552230421917449, + "learning_rate": 2.7508568891845833e-09, + "loss": 0.3185860216617584, + "memory(GiB)": 77.0, + "step": 9242, + "token_acc": 0.9548519495749047, + "train_speed(iter/s)": 0.367588 + }, + { + "epoch": 2.95776, + "grad_norm": 0.7003934287895625, + "learning_rate": 2.70965357854841e-09, + "loss": 0.28754889965057373, + "memory(GiB)": 77.0, + "step": 9243, + "token_acc": 0.9548872180451128, + "train_speed(iter/s)": 0.36757 + }, + { + "epoch": 2.95808, + "grad_norm": 0.6597292413305638, + "learning_rate": 2.6687610123746212e-09, + "loss": 0.24705618619918823, + "memory(GiB)": 77.0, + "step": 9244, + "token_acc": 0.9509460406447092, + "train_speed(iter/s)": 0.367551 + }, + { + "epoch": 2.9584, + "grad_norm": 0.702667496069754, + "learning_rate": 2.628179195751368e-09, + "loss": 0.24968647956848145, + "memory(GiB)": 77.0, + "step": 9245, + "token_acc": 
0.9052713987473904, + "train_speed(iter/s)": 0.367532 + }, + { + "epoch": 2.95872, + "grad_norm": 0.7334700561259382, + "learning_rate": 2.5879081337279456e-09, + "loss": 0.3502100110054016, + "memory(GiB)": 77.0, + "step": 9246, + "token_acc": 0.9192689850958127, + "train_speed(iter/s)": 0.367507 + }, + { + "epoch": 2.95904, + "grad_norm": 0.6875203841860786, + "learning_rate": 2.5479478313161777e-09, + "loss": 0.2754904627799988, + "memory(GiB)": 77.0, + "step": 9247, + "token_acc": 0.8937831927522649, + "train_speed(iter/s)": 0.367489 + }, + { + "epoch": 2.95936, + "grad_norm": 0.723694087393235, + "learning_rate": 2.508298293487643e-09, + "loss": 0.2859320640563965, + "memory(GiB)": 77.0, + "step": 9248, + "token_acc": 0.8954963921373476, + "train_speed(iter/s)": 0.367471 + }, + { + "epoch": 2.95968, + "grad_norm": 0.7718538426524733, + "learning_rate": 2.468959525176451e-09, + "loss": 0.28174102306365967, + "memory(GiB)": 77.0, + "step": 9249, + "token_acc": 0.8869395711500975, + "train_speed(iter/s)": 0.367453 + }, + { + "epoch": 2.96, + "grad_norm": 0.741441270070859, + "learning_rate": 2.429931531277574e-09, + "loss": 0.4124886691570282, + "memory(GiB)": 77.0, + "step": 9250, + "token_acc": 0.8706086956521739, + "train_speed(iter/s)": 0.367429 + }, + { + "epoch": 2.96032, + "grad_norm": 0.7098544475369217, + "learning_rate": 2.3912143166474054e-09, + "loss": 0.28321734070777893, + "memory(GiB)": 77.0, + "step": 9251, + "token_acc": 0.9372431826671648, + "train_speed(iter/s)": 0.367411 + }, + { + "epoch": 2.96064, + "grad_norm": 0.7108687425180136, + "learning_rate": 2.3528078861034807e-09, + "loss": 0.3466818034648895, + "memory(GiB)": 77.0, + "step": 9252, + "token_acc": 0.9375394321766561, + "train_speed(iter/s)": 0.36739 + }, + { + "epoch": 2.96096, + "grad_norm": 0.7041368690551978, + "learning_rate": 2.3147122444250327e-09, + "loss": 0.34023812413215637, + "memory(GiB)": 77.0, + "step": 9253, + "token_acc": 0.8248847926267281, + "train_speed(iter/s)": 0.367368 + }, + { + "epoch": 2.96128, + "grad_norm": 0.8190244181848098, + "learning_rate": 2.276927396352158e-09, + "loss": 0.29795950651168823, + "memory(GiB)": 77.0, + "step": 9254, + "token_acc": 0.908581179912238, + "train_speed(iter/s)": 0.367352 + }, + { + "epoch": 2.9616, + "grad_norm": 0.7450644089798385, + "learning_rate": 2.2394533465869283e-09, + "loss": 0.2686719298362732, + "memory(GiB)": 77.0, + "step": 9255, + "token_acc": 0.9068845963348192, + "train_speed(iter/s)": 0.36733 + }, + { + "epoch": 2.96192, + "grad_norm": 0.7388415703507722, + "learning_rate": 2.202290099791726e-09, + "loss": 0.2286093682050705, + "memory(GiB)": 77.0, + "step": 9256, + "token_acc": 0.9712619741774261, + "train_speed(iter/s)": 0.36731 + }, + { + "epoch": 2.96224, + "grad_norm": 0.787871473340881, + "learning_rate": 2.1654376605911854e-09, + "loss": 0.2803751826286316, + "memory(GiB)": 77.0, + "step": 9257, + "token_acc": 0.9236290554120011, + "train_speed(iter/s)": 0.367291 + }, + { + "epoch": 2.96256, + "grad_norm": 0.8010382348419016, + "learning_rate": 2.128896033571359e-09, + "loss": 0.2750105857849121, + "memory(GiB)": 77.0, + "step": 9258, + "token_acc": 0.9403743315508022, + "train_speed(iter/s)": 0.367274 + }, + { + "epoch": 2.96288, + "grad_norm": 0.6834966624822845, + "learning_rate": 2.0926652232786116e-09, + "loss": 0.41898640990257263, + "memory(GiB)": 77.0, + "step": 9259, + "token_acc": 0.8636670037074486, + "train_speed(iter/s)": 0.367255 + }, + { + "epoch": 2.9632, + "grad_norm": 0.7250996530941111, + "learning_rate": 
2.05674523422128e-09, + "loss": 0.310701459646225, + "memory(GiB)": 77.0, + "step": 9260, + "token_acc": 0.8932644717021735, + "train_speed(iter/s)": 0.367235 + }, + { + "epoch": 2.96352, + "grad_norm": 0.7438886175297699, + "learning_rate": 2.0211360708694007e-09, + "loss": 0.2866174876689911, + "memory(GiB)": 77.0, + "step": 9261, + "token_acc": 0.871517027863777, + "train_speed(iter/s)": 0.367217 + }, + { + "epoch": 2.9638400000000003, + "grad_norm": 0.7372391276238331, + "learning_rate": 1.985837737653595e-09, + "loss": 0.3120726943016052, + "memory(GiB)": 77.0, + "step": 9262, + "token_acc": 0.9612884160756501, + "train_speed(iter/s)": 0.367201 + }, + { + "epoch": 2.96416, + "grad_norm": 0.7089781709578228, + "learning_rate": 1.9508502389661843e-09, + "loss": 0.2713015675544739, + "memory(GiB)": 77.0, + "step": 9263, + "token_acc": 0.9231941481255714, + "train_speed(iter/s)": 0.367182 + }, + { + "epoch": 2.96448, + "grad_norm": 0.8004878890643448, + "learning_rate": 1.916173579160907e-09, + "loss": 0.3381601870059967, + "memory(GiB)": 77.0, + "step": 9264, + "token_acc": 0.9388278388278388, + "train_speed(iter/s)": 0.367161 + }, + { + "epoch": 2.9648, + "grad_norm": 0.8050857094721905, + "learning_rate": 1.881807762552368e-09, + "loss": 0.29458698630332947, + "memory(GiB)": 77.0, + "step": 9265, + "token_acc": 0.8440567066521265, + "train_speed(iter/s)": 0.367142 + }, + { + "epoch": 2.9651199999999998, + "grad_norm": 0.7162465836423318, + "learning_rate": 1.8477527934171457e-09, + "loss": 0.31270238757133484, + "memory(GiB)": 77.0, + "step": 9266, + "token_acc": 0.9355828220858896, + "train_speed(iter/s)": 0.367122 + }, + { + "epoch": 2.96544, + "grad_norm": 0.6971449226120607, + "learning_rate": 1.8140086759924069e-09, + "loss": 0.3661288022994995, + "memory(GiB)": 77.0, + "step": 9267, + "token_acc": 0.8317295188556567, + "train_speed(iter/s)": 0.3671 + }, + { + "epoch": 2.96576, + "grad_norm": 0.792220530914047, + "learning_rate": 1.78057541447757e-09, + "loss": 0.2344340980052948, + "memory(GiB)": 77.0, + "step": 9268, + "token_acc": 0.8792439621981099, + "train_speed(iter/s)": 0.367075 + }, + { + "epoch": 2.96608, + "grad_norm": 1.0555936997341986, + "learning_rate": 1.747453013032363e-09, + "loss": 0.3562917709350586, + "memory(GiB)": 77.0, + "step": 9269, + "token_acc": 0.8609256274464656, + "train_speed(iter/s)": 0.367054 + }, + { + "epoch": 2.9664, + "grad_norm": 0.6383865706938943, + "learning_rate": 1.7146414757782116e-09, + "loss": 0.25699126720428467, + "memory(GiB)": 77.0, + "step": 9270, + "token_acc": 0.9398998330550918, + "train_speed(iter/s)": 0.367032 + }, + { + "epoch": 2.96672, + "grad_norm": 0.7285244919468415, + "learning_rate": 1.6821408067985158e-09, + "loss": 0.319410502910614, + "memory(GiB)": 77.0, + "step": 9271, + "token_acc": 0.9613368283093053, + "train_speed(iter/s)": 0.367013 + }, + { + "epoch": 2.96704, + "grad_norm": 0.725666887457569, + "learning_rate": 1.6499510101367078e-09, + "loss": 0.2659352421760559, + "memory(GiB)": 77.0, + "step": 9272, + "token_acc": 0.9669443697930664, + "train_speed(iter/s)": 0.366994 + }, + { + "epoch": 2.96736, + "grad_norm": 0.705517827658877, + "learning_rate": 1.61807208979875e-09, + "loss": 0.3432767987251282, + "memory(GiB)": 77.0, + "step": 9273, + "token_acc": 0.9249263984298332, + "train_speed(iter/s)": 0.366976 + }, + { + "epoch": 2.96768, + "grad_norm": 0.8088079170707578, + "learning_rate": 1.5865040497511918e-09, + "loss": 0.33268672227859497, + "memory(GiB)": 77.0, + "step": 9274, + "token_acc": 
0.9021376085504342, + "train_speed(iter/s)": 0.366959 + }, + { + "epoch": 2.968, + "grad_norm": 0.6979407671919343, + "learning_rate": 1.5552468939222797e-09, + "loss": 0.28440991044044495, + "memory(GiB)": 77.0, + "step": 9275, + "token_acc": 0.9265948150305855, + "train_speed(iter/s)": 0.36694 + }, + { + "epoch": 2.96832, + "grad_norm": 0.7471511006089081, + "learning_rate": 1.5243006262014027e-09, + "loss": 0.327160507440567, + "memory(GiB)": 77.0, + "step": 9276, + "token_acc": 0.8666169895678092, + "train_speed(iter/s)": 0.366922 + }, + { + "epoch": 2.9686399999999997, + "grad_norm": 0.7726078106402288, + "learning_rate": 1.493665250439369e-09, + "loss": 0.3527548909187317, + "memory(GiB)": 77.0, + "step": 9277, + "token_acc": 0.9080103359173126, + "train_speed(iter/s)": 0.3669 + }, + { + "epoch": 2.96896, + "grad_norm": 0.7522729556909521, + "learning_rate": 1.4633407704478519e-09, + "loss": 0.2994159460067749, + "memory(GiB)": 77.0, + "step": 9278, + "token_acc": 0.8904109589041096, + "train_speed(iter/s)": 0.366883 + }, + { + "epoch": 2.96928, + "grad_norm": 0.7623283388412114, + "learning_rate": 1.4333271900004998e-09, + "loss": 0.3432791233062744, + "memory(GiB)": 77.0, + "step": 9279, + "token_acc": 0.8848307767226686, + "train_speed(iter/s)": 0.366865 + }, + { + "epoch": 2.9696, + "grad_norm": 0.745705210646453, + "learning_rate": 1.403624512831825e-09, + "loss": 0.3201412260532379, + "memory(GiB)": 77.0, + "step": 9280, + "token_acc": 0.8902382064455862, + "train_speed(iter/s)": 0.366847 + }, + { + "epoch": 2.96992, + "grad_norm": 0.659881718408621, + "learning_rate": 1.3742327426380375e-09, + "loss": 0.25861290097236633, + "memory(GiB)": 77.0, + "step": 9281, + "token_acc": 0.961863173216885, + "train_speed(iter/s)": 0.366825 + }, + { + "epoch": 2.97024, + "grad_norm": 0.7600206648622229, + "learning_rate": 1.3451518830764898e-09, + "loss": 0.3023640513420105, + "memory(GiB)": 77.0, + "step": 9282, + "token_acc": 0.9319298245614035, + "train_speed(iter/s)": 0.366808 + }, + { + "epoch": 2.97056, + "grad_norm": 0.7641718291960728, + "learning_rate": 1.3163819377653986e-09, + "loss": 0.34295016527175903, + "memory(GiB)": 77.0, + "step": 9283, + "token_acc": 0.8514907181698856, + "train_speed(iter/s)": 0.36679 + }, + { + "epoch": 2.97088, + "grad_norm": 0.689998549795871, + "learning_rate": 1.2879229102849556e-09, + "loss": 0.3288402557373047, + "memory(GiB)": 77.0, + "step": 9284, + "token_acc": 0.9260115606936417, + "train_speed(iter/s)": 0.36677 + }, + { + "epoch": 2.9712, + "grad_norm": 0.6928213706364112, + "learning_rate": 1.259774804176217e-09, + "loss": 0.3040851354598999, + "memory(GiB)": 77.0, + "step": 9285, + "token_acc": 0.9503006012024048, + "train_speed(iter/s)": 0.366749 + }, + { + "epoch": 2.97152, + "grad_norm": 0.7252469495698755, + "learning_rate": 1.2319376229419366e-09, + "loss": 0.3769296705722809, + "memory(GiB)": 77.0, + "step": 9286, + "token_acc": 0.8969676085458305, + "train_speed(iter/s)": 0.366729 + }, + { + "epoch": 2.9718400000000003, + "grad_norm": 0.7228163940627544, + "learning_rate": 1.2044113700457326e-09, + "loss": 0.2831454277038574, + "memory(GiB)": 77.0, + "step": 9287, + "token_acc": 0.8552055993000874, + "train_speed(iter/s)": 0.366709 + }, + { + "epoch": 2.97216, + "grad_norm": 0.731740233120172, + "learning_rate": 1.177196048912921e-09, + "loss": 0.356290340423584, + "memory(GiB)": 77.0, + "step": 9288, + "token_acc": 0.9305064782096584, + "train_speed(iter/s)": 0.366684 + }, + { + "epoch": 2.97248, + "grad_norm": 0.7757466339683999, + 
"learning_rate": 1.1502916629302364e-09, + "loss": 0.3642594814300537, + "memory(GiB)": 77.0, + "step": 9289, + "token_acc": 0.9090909090909091, + "train_speed(iter/s)": 0.366665 + }, + { + "epoch": 2.9728, + "grad_norm": 0.6618141495716686, + "learning_rate": 1.1236982154450016e-09, + "loss": 0.25724804401397705, + "memory(GiB)": 77.0, + "step": 9290, + "token_acc": 0.9316805995964255, + "train_speed(iter/s)": 0.366645 + }, + { + "epoch": 2.9731199999999998, + "grad_norm": 0.7350675874767789, + "learning_rate": 1.0974157097665138e-09, + "loss": 0.35350823402404785, + "memory(GiB)": 77.0, + "step": 9291, + "token_acc": 0.8212435233160622, + "train_speed(iter/s)": 0.366625 + }, + { + "epoch": 2.97344, + "grad_norm": 0.7068836575724122, + "learning_rate": 1.0714441491652127e-09, + "loss": 0.2638304829597473, + "memory(GiB)": 77.0, + "step": 9292, + "token_acc": 0.8825171821305842, + "train_speed(iter/s)": 0.366607 + }, + { + "epoch": 2.97376, + "grad_norm": 0.7025628627385436, + "learning_rate": 1.0457835368726798e-09, + "loss": 0.2632251977920532, + "memory(GiB)": 77.0, + "step": 9293, + "token_acc": 0.8908117016893284, + "train_speed(iter/s)": 0.366587 + }, + { + "epoch": 2.97408, + "grad_norm": 0.6711626847802593, + "learning_rate": 1.0204338760819166e-09, + "loss": 0.32345980405807495, + "memory(GiB)": 77.0, + "step": 9294, + "token_acc": 0.9735503560528993, + "train_speed(iter/s)": 0.366568 + }, + { + "epoch": 2.9744, + "grad_norm": 0.6705126064779986, + "learning_rate": 9.953951699476216e-10, + "loss": 0.28513842821121216, + "memory(GiB)": 77.0, + "step": 9295, + "token_acc": 0.9422943221320973, + "train_speed(iter/s)": 0.366549 + }, + { + "epoch": 2.97472, + "grad_norm": 0.7310889439092109, + "learning_rate": 9.706674215848034e-10, + "loss": 0.29795050621032715, + "memory(GiB)": 77.0, + "step": 9296, + "token_acc": 0.892791551882461, + "train_speed(iter/s)": 0.366531 + }, + { + "epoch": 2.97504, + "grad_norm": 0.6590251797460597, + "learning_rate": 9.462506340709998e-10, + "loss": 0.258998841047287, + "memory(GiB)": 77.0, + "step": 9297, + "token_acc": 0.9343406593406594, + "train_speed(iter/s)": 0.366511 + }, + { + "epoch": 2.9753600000000002, + "grad_norm": 0.7252552433379612, + "learning_rate": 9.221448104440589e-10, + "loss": 0.2947540283203125, + "memory(GiB)": 77.0, + "step": 9298, + "token_acc": 0.8796672077922078, + "train_speed(iter/s)": 0.36649 + }, + { + "epoch": 2.97568, + "grad_norm": 0.7169127153734166, + "learning_rate": 8.983499537038031e-10, + "loss": 0.2723312973976135, + "memory(GiB)": 77.0, + "step": 9299, + "token_acc": 0.9207878398629844, + "train_speed(iter/s)": 0.366473 + }, + { + "epoch": 2.976, + "grad_norm": 0.7888521138451593, + "learning_rate": 8.748660668109199e-10, + "loss": 0.28635674715042114, + "memory(GiB)": 77.0, + "step": 9300, + "token_acc": 0.9357712142522269, + "train_speed(iter/s)": 0.366455 + }, + { + "epoch": 2.97632, + "grad_norm": 0.7134493219895564, + "learning_rate": 8.516931526875161e-10, + "loss": 0.34433192014694214, + "memory(GiB)": 77.0, + "step": 9301, + "token_acc": 0.9182351126195618, + "train_speed(iter/s)": 0.366432 + }, + { + "epoch": 2.9766399999999997, + "grad_norm": 0.7029398975508693, + "learning_rate": 8.28831214217396e-10, + "loss": 0.3351896107196808, + "memory(GiB)": 77.0, + "step": 9302, + "token_acc": 0.8935249575027043, + "train_speed(iter/s)": 0.36641 + }, + { + "epoch": 2.97696, + "grad_norm": 0.6894504807485148, + "learning_rate": 8.062802542446735e-10, + "loss": 0.2444981336593628, + "memory(GiB)": 77.0, + 
"step": 9303, + "token_acc": 0.9384247171453438, + "train_speed(iter/s)": 0.366394 + }, + { + "epoch": 2.97728, + "grad_norm": 0.7874575404315811, + "learning_rate": 7.8404027557627e-10, + "loss": 0.39842674136161804, + "memory(GiB)": 77.0, + "step": 9304, + "token_acc": 0.8674071502950365, + "train_speed(iter/s)": 0.366376 + }, + { + "epoch": 2.9776, + "grad_norm": 0.671771347198875, + "learning_rate": 7.621112809788611e-10, + "loss": 0.2829517722129822, + "memory(GiB)": 77.0, + "step": 9305, + "token_acc": 0.905631324241091, + "train_speed(iter/s)": 0.366358 + }, + { + "epoch": 2.97792, + "grad_norm": 0.6895019566856221, + "learning_rate": 7.404932731816528e-10, + "loss": 0.2954998016357422, + "memory(GiB)": 77.0, + "step": 9306, + "token_acc": 0.8514187714374805, + "train_speed(iter/s)": 0.366336 + }, + { + "epoch": 2.97824, + "grad_norm": 0.7372234156804642, + "learning_rate": 7.191862548741601e-10, + "loss": 0.39056769013404846, + "memory(GiB)": 77.0, + "step": 9307, + "token_acc": 0.9384122463510146, + "train_speed(iter/s)": 0.366315 + }, + { + "epoch": 2.97856, + "grad_norm": 0.7264563915694124, + "learning_rate": 6.981902287078734e-10, + "loss": 0.36947184801101685, + "memory(GiB)": 77.0, + "step": 9308, + "token_acc": 0.858985236721534, + "train_speed(iter/s)": 0.36629 + }, + { + "epoch": 2.97888, + "grad_norm": 0.6945992844019088, + "learning_rate": 6.775051972957025e-10, + "loss": 0.3680153489112854, + "memory(GiB)": 77.0, + "step": 9309, + "token_acc": 0.915138327634442, + "train_speed(iter/s)": 0.36627 + }, + { + "epoch": 2.9792, + "grad_norm": 0.7263342982209151, + "learning_rate": 6.571311632108668e-10, + "loss": 0.27955949306488037, + "memory(GiB)": 77.0, + "step": 9310, + "token_acc": 0.8928188638799571, + "train_speed(iter/s)": 0.366253 + }, + { + "epoch": 2.97952, + "grad_norm": 0.6904713618556128, + "learning_rate": 6.370681289891156e-10, + "loss": 0.32768717408180237, + "memory(GiB)": 77.0, + "step": 9311, + "token_acc": 0.9478539911752908, + "train_speed(iter/s)": 0.366236 + }, + { + "epoch": 2.9798400000000003, + "grad_norm": 0.6676290708791149, + "learning_rate": 6.173160971267855e-10, + "loss": 0.2809533178806305, + "memory(GiB)": 77.0, + "step": 9312, + "token_acc": 0.9050520935718498, + "train_speed(iter/s)": 0.366217 + }, + { + "epoch": 2.98016, + "grad_norm": 0.7291302376192278, + "learning_rate": 5.978750700816327e-10, + "loss": 0.2843477725982666, + "memory(GiB)": 77.0, + "step": 9313, + "token_acc": 0.9432527830572902, + "train_speed(iter/s)": 0.366196 + }, + { + "epoch": 2.98048, + "grad_norm": 0.7387965661436242, + "learning_rate": 5.787450502728331e-10, + "loss": 0.3249324858188629, + "memory(GiB)": 77.0, + "step": 9314, + "token_acc": 0.8655827799404625, + "train_speed(iter/s)": 0.366173 + }, + { + "epoch": 2.9808, + "grad_norm": 0.7798544912448089, + "learning_rate": 5.599260400807049e-10, + "loss": 0.35490846633911133, + "memory(GiB)": 77.0, + "step": 9315, + "token_acc": 0.8766923557592168, + "train_speed(iter/s)": 0.366156 + }, + { + "epoch": 2.9811199999999998, + "grad_norm": 0.7011334287723722, + "learning_rate": 5.414180418469861e-10, + "loss": 0.37395378947257996, + "memory(GiB)": 77.0, + "step": 9316, + "token_acc": 0.9435567010309278, + "train_speed(iter/s)": 0.366124 + }, + { + "epoch": 2.98144, + "grad_norm": 0.6756861489717858, + "learning_rate": 5.232210578748342e-10, + "loss": 0.2610267102718353, + "memory(GiB)": 77.0, + "step": 9317, + "token_acc": 0.9255034753163429, + "train_speed(iter/s)": 0.3661 + }, + { + "epoch": 2.98176, + 
"grad_norm": 0.7413554672887309, + "learning_rate": 5.053350904279941e-10, + "loss": 0.3741031885147095, + "memory(GiB)": 77.0, + "step": 9318, + "token_acc": 0.8662420382165605, + "train_speed(iter/s)": 0.366081 + }, + { + "epoch": 2.98208, + "grad_norm": 0.7390326733394957, + "learning_rate": 4.877601417327404e-10, + "loss": 0.2715075612068176, + "memory(GiB)": 77.0, + "step": 9319, + "token_acc": 0.9576089545129792, + "train_speed(iter/s)": 0.366062 + }, + { + "epoch": 2.9824, + "grad_norm": 0.7326419375525974, + "learning_rate": 4.704962139756574e-10, + "loss": 0.29740941524505615, + "memory(GiB)": 77.0, + "step": 9320, + "token_acc": 0.9131979695431472, + "train_speed(iter/s)": 0.366043 + }, + { + "epoch": 2.98272, + "grad_norm": 0.6280564627524632, + "learning_rate": 4.535433093047492e-10, + "loss": 0.28482550382614136, + "memory(GiB)": 77.0, + "step": 9321, + "token_acc": 0.9002533978346003, + "train_speed(iter/s)": 0.366021 + }, + { + "epoch": 2.98304, + "grad_norm": 0.7806727139452415, + "learning_rate": 4.369014298299945e-10, + "loss": 0.33791059255599976, + "memory(GiB)": 77.0, + "step": 9322, + "token_acc": 0.9270516717325228, + "train_speed(iter/s)": 0.366003 + }, + { + "epoch": 2.9833600000000002, + "grad_norm": 0.7661305848958874, + "learning_rate": 4.2057057762168175e-10, + "loss": 0.42669588327407837, + "memory(GiB)": 77.0, + "step": 9323, + "token_acc": 0.8195121951219512, + "train_speed(iter/s)": 0.365983 + }, + { + "epoch": 2.98368, + "grad_norm": 0.7685552721465391, + "learning_rate": 4.0455075471235173e-10, + "loss": 0.3365023732185364, + "memory(GiB)": 77.0, + "step": 9324, + "token_acc": 0.9163225385785698, + "train_speed(iter/s)": 0.365964 + }, + { + "epoch": 2.984, + "grad_norm": 0.6801411074684754, + "learning_rate": 3.8884196309513234e-10, + "loss": 0.29182925820350647, + "memory(GiB)": 77.0, + "step": 9325, + "token_acc": 0.8682347567937886, + "train_speed(iter/s)": 0.365943 + }, + { + "epoch": 2.98432, + "grad_norm": 0.8118569166434845, + "learning_rate": 3.734442047245712e-10, + "loss": 0.3141028583049774, + "memory(GiB)": 77.0, + "step": 9326, + "token_acc": 0.8791785012016605, + "train_speed(iter/s)": 0.365924 + }, + { + "epoch": 2.9846399999999997, + "grad_norm": 0.689323063287398, + "learning_rate": 3.5835748151719083e-10, + "loss": 0.308938205242157, + "memory(GiB)": 77.0, + "step": 9327, + "token_acc": 0.9326884320838371, + "train_speed(iter/s)": 0.365902 + }, + { + "epoch": 2.98496, + "grad_norm": 0.7448236228051951, + "learning_rate": 3.4358179534954574e-10, + "loss": 0.2780478596687317, + "memory(GiB)": 77.0, + "step": 9328, + "token_acc": 0.9454143103024343, + "train_speed(iter/s)": 0.365883 + }, + { + "epoch": 2.98528, + "grad_norm": 0.7302007027955204, + "learning_rate": 3.2911714806072026e-10, + "loss": 0.2784683108329773, + "memory(GiB)": 77.0, + "step": 9329, + "token_acc": 0.9248814574494635, + "train_speed(iter/s)": 0.365865 + }, + { + "epoch": 2.9856, + "grad_norm": 0.7783625123339598, + "learning_rate": 3.1496354145066357e-10, + "loss": 0.28124767541885376, + "memory(GiB)": 77.0, + "step": 9330, + "token_acc": 0.8704010606562811, + "train_speed(iter/s)": 0.365847 + }, + { + "epoch": 2.98592, + "grad_norm": 0.7510828465342226, + "learning_rate": 3.0112097727991175e-10, + "loss": 0.34428149461746216, + "memory(GiB)": 77.0, + "step": 9331, + "token_acc": 0.9300648882480173, + "train_speed(iter/s)": 0.365827 + }, + { + "epoch": 2.98624, + "grad_norm": 0.7817861115782377, + "learning_rate": 2.8758945727180853e-10, + "loss": 0.2576979994773865, + 
"memory(GiB)": 77.0, + "step": 9332, + "token_acc": 0.9415542710340398, + "train_speed(iter/s)": 0.365809 + }, + { + "epoch": 2.98656, + "grad_norm": 0.6915808706420359, + "learning_rate": 2.74368983109452e-10, + "loss": 0.3014412522315979, + "memory(GiB)": 77.0, + "step": 9333, + "token_acc": 0.8505747126436781, + "train_speed(iter/s)": 0.365788 + }, + { + "epoch": 2.98688, + "grad_norm": 0.715019654240409, + "learning_rate": 2.6145955643819275e-10, + "loss": 0.30458396673202515, + "memory(GiB)": 77.0, + "step": 9334, + "token_acc": 0.8976109215017065, + "train_speed(iter/s)": 0.365771 + }, + { + "epoch": 2.9872, + "grad_norm": 0.6993654708038523, + "learning_rate": 2.4886117886424585e-10, + "loss": 0.28602904081344604, + "memory(GiB)": 77.0, + "step": 9335, + "token_acc": 0.9483412322274881, + "train_speed(iter/s)": 0.365753 + }, + { + "epoch": 2.98752, + "grad_norm": 0.7593318827108623, + "learning_rate": 2.365738519552463e-10, + "loss": 0.30287039279937744, + "memory(GiB)": 77.0, + "step": 9336, + "token_acc": 0.9037168141592921, + "train_speed(iter/s)": 0.365736 + }, + { + "epoch": 2.9878400000000003, + "grad_norm": 0.8123477960951855, + "learning_rate": 2.2459757724024866e-10, + "loss": 0.30093061923980713, + "memory(GiB)": 77.0, + "step": 9337, + "token_acc": 0.904892047007379, + "train_speed(iter/s)": 0.365712 + }, + { + "epoch": 2.98816, + "grad_norm": 0.7408784399286773, + "learning_rate": 2.1293235620944986e-10, + "loss": 0.31166142225265503, + "memory(GiB)": 77.0, + "step": 9338, + "token_acc": 0.8707893413775767, + "train_speed(iter/s)": 0.365692 + }, + { + "epoch": 2.98848, + "grad_norm": 0.7193634750648129, + "learning_rate": 2.0157819031446645e-10, + "loss": 0.26066115498542786, + "memory(GiB)": 77.0, + "step": 9339, + "token_acc": 0.8871015787905868, + "train_speed(iter/s)": 0.365675 + }, + { + "epoch": 2.9888, + "grad_norm": 0.7372707571593325, + "learning_rate": 1.9053508096805728e-10, + "loss": 0.3496856093406677, + "memory(GiB)": 77.0, + "step": 9340, + "token_acc": 0.8817127564674397, + "train_speed(iter/s)": 0.365656 + }, + { + "epoch": 2.9891199999999998, + "grad_norm": 0.671921664696317, + "learning_rate": 1.7980302954440088e-10, + "loss": 0.3098357319831848, + "memory(GiB)": 77.0, + "step": 9341, + "token_acc": 0.9106330161357055, + "train_speed(iter/s)": 0.365634 + }, + { + "epoch": 2.98944, + "grad_norm": 0.7135437405551219, + "learning_rate": 1.6938203737881798e-10, + "loss": 0.3403869867324829, + "memory(GiB)": 77.0, + "step": 9342, + "token_acc": 0.8900235663786331, + "train_speed(iter/s)": 0.365616 + }, + { + "epoch": 2.98976, + "grad_norm": 0.7569273939900576, + "learning_rate": 1.5927210576804907e-10, + "loss": 0.3703687787055969, + "memory(GiB)": 77.0, + "step": 9343, + "token_acc": 0.9482576557550159, + "train_speed(iter/s)": 0.365594 + }, + { + "epoch": 2.99008, + "grad_norm": 0.6794261046230603, + "learning_rate": 1.4947323596997687e-10, + "loss": 0.23172211647033691, + "memory(GiB)": 77.0, + "step": 9344, + "token_acc": 0.8879640044994376, + "train_speed(iter/s)": 0.365572 + }, + { + "epoch": 2.9904, + "grad_norm": 0.7445984844690788, + "learning_rate": 1.3998542920418136e-10, + "loss": 0.3320702314376831, + "memory(GiB)": 77.0, + "step": 9345, + "token_acc": 0.9028733306353703, + "train_speed(iter/s)": 0.365554 + }, + { + "epoch": 2.99072, + "grad_norm": 0.7492078578726555, + "learning_rate": 1.3080868665110712e-10, + "loss": 0.3065721392631531, + "memory(GiB)": 77.0, + "step": 9346, + "token_acc": 0.8718838750394446, + "train_speed(iter/s)": 
0.365534 + }, + { + "epoch": 2.99104, + "grad_norm": 0.7810863150036808, + "learning_rate": 1.2194300945261862e-10, + "loss": 0.3043532371520996, + "memory(GiB)": 77.0, + "step": 9347, + "token_acc": 0.9334831460674158, + "train_speed(iter/s)": 0.365518 + }, + { + "epoch": 2.9913600000000002, + "grad_norm": 0.7248536948598838, + "learning_rate": 1.1338839871199991e-10, + "loss": 0.24473097920417786, + "memory(GiB)": 77.0, + "step": 9348, + "token_acc": 0.9678510998307953, + "train_speed(iter/s)": 0.365499 + }, + { + "epoch": 2.99168, + "grad_norm": 0.724105871304881, + "learning_rate": 1.0514485549367737e-10, + "loss": 0.3810715079307556, + "memory(GiB)": 77.0, + "step": 9349, + "token_acc": 0.825694212111074, + "train_speed(iter/s)": 0.365479 + }, + { + "epoch": 2.992, + "grad_norm": 0.774049085627102, + "learning_rate": 9.721238082321949e-11, + "loss": 0.2995602786540985, + "memory(GiB)": 77.0, + "step": 9350, + "token_acc": 0.9278083522175461, + "train_speed(iter/s)": 0.365462 + }, + { + "epoch": 2.99232, + "grad_norm": 0.7616532843230857, + "learning_rate": 8.959097568789211e-11, + "loss": 0.29217374324798584, + "memory(GiB)": 77.0, + "step": 9351, + "token_acc": 0.8783748361730013, + "train_speed(iter/s)": 0.365443 + }, + { + "epoch": 2.9926399999999997, + "grad_norm": 0.7694017860123271, + "learning_rate": 8.228064103610323e-11, + "loss": 0.3895112872123718, + "memory(GiB)": 77.0, + "step": 9352, + "token_acc": 0.8977443609022556, + "train_speed(iter/s)": 0.365423 + }, + { + "epoch": 2.99296, + "grad_norm": 0.6834561306097644, + "learning_rate": 7.528137777712552e-11, + "loss": 0.3007429242134094, + "memory(GiB)": 77.0, + "step": 9353, + "token_acc": 0.9178356713426854, + "train_speed(iter/s)": 0.3654 + }, + { + "epoch": 2.99328, + "grad_norm": 0.7735487324303002, + "learning_rate": 6.859318678248406e-11, + "loss": 0.3481735289096832, + "memory(GiB)": 77.0, + "step": 9354, + "token_acc": 0.8827734095782702, + "train_speed(iter/s)": 0.365381 + }, + { + "epoch": 2.9936, + "grad_norm": 0.7593643554164358, + "learning_rate": 6.221606888401344e-11, + "loss": 0.3533387780189514, + "memory(GiB)": 77.0, + "step": 9355, + "token_acc": 0.8943921408104789, + "train_speed(iter/s)": 0.365364 + }, + { + "epoch": 2.99392, + "grad_norm": 0.7956056052038789, + "learning_rate": 5.6150024875245566e-11, + "loss": 0.24863135814666748, + "memory(GiB)": 77.0, + "step": 9356, + "token_acc": 0.9618138424821002, + "train_speed(iter/s)": 0.365347 + }, + { + "epoch": 2.99424, + "grad_norm": 0.7088394193205403, + "learning_rate": 5.039505551085455e-11, + "loss": 0.33277344703674316, + "memory(GiB)": 77.0, + "step": 9357, + "token_acc": 0.8749711649365629, + "train_speed(iter/s)": 0.365326 + }, + { + "epoch": 2.99456, + "grad_norm": 0.6894024148207487, + "learning_rate": 4.495116150748935e-11, + "loss": 0.31354522705078125, + "memory(GiB)": 77.0, + "step": 9358, + "token_acc": 0.9112696148359486, + "train_speed(iter/s)": 0.365305 + }, + { + "epoch": 2.99488, + "grad_norm": 0.7506028218117193, + "learning_rate": 3.981834354210845e-11, + "loss": 0.2976597845554352, + "memory(GiB)": 77.0, + "step": 9359, + "token_acc": 0.953519256308101, + "train_speed(iter/s)": 0.365286 + }, + { + "epoch": 2.9952, + "grad_norm": 0.7231682650669825, + "learning_rate": 3.499660225336765e-11, + "loss": 0.3002883791923523, + "memory(GiB)": 77.0, + "step": 9360, + "token_acc": 0.966721446179129, + "train_speed(iter/s)": 0.365267 + }, + { + "epoch": 2.99552, + "grad_norm": 0.6862759303627323, + "learning_rate": 3.048593824162005e-11, + 
"loss": 0.2804352343082428, + "memory(GiB)": 77.0, + "step": 9361, + "token_acc": 0.9227010217681031, + "train_speed(iter/s)": 0.365248 + }, + { + "epoch": 2.99584, + "grad_norm": 0.86308655943929, + "learning_rate": 2.628635206780583e-11, + "loss": 0.34750086069107056, + "memory(GiB)": 77.0, + "step": 9362, + "token_acc": 0.8539026017344896, + "train_speed(iter/s)": 0.365229 + }, + { + "epoch": 2.99616, + "grad_norm": 0.7375853987322663, + "learning_rate": 2.2397844254562485e-11, + "loss": 0.36084622144699097, + "memory(GiB)": 77.0, + "step": 9363, + "token_acc": 0.7879034445348522, + "train_speed(iter/s)": 0.36521 + }, + { + "epoch": 2.99648, + "grad_norm": 0.7156876538675346, + "learning_rate": 1.8820415285947245e-11, + "loss": 0.35879194736480713, + "memory(GiB)": 77.0, + "step": 9364, + "token_acc": 0.8718043719896258, + "train_speed(iter/s)": 0.36519 + }, + { + "epoch": 2.9968, + "grad_norm": 0.7037076113496429, + "learning_rate": 1.5554065606881996e-11, + "loss": 0.27013635635375977, + "memory(GiB)": 77.0, + "step": 9365, + "token_acc": 0.9360786724031961, + "train_speed(iter/s)": 0.365174 + }, + { + "epoch": 2.99712, + "grad_norm": 0.7180823824419211, + "learning_rate": 1.259879562370836e-11, + "loss": 0.313199520111084, + "memory(GiB)": 77.0, + "step": 9366, + "token_acc": 0.9017221584385764, + "train_speed(iter/s)": 0.365154 + }, + { + "epoch": 2.99744, + "grad_norm": 0.8223135704506297, + "learning_rate": 9.95460570446527e-12, + "loss": 0.3054148256778717, + "memory(GiB)": 77.0, + "step": 9367, + "token_acc": 0.8879736408566722, + "train_speed(iter/s)": 0.365137 + }, + { + "epoch": 2.99776, + "grad_norm": 0.7349525204951756, + "learning_rate": 7.621496177778742e-12, + "loss": 0.3289668560028076, + "memory(GiB)": 77.0, + "step": 9368, + "token_acc": 0.8339310344827586, + "train_speed(iter/s)": 0.36512 + }, + { + "epoch": 2.99808, + "grad_norm": 0.7278776362985387, + "learning_rate": 5.5994673345272084e-12, + "loss": 0.30910754203796387, + "memory(GiB)": 77.0, + "step": 9369, + "token_acc": 0.9559017692104568, + "train_speed(iter/s)": 0.365101 + }, + { + "epoch": 2.9984, + "grad_norm": 0.762720309925579, + "learning_rate": 3.888519425898629e-12, + "loss": 0.3655961751937866, + "memory(GiB)": 77.0, + "step": 9370, + "token_acc": 0.9182200841852075, + "train_speed(iter/s)": 0.365081 + }, + { + "epoch": 2.99872, + "grad_norm": 0.7753382069888592, + "learning_rate": 2.488652664778268e-12, + "loss": 0.25179439783096313, + "memory(GiB)": 77.0, + "step": 9371, + "token_acc": 0.9328815747015167, + "train_speed(iter/s)": 0.365063 + }, + { + "epoch": 2.99904, + "grad_norm": 0.7126766227709201, + "learning_rate": 1.399867225471141e-12, + "loss": 0.2874998450279236, + "memory(GiB)": 77.0, + "step": 9372, + "token_acc": 0.8734672580224367, + "train_speed(iter/s)": 0.365042 + }, + { + "epoch": 2.9993600000000002, + "grad_norm": 0.7556236687749529, + "learning_rate": 6.22163243702012e-13, + "loss": 0.3088681697845459, + "memory(GiB)": 77.0, + "step": 9373, + "token_acc": 0.8568376068376068, + "train_speed(iter/s)": 0.365025 + }, + { + "epoch": 2.99968, + "grad_norm": 0.7013418738908778, + "learning_rate": 1.5554081578272871e-13, + "loss": 0.2467411756515503, + "memory(GiB)": 77.0, + "step": 9374, + "token_acc": 0.9120370370370371, + "train_speed(iter/s)": 0.365005 + }, + { + "epoch": 3.0, + "grad_norm": 0.700868257766105, + "learning_rate": 0.0, + "loss": 0.249166801571846, + "memory(GiB)": 77.0, + "step": 9375, + "token_acc": 0.9507449072666464, + "train_speed(iter/s)": 0.364983 + } + ], + 
"logging_steps": 1, + "max_steps": 9375, + "num_input_tokens_seen": 0, + "num_train_epochs": 3, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": true + }, + "attributes": {} + } + }, + "total_flos": 633407720206336.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +}