{
  "best_metric": 1.6023043394088745,
  "best_model_checkpoint": "./lora-out/checkpoint-300",
  "epoch": 1.8662519440124417,
  "eval_steps": 50,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2e-05,
      "loss": 1.7924,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 4e-05,
      "loss": 1.8083,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 6e-05,
      "loss": 1.8177,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 8e-05,
      "loss": 1.7595,
      "step": 4
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001,
      "loss": 1.6598,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00012,
      "loss": 1.6919,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00014,
      "loss": 1.6706,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016,
      "loss": 1.6879,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00018,
      "loss": 1.7051,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 1.7022,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.000199999456645141,
      "loss": 1.6809,
      "step": 11
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019999782658646859,
      "loss": 1.6098,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001999951098416968,
      "loss": 1.7014,
      "step": 13
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019999130644034888,
      "loss": 1.5885,
      "step": 14
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019998641642375657,
      "loss": 1.6243,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019998043984506027,
      "loss": 1.6484,
      "step": 16
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019997337676920803,
      "loss": 1.6093,
      "step": 17
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019996522727295496,
      "loss": 1.6173,
      "step": 18
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019995599144486247,
      "loss": 1.646,
      "step": 19
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019994566938529712,
      "loss": 1.6469,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019993426120642983,
      "loss": 1.6564,
      "step": 21
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019992176703223432,
      "loss": 1.5901,
      "step": 22
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.000199908186998486,
      "loss": 1.664,
      "step": 23
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019989352125276047,
      "loss": 1.6275,
      "step": 24
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019987776995443178,
      "loss": 1.5839,
      "step": 25
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019986093327467076,
      "loss": 1.5611,
      "step": 26
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019984301139644334,
      "loss": 1.669,
      "step": 27
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001998240045145083,
      "loss": 1.5641,
      "step": 28
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019980391283541522,
      "loss": 1.6023,
      "step": 29
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019978273657750238,
      "loss": 1.6309,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001997604759708942,
      "loss": 1.6353,
      "step": 31
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019973713125749884,
      "loss": 1.6328,
      "step": 32
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019971270269100564,
      "loss": 1.5683,
      "step": 33
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019968719053688213,
      "loss": 1.6217,
      "step": 34
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001996605950723714,
      "loss": 1.5734,
      "step": 35
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019963291658648896,
      "loss": 1.6162,
      "step": 36
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019960415538001957,
      "loss": 1.5922,
      "step": 37
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001995743117655141,
      "loss": 1.5806,
      "step": 38
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000199543386067286,
      "loss": 1.5938,
      "step": 39
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019951137862140778,
      "loss": 1.6386,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019947828977570756,
      "loss": 1.6476,
      "step": 41
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019944411988976496,
      "loss": 1.6557,
      "step": 42
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019940886933490749,
      "loss": 1.5836,
      "step": 43
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019937253849420635,
      "loss": 1.6421,
      "step": 44
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001993351277624723,
      "loss": 1.629,
      "step": 45
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019929663754625145,
      "loss": 1.6392,
      "step": 46
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019925706826382064,
      "loss": 1.5677,
      "step": 47
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019921642034518317,
      "loss": 1.6144,
      "step": 48
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019917469423206389,
      "loss": 1.6068,
      "step": 49
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019913189037790456,
      "loss": 1.6421,
      "step": 50
    },
    {
      "epoch": 0.16,
      "eval_loss": 1.621693730354309,
      "eval_runtime": 233.7603,
      "eval_samples_per_second": 16.354,
      "eval_steps_per_second": 4.09,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001990880092478588,
      "loss": 1.6172,
      "step": 51
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001990430513187871,
      "loss": 1.6095,
      "step": 52
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019899701707925166,
      "loss": 1.5967,
      "step": 53
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019894990702951106,
      "loss": 1.617,
      "step": 54
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019890172168151473,
      "loss": 1.5932,
      "step": 55
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001988524615588976,
      "loss": 1.6548,
      "step": 56
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019880212719697413,
      "loss": 1.6033,
      "step": 57
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019875071914273278,
      "loss": 1.6063,
      "step": 58
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019869823795482986,
      "loss": 1.6107,
      "step": 59
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019864468420358354,
      "loss": 1.5758,
      "step": 60
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019859005847096763,
      "loss": 1.5723,
      "step": 61
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019853436135060527,
      "loss": 1.542,
      "step": 62
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019847759344776252,
      "loss": 1.5611,
      "step": 63
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019841975537934162,
      "loss": 1.6157,
      "step": 64
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019836084777387458,
      "loss": 1.5589,
      "step": 65
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00019830087127151598,
      "loss": 1.6077,
      "step": 66
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00019823982652403634,
      "loss": 1.5473,
      "step": 67
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00019817771419481487,
      "loss": 1.6265,
      "step": 68
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001981145349588323,
      "loss": 1.6074,
      "step": 69
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019805028950266348,
      "loss": 1.6195,
      "step": 70
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019798497852447006,
      "loss": 1.5876,
      "step": 71
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001979186027339928,
      "loss": 1.5978,
      "step": 72
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019785116285254381,
      "loss": 1.533,
      "step": 73
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019778265961299888,
      "loss": 1.5888,
      "step": 74
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001977130937597894,
      "loss": 1.6211,
      "step": 75
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00019764246604889415,
      "loss": 1.6091,
      "step": 76
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00019757077724783147,
      "loss": 1.6012,
      "step": 77
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001974980281356504,
      "loss": 1.6401,
      "step": 78
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001974242195029227,
      "loss": 1.6111,
      "step": 79
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00019734935215173392,
      "loss": 1.6208,
      "step": 80
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00019727342689567482,
      "loss": 1.6038,
      "step": 81
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00019719644455983256,
      "loss": 1.5915,
      "step": 82
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001971184059807817,
      "loss": 1.5872,
      "step": 83
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000197039312006575,
      "loss": 1.5984,
      "step": 84
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001969591634967344,
      "loss": 1.5996,
      "step": 85
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00019687796132224152,
      "loss": 1.6056,
      "step": 86
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001967957063655283,
      "loss": 1.6099,
      "step": 87
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001967123995204674,
      "loss": 1.6295,
      "step": 88
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019662804169236225,
      "loss": 1.5482,
      "step": 89
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019654263379793773,
      "loss": 1.5781,
      "step": 90
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019645617676532963,
      "loss": 1.5954,
      "step": 91
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.000196368671534075,
      "loss": 1.619,
      "step": 92
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001962801190551016,
      "loss": 1.6153,
      "step": 93
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001961905202907179,
      "loss": 1.6008,
      "step": 94
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00019609987621460232,
      "loss": 1.5891,
      "step": 95
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001960081878117929,
      "loss": 1.6438,
      "step": 96
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001959154560786764,
      "loss": 1.5576,
      "step": 97
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00019582168202297758,
      "loss": 1.646,
      "step": 98
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019572686666374822,
      "loss": 1.6269,
      "step": 99
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019563101103135602,
      "loss": 1.6288,
      "step": 100
    },
    {
      "epoch": 0.31,
      "eval_loss": 1.6143836975097656,
      "eval_runtime": 233.6412,
      "eval_samples_per_second": 16.363,
      "eval_steps_per_second": 4.092,
      "step": 100
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019553411616747348,
      "loss": 1.5667,
      "step": 101
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00019543618312506647,
      "loss": 1.6221,
      "step": 102
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001953372129683829,
      "loss": 1.5992,
      "step": 103
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001952372067729411,
      "loss": 1.6138,
      "step": 104
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00019513616562551807,
      "loss": 1.51,
      "step": 105
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00019503409062413782,
      "loss": 1.6227,
      "step": 106
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00019493098287805927,
      "loss": 1.6014,
      "step": 107
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00019482684350776434,
      "loss": 1.625,
      "step": 108
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0001947216736449457,
      "loss": 1.6109,
      "step": 109
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0001946154744324945,
      "loss": 1.62,
      "step": 110
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00019450824702448778,
      "loss": 1.5878,
      "step": 111
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001943999925861763,
      "loss": 1.6264,
      "step": 112
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00019429071229397157,
      "loss": 1.6186,
      "step": 113
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001941804073354331,
      "loss": 1.6363,
      "step": 114
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019406907890925562,
      "loss": 1.5341,
      "step": 115
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019395672822525593,
      "loss": 1.5986,
      "step": 116
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019384335650435985,
      "loss": 1.6181,
      "step": 117
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001937289649785889,
      "loss": 1.6118,
      "step": 118
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001936135548910469,
      "loss": 1.6404,
      "step": 119
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00019349712749590649,
      "loss": 1.583,
      "step": 120
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019337968405839547,
      "loss": 1.5827,
      "step": 121
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019326122585478308,
      "loss": 1.6392,
      "step": 122
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019314175417236616,
      "loss": 1.5861,
      "step": 123
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00019302127030945508,
      "loss": 1.5738,
      "step": 124
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0001928997755753597,
      "loss": 1.5915,
      "step": 125
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00019277727129037508,
      "loss": 1.617,
      "step": 126
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001926537587857672,
      "loss": 1.5582,
      "step": 127
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019252923940375844,
      "loss": 1.6294,
      "step": 128
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019240371449751306,
      "loss": 1.6087,
      "step": 129
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019227718543112236,
      "loss": 1.5749,
      "step": 130
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00019214965357959005,
      "loss": 1.6041,
      "step": 131
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00019202112032881715,
      "loss": 1.6106,
      "step": 132
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00019189158707558695,
      "loss": 1.5553,
      "step": 133
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00019176105522754995,
      "loss": 1.5638,
      "step": 134
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0001916295262032084,
      "loss": 1.5921,
      "step": 135
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00019149700143190096,
      "loss": 1.5837,
      "step": 136
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019136348235378726,
      "loss": 1.6341,
      "step": 137
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019122897041983205,
      "loss": 1.5678,
      "step": 138
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019109346709178963,
      "loss": 1.6137,
      "step": 139
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001909569738421878,
      "loss": 1.6324,
      "step": 140
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019081949215431194,
      "loss": 1.612,
      "step": 141
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019068102352218897,
      "loss": 1.5908,
      "step": 142
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019054156945057097,
      "loss": 1.6087,
      "step": 143
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00019040113145491887,
      "loss": 1.5613,
      "step": 144
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.000190259711061386,
      "loss": 1.6072,
      "step": 145
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00019011730980680156,
      "loss": 1.5722,
      "step": 146
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001899739292386538,
      "loss": 1.5961,
      "step": 147
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00018982957091507325,
      "loss": 1.5409,
      "step": 148
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001896842364048159,
      "loss": 1.6557,
      "step": 149
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.000189537927287246,
      "loss": 1.5725,
      "step": 150
    },
    {
      "epoch": 0.47,
      "eval_loss": 1.6101970672607422,
      "eval_runtime": 233.5313,
      "eval_samples_per_second": 16.37,
      "eval_steps_per_second": 4.094,
      "step": 150
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00018939064515231888,
      "loss": 1.5949,
      "step": 151
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0001892423916005639,
      "loss": 1.6191,
      "step": 152
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018909316824306674,
      "loss": 1.5487,
      "step": 153
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018894297670145216,
      "loss": 1.5104,
      "step": 154
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018879181860786623,
      "loss": 1.6392,
      "step": 155
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00018863969560495866,
      "loss": 1.5932,
      "step": 156
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00018848660934586491,
      "loss": 1.6213,
      "step": 157
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001883325614941882,
      "loss": 1.5515,
      "step": 158
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00018817755372398155,
      "loss": 1.6166,
      "step": 159
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00018802158771972943,
      "loss": 1.6552,
      "step": 160
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00018786466517632956,
      "loss": 1.6378,
      "step": 161
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00018770678779907448,
      "loss": 1.5176,
      "step": 162
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00018754795730363302,
      "loss": 1.5793,
      "step": 163
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00018738817541603156,
      "loss": 1.6616,
      "step": 164
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00018722744387263544,
      "loss": 1.6055,
      "step": 165
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00018706576442012994,
      "loss": 1.6204,
      "step": 166
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00018690313881550137,
      "loss": 1.5952,
      "step": 167
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00018673956882601803,
      "loss": 1.6271,
      "step": 168
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00018657505622921082,
      "loss": 1.538,
      "step": 169
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00018640960281285417,
      "loss": 1.5874,
      "step": 170
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001862432103749464,
      "loss": 1.5694,
      "step": 171
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00018607588072369033,
      "loss": 1.583,
      "step": 172
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00018590761567747354,
      "loss": 1.5961,
      "step": 173
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00018573841706484866,
      "loss": 1.582,
      "step": 174
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001855682867245134,
      "loss": 1.6427,
      "step": 175
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00018539722650529075,
      "loss": 1.604,
      "step": 176
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00018522523826610868,
      "loss": 1.577,
      "step": 177
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00018505232387598018,
      "loss": 1.6339,
      "step": 178
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018487848521398265,
      "loss": 1.5993,
      "step": 179
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001847037241692378,
      "loss": 1.6286,
      "step": 180
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00018452804264089084,
      "loss": 1.5963,
      "step": 181
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018435144253809,
      "loss": 1.5856,
      "step": 182
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018417392577996578,
      "loss": 1.5787,
      "step": 183
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018399549429561006,
      "loss": 1.5876,
      "step": 184
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00018381615002405509,
      "loss": 1.5565,
      "step": 185
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00018363589491425248,
      "loss": 1.5897,
      "step": 186
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001834547309250521,
      "loss": 1.5951,
      "step": 187
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00018327266002518056,
      "loss": 1.5447,
      "step": 188
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00018308968419322003,
      "loss": 1.6087,
      "step": 189
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00018290580541758668,
      "loss": 1.5946,
      "step": 190
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00018272102569650905,
      "loss": 1.6148,
      "step": 191
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00018253534703800627,
      "loss": 1.649,
      "step": 192
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0001823487714598664,
      "loss": 1.6312,
      "step": 193
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0001821613009896244,
      "loss": 1.5858,
      "step": 194
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00018197293766454003,
      "loss": 1.5925,
      "step": 195
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001817836835315759,
      "loss": 1.5604,
      "step": 196
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00018159354064737506,
      "loss": 1.6125,
      "step": 197
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001814025110782387,
      "loss": 1.5954,
      "step": 198
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00018121059690010368,
      "loss": 1.5937,
      "step": 199
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00018101780019852008,
      "loss": 1.5582,
      "step": 200
    },
    {
      "epoch": 0.62,
      "eval_loss": 1.6065257787704468,
      "eval_runtime": 233.7919,
      "eval_samples_per_second": 16.352,
      "eval_steps_per_second": 4.089,
      "step": 200
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00018082412306862837,
      "loss": 1.5628,
      "step": 201
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00018062956761513675,
      "loss": 1.5735,
      "step": 202
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00018043413595229818,
      "loss": 1.6011,
      "step": 203
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00018023783020388763,
      "loss": 1.5434,
      "step": 204
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00018004065250317868,
      "loss": 1.5533,
      "step": 205
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00017984260499292058,
      "loss": 1.6074,
      "step": 206
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00017964368982531487,
      "loss": 1.5286,
      "step": 207
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00017944390916199203,
      "loss": 1.5161,
      "step": 208
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00017924326517398793,
      "loss": 1.6024,
      "step": 209
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00017904176004172027,
      "loss": 1.5727,
      "step": 210
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0001788393959549649,
      "loss": 1.5752,
      "step": 211
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00017863617511283203,
      "loss": 1.5845,
      "step": 212
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00017843209972374233,
      "loss": 1.6082,
      "step": 213
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00017822717200540283,
      "loss": 1.5895,
      "step": 214
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00017802139418478298,
      "loss": 1.5836,
      "step": 215
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00017781476849809038,
      "loss": 1.5996,
      "step": 216
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00017760729719074644,
      "loss": 1.6256,
      "step": 217
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.000177398982517362,
      "loss": 1.628,
      "step": 218
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00017718982674171284,
      "loss": 1.5543,
      "step": 219
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00017697983213671515,
      "loss": 1.5732,
      "step": 220
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0001767690009844007,
      "loss": 1.5892,
      "step": 221
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.0001765573355758921,
      "loss": 1.6524,
      "step": 222
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00017634483821137787,
      "loss": 1.5694,
      "step": 223
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0001761315112000876,
      "loss": 1.6006,
      "step": 224
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00017591735686026661,
      "loss": 1.6161,
      "step": 225
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00017570237751915092,
      "loss": 1.595,
      "step": 226
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00017548657551294192,
      "loss": 1.6072,
      "step": 227
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.000175269953186781,
      "loss": 1.5855,
      "step": 228
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00017505251289472406,
      "loss": 1.597,
      "step": 229
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001748342569997158,
      "loss": 1.5837,
      "step": 230
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00017461518787356432,
      "loss": 1.5422,
      "step": 231
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.00017439530789691506,
      "loss": 1.5837,
      "step": 232
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001741746194592251,
      "loss": 1.6038,
      "step": 233
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00017395312495873717,
      "loss": 1.5882,
      "step": 234
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00017373082680245347,
      "loss": 1.5763,
      "step": 235
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00017350772740610976,
      "loss": 1.6046,
      "step": 236
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00017328382919414877,
      "loss": 1.594,
      "step": 237
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00017305913459969414,
      "loss": 1.5903,
      "step": 238
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00017283364606452396,
      "loss": 1.5704,
      "step": 239
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0001726073660390439,
      "loss": 1.588,
      "step": 240
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00017238029698226113,
      "loss": 1.6273,
      "step": 241
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00017215244136175705,
      "loss": 1.5166,
      "step": 242
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00017192380165366092,
      "loss": 1.5813,
      "step": 243
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001716943803426226,
      "loss": 1.5654,
      "step": 244
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001714641799217858,
      "loss": 1.5548,
      "step": 245
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00017123320289276085,
      "loss": 1.5491,
      "step": 246
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0001710014517655976,
      "loss": 1.5903,
      "step": 247
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00017076892905875806,
      "loss": 1.5687,
      "step": 248
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00017053563729908905,
      "loss": 1.5975,
      "step": 249
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00017030157902179485,
      "loss": 1.6055,
      "step": 250
    },
    {
      "epoch": 0.78,
      "eval_loss": 1.60513174533844,
      "eval_runtime": 233.7813,
      "eval_samples_per_second": 16.353,
      "eval_steps_per_second": 4.089,
      "step": 250
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00017006675677040946,
      "loss": 1.4661,
      "step": 251
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00016983117309676908,
      "loss": 1.6071,
      "step": 252
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016959483056098445,
      "loss": 1.5664,
      "step": 253
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0001693577317314129,
      "loss": 1.5189,
      "step": 254
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00016911987918463034,
      "loss": 1.5488,
      "step": 255
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0001688812755054036,
      "loss": 1.6153,
      "step": 256
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00016864192328666202,
      "loss": 1.536,
      "step": 257
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00016840182512946943,
      "loss": 1.624,
      "step": 258
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00016816098364299582,
      "loss": 1.569,
      "step": 259
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00016791940144448902,
      "loss": 1.588,
      "step": 260
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0001676770811592463,
      "loss": 1.5626,
      "step": 261
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00016743402542058572,
      "loss": 1.5836,
      "step": 262
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00016719023686981763,
      "loss": 1.5573,
      "step": 263
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00016694571815621586,
      "loss": 1.5815,
      "step": 264
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00016670047193698912,
      "loss": 1.64,
      "step": 265
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.0001664545008772518,
      "loss": 1.6395,
      "step": 266
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00016620780764999536,
      "loss": 1.5927,
      "step": 267
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00016596039493605913,
      "loss": 1.605,
      "step": 268
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.000165712265424101,
      "loss": 1.6219,
      "step": 269
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001654634218105686,
      "loss": 1.5458,
      "step": 270
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001652138667996696,
      "loss": 1.59,
      "step": 271
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00016496360310334253,
      "loss": 1.633,
      "step": 272
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001647126334412274,
      "loss": 1.6108,
      "step": 273
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001644609605406358,
      "loss": 1.5747,
      "step": 274
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0001642085871365217,
      "loss": 1.5393,
      "step": 275
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00016395551597145133,
      "loss": 1.5768,
      "step": 276
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00016370174979557368,
      "loss": 1.6278,
      "step": 277
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0001634472913665904,
      "loss": 1.5983,
      "step": 278
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00016319214344972602,
      "loss": 1.5701,
      "step": 279
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00016293630881769773,
      "loss": 1.5874,
      "step": 280
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001626797902506853,
      "loss": 1.5412,
      "step": 281
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.000162422590536301,
      "loss": 1.5733,
      "step": 282
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00016216471246955906,
      "loss": 1.6245,
      "step": 283
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00016190615885284553,
      "loss": 1.5743,
      "step": 284
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00016164693249588768,
      "loss": 1.5793,
      "step": 285
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00016138703621572346,
      "loss": 1.5672,
      "step": 286
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0001611264728366711,
      "loss": 1.5442,
      "step": 287
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0001608652451902981,
      "loss": 1.5765,
      "step": 288
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00016060335611539072,
      "loss": 1.6058,
      "step": 289
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00016034080845792295,
      "loss": 1.6156,
      "step": 290
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0001600776050710257,
      "loss": 1.6179,
      "step": 291
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.0001598137488149558,
      "loss": 1.5747,
      "step": 292
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00015954924255706478,
      "loss": 1.5772,
      "step": 293
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00015928408917176786,
      "loss": 1.6064,
      "step": 294
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015901829154051265,
      "loss": 1.6082,
      "step": 295
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00015875185255174787,
      "loss": 1.5768,
      "step": 296
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.0001584847751008918,
      "loss": 1.5466,
      "step": 297
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015821706209030118,
      "loss": 1.5127,
      "step": 298
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015794871642923927,
      "loss": 1.5745,
      "step": 299
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015767974103384443,
      "loss": 1.5733,
      "step": 300
    },
    {
      "epoch": 0.93,
      "eval_loss": 1.6023043394088745,
      "eval_runtime": 233.7298,
      "eval_samples_per_second": 16.356,
      "eval_steps_per_second": 4.09,
      "step": 300
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001574101388270984,
      "loss": 1.6189,
      "step": 301
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0001571399127387946,
      "loss": 1.54,
      "step": 302
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00015686906570550616,
      "loss": 1.5419,
      "step": 303
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015659760067055417,
      "loss": 1.576,
      "step": 304
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015632552058397544,
      "loss": 1.6072,
      "step": 305
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015605282840249087,
      "loss": 1.5429,
      "step": 306
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015577952708947272,
      "loss": 1.5149,
      "step": 307
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00015550561961491304,
      "loss": 1.5744,
      "step": 308
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00015523110895539097,
      "loss": 1.6155,
      "step": 309
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00015495599809404044,
      "loss": 1.541,
      "step": 310
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.000154680290020518,
      "loss": 1.5227,
      "step": 311
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015440398773097002,
      "loss": 1.5462,
      "step": 312
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015412709422800037,
      "loss": 1.56,
      "step": 313
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015384961252063763,
      "loss": 1.6597,
      "step": 314
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015357154562430252,
      "loss": 1.5917,
      "step": 315
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.000153292896560775,
      "loss": 1.6058,
      "step": 316
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.0001530136683581615,
      "loss": 1.581,
      "step": 317
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015273386405086209,
      "loss": 1.592,
      "step": 318
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015245348667953726,
      "loss": 1.5711,
      "step": 319
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0001521725392910753,
      "loss": 1.5829,
      "step": 320
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00015189102493855868,
      "loss": 1.5786,
      "step": 321
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00015160894668123123,
      "loss": 1.5848,
      "step": 322
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0001513263075844648,
      "loss": 1.482,
      "step": 323
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.000151043110719726,
      "loss": 1.495,
      "step": 324
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00015075935916454255,
      "loss": 1.4535,
      "step": 325
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00015047505600247028,
      "loss": 1.5398,
      "step": 326
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001501902043230592,
      "loss": 1.4649,
      "step": 327
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00014990480722182022,
      "loss": 1.512,
      "step": 328
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0001496188678001914,
      "loss": 1.4365,
      "step": 329
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00014933238916550425,
      "loss": 1.5408,
      "step": 330
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00014904537443094986,
      "loss": 1.4992,
      "step": 331
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00014875782671554526,
      "loss": 1.5125,
      "step": 332
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00014846974914409943,
      "loss": 1.4823,
      "step": 333
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00014818114484717933,
      "loss": 1.4985,
      "step": 334
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00014789201696107594,
      "loss": 1.457,
      "step": 335
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00014760236862777,
      "loss": 1.4623,
      "step": 336
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0001473122029948982,
      "loss": 1.466,
      "step": 337
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.0001470215232157186,
      "loss": 1.4982,
      "step": 338
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00014673033244907665,
      "loss": 1.4369,
      "step": 339
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00014643863385937076,
      "loss": 1.4698,
      "step": 340
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00014614643061651772,
      "loss": 1.4462,
      "step": 341
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.0001458537258959186,
      "loss": 1.4513,
      "step": 342
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00014556052287842413,
      "loss": 1.4304,
      "step": 343
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00014526682475029994,
      "loss": 1.4953,
      "step": 344
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00014497263470319215,
      "loss": 1.4209,
      "step": 345
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00014467795593409256,
      "loss": 1.4522,
      "step": 346
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.000144382791645304,
      "loss": 1.495,
      "step": 347
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001440871450444055,
      "loss": 1.4461,
      "step": 348
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00014379101934421736,
      "loss": 1.4592,
      "step": 349
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001434944177627664,
      "loss": 1.4885,
      "step": 350
    },
    {
      "epoch": 1.09,
      "eval_loss": 1.6130114793777466,
      "eval_runtime": 233.7594,
      "eval_samples_per_second": 16.354,
      "eval_steps_per_second": 4.09,
      "step": 350
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00014319734352325077,
      "loss": 1.5119,
      "step": 351
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00014289979985400515,
      "loss": 1.4618,
      "step": 352
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00014260178998846547,
      "loss": 1.499,
      "step": 353
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00014230331716513396,
      "loss": 1.4611,
      "step": 354
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00014200438462754373,
      "loss": 1.4503,
      "step": 355
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014170499562422376,
      "loss": 1.472,
      "step": 356
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014140515340866337,
      "loss": 1.4654,
      "step": 357
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014110486123927718,
      "loss": 1.4245,
      "step": 358
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0001408041223793693,
      "loss": 1.4944,
      "step": 359
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00014050294009709813,
      "loss": 1.481,
      "step": 360
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00014020131766544084,
      "loss": 1.4592,
      "step": 361
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001398992583621577,
      "loss": 1.5189,
      "step": 362
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001395967654697565,
      "loss": 1.4575,
      "step": 363
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00013929384227545692,
      "loss": 1.5033,
      "step": 364
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0001389904920711547,
      "loss": 1.5161,
      "step": 365
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00013868671815338605,
      "loss": 1.4703,
      "step": 366
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0001383825238232916,
      "loss": 1.4617,
      "step": 367
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00013807791238658077,
      "loss": 1.4599,
      "step": 368
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00013777288715349559,
      "loss": 1.4871,
      "step": 369
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0001374674514387749,
      "loss": 1.4825,
      "step": 370
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00013716160856161834,
      "loss": 1.5001,
      "step": 371
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00013685536184565017,
      "loss": 1.3828,
      "step": 372
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00013654871461888317,
      "loss": 1.4882,
      "step": 373
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00013624167021368257,
      "loss": 1.4426,
      "step": 374
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0001359342319667298,
      "loss": 1.4827,
      "step": 375
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00013562640321898613,
      "loss": 1.4811,
      "step": 376
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00013531818731565647,
      "loss": 1.4937,
      "step": 377
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00013500958760615306,
      "loss": 1.4668,
      "step": 378
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00013470060744405883,
      "loss": 1.4579,
      "step": 379
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.0001343912501870913,
      "loss": 1.4692,
      "step": 380
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013408151919706583,
      "loss": 1.4927,
      "step": 381
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013377141783985918,
      "loss": 1.5073,
      "step": 382
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013346094948537296,
      "loss": 1.4771,
      "step": 383
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013315011750749688,
      "loss": 1.5233,
      "step": 384
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00013283892528407235,
      "loss": 1.4379,
      "step": 385
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00013252737619685542,
      "loss": 1.493,
      "step": 386
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00013221547363148034,
      "loss": 1.4174,
      "step": 387
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00013190322097742259,
      "loss": 1.4108,
      "step": 388
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00013159062162796208,
      "loss": 1.4713,
      "step": 389
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00013127767898014637,
      "loss": 1.4511,
      "step": 390
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.0001309643964347536,
      "loss": 1.4752,
      "step": 391
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013065077739625566,
      "loss": 1.4798,
      "step": 392
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00013033682527278107,
      "loss": 1.4372,
      "step": 393
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0001300225434760781,
      "loss": 1.4556,
      "step": 394
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00012970793542147756,
      "loss": 1.5026,
      "step": 395
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00012939300452785574,
      "loss": 1.4878,
      "step": 396
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00012907775421759732,
      "loss": 1.479,
      "step": 397
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.000128762187916558,
      "loss": 1.4508,
      "step": 398
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001284463090540275,
      "loss": 1.4923,
      "step": 399
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.00012813012106269208,
      "loss": 1.484,
      "step": 400
    },
    {
      "epoch": 1.24,
      "eval_loss": 1.616938829421997,
      "eval_runtime": 233.7894,
      "eval_samples_per_second": 16.352,
      "eval_steps_per_second": 4.089,
      "step": 400
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00012781362737859735,
      "loss": 1.4867,
      "step": 401
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00012749683144111095,
      "loss": 1.4923,
      "step": 402
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00012717973669288513,
      "loss": 1.4858,
      "step": 403
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00012686234657981933,
      "loss": 1.4464,
      "step": 404
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00012654466455102272,
      "loss": 1.4598,
      "step": 405
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00012622669405877685,
      "loss": 1.4237,
      "step": 406
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0001259084385584979,
      "loss": 1.475,
      "step": 407
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00012558990150869935,
      "loss": 1.5201,
      "step": 408
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00012527108637095427,
      "loss": 1.4735,
      "step": 409
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00012495199660985767,
      "loss": 1.4676,
      "step": 410
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00012463263569298914,
      "loss": 1.4671,
      "step": 411
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00012431300709087468,
      "loss": 1.4724,
      "step": 412
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00012399311427694945,
      "loss": 1.5451,
      "step": 413
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001236729607275197,
      "loss": 1.492,
      "step": 414
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00012335254992172512,
      "loss": 1.5186,
      "step": 415
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001230318853415012,
      "loss": 1.4622,
      "step": 416
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00012271097047154096,
      "loss": 1.4937,
      "step": 417
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00012238980879925756,
      "loss": 1.4575,
      "step": 418
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00012206840381474608,
      "loss": 1.4801,
      "step": 419
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00012174675901074577,
      "loss": 1.4523,
      "step": 420
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00012142487788260191,
      "loss": 1.4957,
      "step": 421
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00012110276392822799,
      "loss": 1.4757,
      "step": 422
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.0001207804206480677,
      "loss": 1.4769,
      "step": 423
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00012045785154505676,
      "loss": 1.4435,
      "step": 424
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.000120135060124585,
      "loss": 1.5211,
      "step": 425
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011981204989445811,
      "loss": 1.4248,
      "step": 426
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011948882436485969,
      "loss": 1.4883,
      "step": 427
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011916538704831293,
      "loss": 1.4919,
      "step": 428
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011884174145964262,
      "loss": 1.4689,
      "step": 429
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00011851789111593688,
      "loss": 1.4071,
      "step": 430
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00011819383953650874,
      "loss": 1.4418,
      "step": 431
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00011786959024285826,
      "loss": 1.5206,
      "step": 432
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00011754514675863408,
      "loss": 1.446,
      "step": 433
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.000117220512609595,
      "loss": 1.5165,
      "step": 434
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.0001168956913235719,
      "loss": 1.4119,
      "step": 435
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00011657068643042924,
      "loss": 1.503,
      "step": 436
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00011624550146202682,
      "loss": 1.4573,
      "step": 437
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00011592013995218123,
      "loss": 1.4707,
      "step": 438
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00011559460543662768,
      "loss": 1.4304,
      "step": 439
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00011526890145298137,
      "loss": 1.4465,
      "step": 440
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001149430315406991,
      "loss": 1.4912,
      "step": 441
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.0001146169992410409,
      "loss": 1.4549,
      "step": 442
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00011429080809703145,
      "loss": 1.4528,
      "step": 443
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00011396446165342165,
      "loss": 1.4148,
      "step": 444
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00011363796345665001,
      "loss": 1.467,
      "step": 445
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.0001133113170548041,
      "loss": 1.492,
      "step": 446
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00011298452599758217,
      "loss": 1.5244,
      "step": 447
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00011265759383625436,
      "loss": 1.4553,
      "step": 448
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.0001123305241236243,
      "loss": 1.4764,
      "step": 449
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00011200332041399027,
      "loss": 1.4354,
      "step": 450
    },
    {
      "epoch": 1.4,
      "eval_loss": 1.6193681955337524,
      "eval_runtime": 233.6751,
      "eval_samples_per_second": 16.36,
      "eval_steps_per_second": 4.091,
      "step": 450
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00011167598626310682,
      "loss": 1.4946,
      "step": 451
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00011134852522814596,
      "loss": 1.4558,
      "step": 452
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.0001110209408676586,
      "loss": 1.4549,
      "step": 453
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00011069323674153585,
      "loss": 1.4992,
      "step": 454
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0001103654164109702,
      "loss": 1.4828,
      "step": 455
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00011003748343841711,
      "loss": 1.4939,
      "step": 456
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00010970944138755604,
      "loss": 1.4761,
      "step": 457
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00010938129382325184,
      "loss": 1.4394,
      "step": 458
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00010905304431151602,
      "loss": 1.4852,
      "step": 459
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00010872469641946783,
      "loss": 1.4479,
      "step": 460
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00010839625371529583,
      "loss": 1.5161,
      "step": 461
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00010806771976821872,
      "loss": 1.5104,
      "step": 462
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.0001077390981484469,
      "loss": 1.5056,
      "step": 463
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00010741039242714337,
      "loss": 1.4919,
      "step": 464
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00010708160617638521,
      "loss": 1.4605,
      "step": 465
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00010675274296912452,
      "loss": 1.5191,
      "step": 466
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00010642380637914975,
      "loss": 1.4504,
      "step": 467
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00010609479998104684,
      "loss": 1.4619,
      "step": 468
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00010576572735016016,
      "loss": 1.4619,
      "step": 469
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.00010543659206255409,
      "loss": 1.4962,
      "step": 470
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00010510739769497378,
| "loss": 1.4901, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 1.47, | |
| "learning_rate": 0.0001047781478248063, | |
| "loss": 1.4708, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 1.47, | |
| "learning_rate": 0.00010444884603004213, | |
| "loss": 1.4756, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 1.47, | |
| "learning_rate": 0.00010411949588923577, | |
| "loss": 1.3948, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.00010379010098146728, | |
| "loss": 1.5183, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.00010346066488630308, | |
| "loss": 1.4252, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 1.48, | |
| "learning_rate": 0.00010313119118375727, | |
| "loss": 1.4686, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 1.49, | |
| "learning_rate": 0.00010280168345425256, | |
| "loss": 1.5285, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 1.49, | |
| "learning_rate": 0.00010247214527858149, | |
| "loss": 1.4649, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 1.49, | |
| "learning_rate": 0.0001021425802378674, | |
| "loss": 1.4602, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "learning_rate": 0.00010181299191352566, | |
| "loss": 1.5102, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "learning_rate": 0.00010148338388722465, | |
| "loss": 1.4894, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 1.5, | |
| "learning_rate": 0.00010115375974084677, | |
| "loss": 1.501, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 0.00010082412305644964, | |
| "loss": 1.481, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 0.00010049447741622717, | |
| "loss": 1.4927, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 0.00010016482640247058, | |
| "loss": 1.512, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 1.51, | |
| "learning_rate": 9.983517359752945e-05, | |
| "loss": 1.4622, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "learning_rate": 9.950552258377284e-05, | |
| "loss": 1.4956, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "learning_rate": 9.917587694355037e-05, | |
| "loss": 1.493, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 1.52, | |
| "learning_rate": 9.884624025915328e-05, | |
| "loss": 1.4629, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.53, | |
| "learning_rate": 9.851661611277537e-05, | |
| "loss": 1.4531, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 1.53, | |
| "learning_rate": 9.818700808647435e-05, | |
| "loss": 1.4656, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 1.53, | |
| "learning_rate": 9.785741976213261e-05, | |
| "loss": 1.4982, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 1.54, | |
| "learning_rate": 9.752785472141854e-05, | |
| "loss": 1.5053, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 1.54, | |
| "learning_rate": 9.719831654574745e-05, | |
| "loss": 1.4619, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 1.54, | |
| "learning_rate": 9.686880881624275e-05, | |
| "loss": 1.486, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 9.653933511369696e-05, | |
| "loss": 1.4788, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 9.620989901853275e-05, | |
| "loss": 1.4663, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 1.55, | |
| "learning_rate": 9.588050411076424e-05, | |
| "loss": 1.5138, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "learning_rate": 9.555115396995788e-05, | |
| "loss": 1.4427, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "eval_loss": 1.6187018156051636, | |
| "eval_runtime": 233.6591, | |
| "eval_samples_per_second": 16.361, | |
| "eval_steps_per_second": 4.091, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "learning_rate": 9.522185217519371e-05, | |
| "loss": 1.4696, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "learning_rate": 9.489260230502626e-05, | |
| "loss": 1.4052, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 1.56, | |
| "learning_rate": 9.45634079374459e-05, | |
| "loss": 1.4688, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "learning_rate": 9.423427264983986e-05, | |
| "loss": 1.4266, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "learning_rate": 9.390520001895321e-05, | |
| "loss": 1.4887, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 1.57, | |
| "learning_rate": 9.357619362085027e-05, | |
| "loss": 1.4992, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 1.58, | |
| "learning_rate": 9.32472570308755e-05, | |
| "loss": 1.4626, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 1.58, | |
| "learning_rate": 9.291839382361481e-05, | |
| "loss": 1.4984, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 1.58, | |
| "learning_rate": 9.258960757285664e-05, | |
| "loss": 1.3692, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 9.226090185155314e-05, | |
| "loss": 1.4325, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 9.19322802317813e-05, | |
| "loss": 1.5049, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 1.59, | |
| "learning_rate": 9.160374628470421e-05, | |
| "loss": 1.4589, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "learning_rate": 9.127530358053218e-05, | |
| "loss": 1.4291, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "learning_rate": 9.094695568848402e-05, | |
| "loss": 1.4474, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "learning_rate": 9.061870617674817e-05, | |
| "loss": 1.513, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 1.6, | |
| "learning_rate": 9.029055861244397e-05, | |
| "loss": 1.4609, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 1.61, | |
| "learning_rate": 8.99625165615829e-05, | |
| "loss": 1.5144, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 1.61, | |
| "learning_rate": 8.963458358902985e-05, | |
| "loss": 1.4294, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 1.61, | |
| "learning_rate": 8.93067632584642e-05, | |
| "loss": 1.4516, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "learning_rate": 8.897905913234143e-05, | |
| "loss": 1.4659, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "learning_rate": 8.865147477185405e-05, | |
| "loss": 1.4787, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 1.62, | |
| "learning_rate": 8.832401373689319e-05, | |
| "loss": 1.4601, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 1.63, | |
| "learning_rate": 8.799667958600973e-05, | |
| "loss": 1.4955, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 1.63, | |
| "learning_rate": 8.766947587637573e-05, | |
| "loss": 1.4231, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 1.63, | |
| "learning_rate": 8.734240616374565e-05, | |
| "loss": 1.4952, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 1.64, | |
| "learning_rate": 8.701547400241788e-05, | |
| "loss": 1.4707, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 1.64, | |
| "learning_rate": 8.668868294519593e-05, | |
| "loss": 1.5023, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 1.64, | |
| "learning_rate": 8.636203654335002e-05, | |
| "loss": 1.4702, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "learning_rate": 8.603553834657836e-05, | |
| "loss": 1.4399, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "learning_rate": 8.570919190296855e-05, | |
| "loss": 1.5175, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "learning_rate": 8.53830007589591e-05, | |
| "loss": 1.4715, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 1.65, | |
| "learning_rate": 8.505696845930096e-05, | |
| "loss": 1.5292, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 8.473109854701869e-05, | |
| "loss": 1.5287, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 8.440539456337235e-05, | |
| "loss": 1.4762, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 1.66, | |
| "learning_rate": 8.407986004781879e-05, | |
| "loss": 1.4536, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "learning_rate": 8.375449853797322e-05, | |
| "loss": 1.5018, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "learning_rate": 8.342931356957076e-05, | |
| "loss": 1.4723, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 1.67, | |
| "learning_rate": 8.310430867642812e-05, | |
| "loss": 1.4905, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "learning_rate": 8.277948739040503e-05, | |
| "loss": 1.4651, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "learning_rate": 8.245485324136597e-05, | |
| "loss": 1.4482, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.68, | |
| "learning_rate": 8.213040975714175e-05, | |
| "loss": 1.3977, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 1.69, | |
| "learning_rate": 8.180616046349129e-05, | |
| "loss": 1.5594, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 1.69, | |
| "learning_rate": 8.148210888406316e-05, | |
| "loss": 1.4995, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 1.69, | |
| "learning_rate": 8.115825854035737e-05, | |
| "loss": 1.5106, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 8.083461295168707e-05, | |
| "loss": 1.4219, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 8.051117563514036e-05, | |
| "loss": 1.4766, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 8.018795010554193e-05, | |
| "loss": 1.5241, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 1.7, | |
| "learning_rate": 7.986493987541502e-05, | |
| "loss": 1.4673, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "learning_rate": 7.954214845494325e-05, | |
| "loss": 1.4236, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "learning_rate": 7.921957935193232e-05, | |
| "loss": 1.4687, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "eval_loss": 1.617763876914978, | |
| "eval_runtime": 233.6334, | |
| "eval_samples_per_second": 16.363, | |
| "eval_steps_per_second": 4.092, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.71, | |
| "learning_rate": 7.889723607177202e-05, | |
| "loss": 1.4412, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "learning_rate": 7.857512211739813e-05, | |
| "loss": 1.4464, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "learning_rate": 7.825324098925427e-05, | |
| "loss": 1.4043, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 1.72, | |
| "learning_rate": 7.793159618525393e-05, | |
| "loss": 1.4384, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 7.761019120074245e-05, | |
| "loss": 1.4781, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 7.728902952845905e-05, | |
| "loss": 1.4311, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 1.73, | |
| "learning_rate": 7.696811465849883e-05, | |
| "loss": 1.4926, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 7.664745007827489e-05, | |
| "loss": 1.4739, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 7.632703927248033e-05, | |
| "loss": 1.509, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 7.60068857230506e-05, | |
| "loss": 1.4555, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.74, | |
| "learning_rate": 7.568699290912533e-05, | |
| "loss": 1.4588, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 7.536736430701088e-05, | |
| "loss": 1.4574, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 7.504800339014232e-05, | |
| "loss": 1.4805, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 1.75, | |
| "learning_rate": 7.472891362904577e-05, | |
| "loss": 1.5081, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 7.441009849130067e-05, | |
| "loss": 1.5081, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 7.409156144150213e-05, | |
| "loss": 1.4548, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 1.76, | |
| "learning_rate": 7.377330594122317e-05, | |
| "loss": 1.4478, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 7.34553354489773e-05, | |
| "loss": 1.5048, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 7.31376534201807e-05, | |
| "loss": 1.4889, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 1.77, | |
| "learning_rate": 7.282026330711489e-05, | |
| "loss": 1.5045, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 7.250316855888906e-05, | |
| "loss": 1.4352, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 7.218637262140268e-05, | |
| "loss": 1.4881, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 1.78, | |
| "learning_rate": 7.186987893730797e-05, | |
| "loss": 1.449, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 7.155369094597253e-05, | |
| "loss": 1.4146, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 7.1237812083442e-05, | |
| "loss": 1.4462, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 7.092224578240269e-05, | |
| "loss": 1.4509, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 1.79, | |
| "learning_rate": 7.060699547214427e-05, | |
| "loss": 1.4483, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 7.029206457852247e-05, | |
| "loss": 1.4348, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 6.997745652392191e-05, | |
| "loss": 1.4931, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 1.8, | |
| "learning_rate": 6.966317472721897e-05, | |
| "loss": 1.4132, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 6.934922260374437e-05, | |
| "loss": 1.3974, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 6.903560356524641e-05, | |
| "loss": 1.4326, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 1.81, | |
| "learning_rate": 6.872232101985363e-05, | |
| "loss": 1.4349, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 6.840937837203791e-05, | |
| "loss": 1.4528, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 6.809677902257742e-05, | |
| "loss": 1.4365, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 1.82, | |
| "learning_rate": 6.778452636851968e-05, | |
| "loss": 1.4702, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 6.747262380314463e-05, | |
| "loss": 1.458, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 6.71610747159277e-05, | |
| "loss": 1.5413, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 1.83, | |
| "learning_rate": 6.684988249250314e-05, | |
| "loss": 1.4205, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.653905051462708e-05, | |
| "loss": 1.4643, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.622858216014084e-05, | |
| "loss": 1.4071, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.591848080293418e-05, | |
| "loss": 1.4669, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 1.84, | |
| "learning_rate": 6.56087498129087e-05, | |
| "loss": 1.5062, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 6.52993925559412e-05, | |
| "loss": 1.4334, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 6.499041239384698e-05, | |
| "loss": 1.4696, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 1.85, | |
| "learning_rate": 6.468181268434354e-05, | |
| "loss": 1.4575, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 6.437359678101389e-05, | |
| "loss": 1.4432, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 6.406576803327022e-05, | |
| "loss": 1.5047, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 1.86, | |
| "learning_rate": 6.375832978631743e-05, | |
| "loss": 1.4297, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "learning_rate": 6.345128538111685e-05, | |
| "loss": 1.461, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.87, | |
| "eval_loss": 1.6174333095550537, | |
| "eval_runtime": 233.649, | |
| "eval_samples_per_second": 16.362, | |
| "eval_steps_per_second": 4.092, | |
| "step": 600 | |
| } | |
| ], | |
| "logging_steps": 1, | |
| "max_steps": 963, | |
| "num_train_epochs": 3, | |
| "save_steps": 50, | |
| "total_flos": 1.6825629945102336e+18, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
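
The object above is the tail of a `trainer_state.json` as written by the Hugging Face `transformers` `Trainer`: per-step records carry `loss`, the periodic records at every 50th step (`eval_steps`/`save_steps`) carry `eval_loss` plus runtime stats, and this run stops at step 600 of `max_steps` 963. As a minimal sketch of how such a state file can be inspected, the Python below loads it, splits the mixed `log_history` into training and evaluation records, reports the lowest logged `eval_loss`, and optionally plots both curves. `STATE_PATH` and the output filename `loss_curves.png` are assumptions for illustration, not part of the original run.

```python
import json

# Assumed location: trainer_state.json sits inside every checkpoint
# directory saved by transformers.Trainer; adjust the path as needed.
STATE_PATH = "./lora-out/checkpoint-600/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes per-step training records (keyed by "loss") with
# periodic evaluation records (keyed by "eval_loss"); split them apart.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")

# Optional: plot the two loss curves if matplotlib is installed.
try:
    import matplotlib.pyplot as plt

    plt.plot([e["step"] for e in train_log],
             [e["loss"] for e in train_log], label="train loss")
    plt.plot([e["step"] for e in eval_log],
             [e["eval_loss"] for e in eval_log], "o-", label="eval loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curves.png")
except ImportError:
    pass
```

Run against the full file, the script would surface the minimum `eval_loss` across every evaluation logged so far, not just the four shown in this excerpt, which is a quick way to confirm which checkpoint directory is worth keeping.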