diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..4b032ecec52acb0cd07a956aa52edecfa5428318 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-2730/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/checkpoint-1000/config.json b/checkpoint-1000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-1000/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-1000/global_step1000/mp_rank_00_model_states.pt b/checkpoint-1000/global_step1000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2d50f7bdb379e19c08bae70bbee6bdebbb94fc20
--- /dev/null
+++ b/checkpoint-1000/global_step1000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f14db09da159b4cd36815f27e2f09d6eaa53aa40588981e8dacb70b2c10e59e
+size 2271151845
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..136dfde93e8ca193b08b03c4fb97cf68a702f151
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b98b60220dc2ddbc37e48471c1a04217a8c0ed4cdb1057f8ba9a8e32b5a5df5
+size 3406552447
diff --git a/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f7492e9871aa9dcad03dbdafb1f5bfa920b2fc60
--- /dev/null
+++ b/checkpoint-1000/global_step1000/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b5caf5f9850cf22e3b1b79ed354f98e6aeee60d8eb8d21ecc6315fbd54e0cd7
+size 3406564543
diff --git a/checkpoint-1000/latest b/checkpoint-1000/latest
new file mode 100644
index 0000000000000000000000000000000000000000..e2d3435fb1acf8913e6bd6c51b01adfc69b11ac6
--- /dev/null
+++ b/checkpoint-1000/latest
@@ -0,0 +1 @@
+global_step1000
\ No newline at end of file
diff --git a/checkpoint-1000/model.safetensors b/checkpoint-1000/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f4c4df4194746c09a257d78959198a303a83bd51
--- /dev/null
+++ b/checkpoint-1000/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:649ad205646771327c2817a94b100633f34dff127771ba599f326980f0c6039a
+size 2271071852
diff --git a/checkpoint-1000/rng_state_0.pth b/checkpoint-1000/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4745ade3dcede17e4617c33e6d952506f33417b0
--- /dev/null
+++ b/checkpoint-1000/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59d5cdcad128e01a7c86df43091ad6a2f3d222c14dc2875b4f35bfe74a5811f5
+size 14853
diff --git a/checkpoint-1000/rng_state_1.pth b/checkpoint-1000/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4bbde991b08974759c891242a0a1cf5eb5deb8c4
--- /dev/null
+++ b/checkpoint-1000/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52c106c4c447acda6ed6035b1aa023b2a71ff517c72c01ff2f021ac3a5a89be9
+size 14853
diff --git a/checkpoint-1000/scheduler.pt b/checkpoint-1000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..775c2dae6961ccbf297be7152b3239dc01cd0320
--- /dev/null
+++ b/checkpoint-1000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57e6b9b36e5c9d9a00f8622c9bcd30d9d210ab1aeb7694e4dd1b675fb1a8cfbc
+size 1465
diff --git a/checkpoint-1000/sentencepiece.bpe.model b/checkpoint-1000/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-1000/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-1000/special_tokens_map.json b/checkpoint-1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-1000/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1000/tokenizer.json b/checkpoint-1000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-1000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-1000/tokenizer_config.json b/checkpoint-1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-1000/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "model_max_length": 8192,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-1000/trainer_state.json b/checkpoint-1000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..3c1cf9bcec3c29a03f2468de72390d2934549f4a
--- /dev/null
+++ b/checkpoint-1000/trainer_state.json
@@ -0,0 +1,7034 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.663003663003663,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ },
+ {
+ "epoch": 1.8351648351648353,
+ "grad_norm": 26.250751495361328,
+ "learning_rate": 5.445665445665445e-05,
+ "loss": 0.7496,
+ "step": 501
+ },
+ {
+ "epoch": 1.838827838827839,
+ "grad_norm": 19.820999145507812,
+ "learning_rate": 5.443223443223443e-05,
+ "loss": 0.4367,
+ "step": 502
+ },
+ {
+ "epoch": 1.8424908424908426,
+ "grad_norm": 25.09316062927246,
+ "learning_rate": 5.440781440781441e-05,
+ "loss": 0.8584,
+ "step": 503
+ },
+ {
+ "epoch": 1.8461538461538463,
+ "grad_norm": 17.808509826660156,
+ "learning_rate": 5.438339438339438e-05,
+ "loss": 0.3869,
+ "step": 504
+ },
+ {
+ "epoch": 1.84981684981685,
+ "grad_norm": 28.342119216918945,
+ "learning_rate": 5.435897435897436e-05,
+ "loss": 0.8881,
+ "step": 505
+ },
+ {
+ "epoch": 1.8534798534798536,
+ "grad_norm": 33.80287551879883,
+ "learning_rate": 5.433455433455434e-05,
+ "loss": 1.2911,
+ "step": 506
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 55.428138732910156,
+ "learning_rate": 5.431013431013431e-05,
+ "loss": 0.8934,
+ "step": 507
+ },
+ {
+ "epoch": 1.8608058608058609,
+ "grad_norm": 27.962610244750977,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 0.662,
+ "step": 508
+ },
+ {
+ "epoch": 1.8644688644688645,
+ "grad_norm": 62.84252166748047,
+ "learning_rate": 5.426129426129427e-05,
+ "loss": 1.9216,
+ "step": 509
+ },
+ {
+ "epoch": 1.8681318681318682,
+ "grad_norm": 24.26439666748047,
+ "learning_rate": 5.423687423687424e-05,
+ "loss": 0.2164,
+ "step": 510
+ },
+ {
+ "epoch": 1.8717948717948718,
+ "grad_norm": 50.95674133300781,
+ "learning_rate": 5.421245421245422e-05,
+ "loss": 0.7023,
+ "step": 511
+ },
+ {
+ "epoch": 1.8754578754578755,
+ "grad_norm": 41.17847442626953,
+ "learning_rate": 5.418803418803419e-05,
+ "loss": 1.1081,
+ "step": 512
+ },
+ {
+ "epoch": 1.879120879120879,
+ "grad_norm": 28.701988220214844,
+ "learning_rate": 5.416361416361416e-05,
+ "loss": 0.6519,
+ "step": 513
+ },
+ {
+ "epoch": 1.8827838827838828,
+ "grad_norm": 48.42552947998047,
+ "learning_rate": 5.413919413919414e-05,
+ "loss": 1.5215,
+ "step": 514
+ },
+ {
+ "epoch": 1.8864468864468864,
+ "grad_norm": 19.71268653869629,
+ "learning_rate": 5.411477411477412e-05,
+ "loss": 0.4731,
+ "step": 515
+ },
+ {
+ "epoch": 1.89010989010989,
+ "grad_norm": 68.88224792480469,
+ "learning_rate": 5.409035409035409e-05,
+ "loss": 3.0071,
+ "step": 516
+ },
+ {
+ "epoch": 1.8937728937728937,
+ "grad_norm": 34.33188247680664,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.7014,
+ "step": 517
+ },
+ {
+ "epoch": 1.8974358974358974,
+ "grad_norm": 18.214942932128906,
+ "learning_rate": 5.404151404151404e-05,
+ "loss": 0.2362,
+ "step": 518
+ },
+ {
+ "epoch": 1.901098901098901,
+ "grad_norm": 31.553678512573242,
+ "learning_rate": 5.401709401709402e-05,
+ "loss": 0.5839,
+ "step": 519
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 15.681426048278809,
+ "learning_rate": 5.3992673992673996e-05,
+ "loss": 0.6039,
+ "step": 520
+ },
+ {
+ "epoch": 1.9084249084249083,
+ "grad_norm": 18.462688446044922,
+ "learning_rate": 5.396825396825397e-05,
+ "loss": 0.5773,
+ "step": 521
+ },
+ {
+ "epoch": 1.912087912087912,
+ "grad_norm": 10.23849105834961,
+ "learning_rate": 5.3943833943833946e-05,
+ "loss": 0.3801,
+ "step": 522
+ },
+ {
+ "epoch": 1.9157509157509156,
+ "grad_norm": 35.680973052978516,
+ "learning_rate": 5.3919413919413925e-05,
+ "loss": 1.2559,
+ "step": 523
+ },
+ {
+ "epoch": 1.9194139194139193,
+ "grad_norm": 23.97362518310547,
+ "learning_rate": 5.3894993894993897e-05,
+ "loss": 0.4112,
+ "step": 524
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 25.785356521606445,
+ "learning_rate": 5.387057387057387e-05,
+ "loss": 0.8993,
+ "step": 525
+ },
+ {
+ "epoch": 1.9267399267399268,
+ "grad_norm": 25.246868133544922,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 0.6534,
+ "step": 526
+ },
+ {
+ "epoch": 1.9304029304029304,
+ "grad_norm": 29.850788116455078,
+ "learning_rate": 5.382173382173382e-05,
+ "loss": 0.52,
+ "step": 527
+ },
+ {
+ "epoch": 1.934065934065934,
+ "grad_norm": 20.702608108520508,
+ "learning_rate": 5.37973137973138e-05,
+ "loss": 0.4093,
+ "step": 528
+ },
+ {
+ "epoch": 1.9377289377289377,
+ "grad_norm": 36.39994812011719,
+ "learning_rate": 5.3772893772893775e-05,
+ "loss": 1.275,
+ "step": 529
+ },
+ {
+ "epoch": 1.9413919413919414,
+ "grad_norm": 27.56822395324707,
+ "learning_rate": 5.374847374847375e-05,
+ "loss": 0.6773,
+ "step": 530
+ },
+ {
+ "epoch": 1.945054945054945,
+ "grad_norm": 26.07769012451172,
+ "learning_rate": 5.3724053724053725e-05,
+ "loss": 0.5373,
+ "step": 531
+ },
+ {
+ "epoch": 1.9487179487179487,
+ "grad_norm": 48.47615051269531,
+ "learning_rate": 5.3699633699633704e-05,
+ "loss": 1.1931,
+ "step": 532
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 24.416805267333984,
+ "learning_rate": 5.3675213675213675e-05,
+ "loss": 0.4523,
+ "step": 533
+ },
+ {
+ "epoch": 1.9560439560439562,
+ "grad_norm": 56.8088264465332,
+ "learning_rate": 5.3650793650793654e-05,
+ "loss": 1.8992,
+ "step": 534
+ },
+ {
+ "epoch": 1.9597069597069599,
+ "grad_norm": 36.805912017822266,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 1.0743,
+ "step": 535
+ },
+ {
+ "epoch": 1.9633699633699635,
+ "grad_norm": 17.375244140625,
+ "learning_rate": 5.3601953601953604e-05,
+ "loss": 0.3546,
+ "step": 536
+ },
+ {
+ "epoch": 1.9670329670329672,
+ "grad_norm": 35.297767639160156,
+ "learning_rate": 5.357753357753358e-05,
+ "loss": 1.4903,
+ "step": 537
+ },
+ {
+ "epoch": 1.9706959706959708,
+ "grad_norm": 38.64927673339844,
+ "learning_rate": 5.3553113553113554e-05,
+ "loss": 0.9346,
+ "step": 538
+ },
+ {
+ "epoch": 1.9743589743589745,
+ "grad_norm": 23.494552612304688,
+ "learning_rate": 5.3528693528693526e-05,
+ "loss": 0.3677,
+ "step": 539
+ },
+ {
+ "epoch": 1.978021978021978,
+ "grad_norm": 21.8272647857666,
+ "learning_rate": 5.3504273504273504e-05,
+ "loss": 0.591,
+ "step": 540
+ },
+ {
+ "epoch": 1.9816849816849818,
+ "grad_norm": 15.60590934753418,
+ "learning_rate": 5.347985347985348e-05,
+ "loss": 0.3129,
+ "step": 541
+ },
+ {
+ "epoch": 1.9853479853479854,
+ "grad_norm": 23.846555709838867,
+ "learning_rate": 5.3455433455433454e-05,
+ "loss": 0.6108,
+ "step": 542
+ },
+ {
+ "epoch": 1.989010989010989,
+ "grad_norm": 21.743024826049805,
+ "learning_rate": 5.343101343101343e-05,
+ "loss": 1.0541,
+ "step": 543
+ },
+ {
+ "epoch": 1.9926739926739927,
+ "grad_norm": 29.806121826171875,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6088,
+ "step": 544
+ },
+ {
+ "epoch": 1.9963369963369964,
+ "grad_norm": 26.778568267822266,
+ "learning_rate": 5.338217338217338e-05,
+ "loss": 0.5842,
+ "step": 545
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 23.356237411499023,
+ "learning_rate": 5.335775335775336e-05,
+ "loss": 0.4591,
+ "step": 546
+ },
+ {
+ "epoch": 2.0036630036630036,
+ "grad_norm": 17.303443908691406,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 0.3432,
+ "step": 547
+ },
+ {
+ "epoch": 2.0073260073260073,
+ "grad_norm": 27.082172393798828,
+ "learning_rate": 5.330891330891331e-05,
+ "loss": 0.5156,
+ "step": 548
+ },
+ {
+ "epoch": 2.010989010989011,
+ "grad_norm": 26.520530700683594,
+ "learning_rate": 5.328449328449329e-05,
+ "loss": 0.3989,
+ "step": 549
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 23.737272262573242,
+ "learning_rate": 5.326007326007326e-05,
+ "loss": 0.5484,
+ "step": 550
+ },
+ {
+ "epoch": 2.0183150183150182,
+ "grad_norm": 24.222341537475586,
+ "learning_rate": 5.3235653235653233e-05,
+ "loss": 0.5365,
+ "step": 551
+ },
+ {
+ "epoch": 2.021978021978022,
+ "grad_norm": 29.081924438476562,
+ "learning_rate": 5.321123321123321e-05,
+ "loss": 0.6694,
+ "step": 552
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 32.419551849365234,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 0.7003,
+ "step": 553
+ },
+ {
+ "epoch": 2.029304029304029,
+ "grad_norm": 42.403709411621094,
+ "learning_rate": 5.316239316239316e-05,
+ "loss": 1.5474,
+ "step": 554
+ },
+ {
+ "epoch": 2.032967032967033,
+ "grad_norm": 17.615140914916992,
+ "learning_rate": 5.313797313797314e-05,
+ "loss": 0.588,
+ "step": 555
+ },
+ {
+ "epoch": 2.0366300366300365,
+ "grad_norm": 14.864067077636719,
+ "learning_rate": 5.311355311355311e-05,
+ "loss": 0.1613,
+ "step": 556
+ },
+ {
+ "epoch": 2.04029304029304,
+ "grad_norm": 20.189815521240234,
+ "learning_rate": 5.308913308913309e-05,
+ "loss": 0.4281,
+ "step": 557
+ },
+ {
+ "epoch": 2.043956043956044,
+ "grad_norm": 28.350017547607422,
+ "learning_rate": 5.306471306471307e-05,
+ "loss": 0.6614,
+ "step": 558
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 19.987825393676758,
+ "learning_rate": 5.304029304029304e-05,
+ "loss": 0.6906,
+ "step": 559
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 18.6667537689209,
+ "learning_rate": 5.301587301587302e-05,
+ "loss": 0.387,
+ "step": 560
+ },
+ {
+ "epoch": 2.0549450549450547,
+ "grad_norm": 20.930652618408203,
+ "learning_rate": 5.2991452991453e-05,
+ "loss": 0.7157,
+ "step": 561
+ },
+ {
+ "epoch": 2.0586080586080584,
+ "grad_norm": 22.05647087097168,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3256,
+ "step": 562
+ },
+ {
+ "epoch": 2.062271062271062,
+ "grad_norm": 32.66161346435547,
+ "learning_rate": 5.294261294261295e-05,
+ "loss": 1.3013,
+ "step": 563
+ },
+ {
+ "epoch": 2.065934065934066,
+ "grad_norm": 37.43238067626953,
+ "learning_rate": 5.291819291819292e-05,
+ "loss": 0.186,
+ "step": 564
+ },
+ {
+ "epoch": 2.06959706959707,
+ "grad_norm": 32.39999008178711,
+ "learning_rate": 5.289377289377289e-05,
+ "loss": 0.8047,
+ "step": 565
+ },
+ {
+ "epoch": 2.0732600732600734,
+ "grad_norm": 29.727481842041016,
+ "learning_rate": 5.286935286935287e-05,
+ "loss": 0.662,
+ "step": 566
+ },
+ {
+ "epoch": 2.076923076923077,
+ "grad_norm": 16.536264419555664,
+ "learning_rate": 5.284493284493285e-05,
+ "loss": 0.4,
+ "step": 567
+ },
+ {
+ "epoch": 2.0805860805860807,
+ "grad_norm": 23.41500473022461,
+ "learning_rate": 5.282051282051282e-05,
+ "loss": 0.4945,
+ "step": 568
+ },
+ {
+ "epoch": 2.0842490842490844,
+ "grad_norm": 48.842864990234375,
+ "learning_rate": 5.27960927960928e-05,
+ "loss": 0.7584,
+ "step": 569
+ },
+ {
+ "epoch": 2.087912087912088,
+ "grad_norm": 60.06027603149414,
+ "learning_rate": 5.277167277167277e-05,
+ "loss": 0.7179,
+ "step": 570
+ },
+ {
+ "epoch": 2.0915750915750917,
+ "grad_norm": 59.2591552734375,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.4883,
+ "step": 571
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 14.527932167053223,
+ "learning_rate": 5.272283272283273e-05,
+ "loss": 0.2811,
+ "step": 572
+ },
+ {
+ "epoch": 2.098901098901099,
+ "grad_norm": 16.2915096282959,
+ "learning_rate": 5.26984126984127e-05,
+ "loss": 0.2524,
+ "step": 573
+ },
+ {
+ "epoch": 2.1025641025641026,
+ "grad_norm": 28.938081741333008,
+ "learning_rate": 5.267399267399268e-05,
+ "loss": 0.5138,
+ "step": 574
+ },
+ {
+ "epoch": 2.1062271062271063,
+ "grad_norm": 27.541440963745117,
+ "learning_rate": 5.2649572649572655e-05,
+ "loss": 0.278,
+ "step": 575
+ },
+ {
+ "epoch": 2.10989010989011,
+ "grad_norm": 23.179025650024414,
+ "learning_rate": 5.262515262515263e-05,
+ "loss": 0.1881,
+ "step": 576
+ },
+ {
+ "epoch": 2.1135531135531136,
+ "grad_norm": 42.55375671386719,
+ "learning_rate": 5.26007326007326e-05,
+ "loss": 0.7882,
+ "step": 577
+ },
+ {
+ "epoch": 2.1172161172161172,
+ "grad_norm": 8.902749061584473,
+ "learning_rate": 5.257631257631258e-05,
+ "loss": 0.0611,
+ "step": 578
+ },
+ {
+ "epoch": 2.120879120879121,
+ "grad_norm": 19.483346939086914,
+ "learning_rate": 5.255189255189255e-05,
+ "loss": 0.0978,
+ "step": 579
+ },
+ {
+ "epoch": 2.1245421245421245,
+ "grad_norm": 13.898221969604492,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.0797,
+ "step": 580
+ },
+ {
+ "epoch": 2.128205128205128,
+ "grad_norm": 53.42538833618164,
+ "learning_rate": 5.2503052503052506e-05,
+ "loss": 0.9066,
+ "step": 581
+ },
+ {
+ "epoch": 2.131868131868132,
+ "grad_norm": 38.467891693115234,
+ "learning_rate": 5.247863247863248e-05,
+ "loss": 0.3272,
+ "step": 582
+ },
+ {
+ "epoch": 2.1355311355311355,
+ "grad_norm": 26.421035766601562,
+ "learning_rate": 5.2454212454212456e-05,
+ "loss": 0.6537,
+ "step": 583
+ },
+ {
+ "epoch": 2.139194139194139,
+ "grad_norm": 32.80412292480469,
+ "learning_rate": 5.2429792429792434e-05,
+ "loss": 1.1225,
+ "step": 584
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 26.87016487121582,
+ "learning_rate": 5.2405372405372406e-05,
+ "loss": 0.5749,
+ "step": 585
+ },
+ {
+ "epoch": 2.1465201465201464,
+ "grad_norm": 34.75699234008789,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 0.6926,
+ "step": 586
+ },
+ {
+ "epoch": 2.15018315018315,
+ "grad_norm": 61.76310348510742,
+ "learning_rate": 5.235653235653236e-05,
+ "loss": 0.9029,
+ "step": 587
+ },
+ {
+ "epoch": 2.1538461538461537,
+ "grad_norm": 40.86505126953125,
+ "learning_rate": 5.2332112332112335e-05,
+ "loss": 0.5169,
+ "step": 588
+ },
+ {
+ "epoch": 2.1575091575091574,
+ "grad_norm": 16.05042839050293,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 0.5211,
+ "step": 589
+ },
+ {
+ "epoch": 2.161172161172161,
+ "grad_norm": 19.56302261352539,
+ "learning_rate": 5.2283272283272285e-05,
+ "loss": 0.5737,
+ "step": 590
+ },
+ {
+ "epoch": 2.1648351648351647,
+ "grad_norm": 22.311508178710938,
+ "learning_rate": 5.2258852258852256e-05,
+ "loss": 0.4223,
+ "step": 591
+ },
+ {
+ "epoch": 2.1684981684981683,
+ "grad_norm": 21.059213638305664,
+ "learning_rate": 5.2234432234432235e-05,
+ "loss": 0.2285,
+ "step": 592
+ },
+ {
+ "epoch": 2.172161172161172,
+ "grad_norm": 28.82351303100586,
+ "learning_rate": 5.221001221001221e-05,
+ "loss": 0.8438,
+ "step": 593
+ },
+ {
+ "epoch": 2.1758241758241756,
+ "grad_norm": 14.425333023071289,
+ "learning_rate": 5.2185592185592185e-05,
+ "loss": 0.1765,
+ "step": 594
+ },
+ {
+ "epoch": 2.1794871794871793,
+ "grad_norm": 16.967479705810547,
+ "learning_rate": 5.2161172161172163e-05,
+ "loss": 0.2465,
+ "step": 595
+ },
+ {
+ "epoch": 2.183150183150183,
+ "grad_norm": 40.79065704345703,
+ "learning_rate": 5.2136752136752135e-05,
+ "loss": 0.6077,
+ "step": 596
+ },
+ {
+ "epoch": 2.186813186813187,
+ "grad_norm": 22.434715270996094,
+ "learning_rate": 5.2112332112332114e-05,
+ "loss": 0.3748,
+ "step": 597
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 32.18471908569336,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.5163,
+ "step": 598
+ },
+ {
+ "epoch": 2.1941391941391943,
+ "grad_norm": 20.43740463256836,
+ "learning_rate": 5.2063492063492064e-05,
+ "loss": 0.4116,
+ "step": 599
+ },
+ {
+ "epoch": 2.197802197802198,
+ "grad_norm": 6.528069496154785,
+ "learning_rate": 5.203907203907204e-05,
+ "loss": 0.065,
+ "step": 600
+ },
+ {
+ "epoch": 2.2014652014652016,
+ "grad_norm": 35.0635871887207,
+ "learning_rate": 5.201465201465202e-05,
+ "loss": 1.2288,
+ "step": 601
+ },
+ {
+ "epoch": 2.2051282051282053,
+ "grad_norm": 23.499767303466797,
+ "learning_rate": 5.199023199023199e-05,
+ "loss": 0.49,
+ "step": 602
+ },
+ {
+ "epoch": 2.208791208791209,
+ "grad_norm": 20.234952926635742,
+ "learning_rate": 5.1965811965811964e-05,
+ "loss": 0.231,
+ "step": 603
+ },
+ {
+ "epoch": 2.2124542124542126,
+ "grad_norm": 9.268828392028809,
+ "learning_rate": 5.194139194139194e-05,
+ "loss": 0.0732,
+ "step": 604
+ },
+ {
+ "epoch": 2.2161172161172162,
+ "grad_norm": 52.60474395751953,
+ "learning_rate": 5.1916971916971914e-05,
+ "loss": 0.8766,
+ "step": 605
+ },
+ {
+ "epoch": 2.21978021978022,
+ "grad_norm": 41.86642074584961,
+ "learning_rate": 5.189255189255189e-05,
+ "loss": 0.4743,
+ "step": 606
+ },
+ {
+ "epoch": 2.2234432234432235,
+ "grad_norm": 30.304580688476562,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.4412,
+ "step": 607
+ },
+ {
+ "epoch": 2.227106227106227,
+ "grad_norm": 27.26057243347168,
+ "learning_rate": 5.184371184371184e-05,
+ "loss": 0.3496,
+ "step": 608
+ },
+ {
+ "epoch": 2.230769230769231,
+ "grad_norm": 40.55131149291992,
+ "learning_rate": 5.181929181929182e-05,
+ "loss": 0.7097,
+ "step": 609
+ },
+ {
+ "epoch": 2.2344322344322345,
+ "grad_norm": 61.97871017456055,
+ "learning_rate": 5.17948717948718e-05,
+ "loss": 1.3686,
+ "step": 610
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 38.211700439453125,
+ "learning_rate": 5.177045177045177e-05,
+ "loss": 0.565,
+ "step": 611
+ },
+ {
+ "epoch": 2.241758241758242,
+ "grad_norm": 20.10716438293457,
+ "learning_rate": 5.174603174603175e-05,
+ "loss": 0.3468,
+ "step": 612
+ },
+ {
+ "epoch": 2.2454212454212454,
+ "grad_norm": 23.96891975402832,
+ "learning_rate": 5.172161172161173e-05,
+ "loss": 0.2295,
+ "step": 613
+ },
+ {
+ "epoch": 2.249084249084249,
+ "grad_norm": 10.14421272277832,
+ "learning_rate": 5.16971916971917e-05,
+ "loss": 0.0943,
+ "step": 614
+ },
+ {
+ "epoch": 2.2527472527472527,
+ "grad_norm": 15.786056518554688,
+ "learning_rate": 5.167277167277167e-05,
+ "loss": 0.1213,
+ "step": 615
+ },
+ {
+ "epoch": 2.2564102564102564,
+ "grad_norm": 20.907663345336914,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.235,
+ "step": 616
+ },
+ {
+ "epoch": 2.26007326007326,
+ "grad_norm": 32.149600982666016,
+ "learning_rate": 5.162393162393162e-05,
+ "loss": 0.4807,
+ "step": 617
+ },
+ {
+ "epoch": 2.2637362637362637,
+ "grad_norm": 33.965518951416016,
+ "learning_rate": 5.15995115995116e-05,
+ "loss": 0.4517,
+ "step": 618
+ },
+ {
+ "epoch": 2.2673992673992673,
+ "grad_norm": 49.98363494873047,
+ "learning_rate": 5.157509157509158e-05,
+ "loss": 0.6434,
+ "step": 619
+ },
+ {
+ "epoch": 2.271062271062271,
+ "grad_norm": 14.035831451416016,
+ "learning_rate": 5.155067155067155e-05,
+ "loss": 0.1117,
+ "step": 620
+ },
+ {
+ "epoch": 2.2747252747252746,
+ "grad_norm": 28.84484100341797,
+ "learning_rate": 5.152625152625153e-05,
+ "loss": 0.8002,
+ "step": 621
+ },
+ {
+ "epoch": 2.2783882783882783,
+ "grad_norm": 41.59181594848633,
+ "learning_rate": 5.15018315018315e-05,
+ "loss": 0.4465,
+ "step": 622
+ },
+ {
+ "epoch": 2.282051282051282,
+ "grad_norm": 33.10573196411133,
+ "learning_rate": 5.147741147741148e-05,
+ "loss": 0.5795,
+ "step": 623
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 34.79928970336914,
+ "learning_rate": 5.145299145299146e-05,
+ "loss": 0.3135,
+ "step": 624
+ },
+ {
+ "epoch": 2.2893772893772892,
+ "grad_norm": 18.095544815063477,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.0961,
+ "step": 625
+ },
+ {
+ "epoch": 2.293040293040293,
+ "grad_norm": 16.55453872680664,
+ "learning_rate": 5.140415140415141e-05,
+ "loss": 0.0868,
+ "step": 626
+ },
+ {
+ "epoch": 2.2967032967032965,
+ "grad_norm": 42.18946075439453,
+ "learning_rate": 5.1379731379731386e-05,
+ "loss": 0.8892,
+ "step": 627
+ },
+ {
+ "epoch": 2.3003663003663,
+ "grad_norm": 54.753448486328125,
+ "learning_rate": 5.135531135531135e-05,
+ "loss": 0.833,
+ "step": 628
+ },
+ {
+ "epoch": 2.304029304029304,
+ "grad_norm": 27.723228454589844,
+ "learning_rate": 5.133089133089133e-05,
+ "loss": 0.2744,
+ "step": 629
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 28.53034019470215,
+ "learning_rate": 5.130647130647131e-05,
+ "loss": 0.1696,
+ "step": 630
+ },
+ {
+ "epoch": 2.311355311355311,
+ "grad_norm": 65.4127426147461,
+ "learning_rate": 5.128205128205128e-05,
+ "loss": 0.9019,
+ "step": 631
+ },
+ {
+ "epoch": 2.315018315018315,
+ "grad_norm": 22.794870376586914,
+ "learning_rate": 5.125763125763126e-05,
+ "loss": 0.1987,
+ "step": 632
+ },
+ {
+ "epoch": 2.3186813186813184,
+ "grad_norm": 29.870113372802734,
+ "learning_rate": 5.1233211233211236e-05,
+ "loss": 0.4816,
+ "step": 633
+ },
+ {
+ "epoch": 2.3223443223443225,
+ "grad_norm": 38.91164779663086,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.7424,
+ "step": 634
+ },
+ {
+ "epoch": 2.326007326007326,
+ "grad_norm": 36.57811737060547,
+ "learning_rate": 5.1184371184371186e-05,
+ "loss": 1.1365,
+ "step": 635
+ },
+ {
+ "epoch": 2.32967032967033,
+ "grad_norm": 31.59128189086914,
+ "learning_rate": 5.1159951159951165e-05,
+ "loss": 0.6167,
+ "step": 636
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 25.956003189086914,
+ "learning_rate": 5.1135531135531136e-05,
+ "loss": 0.8808,
+ "step": 637
+ },
+ {
+ "epoch": 2.336996336996337,
+ "grad_norm": 38.18582534790039,
+ "learning_rate": 5.1111111111111115e-05,
+ "loss": 0.9417,
+ "step": 638
+ },
+ {
+ "epoch": 2.340659340659341,
+ "grad_norm": 27.436229705810547,
+ "learning_rate": 5.108669108669109e-05,
+ "loss": 0.7539,
+ "step": 639
+ },
+ {
+ "epoch": 2.3443223443223444,
+ "grad_norm": 40.86305618286133,
+ "learning_rate": 5.1062271062271065e-05,
+ "loss": 2.126,
+ "step": 640
+ },
+ {
+ "epoch": 2.347985347985348,
+ "grad_norm": 22.224748611450195,
+ "learning_rate": 5.103785103785104e-05,
+ "loss": 0.9958,
+ "step": 641
+ },
+ {
+ "epoch": 2.3516483516483517,
+ "grad_norm": 19.915552139282227,
+ "learning_rate": 5.1013431013431015e-05,
+ "loss": 1.1045,
+ "step": 642
+ },
+ {
+ "epoch": 2.3553113553113554,
+ "grad_norm": 17.045989990234375,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8906,
+ "step": 643
+ },
+ {
+ "epoch": 2.358974358974359,
+ "grad_norm": 22.106670379638672,
+ "learning_rate": 5.0964590964590965e-05,
+ "loss": 0.9856,
+ "step": 644
+ },
+ {
+ "epoch": 2.3626373626373627,
+ "grad_norm": 17.583837509155273,
+ "learning_rate": 5.0940170940170944e-05,
+ "loss": 0.8328,
+ "step": 645
+ },
+ {
+ "epoch": 2.3663003663003663,
+ "grad_norm": 57.61167526245117,
+ "learning_rate": 5.0915750915750915e-05,
+ "loss": 0.578,
+ "step": 646
+ },
+ {
+ "epoch": 2.36996336996337,
+ "grad_norm": 13.941128730773926,
+ "learning_rate": 5.0891330891330894e-05,
+ "loss": 0.5892,
+ "step": 647
+ },
+ {
+ "epoch": 2.3736263736263736,
+ "grad_norm": 22.38715171813965,
+ "learning_rate": 5.0866910866910866e-05,
+ "loss": 0.7608,
+ "step": 648
+ },
+ {
+ "epoch": 2.3772893772893773,
+ "grad_norm": 22.42316436767578,
+ "learning_rate": 5.0842490842490844e-05,
+ "loss": 0.7923,
+ "step": 649
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 32.75740432739258,
+ "learning_rate": 5.081807081807082e-05,
+ "loss": 1.0798,
+ "step": 650
+ },
+ {
+ "epoch": 2.3846153846153846,
+ "grad_norm": 19.295289993286133,
+ "learning_rate": 5.0793650793650794e-05,
+ "loss": 0.4898,
+ "step": 651
+ },
+ {
+ "epoch": 2.3882783882783882,
+ "grad_norm": 25.849227905273438,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.5557,
+ "step": 652
+ },
+ {
+ "epoch": 2.391941391941392,
+ "grad_norm": 21.321088790893555,
+ "learning_rate": 5.074481074481075e-05,
+ "loss": 0.2743,
+ "step": 653
+ },
+ {
+ "epoch": 2.3956043956043955,
+ "grad_norm": 28.795917510986328,
+ "learning_rate": 5.0720390720390716e-05,
+ "loss": 0.7039,
+ "step": 654
+ },
+ {
+ "epoch": 2.399267399267399,
+ "grad_norm": 19.86751937866211,
+ "learning_rate": 5.0695970695970694e-05,
+ "loss": 0.3155,
+ "step": 655
+ },
+ {
+ "epoch": 2.402930402930403,
+ "grad_norm": 33.3828010559082,
+ "learning_rate": 5.067155067155067e-05,
+ "loss": 1.0696,
+ "step": 656
+ },
+ {
+ "epoch": 2.4065934065934065,
+ "grad_norm": 37.38752746582031,
+ "learning_rate": 5.0647130647130645e-05,
+ "loss": 0.8123,
+ "step": 657
+ },
+ {
+ "epoch": 2.41025641025641,
+ "grad_norm": 29.22795867919922,
+ "learning_rate": 5.062271062271062e-05,
+ "loss": 0.9515,
+ "step": 658
+ },
+ {
+ "epoch": 2.413919413919414,
+ "grad_norm": 41.129981994628906,
+ "learning_rate": 5.05982905982906e-05,
+ "loss": 1.1329,
+ "step": 659
+ },
+ {
+ "epoch": 2.4175824175824174,
+ "grad_norm": 40.985042572021484,
+ "learning_rate": 5.057387057387057e-05,
+ "loss": 0.675,
+ "step": 660
+ },
+ {
+ "epoch": 2.421245421245421,
+ "grad_norm": 33.49393844604492,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 0.9679,
+ "step": 661
+ },
+ {
+ "epoch": 2.4249084249084247,
+ "grad_norm": 28.741533279418945,
+ "learning_rate": 5.052503052503053e-05,
+ "loss": 0.7928,
+ "step": 662
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 28.89700698852539,
+ "learning_rate": 5.05006105006105e-05,
+ "loss": 0.7594,
+ "step": 663
+ },
+ {
+ "epoch": 2.4322344322344325,
+ "grad_norm": 4.59797477722168,
+ "learning_rate": 5.047619047619048e-05,
+ "loss": 0.0584,
+ "step": 664
+ },
+ {
+ "epoch": 2.435897435897436,
+ "grad_norm": 29.852828979492188,
+ "learning_rate": 5.045177045177046e-05,
+ "loss": 0.614,
+ "step": 665
+ },
+ {
+ "epoch": 2.4395604395604398,
+ "grad_norm": 15.132670402526855,
+ "learning_rate": 5.042735042735043e-05,
+ "loss": 0.2353,
+ "step": 666
+ },
+ {
+ "epoch": 2.4432234432234434,
+ "grad_norm": 23.85403060913086,
+ "learning_rate": 5.04029304029304e-05,
+ "loss": 0.9065,
+ "step": 667
+ },
+ {
+ "epoch": 2.446886446886447,
+ "grad_norm": 12.384196281433105,
+ "learning_rate": 5.037851037851038e-05,
+ "loss": 0.2065,
+ "step": 668
+ },
+ {
+ "epoch": 2.4505494505494507,
+ "grad_norm": 18.347129821777344,
+ "learning_rate": 5.035409035409035e-05,
+ "loss": 0.647,
+ "step": 669
+ },
+ {
+ "epoch": 2.4542124542124544,
+ "grad_norm": 18.645936965942383,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2072,
+ "step": 670
+ },
+ {
+ "epoch": 2.457875457875458,
+ "grad_norm": 9.493071556091309,
+ "learning_rate": 5.03052503052503e-05,
+ "loss": 0.1805,
+ "step": 671
+ },
+ {
+ "epoch": 2.4615384615384617,
+ "grad_norm": 18.552539825439453,
+ "learning_rate": 5.028083028083028e-05,
+ "loss": 0.4078,
+ "step": 672
+ },
+ {
+ "epoch": 2.4652014652014653,
+ "grad_norm": 21.735048294067383,
+ "learning_rate": 5.025641025641026e-05,
+ "loss": 0.4231,
+ "step": 673
+ },
+ {
+ "epoch": 2.468864468864469,
+ "grad_norm": 54.32040023803711,
+ "learning_rate": 5.023199023199023e-05,
+ "loss": 1.3927,
+ "step": 674
+ },
+ {
+ "epoch": 2.4725274725274726,
+ "grad_norm": 26.955970764160156,
+ "learning_rate": 5.020757020757021e-05,
+ "loss": 0.6899,
+ "step": 675
+ },
+ {
+ "epoch": 2.4761904761904763,
+ "grad_norm": 43.423526763916016,
+ "learning_rate": 5.018315018315019e-05,
+ "loss": 1.2084,
+ "step": 676
+ },
+ {
+ "epoch": 2.47985347985348,
+ "grad_norm": 35.98548126220703,
+ "learning_rate": 5.015873015873016e-05,
+ "loss": 1.5047,
+ "step": 677
+ },
+ {
+ "epoch": 2.4835164835164836,
+ "grad_norm": 22.593570709228516,
+ "learning_rate": 5.013431013431014e-05,
+ "loss": 0.6918,
+ "step": 678
+ },
+ {
+ "epoch": 2.4871794871794872,
+ "grad_norm": 21.29257583618164,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.3578,
+ "step": 679
+ },
+ {
+ "epoch": 2.490842490842491,
+ "grad_norm": 21.672088623046875,
+ "learning_rate": 5.008547008547008e-05,
+ "loss": 0.7757,
+ "step": 680
+ },
+ {
+ "epoch": 2.4945054945054945,
+ "grad_norm": 9.625850677490234,
+ "learning_rate": 5.006105006105006e-05,
+ "loss": 0.1329,
+ "step": 681
+ },
+ {
+ "epoch": 2.498168498168498,
+ "grad_norm": 16.92123794555664,
+ "learning_rate": 5.003663003663004e-05,
+ "loss": 0.5599,
+ "step": 682
+ },
+ {
+ "epoch": 2.501831501831502,
+ "grad_norm": 15.665925025939941,
+ "learning_rate": 5.001221001221001e-05,
+ "loss": 0.3099,
+ "step": 683
+ },
+ {
+ "epoch": 2.5054945054945055,
+ "grad_norm": 21.316635131835938,
+ "learning_rate": 4.998778998778999e-05,
+ "loss": 0.5746,
+ "step": 684
+ },
+ {
+ "epoch": 2.509157509157509,
+ "grad_norm": 24.99594497680664,
+ "learning_rate": 4.996336996336997e-05,
+ "loss": 1.1274,
+ "step": 685
+ },
+ {
+ "epoch": 2.5128205128205128,
+ "grad_norm": 29.795175552368164,
+ "learning_rate": 4.993894993894994e-05,
+ "loss": 0.9991,
+ "step": 686
+ },
+ {
+ "epoch": 2.5164835164835164,
+ "grad_norm": 16.337533950805664,
+ "learning_rate": 4.991452991452992e-05,
+ "loss": 0.4101,
+ "step": 687
+ },
+ {
+ "epoch": 2.52014652014652,
+ "grad_norm": 20.065715789794922,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.7786,
+ "step": 688
+ },
+ {
+ "epoch": 2.5238095238095237,
+ "grad_norm": 19.341567993164062,
+ "learning_rate": 4.986568986568987e-05,
+ "loss": 0.4989,
+ "step": 689
+ },
+ {
+ "epoch": 2.5274725274725274,
+ "grad_norm": 14.688420295715332,
+ "learning_rate": 4.9841269841269845e-05,
+ "loss": 0.4081,
+ "step": 690
+ },
+ {
+ "epoch": 2.531135531135531,
+ "grad_norm": 39.346012115478516,
+ "learning_rate": 4.9816849816849824e-05,
+ "loss": 1.7919,
+ "step": 691
+ },
+ {
+ "epoch": 2.5347985347985347,
+ "grad_norm": 21.353286743164062,
+ "learning_rate": 4.9792429792429796e-05,
+ "loss": 0.698,
+ "step": 692
+ },
+ {
+ "epoch": 2.5384615384615383,
+ "grad_norm": 35.96653366088867,
+ "learning_rate": 4.976800976800977e-05,
+ "loss": 1.6584,
+ "step": 693
+ },
+ {
+ "epoch": 2.542124542124542,
+ "grad_norm": 19.14348793029785,
+ "learning_rate": 4.9743589743589746e-05,
+ "loss": 0.885,
+ "step": 694
+ },
+ {
+ "epoch": 2.5457875457875456,
+ "grad_norm": 9.260897636413574,
+ "learning_rate": 4.971916971916972e-05,
+ "loss": 0.1629,
+ "step": 695
+ },
+ {
+ "epoch": 2.5494505494505493,
+ "grad_norm": 18.497526168823242,
+ "learning_rate": 4.9694749694749696e-05,
+ "loss": 0.7242,
+ "step": 696
+ },
+ {
+ "epoch": 2.553113553113553,
+ "grad_norm": 8.879841804504395,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 0.1302,
+ "step": 697
+ },
+ {
+ "epoch": 2.5567765567765566,
+ "grad_norm": 26.34065818786621,
+ "learning_rate": 4.9645909645909646e-05,
+ "loss": 0.7333,
+ "step": 698
+ },
+ {
+ "epoch": 2.5604395604395602,
+ "grad_norm": 15.10546588897705,
+ "learning_rate": 4.9621489621489624e-05,
+ "loss": 0.3119,
+ "step": 699
+ },
+ {
+ "epoch": 2.564102564102564,
+ "grad_norm": 10.68095874786377,
+ "learning_rate": 4.9597069597069596e-05,
+ "loss": 0.2505,
+ "step": 700
+ },
+ {
+ "epoch": 2.5677655677655675,
+ "grad_norm": 29.08888053894043,
+ "learning_rate": 4.9572649572649575e-05,
+ "loss": 0.4286,
+ "step": 701
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 29.939416885375977,
+ "learning_rate": 4.954822954822955e-05,
+ "loss": 1.1529,
+ "step": 702
+ },
+ {
+ "epoch": 2.575091575091575,
+ "grad_norm": 32.78864669799805,
+ "learning_rate": 4.9523809523809525e-05,
+ "loss": 0.9834,
+ "step": 703
+ },
+ {
+ "epoch": 2.578754578754579,
+ "grad_norm": 13.99082088470459,
+ "learning_rate": 4.94993894993895e-05,
+ "loss": 0.1934,
+ "step": 704
+ },
+ {
+ "epoch": 2.5824175824175826,
+ "grad_norm": 31.696718215942383,
+ "learning_rate": 4.9474969474969475e-05,
+ "loss": 0.6881,
+ "step": 705
+ },
+ {
+ "epoch": 2.586080586080586,
+ "grad_norm": 39.26205062866211,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.573,
+ "step": 706
+ },
+ {
+ "epoch": 2.58974358974359,
+ "grad_norm": 42.08647918701172,
+ "learning_rate": 4.9426129426129425e-05,
+ "loss": 1.5935,
+ "step": 707
+ },
+ {
+ "epoch": 2.5934065934065935,
+ "grad_norm": 24.630651473999023,
+ "learning_rate": 4.94017094017094e-05,
+ "loss": 0.7016,
+ "step": 708
+ },
+ {
+ "epoch": 2.597069597069597,
+ "grad_norm": 35.33428192138672,
+ "learning_rate": 4.9377289377289375e-05,
+ "loss": 0.9646,
+ "step": 709
+ },
+ {
+ "epoch": 2.600732600732601,
+ "grad_norm": 21.643918991088867,
+ "learning_rate": 4.9352869352869353e-05,
+ "loss": 0.3679,
+ "step": 710
+ },
+ {
+ "epoch": 2.6043956043956045,
+ "grad_norm": 10.6254301071167,
+ "learning_rate": 4.932844932844933e-05,
+ "loss": 0.1059,
+ "step": 711
+ },
+ {
+ "epoch": 2.608058608058608,
+ "grad_norm": 23.43462562561035,
+ "learning_rate": 4.9304029304029304e-05,
+ "loss": 0.5128,
+ "step": 712
+ },
+ {
+ "epoch": 2.6117216117216118,
+ "grad_norm": 25.748422622680664,
+ "learning_rate": 4.927960927960928e-05,
+ "loss": 0.6154,
+ "step": 713
+ },
+ {
+ "epoch": 2.6153846153846154,
+ "grad_norm": 23.163209915161133,
+ "learning_rate": 4.925518925518926e-05,
+ "loss": 0.3978,
+ "step": 714
+ },
+ {
+ "epoch": 2.619047619047619,
+ "grad_norm": 22.306194305419922,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.3984,
+ "step": 715
+ },
+ {
+ "epoch": 2.6227106227106227,
+ "grad_norm": 48.16558074951172,
+ "learning_rate": 4.920634920634921e-05,
+ "loss": 0.9568,
+ "step": 716
+ },
+ {
+ "epoch": 2.6263736263736264,
+ "grad_norm": 48.76753234863281,
+ "learning_rate": 4.918192918192919e-05,
+ "loss": 0.6579,
+ "step": 717
+ },
+ {
+ "epoch": 2.63003663003663,
+ "grad_norm": 57.938720703125,
+ "learning_rate": 4.9157509157509154e-05,
+ "loss": 1.0926,
+ "step": 718
+ },
+ {
+ "epoch": 2.6336996336996337,
+ "grad_norm": 25.495267868041992,
+ "learning_rate": 4.913308913308913e-05,
+ "loss": 0.3717,
+ "step": 719
+ },
+ {
+ "epoch": 2.6373626373626373,
+ "grad_norm": 20.054609298706055,
+ "learning_rate": 4.910866910866911e-05,
+ "loss": 0.4502,
+ "step": 720
+ },
+ {
+ "epoch": 2.641025641025641,
+ "grad_norm": 23.096263885498047,
+ "learning_rate": 4.908424908424908e-05,
+ "loss": 0.2794,
+ "step": 721
+ },
+ {
+ "epoch": 2.6446886446886446,
+ "grad_norm": 6.073278903961182,
+ "learning_rate": 4.905982905982906e-05,
+ "loss": 0.0519,
+ "step": 722
+ },
+ {
+ "epoch": 2.6483516483516483,
+ "grad_norm": 38.562618255615234,
+ "learning_rate": 4.903540903540903e-05,
+ "loss": 0.8839,
+ "step": 723
+ },
+ {
+ "epoch": 2.652014652014652,
+ "grad_norm": 23.544757843017578,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.3935,
+ "step": 724
+ },
+ {
+ "epoch": 2.6556776556776556,
+ "grad_norm": 22.844032287597656,
+ "learning_rate": 4.898656898656899e-05,
+ "loss": 0.2428,
+ "step": 725
+ },
+ {
+ "epoch": 2.659340659340659,
+ "grad_norm": 11.537687301635742,
+ "learning_rate": 4.896214896214896e-05,
+ "loss": 0.1538,
+ "step": 726
+ },
+ {
+ "epoch": 2.663003663003663,
+ "grad_norm": 59.37337112426758,
+ "learning_rate": 4.893772893772894e-05,
+ "loss": 1.181,
+ "step": 727
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 22.206314086914062,
+ "learning_rate": 4.891330891330892e-05,
+ "loss": 0.4044,
+ "step": 728
+ },
+ {
+ "epoch": 2.67032967032967,
+ "grad_norm": 27.44620132446289,
+ "learning_rate": 4.888888888888889e-05,
+ "loss": 0.585,
+ "step": 729
+ },
+ {
+ "epoch": 2.6739926739926743,
+ "grad_norm": 35.70675277709961,
+ "learning_rate": 4.886446886446887e-05,
+ "loss": 0.6853,
+ "step": 730
+ },
+ {
+ "epoch": 2.677655677655678,
+ "grad_norm": 25.653356552124023,
+ "learning_rate": 4.884004884004884e-05,
+ "loss": 0.6143,
+ "step": 731
+ },
+ {
+ "epoch": 2.6813186813186816,
+ "grad_norm": 24.242090225219727,
+ "learning_rate": 4.881562881562881e-05,
+ "loss": 0.4365,
+ "step": 732
+ },
+ {
+ "epoch": 2.684981684981685,
+ "grad_norm": 25.621902465820312,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.6644,
+ "step": 733
+ },
+ {
+ "epoch": 2.688644688644689,
+ "grad_norm": 14.14786434173584,
+ "learning_rate": 4.876678876678877e-05,
+ "loss": 0.4117,
+ "step": 734
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 37.98638916015625,
+ "learning_rate": 4.874236874236874e-05,
+ "loss": 1.0452,
+ "step": 735
+ },
+ {
+ "epoch": 2.695970695970696,
+ "grad_norm": 23.186302185058594,
+ "learning_rate": 4.871794871794872e-05,
+ "loss": 0.2642,
+ "step": 736
+ },
+ {
+ "epoch": 2.6996336996337,
+ "grad_norm": 27.23651695251465,
+ "learning_rate": 4.86935286935287e-05,
+ "loss": 0.393,
+ "step": 737
+ },
+ {
+ "epoch": 2.7032967032967035,
+ "grad_norm": 36.44395446777344,
+ "learning_rate": 4.866910866910867e-05,
+ "loss": 1.1309,
+ "step": 738
+ },
+ {
+ "epoch": 2.706959706959707,
+ "grad_norm": 9.733710289001465,
+ "learning_rate": 4.864468864468865e-05,
+ "loss": 0.2466,
+ "step": 739
+ },
+ {
+ "epoch": 2.7106227106227108,
+ "grad_norm": 24.727527618408203,
+ "learning_rate": 4.8620268620268626e-05,
+ "loss": 0.46,
+ "step": 740
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 15.122056007385254,
+ "learning_rate": 4.85958485958486e-05,
+ "loss": 0.3122,
+ "step": 741
+ },
+ {
+ "epoch": 2.717948717948718,
+ "grad_norm": 24.059120178222656,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.2359,
+ "step": 742
+ },
+ {
+ "epoch": 2.7216117216117217,
+ "grad_norm": 7.659122467041016,
+ "learning_rate": 4.8547008547008554e-05,
+ "loss": 0.1212,
+ "step": 743
+ },
+ {
+ "epoch": 2.7252747252747254,
+ "grad_norm": 27.002117156982422,
+ "learning_rate": 4.852258852258852e-05,
+ "loss": 0.7593,
+ "step": 744
+ },
+ {
+ "epoch": 2.728937728937729,
+ "grad_norm": 6.3852009773254395,
+ "learning_rate": 4.84981684981685e-05,
+ "loss": 0.0644,
+ "step": 745
+ },
+ {
+ "epoch": 2.7326007326007327,
+ "grad_norm": 25.574190139770508,
+ "learning_rate": 4.8473748473748476e-05,
+ "loss": 0.7012,
+ "step": 746
+ },
+ {
+ "epoch": 2.7362637362637363,
+ "grad_norm": 15.720768928527832,
+ "learning_rate": 4.844932844932845e-05,
+ "loss": 0.2692,
+ "step": 747
+ },
+ {
+ "epoch": 2.73992673992674,
+ "grad_norm": 25.527997970581055,
+ "learning_rate": 4.8424908424908426e-05,
+ "loss": 0.2648,
+ "step": 748
+ },
+ {
+ "epoch": 2.7435897435897436,
+ "grad_norm": 27.791011810302734,
+ "learning_rate": 4.84004884004884e-05,
+ "loss": 0.6007,
+ "step": 749
+ },
+ {
+ "epoch": 2.7472527472527473,
+ "grad_norm": 20.487640380859375,
+ "learning_rate": 4.8376068376068376e-05,
+ "loss": 0.5715,
+ "step": 750
+ },
+ {
+ "epoch": 2.750915750915751,
+ "grad_norm": 6.386992454528809,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 0.06,
+ "step": 751
+ },
+ {
+ "epoch": 2.7545787545787546,
+ "grad_norm": 13.110812187194824,
+ "learning_rate": 4.8327228327228327e-05,
+ "loss": 0.129,
+ "step": 752
+ },
+ {
+ "epoch": 2.758241758241758,
+ "grad_norm": 26.55845832824707,
+ "learning_rate": 4.8302808302808305e-05,
+ "loss": 0.67,
+ "step": 753
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 38.83135223388672,
+ "learning_rate": 4.8278388278388283e-05,
+ "loss": 1.6656,
+ "step": 754
+ },
+ {
+ "epoch": 2.7655677655677655,
+ "grad_norm": 25.99518585205078,
+ "learning_rate": 4.8253968253968255e-05,
+ "loss": 0.3285,
+ "step": 755
+ },
+ {
+ "epoch": 2.769230769230769,
+ "grad_norm": 17.282081604003906,
+ "learning_rate": 4.8229548229548234e-05,
+ "loss": 0.2217,
+ "step": 756
+ },
+ {
+ "epoch": 2.772893772893773,
+ "grad_norm": 28.849924087524414,
+ "learning_rate": 4.8205128205128205e-05,
+ "loss": 0.7287,
+ "step": 757
+ },
+ {
+ "epoch": 2.7765567765567765,
+ "grad_norm": 45.79567337036133,
+ "learning_rate": 4.818070818070818e-05,
+ "loss": 1.6964,
+ "step": 758
+ },
+ {
+ "epoch": 2.78021978021978,
+ "grad_norm": 15.203421592712402,
+ "learning_rate": 4.8156288156288155e-05,
+ "loss": 0.2351,
+ "step": 759
+ },
+ {
+ "epoch": 2.7838827838827838,
+ "grad_norm": 10.686698913574219,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.1533,
+ "step": 760
+ },
+ {
+ "epoch": 2.7875457875457874,
+ "grad_norm": 24.186473846435547,
+ "learning_rate": 4.8107448107448106e-05,
+ "loss": 1.0973,
+ "step": 761
+ },
+ {
+ "epoch": 2.791208791208791,
+ "grad_norm": 25.378986358642578,
+ "learning_rate": 4.8083028083028084e-05,
+ "loss": 0.5847,
+ "step": 762
+ },
+ {
+ "epoch": 2.7948717948717947,
+ "grad_norm": 20.066482543945312,
+ "learning_rate": 4.805860805860806e-05,
+ "loss": 0.2643,
+ "step": 763
+ },
+ {
+ "epoch": 2.7985347985347984,
+ "grad_norm": 56.11622619628906,
+ "learning_rate": 4.8034188034188034e-05,
+ "loss": 0.6949,
+ "step": 764
+ },
+ {
+ "epoch": 2.802197802197802,
+ "grad_norm": 27.80112648010254,
+ "learning_rate": 4.800976800976801e-05,
+ "loss": 0.5622,
+ "step": 765
+ },
+ {
+ "epoch": 2.8058608058608057,
+ "grad_norm": 30.947532653808594,
+ "learning_rate": 4.798534798534799e-05,
+ "loss": 0.6276,
+ "step": 766
+ },
+ {
+ "epoch": 2.8095238095238093,
+ "grad_norm": 8.91073226928711,
+ "learning_rate": 4.796092796092796e-05,
+ "loss": 0.1302,
+ "step": 767
+ },
+ {
+ "epoch": 2.813186813186813,
+ "grad_norm": 24.65394401550293,
+ "learning_rate": 4.793650793650794e-05,
+ "loss": 0.6811,
+ "step": 768
+ },
+ {
+ "epoch": 2.8168498168498166,
+ "grad_norm": 18.257539749145508,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 0.271,
+ "step": 769
+ },
+ {
+ "epoch": 2.8205128205128203,
+ "grad_norm": 41.41588592529297,
+ "learning_rate": 4.7887667887667884e-05,
+ "loss": 1.4149,
+ "step": 770
+ },
+ {
+ "epoch": 2.824175824175824,
+ "grad_norm": 7.753188610076904,
+ "learning_rate": 4.786324786324786e-05,
+ "loss": 0.0825,
+ "step": 771
+ },
+ {
+ "epoch": 2.8278388278388276,
+ "grad_norm": 208.88290405273438,
+ "learning_rate": 4.783882783882784e-05,
+ "loss": 1.032,
+ "step": 772
+ },
+ {
+ "epoch": 2.8315018315018317,
+ "grad_norm": 31.91672706604004,
+ "learning_rate": 4.781440781440781e-05,
+ "loss": 0.9783,
+ "step": 773
+ },
+ {
+ "epoch": 2.8351648351648353,
+ "grad_norm": 5.72416877746582,
+ "learning_rate": 4.778998778998779e-05,
+ "loss": 0.0399,
+ "step": 774
+ },
+ {
+ "epoch": 2.838827838827839,
+ "grad_norm": 30.503149032592773,
+ "learning_rate": 4.776556776556776e-05,
+ "loss": 0.6465,
+ "step": 775
+ },
+ {
+ "epoch": 2.8424908424908426,
+ "grad_norm": 29.615020751953125,
+ "learning_rate": 4.774114774114774e-05,
+ "loss": 0.5823,
+ "step": 776
+ },
+ {
+ "epoch": 2.8461538461538463,
+ "grad_norm": 49.922611236572266,
+ "learning_rate": 4.771672771672772e-05,
+ "loss": 1.2045,
+ "step": 777
+ },
+ {
+ "epoch": 2.84981684981685,
+ "grad_norm": 23.30948829650879,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.5962,
+ "step": 778
+ },
+ {
+ "epoch": 2.8534798534798536,
+ "grad_norm": 24.784086227416992,
+ "learning_rate": 4.766788766788767e-05,
+ "loss": 0.5702,
+ "step": 779
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 30.03589630126953,
+ "learning_rate": 4.764346764346765e-05,
+ "loss": 0.8644,
+ "step": 780
+ },
+ {
+ "epoch": 2.860805860805861,
+ "grad_norm": 21.079742431640625,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 0.2304,
+ "step": 781
+ },
+ {
+ "epoch": 2.8644688644688645,
+ "grad_norm": 18.438365936279297,
+ "learning_rate": 4.75946275946276e-05,
+ "loss": 0.6457,
+ "step": 782
+ },
+ {
+ "epoch": 2.868131868131868,
+ "grad_norm": 16.265140533447266,
+ "learning_rate": 4.757020757020757e-05,
+ "loss": 0.3693,
+ "step": 783
+ },
+ {
+ "epoch": 2.871794871794872,
+ "grad_norm": 17.526954650878906,
+ "learning_rate": 4.754578754578754e-05,
+ "loss": 0.2614,
+ "step": 784
+ },
+ {
+ "epoch": 2.8754578754578755,
+ "grad_norm": 39.94060134887695,
+ "learning_rate": 4.752136752136752e-05,
+ "loss": 0.2829,
+ "step": 785
+ },
+ {
+ "epoch": 2.879120879120879,
+ "grad_norm": 10.09298324584961,
+ "learning_rate": 4.74969474969475e-05,
+ "loss": 0.1489,
+ "step": 786
+ },
+ {
+ "epoch": 2.8827838827838828,
+ "grad_norm": 29.092544555664062,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6063,
+ "step": 787
+ },
+ {
+ "epoch": 2.8864468864468864,
+ "grad_norm": 30.071422576904297,
+ "learning_rate": 4.744810744810745e-05,
+ "loss": 0.3154,
+ "step": 788
+ },
+ {
+ "epoch": 2.89010989010989,
+ "grad_norm": 26.271251678466797,
+ "learning_rate": 4.742368742368743e-05,
+ "loss": 0.4548,
+ "step": 789
+ },
+ {
+ "epoch": 2.8937728937728937,
+ "grad_norm": 32.386775970458984,
+ "learning_rate": 4.73992673992674e-05,
+ "loss": 0.1872,
+ "step": 790
+ },
+ {
+ "epoch": 2.8974358974358974,
+ "grad_norm": 31.18532943725586,
+ "learning_rate": 4.737484737484738e-05,
+ "loss": 0.847,
+ "step": 791
+ },
+ {
+ "epoch": 2.901098901098901,
+ "grad_norm": 17.924785614013672,
+ "learning_rate": 4.7350427350427356e-05,
+ "loss": 0.1588,
+ "step": 792
+ },
+ {
+ "epoch": 2.9047619047619047,
+ "grad_norm": 16.458614349365234,
+ "learning_rate": 4.732600732600733e-05,
+ "loss": 0.1424,
+ "step": 793
+ },
+ {
+ "epoch": 2.9084249084249083,
+ "grad_norm": 50.29280471801758,
+ "learning_rate": 4.7301587301587306e-05,
+ "loss": 1.5482,
+ "step": 794
+ },
+ {
+ "epoch": 2.912087912087912,
+ "grad_norm": 58.37470245361328,
+ "learning_rate": 4.727716727716728e-05,
+ "loss": 1.8242,
+ "step": 795
+ },
+ {
+ "epoch": 2.9157509157509156,
+ "grad_norm": 32.5267448425293,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 1.1197,
+ "step": 796
+ },
+ {
+ "epoch": 2.9194139194139193,
+ "grad_norm": 43.77764892578125,
+ "learning_rate": 4.722832722832723e-05,
+ "loss": 0.7322,
+ "step": 797
+ },
+ {
+ "epoch": 2.9230769230769234,
+ "grad_norm": 25.303524017333984,
+ "learning_rate": 4.720390720390721e-05,
+ "loss": 0.6557,
+ "step": 798
+ },
+ {
+ "epoch": 2.926739926739927,
+ "grad_norm": 23.90159797668457,
+ "learning_rate": 4.717948717948718e-05,
+ "loss": 0.2669,
+ "step": 799
+ },
+ {
+ "epoch": 2.9304029304029307,
+ "grad_norm": 21.20945930480957,
+ "learning_rate": 4.715506715506716e-05,
+ "loss": 0.3279,
+ "step": 800
+ },
+ {
+ "epoch": 2.9340659340659343,
+ "grad_norm": 28.819482803344727,
+ "learning_rate": 4.713064713064713e-05,
+ "loss": 0.717,
+ "step": 801
+ },
+ {
+ "epoch": 2.937728937728938,
+ "grad_norm": 9.13611125946045,
+ "learning_rate": 4.710622710622711e-05,
+ "loss": 0.1291,
+ "step": 802
+ },
+ {
+ "epoch": 2.9413919413919416,
+ "grad_norm": 22.16252326965332,
+ "learning_rate": 4.7081807081807085e-05,
+ "loss": 0.4406,
+ "step": 803
+ },
+ {
+ "epoch": 2.9450549450549453,
+ "grad_norm": 47.73503112792969,
+ "learning_rate": 4.705738705738706e-05,
+ "loss": 0.6176,
+ "step": 804
+ },
+ {
+ "epoch": 2.948717948717949,
+ "grad_norm": 61.73493576049805,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.581,
+ "step": 805
+ },
+ {
+ "epoch": 2.9523809523809526,
+ "grad_norm": 22.48004722595215,
+ "learning_rate": 4.7008547008547014e-05,
+ "loss": 0.7404,
+ "step": 806
+ },
+ {
+ "epoch": 2.956043956043956,
+ "grad_norm": 54.2432746887207,
+ "learning_rate": 4.6984126984126986e-05,
+ "loss": 1.1522,
+ "step": 807
+ },
+ {
+ "epoch": 2.95970695970696,
+ "grad_norm": 26.221921920776367,
+ "learning_rate": 4.695970695970696e-05,
+ "loss": 0.4869,
+ "step": 808
+ },
+ {
+ "epoch": 2.9633699633699635,
+ "grad_norm": 21.688526153564453,
+ "learning_rate": 4.6935286935286936e-05,
+ "loss": 0.6639,
+ "step": 809
+ },
+ {
+ "epoch": 2.967032967032967,
+ "grad_norm": 5.81218147277832,
+ "learning_rate": 4.691086691086691e-05,
+ "loss": 0.0824,
+ "step": 810
+ },
+ {
+ "epoch": 2.970695970695971,
+ "grad_norm": 39.09580612182617,
+ "learning_rate": 4.6886446886446886e-05,
+ "loss": 1.5035,
+ "step": 811
+ },
+ {
+ "epoch": 2.9743589743589745,
+ "grad_norm": 24.587574005126953,
+ "learning_rate": 4.6862026862026864e-05,
+ "loss": 1.1107,
+ "step": 812
+ },
+ {
+ "epoch": 2.978021978021978,
+ "grad_norm": 25.25336265563965,
+ "learning_rate": 4.6837606837606836e-05,
+ "loss": 0.7764,
+ "step": 813
+ },
+ {
+ "epoch": 2.9816849816849818,
+ "grad_norm": 16.311378479003906,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.4079,
+ "step": 814
+ },
+ {
+ "epoch": 2.9853479853479854,
+ "grad_norm": 19.0888729095459,
+ "learning_rate": 4.678876678876679e-05,
+ "loss": 0.5259,
+ "step": 815
+ },
+ {
+ "epoch": 2.989010989010989,
+ "grad_norm": 24.599462509155273,
+ "learning_rate": 4.6764346764346765e-05,
+ "loss": 0.7475,
+ "step": 816
+ },
+ {
+ "epoch": 2.9926739926739927,
+ "grad_norm": 20.4777889251709,
+ "learning_rate": 4.673992673992674e-05,
+ "loss": 0.356,
+ "step": 817
+ },
+ {
+ "epoch": 2.9963369963369964,
+ "grad_norm": 30.4327449798584,
+ "learning_rate": 4.671550671550672e-05,
+ "loss": 0.7958,
+ "step": 818
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 25.57271385192871,
+ "learning_rate": 4.669108669108669e-05,
+ "loss": 0.3918,
+ "step": 819
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 3.9672563076019287,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 0.0469,
+ "step": 820
+ },
+ {
+ "epoch": 3.0073260073260073,
+ "grad_norm": 6.657567501068115,
+ "learning_rate": 4.664224664224664e-05,
+ "loss": 0.0939,
+ "step": 821
+ },
+ {
+ "epoch": 3.010989010989011,
+ "grad_norm": 12.558409690856934,
+ "learning_rate": 4.6617826617826615e-05,
+ "loss": 0.1578,
+ "step": 822
+ },
+ {
+ "epoch": 3.0146520146520146,
+ "grad_norm": 18.909244537353516,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.3209,
+ "step": 823
+ },
+ {
+ "epoch": 3.0183150183150182,
+ "grad_norm": 10.995687484741211,
+ "learning_rate": 4.656898656898657e-05,
+ "loss": 0.1198,
+ "step": 824
+ },
+ {
+ "epoch": 3.021978021978022,
+ "grad_norm": 16.14252471923828,
+ "learning_rate": 4.6544566544566544e-05,
+ "loss": 0.1431,
+ "step": 825
+ },
+ {
+ "epoch": 3.0256410256410255,
+ "grad_norm": 25.924381256103516,
+ "learning_rate": 4.652014652014652e-05,
+ "loss": 0.3989,
+ "step": 826
+ },
+ {
+ "epoch": 3.029304029304029,
+ "grad_norm": 4.87798547744751,
+ "learning_rate": 4.6495726495726494e-05,
+ "loss": 0.0472,
+ "step": 827
+ },
+ {
+ "epoch": 3.032967032967033,
+ "grad_norm": 15.078110694885254,
+ "learning_rate": 4.647130647130647e-05,
+ "loss": 0.1955,
+ "step": 828
+ },
+ {
+ "epoch": 3.0366300366300365,
+ "grad_norm": 19.74415397644043,
+ "learning_rate": 4.644688644688645e-05,
+ "loss": 0.1593,
+ "step": 829
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 43.4788818359375,
+ "learning_rate": 4.642246642246642e-05,
+ "loss": 0.7917,
+ "step": 830
+ },
+ {
+ "epoch": 3.043956043956044,
+ "grad_norm": 27.122041702270508,
+ "learning_rate": 4.63980463980464e-05,
+ "loss": 0.1693,
+ "step": 831
+ },
+ {
+ "epoch": 3.0476190476190474,
+ "grad_norm": 9.51154899597168,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 0.0806,
+ "step": 832
+ },
+ {
+ "epoch": 3.051282051282051,
+ "grad_norm": 11.48532772064209,
+ "learning_rate": 4.634920634920635e-05,
+ "loss": 0.0815,
+ "step": 833
+ },
+ {
+ "epoch": 3.0549450549450547,
+ "grad_norm": 13.547063827514648,
+ "learning_rate": 4.632478632478632e-05,
+ "loss": 0.0817,
+ "step": 834
+ },
+ {
+ "epoch": 3.0586080586080584,
+ "grad_norm": 24.334409713745117,
+ "learning_rate": 4.63003663003663e-05,
+ "loss": 0.547,
+ "step": 835
+ },
+ {
+ "epoch": 3.062271062271062,
+ "grad_norm": 87.3517837524414,
+ "learning_rate": 4.627594627594627e-05,
+ "loss": 0.6534,
+ "step": 836
+ },
+ {
+ "epoch": 3.065934065934066,
+ "grad_norm": 16.100278854370117,
+ "learning_rate": 4.625152625152625e-05,
+ "loss": 0.2961,
+ "step": 837
+ },
+ {
+ "epoch": 3.06959706959707,
+ "grad_norm": 20.725875854492188,
+ "learning_rate": 4.622710622710623e-05,
+ "loss": 0.1114,
+ "step": 838
+ },
+ {
+ "epoch": 3.0732600732600734,
+ "grad_norm": 53.809722900390625,
+ "learning_rate": 4.62026862026862e-05,
+ "loss": 0.3808,
+ "step": 839
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 3.237959623336792,
+ "learning_rate": 4.617826617826618e-05,
+ "loss": 0.019,
+ "step": 840
+ },
+ {
+ "epoch": 3.0805860805860807,
+ "grad_norm": 69.71659088134766,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 1.0945,
+ "step": 841
+ },
+ {
+ "epoch": 3.0842490842490844,
+ "grad_norm": 31.005935668945312,
+ "learning_rate": 4.612942612942613e-05,
+ "loss": 0.3241,
+ "step": 842
+ },
+ {
+ "epoch": 3.087912087912088,
+ "grad_norm": 66.98394775390625,
+ "learning_rate": 4.610500610500611e-05,
+ "loss": 1.0213,
+ "step": 843
+ },
+ {
+ "epoch": 3.0915750915750917,
+ "grad_norm": 23.54532814025879,
+ "learning_rate": 4.608058608058609e-05,
+ "loss": 0.2188,
+ "step": 844
+ },
+ {
+ "epoch": 3.0952380952380953,
+ "grad_norm": 25.952709197998047,
+ "learning_rate": 4.605616605616606e-05,
+ "loss": 0.4305,
+ "step": 845
+ },
+ {
+ "epoch": 3.098901098901099,
+ "grad_norm": 36.100746154785156,
+ "learning_rate": 4.603174603174604e-05,
+ "loss": 0.6497,
+ "step": 846
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 60.34727478027344,
+ "learning_rate": 4.600732600732601e-05,
+ "loss": 0.3083,
+ "step": 847
+ },
+ {
+ "epoch": 3.1062271062271063,
+ "grad_norm": 35.265167236328125,
+ "learning_rate": 4.598290598290598e-05,
+ "loss": 0.3222,
+ "step": 848
+ },
+ {
+ "epoch": 3.10989010989011,
+ "grad_norm": 19.180070877075195,
+ "learning_rate": 4.595848595848596e-05,
+ "loss": 0.4065,
+ "step": 849
+ },
+ {
+ "epoch": 3.1135531135531136,
+ "grad_norm": 22.92152976989746,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.3998,
+ "step": 850
+ },
+ {
+ "epoch": 3.1172161172161172,
+ "grad_norm": 48.91377639770508,
+ "learning_rate": 4.590964590964591e-05,
+ "loss": 0.7035,
+ "step": 851
+ },
+ {
+ "epoch": 3.120879120879121,
+ "grad_norm": 11.615083694458008,
+ "learning_rate": 4.588522588522589e-05,
+ "loss": 0.3102,
+ "step": 852
+ },
+ {
+ "epoch": 3.1245421245421245,
+ "grad_norm": 23.573801040649414,
+ "learning_rate": 4.586080586080586e-05,
+ "loss": 0.3358,
+ "step": 853
+ },
+ {
+ "epoch": 3.128205128205128,
+ "grad_norm": 16.903776168823242,
+ "learning_rate": 4.583638583638584e-05,
+ "loss": 0.2973,
+ "step": 854
+ },
+ {
+ "epoch": 3.131868131868132,
+ "grad_norm": 6.052688121795654,
+ "learning_rate": 4.5811965811965816e-05,
+ "loss": 0.0671,
+ "step": 855
+ },
+ {
+ "epoch": 3.1355311355311355,
+ "grad_norm": 34.40020751953125,
+ "learning_rate": 4.578754578754579e-05,
+ "loss": 0.508,
+ "step": 856
+ },
+ {
+ "epoch": 3.139194139194139,
+ "grad_norm": 21.39589500427246,
+ "learning_rate": 4.5763125763125766e-05,
+ "loss": 0.0805,
+ "step": 857
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 24.03894805908203,
+ "learning_rate": 4.5738705738705744e-05,
+ "loss": 0.1884,
+ "step": 858
+ },
+ {
+ "epoch": 3.1465201465201464,
+ "grad_norm": 66.53777313232422,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 0.5235,
+ "step": 859
+ },
+ {
+ "epoch": 3.15018315018315,
+ "grad_norm": 33.663490295410156,
+ "learning_rate": 4.568986568986569e-05,
+ "loss": 0.7579,
+ "step": 860
+ },
+ {
+ "epoch": 3.1538461538461537,
+ "grad_norm": 30.173309326171875,
+ "learning_rate": 4.5665445665445666e-05,
+ "loss": 0.2263,
+ "step": 861
+ },
+ {
+ "epoch": 3.1575091575091574,
+ "grad_norm": 37.52082824707031,
+ "learning_rate": 4.564102564102564e-05,
+ "loss": 0.5695,
+ "step": 862
+ },
+ {
+ "epoch": 3.161172161172161,
+ "grad_norm": 38.86849594116211,
+ "learning_rate": 4.5616605616605616e-05,
+ "loss": 0.6981,
+ "step": 863
+ },
+ {
+ "epoch": 3.1648351648351647,
+ "grad_norm": 42.702247619628906,
+ "learning_rate": 4.5592185592185595e-05,
+ "loss": 0.9864,
+ "step": 864
+ },
+ {
+ "epoch": 3.1684981684981683,
+ "grad_norm": 16.60870361328125,
+ "learning_rate": 4.5567765567765566e-05,
+ "loss": 0.1595,
+ "step": 865
+ },
+ {
+ "epoch": 3.172161172161172,
+ "grad_norm": 26.309768676757812,
+ "learning_rate": 4.5543345543345545e-05,
+ "loss": 0.4028,
+ "step": 866
+ },
+ {
+ "epoch": 3.1758241758241756,
+ "grad_norm": 45.7955322265625,
+ "learning_rate": 4.551892551892552e-05,
+ "loss": 1.1258,
+ "step": 867
+ },
+ {
+ "epoch": 3.1794871794871793,
+ "grad_norm": 25.780302047729492,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.4018,
+ "step": 868
+ },
+ {
+ "epoch": 3.183150183150183,
+ "grad_norm": 41.65156555175781,
+ "learning_rate": 4.5470085470085474e-05,
+ "loss": 0.4543,
+ "step": 869
+ },
+ {
+ "epoch": 3.186813186813187,
+ "grad_norm": 56.92537307739258,
+ "learning_rate": 4.544566544566545e-05,
+ "loss": 0.334,
+ "step": 870
+ },
+ {
+ "epoch": 3.1904761904761907,
+ "grad_norm": 19.44786262512207,
+ "learning_rate": 4.5421245421245424e-05,
+ "loss": 0.2855,
+ "step": 871
+ },
+ {
+ "epoch": 3.1941391941391943,
+ "grad_norm": 19.75824546813965,
+ "learning_rate": 4.53968253968254e-05,
+ "loss": 0.2589,
+ "step": 872
+ },
+ {
+ "epoch": 3.197802197802198,
+ "grad_norm": 30.935569763183594,
+ "learning_rate": 4.5372405372405374e-05,
+ "loss": 0.5083,
+ "step": 873
+ },
+ {
+ "epoch": 3.2014652014652016,
+ "grad_norm": 32.59378433227539,
+ "learning_rate": 4.5347985347985345e-05,
+ "loss": 0.6806,
+ "step": 874
+ },
+ {
+ "epoch": 3.2051282051282053,
+ "grad_norm": 32.7809944152832,
+ "learning_rate": 4.5323565323565324e-05,
+ "loss": 0.7094,
+ "step": 875
+ },
+ {
+ "epoch": 3.208791208791209,
+ "grad_norm": 22.95226287841797,
+ "learning_rate": 4.5299145299145296e-05,
+ "loss": 0.3871,
+ "step": 876
+ },
+ {
+ "epoch": 3.2124542124542126,
+ "grad_norm": 13.90613079071045,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.2049,
+ "step": 877
+ },
+ {
+ "epoch": 3.2161172161172162,
+ "grad_norm": 36.79647445678711,
+ "learning_rate": 4.525030525030525e-05,
+ "loss": 0.959,
+ "step": 878
+ },
+ {
+ "epoch": 3.21978021978022,
+ "grad_norm": 16.770553588867188,
+ "learning_rate": 4.5225885225885224e-05,
+ "loss": 0.3061,
+ "step": 879
+ },
+ {
+ "epoch": 3.2234432234432235,
+ "grad_norm": 22.241527557373047,
+ "learning_rate": 4.52014652014652e-05,
+ "loss": 0.1961,
+ "step": 880
+ },
+ {
+ "epoch": 3.227106227106227,
+ "grad_norm": 51.097957611083984,
+ "learning_rate": 4.517704517704518e-05,
+ "loss": 0.5272,
+ "step": 881
+ },
+ {
+ "epoch": 3.230769230769231,
+ "grad_norm": 43.70039749145508,
+ "learning_rate": 4.515262515262515e-05,
+ "loss": 0.6764,
+ "step": 882
+ },
+ {
+ "epoch": 3.2344322344322345,
+ "grad_norm": 30.666664123535156,
+ "learning_rate": 4.512820512820513e-05,
+ "loss": 0.6524,
+ "step": 883
+ },
+ {
+ "epoch": 3.238095238095238,
+ "grad_norm": 16.787954330444336,
+ "learning_rate": 4.510378510378511e-05,
+ "loss": 0.178,
+ "step": 884
+ },
+ {
+ "epoch": 3.241758241758242,
+ "grad_norm": 32.14992904663086,
+ "learning_rate": 4.507936507936508e-05,
+ "loss": 0.6206,
+ "step": 885
+ },
+ {
+ "epoch": 3.2454212454212454,
+ "grad_norm": 24.926103591918945,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.4696,
+ "step": 886
+ },
+ {
+ "epoch": 3.249084249084249,
+ "grad_norm": 31.044967651367188,
+ "learning_rate": 4.503052503052503e-05,
+ "loss": 0.3021,
+ "step": 887
+ },
+ {
+ "epoch": 3.2527472527472527,
+ "grad_norm": 10.355696678161621,
+ "learning_rate": 4.5006105006105e-05,
+ "loss": 0.0784,
+ "step": 888
+ },
+ {
+ "epoch": 3.2564102564102564,
+ "grad_norm": 28.19644546508789,
+ "learning_rate": 4.498168498168498e-05,
+ "loss": 0.234,
+ "step": 889
+ },
+ {
+ "epoch": 3.26007326007326,
+ "grad_norm": 21.245389938354492,
+ "learning_rate": 4.495726495726496e-05,
+ "loss": 0.2895,
+ "step": 890
+ },
+ {
+ "epoch": 3.2637362637362637,
+ "grad_norm": 27.337587356567383,
+ "learning_rate": 4.493284493284493e-05,
+ "loss": 0.4614,
+ "step": 891
+ },
+ {
+ "epoch": 3.2673992673992673,
+ "grad_norm": 37.06135177612305,
+ "learning_rate": 4.490842490842491e-05,
+ "loss": 0.2717,
+ "step": 892
+ },
+ {
+ "epoch": 3.271062271062271,
+ "grad_norm": 26.85171890258789,
+ "learning_rate": 4.488400488400489e-05,
+ "loss": 0.4965,
+ "step": 893
+ },
+ {
+ "epoch": 3.2747252747252746,
+ "grad_norm": 41.79130935668945,
+ "learning_rate": 4.485958485958486e-05,
+ "loss": 0.4209,
+ "step": 894
+ },
+ {
+ "epoch": 3.2783882783882783,
+ "grad_norm": 32.75770950317383,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 0.5126,
+ "step": 895
+ },
+ {
+ "epoch": 3.282051282051282,
+ "grad_norm": 67.75275421142578,
+ "learning_rate": 4.481074481074482e-05,
+ "loss": 0.8257,
+ "step": 896
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 36.773319244384766,
+ "learning_rate": 4.478632478632479e-05,
+ "loss": 1.6113,
+ "step": 897
+ },
+ {
+ "epoch": 3.2893772893772892,
+ "grad_norm": 60.94101333618164,
+ "learning_rate": 4.476190476190476e-05,
+ "loss": 0.7996,
+ "step": 898
+ },
+ {
+ "epoch": 3.293040293040293,
+ "grad_norm": 45.40288162231445,
+ "learning_rate": 4.473748473748474e-05,
+ "loss": 0.7139,
+ "step": 899
+ },
+ {
+ "epoch": 3.2967032967032965,
+ "grad_norm": 27.4019718170166,
+ "learning_rate": 4.471306471306471e-05,
+ "loss": 0.4695,
+ "step": 900
+ },
+ {
+ "epoch": 3.3003663003663,
+ "grad_norm": 20.126493453979492,
+ "learning_rate": 4.468864468864469e-05,
+ "loss": 0.2181,
+ "step": 901
+ },
+ {
+ "epoch": 3.304029304029304,
+ "grad_norm": 37.28034591674805,
+ "learning_rate": 4.466422466422466e-05,
+ "loss": 0.8902,
+ "step": 902
+ },
+ {
+ "epoch": 3.3076923076923075,
+ "grad_norm": 15.40217113494873,
+ "learning_rate": 4.463980463980464e-05,
+ "loss": 0.2428,
+ "step": 903
+ },
+ {
+ "epoch": 3.311355311355311,
+ "grad_norm": 21.924699783325195,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 0.3271,
+ "step": 904
+ },
+ {
+ "epoch": 3.315018315018315,
+ "grad_norm": 29.787410736083984,
+ "learning_rate": 4.459096459096459e-05,
+ "loss": 0.5914,
+ "step": 905
+ },
+ {
+ "epoch": 3.3186813186813184,
+ "grad_norm": 16.91995620727539,
+ "learning_rate": 4.456654456654457e-05,
+ "loss": 0.3442,
+ "step": 906
+ },
+ {
+ "epoch": 3.3223443223443225,
+ "grad_norm": 13.232250213623047,
+ "learning_rate": 4.4542124542124546e-05,
+ "loss": 0.1977,
+ "step": 907
+ },
+ {
+ "epoch": 3.326007326007326,
+ "grad_norm": 25.45724868774414,
+ "learning_rate": 4.451770451770452e-05,
+ "loss": 0.8241,
+ "step": 908
+ },
+ {
+ "epoch": 3.32967032967033,
+ "grad_norm": 20.996292114257812,
+ "learning_rate": 4.4493284493284496e-05,
+ "loss": 0.3154,
+ "step": 909
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 28.150684356689453,
+ "learning_rate": 4.4468864468864475e-05,
+ "loss": 0.4077,
+ "step": 910
+ },
+ {
+ "epoch": 3.336996336996337,
+ "grad_norm": 57.184322357177734,
+ "learning_rate": 4.444444444444444e-05,
+ "loss": 0.5701,
+ "step": 911
+ },
+ {
+ "epoch": 3.340659340659341,
+ "grad_norm": 26.231369018554688,
+ "learning_rate": 4.442002442002442e-05,
+ "loss": 0.4427,
+ "step": 912
+ },
+ {
+ "epoch": 3.3443223443223444,
+ "grad_norm": 32.52253723144531,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 1.014,
+ "step": 913
+ },
+ {
+ "epoch": 3.347985347985348,
+ "grad_norm": 19.39035987854004,
+ "learning_rate": 4.437118437118437e-05,
+ "loss": 0.1567,
+ "step": 914
+ },
+ {
+ "epoch": 3.3516483516483517,
+ "grad_norm": 24.542327880859375,
+ "learning_rate": 4.434676434676435e-05,
+ "loss": 0.5478,
+ "step": 915
+ },
+ {
+ "epoch": 3.3553113553113554,
+ "grad_norm": 46.6158447265625,
+ "learning_rate": 4.4322344322344325e-05,
+ "loss": 0.5636,
+ "step": 916
+ },
+ {
+ "epoch": 3.358974358974359,
+ "grad_norm": 36.008846282958984,
+ "learning_rate": 4.42979242979243e-05,
+ "loss": 0.4401,
+ "step": 917
+ },
+ {
+ "epoch": 3.3626373626373627,
+ "grad_norm": 6.922544956207275,
+ "learning_rate": 4.4273504273504275e-05,
+ "loss": 0.0885,
+ "step": 918
+ },
+ {
+ "epoch": 3.3663003663003663,
+ "grad_norm": 25.707748413085938,
+ "learning_rate": 4.4249084249084254e-05,
+ "loss": 0.3235,
+ "step": 919
+ },
+ {
+ "epoch": 3.36996336996337,
+ "grad_norm": 47.98778533935547,
+ "learning_rate": 4.4224664224664226e-05,
+ "loss": 1.3738,
+ "step": 920
+ },
+ {
+ "epoch": 3.3736263736263736,
+ "grad_norm": 26.64824104309082,
+ "learning_rate": 4.4200244200244204e-05,
+ "loss": 0.8405,
+ "step": 921
+ },
+ {
+ "epoch": 3.3772893772893773,
+ "grad_norm": 30.66206169128418,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.3021,
+ "step": 922
+ },
+ {
+ "epoch": 3.380952380952381,
+ "grad_norm": 33.15909194946289,
+ "learning_rate": 4.4151404151404154e-05,
+ "loss": 0.3064,
+ "step": 923
+ },
+ {
+ "epoch": 3.3846153846153846,
+ "grad_norm": 78.46485137939453,
+ "learning_rate": 4.4126984126984126e-05,
+ "loss": 0.6526,
+ "step": 924
+ },
+ {
+ "epoch": 3.3882783882783882,
+ "grad_norm": 45.584747314453125,
+ "learning_rate": 4.4102564102564104e-05,
+ "loss": 0.9546,
+ "step": 925
+ },
+ {
+ "epoch": 3.391941391941392,
+ "grad_norm": 23.244487762451172,
+ "learning_rate": 4.4078144078144076e-05,
+ "loss": 0.3334,
+ "step": 926
+ },
+ {
+ "epoch": 3.3956043956043955,
+ "grad_norm": 9.296119689941406,
+ "learning_rate": 4.4053724053724054e-05,
+ "loss": 0.1045,
+ "step": 927
+ },
+ {
+ "epoch": 3.399267399267399,
+ "grad_norm": 15.207316398620605,
+ "learning_rate": 4.4029304029304026e-05,
+ "loss": 0.087,
+ "step": 928
+ },
+ {
+ "epoch": 3.402930402930403,
+ "grad_norm": 20.554912567138672,
+ "learning_rate": 4.4004884004884005e-05,
+ "loss": 0.2658,
+ "step": 929
+ },
+ {
+ "epoch": 3.4065934065934065,
+ "grad_norm": 25.304515838623047,
+ "learning_rate": 4.398046398046398e-05,
+ "loss": 0.2862,
+ "step": 930
+ },
+ {
+ "epoch": 3.41025641025641,
+ "grad_norm": 44.320377349853516,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 1.1972,
+ "step": 931
+ },
+ {
+ "epoch": 3.413919413919414,
+ "grad_norm": 21.3024845123291,
+ "learning_rate": 4.393162393162393e-05,
+ "loss": 0.2193,
+ "step": 932
+ },
+ {
+ "epoch": 3.4175824175824174,
+ "grad_norm": 12.274759292602539,
+ "learning_rate": 4.390720390720391e-05,
+ "loss": 0.1033,
+ "step": 933
+ },
+ {
+ "epoch": 3.421245421245421,
+ "grad_norm": 29.188446044921875,
+ "learning_rate": 4.388278388278388e-05,
+ "loss": 0.8143,
+ "step": 934
+ },
+ {
+ "epoch": 3.4249084249084247,
+ "grad_norm": 11.880194664001465,
+ "learning_rate": 4.385836385836386e-05,
+ "loss": 0.0932,
+ "step": 935
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 28.859825134277344,
+ "learning_rate": 4.383394383394384e-05,
+ "loss": 0.6026,
+ "step": 936
+ },
+ {
+ "epoch": 3.4322344322344325,
+ "grad_norm": 25.131824493408203,
+ "learning_rate": 4.3809523809523805e-05,
+ "loss": 0.4023,
+ "step": 937
+ },
+ {
+ "epoch": 3.435897435897436,
+ "grad_norm": 35.04637145996094,
+ "learning_rate": 4.3785103785103783e-05,
+ "loss": 0.7765,
+ "step": 938
+ },
+ {
+ "epoch": 3.4395604395604398,
+ "grad_norm": 15.831666946411133,
+ "learning_rate": 4.376068376068376e-05,
+ "loss": 0.1779,
+ "step": 939
+ },
+ {
+ "epoch": 3.4432234432234434,
+ "grad_norm": 26.455148696899414,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.3165,
+ "step": 940
+ },
+ {
+ "epoch": 3.446886446886447,
+ "grad_norm": 23.840030670166016,
+ "learning_rate": 4.371184371184371e-05,
+ "loss": 0.5363,
+ "step": 941
+ },
+ {
+ "epoch": 3.4505494505494507,
+ "grad_norm": 30.517026901245117,
+ "learning_rate": 4.368742368742369e-05,
+ "loss": 0.422,
+ "step": 942
+ },
+ {
+ "epoch": 3.4542124542124544,
+ "grad_norm": 51.574703216552734,
+ "learning_rate": 4.366300366300366e-05,
+ "loss": 1.5333,
+ "step": 943
+ },
+ {
+ "epoch": 3.457875457875458,
+ "grad_norm": 57.92119216918945,
+ "learning_rate": 4.363858363858364e-05,
+ "loss": 0.5732,
+ "step": 944
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 34.3664436340332,
+ "learning_rate": 4.361416361416362e-05,
+ "loss": 0.5054,
+ "step": 945
+ },
+ {
+ "epoch": 3.4652014652014653,
+ "grad_norm": 14.034111976623535,
+ "learning_rate": 4.358974358974359e-05,
+ "loss": 0.0969,
+ "step": 946
+ },
+ {
+ "epoch": 3.468864468864469,
+ "grad_norm": 15.058267593383789,
+ "learning_rate": 4.356532356532357e-05,
+ "loss": 0.1877,
+ "step": 947
+ },
+ {
+ "epoch": 3.4725274725274726,
+ "grad_norm": 18.598024368286133,
+ "learning_rate": 4.354090354090355e-05,
+ "loss": 0.2378,
+ "step": 948
+ },
+ {
+ "epoch": 3.4761904761904763,
+ "grad_norm": 17.926319122314453,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.2935,
+ "step": 949
+ },
+ {
+ "epoch": 3.47985347985348,
+ "grad_norm": 8.25291633605957,
+ "learning_rate": 4.349206349206349e-05,
+ "loss": 0.0891,
+ "step": 950
+ },
+ {
+ "epoch": 3.4835164835164836,
+ "grad_norm": 26.152061462402344,
+ "learning_rate": 4.346764346764347e-05,
+ "loss": 0.2798,
+ "step": 951
+ },
+ {
+ "epoch": 3.4871794871794872,
+ "grad_norm": 22.669677734375,
+ "learning_rate": 4.344322344322344e-05,
+ "loss": 0.506,
+ "step": 952
+ },
+ {
+ "epoch": 3.490842490842491,
+ "grad_norm": 18.439355850219727,
+ "learning_rate": 4.341880341880342e-05,
+ "loss": 0.3034,
+ "step": 953
+ },
+ {
+ "epoch": 3.4945054945054945,
+ "grad_norm": 30.48084259033203,
+ "learning_rate": 4.339438339438339e-05,
+ "loss": 0.4366,
+ "step": 954
+ },
+ {
+ "epoch": 3.498168498168498,
+ "grad_norm": 51.792381286621094,
+ "learning_rate": 4.336996336996337e-05,
+ "loss": 0.5214,
+ "step": 955
+ },
+ {
+ "epoch": 3.501831501831502,
+ "grad_norm": 44.70718002319336,
+ "learning_rate": 4.334554334554335e-05,
+ "loss": 0.7823,
+ "step": 956
+ },
+ {
+ "epoch": 3.5054945054945055,
+ "grad_norm": 42.00168991088867,
+ "learning_rate": 4.332112332112332e-05,
+ "loss": 0.9207,
+ "step": 957
+ },
+ {
+ "epoch": 3.509157509157509,
+ "grad_norm": 28.97800636291504,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.279,
+ "step": 958
+ },
+ {
+ "epoch": 3.5128205128205128,
+ "grad_norm": 21.902843475341797,
+ "learning_rate": 4.327228327228328e-05,
+ "loss": 0.1969,
+ "step": 959
+ },
+ {
+ "epoch": 3.5164835164835164,
+ "grad_norm": 14.560053825378418,
+ "learning_rate": 4.324786324786325e-05,
+ "loss": 0.0976,
+ "step": 960
+ },
+ {
+ "epoch": 3.52014652014652,
+ "grad_norm": 4.2637104988098145,
+ "learning_rate": 4.322344322344323e-05,
+ "loss": 0.0277,
+ "step": 961
+ },
+ {
+ "epoch": 3.5238095238095237,
+ "grad_norm": 52.4840202331543,
+ "learning_rate": 4.3199023199023205e-05,
+ "loss": 0.2967,
+ "step": 962
+ },
+ {
+ "epoch": 3.5274725274725274,
+ "grad_norm": 48.95661163330078,
+ "learning_rate": 4.317460317460317e-05,
+ "loss": 0.2904,
+ "step": 963
+ },
+ {
+ "epoch": 3.531135531135531,
+ "grad_norm": 79.46379089355469,
+ "learning_rate": 4.315018315018315e-05,
+ "loss": 0.1644,
+ "step": 964
+ },
+ {
+ "epoch": 3.5347985347985347,
+ "grad_norm": 29.678428649902344,
+ "learning_rate": 4.312576312576313e-05,
+ "loss": 0.3498,
+ "step": 965
+ },
+ {
+ "epoch": 3.5384615384615383,
+ "grad_norm": 32.71342086791992,
+ "learning_rate": 4.31013431013431e-05,
+ "loss": 0.3509,
+ "step": 966
+ },
+ {
+ "epoch": 3.542124542124542,
+ "grad_norm": 6.679911136627197,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.0658,
+ "step": 967
+ },
+ {
+ "epoch": 3.5457875457875456,
+ "grad_norm": 19.8692626953125,
+ "learning_rate": 4.3052503052503056e-05,
+ "loss": 0.1626,
+ "step": 968
+ },
+ {
+ "epoch": 3.5494505494505493,
+ "grad_norm": 17.69087791442871,
+ "learning_rate": 4.302808302808303e-05,
+ "loss": 0.2592,
+ "step": 969
+ },
+ {
+ "epoch": 3.553113553113553,
+ "grad_norm": 11.734158515930176,
+ "learning_rate": 4.3003663003663006e-05,
+ "loss": 0.1007,
+ "step": 970
+ },
+ {
+ "epoch": 3.5567765567765566,
+ "grad_norm": 34.51172637939453,
+ "learning_rate": 4.2979242979242984e-05,
+ "loss": 0.2823,
+ "step": 971
+ },
+ {
+ "epoch": 3.5604395604395602,
+ "grad_norm": 15.009514808654785,
+ "learning_rate": 4.2954822954822956e-05,
+ "loss": 0.1203,
+ "step": 972
+ },
+ {
+ "epoch": 3.564102564102564,
+ "grad_norm": 67.92166137695312,
+ "learning_rate": 4.2930402930402934e-05,
+ "loss": 0.396,
+ "step": 973
+ },
+ {
+ "epoch": 3.5677655677655675,
+ "grad_norm": 66.84014129638672,
+ "learning_rate": 4.290598290598291e-05,
+ "loss": 0.6545,
+ "step": 974
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 25.811107635498047,
+ "learning_rate": 4.2881562881562885e-05,
+ "loss": 0.1747,
+ "step": 975
+ },
+ {
+ "epoch": 3.575091575091575,
+ "grad_norm": 100.88753509521484,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 0.3991,
+ "step": 976
+ },
+ {
+ "epoch": 3.578754578754579,
+ "grad_norm": 34.51667785644531,
+ "learning_rate": 4.2832722832722835e-05,
+ "loss": 0.1365,
+ "step": 977
+ },
+ {
+ "epoch": 3.5824175824175826,
+ "grad_norm": 26.852561950683594,
+ "learning_rate": 4.2808302808302806e-05,
+ "loss": 0.3627,
+ "step": 978
+ },
+ {
+ "epoch": 3.586080586080586,
+ "grad_norm": 24.968570709228516,
+ "learning_rate": 4.2783882783882785e-05,
+ "loss": 0.2106,
+ "step": 979
+ },
+ {
+ "epoch": 3.58974358974359,
+ "grad_norm": 27.33326530456543,
+ "learning_rate": 4.2759462759462757e-05,
+ "loss": 0.1758,
+ "step": 980
+ },
+ {
+ "epoch": 3.5934065934065935,
+ "grad_norm": 52.63814926147461,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.601,
+ "step": 981
+ },
+ {
+ "epoch": 3.597069597069597,
+ "grad_norm": 37.77897262573242,
+ "learning_rate": 4.2710622710622713e-05,
+ "loss": 0.5299,
+ "step": 982
+ },
+ {
+ "epoch": 3.600732600732601,
+ "grad_norm": 27.691659927368164,
+ "learning_rate": 4.2686202686202685e-05,
+ "loss": 0.1784,
+ "step": 983
+ },
+ {
+ "epoch": 3.6043956043956045,
+ "grad_norm": 106.33782958984375,
+ "learning_rate": 4.2661782661782664e-05,
+ "loss": 0.8859,
+ "step": 984
+ },
+ {
+ "epoch": 3.608058608058608,
+ "grad_norm": 22.95706558227539,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1611,
+ "step": 985
+ },
+ {
+ "epoch": 3.6117216117216118,
+ "grad_norm": 22.72148895263672,
+ "learning_rate": 4.2612942612942614e-05,
+ "loss": 0.1561,
+ "step": 986
+ },
+ {
+ "epoch": 3.6153846153846154,
+ "grad_norm": 93.37244415283203,
+ "learning_rate": 4.258852258852259e-05,
+ "loss": 0.4287,
+ "step": 987
+ },
+ {
+ "epoch": 3.619047619047619,
+ "grad_norm": 51.54584884643555,
+ "learning_rate": 4.2564102564102564e-05,
+ "loss": 0.6292,
+ "step": 988
+ },
+ {
+ "epoch": 3.6227106227106227,
+ "grad_norm": 61.58243942260742,
+ "learning_rate": 4.2539682539682536e-05,
+ "loss": 1.3205,
+ "step": 989
+ },
+ {
+ "epoch": 3.6263736263736264,
+ "grad_norm": 70.59432220458984,
+ "learning_rate": 4.2515262515262514e-05,
+ "loss": 0.7451,
+ "step": 990
+ },
+ {
+ "epoch": 3.63003663003663,
+ "grad_norm": 76.28730773925781,
+ "learning_rate": 4.249084249084249e-05,
+ "loss": 2.0314,
+ "step": 991
+ },
+ {
+ "epoch": 3.6336996336996337,
+ "grad_norm": 73.5402603149414,
+ "learning_rate": 4.2466422466422464e-05,
+ "loss": 1.6628,
+ "step": 992
+ },
+ {
+ "epoch": 3.6373626373626373,
+ "grad_norm": 75.8978042602539,
+ "learning_rate": 4.244200244200244e-05,
+ "loss": 1.652,
+ "step": 993
+ },
+ {
+ "epoch": 3.641025641025641,
+ "grad_norm": 37.04104232788086,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 1.5356,
+ "step": 994
+ },
+ {
+ "epoch": 3.6446886446886446,
+ "grad_norm": 34.31178283691406,
+ "learning_rate": 4.239316239316239e-05,
+ "loss": 1.1783,
+ "step": 995
+ },
+ {
+ "epoch": 3.6483516483516483,
+ "grad_norm": 22.934877395629883,
+ "learning_rate": 4.236874236874237e-05,
+ "loss": 1.2995,
+ "step": 996
+ },
+ {
+ "epoch": 3.652014652014652,
+ "grad_norm": 30.25251579284668,
+ "learning_rate": 4.234432234432235e-05,
+ "loss": 1.1304,
+ "step": 997
+ },
+ {
+ "epoch": 3.6556776556776556,
+ "grad_norm": 35.082027435302734,
+ "learning_rate": 4.231990231990232e-05,
+ "loss": 1.0827,
+ "step": 998
+ },
+ {
+ "epoch": 3.659340659340659,
+ "grad_norm": 24.526325225830078,
+ "learning_rate": 4.22954822954823e-05,
+ "loss": 0.8716,
+ "step": 999
+ },
+ {
+ "epoch": 3.663003663003663,
+ "grad_norm": 29.882883071899414,
+ "learning_rate": 4.227106227106228e-05,
+ "loss": 0.5432,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1000/training_args.bin b/checkpoint-1000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-1000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-1000/zero_to_fp32.py b/checkpoint-1000/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-1000/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
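+#
+# A typical follow-up (illustrative sketch only, not part of this script): the consolidated
+# weights no longer depend on DeepSpeed and can be loaded with plain PyTorch, e.g.
+#   state_dict = torch.load("output_dir/pytorch_model.bin", map_location="cpu")
+#   model.load_state_dict(state_dict)
+# (the exact output filename depends on the options passed to this script)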
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights,
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support an
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
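+ # Illustrative example (assuming world_size == 2, so align_to == 4):
+ #   zero2_align(10) == 4 * ceil(10 / 4) == 12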
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
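+# Added illustration (not part of the original script): for a parameter with
+# unpartitioned_numel=10 sharded across world_size=4 ranks, the helper above returns
+# partitioned_numel=ceil(10/4)=3 and padding_numel=(4 - 10 % 4)=2, i.e. the four
+# per-rank fragments hold 12 slots of which the last 2 are padding.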
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
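+# Usage sketch (added comment; the constructor arguments below are illustrative
+# assumptions, not values taken from this checkpoint): a GatheredTensor is a lazy view
+# over the per-rank flat groups and copies nothing until .contiguous() is called, e.g.
+#   gt = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset=0,
+#                       partitioned_numel=3, shape=torch.Size([2, 5]))
+#   dense = gt.contiguous()  # materializes a (2, 5) fp32 tensor on the host
+# (assumes world_size=4 so that 4 * 3 >= 10 elements are available before trimming).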
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert a state_dict of GatheredTensor objects to torch tensors
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
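+# Added note: to_torch_tensor() materializes each GatheredTensor exactly once and keeps
+# tied weights tied -- when two names reference the same underlying object, the second
+# name reuses the tensor already converted for the first.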
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get the state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del the tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach to sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Move the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-1500/config.json b/checkpoint-1500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-1500/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-1500/global_step1500/mp_rank_00_model_states.pt b/checkpoint-1500/global_step1500/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..91630e9ab83731a7f9486820042427ecdf3be277
--- /dev/null
+++ b/checkpoint-1500/global_step1500/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e83c5374bd31118a5d132f9fbcd0f03a8f9d54b4e4990d67a9fa03d16c75590
+size 2271151845
diff --git a/checkpoint-1500/global_step1500/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-1500/global_step1500/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c17109e482f99fc3097f81744fdfe9da96ffbfcf
--- /dev/null
+++ b/checkpoint-1500/global_step1500/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a87e2f321eb6cb752426ebdc48514a7598eef82dba1c13ac232da51fc651dad7
+size 3406552447
diff --git a/checkpoint-1500/global_step1500/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-1500/global_step1500/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c27ed4757d26c42e71abb847f8bd35f9a43b2a8c
--- /dev/null
+++ b/checkpoint-1500/global_step1500/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa6ae3c27cdf0a92f45375b7f56a17ef0da22bd793d5a299a0b8bfbc8bd236e2
+size 3406564543
diff --git a/checkpoint-1500/latest b/checkpoint-1500/latest
new file mode 100644
index 0000000000000000000000000000000000000000..c56ff7708f44fb7928fea2f70d6d7342ce0d5b67
--- /dev/null
+++ b/checkpoint-1500/latest
@@ -0,0 +1 @@
+global_step1500
\ No newline at end of file
diff --git a/checkpoint-1500/model.safetensors b/checkpoint-1500/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0ca930c7079b212176f2c3c25cd35dd7248dbccc
--- /dev/null
+++ b/checkpoint-1500/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b569bbbad98a6e1f51b4defcba25d9d55779fbe8c26bc3f889bd15b4aecef5
+size 2271071852
diff --git a/checkpoint-1500/rng_state_0.pth b/checkpoint-1500/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0043dddd9a02ccf02b0ef78355c8139f25f43228
--- /dev/null
+++ b/checkpoint-1500/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7426f2be0d0f0bb476bee200b31a731f154545387233799b34f2e19f7a3b563f
+size 14917
diff --git a/checkpoint-1500/rng_state_1.pth b/checkpoint-1500/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ab9778a1ec5de093650f78f218781d41fbed715c
--- /dev/null
+++ b/checkpoint-1500/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:710a4c82949ad518220fbc4b9ed190d1bace9fa38ca84f9b62f2b138c5190120
+size 14917
diff --git a/checkpoint-1500/scheduler.pt b/checkpoint-1500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9fc6900e293088823923ad5788fc7070f46ea683
--- /dev/null
+++ b/checkpoint-1500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1948403cce76f5ad42fff931a47af4ac21dd55e27552b42103c9528e5345ed53
+size 1465
diff --git a/checkpoint-1500/sentencepiece.bpe.model b/checkpoint-1500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-1500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-1500/special_tokens_map.json b/checkpoint-1500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-1500/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-1500/tokenizer.json b/checkpoint-1500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-1500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-1500/tokenizer_config.json b/checkpoint-1500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-1500/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "<s>",
+ "eos_token": "</s>",
+ "extra_special_tokens": {},
+ "mask_token": "<mask>",
+ "model_max_length": 8192,
+ "pad_token": "<pad>",
+ "sep_token": "</s>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": "<unk>"
+}
diff --git a/checkpoint-1500/trainer_state.json b/checkpoint-1500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..a88c232db431a655e7503f943f30f5979bad5ba0
--- /dev/null
+++ b/checkpoint-1500/trainer_state.json
@@ -0,0 +1,10534 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 5.4945054945054945,
+ "eval_steps": 500,
+ "global_step": 1500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ },
+ {
+ "epoch": 1.8351648351648353,
+ "grad_norm": 26.250751495361328,
+ "learning_rate": 5.445665445665445e-05,
+ "loss": 0.7496,
+ "step": 501
+ },
+ {
+ "epoch": 1.838827838827839,
+ "grad_norm": 19.820999145507812,
+ "learning_rate": 5.443223443223443e-05,
+ "loss": 0.4367,
+ "step": 502
+ },
+ {
+ "epoch": 1.8424908424908426,
+ "grad_norm": 25.09316062927246,
+ "learning_rate": 5.440781440781441e-05,
+ "loss": 0.8584,
+ "step": 503
+ },
+ {
+ "epoch": 1.8461538461538463,
+ "grad_norm": 17.808509826660156,
+ "learning_rate": 5.438339438339438e-05,
+ "loss": 0.3869,
+ "step": 504
+ },
+ {
+ "epoch": 1.84981684981685,
+ "grad_norm": 28.342119216918945,
+ "learning_rate": 5.435897435897436e-05,
+ "loss": 0.8881,
+ "step": 505
+ },
+ {
+ "epoch": 1.8534798534798536,
+ "grad_norm": 33.80287551879883,
+ "learning_rate": 5.433455433455434e-05,
+ "loss": 1.2911,
+ "step": 506
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 55.428138732910156,
+ "learning_rate": 5.431013431013431e-05,
+ "loss": 0.8934,
+ "step": 507
+ },
+ {
+ "epoch": 1.8608058608058609,
+ "grad_norm": 27.962610244750977,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 0.662,
+ "step": 508
+ },
+ {
+ "epoch": 1.8644688644688645,
+ "grad_norm": 62.84252166748047,
+ "learning_rate": 5.426129426129427e-05,
+ "loss": 1.9216,
+ "step": 509
+ },
+ {
+ "epoch": 1.8681318681318682,
+ "grad_norm": 24.26439666748047,
+ "learning_rate": 5.423687423687424e-05,
+ "loss": 0.2164,
+ "step": 510
+ },
+ {
+ "epoch": 1.8717948717948718,
+ "grad_norm": 50.95674133300781,
+ "learning_rate": 5.421245421245422e-05,
+ "loss": 0.7023,
+ "step": 511
+ },
+ {
+ "epoch": 1.8754578754578755,
+ "grad_norm": 41.17847442626953,
+ "learning_rate": 5.418803418803419e-05,
+ "loss": 1.1081,
+ "step": 512
+ },
+ {
+ "epoch": 1.879120879120879,
+ "grad_norm": 28.701988220214844,
+ "learning_rate": 5.416361416361416e-05,
+ "loss": 0.6519,
+ "step": 513
+ },
+ {
+ "epoch": 1.8827838827838828,
+ "grad_norm": 48.42552947998047,
+ "learning_rate": 5.413919413919414e-05,
+ "loss": 1.5215,
+ "step": 514
+ },
+ {
+ "epoch": 1.8864468864468864,
+ "grad_norm": 19.71268653869629,
+ "learning_rate": 5.411477411477412e-05,
+ "loss": 0.4731,
+ "step": 515
+ },
+ {
+ "epoch": 1.89010989010989,
+ "grad_norm": 68.88224792480469,
+ "learning_rate": 5.409035409035409e-05,
+ "loss": 3.0071,
+ "step": 516
+ },
+ {
+ "epoch": 1.8937728937728937,
+ "grad_norm": 34.33188247680664,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.7014,
+ "step": 517
+ },
+ {
+ "epoch": 1.8974358974358974,
+ "grad_norm": 18.214942932128906,
+ "learning_rate": 5.404151404151404e-05,
+ "loss": 0.2362,
+ "step": 518
+ },
+ {
+ "epoch": 1.901098901098901,
+ "grad_norm": 31.553678512573242,
+ "learning_rate": 5.401709401709402e-05,
+ "loss": 0.5839,
+ "step": 519
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 15.681426048278809,
+ "learning_rate": 5.3992673992673996e-05,
+ "loss": 0.6039,
+ "step": 520
+ },
+ {
+ "epoch": 1.9084249084249083,
+ "grad_norm": 18.462688446044922,
+ "learning_rate": 5.396825396825397e-05,
+ "loss": 0.5773,
+ "step": 521
+ },
+ {
+ "epoch": 1.912087912087912,
+ "grad_norm": 10.23849105834961,
+ "learning_rate": 5.3943833943833946e-05,
+ "loss": 0.3801,
+ "step": 522
+ },
+ {
+ "epoch": 1.9157509157509156,
+ "grad_norm": 35.680973052978516,
+ "learning_rate": 5.3919413919413925e-05,
+ "loss": 1.2559,
+ "step": 523
+ },
+ {
+ "epoch": 1.9194139194139193,
+ "grad_norm": 23.97362518310547,
+ "learning_rate": 5.3894993894993897e-05,
+ "loss": 0.4112,
+ "step": 524
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 25.785356521606445,
+ "learning_rate": 5.387057387057387e-05,
+ "loss": 0.8993,
+ "step": 525
+ },
+ {
+ "epoch": 1.9267399267399268,
+ "grad_norm": 25.246868133544922,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 0.6534,
+ "step": 526
+ },
+ {
+ "epoch": 1.9304029304029304,
+ "grad_norm": 29.850788116455078,
+ "learning_rate": 5.382173382173382e-05,
+ "loss": 0.52,
+ "step": 527
+ },
+ {
+ "epoch": 1.934065934065934,
+ "grad_norm": 20.702608108520508,
+ "learning_rate": 5.37973137973138e-05,
+ "loss": 0.4093,
+ "step": 528
+ },
+ {
+ "epoch": 1.9377289377289377,
+ "grad_norm": 36.39994812011719,
+ "learning_rate": 5.3772893772893775e-05,
+ "loss": 1.275,
+ "step": 529
+ },
+ {
+ "epoch": 1.9413919413919414,
+ "grad_norm": 27.56822395324707,
+ "learning_rate": 5.374847374847375e-05,
+ "loss": 0.6773,
+ "step": 530
+ },
+ {
+ "epoch": 1.945054945054945,
+ "grad_norm": 26.07769012451172,
+ "learning_rate": 5.3724053724053725e-05,
+ "loss": 0.5373,
+ "step": 531
+ },
+ {
+ "epoch": 1.9487179487179487,
+ "grad_norm": 48.47615051269531,
+ "learning_rate": 5.3699633699633704e-05,
+ "loss": 1.1931,
+ "step": 532
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 24.416805267333984,
+ "learning_rate": 5.3675213675213675e-05,
+ "loss": 0.4523,
+ "step": 533
+ },
+ {
+ "epoch": 1.9560439560439562,
+ "grad_norm": 56.8088264465332,
+ "learning_rate": 5.3650793650793654e-05,
+ "loss": 1.8992,
+ "step": 534
+ },
+ {
+ "epoch": 1.9597069597069599,
+ "grad_norm": 36.805912017822266,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 1.0743,
+ "step": 535
+ },
+ {
+ "epoch": 1.9633699633699635,
+ "grad_norm": 17.375244140625,
+ "learning_rate": 5.3601953601953604e-05,
+ "loss": 0.3546,
+ "step": 536
+ },
+ {
+ "epoch": 1.9670329670329672,
+ "grad_norm": 35.297767639160156,
+ "learning_rate": 5.357753357753358e-05,
+ "loss": 1.4903,
+ "step": 537
+ },
+ {
+ "epoch": 1.9706959706959708,
+ "grad_norm": 38.64927673339844,
+ "learning_rate": 5.3553113553113554e-05,
+ "loss": 0.9346,
+ "step": 538
+ },
+ {
+ "epoch": 1.9743589743589745,
+ "grad_norm": 23.494552612304688,
+ "learning_rate": 5.3528693528693526e-05,
+ "loss": 0.3677,
+ "step": 539
+ },
+ {
+ "epoch": 1.978021978021978,
+ "grad_norm": 21.8272647857666,
+ "learning_rate": 5.3504273504273504e-05,
+ "loss": 0.591,
+ "step": 540
+ },
+ {
+ "epoch": 1.9816849816849818,
+ "grad_norm": 15.60590934753418,
+ "learning_rate": 5.347985347985348e-05,
+ "loss": 0.3129,
+ "step": 541
+ },
+ {
+ "epoch": 1.9853479853479854,
+ "grad_norm": 23.846555709838867,
+ "learning_rate": 5.3455433455433454e-05,
+ "loss": 0.6108,
+ "step": 542
+ },
+ {
+ "epoch": 1.989010989010989,
+ "grad_norm": 21.743024826049805,
+ "learning_rate": 5.343101343101343e-05,
+ "loss": 1.0541,
+ "step": 543
+ },
+ {
+ "epoch": 1.9926739926739927,
+ "grad_norm": 29.806121826171875,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6088,
+ "step": 544
+ },
+ {
+ "epoch": 1.9963369963369964,
+ "grad_norm": 26.778568267822266,
+ "learning_rate": 5.338217338217338e-05,
+ "loss": 0.5842,
+ "step": 545
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 23.356237411499023,
+ "learning_rate": 5.335775335775336e-05,
+ "loss": 0.4591,
+ "step": 546
+ },
+ {
+ "epoch": 2.0036630036630036,
+ "grad_norm": 17.303443908691406,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 0.3432,
+ "step": 547
+ },
+ {
+ "epoch": 2.0073260073260073,
+ "grad_norm": 27.082172393798828,
+ "learning_rate": 5.330891330891331e-05,
+ "loss": 0.5156,
+ "step": 548
+ },
+ {
+ "epoch": 2.010989010989011,
+ "grad_norm": 26.520530700683594,
+ "learning_rate": 5.328449328449329e-05,
+ "loss": 0.3989,
+ "step": 549
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 23.737272262573242,
+ "learning_rate": 5.326007326007326e-05,
+ "loss": 0.5484,
+ "step": 550
+ },
+ {
+ "epoch": 2.0183150183150182,
+ "grad_norm": 24.222341537475586,
+ "learning_rate": 5.3235653235653233e-05,
+ "loss": 0.5365,
+ "step": 551
+ },
+ {
+ "epoch": 2.021978021978022,
+ "grad_norm": 29.081924438476562,
+ "learning_rate": 5.321123321123321e-05,
+ "loss": 0.6694,
+ "step": 552
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 32.419551849365234,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 0.7003,
+ "step": 553
+ },
+ {
+ "epoch": 2.029304029304029,
+ "grad_norm": 42.403709411621094,
+ "learning_rate": 5.316239316239316e-05,
+ "loss": 1.5474,
+ "step": 554
+ },
+ {
+ "epoch": 2.032967032967033,
+ "grad_norm": 17.615140914916992,
+ "learning_rate": 5.313797313797314e-05,
+ "loss": 0.588,
+ "step": 555
+ },
+ {
+ "epoch": 2.0366300366300365,
+ "grad_norm": 14.864067077636719,
+ "learning_rate": 5.311355311355311e-05,
+ "loss": 0.1613,
+ "step": 556
+ },
+ {
+ "epoch": 2.04029304029304,
+ "grad_norm": 20.189815521240234,
+ "learning_rate": 5.308913308913309e-05,
+ "loss": 0.4281,
+ "step": 557
+ },
+ {
+ "epoch": 2.043956043956044,
+ "grad_norm": 28.350017547607422,
+ "learning_rate": 5.306471306471307e-05,
+ "loss": 0.6614,
+ "step": 558
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 19.987825393676758,
+ "learning_rate": 5.304029304029304e-05,
+ "loss": 0.6906,
+ "step": 559
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 18.6667537689209,
+ "learning_rate": 5.301587301587302e-05,
+ "loss": 0.387,
+ "step": 560
+ },
+ {
+ "epoch": 2.0549450549450547,
+ "grad_norm": 20.930652618408203,
+ "learning_rate": 5.2991452991453e-05,
+ "loss": 0.7157,
+ "step": 561
+ },
+ {
+ "epoch": 2.0586080586080584,
+ "grad_norm": 22.05647087097168,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3256,
+ "step": 562
+ },
+ {
+ "epoch": 2.062271062271062,
+ "grad_norm": 32.66161346435547,
+ "learning_rate": 5.294261294261295e-05,
+ "loss": 1.3013,
+ "step": 563
+ },
+ {
+ "epoch": 2.065934065934066,
+ "grad_norm": 37.43238067626953,
+ "learning_rate": 5.291819291819292e-05,
+ "loss": 0.186,
+ "step": 564
+ },
+ {
+ "epoch": 2.06959706959707,
+ "grad_norm": 32.39999008178711,
+ "learning_rate": 5.289377289377289e-05,
+ "loss": 0.8047,
+ "step": 565
+ },
+ {
+ "epoch": 2.0732600732600734,
+ "grad_norm": 29.727481842041016,
+ "learning_rate": 5.286935286935287e-05,
+ "loss": 0.662,
+ "step": 566
+ },
+ {
+ "epoch": 2.076923076923077,
+ "grad_norm": 16.536264419555664,
+ "learning_rate": 5.284493284493285e-05,
+ "loss": 0.4,
+ "step": 567
+ },
+ {
+ "epoch": 2.0805860805860807,
+ "grad_norm": 23.41500473022461,
+ "learning_rate": 5.282051282051282e-05,
+ "loss": 0.4945,
+ "step": 568
+ },
+ {
+ "epoch": 2.0842490842490844,
+ "grad_norm": 48.842864990234375,
+ "learning_rate": 5.27960927960928e-05,
+ "loss": 0.7584,
+ "step": 569
+ },
+ {
+ "epoch": 2.087912087912088,
+ "grad_norm": 60.06027603149414,
+ "learning_rate": 5.277167277167277e-05,
+ "loss": 0.7179,
+ "step": 570
+ },
+ {
+ "epoch": 2.0915750915750917,
+ "grad_norm": 59.2591552734375,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.4883,
+ "step": 571
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 14.527932167053223,
+ "learning_rate": 5.272283272283273e-05,
+ "loss": 0.2811,
+ "step": 572
+ },
+ {
+ "epoch": 2.098901098901099,
+ "grad_norm": 16.2915096282959,
+ "learning_rate": 5.26984126984127e-05,
+ "loss": 0.2524,
+ "step": 573
+ },
+ {
+ "epoch": 2.1025641025641026,
+ "grad_norm": 28.938081741333008,
+ "learning_rate": 5.267399267399268e-05,
+ "loss": 0.5138,
+ "step": 574
+ },
+ {
+ "epoch": 2.1062271062271063,
+ "grad_norm": 27.541440963745117,
+ "learning_rate": 5.2649572649572655e-05,
+ "loss": 0.278,
+ "step": 575
+ },
+ {
+ "epoch": 2.10989010989011,
+ "grad_norm": 23.179025650024414,
+ "learning_rate": 5.262515262515263e-05,
+ "loss": 0.1881,
+ "step": 576
+ },
+ {
+ "epoch": 2.1135531135531136,
+ "grad_norm": 42.55375671386719,
+ "learning_rate": 5.26007326007326e-05,
+ "loss": 0.7882,
+ "step": 577
+ },
+ {
+ "epoch": 2.1172161172161172,
+ "grad_norm": 8.902749061584473,
+ "learning_rate": 5.257631257631258e-05,
+ "loss": 0.0611,
+ "step": 578
+ },
+ {
+ "epoch": 2.120879120879121,
+ "grad_norm": 19.483346939086914,
+ "learning_rate": 5.255189255189255e-05,
+ "loss": 0.0978,
+ "step": 579
+ },
+ {
+ "epoch": 2.1245421245421245,
+ "grad_norm": 13.898221969604492,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.0797,
+ "step": 580
+ },
+ {
+ "epoch": 2.128205128205128,
+ "grad_norm": 53.42538833618164,
+ "learning_rate": 5.2503052503052506e-05,
+ "loss": 0.9066,
+ "step": 581
+ },
+ {
+ "epoch": 2.131868131868132,
+ "grad_norm": 38.467891693115234,
+ "learning_rate": 5.247863247863248e-05,
+ "loss": 0.3272,
+ "step": 582
+ },
+ {
+ "epoch": 2.1355311355311355,
+ "grad_norm": 26.421035766601562,
+ "learning_rate": 5.2454212454212456e-05,
+ "loss": 0.6537,
+ "step": 583
+ },
+ {
+ "epoch": 2.139194139194139,
+ "grad_norm": 32.80412292480469,
+ "learning_rate": 5.2429792429792434e-05,
+ "loss": 1.1225,
+ "step": 584
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 26.87016487121582,
+ "learning_rate": 5.2405372405372406e-05,
+ "loss": 0.5749,
+ "step": 585
+ },
+ {
+ "epoch": 2.1465201465201464,
+ "grad_norm": 34.75699234008789,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 0.6926,
+ "step": 586
+ },
+ {
+ "epoch": 2.15018315018315,
+ "grad_norm": 61.76310348510742,
+ "learning_rate": 5.235653235653236e-05,
+ "loss": 0.9029,
+ "step": 587
+ },
+ {
+ "epoch": 2.1538461538461537,
+ "grad_norm": 40.86505126953125,
+ "learning_rate": 5.2332112332112335e-05,
+ "loss": 0.5169,
+ "step": 588
+ },
+ {
+ "epoch": 2.1575091575091574,
+ "grad_norm": 16.05042839050293,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 0.5211,
+ "step": 589
+ },
+ {
+ "epoch": 2.161172161172161,
+ "grad_norm": 19.56302261352539,
+ "learning_rate": 5.2283272283272285e-05,
+ "loss": 0.5737,
+ "step": 590
+ },
+ {
+ "epoch": 2.1648351648351647,
+ "grad_norm": 22.311508178710938,
+ "learning_rate": 5.2258852258852256e-05,
+ "loss": 0.4223,
+ "step": 591
+ },
+ {
+ "epoch": 2.1684981684981683,
+ "grad_norm": 21.059213638305664,
+ "learning_rate": 5.2234432234432235e-05,
+ "loss": 0.2285,
+ "step": 592
+ },
+ {
+ "epoch": 2.172161172161172,
+ "grad_norm": 28.82351303100586,
+ "learning_rate": 5.221001221001221e-05,
+ "loss": 0.8438,
+ "step": 593
+ },
+ {
+ "epoch": 2.1758241758241756,
+ "grad_norm": 14.425333023071289,
+ "learning_rate": 5.2185592185592185e-05,
+ "loss": 0.1765,
+ "step": 594
+ },
+ {
+ "epoch": 2.1794871794871793,
+ "grad_norm": 16.967479705810547,
+ "learning_rate": 5.2161172161172163e-05,
+ "loss": 0.2465,
+ "step": 595
+ },
+ {
+ "epoch": 2.183150183150183,
+ "grad_norm": 40.79065704345703,
+ "learning_rate": 5.2136752136752135e-05,
+ "loss": 0.6077,
+ "step": 596
+ },
+ {
+ "epoch": 2.186813186813187,
+ "grad_norm": 22.434715270996094,
+ "learning_rate": 5.2112332112332114e-05,
+ "loss": 0.3748,
+ "step": 597
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 32.18471908569336,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.5163,
+ "step": 598
+ },
+ {
+ "epoch": 2.1941391941391943,
+ "grad_norm": 20.43740463256836,
+ "learning_rate": 5.2063492063492064e-05,
+ "loss": 0.4116,
+ "step": 599
+ },
+ {
+ "epoch": 2.197802197802198,
+ "grad_norm": 6.528069496154785,
+ "learning_rate": 5.203907203907204e-05,
+ "loss": 0.065,
+ "step": 600
+ },
+ {
+ "epoch": 2.2014652014652016,
+ "grad_norm": 35.0635871887207,
+ "learning_rate": 5.201465201465202e-05,
+ "loss": 1.2288,
+ "step": 601
+ },
+ {
+ "epoch": 2.2051282051282053,
+ "grad_norm": 23.499767303466797,
+ "learning_rate": 5.199023199023199e-05,
+ "loss": 0.49,
+ "step": 602
+ },
+ {
+ "epoch": 2.208791208791209,
+ "grad_norm": 20.234952926635742,
+ "learning_rate": 5.1965811965811964e-05,
+ "loss": 0.231,
+ "step": 603
+ },
+ {
+ "epoch": 2.2124542124542126,
+ "grad_norm": 9.268828392028809,
+ "learning_rate": 5.194139194139194e-05,
+ "loss": 0.0732,
+ "step": 604
+ },
+ {
+ "epoch": 2.2161172161172162,
+ "grad_norm": 52.60474395751953,
+ "learning_rate": 5.1916971916971914e-05,
+ "loss": 0.8766,
+ "step": 605
+ },
+ {
+ "epoch": 2.21978021978022,
+ "grad_norm": 41.86642074584961,
+ "learning_rate": 5.189255189255189e-05,
+ "loss": 0.4743,
+ "step": 606
+ },
+ {
+ "epoch": 2.2234432234432235,
+ "grad_norm": 30.304580688476562,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.4412,
+ "step": 607
+ },
+ {
+ "epoch": 2.227106227106227,
+ "grad_norm": 27.26057243347168,
+ "learning_rate": 5.184371184371184e-05,
+ "loss": 0.3496,
+ "step": 608
+ },
+ {
+ "epoch": 2.230769230769231,
+ "grad_norm": 40.55131149291992,
+ "learning_rate": 5.181929181929182e-05,
+ "loss": 0.7097,
+ "step": 609
+ },
+ {
+ "epoch": 2.2344322344322345,
+ "grad_norm": 61.97871017456055,
+ "learning_rate": 5.17948717948718e-05,
+ "loss": 1.3686,
+ "step": 610
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 38.211700439453125,
+ "learning_rate": 5.177045177045177e-05,
+ "loss": 0.565,
+ "step": 611
+ },
+ {
+ "epoch": 2.241758241758242,
+ "grad_norm": 20.10716438293457,
+ "learning_rate": 5.174603174603175e-05,
+ "loss": 0.3468,
+ "step": 612
+ },
+ {
+ "epoch": 2.2454212454212454,
+ "grad_norm": 23.96891975402832,
+ "learning_rate": 5.172161172161173e-05,
+ "loss": 0.2295,
+ "step": 613
+ },
+ {
+ "epoch": 2.249084249084249,
+ "grad_norm": 10.14421272277832,
+ "learning_rate": 5.16971916971917e-05,
+ "loss": 0.0943,
+ "step": 614
+ },
+ {
+ "epoch": 2.2527472527472527,
+ "grad_norm": 15.786056518554688,
+ "learning_rate": 5.167277167277167e-05,
+ "loss": 0.1213,
+ "step": 615
+ },
+ {
+ "epoch": 2.2564102564102564,
+ "grad_norm": 20.907663345336914,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.235,
+ "step": 616
+ },
+ {
+ "epoch": 2.26007326007326,
+ "grad_norm": 32.149600982666016,
+ "learning_rate": 5.162393162393162e-05,
+ "loss": 0.4807,
+ "step": 617
+ },
+ {
+ "epoch": 2.2637362637362637,
+ "grad_norm": 33.965518951416016,
+ "learning_rate": 5.15995115995116e-05,
+ "loss": 0.4517,
+ "step": 618
+ },
+ {
+ "epoch": 2.2673992673992673,
+ "grad_norm": 49.98363494873047,
+ "learning_rate": 5.157509157509158e-05,
+ "loss": 0.6434,
+ "step": 619
+ },
+ {
+ "epoch": 2.271062271062271,
+ "grad_norm": 14.035831451416016,
+ "learning_rate": 5.155067155067155e-05,
+ "loss": 0.1117,
+ "step": 620
+ },
+ {
+ "epoch": 2.2747252747252746,
+ "grad_norm": 28.84484100341797,
+ "learning_rate": 5.152625152625153e-05,
+ "loss": 0.8002,
+ "step": 621
+ },
+ {
+ "epoch": 2.2783882783882783,
+ "grad_norm": 41.59181594848633,
+ "learning_rate": 5.15018315018315e-05,
+ "loss": 0.4465,
+ "step": 622
+ },
+ {
+ "epoch": 2.282051282051282,
+ "grad_norm": 33.10573196411133,
+ "learning_rate": 5.147741147741148e-05,
+ "loss": 0.5795,
+ "step": 623
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 34.79928970336914,
+ "learning_rate": 5.145299145299146e-05,
+ "loss": 0.3135,
+ "step": 624
+ },
+ {
+ "epoch": 2.2893772893772892,
+ "grad_norm": 18.095544815063477,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.0961,
+ "step": 625
+ },
+ {
+ "epoch": 2.293040293040293,
+ "grad_norm": 16.55453872680664,
+ "learning_rate": 5.140415140415141e-05,
+ "loss": 0.0868,
+ "step": 626
+ },
+ {
+ "epoch": 2.2967032967032965,
+ "grad_norm": 42.18946075439453,
+ "learning_rate": 5.1379731379731386e-05,
+ "loss": 0.8892,
+ "step": 627
+ },
+ {
+ "epoch": 2.3003663003663,
+ "grad_norm": 54.753448486328125,
+ "learning_rate": 5.135531135531135e-05,
+ "loss": 0.833,
+ "step": 628
+ },
+ {
+ "epoch": 2.304029304029304,
+ "grad_norm": 27.723228454589844,
+ "learning_rate": 5.133089133089133e-05,
+ "loss": 0.2744,
+ "step": 629
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 28.53034019470215,
+ "learning_rate": 5.130647130647131e-05,
+ "loss": 0.1696,
+ "step": 630
+ },
+ {
+ "epoch": 2.311355311355311,
+ "grad_norm": 65.4127426147461,
+ "learning_rate": 5.128205128205128e-05,
+ "loss": 0.9019,
+ "step": 631
+ },
+ {
+ "epoch": 2.315018315018315,
+ "grad_norm": 22.794870376586914,
+ "learning_rate": 5.125763125763126e-05,
+ "loss": 0.1987,
+ "step": 632
+ },
+ {
+ "epoch": 2.3186813186813184,
+ "grad_norm": 29.870113372802734,
+ "learning_rate": 5.1233211233211236e-05,
+ "loss": 0.4816,
+ "step": 633
+ },
+ {
+ "epoch": 2.3223443223443225,
+ "grad_norm": 38.91164779663086,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.7424,
+ "step": 634
+ },
+ {
+ "epoch": 2.326007326007326,
+ "grad_norm": 36.57811737060547,
+ "learning_rate": 5.1184371184371186e-05,
+ "loss": 1.1365,
+ "step": 635
+ },
+ {
+ "epoch": 2.32967032967033,
+ "grad_norm": 31.59128189086914,
+ "learning_rate": 5.1159951159951165e-05,
+ "loss": 0.6167,
+ "step": 636
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 25.956003189086914,
+ "learning_rate": 5.1135531135531136e-05,
+ "loss": 0.8808,
+ "step": 637
+ },
+ {
+ "epoch": 2.336996336996337,
+ "grad_norm": 38.18582534790039,
+ "learning_rate": 5.1111111111111115e-05,
+ "loss": 0.9417,
+ "step": 638
+ },
+ {
+ "epoch": 2.340659340659341,
+ "grad_norm": 27.436229705810547,
+ "learning_rate": 5.108669108669109e-05,
+ "loss": 0.7539,
+ "step": 639
+ },
+ {
+ "epoch": 2.3443223443223444,
+ "grad_norm": 40.86305618286133,
+ "learning_rate": 5.1062271062271065e-05,
+ "loss": 2.126,
+ "step": 640
+ },
+ {
+ "epoch": 2.347985347985348,
+ "grad_norm": 22.224748611450195,
+ "learning_rate": 5.103785103785104e-05,
+ "loss": 0.9958,
+ "step": 641
+ },
+ {
+ "epoch": 2.3516483516483517,
+ "grad_norm": 19.915552139282227,
+ "learning_rate": 5.1013431013431015e-05,
+ "loss": 1.1045,
+ "step": 642
+ },
+ {
+ "epoch": 2.3553113553113554,
+ "grad_norm": 17.045989990234375,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8906,
+ "step": 643
+ },
+ {
+ "epoch": 2.358974358974359,
+ "grad_norm": 22.106670379638672,
+ "learning_rate": 5.0964590964590965e-05,
+ "loss": 0.9856,
+ "step": 644
+ },
+ {
+ "epoch": 2.3626373626373627,
+ "grad_norm": 17.583837509155273,
+ "learning_rate": 5.0940170940170944e-05,
+ "loss": 0.8328,
+ "step": 645
+ },
+ {
+ "epoch": 2.3663003663003663,
+ "grad_norm": 57.61167526245117,
+ "learning_rate": 5.0915750915750915e-05,
+ "loss": 0.578,
+ "step": 646
+ },
+ {
+ "epoch": 2.36996336996337,
+ "grad_norm": 13.941128730773926,
+ "learning_rate": 5.0891330891330894e-05,
+ "loss": 0.5892,
+ "step": 647
+ },
+ {
+ "epoch": 2.3736263736263736,
+ "grad_norm": 22.38715171813965,
+ "learning_rate": 5.0866910866910866e-05,
+ "loss": 0.7608,
+ "step": 648
+ },
+ {
+ "epoch": 2.3772893772893773,
+ "grad_norm": 22.42316436767578,
+ "learning_rate": 5.0842490842490844e-05,
+ "loss": 0.7923,
+ "step": 649
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 32.75740432739258,
+ "learning_rate": 5.081807081807082e-05,
+ "loss": 1.0798,
+ "step": 650
+ },
+ {
+ "epoch": 2.3846153846153846,
+ "grad_norm": 19.295289993286133,
+ "learning_rate": 5.0793650793650794e-05,
+ "loss": 0.4898,
+ "step": 651
+ },
+ {
+ "epoch": 2.3882783882783882,
+ "grad_norm": 25.849227905273438,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.5557,
+ "step": 652
+ },
+ {
+ "epoch": 2.391941391941392,
+ "grad_norm": 21.321088790893555,
+ "learning_rate": 5.074481074481075e-05,
+ "loss": 0.2743,
+ "step": 653
+ },
+ {
+ "epoch": 2.3956043956043955,
+ "grad_norm": 28.795917510986328,
+ "learning_rate": 5.0720390720390716e-05,
+ "loss": 0.7039,
+ "step": 654
+ },
+ {
+ "epoch": 2.399267399267399,
+ "grad_norm": 19.86751937866211,
+ "learning_rate": 5.0695970695970694e-05,
+ "loss": 0.3155,
+ "step": 655
+ },
+ {
+ "epoch": 2.402930402930403,
+ "grad_norm": 33.3828010559082,
+ "learning_rate": 5.067155067155067e-05,
+ "loss": 1.0696,
+ "step": 656
+ },
+ {
+ "epoch": 2.4065934065934065,
+ "grad_norm": 37.38752746582031,
+ "learning_rate": 5.0647130647130645e-05,
+ "loss": 0.8123,
+ "step": 657
+ },
+ {
+ "epoch": 2.41025641025641,
+ "grad_norm": 29.22795867919922,
+ "learning_rate": 5.062271062271062e-05,
+ "loss": 0.9515,
+ "step": 658
+ },
+ {
+ "epoch": 2.413919413919414,
+ "grad_norm": 41.129981994628906,
+ "learning_rate": 5.05982905982906e-05,
+ "loss": 1.1329,
+ "step": 659
+ },
+ {
+ "epoch": 2.4175824175824174,
+ "grad_norm": 40.985042572021484,
+ "learning_rate": 5.057387057387057e-05,
+ "loss": 0.675,
+ "step": 660
+ },
+ {
+ "epoch": 2.421245421245421,
+ "grad_norm": 33.49393844604492,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 0.9679,
+ "step": 661
+ },
+ {
+ "epoch": 2.4249084249084247,
+ "grad_norm": 28.741533279418945,
+ "learning_rate": 5.052503052503053e-05,
+ "loss": 0.7928,
+ "step": 662
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 28.89700698852539,
+ "learning_rate": 5.05006105006105e-05,
+ "loss": 0.7594,
+ "step": 663
+ },
+ {
+ "epoch": 2.4322344322344325,
+ "grad_norm": 4.59797477722168,
+ "learning_rate": 5.047619047619048e-05,
+ "loss": 0.0584,
+ "step": 664
+ },
+ {
+ "epoch": 2.435897435897436,
+ "grad_norm": 29.852828979492188,
+ "learning_rate": 5.045177045177046e-05,
+ "loss": 0.614,
+ "step": 665
+ },
+ {
+ "epoch": 2.4395604395604398,
+ "grad_norm": 15.132670402526855,
+ "learning_rate": 5.042735042735043e-05,
+ "loss": 0.2353,
+ "step": 666
+ },
+ {
+ "epoch": 2.4432234432234434,
+ "grad_norm": 23.85403060913086,
+ "learning_rate": 5.04029304029304e-05,
+ "loss": 0.9065,
+ "step": 667
+ },
+ {
+ "epoch": 2.446886446886447,
+ "grad_norm": 12.384196281433105,
+ "learning_rate": 5.037851037851038e-05,
+ "loss": 0.2065,
+ "step": 668
+ },
+ {
+ "epoch": 2.4505494505494507,
+ "grad_norm": 18.347129821777344,
+ "learning_rate": 5.035409035409035e-05,
+ "loss": 0.647,
+ "step": 669
+ },
+ {
+ "epoch": 2.4542124542124544,
+ "grad_norm": 18.645936965942383,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2072,
+ "step": 670
+ },
+ {
+ "epoch": 2.457875457875458,
+ "grad_norm": 9.493071556091309,
+ "learning_rate": 5.03052503052503e-05,
+ "loss": 0.1805,
+ "step": 671
+ },
+ {
+ "epoch": 2.4615384615384617,
+ "grad_norm": 18.552539825439453,
+ "learning_rate": 5.028083028083028e-05,
+ "loss": 0.4078,
+ "step": 672
+ },
+ {
+ "epoch": 2.4652014652014653,
+ "grad_norm": 21.735048294067383,
+ "learning_rate": 5.025641025641026e-05,
+ "loss": 0.4231,
+ "step": 673
+ },
+ {
+ "epoch": 2.468864468864469,
+ "grad_norm": 54.32040023803711,
+ "learning_rate": 5.023199023199023e-05,
+ "loss": 1.3927,
+ "step": 674
+ },
+ {
+ "epoch": 2.4725274725274726,
+ "grad_norm": 26.955970764160156,
+ "learning_rate": 5.020757020757021e-05,
+ "loss": 0.6899,
+ "step": 675
+ },
+ {
+ "epoch": 2.4761904761904763,
+ "grad_norm": 43.423526763916016,
+ "learning_rate": 5.018315018315019e-05,
+ "loss": 1.2084,
+ "step": 676
+ },
+ {
+ "epoch": 2.47985347985348,
+ "grad_norm": 35.98548126220703,
+ "learning_rate": 5.015873015873016e-05,
+ "loss": 1.5047,
+ "step": 677
+ },
+ {
+ "epoch": 2.4835164835164836,
+ "grad_norm": 22.593570709228516,
+ "learning_rate": 5.013431013431014e-05,
+ "loss": 0.6918,
+ "step": 678
+ },
+ {
+ "epoch": 2.4871794871794872,
+ "grad_norm": 21.29257583618164,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.3578,
+ "step": 679
+ },
+ {
+ "epoch": 2.490842490842491,
+ "grad_norm": 21.672088623046875,
+ "learning_rate": 5.008547008547008e-05,
+ "loss": 0.7757,
+ "step": 680
+ },
+ {
+ "epoch": 2.4945054945054945,
+ "grad_norm": 9.625850677490234,
+ "learning_rate": 5.006105006105006e-05,
+ "loss": 0.1329,
+ "step": 681
+ },
+ {
+ "epoch": 2.498168498168498,
+ "grad_norm": 16.92123794555664,
+ "learning_rate": 5.003663003663004e-05,
+ "loss": 0.5599,
+ "step": 682
+ },
+ {
+ "epoch": 2.501831501831502,
+ "grad_norm": 15.665925025939941,
+ "learning_rate": 5.001221001221001e-05,
+ "loss": 0.3099,
+ "step": 683
+ },
+ {
+ "epoch": 2.5054945054945055,
+ "grad_norm": 21.316635131835938,
+ "learning_rate": 4.998778998778999e-05,
+ "loss": 0.5746,
+ "step": 684
+ },
+ {
+ "epoch": 2.509157509157509,
+ "grad_norm": 24.99594497680664,
+ "learning_rate": 4.996336996336997e-05,
+ "loss": 1.1274,
+ "step": 685
+ },
+ {
+ "epoch": 2.5128205128205128,
+ "grad_norm": 29.795175552368164,
+ "learning_rate": 4.993894993894994e-05,
+ "loss": 0.9991,
+ "step": 686
+ },
+ {
+ "epoch": 2.5164835164835164,
+ "grad_norm": 16.337533950805664,
+ "learning_rate": 4.991452991452992e-05,
+ "loss": 0.4101,
+ "step": 687
+ },
+ {
+ "epoch": 2.52014652014652,
+ "grad_norm": 20.065715789794922,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.7786,
+ "step": 688
+ },
+ {
+ "epoch": 2.5238095238095237,
+ "grad_norm": 19.341567993164062,
+ "learning_rate": 4.986568986568987e-05,
+ "loss": 0.4989,
+ "step": 689
+ },
+ {
+ "epoch": 2.5274725274725274,
+ "grad_norm": 14.688420295715332,
+ "learning_rate": 4.9841269841269845e-05,
+ "loss": 0.4081,
+ "step": 690
+ },
+ {
+ "epoch": 2.531135531135531,
+ "grad_norm": 39.346012115478516,
+ "learning_rate": 4.9816849816849824e-05,
+ "loss": 1.7919,
+ "step": 691
+ },
+ {
+ "epoch": 2.5347985347985347,
+ "grad_norm": 21.353286743164062,
+ "learning_rate": 4.9792429792429796e-05,
+ "loss": 0.698,
+ "step": 692
+ },
+ {
+ "epoch": 2.5384615384615383,
+ "grad_norm": 35.96653366088867,
+ "learning_rate": 4.976800976800977e-05,
+ "loss": 1.6584,
+ "step": 693
+ },
+ {
+ "epoch": 2.542124542124542,
+ "grad_norm": 19.14348793029785,
+ "learning_rate": 4.9743589743589746e-05,
+ "loss": 0.885,
+ "step": 694
+ },
+ {
+ "epoch": 2.5457875457875456,
+ "grad_norm": 9.260897636413574,
+ "learning_rate": 4.971916971916972e-05,
+ "loss": 0.1629,
+ "step": 695
+ },
+ {
+ "epoch": 2.5494505494505493,
+ "grad_norm": 18.497526168823242,
+ "learning_rate": 4.9694749694749696e-05,
+ "loss": 0.7242,
+ "step": 696
+ },
+ {
+ "epoch": 2.553113553113553,
+ "grad_norm": 8.879841804504395,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 0.1302,
+ "step": 697
+ },
+ {
+ "epoch": 2.5567765567765566,
+ "grad_norm": 26.34065818786621,
+ "learning_rate": 4.9645909645909646e-05,
+ "loss": 0.7333,
+ "step": 698
+ },
+ {
+ "epoch": 2.5604395604395602,
+ "grad_norm": 15.10546588897705,
+ "learning_rate": 4.9621489621489624e-05,
+ "loss": 0.3119,
+ "step": 699
+ },
+ {
+ "epoch": 2.564102564102564,
+ "grad_norm": 10.68095874786377,
+ "learning_rate": 4.9597069597069596e-05,
+ "loss": 0.2505,
+ "step": 700
+ },
+ {
+ "epoch": 2.5677655677655675,
+ "grad_norm": 29.08888053894043,
+ "learning_rate": 4.9572649572649575e-05,
+ "loss": 0.4286,
+ "step": 701
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 29.939416885375977,
+ "learning_rate": 4.954822954822955e-05,
+ "loss": 1.1529,
+ "step": 702
+ },
+ {
+ "epoch": 2.575091575091575,
+ "grad_norm": 32.78864669799805,
+ "learning_rate": 4.9523809523809525e-05,
+ "loss": 0.9834,
+ "step": 703
+ },
+ {
+ "epoch": 2.578754578754579,
+ "grad_norm": 13.99082088470459,
+ "learning_rate": 4.94993894993895e-05,
+ "loss": 0.1934,
+ "step": 704
+ },
+ {
+ "epoch": 2.5824175824175826,
+ "grad_norm": 31.696718215942383,
+ "learning_rate": 4.9474969474969475e-05,
+ "loss": 0.6881,
+ "step": 705
+ },
+ {
+ "epoch": 2.586080586080586,
+ "grad_norm": 39.26205062866211,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.573,
+ "step": 706
+ },
+ {
+ "epoch": 2.58974358974359,
+ "grad_norm": 42.08647918701172,
+ "learning_rate": 4.9426129426129425e-05,
+ "loss": 1.5935,
+ "step": 707
+ },
+ {
+ "epoch": 2.5934065934065935,
+ "grad_norm": 24.630651473999023,
+ "learning_rate": 4.94017094017094e-05,
+ "loss": 0.7016,
+ "step": 708
+ },
+ {
+ "epoch": 2.597069597069597,
+ "grad_norm": 35.33428192138672,
+ "learning_rate": 4.9377289377289375e-05,
+ "loss": 0.9646,
+ "step": 709
+ },
+ {
+ "epoch": 2.600732600732601,
+ "grad_norm": 21.643918991088867,
+ "learning_rate": 4.9352869352869353e-05,
+ "loss": 0.3679,
+ "step": 710
+ },
+ {
+ "epoch": 2.6043956043956045,
+ "grad_norm": 10.6254301071167,
+ "learning_rate": 4.932844932844933e-05,
+ "loss": 0.1059,
+ "step": 711
+ },
+ {
+ "epoch": 2.608058608058608,
+ "grad_norm": 23.43462562561035,
+ "learning_rate": 4.9304029304029304e-05,
+ "loss": 0.5128,
+ "step": 712
+ },
+ {
+ "epoch": 2.6117216117216118,
+ "grad_norm": 25.748422622680664,
+ "learning_rate": 4.927960927960928e-05,
+ "loss": 0.6154,
+ "step": 713
+ },
+ {
+ "epoch": 2.6153846153846154,
+ "grad_norm": 23.163209915161133,
+ "learning_rate": 4.925518925518926e-05,
+ "loss": 0.3978,
+ "step": 714
+ },
+ {
+ "epoch": 2.619047619047619,
+ "grad_norm": 22.306194305419922,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.3984,
+ "step": 715
+ },
+ {
+ "epoch": 2.6227106227106227,
+ "grad_norm": 48.16558074951172,
+ "learning_rate": 4.920634920634921e-05,
+ "loss": 0.9568,
+ "step": 716
+ },
+ {
+ "epoch": 2.6263736263736264,
+ "grad_norm": 48.76753234863281,
+ "learning_rate": 4.918192918192919e-05,
+ "loss": 0.6579,
+ "step": 717
+ },
+ {
+ "epoch": 2.63003663003663,
+ "grad_norm": 57.938720703125,
+ "learning_rate": 4.9157509157509154e-05,
+ "loss": 1.0926,
+ "step": 718
+ },
+ {
+ "epoch": 2.6336996336996337,
+ "grad_norm": 25.495267868041992,
+ "learning_rate": 4.913308913308913e-05,
+ "loss": 0.3717,
+ "step": 719
+ },
+ {
+ "epoch": 2.6373626373626373,
+ "grad_norm": 20.054609298706055,
+ "learning_rate": 4.910866910866911e-05,
+ "loss": 0.4502,
+ "step": 720
+ },
+ {
+ "epoch": 2.641025641025641,
+ "grad_norm": 23.096263885498047,
+ "learning_rate": 4.908424908424908e-05,
+ "loss": 0.2794,
+ "step": 721
+ },
+ {
+ "epoch": 2.6446886446886446,
+ "grad_norm": 6.073278903961182,
+ "learning_rate": 4.905982905982906e-05,
+ "loss": 0.0519,
+ "step": 722
+ },
+ {
+ "epoch": 2.6483516483516483,
+ "grad_norm": 38.562618255615234,
+ "learning_rate": 4.903540903540903e-05,
+ "loss": 0.8839,
+ "step": 723
+ },
+ {
+ "epoch": 2.652014652014652,
+ "grad_norm": 23.544757843017578,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.3935,
+ "step": 724
+ },
+ {
+ "epoch": 2.6556776556776556,
+ "grad_norm": 22.844032287597656,
+ "learning_rate": 4.898656898656899e-05,
+ "loss": 0.2428,
+ "step": 725
+ },
+ {
+ "epoch": 2.659340659340659,
+ "grad_norm": 11.537687301635742,
+ "learning_rate": 4.896214896214896e-05,
+ "loss": 0.1538,
+ "step": 726
+ },
+ {
+ "epoch": 2.663003663003663,
+ "grad_norm": 59.37337112426758,
+ "learning_rate": 4.893772893772894e-05,
+ "loss": 1.181,
+ "step": 727
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 22.206314086914062,
+ "learning_rate": 4.891330891330892e-05,
+ "loss": 0.4044,
+ "step": 728
+ },
+ {
+ "epoch": 2.67032967032967,
+ "grad_norm": 27.44620132446289,
+ "learning_rate": 4.888888888888889e-05,
+ "loss": 0.585,
+ "step": 729
+ },
+ {
+ "epoch": 2.6739926739926743,
+ "grad_norm": 35.70675277709961,
+ "learning_rate": 4.886446886446887e-05,
+ "loss": 0.6853,
+ "step": 730
+ },
+ {
+ "epoch": 2.677655677655678,
+ "grad_norm": 25.653356552124023,
+ "learning_rate": 4.884004884004884e-05,
+ "loss": 0.6143,
+ "step": 731
+ },
+ {
+ "epoch": 2.6813186813186816,
+ "grad_norm": 24.242090225219727,
+ "learning_rate": 4.881562881562881e-05,
+ "loss": 0.4365,
+ "step": 732
+ },
+ {
+ "epoch": 2.684981684981685,
+ "grad_norm": 25.621902465820312,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.6644,
+ "step": 733
+ },
+ {
+ "epoch": 2.688644688644689,
+ "grad_norm": 14.14786434173584,
+ "learning_rate": 4.876678876678877e-05,
+ "loss": 0.4117,
+ "step": 734
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 37.98638916015625,
+ "learning_rate": 4.874236874236874e-05,
+ "loss": 1.0452,
+ "step": 735
+ },
+ {
+ "epoch": 2.695970695970696,
+ "grad_norm": 23.186302185058594,
+ "learning_rate": 4.871794871794872e-05,
+ "loss": 0.2642,
+ "step": 736
+ },
+ {
+ "epoch": 2.6996336996337,
+ "grad_norm": 27.23651695251465,
+ "learning_rate": 4.86935286935287e-05,
+ "loss": 0.393,
+ "step": 737
+ },
+ {
+ "epoch": 2.7032967032967035,
+ "grad_norm": 36.44395446777344,
+ "learning_rate": 4.866910866910867e-05,
+ "loss": 1.1309,
+ "step": 738
+ },
+ {
+ "epoch": 2.706959706959707,
+ "grad_norm": 9.733710289001465,
+ "learning_rate": 4.864468864468865e-05,
+ "loss": 0.2466,
+ "step": 739
+ },
+ {
+ "epoch": 2.7106227106227108,
+ "grad_norm": 24.727527618408203,
+ "learning_rate": 4.8620268620268626e-05,
+ "loss": 0.46,
+ "step": 740
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 15.122056007385254,
+ "learning_rate": 4.85958485958486e-05,
+ "loss": 0.3122,
+ "step": 741
+ },
+ {
+ "epoch": 2.717948717948718,
+ "grad_norm": 24.059120178222656,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.2359,
+ "step": 742
+ },
+ {
+ "epoch": 2.7216117216117217,
+ "grad_norm": 7.659122467041016,
+ "learning_rate": 4.8547008547008554e-05,
+ "loss": 0.1212,
+ "step": 743
+ },
+ {
+ "epoch": 2.7252747252747254,
+ "grad_norm": 27.002117156982422,
+ "learning_rate": 4.852258852258852e-05,
+ "loss": 0.7593,
+ "step": 744
+ },
+ {
+ "epoch": 2.728937728937729,
+ "grad_norm": 6.3852009773254395,
+ "learning_rate": 4.84981684981685e-05,
+ "loss": 0.0644,
+ "step": 745
+ },
+ {
+ "epoch": 2.7326007326007327,
+ "grad_norm": 25.574190139770508,
+ "learning_rate": 4.8473748473748476e-05,
+ "loss": 0.7012,
+ "step": 746
+ },
+ {
+ "epoch": 2.7362637362637363,
+ "grad_norm": 15.720768928527832,
+ "learning_rate": 4.844932844932845e-05,
+ "loss": 0.2692,
+ "step": 747
+ },
+ {
+ "epoch": 2.73992673992674,
+ "grad_norm": 25.527997970581055,
+ "learning_rate": 4.8424908424908426e-05,
+ "loss": 0.2648,
+ "step": 748
+ },
+ {
+ "epoch": 2.7435897435897436,
+ "grad_norm": 27.791011810302734,
+ "learning_rate": 4.84004884004884e-05,
+ "loss": 0.6007,
+ "step": 749
+ },
+ {
+ "epoch": 2.7472527472527473,
+ "grad_norm": 20.487640380859375,
+ "learning_rate": 4.8376068376068376e-05,
+ "loss": 0.5715,
+ "step": 750
+ },
+ {
+ "epoch": 2.750915750915751,
+ "grad_norm": 6.386992454528809,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 0.06,
+ "step": 751
+ },
+ {
+ "epoch": 2.7545787545787546,
+ "grad_norm": 13.110812187194824,
+ "learning_rate": 4.8327228327228327e-05,
+ "loss": 0.129,
+ "step": 752
+ },
+ {
+ "epoch": 2.758241758241758,
+ "grad_norm": 26.55845832824707,
+ "learning_rate": 4.8302808302808305e-05,
+ "loss": 0.67,
+ "step": 753
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 38.83135223388672,
+ "learning_rate": 4.8278388278388283e-05,
+ "loss": 1.6656,
+ "step": 754
+ },
+ {
+ "epoch": 2.7655677655677655,
+ "grad_norm": 25.99518585205078,
+ "learning_rate": 4.8253968253968255e-05,
+ "loss": 0.3285,
+ "step": 755
+ },
+ {
+ "epoch": 2.769230769230769,
+ "grad_norm": 17.282081604003906,
+ "learning_rate": 4.8229548229548234e-05,
+ "loss": 0.2217,
+ "step": 756
+ },
+ {
+ "epoch": 2.772893772893773,
+ "grad_norm": 28.849924087524414,
+ "learning_rate": 4.8205128205128205e-05,
+ "loss": 0.7287,
+ "step": 757
+ },
+ {
+ "epoch": 2.7765567765567765,
+ "grad_norm": 45.79567337036133,
+ "learning_rate": 4.818070818070818e-05,
+ "loss": 1.6964,
+ "step": 758
+ },
+ {
+ "epoch": 2.78021978021978,
+ "grad_norm": 15.203421592712402,
+ "learning_rate": 4.8156288156288155e-05,
+ "loss": 0.2351,
+ "step": 759
+ },
+ {
+ "epoch": 2.7838827838827838,
+ "grad_norm": 10.686698913574219,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.1533,
+ "step": 760
+ },
+ {
+ "epoch": 2.7875457875457874,
+ "grad_norm": 24.186473846435547,
+ "learning_rate": 4.8107448107448106e-05,
+ "loss": 1.0973,
+ "step": 761
+ },
+ {
+ "epoch": 2.791208791208791,
+ "grad_norm": 25.378986358642578,
+ "learning_rate": 4.8083028083028084e-05,
+ "loss": 0.5847,
+ "step": 762
+ },
+ {
+ "epoch": 2.7948717948717947,
+ "grad_norm": 20.066482543945312,
+ "learning_rate": 4.805860805860806e-05,
+ "loss": 0.2643,
+ "step": 763
+ },
+ {
+ "epoch": 2.7985347985347984,
+ "grad_norm": 56.11622619628906,
+ "learning_rate": 4.8034188034188034e-05,
+ "loss": 0.6949,
+ "step": 764
+ },
+ {
+ "epoch": 2.802197802197802,
+ "grad_norm": 27.80112648010254,
+ "learning_rate": 4.800976800976801e-05,
+ "loss": 0.5622,
+ "step": 765
+ },
+ {
+ "epoch": 2.8058608058608057,
+ "grad_norm": 30.947532653808594,
+ "learning_rate": 4.798534798534799e-05,
+ "loss": 0.6276,
+ "step": 766
+ },
+ {
+ "epoch": 2.8095238095238093,
+ "grad_norm": 8.91073226928711,
+ "learning_rate": 4.796092796092796e-05,
+ "loss": 0.1302,
+ "step": 767
+ },
+ {
+ "epoch": 2.813186813186813,
+ "grad_norm": 24.65394401550293,
+ "learning_rate": 4.793650793650794e-05,
+ "loss": 0.6811,
+ "step": 768
+ },
+ {
+ "epoch": 2.8168498168498166,
+ "grad_norm": 18.257539749145508,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 0.271,
+ "step": 769
+ },
+ {
+ "epoch": 2.8205128205128203,
+ "grad_norm": 41.41588592529297,
+ "learning_rate": 4.7887667887667884e-05,
+ "loss": 1.4149,
+ "step": 770
+ },
+ {
+ "epoch": 2.824175824175824,
+ "grad_norm": 7.753188610076904,
+ "learning_rate": 4.786324786324786e-05,
+ "loss": 0.0825,
+ "step": 771
+ },
+ {
+ "epoch": 2.8278388278388276,
+ "grad_norm": 208.88290405273438,
+ "learning_rate": 4.783882783882784e-05,
+ "loss": 1.032,
+ "step": 772
+ },
+ {
+ "epoch": 2.8315018315018317,
+ "grad_norm": 31.91672706604004,
+ "learning_rate": 4.781440781440781e-05,
+ "loss": 0.9783,
+ "step": 773
+ },
+ {
+ "epoch": 2.8351648351648353,
+ "grad_norm": 5.72416877746582,
+ "learning_rate": 4.778998778998779e-05,
+ "loss": 0.0399,
+ "step": 774
+ },
+ {
+ "epoch": 2.838827838827839,
+ "grad_norm": 30.503149032592773,
+ "learning_rate": 4.776556776556776e-05,
+ "loss": 0.6465,
+ "step": 775
+ },
+ {
+ "epoch": 2.8424908424908426,
+ "grad_norm": 29.615020751953125,
+ "learning_rate": 4.774114774114774e-05,
+ "loss": 0.5823,
+ "step": 776
+ },
+ {
+ "epoch": 2.8461538461538463,
+ "grad_norm": 49.922611236572266,
+ "learning_rate": 4.771672771672772e-05,
+ "loss": 1.2045,
+ "step": 777
+ },
+ {
+ "epoch": 2.84981684981685,
+ "grad_norm": 23.30948829650879,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.5962,
+ "step": 778
+ },
+ {
+ "epoch": 2.8534798534798536,
+ "grad_norm": 24.784086227416992,
+ "learning_rate": 4.766788766788767e-05,
+ "loss": 0.5702,
+ "step": 779
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 30.03589630126953,
+ "learning_rate": 4.764346764346765e-05,
+ "loss": 0.8644,
+ "step": 780
+ },
+ {
+ "epoch": 2.860805860805861,
+ "grad_norm": 21.079742431640625,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 0.2304,
+ "step": 781
+ },
+ {
+ "epoch": 2.8644688644688645,
+ "grad_norm": 18.438365936279297,
+ "learning_rate": 4.75946275946276e-05,
+ "loss": 0.6457,
+ "step": 782
+ },
+ {
+ "epoch": 2.868131868131868,
+ "grad_norm": 16.265140533447266,
+ "learning_rate": 4.757020757020757e-05,
+ "loss": 0.3693,
+ "step": 783
+ },
+ {
+ "epoch": 2.871794871794872,
+ "grad_norm": 17.526954650878906,
+ "learning_rate": 4.754578754578754e-05,
+ "loss": 0.2614,
+ "step": 784
+ },
+ {
+ "epoch": 2.8754578754578755,
+ "grad_norm": 39.94060134887695,
+ "learning_rate": 4.752136752136752e-05,
+ "loss": 0.2829,
+ "step": 785
+ },
+ {
+ "epoch": 2.879120879120879,
+ "grad_norm": 10.09298324584961,
+ "learning_rate": 4.74969474969475e-05,
+ "loss": 0.1489,
+ "step": 786
+ },
+ {
+ "epoch": 2.8827838827838828,
+ "grad_norm": 29.092544555664062,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6063,
+ "step": 787
+ },
+ {
+ "epoch": 2.8864468864468864,
+ "grad_norm": 30.071422576904297,
+ "learning_rate": 4.744810744810745e-05,
+ "loss": 0.3154,
+ "step": 788
+ },
+ {
+ "epoch": 2.89010989010989,
+ "grad_norm": 26.271251678466797,
+ "learning_rate": 4.742368742368743e-05,
+ "loss": 0.4548,
+ "step": 789
+ },
+ {
+ "epoch": 2.8937728937728937,
+ "grad_norm": 32.386775970458984,
+ "learning_rate": 4.73992673992674e-05,
+ "loss": 0.1872,
+ "step": 790
+ },
+ {
+ "epoch": 2.8974358974358974,
+ "grad_norm": 31.18532943725586,
+ "learning_rate": 4.737484737484738e-05,
+ "loss": 0.847,
+ "step": 791
+ },
+ {
+ "epoch": 2.901098901098901,
+ "grad_norm": 17.924785614013672,
+ "learning_rate": 4.7350427350427356e-05,
+ "loss": 0.1588,
+ "step": 792
+ },
+ {
+ "epoch": 2.9047619047619047,
+ "grad_norm": 16.458614349365234,
+ "learning_rate": 4.732600732600733e-05,
+ "loss": 0.1424,
+ "step": 793
+ },
+ {
+ "epoch": 2.9084249084249083,
+ "grad_norm": 50.29280471801758,
+ "learning_rate": 4.7301587301587306e-05,
+ "loss": 1.5482,
+ "step": 794
+ },
+ {
+ "epoch": 2.912087912087912,
+ "grad_norm": 58.37470245361328,
+ "learning_rate": 4.727716727716728e-05,
+ "loss": 1.8242,
+ "step": 795
+ },
+ {
+ "epoch": 2.9157509157509156,
+ "grad_norm": 32.5267448425293,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 1.1197,
+ "step": 796
+ },
+ {
+ "epoch": 2.9194139194139193,
+ "grad_norm": 43.77764892578125,
+ "learning_rate": 4.722832722832723e-05,
+ "loss": 0.7322,
+ "step": 797
+ },
+ {
+ "epoch": 2.9230769230769234,
+ "grad_norm": 25.303524017333984,
+ "learning_rate": 4.720390720390721e-05,
+ "loss": 0.6557,
+ "step": 798
+ },
+ {
+ "epoch": 2.926739926739927,
+ "grad_norm": 23.90159797668457,
+ "learning_rate": 4.717948717948718e-05,
+ "loss": 0.2669,
+ "step": 799
+ },
+ {
+ "epoch": 2.9304029304029307,
+ "grad_norm": 21.20945930480957,
+ "learning_rate": 4.715506715506716e-05,
+ "loss": 0.3279,
+ "step": 800
+ },
+ {
+ "epoch": 2.9340659340659343,
+ "grad_norm": 28.819482803344727,
+ "learning_rate": 4.713064713064713e-05,
+ "loss": 0.717,
+ "step": 801
+ },
+ {
+ "epoch": 2.937728937728938,
+ "grad_norm": 9.13611125946045,
+ "learning_rate": 4.710622710622711e-05,
+ "loss": 0.1291,
+ "step": 802
+ },
+ {
+ "epoch": 2.9413919413919416,
+ "grad_norm": 22.16252326965332,
+ "learning_rate": 4.7081807081807085e-05,
+ "loss": 0.4406,
+ "step": 803
+ },
+ {
+ "epoch": 2.9450549450549453,
+ "grad_norm": 47.73503112792969,
+ "learning_rate": 4.705738705738706e-05,
+ "loss": 0.6176,
+ "step": 804
+ },
+ {
+ "epoch": 2.948717948717949,
+ "grad_norm": 61.73493576049805,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.581,
+ "step": 805
+ },
+ {
+ "epoch": 2.9523809523809526,
+ "grad_norm": 22.48004722595215,
+ "learning_rate": 4.7008547008547014e-05,
+ "loss": 0.7404,
+ "step": 806
+ },
+ {
+ "epoch": 2.956043956043956,
+ "grad_norm": 54.2432746887207,
+ "learning_rate": 4.6984126984126986e-05,
+ "loss": 1.1522,
+ "step": 807
+ },
+ {
+ "epoch": 2.95970695970696,
+ "grad_norm": 26.221921920776367,
+ "learning_rate": 4.695970695970696e-05,
+ "loss": 0.4869,
+ "step": 808
+ },
+ {
+ "epoch": 2.9633699633699635,
+ "grad_norm": 21.688526153564453,
+ "learning_rate": 4.6935286935286936e-05,
+ "loss": 0.6639,
+ "step": 809
+ },
+ {
+ "epoch": 2.967032967032967,
+ "grad_norm": 5.81218147277832,
+ "learning_rate": 4.691086691086691e-05,
+ "loss": 0.0824,
+ "step": 810
+ },
+ {
+ "epoch": 2.970695970695971,
+ "grad_norm": 39.09580612182617,
+ "learning_rate": 4.6886446886446886e-05,
+ "loss": 1.5035,
+ "step": 811
+ },
+ {
+ "epoch": 2.9743589743589745,
+ "grad_norm": 24.587574005126953,
+ "learning_rate": 4.6862026862026864e-05,
+ "loss": 1.1107,
+ "step": 812
+ },
+ {
+ "epoch": 2.978021978021978,
+ "grad_norm": 25.25336265563965,
+ "learning_rate": 4.6837606837606836e-05,
+ "loss": 0.7764,
+ "step": 813
+ },
+ {
+ "epoch": 2.9816849816849818,
+ "grad_norm": 16.311378479003906,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.4079,
+ "step": 814
+ },
+ {
+ "epoch": 2.9853479853479854,
+ "grad_norm": 19.0888729095459,
+ "learning_rate": 4.678876678876679e-05,
+ "loss": 0.5259,
+ "step": 815
+ },
+ {
+ "epoch": 2.989010989010989,
+ "grad_norm": 24.599462509155273,
+ "learning_rate": 4.6764346764346765e-05,
+ "loss": 0.7475,
+ "step": 816
+ },
+ {
+ "epoch": 2.9926739926739927,
+ "grad_norm": 20.4777889251709,
+ "learning_rate": 4.673992673992674e-05,
+ "loss": 0.356,
+ "step": 817
+ },
+ {
+ "epoch": 2.9963369963369964,
+ "grad_norm": 30.4327449798584,
+ "learning_rate": 4.671550671550672e-05,
+ "loss": 0.7958,
+ "step": 818
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 25.57271385192871,
+ "learning_rate": 4.669108669108669e-05,
+ "loss": 0.3918,
+ "step": 819
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 3.9672563076019287,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 0.0469,
+ "step": 820
+ },
+ {
+ "epoch": 3.0073260073260073,
+ "grad_norm": 6.657567501068115,
+ "learning_rate": 4.664224664224664e-05,
+ "loss": 0.0939,
+ "step": 821
+ },
+ {
+ "epoch": 3.010989010989011,
+ "grad_norm": 12.558409690856934,
+ "learning_rate": 4.6617826617826615e-05,
+ "loss": 0.1578,
+ "step": 822
+ },
+ {
+ "epoch": 3.0146520146520146,
+ "grad_norm": 18.909244537353516,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.3209,
+ "step": 823
+ },
+ {
+ "epoch": 3.0183150183150182,
+ "grad_norm": 10.995687484741211,
+ "learning_rate": 4.656898656898657e-05,
+ "loss": 0.1198,
+ "step": 824
+ },
+ {
+ "epoch": 3.021978021978022,
+ "grad_norm": 16.14252471923828,
+ "learning_rate": 4.6544566544566544e-05,
+ "loss": 0.1431,
+ "step": 825
+ },
+ {
+ "epoch": 3.0256410256410255,
+ "grad_norm": 25.924381256103516,
+ "learning_rate": 4.652014652014652e-05,
+ "loss": 0.3989,
+ "step": 826
+ },
+ {
+ "epoch": 3.029304029304029,
+ "grad_norm": 4.87798547744751,
+ "learning_rate": 4.6495726495726494e-05,
+ "loss": 0.0472,
+ "step": 827
+ },
+ {
+ "epoch": 3.032967032967033,
+ "grad_norm": 15.078110694885254,
+ "learning_rate": 4.647130647130647e-05,
+ "loss": 0.1955,
+ "step": 828
+ },
+ {
+ "epoch": 3.0366300366300365,
+ "grad_norm": 19.74415397644043,
+ "learning_rate": 4.644688644688645e-05,
+ "loss": 0.1593,
+ "step": 829
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 43.4788818359375,
+ "learning_rate": 4.642246642246642e-05,
+ "loss": 0.7917,
+ "step": 830
+ },
+ {
+ "epoch": 3.043956043956044,
+ "grad_norm": 27.122041702270508,
+ "learning_rate": 4.63980463980464e-05,
+ "loss": 0.1693,
+ "step": 831
+ },
+ {
+ "epoch": 3.0476190476190474,
+ "grad_norm": 9.51154899597168,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 0.0806,
+ "step": 832
+ },
+ {
+ "epoch": 3.051282051282051,
+ "grad_norm": 11.48532772064209,
+ "learning_rate": 4.634920634920635e-05,
+ "loss": 0.0815,
+ "step": 833
+ },
+ {
+ "epoch": 3.0549450549450547,
+ "grad_norm": 13.547063827514648,
+ "learning_rate": 4.632478632478632e-05,
+ "loss": 0.0817,
+ "step": 834
+ },
+ {
+ "epoch": 3.0586080586080584,
+ "grad_norm": 24.334409713745117,
+ "learning_rate": 4.63003663003663e-05,
+ "loss": 0.547,
+ "step": 835
+ },
+ {
+ "epoch": 3.062271062271062,
+ "grad_norm": 87.3517837524414,
+ "learning_rate": 4.627594627594627e-05,
+ "loss": 0.6534,
+ "step": 836
+ },
+ {
+ "epoch": 3.065934065934066,
+ "grad_norm": 16.100278854370117,
+ "learning_rate": 4.625152625152625e-05,
+ "loss": 0.2961,
+ "step": 837
+ },
+ {
+ "epoch": 3.06959706959707,
+ "grad_norm": 20.725875854492188,
+ "learning_rate": 4.622710622710623e-05,
+ "loss": 0.1114,
+ "step": 838
+ },
+ {
+ "epoch": 3.0732600732600734,
+ "grad_norm": 53.809722900390625,
+ "learning_rate": 4.62026862026862e-05,
+ "loss": 0.3808,
+ "step": 839
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 3.237959623336792,
+ "learning_rate": 4.617826617826618e-05,
+ "loss": 0.019,
+ "step": 840
+ },
+ {
+ "epoch": 3.0805860805860807,
+ "grad_norm": 69.71659088134766,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 1.0945,
+ "step": 841
+ },
+ {
+ "epoch": 3.0842490842490844,
+ "grad_norm": 31.005935668945312,
+ "learning_rate": 4.612942612942613e-05,
+ "loss": 0.3241,
+ "step": 842
+ },
+ {
+ "epoch": 3.087912087912088,
+ "grad_norm": 66.98394775390625,
+ "learning_rate": 4.610500610500611e-05,
+ "loss": 1.0213,
+ "step": 843
+ },
+ {
+ "epoch": 3.0915750915750917,
+ "grad_norm": 23.54532814025879,
+ "learning_rate": 4.608058608058609e-05,
+ "loss": 0.2188,
+ "step": 844
+ },
+ {
+ "epoch": 3.0952380952380953,
+ "grad_norm": 25.952709197998047,
+ "learning_rate": 4.605616605616606e-05,
+ "loss": 0.4305,
+ "step": 845
+ },
+ {
+ "epoch": 3.098901098901099,
+ "grad_norm": 36.100746154785156,
+ "learning_rate": 4.603174603174604e-05,
+ "loss": 0.6497,
+ "step": 846
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 60.34727478027344,
+ "learning_rate": 4.600732600732601e-05,
+ "loss": 0.3083,
+ "step": 847
+ },
+ {
+ "epoch": 3.1062271062271063,
+ "grad_norm": 35.265167236328125,
+ "learning_rate": 4.598290598290598e-05,
+ "loss": 0.3222,
+ "step": 848
+ },
+ {
+ "epoch": 3.10989010989011,
+ "grad_norm": 19.180070877075195,
+ "learning_rate": 4.595848595848596e-05,
+ "loss": 0.4065,
+ "step": 849
+ },
+ {
+ "epoch": 3.1135531135531136,
+ "grad_norm": 22.92152976989746,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.3998,
+ "step": 850
+ },
+ {
+ "epoch": 3.1172161172161172,
+ "grad_norm": 48.91377639770508,
+ "learning_rate": 4.590964590964591e-05,
+ "loss": 0.7035,
+ "step": 851
+ },
+ {
+ "epoch": 3.120879120879121,
+ "grad_norm": 11.615083694458008,
+ "learning_rate": 4.588522588522589e-05,
+ "loss": 0.3102,
+ "step": 852
+ },
+ {
+ "epoch": 3.1245421245421245,
+ "grad_norm": 23.573801040649414,
+ "learning_rate": 4.586080586080586e-05,
+ "loss": 0.3358,
+ "step": 853
+ },
+ {
+ "epoch": 3.128205128205128,
+ "grad_norm": 16.903776168823242,
+ "learning_rate": 4.583638583638584e-05,
+ "loss": 0.2973,
+ "step": 854
+ },
+ {
+ "epoch": 3.131868131868132,
+ "grad_norm": 6.052688121795654,
+ "learning_rate": 4.5811965811965816e-05,
+ "loss": 0.0671,
+ "step": 855
+ },
+ {
+ "epoch": 3.1355311355311355,
+ "grad_norm": 34.40020751953125,
+ "learning_rate": 4.578754578754579e-05,
+ "loss": 0.508,
+ "step": 856
+ },
+ {
+ "epoch": 3.139194139194139,
+ "grad_norm": 21.39589500427246,
+ "learning_rate": 4.5763125763125766e-05,
+ "loss": 0.0805,
+ "step": 857
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 24.03894805908203,
+ "learning_rate": 4.5738705738705744e-05,
+ "loss": 0.1884,
+ "step": 858
+ },
+ {
+ "epoch": 3.1465201465201464,
+ "grad_norm": 66.53777313232422,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 0.5235,
+ "step": 859
+ },
+ {
+ "epoch": 3.15018315018315,
+ "grad_norm": 33.663490295410156,
+ "learning_rate": 4.568986568986569e-05,
+ "loss": 0.7579,
+ "step": 860
+ },
+ {
+ "epoch": 3.1538461538461537,
+ "grad_norm": 30.173309326171875,
+ "learning_rate": 4.5665445665445666e-05,
+ "loss": 0.2263,
+ "step": 861
+ },
+ {
+ "epoch": 3.1575091575091574,
+ "grad_norm": 37.52082824707031,
+ "learning_rate": 4.564102564102564e-05,
+ "loss": 0.5695,
+ "step": 862
+ },
+ {
+ "epoch": 3.161172161172161,
+ "grad_norm": 38.86849594116211,
+ "learning_rate": 4.5616605616605616e-05,
+ "loss": 0.6981,
+ "step": 863
+ },
+ {
+ "epoch": 3.1648351648351647,
+ "grad_norm": 42.702247619628906,
+ "learning_rate": 4.5592185592185595e-05,
+ "loss": 0.9864,
+ "step": 864
+ },
+ {
+ "epoch": 3.1684981684981683,
+ "grad_norm": 16.60870361328125,
+ "learning_rate": 4.5567765567765566e-05,
+ "loss": 0.1595,
+ "step": 865
+ },
+ {
+ "epoch": 3.172161172161172,
+ "grad_norm": 26.309768676757812,
+ "learning_rate": 4.5543345543345545e-05,
+ "loss": 0.4028,
+ "step": 866
+ },
+ {
+ "epoch": 3.1758241758241756,
+ "grad_norm": 45.7955322265625,
+ "learning_rate": 4.551892551892552e-05,
+ "loss": 1.1258,
+ "step": 867
+ },
+ {
+ "epoch": 3.1794871794871793,
+ "grad_norm": 25.780302047729492,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.4018,
+ "step": 868
+ },
+ {
+ "epoch": 3.183150183150183,
+ "grad_norm": 41.65156555175781,
+ "learning_rate": 4.5470085470085474e-05,
+ "loss": 0.4543,
+ "step": 869
+ },
+ {
+ "epoch": 3.186813186813187,
+ "grad_norm": 56.92537307739258,
+ "learning_rate": 4.544566544566545e-05,
+ "loss": 0.334,
+ "step": 870
+ },
+ {
+ "epoch": 3.1904761904761907,
+ "grad_norm": 19.44786262512207,
+ "learning_rate": 4.5421245421245424e-05,
+ "loss": 0.2855,
+ "step": 871
+ },
+ {
+ "epoch": 3.1941391941391943,
+ "grad_norm": 19.75824546813965,
+ "learning_rate": 4.53968253968254e-05,
+ "loss": 0.2589,
+ "step": 872
+ },
+ {
+ "epoch": 3.197802197802198,
+ "grad_norm": 30.935569763183594,
+ "learning_rate": 4.5372405372405374e-05,
+ "loss": 0.5083,
+ "step": 873
+ },
+ {
+ "epoch": 3.2014652014652016,
+ "grad_norm": 32.59378433227539,
+ "learning_rate": 4.5347985347985345e-05,
+ "loss": 0.6806,
+ "step": 874
+ },
+ {
+ "epoch": 3.2051282051282053,
+ "grad_norm": 32.7809944152832,
+ "learning_rate": 4.5323565323565324e-05,
+ "loss": 0.7094,
+ "step": 875
+ },
+ {
+ "epoch": 3.208791208791209,
+ "grad_norm": 22.95226287841797,
+ "learning_rate": 4.5299145299145296e-05,
+ "loss": 0.3871,
+ "step": 876
+ },
+ {
+ "epoch": 3.2124542124542126,
+ "grad_norm": 13.90613079071045,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.2049,
+ "step": 877
+ },
+ {
+ "epoch": 3.2161172161172162,
+ "grad_norm": 36.79647445678711,
+ "learning_rate": 4.525030525030525e-05,
+ "loss": 0.959,
+ "step": 878
+ },
+ {
+ "epoch": 3.21978021978022,
+ "grad_norm": 16.770553588867188,
+ "learning_rate": 4.5225885225885224e-05,
+ "loss": 0.3061,
+ "step": 879
+ },
+ {
+ "epoch": 3.2234432234432235,
+ "grad_norm": 22.241527557373047,
+ "learning_rate": 4.52014652014652e-05,
+ "loss": 0.1961,
+ "step": 880
+ },
+ {
+ "epoch": 3.227106227106227,
+ "grad_norm": 51.097957611083984,
+ "learning_rate": 4.517704517704518e-05,
+ "loss": 0.5272,
+ "step": 881
+ },
+ {
+ "epoch": 3.230769230769231,
+ "grad_norm": 43.70039749145508,
+ "learning_rate": 4.515262515262515e-05,
+ "loss": 0.6764,
+ "step": 882
+ },
+ {
+ "epoch": 3.2344322344322345,
+ "grad_norm": 30.666664123535156,
+ "learning_rate": 4.512820512820513e-05,
+ "loss": 0.6524,
+ "step": 883
+ },
+ {
+ "epoch": 3.238095238095238,
+ "grad_norm": 16.787954330444336,
+ "learning_rate": 4.510378510378511e-05,
+ "loss": 0.178,
+ "step": 884
+ },
+ {
+ "epoch": 3.241758241758242,
+ "grad_norm": 32.14992904663086,
+ "learning_rate": 4.507936507936508e-05,
+ "loss": 0.6206,
+ "step": 885
+ },
+ {
+ "epoch": 3.2454212454212454,
+ "grad_norm": 24.926103591918945,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.4696,
+ "step": 886
+ },
+ {
+ "epoch": 3.249084249084249,
+ "grad_norm": 31.044967651367188,
+ "learning_rate": 4.503052503052503e-05,
+ "loss": 0.3021,
+ "step": 887
+ },
+ {
+ "epoch": 3.2527472527472527,
+ "grad_norm": 10.355696678161621,
+ "learning_rate": 4.5006105006105e-05,
+ "loss": 0.0784,
+ "step": 888
+ },
+ {
+ "epoch": 3.2564102564102564,
+ "grad_norm": 28.19644546508789,
+ "learning_rate": 4.498168498168498e-05,
+ "loss": 0.234,
+ "step": 889
+ },
+ {
+ "epoch": 3.26007326007326,
+ "grad_norm": 21.245389938354492,
+ "learning_rate": 4.495726495726496e-05,
+ "loss": 0.2895,
+ "step": 890
+ },
+ {
+ "epoch": 3.2637362637362637,
+ "grad_norm": 27.337587356567383,
+ "learning_rate": 4.493284493284493e-05,
+ "loss": 0.4614,
+ "step": 891
+ },
+ {
+ "epoch": 3.2673992673992673,
+ "grad_norm": 37.06135177612305,
+ "learning_rate": 4.490842490842491e-05,
+ "loss": 0.2717,
+ "step": 892
+ },
+ {
+ "epoch": 3.271062271062271,
+ "grad_norm": 26.85171890258789,
+ "learning_rate": 4.488400488400489e-05,
+ "loss": 0.4965,
+ "step": 893
+ },
+ {
+ "epoch": 3.2747252747252746,
+ "grad_norm": 41.79130935668945,
+ "learning_rate": 4.485958485958486e-05,
+ "loss": 0.4209,
+ "step": 894
+ },
+ {
+ "epoch": 3.2783882783882783,
+ "grad_norm": 32.75770950317383,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 0.5126,
+ "step": 895
+ },
+ {
+ "epoch": 3.282051282051282,
+ "grad_norm": 67.75275421142578,
+ "learning_rate": 4.481074481074482e-05,
+ "loss": 0.8257,
+ "step": 896
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 36.773319244384766,
+ "learning_rate": 4.478632478632479e-05,
+ "loss": 1.6113,
+ "step": 897
+ },
+ {
+ "epoch": 3.2893772893772892,
+ "grad_norm": 60.94101333618164,
+ "learning_rate": 4.476190476190476e-05,
+ "loss": 0.7996,
+ "step": 898
+ },
+ {
+ "epoch": 3.293040293040293,
+ "grad_norm": 45.40288162231445,
+ "learning_rate": 4.473748473748474e-05,
+ "loss": 0.7139,
+ "step": 899
+ },
+ {
+ "epoch": 3.2967032967032965,
+ "grad_norm": 27.4019718170166,
+ "learning_rate": 4.471306471306471e-05,
+ "loss": 0.4695,
+ "step": 900
+ },
+ {
+ "epoch": 3.3003663003663,
+ "grad_norm": 20.126493453979492,
+ "learning_rate": 4.468864468864469e-05,
+ "loss": 0.2181,
+ "step": 901
+ },
+ {
+ "epoch": 3.304029304029304,
+ "grad_norm": 37.28034591674805,
+ "learning_rate": 4.466422466422466e-05,
+ "loss": 0.8902,
+ "step": 902
+ },
+ {
+ "epoch": 3.3076923076923075,
+ "grad_norm": 15.40217113494873,
+ "learning_rate": 4.463980463980464e-05,
+ "loss": 0.2428,
+ "step": 903
+ },
+ {
+ "epoch": 3.311355311355311,
+ "grad_norm": 21.924699783325195,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 0.3271,
+ "step": 904
+ },
+ {
+ "epoch": 3.315018315018315,
+ "grad_norm": 29.787410736083984,
+ "learning_rate": 4.459096459096459e-05,
+ "loss": 0.5914,
+ "step": 905
+ },
+ {
+ "epoch": 3.3186813186813184,
+ "grad_norm": 16.91995620727539,
+ "learning_rate": 4.456654456654457e-05,
+ "loss": 0.3442,
+ "step": 906
+ },
+ {
+ "epoch": 3.3223443223443225,
+ "grad_norm": 13.232250213623047,
+ "learning_rate": 4.4542124542124546e-05,
+ "loss": 0.1977,
+ "step": 907
+ },
+ {
+ "epoch": 3.326007326007326,
+ "grad_norm": 25.45724868774414,
+ "learning_rate": 4.451770451770452e-05,
+ "loss": 0.8241,
+ "step": 908
+ },
+ {
+ "epoch": 3.32967032967033,
+ "grad_norm": 20.996292114257812,
+ "learning_rate": 4.4493284493284496e-05,
+ "loss": 0.3154,
+ "step": 909
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 28.150684356689453,
+ "learning_rate": 4.4468864468864475e-05,
+ "loss": 0.4077,
+ "step": 910
+ },
+ {
+ "epoch": 3.336996336996337,
+ "grad_norm": 57.184322357177734,
+ "learning_rate": 4.444444444444444e-05,
+ "loss": 0.5701,
+ "step": 911
+ },
+ {
+ "epoch": 3.340659340659341,
+ "grad_norm": 26.231369018554688,
+ "learning_rate": 4.442002442002442e-05,
+ "loss": 0.4427,
+ "step": 912
+ },
+ {
+ "epoch": 3.3443223443223444,
+ "grad_norm": 32.52253723144531,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 1.014,
+ "step": 913
+ },
+ {
+ "epoch": 3.347985347985348,
+ "grad_norm": 19.39035987854004,
+ "learning_rate": 4.437118437118437e-05,
+ "loss": 0.1567,
+ "step": 914
+ },
+ {
+ "epoch": 3.3516483516483517,
+ "grad_norm": 24.542327880859375,
+ "learning_rate": 4.434676434676435e-05,
+ "loss": 0.5478,
+ "step": 915
+ },
+ {
+ "epoch": 3.3553113553113554,
+ "grad_norm": 46.6158447265625,
+ "learning_rate": 4.4322344322344325e-05,
+ "loss": 0.5636,
+ "step": 916
+ },
+ {
+ "epoch": 3.358974358974359,
+ "grad_norm": 36.008846282958984,
+ "learning_rate": 4.42979242979243e-05,
+ "loss": 0.4401,
+ "step": 917
+ },
+ {
+ "epoch": 3.3626373626373627,
+ "grad_norm": 6.922544956207275,
+ "learning_rate": 4.4273504273504275e-05,
+ "loss": 0.0885,
+ "step": 918
+ },
+ {
+ "epoch": 3.3663003663003663,
+ "grad_norm": 25.707748413085938,
+ "learning_rate": 4.4249084249084254e-05,
+ "loss": 0.3235,
+ "step": 919
+ },
+ {
+ "epoch": 3.36996336996337,
+ "grad_norm": 47.98778533935547,
+ "learning_rate": 4.4224664224664226e-05,
+ "loss": 1.3738,
+ "step": 920
+ },
+ {
+ "epoch": 3.3736263736263736,
+ "grad_norm": 26.64824104309082,
+ "learning_rate": 4.4200244200244204e-05,
+ "loss": 0.8405,
+ "step": 921
+ },
+ {
+ "epoch": 3.3772893772893773,
+ "grad_norm": 30.66206169128418,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.3021,
+ "step": 922
+ },
+ {
+ "epoch": 3.380952380952381,
+ "grad_norm": 33.15909194946289,
+ "learning_rate": 4.4151404151404154e-05,
+ "loss": 0.3064,
+ "step": 923
+ },
+ {
+ "epoch": 3.3846153846153846,
+ "grad_norm": 78.46485137939453,
+ "learning_rate": 4.4126984126984126e-05,
+ "loss": 0.6526,
+ "step": 924
+ },
+ {
+ "epoch": 3.3882783882783882,
+ "grad_norm": 45.584747314453125,
+ "learning_rate": 4.4102564102564104e-05,
+ "loss": 0.9546,
+ "step": 925
+ },
+ {
+ "epoch": 3.391941391941392,
+ "grad_norm": 23.244487762451172,
+ "learning_rate": 4.4078144078144076e-05,
+ "loss": 0.3334,
+ "step": 926
+ },
+ {
+ "epoch": 3.3956043956043955,
+ "grad_norm": 9.296119689941406,
+ "learning_rate": 4.4053724053724054e-05,
+ "loss": 0.1045,
+ "step": 927
+ },
+ {
+ "epoch": 3.399267399267399,
+ "grad_norm": 15.207316398620605,
+ "learning_rate": 4.4029304029304026e-05,
+ "loss": 0.087,
+ "step": 928
+ },
+ {
+ "epoch": 3.402930402930403,
+ "grad_norm": 20.554912567138672,
+ "learning_rate": 4.4004884004884005e-05,
+ "loss": 0.2658,
+ "step": 929
+ },
+ {
+ "epoch": 3.4065934065934065,
+ "grad_norm": 25.304515838623047,
+ "learning_rate": 4.398046398046398e-05,
+ "loss": 0.2862,
+ "step": 930
+ },
+ {
+ "epoch": 3.41025641025641,
+ "grad_norm": 44.320377349853516,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 1.1972,
+ "step": 931
+ },
+ {
+ "epoch": 3.413919413919414,
+ "grad_norm": 21.3024845123291,
+ "learning_rate": 4.393162393162393e-05,
+ "loss": 0.2193,
+ "step": 932
+ },
+ {
+ "epoch": 3.4175824175824174,
+ "grad_norm": 12.274759292602539,
+ "learning_rate": 4.390720390720391e-05,
+ "loss": 0.1033,
+ "step": 933
+ },
+ {
+ "epoch": 3.421245421245421,
+ "grad_norm": 29.188446044921875,
+ "learning_rate": 4.388278388278388e-05,
+ "loss": 0.8143,
+ "step": 934
+ },
+ {
+ "epoch": 3.4249084249084247,
+ "grad_norm": 11.880194664001465,
+ "learning_rate": 4.385836385836386e-05,
+ "loss": 0.0932,
+ "step": 935
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 28.859825134277344,
+ "learning_rate": 4.383394383394384e-05,
+ "loss": 0.6026,
+ "step": 936
+ },
+ {
+ "epoch": 3.4322344322344325,
+ "grad_norm": 25.131824493408203,
+ "learning_rate": 4.3809523809523805e-05,
+ "loss": 0.4023,
+ "step": 937
+ },
+ {
+ "epoch": 3.435897435897436,
+ "grad_norm": 35.04637145996094,
+ "learning_rate": 4.3785103785103783e-05,
+ "loss": 0.7765,
+ "step": 938
+ },
+ {
+ "epoch": 3.4395604395604398,
+ "grad_norm": 15.831666946411133,
+ "learning_rate": 4.376068376068376e-05,
+ "loss": 0.1779,
+ "step": 939
+ },
+ {
+ "epoch": 3.4432234432234434,
+ "grad_norm": 26.455148696899414,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.3165,
+ "step": 940
+ },
+ {
+ "epoch": 3.446886446886447,
+ "grad_norm": 23.840030670166016,
+ "learning_rate": 4.371184371184371e-05,
+ "loss": 0.5363,
+ "step": 941
+ },
+ {
+ "epoch": 3.4505494505494507,
+ "grad_norm": 30.517026901245117,
+ "learning_rate": 4.368742368742369e-05,
+ "loss": 0.422,
+ "step": 942
+ },
+ {
+ "epoch": 3.4542124542124544,
+ "grad_norm": 51.574703216552734,
+ "learning_rate": 4.366300366300366e-05,
+ "loss": 1.5333,
+ "step": 943
+ },
+ {
+ "epoch": 3.457875457875458,
+ "grad_norm": 57.92119216918945,
+ "learning_rate": 4.363858363858364e-05,
+ "loss": 0.5732,
+ "step": 944
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 34.3664436340332,
+ "learning_rate": 4.361416361416362e-05,
+ "loss": 0.5054,
+ "step": 945
+ },
+ {
+ "epoch": 3.4652014652014653,
+ "grad_norm": 14.034111976623535,
+ "learning_rate": 4.358974358974359e-05,
+ "loss": 0.0969,
+ "step": 946
+ },
+ {
+ "epoch": 3.468864468864469,
+ "grad_norm": 15.058267593383789,
+ "learning_rate": 4.356532356532357e-05,
+ "loss": 0.1877,
+ "step": 947
+ },
+ {
+ "epoch": 3.4725274725274726,
+ "grad_norm": 18.598024368286133,
+ "learning_rate": 4.354090354090355e-05,
+ "loss": 0.2378,
+ "step": 948
+ },
+ {
+ "epoch": 3.4761904761904763,
+ "grad_norm": 17.926319122314453,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.2935,
+ "step": 949
+ },
+ {
+ "epoch": 3.47985347985348,
+ "grad_norm": 8.25291633605957,
+ "learning_rate": 4.349206349206349e-05,
+ "loss": 0.0891,
+ "step": 950
+ },
+ {
+ "epoch": 3.4835164835164836,
+ "grad_norm": 26.152061462402344,
+ "learning_rate": 4.346764346764347e-05,
+ "loss": 0.2798,
+ "step": 951
+ },
+ {
+ "epoch": 3.4871794871794872,
+ "grad_norm": 22.669677734375,
+ "learning_rate": 4.344322344322344e-05,
+ "loss": 0.506,
+ "step": 952
+ },
+ {
+ "epoch": 3.490842490842491,
+ "grad_norm": 18.439355850219727,
+ "learning_rate": 4.341880341880342e-05,
+ "loss": 0.3034,
+ "step": 953
+ },
+ {
+ "epoch": 3.4945054945054945,
+ "grad_norm": 30.48084259033203,
+ "learning_rate": 4.339438339438339e-05,
+ "loss": 0.4366,
+ "step": 954
+ },
+ {
+ "epoch": 3.498168498168498,
+ "grad_norm": 51.792381286621094,
+ "learning_rate": 4.336996336996337e-05,
+ "loss": 0.5214,
+ "step": 955
+ },
+ {
+ "epoch": 3.501831501831502,
+ "grad_norm": 44.70718002319336,
+ "learning_rate": 4.334554334554335e-05,
+ "loss": 0.7823,
+ "step": 956
+ },
+ {
+ "epoch": 3.5054945054945055,
+ "grad_norm": 42.00168991088867,
+ "learning_rate": 4.332112332112332e-05,
+ "loss": 0.9207,
+ "step": 957
+ },
+ {
+ "epoch": 3.509157509157509,
+ "grad_norm": 28.97800636291504,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.279,
+ "step": 958
+ },
+ {
+ "epoch": 3.5128205128205128,
+ "grad_norm": 21.902843475341797,
+ "learning_rate": 4.327228327228328e-05,
+ "loss": 0.1969,
+ "step": 959
+ },
+ {
+ "epoch": 3.5164835164835164,
+ "grad_norm": 14.560053825378418,
+ "learning_rate": 4.324786324786325e-05,
+ "loss": 0.0976,
+ "step": 960
+ },
+ {
+ "epoch": 3.52014652014652,
+ "grad_norm": 4.2637104988098145,
+ "learning_rate": 4.322344322344323e-05,
+ "loss": 0.0277,
+ "step": 961
+ },
+ {
+ "epoch": 3.5238095238095237,
+ "grad_norm": 52.4840202331543,
+ "learning_rate": 4.3199023199023205e-05,
+ "loss": 0.2967,
+ "step": 962
+ },
+ {
+ "epoch": 3.5274725274725274,
+ "grad_norm": 48.95661163330078,
+ "learning_rate": 4.317460317460317e-05,
+ "loss": 0.2904,
+ "step": 963
+ },
+ {
+ "epoch": 3.531135531135531,
+ "grad_norm": 79.46379089355469,
+ "learning_rate": 4.315018315018315e-05,
+ "loss": 0.1644,
+ "step": 964
+ },
+ {
+ "epoch": 3.5347985347985347,
+ "grad_norm": 29.678428649902344,
+ "learning_rate": 4.312576312576313e-05,
+ "loss": 0.3498,
+ "step": 965
+ },
+ {
+ "epoch": 3.5384615384615383,
+ "grad_norm": 32.71342086791992,
+ "learning_rate": 4.31013431013431e-05,
+ "loss": 0.3509,
+ "step": 966
+ },
+ {
+ "epoch": 3.542124542124542,
+ "grad_norm": 6.679911136627197,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.0658,
+ "step": 967
+ },
+ {
+ "epoch": 3.5457875457875456,
+ "grad_norm": 19.8692626953125,
+ "learning_rate": 4.3052503052503056e-05,
+ "loss": 0.1626,
+ "step": 968
+ },
+ {
+ "epoch": 3.5494505494505493,
+ "grad_norm": 17.69087791442871,
+ "learning_rate": 4.302808302808303e-05,
+ "loss": 0.2592,
+ "step": 969
+ },
+ {
+ "epoch": 3.553113553113553,
+ "grad_norm": 11.734158515930176,
+ "learning_rate": 4.3003663003663006e-05,
+ "loss": 0.1007,
+ "step": 970
+ },
+ {
+ "epoch": 3.5567765567765566,
+ "grad_norm": 34.51172637939453,
+ "learning_rate": 4.2979242979242984e-05,
+ "loss": 0.2823,
+ "step": 971
+ },
+ {
+ "epoch": 3.5604395604395602,
+ "grad_norm": 15.009514808654785,
+ "learning_rate": 4.2954822954822956e-05,
+ "loss": 0.1203,
+ "step": 972
+ },
+ {
+ "epoch": 3.564102564102564,
+ "grad_norm": 67.92166137695312,
+ "learning_rate": 4.2930402930402934e-05,
+ "loss": 0.396,
+ "step": 973
+ },
+ {
+ "epoch": 3.5677655677655675,
+ "grad_norm": 66.84014129638672,
+ "learning_rate": 4.290598290598291e-05,
+ "loss": 0.6545,
+ "step": 974
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 25.811107635498047,
+ "learning_rate": 4.2881562881562885e-05,
+ "loss": 0.1747,
+ "step": 975
+ },
+ {
+ "epoch": 3.575091575091575,
+ "grad_norm": 100.88753509521484,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 0.3991,
+ "step": 976
+ },
+ {
+ "epoch": 3.578754578754579,
+ "grad_norm": 34.51667785644531,
+ "learning_rate": 4.2832722832722835e-05,
+ "loss": 0.1365,
+ "step": 977
+ },
+ {
+ "epoch": 3.5824175824175826,
+ "grad_norm": 26.852561950683594,
+ "learning_rate": 4.2808302808302806e-05,
+ "loss": 0.3627,
+ "step": 978
+ },
+ {
+ "epoch": 3.586080586080586,
+ "grad_norm": 24.968570709228516,
+ "learning_rate": 4.2783882783882785e-05,
+ "loss": 0.2106,
+ "step": 979
+ },
+ {
+ "epoch": 3.58974358974359,
+ "grad_norm": 27.33326530456543,
+ "learning_rate": 4.2759462759462757e-05,
+ "loss": 0.1758,
+ "step": 980
+ },
+ {
+ "epoch": 3.5934065934065935,
+ "grad_norm": 52.63814926147461,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.601,
+ "step": 981
+ },
+ {
+ "epoch": 3.597069597069597,
+ "grad_norm": 37.77897262573242,
+ "learning_rate": 4.2710622710622713e-05,
+ "loss": 0.5299,
+ "step": 982
+ },
+ {
+ "epoch": 3.600732600732601,
+ "grad_norm": 27.691659927368164,
+ "learning_rate": 4.2686202686202685e-05,
+ "loss": 0.1784,
+ "step": 983
+ },
+ {
+ "epoch": 3.6043956043956045,
+ "grad_norm": 106.33782958984375,
+ "learning_rate": 4.2661782661782664e-05,
+ "loss": 0.8859,
+ "step": 984
+ },
+ {
+ "epoch": 3.608058608058608,
+ "grad_norm": 22.95706558227539,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1611,
+ "step": 985
+ },
+ {
+ "epoch": 3.6117216117216118,
+ "grad_norm": 22.72148895263672,
+ "learning_rate": 4.2612942612942614e-05,
+ "loss": 0.1561,
+ "step": 986
+ },
+ {
+ "epoch": 3.6153846153846154,
+ "grad_norm": 93.37244415283203,
+ "learning_rate": 4.258852258852259e-05,
+ "loss": 0.4287,
+ "step": 987
+ },
+ {
+ "epoch": 3.619047619047619,
+ "grad_norm": 51.54584884643555,
+ "learning_rate": 4.2564102564102564e-05,
+ "loss": 0.6292,
+ "step": 988
+ },
+ {
+ "epoch": 3.6227106227106227,
+ "grad_norm": 61.58243942260742,
+ "learning_rate": 4.2539682539682536e-05,
+ "loss": 1.3205,
+ "step": 989
+ },
+ {
+ "epoch": 3.6263736263736264,
+ "grad_norm": 70.59432220458984,
+ "learning_rate": 4.2515262515262514e-05,
+ "loss": 0.7451,
+ "step": 990
+ },
+ {
+ "epoch": 3.63003663003663,
+ "grad_norm": 76.28730773925781,
+ "learning_rate": 4.249084249084249e-05,
+ "loss": 2.0314,
+ "step": 991
+ },
+ {
+ "epoch": 3.6336996336996337,
+ "grad_norm": 73.5402603149414,
+ "learning_rate": 4.2466422466422464e-05,
+ "loss": 1.6628,
+ "step": 992
+ },
+ {
+ "epoch": 3.6373626373626373,
+ "grad_norm": 75.8978042602539,
+ "learning_rate": 4.244200244200244e-05,
+ "loss": 1.652,
+ "step": 993
+ },
+ {
+ "epoch": 3.641025641025641,
+ "grad_norm": 37.04104232788086,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 1.5356,
+ "step": 994
+ },
+ {
+ "epoch": 3.6446886446886446,
+ "grad_norm": 34.31178283691406,
+ "learning_rate": 4.239316239316239e-05,
+ "loss": 1.1783,
+ "step": 995
+ },
+ {
+ "epoch": 3.6483516483516483,
+ "grad_norm": 22.934877395629883,
+ "learning_rate": 4.236874236874237e-05,
+ "loss": 1.2995,
+ "step": 996
+ },
+ {
+ "epoch": 3.652014652014652,
+ "grad_norm": 30.25251579284668,
+ "learning_rate": 4.234432234432235e-05,
+ "loss": 1.1304,
+ "step": 997
+ },
+ {
+ "epoch": 3.6556776556776556,
+ "grad_norm": 35.082027435302734,
+ "learning_rate": 4.231990231990232e-05,
+ "loss": 1.0827,
+ "step": 998
+ },
+ {
+ "epoch": 3.659340659340659,
+ "grad_norm": 24.526325225830078,
+ "learning_rate": 4.22954822954823e-05,
+ "loss": 0.8716,
+ "step": 999
+ },
+ {
+ "epoch": 3.663003663003663,
+ "grad_norm": 29.882883071899414,
+ "learning_rate": 4.227106227106228e-05,
+ "loss": 0.5432,
+ "step": 1000
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 34.53218078613281,
+ "learning_rate": 4.224664224664224e-05,
+ "loss": 1.2094,
+ "step": 1001
+ },
+ {
+ "epoch": 3.67032967032967,
+ "grad_norm": 22.50905990600586,
+ "learning_rate": 4.222222222222222e-05,
+ "loss": 0.4608,
+ "step": 1002
+ },
+ {
+ "epoch": 3.6739926739926743,
+ "grad_norm": 27.33183479309082,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.7181,
+ "step": 1003
+ },
+ {
+ "epoch": 3.677655677655678,
+ "grad_norm": 50.09929275512695,
+ "learning_rate": 4.217338217338217e-05,
+ "loss": 1.1163,
+ "step": 1004
+ },
+ {
+ "epoch": 3.6813186813186816,
+ "grad_norm": 32.48406982421875,
+ "learning_rate": 4.214896214896215e-05,
+ "loss": 0.7101,
+ "step": 1005
+ },
+ {
+ "epoch": 3.684981684981685,
+ "grad_norm": 5.821015357971191,
+ "learning_rate": 4.212454212454212e-05,
+ "loss": 0.0695,
+ "step": 1006
+ },
+ {
+ "epoch": 3.688644688644689,
+ "grad_norm": 32.04796600341797,
+ "learning_rate": 4.21001221001221e-05,
+ "loss": 0.609,
+ "step": 1007
+ },
+ {
+ "epoch": 3.6923076923076925,
+ "grad_norm": 37.282474517822266,
+ "learning_rate": 4.207570207570208e-05,
+ "loss": 0.873,
+ "step": 1008
+ },
+ {
+ "epoch": 3.695970695970696,
+ "grad_norm": 35.74583435058594,
+ "learning_rate": 4.205128205128205e-05,
+ "loss": 0.7387,
+ "step": 1009
+ },
+ {
+ "epoch": 3.6996336996337,
+ "grad_norm": 74.91361236572266,
+ "learning_rate": 4.202686202686203e-05,
+ "loss": 1.6302,
+ "step": 1010
+ },
+ {
+ "epoch": 3.7032967032967035,
+ "grad_norm": 25.163251876831055,
+ "learning_rate": 4.200244200244201e-05,
+ "loss": 0.3866,
+ "step": 1011
+ },
+ {
+ "epoch": 3.706959706959707,
+ "grad_norm": 34.36520004272461,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.8413,
+ "step": 1012
+ },
+ {
+ "epoch": 3.7106227106227108,
+ "grad_norm": 41.62683868408203,
+ "learning_rate": 4.195360195360196e-05,
+ "loss": 0.4155,
+ "step": 1013
+ },
+ {
+ "epoch": 3.7142857142857144,
+ "grad_norm": 34.24674987792969,
+ "learning_rate": 4.192918192918193e-05,
+ "loss": 0.8327,
+ "step": 1014
+ },
+ {
+ "epoch": 3.717948717948718,
+ "grad_norm": 27.771732330322266,
+ "learning_rate": 4.19047619047619e-05,
+ "loss": 0.4509,
+ "step": 1015
+ },
+ {
+ "epoch": 3.7216117216117217,
+ "grad_norm": 26.55430793762207,
+ "learning_rate": 4.188034188034188e-05,
+ "loss": 0.4851,
+ "step": 1016
+ },
+ {
+ "epoch": 3.7252747252747254,
+ "grad_norm": 34.8384895324707,
+ "learning_rate": 4.185592185592186e-05,
+ "loss": 0.4105,
+ "step": 1017
+ },
+ {
+ "epoch": 3.728937728937729,
+ "grad_norm": 29.447805404663086,
+ "learning_rate": 4.183150183150183e-05,
+ "loss": 0.4129,
+ "step": 1018
+ },
+ {
+ "epoch": 3.7326007326007327,
+ "grad_norm": 66.70004272460938,
+ "learning_rate": 4.180708180708181e-05,
+ "loss": 0.4762,
+ "step": 1019
+ },
+ {
+ "epoch": 3.7362637362637363,
+ "grad_norm": 10.356173515319824,
+ "learning_rate": 4.1782661782661786e-05,
+ "loss": 0.0718,
+ "step": 1020
+ },
+ {
+ "epoch": 3.73992673992674,
+ "grad_norm": 35.98944854736328,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 0.2672,
+ "step": 1021
+ },
+ {
+ "epoch": 3.7435897435897436,
+ "grad_norm": 6.806238651275635,
+ "learning_rate": 4.1733821733821736e-05,
+ "loss": 0.0455,
+ "step": 1022
+ },
+ {
+ "epoch": 3.7472527472527473,
+ "grad_norm": 19.689456939697266,
+ "learning_rate": 4.1709401709401715e-05,
+ "loss": 0.2323,
+ "step": 1023
+ },
+ {
+ "epoch": 3.750915750915751,
+ "grad_norm": 23.971303939819336,
+ "learning_rate": 4.1684981684981687e-05,
+ "loss": 0.1393,
+ "step": 1024
+ },
+ {
+ "epoch": 3.7545787545787546,
+ "grad_norm": 43.26774215698242,
+ "learning_rate": 4.1660561660561665e-05,
+ "loss": 0.7084,
+ "step": 1025
+ },
+ {
+ "epoch": 3.758241758241758,
+ "grad_norm": 36.04475402832031,
+ "learning_rate": 4.1636141636141643e-05,
+ "loss": 0.3782,
+ "step": 1026
+ },
+ {
+ "epoch": 3.761904761904762,
+ "grad_norm": 48.78522491455078,
+ "learning_rate": 4.161172161172161e-05,
+ "loss": 0.7698,
+ "step": 1027
+ },
+ {
+ "epoch": 3.7655677655677655,
+ "grad_norm": 11.876708984375,
+ "learning_rate": 4.158730158730159e-05,
+ "loss": 0.0943,
+ "step": 1028
+ },
+ {
+ "epoch": 3.769230769230769,
+ "grad_norm": 83.1320571899414,
+ "learning_rate": 4.1562881562881565e-05,
+ "loss": 0.8116,
+ "step": 1029
+ },
+ {
+ "epoch": 3.772893772893773,
+ "grad_norm": 22.412723541259766,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2086,
+ "step": 1030
+ },
+ {
+ "epoch": 3.7765567765567765,
+ "grad_norm": 11.011713981628418,
+ "learning_rate": 4.1514041514041515e-05,
+ "loss": 0.1001,
+ "step": 1031
+ },
+ {
+ "epoch": 3.78021978021978,
+ "grad_norm": 21.958040237426758,
+ "learning_rate": 4.148962148962149e-05,
+ "loss": 0.8457,
+ "step": 1032
+ },
+ {
+ "epoch": 3.7838827838827838,
+ "grad_norm": 57.3586540222168,
+ "learning_rate": 4.1465201465201465e-05,
+ "loss": 0.1605,
+ "step": 1033
+ },
+ {
+ "epoch": 3.7875457875457874,
+ "grad_norm": 24.261554718017578,
+ "learning_rate": 4.1440781440781444e-05,
+ "loss": 0.1854,
+ "step": 1034
+ },
+ {
+ "epoch": 3.791208791208791,
+ "grad_norm": 31.09326171875,
+ "learning_rate": 4.1416361416361416e-05,
+ "loss": 0.2874,
+ "step": 1035
+ },
+ {
+ "epoch": 3.7948717948717947,
+ "grad_norm": 8.3728666305542,
+ "learning_rate": 4.1391941391941394e-05,
+ "loss": 0.0496,
+ "step": 1036
+ },
+ {
+ "epoch": 3.7985347985347984,
+ "grad_norm": 47.5240592956543,
+ "learning_rate": 4.136752136752137e-05,
+ "loss": 0.2025,
+ "step": 1037
+ },
+ {
+ "epoch": 3.802197802197802,
+ "grad_norm": 51.25822448730469,
+ "learning_rate": 4.1343101343101344e-05,
+ "loss": 0.714,
+ "step": 1038
+ },
+ {
+ "epoch": 3.8058608058608057,
+ "grad_norm": 91.58492279052734,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 2.2889,
+ "step": 1039
+ },
+ {
+ "epoch": 3.8095238095238093,
+ "grad_norm": 4.206390857696533,
+ "learning_rate": 4.1294261294261294e-05,
+ "loss": 0.024,
+ "step": 1040
+ },
+ {
+ "epoch": 3.813186813186813,
+ "grad_norm": 58.49787139892578,
+ "learning_rate": 4.1269841269841266e-05,
+ "loss": 0.7162,
+ "step": 1041
+ },
+ {
+ "epoch": 3.8168498168498166,
+ "grad_norm": 33.38972091674805,
+ "learning_rate": 4.1245421245421244e-05,
+ "loss": 0.3064,
+ "step": 1042
+ },
+ {
+ "epoch": 3.8205128205128203,
+ "grad_norm": 53.251007080078125,
+ "learning_rate": 4.122100122100122e-05,
+ "loss": 0.7376,
+ "step": 1043
+ },
+ {
+ "epoch": 3.824175824175824,
+ "grad_norm": 28.314645767211914,
+ "learning_rate": 4.1196581196581195e-05,
+ "loss": 0.4608,
+ "step": 1044
+ },
+ {
+ "epoch": 3.8278388278388276,
+ "grad_norm": 538.0653076171875,
+ "learning_rate": 4.117216117216117e-05,
+ "loss": 1.5678,
+ "step": 1045
+ },
+ {
+ "epoch": 3.8315018315018317,
+ "grad_norm": 38.662925720214844,
+ "learning_rate": 4.114774114774115e-05,
+ "loss": 1.1084,
+ "step": 1046
+ },
+ {
+ "epoch": 3.8351648351648353,
+ "grad_norm": 31.877248764038086,
+ "learning_rate": 4.112332112332112e-05,
+ "loss": 0.9947,
+ "step": 1047
+ },
+ {
+ "epoch": 3.838827838827839,
+ "grad_norm": 50.17106628417969,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.8024,
+ "step": 1048
+ },
+ {
+ "epoch": 3.8424908424908426,
+ "grad_norm": 18.851001739501953,
+ "learning_rate": 4.107448107448108e-05,
+ "loss": 0.4245,
+ "step": 1049
+ },
+ {
+ "epoch": 3.8461538461538463,
+ "grad_norm": 35.91590881347656,
+ "learning_rate": 4.105006105006105e-05,
+ "loss": 1.1046,
+ "step": 1050
+ },
+ {
+ "epoch": 3.84981684981685,
+ "grad_norm": 24.618389129638672,
+ "learning_rate": 4.102564102564103e-05,
+ "loss": 0.8167,
+ "step": 1051
+ },
+ {
+ "epoch": 3.8534798534798536,
+ "grad_norm": 27.028446197509766,
+ "learning_rate": 4.100122100122101e-05,
+ "loss": 0.6983,
+ "step": 1052
+ },
+ {
+ "epoch": 3.857142857142857,
+ "grad_norm": 17.247610092163086,
+ "learning_rate": 4.0976800976800974e-05,
+ "loss": 0.4761,
+ "step": 1053
+ },
+ {
+ "epoch": 3.860805860805861,
+ "grad_norm": 27.187416076660156,
+ "learning_rate": 4.095238095238095e-05,
+ "loss": 0.794,
+ "step": 1054
+ },
+ {
+ "epoch": 3.8644688644688645,
+ "grad_norm": 35.990623474121094,
+ "learning_rate": 4.0927960927960924e-05,
+ "loss": 0.7874,
+ "step": 1055
+ },
+ {
+ "epoch": 3.868131868131868,
+ "grad_norm": 168.7575225830078,
+ "learning_rate": 4.09035409035409e-05,
+ "loss": 0.6028,
+ "step": 1056
+ },
+ {
+ "epoch": 3.871794871794872,
+ "grad_norm": 31.459491729736328,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6256,
+ "step": 1057
+ },
+ {
+ "epoch": 3.8754578754578755,
+ "grad_norm": 25.053123474121094,
+ "learning_rate": 4.085470085470085e-05,
+ "loss": 0.3041,
+ "step": 1058
+ },
+ {
+ "epoch": 3.879120879120879,
+ "grad_norm": 56.10730743408203,
+ "learning_rate": 4.083028083028083e-05,
+ "loss": 0.8875,
+ "step": 1059
+ },
+ {
+ "epoch": 3.8827838827838828,
+ "grad_norm": 26.897689819335938,
+ "learning_rate": 4.080586080586081e-05,
+ "loss": 0.5291,
+ "step": 1060
+ },
+ {
+ "epoch": 3.8864468864468864,
+ "grad_norm": 40.36210250854492,
+ "learning_rate": 4.078144078144078e-05,
+ "loss": 1.2323,
+ "step": 1061
+ },
+ {
+ "epoch": 3.89010989010989,
+ "grad_norm": 17.556934356689453,
+ "learning_rate": 4.075702075702076e-05,
+ "loss": 0.0951,
+ "step": 1062
+ },
+ {
+ "epoch": 3.8937728937728937,
+ "grad_norm": 54.6690559387207,
+ "learning_rate": 4.073260073260074e-05,
+ "loss": 0.4311,
+ "step": 1063
+ },
+ {
+ "epoch": 3.8974358974358974,
+ "grad_norm": 27.554750442504883,
+ "learning_rate": 4.070818070818071e-05,
+ "loss": 0.2851,
+ "step": 1064
+ },
+ {
+ "epoch": 3.901098901098901,
+ "grad_norm": 14.667935371398926,
+ "learning_rate": 4.068376068376069e-05,
+ "loss": 0.0866,
+ "step": 1065
+ },
+ {
+ "epoch": 3.9047619047619047,
+ "grad_norm": 39.62594985961914,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.2322,
+ "step": 1066
+ },
+ {
+ "epoch": 3.9084249084249083,
+ "grad_norm": 31.457260131835938,
+ "learning_rate": 4.063492063492063e-05,
+ "loss": 0.2554,
+ "step": 1067
+ },
+ {
+ "epoch": 3.912087912087912,
+ "grad_norm": 52.82997131347656,
+ "learning_rate": 4.061050061050061e-05,
+ "loss": 0.44,
+ "step": 1068
+ },
+ {
+ "epoch": 3.9157509157509156,
+ "grad_norm": 56.15779495239258,
+ "learning_rate": 4.058608058608059e-05,
+ "loss": 0.9419,
+ "step": 1069
+ },
+ {
+ "epoch": 3.9194139194139193,
+ "grad_norm": 59.23240661621094,
+ "learning_rate": 4.056166056166056e-05,
+ "loss": 0.5084,
+ "step": 1070
+ },
+ {
+ "epoch": 3.9230769230769234,
+ "grad_norm": 9.644290924072266,
+ "learning_rate": 4.053724053724054e-05,
+ "loss": 0.0456,
+ "step": 1071
+ },
+ {
+ "epoch": 3.926739926739927,
+ "grad_norm": 24.42845916748047,
+ "learning_rate": 4.051282051282052e-05,
+ "loss": 0.0907,
+ "step": 1072
+ },
+ {
+ "epoch": 3.9304029304029307,
+ "grad_norm": 81.36042785644531,
+ "learning_rate": 4.048840048840049e-05,
+ "loss": 1.0178,
+ "step": 1073
+ },
+ {
+ "epoch": 3.9340659340659343,
+ "grad_norm": 63.134071350097656,
+ "learning_rate": 4.046398046398047e-05,
+ "loss": 1.1125,
+ "step": 1074
+ },
+ {
+ "epoch": 3.937728937728938,
+ "grad_norm": 56.59608840942383,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.4465,
+ "step": 1075
+ },
+ {
+ "epoch": 3.9413919413919416,
+ "grad_norm": 48.51662063598633,
+ "learning_rate": 4.041514041514042e-05,
+ "loss": 0.5054,
+ "step": 1076
+ },
+ {
+ "epoch": 3.9450549450549453,
+ "grad_norm": 50.393524169921875,
+ "learning_rate": 4.0390720390720395e-05,
+ "loss": 0.8157,
+ "step": 1077
+ },
+ {
+ "epoch": 3.948717948717949,
+ "grad_norm": 63.414878845214844,
+ "learning_rate": 4.036630036630037e-05,
+ "loss": 0.9598,
+ "step": 1078
+ },
+ {
+ "epoch": 3.9523809523809526,
+ "grad_norm": 35.72902297973633,
+ "learning_rate": 4.034188034188034e-05,
+ "loss": 0.4764,
+ "step": 1079
+ },
+ {
+ "epoch": 3.956043956043956,
+ "grad_norm": 20.452268600463867,
+ "learning_rate": 4.031746031746032e-05,
+ "loss": 0.191,
+ "step": 1080
+ },
+ {
+ "epoch": 3.95970695970696,
+ "grad_norm": 38.23368453979492,
+ "learning_rate": 4.029304029304029e-05,
+ "loss": 0.5218,
+ "step": 1081
+ },
+ {
+ "epoch": 3.9633699633699635,
+ "grad_norm": 79.35212707519531,
+ "learning_rate": 4.026862026862027e-05,
+ "loss": 1.3695,
+ "step": 1082
+ },
+ {
+ "epoch": 3.967032967032967,
+ "grad_norm": 62.0828742980957,
+ "learning_rate": 4.0244200244200246e-05,
+ "loss": 1.4882,
+ "step": 1083
+ },
+ {
+ "epoch": 3.970695970695971,
+ "grad_norm": 35.413734436035156,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.1966,
+ "step": 1084
+ },
+ {
+ "epoch": 3.9743589743589745,
+ "grad_norm": 18.060728073120117,
+ "learning_rate": 4.0195360195360196e-05,
+ "loss": 0.2902,
+ "step": 1085
+ },
+ {
+ "epoch": 3.978021978021978,
+ "grad_norm": 15.263091087341309,
+ "learning_rate": 4.0170940170940174e-05,
+ "loss": 0.1325,
+ "step": 1086
+ },
+ {
+ "epoch": 3.9816849816849818,
+ "grad_norm": 35.8296012878418,
+ "learning_rate": 4.0146520146520146e-05,
+ "loss": 1.0225,
+ "step": 1087
+ },
+ {
+ "epoch": 3.9853479853479854,
+ "grad_norm": 24.120967864990234,
+ "learning_rate": 4.0122100122100125e-05,
+ "loss": 0.4432,
+ "step": 1088
+ },
+ {
+ "epoch": 3.989010989010989,
+ "grad_norm": 47.371070861816406,
+ "learning_rate": 4.00976800976801e-05,
+ "loss": 0.9703,
+ "step": 1089
+ },
+ {
+ "epoch": 3.9926739926739927,
+ "grad_norm": 44.266082763671875,
+ "learning_rate": 4.0073260073260075e-05,
+ "loss": 0.6652,
+ "step": 1090
+ },
+ {
+ "epoch": 3.9963369963369964,
+ "grad_norm": 22.17586898803711,
+ "learning_rate": 4.0048840048840046e-05,
+ "loss": 0.1324,
+ "step": 1091
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 45.4996337890625,
+ "learning_rate": 4.0024420024420025e-05,
+ "loss": 0.3746,
+ "step": 1092
+ },
+ {
+ "epoch": 4.003663003663004,
+ "grad_norm": 31.747541427612305,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 0.5028,
+ "step": 1093
+ },
+ {
+ "epoch": 4.007326007326007,
+ "grad_norm": 13.460674285888672,
+ "learning_rate": 3.9975579975579975e-05,
+ "loss": 0.088,
+ "step": 1094
+ },
+ {
+ "epoch": 4.010989010989011,
+ "grad_norm": 23.94148826599121,
+ "learning_rate": 3.9951159951159953e-05,
+ "loss": 0.1944,
+ "step": 1095
+ },
+ {
+ "epoch": 4.014652014652015,
+ "grad_norm": 60.94758224487305,
+ "learning_rate": 3.9926739926739925e-05,
+ "loss": 0.555,
+ "step": 1096
+ },
+ {
+ "epoch": 4.018315018315018,
+ "grad_norm": 24.47633934020996,
+ "learning_rate": 3.9902319902319904e-05,
+ "loss": 0.1314,
+ "step": 1097
+ },
+ {
+ "epoch": 4.021978021978022,
+ "grad_norm": 42.690162658691406,
+ "learning_rate": 3.987789987789988e-05,
+ "loss": 0.4734,
+ "step": 1098
+ },
+ {
+ "epoch": 4.0256410256410255,
+ "grad_norm": 69.26956939697266,
+ "learning_rate": 3.9853479853479854e-05,
+ "loss": 1.4256,
+ "step": 1099
+ },
+ {
+ "epoch": 4.029304029304029,
+ "grad_norm": 7.718477725982666,
+ "learning_rate": 3.982905982905983e-05,
+ "loss": 0.0549,
+ "step": 1100
+ },
+ {
+ "epoch": 4.032967032967033,
+ "grad_norm": 60.15462875366211,
+ "learning_rate": 3.980463980463981e-05,
+ "loss": 1.2739,
+ "step": 1101
+ },
+ {
+ "epoch": 4.0366300366300365,
+ "grad_norm": 57.749656677246094,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 1.0691,
+ "step": 1102
+ },
+ {
+ "epoch": 4.04029304029304,
+ "grad_norm": 35.57550811767578,
+ "learning_rate": 3.975579975579976e-05,
+ "loss": 0.5114,
+ "step": 1103
+ },
+ {
+ "epoch": 4.043956043956044,
+ "grad_norm": 58.007694244384766,
+ "learning_rate": 3.973137973137973e-05,
+ "loss": 1.1552,
+ "step": 1104
+ },
+ {
+ "epoch": 4.0476190476190474,
+ "grad_norm": 30.794008255004883,
+ "learning_rate": 3.9706959706959704e-05,
+ "loss": 0.7502,
+ "step": 1105
+ },
+ {
+ "epoch": 4.051282051282051,
+ "grad_norm": 35.88930892944336,
+ "learning_rate": 3.968253968253968e-05,
+ "loss": 0.6965,
+ "step": 1106
+ },
+ {
+ "epoch": 4.054945054945055,
+ "grad_norm": 25.719144821166992,
+ "learning_rate": 3.9658119658119654e-05,
+ "loss": 0.4581,
+ "step": 1107
+ },
+ {
+ "epoch": 4.058608058608058,
+ "grad_norm": 37.397640228271484,
+ "learning_rate": 3.963369963369963e-05,
+ "loss": 1.0719,
+ "step": 1108
+ },
+ {
+ "epoch": 4.062271062271062,
+ "grad_norm": 25.8681640625,
+ "learning_rate": 3.960927960927961e-05,
+ "loss": 0.7,
+ "step": 1109
+ },
+ {
+ "epoch": 4.065934065934066,
+ "grad_norm": 16.983413696289062,
+ "learning_rate": 3.958485958485958e-05,
+ "loss": 0.2394,
+ "step": 1110
+ },
+ {
+ "epoch": 4.069597069597069,
+ "grad_norm": 31.7902889251709,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.5662,
+ "step": 1111
+ },
+ {
+ "epoch": 4.073260073260073,
+ "grad_norm": 37.51417922973633,
+ "learning_rate": 3.953601953601954e-05,
+ "loss": 0.3483,
+ "step": 1112
+ },
+ {
+ "epoch": 4.076923076923077,
+ "grad_norm": 24.01732635498047,
+ "learning_rate": 3.951159951159951e-05,
+ "loss": 0.2527,
+ "step": 1113
+ },
+ {
+ "epoch": 4.08058608058608,
+ "grad_norm": 29.152162551879883,
+ "learning_rate": 3.948717948717949e-05,
+ "loss": 0.4485,
+ "step": 1114
+ },
+ {
+ "epoch": 4.084249084249084,
+ "grad_norm": 31.519155502319336,
+ "learning_rate": 3.946275946275947e-05,
+ "loss": 0.2485,
+ "step": 1115
+ },
+ {
+ "epoch": 4.087912087912088,
+ "grad_norm": 18.462514877319336,
+ "learning_rate": 3.943833943833944e-05,
+ "loss": 0.1057,
+ "step": 1116
+ },
+ {
+ "epoch": 4.091575091575091,
+ "grad_norm": 35.28910827636719,
+ "learning_rate": 3.941391941391941e-05,
+ "loss": 0.3589,
+ "step": 1117
+ },
+ {
+ "epoch": 4.095238095238095,
+ "grad_norm": 47.00394058227539,
+ "learning_rate": 3.938949938949939e-05,
+ "loss": 0.5148,
+ "step": 1118
+ },
+ {
+ "epoch": 4.0989010989010985,
+ "grad_norm": 24.796058654785156,
+ "learning_rate": 3.936507936507936e-05,
+ "loss": 0.2486,
+ "step": 1119
+ },
+ {
+ "epoch": 4.102564102564102,
+ "grad_norm": 27.098758697509766,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.196,
+ "step": 1120
+ },
+ {
+ "epoch": 4.106227106227106,
+ "grad_norm": 59.4343147277832,
+ "learning_rate": 3.931623931623932e-05,
+ "loss": 0.8093,
+ "step": 1121
+ },
+ {
+ "epoch": 4.1098901098901095,
+ "grad_norm": 57.0518684387207,
+ "learning_rate": 3.929181929181929e-05,
+ "loss": 0.6495,
+ "step": 1122
+ },
+ {
+ "epoch": 4.113553113553113,
+ "grad_norm": 42.01070022583008,
+ "learning_rate": 3.926739926739927e-05,
+ "loss": 0.3272,
+ "step": 1123
+ },
+ {
+ "epoch": 4.117216117216117,
+ "grad_norm": 72.11932373046875,
+ "learning_rate": 3.924297924297925e-05,
+ "loss": 1.2542,
+ "step": 1124
+ },
+ {
+ "epoch": 4.1208791208791204,
+ "grad_norm": 13.270249366760254,
+ "learning_rate": 3.921855921855922e-05,
+ "loss": 0.0843,
+ "step": 1125
+ },
+ {
+ "epoch": 4.124542124542124,
+ "grad_norm": 32.058258056640625,
+ "learning_rate": 3.91941391941392e-05,
+ "loss": 0.158,
+ "step": 1126
+ },
+ {
+ "epoch": 4.128205128205128,
+ "grad_norm": 37.67665481567383,
+ "learning_rate": 3.9169719169719176e-05,
+ "loss": 0.3463,
+ "step": 1127
+ },
+ {
+ "epoch": 4.131868131868132,
+ "grad_norm": 98.33348846435547,
+ "learning_rate": 3.914529914529915e-05,
+ "loss": 0.8846,
+ "step": 1128
+ },
+ {
+ "epoch": 4.135531135531136,
+ "grad_norm": 49.11083221435547,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.4124,
+ "step": 1129
+ },
+ {
+ "epoch": 4.13919413919414,
+ "grad_norm": 45.87646484375,
+ "learning_rate": 3.90964590964591e-05,
+ "loss": 0.3594,
+ "step": 1130
+ },
+ {
+ "epoch": 4.142857142857143,
+ "grad_norm": 49.34445571899414,
+ "learning_rate": 3.907203907203907e-05,
+ "loss": 0.1947,
+ "step": 1131
+ },
+ {
+ "epoch": 4.146520146520147,
+ "grad_norm": 8.654282569885254,
+ "learning_rate": 3.904761904761905e-05,
+ "loss": 0.0923,
+ "step": 1132
+ },
+ {
+ "epoch": 4.1501831501831505,
+ "grad_norm": 12.46809196472168,
+ "learning_rate": 3.902319902319902e-05,
+ "loss": 0.0841,
+ "step": 1133
+ },
+ {
+ "epoch": 4.153846153846154,
+ "grad_norm": 33.9839973449707,
+ "learning_rate": 3.8998778998779e-05,
+ "loss": 0.5838,
+ "step": 1134
+ },
+ {
+ "epoch": 4.157509157509158,
+ "grad_norm": 36.68742752075195,
+ "learning_rate": 3.8974358974358976e-05,
+ "loss": 0.5483,
+ "step": 1135
+ },
+ {
+ "epoch": 4.1611721611721615,
+ "grad_norm": 26.862363815307617,
+ "learning_rate": 3.894993894993895e-05,
+ "loss": 0.2464,
+ "step": 1136
+ },
+ {
+ "epoch": 4.164835164835165,
+ "grad_norm": 16.219947814941406,
+ "learning_rate": 3.8925518925518926e-05,
+ "loss": 0.1878,
+ "step": 1137
+ },
+ {
+ "epoch": 4.168498168498169,
+ "grad_norm": 36.86198425292969,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.3656,
+ "step": 1138
+ },
+ {
+ "epoch": 4.172161172161172,
+ "grad_norm": 36.241432189941406,
+ "learning_rate": 3.8876678876678877e-05,
+ "loss": 0.8421,
+ "step": 1139
+ },
+ {
+ "epoch": 4.175824175824176,
+ "grad_norm": 45.81169891357422,
+ "learning_rate": 3.8852258852258855e-05,
+ "loss": 0.6081,
+ "step": 1140
+ },
+ {
+ "epoch": 4.17948717948718,
+ "grad_norm": 30.914037704467773,
+ "learning_rate": 3.8827838827838833e-05,
+ "loss": 0.2975,
+ "step": 1141
+ },
+ {
+ "epoch": 4.183150183150183,
+ "grad_norm": 4.663424968719482,
+ "learning_rate": 3.8803418803418805e-05,
+ "loss": 0.0319,
+ "step": 1142
+ },
+ {
+ "epoch": 4.186813186813187,
+ "grad_norm": 33.163551330566406,
+ "learning_rate": 3.877899877899878e-05,
+ "loss": 0.236,
+ "step": 1143
+ },
+ {
+ "epoch": 4.190476190476191,
+ "grad_norm": 20.820547103881836,
+ "learning_rate": 3.8754578754578755e-05,
+ "loss": 0.1907,
+ "step": 1144
+ },
+ {
+ "epoch": 4.194139194139194,
+ "grad_norm": 65.4993896484375,
+ "learning_rate": 3.873015873015873e-05,
+ "loss": 0.4195,
+ "step": 1145
+ },
+ {
+ "epoch": 4.197802197802198,
+ "grad_norm": 13.253530502319336,
+ "learning_rate": 3.8705738705738705e-05,
+ "loss": 0.1496,
+ "step": 1146
+ },
+ {
+ "epoch": 4.201465201465202,
+ "grad_norm": 18.291889190673828,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 0.1544,
+ "step": 1147
+ },
+ {
+ "epoch": 4.205128205128205,
+ "grad_norm": 32.1517448425293,
+ "learning_rate": 3.8656898656898656e-05,
+ "loss": 0.3317,
+ "step": 1148
+ },
+ {
+ "epoch": 4.208791208791209,
+ "grad_norm": 37.809669494628906,
+ "learning_rate": 3.8632478632478634e-05,
+ "loss": 0.394,
+ "step": 1149
+ },
+ {
+ "epoch": 4.212454212454213,
+ "grad_norm": 113.17266082763672,
+ "learning_rate": 3.860805860805861e-05,
+ "loss": 1.2368,
+ "step": 1150
+ },
+ {
+ "epoch": 4.216117216117216,
+ "grad_norm": 10.35407543182373,
+ "learning_rate": 3.8583638583638584e-05,
+ "loss": 0.0584,
+ "step": 1151
+ },
+ {
+ "epoch": 4.21978021978022,
+ "grad_norm": 56.98881530761719,
+ "learning_rate": 3.855921855921856e-05,
+ "loss": 0.8088,
+ "step": 1152
+ },
+ {
+ "epoch": 4.2234432234432235,
+ "grad_norm": 45.7849006652832,
+ "learning_rate": 3.853479853479854e-05,
+ "loss": 0.6471,
+ "step": 1153
+ },
+ {
+ "epoch": 4.227106227106227,
+ "grad_norm": 43.57515335083008,
+ "learning_rate": 3.851037851037851e-05,
+ "loss": 0.2924,
+ "step": 1154
+ },
+ {
+ "epoch": 4.230769230769231,
+ "grad_norm": 14.98643684387207,
+ "learning_rate": 3.848595848595849e-05,
+ "loss": 0.1108,
+ "step": 1155
+ },
+ {
+ "epoch": 4.2344322344322345,
+ "grad_norm": 27.162513732910156,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.3856,
+ "step": 1156
+ },
+ {
+ "epoch": 4.238095238095238,
+ "grad_norm": 56.45119094848633,
+ "learning_rate": 3.8437118437118435e-05,
+ "loss": 0.6752,
+ "step": 1157
+ },
+ {
+ "epoch": 4.241758241758242,
+ "grad_norm": 15.522347450256348,
+ "learning_rate": 3.841269841269841e-05,
+ "loss": 0.1419,
+ "step": 1158
+ },
+ {
+ "epoch": 4.245421245421245,
+ "grad_norm": 16.31126594543457,
+ "learning_rate": 3.8388278388278385e-05,
+ "loss": 0.1303,
+ "step": 1159
+ },
+ {
+ "epoch": 4.249084249084249,
+ "grad_norm": 12.398606300354004,
+ "learning_rate": 3.836385836385836e-05,
+ "loss": 0.1306,
+ "step": 1160
+ },
+ {
+ "epoch": 4.252747252747253,
+ "grad_norm": 19.660768508911133,
+ "learning_rate": 3.833943833943834e-05,
+ "loss": 0.1554,
+ "step": 1161
+ },
+ {
+ "epoch": 4.256410256410256,
+ "grad_norm": 131.451416015625,
+ "learning_rate": 3.831501831501831e-05,
+ "loss": 0.2774,
+ "step": 1162
+ },
+ {
+ "epoch": 4.26007326007326,
+ "grad_norm": 42.0703125,
+ "learning_rate": 3.829059829059829e-05,
+ "loss": 0.471,
+ "step": 1163
+ },
+ {
+ "epoch": 4.263736263736264,
+ "grad_norm": 52.415096282958984,
+ "learning_rate": 3.826617826617827e-05,
+ "loss": 0.7872,
+ "step": 1164
+ },
+ {
+ "epoch": 4.267399267399267,
+ "grad_norm": 35.990421295166016,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.4495,
+ "step": 1165
+ },
+ {
+ "epoch": 4.271062271062271,
+ "grad_norm": 40.330265045166016,
+ "learning_rate": 3.821733821733822e-05,
+ "loss": 0.4009,
+ "step": 1166
+ },
+ {
+ "epoch": 4.274725274725275,
+ "grad_norm": 42.55587387084961,
+ "learning_rate": 3.81929181929182e-05,
+ "loss": 1.6215,
+ "step": 1167
+ },
+ {
+ "epoch": 4.278388278388278,
+ "grad_norm": 30.704498291015625,
+ "learning_rate": 3.816849816849817e-05,
+ "loss": 0.3539,
+ "step": 1168
+ },
+ {
+ "epoch": 4.282051282051282,
+ "grad_norm": 10.239601135253906,
+ "learning_rate": 3.814407814407814e-05,
+ "loss": 0.0779,
+ "step": 1169
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 37.00144577026367,
+ "learning_rate": 3.811965811965812e-05,
+ "loss": 0.4089,
+ "step": 1170
+ },
+ {
+ "epoch": 4.289377289377289,
+ "grad_norm": 40.18193817138672,
+ "learning_rate": 3.809523809523809e-05,
+ "loss": 0.4854,
+ "step": 1171
+ },
+ {
+ "epoch": 4.293040293040293,
+ "grad_norm": 46.78989028930664,
+ "learning_rate": 3.807081807081807e-05,
+ "loss": 0.5863,
+ "step": 1172
+ },
+ {
+ "epoch": 4.2967032967032965,
+ "grad_norm": 49.5102653503418,
+ "learning_rate": 3.804639804639805e-05,
+ "loss": 1.0118,
+ "step": 1173
+ },
+ {
+ "epoch": 4.3003663003663,
+ "grad_norm": 30.41546058654785,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.2616,
+ "step": 1174
+ },
+ {
+ "epoch": 4.304029304029304,
+ "grad_norm": 41.22653579711914,
+ "learning_rate": 3.7997557997558e-05,
+ "loss": 0.5852,
+ "step": 1175
+ },
+ {
+ "epoch": 4.3076923076923075,
+ "grad_norm": 4.033203125,
+ "learning_rate": 3.797313797313798e-05,
+ "loss": 0.0221,
+ "step": 1176
+ },
+ {
+ "epoch": 4.311355311355311,
+ "grad_norm": 13.03472900390625,
+ "learning_rate": 3.794871794871795e-05,
+ "loss": 0.1499,
+ "step": 1177
+ },
+ {
+ "epoch": 4.315018315018315,
+ "grad_norm": 24.690824508666992,
+ "learning_rate": 3.792429792429793e-05,
+ "loss": 0.2631,
+ "step": 1178
+ },
+ {
+ "epoch": 4.318681318681318,
+ "grad_norm": 32.594451904296875,
+ "learning_rate": 3.7899877899877906e-05,
+ "loss": 0.2988,
+ "step": 1179
+ },
+ {
+ "epoch": 4.322344322344322,
+ "grad_norm": 10.510795593261719,
+ "learning_rate": 3.787545787545788e-05,
+ "loss": 0.0499,
+ "step": 1180
+ },
+ {
+ "epoch": 4.326007326007326,
+ "grad_norm": 65.71479034423828,
+ "learning_rate": 3.785103785103785e-05,
+ "loss": 0.9048,
+ "step": 1181
+ },
+ {
+ "epoch": 4.329670329670329,
+ "grad_norm": 12.129572868347168,
+ "learning_rate": 3.782661782661783e-05,
+ "loss": 0.0629,
+ "step": 1182
+ },
+ {
+ "epoch": 4.333333333333333,
+ "grad_norm": 88.66580200195312,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.8276,
+ "step": 1183
+ },
+ {
+ "epoch": 4.336996336996337,
+ "grad_norm": 35.2215461730957,
+ "learning_rate": 3.777777777777778e-05,
+ "loss": 0.2996,
+ "step": 1184
+ },
+ {
+ "epoch": 4.34065934065934,
+ "grad_norm": 29.870285034179688,
+ "learning_rate": 3.775335775335775e-05,
+ "loss": 0.2152,
+ "step": 1185
+ },
+ {
+ "epoch": 4.344322344322344,
+ "grad_norm": 30.441116333007812,
+ "learning_rate": 3.772893772893773e-05,
+ "loss": 0.6761,
+ "step": 1186
+ },
+ {
+ "epoch": 4.347985347985348,
+ "grad_norm": 22.49298095703125,
+ "learning_rate": 3.770451770451771e-05,
+ "loss": 0.7508,
+ "step": 1187
+ },
+ {
+ "epoch": 4.351648351648351,
+ "grad_norm": 22.43603515625,
+ "learning_rate": 3.768009768009768e-05,
+ "loss": 0.3601,
+ "step": 1188
+ },
+ {
+ "epoch": 4.355311355311355,
+ "grad_norm": 38.21080780029297,
+ "learning_rate": 3.765567765567766e-05,
+ "loss": 0.3769,
+ "step": 1189
+ },
+ {
+ "epoch": 4.358974358974359,
+ "grad_norm": 48.90728759765625,
+ "learning_rate": 3.7631257631257635e-05,
+ "loss": 0.4259,
+ "step": 1190
+ },
+ {
+ "epoch": 4.362637362637362,
+ "grad_norm": 7.331233024597168,
+ "learning_rate": 3.760683760683761e-05,
+ "loss": 0.0697,
+ "step": 1191
+ },
+ {
+ "epoch": 4.366300366300366,
+ "grad_norm": 25.096189498901367,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.2196,
+ "step": 1192
+ },
+ {
+ "epoch": 4.36996336996337,
+ "grad_norm": 53.813209533691406,
+ "learning_rate": 3.7557997557997564e-05,
+ "loss": 0.3785,
+ "step": 1193
+ },
+ {
+ "epoch": 4.373626373626374,
+ "grad_norm": 13.184123039245605,
+ "learning_rate": 3.753357753357753e-05,
+ "loss": 0.1747,
+ "step": 1194
+ },
+ {
+ "epoch": 4.377289377289378,
+ "grad_norm": 1.818351149559021,
+ "learning_rate": 3.750915750915751e-05,
+ "loss": 0.0158,
+ "step": 1195
+ },
+ {
+ "epoch": 4.380952380952381,
+ "grad_norm": 63.21619415283203,
+ "learning_rate": 3.7484737484737486e-05,
+ "loss": 0.2863,
+ "step": 1196
+ },
+ {
+ "epoch": 4.384615384615385,
+ "grad_norm": 32.59927749633789,
+ "learning_rate": 3.746031746031746e-05,
+ "loss": 0.4261,
+ "step": 1197
+ },
+ {
+ "epoch": 4.388278388278389,
+ "grad_norm": 36.5265998840332,
+ "learning_rate": 3.7435897435897436e-05,
+ "loss": 0.8064,
+ "step": 1198
+ },
+ {
+ "epoch": 4.391941391941392,
+ "grad_norm": 47.726905822753906,
+ "learning_rate": 3.7411477411477414e-05,
+ "loss": 0.8884,
+ "step": 1199
+ },
+ {
+ "epoch": 4.395604395604396,
+ "grad_norm": 12.621973037719727,
+ "learning_rate": 3.7387057387057386e-05,
+ "loss": 0.1085,
+ "step": 1200
+ },
+ {
+ "epoch": 4.3992673992674,
+ "grad_norm": 24.7711124420166,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 0.2249,
+ "step": 1201
+ },
+ {
+ "epoch": 4.402930402930403,
+ "grad_norm": 39.19346618652344,
+ "learning_rate": 3.733821733821734e-05,
+ "loss": 0.4065,
+ "step": 1202
+ },
+ {
+ "epoch": 4.406593406593407,
+ "grad_norm": 20.3857421875,
+ "learning_rate": 3.7313797313797315e-05,
+ "loss": 0.1653,
+ "step": 1203
+ },
+ {
+ "epoch": 4.410256410256411,
+ "grad_norm": 58.15717697143555,
+ "learning_rate": 3.728937728937729e-05,
+ "loss": 0.8774,
+ "step": 1204
+ },
+ {
+ "epoch": 4.413919413919414,
+ "grad_norm": 28.05725860595703,
+ "learning_rate": 3.726495726495727e-05,
+ "loss": 0.1695,
+ "step": 1205
+ },
+ {
+ "epoch": 4.417582417582418,
+ "grad_norm": 24.635583877563477,
+ "learning_rate": 3.724053724053724e-05,
+ "loss": 0.4871,
+ "step": 1206
+ },
+ {
+ "epoch": 4.4212454212454215,
+ "grad_norm": 16.8306941986084,
+ "learning_rate": 3.7216117216117215e-05,
+ "loss": 0.0863,
+ "step": 1207
+ },
+ {
+ "epoch": 4.424908424908425,
+ "grad_norm": 16.2359676361084,
+ "learning_rate": 3.719169719169719e-05,
+ "loss": 0.077,
+ "step": 1208
+ },
+ {
+ "epoch": 4.428571428571429,
+ "grad_norm": 31.431425094604492,
+ "learning_rate": 3.7167277167277165e-05,
+ "loss": 0.2815,
+ "step": 1209
+ },
+ {
+ "epoch": 4.4322344322344325,
+ "grad_norm": 31.44464874267578,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.2237,
+ "step": 1210
+ },
+ {
+ "epoch": 4.435897435897436,
+ "grad_norm": 23.390378952026367,
+ "learning_rate": 3.7118437118437115e-05,
+ "loss": 0.1791,
+ "step": 1211
+ },
+ {
+ "epoch": 4.43956043956044,
+ "grad_norm": 48.210079193115234,
+ "learning_rate": 3.7094017094017094e-05,
+ "loss": 0.517,
+ "step": 1212
+ },
+ {
+ "epoch": 4.443223443223443,
+ "grad_norm": 45.35732650756836,
+ "learning_rate": 3.706959706959707e-05,
+ "loss": 0.4638,
+ "step": 1213
+ },
+ {
+ "epoch": 4.446886446886447,
+ "grad_norm": 16.88719367980957,
+ "learning_rate": 3.7045177045177044e-05,
+ "loss": 0.1203,
+ "step": 1214
+ },
+ {
+ "epoch": 4.450549450549451,
+ "grad_norm": 58.36906433105469,
+ "learning_rate": 3.702075702075702e-05,
+ "loss": 0.7366,
+ "step": 1215
+ },
+ {
+ "epoch": 4.454212454212454,
+ "grad_norm": 49.00838088989258,
+ "learning_rate": 3.6996336996337e-05,
+ "loss": 0.739,
+ "step": 1216
+ },
+ {
+ "epoch": 4.457875457875458,
+ "grad_norm": 42.87287521362305,
+ "learning_rate": 3.697191697191697e-05,
+ "loss": 1.3861,
+ "step": 1217
+ },
+ {
+ "epoch": 4.461538461538462,
+ "grad_norm": 44.62813949584961,
+ "learning_rate": 3.694749694749695e-05,
+ "loss": 0.549,
+ "step": 1218
+ },
+ {
+ "epoch": 4.465201465201465,
+ "grad_norm": 6.473313331604004,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.0407,
+ "step": 1219
+ },
+ {
+ "epoch": 4.468864468864469,
+ "grad_norm": 35.04784393310547,
+ "learning_rate": 3.6898656898656894e-05,
+ "loss": 0.3146,
+ "step": 1220
+ },
+ {
+ "epoch": 4.472527472527473,
+ "grad_norm": 44.79425811767578,
+ "learning_rate": 3.687423687423687e-05,
+ "loss": 0.5206,
+ "step": 1221
+ },
+ {
+ "epoch": 4.476190476190476,
+ "grad_norm": 36.52440643310547,
+ "learning_rate": 3.684981684981685e-05,
+ "loss": 0.5977,
+ "step": 1222
+ },
+ {
+ "epoch": 4.47985347985348,
+ "grad_norm": 58.15000915527344,
+ "learning_rate": 3.682539682539682e-05,
+ "loss": 1.0533,
+ "step": 1223
+ },
+ {
+ "epoch": 4.483516483516484,
+ "grad_norm": 32.33371353149414,
+ "learning_rate": 3.68009768009768e-05,
+ "loss": 0.3928,
+ "step": 1224
+ },
+ {
+ "epoch": 4.487179487179487,
+ "grad_norm": 44.501529693603516,
+ "learning_rate": 3.677655677655678e-05,
+ "loss": 0.8471,
+ "step": 1225
+ },
+ {
+ "epoch": 4.490842490842491,
+ "grad_norm": 41.62052536010742,
+ "learning_rate": 3.675213675213675e-05,
+ "loss": 0.7731,
+ "step": 1226
+ },
+ {
+ "epoch": 4.4945054945054945,
+ "grad_norm": 12.638876914978027,
+ "learning_rate": 3.672771672771673e-05,
+ "loss": 0.1219,
+ "step": 1227
+ },
+ {
+ "epoch": 4.498168498168498,
+ "grad_norm": 12.034523010253906,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.158,
+ "step": 1228
+ },
+ {
+ "epoch": 4.501831501831502,
+ "grad_norm": 42.04001235961914,
+ "learning_rate": 3.667887667887668e-05,
+ "loss": 0.8556,
+ "step": 1229
+ },
+ {
+ "epoch": 4.5054945054945055,
+ "grad_norm": 36.28947448730469,
+ "learning_rate": 3.665445665445666e-05,
+ "loss": 0.6569,
+ "step": 1230
+ },
+ {
+ "epoch": 4.509157509157509,
+ "grad_norm": 40.263912200927734,
+ "learning_rate": 3.663003663003664e-05,
+ "loss": 0.7625,
+ "step": 1231
+ },
+ {
+ "epoch": 4.512820512820513,
+ "grad_norm": 23.760005950927734,
+ "learning_rate": 3.660561660561661e-05,
+ "loss": 0.2465,
+ "step": 1232
+ },
+ {
+ "epoch": 4.516483516483516,
+ "grad_norm": 23.589109420776367,
+ "learning_rate": 3.658119658119658e-05,
+ "loss": 0.4408,
+ "step": 1233
+ },
+ {
+ "epoch": 4.52014652014652,
+ "grad_norm": 30.512271881103516,
+ "learning_rate": 3.655677655677655e-05,
+ "loss": 0.8748,
+ "step": 1234
+ },
+ {
+ "epoch": 4.523809523809524,
+ "grad_norm": 8.060181617736816,
+ "learning_rate": 3.653235653235653e-05,
+ "loss": 0.0818,
+ "step": 1235
+ },
+ {
+ "epoch": 4.527472527472527,
+ "grad_norm": 14.353645324707031,
+ "learning_rate": 3.650793650793651e-05,
+ "loss": 0.1899,
+ "step": 1236
+ },
+ {
+ "epoch": 4.531135531135531,
+ "grad_norm": 12.20384693145752,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1618,
+ "step": 1237
+ },
+ {
+ "epoch": 4.534798534798535,
+ "grad_norm": 182.4698028564453,
+ "learning_rate": 3.645909645909646e-05,
+ "loss": 0.9223,
+ "step": 1238
+ },
+ {
+ "epoch": 4.538461538461538,
+ "grad_norm": 33.137081146240234,
+ "learning_rate": 3.643467643467644e-05,
+ "loss": 0.7708,
+ "step": 1239
+ },
+ {
+ "epoch": 4.542124542124542,
+ "grad_norm": 19.895912170410156,
+ "learning_rate": 3.641025641025641e-05,
+ "loss": 0.164,
+ "step": 1240
+ },
+ {
+ "epoch": 4.545787545787546,
+ "grad_norm": 62.816864013671875,
+ "learning_rate": 3.638583638583639e-05,
+ "loss": 1.4675,
+ "step": 1241
+ },
+ {
+ "epoch": 4.549450549450549,
+ "grad_norm": 35.58034896850586,
+ "learning_rate": 3.6361416361416366e-05,
+ "loss": 0.4449,
+ "step": 1242
+ },
+ {
+ "epoch": 4.553113553113553,
+ "grad_norm": 21.993911743164062,
+ "learning_rate": 3.633699633699634e-05,
+ "loss": 0.2302,
+ "step": 1243
+ },
+ {
+ "epoch": 4.556776556776557,
+ "grad_norm": 33.743812561035156,
+ "learning_rate": 3.6312576312576316e-05,
+ "loss": 0.1782,
+ "step": 1244
+ },
+ {
+ "epoch": 4.56043956043956,
+ "grad_norm": 40.135711669921875,
+ "learning_rate": 3.6288156288156294e-05,
+ "loss": 0.7147,
+ "step": 1245
+ },
+ {
+ "epoch": 4.564102564102564,
+ "grad_norm": 2.47517728805542,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0188,
+ "step": 1246
+ },
+ {
+ "epoch": 4.5677655677655675,
+ "grad_norm": 22.023807525634766,
+ "learning_rate": 3.623931623931624e-05,
+ "loss": 0.3182,
+ "step": 1247
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 21.8381290435791,
+ "learning_rate": 3.6214896214896216e-05,
+ "loss": 0.4161,
+ "step": 1248
+ },
+ {
+ "epoch": 4.575091575091575,
+ "grad_norm": 20.989906311035156,
+ "learning_rate": 3.619047619047619e-05,
+ "loss": 0.2972,
+ "step": 1249
+ },
+ {
+ "epoch": 4.5787545787545785,
+ "grad_norm": 75.8060073852539,
+ "learning_rate": 3.6166056166056166e-05,
+ "loss": 0.6194,
+ "step": 1250
+ },
+ {
+ "epoch": 4.582417582417582,
+ "grad_norm": 40.85308074951172,
+ "learning_rate": 3.6141636141636145e-05,
+ "loss": 0.7707,
+ "step": 1251
+ },
+ {
+ "epoch": 4.586080586080586,
+ "grad_norm": 62.22278594970703,
+ "learning_rate": 3.6117216117216117e-05,
+ "loss": 0.6872,
+ "step": 1252
+ },
+ {
+ "epoch": 4.589743589743589,
+ "grad_norm": 30.27143669128418,
+ "learning_rate": 3.6092796092796095e-05,
+ "loss": 0.484,
+ "step": 1253
+ },
+ {
+ "epoch": 4.593406593406593,
+ "grad_norm": 44.08026123046875,
+ "learning_rate": 3.6068376068376073e-05,
+ "loss": 0.8593,
+ "step": 1254
+ },
+ {
+ "epoch": 4.597069597069597,
+ "grad_norm": 22.63222312927246,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.2542,
+ "step": 1255
+ },
+ {
+ "epoch": 4.6007326007326,
+ "grad_norm": 7.024168014526367,
+ "learning_rate": 3.6019536019536024e-05,
+ "loss": 0.0777,
+ "step": 1256
+ },
+ {
+ "epoch": 4.604395604395604,
+ "grad_norm": 24.981502532958984,
+ "learning_rate": 3.5995115995116e-05,
+ "loss": 0.2332,
+ "step": 1257
+ },
+ {
+ "epoch": 4.608058608058608,
+ "grad_norm": 28.929807662963867,
+ "learning_rate": 3.5970695970695974e-05,
+ "loss": 0.3665,
+ "step": 1258
+ },
+ {
+ "epoch": 4.611721611721611,
+ "grad_norm": 36.756683349609375,
+ "learning_rate": 3.5946275946275945e-05,
+ "loss": 1.2777,
+ "step": 1259
+ },
+ {
+ "epoch": 4.615384615384615,
+ "grad_norm": 53.04755783081055,
+ "learning_rate": 3.592185592185592e-05,
+ "loss": 0.3001,
+ "step": 1260
+ },
+ {
+ "epoch": 4.619047619047619,
+ "grad_norm": 39.71099853515625,
+ "learning_rate": 3.5897435897435896e-05,
+ "loss": 0.7756,
+ "step": 1261
+ },
+ {
+ "epoch": 4.622710622710622,
+ "grad_norm": 21.80796241760254,
+ "learning_rate": 3.5873015873015874e-05,
+ "loss": 0.2329,
+ "step": 1262
+ },
+ {
+ "epoch": 4.626373626373626,
+ "grad_norm": 25.909208297729492,
+ "learning_rate": 3.5848595848595846e-05,
+ "loss": 0.5081,
+ "step": 1263
+ },
+ {
+ "epoch": 4.63003663003663,
+ "grad_norm": 46.62733840942383,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.8265,
+ "step": 1264
+ },
+ {
+ "epoch": 4.633699633699633,
+ "grad_norm": 5.689383506774902,
+ "learning_rate": 3.57997557997558e-05,
+ "loss": 0.055,
+ "step": 1265
+ },
+ {
+ "epoch": 4.637362637362637,
+ "grad_norm": 23.30045509338379,
+ "learning_rate": 3.5775335775335774e-05,
+ "loss": 0.3397,
+ "step": 1266
+ },
+ {
+ "epoch": 4.641025641025641,
+ "grad_norm": 15.685534477233887,
+ "learning_rate": 3.575091575091575e-05,
+ "loss": 0.0862,
+ "step": 1267
+ },
+ {
+ "epoch": 4.644688644688645,
+ "grad_norm": 27.56009864807129,
+ "learning_rate": 3.572649572649573e-05,
+ "loss": 0.4751,
+ "step": 1268
+ },
+ {
+ "epoch": 4.648351648351649,
+ "grad_norm": 18.164905548095703,
+ "learning_rate": 3.57020757020757e-05,
+ "loss": 0.1274,
+ "step": 1269
+ },
+ {
+ "epoch": 4.652014652014652,
+ "grad_norm": 18.178728103637695,
+ "learning_rate": 3.567765567765568e-05,
+ "loss": 0.1246,
+ "step": 1270
+ },
+ {
+ "epoch": 4.655677655677656,
+ "grad_norm": 11.308391571044922,
+ "learning_rate": 3.565323565323565e-05,
+ "loss": 0.0937,
+ "step": 1271
+ },
+ {
+ "epoch": 4.65934065934066,
+ "grad_norm": 38.507469177246094,
+ "learning_rate": 3.5628815628815625e-05,
+ "loss": 0.4616,
+ "step": 1272
+ },
+ {
+ "epoch": 4.663003663003663,
+ "grad_norm": 9.642159461975098,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.0772,
+ "step": 1273
+ },
+ {
+ "epoch": 4.666666666666667,
+ "grad_norm": 31.854310989379883,
+ "learning_rate": 3.557997557997558e-05,
+ "loss": 0.2349,
+ "step": 1274
+ },
+ {
+ "epoch": 4.670329670329671,
+ "grad_norm": 53.341617584228516,
+ "learning_rate": 3.555555555555555e-05,
+ "loss": 0.2926,
+ "step": 1275
+ },
+ {
+ "epoch": 4.673992673992674,
+ "grad_norm": 24.003368377685547,
+ "learning_rate": 3.553113553113553e-05,
+ "loss": 0.1689,
+ "step": 1276
+ },
+ {
+ "epoch": 4.677655677655678,
+ "grad_norm": 12.198409080505371,
+ "learning_rate": 3.550671550671551e-05,
+ "loss": 0.1001,
+ "step": 1277
+ },
+ {
+ "epoch": 4.681318681318682,
+ "grad_norm": 56.559051513671875,
+ "learning_rate": 3.548229548229548e-05,
+ "loss": 0.5314,
+ "step": 1278
+ },
+ {
+ "epoch": 4.684981684981685,
+ "grad_norm": 17.89840316772461,
+ "learning_rate": 3.545787545787546e-05,
+ "loss": 0.1258,
+ "step": 1279
+ },
+ {
+ "epoch": 4.688644688644689,
+ "grad_norm": 14.37424087524414,
+ "learning_rate": 3.543345543345544e-05,
+ "loss": 0.0925,
+ "step": 1280
+ },
+ {
+ "epoch": 4.6923076923076925,
+ "grad_norm": 21.21650505065918,
+ "learning_rate": 3.540903540903541e-05,
+ "loss": 0.1541,
+ "step": 1281
+ },
+ {
+ "epoch": 4.695970695970696,
+ "grad_norm": 36.1934814453125,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.403,
+ "step": 1282
+ },
+ {
+ "epoch": 4.6996336996337,
+ "grad_norm": 62.917022705078125,
+ "learning_rate": 3.536019536019537e-05,
+ "loss": 1.2771,
+ "step": 1283
+ },
+ {
+ "epoch": 4.7032967032967035,
+ "grad_norm": 30.238500595092773,
+ "learning_rate": 3.533577533577533e-05,
+ "loss": 0.3149,
+ "step": 1284
+ },
+ {
+ "epoch": 4.706959706959707,
+ "grad_norm": 12.155022621154785,
+ "learning_rate": 3.531135531135531e-05,
+ "loss": 0.0543,
+ "step": 1285
+ },
+ {
+ "epoch": 4.710622710622711,
+ "grad_norm": 39.67718505859375,
+ "learning_rate": 3.528693528693528e-05,
+ "loss": 0.4201,
+ "step": 1286
+ },
+ {
+ "epoch": 4.714285714285714,
+ "grad_norm": 46.620235443115234,
+ "learning_rate": 3.526251526251526e-05,
+ "loss": 0.7735,
+ "step": 1287
+ },
+ {
+ "epoch": 4.717948717948718,
+ "grad_norm": 29.740169525146484,
+ "learning_rate": 3.523809523809524e-05,
+ "loss": 0.4753,
+ "step": 1288
+ },
+ {
+ "epoch": 4.721611721611722,
+ "grad_norm": 17.668439865112305,
+ "learning_rate": 3.521367521367521e-05,
+ "loss": 0.0738,
+ "step": 1289
+ },
+ {
+ "epoch": 4.725274725274725,
+ "grad_norm": 29.107847213745117,
+ "learning_rate": 3.518925518925519e-05,
+ "loss": 0.2967,
+ "step": 1290
+ },
+ {
+ "epoch": 4.728937728937729,
+ "grad_norm": 41.70953369140625,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2407,
+ "step": 1291
+ },
+ {
+ "epoch": 4.732600732600733,
+ "grad_norm": 41.50172805786133,
+ "learning_rate": 3.514041514041514e-05,
+ "loss": 0.5012,
+ "step": 1292
+ },
+ {
+ "epoch": 4.736263736263736,
+ "grad_norm": 10.921927452087402,
+ "learning_rate": 3.511599511599512e-05,
+ "loss": 0.0583,
+ "step": 1293
+ },
+ {
+ "epoch": 4.73992673992674,
+ "grad_norm": 10.986832618713379,
+ "learning_rate": 3.5091575091575096e-05,
+ "loss": 0.1684,
+ "step": 1294
+ },
+ {
+ "epoch": 4.743589743589744,
+ "grad_norm": 77.36996459960938,
+ "learning_rate": 3.506715506715507e-05,
+ "loss": 0.1532,
+ "step": 1295
+ },
+ {
+ "epoch": 4.747252747252747,
+ "grad_norm": 2.912205457687378,
+ "learning_rate": 3.5042735042735046e-05,
+ "loss": 0.0178,
+ "step": 1296
+ },
+ {
+ "epoch": 4.750915750915751,
+ "grad_norm": 7.694264888763428,
+ "learning_rate": 3.501831501831502e-05,
+ "loss": 0.0448,
+ "step": 1297
+ },
+ {
+ "epoch": 4.754578754578755,
+ "grad_norm": 59.40597152709961,
+ "learning_rate": 3.499389499389499e-05,
+ "loss": 0.825,
+ "step": 1298
+ },
+ {
+ "epoch": 4.758241758241758,
+ "grad_norm": 44.394065856933594,
+ "learning_rate": 3.496947496947497e-05,
+ "loss": 0.2582,
+ "step": 1299
+ },
+ {
+ "epoch": 4.761904761904762,
+ "grad_norm": 48.07161331176758,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.5681,
+ "step": 1300
+ },
+ {
+ "epoch": 4.7655677655677655,
+ "grad_norm": 47.763275146484375,
+ "learning_rate": 3.492063492063492e-05,
+ "loss": 0.2289,
+ "step": 1301
+ },
+ {
+ "epoch": 4.769230769230769,
+ "grad_norm": 33.30193328857422,
+ "learning_rate": 3.48962148962149e-05,
+ "loss": 0.2646,
+ "step": 1302
+ },
+ {
+ "epoch": 4.772893772893773,
+ "grad_norm": 62.87331008911133,
+ "learning_rate": 3.4871794871794875e-05,
+ "loss": 0.5135,
+ "step": 1303
+ },
+ {
+ "epoch": 4.7765567765567765,
+ "grad_norm": 57.62127685546875,
+ "learning_rate": 3.484737484737485e-05,
+ "loss": 0.6126,
+ "step": 1304
+ },
+ {
+ "epoch": 4.78021978021978,
+ "grad_norm": 35.42237854003906,
+ "learning_rate": 3.4822954822954825e-05,
+ "loss": 0.2312,
+ "step": 1305
+ },
+ {
+ "epoch": 4.783882783882784,
+ "grad_norm": 38.23964309692383,
+ "learning_rate": 3.4798534798534804e-05,
+ "loss": 0.4366,
+ "step": 1306
+ },
+ {
+ "epoch": 4.787545787545787,
+ "grad_norm": 24.94087028503418,
+ "learning_rate": 3.4774114774114776e-05,
+ "loss": 0.2944,
+ "step": 1307
+ },
+ {
+ "epoch": 4.791208791208791,
+ "grad_norm": 43.400047302246094,
+ "learning_rate": 3.4749694749694754e-05,
+ "loss": 0.4749,
+ "step": 1308
+ },
+ {
+ "epoch": 4.794871794871795,
+ "grad_norm": 82.01946258544922,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.6972,
+ "step": 1309
+ },
+ {
+ "epoch": 4.798534798534798,
+ "grad_norm": 25.38723373413086,
+ "learning_rate": 3.47008547008547e-05,
+ "loss": 0.3361,
+ "step": 1310
+ },
+ {
+ "epoch": 4.802197802197802,
+ "grad_norm": 13.022088050842285,
+ "learning_rate": 3.4676434676434676e-05,
+ "loss": 0.1853,
+ "step": 1311
+ },
+ {
+ "epoch": 4.805860805860806,
+ "grad_norm": 30.806135177612305,
+ "learning_rate": 3.465201465201465e-05,
+ "loss": 0.3196,
+ "step": 1312
+ },
+ {
+ "epoch": 4.809523809523809,
+ "grad_norm": 26.30035972595215,
+ "learning_rate": 3.4627594627594626e-05,
+ "loss": 0.2708,
+ "step": 1313
+ },
+ {
+ "epoch": 4.813186813186813,
+ "grad_norm": 6.557223796844482,
+ "learning_rate": 3.4603174603174604e-05,
+ "loss": 0.0815,
+ "step": 1314
+ },
+ {
+ "epoch": 4.816849816849817,
+ "grad_norm": 33.60557174682617,
+ "learning_rate": 3.4578754578754576e-05,
+ "loss": 0.9938,
+ "step": 1315
+ },
+ {
+ "epoch": 4.82051282051282,
+ "grad_norm": 104.2552719116211,
+ "learning_rate": 3.4554334554334555e-05,
+ "loss": 0.1937,
+ "step": 1316
+ },
+ {
+ "epoch": 4.824175824175824,
+ "grad_norm": 41.3105583190918,
+ "learning_rate": 3.452991452991453e-05,
+ "loss": 0.3856,
+ "step": 1317
+ },
+ {
+ "epoch": 4.827838827838828,
+ "grad_norm": 43.52134323120117,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.4823,
+ "step": 1318
+ },
+ {
+ "epoch": 4.831501831501831,
+ "grad_norm": 29.37596893310547,
+ "learning_rate": 3.448107448107448e-05,
+ "loss": 0.1746,
+ "step": 1319
+ },
+ {
+ "epoch": 4.835164835164835,
+ "grad_norm": 13.94152545928955,
+ "learning_rate": 3.445665445665446e-05,
+ "loss": 0.141,
+ "step": 1320
+ },
+ {
+ "epoch": 4.8388278388278385,
+ "grad_norm": 34.95270538330078,
+ "learning_rate": 3.443223443223443e-05,
+ "loss": 0.2701,
+ "step": 1321
+ },
+ {
+ "epoch": 4.842490842490842,
+ "grad_norm": 64.49109649658203,
+ "learning_rate": 3.440781440781441e-05,
+ "loss": 1.095,
+ "step": 1322
+ },
+ {
+ "epoch": 4.846153846153846,
+ "grad_norm": 61.1287727355957,
+ "learning_rate": 3.4383394383394383e-05,
+ "loss": 0.2083,
+ "step": 1323
+ },
+ {
+ "epoch": 4.8498168498168495,
+ "grad_norm": 62.69855499267578,
+ "learning_rate": 3.4358974358974355e-05,
+ "loss": 0.5077,
+ "step": 1324
+ },
+ {
+ "epoch": 4.853479853479853,
+ "grad_norm": 92.53154754638672,
+ "learning_rate": 3.4334554334554334e-05,
+ "loss": 0.7287,
+ "step": 1325
+ },
+ {
+ "epoch": 4.857142857142857,
+ "grad_norm": 98.1663589477539,
+ "learning_rate": 3.431013431013431e-05,
+ "loss": 1.2834,
+ "step": 1326
+ },
+ {
+ "epoch": 4.860805860805861,
+ "grad_norm": 52.24921417236328,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.8187,
+ "step": 1327
+ },
+ {
+ "epoch": 4.864468864468865,
+ "grad_norm": 60.897544860839844,
+ "learning_rate": 3.426129426129426e-05,
+ "loss": 1.5861,
+ "step": 1328
+ },
+ {
+ "epoch": 4.868131868131869,
+ "grad_norm": 21.70830535888672,
+ "learning_rate": 3.423687423687424e-05,
+ "loss": 0.1459,
+ "step": 1329
+ },
+ {
+ "epoch": 4.871794871794872,
+ "grad_norm": 47.87598419189453,
+ "learning_rate": 3.421245421245421e-05,
+ "loss": 1.0044,
+ "step": 1330
+ },
+ {
+ "epoch": 4.875457875457876,
+ "grad_norm": 172.73670959472656,
+ "learning_rate": 3.418803418803419e-05,
+ "loss": 1.4617,
+ "step": 1331
+ },
+ {
+ "epoch": 4.8791208791208796,
+ "grad_norm": 154.93960571289062,
+ "learning_rate": 3.416361416361417e-05,
+ "loss": 1.7488,
+ "step": 1332
+ },
+ {
+ "epoch": 4.882783882783883,
+ "grad_norm": 73.78408813476562,
+ "learning_rate": 3.413919413919414e-05,
+ "loss": 0.5789,
+ "step": 1333
+ },
+ {
+ "epoch": 4.886446886446887,
+ "grad_norm": 35.67369079589844,
+ "learning_rate": 3.411477411477412e-05,
+ "loss": 0.6101,
+ "step": 1334
+ },
+ {
+ "epoch": 4.8901098901098905,
+ "grad_norm": 54.61326599121094,
+ "learning_rate": 3.40903540903541e-05,
+ "loss": 0.7433,
+ "step": 1335
+ },
+ {
+ "epoch": 4.893772893772894,
+ "grad_norm": 28.492923736572266,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.7661,
+ "step": 1336
+ },
+ {
+ "epoch": 4.897435897435898,
+ "grad_norm": 17.2525634765625,
+ "learning_rate": 3.404151404151404e-05,
+ "loss": 0.2423,
+ "step": 1337
+ },
+ {
+ "epoch": 4.9010989010989015,
+ "grad_norm": 55.46605682373047,
+ "learning_rate": 3.401709401709401e-05,
+ "loss": 0.4419,
+ "step": 1338
+ },
+ {
+ "epoch": 4.904761904761905,
+ "grad_norm": 23.03455352783203,
+ "learning_rate": 3.399267399267399e-05,
+ "loss": 0.3046,
+ "step": 1339
+ },
+ {
+ "epoch": 4.908424908424909,
+ "grad_norm": 20.186574935913086,
+ "learning_rate": 3.396825396825397e-05,
+ "loss": 0.3712,
+ "step": 1340
+ },
+ {
+ "epoch": 4.912087912087912,
+ "grad_norm": 22.702407836914062,
+ "learning_rate": 3.394383394383394e-05,
+ "loss": 0.4481,
+ "step": 1341
+ },
+ {
+ "epoch": 4.915750915750916,
+ "grad_norm": 25.723426818847656,
+ "learning_rate": 3.391941391941392e-05,
+ "loss": 0.1832,
+ "step": 1342
+ },
+ {
+ "epoch": 4.91941391941392,
+ "grad_norm": 18.955692291259766,
+ "learning_rate": 3.38949938949939e-05,
+ "loss": 0.1334,
+ "step": 1343
+ },
+ {
+ "epoch": 4.923076923076923,
+ "grad_norm": 20.29511833190918,
+ "learning_rate": 3.387057387057387e-05,
+ "loss": 0.1811,
+ "step": 1344
+ },
+ {
+ "epoch": 4.926739926739927,
+ "grad_norm": 22.23061752319336,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.2643,
+ "step": 1345
+ },
+ {
+ "epoch": 4.930402930402931,
+ "grad_norm": 52.057132720947266,
+ "learning_rate": 3.382173382173383e-05,
+ "loss": 0.5874,
+ "step": 1346
+ },
+ {
+ "epoch": 4.934065934065934,
+ "grad_norm": 66.5381851196289,
+ "learning_rate": 3.37973137973138e-05,
+ "loss": 0.4993,
+ "step": 1347
+ },
+ {
+ "epoch": 4.937728937728938,
+ "grad_norm": 8.25474739074707,
+ "learning_rate": 3.377289377289378e-05,
+ "loss": 0.0263,
+ "step": 1348
+ },
+ {
+ "epoch": 4.941391941391942,
+ "grad_norm": 31.373722076416016,
+ "learning_rate": 3.374847374847375e-05,
+ "loss": 0.288,
+ "step": 1349
+ },
+ {
+ "epoch": 4.945054945054945,
+ "grad_norm": 51.15471267700195,
+ "learning_rate": 3.372405372405372e-05,
+ "loss": 0.7586,
+ "step": 1350
+ },
+ {
+ "epoch": 4.948717948717949,
+ "grad_norm": 39.163639068603516,
+ "learning_rate": 3.36996336996337e-05,
+ "loss": 1.221,
+ "step": 1351
+ },
+ {
+ "epoch": 4.9523809523809526,
+ "grad_norm": 11.033390998840332,
+ "learning_rate": 3.367521367521368e-05,
+ "loss": 0.069,
+ "step": 1352
+ },
+ {
+ "epoch": 4.956043956043956,
+ "grad_norm": 24.14516830444336,
+ "learning_rate": 3.365079365079365e-05,
+ "loss": 0.6001,
+ "step": 1353
+ },
+ {
+ "epoch": 4.95970695970696,
+ "grad_norm": 36.211891174316406,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.5598,
+ "step": 1354
+ },
+ {
+ "epoch": 4.9633699633699635,
+ "grad_norm": 23.723434448242188,
+ "learning_rate": 3.3601953601953606e-05,
+ "loss": 0.3133,
+ "step": 1355
+ },
+ {
+ "epoch": 4.967032967032967,
+ "grad_norm": 21.853551864624023,
+ "learning_rate": 3.357753357753358e-05,
+ "loss": 0.1974,
+ "step": 1356
+ },
+ {
+ "epoch": 4.970695970695971,
+ "grad_norm": 25.392358779907227,
+ "learning_rate": 3.3553113553113556e-05,
+ "loss": 0.5114,
+ "step": 1357
+ },
+ {
+ "epoch": 4.9743589743589745,
+ "grad_norm": 94.81107330322266,
+ "learning_rate": 3.3528693528693534e-05,
+ "loss": 0.4609,
+ "step": 1358
+ },
+ {
+ "epoch": 4.978021978021978,
+ "grad_norm": 24.487186431884766,
+ "learning_rate": 3.3504273504273506e-05,
+ "loss": 0.6613,
+ "step": 1359
+ },
+ {
+ "epoch": 4.981684981684982,
+ "grad_norm": 18.870473861694336,
+ "learning_rate": 3.3479853479853485e-05,
+ "loss": 0.1229,
+ "step": 1360
+ },
+ {
+ "epoch": 4.985347985347985,
+ "grad_norm": 17.630233764648438,
+ "learning_rate": 3.3455433455433456e-05,
+ "loss": 0.1836,
+ "step": 1361
+ },
+ {
+ "epoch": 4.989010989010989,
+ "grad_norm": 24.850299835205078,
+ "learning_rate": 3.343101343101343e-05,
+ "loss": 0.4499,
+ "step": 1362
+ },
+ {
+ "epoch": 4.992673992673993,
+ "grad_norm": 13.472710609436035,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.2,
+ "step": 1363
+ },
+ {
+ "epoch": 4.996336996336996,
+ "grad_norm": 25.112987518310547,
+ "learning_rate": 3.338217338217338e-05,
+ "loss": 0.2978,
+ "step": 1364
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 20.6419620513916,
+ "learning_rate": 3.3357753357753356e-05,
+ "loss": 0.1711,
+ "step": 1365
+ },
+ {
+ "epoch": 5.003663003663004,
+ "grad_norm": 20.868810653686523,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.1433,
+ "step": 1366
+ },
+ {
+ "epoch": 5.007326007326007,
+ "grad_norm": 15.846084594726562,
+ "learning_rate": 3.3308913308913307e-05,
+ "loss": 0.2174,
+ "step": 1367
+ },
+ {
+ "epoch": 5.010989010989011,
+ "grad_norm": 29.00075912475586,
+ "learning_rate": 3.3284493284493285e-05,
+ "loss": 0.5032,
+ "step": 1368
+ },
+ {
+ "epoch": 5.014652014652015,
+ "grad_norm": 33.520896911621094,
+ "learning_rate": 3.3260073260073264e-05,
+ "loss": 0.4061,
+ "step": 1369
+ },
+ {
+ "epoch": 5.018315018315018,
+ "grad_norm": 12.909339904785156,
+ "learning_rate": 3.3235653235653235e-05,
+ "loss": 0.0953,
+ "step": 1370
+ },
+ {
+ "epoch": 5.021978021978022,
+ "grad_norm": 0.2602078318595886,
+ "learning_rate": 3.3211233211233214e-05,
+ "loss": 0.0012,
+ "step": 1371
+ },
+ {
+ "epoch": 5.0256410256410255,
+ "grad_norm": 38.391422271728516,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 0.1825,
+ "step": 1372
+ },
+ {
+ "epoch": 5.029304029304029,
+ "grad_norm": 70.76541900634766,
+ "learning_rate": 3.3162393162393164e-05,
+ "loss": 0.846,
+ "step": 1373
+ },
+ {
+ "epoch": 5.032967032967033,
+ "grad_norm": 17.12116813659668,
+ "learning_rate": 3.3137973137973135e-05,
+ "loss": 0.0827,
+ "step": 1374
+ },
+ {
+ "epoch": 5.0366300366300365,
+ "grad_norm": 10.847224235534668,
+ "learning_rate": 3.3113553113553114e-05,
+ "loss": 0.0598,
+ "step": 1375
+ },
+ {
+ "epoch": 5.04029304029304,
+ "grad_norm": 31.552082061767578,
+ "learning_rate": 3.3089133089133086e-05,
+ "loss": 0.4466,
+ "step": 1376
+ },
+ {
+ "epoch": 5.043956043956044,
+ "grad_norm": 15.32805061340332,
+ "learning_rate": 3.3064713064713064e-05,
+ "loss": 0.0502,
+ "step": 1377
+ },
+ {
+ "epoch": 5.0476190476190474,
+ "grad_norm": 80.18537139892578,
+ "learning_rate": 3.304029304029304e-05,
+ "loss": 0.7377,
+ "step": 1378
+ },
+ {
+ "epoch": 5.051282051282051,
+ "grad_norm": 11.73173713684082,
+ "learning_rate": 3.3015873015873014e-05,
+ "loss": 0.1129,
+ "step": 1379
+ },
+ {
+ "epoch": 5.054945054945055,
+ "grad_norm": 46.249935150146484,
+ "learning_rate": 3.299145299145299e-05,
+ "loss": 0.5367,
+ "step": 1380
+ },
+ {
+ "epoch": 5.058608058608058,
+ "grad_norm": 9.185178756713867,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.0453,
+ "step": 1381
+ },
+ {
+ "epoch": 5.062271062271062,
+ "grad_norm": 40.83237838745117,
+ "learning_rate": 3.294261294261294e-05,
+ "loss": 0.1428,
+ "step": 1382
+ },
+ {
+ "epoch": 5.065934065934066,
+ "grad_norm": 32.31568908691406,
+ "learning_rate": 3.291819291819292e-05,
+ "loss": 0.3131,
+ "step": 1383
+ },
+ {
+ "epoch": 5.069597069597069,
+ "grad_norm": 5.372808456420898,
+ "learning_rate": 3.28937728937729e-05,
+ "loss": 0.0452,
+ "step": 1384
+ },
+ {
+ "epoch": 5.073260073260073,
+ "grad_norm": 3.0900495052337646,
+ "learning_rate": 3.286935286935287e-05,
+ "loss": 0.0175,
+ "step": 1385
+ },
+ {
+ "epoch": 5.076923076923077,
+ "grad_norm": 25.293724060058594,
+ "learning_rate": 3.284493284493285e-05,
+ "loss": 0.2162,
+ "step": 1386
+ },
+ {
+ "epoch": 5.08058608058608,
+ "grad_norm": 26.231664657592773,
+ "learning_rate": 3.282051282051282e-05,
+ "loss": 0.1764,
+ "step": 1387
+ },
+ {
+ "epoch": 5.084249084249084,
+ "grad_norm": 24.69008445739746,
+ "learning_rate": 3.279609279609279e-05,
+ "loss": 0.1019,
+ "step": 1388
+ },
+ {
+ "epoch": 5.087912087912088,
+ "grad_norm": 12.522343635559082,
+ "learning_rate": 3.277167277167277e-05,
+ "loss": 0.0424,
+ "step": 1389
+ },
+ {
+ "epoch": 5.091575091575091,
+ "grad_norm": 28.68439292907715,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.3441,
+ "step": 1390
+ },
+ {
+ "epoch": 5.095238095238095,
+ "grad_norm": 9.312751770019531,
+ "learning_rate": 3.272283272283272e-05,
+ "loss": 0.0675,
+ "step": 1391
+ },
+ {
+ "epoch": 5.0989010989010985,
+ "grad_norm": 12.041552543640137,
+ "learning_rate": 3.26984126984127e-05,
+ "loss": 0.049,
+ "step": 1392
+ },
+ {
+ "epoch": 5.102564102564102,
+ "grad_norm": 36.706031799316406,
+ "learning_rate": 3.267399267399267e-05,
+ "loss": 0.2947,
+ "step": 1393
+ },
+ {
+ "epoch": 5.106227106227106,
+ "grad_norm": 0.5009213089942932,
+ "learning_rate": 3.264957264957265e-05,
+ "loss": 0.0028,
+ "step": 1394
+ },
+ {
+ "epoch": 5.1098901098901095,
+ "grad_norm": 53.88454818725586,
+ "learning_rate": 3.262515262515263e-05,
+ "loss": 0.5004,
+ "step": 1395
+ },
+ {
+ "epoch": 5.113553113553113,
+ "grad_norm": 11.917198181152344,
+ "learning_rate": 3.26007326007326e-05,
+ "loss": 0.0734,
+ "step": 1396
+ },
+ {
+ "epoch": 5.117216117216117,
+ "grad_norm": 58.02888107299805,
+ "learning_rate": 3.257631257631258e-05,
+ "loss": 0.7099,
+ "step": 1397
+ },
+ {
+ "epoch": 5.1208791208791204,
+ "grad_norm": 18.3216609954834,
+ "learning_rate": 3.255189255189256e-05,
+ "loss": 0.1162,
+ "step": 1398
+ },
+ {
+ "epoch": 5.124542124542124,
+ "grad_norm": 7.598775863647461,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 0.0341,
+ "step": 1399
+ },
+ {
+ "epoch": 5.128205128205128,
+ "grad_norm": 199.40313720703125,
+ "learning_rate": 3.25030525030525e-05,
+ "loss": 0.3829,
+ "step": 1400
+ },
+ {
+ "epoch": 5.131868131868132,
+ "grad_norm": 6.528984546661377,
+ "learning_rate": 3.247863247863248e-05,
+ "loss": 0.041,
+ "step": 1401
+ },
+ {
+ "epoch": 5.135531135531136,
+ "grad_norm": 28.80277442932129,
+ "learning_rate": 3.245421245421245e-05,
+ "loss": 0.3511,
+ "step": 1402
+ },
+ {
+ "epoch": 5.13919413919414,
+ "grad_norm": 5.08656120300293,
+ "learning_rate": 3.242979242979243e-05,
+ "loss": 0.0403,
+ "step": 1403
+ },
+ {
+ "epoch": 5.142857142857143,
+ "grad_norm": 16.86358070373535,
+ "learning_rate": 3.240537240537241e-05,
+ "loss": 0.1676,
+ "step": 1404
+ },
+ {
+ "epoch": 5.146520146520147,
+ "grad_norm": 46.099613189697266,
+ "learning_rate": 3.238095238095238e-05,
+ "loss": 0.8096,
+ "step": 1405
+ },
+ {
+ "epoch": 5.1501831501831505,
+ "grad_norm": 26.01686668395996,
+ "learning_rate": 3.235653235653236e-05,
+ "loss": 0.1283,
+ "step": 1406
+ },
+ {
+ "epoch": 5.153846153846154,
+ "grad_norm": 4.826385498046875,
+ "learning_rate": 3.2332112332112336e-05,
+ "loss": 0.0328,
+ "step": 1407
+ },
+ {
+ "epoch": 5.157509157509158,
+ "grad_norm": 34.697593688964844,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.1306,
+ "step": 1408
+ },
+ {
+ "epoch": 5.1611721611721615,
+ "grad_norm": 21.331661224365234,
+ "learning_rate": 3.2283272283272286e-05,
+ "loss": 0.1302,
+ "step": 1409
+ },
+ {
+ "epoch": 5.164835164835165,
+ "grad_norm": 9.991851806640625,
+ "learning_rate": 3.2258852258852265e-05,
+ "loss": 0.0441,
+ "step": 1410
+ },
+ {
+ "epoch": 5.168498168498169,
+ "grad_norm": 26.641136169433594,
+ "learning_rate": 3.2234432234432237e-05,
+ "loss": 0.0894,
+ "step": 1411
+ },
+ {
+ "epoch": 5.172161172161172,
+ "grad_norm": 24.541366577148438,
+ "learning_rate": 3.2210012210012215e-05,
+ "loss": 0.1026,
+ "step": 1412
+ },
+ {
+ "epoch": 5.175824175824176,
+ "grad_norm": 44.62923049926758,
+ "learning_rate": 3.218559218559218e-05,
+ "loss": 0.1887,
+ "step": 1413
+ },
+ {
+ "epoch": 5.17948717948718,
+ "grad_norm": 19.28236198425293,
+ "learning_rate": 3.216117216117216e-05,
+ "loss": 0.0631,
+ "step": 1414
+ },
+ {
+ "epoch": 5.183150183150183,
+ "grad_norm": 10.39486026763916,
+ "learning_rate": 3.213675213675214e-05,
+ "loss": 0.0614,
+ "step": 1415
+ },
+ {
+ "epoch": 5.186813186813187,
+ "grad_norm": 32.476009368896484,
+ "learning_rate": 3.211233211233211e-05,
+ "loss": 0.2238,
+ "step": 1416
+ },
+ {
+ "epoch": 5.190476190476191,
+ "grad_norm": 9.828605651855469,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.0589,
+ "step": 1417
+ },
+ {
+ "epoch": 5.194139194139194,
+ "grad_norm": 50.0748291015625,
+ "learning_rate": 3.2063492063492065e-05,
+ "loss": 0.8225,
+ "step": 1418
+ },
+ {
+ "epoch": 5.197802197802198,
+ "grad_norm": 31.925779342651367,
+ "learning_rate": 3.203907203907204e-05,
+ "loss": 0.1824,
+ "step": 1419
+ },
+ {
+ "epoch": 5.201465201465202,
+ "grad_norm": 108.24534606933594,
+ "learning_rate": 3.2014652014652016e-05,
+ "loss": 2.3808,
+ "step": 1420
+ },
+ {
+ "epoch": 5.205128205128205,
+ "grad_norm": 54.39910888671875,
+ "learning_rate": 3.1990231990231994e-05,
+ "loss": 0.614,
+ "step": 1421
+ },
+ {
+ "epoch": 5.208791208791209,
+ "grad_norm": 13.70672607421875,
+ "learning_rate": 3.1965811965811966e-05,
+ "loss": 0.0366,
+ "step": 1422
+ },
+ {
+ "epoch": 5.212454212454213,
+ "grad_norm": 19.851043701171875,
+ "learning_rate": 3.1941391941391944e-05,
+ "loss": 0.1847,
+ "step": 1423
+ },
+ {
+ "epoch": 5.216117216117216,
+ "grad_norm": 1.041467547416687,
+ "learning_rate": 3.191697191697192e-05,
+ "loss": 0.0062,
+ "step": 1424
+ },
+ {
+ "epoch": 5.21978021978022,
+ "grad_norm": 10.629105567932129,
+ "learning_rate": 3.1892551892551894e-05,
+ "loss": 0.1058,
+ "step": 1425
+ },
+ {
+ "epoch": 5.2234432234432235,
+ "grad_norm": 25.597496032714844,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.1786,
+ "step": 1426
+ },
+ {
+ "epoch": 5.227106227106227,
+ "grad_norm": 21.409902572631836,
+ "learning_rate": 3.1843711843711844e-05,
+ "loss": 0.1354,
+ "step": 1427
+ },
+ {
+ "epoch": 5.230769230769231,
+ "grad_norm": 252.64599609375,
+ "learning_rate": 3.1819291819291816e-05,
+ "loss": 0.476,
+ "step": 1428
+ },
+ {
+ "epoch": 5.2344322344322345,
+ "grad_norm": 22.15670394897461,
+ "learning_rate": 3.1794871794871795e-05,
+ "loss": 0.2111,
+ "step": 1429
+ },
+ {
+ "epoch": 5.238095238095238,
+ "grad_norm": 37.93739700317383,
+ "learning_rate": 3.177045177045177e-05,
+ "loss": 0.391,
+ "step": 1430
+ },
+ {
+ "epoch": 5.241758241758242,
+ "grad_norm": 25.364606857299805,
+ "learning_rate": 3.1746031746031745e-05,
+ "loss": 0.3365,
+ "step": 1431
+ },
+ {
+ "epoch": 5.245421245421245,
+ "grad_norm": 20.658681869506836,
+ "learning_rate": 3.172161172161172e-05,
+ "loss": 0.2419,
+ "step": 1432
+ },
+ {
+ "epoch": 5.249084249084249,
+ "grad_norm": 11.507100105285645,
+ "learning_rate": 3.16971916971917e-05,
+ "loss": 0.074,
+ "step": 1433
+ },
+ {
+ "epoch": 5.252747252747253,
+ "grad_norm": 32.7891845703125,
+ "learning_rate": 3.167277167277167e-05,
+ "loss": 0.261,
+ "step": 1434
+ },
+ {
+ "epoch": 5.256410256410256,
+ "grad_norm": 10.153932571411133,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 0.0317,
+ "step": 1435
+ },
+ {
+ "epoch": 5.26007326007326,
+ "grad_norm": 16.879608154296875,
+ "learning_rate": 3.162393162393163e-05,
+ "loss": 0.0668,
+ "step": 1436
+ },
+ {
+ "epoch": 5.263736263736264,
+ "grad_norm": 5.040280818939209,
+ "learning_rate": 3.15995115995116e-05,
+ "loss": 0.0197,
+ "step": 1437
+ },
+ {
+ "epoch": 5.267399267399267,
+ "grad_norm": 32.5413818359375,
+ "learning_rate": 3.157509157509158e-05,
+ "loss": 0.2659,
+ "step": 1438
+ },
+ {
+ "epoch": 5.271062271062271,
+ "grad_norm": 54.41200637817383,
+ "learning_rate": 3.1550671550671545e-05,
+ "loss": 0.6863,
+ "step": 1439
+ },
+ {
+ "epoch": 5.274725274725275,
+ "grad_norm": 13.049643516540527,
+ "learning_rate": 3.1526251526251524e-05,
+ "loss": 0.0808,
+ "step": 1440
+ },
+ {
+ "epoch": 5.278388278388278,
+ "grad_norm": 37.76680374145508,
+ "learning_rate": 3.15018315018315e-05,
+ "loss": 0.2917,
+ "step": 1441
+ },
+ {
+ "epoch": 5.282051282051282,
+ "grad_norm": 22.97549057006836,
+ "learning_rate": 3.1477411477411474e-05,
+ "loss": 0.1115,
+ "step": 1442
+ },
+ {
+ "epoch": 5.285714285714286,
+ "grad_norm": 36.935115814208984,
+ "learning_rate": 3.145299145299145e-05,
+ "loss": 0.3719,
+ "step": 1443
+ },
+ {
+ "epoch": 5.289377289377289,
+ "grad_norm": 50.726070404052734,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.2635,
+ "step": 1444
+ },
+ {
+ "epoch": 5.293040293040293,
+ "grad_norm": 31.919862747192383,
+ "learning_rate": 3.14041514041514e-05,
+ "loss": 0.2158,
+ "step": 1445
+ },
+ {
+ "epoch": 5.2967032967032965,
+ "grad_norm": 2.463076114654541,
+ "learning_rate": 3.137973137973138e-05,
+ "loss": 0.0125,
+ "step": 1446
+ },
+ {
+ "epoch": 5.3003663003663,
+ "grad_norm": 12.970477104187012,
+ "learning_rate": 3.135531135531136e-05,
+ "loss": 0.0701,
+ "step": 1447
+ },
+ {
+ "epoch": 5.304029304029304,
+ "grad_norm": 30.649160385131836,
+ "learning_rate": 3.133089133089133e-05,
+ "loss": 0.3443,
+ "step": 1448
+ },
+ {
+ "epoch": 5.3076923076923075,
+ "grad_norm": 50.362281799316406,
+ "learning_rate": 3.130647130647131e-05,
+ "loss": 0.2792,
+ "step": 1449
+ },
+ {
+ "epoch": 5.311355311355311,
+ "grad_norm": 25.041845321655273,
+ "learning_rate": 3.128205128205129e-05,
+ "loss": 0.2127,
+ "step": 1450
+ },
+ {
+ "epoch": 5.315018315018315,
+ "grad_norm": 44.749515533447266,
+ "learning_rate": 3.125763125763126e-05,
+ "loss": 0.5353,
+ "step": 1451
+ },
+ {
+ "epoch": 5.318681318681318,
+ "grad_norm": 66.30032348632812,
+ "learning_rate": 3.123321123321123e-05,
+ "loss": 0.5775,
+ "step": 1452
+ },
+ {
+ "epoch": 5.322344322344322,
+ "grad_norm": 3.905022382736206,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.0229,
+ "step": 1453
+ },
+ {
+ "epoch": 5.326007326007326,
+ "grad_norm": 50.520259857177734,
+ "learning_rate": 3.118437118437118e-05,
+ "loss": 0.6539,
+ "step": 1454
+ },
+ {
+ "epoch": 5.329670329670329,
+ "grad_norm": 12.567275047302246,
+ "learning_rate": 3.115995115995116e-05,
+ "loss": 0.0493,
+ "step": 1455
+ },
+ {
+ "epoch": 5.333333333333333,
+ "grad_norm": 24.11554718017578,
+ "learning_rate": 3.113553113553114e-05,
+ "loss": 0.401,
+ "step": 1456
+ },
+ {
+ "epoch": 5.336996336996337,
+ "grad_norm": 6.885409832000732,
+ "learning_rate": 3.111111111111111e-05,
+ "loss": 0.022,
+ "step": 1457
+ },
+ {
+ "epoch": 5.34065934065934,
+ "grad_norm": 30.46776008605957,
+ "learning_rate": 3.108669108669109e-05,
+ "loss": 0.1968,
+ "step": 1458
+ },
+ {
+ "epoch": 5.344322344322344,
+ "grad_norm": 54.408790588378906,
+ "learning_rate": 3.106227106227107e-05,
+ "loss": 0.3258,
+ "step": 1459
+ },
+ {
+ "epoch": 5.347985347985348,
+ "grad_norm": 43.48060989379883,
+ "learning_rate": 3.103785103785104e-05,
+ "loss": 0.2663,
+ "step": 1460
+ },
+ {
+ "epoch": 5.351648351648351,
+ "grad_norm": 34.339962005615234,
+ "learning_rate": 3.101343101343102e-05,
+ "loss": 0.3313,
+ "step": 1461
+ },
+ {
+ "epoch": 5.355311355311355,
+ "grad_norm": 35.54948806762695,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.2377,
+ "step": 1462
+ },
+ {
+ "epoch": 5.358974358974359,
+ "grad_norm": 30.956071853637695,
+ "learning_rate": 3.096459096459097e-05,
+ "loss": 0.2388,
+ "step": 1463
+ },
+ {
+ "epoch": 5.362637362637362,
+ "grad_norm": 15.458950996398926,
+ "learning_rate": 3.094017094017094e-05,
+ "loss": 0.1196,
+ "step": 1464
+ },
+ {
+ "epoch": 5.366300366300366,
+ "grad_norm": 56.893463134765625,
+ "learning_rate": 3.091575091575091e-05,
+ "loss": 0.5377,
+ "step": 1465
+ },
+ {
+ "epoch": 5.36996336996337,
+ "grad_norm": 31.90789794921875,
+ "learning_rate": 3.089133089133089e-05,
+ "loss": 0.5008,
+ "step": 1466
+ },
+ {
+ "epoch": 5.373626373626374,
+ "grad_norm": 18.772607803344727,
+ "learning_rate": 3.086691086691087e-05,
+ "loss": 0.1838,
+ "step": 1467
+ },
+ {
+ "epoch": 5.377289377289378,
+ "grad_norm": 1.7131195068359375,
+ "learning_rate": 3.084249084249084e-05,
+ "loss": 0.0055,
+ "step": 1468
+ },
+ {
+ "epoch": 5.380952380952381,
+ "grad_norm": 6.398471355438232,
+ "learning_rate": 3.081807081807082e-05,
+ "loss": 0.0309,
+ "step": 1469
+ },
+ {
+ "epoch": 5.384615384615385,
+ "grad_norm": 13.847221374511719,
+ "learning_rate": 3.0793650793650796e-05,
+ "loss": 0.0785,
+ "step": 1470
+ },
+ {
+ "epoch": 5.388278388278389,
+ "grad_norm": 46.000179290771484,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.4114,
+ "step": 1471
+ },
+ {
+ "epoch": 5.391941391941392,
+ "grad_norm": 39.47720718383789,
+ "learning_rate": 3.0744810744810746e-05,
+ "loss": 0.9189,
+ "step": 1472
+ },
+ {
+ "epoch": 5.395604395604396,
+ "grad_norm": 30.588356018066406,
+ "learning_rate": 3.0720390720390724e-05,
+ "loss": 0.372,
+ "step": 1473
+ },
+ {
+ "epoch": 5.3992673992674,
+ "grad_norm": 83.61669921875,
+ "learning_rate": 3.0695970695970696e-05,
+ "loss": 0.6729,
+ "step": 1474
+ },
+ {
+ "epoch": 5.402930402930403,
+ "grad_norm": 14.384758949279785,
+ "learning_rate": 3.0671550671550675e-05,
+ "loss": 0.0825,
+ "step": 1475
+ },
+ {
+ "epoch": 5.406593406593407,
+ "grad_norm": 41.9291877746582,
+ "learning_rate": 3.064713064713065e-05,
+ "loss": 0.2128,
+ "step": 1476
+ },
+ {
+ "epoch": 5.410256410256411,
+ "grad_norm": 31.03643035888672,
+ "learning_rate": 3.062271062271062e-05,
+ "loss": 0.6978,
+ "step": 1477
+ },
+ {
+ "epoch": 5.413919413919414,
+ "grad_norm": 43.225547790527344,
+ "learning_rate": 3.0598290598290596e-05,
+ "loss": 0.6546,
+ "step": 1478
+ },
+ {
+ "epoch": 5.417582417582418,
+ "grad_norm": 37.172611236572266,
+ "learning_rate": 3.0573870573870575e-05,
+ "loss": 0.5024,
+ "step": 1479
+ },
+ {
+ "epoch": 5.4212454212454215,
+ "grad_norm": 52.93882369995117,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.9954,
+ "step": 1480
+ },
+ {
+ "epoch": 5.424908424908425,
+ "grad_norm": 30.838403701782227,
+ "learning_rate": 3.0525030525030525e-05,
+ "loss": 0.2539,
+ "step": 1481
+ },
+ {
+ "epoch": 5.428571428571429,
+ "grad_norm": 8.876139640808105,
+ "learning_rate": 3.0500610500610503e-05,
+ "loss": 0.0635,
+ "step": 1482
+ },
+ {
+ "epoch": 5.4322344322344325,
+ "grad_norm": 14.970293998718262,
+ "learning_rate": 3.0476190476190475e-05,
+ "loss": 0.1337,
+ "step": 1483
+ },
+ {
+ "epoch": 5.435897435897436,
+ "grad_norm": 29.44560432434082,
+ "learning_rate": 3.0451770451770454e-05,
+ "loss": 0.3719,
+ "step": 1484
+ },
+ {
+ "epoch": 5.43956043956044,
+ "grad_norm": 3.793294668197632,
+ "learning_rate": 3.0427350427350432e-05,
+ "loss": 0.0278,
+ "step": 1485
+ },
+ {
+ "epoch": 5.443223443223443,
+ "grad_norm": 37.418731689453125,
+ "learning_rate": 3.0402930402930404e-05,
+ "loss": 0.5153,
+ "step": 1486
+ },
+ {
+ "epoch": 5.446886446886447,
+ "grad_norm": 26.718324661254883,
+ "learning_rate": 3.037851037851038e-05,
+ "loss": 0.388,
+ "step": 1487
+ },
+ {
+ "epoch": 5.450549450549451,
+ "grad_norm": 28.463197708129883,
+ "learning_rate": 3.0354090354090357e-05,
+ "loss": 0.1956,
+ "step": 1488
+ },
+ {
+ "epoch": 5.454212454212454,
+ "grad_norm": 45.390602111816406,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.3694,
+ "step": 1489
+ },
+ {
+ "epoch": 5.457875457875458,
+ "grad_norm": 33.20753860473633,
+ "learning_rate": 3.0305250305250307e-05,
+ "loss": 0.2946,
+ "step": 1490
+ },
+ {
+ "epoch": 5.461538461538462,
+ "grad_norm": 66.42272186279297,
+ "learning_rate": 3.028083028083028e-05,
+ "loss": 0.9082,
+ "step": 1491
+ },
+ {
+ "epoch": 5.465201465201465,
+ "grad_norm": 33.85127258300781,
+ "learning_rate": 3.0256410256410257e-05,
+ "loss": 0.2362,
+ "step": 1492
+ },
+ {
+ "epoch": 5.468864468864469,
+ "grad_norm": 51.019256591796875,
+ "learning_rate": 3.0231990231990233e-05,
+ "loss": 0.5446,
+ "step": 1493
+ },
+ {
+ "epoch": 5.472527472527473,
+ "grad_norm": 30.998769760131836,
+ "learning_rate": 3.0207570207570204e-05,
+ "loss": 0.4739,
+ "step": 1494
+ },
+ {
+ "epoch": 5.476190476190476,
+ "grad_norm": 44.187957763671875,
+ "learning_rate": 3.0183150183150183e-05,
+ "loss": 0.3439,
+ "step": 1495
+ },
+ {
+ "epoch": 5.47985347985348,
+ "grad_norm": 50.70987319946289,
+ "learning_rate": 3.015873015873016e-05,
+ "loss": 0.1625,
+ "step": 1496
+ },
+ {
+ "epoch": 5.483516483516484,
+ "grad_norm": 33.66750717163086,
+ "learning_rate": 3.0134310134310133e-05,
+ "loss": 0.1927,
+ "step": 1497
+ },
+ {
+ "epoch": 5.487179487179487,
+ "grad_norm": 41.02281951904297,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.4102,
+ "step": 1498
+ },
+ {
+ "epoch": 5.490842490842491,
+ "grad_norm": 10.570262908935547,
+ "learning_rate": 3.008547008547009e-05,
+ "loss": 0.0664,
+ "step": 1499
+ },
+ {
+ "epoch": 5.4945054945054945,
+ "grad_norm": 54.08304214477539,
+ "learning_rate": 3.0061050061050058e-05,
+ "loss": 0.9224,
+ "step": 1500
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-1500/training_args.bin b/checkpoint-1500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-1500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-1500/zero_to_fp32.py b/checkpoint-1500/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-1500/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover the data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict
+ param_shapes: dict
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict
+ frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights,
+ # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol: in ZeRO-2 every rank stores a contiguous slice of each flattened fp32
+ # param group, so concatenating the per-rank slices restores the full flat vector, from which each
+ # param is carved out in order using its recorded shape (alignment padding is checked below).
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
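+ # ZeRO-3 pads each param so it splits evenly across ranks; e.g. 10 elements on 4 ranks give
+ # partitioned_numel=3 per rank and padding_numel=2 (4*3 = 10 real + 2 padding elements).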
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: for zero3 we need to zip the partitions together at the boundary of
+ # each param, re-consolidating each param while dealing with padding, if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = sum(flat_group.numel() for flat_group in fp32_flat_groups[0]) * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
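+ # Per-rank flat partitions are laid out group by group; flat_groups_offset records where each
+ # param group starts so a global offset can be mapped back to the right group and slice.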
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
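+ # offset counted partitioned elements for a single rank; scaling by world_size gives the total
+ # consumed across all ranks, which the sanity check below compares against avail_numel.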
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named 'latest' in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get the state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del the tensor to release memory once it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory for the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
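+ # e.g. "model.safetensors" -> "model{suffix}.safetensors"; the hub helper fills in {suffix}
+ # (typically something like "-00001-of-00002") when the state dict actually gets sharded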
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach to sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Move the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-2000/config.json b/checkpoint-2000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-2000/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-2000/global_step2000/mp_rank_00_model_states.pt b/checkpoint-2000/global_step2000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..50e35a2447ba6a8d85ab07c97a8dec4af4a7090e
--- /dev/null
+++ b/checkpoint-2000/global_step2000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f26a6d0d1ca581fff63585ad00ffab6114a9447bd60e84d7b0ff2fdb21cf58a3
+size 2271151845
diff --git a/checkpoint-2000/global_step2000/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-2000/global_step2000/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0569b497b4f8458bd87d48516b47f7b2c857948d
--- /dev/null
+++ b/checkpoint-2000/global_step2000/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:997d9505304238be19cd922afad78398744538be8c99a6ea4e0f5c5fefe215ea
+size 3406552447
diff --git a/checkpoint-2000/global_step2000/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-2000/global_step2000/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a901fd5926cbd96ef25ae91e6ada1c4300e8aabc
--- /dev/null
+++ b/checkpoint-2000/global_step2000/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c67cb9d11d455a6741406993f28eafa7b12eb63a203a18478d3d0201c2722f7
+size 3406564543
diff --git a/checkpoint-2000/latest b/checkpoint-2000/latest
new file mode 100644
index 0000000000000000000000000000000000000000..2a79fdc19587e6bc9de060e90633f3a151b04516
--- /dev/null
+++ b/checkpoint-2000/latest
@@ -0,0 +1 @@
+global_step2000
\ No newline at end of file
diff --git a/checkpoint-2000/model.safetensors b/checkpoint-2000/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f49a9ba153a5d1b11cf682c76a60fa95bff7a497
--- /dev/null
+++ b/checkpoint-2000/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac430b4ff3dbbba464ef0903f0a481fd6757b8261f5e0d8e3cc26fe0dd4556c2
+size 2271071852
diff --git a/checkpoint-2000/rng_state_0.pth b/checkpoint-2000/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..69b25f6f89cfd1512b99057050ffdcba3cc685ac
--- /dev/null
+++ b/checkpoint-2000/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c4147520888f7bb37e46741f6aeef997901df0be5d1cd02472799e438cf9e15
+size 14917
diff --git a/checkpoint-2000/rng_state_1.pth b/checkpoint-2000/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9a9ff1dfed5d160b915bab8e21972a1a579c7dd3
--- /dev/null
+++ b/checkpoint-2000/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30cffd7d707fafc68aeaa0f59f41051af23a0e04c3636694da852cc0579fba9a
+size 14917
diff --git a/checkpoint-2000/scheduler.pt b/checkpoint-2000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a035e802077a97040517dfb45ebab365d54c50f3
--- /dev/null
+++ b/checkpoint-2000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fac01f708f82e736e71dc0f462f6c8e7629e5deed1d27a61cc1dcaeebfba498
+size 1465
diff --git a/checkpoint-2000/sentencepiece.bpe.model b/checkpoint-2000/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-2000/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-2000/special_tokens_map.json b/checkpoint-2000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-2000/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2000/tokenizer.json b/checkpoint-2000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-2000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-2000/tokenizer_config.json b/checkpoint-2000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-2000/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "model_max_length": 8192,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-2000/trainer_state.json b/checkpoint-2000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9739a1149e0356237012cfc5ffe0b6b122e5b5d5
--- /dev/null
+++ b/checkpoint-2000/trainer_state.json
@@ -0,0 +1,14034 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 7.326007326007326,
+ "eval_steps": 500,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ },
+ {
+ "epoch": 1.8351648351648353,
+ "grad_norm": 26.250751495361328,
+ "learning_rate": 5.445665445665445e-05,
+ "loss": 0.7496,
+ "step": 501
+ },
+ {
+ "epoch": 1.838827838827839,
+ "grad_norm": 19.820999145507812,
+ "learning_rate": 5.443223443223443e-05,
+ "loss": 0.4367,
+ "step": 502
+ },
+ {
+ "epoch": 1.8424908424908426,
+ "grad_norm": 25.09316062927246,
+ "learning_rate": 5.440781440781441e-05,
+ "loss": 0.8584,
+ "step": 503
+ },
+ {
+ "epoch": 1.8461538461538463,
+ "grad_norm": 17.808509826660156,
+ "learning_rate": 5.438339438339438e-05,
+ "loss": 0.3869,
+ "step": 504
+ },
+ {
+ "epoch": 1.84981684981685,
+ "grad_norm": 28.342119216918945,
+ "learning_rate": 5.435897435897436e-05,
+ "loss": 0.8881,
+ "step": 505
+ },
+ {
+ "epoch": 1.8534798534798536,
+ "grad_norm": 33.80287551879883,
+ "learning_rate": 5.433455433455434e-05,
+ "loss": 1.2911,
+ "step": 506
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 55.428138732910156,
+ "learning_rate": 5.431013431013431e-05,
+ "loss": 0.8934,
+ "step": 507
+ },
+ {
+ "epoch": 1.8608058608058609,
+ "grad_norm": 27.962610244750977,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 0.662,
+ "step": 508
+ },
+ {
+ "epoch": 1.8644688644688645,
+ "grad_norm": 62.84252166748047,
+ "learning_rate": 5.426129426129427e-05,
+ "loss": 1.9216,
+ "step": 509
+ },
+ {
+ "epoch": 1.8681318681318682,
+ "grad_norm": 24.26439666748047,
+ "learning_rate": 5.423687423687424e-05,
+ "loss": 0.2164,
+ "step": 510
+ },
+ {
+ "epoch": 1.8717948717948718,
+ "grad_norm": 50.95674133300781,
+ "learning_rate": 5.421245421245422e-05,
+ "loss": 0.7023,
+ "step": 511
+ },
+ {
+ "epoch": 1.8754578754578755,
+ "grad_norm": 41.17847442626953,
+ "learning_rate": 5.418803418803419e-05,
+ "loss": 1.1081,
+ "step": 512
+ },
+ {
+ "epoch": 1.879120879120879,
+ "grad_norm": 28.701988220214844,
+ "learning_rate": 5.416361416361416e-05,
+ "loss": 0.6519,
+ "step": 513
+ },
+ {
+ "epoch": 1.8827838827838828,
+ "grad_norm": 48.42552947998047,
+ "learning_rate": 5.413919413919414e-05,
+ "loss": 1.5215,
+ "step": 514
+ },
+ {
+ "epoch": 1.8864468864468864,
+ "grad_norm": 19.71268653869629,
+ "learning_rate": 5.411477411477412e-05,
+ "loss": 0.4731,
+ "step": 515
+ },
+ {
+ "epoch": 1.89010989010989,
+ "grad_norm": 68.88224792480469,
+ "learning_rate": 5.409035409035409e-05,
+ "loss": 3.0071,
+ "step": 516
+ },
+ {
+ "epoch": 1.8937728937728937,
+ "grad_norm": 34.33188247680664,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.7014,
+ "step": 517
+ },
+ {
+ "epoch": 1.8974358974358974,
+ "grad_norm": 18.214942932128906,
+ "learning_rate": 5.404151404151404e-05,
+ "loss": 0.2362,
+ "step": 518
+ },
+ {
+ "epoch": 1.901098901098901,
+ "grad_norm": 31.553678512573242,
+ "learning_rate": 5.401709401709402e-05,
+ "loss": 0.5839,
+ "step": 519
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 15.681426048278809,
+ "learning_rate": 5.3992673992673996e-05,
+ "loss": 0.6039,
+ "step": 520
+ },
+ {
+ "epoch": 1.9084249084249083,
+ "grad_norm": 18.462688446044922,
+ "learning_rate": 5.396825396825397e-05,
+ "loss": 0.5773,
+ "step": 521
+ },
+ {
+ "epoch": 1.912087912087912,
+ "grad_norm": 10.23849105834961,
+ "learning_rate": 5.3943833943833946e-05,
+ "loss": 0.3801,
+ "step": 522
+ },
+ {
+ "epoch": 1.9157509157509156,
+ "grad_norm": 35.680973052978516,
+ "learning_rate": 5.3919413919413925e-05,
+ "loss": 1.2559,
+ "step": 523
+ },
+ {
+ "epoch": 1.9194139194139193,
+ "grad_norm": 23.97362518310547,
+ "learning_rate": 5.3894993894993897e-05,
+ "loss": 0.4112,
+ "step": 524
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 25.785356521606445,
+ "learning_rate": 5.387057387057387e-05,
+ "loss": 0.8993,
+ "step": 525
+ },
+ {
+ "epoch": 1.9267399267399268,
+ "grad_norm": 25.246868133544922,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 0.6534,
+ "step": 526
+ },
+ {
+ "epoch": 1.9304029304029304,
+ "grad_norm": 29.850788116455078,
+ "learning_rate": 5.382173382173382e-05,
+ "loss": 0.52,
+ "step": 527
+ },
+ {
+ "epoch": 1.934065934065934,
+ "grad_norm": 20.702608108520508,
+ "learning_rate": 5.37973137973138e-05,
+ "loss": 0.4093,
+ "step": 528
+ },
+ {
+ "epoch": 1.9377289377289377,
+ "grad_norm": 36.39994812011719,
+ "learning_rate": 5.3772893772893775e-05,
+ "loss": 1.275,
+ "step": 529
+ },
+ {
+ "epoch": 1.9413919413919414,
+ "grad_norm": 27.56822395324707,
+ "learning_rate": 5.374847374847375e-05,
+ "loss": 0.6773,
+ "step": 530
+ },
+ {
+ "epoch": 1.945054945054945,
+ "grad_norm": 26.07769012451172,
+ "learning_rate": 5.3724053724053725e-05,
+ "loss": 0.5373,
+ "step": 531
+ },
+ {
+ "epoch": 1.9487179487179487,
+ "grad_norm": 48.47615051269531,
+ "learning_rate": 5.3699633699633704e-05,
+ "loss": 1.1931,
+ "step": 532
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 24.416805267333984,
+ "learning_rate": 5.3675213675213675e-05,
+ "loss": 0.4523,
+ "step": 533
+ },
+ {
+ "epoch": 1.9560439560439562,
+ "grad_norm": 56.8088264465332,
+ "learning_rate": 5.3650793650793654e-05,
+ "loss": 1.8992,
+ "step": 534
+ },
+ {
+ "epoch": 1.9597069597069599,
+ "grad_norm": 36.805912017822266,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 1.0743,
+ "step": 535
+ },
+ {
+ "epoch": 1.9633699633699635,
+ "grad_norm": 17.375244140625,
+ "learning_rate": 5.3601953601953604e-05,
+ "loss": 0.3546,
+ "step": 536
+ },
+ {
+ "epoch": 1.9670329670329672,
+ "grad_norm": 35.297767639160156,
+ "learning_rate": 5.357753357753358e-05,
+ "loss": 1.4903,
+ "step": 537
+ },
+ {
+ "epoch": 1.9706959706959708,
+ "grad_norm": 38.64927673339844,
+ "learning_rate": 5.3553113553113554e-05,
+ "loss": 0.9346,
+ "step": 538
+ },
+ {
+ "epoch": 1.9743589743589745,
+ "grad_norm": 23.494552612304688,
+ "learning_rate": 5.3528693528693526e-05,
+ "loss": 0.3677,
+ "step": 539
+ },
+ {
+ "epoch": 1.978021978021978,
+ "grad_norm": 21.8272647857666,
+ "learning_rate": 5.3504273504273504e-05,
+ "loss": 0.591,
+ "step": 540
+ },
+ {
+ "epoch": 1.9816849816849818,
+ "grad_norm": 15.60590934753418,
+ "learning_rate": 5.347985347985348e-05,
+ "loss": 0.3129,
+ "step": 541
+ },
+ {
+ "epoch": 1.9853479853479854,
+ "grad_norm": 23.846555709838867,
+ "learning_rate": 5.3455433455433454e-05,
+ "loss": 0.6108,
+ "step": 542
+ },
+ {
+ "epoch": 1.989010989010989,
+ "grad_norm": 21.743024826049805,
+ "learning_rate": 5.343101343101343e-05,
+ "loss": 1.0541,
+ "step": 543
+ },
+ {
+ "epoch": 1.9926739926739927,
+ "grad_norm": 29.806121826171875,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6088,
+ "step": 544
+ },
+ {
+ "epoch": 1.9963369963369964,
+ "grad_norm": 26.778568267822266,
+ "learning_rate": 5.338217338217338e-05,
+ "loss": 0.5842,
+ "step": 545
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 23.356237411499023,
+ "learning_rate": 5.335775335775336e-05,
+ "loss": 0.4591,
+ "step": 546
+ },
+ {
+ "epoch": 2.0036630036630036,
+ "grad_norm": 17.303443908691406,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 0.3432,
+ "step": 547
+ },
+ {
+ "epoch": 2.0073260073260073,
+ "grad_norm": 27.082172393798828,
+ "learning_rate": 5.330891330891331e-05,
+ "loss": 0.5156,
+ "step": 548
+ },
+ {
+ "epoch": 2.010989010989011,
+ "grad_norm": 26.520530700683594,
+ "learning_rate": 5.328449328449329e-05,
+ "loss": 0.3989,
+ "step": 549
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 23.737272262573242,
+ "learning_rate": 5.326007326007326e-05,
+ "loss": 0.5484,
+ "step": 550
+ },
+ {
+ "epoch": 2.0183150183150182,
+ "grad_norm": 24.222341537475586,
+ "learning_rate": 5.3235653235653233e-05,
+ "loss": 0.5365,
+ "step": 551
+ },
+ {
+ "epoch": 2.021978021978022,
+ "grad_norm": 29.081924438476562,
+ "learning_rate": 5.321123321123321e-05,
+ "loss": 0.6694,
+ "step": 552
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 32.419551849365234,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 0.7003,
+ "step": 553
+ },
+ {
+ "epoch": 2.029304029304029,
+ "grad_norm": 42.403709411621094,
+ "learning_rate": 5.316239316239316e-05,
+ "loss": 1.5474,
+ "step": 554
+ },
+ {
+ "epoch": 2.032967032967033,
+ "grad_norm": 17.615140914916992,
+ "learning_rate": 5.313797313797314e-05,
+ "loss": 0.588,
+ "step": 555
+ },
+ {
+ "epoch": 2.0366300366300365,
+ "grad_norm": 14.864067077636719,
+ "learning_rate": 5.311355311355311e-05,
+ "loss": 0.1613,
+ "step": 556
+ },
+ {
+ "epoch": 2.04029304029304,
+ "grad_norm": 20.189815521240234,
+ "learning_rate": 5.308913308913309e-05,
+ "loss": 0.4281,
+ "step": 557
+ },
+ {
+ "epoch": 2.043956043956044,
+ "grad_norm": 28.350017547607422,
+ "learning_rate": 5.306471306471307e-05,
+ "loss": 0.6614,
+ "step": 558
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 19.987825393676758,
+ "learning_rate": 5.304029304029304e-05,
+ "loss": 0.6906,
+ "step": 559
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 18.6667537689209,
+ "learning_rate": 5.301587301587302e-05,
+ "loss": 0.387,
+ "step": 560
+ },
+ {
+ "epoch": 2.0549450549450547,
+ "grad_norm": 20.930652618408203,
+ "learning_rate": 5.2991452991453e-05,
+ "loss": 0.7157,
+ "step": 561
+ },
+ {
+ "epoch": 2.0586080586080584,
+ "grad_norm": 22.05647087097168,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3256,
+ "step": 562
+ },
+ {
+ "epoch": 2.062271062271062,
+ "grad_norm": 32.66161346435547,
+ "learning_rate": 5.294261294261295e-05,
+ "loss": 1.3013,
+ "step": 563
+ },
+ {
+ "epoch": 2.065934065934066,
+ "grad_norm": 37.43238067626953,
+ "learning_rate": 5.291819291819292e-05,
+ "loss": 0.186,
+ "step": 564
+ },
+ {
+ "epoch": 2.06959706959707,
+ "grad_norm": 32.39999008178711,
+ "learning_rate": 5.289377289377289e-05,
+ "loss": 0.8047,
+ "step": 565
+ },
+ {
+ "epoch": 2.0732600732600734,
+ "grad_norm": 29.727481842041016,
+ "learning_rate": 5.286935286935287e-05,
+ "loss": 0.662,
+ "step": 566
+ },
+ {
+ "epoch": 2.076923076923077,
+ "grad_norm": 16.536264419555664,
+ "learning_rate": 5.284493284493285e-05,
+ "loss": 0.4,
+ "step": 567
+ },
+ {
+ "epoch": 2.0805860805860807,
+ "grad_norm": 23.41500473022461,
+ "learning_rate": 5.282051282051282e-05,
+ "loss": 0.4945,
+ "step": 568
+ },
+ {
+ "epoch": 2.0842490842490844,
+ "grad_norm": 48.842864990234375,
+ "learning_rate": 5.27960927960928e-05,
+ "loss": 0.7584,
+ "step": 569
+ },
+ {
+ "epoch": 2.087912087912088,
+ "grad_norm": 60.06027603149414,
+ "learning_rate": 5.277167277167277e-05,
+ "loss": 0.7179,
+ "step": 570
+ },
+ {
+ "epoch": 2.0915750915750917,
+ "grad_norm": 59.2591552734375,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.4883,
+ "step": 571
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 14.527932167053223,
+ "learning_rate": 5.272283272283273e-05,
+ "loss": 0.2811,
+ "step": 572
+ },
+ {
+ "epoch": 2.098901098901099,
+ "grad_norm": 16.2915096282959,
+ "learning_rate": 5.26984126984127e-05,
+ "loss": 0.2524,
+ "step": 573
+ },
+ {
+ "epoch": 2.1025641025641026,
+ "grad_norm": 28.938081741333008,
+ "learning_rate": 5.267399267399268e-05,
+ "loss": 0.5138,
+ "step": 574
+ },
+ {
+ "epoch": 2.1062271062271063,
+ "grad_norm": 27.541440963745117,
+ "learning_rate": 5.2649572649572655e-05,
+ "loss": 0.278,
+ "step": 575
+ },
+ {
+ "epoch": 2.10989010989011,
+ "grad_norm": 23.179025650024414,
+ "learning_rate": 5.262515262515263e-05,
+ "loss": 0.1881,
+ "step": 576
+ },
+ {
+ "epoch": 2.1135531135531136,
+ "grad_norm": 42.55375671386719,
+ "learning_rate": 5.26007326007326e-05,
+ "loss": 0.7882,
+ "step": 577
+ },
+ {
+ "epoch": 2.1172161172161172,
+ "grad_norm": 8.902749061584473,
+ "learning_rate": 5.257631257631258e-05,
+ "loss": 0.0611,
+ "step": 578
+ },
+ {
+ "epoch": 2.120879120879121,
+ "grad_norm": 19.483346939086914,
+ "learning_rate": 5.255189255189255e-05,
+ "loss": 0.0978,
+ "step": 579
+ },
+ {
+ "epoch": 2.1245421245421245,
+ "grad_norm": 13.898221969604492,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.0797,
+ "step": 580
+ },
+ {
+ "epoch": 2.128205128205128,
+ "grad_norm": 53.42538833618164,
+ "learning_rate": 5.2503052503052506e-05,
+ "loss": 0.9066,
+ "step": 581
+ },
+ {
+ "epoch": 2.131868131868132,
+ "grad_norm": 38.467891693115234,
+ "learning_rate": 5.247863247863248e-05,
+ "loss": 0.3272,
+ "step": 582
+ },
+ {
+ "epoch": 2.1355311355311355,
+ "grad_norm": 26.421035766601562,
+ "learning_rate": 5.2454212454212456e-05,
+ "loss": 0.6537,
+ "step": 583
+ },
+ {
+ "epoch": 2.139194139194139,
+ "grad_norm": 32.80412292480469,
+ "learning_rate": 5.2429792429792434e-05,
+ "loss": 1.1225,
+ "step": 584
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 26.87016487121582,
+ "learning_rate": 5.2405372405372406e-05,
+ "loss": 0.5749,
+ "step": 585
+ },
+ {
+ "epoch": 2.1465201465201464,
+ "grad_norm": 34.75699234008789,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 0.6926,
+ "step": 586
+ },
+ {
+ "epoch": 2.15018315018315,
+ "grad_norm": 61.76310348510742,
+ "learning_rate": 5.235653235653236e-05,
+ "loss": 0.9029,
+ "step": 587
+ },
+ {
+ "epoch": 2.1538461538461537,
+ "grad_norm": 40.86505126953125,
+ "learning_rate": 5.2332112332112335e-05,
+ "loss": 0.5169,
+ "step": 588
+ },
+ {
+ "epoch": 2.1575091575091574,
+ "grad_norm": 16.05042839050293,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 0.5211,
+ "step": 589
+ },
+ {
+ "epoch": 2.161172161172161,
+ "grad_norm": 19.56302261352539,
+ "learning_rate": 5.2283272283272285e-05,
+ "loss": 0.5737,
+ "step": 590
+ },
+ {
+ "epoch": 2.1648351648351647,
+ "grad_norm": 22.311508178710938,
+ "learning_rate": 5.2258852258852256e-05,
+ "loss": 0.4223,
+ "step": 591
+ },
+ {
+ "epoch": 2.1684981684981683,
+ "grad_norm": 21.059213638305664,
+ "learning_rate": 5.2234432234432235e-05,
+ "loss": 0.2285,
+ "step": 592
+ },
+ {
+ "epoch": 2.172161172161172,
+ "grad_norm": 28.82351303100586,
+ "learning_rate": 5.221001221001221e-05,
+ "loss": 0.8438,
+ "step": 593
+ },
+ {
+ "epoch": 2.1758241758241756,
+ "grad_norm": 14.425333023071289,
+ "learning_rate": 5.2185592185592185e-05,
+ "loss": 0.1765,
+ "step": 594
+ },
+ {
+ "epoch": 2.1794871794871793,
+ "grad_norm": 16.967479705810547,
+ "learning_rate": 5.2161172161172163e-05,
+ "loss": 0.2465,
+ "step": 595
+ },
+ {
+ "epoch": 2.183150183150183,
+ "grad_norm": 40.79065704345703,
+ "learning_rate": 5.2136752136752135e-05,
+ "loss": 0.6077,
+ "step": 596
+ },
+ {
+ "epoch": 2.186813186813187,
+ "grad_norm": 22.434715270996094,
+ "learning_rate": 5.2112332112332114e-05,
+ "loss": 0.3748,
+ "step": 597
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 32.18471908569336,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.5163,
+ "step": 598
+ },
+ {
+ "epoch": 2.1941391941391943,
+ "grad_norm": 20.43740463256836,
+ "learning_rate": 5.2063492063492064e-05,
+ "loss": 0.4116,
+ "step": 599
+ },
+ {
+ "epoch": 2.197802197802198,
+ "grad_norm": 6.528069496154785,
+ "learning_rate": 5.203907203907204e-05,
+ "loss": 0.065,
+ "step": 600
+ },
+ {
+ "epoch": 2.2014652014652016,
+ "grad_norm": 35.0635871887207,
+ "learning_rate": 5.201465201465202e-05,
+ "loss": 1.2288,
+ "step": 601
+ },
+ {
+ "epoch": 2.2051282051282053,
+ "grad_norm": 23.499767303466797,
+ "learning_rate": 5.199023199023199e-05,
+ "loss": 0.49,
+ "step": 602
+ },
+ {
+ "epoch": 2.208791208791209,
+ "grad_norm": 20.234952926635742,
+ "learning_rate": 5.1965811965811964e-05,
+ "loss": 0.231,
+ "step": 603
+ },
+ {
+ "epoch": 2.2124542124542126,
+ "grad_norm": 9.268828392028809,
+ "learning_rate": 5.194139194139194e-05,
+ "loss": 0.0732,
+ "step": 604
+ },
+ {
+ "epoch": 2.2161172161172162,
+ "grad_norm": 52.60474395751953,
+ "learning_rate": 5.1916971916971914e-05,
+ "loss": 0.8766,
+ "step": 605
+ },
+ {
+ "epoch": 2.21978021978022,
+ "grad_norm": 41.86642074584961,
+ "learning_rate": 5.189255189255189e-05,
+ "loss": 0.4743,
+ "step": 606
+ },
+ {
+ "epoch": 2.2234432234432235,
+ "grad_norm": 30.304580688476562,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.4412,
+ "step": 607
+ },
+ {
+ "epoch": 2.227106227106227,
+ "grad_norm": 27.26057243347168,
+ "learning_rate": 5.184371184371184e-05,
+ "loss": 0.3496,
+ "step": 608
+ },
+ {
+ "epoch": 2.230769230769231,
+ "grad_norm": 40.55131149291992,
+ "learning_rate": 5.181929181929182e-05,
+ "loss": 0.7097,
+ "step": 609
+ },
+ {
+ "epoch": 2.2344322344322345,
+ "grad_norm": 61.97871017456055,
+ "learning_rate": 5.17948717948718e-05,
+ "loss": 1.3686,
+ "step": 610
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 38.211700439453125,
+ "learning_rate": 5.177045177045177e-05,
+ "loss": 0.565,
+ "step": 611
+ },
+ {
+ "epoch": 2.241758241758242,
+ "grad_norm": 20.10716438293457,
+ "learning_rate": 5.174603174603175e-05,
+ "loss": 0.3468,
+ "step": 612
+ },
+ {
+ "epoch": 2.2454212454212454,
+ "grad_norm": 23.96891975402832,
+ "learning_rate": 5.172161172161173e-05,
+ "loss": 0.2295,
+ "step": 613
+ },
+ {
+ "epoch": 2.249084249084249,
+ "grad_norm": 10.14421272277832,
+ "learning_rate": 5.16971916971917e-05,
+ "loss": 0.0943,
+ "step": 614
+ },
+ {
+ "epoch": 2.2527472527472527,
+ "grad_norm": 15.786056518554688,
+ "learning_rate": 5.167277167277167e-05,
+ "loss": 0.1213,
+ "step": 615
+ },
+ {
+ "epoch": 2.2564102564102564,
+ "grad_norm": 20.907663345336914,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.235,
+ "step": 616
+ },
+ {
+ "epoch": 2.26007326007326,
+ "grad_norm": 32.149600982666016,
+ "learning_rate": 5.162393162393162e-05,
+ "loss": 0.4807,
+ "step": 617
+ },
+ {
+ "epoch": 2.2637362637362637,
+ "grad_norm": 33.965518951416016,
+ "learning_rate": 5.15995115995116e-05,
+ "loss": 0.4517,
+ "step": 618
+ },
+ {
+ "epoch": 2.2673992673992673,
+ "grad_norm": 49.98363494873047,
+ "learning_rate": 5.157509157509158e-05,
+ "loss": 0.6434,
+ "step": 619
+ },
+ {
+ "epoch": 2.271062271062271,
+ "grad_norm": 14.035831451416016,
+ "learning_rate": 5.155067155067155e-05,
+ "loss": 0.1117,
+ "step": 620
+ },
+ {
+ "epoch": 2.2747252747252746,
+ "grad_norm": 28.84484100341797,
+ "learning_rate": 5.152625152625153e-05,
+ "loss": 0.8002,
+ "step": 621
+ },
+ {
+ "epoch": 2.2783882783882783,
+ "grad_norm": 41.59181594848633,
+ "learning_rate": 5.15018315018315e-05,
+ "loss": 0.4465,
+ "step": 622
+ },
+ {
+ "epoch": 2.282051282051282,
+ "grad_norm": 33.10573196411133,
+ "learning_rate": 5.147741147741148e-05,
+ "loss": 0.5795,
+ "step": 623
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 34.79928970336914,
+ "learning_rate": 5.145299145299146e-05,
+ "loss": 0.3135,
+ "step": 624
+ },
+ {
+ "epoch": 2.2893772893772892,
+ "grad_norm": 18.095544815063477,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.0961,
+ "step": 625
+ },
+ {
+ "epoch": 2.293040293040293,
+ "grad_norm": 16.55453872680664,
+ "learning_rate": 5.140415140415141e-05,
+ "loss": 0.0868,
+ "step": 626
+ },
+ {
+ "epoch": 2.2967032967032965,
+ "grad_norm": 42.18946075439453,
+ "learning_rate": 5.1379731379731386e-05,
+ "loss": 0.8892,
+ "step": 627
+ },
+ {
+ "epoch": 2.3003663003663,
+ "grad_norm": 54.753448486328125,
+ "learning_rate": 5.135531135531135e-05,
+ "loss": 0.833,
+ "step": 628
+ },
+ {
+ "epoch": 2.304029304029304,
+ "grad_norm": 27.723228454589844,
+ "learning_rate": 5.133089133089133e-05,
+ "loss": 0.2744,
+ "step": 629
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 28.53034019470215,
+ "learning_rate": 5.130647130647131e-05,
+ "loss": 0.1696,
+ "step": 630
+ },
+ {
+ "epoch": 2.311355311355311,
+ "grad_norm": 65.4127426147461,
+ "learning_rate": 5.128205128205128e-05,
+ "loss": 0.9019,
+ "step": 631
+ },
+ {
+ "epoch": 2.315018315018315,
+ "grad_norm": 22.794870376586914,
+ "learning_rate": 5.125763125763126e-05,
+ "loss": 0.1987,
+ "step": 632
+ },
+ {
+ "epoch": 2.3186813186813184,
+ "grad_norm": 29.870113372802734,
+ "learning_rate": 5.1233211233211236e-05,
+ "loss": 0.4816,
+ "step": 633
+ },
+ {
+ "epoch": 2.3223443223443225,
+ "grad_norm": 38.91164779663086,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.7424,
+ "step": 634
+ },
+ {
+ "epoch": 2.326007326007326,
+ "grad_norm": 36.57811737060547,
+ "learning_rate": 5.1184371184371186e-05,
+ "loss": 1.1365,
+ "step": 635
+ },
+ {
+ "epoch": 2.32967032967033,
+ "grad_norm": 31.59128189086914,
+ "learning_rate": 5.1159951159951165e-05,
+ "loss": 0.6167,
+ "step": 636
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 25.956003189086914,
+ "learning_rate": 5.1135531135531136e-05,
+ "loss": 0.8808,
+ "step": 637
+ },
+ {
+ "epoch": 2.336996336996337,
+ "grad_norm": 38.18582534790039,
+ "learning_rate": 5.1111111111111115e-05,
+ "loss": 0.9417,
+ "step": 638
+ },
+ {
+ "epoch": 2.340659340659341,
+ "grad_norm": 27.436229705810547,
+ "learning_rate": 5.108669108669109e-05,
+ "loss": 0.7539,
+ "step": 639
+ },
+ {
+ "epoch": 2.3443223443223444,
+ "grad_norm": 40.86305618286133,
+ "learning_rate": 5.1062271062271065e-05,
+ "loss": 2.126,
+ "step": 640
+ },
+ {
+ "epoch": 2.347985347985348,
+ "grad_norm": 22.224748611450195,
+ "learning_rate": 5.103785103785104e-05,
+ "loss": 0.9958,
+ "step": 641
+ },
+ {
+ "epoch": 2.3516483516483517,
+ "grad_norm": 19.915552139282227,
+ "learning_rate": 5.1013431013431015e-05,
+ "loss": 1.1045,
+ "step": 642
+ },
+ {
+ "epoch": 2.3553113553113554,
+ "grad_norm": 17.045989990234375,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8906,
+ "step": 643
+ },
+ {
+ "epoch": 2.358974358974359,
+ "grad_norm": 22.106670379638672,
+ "learning_rate": 5.0964590964590965e-05,
+ "loss": 0.9856,
+ "step": 644
+ },
+ {
+ "epoch": 2.3626373626373627,
+ "grad_norm": 17.583837509155273,
+ "learning_rate": 5.0940170940170944e-05,
+ "loss": 0.8328,
+ "step": 645
+ },
+ {
+ "epoch": 2.3663003663003663,
+ "grad_norm": 57.61167526245117,
+ "learning_rate": 5.0915750915750915e-05,
+ "loss": 0.578,
+ "step": 646
+ },
+ {
+ "epoch": 2.36996336996337,
+ "grad_norm": 13.941128730773926,
+ "learning_rate": 5.0891330891330894e-05,
+ "loss": 0.5892,
+ "step": 647
+ },
+ {
+ "epoch": 2.3736263736263736,
+ "grad_norm": 22.38715171813965,
+ "learning_rate": 5.0866910866910866e-05,
+ "loss": 0.7608,
+ "step": 648
+ },
+ {
+ "epoch": 2.3772893772893773,
+ "grad_norm": 22.42316436767578,
+ "learning_rate": 5.0842490842490844e-05,
+ "loss": 0.7923,
+ "step": 649
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 32.75740432739258,
+ "learning_rate": 5.081807081807082e-05,
+ "loss": 1.0798,
+ "step": 650
+ },
+ {
+ "epoch": 2.3846153846153846,
+ "grad_norm": 19.295289993286133,
+ "learning_rate": 5.0793650793650794e-05,
+ "loss": 0.4898,
+ "step": 651
+ },
+ {
+ "epoch": 2.3882783882783882,
+ "grad_norm": 25.849227905273438,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.5557,
+ "step": 652
+ },
+ {
+ "epoch": 2.391941391941392,
+ "grad_norm": 21.321088790893555,
+ "learning_rate": 5.074481074481075e-05,
+ "loss": 0.2743,
+ "step": 653
+ },
+ {
+ "epoch": 2.3956043956043955,
+ "grad_norm": 28.795917510986328,
+ "learning_rate": 5.0720390720390716e-05,
+ "loss": 0.7039,
+ "step": 654
+ },
+ {
+ "epoch": 2.399267399267399,
+ "grad_norm": 19.86751937866211,
+ "learning_rate": 5.0695970695970694e-05,
+ "loss": 0.3155,
+ "step": 655
+ },
+ {
+ "epoch": 2.402930402930403,
+ "grad_norm": 33.3828010559082,
+ "learning_rate": 5.067155067155067e-05,
+ "loss": 1.0696,
+ "step": 656
+ },
+ {
+ "epoch": 2.4065934065934065,
+ "grad_norm": 37.38752746582031,
+ "learning_rate": 5.0647130647130645e-05,
+ "loss": 0.8123,
+ "step": 657
+ },
+ {
+ "epoch": 2.41025641025641,
+ "grad_norm": 29.22795867919922,
+ "learning_rate": 5.062271062271062e-05,
+ "loss": 0.9515,
+ "step": 658
+ },
+ {
+ "epoch": 2.413919413919414,
+ "grad_norm": 41.129981994628906,
+ "learning_rate": 5.05982905982906e-05,
+ "loss": 1.1329,
+ "step": 659
+ },
+ {
+ "epoch": 2.4175824175824174,
+ "grad_norm": 40.985042572021484,
+ "learning_rate": 5.057387057387057e-05,
+ "loss": 0.675,
+ "step": 660
+ },
+ {
+ "epoch": 2.421245421245421,
+ "grad_norm": 33.49393844604492,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 0.9679,
+ "step": 661
+ },
+ {
+ "epoch": 2.4249084249084247,
+ "grad_norm": 28.741533279418945,
+ "learning_rate": 5.052503052503053e-05,
+ "loss": 0.7928,
+ "step": 662
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 28.89700698852539,
+ "learning_rate": 5.05006105006105e-05,
+ "loss": 0.7594,
+ "step": 663
+ },
+ {
+ "epoch": 2.4322344322344325,
+ "grad_norm": 4.59797477722168,
+ "learning_rate": 5.047619047619048e-05,
+ "loss": 0.0584,
+ "step": 664
+ },
+ {
+ "epoch": 2.435897435897436,
+ "grad_norm": 29.852828979492188,
+ "learning_rate": 5.045177045177046e-05,
+ "loss": 0.614,
+ "step": 665
+ },
+ {
+ "epoch": 2.4395604395604398,
+ "grad_norm": 15.132670402526855,
+ "learning_rate": 5.042735042735043e-05,
+ "loss": 0.2353,
+ "step": 666
+ },
+ {
+ "epoch": 2.4432234432234434,
+ "grad_norm": 23.85403060913086,
+ "learning_rate": 5.04029304029304e-05,
+ "loss": 0.9065,
+ "step": 667
+ },
+ {
+ "epoch": 2.446886446886447,
+ "grad_norm": 12.384196281433105,
+ "learning_rate": 5.037851037851038e-05,
+ "loss": 0.2065,
+ "step": 668
+ },
+ {
+ "epoch": 2.4505494505494507,
+ "grad_norm": 18.347129821777344,
+ "learning_rate": 5.035409035409035e-05,
+ "loss": 0.647,
+ "step": 669
+ },
+ {
+ "epoch": 2.4542124542124544,
+ "grad_norm": 18.645936965942383,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2072,
+ "step": 670
+ },
+ {
+ "epoch": 2.457875457875458,
+ "grad_norm": 9.493071556091309,
+ "learning_rate": 5.03052503052503e-05,
+ "loss": 0.1805,
+ "step": 671
+ },
+ {
+ "epoch": 2.4615384615384617,
+ "grad_norm": 18.552539825439453,
+ "learning_rate": 5.028083028083028e-05,
+ "loss": 0.4078,
+ "step": 672
+ },
+ {
+ "epoch": 2.4652014652014653,
+ "grad_norm": 21.735048294067383,
+ "learning_rate": 5.025641025641026e-05,
+ "loss": 0.4231,
+ "step": 673
+ },
+ {
+ "epoch": 2.468864468864469,
+ "grad_norm": 54.32040023803711,
+ "learning_rate": 5.023199023199023e-05,
+ "loss": 1.3927,
+ "step": 674
+ },
+ {
+ "epoch": 2.4725274725274726,
+ "grad_norm": 26.955970764160156,
+ "learning_rate": 5.020757020757021e-05,
+ "loss": 0.6899,
+ "step": 675
+ },
+ {
+ "epoch": 2.4761904761904763,
+ "grad_norm": 43.423526763916016,
+ "learning_rate": 5.018315018315019e-05,
+ "loss": 1.2084,
+ "step": 676
+ },
+ {
+ "epoch": 2.47985347985348,
+ "grad_norm": 35.98548126220703,
+ "learning_rate": 5.015873015873016e-05,
+ "loss": 1.5047,
+ "step": 677
+ },
+ {
+ "epoch": 2.4835164835164836,
+ "grad_norm": 22.593570709228516,
+ "learning_rate": 5.013431013431014e-05,
+ "loss": 0.6918,
+ "step": 678
+ },
+ {
+ "epoch": 2.4871794871794872,
+ "grad_norm": 21.29257583618164,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.3578,
+ "step": 679
+ },
+ {
+ "epoch": 2.490842490842491,
+ "grad_norm": 21.672088623046875,
+ "learning_rate": 5.008547008547008e-05,
+ "loss": 0.7757,
+ "step": 680
+ },
+ {
+ "epoch": 2.4945054945054945,
+ "grad_norm": 9.625850677490234,
+ "learning_rate": 5.006105006105006e-05,
+ "loss": 0.1329,
+ "step": 681
+ },
+ {
+ "epoch": 2.498168498168498,
+ "grad_norm": 16.92123794555664,
+ "learning_rate": 5.003663003663004e-05,
+ "loss": 0.5599,
+ "step": 682
+ },
+ {
+ "epoch": 2.501831501831502,
+ "grad_norm": 15.665925025939941,
+ "learning_rate": 5.001221001221001e-05,
+ "loss": 0.3099,
+ "step": 683
+ },
+ {
+ "epoch": 2.5054945054945055,
+ "grad_norm": 21.316635131835938,
+ "learning_rate": 4.998778998778999e-05,
+ "loss": 0.5746,
+ "step": 684
+ },
+ {
+ "epoch": 2.509157509157509,
+ "grad_norm": 24.99594497680664,
+ "learning_rate": 4.996336996336997e-05,
+ "loss": 1.1274,
+ "step": 685
+ },
+ {
+ "epoch": 2.5128205128205128,
+ "grad_norm": 29.795175552368164,
+ "learning_rate": 4.993894993894994e-05,
+ "loss": 0.9991,
+ "step": 686
+ },
+ {
+ "epoch": 2.5164835164835164,
+ "grad_norm": 16.337533950805664,
+ "learning_rate": 4.991452991452992e-05,
+ "loss": 0.4101,
+ "step": 687
+ },
+ {
+ "epoch": 2.52014652014652,
+ "grad_norm": 20.065715789794922,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.7786,
+ "step": 688
+ },
+ {
+ "epoch": 2.5238095238095237,
+ "grad_norm": 19.341567993164062,
+ "learning_rate": 4.986568986568987e-05,
+ "loss": 0.4989,
+ "step": 689
+ },
+ {
+ "epoch": 2.5274725274725274,
+ "grad_norm": 14.688420295715332,
+ "learning_rate": 4.9841269841269845e-05,
+ "loss": 0.4081,
+ "step": 690
+ },
+ {
+ "epoch": 2.531135531135531,
+ "grad_norm": 39.346012115478516,
+ "learning_rate": 4.9816849816849824e-05,
+ "loss": 1.7919,
+ "step": 691
+ },
+ {
+ "epoch": 2.5347985347985347,
+ "grad_norm": 21.353286743164062,
+ "learning_rate": 4.9792429792429796e-05,
+ "loss": 0.698,
+ "step": 692
+ },
+ {
+ "epoch": 2.5384615384615383,
+ "grad_norm": 35.96653366088867,
+ "learning_rate": 4.976800976800977e-05,
+ "loss": 1.6584,
+ "step": 693
+ },
+ {
+ "epoch": 2.542124542124542,
+ "grad_norm": 19.14348793029785,
+ "learning_rate": 4.9743589743589746e-05,
+ "loss": 0.885,
+ "step": 694
+ },
+ {
+ "epoch": 2.5457875457875456,
+ "grad_norm": 9.260897636413574,
+ "learning_rate": 4.971916971916972e-05,
+ "loss": 0.1629,
+ "step": 695
+ },
+ {
+ "epoch": 2.5494505494505493,
+ "grad_norm": 18.497526168823242,
+ "learning_rate": 4.9694749694749696e-05,
+ "loss": 0.7242,
+ "step": 696
+ },
+ {
+ "epoch": 2.553113553113553,
+ "grad_norm": 8.879841804504395,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 0.1302,
+ "step": 697
+ },
+ {
+ "epoch": 2.5567765567765566,
+ "grad_norm": 26.34065818786621,
+ "learning_rate": 4.9645909645909646e-05,
+ "loss": 0.7333,
+ "step": 698
+ },
+ {
+ "epoch": 2.5604395604395602,
+ "grad_norm": 15.10546588897705,
+ "learning_rate": 4.9621489621489624e-05,
+ "loss": 0.3119,
+ "step": 699
+ },
+ {
+ "epoch": 2.564102564102564,
+ "grad_norm": 10.68095874786377,
+ "learning_rate": 4.9597069597069596e-05,
+ "loss": 0.2505,
+ "step": 700
+ },
+ {
+ "epoch": 2.5677655677655675,
+ "grad_norm": 29.08888053894043,
+ "learning_rate": 4.9572649572649575e-05,
+ "loss": 0.4286,
+ "step": 701
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 29.939416885375977,
+ "learning_rate": 4.954822954822955e-05,
+ "loss": 1.1529,
+ "step": 702
+ },
+ {
+ "epoch": 2.575091575091575,
+ "grad_norm": 32.78864669799805,
+ "learning_rate": 4.9523809523809525e-05,
+ "loss": 0.9834,
+ "step": 703
+ },
+ {
+ "epoch": 2.578754578754579,
+ "grad_norm": 13.99082088470459,
+ "learning_rate": 4.94993894993895e-05,
+ "loss": 0.1934,
+ "step": 704
+ },
+ {
+ "epoch": 2.5824175824175826,
+ "grad_norm": 31.696718215942383,
+ "learning_rate": 4.9474969474969475e-05,
+ "loss": 0.6881,
+ "step": 705
+ },
+ {
+ "epoch": 2.586080586080586,
+ "grad_norm": 39.26205062866211,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.573,
+ "step": 706
+ },
+ {
+ "epoch": 2.58974358974359,
+ "grad_norm": 42.08647918701172,
+ "learning_rate": 4.9426129426129425e-05,
+ "loss": 1.5935,
+ "step": 707
+ },
+ {
+ "epoch": 2.5934065934065935,
+ "grad_norm": 24.630651473999023,
+ "learning_rate": 4.94017094017094e-05,
+ "loss": 0.7016,
+ "step": 708
+ },
+ {
+ "epoch": 2.597069597069597,
+ "grad_norm": 35.33428192138672,
+ "learning_rate": 4.9377289377289375e-05,
+ "loss": 0.9646,
+ "step": 709
+ },
+ {
+ "epoch": 2.600732600732601,
+ "grad_norm": 21.643918991088867,
+ "learning_rate": 4.9352869352869353e-05,
+ "loss": 0.3679,
+ "step": 710
+ },
+ {
+ "epoch": 2.6043956043956045,
+ "grad_norm": 10.6254301071167,
+ "learning_rate": 4.932844932844933e-05,
+ "loss": 0.1059,
+ "step": 711
+ },
+ {
+ "epoch": 2.608058608058608,
+ "grad_norm": 23.43462562561035,
+ "learning_rate": 4.9304029304029304e-05,
+ "loss": 0.5128,
+ "step": 712
+ },
+ {
+ "epoch": 2.6117216117216118,
+ "grad_norm": 25.748422622680664,
+ "learning_rate": 4.927960927960928e-05,
+ "loss": 0.6154,
+ "step": 713
+ },
+ {
+ "epoch": 2.6153846153846154,
+ "grad_norm": 23.163209915161133,
+ "learning_rate": 4.925518925518926e-05,
+ "loss": 0.3978,
+ "step": 714
+ },
+ {
+ "epoch": 2.619047619047619,
+ "grad_norm": 22.306194305419922,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.3984,
+ "step": 715
+ },
+ {
+ "epoch": 2.6227106227106227,
+ "grad_norm": 48.16558074951172,
+ "learning_rate": 4.920634920634921e-05,
+ "loss": 0.9568,
+ "step": 716
+ },
+ {
+ "epoch": 2.6263736263736264,
+ "grad_norm": 48.76753234863281,
+ "learning_rate": 4.918192918192919e-05,
+ "loss": 0.6579,
+ "step": 717
+ },
+ {
+ "epoch": 2.63003663003663,
+ "grad_norm": 57.938720703125,
+ "learning_rate": 4.9157509157509154e-05,
+ "loss": 1.0926,
+ "step": 718
+ },
+ {
+ "epoch": 2.6336996336996337,
+ "grad_norm": 25.495267868041992,
+ "learning_rate": 4.913308913308913e-05,
+ "loss": 0.3717,
+ "step": 719
+ },
+ {
+ "epoch": 2.6373626373626373,
+ "grad_norm": 20.054609298706055,
+ "learning_rate": 4.910866910866911e-05,
+ "loss": 0.4502,
+ "step": 720
+ },
+ {
+ "epoch": 2.641025641025641,
+ "grad_norm": 23.096263885498047,
+ "learning_rate": 4.908424908424908e-05,
+ "loss": 0.2794,
+ "step": 721
+ },
+ {
+ "epoch": 2.6446886446886446,
+ "grad_norm": 6.073278903961182,
+ "learning_rate": 4.905982905982906e-05,
+ "loss": 0.0519,
+ "step": 722
+ },
+ {
+ "epoch": 2.6483516483516483,
+ "grad_norm": 38.562618255615234,
+ "learning_rate": 4.903540903540903e-05,
+ "loss": 0.8839,
+ "step": 723
+ },
+ {
+ "epoch": 2.652014652014652,
+ "grad_norm": 23.544757843017578,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.3935,
+ "step": 724
+ },
+ {
+ "epoch": 2.6556776556776556,
+ "grad_norm": 22.844032287597656,
+ "learning_rate": 4.898656898656899e-05,
+ "loss": 0.2428,
+ "step": 725
+ },
+ {
+ "epoch": 2.659340659340659,
+ "grad_norm": 11.537687301635742,
+ "learning_rate": 4.896214896214896e-05,
+ "loss": 0.1538,
+ "step": 726
+ },
+ {
+ "epoch": 2.663003663003663,
+ "grad_norm": 59.37337112426758,
+ "learning_rate": 4.893772893772894e-05,
+ "loss": 1.181,
+ "step": 727
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 22.206314086914062,
+ "learning_rate": 4.891330891330892e-05,
+ "loss": 0.4044,
+ "step": 728
+ },
+ {
+ "epoch": 2.67032967032967,
+ "grad_norm": 27.44620132446289,
+ "learning_rate": 4.888888888888889e-05,
+ "loss": 0.585,
+ "step": 729
+ },
+ {
+ "epoch": 2.6739926739926743,
+ "grad_norm": 35.70675277709961,
+ "learning_rate": 4.886446886446887e-05,
+ "loss": 0.6853,
+ "step": 730
+ },
+ {
+ "epoch": 2.677655677655678,
+ "grad_norm": 25.653356552124023,
+ "learning_rate": 4.884004884004884e-05,
+ "loss": 0.6143,
+ "step": 731
+ },
+ {
+ "epoch": 2.6813186813186816,
+ "grad_norm": 24.242090225219727,
+ "learning_rate": 4.881562881562881e-05,
+ "loss": 0.4365,
+ "step": 732
+ },
+ {
+ "epoch": 2.684981684981685,
+ "grad_norm": 25.621902465820312,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.6644,
+ "step": 733
+ },
+ {
+ "epoch": 2.688644688644689,
+ "grad_norm": 14.14786434173584,
+ "learning_rate": 4.876678876678877e-05,
+ "loss": 0.4117,
+ "step": 734
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 37.98638916015625,
+ "learning_rate": 4.874236874236874e-05,
+ "loss": 1.0452,
+ "step": 735
+ },
+ {
+ "epoch": 2.695970695970696,
+ "grad_norm": 23.186302185058594,
+ "learning_rate": 4.871794871794872e-05,
+ "loss": 0.2642,
+ "step": 736
+ },
+ {
+ "epoch": 2.6996336996337,
+ "grad_norm": 27.23651695251465,
+ "learning_rate": 4.86935286935287e-05,
+ "loss": 0.393,
+ "step": 737
+ },
+ {
+ "epoch": 2.7032967032967035,
+ "grad_norm": 36.44395446777344,
+ "learning_rate": 4.866910866910867e-05,
+ "loss": 1.1309,
+ "step": 738
+ },
+ {
+ "epoch": 2.706959706959707,
+ "grad_norm": 9.733710289001465,
+ "learning_rate": 4.864468864468865e-05,
+ "loss": 0.2466,
+ "step": 739
+ },
+ {
+ "epoch": 2.7106227106227108,
+ "grad_norm": 24.727527618408203,
+ "learning_rate": 4.8620268620268626e-05,
+ "loss": 0.46,
+ "step": 740
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 15.122056007385254,
+ "learning_rate": 4.85958485958486e-05,
+ "loss": 0.3122,
+ "step": 741
+ },
+ {
+ "epoch": 2.717948717948718,
+ "grad_norm": 24.059120178222656,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.2359,
+ "step": 742
+ },
+ {
+ "epoch": 2.7216117216117217,
+ "grad_norm": 7.659122467041016,
+ "learning_rate": 4.8547008547008554e-05,
+ "loss": 0.1212,
+ "step": 743
+ },
+ {
+ "epoch": 2.7252747252747254,
+ "grad_norm": 27.002117156982422,
+ "learning_rate": 4.852258852258852e-05,
+ "loss": 0.7593,
+ "step": 744
+ },
+ {
+ "epoch": 2.728937728937729,
+ "grad_norm": 6.3852009773254395,
+ "learning_rate": 4.84981684981685e-05,
+ "loss": 0.0644,
+ "step": 745
+ },
+ {
+ "epoch": 2.7326007326007327,
+ "grad_norm": 25.574190139770508,
+ "learning_rate": 4.8473748473748476e-05,
+ "loss": 0.7012,
+ "step": 746
+ },
+ {
+ "epoch": 2.7362637362637363,
+ "grad_norm": 15.720768928527832,
+ "learning_rate": 4.844932844932845e-05,
+ "loss": 0.2692,
+ "step": 747
+ },
+ {
+ "epoch": 2.73992673992674,
+ "grad_norm": 25.527997970581055,
+ "learning_rate": 4.8424908424908426e-05,
+ "loss": 0.2648,
+ "step": 748
+ },
+ {
+ "epoch": 2.7435897435897436,
+ "grad_norm": 27.791011810302734,
+ "learning_rate": 4.84004884004884e-05,
+ "loss": 0.6007,
+ "step": 749
+ },
+ {
+ "epoch": 2.7472527472527473,
+ "grad_norm": 20.487640380859375,
+ "learning_rate": 4.8376068376068376e-05,
+ "loss": 0.5715,
+ "step": 750
+ },
+ {
+ "epoch": 2.750915750915751,
+ "grad_norm": 6.386992454528809,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 0.06,
+ "step": 751
+ },
+ {
+ "epoch": 2.7545787545787546,
+ "grad_norm": 13.110812187194824,
+ "learning_rate": 4.8327228327228327e-05,
+ "loss": 0.129,
+ "step": 752
+ },
+ {
+ "epoch": 2.758241758241758,
+ "grad_norm": 26.55845832824707,
+ "learning_rate": 4.8302808302808305e-05,
+ "loss": 0.67,
+ "step": 753
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 38.83135223388672,
+ "learning_rate": 4.8278388278388283e-05,
+ "loss": 1.6656,
+ "step": 754
+ },
+ {
+ "epoch": 2.7655677655677655,
+ "grad_norm": 25.99518585205078,
+ "learning_rate": 4.8253968253968255e-05,
+ "loss": 0.3285,
+ "step": 755
+ },
+ {
+ "epoch": 2.769230769230769,
+ "grad_norm": 17.282081604003906,
+ "learning_rate": 4.8229548229548234e-05,
+ "loss": 0.2217,
+ "step": 756
+ },
+ {
+ "epoch": 2.772893772893773,
+ "grad_norm": 28.849924087524414,
+ "learning_rate": 4.8205128205128205e-05,
+ "loss": 0.7287,
+ "step": 757
+ },
+ {
+ "epoch": 2.7765567765567765,
+ "grad_norm": 45.79567337036133,
+ "learning_rate": 4.818070818070818e-05,
+ "loss": 1.6964,
+ "step": 758
+ },
+ {
+ "epoch": 2.78021978021978,
+ "grad_norm": 15.203421592712402,
+ "learning_rate": 4.8156288156288155e-05,
+ "loss": 0.2351,
+ "step": 759
+ },
+ {
+ "epoch": 2.7838827838827838,
+ "grad_norm": 10.686698913574219,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.1533,
+ "step": 760
+ },
+ {
+ "epoch": 2.7875457875457874,
+ "grad_norm": 24.186473846435547,
+ "learning_rate": 4.8107448107448106e-05,
+ "loss": 1.0973,
+ "step": 761
+ },
+ {
+ "epoch": 2.791208791208791,
+ "grad_norm": 25.378986358642578,
+ "learning_rate": 4.8083028083028084e-05,
+ "loss": 0.5847,
+ "step": 762
+ },
+ {
+ "epoch": 2.7948717948717947,
+ "grad_norm": 20.066482543945312,
+ "learning_rate": 4.805860805860806e-05,
+ "loss": 0.2643,
+ "step": 763
+ },
+ {
+ "epoch": 2.7985347985347984,
+ "grad_norm": 56.11622619628906,
+ "learning_rate": 4.8034188034188034e-05,
+ "loss": 0.6949,
+ "step": 764
+ },
+ {
+ "epoch": 2.802197802197802,
+ "grad_norm": 27.80112648010254,
+ "learning_rate": 4.800976800976801e-05,
+ "loss": 0.5622,
+ "step": 765
+ },
+ {
+ "epoch": 2.8058608058608057,
+ "grad_norm": 30.947532653808594,
+ "learning_rate": 4.798534798534799e-05,
+ "loss": 0.6276,
+ "step": 766
+ },
+ {
+ "epoch": 2.8095238095238093,
+ "grad_norm": 8.91073226928711,
+ "learning_rate": 4.796092796092796e-05,
+ "loss": 0.1302,
+ "step": 767
+ },
+ {
+ "epoch": 2.813186813186813,
+ "grad_norm": 24.65394401550293,
+ "learning_rate": 4.793650793650794e-05,
+ "loss": 0.6811,
+ "step": 768
+ },
+ {
+ "epoch": 2.8168498168498166,
+ "grad_norm": 18.257539749145508,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 0.271,
+ "step": 769
+ },
+ {
+ "epoch": 2.8205128205128203,
+ "grad_norm": 41.41588592529297,
+ "learning_rate": 4.7887667887667884e-05,
+ "loss": 1.4149,
+ "step": 770
+ },
+ {
+ "epoch": 2.824175824175824,
+ "grad_norm": 7.753188610076904,
+ "learning_rate": 4.786324786324786e-05,
+ "loss": 0.0825,
+ "step": 771
+ },
+ {
+ "epoch": 2.8278388278388276,
+ "grad_norm": 208.88290405273438,
+ "learning_rate": 4.783882783882784e-05,
+ "loss": 1.032,
+ "step": 772
+ },
+ {
+ "epoch": 2.8315018315018317,
+ "grad_norm": 31.91672706604004,
+ "learning_rate": 4.781440781440781e-05,
+ "loss": 0.9783,
+ "step": 773
+ },
+ {
+ "epoch": 2.8351648351648353,
+ "grad_norm": 5.72416877746582,
+ "learning_rate": 4.778998778998779e-05,
+ "loss": 0.0399,
+ "step": 774
+ },
+ {
+ "epoch": 2.838827838827839,
+ "grad_norm": 30.503149032592773,
+ "learning_rate": 4.776556776556776e-05,
+ "loss": 0.6465,
+ "step": 775
+ },
+ {
+ "epoch": 2.8424908424908426,
+ "grad_norm": 29.615020751953125,
+ "learning_rate": 4.774114774114774e-05,
+ "loss": 0.5823,
+ "step": 776
+ },
+ {
+ "epoch": 2.8461538461538463,
+ "grad_norm": 49.922611236572266,
+ "learning_rate": 4.771672771672772e-05,
+ "loss": 1.2045,
+ "step": 777
+ },
+ {
+ "epoch": 2.84981684981685,
+ "grad_norm": 23.30948829650879,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.5962,
+ "step": 778
+ },
+ {
+ "epoch": 2.8534798534798536,
+ "grad_norm": 24.784086227416992,
+ "learning_rate": 4.766788766788767e-05,
+ "loss": 0.5702,
+ "step": 779
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 30.03589630126953,
+ "learning_rate": 4.764346764346765e-05,
+ "loss": 0.8644,
+ "step": 780
+ },
+ {
+ "epoch": 2.860805860805861,
+ "grad_norm": 21.079742431640625,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 0.2304,
+ "step": 781
+ },
+ {
+ "epoch": 2.8644688644688645,
+ "grad_norm": 18.438365936279297,
+ "learning_rate": 4.75946275946276e-05,
+ "loss": 0.6457,
+ "step": 782
+ },
+ {
+ "epoch": 2.868131868131868,
+ "grad_norm": 16.265140533447266,
+ "learning_rate": 4.757020757020757e-05,
+ "loss": 0.3693,
+ "step": 783
+ },
+ {
+ "epoch": 2.871794871794872,
+ "grad_norm": 17.526954650878906,
+ "learning_rate": 4.754578754578754e-05,
+ "loss": 0.2614,
+ "step": 784
+ },
+ {
+ "epoch": 2.8754578754578755,
+ "grad_norm": 39.94060134887695,
+ "learning_rate": 4.752136752136752e-05,
+ "loss": 0.2829,
+ "step": 785
+ },
+ {
+ "epoch": 2.879120879120879,
+ "grad_norm": 10.09298324584961,
+ "learning_rate": 4.74969474969475e-05,
+ "loss": 0.1489,
+ "step": 786
+ },
+ {
+ "epoch": 2.8827838827838828,
+ "grad_norm": 29.092544555664062,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6063,
+ "step": 787
+ },
+ {
+ "epoch": 2.8864468864468864,
+ "grad_norm": 30.071422576904297,
+ "learning_rate": 4.744810744810745e-05,
+ "loss": 0.3154,
+ "step": 788
+ },
+ {
+ "epoch": 2.89010989010989,
+ "grad_norm": 26.271251678466797,
+ "learning_rate": 4.742368742368743e-05,
+ "loss": 0.4548,
+ "step": 789
+ },
+ {
+ "epoch": 2.8937728937728937,
+ "grad_norm": 32.386775970458984,
+ "learning_rate": 4.73992673992674e-05,
+ "loss": 0.1872,
+ "step": 790
+ },
+ {
+ "epoch": 2.8974358974358974,
+ "grad_norm": 31.18532943725586,
+ "learning_rate": 4.737484737484738e-05,
+ "loss": 0.847,
+ "step": 791
+ },
+ {
+ "epoch": 2.901098901098901,
+ "grad_norm": 17.924785614013672,
+ "learning_rate": 4.7350427350427356e-05,
+ "loss": 0.1588,
+ "step": 792
+ },
+ {
+ "epoch": 2.9047619047619047,
+ "grad_norm": 16.458614349365234,
+ "learning_rate": 4.732600732600733e-05,
+ "loss": 0.1424,
+ "step": 793
+ },
+ {
+ "epoch": 2.9084249084249083,
+ "grad_norm": 50.29280471801758,
+ "learning_rate": 4.7301587301587306e-05,
+ "loss": 1.5482,
+ "step": 794
+ },
+ {
+ "epoch": 2.912087912087912,
+ "grad_norm": 58.37470245361328,
+ "learning_rate": 4.727716727716728e-05,
+ "loss": 1.8242,
+ "step": 795
+ },
+ {
+ "epoch": 2.9157509157509156,
+ "grad_norm": 32.5267448425293,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 1.1197,
+ "step": 796
+ },
+ {
+ "epoch": 2.9194139194139193,
+ "grad_norm": 43.77764892578125,
+ "learning_rate": 4.722832722832723e-05,
+ "loss": 0.7322,
+ "step": 797
+ },
+ {
+ "epoch": 2.9230769230769234,
+ "grad_norm": 25.303524017333984,
+ "learning_rate": 4.720390720390721e-05,
+ "loss": 0.6557,
+ "step": 798
+ },
+ {
+ "epoch": 2.926739926739927,
+ "grad_norm": 23.90159797668457,
+ "learning_rate": 4.717948717948718e-05,
+ "loss": 0.2669,
+ "step": 799
+ },
+ {
+ "epoch": 2.9304029304029307,
+ "grad_norm": 21.20945930480957,
+ "learning_rate": 4.715506715506716e-05,
+ "loss": 0.3279,
+ "step": 800
+ },
+ {
+ "epoch": 2.9340659340659343,
+ "grad_norm": 28.819482803344727,
+ "learning_rate": 4.713064713064713e-05,
+ "loss": 0.717,
+ "step": 801
+ },
+ {
+ "epoch": 2.937728937728938,
+ "grad_norm": 9.13611125946045,
+ "learning_rate": 4.710622710622711e-05,
+ "loss": 0.1291,
+ "step": 802
+ },
+ {
+ "epoch": 2.9413919413919416,
+ "grad_norm": 22.16252326965332,
+ "learning_rate": 4.7081807081807085e-05,
+ "loss": 0.4406,
+ "step": 803
+ },
+ {
+ "epoch": 2.9450549450549453,
+ "grad_norm": 47.73503112792969,
+ "learning_rate": 4.705738705738706e-05,
+ "loss": 0.6176,
+ "step": 804
+ },
+ {
+ "epoch": 2.948717948717949,
+ "grad_norm": 61.73493576049805,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.581,
+ "step": 805
+ },
+ {
+ "epoch": 2.9523809523809526,
+ "grad_norm": 22.48004722595215,
+ "learning_rate": 4.7008547008547014e-05,
+ "loss": 0.7404,
+ "step": 806
+ },
+ {
+ "epoch": 2.956043956043956,
+ "grad_norm": 54.2432746887207,
+ "learning_rate": 4.6984126984126986e-05,
+ "loss": 1.1522,
+ "step": 807
+ },
+ {
+ "epoch": 2.95970695970696,
+ "grad_norm": 26.221921920776367,
+ "learning_rate": 4.695970695970696e-05,
+ "loss": 0.4869,
+ "step": 808
+ },
+ {
+ "epoch": 2.9633699633699635,
+ "grad_norm": 21.688526153564453,
+ "learning_rate": 4.6935286935286936e-05,
+ "loss": 0.6639,
+ "step": 809
+ },
+ {
+ "epoch": 2.967032967032967,
+ "grad_norm": 5.81218147277832,
+ "learning_rate": 4.691086691086691e-05,
+ "loss": 0.0824,
+ "step": 810
+ },
+ {
+ "epoch": 2.970695970695971,
+ "grad_norm": 39.09580612182617,
+ "learning_rate": 4.6886446886446886e-05,
+ "loss": 1.5035,
+ "step": 811
+ },
+ {
+ "epoch": 2.9743589743589745,
+ "grad_norm": 24.587574005126953,
+ "learning_rate": 4.6862026862026864e-05,
+ "loss": 1.1107,
+ "step": 812
+ },
+ {
+ "epoch": 2.978021978021978,
+ "grad_norm": 25.25336265563965,
+ "learning_rate": 4.6837606837606836e-05,
+ "loss": 0.7764,
+ "step": 813
+ },
+ {
+ "epoch": 2.9816849816849818,
+ "grad_norm": 16.311378479003906,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.4079,
+ "step": 814
+ },
+ {
+ "epoch": 2.9853479853479854,
+ "grad_norm": 19.0888729095459,
+ "learning_rate": 4.678876678876679e-05,
+ "loss": 0.5259,
+ "step": 815
+ },
+ {
+ "epoch": 2.989010989010989,
+ "grad_norm": 24.599462509155273,
+ "learning_rate": 4.6764346764346765e-05,
+ "loss": 0.7475,
+ "step": 816
+ },
+ {
+ "epoch": 2.9926739926739927,
+ "grad_norm": 20.4777889251709,
+ "learning_rate": 4.673992673992674e-05,
+ "loss": 0.356,
+ "step": 817
+ },
+ {
+ "epoch": 2.9963369963369964,
+ "grad_norm": 30.4327449798584,
+ "learning_rate": 4.671550671550672e-05,
+ "loss": 0.7958,
+ "step": 818
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 25.57271385192871,
+ "learning_rate": 4.669108669108669e-05,
+ "loss": 0.3918,
+ "step": 819
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 3.9672563076019287,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 0.0469,
+ "step": 820
+ },
+ {
+ "epoch": 3.0073260073260073,
+ "grad_norm": 6.657567501068115,
+ "learning_rate": 4.664224664224664e-05,
+ "loss": 0.0939,
+ "step": 821
+ },
+ {
+ "epoch": 3.010989010989011,
+ "grad_norm": 12.558409690856934,
+ "learning_rate": 4.6617826617826615e-05,
+ "loss": 0.1578,
+ "step": 822
+ },
+ {
+ "epoch": 3.0146520146520146,
+ "grad_norm": 18.909244537353516,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.3209,
+ "step": 823
+ },
+ {
+ "epoch": 3.0183150183150182,
+ "grad_norm": 10.995687484741211,
+ "learning_rate": 4.656898656898657e-05,
+ "loss": 0.1198,
+ "step": 824
+ },
+ {
+ "epoch": 3.021978021978022,
+ "grad_norm": 16.14252471923828,
+ "learning_rate": 4.6544566544566544e-05,
+ "loss": 0.1431,
+ "step": 825
+ },
+ {
+ "epoch": 3.0256410256410255,
+ "grad_norm": 25.924381256103516,
+ "learning_rate": 4.652014652014652e-05,
+ "loss": 0.3989,
+ "step": 826
+ },
+ {
+ "epoch": 3.029304029304029,
+ "grad_norm": 4.87798547744751,
+ "learning_rate": 4.6495726495726494e-05,
+ "loss": 0.0472,
+ "step": 827
+ },
+ {
+ "epoch": 3.032967032967033,
+ "grad_norm": 15.078110694885254,
+ "learning_rate": 4.647130647130647e-05,
+ "loss": 0.1955,
+ "step": 828
+ },
+ {
+ "epoch": 3.0366300366300365,
+ "grad_norm": 19.74415397644043,
+ "learning_rate": 4.644688644688645e-05,
+ "loss": 0.1593,
+ "step": 829
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 43.4788818359375,
+ "learning_rate": 4.642246642246642e-05,
+ "loss": 0.7917,
+ "step": 830
+ },
+ {
+ "epoch": 3.043956043956044,
+ "grad_norm": 27.122041702270508,
+ "learning_rate": 4.63980463980464e-05,
+ "loss": 0.1693,
+ "step": 831
+ },
+ {
+ "epoch": 3.0476190476190474,
+ "grad_norm": 9.51154899597168,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 0.0806,
+ "step": 832
+ },
+ {
+ "epoch": 3.051282051282051,
+ "grad_norm": 11.48532772064209,
+ "learning_rate": 4.634920634920635e-05,
+ "loss": 0.0815,
+ "step": 833
+ },
+ {
+ "epoch": 3.0549450549450547,
+ "grad_norm": 13.547063827514648,
+ "learning_rate": 4.632478632478632e-05,
+ "loss": 0.0817,
+ "step": 834
+ },
+ {
+ "epoch": 3.0586080586080584,
+ "grad_norm": 24.334409713745117,
+ "learning_rate": 4.63003663003663e-05,
+ "loss": 0.547,
+ "step": 835
+ },
+ {
+ "epoch": 3.062271062271062,
+ "grad_norm": 87.3517837524414,
+ "learning_rate": 4.627594627594627e-05,
+ "loss": 0.6534,
+ "step": 836
+ },
+ {
+ "epoch": 3.065934065934066,
+ "grad_norm": 16.100278854370117,
+ "learning_rate": 4.625152625152625e-05,
+ "loss": 0.2961,
+ "step": 837
+ },
+ {
+ "epoch": 3.06959706959707,
+ "grad_norm": 20.725875854492188,
+ "learning_rate": 4.622710622710623e-05,
+ "loss": 0.1114,
+ "step": 838
+ },
+ {
+ "epoch": 3.0732600732600734,
+ "grad_norm": 53.809722900390625,
+ "learning_rate": 4.62026862026862e-05,
+ "loss": 0.3808,
+ "step": 839
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 3.237959623336792,
+ "learning_rate": 4.617826617826618e-05,
+ "loss": 0.019,
+ "step": 840
+ },
+ {
+ "epoch": 3.0805860805860807,
+ "grad_norm": 69.71659088134766,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 1.0945,
+ "step": 841
+ },
+ {
+ "epoch": 3.0842490842490844,
+ "grad_norm": 31.005935668945312,
+ "learning_rate": 4.612942612942613e-05,
+ "loss": 0.3241,
+ "step": 842
+ },
+ {
+ "epoch": 3.087912087912088,
+ "grad_norm": 66.98394775390625,
+ "learning_rate": 4.610500610500611e-05,
+ "loss": 1.0213,
+ "step": 843
+ },
+ {
+ "epoch": 3.0915750915750917,
+ "grad_norm": 23.54532814025879,
+ "learning_rate": 4.608058608058609e-05,
+ "loss": 0.2188,
+ "step": 844
+ },
+ {
+ "epoch": 3.0952380952380953,
+ "grad_norm": 25.952709197998047,
+ "learning_rate": 4.605616605616606e-05,
+ "loss": 0.4305,
+ "step": 845
+ },
+ {
+ "epoch": 3.098901098901099,
+ "grad_norm": 36.100746154785156,
+ "learning_rate": 4.603174603174604e-05,
+ "loss": 0.6497,
+ "step": 846
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 60.34727478027344,
+ "learning_rate": 4.600732600732601e-05,
+ "loss": 0.3083,
+ "step": 847
+ },
+ {
+ "epoch": 3.1062271062271063,
+ "grad_norm": 35.265167236328125,
+ "learning_rate": 4.598290598290598e-05,
+ "loss": 0.3222,
+ "step": 848
+ },
+ {
+ "epoch": 3.10989010989011,
+ "grad_norm": 19.180070877075195,
+ "learning_rate": 4.595848595848596e-05,
+ "loss": 0.4065,
+ "step": 849
+ },
+ {
+ "epoch": 3.1135531135531136,
+ "grad_norm": 22.92152976989746,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.3998,
+ "step": 850
+ },
+ {
+ "epoch": 3.1172161172161172,
+ "grad_norm": 48.91377639770508,
+ "learning_rate": 4.590964590964591e-05,
+ "loss": 0.7035,
+ "step": 851
+ },
+ {
+ "epoch": 3.120879120879121,
+ "grad_norm": 11.615083694458008,
+ "learning_rate": 4.588522588522589e-05,
+ "loss": 0.3102,
+ "step": 852
+ },
+ {
+ "epoch": 3.1245421245421245,
+ "grad_norm": 23.573801040649414,
+ "learning_rate": 4.586080586080586e-05,
+ "loss": 0.3358,
+ "step": 853
+ },
+ {
+ "epoch": 3.128205128205128,
+ "grad_norm": 16.903776168823242,
+ "learning_rate": 4.583638583638584e-05,
+ "loss": 0.2973,
+ "step": 854
+ },
+ {
+ "epoch": 3.131868131868132,
+ "grad_norm": 6.052688121795654,
+ "learning_rate": 4.5811965811965816e-05,
+ "loss": 0.0671,
+ "step": 855
+ },
+ {
+ "epoch": 3.1355311355311355,
+ "grad_norm": 34.40020751953125,
+ "learning_rate": 4.578754578754579e-05,
+ "loss": 0.508,
+ "step": 856
+ },
+ {
+ "epoch": 3.139194139194139,
+ "grad_norm": 21.39589500427246,
+ "learning_rate": 4.5763125763125766e-05,
+ "loss": 0.0805,
+ "step": 857
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 24.03894805908203,
+ "learning_rate": 4.5738705738705744e-05,
+ "loss": 0.1884,
+ "step": 858
+ },
+ {
+ "epoch": 3.1465201465201464,
+ "grad_norm": 66.53777313232422,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 0.5235,
+ "step": 859
+ },
+ {
+ "epoch": 3.15018315018315,
+ "grad_norm": 33.663490295410156,
+ "learning_rate": 4.568986568986569e-05,
+ "loss": 0.7579,
+ "step": 860
+ },
+ {
+ "epoch": 3.1538461538461537,
+ "grad_norm": 30.173309326171875,
+ "learning_rate": 4.5665445665445666e-05,
+ "loss": 0.2263,
+ "step": 861
+ },
+ {
+ "epoch": 3.1575091575091574,
+ "grad_norm": 37.52082824707031,
+ "learning_rate": 4.564102564102564e-05,
+ "loss": 0.5695,
+ "step": 862
+ },
+ {
+ "epoch": 3.161172161172161,
+ "grad_norm": 38.86849594116211,
+ "learning_rate": 4.5616605616605616e-05,
+ "loss": 0.6981,
+ "step": 863
+ },
+ {
+ "epoch": 3.1648351648351647,
+ "grad_norm": 42.702247619628906,
+ "learning_rate": 4.5592185592185595e-05,
+ "loss": 0.9864,
+ "step": 864
+ },
+ {
+ "epoch": 3.1684981684981683,
+ "grad_norm": 16.60870361328125,
+ "learning_rate": 4.5567765567765566e-05,
+ "loss": 0.1595,
+ "step": 865
+ },
+ {
+ "epoch": 3.172161172161172,
+ "grad_norm": 26.309768676757812,
+ "learning_rate": 4.5543345543345545e-05,
+ "loss": 0.4028,
+ "step": 866
+ },
+ {
+ "epoch": 3.1758241758241756,
+ "grad_norm": 45.7955322265625,
+ "learning_rate": 4.551892551892552e-05,
+ "loss": 1.1258,
+ "step": 867
+ },
+ {
+ "epoch": 3.1794871794871793,
+ "grad_norm": 25.780302047729492,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.4018,
+ "step": 868
+ },
+ {
+ "epoch": 3.183150183150183,
+ "grad_norm": 41.65156555175781,
+ "learning_rate": 4.5470085470085474e-05,
+ "loss": 0.4543,
+ "step": 869
+ },
+ {
+ "epoch": 3.186813186813187,
+ "grad_norm": 56.92537307739258,
+ "learning_rate": 4.544566544566545e-05,
+ "loss": 0.334,
+ "step": 870
+ },
+ {
+ "epoch": 3.1904761904761907,
+ "grad_norm": 19.44786262512207,
+ "learning_rate": 4.5421245421245424e-05,
+ "loss": 0.2855,
+ "step": 871
+ },
+ {
+ "epoch": 3.1941391941391943,
+ "grad_norm": 19.75824546813965,
+ "learning_rate": 4.53968253968254e-05,
+ "loss": 0.2589,
+ "step": 872
+ },
+ {
+ "epoch": 3.197802197802198,
+ "grad_norm": 30.935569763183594,
+ "learning_rate": 4.5372405372405374e-05,
+ "loss": 0.5083,
+ "step": 873
+ },
+ {
+ "epoch": 3.2014652014652016,
+ "grad_norm": 32.59378433227539,
+ "learning_rate": 4.5347985347985345e-05,
+ "loss": 0.6806,
+ "step": 874
+ },
+ {
+ "epoch": 3.2051282051282053,
+ "grad_norm": 32.7809944152832,
+ "learning_rate": 4.5323565323565324e-05,
+ "loss": 0.7094,
+ "step": 875
+ },
+ {
+ "epoch": 3.208791208791209,
+ "grad_norm": 22.95226287841797,
+ "learning_rate": 4.5299145299145296e-05,
+ "loss": 0.3871,
+ "step": 876
+ },
+ {
+ "epoch": 3.2124542124542126,
+ "grad_norm": 13.90613079071045,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.2049,
+ "step": 877
+ },
+ {
+ "epoch": 3.2161172161172162,
+ "grad_norm": 36.79647445678711,
+ "learning_rate": 4.525030525030525e-05,
+ "loss": 0.959,
+ "step": 878
+ },
+ {
+ "epoch": 3.21978021978022,
+ "grad_norm": 16.770553588867188,
+ "learning_rate": 4.5225885225885224e-05,
+ "loss": 0.3061,
+ "step": 879
+ },
+ {
+ "epoch": 3.2234432234432235,
+ "grad_norm": 22.241527557373047,
+ "learning_rate": 4.52014652014652e-05,
+ "loss": 0.1961,
+ "step": 880
+ },
+ {
+ "epoch": 3.227106227106227,
+ "grad_norm": 51.097957611083984,
+ "learning_rate": 4.517704517704518e-05,
+ "loss": 0.5272,
+ "step": 881
+ },
+ {
+ "epoch": 3.230769230769231,
+ "grad_norm": 43.70039749145508,
+ "learning_rate": 4.515262515262515e-05,
+ "loss": 0.6764,
+ "step": 882
+ },
+ {
+ "epoch": 3.2344322344322345,
+ "grad_norm": 30.666664123535156,
+ "learning_rate": 4.512820512820513e-05,
+ "loss": 0.6524,
+ "step": 883
+ },
+ {
+ "epoch": 3.238095238095238,
+ "grad_norm": 16.787954330444336,
+ "learning_rate": 4.510378510378511e-05,
+ "loss": 0.178,
+ "step": 884
+ },
+ {
+ "epoch": 3.241758241758242,
+ "grad_norm": 32.14992904663086,
+ "learning_rate": 4.507936507936508e-05,
+ "loss": 0.6206,
+ "step": 885
+ },
+ {
+ "epoch": 3.2454212454212454,
+ "grad_norm": 24.926103591918945,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.4696,
+ "step": 886
+ },
+ {
+ "epoch": 3.249084249084249,
+ "grad_norm": 31.044967651367188,
+ "learning_rate": 4.503052503052503e-05,
+ "loss": 0.3021,
+ "step": 887
+ },
+ {
+ "epoch": 3.2527472527472527,
+ "grad_norm": 10.355696678161621,
+ "learning_rate": 4.5006105006105e-05,
+ "loss": 0.0784,
+ "step": 888
+ },
+ {
+ "epoch": 3.2564102564102564,
+ "grad_norm": 28.19644546508789,
+ "learning_rate": 4.498168498168498e-05,
+ "loss": 0.234,
+ "step": 889
+ },
+ {
+ "epoch": 3.26007326007326,
+ "grad_norm": 21.245389938354492,
+ "learning_rate": 4.495726495726496e-05,
+ "loss": 0.2895,
+ "step": 890
+ },
+ {
+ "epoch": 3.2637362637362637,
+ "grad_norm": 27.337587356567383,
+ "learning_rate": 4.493284493284493e-05,
+ "loss": 0.4614,
+ "step": 891
+ },
+ {
+ "epoch": 3.2673992673992673,
+ "grad_norm": 37.06135177612305,
+ "learning_rate": 4.490842490842491e-05,
+ "loss": 0.2717,
+ "step": 892
+ },
+ {
+ "epoch": 3.271062271062271,
+ "grad_norm": 26.85171890258789,
+ "learning_rate": 4.488400488400489e-05,
+ "loss": 0.4965,
+ "step": 893
+ },
+ {
+ "epoch": 3.2747252747252746,
+ "grad_norm": 41.79130935668945,
+ "learning_rate": 4.485958485958486e-05,
+ "loss": 0.4209,
+ "step": 894
+ },
+ {
+ "epoch": 3.2783882783882783,
+ "grad_norm": 32.75770950317383,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 0.5126,
+ "step": 895
+ },
+ {
+ "epoch": 3.282051282051282,
+ "grad_norm": 67.75275421142578,
+ "learning_rate": 4.481074481074482e-05,
+ "loss": 0.8257,
+ "step": 896
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 36.773319244384766,
+ "learning_rate": 4.478632478632479e-05,
+ "loss": 1.6113,
+ "step": 897
+ },
+ {
+ "epoch": 3.2893772893772892,
+ "grad_norm": 60.94101333618164,
+ "learning_rate": 4.476190476190476e-05,
+ "loss": 0.7996,
+ "step": 898
+ },
+ {
+ "epoch": 3.293040293040293,
+ "grad_norm": 45.40288162231445,
+ "learning_rate": 4.473748473748474e-05,
+ "loss": 0.7139,
+ "step": 899
+ },
+ {
+ "epoch": 3.2967032967032965,
+ "grad_norm": 27.4019718170166,
+ "learning_rate": 4.471306471306471e-05,
+ "loss": 0.4695,
+ "step": 900
+ },
+ {
+ "epoch": 3.3003663003663,
+ "grad_norm": 20.126493453979492,
+ "learning_rate": 4.468864468864469e-05,
+ "loss": 0.2181,
+ "step": 901
+ },
+ {
+ "epoch": 3.304029304029304,
+ "grad_norm": 37.28034591674805,
+ "learning_rate": 4.466422466422466e-05,
+ "loss": 0.8902,
+ "step": 902
+ },
+ {
+ "epoch": 3.3076923076923075,
+ "grad_norm": 15.40217113494873,
+ "learning_rate": 4.463980463980464e-05,
+ "loss": 0.2428,
+ "step": 903
+ },
+ {
+ "epoch": 3.311355311355311,
+ "grad_norm": 21.924699783325195,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 0.3271,
+ "step": 904
+ },
+ {
+ "epoch": 3.315018315018315,
+ "grad_norm": 29.787410736083984,
+ "learning_rate": 4.459096459096459e-05,
+ "loss": 0.5914,
+ "step": 905
+ },
+ {
+ "epoch": 3.3186813186813184,
+ "grad_norm": 16.91995620727539,
+ "learning_rate": 4.456654456654457e-05,
+ "loss": 0.3442,
+ "step": 906
+ },
+ {
+ "epoch": 3.3223443223443225,
+ "grad_norm": 13.232250213623047,
+ "learning_rate": 4.4542124542124546e-05,
+ "loss": 0.1977,
+ "step": 907
+ },
+ {
+ "epoch": 3.326007326007326,
+ "grad_norm": 25.45724868774414,
+ "learning_rate": 4.451770451770452e-05,
+ "loss": 0.8241,
+ "step": 908
+ },
+ {
+ "epoch": 3.32967032967033,
+ "grad_norm": 20.996292114257812,
+ "learning_rate": 4.4493284493284496e-05,
+ "loss": 0.3154,
+ "step": 909
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 28.150684356689453,
+ "learning_rate": 4.4468864468864475e-05,
+ "loss": 0.4077,
+ "step": 910
+ },
+ {
+ "epoch": 3.336996336996337,
+ "grad_norm": 57.184322357177734,
+ "learning_rate": 4.444444444444444e-05,
+ "loss": 0.5701,
+ "step": 911
+ },
+ {
+ "epoch": 3.340659340659341,
+ "grad_norm": 26.231369018554688,
+ "learning_rate": 4.442002442002442e-05,
+ "loss": 0.4427,
+ "step": 912
+ },
+ {
+ "epoch": 3.3443223443223444,
+ "grad_norm": 32.52253723144531,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 1.014,
+ "step": 913
+ },
+ {
+ "epoch": 3.347985347985348,
+ "grad_norm": 19.39035987854004,
+ "learning_rate": 4.437118437118437e-05,
+ "loss": 0.1567,
+ "step": 914
+ },
+ {
+ "epoch": 3.3516483516483517,
+ "grad_norm": 24.542327880859375,
+ "learning_rate": 4.434676434676435e-05,
+ "loss": 0.5478,
+ "step": 915
+ },
+ {
+ "epoch": 3.3553113553113554,
+ "grad_norm": 46.6158447265625,
+ "learning_rate": 4.4322344322344325e-05,
+ "loss": 0.5636,
+ "step": 916
+ },
+ {
+ "epoch": 3.358974358974359,
+ "grad_norm": 36.008846282958984,
+ "learning_rate": 4.42979242979243e-05,
+ "loss": 0.4401,
+ "step": 917
+ },
+ {
+ "epoch": 3.3626373626373627,
+ "grad_norm": 6.922544956207275,
+ "learning_rate": 4.4273504273504275e-05,
+ "loss": 0.0885,
+ "step": 918
+ },
+ {
+ "epoch": 3.3663003663003663,
+ "grad_norm": 25.707748413085938,
+ "learning_rate": 4.4249084249084254e-05,
+ "loss": 0.3235,
+ "step": 919
+ },
+ {
+ "epoch": 3.36996336996337,
+ "grad_norm": 47.98778533935547,
+ "learning_rate": 4.4224664224664226e-05,
+ "loss": 1.3738,
+ "step": 920
+ },
+ {
+ "epoch": 3.3736263736263736,
+ "grad_norm": 26.64824104309082,
+ "learning_rate": 4.4200244200244204e-05,
+ "loss": 0.8405,
+ "step": 921
+ },
+ {
+ "epoch": 3.3772893772893773,
+ "grad_norm": 30.66206169128418,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.3021,
+ "step": 922
+ },
+ {
+ "epoch": 3.380952380952381,
+ "grad_norm": 33.15909194946289,
+ "learning_rate": 4.4151404151404154e-05,
+ "loss": 0.3064,
+ "step": 923
+ },
+ {
+ "epoch": 3.3846153846153846,
+ "grad_norm": 78.46485137939453,
+ "learning_rate": 4.4126984126984126e-05,
+ "loss": 0.6526,
+ "step": 924
+ },
+ {
+ "epoch": 3.3882783882783882,
+ "grad_norm": 45.584747314453125,
+ "learning_rate": 4.4102564102564104e-05,
+ "loss": 0.9546,
+ "step": 925
+ },
+ {
+ "epoch": 3.391941391941392,
+ "grad_norm": 23.244487762451172,
+ "learning_rate": 4.4078144078144076e-05,
+ "loss": 0.3334,
+ "step": 926
+ },
+ {
+ "epoch": 3.3956043956043955,
+ "grad_norm": 9.296119689941406,
+ "learning_rate": 4.4053724053724054e-05,
+ "loss": 0.1045,
+ "step": 927
+ },
+ {
+ "epoch": 3.399267399267399,
+ "grad_norm": 15.207316398620605,
+ "learning_rate": 4.4029304029304026e-05,
+ "loss": 0.087,
+ "step": 928
+ },
+ {
+ "epoch": 3.402930402930403,
+ "grad_norm": 20.554912567138672,
+ "learning_rate": 4.4004884004884005e-05,
+ "loss": 0.2658,
+ "step": 929
+ },
+ {
+ "epoch": 3.4065934065934065,
+ "grad_norm": 25.304515838623047,
+ "learning_rate": 4.398046398046398e-05,
+ "loss": 0.2862,
+ "step": 930
+ },
+ {
+ "epoch": 3.41025641025641,
+ "grad_norm": 44.320377349853516,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 1.1972,
+ "step": 931
+ },
+ {
+ "epoch": 3.413919413919414,
+ "grad_norm": 21.3024845123291,
+ "learning_rate": 4.393162393162393e-05,
+ "loss": 0.2193,
+ "step": 932
+ },
+ {
+ "epoch": 3.4175824175824174,
+ "grad_norm": 12.274759292602539,
+ "learning_rate": 4.390720390720391e-05,
+ "loss": 0.1033,
+ "step": 933
+ },
+ {
+ "epoch": 3.421245421245421,
+ "grad_norm": 29.188446044921875,
+ "learning_rate": 4.388278388278388e-05,
+ "loss": 0.8143,
+ "step": 934
+ },
+ {
+ "epoch": 3.4249084249084247,
+ "grad_norm": 11.880194664001465,
+ "learning_rate": 4.385836385836386e-05,
+ "loss": 0.0932,
+ "step": 935
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 28.859825134277344,
+ "learning_rate": 4.383394383394384e-05,
+ "loss": 0.6026,
+ "step": 936
+ },
+ {
+ "epoch": 3.4322344322344325,
+ "grad_norm": 25.131824493408203,
+ "learning_rate": 4.3809523809523805e-05,
+ "loss": 0.4023,
+ "step": 937
+ },
+ {
+ "epoch": 3.435897435897436,
+ "grad_norm": 35.04637145996094,
+ "learning_rate": 4.3785103785103783e-05,
+ "loss": 0.7765,
+ "step": 938
+ },
+ {
+ "epoch": 3.4395604395604398,
+ "grad_norm": 15.831666946411133,
+ "learning_rate": 4.376068376068376e-05,
+ "loss": 0.1779,
+ "step": 939
+ },
+ {
+ "epoch": 3.4432234432234434,
+ "grad_norm": 26.455148696899414,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.3165,
+ "step": 940
+ },
+ {
+ "epoch": 3.446886446886447,
+ "grad_norm": 23.840030670166016,
+ "learning_rate": 4.371184371184371e-05,
+ "loss": 0.5363,
+ "step": 941
+ },
+ {
+ "epoch": 3.4505494505494507,
+ "grad_norm": 30.517026901245117,
+ "learning_rate": 4.368742368742369e-05,
+ "loss": 0.422,
+ "step": 942
+ },
+ {
+ "epoch": 3.4542124542124544,
+ "grad_norm": 51.574703216552734,
+ "learning_rate": 4.366300366300366e-05,
+ "loss": 1.5333,
+ "step": 943
+ },
+ {
+ "epoch": 3.457875457875458,
+ "grad_norm": 57.92119216918945,
+ "learning_rate": 4.363858363858364e-05,
+ "loss": 0.5732,
+ "step": 944
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 34.3664436340332,
+ "learning_rate": 4.361416361416362e-05,
+ "loss": 0.5054,
+ "step": 945
+ },
+ {
+ "epoch": 3.4652014652014653,
+ "grad_norm": 14.034111976623535,
+ "learning_rate": 4.358974358974359e-05,
+ "loss": 0.0969,
+ "step": 946
+ },
+ {
+ "epoch": 3.468864468864469,
+ "grad_norm": 15.058267593383789,
+ "learning_rate": 4.356532356532357e-05,
+ "loss": 0.1877,
+ "step": 947
+ },
+ {
+ "epoch": 3.4725274725274726,
+ "grad_norm": 18.598024368286133,
+ "learning_rate": 4.354090354090355e-05,
+ "loss": 0.2378,
+ "step": 948
+ },
+ {
+ "epoch": 3.4761904761904763,
+ "grad_norm": 17.926319122314453,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.2935,
+ "step": 949
+ },
+ {
+ "epoch": 3.47985347985348,
+ "grad_norm": 8.25291633605957,
+ "learning_rate": 4.349206349206349e-05,
+ "loss": 0.0891,
+ "step": 950
+ },
+ {
+ "epoch": 3.4835164835164836,
+ "grad_norm": 26.152061462402344,
+ "learning_rate": 4.346764346764347e-05,
+ "loss": 0.2798,
+ "step": 951
+ },
+ {
+ "epoch": 3.4871794871794872,
+ "grad_norm": 22.669677734375,
+ "learning_rate": 4.344322344322344e-05,
+ "loss": 0.506,
+ "step": 952
+ },
+ {
+ "epoch": 3.490842490842491,
+ "grad_norm": 18.439355850219727,
+ "learning_rate": 4.341880341880342e-05,
+ "loss": 0.3034,
+ "step": 953
+ },
+ {
+ "epoch": 3.4945054945054945,
+ "grad_norm": 30.48084259033203,
+ "learning_rate": 4.339438339438339e-05,
+ "loss": 0.4366,
+ "step": 954
+ },
+ {
+ "epoch": 3.498168498168498,
+ "grad_norm": 51.792381286621094,
+ "learning_rate": 4.336996336996337e-05,
+ "loss": 0.5214,
+ "step": 955
+ },
+ {
+ "epoch": 3.501831501831502,
+ "grad_norm": 44.70718002319336,
+ "learning_rate": 4.334554334554335e-05,
+ "loss": 0.7823,
+ "step": 956
+ },
+ {
+ "epoch": 3.5054945054945055,
+ "grad_norm": 42.00168991088867,
+ "learning_rate": 4.332112332112332e-05,
+ "loss": 0.9207,
+ "step": 957
+ },
+ {
+ "epoch": 3.509157509157509,
+ "grad_norm": 28.97800636291504,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.279,
+ "step": 958
+ },
+ {
+ "epoch": 3.5128205128205128,
+ "grad_norm": 21.902843475341797,
+ "learning_rate": 4.327228327228328e-05,
+ "loss": 0.1969,
+ "step": 959
+ },
+ {
+ "epoch": 3.5164835164835164,
+ "grad_norm": 14.560053825378418,
+ "learning_rate": 4.324786324786325e-05,
+ "loss": 0.0976,
+ "step": 960
+ },
+ {
+ "epoch": 3.52014652014652,
+ "grad_norm": 4.2637104988098145,
+ "learning_rate": 4.322344322344323e-05,
+ "loss": 0.0277,
+ "step": 961
+ },
+ {
+ "epoch": 3.5238095238095237,
+ "grad_norm": 52.4840202331543,
+ "learning_rate": 4.3199023199023205e-05,
+ "loss": 0.2967,
+ "step": 962
+ },
+ {
+ "epoch": 3.5274725274725274,
+ "grad_norm": 48.95661163330078,
+ "learning_rate": 4.317460317460317e-05,
+ "loss": 0.2904,
+ "step": 963
+ },
+ {
+ "epoch": 3.531135531135531,
+ "grad_norm": 79.46379089355469,
+ "learning_rate": 4.315018315018315e-05,
+ "loss": 0.1644,
+ "step": 964
+ },
+ {
+ "epoch": 3.5347985347985347,
+ "grad_norm": 29.678428649902344,
+ "learning_rate": 4.312576312576313e-05,
+ "loss": 0.3498,
+ "step": 965
+ },
+ {
+ "epoch": 3.5384615384615383,
+ "grad_norm": 32.71342086791992,
+ "learning_rate": 4.31013431013431e-05,
+ "loss": 0.3509,
+ "step": 966
+ },
+ {
+ "epoch": 3.542124542124542,
+ "grad_norm": 6.679911136627197,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.0658,
+ "step": 967
+ },
+ {
+ "epoch": 3.5457875457875456,
+ "grad_norm": 19.8692626953125,
+ "learning_rate": 4.3052503052503056e-05,
+ "loss": 0.1626,
+ "step": 968
+ },
+ {
+ "epoch": 3.5494505494505493,
+ "grad_norm": 17.69087791442871,
+ "learning_rate": 4.302808302808303e-05,
+ "loss": 0.2592,
+ "step": 969
+ },
+ {
+ "epoch": 3.553113553113553,
+ "grad_norm": 11.734158515930176,
+ "learning_rate": 4.3003663003663006e-05,
+ "loss": 0.1007,
+ "step": 970
+ },
+ {
+ "epoch": 3.5567765567765566,
+ "grad_norm": 34.51172637939453,
+ "learning_rate": 4.2979242979242984e-05,
+ "loss": 0.2823,
+ "step": 971
+ },
+ {
+ "epoch": 3.5604395604395602,
+ "grad_norm": 15.009514808654785,
+ "learning_rate": 4.2954822954822956e-05,
+ "loss": 0.1203,
+ "step": 972
+ },
+ {
+ "epoch": 3.564102564102564,
+ "grad_norm": 67.92166137695312,
+ "learning_rate": 4.2930402930402934e-05,
+ "loss": 0.396,
+ "step": 973
+ },
+ {
+ "epoch": 3.5677655677655675,
+ "grad_norm": 66.84014129638672,
+ "learning_rate": 4.290598290598291e-05,
+ "loss": 0.6545,
+ "step": 974
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 25.811107635498047,
+ "learning_rate": 4.2881562881562885e-05,
+ "loss": 0.1747,
+ "step": 975
+ },
+ {
+ "epoch": 3.575091575091575,
+ "grad_norm": 100.88753509521484,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 0.3991,
+ "step": 976
+ },
+ {
+ "epoch": 3.578754578754579,
+ "grad_norm": 34.51667785644531,
+ "learning_rate": 4.2832722832722835e-05,
+ "loss": 0.1365,
+ "step": 977
+ },
+ {
+ "epoch": 3.5824175824175826,
+ "grad_norm": 26.852561950683594,
+ "learning_rate": 4.2808302808302806e-05,
+ "loss": 0.3627,
+ "step": 978
+ },
+ {
+ "epoch": 3.586080586080586,
+ "grad_norm": 24.968570709228516,
+ "learning_rate": 4.2783882783882785e-05,
+ "loss": 0.2106,
+ "step": 979
+ },
+ {
+ "epoch": 3.58974358974359,
+ "grad_norm": 27.33326530456543,
+ "learning_rate": 4.2759462759462757e-05,
+ "loss": 0.1758,
+ "step": 980
+ },
+ {
+ "epoch": 3.5934065934065935,
+ "grad_norm": 52.63814926147461,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.601,
+ "step": 981
+ },
+ {
+ "epoch": 3.597069597069597,
+ "grad_norm": 37.77897262573242,
+ "learning_rate": 4.2710622710622713e-05,
+ "loss": 0.5299,
+ "step": 982
+ },
+ {
+ "epoch": 3.600732600732601,
+ "grad_norm": 27.691659927368164,
+ "learning_rate": 4.2686202686202685e-05,
+ "loss": 0.1784,
+ "step": 983
+ },
+ {
+ "epoch": 3.6043956043956045,
+ "grad_norm": 106.33782958984375,
+ "learning_rate": 4.2661782661782664e-05,
+ "loss": 0.8859,
+ "step": 984
+ },
+ {
+ "epoch": 3.608058608058608,
+ "grad_norm": 22.95706558227539,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1611,
+ "step": 985
+ },
+ {
+ "epoch": 3.6117216117216118,
+ "grad_norm": 22.72148895263672,
+ "learning_rate": 4.2612942612942614e-05,
+ "loss": 0.1561,
+ "step": 986
+ },
+ {
+ "epoch": 3.6153846153846154,
+ "grad_norm": 93.37244415283203,
+ "learning_rate": 4.258852258852259e-05,
+ "loss": 0.4287,
+ "step": 987
+ },
+ {
+ "epoch": 3.619047619047619,
+ "grad_norm": 51.54584884643555,
+ "learning_rate": 4.2564102564102564e-05,
+ "loss": 0.6292,
+ "step": 988
+ },
+ {
+ "epoch": 3.6227106227106227,
+ "grad_norm": 61.58243942260742,
+ "learning_rate": 4.2539682539682536e-05,
+ "loss": 1.3205,
+ "step": 989
+ },
+ {
+ "epoch": 3.6263736263736264,
+ "grad_norm": 70.59432220458984,
+ "learning_rate": 4.2515262515262514e-05,
+ "loss": 0.7451,
+ "step": 990
+ },
+ {
+ "epoch": 3.63003663003663,
+ "grad_norm": 76.28730773925781,
+ "learning_rate": 4.249084249084249e-05,
+ "loss": 2.0314,
+ "step": 991
+ },
+ {
+ "epoch": 3.6336996336996337,
+ "grad_norm": 73.5402603149414,
+ "learning_rate": 4.2466422466422464e-05,
+ "loss": 1.6628,
+ "step": 992
+ },
+ {
+ "epoch": 3.6373626373626373,
+ "grad_norm": 75.8978042602539,
+ "learning_rate": 4.244200244200244e-05,
+ "loss": 1.652,
+ "step": 993
+ },
+ {
+ "epoch": 3.641025641025641,
+ "grad_norm": 37.04104232788086,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 1.5356,
+ "step": 994
+ },
+ {
+ "epoch": 3.6446886446886446,
+ "grad_norm": 34.31178283691406,
+ "learning_rate": 4.239316239316239e-05,
+ "loss": 1.1783,
+ "step": 995
+ },
+ {
+ "epoch": 3.6483516483516483,
+ "grad_norm": 22.934877395629883,
+ "learning_rate": 4.236874236874237e-05,
+ "loss": 1.2995,
+ "step": 996
+ },
+ {
+ "epoch": 3.652014652014652,
+ "grad_norm": 30.25251579284668,
+ "learning_rate": 4.234432234432235e-05,
+ "loss": 1.1304,
+ "step": 997
+ },
+ {
+ "epoch": 3.6556776556776556,
+ "grad_norm": 35.082027435302734,
+ "learning_rate": 4.231990231990232e-05,
+ "loss": 1.0827,
+ "step": 998
+ },
+ {
+ "epoch": 3.659340659340659,
+ "grad_norm": 24.526325225830078,
+ "learning_rate": 4.22954822954823e-05,
+ "loss": 0.8716,
+ "step": 999
+ },
+ {
+ "epoch": 3.663003663003663,
+ "grad_norm": 29.882883071899414,
+ "learning_rate": 4.227106227106228e-05,
+ "loss": 0.5432,
+ "step": 1000
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 34.53218078613281,
+ "learning_rate": 4.224664224664224e-05,
+ "loss": 1.2094,
+ "step": 1001
+ },
+ {
+ "epoch": 3.67032967032967,
+ "grad_norm": 22.50905990600586,
+ "learning_rate": 4.222222222222222e-05,
+ "loss": 0.4608,
+ "step": 1002
+ },
+ {
+ "epoch": 3.6739926739926743,
+ "grad_norm": 27.33183479309082,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.7181,
+ "step": 1003
+ },
+ {
+ "epoch": 3.677655677655678,
+ "grad_norm": 50.09929275512695,
+ "learning_rate": 4.217338217338217e-05,
+ "loss": 1.1163,
+ "step": 1004
+ },
+ {
+ "epoch": 3.6813186813186816,
+ "grad_norm": 32.48406982421875,
+ "learning_rate": 4.214896214896215e-05,
+ "loss": 0.7101,
+ "step": 1005
+ },
+ {
+ "epoch": 3.684981684981685,
+ "grad_norm": 5.821015357971191,
+ "learning_rate": 4.212454212454212e-05,
+ "loss": 0.0695,
+ "step": 1006
+ },
+ {
+ "epoch": 3.688644688644689,
+ "grad_norm": 32.04796600341797,
+ "learning_rate": 4.21001221001221e-05,
+ "loss": 0.609,
+ "step": 1007
+ },
+ {
+ "epoch": 3.6923076923076925,
+ "grad_norm": 37.282474517822266,
+ "learning_rate": 4.207570207570208e-05,
+ "loss": 0.873,
+ "step": 1008
+ },
+ {
+ "epoch": 3.695970695970696,
+ "grad_norm": 35.74583435058594,
+ "learning_rate": 4.205128205128205e-05,
+ "loss": 0.7387,
+ "step": 1009
+ },
+ {
+ "epoch": 3.6996336996337,
+ "grad_norm": 74.91361236572266,
+ "learning_rate": 4.202686202686203e-05,
+ "loss": 1.6302,
+ "step": 1010
+ },
+ {
+ "epoch": 3.7032967032967035,
+ "grad_norm": 25.163251876831055,
+ "learning_rate": 4.200244200244201e-05,
+ "loss": 0.3866,
+ "step": 1011
+ },
+ {
+ "epoch": 3.706959706959707,
+ "grad_norm": 34.36520004272461,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.8413,
+ "step": 1012
+ },
+ {
+ "epoch": 3.7106227106227108,
+ "grad_norm": 41.62683868408203,
+ "learning_rate": 4.195360195360196e-05,
+ "loss": 0.4155,
+ "step": 1013
+ },
+ {
+ "epoch": 3.7142857142857144,
+ "grad_norm": 34.24674987792969,
+ "learning_rate": 4.192918192918193e-05,
+ "loss": 0.8327,
+ "step": 1014
+ },
+ {
+ "epoch": 3.717948717948718,
+ "grad_norm": 27.771732330322266,
+ "learning_rate": 4.19047619047619e-05,
+ "loss": 0.4509,
+ "step": 1015
+ },
+ {
+ "epoch": 3.7216117216117217,
+ "grad_norm": 26.55430793762207,
+ "learning_rate": 4.188034188034188e-05,
+ "loss": 0.4851,
+ "step": 1016
+ },
+ {
+ "epoch": 3.7252747252747254,
+ "grad_norm": 34.8384895324707,
+ "learning_rate": 4.185592185592186e-05,
+ "loss": 0.4105,
+ "step": 1017
+ },
+ {
+ "epoch": 3.728937728937729,
+ "grad_norm": 29.447805404663086,
+ "learning_rate": 4.183150183150183e-05,
+ "loss": 0.4129,
+ "step": 1018
+ },
+ {
+ "epoch": 3.7326007326007327,
+ "grad_norm": 66.70004272460938,
+ "learning_rate": 4.180708180708181e-05,
+ "loss": 0.4762,
+ "step": 1019
+ },
+ {
+ "epoch": 3.7362637362637363,
+ "grad_norm": 10.356173515319824,
+ "learning_rate": 4.1782661782661786e-05,
+ "loss": 0.0718,
+ "step": 1020
+ },
+ {
+ "epoch": 3.73992673992674,
+ "grad_norm": 35.98944854736328,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 0.2672,
+ "step": 1021
+ },
+ {
+ "epoch": 3.7435897435897436,
+ "grad_norm": 6.806238651275635,
+ "learning_rate": 4.1733821733821736e-05,
+ "loss": 0.0455,
+ "step": 1022
+ },
+ {
+ "epoch": 3.7472527472527473,
+ "grad_norm": 19.689456939697266,
+ "learning_rate": 4.1709401709401715e-05,
+ "loss": 0.2323,
+ "step": 1023
+ },
+ {
+ "epoch": 3.750915750915751,
+ "grad_norm": 23.971303939819336,
+ "learning_rate": 4.1684981684981687e-05,
+ "loss": 0.1393,
+ "step": 1024
+ },
+ {
+ "epoch": 3.7545787545787546,
+ "grad_norm": 43.26774215698242,
+ "learning_rate": 4.1660561660561665e-05,
+ "loss": 0.7084,
+ "step": 1025
+ },
+ {
+ "epoch": 3.758241758241758,
+ "grad_norm": 36.04475402832031,
+ "learning_rate": 4.1636141636141643e-05,
+ "loss": 0.3782,
+ "step": 1026
+ },
+ {
+ "epoch": 3.761904761904762,
+ "grad_norm": 48.78522491455078,
+ "learning_rate": 4.161172161172161e-05,
+ "loss": 0.7698,
+ "step": 1027
+ },
+ {
+ "epoch": 3.7655677655677655,
+ "grad_norm": 11.876708984375,
+ "learning_rate": 4.158730158730159e-05,
+ "loss": 0.0943,
+ "step": 1028
+ },
+ {
+ "epoch": 3.769230769230769,
+ "grad_norm": 83.1320571899414,
+ "learning_rate": 4.1562881562881565e-05,
+ "loss": 0.8116,
+ "step": 1029
+ },
+ {
+ "epoch": 3.772893772893773,
+ "grad_norm": 22.412723541259766,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2086,
+ "step": 1030
+ },
+ {
+ "epoch": 3.7765567765567765,
+ "grad_norm": 11.011713981628418,
+ "learning_rate": 4.1514041514041515e-05,
+ "loss": 0.1001,
+ "step": 1031
+ },
+ {
+ "epoch": 3.78021978021978,
+ "grad_norm": 21.958040237426758,
+ "learning_rate": 4.148962148962149e-05,
+ "loss": 0.8457,
+ "step": 1032
+ },
+ {
+ "epoch": 3.7838827838827838,
+ "grad_norm": 57.3586540222168,
+ "learning_rate": 4.1465201465201465e-05,
+ "loss": 0.1605,
+ "step": 1033
+ },
+ {
+ "epoch": 3.7875457875457874,
+ "grad_norm": 24.261554718017578,
+ "learning_rate": 4.1440781440781444e-05,
+ "loss": 0.1854,
+ "step": 1034
+ },
+ {
+ "epoch": 3.791208791208791,
+ "grad_norm": 31.09326171875,
+ "learning_rate": 4.1416361416361416e-05,
+ "loss": 0.2874,
+ "step": 1035
+ },
+ {
+ "epoch": 3.7948717948717947,
+ "grad_norm": 8.3728666305542,
+ "learning_rate": 4.1391941391941394e-05,
+ "loss": 0.0496,
+ "step": 1036
+ },
+ {
+ "epoch": 3.7985347985347984,
+ "grad_norm": 47.5240592956543,
+ "learning_rate": 4.136752136752137e-05,
+ "loss": 0.2025,
+ "step": 1037
+ },
+ {
+ "epoch": 3.802197802197802,
+ "grad_norm": 51.25822448730469,
+ "learning_rate": 4.1343101343101344e-05,
+ "loss": 0.714,
+ "step": 1038
+ },
+ {
+ "epoch": 3.8058608058608057,
+ "grad_norm": 91.58492279052734,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 2.2889,
+ "step": 1039
+ },
+ {
+ "epoch": 3.8095238095238093,
+ "grad_norm": 4.206390857696533,
+ "learning_rate": 4.1294261294261294e-05,
+ "loss": 0.024,
+ "step": 1040
+ },
+ {
+ "epoch": 3.813186813186813,
+ "grad_norm": 58.49787139892578,
+ "learning_rate": 4.1269841269841266e-05,
+ "loss": 0.7162,
+ "step": 1041
+ },
+ {
+ "epoch": 3.8168498168498166,
+ "grad_norm": 33.38972091674805,
+ "learning_rate": 4.1245421245421244e-05,
+ "loss": 0.3064,
+ "step": 1042
+ },
+ {
+ "epoch": 3.8205128205128203,
+ "grad_norm": 53.251007080078125,
+ "learning_rate": 4.122100122100122e-05,
+ "loss": 0.7376,
+ "step": 1043
+ },
+ {
+ "epoch": 3.824175824175824,
+ "grad_norm": 28.314645767211914,
+ "learning_rate": 4.1196581196581195e-05,
+ "loss": 0.4608,
+ "step": 1044
+ },
+ {
+ "epoch": 3.8278388278388276,
+ "grad_norm": 538.0653076171875,
+ "learning_rate": 4.117216117216117e-05,
+ "loss": 1.5678,
+ "step": 1045
+ },
+ {
+ "epoch": 3.8315018315018317,
+ "grad_norm": 38.662925720214844,
+ "learning_rate": 4.114774114774115e-05,
+ "loss": 1.1084,
+ "step": 1046
+ },
+ {
+ "epoch": 3.8351648351648353,
+ "grad_norm": 31.877248764038086,
+ "learning_rate": 4.112332112332112e-05,
+ "loss": 0.9947,
+ "step": 1047
+ },
+ {
+ "epoch": 3.838827838827839,
+ "grad_norm": 50.17106628417969,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.8024,
+ "step": 1048
+ },
+ {
+ "epoch": 3.8424908424908426,
+ "grad_norm": 18.851001739501953,
+ "learning_rate": 4.107448107448108e-05,
+ "loss": 0.4245,
+ "step": 1049
+ },
+ {
+ "epoch": 3.8461538461538463,
+ "grad_norm": 35.91590881347656,
+ "learning_rate": 4.105006105006105e-05,
+ "loss": 1.1046,
+ "step": 1050
+ },
+ {
+ "epoch": 3.84981684981685,
+ "grad_norm": 24.618389129638672,
+ "learning_rate": 4.102564102564103e-05,
+ "loss": 0.8167,
+ "step": 1051
+ },
+ {
+ "epoch": 3.8534798534798536,
+ "grad_norm": 27.028446197509766,
+ "learning_rate": 4.100122100122101e-05,
+ "loss": 0.6983,
+ "step": 1052
+ },
+ {
+ "epoch": 3.857142857142857,
+ "grad_norm": 17.247610092163086,
+ "learning_rate": 4.0976800976800974e-05,
+ "loss": 0.4761,
+ "step": 1053
+ },
+ {
+ "epoch": 3.860805860805861,
+ "grad_norm": 27.187416076660156,
+ "learning_rate": 4.095238095238095e-05,
+ "loss": 0.794,
+ "step": 1054
+ },
+ {
+ "epoch": 3.8644688644688645,
+ "grad_norm": 35.990623474121094,
+ "learning_rate": 4.0927960927960924e-05,
+ "loss": 0.7874,
+ "step": 1055
+ },
+ {
+ "epoch": 3.868131868131868,
+ "grad_norm": 168.7575225830078,
+ "learning_rate": 4.09035409035409e-05,
+ "loss": 0.6028,
+ "step": 1056
+ },
+ {
+ "epoch": 3.871794871794872,
+ "grad_norm": 31.459491729736328,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6256,
+ "step": 1057
+ },
+ {
+ "epoch": 3.8754578754578755,
+ "grad_norm": 25.053123474121094,
+ "learning_rate": 4.085470085470085e-05,
+ "loss": 0.3041,
+ "step": 1058
+ },
+ {
+ "epoch": 3.879120879120879,
+ "grad_norm": 56.10730743408203,
+ "learning_rate": 4.083028083028083e-05,
+ "loss": 0.8875,
+ "step": 1059
+ },
+ {
+ "epoch": 3.8827838827838828,
+ "grad_norm": 26.897689819335938,
+ "learning_rate": 4.080586080586081e-05,
+ "loss": 0.5291,
+ "step": 1060
+ },
+ {
+ "epoch": 3.8864468864468864,
+ "grad_norm": 40.36210250854492,
+ "learning_rate": 4.078144078144078e-05,
+ "loss": 1.2323,
+ "step": 1061
+ },
+ {
+ "epoch": 3.89010989010989,
+ "grad_norm": 17.556934356689453,
+ "learning_rate": 4.075702075702076e-05,
+ "loss": 0.0951,
+ "step": 1062
+ },
+ {
+ "epoch": 3.8937728937728937,
+ "grad_norm": 54.6690559387207,
+ "learning_rate": 4.073260073260074e-05,
+ "loss": 0.4311,
+ "step": 1063
+ },
+ {
+ "epoch": 3.8974358974358974,
+ "grad_norm": 27.554750442504883,
+ "learning_rate": 4.070818070818071e-05,
+ "loss": 0.2851,
+ "step": 1064
+ },
+ {
+ "epoch": 3.901098901098901,
+ "grad_norm": 14.667935371398926,
+ "learning_rate": 4.068376068376069e-05,
+ "loss": 0.0866,
+ "step": 1065
+ },
+ {
+ "epoch": 3.9047619047619047,
+ "grad_norm": 39.62594985961914,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.2322,
+ "step": 1066
+ },
+ {
+ "epoch": 3.9084249084249083,
+ "grad_norm": 31.457260131835938,
+ "learning_rate": 4.063492063492063e-05,
+ "loss": 0.2554,
+ "step": 1067
+ },
+ {
+ "epoch": 3.912087912087912,
+ "grad_norm": 52.82997131347656,
+ "learning_rate": 4.061050061050061e-05,
+ "loss": 0.44,
+ "step": 1068
+ },
+ {
+ "epoch": 3.9157509157509156,
+ "grad_norm": 56.15779495239258,
+ "learning_rate": 4.058608058608059e-05,
+ "loss": 0.9419,
+ "step": 1069
+ },
+ {
+ "epoch": 3.9194139194139193,
+ "grad_norm": 59.23240661621094,
+ "learning_rate": 4.056166056166056e-05,
+ "loss": 0.5084,
+ "step": 1070
+ },
+ {
+ "epoch": 3.9230769230769234,
+ "grad_norm": 9.644290924072266,
+ "learning_rate": 4.053724053724054e-05,
+ "loss": 0.0456,
+ "step": 1071
+ },
+ {
+ "epoch": 3.926739926739927,
+ "grad_norm": 24.42845916748047,
+ "learning_rate": 4.051282051282052e-05,
+ "loss": 0.0907,
+ "step": 1072
+ },
+ {
+ "epoch": 3.9304029304029307,
+ "grad_norm": 81.36042785644531,
+ "learning_rate": 4.048840048840049e-05,
+ "loss": 1.0178,
+ "step": 1073
+ },
+ {
+ "epoch": 3.9340659340659343,
+ "grad_norm": 63.134071350097656,
+ "learning_rate": 4.046398046398047e-05,
+ "loss": 1.1125,
+ "step": 1074
+ },
+ {
+ "epoch": 3.937728937728938,
+ "grad_norm": 56.59608840942383,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.4465,
+ "step": 1075
+ },
+ {
+ "epoch": 3.9413919413919416,
+ "grad_norm": 48.51662063598633,
+ "learning_rate": 4.041514041514042e-05,
+ "loss": 0.5054,
+ "step": 1076
+ },
+ {
+ "epoch": 3.9450549450549453,
+ "grad_norm": 50.393524169921875,
+ "learning_rate": 4.0390720390720395e-05,
+ "loss": 0.8157,
+ "step": 1077
+ },
+ {
+ "epoch": 3.948717948717949,
+ "grad_norm": 63.414878845214844,
+ "learning_rate": 4.036630036630037e-05,
+ "loss": 0.9598,
+ "step": 1078
+ },
+ {
+ "epoch": 3.9523809523809526,
+ "grad_norm": 35.72902297973633,
+ "learning_rate": 4.034188034188034e-05,
+ "loss": 0.4764,
+ "step": 1079
+ },
+ {
+ "epoch": 3.956043956043956,
+ "grad_norm": 20.452268600463867,
+ "learning_rate": 4.031746031746032e-05,
+ "loss": 0.191,
+ "step": 1080
+ },
+ {
+ "epoch": 3.95970695970696,
+ "grad_norm": 38.23368453979492,
+ "learning_rate": 4.029304029304029e-05,
+ "loss": 0.5218,
+ "step": 1081
+ },
+ {
+ "epoch": 3.9633699633699635,
+ "grad_norm": 79.35212707519531,
+ "learning_rate": 4.026862026862027e-05,
+ "loss": 1.3695,
+ "step": 1082
+ },
+ {
+ "epoch": 3.967032967032967,
+ "grad_norm": 62.0828742980957,
+ "learning_rate": 4.0244200244200246e-05,
+ "loss": 1.4882,
+ "step": 1083
+ },
+ {
+ "epoch": 3.970695970695971,
+ "grad_norm": 35.413734436035156,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.1966,
+ "step": 1084
+ },
+ {
+ "epoch": 3.9743589743589745,
+ "grad_norm": 18.060728073120117,
+ "learning_rate": 4.0195360195360196e-05,
+ "loss": 0.2902,
+ "step": 1085
+ },
+ {
+ "epoch": 3.978021978021978,
+ "grad_norm": 15.263091087341309,
+ "learning_rate": 4.0170940170940174e-05,
+ "loss": 0.1325,
+ "step": 1086
+ },
+ {
+ "epoch": 3.9816849816849818,
+ "grad_norm": 35.8296012878418,
+ "learning_rate": 4.0146520146520146e-05,
+ "loss": 1.0225,
+ "step": 1087
+ },
+ {
+ "epoch": 3.9853479853479854,
+ "grad_norm": 24.120967864990234,
+ "learning_rate": 4.0122100122100125e-05,
+ "loss": 0.4432,
+ "step": 1088
+ },
+ {
+ "epoch": 3.989010989010989,
+ "grad_norm": 47.371070861816406,
+ "learning_rate": 4.00976800976801e-05,
+ "loss": 0.9703,
+ "step": 1089
+ },
+ {
+ "epoch": 3.9926739926739927,
+ "grad_norm": 44.266082763671875,
+ "learning_rate": 4.0073260073260075e-05,
+ "loss": 0.6652,
+ "step": 1090
+ },
+ {
+ "epoch": 3.9963369963369964,
+ "grad_norm": 22.17586898803711,
+ "learning_rate": 4.0048840048840046e-05,
+ "loss": 0.1324,
+ "step": 1091
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 45.4996337890625,
+ "learning_rate": 4.0024420024420025e-05,
+ "loss": 0.3746,
+ "step": 1092
+ },
+ {
+ "epoch": 4.003663003663004,
+ "grad_norm": 31.747541427612305,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 0.5028,
+ "step": 1093
+ },
+ {
+ "epoch": 4.007326007326007,
+ "grad_norm": 13.460674285888672,
+ "learning_rate": 3.9975579975579975e-05,
+ "loss": 0.088,
+ "step": 1094
+ },
+ {
+ "epoch": 4.010989010989011,
+ "grad_norm": 23.94148826599121,
+ "learning_rate": 3.9951159951159953e-05,
+ "loss": 0.1944,
+ "step": 1095
+ },
+ {
+ "epoch": 4.014652014652015,
+ "grad_norm": 60.94758224487305,
+ "learning_rate": 3.9926739926739925e-05,
+ "loss": 0.555,
+ "step": 1096
+ },
+ {
+ "epoch": 4.018315018315018,
+ "grad_norm": 24.47633934020996,
+ "learning_rate": 3.9902319902319904e-05,
+ "loss": 0.1314,
+ "step": 1097
+ },
+ {
+ "epoch": 4.021978021978022,
+ "grad_norm": 42.690162658691406,
+ "learning_rate": 3.987789987789988e-05,
+ "loss": 0.4734,
+ "step": 1098
+ },
+ {
+ "epoch": 4.0256410256410255,
+ "grad_norm": 69.26956939697266,
+ "learning_rate": 3.9853479853479854e-05,
+ "loss": 1.4256,
+ "step": 1099
+ },
+ {
+ "epoch": 4.029304029304029,
+ "grad_norm": 7.718477725982666,
+ "learning_rate": 3.982905982905983e-05,
+ "loss": 0.0549,
+ "step": 1100
+ },
+ {
+ "epoch": 4.032967032967033,
+ "grad_norm": 60.15462875366211,
+ "learning_rate": 3.980463980463981e-05,
+ "loss": 1.2739,
+ "step": 1101
+ },
+ {
+ "epoch": 4.0366300366300365,
+ "grad_norm": 57.749656677246094,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 1.0691,
+ "step": 1102
+ },
+ {
+ "epoch": 4.04029304029304,
+ "grad_norm": 35.57550811767578,
+ "learning_rate": 3.975579975579976e-05,
+ "loss": 0.5114,
+ "step": 1103
+ },
+ {
+ "epoch": 4.043956043956044,
+ "grad_norm": 58.007694244384766,
+ "learning_rate": 3.973137973137973e-05,
+ "loss": 1.1552,
+ "step": 1104
+ },
+ {
+ "epoch": 4.0476190476190474,
+ "grad_norm": 30.794008255004883,
+ "learning_rate": 3.9706959706959704e-05,
+ "loss": 0.7502,
+ "step": 1105
+ },
+ {
+ "epoch": 4.051282051282051,
+ "grad_norm": 35.88930892944336,
+ "learning_rate": 3.968253968253968e-05,
+ "loss": 0.6965,
+ "step": 1106
+ },
+ {
+ "epoch": 4.054945054945055,
+ "grad_norm": 25.719144821166992,
+ "learning_rate": 3.9658119658119654e-05,
+ "loss": 0.4581,
+ "step": 1107
+ },
+ {
+ "epoch": 4.058608058608058,
+ "grad_norm": 37.397640228271484,
+ "learning_rate": 3.963369963369963e-05,
+ "loss": 1.0719,
+ "step": 1108
+ },
+ {
+ "epoch": 4.062271062271062,
+ "grad_norm": 25.8681640625,
+ "learning_rate": 3.960927960927961e-05,
+ "loss": 0.7,
+ "step": 1109
+ },
+ {
+ "epoch": 4.065934065934066,
+ "grad_norm": 16.983413696289062,
+ "learning_rate": 3.958485958485958e-05,
+ "loss": 0.2394,
+ "step": 1110
+ },
+ {
+ "epoch": 4.069597069597069,
+ "grad_norm": 31.7902889251709,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.5662,
+ "step": 1111
+ },
+ {
+ "epoch": 4.073260073260073,
+ "grad_norm": 37.51417922973633,
+ "learning_rate": 3.953601953601954e-05,
+ "loss": 0.3483,
+ "step": 1112
+ },
+ {
+ "epoch": 4.076923076923077,
+ "grad_norm": 24.01732635498047,
+ "learning_rate": 3.951159951159951e-05,
+ "loss": 0.2527,
+ "step": 1113
+ },
+ {
+ "epoch": 4.08058608058608,
+ "grad_norm": 29.152162551879883,
+ "learning_rate": 3.948717948717949e-05,
+ "loss": 0.4485,
+ "step": 1114
+ },
+ {
+ "epoch": 4.084249084249084,
+ "grad_norm": 31.519155502319336,
+ "learning_rate": 3.946275946275947e-05,
+ "loss": 0.2485,
+ "step": 1115
+ },
+ {
+ "epoch": 4.087912087912088,
+ "grad_norm": 18.462514877319336,
+ "learning_rate": 3.943833943833944e-05,
+ "loss": 0.1057,
+ "step": 1116
+ },
+ {
+ "epoch": 4.091575091575091,
+ "grad_norm": 35.28910827636719,
+ "learning_rate": 3.941391941391941e-05,
+ "loss": 0.3589,
+ "step": 1117
+ },
+ {
+ "epoch": 4.095238095238095,
+ "grad_norm": 47.00394058227539,
+ "learning_rate": 3.938949938949939e-05,
+ "loss": 0.5148,
+ "step": 1118
+ },
+ {
+ "epoch": 4.0989010989010985,
+ "grad_norm": 24.796058654785156,
+ "learning_rate": 3.936507936507936e-05,
+ "loss": 0.2486,
+ "step": 1119
+ },
+ {
+ "epoch": 4.102564102564102,
+ "grad_norm": 27.098758697509766,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.196,
+ "step": 1120
+ },
+ {
+ "epoch": 4.106227106227106,
+ "grad_norm": 59.4343147277832,
+ "learning_rate": 3.931623931623932e-05,
+ "loss": 0.8093,
+ "step": 1121
+ },
+ {
+ "epoch": 4.1098901098901095,
+ "grad_norm": 57.0518684387207,
+ "learning_rate": 3.929181929181929e-05,
+ "loss": 0.6495,
+ "step": 1122
+ },
+ {
+ "epoch": 4.113553113553113,
+ "grad_norm": 42.01070022583008,
+ "learning_rate": 3.926739926739927e-05,
+ "loss": 0.3272,
+ "step": 1123
+ },
+ {
+ "epoch": 4.117216117216117,
+ "grad_norm": 72.11932373046875,
+ "learning_rate": 3.924297924297925e-05,
+ "loss": 1.2542,
+ "step": 1124
+ },
+ {
+ "epoch": 4.1208791208791204,
+ "grad_norm": 13.270249366760254,
+ "learning_rate": 3.921855921855922e-05,
+ "loss": 0.0843,
+ "step": 1125
+ },
+ {
+ "epoch": 4.124542124542124,
+ "grad_norm": 32.058258056640625,
+ "learning_rate": 3.91941391941392e-05,
+ "loss": 0.158,
+ "step": 1126
+ },
+ {
+ "epoch": 4.128205128205128,
+ "grad_norm": 37.67665481567383,
+ "learning_rate": 3.9169719169719176e-05,
+ "loss": 0.3463,
+ "step": 1127
+ },
+ {
+ "epoch": 4.131868131868132,
+ "grad_norm": 98.33348846435547,
+ "learning_rate": 3.914529914529915e-05,
+ "loss": 0.8846,
+ "step": 1128
+ },
+ {
+ "epoch": 4.135531135531136,
+ "grad_norm": 49.11083221435547,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.4124,
+ "step": 1129
+ },
+ {
+ "epoch": 4.13919413919414,
+ "grad_norm": 45.87646484375,
+ "learning_rate": 3.90964590964591e-05,
+ "loss": 0.3594,
+ "step": 1130
+ },
+ {
+ "epoch": 4.142857142857143,
+ "grad_norm": 49.34445571899414,
+ "learning_rate": 3.907203907203907e-05,
+ "loss": 0.1947,
+ "step": 1131
+ },
+ {
+ "epoch": 4.146520146520147,
+ "grad_norm": 8.654282569885254,
+ "learning_rate": 3.904761904761905e-05,
+ "loss": 0.0923,
+ "step": 1132
+ },
+ {
+ "epoch": 4.1501831501831505,
+ "grad_norm": 12.46809196472168,
+ "learning_rate": 3.902319902319902e-05,
+ "loss": 0.0841,
+ "step": 1133
+ },
+ {
+ "epoch": 4.153846153846154,
+ "grad_norm": 33.9839973449707,
+ "learning_rate": 3.8998778998779e-05,
+ "loss": 0.5838,
+ "step": 1134
+ },
+ {
+ "epoch": 4.157509157509158,
+ "grad_norm": 36.68742752075195,
+ "learning_rate": 3.8974358974358976e-05,
+ "loss": 0.5483,
+ "step": 1135
+ },
+ {
+ "epoch": 4.1611721611721615,
+ "grad_norm": 26.862363815307617,
+ "learning_rate": 3.894993894993895e-05,
+ "loss": 0.2464,
+ "step": 1136
+ },
+ {
+ "epoch": 4.164835164835165,
+ "grad_norm": 16.219947814941406,
+ "learning_rate": 3.8925518925518926e-05,
+ "loss": 0.1878,
+ "step": 1137
+ },
+ {
+ "epoch": 4.168498168498169,
+ "grad_norm": 36.86198425292969,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.3656,
+ "step": 1138
+ },
+ {
+ "epoch": 4.172161172161172,
+ "grad_norm": 36.241432189941406,
+ "learning_rate": 3.8876678876678877e-05,
+ "loss": 0.8421,
+ "step": 1139
+ },
+ {
+ "epoch": 4.175824175824176,
+ "grad_norm": 45.81169891357422,
+ "learning_rate": 3.8852258852258855e-05,
+ "loss": 0.6081,
+ "step": 1140
+ },
+ {
+ "epoch": 4.17948717948718,
+ "grad_norm": 30.914037704467773,
+ "learning_rate": 3.8827838827838833e-05,
+ "loss": 0.2975,
+ "step": 1141
+ },
+ {
+ "epoch": 4.183150183150183,
+ "grad_norm": 4.663424968719482,
+ "learning_rate": 3.8803418803418805e-05,
+ "loss": 0.0319,
+ "step": 1142
+ },
+ {
+ "epoch": 4.186813186813187,
+ "grad_norm": 33.163551330566406,
+ "learning_rate": 3.877899877899878e-05,
+ "loss": 0.236,
+ "step": 1143
+ },
+ {
+ "epoch": 4.190476190476191,
+ "grad_norm": 20.820547103881836,
+ "learning_rate": 3.8754578754578755e-05,
+ "loss": 0.1907,
+ "step": 1144
+ },
+ {
+ "epoch": 4.194139194139194,
+ "grad_norm": 65.4993896484375,
+ "learning_rate": 3.873015873015873e-05,
+ "loss": 0.4195,
+ "step": 1145
+ },
+ {
+ "epoch": 4.197802197802198,
+ "grad_norm": 13.253530502319336,
+ "learning_rate": 3.8705738705738705e-05,
+ "loss": 0.1496,
+ "step": 1146
+ },
+ {
+ "epoch": 4.201465201465202,
+ "grad_norm": 18.291889190673828,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 0.1544,
+ "step": 1147
+ },
+ {
+ "epoch": 4.205128205128205,
+ "grad_norm": 32.1517448425293,
+ "learning_rate": 3.8656898656898656e-05,
+ "loss": 0.3317,
+ "step": 1148
+ },
+ {
+ "epoch": 4.208791208791209,
+ "grad_norm": 37.809669494628906,
+ "learning_rate": 3.8632478632478634e-05,
+ "loss": 0.394,
+ "step": 1149
+ },
+ {
+ "epoch": 4.212454212454213,
+ "grad_norm": 113.17266082763672,
+ "learning_rate": 3.860805860805861e-05,
+ "loss": 1.2368,
+ "step": 1150
+ },
+ {
+ "epoch": 4.216117216117216,
+ "grad_norm": 10.35407543182373,
+ "learning_rate": 3.8583638583638584e-05,
+ "loss": 0.0584,
+ "step": 1151
+ },
+ {
+ "epoch": 4.21978021978022,
+ "grad_norm": 56.98881530761719,
+ "learning_rate": 3.855921855921856e-05,
+ "loss": 0.8088,
+ "step": 1152
+ },
+ {
+ "epoch": 4.2234432234432235,
+ "grad_norm": 45.7849006652832,
+ "learning_rate": 3.853479853479854e-05,
+ "loss": 0.6471,
+ "step": 1153
+ },
+ {
+ "epoch": 4.227106227106227,
+ "grad_norm": 43.57515335083008,
+ "learning_rate": 3.851037851037851e-05,
+ "loss": 0.2924,
+ "step": 1154
+ },
+ {
+ "epoch": 4.230769230769231,
+ "grad_norm": 14.98643684387207,
+ "learning_rate": 3.848595848595849e-05,
+ "loss": 0.1108,
+ "step": 1155
+ },
+ {
+ "epoch": 4.2344322344322345,
+ "grad_norm": 27.162513732910156,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.3856,
+ "step": 1156
+ },
+ {
+ "epoch": 4.238095238095238,
+ "grad_norm": 56.45119094848633,
+ "learning_rate": 3.8437118437118435e-05,
+ "loss": 0.6752,
+ "step": 1157
+ },
+ {
+ "epoch": 4.241758241758242,
+ "grad_norm": 15.522347450256348,
+ "learning_rate": 3.841269841269841e-05,
+ "loss": 0.1419,
+ "step": 1158
+ },
+ {
+ "epoch": 4.245421245421245,
+ "grad_norm": 16.31126594543457,
+ "learning_rate": 3.8388278388278385e-05,
+ "loss": 0.1303,
+ "step": 1159
+ },
+ {
+ "epoch": 4.249084249084249,
+ "grad_norm": 12.398606300354004,
+ "learning_rate": 3.836385836385836e-05,
+ "loss": 0.1306,
+ "step": 1160
+ },
+ {
+ "epoch": 4.252747252747253,
+ "grad_norm": 19.660768508911133,
+ "learning_rate": 3.833943833943834e-05,
+ "loss": 0.1554,
+ "step": 1161
+ },
+ {
+ "epoch": 4.256410256410256,
+ "grad_norm": 131.451416015625,
+ "learning_rate": 3.831501831501831e-05,
+ "loss": 0.2774,
+ "step": 1162
+ },
+ {
+ "epoch": 4.26007326007326,
+ "grad_norm": 42.0703125,
+ "learning_rate": 3.829059829059829e-05,
+ "loss": 0.471,
+ "step": 1163
+ },
+ {
+ "epoch": 4.263736263736264,
+ "grad_norm": 52.415096282958984,
+ "learning_rate": 3.826617826617827e-05,
+ "loss": 0.7872,
+ "step": 1164
+ },
+ {
+ "epoch": 4.267399267399267,
+ "grad_norm": 35.990421295166016,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.4495,
+ "step": 1165
+ },
+ {
+ "epoch": 4.271062271062271,
+ "grad_norm": 40.330265045166016,
+ "learning_rate": 3.821733821733822e-05,
+ "loss": 0.4009,
+ "step": 1166
+ },
+ {
+ "epoch": 4.274725274725275,
+ "grad_norm": 42.55587387084961,
+ "learning_rate": 3.81929181929182e-05,
+ "loss": 1.6215,
+ "step": 1167
+ },
+ {
+ "epoch": 4.278388278388278,
+ "grad_norm": 30.704498291015625,
+ "learning_rate": 3.816849816849817e-05,
+ "loss": 0.3539,
+ "step": 1168
+ },
+ {
+ "epoch": 4.282051282051282,
+ "grad_norm": 10.239601135253906,
+ "learning_rate": 3.814407814407814e-05,
+ "loss": 0.0779,
+ "step": 1169
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 37.00144577026367,
+ "learning_rate": 3.811965811965812e-05,
+ "loss": 0.4089,
+ "step": 1170
+ },
+ {
+ "epoch": 4.289377289377289,
+ "grad_norm": 40.18193817138672,
+ "learning_rate": 3.809523809523809e-05,
+ "loss": 0.4854,
+ "step": 1171
+ },
+ {
+ "epoch": 4.293040293040293,
+ "grad_norm": 46.78989028930664,
+ "learning_rate": 3.807081807081807e-05,
+ "loss": 0.5863,
+ "step": 1172
+ },
+ {
+ "epoch": 4.2967032967032965,
+ "grad_norm": 49.5102653503418,
+ "learning_rate": 3.804639804639805e-05,
+ "loss": 1.0118,
+ "step": 1173
+ },
+ {
+ "epoch": 4.3003663003663,
+ "grad_norm": 30.41546058654785,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.2616,
+ "step": 1174
+ },
+ {
+ "epoch": 4.304029304029304,
+ "grad_norm": 41.22653579711914,
+ "learning_rate": 3.7997557997558e-05,
+ "loss": 0.5852,
+ "step": 1175
+ },
+ {
+ "epoch": 4.3076923076923075,
+ "grad_norm": 4.033203125,
+ "learning_rate": 3.797313797313798e-05,
+ "loss": 0.0221,
+ "step": 1176
+ },
+ {
+ "epoch": 4.311355311355311,
+ "grad_norm": 13.03472900390625,
+ "learning_rate": 3.794871794871795e-05,
+ "loss": 0.1499,
+ "step": 1177
+ },
+ {
+ "epoch": 4.315018315018315,
+ "grad_norm": 24.690824508666992,
+ "learning_rate": 3.792429792429793e-05,
+ "loss": 0.2631,
+ "step": 1178
+ },
+ {
+ "epoch": 4.318681318681318,
+ "grad_norm": 32.594451904296875,
+ "learning_rate": 3.7899877899877906e-05,
+ "loss": 0.2988,
+ "step": 1179
+ },
+ {
+ "epoch": 4.322344322344322,
+ "grad_norm": 10.510795593261719,
+ "learning_rate": 3.787545787545788e-05,
+ "loss": 0.0499,
+ "step": 1180
+ },
+ {
+ "epoch": 4.326007326007326,
+ "grad_norm": 65.71479034423828,
+ "learning_rate": 3.785103785103785e-05,
+ "loss": 0.9048,
+ "step": 1181
+ },
+ {
+ "epoch": 4.329670329670329,
+ "grad_norm": 12.129572868347168,
+ "learning_rate": 3.782661782661783e-05,
+ "loss": 0.0629,
+ "step": 1182
+ },
+ {
+ "epoch": 4.333333333333333,
+ "grad_norm": 88.66580200195312,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.8276,
+ "step": 1183
+ },
+ {
+ "epoch": 4.336996336996337,
+ "grad_norm": 35.2215461730957,
+ "learning_rate": 3.777777777777778e-05,
+ "loss": 0.2996,
+ "step": 1184
+ },
+ {
+ "epoch": 4.34065934065934,
+ "grad_norm": 29.870285034179688,
+ "learning_rate": 3.775335775335775e-05,
+ "loss": 0.2152,
+ "step": 1185
+ },
+ {
+ "epoch": 4.344322344322344,
+ "grad_norm": 30.441116333007812,
+ "learning_rate": 3.772893772893773e-05,
+ "loss": 0.6761,
+ "step": 1186
+ },
+ {
+ "epoch": 4.347985347985348,
+ "grad_norm": 22.49298095703125,
+ "learning_rate": 3.770451770451771e-05,
+ "loss": 0.7508,
+ "step": 1187
+ },
+ {
+ "epoch": 4.351648351648351,
+ "grad_norm": 22.43603515625,
+ "learning_rate": 3.768009768009768e-05,
+ "loss": 0.3601,
+ "step": 1188
+ },
+ {
+ "epoch": 4.355311355311355,
+ "grad_norm": 38.21080780029297,
+ "learning_rate": 3.765567765567766e-05,
+ "loss": 0.3769,
+ "step": 1189
+ },
+ {
+ "epoch": 4.358974358974359,
+ "grad_norm": 48.90728759765625,
+ "learning_rate": 3.7631257631257635e-05,
+ "loss": 0.4259,
+ "step": 1190
+ },
+ {
+ "epoch": 4.362637362637362,
+ "grad_norm": 7.331233024597168,
+ "learning_rate": 3.760683760683761e-05,
+ "loss": 0.0697,
+ "step": 1191
+ },
+ {
+ "epoch": 4.366300366300366,
+ "grad_norm": 25.096189498901367,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.2196,
+ "step": 1192
+ },
+ {
+ "epoch": 4.36996336996337,
+ "grad_norm": 53.813209533691406,
+ "learning_rate": 3.7557997557997564e-05,
+ "loss": 0.3785,
+ "step": 1193
+ },
+ {
+ "epoch": 4.373626373626374,
+ "grad_norm": 13.184123039245605,
+ "learning_rate": 3.753357753357753e-05,
+ "loss": 0.1747,
+ "step": 1194
+ },
+ {
+ "epoch": 4.377289377289378,
+ "grad_norm": 1.818351149559021,
+ "learning_rate": 3.750915750915751e-05,
+ "loss": 0.0158,
+ "step": 1195
+ },
+ {
+ "epoch": 4.380952380952381,
+ "grad_norm": 63.21619415283203,
+ "learning_rate": 3.7484737484737486e-05,
+ "loss": 0.2863,
+ "step": 1196
+ },
+ {
+ "epoch": 4.384615384615385,
+ "grad_norm": 32.59927749633789,
+ "learning_rate": 3.746031746031746e-05,
+ "loss": 0.4261,
+ "step": 1197
+ },
+ {
+ "epoch": 4.388278388278389,
+ "grad_norm": 36.5265998840332,
+ "learning_rate": 3.7435897435897436e-05,
+ "loss": 0.8064,
+ "step": 1198
+ },
+ {
+ "epoch": 4.391941391941392,
+ "grad_norm": 47.726905822753906,
+ "learning_rate": 3.7411477411477414e-05,
+ "loss": 0.8884,
+ "step": 1199
+ },
+ {
+ "epoch": 4.395604395604396,
+ "grad_norm": 12.621973037719727,
+ "learning_rate": 3.7387057387057386e-05,
+ "loss": 0.1085,
+ "step": 1200
+ },
+ {
+ "epoch": 4.3992673992674,
+ "grad_norm": 24.7711124420166,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 0.2249,
+ "step": 1201
+ },
+ {
+ "epoch": 4.402930402930403,
+ "grad_norm": 39.19346618652344,
+ "learning_rate": 3.733821733821734e-05,
+ "loss": 0.4065,
+ "step": 1202
+ },
+ {
+ "epoch": 4.406593406593407,
+ "grad_norm": 20.3857421875,
+ "learning_rate": 3.7313797313797315e-05,
+ "loss": 0.1653,
+ "step": 1203
+ },
+ {
+ "epoch": 4.410256410256411,
+ "grad_norm": 58.15717697143555,
+ "learning_rate": 3.728937728937729e-05,
+ "loss": 0.8774,
+ "step": 1204
+ },
+ {
+ "epoch": 4.413919413919414,
+ "grad_norm": 28.05725860595703,
+ "learning_rate": 3.726495726495727e-05,
+ "loss": 0.1695,
+ "step": 1205
+ },
+ {
+ "epoch": 4.417582417582418,
+ "grad_norm": 24.635583877563477,
+ "learning_rate": 3.724053724053724e-05,
+ "loss": 0.4871,
+ "step": 1206
+ },
+ {
+ "epoch": 4.4212454212454215,
+ "grad_norm": 16.8306941986084,
+ "learning_rate": 3.7216117216117215e-05,
+ "loss": 0.0863,
+ "step": 1207
+ },
+ {
+ "epoch": 4.424908424908425,
+ "grad_norm": 16.2359676361084,
+ "learning_rate": 3.719169719169719e-05,
+ "loss": 0.077,
+ "step": 1208
+ },
+ {
+ "epoch": 4.428571428571429,
+ "grad_norm": 31.431425094604492,
+ "learning_rate": 3.7167277167277165e-05,
+ "loss": 0.2815,
+ "step": 1209
+ },
+ {
+ "epoch": 4.4322344322344325,
+ "grad_norm": 31.44464874267578,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.2237,
+ "step": 1210
+ },
+ {
+ "epoch": 4.435897435897436,
+ "grad_norm": 23.390378952026367,
+ "learning_rate": 3.7118437118437115e-05,
+ "loss": 0.1791,
+ "step": 1211
+ },
+ {
+ "epoch": 4.43956043956044,
+ "grad_norm": 48.210079193115234,
+ "learning_rate": 3.7094017094017094e-05,
+ "loss": 0.517,
+ "step": 1212
+ },
+ {
+ "epoch": 4.443223443223443,
+ "grad_norm": 45.35732650756836,
+ "learning_rate": 3.706959706959707e-05,
+ "loss": 0.4638,
+ "step": 1213
+ },
+ {
+ "epoch": 4.446886446886447,
+ "grad_norm": 16.88719367980957,
+ "learning_rate": 3.7045177045177044e-05,
+ "loss": 0.1203,
+ "step": 1214
+ },
+ {
+ "epoch": 4.450549450549451,
+ "grad_norm": 58.36906433105469,
+ "learning_rate": 3.702075702075702e-05,
+ "loss": 0.7366,
+ "step": 1215
+ },
+ {
+ "epoch": 4.454212454212454,
+ "grad_norm": 49.00838088989258,
+ "learning_rate": 3.6996336996337e-05,
+ "loss": 0.739,
+ "step": 1216
+ },
+ {
+ "epoch": 4.457875457875458,
+ "grad_norm": 42.87287521362305,
+ "learning_rate": 3.697191697191697e-05,
+ "loss": 1.3861,
+ "step": 1217
+ },
+ {
+ "epoch": 4.461538461538462,
+ "grad_norm": 44.62813949584961,
+ "learning_rate": 3.694749694749695e-05,
+ "loss": 0.549,
+ "step": 1218
+ },
+ {
+ "epoch": 4.465201465201465,
+ "grad_norm": 6.473313331604004,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.0407,
+ "step": 1219
+ },
+ {
+ "epoch": 4.468864468864469,
+ "grad_norm": 35.04784393310547,
+ "learning_rate": 3.6898656898656894e-05,
+ "loss": 0.3146,
+ "step": 1220
+ },
+ {
+ "epoch": 4.472527472527473,
+ "grad_norm": 44.79425811767578,
+ "learning_rate": 3.687423687423687e-05,
+ "loss": 0.5206,
+ "step": 1221
+ },
+ {
+ "epoch": 4.476190476190476,
+ "grad_norm": 36.52440643310547,
+ "learning_rate": 3.684981684981685e-05,
+ "loss": 0.5977,
+ "step": 1222
+ },
+ {
+ "epoch": 4.47985347985348,
+ "grad_norm": 58.15000915527344,
+ "learning_rate": 3.682539682539682e-05,
+ "loss": 1.0533,
+ "step": 1223
+ },
+ {
+ "epoch": 4.483516483516484,
+ "grad_norm": 32.33371353149414,
+ "learning_rate": 3.68009768009768e-05,
+ "loss": 0.3928,
+ "step": 1224
+ },
+ {
+ "epoch": 4.487179487179487,
+ "grad_norm": 44.501529693603516,
+ "learning_rate": 3.677655677655678e-05,
+ "loss": 0.8471,
+ "step": 1225
+ },
+ {
+ "epoch": 4.490842490842491,
+ "grad_norm": 41.62052536010742,
+ "learning_rate": 3.675213675213675e-05,
+ "loss": 0.7731,
+ "step": 1226
+ },
+ {
+ "epoch": 4.4945054945054945,
+ "grad_norm": 12.638876914978027,
+ "learning_rate": 3.672771672771673e-05,
+ "loss": 0.1219,
+ "step": 1227
+ },
+ {
+ "epoch": 4.498168498168498,
+ "grad_norm": 12.034523010253906,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.158,
+ "step": 1228
+ },
+ {
+ "epoch": 4.501831501831502,
+ "grad_norm": 42.04001235961914,
+ "learning_rate": 3.667887667887668e-05,
+ "loss": 0.8556,
+ "step": 1229
+ },
+ {
+ "epoch": 4.5054945054945055,
+ "grad_norm": 36.28947448730469,
+ "learning_rate": 3.665445665445666e-05,
+ "loss": 0.6569,
+ "step": 1230
+ },
+ {
+ "epoch": 4.509157509157509,
+ "grad_norm": 40.263912200927734,
+ "learning_rate": 3.663003663003664e-05,
+ "loss": 0.7625,
+ "step": 1231
+ },
+ {
+ "epoch": 4.512820512820513,
+ "grad_norm": 23.760005950927734,
+ "learning_rate": 3.660561660561661e-05,
+ "loss": 0.2465,
+ "step": 1232
+ },
+ {
+ "epoch": 4.516483516483516,
+ "grad_norm": 23.589109420776367,
+ "learning_rate": 3.658119658119658e-05,
+ "loss": 0.4408,
+ "step": 1233
+ },
+ {
+ "epoch": 4.52014652014652,
+ "grad_norm": 30.512271881103516,
+ "learning_rate": 3.655677655677655e-05,
+ "loss": 0.8748,
+ "step": 1234
+ },
+ {
+ "epoch": 4.523809523809524,
+ "grad_norm": 8.060181617736816,
+ "learning_rate": 3.653235653235653e-05,
+ "loss": 0.0818,
+ "step": 1235
+ },
+ {
+ "epoch": 4.527472527472527,
+ "grad_norm": 14.353645324707031,
+ "learning_rate": 3.650793650793651e-05,
+ "loss": 0.1899,
+ "step": 1236
+ },
+ {
+ "epoch": 4.531135531135531,
+ "grad_norm": 12.20384693145752,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1618,
+ "step": 1237
+ },
+ {
+ "epoch": 4.534798534798535,
+ "grad_norm": 182.4698028564453,
+ "learning_rate": 3.645909645909646e-05,
+ "loss": 0.9223,
+ "step": 1238
+ },
+ {
+ "epoch": 4.538461538461538,
+ "grad_norm": 33.137081146240234,
+ "learning_rate": 3.643467643467644e-05,
+ "loss": 0.7708,
+ "step": 1239
+ },
+ {
+ "epoch": 4.542124542124542,
+ "grad_norm": 19.895912170410156,
+ "learning_rate": 3.641025641025641e-05,
+ "loss": 0.164,
+ "step": 1240
+ },
+ {
+ "epoch": 4.545787545787546,
+ "grad_norm": 62.816864013671875,
+ "learning_rate": 3.638583638583639e-05,
+ "loss": 1.4675,
+ "step": 1241
+ },
+ {
+ "epoch": 4.549450549450549,
+ "grad_norm": 35.58034896850586,
+ "learning_rate": 3.6361416361416366e-05,
+ "loss": 0.4449,
+ "step": 1242
+ },
+ {
+ "epoch": 4.553113553113553,
+ "grad_norm": 21.993911743164062,
+ "learning_rate": 3.633699633699634e-05,
+ "loss": 0.2302,
+ "step": 1243
+ },
+ {
+ "epoch": 4.556776556776557,
+ "grad_norm": 33.743812561035156,
+ "learning_rate": 3.6312576312576316e-05,
+ "loss": 0.1782,
+ "step": 1244
+ },
+ {
+ "epoch": 4.56043956043956,
+ "grad_norm": 40.135711669921875,
+ "learning_rate": 3.6288156288156294e-05,
+ "loss": 0.7147,
+ "step": 1245
+ },
+ {
+ "epoch": 4.564102564102564,
+ "grad_norm": 2.47517728805542,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0188,
+ "step": 1246
+ },
+ {
+ "epoch": 4.5677655677655675,
+ "grad_norm": 22.023807525634766,
+ "learning_rate": 3.623931623931624e-05,
+ "loss": 0.3182,
+ "step": 1247
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 21.8381290435791,
+ "learning_rate": 3.6214896214896216e-05,
+ "loss": 0.4161,
+ "step": 1248
+ },
+ {
+ "epoch": 4.575091575091575,
+ "grad_norm": 20.989906311035156,
+ "learning_rate": 3.619047619047619e-05,
+ "loss": 0.2972,
+ "step": 1249
+ },
+ {
+ "epoch": 4.5787545787545785,
+ "grad_norm": 75.8060073852539,
+ "learning_rate": 3.6166056166056166e-05,
+ "loss": 0.6194,
+ "step": 1250
+ },
+ {
+ "epoch": 4.582417582417582,
+ "grad_norm": 40.85308074951172,
+ "learning_rate": 3.6141636141636145e-05,
+ "loss": 0.7707,
+ "step": 1251
+ },
+ {
+ "epoch": 4.586080586080586,
+ "grad_norm": 62.22278594970703,
+ "learning_rate": 3.6117216117216117e-05,
+ "loss": 0.6872,
+ "step": 1252
+ },
+ {
+ "epoch": 4.589743589743589,
+ "grad_norm": 30.27143669128418,
+ "learning_rate": 3.6092796092796095e-05,
+ "loss": 0.484,
+ "step": 1253
+ },
+ {
+ "epoch": 4.593406593406593,
+ "grad_norm": 44.08026123046875,
+ "learning_rate": 3.6068376068376073e-05,
+ "loss": 0.8593,
+ "step": 1254
+ },
+ {
+ "epoch": 4.597069597069597,
+ "grad_norm": 22.63222312927246,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.2542,
+ "step": 1255
+ },
+ {
+ "epoch": 4.6007326007326,
+ "grad_norm": 7.024168014526367,
+ "learning_rate": 3.6019536019536024e-05,
+ "loss": 0.0777,
+ "step": 1256
+ },
+ {
+ "epoch": 4.604395604395604,
+ "grad_norm": 24.981502532958984,
+ "learning_rate": 3.5995115995116e-05,
+ "loss": 0.2332,
+ "step": 1257
+ },
+ {
+ "epoch": 4.608058608058608,
+ "grad_norm": 28.929807662963867,
+ "learning_rate": 3.5970695970695974e-05,
+ "loss": 0.3665,
+ "step": 1258
+ },
+ {
+ "epoch": 4.611721611721611,
+ "grad_norm": 36.756683349609375,
+ "learning_rate": 3.5946275946275945e-05,
+ "loss": 1.2777,
+ "step": 1259
+ },
+ {
+ "epoch": 4.615384615384615,
+ "grad_norm": 53.04755783081055,
+ "learning_rate": 3.592185592185592e-05,
+ "loss": 0.3001,
+ "step": 1260
+ },
+ {
+ "epoch": 4.619047619047619,
+ "grad_norm": 39.71099853515625,
+ "learning_rate": 3.5897435897435896e-05,
+ "loss": 0.7756,
+ "step": 1261
+ },
+ {
+ "epoch": 4.622710622710622,
+ "grad_norm": 21.80796241760254,
+ "learning_rate": 3.5873015873015874e-05,
+ "loss": 0.2329,
+ "step": 1262
+ },
+ {
+ "epoch": 4.626373626373626,
+ "grad_norm": 25.909208297729492,
+ "learning_rate": 3.5848595848595846e-05,
+ "loss": 0.5081,
+ "step": 1263
+ },
+ {
+ "epoch": 4.63003663003663,
+ "grad_norm": 46.62733840942383,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.8265,
+ "step": 1264
+ },
+ {
+ "epoch": 4.633699633699633,
+ "grad_norm": 5.689383506774902,
+ "learning_rate": 3.57997557997558e-05,
+ "loss": 0.055,
+ "step": 1265
+ },
+ {
+ "epoch": 4.637362637362637,
+ "grad_norm": 23.30045509338379,
+ "learning_rate": 3.5775335775335774e-05,
+ "loss": 0.3397,
+ "step": 1266
+ },
+ {
+ "epoch": 4.641025641025641,
+ "grad_norm": 15.685534477233887,
+ "learning_rate": 3.575091575091575e-05,
+ "loss": 0.0862,
+ "step": 1267
+ },
+ {
+ "epoch": 4.644688644688645,
+ "grad_norm": 27.56009864807129,
+ "learning_rate": 3.572649572649573e-05,
+ "loss": 0.4751,
+ "step": 1268
+ },
+ {
+ "epoch": 4.648351648351649,
+ "grad_norm": 18.164905548095703,
+ "learning_rate": 3.57020757020757e-05,
+ "loss": 0.1274,
+ "step": 1269
+ },
+ {
+ "epoch": 4.652014652014652,
+ "grad_norm": 18.178728103637695,
+ "learning_rate": 3.567765567765568e-05,
+ "loss": 0.1246,
+ "step": 1270
+ },
+ {
+ "epoch": 4.655677655677656,
+ "grad_norm": 11.308391571044922,
+ "learning_rate": 3.565323565323565e-05,
+ "loss": 0.0937,
+ "step": 1271
+ },
+ {
+ "epoch": 4.65934065934066,
+ "grad_norm": 38.507469177246094,
+ "learning_rate": 3.5628815628815625e-05,
+ "loss": 0.4616,
+ "step": 1272
+ },
+ {
+ "epoch": 4.663003663003663,
+ "grad_norm": 9.642159461975098,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.0772,
+ "step": 1273
+ },
+ {
+ "epoch": 4.666666666666667,
+ "grad_norm": 31.854310989379883,
+ "learning_rate": 3.557997557997558e-05,
+ "loss": 0.2349,
+ "step": 1274
+ },
+ {
+ "epoch": 4.670329670329671,
+ "grad_norm": 53.341617584228516,
+ "learning_rate": 3.555555555555555e-05,
+ "loss": 0.2926,
+ "step": 1275
+ },
+ {
+ "epoch": 4.673992673992674,
+ "grad_norm": 24.003368377685547,
+ "learning_rate": 3.553113553113553e-05,
+ "loss": 0.1689,
+ "step": 1276
+ },
+ {
+ "epoch": 4.677655677655678,
+ "grad_norm": 12.198409080505371,
+ "learning_rate": 3.550671550671551e-05,
+ "loss": 0.1001,
+ "step": 1277
+ },
+ {
+ "epoch": 4.681318681318682,
+ "grad_norm": 56.559051513671875,
+ "learning_rate": 3.548229548229548e-05,
+ "loss": 0.5314,
+ "step": 1278
+ },
+ {
+ "epoch": 4.684981684981685,
+ "grad_norm": 17.89840316772461,
+ "learning_rate": 3.545787545787546e-05,
+ "loss": 0.1258,
+ "step": 1279
+ },
+ {
+ "epoch": 4.688644688644689,
+ "grad_norm": 14.37424087524414,
+ "learning_rate": 3.543345543345544e-05,
+ "loss": 0.0925,
+ "step": 1280
+ },
+ {
+ "epoch": 4.6923076923076925,
+ "grad_norm": 21.21650505065918,
+ "learning_rate": 3.540903540903541e-05,
+ "loss": 0.1541,
+ "step": 1281
+ },
+ {
+ "epoch": 4.695970695970696,
+ "grad_norm": 36.1934814453125,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.403,
+ "step": 1282
+ },
+ {
+ "epoch": 4.6996336996337,
+ "grad_norm": 62.917022705078125,
+ "learning_rate": 3.536019536019537e-05,
+ "loss": 1.2771,
+ "step": 1283
+ },
+ {
+ "epoch": 4.7032967032967035,
+ "grad_norm": 30.238500595092773,
+ "learning_rate": 3.533577533577533e-05,
+ "loss": 0.3149,
+ "step": 1284
+ },
+ {
+ "epoch": 4.706959706959707,
+ "grad_norm": 12.155022621154785,
+ "learning_rate": 3.531135531135531e-05,
+ "loss": 0.0543,
+ "step": 1285
+ },
+ {
+ "epoch": 4.710622710622711,
+ "grad_norm": 39.67718505859375,
+ "learning_rate": 3.528693528693528e-05,
+ "loss": 0.4201,
+ "step": 1286
+ },
+ {
+ "epoch": 4.714285714285714,
+ "grad_norm": 46.620235443115234,
+ "learning_rate": 3.526251526251526e-05,
+ "loss": 0.7735,
+ "step": 1287
+ },
+ {
+ "epoch": 4.717948717948718,
+ "grad_norm": 29.740169525146484,
+ "learning_rate": 3.523809523809524e-05,
+ "loss": 0.4753,
+ "step": 1288
+ },
+ {
+ "epoch": 4.721611721611722,
+ "grad_norm": 17.668439865112305,
+ "learning_rate": 3.521367521367521e-05,
+ "loss": 0.0738,
+ "step": 1289
+ },
+ {
+ "epoch": 4.725274725274725,
+ "grad_norm": 29.107847213745117,
+ "learning_rate": 3.518925518925519e-05,
+ "loss": 0.2967,
+ "step": 1290
+ },
+ {
+ "epoch": 4.728937728937729,
+ "grad_norm": 41.70953369140625,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2407,
+ "step": 1291
+ },
+ {
+ "epoch": 4.732600732600733,
+ "grad_norm": 41.50172805786133,
+ "learning_rate": 3.514041514041514e-05,
+ "loss": 0.5012,
+ "step": 1292
+ },
+ {
+ "epoch": 4.736263736263736,
+ "grad_norm": 10.921927452087402,
+ "learning_rate": 3.511599511599512e-05,
+ "loss": 0.0583,
+ "step": 1293
+ },
+ {
+ "epoch": 4.73992673992674,
+ "grad_norm": 10.986832618713379,
+ "learning_rate": 3.5091575091575096e-05,
+ "loss": 0.1684,
+ "step": 1294
+ },
+ {
+ "epoch": 4.743589743589744,
+ "grad_norm": 77.36996459960938,
+ "learning_rate": 3.506715506715507e-05,
+ "loss": 0.1532,
+ "step": 1295
+ },
+ {
+ "epoch": 4.747252747252747,
+ "grad_norm": 2.912205457687378,
+ "learning_rate": 3.5042735042735046e-05,
+ "loss": 0.0178,
+ "step": 1296
+ },
+ {
+ "epoch": 4.750915750915751,
+ "grad_norm": 7.694264888763428,
+ "learning_rate": 3.501831501831502e-05,
+ "loss": 0.0448,
+ "step": 1297
+ },
+ {
+ "epoch": 4.754578754578755,
+ "grad_norm": 59.40597152709961,
+ "learning_rate": 3.499389499389499e-05,
+ "loss": 0.825,
+ "step": 1298
+ },
+ {
+ "epoch": 4.758241758241758,
+ "grad_norm": 44.394065856933594,
+ "learning_rate": 3.496947496947497e-05,
+ "loss": 0.2582,
+ "step": 1299
+ },
+ {
+ "epoch": 4.761904761904762,
+ "grad_norm": 48.07161331176758,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.5681,
+ "step": 1300
+ },
+ {
+ "epoch": 4.7655677655677655,
+ "grad_norm": 47.763275146484375,
+ "learning_rate": 3.492063492063492e-05,
+ "loss": 0.2289,
+ "step": 1301
+ },
+ {
+ "epoch": 4.769230769230769,
+ "grad_norm": 33.30193328857422,
+ "learning_rate": 3.48962148962149e-05,
+ "loss": 0.2646,
+ "step": 1302
+ },
+ {
+ "epoch": 4.772893772893773,
+ "grad_norm": 62.87331008911133,
+ "learning_rate": 3.4871794871794875e-05,
+ "loss": 0.5135,
+ "step": 1303
+ },
+ {
+ "epoch": 4.7765567765567765,
+ "grad_norm": 57.62127685546875,
+ "learning_rate": 3.484737484737485e-05,
+ "loss": 0.6126,
+ "step": 1304
+ },
+ {
+ "epoch": 4.78021978021978,
+ "grad_norm": 35.42237854003906,
+ "learning_rate": 3.4822954822954825e-05,
+ "loss": 0.2312,
+ "step": 1305
+ },
+ {
+ "epoch": 4.783882783882784,
+ "grad_norm": 38.23964309692383,
+ "learning_rate": 3.4798534798534804e-05,
+ "loss": 0.4366,
+ "step": 1306
+ },
+ {
+ "epoch": 4.787545787545787,
+ "grad_norm": 24.94087028503418,
+ "learning_rate": 3.4774114774114776e-05,
+ "loss": 0.2944,
+ "step": 1307
+ },
+ {
+ "epoch": 4.791208791208791,
+ "grad_norm": 43.400047302246094,
+ "learning_rate": 3.4749694749694754e-05,
+ "loss": 0.4749,
+ "step": 1308
+ },
+ {
+ "epoch": 4.794871794871795,
+ "grad_norm": 82.01946258544922,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.6972,
+ "step": 1309
+ },
+ {
+ "epoch": 4.798534798534798,
+ "grad_norm": 25.38723373413086,
+ "learning_rate": 3.47008547008547e-05,
+ "loss": 0.3361,
+ "step": 1310
+ },
+ {
+ "epoch": 4.802197802197802,
+ "grad_norm": 13.022088050842285,
+ "learning_rate": 3.4676434676434676e-05,
+ "loss": 0.1853,
+ "step": 1311
+ },
+ {
+ "epoch": 4.805860805860806,
+ "grad_norm": 30.806135177612305,
+ "learning_rate": 3.465201465201465e-05,
+ "loss": 0.3196,
+ "step": 1312
+ },
+ {
+ "epoch": 4.809523809523809,
+ "grad_norm": 26.30035972595215,
+ "learning_rate": 3.4627594627594626e-05,
+ "loss": 0.2708,
+ "step": 1313
+ },
+ {
+ "epoch": 4.813186813186813,
+ "grad_norm": 6.557223796844482,
+ "learning_rate": 3.4603174603174604e-05,
+ "loss": 0.0815,
+ "step": 1314
+ },
+ {
+ "epoch": 4.816849816849817,
+ "grad_norm": 33.60557174682617,
+ "learning_rate": 3.4578754578754576e-05,
+ "loss": 0.9938,
+ "step": 1315
+ },
+ {
+ "epoch": 4.82051282051282,
+ "grad_norm": 104.2552719116211,
+ "learning_rate": 3.4554334554334555e-05,
+ "loss": 0.1937,
+ "step": 1316
+ },
+ {
+ "epoch": 4.824175824175824,
+ "grad_norm": 41.3105583190918,
+ "learning_rate": 3.452991452991453e-05,
+ "loss": 0.3856,
+ "step": 1317
+ },
+ {
+ "epoch": 4.827838827838828,
+ "grad_norm": 43.52134323120117,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.4823,
+ "step": 1318
+ },
+ {
+ "epoch": 4.831501831501831,
+ "grad_norm": 29.37596893310547,
+ "learning_rate": 3.448107448107448e-05,
+ "loss": 0.1746,
+ "step": 1319
+ },
+ {
+ "epoch": 4.835164835164835,
+ "grad_norm": 13.94152545928955,
+ "learning_rate": 3.445665445665446e-05,
+ "loss": 0.141,
+ "step": 1320
+ },
+ {
+ "epoch": 4.8388278388278385,
+ "grad_norm": 34.95270538330078,
+ "learning_rate": 3.443223443223443e-05,
+ "loss": 0.2701,
+ "step": 1321
+ },
+ {
+ "epoch": 4.842490842490842,
+ "grad_norm": 64.49109649658203,
+ "learning_rate": 3.440781440781441e-05,
+ "loss": 1.095,
+ "step": 1322
+ },
+ {
+ "epoch": 4.846153846153846,
+ "grad_norm": 61.1287727355957,
+ "learning_rate": 3.4383394383394383e-05,
+ "loss": 0.2083,
+ "step": 1323
+ },
+ {
+ "epoch": 4.8498168498168495,
+ "grad_norm": 62.69855499267578,
+ "learning_rate": 3.4358974358974355e-05,
+ "loss": 0.5077,
+ "step": 1324
+ },
+ {
+ "epoch": 4.853479853479853,
+ "grad_norm": 92.53154754638672,
+ "learning_rate": 3.4334554334554334e-05,
+ "loss": 0.7287,
+ "step": 1325
+ },
+ {
+ "epoch": 4.857142857142857,
+ "grad_norm": 98.1663589477539,
+ "learning_rate": 3.431013431013431e-05,
+ "loss": 1.2834,
+ "step": 1326
+ },
+ {
+ "epoch": 4.860805860805861,
+ "grad_norm": 52.24921417236328,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.8187,
+ "step": 1327
+ },
+ {
+ "epoch": 4.864468864468865,
+ "grad_norm": 60.897544860839844,
+ "learning_rate": 3.426129426129426e-05,
+ "loss": 1.5861,
+ "step": 1328
+ },
+ {
+ "epoch": 4.868131868131869,
+ "grad_norm": 21.70830535888672,
+ "learning_rate": 3.423687423687424e-05,
+ "loss": 0.1459,
+ "step": 1329
+ },
+ {
+ "epoch": 4.871794871794872,
+ "grad_norm": 47.87598419189453,
+ "learning_rate": 3.421245421245421e-05,
+ "loss": 1.0044,
+ "step": 1330
+ },
+ {
+ "epoch": 4.875457875457876,
+ "grad_norm": 172.73670959472656,
+ "learning_rate": 3.418803418803419e-05,
+ "loss": 1.4617,
+ "step": 1331
+ },
+ {
+ "epoch": 4.8791208791208796,
+ "grad_norm": 154.93960571289062,
+ "learning_rate": 3.416361416361417e-05,
+ "loss": 1.7488,
+ "step": 1332
+ },
+ {
+ "epoch": 4.882783882783883,
+ "grad_norm": 73.78408813476562,
+ "learning_rate": 3.413919413919414e-05,
+ "loss": 0.5789,
+ "step": 1333
+ },
+ {
+ "epoch": 4.886446886446887,
+ "grad_norm": 35.67369079589844,
+ "learning_rate": 3.411477411477412e-05,
+ "loss": 0.6101,
+ "step": 1334
+ },
+ {
+ "epoch": 4.8901098901098905,
+ "grad_norm": 54.61326599121094,
+ "learning_rate": 3.40903540903541e-05,
+ "loss": 0.7433,
+ "step": 1335
+ },
+ {
+ "epoch": 4.893772893772894,
+ "grad_norm": 28.492923736572266,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.7661,
+ "step": 1336
+ },
+ {
+ "epoch": 4.897435897435898,
+ "grad_norm": 17.2525634765625,
+ "learning_rate": 3.404151404151404e-05,
+ "loss": 0.2423,
+ "step": 1337
+ },
+ {
+ "epoch": 4.9010989010989015,
+ "grad_norm": 55.46605682373047,
+ "learning_rate": 3.401709401709401e-05,
+ "loss": 0.4419,
+ "step": 1338
+ },
+ {
+ "epoch": 4.904761904761905,
+ "grad_norm": 23.03455352783203,
+ "learning_rate": 3.399267399267399e-05,
+ "loss": 0.3046,
+ "step": 1339
+ },
+ {
+ "epoch": 4.908424908424909,
+ "grad_norm": 20.186574935913086,
+ "learning_rate": 3.396825396825397e-05,
+ "loss": 0.3712,
+ "step": 1340
+ },
+ {
+ "epoch": 4.912087912087912,
+ "grad_norm": 22.702407836914062,
+ "learning_rate": 3.394383394383394e-05,
+ "loss": 0.4481,
+ "step": 1341
+ },
+ {
+ "epoch": 4.915750915750916,
+ "grad_norm": 25.723426818847656,
+ "learning_rate": 3.391941391941392e-05,
+ "loss": 0.1832,
+ "step": 1342
+ },
+ {
+ "epoch": 4.91941391941392,
+ "grad_norm": 18.955692291259766,
+ "learning_rate": 3.38949938949939e-05,
+ "loss": 0.1334,
+ "step": 1343
+ },
+ {
+ "epoch": 4.923076923076923,
+ "grad_norm": 20.29511833190918,
+ "learning_rate": 3.387057387057387e-05,
+ "loss": 0.1811,
+ "step": 1344
+ },
+ {
+ "epoch": 4.926739926739927,
+ "grad_norm": 22.23061752319336,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.2643,
+ "step": 1345
+ },
+ {
+ "epoch": 4.930402930402931,
+ "grad_norm": 52.057132720947266,
+ "learning_rate": 3.382173382173383e-05,
+ "loss": 0.5874,
+ "step": 1346
+ },
+ {
+ "epoch": 4.934065934065934,
+ "grad_norm": 66.5381851196289,
+ "learning_rate": 3.37973137973138e-05,
+ "loss": 0.4993,
+ "step": 1347
+ },
+ {
+ "epoch": 4.937728937728938,
+ "grad_norm": 8.25474739074707,
+ "learning_rate": 3.377289377289378e-05,
+ "loss": 0.0263,
+ "step": 1348
+ },
+ {
+ "epoch": 4.941391941391942,
+ "grad_norm": 31.373722076416016,
+ "learning_rate": 3.374847374847375e-05,
+ "loss": 0.288,
+ "step": 1349
+ },
+ {
+ "epoch": 4.945054945054945,
+ "grad_norm": 51.15471267700195,
+ "learning_rate": 3.372405372405372e-05,
+ "loss": 0.7586,
+ "step": 1350
+ },
+ {
+ "epoch": 4.948717948717949,
+ "grad_norm": 39.163639068603516,
+ "learning_rate": 3.36996336996337e-05,
+ "loss": 1.221,
+ "step": 1351
+ },
+ {
+ "epoch": 4.9523809523809526,
+ "grad_norm": 11.033390998840332,
+ "learning_rate": 3.367521367521368e-05,
+ "loss": 0.069,
+ "step": 1352
+ },
+ {
+ "epoch": 4.956043956043956,
+ "grad_norm": 24.14516830444336,
+ "learning_rate": 3.365079365079365e-05,
+ "loss": 0.6001,
+ "step": 1353
+ },
+ {
+ "epoch": 4.95970695970696,
+ "grad_norm": 36.211891174316406,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.5598,
+ "step": 1354
+ },
+ {
+ "epoch": 4.9633699633699635,
+ "grad_norm": 23.723434448242188,
+ "learning_rate": 3.3601953601953606e-05,
+ "loss": 0.3133,
+ "step": 1355
+ },
+ {
+ "epoch": 4.967032967032967,
+ "grad_norm": 21.853551864624023,
+ "learning_rate": 3.357753357753358e-05,
+ "loss": 0.1974,
+ "step": 1356
+ },
+ {
+ "epoch": 4.970695970695971,
+ "grad_norm": 25.392358779907227,
+ "learning_rate": 3.3553113553113556e-05,
+ "loss": 0.5114,
+ "step": 1357
+ },
+ {
+ "epoch": 4.9743589743589745,
+ "grad_norm": 94.81107330322266,
+ "learning_rate": 3.3528693528693534e-05,
+ "loss": 0.4609,
+ "step": 1358
+ },
+ {
+ "epoch": 4.978021978021978,
+ "grad_norm": 24.487186431884766,
+ "learning_rate": 3.3504273504273506e-05,
+ "loss": 0.6613,
+ "step": 1359
+ },
+ {
+ "epoch": 4.981684981684982,
+ "grad_norm": 18.870473861694336,
+ "learning_rate": 3.3479853479853485e-05,
+ "loss": 0.1229,
+ "step": 1360
+ },
+ {
+ "epoch": 4.985347985347985,
+ "grad_norm": 17.630233764648438,
+ "learning_rate": 3.3455433455433456e-05,
+ "loss": 0.1836,
+ "step": 1361
+ },
+ {
+ "epoch": 4.989010989010989,
+ "grad_norm": 24.850299835205078,
+ "learning_rate": 3.343101343101343e-05,
+ "loss": 0.4499,
+ "step": 1362
+ },
+ {
+ "epoch": 4.992673992673993,
+ "grad_norm": 13.472710609436035,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.2,
+ "step": 1363
+ },
+ {
+ "epoch": 4.996336996336996,
+ "grad_norm": 25.112987518310547,
+ "learning_rate": 3.338217338217338e-05,
+ "loss": 0.2978,
+ "step": 1364
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 20.6419620513916,
+ "learning_rate": 3.3357753357753356e-05,
+ "loss": 0.1711,
+ "step": 1365
+ },
+ {
+ "epoch": 5.003663003663004,
+ "grad_norm": 20.868810653686523,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.1433,
+ "step": 1366
+ },
+ {
+ "epoch": 5.007326007326007,
+ "grad_norm": 15.846084594726562,
+ "learning_rate": 3.3308913308913307e-05,
+ "loss": 0.2174,
+ "step": 1367
+ },
+ {
+ "epoch": 5.010989010989011,
+ "grad_norm": 29.00075912475586,
+ "learning_rate": 3.3284493284493285e-05,
+ "loss": 0.5032,
+ "step": 1368
+ },
+ {
+ "epoch": 5.014652014652015,
+ "grad_norm": 33.520896911621094,
+ "learning_rate": 3.3260073260073264e-05,
+ "loss": 0.4061,
+ "step": 1369
+ },
+ {
+ "epoch": 5.018315018315018,
+ "grad_norm": 12.909339904785156,
+ "learning_rate": 3.3235653235653235e-05,
+ "loss": 0.0953,
+ "step": 1370
+ },
+ {
+ "epoch": 5.021978021978022,
+ "grad_norm": 0.2602078318595886,
+ "learning_rate": 3.3211233211233214e-05,
+ "loss": 0.0012,
+ "step": 1371
+ },
+ {
+ "epoch": 5.0256410256410255,
+ "grad_norm": 38.391422271728516,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 0.1825,
+ "step": 1372
+ },
+ {
+ "epoch": 5.029304029304029,
+ "grad_norm": 70.76541900634766,
+ "learning_rate": 3.3162393162393164e-05,
+ "loss": 0.846,
+ "step": 1373
+ },
+ {
+ "epoch": 5.032967032967033,
+ "grad_norm": 17.12116813659668,
+ "learning_rate": 3.3137973137973135e-05,
+ "loss": 0.0827,
+ "step": 1374
+ },
+ {
+ "epoch": 5.0366300366300365,
+ "grad_norm": 10.847224235534668,
+ "learning_rate": 3.3113553113553114e-05,
+ "loss": 0.0598,
+ "step": 1375
+ },
+ {
+ "epoch": 5.04029304029304,
+ "grad_norm": 31.552082061767578,
+ "learning_rate": 3.3089133089133086e-05,
+ "loss": 0.4466,
+ "step": 1376
+ },
+ {
+ "epoch": 5.043956043956044,
+ "grad_norm": 15.32805061340332,
+ "learning_rate": 3.3064713064713064e-05,
+ "loss": 0.0502,
+ "step": 1377
+ },
+ {
+ "epoch": 5.0476190476190474,
+ "grad_norm": 80.18537139892578,
+ "learning_rate": 3.304029304029304e-05,
+ "loss": 0.7377,
+ "step": 1378
+ },
+ {
+ "epoch": 5.051282051282051,
+ "grad_norm": 11.73173713684082,
+ "learning_rate": 3.3015873015873014e-05,
+ "loss": 0.1129,
+ "step": 1379
+ },
+ {
+ "epoch": 5.054945054945055,
+ "grad_norm": 46.249935150146484,
+ "learning_rate": 3.299145299145299e-05,
+ "loss": 0.5367,
+ "step": 1380
+ },
+ {
+ "epoch": 5.058608058608058,
+ "grad_norm": 9.185178756713867,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.0453,
+ "step": 1381
+ },
+ {
+ "epoch": 5.062271062271062,
+ "grad_norm": 40.83237838745117,
+ "learning_rate": 3.294261294261294e-05,
+ "loss": 0.1428,
+ "step": 1382
+ },
+ {
+ "epoch": 5.065934065934066,
+ "grad_norm": 32.31568908691406,
+ "learning_rate": 3.291819291819292e-05,
+ "loss": 0.3131,
+ "step": 1383
+ },
+ {
+ "epoch": 5.069597069597069,
+ "grad_norm": 5.372808456420898,
+ "learning_rate": 3.28937728937729e-05,
+ "loss": 0.0452,
+ "step": 1384
+ },
+ {
+ "epoch": 5.073260073260073,
+ "grad_norm": 3.0900495052337646,
+ "learning_rate": 3.286935286935287e-05,
+ "loss": 0.0175,
+ "step": 1385
+ },
+ {
+ "epoch": 5.076923076923077,
+ "grad_norm": 25.293724060058594,
+ "learning_rate": 3.284493284493285e-05,
+ "loss": 0.2162,
+ "step": 1386
+ },
+ {
+ "epoch": 5.08058608058608,
+ "grad_norm": 26.231664657592773,
+ "learning_rate": 3.282051282051282e-05,
+ "loss": 0.1764,
+ "step": 1387
+ },
+ {
+ "epoch": 5.084249084249084,
+ "grad_norm": 24.69008445739746,
+ "learning_rate": 3.279609279609279e-05,
+ "loss": 0.1019,
+ "step": 1388
+ },
+ {
+ "epoch": 5.087912087912088,
+ "grad_norm": 12.522343635559082,
+ "learning_rate": 3.277167277167277e-05,
+ "loss": 0.0424,
+ "step": 1389
+ },
+ {
+ "epoch": 5.091575091575091,
+ "grad_norm": 28.68439292907715,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.3441,
+ "step": 1390
+ },
+ {
+ "epoch": 5.095238095238095,
+ "grad_norm": 9.312751770019531,
+ "learning_rate": 3.272283272283272e-05,
+ "loss": 0.0675,
+ "step": 1391
+ },
+ {
+ "epoch": 5.0989010989010985,
+ "grad_norm": 12.041552543640137,
+ "learning_rate": 3.26984126984127e-05,
+ "loss": 0.049,
+ "step": 1392
+ },
+ {
+ "epoch": 5.102564102564102,
+ "grad_norm": 36.706031799316406,
+ "learning_rate": 3.267399267399267e-05,
+ "loss": 0.2947,
+ "step": 1393
+ },
+ {
+ "epoch": 5.106227106227106,
+ "grad_norm": 0.5009213089942932,
+ "learning_rate": 3.264957264957265e-05,
+ "loss": 0.0028,
+ "step": 1394
+ },
+ {
+ "epoch": 5.1098901098901095,
+ "grad_norm": 53.88454818725586,
+ "learning_rate": 3.262515262515263e-05,
+ "loss": 0.5004,
+ "step": 1395
+ },
+ {
+ "epoch": 5.113553113553113,
+ "grad_norm": 11.917198181152344,
+ "learning_rate": 3.26007326007326e-05,
+ "loss": 0.0734,
+ "step": 1396
+ },
+ {
+ "epoch": 5.117216117216117,
+ "grad_norm": 58.02888107299805,
+ "learning_rate": 3.257631257631258e-05,
+ "loss": 0.7099,
+ "step": 1397
+ },
+ {
+ "epoch": 5.1208791208791204,
+ "grad_norm": 18.3216609954834,
+ "learning_rate": 3.255189255189256e-05,
+ "loss": 0.1162,
+ "step": 1398
+ },
+ {
+ "epoch": 5.124542124542124,
+ "grad_norm": 7.598775863647461,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 0.0341,
+ "step": 1399
+ },
+ {
+ "epoch": 5.128205128205128,
+ "grad_norm": 199.40313720703125,
+ "learning_rate": 3.25030525030525e-05,
+ "loss": 0.3829,
+ "step": 1400
+ },
+ {
+ "epoch": 5.131868131868132,
+ "grad_norm": 6.528984546661377,
+ "learning_rate": 3.247863247863248e-05,
+ "loss": 0.041,
+ "step": 1401
+ },
+ {
+ "epoch": 5.135531135531136,
+ "grad_norm": 28.80277442932129,
+ "learning_rate": 3.245421245421245e-05,
+ "loss": 0.3511,
+ "step": 1402
+ },
+ {
+ "epoch": 5.13919413919414,
+ "grad_norm": 5.08656120300293,
+ "learning_rate": 3.242979242979243e-05,
+ "loss": 0.0403,
+ "step": 1403
+ },
+ {
+ "epoch": 5.142857142857143,
+ "grad_norm": 16.86358070373535,
+ "learning_rate": 3.240537240537241e-05,
+ "loss": 0.1676,
+ "step": 1404
+ },
+ {
+ "epoch": 5.146520146520147,
+ "grad_norm": 46.099613189697266,
+ "learning_rate": 3.238095238095238e-05,
+ "loss": 0.8096,
+ "step": 1405
+ },
+ {
+ "epoch": 5.1501831501831505,
+ "grad_norm": 26.01686668395996,
+ "learning_rate": 3.235653235653236e-05,
+ "loss": 0.1283,
+ "step": 1406
+ },
+ {
+ "epoch": 5.153846153846154,
+ "grad_norm": 4.826385498046875,
+ "learning_rate": 3.2332112332112336e-05,
+ "loss": 0.0328,
+ "step": 1407
+ },
+ {
+ "epoch": 5.157509157509158,
+ "grad_norm": 34.697593688964844,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.1306,
+ "step": 1408
+ },
+ {
+ "epoch": 5.1611721611721615,
+ "grad_norm": 21.331661224365234,
+ "learning_rate": 3.2283272283272286e-05,
+ "loss": 0.1302,
+ "step": 1409
+ },
+ {
+ "epoch": 5.164835164835165,
+ "grad_norm": 9.991851806640625,
+ "learning_rate": 3.2258852258852265e-05,
+ "loss": 0.0441,
+ "step": 1410
+ },
+ {
+ "epoch": 5.168498168498169,
+ "grad_norm": 26.641136169433594,
+ "learning_rate": 3.2234432234432237e-05,
+ "loss": 0.0894,
+ "step": 1411
+ },
+ {
+ "epoch": 5.172161172161172,
+ "grad_norm": 24.541366577148438,
+ "learning_rate": 3.2210012210012215e-05,
+ "loss": 0.1026,
+ "step": 1412
+ },
+ {
+ "epoch": 5.175824175824176,
+ "grad_norm": 44.62923049926758,
+ "learning_rate": 3.218559218559218e-05,
+ "loss": 0.1887,
+ "step": 1413
+ },
+ {
+ "epoch": 5.17948717948718,
+ "grad_norm": 19.28236198425293,
+ "learning_rate": 3.216117216117216e-05,
+ "loss": 0.0631,
+ "step": 1414
+ },
+ {
+ "epoch": 5.183150183150183,
+ "grad_norm": 10.39486026763916,
+ "learning_rate": 3.213675213675214e-05,
+ "loss": 0.0614,
+ "step": 1415
+ },
+ {
+ "epoch": 5.186813186813187,
+ "grad_norm": 32.476009368896484,
+ "learning_rate": 3.211233211233211e-05,
+ "loss": 0.2238,
+ "step": 1416
+ },
+ {
+ "epoch": 5.190476190476191,
+ "grad_norm": 9.828605651855469,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.0589,
+ "step": 1417
+ },
+ {
+ "epoch": 5.194139194139194,
+ "grad_norm": 50.0748291015625,
+ "learning_rate": 3.2063492063492065e-05,
+ "loss": 0.8225,
+ "step": 1418
+ },
+ {
+ "epoch": 5.197802197802198,
+ "grad_norm": 31.925779342651367,
+ "learning_rate": 3.203907203907204e-05,
+ "loss": 0.1824,
+ "step": 1419
+ },
+ {
+ "epoch": 5.201465201465202,
+ "grad_norm": 108.24534606933594,
+ "learning_rate": 3.2014652014652016e-05,
+ "loss": 2.3808,
+ "step": 1420
+ },
+ {
+ "epoch": 5.205128205128205,
+ "grad_norm": 54.39910888671875,
+ "learning_rate": 3.1990231990231994e-05,
+ "loss": 0.614,
+ "step": 1421
+ },
+ {
+ "epoch": 5.208791208791209,
+ "grad_norm": 13.70672607421875,
+ "learning_rate": 3.1965811965811966e-05,
+ "loss": 0.0366,
+ "step": 1422
+ },
+ {
+ "epoch": 5.212454212454213,
+ "grad_norm": 19.851043701171875,
+ "learning_rate": 3.1941391941391944e-05,
+ "loss": 0.1847,
+ "step": 1423
+ },
+ {
+ "epoch": 5.216117216117216,
+ "grad_norm": 1.041467547416687,
+ "learning_rate": 3.191697191697192e-05,
+ "loss": 0.0062,
+ "step": 1424
+ },
+ {
+ "epoch": 5.21978021978022,
+ "grad_norm": 10.629105567932129,
+ "learning_rate": 3.1892551892551894e-05,
+ "loss": 0.1058,
+ "step": 1425
+ },
+ {
+ "epoch": 5.2234432234432235,
+ "grad_norm": 25.597496032714844,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.1786,
+ "step": 1426
+ },
+ {
+ "epoch": 5.227106227106227,
+ "grad_norm": 21.409902572631836,
+ "learning_rate": 3.1843711843711844e-05,
+ "loss": 0.1354,
+ "step": 1427
+ },
+ {
+ "epoch": 5.230769230769231,
+ "grad_norm": 252.64599609375,
+ "learning_rate": 3.1819291819291816e-05,
+ "loss": 0.476,
+ "step": 1428
+ },
+ {
+ "epoch": 5.2344322344322345,
+ "grad_norm": 22.15670394897461,
+ "learning_rate": 3.1794871794871795e-05,
+ "loss": 0.2111,
+ "step": 1429
+ },
+ {
+ "epoch": 5.238095238095238,
+ "grad_norm": 37.93739700317383,
+ "learning_rate": 3.177045177045177e-05,
+ "loss": 0.391,
+ "step": 1430
+ },
+ {
+ "epoch": 5.241758241758242,
+ "grad_norm": 25.364606857299805,
+ "learning_rate": 3.1746031746031745e-05,
+ "loss": 0.3365,
+ "step": 1431
+ },
+ {
+ "epoch": 5.245421245421245,
+ "grad_norm": 20.658681869506836,
+ "learning_rate": 3.172161172161172e-05,
+ "loss": 0.2419,
+ "step": 1432
+ },
+ {
+ "epoch": 5.249084249084249,
+ "grad_norm": 11.507100105285645,
+ "learning_rate": 3.16971916971917e-05,
+ "loss": 0.074,
+ "step": 1433
+ },
+ {
+ "epoch": 5.252747252747253,
+ "grad_norm": 32.7891845703125,
+ "learning_rate": 3.167277167277167e-05,
+ "loss": 0.261,
+ "step": 1434
+ },
+ {
+ "epoch": 5.256410256410256,
+ "grad_norm": 10.153932571411133,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 0.0317,
+ "step": 1435
+ },
+ {
+ "epoch": 5.26007326007326,
+ "grad_norm": 16.879608154296875,
+ "learning_rate": 3.162393162393163e-05,
+ "loss": 0.0668,
+ "step": 1436
+ },
+ {
+ "epoch": 5.263736263736264,
+ "grad_norm": 5.040280818939209,
+ "learning_rate": 3.15995115995116e-05,
+ "loss": 0.0197,
+ "step": 1437
+ },
+ {
+ "epoch": 5.267399267399267,
+ "grad_norm": 32.5413818359375,
+ "learning_rate": 3.157509157509158e-05,
+ "loss": 0.2659,
+ "step": 1438
+ },
+ {
+ "epoch": 5.271062271062271,
+ "grad_norm": 54.41200637817383,
+ "learning_rate": 3.1550671550671545e-05,
+ "loss": 0.6863,
+ "step": 1439
+ },
+ {
+ "epoch": 5.274725274725275,
+ "grad_norm": 13.049643516540527,
+ "learning_rate": 3.1526251526251524e-05,
+ "loss": 0.0808,
+ "step": 1440
+ },
+ {
+ "epoch": 5.278388278388278,
+ "grad_norm": 37.76680374145508,
+ "learning_rate": 3.15018315018315e-05,
+ "loss": 0.2917,
+ "step": 1441
+ },
+ {
+ "epoch": 5.282051282051282,
+ "grad_norm": 22.97549057006836,
+ "learning_rate": 3.1477411477411474e-05,
+ "loss": 0.1115,
+ "step": 1442
+ },
+ {
+ "epoch": 5.285714285714286,
+ "grad_norm": 36.935115814208984,
+ "learning_rate": 3.145299145299145e-05,
+ "loss": 0.3719,
+ "step": 1443
+ },
+ {
+ "epoch": 5.289377289377289,
+ "grad_norm": 50.726070404052734,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.2635,
+ "step": 1444
+ },
+ {
+ "epoch": 5.293040293040293,
+ "grad_norm": 31.919862747192383,
+ "learning_rate": 3.14041514041514e-05,
+ "loss": 0.2158,
+ "step": 1445
+ },
+ {
+ "epoch": 5.2967032967032965,
+ "grad_norm": 2.463076114654541,
+ "learning_rate": 3.137973137973138e-05,
+ "loss": 0.0125,
+ "step": 1446
+ },
+ {
+ "epoch": 5.3003663003663,
+ "grad_norm": 12.970477104187012,
+ "learning_rate": 3.135531135531136e-05,
+ "loss": 0.0701,
+ "step": 1447
+ },
+ {
+ "epoch": 5.304029304029304,
+ "grad_norm": 30.649160385131836,
+ "learning_rate": 3.133089133089133e-05,
+ "loss": 0.3443,
+ "step": 1448
+ },
+ {
+ "epoch": 5.3076923076923075,
+ "grad_norm": 50.362281799316406,
+ "learning_rate": 3.130647130647131e-05,
+ "loss": 0.2792,
+ "step": 1449
+ },
+ {
+ "epoch": 5.311355311355311,
+ "grad_norm": 25.041845321655273,
+ "learning_rate": 3.128205128205129e-05,
+ "loss": 0.2127,
+ "step": 1450
+ },
+ {
+ "epoch": 5.315018315018315,
+ "grad_norm": 44.749515533447266,
+ "learning_rate": 3.125763125763126e-05,
+ "loss": 0.5353,
+ "step": 1451
+ },
+ {
+ "epoch": 5.318681318681318,
+ "grad_norm": 66.30032348632812,
+ "learning_rate": 3.123321123321123e-05,
+ "loss": 0.5775,
+ "step": 1452
+ },
+ {
+ "epoch": 5.322344322344322,
+ "grad_norm": 3.905022382736206,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.0229,
+ "step": 1453
+ },
+ {
+ "epoch": 5.326007326007326,
+ "grad_norm": 50.520259857177734,
+ "learning_rate": 3.118437118437118e-05,
+ "loss": 0.6539,
+ "step": 1454
+ },
+ {
+ "epoch": 5.329670329670329,
+ "grad_norm": 12.567275047302246,
+ "learning_rate": 3.115995115995116e-05,
+ "loss": 0.0493,
+ "step": 1455
+ },
+ {
+ "epoch": 5.333333333333333,
+ "grad_norm": 24.11554718017578,
+ "learning_rate": 3.113553113553114e-05,
+ "loss": 0.401,
+ "step": 1456
+ },
+ {
+ "epoch": 5.336996336996337,
+ "grad_norm": 6.885409832000732,
+ "learning_rate": 3.111111111111111e-05,
+ "loss": 0.022,
+ "step": 1457
+ },
+ {
+ "epoch": 5.34065934065934,
+ "grad_norm": 30.46776008605957,
+ "learning_rate": 3.108669108669109e-05,
+ "loss": 0.1968,
+ "step": 1458
+ },
+ {
+ "epoch": 5.344322344322344,
+ "grad_norm": 54.408790588378906,
+ "learning_rate": 3.106227106227107e-05,
+ "loss": 0.3258,
+ "step": 1459
+ },
+ {
+ "epoch": 5.347985347985348,
+ "grad_norm": 43.48060989379883,
+ "learning_rate": 3.103785103785104e-05,
+ "loss": 0.2663,
+ "step": 1460
+ },
+ {
+ "epoch": 5.351648351648351,
+ "grad_norm": 34.339962005615234,
+ "learning_rate": 3.101343101343102e-05,
+ "loss": 0.3313,
+ "step": 1461
+ },
+ {
+ "epoch": 5.355311355311355,
+ "grad_norm": 35.54948806762695,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.2377,
+ "step": 1462
+ },
+ {
+ "epoch": 5.358974358974359,
+ "grad_norm": 30.956071853637695,
+ "learning_rate": 3.096459096459097e-05,
+ "loss": 0.2388,
+ "step": 1463
+ },
+ {
+ "epoch": 5.362637362637362,
+ "grad_norm": 15.458950996398926,
+ "learning_rate": 3.094017094017094e-05,
+ "loss": 0.1196,
+ "step": 1464
+ },
+ {
+ "epoch": 5.366300366300366,
+ "grad_norm": 56.893463134765625,
+ "learning_rate": 3.091575091575091e-05,
+ "loss": 0.5377,
+ "step": 1465
+ },
+ {
+ "epoch": 5.36996336996337,
+ "grad_norm": 31.90789794921875,
+ "learning_rate": 3.089133089133089e-05,
+ "loss": 0.5008,
+ "step": 1466
+ },
+ {
+ "epoch": 5.373626373626374,
+ "grad_norm": 18.772607803344727,
+ "learning_rate": 3.086691086691087e-05,
+ "loss": 0.1838,
+ "step": 1467
+ },
+ {
+ "epoch": 5.377289377289378,
+ "grad_norm": 1.7131195068359375,
+ "learning_rate": 3.084249084249084e-05,
+ "loss": 0.0055,
+ "step": 1468
+ },
+ {
+ "epoch": 5.380952380952381,
+ "grad_norm": 6.398471355438232,
+ "learning_rate": 3.081807081807082e-05,
+ "loss": 0.0309,
+ "step": 1469
+ },
+ {
+ "epoch": 5.384615384615385,
+ "grad_norm": 13.847221374511719,
+ "learning_rate": 3.0793650793650796e-05,
+ "loss": 0.0785,
+ "step": 1470
+ },
+ {
+ "epoch": 5.388278388278389,
+ "grad_norm": 46.000179290771484,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.4114,
+ "step": 1471
+ },
+ {
+ "epoch": 5.391941391941392,
+ "grad_norm": 39.47720718383789,
+ "learning_rate": 3.0744810744810746e-05,
+ "loss": 0.9189,
+ "step": 1472
+ },
+ {
+ "epoch": 5.395604395604396,
+ "grad_norm": 30.588356018066406,
+ "learning_rate": 3.0720390720390724e-05,
+ "loss": 0.372,
+ "step": 1473
+ },
+ {
+ "epoch": 5.3992673992674,
+ "grad_norm": 83.61669921875,
+ "learning_rate": 3.0695970695970696e-05,
+ "loss": 0.6729,
+ "step": 1474
+ },
+ {
+ "epoch": 5.402930402930403,
+ "grad_norm": 14.384758949279785,
+ "learning_rate": 3.0671550671550675e-05,
+ "loss": 0.0825,
+ "step": 1475
+ },
+ {
+ "epoch": 5.406593406593407,
+ "grad_norm": 41.9291877746582,
+ "learning_rate": 3.064713064713065e-05,
+ "loss": 0.2128,
+ "step": 1476
+ },
+ {
+ "epoch": 5.410256410256411,
+ "grad_norm": 31.03643035888672,
+ "learning_rate": 3.062271062271062e-05,
+ "loss": 0.6978,
+ "step": 1477
+ },
+ {
+ "epoch": 5.413919413919414,
+ "grad_norm": 43.225547790527344,
+ "learning_rate": 3.0598290598290596e-05,
+ "loss": 0.6546,
+ "step": 1478
+ },
+ {
+ "epoch": 5.417582417582418,
+ "grad_norm": 37.172611236572266,
+ "learning_rate": 3.0573870573870575e-05,
+ "loss": 0.5024,
+ "step": 1479
+ },
+ {
+ "epoch": 5.4212454212454215,
+ "grad_norm": 52.93882369995117,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.9954,
+ "step": 1480
+ },
+ {
+ "epoch": 5.424908424908425,
+ "grad_norm": 30.838403701782227,
+ "learning_rate": 3.0525030525030525e-05,
+ "loss": 0.2539,
+ "step": 1481
+ },
+ {
+ "epoch": 5.428571428571429,
+ "grad_norm": 8.876139640808105,
+ "learning_rate": 3.0500610500610503e-05,
+ "loss": 0.0635,
+ "step": 1482
+ },
+ {
+ "epoch": 5.4322344322344325,
+ "grad_norm": 14.970293998718262,
+ "learning_rate": 3.0476190476190475e-05,
+ "loss": 0.1337,
+ "step": 1483
+ },
+ {
+ "epoch": 5.435897435897436,
+ "grad_norm": 29.44560432434082,
+ "learning_rate": 3.0451770451770454e-05,
+ "loss": 0.3719,
+ "step": 1484
+ },
+ {
+ "epoch": 5.43956043956044,
+ "grad_norm": 3.793294668197632,
+ "learning_rate": 3.0427350427350432e-05,
+ "loss": 0.0278,
+ "step": 1485
+ },
+ {
+ "epoch": 5.443223443223443,
+ "grad_norm": 37.418731689453125,
+ "learning_rate": 3.0402930402930404e-05,
+ "loss": 0.5153,
+ "step": 1486
+ },
+ {
+ "epoch": 5.446886446886447,
+ "grad_norm": 26.718324661254883,
+ "learning_rate": 3.037851037851038e-05,
+ "loss": 0.388,
+ "step": 1487
+ },
+ {
+ "epoch": 5.450549450549451,
+ "grad_norm": 28.463197708129883,
+ "learning_rate": 3.0354090354090357e-05,
+ "loss": 0.1956,
+ "step": 1488
+ },
+ {
+ "epoch": 5.454212454212454,
+ "grad_norm": 45.390602111816406,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.3694,
+ "step": 1489
+ },
+ {
+ "epoch": 5.457875457875458,
+ "grad_norm": 33.20753860473633,
+ "learning_rate": 3.0305250305250307e-05,
+ "loss": 0.2946,
+ "step": 1490
+ },
+ {
+ "epoch": 5.461538461538462,
+ "grad_norm": 66.42272186279297,
+ "learning_rate": 3.028083028083028e-05,
+ "loss": 0.9082,
+ "step": 1491
+ },
+ {
+ "epoch": 5.465201465201465,
+ "grad_norm": 33.85127258300781,
+ "learning_rate": 3.0256410256410257e-05,
+ "loss": 0.2362,
+ "step": 1492
+ },
+ {
+ "epoch": 5.468864468864469,
+ "grad_norm": 51.019256591796875,
+ "learning_rate": 3.0231990231990233e-05,
+ "loss": 0.5446,
+ "step": 1493
+ },
+ {
+ "epoch": 5.472527472527473,
+ "grad_norm": 30.998769760131836,
+ "learning_rate": 3.0207570207570204e-05,
+ "loss": 0.4739,
+ "step": 1494
+ },
+ {
+ "epoch": 5.476190476190476,
+ "grad_norm": 44.187957763671875,
+ "learning_rate": 3.0183150183150183e-05,
+ "loss": 0.3439,
+ "step": 1495
+ },
+ {
+ "epoch": 5.47985347985348,
+ "grad_norm": 50.70987319946289,
+ "learning_rate": 3.015873015873016e-05,
+ "loss": 0.1625,
+ "step": 1496
+ },
+ {
+ "epoch": 5.483516483516484,
+ "grad_norm": 33.66750717163086,
+ "learning_rate": 3.0134310134310133e-05,
+ "loss": 0.1927,
+ "step": 1497
+ },
+ {
+ "epoch": 5.487179487179487,
+ "grad_norm": 41.02281951904297,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.4102,
+ "step": 1498
+ },
+ {
+ "epoch": 5.490842490842491,
+ "grad_norm": 10.570262908935547,
+ "learning_rate": 3.008547008547009e-05,
+ "loss": 0.0664,
+ "step": 1499
+ },
+ {
+ "epoch": 5.4945054945054945,
+ "grad_norm": 54.08304214477539,
+ "learning_rate": 3.0061050061050058e-05,
+ "loss": 0.9224,
+ "step": 1500
+ },
+ {
+ "epoch": 5.498168498168498,
+ "grad_norm": 67.29845428466797,
+ "learning_rate": 3.0036630036630036e-05,
+ "loss": 0.8804,
+ "step": 1501
+ },
+ {
+ "epoch": 5.501831501831502,
+ "grad_norm": 13.707988739013672,
+ "learning_rate": 3.0012210012210015e-05,
+ "loss": 0.054,
+ "step": 1502
+ },
+ {
+ "epoch": 5.5054945054945055,
+ "grad_norm": 23.2605037689209,
+ "learning_rate": 2.998778998778999e-05,
+ "loss": 0.2343,
+ "step": 1503
+ },
+ {
+ "epoch": 5.509157509157509,
+ "grad_norm": 34.8508186340332,
+ "learning_rate": 2.9963369963369965e-05,
+ "loss": 0.4967,
+ "step": 1504
+ },
+ {
+ "epoch": 5.512820512820513,
+ "grad_norm": 20.457887649536133,
+ "learning_rate": 2.993894993894994e-05,
+ "loss": 0.1206,
+ "step": 1505
+ },
+ {
+ "epoch": 5.516483516483516,
+ "grad_norm": 34.01540756225586,
+ "learning_rate": 2.9914529914529915e-05,
+ "loss": 0.5167,
+ "step": 1506
+ },
+ {
+ "epoch": 5.52014652014652,
+ "grad_norm": 20.356525421142578,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.1363,
+ "step": 1507
+ },
+ {
+ "epoch": 5.523809523809524,
+ "grad_norm": 42.172054290771484,
+ "learning_rate": 2.9865689865689865e-05,
+ "loss": 0.2954,
+ "step": 1508
+ },
+ {
+ "epoch": 5.527472527472527,
+ "grad_norm": 16.814903259277344,
+ "learning_rate": 2.984126984126984e-05,
+ "loss": 0.0987,
+ "step": 1509
+ },
+ {
+ "epoch": 5.531135531135531,
+ "grad_norm": 34.35768508911133,
+ "learning_rate": 2.981684981684982e-05,
+ "loss": 0.215,
+ "step": 1510
+ },
+ {
+ "epoch": 5.534798534798535,
+ "grad_norm": 43.24858474731445,
+ "learning_rate": 2.9792429792429794e-05,
+ "loss": 0.3689,
+ "step": 1511
+ },
+ {
+ "epoch": 5.538461538461538,
+ "grad_norm": 39.85542297363281,
+ "learning_rate": 2.976800976800977e-05,
+ "loss": 0.6229,
+ "step": 1512
+ },
+ {
+ "epoch": 5.542124542124542,
+ "grad_norm": 17.576234817504883,
+ "learning_rate": 2.9743589743589744e-05,
+ "loss": 0.0994,
+ "step": 1513
+ },
+ {
+ "epoch": 5.545787545787546,
+ "grad_norm": 45.81230545043945,
+ "learning_rate": 2.971916971916972e-05,
+ "loss": 0.5225,
+ "step": 1514
+ },
+ {
+ "epoch": 5.549450549450549,
+ "grad_norm": 21.293874740600586,
+ "learning_rate": 2.9694749694749694e-05,
+ "loss": 0.1139,
+ "step": 1515
+ },
+ {
+ "epoch": 5.553113553113553,
+ "grad_norm": 3.8571391105651855,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 0.0257,
+ "step": 1516
+ },
+ {
+ "epoch": 5.556776556776557,
+ "grad_norm": 32.1104736328125,
+ "learning_rate": 2.9645909645909648e-05,
+ "loss": 0.2649,
+ "step": 1517
+ },
+ {
+ "epoch": 5.56043956043956,
+ "grad_norm": 26.141633987426758,
+ "learning_rate": 2.9621489621489623e-05,
+ "loss": 0.2003,
+ "step": 1518
+ },
+ {
+ "epoch": 5.564102564102564,
+ "grad_norm": 44.93999099731445,
+ "learning_rate": 2.9597069597069598e-05,
+ "loss": 0.4019,
+ "step": 1519
+ },
+ {
+ "epoch": 5.5677655677655675,
+ "grad_norm": 10.86503791809082,
+ "learning_rate": 2.9572649572649573e-05,
+ "loss": 0.112,
+ "step": 1520
+ },
+ {
+ "epoch": 5.571428571428571,
+ "grad_norm": 164.05909729003906,
+ "learning_rate": 2.9548229548229548e-05,
+ "loss": 0.7215,
+ "step": 1521
+ },
+ {
+ "epoch": 5.575091575091575,
+ "grad_norm": 39.3042106628418,
+ "learning_rate": 2.9523809523809523e-05,
+ "loss": 0.3393,
+ "step": 1522
+ },
+ {
+ "epoch": 5.5787545787545785,
+ "grad_norm": 28.68779182434082,
+ "learning_rate": 2.94993894993895e-05,
+ "loss": 0.1175,
+ "step": 1523
+ },
+ {
+ "epoch": 5.582417582417582,
+ "grad_norm": 19.018821716308594,
+ "learning_rate": 2.9474969474969477e-05,
+ "loss": 0.1185,
+ "step": 1524
+ },
+ {
+ "epoch": 5.586080586080586,
+ "grad_norm": 32.04712677001953,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.275,
+ "step": 1525
+ },
+ {
+ "epoch": 5.589743589743589,
+ "grad_norm": 51.721744537353516,
+ "learning_rate": 2.9426129426129427e-05,
+ "loss": 0.5128,
+ "step": 1526
+ },
+ {
+ "epoch": 5.593406593406593,
+ "grad_norm": 8.353523254394531,
+ "learning_rate": 2.9401709401709402e-05,
+ "loss": 0.0452,
+ "step": 1527
+ },
+ {
+ "epoch": 5.597069597069597,
+ "grad_norm": 60.5823860168457,
+ "learning_rate": 2.9377289377289377e-05,
+ "loss": 0.7654,
+ "step": 1528
+ },
+ {
+ "epoch": 5.6007326007326,
+ "grad_norm": 39.350582122802734,
+ "learning_rate": 2.9352869352869355e-05,
+ "loss": 0.2384,
+ "step": 1529
+ },
+ {
+ "epoch": 5.604395604395604,
+ "grad_norm": 13.450817108154297,
+ "learning_rate": 2.932844932844933e-05,
+ "loss": 0.045,
+ "step": 1530
+ },
+ {
+ "epoch": 5.608058608058608,
+ "grad_norm": 19.569263458251953,
+ "learning_rate": 2.9304029304029305e-05,
+ "loss": 0.0806,
+ "step": 1531
+ },
+ {
+ "epoch": 5.611721611721611,
+ "grad_norm": 15.188614845275879,
+ "learning_rate": 2.927960927960928e-05,
+ "loss": 0.0639,
+ "step": 1532
+ },
+ {
+ "epoch": 5.615384615384615,
+ "grad_norm": 64.51557922363281,
+ "learning_rate": 2.9255189255189255e-05,
+ "loss": 0.4426,
+ "step": 1533
+ },
+ {
+ "epoch": 5.619047619047619,
+ "grad_norm": 80.56137084960938,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.8589,
+ "step": 1534
+ },
+ {
+ "epoch": 5.622710622710622,
+ "grad_norm": 50.31480407714844,
+ "learning_rate": 2.9206349206349206e-05,
+ "loss": 1.1482,
+ "step": 1535
+ },
+ {
+ "epoch": 5.626373626373626,
+ "grad_norm": 12.077424049377441,
+ "learning_rate": 2.9181929181929184e-05,
+ "loss": 0.0766,
+ "step": 1536
+ },
+ {
+ "epoch": 5.63003663003663,
+ "grad_norm": 58.46347427368164,
+ "learning_rate": 2.915750915750916e-05,
+ "loss": 0.6512,
+ "step": 1537
+ },
+ {
+ "epoch": 5.633699633699633,
+ "grad_norm": 22.6331729888916,
+ "learning_rate": 2.913308913308913e-05,
+ "loss": 0.155,
+ "step": 1538
+ },
+ {
+ "epoch": 5.637362637362637,
+ "grad_norm": 49.88985824584961,
+ "learning_rate": 2.910866910866911e-05,
+ "loss": 0.4947,
+ "step": 1539
+ },
+ {
+ "epoch": 5.641025641025641,
+ "grad_norm": 64.37980651855469,
+ "learning_rate": 2.9084249084249084e-05,
+ "loss": 0.4665,
+ "step": 1540
+ },
+ {
+ "epoch": 5.644688644688645,
+ "grad_norm": 13.715937614440918,
+ "learning_rate": 2.905982905982906e-05,
+ "loss": 0.0937,
+ "step": 1541
+ },
+ {
+ "epoch": 5.648351648351649,
+ "grad_norm": 25.40440559387207,
+ "learning_rate": 2.9035409035409038e-05,
+ "loss": 0.2467,
+ "step": 1542
+ },
+ {
+ "epoch": 5.652014652014652,
+ "grad_norm": 52.885963439941406,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.5018,
+ "step": 1543
+ },
+ {
+ "epoch": 5.655677655677656,
+ "grad_norm": 7.535051345825195,
+ "learning_rate": 2.8986568986568988e-05,
+ "loss": 0.0607,
+ "step": 1544
+ },
+ {
+ "epoch": 5.65934065934066,
+ "grad_norm": 55.79275894165039,
+ "learning_rate": 2.8962148962148963e-05,
+ "loss": 1.0288,
+ "step": 1545
+ },
+ {
+ "epoch": 5.663003663003663,
+ "grad_norm": 21.050024032592773,
+ "learning_rate": 2.8937728937728938e-05,
+ "loss": 0.1987,
+ "step": 1546
+ },
+ {
+ "epoch": 5.666666666666667,
+ "grad_norm": 24.74984359741211,
+ "learning_rate": 2.8913308913308913e-05,
+ "loss": 0.202,
+ "step": 1547
+ },
+ {
+ "epoch": 5.670329670329671,
+ "grad_norm": 15.297272682189941,
+ "learning_rate": 2.8888888888888888e-05,
+ "loss": 0.127,
+ "step": 1548
+ },
+ {
+ "epoch": 5.673992673992674,
+ "grad_norm": 12.198046684265137,
+ "learning_rate": 2.8864468864468867e-05,
+ "loss": 0.115,
+ "step": 1549
+ },
+ {
+ "epoch": 5.677655677655678,
+ "grad_norm": 18.761402130126953,
+ "learning_rate": 2.8840048840048842e-05,
+ "loss": 0.1745,
+ "step": 1550
+ },
+ {
+ "epoch": 5.681318681318682,
+ "grad_norm": 26.97224235534668,
+ "learning_rate": 2.8815628815628813e-05,
+ "loss": 0.2554,
+ "step": 1551
+ },
+ {
+ "epoch": 5.684981684981685,
+ "grad_norm": 9.772692680358887,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.0927,
+ "step": 1552
+ },
+ {
+ "epoch": 5.688644688644689,
+ "grad_norm": 35.73431396484375,
+ "learning_rate": 2.8766788766788767e-05,
+ "loss": 0.4048,
+ "step": 1553
+ },
+ {
+ "epoch": 5.6923076923076925,
+ "grad_norm": 31.94872283935547,
+ "learning_rate": 2.8742368742368742e-05,
+ "loss": 0.5711,
+ "step": 1554
+ },
+ {
+ "epoch": 5.695970695970696,
+ "grad_norm": 45.44688034057617,
+ "learning_rate": 2.871794871794872e-05,
+ "loss": 0.7126,
+ "step": 1555
+ },
+ {
+ "epoch": 5.6996336996337,
+ "grad_norm": 45.74476623535156,
+ "learning_rate": 2.8693528693528696e-05,
+ "loss": 0.933,
+ "step": 1556
+ },
+ {
+ "epoch": 5.7032967032967035,
+ "grad_norm": 19.827136993408203,
+ "learning_rate": 2.866910866910867e-05,
+ "loss": 0.2433,
+ "step": 1557
+ },
+ {
+ "epoch": 5.706959706959707,
+ "grad_norm": 35.981903076171875,
+ "learning_rate": 2.8644688644688646e-05,
+ "loss": 0.3429,
+ "step": 1558
+ },
+ {
+ "epoch": 5.710622710622711,
+ "grad_norm": 19.642629623413086,
+ "learning_rate": 2.862026862026862e-05,
+ "loss": 0.1454,
+ "step": 1559
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 25.960437774658203,
+ "learning_rate": 2.8595848595848596e-05,
+ "loss": 0.2965,
+ "step": 1560
+ },
+ {
+ "epoch": 5.717948717948718,
+ "grad_norm": 49.41150665283203,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.3295,
+ "step": 1561
+ },
+ {
+ "epoch": 5.721611721611722,
+ "grad_norm": 10.984975814819336,
+ "learning_rate": 2.854700854700855e-05,
+ "loss": 0.0879,
+ "step": 1562
+ },
+ {
+ "epoch": 5.725274725274725,
+ "grad_norm": 26.814556121826172,
+ "learning_rate": 2.8522588522588524e-05,
+ "loss": 0.1456,
+ "step": 1563
+ },
+ {
+ "epoch": 5.728937728937729,
+ "grad_norm": 18.65792465209961,
+ "learning_rate": 2.8498168498168496e-05,
+ "loss": 0.161,
+ "step": 1564
+ },
+ {
+ "epoch": 5.732600732600733,
+ "grad_norm": 35.959590911865234,
+ "learning_rate": 2.8473748473748475e-05,
+ "loss": 0.672,
+ "step": 1565
+ },
+ {
+ "epoch": 5.736263736263736,
+ "grad_norm": 78.56996154785156,
+ "learning_rate": 2.844932844932845e-05,
+ "loss": 1.6393,
+ "step": 1566
+ },
+ {
+ "epoch": 5.73992673992674,
+ "grad_norm": 31.604719161987305,
+ "learning_rate": 2.8424908424908425e-05,
+ "loss": 0.5395,
+ "step": 1567
+ },
+ {
+ "epoch": 5.743589743589744,
+ "grad_norm": 14.373411178588867,
+ "learning_rate": 2.8400488400488403e-05,
+ "loss": 0.0688,
+ "step": 1568
+ },
+ {
+ "epoch": 5.747252747252747,
+ "grad_norm": 3.5718555450439453,
+ "learning_rate": 2.8376068376068378e-05,
+ "loss": 0.0161,
+ "step": 1569
+ },
+ {
+ "epoch": 5.750915750915751,
+ "grad_norm": 23.164167404174805,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.2169,
+ "step": 1570
+ },
+ {
+ "epoch": 5.754578754578755,
+ "grad_norm": 33.42869186401367,
+ "learning_rate": 2.8327228327228328e-05,
+ "loss": 0.3731,
+ "step": 1571
+ },
+ {
+ "epoch": 5.758241758241758,
+ "grad_norm": 32.016361236572266,
+ "learning_rate": 2.8302808302808303e-05,
+ "loss": 0.2243,
+ "step": 1572
+ },
+ {
+ "epoch": 5.761904761904762,
+ "grad_norm": 43.50716018676758,
+ "learning_rate": 2.827838827838828e-05,
+ "loss": 0.4229,
+ "step": 1573
+ },
+ {
+ "epoch": 5.7655677655677655,
+ "grad_norm": 4.828849792480469,
+ "learning_rate": 2.8253968253968253e-05,
+ "loss": 0.0295,
+ "step": 1574
+ },
+ {
+ "epoch": 5.769230769230769,
+ "grad_norm": 30.276351928710938,
+ "learning_rate": 2.8229548229548232e-05,
+ "loss": 0.399,
+ "step": 1575
+ },
+ {
+ "epoch": 5.772893772893773,
+ "grad_norm": 17.416358947753906,
+ "learning_rate": 2.8205128205128207e-05,
+ "loss": 0.1529,
+ "step": 1576
+ },
+ {
+ "epoch": 5.7765567765567765,
+ "grad_norm": 39.488468170166016,
+ "learning_rate": 2.818070818070818e-05,
+ "loss": 0.1245,
+ "step": 1577
+ },
+ {
+ "epoch": 5.78021978021978,
+ "grad_norm": 27.775489807128906,
+ "learning_rate": 2.8156288156288157e-05,
+ "loss": 0.1312,
+ "step": 1578
+ },
+ {
+ "epoch": 5.783882783882784,
+ "grad_norm": 35.964717864990234,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.5796,
+ "step": 1579
+ },
+ {
+ "epoch": 5.787545787545787,
+ "grad_norm": 53.15998077392578,
+ "learning_rate": 2.8107448107448107e-05,
+ "loss": 1.2654,
+ "step": 1580
+ },
+ {
+ "epoch": 5.791208791208791,
+ "grad_norm": 22.90069007873535,
+ "learning_rate": 2.8083028083028086e-05,
+ "loss": 0.2162,
+ "step": 1581
+ },
+ {
+ "epoch": 5.794871794871795,
+ "grad_norm": 45.380470275878906,
+ "learning_rate": 2.805860805860806e-05,
+ "loss": 0.4231,
+ "step": 1582
+ },
+ {
+ "epoch": 5.798534798534798,
+ "grad_norm": 32.56012725830078,
+ "learning_rate": 2.8034188034188032e-05,
+ "loss": 0.3711,
+ "step": 1583
+ },
+ {
+ "epoch": 5.802197802197802,
+ "grad_norm": 34.63470458984375,
+ "learning_rate": 2.800976800976801e-05,
+ "loss": 0.5414,
+ "step": 1584
+ },
+ {
+ "epoch": 5.805860805860806,
+ "grad_norm": 48.173797607421875,
+ "learning_rate": 2.7985347985347986e-05,
+ "loss": 1.2363,
+ "step": 1585
+ },
+ {
+ "epoch": 5.809523809523809,
+ "grad_norm": 27.12062644958496,
+ "learning_rate": 2.796092796092796e-05,
+ "loss": 0.4824,
+ "step": 1586
+ },
+ {
+ "epoch": 5.813186813186813,
+ "grad_norm": 23.13554573059082,
+ "learning_rate": 2.7936507936507936e-05,
+ "loss": 0.2321,
+ "step": 1587
+ },
+ {
+ "epoch": 5.816849816849817,
+ "grad_norm": 50.56953430175781,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.2158,
+ "step": 1588
+ },
+ {
+ "epoch": 5.82051282051282,
+ "grad_norm": 20.73900604248047,
+ "learning_rate": 2.788766788766789e-05,
+ "loss": 0.217,
+ "step": 1589
+ },
+ {
+ "epoch": 5.824175824175824,
+ "grad_norm": 17.288028717041016,
+ "learning_rate": 2.786324786324786e-05,
+ "loss": 0.2936,
+ "step": 1590
+ },
+ {
+ "epoch": 5.827838827838828,
+ "grad_norm": 22.067502975463867,
+ "learning_rate": 2.783882783882784e-05,
+ "loss": 0.1906,
+ "step": 1591
+ },
+ {
+ "epoch": 5.831501831501831,
+ "grad_norm": 14.928089141845703,
+ "learning_rate": 2.7814407814407815e-05,
+ "loss": 0.1296,
+ "step": 1592
+ },
+ {
+ "epoch": 5.835164835164835,
+ "grad_norm": 25.669342041015625,
+ "learning_rate": 2.778998778998779e-05,
+ "loss": 0.2475,
+ "step": 1593
+ },
+ {
+ "epoch": 5.8388278388278385,
+ "grad_norm": 20.302515029907227,
+ "learning_rate": 2.776556776556777e-05,
+ "loss": 0.2206,
+ "step": 1594
+ },
+ {
+ "epoch": 5.842490842490842,
+ "grad_norm": 9.004451751708984,
+ "learning_rate": 2.7741147741147743e-05,
+ "loss": 0.0694,
+ "step": 1595
+ },
+ {
+ "epoch": 5.846153846153846,
+ "grad_norm": 7.495925426483154,
+ "learning_rate": 2.7716727716727715e-05,
+ "loss": 0.0481,
+ "step": 1596
+ },
+ {
+ "epoch": 5.8498168498168495,
+ "grad_norm": 11.891450881958008,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0754,
+ "step": 1597
+ },
+ {
+ "epoch": 5.853479853479853,
+ "grad_norm": 27.53200340270996,
+ "learning_rate": 2.766788766788767e-05,
+ "loss": 0.1459,
+ "step": 1598
+ },
+ {
+ "epoch": 5.857142857142857,
+ "grad_norm": 4.103634357452393,
+ "learning_rate": 2.7643467643467644e-05,
+ "loss": 0.0256,
+ "step": 1599
+ },
+ {
+ "epoch": 5.860805860805861,
+ "grad_norm": 30.772586822509766,
+ "learning_rate": 2.761904761904762e-05,
+ "loss": 0.2748,
+ "step": 1600
+ },
+ {
+ "epoch": 5.864468864468865,
+ "grad_norm": 39.70070266723633,
+ "learning_rate": 2.7594627594627597e-05,
+ "loss": 1.3089,
+ "step": 1601
+ },
+ {
+ "epoch": 5.868131868131869,
+ "grad_norm": 54.576236724853516,
+ "learning_rate": 2.7570207570207572e-05,
+ "loss": 0.3549,
+ "step": 1602
+ },
+ {
+ "epoch": 5.871794871794872,
+ "grad_norm": 14.617592811584473,
+ "learning_rate": 2.7545787545787544e-05,
+ "loss": 0.0976,
+ "step": 1603
+ },
+ {
+ "epoch": 5.875457875457876,
+ "grad_norm": 11.900232315063477,
+ "learning_rate": 2.7521367521367522e-05,
+ "loss": 0.0518,
+ "step": 1604
+ },
+ {
+ "epoch": 5.8791208791208796,
+ "grad_norm": 62.00771713256836,
+ "learning_rate": 2.7496947496947497e-05,
+ "loss": 0.2866,
+ "step": 1605
+ },
+ {
+ "epoch": 5.882783882783883,
+ "grad_norm": 51.59067153930664,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 0.3357,
+ "step": 1606
+ },
+ {
+ "epoch": 5.886446886446887,
+ "grad_norm": 61.792476654052734,
+ "learning_rate": 2.744810744810745e-05,
+ "loss": 0.2923,
+ "step": 1607
+ },
+ {
+ "epoch": 5.8901098901098905,
+ "grad_norm": 12.737351417541504,
+ "learning_rate": 2.7423687423687426e-05,
+ "loss": 0.0893,
+ "step": 1608
+ },
+ {
+ "epoch": 5.893772893772894,
+ "grad_norm": 7.451726913452148,
+ "learning_rate": 2.7399267399267398e-05,
+ "loss": 0.044,
+ "step": 1609
+ },
+ {
+ "epoch": 5.897435897435898,
+ "grad_norm": 41.03788757324219,
+ "learning_rate": 2.7374847374847376e-05,
+ "loss": 0.4605,
+ "step": 1610
+ },
+ {
+ "epoch": 5.9010989010989015,
+ "grad_norm": 11.49382209777832,
+ "learning_rate": 2.735042735042735e-05,
+ "loss": 0.0754,
+ "step": 1611
+ },
+ {
+ "epoch": 5.904761904761905,
+ "grad_norm": 15.952816009521484,
+ "learning_rate": 2.7326007326007326e-05,
+ "loss": 0.0748,
+ "step": 1612
+ },
+ {
+ "epoch": 5.908424908424909,
+ "grad_norm": 8.492574691772461,
+ "learning_rate": 2.73015873015873e-05,
+ "loss": 0.0254,
+ "step": 1613
+ },
+ {
+ "epoch": 5.912087912087912,
+ "grad_norm": 17.973997116088867,
+ "learning_rate": 2.727716727716728e-05,
+ "loss": 0.1038,
+ "step": 1614
+ },
+ {
+ "epoch": 5.915750915750916,
+ "grad_norm": 6.881199359893799,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.0186,
+ "step": 1615
+ },
+ {
+ "epoch": 5.91941391941392,
+ "grad_norm": 28.51510238647461,
+ "learning_rate": 2.7228327228327227e-05,
+ "loss": 0.1283,
+ "step": 1616
+ },
+ {
+ "epoch": 5.923076923076923,
+ "grad_norm": 33.539485931396484,
+ "learning_rate": 2.7203907203907205e-05,
+ "loss": 0.6151,
+ "step": 1617
+ },
+ {
+ "epoch": 5.926739926739927,
+ "grad_norm": 57.307823181152344,
+ "learning_rate": 2.717948717948718e-05,
+ "loss": 0.3924,
+ "step": 1618
+ },
+ {
+ "epoch": 5.930402930402931,
+ "grad_norm": 43.010276794433594,
+ "learning_rate": 2.7155067155067155e-05,
+ "loss": 0.3942,
+ "step": 1619
+ },
+ {
+ "epoch": 5.934065934065934,
+ "grad_norm": 26.552478790283203,
+ "learning_rate": 2.7130647130647134e-05,
+ "loss": 0.1961,
+ "step": 1620
+ },
+ {
+ "epoch": 5.937728937728938,
+ "grad_norm": 78.5624008178711,
+ "learning_rate": 2.710622710622711e-05,
+ "loss": 1.0705,
+ "step": 1621
+ },
+ {
+ "epoch": 5.941391941391942,
+ "grad_norm": 37.23006057739258,
+ "learning_rate": 2.708180708180708e-05,
+ "loss": 0.4875,
+ "step": 1622
+ },
+ {
+ "epoch": 5.945054945054945,
+ "grad_norm": 42.23412322998047,
+ "learning_rate": 2.705738705738706e-05,
+ "loss": 0.3795,
+ "step": 1623
+ },
+ {
+ "epoch": 5.948717948717949,
+ "grad_norm": 42.677696228027344,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.3414,
+ "step": 1624
+ },
+ {
+ "epoch": 5.9523809523809526,
+ "grad_norm": 24.182249069213867,
+ "learning_rate": 2.700854700854701e-05,
+ "loss": 0.0814,
+ "step": 1625
+ },
+ {
+ "epoch": 5.956043956043956,
+ "grad_norm": 11.87109088897705,
+ "learning_rate": 2.6984126984126984e-05,
+ "loss": 0.0816,
+ "step": 1626
+ },
+ {
+ "epoch": 5.95970695970696,
+ "grad_norm": 7.575586318969727,
+ "learning_rate": 2.6959706959706962e-05,
+ "loss": 0.049,
+ "step": 1627
+ },
+ {
+ "epoch": 5.9633699633699635,
+ "grad_norm": 4.052019119262695,
+ "learning_rate": 2.6935286935286934e-05,
+ "loss": 0.0276,
+ "step": 1628
+ },
+ {
+ "epoch": 5.967032967032967,
+ "grad_norm": 24.308481216430664,
+ "learning_rate": 2.691086691086691e-05,
+ "loss": 0.2324,
+ "step": 1629
+ },
+ {
+ "epoch": 5.970695970695971,
+ "grad_norm": 32.5918083190918,
+ "learning_rate": 2.6886446886446888e-05,
+ "loss": 0.42,
+ "step": 1630
+ },
+ {
+ "epoch": 5.9743589743589745,
+ "grad_norm": 16.758689880371094,
+ "learning_rate": 2.6862026862026863e-05,
+ "loss": 0.1857,
+ "step": 1631
+ },
+ {
+ "epoch": 5.978021978021978,
+ "grad_norm": 24.96327781677246,
+ "learning_rate": 2.6837606837606838e-05,
+ "loss": 0.3293,
+ "step": 1632
+ },
+ {
+ "epoch": 5.981684981684982,
+ "grad_norm": 7.734143257141113,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 0.0644,
+ "step": 1633
+ },
+ {
+ "epoch": 5.985347985347985,
+ "grad_norm": 49.89662551879883,
+ "learning_rate": 2.678876678876679e-05,
+ "loss": 0.7976,
+ "step": 1634
+ },
+ {
+ "epoch": 5.989010989010989,
+ "grad_norm": 20.55232810974121,
+ "learning_rate": 2.6764346764346763e-05,
+ "loss": 0.1911,
+ "step": 1635
+ },
+ {
+ "epoch": 5.992673992673993,
+ "grad_norm": 11.190897941589355,
+ "learning_rate": 2.673992673992674e-05,
+ "loss": 0.0604,
+ "step": 1636
+ },
+ {
+ "epoch": 5.996336996336996,
+ "grad_norm": 24.896806716918945,
+ "learning_rate": 2.6715506715506716e-05,
+ "loss": 0.2467,
+ "step": 1637
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 39.5569953918457,
+ "learning_rate": 2.669108669108669e-05,
+ "loss": 0.8073,
+ "step": 1638
+ },
+ {
+ "epoch": 6.003663003663004,
+ "grad_norm": 4.203596591949463,
+ "learning_rate": 2.6666666666666667e-05,
+ "loss": 0.0266,
+ "step": 1639
+ },
+ {
+ "epoch": 6.007326007326007,
+ "grad_norm": 6.89768648147583,
+ "learning_rate": 2.6642246642246645e-05,
+ "loss": 0.0664,
+ "step": 1640
+ },
+ {
+ "epoch": 6.010989010989011,
+ "grad_norm": 33.19546890258789,
+ "learning_rate": 2.6617826617826617e-05,
+ "loss": 0.6504,
+ "step": 1641
+ },
+ {
+ "epoch": 6.014652014652015,
+ "grad_norm": 8.577303886413574,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.0715,
+ "step": 1642
+ },
+ {
+ "epoch": 6.018315018315018,
+ "grad_norm": 11.48106861114502,
+ "learning_rate": 2.656898656898657e-05,
+ "loss": 0.0952,
+ "step": 1643
+ },
+ {
+ "epoch": 6.021978021978022,
+ "grad_norm": 16.87290382385254,
+ "learning_rate": 2.6544566544566545e-05,
+ "loss": 0.1156,
+ "step": 1644
+ },
+ {
+ "epoch": 6.0256410256410255,
+ "grad_norm": 5.304442405700684,
+ "learning_rate": 2.652014652014652e-05,
+ "loss": 0.0574,
+ "step": 1645
+ },
+ {
+ "epoch": 6.029304029304029,
+ "grad_norm": 12.058186531066895,
+ "learning_rate": 2.64957264957265e-05,
+ "loss": 0.1013,
+ "step": 1646
+ },
+ {
+ "epoch": 6.032967032967033,
+ "grad_norm": 11.20624828338623,
+ "learning_rate": 2.6471306471306474e-05,
+ "loss": 0.0637,
+ "step": 1647
+ },
+ {
+ "epoch": 6.0366300366300365,
+ "grad_norm": 20.595020294189453,
+ "learning_rate": 2.6446886446886446e-05,
+ "loss": 0.1282,
+ "step": 1648
+ },
+ {
+ "epoch": 6.04029304029304,
+ "grad_norm": 32.712425231933594,
+ "learning_rate": 2.6422466422466424e-05,
+ "loss": 1.0173,
+ "step": 1649
+ },
+ {
+ "epoch": 6.043956043956044,
+ "grad_norm": 31.00687599182129,
+ "learning_rate": 2.63980463980464e-05,
+ "loss": 0.2822,
+ "step": 1650
+ },
+ {
+ "epoch": 6.0476190476190474,
+ "grad_norm": 15.361159324645996,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.08,
+ "step": 1651
+ },
+ {
+ "epoch": 6.051282051282051,
+ "grad_norm": 75.07713317871094,
+ "learning_rate": 2.634920634920635e-05,
+ "loss": 0.3835,
+ "step": 1652
+ },
+ {
+ "epoch": 6.054945054945055,
+ "grad_norm": 28.741546630859375,
+ "learning_rate": 2.6324786324786328e-05,
+ "loss": 0.1257,
+ "step": 1653
+ },
+ {
+ "epoch": 6.058608058608058,
+ "grad_norm": 173.8939971923828,
+ "learning_rate": 2.63003663003663e-05,
+ "loss": 0.0744,
+ "step": 1654
+ },
+ {
+ "epoch": 6.062271062271062,
+ "grad_norm": 8.212196350097656,
+ "learning_rate": 2.6275946275946274e-05,
+ "loss": 0.027,
+ "step": 1655
+ },
+ {
+ "epoch": 6.065934065934066,
+ "grad_norm": 56.60511779785156,
+ "learning_rate": 2.6251526251526253e-05,
+ "loss": 0.7258,
+ "step": 1656
+ },
+ {
+ "epoch": 6.069597069597069,
+ "grad_norm": 14.454882621765137,
+ "learning_rate": 2.6227106227106228e-05,
+ "loss": 0.0762,
+ "step": 1657
+ },
+ {
+ "epoch": 6.073260073260073,
+ "grad_norm": 40.66373062133789,
+ "learning_rate": 2.6202686202686203e-05,
+ "loss": 0.2663,
+ "step": 1658
+ },
+ {
+ "epoch": 6.076923076923077,
+ "grad_norm": 45.68836212158203,
+ "learning_rate": 2.617826617826618e-05,
+ "loss": 0.4244,
+ "step": 1659
+ },
+ {
+ "epoch": 6.08058608058608,
+ "grad_norm": 16.69190788269043,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.1249,
+ "step": 1660
+ },
+ {
+ "epoch": 6.084249084249084,
+ "grad_norm": 58.633358001708984,
+ "learning_rate": 2.6129426129426128e-05,
+ "loss": 0.3699,
+ "step": 1661
+ },
+ {
+ "epoch": 6.087912087912088,
+ "grad_norm": 8.262107849121094,
+ "learning_rate": 2.6105006105006107e-05,
+ "loss": 0.0332,
+ "step": 1662
+ },
+ {
+ "epoch": 6.091575091575091,
+ "grad_norm": 1.7256231307983398,
+ "learning_rate": 2.6080586080586082e-05,
+ "loss": 0.0073,
+ "step": 1663
+ },
+ {
+ "epoch": 6.095238095238095,
+ "grad_norm": 27.97568130493164,
+ "learning_rate": 2.6056166056166057e-05,
+ "loss": 0.3567,
+ "step": 1664
+ },
+ {
+ "epoch": 6.0989010989010985,
+ "grad_norm": 8.167609214782715,
+ "learning_rate": 2.6031746031746032e-05,
+ "loss": 0.0328,
+ "step": 1665
+ },
+ {
+ "epoch": 6.102564102564102,
+ "grad_norm": 8.547285079956055,
+ "learning_rate": 2.600732600732601e-05,
+ "loss": 0.0438,
+ "step": 1666
+ },
+ {
+ "epoch": 6.106227106227106,
+ "grad_norm": 38.85865020751953,
+ "learning_rate": 2.5982905982905982e-05,
+ "loss": 0.3492,
+ "step": 1667
+ },
+ {
+ "epoch": 6.1098901098901095,
+ "grad_norm": 18.36060333251953,
+ "learning_rate": 2.5958485958485957e-05,
+ "loss": 0.0411,
+ "step": 1668
+ },
+ {
+ "epoch": 6.113553113553113,
+ "grad_norm": 8.013274192810059,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.0461,
+ "step": 1669
+ },
+ {
+ "epoch": 6.117216117216117,
+ "grad_norm": 41.88865280151367,
+ "learning_rate": 2.590964590964591e-05,
+ "loss": 0.7209,
+ "step": 1670
+ },
+ {
+ "epoch": 6.1208791208791204,
+ "grad_norm": 93.57958221435547,
+ "learning_rate": 2.5885225885225886e-05,
+ "loss": 0.5563,
+ "step": 1671
+ },
+ {
+ "epoch": 6.124542124542124,
+ "grad_norm": 6.878098964691162,
+ "learning_rate": 2.5860805860805864e-05,
+ "loss": 0.0213,
+ "step": 1672
+ },
+ {
+ "epoch": 6.128205128205128,
+ "grad_norm": 41.09592819213867,
+ "learning_rate": 2.5836385836385836e-05,
+ "loss": 0.5724,
+ "step": 1673
+ },
+ {
+ "epoch": 6.131868131868132,
+ "grad_norm": 8.257637977600098,
+ "learning_rate": 2.581196581196581e-05,
+ "loss": 0.0396,
+ "step": 1674
+ },
+ {
+ "epoch": 6.135531135531136,
+ "grad_norm": 24.022602081298828,
+ "learning_rate": 2.578754578754579e-05,
+ "loss": 0.0623,
+ "step": 1675
+ },
+ {
+ "epoch": 6.13919413919414,
+ "grad_norm": 46.46554946899414,
+ "learning_rate": 2.5763125763125764e-05,
+ "loss": 0.4135,
+ "step": 1676
+ },
+ {
+ "epoch": 6.142857142857143,
+ "grad_norm": 96.42303466796875,
+ "learning_rate": 2.573870573870574e-05,
+ "loss": 0.4724,
+ "step": 1677
+ },
+ {
+ "epoch": 6.146520146520147,
+ "grad_norm": 8.401265144348145,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.0396,
+ "step": 1678
+ },
+ {
+ "epoch": 6.1501831501831505,
+ "grad_norm": 29.346588134765625,
+ "learning_rate": 2.5689865689865693e-05,
+ "loss": 0.1959,
+ "step": 1679
+ },
+ {
+ "epoch": 6.153846153846154,
+ "grad_norm": 4.874574661254883,
+ "learning_rate": 2.5665445665445665e-05,
+ "loss": 0.0295,
+ "step": 1680
+ },
+ {
+ "epoch": 6.157509157509158,
+ "grad_norm": 6.668759346008301,
+ "learning_rate": 2.564102564102564e-05,
+ "loss": 0.0408,
+ "step": 1681
+ },
+ {
+ "epoch": 6.1611721611721615,
+ "grad_norm": 21.22933006286621,
+ "learning_rate": 2.5616605616605618e-05,
+ "loss": 0.1591,
+ "step": 1682
+ },
+ {
+ "epoch": 6.164835164835165,
+ "grad_norm": 2.3441169261932373,
+ "learning_rate": 2.5592185592185593e-05,
+ "loss": 0.0138,
+ "step": 1683
+ },
+ {
+ "epoch": 6.168498168498169,
+ "grad_norm": 31.336048126220703,
+ "learning_rate": 2.5567765567765568e-05,
+ "loss": 0.321,
+ "step": 1684
+ },
+ {
+ "epoch": 6.172161172161172,
+ "grad_norm": 39.17483139038086,
+ "learning_rate": 2.5543345543345547e-05,
+ "loss": 0.5268,
+ "step": 1685
+ },
+ {
+ "epoch": 6.175824175824176,
+ "grad_norm": 6.984042644500732,
+ "learning_rate": 2.551892551892552e-05,
+ "loss": 0.0377,
+ "step": 1686
+ },
+ {
+ "epoch": 6.17948717948718,
+ "grad_norm": 21.946880340576172,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.1557,
+ "step": 1687
+ },
+ {
+ "epoch": 6.183150183150183,
+ "grad_norm": 23.447084426879883,
+ "learning_rate": 2.5470085470085472e-05,
+ "loss": 0.1996,
+ "step": 1688
+ },
+ {
+ "epoch": 6.186813186813187,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 2.5445665445665447e-05,
+ "loss": 0.0327,
+ "step": 1689
+ },
+ {
+ "epoch": 6.190476190476191,
+ "grad_norm": 11.126763343811035,
+ "learning_rate": 2.5421245421245422e-05,
+ "loss": 0.0335,
+ "step": 1690
+ },
+ {
+ "epoch": 6.194139194139194,
+ "grad_norm": 42.23086929321289,
+ "learning_rate": 2.5396825396825397e-05,
+ "loss": 0.3307,
+ "step": 1691
+ },
+ {
+ "epoch": 6.197802197802198,
+ "grad_norm": 26.350086212158203,
+ "learning_rate": 2.5372405372405376e-05,
+ "loss": 0.153,
+ "step": 1692
+ },
+ {
+ "epoch": 6.201465201465202,
+ "grad_norm": 6.667046546936035,
+ "learning_rate": 2.5347985347985347e-05,
+ "loss": 0.011,
+ "step": 1693
+ },
+ {
+ "epoch": 6.205128205128205,
+ "grad_norm": 63.5737190246582,
+ "learning_rate": 2.5323565323565322e-05,
+ "loss": 0.5602,
+ "step": 1694
+ },
+ {
+ "epoch": 6.208791208791209,
+ "grad_norm": 54.20994567871094,
+ "learning_rate": 2.52991452991453e-05,
+ "loss": 0.6584,
+ "step": 1695
+ },
+ {
+ "epoch": 6.212454212454213,
+ "grad_norm": 55.79521942138672,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.5259,
+ "step": 1696
+ },
+ {
+ "epoch": 6.216117216117216,
+ "grad_norm": 65.18093872070312,
+ "learning_rate": 2.525030525030525e-05,
+ "loss": 0.308,
+ "step": 1697
+ },
+ {
+ "epoch": 6.21978021978022,
+ "grad_norm": 9.979923248291016,
+ "learning_rate": 2.522588522588523e-05,
+ "loss": 0.0312,
+ "step": 1698
+ },
+ {
+ "epoch": 6.2234432234432235,
+ "grad_norm": 62.80887222290039,
+ "learning_rate": 2.52014652014652e-05,
+ "loss": 0.3198,
+ "step": 1699
+ },
+ {
+ "epoch": 6.227106227106227,
+ "grad_norm": 63.2298583984375,
+ "learning_rate": 2.5177045177045176e-05,
+ "loss": 0.5223,
+ "step": 1700
+ },
+ {
+ "epoch": 6.230769230769231,
+ "grad_norm": 49.968502044677734,
+ "learning_rate": 2.515262515262515e-05,
+ "loss": 0.5554,
+ "step": 1701
+ },
+ {
+ "epoch": 6.2344322344322345,
+ "grad_norm": 29.190656661987305,
+ "learning_rate": 2.512820512820513e-05,
+ "loss": 0.2286,
+ "step": 1702
+ },
+ {
+ "epoch": 6.238095238095238,
+ "grad_norm": 38.25267028808594,
+ "learning_rate": 2.5103785103785105e-05,
+ "loss": 0.1948,
+ "step": 1703
+ },
+ {
+ "epoch": 6.241758241758242,
+ "grad_norm": 57.620323181152344,
+ "learning_rate": 2.507936507936508e-05,
+ "loss": 0.5533,
+ "step": 1704
+ },
+ {
+ "epoch": 6.245421245421245,
+ "grad_norm": 21.61467170715332,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.0935,
+ "step": 1705
+ },
+ {
+ "epoch": 6.249084249084249,
+ "grad_norm": 19.86629867553711,
+ "learning_rate": 2.503052503052503e-05,
+ "loss": 0.0852,
+ "step": 1706
+ },
+ {
+ "epoch": 6.252747252747253,
+ "grad_norm": 59.41017150878906,
+ "learning_rate": 2.5006105006105005e-05,
+ "loss": 0.5853,
+ "step": 1707
+ },
+ {
+ "epoch": 6.256410256410256,
+ "grad_norm": 24.542570114135742,
+ "learning_rate": 2.4981684981684983e-05,
+ "loss": 0.0935,
+ "step": 1708
+ },
+ {
+ "epoch": 6.26007326007326,
+ "grad_norm": 29.034879684448242,
+ "learning_rate": 2.495726495726496e-05,
+ "loss": 0.1929,
+ "step": 1709
+ },
+ {
+ "epoch": 6.263736263736264,
+ "grad_norm": 17.3880672454834,
+ "learning_rate": 2.4932844932844933e-05,
+ "loss": 0.0927,
+ "step": 1710
+ },
+ {
+ "epoch": 6.267399267399267,
+ "grad_norm": 90.0419692993164,
+ "learning_rate": 2.4908424908424912e-05,
+ "loss": 1.1172,
+ "step": 1711
+ },
+ {
+ "epoch": 6.271062271062271,
+ "grad_norm": 4.710697650909424,
+ "learning_rate": 2.4884004884004884e-05,
+ "loss": 0.0207,
+ "step": 1712
+ },
+ {
+ "epoch": 6.274725274725275,
+ "grad_norm": 95.93651580810547,
+ "learning_rate": 2.485958485958486e-05,
+ "loss": 0.7137,
+ "step": 1713
+ },
+ {
+ "epoch": 6.278388278388278,
+ "grad_norm": 92.31869506835938,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.2076,
+ "step": 1714
+ },
+ {
+ "epoch": 6.282051282051282,
+ "grad_norm": 66.66917419433594,
+ "learning_rate": 2.4810744810744812e-05,
+ "loss": 0.8763,
+ "step": 1715
+ },
+ {
+ "epoch": 6.285714285714286,
+ "grad_norm": 94.52323150634766,
+ "learning_rate": 2.4786324786324787e-05,
+ "loss": 0.5962,
+ "step": 1716
+ },
+ {
+ "epoch": 6.289377289377289,
+ "grad_norm": 31.169715881347656,
+ "learning_rate": 2.4761904761904762e-05,
+ "loss": 0.1906,
+ "step": 1717
+ },
+ {
+ "epoch": 6.293040293040293,
+ "grad_norm": 54.97831726074219,
+ "learning_rate": 2.4737484737484737e-05,
+ "loss": 0.3767,
+ "step": 1718
+ },
+ {
+ "epoch": 6.2967032967032965,
+ "grad_norm": 25.52306365966797,
+ "learning_rate": 2.4713064713064712e-05,
+ "loss": 0.2128,
+ "step": 1719
+ },
+ {
+ "epoch": 6.3003663003663,
+ "grad_norm": 12.478558540344238,
+ "learning_rate": 2.4688644688644688e-05,
+ "loss": 0.0713,
+ "step": 1720
+ },
+ {
+ "epoch": 6.304029304029304,
+ "grad_norm": 27.71872329711914,
+ "learning_rate": 2.4664224664224666e-05,
+ "loss": 0.319,
+ "step": 1721
+ },
+ {
+ "epoch": 6.3076923076923075,
+ "grad_norm": 44.587589263916016,
+ "learning_rate": 2.463980463980464e-05,
+ "loss": 0.1997,
+ "step": 1722
+ },
+ {
+ "epoch": 6.311355311355311,
+ "grad_norm": 11.289876937866211,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 0.0694,
+ "step": 1723
+ },
+ {
+ "epoch": 6.315018315018315,
+ "grad_norm": 47.27211380004883,
+ "learning_rate": 2.4590964590964595e-05,
+ "loss": 0.249,
+ "step": 1724
+ },
+ {
+ "epoch": 6.318681318681318,
+ "grad_norm": 34.143611907958984,
+ "learning_rate": 2.4566544566544566e-05,
+ "loss": 0.3645,
+ "step": 1725
+ },
+ {
+ "epoch": 6.322344322344322,
+ "grad_norm": 33.73476791381836,
+ "learning_rate": 2.454212454212454e-05,
+ "loss": 0.5412,
+ "step": 1726
+ },
+ {
+ "epoch": 6.326007326007326,
+ "grad_norm": 20.03452491760254,
+ "learning_rate": 2.4517704517704516e-05,
+ "loss": 0.0966,
+ "step": 1727
+ },
+ {
+ "epoch": 6.329670329670329,
+ "grad_norm": 39.63338088989258,
+ "learning_rate": 2.4493284493284495e-05,
+ "loss": 0.2953,
+ "step": 1728
+ },
+ {
+ "epoch": 6.333333333333333,
+ "grad_norm": 42.99127960205078,
+ "learning_rate": 2.446886446886447e-05,
+ "loss": 0.5328,
+ "step": 1729
+ },
+ {
+ "epoch": 6.336996336996337,
+ "grad_norm": 18.581249237060547,
+ "learning_rate": 2.4444444444444445e-05,
+ "loss": 0.1095,
+ "step": 1730
+ },
+ {
+ "epoch": 6.34065934065934,
+ "grad_norm": 33.29508590698242,
+ "learning_rate": 2.442002442002442e-05,
+ "loss": 0.1608,
+ "step": 1731
+ },
+ {
+ "epoch": 6.344322344322344,
+ "grad_norm": 103.12726593017578,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.9665,
+ "step": 1732
+ },
+ {
+ "epoch": 6.347985347985348,
+ "grad_norm": 55.45216369628906,
+ "learning_rate": 2.437118437118437e-05,
+ "loss": 0.4441,
+ "step": 1733
+ },
+ {
+ "epoch": 6.351648351648351,
+ "grad_norm": 68.68230438232422,
+ "learning_rate": 2.434676434676435e-05,
+ "loss": 0.6929,
+ "step": 1734
+ },
+ {
+ "epoch": 6.355311355311355,
+ "grad_norm": 99.91059875488281,
+ "learning_rate": 2.4322344322344324e-05,
+ "loss": 0.599,
+ "step": 1735
+ },
+ {
+ "epoch": 6.358974358974359,
+ "grad_norm": 24.994863510131836,
+ "learning_rate": 2.42979242979243e-05,
+ "loss": 0.2212,
+ "step": 1736
+ },
+ {
+ "epoch": 6.362637362637362,
+ "grad_norm": 106.0428466796875,
+ "learning_rate": 2.4273504273504277e-05,
+ "loss": 0.685,
+ "step": 1737
+ },
+ {
+ "epoch": 6.366300366300366,
+ "grad_norm": 37.730712890625,
+ "learning_rate": 2.424908424908425e-05,
+ "loss": 0.0792,
+ "step": 1738
+ },
+ {
+ "epoch": 6.36996336996337,
+ "grad_norm": 44.056556701660156,
+ "learning_rate": 2.4224664224664224e-05,
+ "loss": 0.5062,
+ "step": 1739
+ },
+ {
+ "epoch": 6.373626373626374,
+ "grad_norm": 72.15331268310547,
+ "learning_rate": 2.42002442002442e-05,
+ "loss": 0.7541,
+ "step": 1740
+ },
+ {
+ "epoch": 6.377289377289378,
+ "grad_norm": 151.57752990722656,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 0.9455,
+ "step": 1741
+ },
+ {
+ "epoch": 6.380952380952381,
+ "grad_norm": 62.12364196777344,
+ "learning_rate": 2.4151404151404152e-05,
+ "loss": 0.3055,
+ "step": 1742
+ },
+ {
+ "epoch": 6.384615384615385,
+ "grad_norm": 21.725858688354492,
+ "learning_rate": 2.4126984126984128e-05,
+ "loss": 0.0691,
+ "step": 1743
+ },
+ {
+ "epoch": 6.388278388278389,
+ "grad_norm": 60.754615783691406,
+ "learning_rate": 2.4102564102564103e-05,
+ "loss": 0.5273,
+ "step": 1744
+ },
+ {
+ "epoch": 6.391941391941392,
+ "grad_norm": 63.324684143066406,
+ "learning_rate": 2.4078144078144078e-05,
+ "loss": 0.2735,
+ "step": 1745
+ },
+ {
+ "epoch": 6.395604395604396,
+ "grad_norm": 79.82772064208984,
+ "learning_rate": 2.4053724053724053e-05,
+ "loss": 1.1766,
+ "step": 1746
+ },
+ {
+ "epoch": 6.3992673992674,
+ "grad_norm": 42.69222640991211,
+ "learning_rate": 2.402930402930403e-05,
+ "loss": 0.3417,
+ "step": 1747
+ },
+ {
+ "epoch": 6.402930402930403,
+ "grad_norm": 125.5120849609375,
+ "learning_rate": 2.4004884004884006e-05,
+ "loss": 0.331,
+ "step": 1748
+ },
+ {
+ "epoch": 6.406593406593407,
+ "grad_norm": 61.30012512207031,
+ "learning_rate": 2.398046398046398e-05,
+ "loss": 0.5709,
+ "step": 1749
+ },
+ {
+ "epoch": 6.410256410256411,
+ "grad_norm": 18.139734268188477,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.0671,
+ "step": 1750
+ },
+ {
+ "epoch": 6.413919413919414,
+ "grad_norm": 29.233678817749023,
+ "learning_rate": 2.393162393162393e-05,
+ "loss": 0.2012,
+ "step": 1751
+ },
+ {
+ "epoch": 6.417582417582418,
+ "grad_norm": 6.065537452697754,
+ "learning_rate": 2.3907203907203907e-05,
+ "loss": 0.0362,
+ "step": 1752
+ },
+ {
+ "epoch": 6.4212454212454215,
+ "grad_norm": 27.241317749023438,
+ "learning_rate": 2.388278388278388e-05,
+ "loss": 0.2462,
+ "step": 1753
+ },
+ {
+ "epoch": 6.424908424908425,
+ "grad_norm": 34.21626663208008,
+ "learning_rate": 2.385836385836386e-05,
+ "loss": 0.3341,
+ "step": 1754
+ },
+ {
+ "epoch": 6.428571428571429,
+ "grad_norm": 3.2597031593322754,
+ "learning_rate": 2.3833943833943835e-05,
+ "loss": 0.0159,
+ "step": 1755
+ },
+ {
+ "epoch": 6.4322344322344325,
+ "grad_norm": 44.21895217895508,
+ "learning_rate": 2.380952380952381e-05,
+ "loss": 0.2461,
+ "step": 1756
+ },
+ {
+ "epoch": 6.435897435897436,
+ "grad_norm": 11.0900239944458,
+ "learning_rate": 2.3785103785103785e-05,
+ "loss": 0.0343,
+ "step": 1757
+ },
+ {
+ "epoch": 6.43956043956044,
+ "grad_norm": 33.349464416503906,
+ "learning_rate": 2.376068376068376e-05,
+ "loss": 0.1605,
+ "step": 1758
+ },
+ {
+ "epoch": 6.443223443223443,
+ "grad_norm": 36.584434509277344,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.291,
+ "step": 1759
+ },
+ {
+ "epoch": 6.446886446886447,
+ "grad_norm": 1.5533220767974854,
+ "learning_rate": 2.3711843711843714e-05,
+ "loss": 0.0072,
+ "step": 1760
+ },
+ {
+ "epoch": 6.450549450549451,
+ "grad_norm": 31.38529396057129,
+ "learning_rate": 2.368742368742369e-05,
+ "loss": 0.2211,
+ "step": 1761
+ },
+ {
+ "epoch": 6.454212454212454,
+ "grad_norm": 33.149131774902344,
+ "learning_rate": 2.3663003663003664e-05,
+ "loss": 0.7844,
+ "step": 1762
+ },
+ {
+ "epoch": 6.457875457875458,
+ "grad_norm": 21.318105697631836,
+ "learning_rate": 2.363858363858364e-05,
+ "loss": 0.1297,
+ "step": 1763
+ },
+ {
+ "epoch": 6.461538461538462,
+ "grad_norm": 22.11357879638672,
+ "learning_rate": 2.3614163614163614e-05,
+ "loss": 0.1063,
+ "step": 1764
+ },
+ {
+ "epoch": 6.465201465201465,
+ "grad_norm": 2.4257397651672363,
+ "learning_rate": 2.358974358974359e-05,
+ "loss": 0.0098,
+ "step": 1765
+ },
+ {
+ "epoch": 6.468864468864469,
+ "grad_norm": 11.911495208740234,
+ "learning_rate": 2.3565323565323564e-05,
+ "loss": 0.0386,
+ "step": 1766
+ },
+ {
+ "epoch": 6.472527472527473,
+ "grad_norm": 5.848181247711182,
+ "learning_rate": 2.3540903540903543e-05,
+ "loss": 0.0141,
+ "step": 1767
+ },
+ {
+ "epoch": 6.476190476190476,
+ "grad_norm": 58.96442413330078,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 0.1635,
+ "step": 1768
+ },
+ {
+ "epoch": 6.47985347985348,
+ "grad_norm": 45.464298248291016,
+ "learning_rate": 2.3492063492063493e-05,
+ "loss": 0.5185,
+ "step": 1769
+ },
+ {
+ "epoch": 6.483516483516484,
+ "grad_norm": 363.1459045410156,
+ "learning_rate": 2.3467643467643468e-05,
+ "loss": 0.9437,
+ "step": 1770
+ },
+ {
+ "epoch": 6.487179487179487,
+ "grad_norm": 30.113380432128906,
+ "learning_rate": 2.3443223443223443e-05,
+ "loss": 0.1013,
+ "step": 1771
+ },
+ {
+ "epoch": 6.490842490842491,
+ "grad_norm": 59.738224029541016,
+ "learning_rate": 2.3418803418803418e-05,
+ "loss": 0.7901,
+ "step": 1772
+ },
+ {
+ "epoch": 6.4945054945054945,
+ "grad_norm": 20.25137710571289,
+ "learning_rate": 2.3394383394383396e-05,
+ "loss": 0.2715,
+ "step": 1773
+ },
+ {
+ "epoch": 6.498168498168498,
+ "grad_norm": 36.56110763549805,
+ "learning_rate": 2.336996336996337e-05,
+ "loss": 0.4192,
+ "step": 1774
+ },
+ {
+ "epoch": 6.501831501831502,
+ "grad_norm": 25.077024459838867,
+ "learning_rate": 2.3345543345543347e-05,
+ "loss": 0.0861,
+ "step": 1775
+ },
+ {
+ "epoch": 6.5054945054945055,
+ "grad_norm": 19.396398544311523,
+ "learning_rate": 2.332112332112332e-05,
+ "loss": 0.0352,
+ "step": 1776
+ },
+ {
+ "epoch": 6.509157509157509,
+ "grad_norm": 93.91683197021484,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.1414,
+ "step": 1777
+ },
+ {
+ "epoch": 6.512820512820513,
+ "grad_norm": 30.467477798461914,
+ "learning_rate": 2.3272283272283272e-05,
+ "loss": 0.123,
+ "step": 1778
+ },
+ {
+ "epoch": 6.516483516483516,
+ "grad_norm": 135.5657196044922,
+ "learning_rate": 2.3247863247863247e-05,
+ "loss": 0.9203,
+ "step": 1779
+ },
+ {
+ "epoch": 6.52014652014652,
+ "grad_norm": 66.74224853515625,
+ "learning_rate": 2.3223443223443225e-05,
+ "loss": 1.6109,
+ "step": 1780
+ },
+ {
+ "epoch": 6.523809523809524,
+ "grad_norm": 5.672858238220215,
+ "learning_rate": 2.31990231990232e-05,
+ "loss": 0.0259,
+ "step": 1781
+ },
+ {
+ "epoch": 6.527472527472527,
+ "grad_norm": 116.89350128173828,
+ "learning_rate": 2.3174603174603175e-05,
+ "loss": 0.5468,
+ "step": 1782
+ },
+ {
+ "epoch": 6.531135531135531,
+ "grad_norm": 67.1368637084961,
+ "learning_rate": 2.315018315018315e-05,
+ "loss": 0.2192,
+ "step": 1783
+ },
+ {
+ "epoch": 6.534798534798535,
+ "grad_norm": 23.453842163085938,
+ "learning_rate": 2.3125763125763126e-05,
+ "loss": 0.1637,
+ "step": 1784
+ },
+ {
+ "epoch": 6.538461538461538,
+ "grad_norm": 10.070181846618652,
+ "learning_rate": 2.31013431013431e-05,
+ "loss": 0.0613,
+ "step": 1785
+ },
+ {
+ "epoch": 6.542124542124542,
+ "grad_norm": 76.60414123535156,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 1.1513,
+ "step": 1786
+ },
+ {
+ "epoch": 6.545787545787546,
+ "grad_norm": 28.578702926635742,
+ "learning_rate": 2.3052503052503054e-05,
+ "loss": 0.4436,
+ "step": 1787
+ },
+ {
+ "epoch": 6.549450549450549,
+ "grad_norm": 56.702999114990234,
+ "learning_rate": 2.302808302808303e-05,
+ "loss": 0.3688,
+ "step": 1788
+ },
+ {
+ "epoch": 6.553113553113553,
+ "grad_norm": 97.274658203125,
+ "learning_rate": 2.3003663003663004e-05,
+ "loss": 1.3588,
+ "step": 1789
+ },
+ {
+ "epoch": 6.556776556776557,
+ "grad_norm": 15.371636390686035,
+ "learning_rate": 2.297924297924298e-05,
+ "loss": 0.1227,
+ "step": 1790
+ },
+ {
+ "epoch": 6.56043956043956,
+ "grad_norm": 48.43988800048828,
+ "learning_rate": 2.2954822954822954e-05,
+ "loss": 0.5581,
+ "step": 1791
+ },
+ {
+ "epoch": 6.564102564102564,
+ "grad_norm": 30.510440826416016,
+ "learning_rate": 2.293040293040293e-05,
+ "loss": 0.1888,
+ "step": 1792
+ },
+ {
+ "epoch": 6.5677655677655675,
+ "grad_norm": 34.03535461425781,
+ "learning_rate": 2.2905982905982908e-05,
+ "loss": 0.3731,
+ "step": 1793
+ },
+ {
+ "epoch": 6.571428571428571,
+ "grad_norm": 41.19938659667969,
+ "learning_rate": 2.2881562881562883e-05,
+ "loss": 0.4705,
+ "step": 1794
+ },
+ {
+ "epoch": 6.575091575091575,
+ "grad_norm": 6.060940742492676,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.0586,
+ "step": 1795
+ },
+ {
+ "epoch": 6.5787545787545785,
+ "grad_norm": 19.60703468322754,
+ "learning_rate": 2.2832722832722833e-05,
+ "loss": 0.2046,
+ "step": 1796
+ },
+ {
+ "epoch": 6.582417582417582,
+ "grad_norm": 30.162328720092773,
+ "learning_rate": 2.2808302808302808e-05,
+ "loss": 0.1926,
+ "step": 1797
+ },
+ {
+ "epoch": 6.586080586080586,
+ "grad_norm": 28.184131622314453,
+ "learning_rate": 2.2783882783882783e-05,
+ "loss": 0.4085,
+ "step": 1798
+ },
+ {
+ "epoch": 6.589743589743589,
+ "grad_norm": 28.77677345275879,
+ "learning_rate": 2.275946275946276e-05,
+ "loss": 0.4333,
+ "step": 1799
+ },
+ {
+ "epoch": 6.593406593406593,
+ "grad_norm": 16.47443962097168,
+ "learning_rate": 2.2735042735042737e-05,
+ "loss": 0.1579,
+ "step": 1800
+ },
+ {
+ "epoch": 6.597069597069597,
+ "grad_norm": 24.273569107055664,
+ "learning_rate": 2.2710622710622712e-05,
+ "loss": 0.1917,
+ "step": 1801
+ },
+ {
+ "epoch": 6.6007326007326,
+ "grad_norm": 43.3727912902832,
+ "learning_rate": 2.2686202686202687e-05,
+ "loss": 0.4186,
+ "step": 1802
+ },
+ {
+ "epoch": 6.604395604395604,
+ "grad_norm": 21.321182250976562,
+ "learning_rate": 2.2661782661782662e-05,
+ "loss": 0.187,
+ "step": 1803
+ },
+ {
+ "epoch": 6.608058608058608,
+ "grad_norm": 9.65528678894043,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.0584,
+ "step": 1804
+ },
+ {
+ "epoch": 6.611721611721611,
+ "grad_norm": 43.85563659667969,
+ "learning_rate": 2.2612942612942612e-05,
+ "loss": 0.2249,
+ "step": 1805
+ },
+ {
+ "epoch": 6.615384615384615,
+ "grad_norm": 36.068946838378906,
+ "learning_rate": 2.258852258852259e-05,
+ "loss": 0.8459,
+ "step": 1806
+ },
+ {
+ "epoch": 6.619047619047619,
+ "grad_norm": 37.197776794433594,
+ "learning_rate": 2.2564102564102566e-05,
+ "loss": 0.4026,
+ "step": 1807
+ },
+ {
+ "epoch": 6.622710622710622,
+ "grad_norm": 11.39905071258545,
+ "learning_rate": 2.253968253968254e-05,
+ "loss": 0.0544,
+ "step": 1808
+ },
+ {
+ "epoch": 6.626373626373626,
+ "grad_norm": 6.2379150390625,
+ "learning_rate": 2.2515262515262516e-05,
+ "loss": 0.0342,
+ "step": 1809
+ },
+ {
+ "epoch": 6.63003663003663,
+ "grad_norm": 14.908777236938477,
+ "learning_rate": 2.249084249084249e-05,
+ "loss": 0.1245,
+ "step": 1810
+ },
+ {
+ "epoch": 6.633699633699633,
+ "grad_norm": 47.33977508544922,
+ "learning_rate": 2.2466422466422466e-05,
+ "loss": 0.3771,
+ "step": 1811
+ },
+ {
+ "epoch": 6.637362637362637,
+ "grad_norm": 25.724132537841797,
+ "learning_rate": 2.2442002442002444e-05,
+ "loss": 0.3055,
+ "step": 1812
+ },
+ {
+ "epoch": 6.641025641025641,
+ "grad_norm": 30.99205207824707,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 0.2163,
+ "step": 1813
+ },
+ {
+ "epoch": 6.644688644688645,
+ "grad_norm": 22.741575241088867,
+ "learning_rate": 2.2393162393162394e-05,
+ "loss": 0.136,
+ "step": 1814
+ },
+ {
+ "epoch": 6.648351648351649,
+ "grad_norm": 22.271474838256836,
+ "learning_rate": 2.236874236874237e-05,
+ "loss": 0.2299,
+ "step": 1815
+ },
+ {
+ "epoch": 6.652014652014652,
+ "grad_norm": 51.153072357177734,
+ "learning_rate": 2.2344322344322345e-05,
+ "loss": 0.8646,
+ "step": 1816
+ },
+ {
+ "epoch": 6.655677655677656,
+ "grad_norm": 4.649880409240723,
+ "learning_rate": 2.231990231990232e-05,
+ "loss": 0.0344,
+ "step": 1817
+ },
+ {
+ "epoch": 6.65934065934066,
+ "grad_norm": 2.948399305343628,
+ "learning_rate": 2.2295482295482295e-05,
+ "loss": 0.0128,
+ "step": 1818
+ },
+ {
+ "epoch": 6.663003663003663,
+ "grad_norm": 10.776185035705566,
+ "learning_rate": 2.2271062271062273e-05,
+ "loss": 0.0438,
+ "step": 1819
+ },
+ {
+ "epoch": 6.666666666666667,
+ "grad_norm": 31.777973175048828,
+ "learning_rate": 2.2246642246642248e-05,
+ "loss": 1.3552,
+ "step": 1820
+ },
+ {
+ "epoch": 6.670329670329671,
+ "grad_norm": 44.022377014160156,
+ "learning_rate": 2.222222222222222e-05,
+ "loss": 0.1928,
+ "step": 1821
+ },
+ {
+ "epoch": 6.673992673992674,
+ "grad_norm": 7.014647960662842,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 0.0675,
+ "step": 1822
+ },
+ {
+ "epoch": 6.677655677655678,
+ "grad_norm": 10.964372634887695,
+ "learning_rate": 2.2173382173382173e-05,
+ "loss": 0.0809,
+ "step": 1823
+ },
+ {
+ "epoch": 6.681318681318682,
+ "grad_norm": 42.56317901611328,
+ "learning_rate": 2.214896214896215e-05,
+ "loss": 0.2639,
+ "step": 1824
+ },
+ {
+ "epoch": 6.684981684981685,
+ "grad_norm": 25.33672523498535,
+ "learning_rate": 2.2124542124542127e-05,
+ "loss": 0.294,
+ "step": 1825
+ },
+ {
+ "epoch": 6.688644688644689,
+ "grad_norm": 9.823565483093262,
+ "learning_rate": 2.2100122100122102e-05,
+ "loss": 0.0885,
+ "step": 1826
+ },
+ {
+ "epoch": 6.6923076923076925,
+ "grad_norm": 3.2519893646240234,
+ "learning_rate": 2.2075702075702077e-05,
+ "loss": 0.0208,
+ "step": 1827
+ },
+ {
+ "epoch": 6.695970695970696,
+ "grad_norm": 14.441536903381348,
+ "learning_rate": 2.2051282051282052e-05,
+ "loss": 0.1541,
+ "step": 1828
+ },
+ {
+ "epoch": 6.6996336996337,
+ "grad_norm": 4.128608226776123,
+ "learning_rate": 2.2026862026862027e-05,
+ "loss": 0.03,
+ "step": 1829
+ },
+ {
+ "epoch": 6.7032967032967035,
+ "grad_norm": 13.953630447387695,
+ "learning_rate": 2.2002442002442002e-05,
+ "loss": 0.0781,
+ "step": 1830
+ },
+ {
+ "epoch": 6.706959706959707,
+ "grad_norm": 24.90090560913086,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.33,
+ "step": 1831
+ },
+ {
+ "epoch": 6.710622710622711,
+ "grad_norm": 43.3170051574707,
+ "learning_rate": 2.1953601953601956e-05,
+ "loss": 0.1735,
+ "step": 1832
+ },
+ {
+ "epoch": 6.714285714285714,
+ "grad_norm": 5.82177734375,
+ "learning_rate": 2.192918192918193e-05,
+ "loss": 0.0281,
+ "step": 1833
+ },
+ {
+ "epoch": 6.717948717948718,
+ "grad_norm": 26.415163040161133,
+ "learning_rate": 2.1904761904761903e-05,
+ "loss": 0.4272,
+ "step": 1834
+ },
+ {
+ "epoch": 6.721611721611722,
+ "grad_norm": 40.3553581237793,
+ "learning_rate": 2.188034188034188e-05,
+ "loss": 0.3375,
+ "step": 1835
+ },
+ {
+ "epoch": 6.725274725274725,
+ "grad_norm": 39.16763687133789,
+ "learning_rate": 2.1855921855921856e-05,
+ "loss": 0.881,
+ "step": 1836
+ },
+ {
+ "epoch": 6.728937728937729,
+ "grad_norm": 14.275158882141113,
+ "learning_rate": 2.183150183150183e-05,
+ "loss": 0.0499,
+ "step": 1837
+ },
+ {
+ "epoch": 6.732600732600733,
+ "grad_norm": 40.29611587524414,
+ "learning_rate": 2.180708180708181e-05,
+ "loss": 0.2447,
+ "step": 1838
+ },
+ {
+ "epoch": 6.736263736263736,
+ "grad_norm": 33.86298751831055,
+ "learning_rate": 2.1782661782661785e-05,
+ "loss": 0.2772,
+ "step": 1839
+ },
+ {
+ "epoch": 6.73992673992674,
+ "grad_norm": 34.46928405761719,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.2721,
+ "step": 1840
+ },
+ {
+ "epoch": 6.743589743589744,
+ "grad_norm": 17.7811222076416,
+ "learning_rate": 2.1733821733821735e-05,
+ "loss": 0.0955,
+ "step": 1841
+ },
+ {
+ "epoch": 6.747252747252747,
+ "grad_norm": 33.17821502685547,
+ "learning_rate": 2.170940170940171e-05,
+ "loss": 0.1831,
+ "step": 1842
+ },
+ {
+ "epoch": 6.750915750915751,
+ "grad_norm": 24.910184860229492,
+ "learning_rate": 2.1684981684981685e-05,
+ "loss": 0.1617,
+ "step": 1843
+ },
+ {
+ "epoch": 6.754578754578755,
+ "grad_norm": 28.5413875579834,
+ "learning_rate": 2.166056166056166e-05,
+ "loss": 0.2048,
+ "step": 1844
+ },
+ {
+ "epoch": 6.758241758241758,
+ "grad_norm": 26.866653442382812,
+ "learning_rate": 2.163614163614164e-05,
+ "loss": 0.1637,
+ "step": 1845
+ },
+ {
+ "epoch": 6.761904761904762,
+ "grad_norm": 43.447593688964844,
+ "learning_rate": 2.1611721611721613e-05,
+ "loss": 0.2206,
+ "step": 1846
+ },
+ {
+ "epoch": 6.7655677655677655,
+ "grad_norm": 8.146500587463379,
+ "learning_rate": 2.1587301587301585e-05,
+ "loss": 0.0199,
+ "step": 1847
+ },
+ {
+ "epoch": 6.769230769230769,
+ "grad_norm": 30.458940505981445,
+ "learning_rate": 2.1562881562881564e-05,
+ "loss": 0.0963,
+ "step": 1848
+ },
+ {
+ "epoch": 6.772893772893773,
+ "grad_norm": 1.6412991285324097,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.0097,
+ "step": 1849
+ },
+ {
+ "epoch": 6.7765567765567765,
+ "grad_norm": 22.804906845092773,
+ "learning_rate": 2.1514041514041514e-05,
+ "loss": 0.115,
+ "step": 1850
+ },
+ {
+ "epoch": 6.78021978021978,
+ "grad_norm": 21.790761947631836,
+ "learning_rate": 2.1489621489621492e-05,
+ "loss": 0.1609,
+ "step": 1851
+ },
+ {
+ "epoch": 6.783882783882784,
+ "grad_norm": 56.942420959472656,
+ "learning_rate": 2.1465201465201467e-05,
+ "loss": 0.3725,
+ "step": 1852
+ },
+ {
+ "epoch": 6.787545787545787,
+ "grad_norm": 31.713504791259766,
+ "learning_rate": 2.1440781440781442e-05,
+ "loss": 0.3035,
+ "step": 1853
+ },
+ {
+ "epoch": 6.791208791208791,
+ "grad_norm": 14.83351993560791,
+ "learning_rate": 2.1416361416361417e-05,
+ "loss": 0.0383,
+ "step": 1854
+ },
+ {
+ "epoch": 6.794871794871795,
+ "grad_norm": 28.03726577758789,
+ "learning_rate": 2.1391941391941392e-05,
+ "loss": 0.0432,
+ "step": 1855
+ },
+ {
+ "epoch": 6.798534798534798,
+ "grad_norm": 72.7824478149414,
+ "learning_rate": 2.1367521367521368e-05,
+ "loss": 0.7678,
+ "step": 1856
+ },
+ {
+ "epoch": 6.802197802197802,
+ "grad_norm": 48.0980224609375,
+ "learning_rate": 2.1343101343101343e-05,
+ "loss": 0.7691,
+ "step": 1857
+ },
+ {
+ "epoch": 6.805860805860806,
+ "grad_norm": 44.305519104003906,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.4334,
+ "step": 1858
+ },
+ {
+ "epoch": 6.809523809523809,
+ "grad_norm": 37.26662826538086,
+ "learning_rate": 2.1294261294261296e-05,
+ "loss": 0.5122,
+ "step": 1859
+ },
+ {
+ "epoch": 6.813186813186813,
+ "grad_norm": 11.758150100708008,
+ "learning_rate": 2.1269841269841268e-05,
+ "loss": 0.034,
+ "step": 1860
+ },
+ {
+ "epoch": 6.816849816849817,
+ "grad_norm": 22.28230857849121,
+ "learning_rate": 2.1245421245421246e-05,
+ "loss": 0.1423,
+ "step": 1861
+ },
+ {
+ "epoch": 6.82051282051282,
+ "grad_norm": 15.02229118347168,
+ "learning_rate": 2.122100122100122e-05,
+ "loss": 0.0574,
+ "step": 1862
+ },
+ {
+ "epoch": 6.824175824175824,
+ "grad_norm": 54.3133659362793,
+ "learning_rate": 2.1196581196581196e-05,
+ "loss": 0.7862,
+ "step": 1863
+ },
+ {
+ "epoch": 6.827838827838828,
+ "grad_norm": 14.319539070129395,
+ "learning_rate": 2.1172161172161175e-05,
+ "loss": 0.0509,
+ "step": 1864
+ },
+ {
+ "epoch": 6.831501831501831,
+ "grad_norm": 21.989151000976562,
+ "learning_rate": 2.114774114774115e-05,
+ "loss": 0.1181,
+ "step": 1865
+ },
+ {
+ "epoch": 6.835164835164835,
+ "grad_norm": 35.67295455932617,
+ "learning_rate": 2.112332112332112e-05,
+ "loss": 0.5721,
+ "step": 1866
+ },
+ {
+ "epoch": 6.8388278388278385,
+ "grad_norm": 1.1201294660568237,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.006,
+ "step": 1867
+ },
+ {
+ "epoch": 6.842490842490842,
+ "grad_norm": 55.64126205444336,
+ "learning_rate": 2.1074481074481075e-05,
+ "loss": 0.5155,
+ "step": 1868
+ },
+ {
+ "epoch": 6.846153846153846,
+ "grad_norm": 34.077598571777344,
+ "learning_rate": 2.105006105006105e-05,
+ "loss": 0.2999,
+ "step": 1869
+ },
+ {
+ "epoch": 6.8498168498168495,
+ "grad_norm": 47.34593200683594,
+ "learning_rate": 2.1025641025641025e-05,
+ "loss": 0.5192,
+ "step": 1870
+ },
+ {
+ "epoch": 6.853479853479853,
+ "grad_norm": 15.37938117980957,
+ "learning_rate": 2.1001221001221004e-05,
+ "loss": 0.0647,
+ "step": 1871
+ },
+ {
+ "epoch": 6.857142857142857,
+ "grad_norm": 8.03809928894043,
+ "learning_rate": 2.097680097680098e-05,
+ "loss": 0.0535,
+ "step": 1872
+ },
+ {
+ "epoch": 6.860805860805861,
+ "grad_norm": 34.22372055053711,
+ "learning_rate": 2.095238095238095e-05,
+ "loss": 0.4123,
+ "step": 1873
+ },
+ {
+ "epoch": 6.864468864468865,
+ "grad_norm": 19.66349220275879,
+ "learning_rate": 2.092796092796093e-05,
+ "loss": 0.165,
+ "step": 1874
+ },
+ {
+ "epoch": 6.868131868131869,
+ "grad_norm": 4.448884010314941,
+ "learning_rate": 2.0903540903540904e-05,
+ "loss": 0.0204,
+ "step": 1875
+ },
+ {
+ "epoch": 6.871794871794872,
+ "grad_norm": 7.874554634094238,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.0339,
+ "step": 1876
+ },
+ {
+ "epoch": 6.875457875457876,
+ "grad_norm": 2.1591508388519287,
+ "learning_rate": 2.0854700854700857e-05,
+ "loss": 0.0069,
+ "step": 1877
+ },
+ {
+ "epoch": 6.8791208791208796,
+ "grad_norm": 7.496129512786865,
+ "learning_rate": 2.0830280830280832e-05,
+ "loss": 0.0522,
+ "step": 1878
+ },
+ {
+ "epoch": 6.882783882783883,
+ "grad_norm": 1.867928385734558,
+ "learning_rate": 2.0805860805860804e-05,
+ "loss": 0.0075,
+ "step": 1879
+ },
+ {
+ "epoch": 6.886446886446887,
+ "grad_norm": 6.0440239906311035,
+ "learning_rate": 2.0781440781440783e-05,
+ "loss": 0.0454,
+ "step": 1880
+ },
+ {
+ "epoch": 6.8901098901098905,
+ "grad_norm": 38.901275634765625,
+ "learning_rate": 2.0757020757020758e-05,
+ "loss": 0.1179,
+ "step": 1881
+ },
+ {
+ "epoch": 6.893772893772894,
+ "grad_norm": 36.98682403564453,
+ "learning_rate": 2.0732600732600733e-05,
+ "loss": 0.4722,
+ "step": 1882
+ },
+ {
+ "epoch": 6.897435897435898,
+ "grad_norm": 24.764745712280273,
+ "learning_rate": 2.0708180708180708e-05,
+ "loss": 0.1179,
+ "step": 1883
+ },
+ {
+ "epoch": 6.9010989010989015,
+ "grad_norm": 9.029558181762695,
+ "learning_rate": 2.0683760683760686e-05,
+ "loss": 0.0134,
+ "step": 1884
+ },
+ {
+ "epoch": 6.904761904761905,
+ "grad_norm": 54.04767608642578,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 0.3645,
+ "step": 1885
+ },
+ {
+ "epoch": 6.908424908424909,
+ "grad_norm": 35.74855041503906,
+ "learning_rate": 2.0634920634920633e-05,
+ "loss": 0.5228,
+ "step": 1886
+ },
+ {
+ "epoch": 6.912087912087912,
+ "grad_norm": 18.870223999023438,
+ "learning_rate": 2.061050061050061e-05,
+ "loss": 0.0564,
+ "step": 1887
+ },
+ {
+ "epoch": 6.915750915750916,
+ "grad_norm": 1.4971216917037964,
+ "learning_rate": 2.0586080586080587e-05,
+ "loss": 0.0067,
+ "step": 1888
+ },
+ {
+ "epoch": 6.91941391941392,
+ "grad_norm": 71.35897064208984,
+ "learning_rate": 2.056166056166056e-05,
+ "loss": 0.9147,
+ "step": 1889
+ },
+ {
+ "epoch": 6.923076923076923,
+ "grad_norm": 20.66876220703125,
+ "learning_rate": 2.053724053724054e-05,
+ "loss": 0.0777,
+ "step": 1890
+ },
+ {
+ "epoch": 6.926739926739927,
+ "grad_norm": 12.178057670593262,
+ "learning_rate": 2.0512820512820515e-05,
+ "loss": 0.0682,
+ "step": 1891
+ },
+ {
+ "epoch": 6.930402930402931,
+ "grad_norm": 18.622045516967773,
+ "learning_rate": 2.0488400488400487e-05,
+ "loss": 0.2268,
+ "step": 1892
+ },
+ {
+ "epoch": 6.934065934065934,
+ "grad_norm": 13.028661727905273,
+ "learning_rate": 2.0463980463980462e-05,
+ "loss": 0.0783,
+ "step": 1893
+ },
+ {
+ "epoch": 6.937728937728938,
+ "grad_norm": 52.034603118896484,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.2124,
+ "step": 1894
+ },
+ {
+ "epoch": 6.941391941391942,
+ "grad_norm": 15.498795509338379,
+ "learning_rate": 2.0415140415140415e-05,
+ "loss": 0.1372,
+ "step": 1895
+ },
+ {
+ "epoch": 6.945054945054945,
+ "grad_norm": 4.659972190856934,
+ "learning_rate": 2.039072039072039e-05,
+ "loss": 0.0671,
+ "step": 1896
+ },
+ {
+ "epoch": 6.948717948717949,
+ "grad_norm": 67.44121551513672,
+ "learning_rate": 2.036630036630037e-05,
+ "loss": 0.3543,
+ "step": 1897
+ },
+ {
+ "epoch": 6.9523809523809526,
+ "grad_norm": 55.583770751953125,
+ "learning_rate": 2.0341880341880344e-05,
+ "loss": 0.5827,
+ "step": 1898
+ },
+ {
+ "epoch": 6.956043956043956,
+ "grad_norm": 2.5286853313446045,
+ "learning_rate": 2.0317460317460316e-05,
+ "loss": 0.0093,
+ "step": 1899
+ },
+ {
+ "epoch": 6.95970695970696,
+ "grad_norm": 35.537654876708984,
+ "learning_rate": 2.0293040293040294e-05,
+ "loss": 0.4927,
+ "step": 1900
+ },
+ {
+ "epoch": 6.9633699633699635,
+ "grad_norm": 5.582351207733154,
+ "learning_rate": 2.026862026862027e-05,
+ "loss": 0.0266,
+ "step": 1901
+ },
+ {
+ "epoch": 6.967032967032967,
+ "grad_norm": 24.245107650756836,
+ "learning_rate": 2.0244200244200244e-05,
+ "loss": 0.1652,
+ "step": 1902
+ },
+ {
+ "epoch": 6.970695970695971,
+ "grad_norm": 15.859257698059082,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.0523,
+ "step": 1903
+ },
+ {
+ "epoch": 6.9743589743589745,
+ "grad_norm": 4.049310207366943,
+ "learning_rate": 2.0195360195360198e-05,
+ "loss": 0.016,
+ "step": 1904
+ },
+ {
+ "epoch": 6.978021978021978,
+ "grad_norm": 22.330875396728516,
+ "learning_rate": 2.017094017094017e-05,
+ "loss": 0.0999,
+ "step": 1905
+ },
+ {
+ "epoch": 6.981684981684982,
+ "grad_norm": 5.005560874938965,
+ "learning_rate": 2.0146520146520144e-05,
+ "loss": 0.0186,
+ "step": 1906
+ },
+ {
+ "epoch": 6.985347985347985,
+ "grad_norm": 5.587247848510742,
+ "learning_rate": 2.0122100122100123e-05,
+ "loss": 0.0312,
+ "step": 1907
+ },
+ {
+ "epoch": 6.989010989010989,
+ "grad_norm": 46.75461959838867,
+ "learning_rate": 2.0097680097680098e-05,
+ "loss": 0.2803,
+ "step": 1908
+ },
+ {
+ "epoch": 6.992673992673993,
+ "grad_norm": 9.029139518737793,
+ "learning_rate": 2.0073260073260073e-05,
+ "loss": 0.0437,
+ "step": 1909
+ },
+ {
+ "epoch": 6.996336996336996,
+ "grad_norm": 26.199968338012695,
+ "learning_rate": 2.004884004884005e-05,
+ "loss": 0.4601,
+ "step": 1910
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 2.2140614986419678,
+ "learning_rate": 2.0024420024420023e-05,
+ "loss": 0.0096,
+ "step": 1911
+ },
+ {
+ "epoch": 7.003663003663004,
+ "grad_norm": 52.966732025146484,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.5645,
+ "step": 1912
+ },
+ {
+ "epoch": 7.007326007326007,
+ "grad_norm": 11.818926811218262,
+ "learning_rate": 1.9975579975579977e-05,
+ "loss": 0.1,
+ "step": 1913
+ },
+ {
+ "epoch": 7.010989010989011,
+ "grad_norm": 3.5507917404174805,
+ "learning_rate": 1.9951159951159952e-05,
+ "loss": 0.0124,
+ "step": 1914
+ },
+ {
+ "epoch": 7.014652014652015,
+ "grad_norm": 13.962370872497559,
+ "learning_rate": 1.9926739926739927e-05,
+ "loss": 0.0361,
+ "step": 1915
+ },
+ {
+ "epoch": 7.018315018315018,
+ "grad_norm": 18.855941772460938,
+ "learning_rate": 1.9902319902319905e-05,
+ "loss": 0.1029,
+ "step": 1916
+ },
+ {
+ "epoch": 7.021978021978022,
+ "grad_norm": 25.34268569946289,
+ "learning_rate": 1.987789987789988e-05,
+ "loss": 0.0968,
+ "step": 1917
+ },
+ {
+ "epoch": 7.0256410256410255,
+ "grad_norm": 12.053638458251953,
+ "learning_rate": 1.9853479853479852e-05,
+ "loss": 0.0473,
+ "step": 1918
+ },
+ {
+ "epoch": 7.029304029304029,
+ "grad_norm": 28.66246795654297,
+ "learning_rate": 1.9829059829059827e-05,
+ "loss": 0.477,
+ "step": 1919
+ },
+ {
+ "epoch": 7.032967032967033,
+ "grad_norm": 37.606475830078125,
+ "learning_rate": 1.9804639804639806e-05,
+ "loss": 0.3894,
+ "step": 1920
+ },
+ {
+ "epoch": 7.0366300366300365,
+ "grad_norm": 10.550342559814453,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.031,
+ "step": 1921
+ },
+ {
+ "epoch": 7.04029304029304,
+ "grad_norm": 8.748348236083984,
+ "learning_rate": 1.9755799755799756e-05,
+ "loss": 0.087,
+ "step": 1922
+ },
+ {
+ "epoch": 7.043956043956044,
+ "grad_norm": 16.9587345123291,
+ "learning_rate": 1.9731379731379734e-05,
+ "loss": 0.1271,
+ "step": 1923
+ },
+ {
+ "epoch": 7.0476190476190474,
+ "grad_norm": 64.79300689697266,
+ "learning_rate": 1.9706959706959706e-05,
+ "loss": 0.4748,
+ "step": 1924
+ },
+ {
+ "epoch": 7.051282051282051,
+ "grad_norm": 1.4843182563781738,
+ "learning_rate": 1.968253968253968e-05,
+ "loss": 0.0074,
+ "step": 1925
+ },
+ {
+ "epoch": 7.054945054945055,
+ "grad_norm": 6.48045539855957,
+ "learning_rate": 1.965811965811966e-05,
+ "loss": 0.0312,
+ "step": 1926
+ },
+ {
+ "epoch": 7.058608058608058,
+ "grad_norm": 13.35557746887207,
+ "learning_rate": 1.9633699633699634e-05,
+ "loss": 0.0395,
+ "step": 1927
+ },
+ {
+ "epoch": 7.062271062271062,
+ "grad_norm": 6.710418701171875,
+ "learning_rate": 1.960927960927961e-05,
+ "loss": 0.0237,
+ "step": 1928
+ },
+ {
+ "epoch": 7.065934065934066,
+ "grad_norm": 1.5964992046356201,
+ "learning_rate": 1.9584859584859588e-05,
+ "loss": 0.0069,
+ "step": 1929
+ },
+ {
+ "epoch": 7.069597069597069,
+ "grad_norm": 17.386457443237305,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.1719,
+ "step": 1930
+ },
+ {
+ "epoch": 7.073260073260073,
+ "grad_norm": 9.381852149963379,
+ "learning_rate": 1.9536019536019535e-05,
+ "loss": 0.0274,
+ "step": 1931
+ },
+ {
+ "epoch": 7.076923076923077,
+ "grad_norm": 96.48052978515625,
+ "learning_rate": 1.951159951159951e-05,
+ "loss": 0.9714,
+ "step": 1932
+ },
+ {
+ "epoch": 7.08058608058608,
+ "grad_norm": 9.537943840026855,
+ "learning_rate": 1.9487179487179488e-05,
+ "loss": 0.0608,
+ "step": 1933
+ },
+ {
+ "epoch": 7.084249084249084,
+ "grad_norm": 47.1885986328125,
+ "learning_rate": 1.9462759462759463e-05,
+ "loss": 0.3678,
+ "step": 1934
+ },
+ {
+ "epoch": 7.087912087912088,
+ "grad_norm": 22.831552505493164,
+ "learning_rate": 1.9438339438339438e-05,
+ "loss": 0.1386,
+ "step": 1935
+ },
+ {
+ "epoch": 7.091575091575091,
+ "grad_norm": 7.730359077453613,
+ "learning_rate": 1.9413919413919417e-05,
+ "loss": 0.0286,
+ "step": 1936
+ },
+ {
+ "epoch": 7.095238095238095,
+ "grad_norm": 34.329349517822266,
+ "learning_rate": 1.938949938949939e-05,
+ "loss": 0.2041,
+ "step": 1937
+ },
+ {
+ "epoch": 7.0989010989010985,
+ "grad_norm": 2.7768473625183105,
+ "learning_rate": 1.9365079365079363e-05,
+ "loss": 0.0095,
+ "step": 1938
+ },
+ {
+ "epoch": 7.102564102564102,
+ "grad_norm": 52.868446350097656,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 1.3287,
+ "step": 1939
+ },
+ {
+ "epoch": 7.106227106227106,
+ "grad_norm": 46.30121612548828,
+ "learning_rate": 1.9316239316239317e-05,
+ "loss": 0.6172,
+ "step": 1940
+ },
+ {
+ "epoch": 7.1098901098901095,
+ "grad_norm": 22.829683303833008,
+ "learning_rate": 1.9291819291819292e-05,
+ "loss": 0.2141,
+ "step": 1941
+ },
+ {
+ "epoch": 7.113553113553113,
+ "grad_norm": 5.540363311767578,
+ "learning_rate": 1.926739926739927e-05,
+ "loss": 0.0202,
+ "step": 1942
+ },
+ {
+ "epoch": 7.117216117216117,
+ "grad_norm": 12.821202278137207,
+ "learning_rate": 1.9242979242979246e-05,
+ "loss": 0.0474,
+ "step": 1943
+ },
+ {
+ "epoch": 7.1208791208791204,
+ "grad_norm": 51.50701141357422,
+ "learning_rate": 1.9218559218559217e-05,
+ "loss": 0.2716,
+ "step": 1944
+ },
+ {
+ "epoch": 7.124542124542124,
+ "grad_norm": 22.156648635864258,
+ "learning_rate": 1.9194139194139192e-05,
+ "loss": 0.4693,
+ "step": 1945
+ },
+ {
+ "epoch": 7.128205128205128,
+ "grad_norm": 21.045156478881836,
+ "learning_rate": 1.916971916971917e-05,
+ "loss": 0.471,
+ "step": 1946
+ },
+ {
+ "epoch": 7.131868131868132,
+ "grad_norm": 19.406959533691406,
+ "learning_rate": 1.9145299145299146e-05,
+ "loss": 0.1439,
+ "step": 1947
+ },
+ {
+ "epoch": 7.135531135531136,
+ "grad_norm": 3.8923749923706055,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.0165,
+ "step": 1948
+ },
+ {
+ "epoch": 7.13919413919414,
+ "grad_norm": 19.87603759765625,
+ "learning_rate": 1.90964590964591e-05,
+ "loss": 0.1763,
+ "step": 1949
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 0.5241024494171143,
+ "learning_rate": 1.907203907203907e-05,
+ "loss": 0.0026,
+ "step": 1950
+ },
+ {
+ "epoch": 7.146520146520147,
+ "grad_norm": 3.141636610031128,
+ "learning_rate": 1.9047619047619046e-05,
+ "loss": 0.0217,
+ "step": 1951
+ },
+ {
+ "epoch": 7.1501831501831505,
+ "grad_norm": 7.46498966217041,
+ "learning_rate": 1.9023199023199025e-05,
+ "loss": 0.0125,
+ "step": 1952
+ },
+ {
+ "epoch": 7.153846153846154,
+ "grad_norm": 2.050363779067993,
+ "learning_rate": 1.8998778998779e-05,
+ "loss": 0.0092,
+ "step": 1953
+ },
+ {
+ "epoch": 7.157509157509158,
+ "grad_norm": 65.3537826538086,
+ "learning_rate": 1.8974358974358975e-05,
+ "loss": 0.9234,
+ "step": 1954
+ },
+ {
+ "epoch": 7.1611721611721615,
+ "grad_norm": 39.09166717529297,
+ "learning_rate": 1.8949938949938953e-05,
+ "loss": 0.4183,
+ "step": 1955
+ },
+ {
+ "epoch": 7.164835164835165,
+ "grad_norm": 7.788208961486816,
+ "learning_rate": 1.8925518925518925e-05,
+ "loss": 0.0284,
+ "step": 1956
+ },
+ {
+ "epoch": 7.168498168498169,
+ "grad_norm": 19.53957176208496,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.148,
+ "step": 1957
+ },
+ {
+ "epoch": 7.172161172161172,
+ "grad_norm": 11.077863693237305,
+ "learning_rate": 1.8876678876678875e-05,
+ "loss": 0.0772,
+ "step": 1958
+ },
+ {
+ "epoch": 7.175824175824176,
+ "grad_norm": 10.294413566589355,
+ "learning_rate": 1.8852258852258853e-05,
+ "loss": 0.0278,
+ "step": 1959
+ },
+ {
+ "epoch": 7.17948717948718,
+ "grad_norm": 34.725284576416016,
+ "learning_rate": 1.882783882783883e-05,
+ "loss": 0.194,
+ "step": 1960
+ },
+ {
+ "epoch": 7.183150183150183,
+ "grad_norm": 27.773906707763672,
+ "learning_rate": 1.8803418803418804e-05,
+ "loss": 0.3261,
+ "step": 1961
+ },
+ {
+ "epoch": 7.186813186813187,
+ "grad_norm": 60.96028518676758,
+ "learning_rate": 1.8778998778998782e-05,
+ "loss": 0.5915,
+ "step": 1962
+ },
+ {
+ "epoch": 7.190476190476191,
+ "grad_norm": 9.918408393859863,
+ "learning_rate": 1.8754578754578754e-05,
+ "loss": 0.0428,
+ "step": 1963
+ },
+ {
+ "epoch": 7.194139194139194,
+ "grad_norm": 42.929927825927734,
+ "learning_rate": 1.873015873015873e-05,
+ "loss": 0.3522,
+ "step": 1964
+ },
+ {
+ "epoch": 7.197802197802198,
+ "grad_norm": 33.893463134765625,
+ "learning_rate": 1.8705738705738707e-05,
+ "loss": 0.5049,
+ "step": 1965
+ },
+ {
+ "epoch": 7.201465201465202,
+ "grad_norm": 3.18776273727417,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 0.0204,
+ "step": 1966
+ },
+ {
+ "epoch": 7.205128205128205,
+ "grad_norm": 9.548710823059082,
+ "learning_rate": 1.8656898656898657e-05,
+ "loss": 0.0711,
+ "step": 1967
+ },
+ {
+ "epoch": 7.208791208791209,
+ "grad_norm": 38.94087600708008,
+ "learning_rate": 1.8632478632478636e-05,
+ "loss": 0.5289,
+ "step": 1968
+ },
+ {
+ "epoch": 7.212454212454213,
+ "grad_norm": 5.812004566192627,
+ "learning_rate": 1.8608058608058607e-05,
+ "loss": 0.0224,
+ "step": 1969
+ },
+ {
+ "epoch": 7.216117216117216,
+ "grad_norm": 1.2060245275497437,
+ "learning_rate": 1.8583638583638583e-05,
+ "loss": 0.0077,
+ "step": 1970
+ },
+ {
+ "epoch": 7.21978021978022,
+ "grad_norm": 20.632722854614258,
+ "learning_rate": 1.8559218559218558e-05,
+ "loss": 0.0907,
+ "step": 1971
+ },
+ {
+ "epoch": 7.2234432234432235,
+ "grad_norm": 24.92366600036621,
+ "learning_rate": 1.8534798534798536e-05,
+ "loss": 0.1633,
+ "step": 1972
+ },
+ {
+ "epoch": 7.227106227106227,
+ "grad_norm": 2.3411026000976562,
+ "learning_rate": 1.851037851037851e-05,
+ "loss": 0.0098,
+ "step": 1973
+ },
+ {
+ "epoch": 7.230769230769231,
+ "grad_norm": 30.942848205566406,
+ "learning_rate": 1.8485958485958486e-05,
+ "loss": 0.1813,
+ "step": 1974
+ },
+ {
+ "epoch": 7.2344322344322345,
+ "grad_norm": 12.736541748046875,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.0397,
+ "step": 1975
+ },
+ {
+ "epoch": 7.238095238095238,
+ "grad_norm": 8.892921447753906,
+ "learning_rate": 1.8437118437118436e-05,
+ "loss": 0.0255,
+ "step": 1976
+ },
+ {
+ "epoch": 7.241758241758242,
+ "grad_norm": 36.48339080810547,
+ "learning_rate": 1.841269841269841e-05,
+ "loss": 0.3125,
+ "step": 1977
+ },
+ {
+ "epoch": 7.245421245421245,
+ "grad_norm": 48.35296630859375,
+ "learning_rate": 1.838827838827839e-05,
+ "loss": 0.4951,
+ "step": 1978
+ },
+ {
+ "epoch": 7.249084249084249,
+ "grad_norm": 31.021989822387695,
+ "learning_rate": 1.8363858363858365e-05,
+ "loss": 0.2124,
+ "step": 1979
+ },
+ {
+ "epoch": 7.252747252747253,
+ "grad_norm": 32.49650955200195,
+ "learning_rate": 1.833943833943834e-05,
+ "loss": 0.309,
+ "step": 1980
+ },
+ {
+ "epoch": 7.256410256410256,
+ "grad_norm": 43.47561264038086,
+ "learning_rate": 1.831501831501832e-05,
+ "loss": 0.3206,
+ "step": 1981
+ },
+ {
+ "epoch": 7.26007326007326,
+ "grad_norm": 14.67831802368164,
+ "learning_rate": 1.829059829059829e-05,
+ "loss": 0.0806,
+ "step": 1982
+ },
+ {
+ "epoch": 7.263736263736264,
+ "grad_norm": 23.66496467590332,
+ "learning_rate": 1.8266178266178265e-05,
+ "loss": 0.1769,
+ "step": 1983
+ },
+ {
+ "epoch": 7.267399267399267,
+ "grad_norm": 1.8125004768371582,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.0111,
+ "step": 1984
+ },
+ {
+ "epoch": 7.271062271062271,
+ "grad_norm": 1.3189254999160767,
+ "learning_rate": 1.821733821733822e-05,
+ "loss": 0.0056,
+ "step": 1985
+ },
+ {
+ "epoch": 7.274725274725275,
+ "grad_norm": 47.977203369140625,
+ "learning_rate": 1.8192918192918194e-05,
+ "loss": 0.3898,
+ "step": 1986
+ },
+ {
+ "epoch": 7.278388278388278,
+ "grad_norm": 39.66654968261719,
+ "learning_rate": 1.816849816849817e-05,
+ "loss": 0.4953,
+ "step": 1987
+ },
+ {
+ "epoch": 7.282051282051282,
+ "grad_norm": 24.90619659423828,
+ "learning_rate": 1.8144078144078147e-05,
+ "loss": 0.116,
+ "step": 1988
+ },
+ {
+ "epoch": 7.285714285714286,
+ "grad_norm": 4.373020648956299,
+ "learning_rate": 1.811965811965812e-05,
+ "loss": 0.0263,
+ "step": 1989
+ },
+ {
+ "epoch": 7.289377289377289,
+ "grad_norm": 24.788022994995117,
+ "learning_rate": 1.8095238095238094e-05,
+ "loss": 0.2322,
+ "step": 1990
+ },
+ {
+ "epoch": 7.293040293040293,
+ "grad_norm": 6.417362213134766,
+ "learning_rate": 1.8070818070818072e-05,
+ "loss": 0.0243,
+ "step": 1991
+ },
+ {
+ "epoch": 7.2967032967032965,
+ "grad_norm": 34.0954475402832,
+ "learning_rate": 1.8046398046398047e-05,
+ "loss": 0.6666,
+ "step": 1992
+ },
+ {
+ "epoch": 7.3003663003663,
+ "grad_norm": 5.597110748291016,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.0389,
+ "step": 1993
+ },
+ {
+ "epoch": 7.304029304029304,
+ "grad_norm": 70.55953979492188,
+ "learning_rate": 1.7997557997558e-05,
+ "loss": 0.7335,
+ "step": 1994
+ },
+ {
+ "epoch": 7.3076923076923075,
+ "grad_norm": 17.913522720336914,
+ "learning_rate": 1.7973137973137973e-05,
+ "loss": 0.2307,
+ "step": 1995
+ },
+ {
+ "epoch": 7.311355311355311,
+ "grad_norm": 9.62990665435791,
+ "learning_rate": 1.7948717948717948e-05,
+ "loss": 0.0515,
+ "step": 1996
+ },
+ {
+ "epoch": 7.315018315018315,
+ "grad_norm": 1.333807110786438,
+ "learning_rate": 1.7924297924297923e-05,
+ "loss": 0.0088,
+ "step": 1997
+ },
+ {
+ "epoch": 7.318681318681318,
+ "grad_norm": 12.604703903198242,
+ "learning_rate": 1.78998778998779e-05,
+ "loss": 0.0802,
+ "step": 1998
+ },
+ {
+ "epoch": 7.322344322344322,
+ "grad_norm": 57.309974670410156,
+ "learning_rate": 1.7875457875457876e-05,
+ "loss": 0.738,
+ "step": 1999
+ },
+ {
+ "epoch": 7.326007326007326,
+ "grad_norm": 12.750027656555176,
+ "learning_rate": 1.785103785103785e-05,
+ "loss": 0.0785,
+ "step": 2000
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2000/training_args.bin b/checkpoint-2000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-2000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-2000/zero_to_fp32.py b/checkpoint-2000/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-2000/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    buffers: dict
+    param_shapes: dict
+    shared_params: list
+    ds_version: int
+    frozen_param_shapes: dict
+    frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+        # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+        # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
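+    # Roughly (as we read the code below; not an authoritative spec):
+    #   fp32_flat_groups[rank][group] holds that rank's flat fp32 partition of param group `group`.
+    #   Concatenating a group's partitions across ranks restores the full flat vector, from which
+    #   each param is narrow()'d out in declaration order, modulo the up-to-2*world_size
+    #   alignment padding handled after each group below.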
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
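+    # Each rank stores ceil(unpartitioned_numel / world_size) elements; padding_numel is the number
+    # of filler elements appended so the total partitioned size is a multiple of world_size.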
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
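+        # flat_groups: per-rank lists of flat fp32 partitions (one tensor per param group)
+        # flat_groups_offset: cumulative start offset of each group within a rank's flat layout
+        # offset / partitioned_numel: start and length of this param's per-rank slice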
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+    # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
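+    # `offset` advanced by each param's per-rank partition size, so scale by world_size before
+    # comparing with the aggregate avail_numel in the sanity check below.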
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+          Convert a pseudo tensor to a torch tensor with ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+        for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+        # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
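+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+        # consolidate the DeepSpeed shards of a checkpoint into fp32 safetensors
+        # (the paths below are illustrative)
+        convert_zero_checkpoint_to_fp32_state_dict("checkpoint-2000", "checkpoint-2000-fp32",
+                                                   safe_serialization=True)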
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach to sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+        - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+    Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+    of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+                        help="directory to the pytorch fp32 state_dict output files "
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+        help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
+        "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+        "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
+        "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-2500/config.json b/checkpoint-2500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-2500/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-2500/global_step2500/mp_rank_00_model_states.pt b/checkpoint-2500/global_step2500/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0723482267ee1dd92890ab4f06204b82a1b4605a
--- /dev/null
+++ b/checkpoint-2500/global_step2500/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec52b76ab545ffc311f23aa27dece03ebcdb1575bec95caa0ed1d62a9a02f91c
+size 2271151845
diff --git a/checkpoint-2500/global_step2500/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-2500/global_step2500/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5d8c8a31fa8d5d5919c7424b420393036a906b5c
--- /dev/null
+++ b/checkpoint-2500/global_step2500/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28f7a07d7f040fa12203732c7b49a68e9412dc5b9433a3ae208181efd7a90dde
+size 3406552447
diff --git a/checkpoint-2500/global_step2500/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-2500/global_step2500/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f9344e807c359588756e06308c76c51906432c0b
--- /dev/null
+++ b/checkpoint-2500/global_step2500/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af54a42367468027ae2818b3b9c00d2c8bee02533807618c0d5c33acd4a35d26
+size 3406564543
diff --git a/checkpoint-2500/latest b/checkpoint-2500/latest
new file mode 100644
index 0000000000000000000000000000000000000000..98f8bed9a5485ee900d9931cc06950de69499848
--- /dev/null
+++ b/checkpoint-2500/latest
@@ -0,0 +1 @@
+global_step2500
\ No newline at end of file
diff --git a/checkpoint-2500/model.safetensors b/checkpoint-2500/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ad027a962f0929ffbdccc86bcc04c3f6b060dec8
--- /dev/null
+++ b/checkpoint-2500/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1efd70bc5533a80eee857862f6465b068ad70ce298e7901b2d1a37ee2efefc40
+size 2271071852
diff --git a/checkpoint-2500/rng_state_0.pth b/checkpoint-2500/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fe8992af71a6a2000de8093c644d362289e657b8
--- /dev/null
+++ b/checkpoint-2500/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80a3afad0752beb90a0678bf4a8869bc8c6e5e91879ddc01eba5196e057b58f9
+size 14853
diff --git a/checkpoint-2500/rng_state_1.pth b/checkpoint-2500/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4cc7e29fb162064c9b2111cbe8549990d697d353
--- /dev/null
+++ b/checkpoint-2500/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c977351142f191ef5feea242b6911b3a8599e9d71d59e4ed77d50a2b2e1256c2
+size 14853
diff --git a/checkpoint-2500/scheduler.pt b/checkpoint-2500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a81f6b8b81716e6d3833f37c7e4f515707f0f9fd
--- /dev/null
+++ b/checkpoint-2500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc83afe3d48a0db4dcec49938dccee4a76d9462bf14950916f4a274c13ad737e
+size 1465
diff --git a/checkpoint-2500/sentencepiece.bpe.model b/checkpoint-2500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-2500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-2500/special_tokens_map.json b/checkpoint-2500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-2500/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+    "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+    "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+    "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+    "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+    "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2500/tokenizer.json b/checkpoint-2500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-2500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-2500/tokenizer_config.json b/checkpoint-2500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-2500/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+      "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+      "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+      "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+      "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+      "content": "<mask>",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "mask_token": "<mask>",
+  "model_max_length": 8192,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
diff --git a/checkpoint-2500/trainer_state.json b/checkpoint-2500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..1474c9ca18a5d080a8629fcaed73f60289159f49
--- /dev/null
+++ b/checkpoint-2500/trainer_state.json
@@ -0,0 +1,17534 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 9.157509157509157,
+ "eval_steps": 500,
+ "global_step": 2500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ },
+ {
+ "epoch": 1.8351648351648353,
+ "grad_norm": 26.250751495361328,
+ "learning_rate": 5.445665445665445e-05,
+ "loss": 0.7496,
+ "step": 501
+ },
+ {
+ "epoch": 1.838827838827839,
+ "grad_norm": 19.820999145507812,
+ "learning_rate": 5.443223443223443e-05,
+ "loss": 0.4367,
+ "step": 502
+ },
+ {
+ "epoch": 1.8424908424908426,
+ "grad_norm": 25.09316062927246,
+ "learning_rate": 5.440781440781441e-05,
+ "loss": 0.8584,
+ "step": 503
+ },
+ {
+ "epoch": 1.8461538461538463,
+ "grad_norm": 17.808509826660156,
+ "learning_rate": 5.438339438339438e-05,
+ "loss": 0.3869,
+ "step": 504
+ },
+ {
+ "epoch": 1.84981684981685,
+ "grad_norm": 28.342119216918945,
+ "learning_rate": 5.435897435897436e-05,
+ "loss": 0.8881,
+ "step": 505
+ },
+ {
+ "epoch": 1.8534798534798536,
+ "grad_norm": 33.80287551879883,
+ "learning_rate": 5.433455433455434e-05,
+ "loss": 1.2911,
+ "step": 506
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 55.428138732910156,
+ "learning_rate": 5.431013431013431e-05,
+ "loss": 0.8934,
+ "step": 507
+ },
+ {
+ "epoch": 1.8608058608058609,
+ "grad_norm": 27.962610244750977,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 0.662,
+ "step": 508
+ },
+ {
+ "epoch": 1.8644688644688645,
+ "grad_norm": 62.84252166748047,
+ "learning_rate": 5.426129426129427e-05,
+ "loss": 1.9216,
+ "step": 509
+ },
+ {
+ "epoch": 1.8681318681318682,
+ "grad_norm": 24.26439666748047,
+ "learning_rate": 5.423687423687424e-05,
+ "loss": 0.2164,
+ "step": 510
+ },
+ {
+ "epoch": 1.8717948717948718,
+ "grad_norm": 50.95674133300781,
+ "learning_rate": 5.421245421245422e-05,
+ "loss": 0.7023,
+ "step": 511
+ },
+ {
+ "epoch": 1.8754578754578755,
+ "grad_norm": 41.17847442626953,
+ "learning_rate": 5.418803418803419e-05,
+ "loss": 1.1081,
+ "step": 512
+ },
+ {
+ "epoch": 1.879120879120879,
+ "grad_norm": 28.701988220214844,
+ "learning_rate": 5.416361416361416e-05,
+ "loss": 0.6519,
+ "step": 513
+ },
+ {
+ "epoch": 1.8827838827838828,
+ "grad_norm": 48.42552947998047,
+ "learning_rate": 5.413919413919414e-05,
+ "loss": 1.5215,
+ "step": 514
+ },
+ {
+ "epoch": 1.8864468864468864,
+ "grad_norm": 19.71268653869629,
+ "learning_rate": 5.411477411477412e-05,
+ "loss": 0.4731,
+ "step": 515
+ },
+ {
+ "epoch": 1.89010989010989,
+ "grad_norm": 68.88224792480469,
+ "learning_rate": 5.409035409035409e-05,
+ "loss": 3.0071,
+ "step": 516
+ },
+ {
+ "epoch": 1.8937728937728937,
+ "grad_norm": 34.33188247680664,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.7014,
+ "step": 517
+ },
+ {
+ "epoch": 1.8974358974358974,
+ "grad_norm": 18.214942932128906,
+ "learning_rate": 5.404151404151404e-05,
+ "loss": 0.2362,
+ "step": 518
+ },
+ {
+ "epoch": 1.901098901098901,
+ "grad_norm": 31.553678512573242,
+ "learning_rate": 5.401709401709402e-05,
+ "loss": 0.5839,
+ "step": 519
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 15.681426048278809,
+ "learning_rate": 5.3992673992673996e-05,
+ "loss": 0.6039,
+ "step": 520
+ },
+ {
+ "epoch": 1.9084249084249083,
+ "grad_norm": 18.462688446044922,
+ "learning_rate": 5.396825396825397e-05,
+ "loss": 0.5773,
+ "step": 521
+ },
+ {
+ "epoch": 1.912087912087912,
+ "grad_norm": 10.23849105834961,
+ "learning_rate": 5.3943833943833946e-05,
+ "loss": 0.3801,
+ "step": 522
+ },
+ {
+ "epoch": 1.9157509157509156,
+ "grad_norm": 35.680973052978516,
+ "learning_rate": 5.3919413919413925e-05,
+ "loss": 1.2559,
+ "step": 523
+ },
+ {
+ "epoch": 1.9194139194139193,
+ "grad_norm": 23.97362518310547,
+ "learning_rate": 5.3894993894993897e-05,
+ "loss": 0.4112,
+ "step": 524
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 25.785356521606445,
+ "learning_rate": 5.387057387057387e-05,
+ "loss": 0.8993,
+ "step": 525
+ },
+ {
+ "epoch": 1.9267399267399268,
+ "grad_norm": 25.246868133544922,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 0.6534,
+ "step": 526
+ },
+ {
+ "epoch": 1.9304029304029304,
+ "grad_norm": 29.850788116455078,
+ "learning_rate": 5.382173382173382e-05,
+ "loss": 0.52,
+ "step": 527
+ },
+ {
+ "epoch": 1.934065934065934,
+ "grad_norm": 20.702608108520508,
+ "learning_rate": 5.37973137973138e-05,
+ "loss": 0.4093,
+ "step": 528
+ },
+ {
+ "epoch": 1.9377289377289377,
+ "grad_norm": 36.39994812011719,
+ "learning_rate": 5.3772893772893775e-05,
+ "loss": 1.275,
+ "step": 529
+ },
+ {
+ "epoch": 1.9413919413919414,
+ "grad_norm": 27.56822395324707,
+ "learning_rate": 5.374847374847375e-05,
+ "loss": 0.6773,
+ "step": 530
+ },
+ {
+ "epoch": 1.945054945054945,
+ "grad_norm": 26.07769012451172,
+ "learning_rate": 5.3724053724053725e-05,
+ "loss": 0.5373,
+ "step": 531
+ },
+ {
+ "epoch": 1.9487179487179487,
+ "grad_norm": 48.47615051269531,
+ "learning_rate": 5.3699633699633704e-05,
+ "loss": 1.1931,
+ "step": 532
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 24.416805267333984,
+ "learning_rate": 5.3675213675213675e-05,
+ "loss": 0.4523,
+ "step": 533
+ },
+ {
+ "epoch": 1.9560439560439562,
+ "grad_norm": 56.8088264465332,
+ "learning_rate": 5.3650793650793654e-05,
+ "loss": 1.8992,
+ "step": 534
+ },
+ {
+ "epoch": 1.9597069597069599,
+ "grad_norm": 36.805912017822266,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 1.0743,
+ "step": 535
+ },
+ {
+ "epoch": 1.9633699633699635,
+ "grad_norm": 17.375244140625,
+ "learning_rate": 5.3601953601953604e-05,
+ "loss": 0.3546,
+ "step": 536
+ },
+ {
+ "epoch": 1.9670329670329672,
+ "grad_norm": 35.297767639160156,
+ "learning_rate": 5.357753357753358e-05,
+ "loss": 1.4903,
+ "step": 537
+ },
+ {
+ "epoch": 1.9706959706959708,
+ "grad_norm": 38.64927673339844,
+ "learning_rate": 5.3553113553113554e-05,
+ "loss": 0.9346,
+ "step": 538
+ },
+ {
+ "epoch": 1.9743589743589745,
+ "grad_norm": 23.494552612304688,
+ "learning_rate": 5.3528693528693526e-05,
+ "loss": 0.3677,
+ "step": 539
+ },
+ {
+ "epoch": 1.978021978021978,
+ "grad_norm": 21.8272647857666,
+ "learning_rate": 5.3504273504273504e-05,
+ "loss": 0.591,
+ "step": 540
+ },
+ {
+ "epoch": 1.9816849816849818,
+ "grad_norm": 15.60590934753418,
+ "learning_rate": 5.347985347985348e-05,
+ "loss": 0.3129,
+ "step": 541
+ },
+ {
+ "epoch": 1.9853479853479854,
+ "grad_norm": 23.846555709838867,
+ "learning_rate": 5.3455433455433454e-05,
+ "loss": 0.6108,
+ "step": 542
+ },
+ {
+ "epoch": 1.989010989010989,
+ "grad_norm": 21.743024826049805,
+ "learning_rate": 5.343101343101343e-05,
+ "loss": 1.0541,
+ "step": 543
+ },
+ {
+ "epoch": 1.9926739926739927,
+ "grad_norm": 29.806121826171875,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6088,
+ "step": 544
+ },
+ {
+ "epoch": 1.9963369963369964,
+ "grad_norm": 26.778568267822266,
+ "learning_rate": 5.338217338217338e-05,
+ "loss": 0.5842,
+ "step": 545
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 23.356237411499023,
+ "learning_rate": 5.335775335775336e-05,
+ "loss": 0.4591,
+ "step": 546
+ },
+ {
+ "epoch": 2.0036630036630036,
+ "grad_norm": 17.303443908691406,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 0.3432,
+ "step": 547
+ },
+ {
+ "epoch": 2.0073260073260073,
+ "grad_norm": 27.082172393798828,
+ "learning_rate": 5.330891330891331e-05,
+ "loss": 0.5156,
+ "step": 548
+ },
+ {
+ "epoch": 2.010989010989011,
+ "grad_norm": 26.520530700683594,
+ "learning_rate": 5.328449328449329e-05,
+ "loss": 0.3989,
+ "step": 549
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 23.737272262573242,
+ "learning_rate": 5.326007326007326e-05,
+ "loss": 0.5484,
+ "step": 550
+ },
+ {
+ "epoch": 2.0183150183150182,
+ "grad_norm": 24.222341537475586,
+ "learning_rate": 5.3235653235653233e-05,
+ "loss": 0.5365,
+ "step": 551
+ },
+ {
+ "epoch": 2.021978021978022,
+ "grad_norm": 29.081924438476562,
+ "learning_rate": 5.321123321123321e-05,
+ "loss": 0.6694,
+ "step": 552
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 32.419551849365234,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 0.7003,
+ "step": 553
+ },
+ {
+ "epoch": 2.029304029304029,
+ "grad_norm": 42.403709411621094,
+ "learning_rate": 5.316239316239316e-05,
+ "loss": 1.5474,
+ "step": 554
+ },
+ {
+ "epoch": 2.032967032967033,
+ "grad_norm": 17.615140914916992,
+ "learning_rate": 5.313797313797314e-05,
+ "loss": 0.588,
+ "step": 555
+ },
+ {
+ "epoch": 2.0366300366300365,
+ "grad_norm": 14.864067077636719,
+ "learning_rate": 5.311355311355311e-05,
+ "loss": 0.1613,
+ "step": 556
+ },
+ {
+ "epoch": 2.04029304029304,
+ "grad_norm": 20.189815521240234,
+ "learning_rate": 5.308913308913309e-05,
+ "loss": 0.4281,
+ "step": 557
+ },
+ {
+ "epoch": 2.043956043956044,
+ "grad_norm": 28.350017547607422,
+ "learning_rate": 5.306471306471307e-05,
+ "loss": 0.6614,
+ "step": 558
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 19.987825393676758,
+ "learning_rate": 5.304029304029304e-05,
+ "loss": 0.6906,
+ "step": 559
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 18.6667537689209,
+ "learning_rate": 5.301587301587302e-05,
+ "loss": 0.387,
+ "step": 560
+ },
+ {
+ "epoch": 2.0549450549450547,
+ "grad_norm": 20.930652618408203,
+ "learning_rate": 5.2991452991453e-05,
+ "loss": 0.7157,
+ "step": 561
+ },
+ {
+ "epoch": 2.0586080586080584,
+ "grad_norm": 22.05647087097168,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3256,
+ "step": 562
+ },
+ {
+ "epoch": 2.062271062271062,
+ "grad_norm": 32.66161346435547,
+ "learning_rate": 5.294261294261295e-05,
+ "loss": 1.3013,
+ "step": 563
+ },
+ {
+ "epoch": 2.065934065934066,
+ "grad_norm": 37.43238067626953,
+ "learning_rate": 5.291819291819292e-05,
+ "loss": 0.186,
+ "step": 564
+ },
+ {
+ "epoch": 2.06959706959707,
+ "grad_norm": 32.39999008178711,
+ "learning_rate": 5.289377289377289e-05,
+ "loss": 0.8047,
+ "step": 565
+ },
+ {
+ "epoch": 2.0732600732600734,
+ "grad_norm": 29.727481842041016,
+ "learning_rate": 5.286935286935287e-05,
+ "loss": 0.662,
+ "step": 566
+ },
+ {
+ "epoch": 2.076923076923077,
+ "grad_norm": 16.536264419555664,
+ "learning_rate": 5.284493284493285e-05,
+ "loss": 0.4,
+ "step": 567
+ },
+ {
+ "epoch": 2.0805860805860807,
+ "grad_norm": 23.41500473022461,
+ "learning_rate": 5.282051282051282e-05,
+ "loss": 0.4945,
+ "step": 568
+ },
+ {
+ "epoch": 2.0842490842490844,
+ "grad_norm": 48.842864990234375,
+ "learning_rate": 5.27960927960928e-05,
+ "loss": 0.7584,
+ "step": 569
+ },
+ {
+ "epoch": 2.087912087912088,
+ "grad_norm": 60.06027603149414,
+ "learning_rate": 5.277167277167277e-05,
+ "loss": 0.7179,
+ "step": 570
+ },
+ {
+ "epoch": 2.0915750915750917,
+ "grad_norm": 59.2591552734375,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.4883,
+ "step": 571
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 14.527932167053223,
+ "learning_rate": 5.272283272283273e-05,
+ "loss": 0.2811,
+ "step": 572
+ },
+ {
+ "epoch": 2.098901098901099,
+ "grad_norm": 16.2915096282959,
+ "learning_rate": 5.26984126984127e-05,
+ "loss": 0.2524,
+ "step": 573
+ },
+ {
+ "epoch": 2.1025641025641026,
+ "grad_norm": 28.938081741333008,
+ "learning_rate": 5.267399267399268e-05,
+ "loss": 0.5138,
+ "step": 574
+ },
+ {
+ "epoch": 2.1062271062271063,
+ "grad_norm": 27.541440963745117,
+ "learning_rate": 5.2649572649572655e-05,
+ "loss": 0.278,
+ "step": 575
+ },
+ {
+ "epoch": 2.10989010989011,
+ "grad_norm": 23.179025650024414,
+ "learning_rate": 5.262515262515263e-05,
+ "loss": 0.1881,
+ "step": 576
+ },
+ {
+ "epoch": 2.1135531135531136,
+ "grad_norm": 42.55375671386719,
+ "learning_rate": 5.26007326007326e-05,
+ "loss": 0.7882,
+ "step": 577
+ },
+ {
+ "epoch": 2.1172161172161172,
+ "grad_norm": 8.902749061584473,
+ "learning_rate": 5.257631257631258e-05,
+ "loss": 0.0611,
+ "step": 578
+ },
+ {
+ "epoch": 2.120879120879121,
+ "grad_norm": 19.483346939086914,
+ "learning_rate": 5.255189255189255e-05,
+ "loss": 0.0978,
+ "step": 579
+ },
+ {
+ "epoch": 2.1245421245421245,
+ "grad_norm": 13.898221969604492,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.0797,
+ "step": 580
+ },
+ {
+ "epoch": 2.128205128205128,
+ "grad_norm": 53.42538833618164,
+ "learning_rate": 5.2503052503052506e-05,
+ "loss": 0.9066,
+ "step": 581
+ },
+ {
+ "epoch": 2.131868131868132,
+ "grad_norm": 38.467891693115234,
+ "learning_rate": 5.247863247863248e-05,
+ "loss": 0.3272,
+ "step": 582
+ },
+ {
+ "epoch": 2.1355311355311355,
+ "grad_norm": 26.421035766601562,
+ "learning_rate": 5.2454212454212456e-05,
+ "loss": 0.6537,
+ "step": 583
+ },
+ {
+ "epoch": 2.139194139194139,
+ "grad_norm": 32.80412292480469,
+ "learning_rate": 5.2429792429792434e-05,
+ "loss": 1.1225,
+ "step": 584
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 26.87016487121582,
+ "learning_rate": 5.2405372405372406e-05,
+ "loss": 0.5749,
+ "step": 585
+ },
+ {
+ "epoch": 2.1465201465201464,
+ "grad_norm": 34.75699234008789,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 0.6926,
+ "step": 586
+ },
+ {
+ "epoch": 2.15018315018315,
+ "grad_norm": 61.76310348510742,
+ "learning_rate": 5.235653235653236e-05,
+ "loss": 0.9029,
+ "step": 587
+ },
+ {
+ "epoch": 2.1538461538461537,
+ "grad_norm": 40.86505126953125,
+ "learning_rate": 5.2332112332112335e-05,
+ "loss": 0.5169,
+ "step": 588
+ },
+ {
+ "epoch": 2.1575091575091574,
+ "grad_norm": 16.05042839050293,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 0.5211,
+ "step": 589
+ },
+ {
+ "epoch": 2.161172161172161,
+ "grad_norm": 19.56302261352539,
+ "learning_rate": 5.2283272283272285e-05,
+ "loss": 0.5737,
+ "step": 590
+ },
+ {
+ "epoch": 2.1648351648351647,
+ "grad_norm": 22.311508178710938,
+ "learning_rate": 5.2258852258852256e-05,
+ "loss": 0.4223,
+ "step": 591
+ },
+ {
+ "epoch": 2.1684981684981683,
+ "grad_norm": 21.059213638305664,
+ "learning_rate": 5.2234432234432235e-05,
+ "loss": 0.2285,
+ "step": 592
+ },
+ {
+ "epoch": 2.172161172161172,
+ "grad_norm": 28.82351303100586,
+ "learning_rate": 5.221001221001221e-05,
+ "loss": 0.8438,
+ "step": 593
+ },
+ {
+ "epoch": 2.1758241758241756,
+ "grad_norm": 14.425333023071289,
+ "learning_rate": 5.2185592185592185e-05,
+ "loss": 0.1765,
+ "step": 594
+ },
+ {
+ "epoch": 2.1794871794871793,
+ "grad_norm": 16.967479705810547,
+ "learning_rate": 5.2161172161172163e-05,
+ "loss": 0.2465,
+ "step": 595
+ },
+ {
+ "epoch": 2.183150183150183,
+ "grad_norm": 40.79065704345703,
+ "learning_rate": 5.2136752136752135e-05,
+ "loss": 0.6077,
+ "step": 596
+ },
+ {
+ "epoch": 2.186813186813187,
+ "grad_norm": 22.434715270996094,
+ "learning_rate": 5.2112332112332114e-05,
+ "loss": 0.3748,
+ "step": 597
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 32.18471908569336,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.5163,
+ "step": 598
+ },
+ {
+ "epoch": 2.1941391941391943,
+ "grad_norm": 20.43740463256836,
+ "learning_rate": 5.2063492063492064e-05,
+ "loss": 0.4116,
+ "step": 599
+ },
+ {
+ "epoch": 2.197802197802198,
+ "grad_norm": 6.528069496154785,
+ "learning_rate": 5.203907203907204e-05,
+ "loss": 0.065,
+ "step": 600
+ },
+ {
+ "epoch": 2.2014652014652016,
+ "grad_norm": 35.0635871887207,
+ "learning_rate": 5.201465201465202e-05,
+ "loss": 1.2288,
+ "step": 601
+ },
+ {
+ "epoch": 2.2051282051282053,
+ "grad_norm": 23.499767303466797,
+ "learning_rate": 5.199023199023199e-05,
+ "loss": 0.49,
+ "step": 602
+ },
+ {
+ "epoch": 2.208791208791209,
+ "grad_norm": 20.234952926635742,
+ "learning_rate": 5.1965811965811964e-05,
+ "loss": 0.231,
+ "step": 603
+ },
+ {
+ "epoch": 2.2124542124542126,
+ "grad_norm": 9.268828392028809,
+ "learning_rate": 5.194139194139194e-05,
+ "loss": 0.0732,
+ "step": 604
+ },
+ {
+ "epoch": 2.2161172161172162,
+ "grad_norm": 52.60474395751953,
+ "learning_rate": 5.1916971916971914e-05,
+ "loss": 0.8766,
+ "step": 605
+ },
+ {
+ "epoch": 2.21978021978022,
+ "grad_norm": 41.86642074584961,
+ "learning_rate": 5.189255189255189e-05,
+ "loss": 0.4743,
+ "step": 606
+ },
+ {
+ "epoch": 2.2234432234432235,
+ "grad_norm": 30.304580688476562,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.4412,
+ "step": 607
+ },
+ {
+ "epoch": 2.227106227106227,
+ "grad_norm": 27.26057243347168,
+ "learning_rate": 5.184371184371184e-05,
+ "loss": 0.3496,
+ "step": 608
+ },
+ {
+ "epoch": 2.230769230769231,
+ "grad_norm": 40.55131149291992,
+ "learning_rate": 5.181929181929182e-05,
+ "loss": 0.7097,
+ "step": 609
+ },
+ {
+ "epoch": 2.2344322344322345,
+ "grad_norm": 61.97871017456055,
+ "learning_rate": 5.17948717948718e-05,
+ "loss": 1.3686,
+ "step": 610
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 38.211700439453125,
+ "learning_rate": 5.177045177045177e-05,
+ "loss": 0.565,
+ "step": 611
+ },
+ {
+ "epoch": 2.241758241758242,
+ "grad_norm": 20.10716438293457,
+ "learning_rate": 5.174603174603175e-05,
+ "loss": 0.3468,
+ "step": 612
+ },
+ {
+ "epoch": 2.2454212454212454,
+ "grad_norm": 23.96891975402832,
+ "learning_rate": 5.172161172161173e-05,
+ "loss": 0.2295,
+ "step": 613
+ },
+ {
+ "epoch": 2.249084249084249,
+ "grad_norm": 10.14421272277832,
+ "learning_rate": 5.16971916971917e-05,
+ "loss": 0.0943,
+ "step": 614
+ },
+ {
+ "epoch": 2.2527472527472527,
+ "grad_norm": 15.786056518554688,
+ "learning_rate": 5.167277167277167e-05,
+ "loss": 0.1213,
+ "step": 615
+ },
+ {
+ "epoch": 2.2564102564102564,
+ "grad_norm": 20.907663345336914,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.235,
+ "step": 616
+ },
+ {
+ "epoch": 2.26007326007326,
+ "grad_norm": 32.149600982666016,
+ "learning_rate": 5.162393162393162e-05,
+ "loss": 0.4807,
+ "step": 617
+ },
+ {
+ "epoch": 2.2637362637362637,
+ "grad_norm": 33.965518951416016,
+ "learning_rate": 5.15995115995116e-05,
+ "loss": 0.4517,
+ "step": 618
+ },
+ {
+ "epoch": 2.2673992673992673,
+ "grad_norm": 49.98363494873047,
+ "learning_rate": 5.157509157509158e-05,
+ "loss": 0.6434,
+ "step": 619
+ },
+ {
+ "epoch": 2.271062271062271,
+ "grad_norm": 14.035831451416016,
+ "learning_rate": 5.155067155067155e-05,
+ "loss": 0.1117,
+ "step": 620
+ },
+ {
+ "epoch": 2.2747252747252746,
+ "grad_norm": 28.84484100341797,
+ "learning_rate": 5.152625152625153e-05,
+ "loss": 0.8002,
+ "step": 621
+ },
+ {
+ "epoch": 2.2783882783882783,
+ "grad_norm": 41.59181594848633,
+ "learning_rate": 5.15018315018315e-05,
+ "loss": 0.4465,
+ "step": 622
+ },
+ {
+ "epoch": 2.282051282051282,
+ "grad_norm": 33.10573196411133,
+ "learning_rate": 5.147741147741148e-05,
+ "loss": 0.5795,
+ "step": 623
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 34.79928970336914,
+ "learning_rate": 5.145299145299146e-05,
+ "loss": 0.3135,
+ "step": 624
+ },
+ {
+ "epoch": 2.2893772893772892,
+ "grad_norm": 18.095544815063477,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.0961,
+ "step": 625
+ },
+ {
+ "epoch": 2.293040293040293,
+ "grad_norm": 16.55453872680664,
+ "learning_rate": 5.140415140415141e-05,
+ "loss": 0.0868,
+ "step": 626
+ },
+ {
+ "epoch": 2.2967032967032965,
+ "grad_norm": 42.18946075439453,
+ "learning_rate": 5.1379731379731386e-05,
+ "loss": 0.8892,
+ "step": 627
+ },
+ {
+ "epoch": 2.3003663003663,
+ "grad_norm": 54.753448486328125,
+ "learning_rate": 5.135531135531135e-05,
+ "loss": 0.833,
+ "step": 628
+ },
+ {
+ "epoch": 2.304029304029304,
+ "grad_norm": 27.723228454589844,
+ "learning_rate": 5.133089133089133e-05,
+ "loss": 0.2744,
+ "step": 629
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 28.53034019470215,
+ "learning_rate": 5.130647130647131e-05,
+ "loss": 0.1696,
+ "step": 630
+ },
+ {
+ "epoch": 2.311355311355311,
+ "grad_norm": 65.4127426147461,
+ "learning_rate": 5.128205128205128e-05,
+ "loss": 0.9019,
+ "step": 631
+ },
+ {
+ "epoch": 2.315018315018315,
+ "grad_norm": 22.794870376586914,
+ "learning_rate": 5.125763125763126e-05,
+ "loss": 0.1987,
+ "step": 632
+ },
+ {
+ "epoch": 2.3186813186813184,
+ "grad_norm": 29.870113372802734,
+ "learning_rate": 5.1233211233211236e-05,
+ "loss": 0.4816,
+ "step": 633
+ },
+ {
+ "epoch": 2.3223443223443225,
+ "grad_norm": 38.91164779663086,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.7424,
+ "step": 634
+ },
+ {
+ "epoch": 2.326007326007326,
+ "grad_norm": 36.57811737060547,
+ "learning_rate": 5.1184371184371186e-05,
+ "loss": 1.1365,
+ "step": 635
+ },
+ {
+ "epoch": 2.32967032967033,
+ "grad_norm": 31.59128189086914,
+ "learning_rate": 5.1159951159951165e-05,
+ "loss": 0.6167,
+ "step": 636
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 25.956003189086914,
+ "learning_rate": 5.1135531135531136e-05,
+ "loss": 0.8808,
+ "step": 637
+ },
+ {
+ "epoch": 2.336996336996337,
+ "grad_norm": 38.18582534790039,
+ "learning_rate": 5.1111111111111115e-05,
+ "loss": 0.9417,
+ "step": 638
+ },
+ {
+ "epoch": 2.340659340659341,
+ "grad_norm": 27.436229705810547,
+ "learning_rate": 5.108669108669109e-05,
+ "loss": 0.7539,
+ "step": 639
+ },
+ {
+ "epoch": 2.3443223443223444,
+ "grad_norm": 40.86305618286133,
+ "learning_rate": 5.1062271062271065e-05,
+ "loss": 2.126,
+ "step": 640
+ },
+ {
+ "epoch": 2.347985347985348,
+ "grad_norm": 22.224748611450195,
+ "learning_rate": 5.103785103785104e-05,
+ "loss": 0.9958,
+ "step": 641
+ },
+ {
+ "epoch": 2.3516483516483517,
+ "grad_norm": 19.915552139282227,
+ "learning_rate": 5.1013431013431015e-05,
+ "loss": 1.1045,
+ "step": 642
+ },
+ {
+ "epoch": 2.3553113553113554,
+ "grad_norm": 17.045989990234375,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8906,
+ "step": 643
+ },
+ {
+ "epoch": 2.358974358974359,
+ "grad_norm": 22.106670379638672,
+ "learning_rate": 5.0964590964590965e-05,
+ "loss": 0.9856,
+ "step": 644
+ },
+ {
+ "epoch": 2.3626373626373627,
+ "grad_norm": 17.583837509155273,
+ "learning_rate": 5.0940170940170944e-05,
+ "loss": 0.8328,
+ "step": 645
+ },
+ {
+ "epoch": 2.3663003663003663,
+ "grad_norm": 57.61167526245117,
+ "learning_rate": 5.0915750915750915e-05,
+ "loss": 0.578,
+ "step": 646
+ },
+ {
+ "epoch": 2.36996336996337,
+ "grad_norm": 13.941128730773926,
+ "learning_rate": 5.0891330891330894e-05,
+ "loss": 0.5892,
+ "step": 647
+ },
+ {
+ "epoch": 2.3736263736263736,
+ "grad_norm": 22.38715171813965,
+ "learning_rate": 5.0866910866910866e-05,
+ "loss": 0.7608,
+ "step": 648
+ },
+ {
+ "epoch": 2.3772893772893773,
+ "grad_norm": 22.42316436767578,
+ "learning_rate": 5.0842490842490844e-05,
+ "loss": 0.7923,
+ "step": 649
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 32.75740432739258,
+ "learning_rate": 5.081807081807082e-05,
+ "loss": 1.0798,
+ "step": 650
+ },
+ {
+ "epoch": 2.3846153846153846,
+ "grad_norm": 19.295289993286133,
+ "learning_rate": 5.0793650793650794e-05,
+ "loss": 0.4898,
+ "step": 651
+ },
+ {
+ "epoch": 2.3882783882783882,
+ "grad_norm": 25.849227905273438,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.5557,
+ "step": 652
+ },
+ {
+ "epoch": 2.391941391941392,
+ "grad_norm": 21.321088790893555,
+ "learning_rate": 5.074481074481075e-05,
+ "loss": 0.2743,
+ "step": 653
+ },
+ {
+ "epoch": 2.3956043956043955,
+ "grad_norm": 28.795917510986328,
+ "learning_rate": 5.0720390720390716e-05,
+ "loss": 0.7039,
+ "step": 654
+ },
+ {
+ "epoch": 2.399267399267399,
+ "grad_norm": 19.86751937866211,
+ "learning_rate": 5.0695970695970694e-05,
+ "loss": 0.3155,
+ "step": 655
+ },
+ {
+ "epoch": 2.402930402930403,
+ "grad_norm": 33.3828010559082,
+ "learning_rate": 5.067155067155067e-05,
+ "loss": 1.0696,
+ "step": 656
+ },
+ {
+ "epoch": 2.4065934065934065,
+ "grad_norm": 37.38752746582031,
+ "learning_rate": 5.0647130647130645e-05,
+ "loss": 0.8123,
+ "step": 657
+ },
+ {
+ "epoch": 2.41025641025641,
+ "grad_norm": 29.22795867919922,
+ "learning_rate": 5.062271062271062e-05,
+ "loss": 0.9515,
+ "step": 658
+ },
+ {
+ "epoch": 2.413919413919414,
+ "grad_norm": 41.129981994628906,
+ "learning_rate": 5.05982905982906e-05,
+ "loss": 1.1329,
+ "step": 659
+ },
+ {
+ "epoch": 2.4175824175824174,
+ "grad_norm": 40.985042572021484,
+ "learning_rate": 5.057387057387057e-05,
+ "loss": 0.675,
+ "step": 660
+ },
+ {
+ "epoch": 2.421245421245421,
+ "grad_norm": 33.49393844604492,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 0.9679,
+ "step": 661
+ },
+ {
+ "epoch": 2.4249084249084247,
+ "grad_norm": 28.741533279418945,
+ "learning_rate": 5.052503052503053e-05,
+ "loss": 0.7928,
+ "step": 662
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 28.89700698852539,
+ "learning_rate": 5.05006105006105e-05,
+ "loss": 0.7594,
+ "step": 663
+ },
+ {
+ "epoch": 2.4322344322344325,
+ "grad_norm": 4.59797477722168,
+ "learning_rate": 5.047619047619048e-05,
+ "loss": 0.0584,
+ "step": 664
+ },
+ {
+ "epoch": 2.435897435897436,
+ "grad_norm": 29.852828979492188,
+ "learning_rate": 5.045177045177046e-05,
+ "loss": 0.614,
+ "step": 665
+ },
+ {
+ "epoch": 2.4395604395604398,
+ "grad_norm": 15.132670402526855,
+ "learning_rate": 5.042735042735043e-05,
+ "loss": 0.2353,
+ "step": 666
+ },
+ {
+ "epoch": 2.4432234432234434,
+ "grad_norm": 23.85403060913086,
+ "learning_rate": 5.04029304029304e-05,
+ "loss": 0.9065,
+ "step": 667
+ },
+ {
+ "epoch": 2.446886446886447,
+ "grad_norm": 12.384196281433105,
+ "learning_rate": 5.037851037851038e-05,
+ "loss": 0.2065,
+ "step": 668
+ },
+ {
+ "epoch": 2.4505494505494507,
+ "grad_norm": 18.347129821777344,
+ "learning_rate": 5.035409035409035e-05,
+ "loss": 0.647,
+ "step": 669
+ },
+ {
+ "epoch": 2.4542124542124544,
+ "grad_norm": 18.645936965942383,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2072,
+ "step": 670
+ },
+ {
+ "epoch": 2.457875457875458,
+ "grad_norm": 9.493071556091309,
+ "learning_rate": 5.03052503052503e-05,
+ "loss": 0.1805,
+ "step": 671
+ },
+ {
+ "epoch": 2.4615384615384617,
+ "grad_norm": 18.552539825439453,
+ "learning_rate": 5.028083028083028e-05,
+ "loss": 0.4078,
+ "step": 672
+ },
+ {
+ "epoch": 2.4652014652014653,
+ "grad_norm": 21.735048294067383,
+ "learning_rate": 5.025641025641026e-05,
+ "loss": 0.4231,
+ "step": 673
+ },
+ {
+ "epoch": 2.468864468864469,
+ "grad_norm": 54.32040023803711,
+ "learning_rate": 5.023199023199023e-05,
+ "loss": 1.3927,
+ "step": 674
+ },
+ {
+ "epoch": 2.4725274725274726,
+ "grad_norm": 26.955970764160156,
+ "learning_rate": 5.020757020757021e-05,
+ "loss": 0.6899,
+ "step": 675
+ },
+ {
+ "epoch": 2.4761904761904763,
+ "grad_norm": 43.423526763916016,
+ "learning_rate": 5.018315018315019e-05,
+ "loss": 1.2084,
+ "step": 676
+ },
+ {
+ "epoch": 2.47985347985348,
+ "grad_norm": 35.98548126220703,
+ "learning_rate": 5.015873015873016e-05,
+ "loss": 1.5047,
+ "step": 677
+ },
+ {
+ "epoch": 2.4835164835164836,
+ "grad_norm": 22.593570709228516,
+ "learning_rate": 5.013431013431014e-05,
+ "loss": 0.6918,
+ "step": 678
+ },
+ {
+ "epoch": 2.4871794871794872,
+ "grad_norm": 21.29257583618164,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.3578,
+ "step": 679
+ },
+ {
+ "epoch": 2.490842490842491,
+ "grad_norm": 21.672088623046875,
+ "learning_rate": 5.008547008547008e-05,
+ "loss": 0.7757,
+ "step": 680
+ },
+ {
+ "epoch": 2.4945054945054945,
+ "grad_norm": 9.625850677490234,
+ "learning_rate": 5.006105006105006e-05,
+ "loss": 0.1329,
+ "step": 681
+ },
+ {
+ "epoch": 2.498168498168498,
+ "grad_norm": 16.92123794555664,
+ "learning_rate": 5.003663003663004e-05,
+ "loss": 0.5599,
+ "step": 682
+ },
+ {
+ "epoch": 2.501831501831502,
+ "grad_norm": 15.665925025939941,
+ "learning_rate": 5.001221001221001e-05,
+ "loss": 0.3099,
+ "step": 683
+ },
+ {
+ "epoch": 2.5054945054945055,
+ "grad_norm": 21.316635131835938,
+ "learning_rate": 4.998778998778999e-05,
+ "loss": 0.5746,
+ "step": 684
+ },
+ {
+ "epoch": 2.509157509157509,
+ "grad_norm": 24.99594497680664,
+ "learning_rate": 4.996336996336997e-05,
+ "loss": 1.1274,
+ "step": 685
+ },
+ {
+ "epoch": 2.5128205128205128,
+ "grad_norm": 29.795175552368164,
+ "learning_rate": 4.993894993894994e-05,
+ "loss": 0.9991,
+ "step": 686
+ },
+ {
+ "epoch": 2.5164835164835164,
+ "grad_norm": 16.337533950805664,
+ "learning_rate": 4.991452991452992e-05,
+ "loss": 0.4101,
+ "step": 687
+ },
+ {
+ "epoch": 2.52014652014652,
+ "grad_norm": 20.065715789794922,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.7786,
+ "step": 688
+ },
+ {
+ "epoch": 2.5238095238095237,
+ "grad_norm": 19.341567993164062,
+ "learning_rate": 4.986568986568987e-05,
+ "loss": 0.4989,
+ "step": 689
+ },
+ {
+ "epoch": 2.5274725274725274,
+ "grad_norm": 14.688420295715332,
+ "learning_rate": 4.9841269841269845e-05,
+ "loss": 0.4081,
+ "step": 690
+ },
+ {
+ "epoch": 2.531135531135531,
+ "grad_norm": 39.346012115478516,
+ "learning_rate": 4.9816849816849824e-05,
+ "loss": 1.7919,
+ "step": 691
+ },
+ {
+ "epoch": 2.5347985347985347,
+ "grad_norm": 21.353286743164062,
+ "learning_rate": 4.9792429792429796e-05,
+ "loss": 0.698,
+ "step": 692
+ },
+ {
+ "epoch": 2.5384615384615383,
+ "grad_norm": 35.96653366088867,
+ "learning_rate": 4.976800976800977e-05,
+ "loss": 1.6584,
+ "step": 693
+ },
+ {
+ "epoch": 2.542124542124542,
+ "grad_norm": 19.14348793029785,
+ "learning_rate": 4.9743589743589746e-05,
+ "loss": 0.885,
+ "step": 694
+ },
+ {
+ "epoch": 2.5457875457875456,
+ "grad_norm": 9.260897636413574,
+ "learning_rate": 4.971916971916972e-05,
+ "loss": 0.1629,
+ "step": 695
+ },
+ {
+ "epoch": 2.5494505494505493,
+ "grad_norm": 18.497526168823242,
+ "learning_rate": 4.9694749694749696e-05,
+ "loss": 0.7242,
+ "step": 696
+ },
+ {
+ "epoch": 2.553113553113553,
+ "grad_norm": 8.879841804504395,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 0.1302,
+ "step": 697
+ },
+ {
+ "epoch": 2.5567765567765566,
+ "grad_norm": 26.34065818786621,
+ "learning_rate": 4.9645909645909646e-05,
+ "loss": 0.7333,
+ "step": 698
+ },
+ {
+ "epoch": 2.5604395604395602,
+ "grad_norm": 15.10546588897705,
+ "learning_rate": 4.9621489621489624e-05,
+ "loss": 0.3119,
+ "step": 699
+ },
+ {
+ "epoch": 2.564102564102564,
+ "grad_norm": 10.68095874786377,
+ "learning_rate": 4.9597069597069596e-05,
+ "loss": 0.2505,
+ "step": 700
+ },
+ {
+ "epoch": 2.5677655677655675,
+ "grad_norm": 29.08888053894043,
+ "learning_rate": 4.9572649572649575e-05,
+ "loss": 0.4286,
+ "step": 701
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 29.939416885375977,
+ "learning_rate": 4.954822954822955e-05,
+ "loss": 1.1529,
+ "step": 702
+ },
+ {
+ "epoch": 2.575091575091575,
+ "grad_norm": 32.78864669799805,
+ "learning_rate": 4.9523809523809525e-05,
+ "loss": 0.9834,
+ "step": 703
+ },
+ {
+ "epoch": 2.578754578754579,
+ "grad_norm": 13.99082088470459,
+ "learning_rate": 4.94993894993895e-05,
+ "loss": 0.1934,
+ "step": 704
+ },
+ {
+ "epoch": 2.5824175824175826,
+ "grad_norm": 31.696718215942383,
+ "learning_rate": 4.9474969474969475e-05,
+ "loss": 0.6881,
+ "step": 705
+ },
+ {
+ "epoch": 2.586080586080586,
+ "grad_norm": 39.26205062866211,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.573,
+ "step": 706
+ },
+ {
+ "epoch": 2.58974358974359,
+ "grad_norm": 42.08647918701172,
+ "learning_rate": 4.9426129426129425e-05,
+ "loss": 1.5935,
+ "step": 707
+ },
+ {
+ "epoch": 2.5934065934065935,
+ "grad_norm": 24.630651473999023,
+ "learning_rate": 4.94017094017094e-05,
+ "loss": 0.7016,
+ "step": 708
+ },
+ {
+ "epoch": 2.597069597069597,
+ "grad_norm": 35.33428192138672,
+ "learning_rate": 4.9377289377289375e-05,
+ "loss": 0.9646,
+ "step": 709
+ },
+ {
+ "epoch": 2.600732600732601,
+ "grad_norm": 21.643918991088867,
+ "learning_rate": 4.9352869352869353e-05,
+ "loss": 0.3679,
+ "step": 710
+ },
+ {
+ "epoch": 2.6043956043956045,
+ "grad_norm": 10.6254301071167,
+ "learning_rate": 4.932844932844933e-05,
+ "loss": 0.1059,
+ "step": 711
+ },
+ {
+ "epoch": 2.608058608058608,
+ "grad_norm": 23.43462562561035,
+ "learning_rate": 4.9304029304029304e-05,
+ "loss": 0.5128,
+ "step": 712
+ },
+ {
+ "epoch": 2.6117216117216118,
+ "grad_norm": 25.748422622680664,
+ "learning_rate": 4.927960927960928e-05,
+ "loss": 0.6154,
+ "step": 713
+ },
+ {
+ "epoch": 2.6153846153846154,
+ "grad_norm": 23.163209915161133,
+ "learning_rate": 4.925518925518926e-05,
+ "loss": 0.3978,
+ "step": 714
+ },
+ {
+ "epoch": 2.619047619047619,
+ "grad_norm": 22.306194305419922,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.3984,
+ "step": 715
+ },
+ {
+ "epoch": 2.6227106227106227,
+ "grad_norm": 48.16558074951172,
+ "learning_rate": 4.920634920634921e-05,
+ "loss": 0.9568,
+ "step": 716
+ },
+ {
+ "epoch": 2.6263736263736264,
+ "grad_norm": 48.76753234863281,
+ "learning_rate": 4.918192918192919e-05,
+ "loss": 0.6579,
+ "step": 717
+ },
+ {
+ "epoch": 2.63003663003663,
+ "grad_norm": 57.938720703125,
+ "learning_rate": 4.9157509157509154e-05,
+ "loss": 1.0926,
+ "step": 718
+ },
+ {
+ "epoch": 2.6336996336996337,
+ "grad_norm": 25.495267868041992,
+ "learning_rate": 4.913308913308913e-05,
+ "loss": 0.3717,
+ "step": 719
+ },
+ {
+ "epoch": 2.6373626373626373,
+ "grad_norm": 20.054609298706055,
+ "learning_rate": 4.910866910866911e-05,
+ "loss": 0.4502,
+ "step": 720
+ },
+ {
+ "epoch": 2.641025641025641,
+ "grad_norm": 23.096263885498047,
+ "learning_rate": 4.908424908424908e-05,
+ "loss": 0.2794,
+ "step": 721
+ },
+ {
+ "epoch": 2.6446886446886446,
+ "grad_norm": 6.073278903961182,
+ "learning_rate": 4.905982905982906e-05,
+ "loss": 0.0519,
+ "step": 722
+ },
+ {
+ "epoch": 2.6483516483516483,
+ "grad_norm": 38.562618255615234,
+ "learning_rate": 4.903540903540903e-05,
+ "loss": 0.8839,
+ "step": 723
+ },
+ {
+ "epoch": 2.652014652014652,
+ "grad_norm": 23.544757843017578,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.3935,
+ "step": 724
+ },
+ {
+ "epoch": 2.6556776556776556,
+ "grad_norm": 22.844032287597656,
+ "learning_rate": 4.898656898656899e-05,
+ "loss": 0.2428,
+ "step": 725
+ },
+ {
+ "epoch": 2.659340659340659,
+ "grad_norm": 11.537687301635742,
+ "learning_rate": 4.896214896214896e-05,
+ "loss": 0.1538,
+ "step": 726
+ },
+ {
+ "epoch": 2.663003663003663,
+ "grad_norm": 59.37337112426758,
+ "learning_rate": 4.893772893772894e-05,
+ "loss": 1.181,
+ "step": 727
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 22.206314086914062,
+ "learning_rate": 4.891330891330892e-05,
+ "loss": 0.4044,
+ "step": 728
+ },
+ {
+ "epoch": 2.67032967032967,
+ "grad_norm": 27.44620132446289,
+ "learning_rate": 4.888888888888889e-05,
+ "loss": 0.585,
+ "step": 729
+ },
+ {
+ "epoch": 2.6739926739926743,
+ "grad_norm": 35.70675277709961,
+ "learning_rate": 4.886446886446887e-05,
+ "loss": 0.6853,
+ "step": 730
+ },
+ {
+ "epoch": 2.677655677655678,
+ "grad_norm": 25.653356552124023,
+ "learning_rate": 4.884004884004884e-05,
+ "loss": 0.6143,
+ "step": 731
+ },
+ {
+ "epoch": 2.6813186813186816,
+ "grad_norm": 24.242090225219727,
+ "learning_rate": 4.881562881562881e-05,
+ "loss": 0.4365,
+ "step": 732
+ },
+ {
+ "epoch": 2.684981684981685,
+ "grad_norm": 25.621902465820312,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.6644,
+ "step": 733
+ },
+ {
+ "epoch": 2.688644688644689,
+ "grad_norm": 14.14786434173584,
+ "learning_rate": 4.876678876678877e-05,
+ "loss": 0.4117,
+ "step": 734
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 37.98638916015625,
+ "learning_rate": 4.874236874236874e-05,
+ "loss": 1.0452,
+ "step": 735
+ },
+ {
+ "epoch": 2.695970695970696,
+ "grad_norm": 23.186302185058594,
+ "learning_rate": 4.871794871794872e-05,
+ "loss": 0.2642,
+ "step": 736
+ },
+ {
+ "epoch": 2.6996336996337,
+ "grad_norm": 27.23651695251465,
+ "learning_rate": 4.86935286935287e-05,
+ "loss": 0.393,
+ "step": 737
+ },
+ {
+ "epoch": 2.7032967032967035,
+ "grad_norm": 36.44395446777344,
+ "learning_rate": 4.866910866910867e-05,
+ "loss": 1.1309,
+ "step": 738
+ },
+ {
+ "epoch": 2.706959706959707,
+ "grad_norm": 9.733710289001465,
+ "learning_rate": 4.864468864468865e-05,
+ "loss": 0.2466,
+ "step": 739
+ },
+ {
+ "epoch": 2.7106227106227108,
+ "grad_norm": 24.727527618408203,
+ "learning_rate": 4.8620268620268626e-05,
+ "loss": 0.46,
+ "step": 740
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 15.122056007385254,
+ "learning_rate": 4.85958485958486e-05,
+ "loss": 0.3122,
+ "step": 741
+ },
+ {
+ "epoch": 2.717948717948718,
+ "grad_norm": 24.059120178222656,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.2359,
+ "step": 742
+ },
+ {
+ "epoch": 2.7216117216117217,
+ "grad_norm": 7.659122467041016,
+ "learning_rate": 4.8547008547008554e-05,
+ "loss": 0.1212,
+ "step": 743
+ },
+ {
+ "epoch": 2.7252747252747254,
+ "grad_norm": 27.002117156982422,
+ "learning_rate": 4.852258852258852e-05,
+ "loss": 0.7593,
+ "step": 744
+ },
+ {
+ "epoch": 2.728937728937729,
+ "grad_norm": 6.3852009773254395,
+ "learning_rate": 4.84981684981685e-05,
+ "loss": 0.0644,
+ "step": 745
+ },
+ {
+ "epoch": 2.7326007326007327,
+ "grad_norm": 25.574190139770508,
+ "learning_rate": 4.8473748473748476e-05,
+ "loss": 0.7012,
+ "step": 746
+ },
+ {
+ "epoch": 2.7362637362637363,
+ "grad_norm": 15.720768928527832,
+ "learning_rate": 4.844932844932845e-05,
+ "loss": 0.2692,
+ "step": 747
+ },
+ {
+ "epoch": 2.73992673992674,
+ "grad_norm": 25.527997970581055,
+ "learning_rate": 4.8424908424908426e-05,
+ "loss": 0.2648,
+ "step": 748
+ },
+ {
+ "epoch": 2.7435897435897436,
+ "grad_norm": 27.791011810302734,
+ "learning_rate": 4.84004884004884e-05,
+ "loss": 0.6007,
+ "step": 749
+ },
+ {
+ "epoch": 2.7472527472527473,
+ "grad_norm": 20.487640380859375,
+ "learning_rate": 4.8376068376068376e-05,
+ "loss": 0.5715,
+ "step": 750
+ },
+ {
+ "epoch": 2.750915750915751,
+ "grad_norm": 6.386992454528809,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 0.06,
+ "step": 751
+ },
+ {
+ "epoch": 2.7545787545787546,
+ "grad_norm": 13.110812187194824,
+ "learning_rate": 4.8327228327228327e-05,
+ "loss": 0.129,
+ "step": 752
+ },
+ {
+ "epoch": 2.758241758241758,
+ "grad_norm": 26.55845832824707,
+ "learning_rate": 4.8302808302808305e-05,
+ "loss": 0.67,
+ "step": 753
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 38.83135223388672,
+ "learning_rate": 4.8278388278388283e-05,
+ "loss": 1.6656,
+ "step": 754
+ },
+ {
+ "epoch": 2.7655677655677655,
+ "grad_norm": 25.99518585205078,
+ "learning_rate": 4.8253968253968255e-05,
+ "loss": 0.3285,
+ "step": 755
+ },
+ {
+ "epoch": 2.769230769230769,
+ "grad_norm": 17.282081604003906,
+ "learning_rate": 4.8229548229548234e-05,
+ "loss": 0.2217,
+ "step": 756
+ },
+ {
+ "epoch": 2.772893772893773,
+ "grad_norm": 28.849924087524414,
+ "learning_rate": 4.8205128205128205e-05,
+ "loss": 0.7287,
+ "step": 757
+ },
+ {
+ "epoch": 2.7765567765567765,
+ "grad_norm": 45.79567337036133,
+ "learning_rate": 4.818070818070818e-05,
+ "loss": 1.6964,
+ "step": 758
+ },
+ {
+ "epoch": 2.78021978021978,
+ "grad_norm": 15.203421592712402,
+ "learning_rate": 4.8156288156288155e-05,
+ "loss": 0.2351,
+ "step": 759
+ },
+ {
+ "epoch": 2.7838827838827838,
+ "grad_norm": 10.686698913574219,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.1533,
+ "step": 760
+ },
+ {
+ "epoch": 2.7875457875457874,
+ "grad_norm": 24.186473846435547,
+ "learning_rate": 4.8107448107448106e-05,
+ "loss": 1.0973,
+ "step": 761
+ },
+ {
+ "epoch": 2.791208791208791,
+ "grad_norm": 25.378986358642578,
+ "learning_rate": 4.8083028083028084e-05,
+ "loss": 0.5847,
+ "step": 762
+ },
+ {
+ "epoch": 2.7948717948717947,
+ "grad_norm": 20.066482543945312,
+ "learning_rate": 4.805860805860806e-05,
+ "loss": 0.2643,
+ "step": 763
+ },
+ {
+ "epoch": 2.7985347985347984,
+ "grad_norm": 56.11622619628906,
+ "learning_rate": 4.8034188034188034e-05,
+ "loss": 0.6949,
+ "step": 764
+ },
+ {
+ "epoch": 2.802197802197802,
+ "grad_norm": 27.80112648010254,
+ "learning_rate": 4.800976800976801e-05,
+ "loss": 0.5622,
+ "step": 765
+ },
+ {
+ "epoch": 2.8058608058608057,
+ "grad_norm": 30.947532653808594,
+ "learning_rate": 4.798534798534799e-05,
+ "loss": 0.6276,
+ "step": 766
+ },
+ {
+ "epoch": 2.8095238095238093,
+ "grad_norm": 8.91073226928711,
+ "learning_rate": 4.796092796092796e-05,
+ "loss": 0.1302,
+ "step": 767
+ },
+ {
+ "epoch": 2.813186813186813,
+ "grad_norm": 24.65394401550293,
+ "learning_rate": 4.793650793650794e-05,
+ "loss": 0.6811,
+ "step": 768
+ },
+ {
+ "epoch": 2.8168498168498166,
+ "grad_norm": 18.257539749145508,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 0.271,
+ "step": 769
+ },
+ {
+ "epoch": 2.8205128205128203,
+ "grad_norm": 41.41588592529297,
+ "learning_rate": 4.7887667887667884e-05,
+ "loss": 1.4149,
+ "step": 770
+ },
+ {
+ "epoch": 2.824175824175824,
+ "grad_norm": 7.753188610076904,
+ "learning_rate": 4.786324786324786e-05,
+ "loss": 0.0825,
+ "step": 771
+ },
+ {
+ "epoch": 2.8278388278388276,
+ "grad_norm": 208.88290405273438,
+ "learning_rate": 4.783882783882784e-05,
+ "loss": 1.032,
+ "step": 772
+ },
+ {
+ "epoch": 2.8315018315018317,
+ "grad_norm": 31.91672706604004,
+ "learning_rate": 4.781440781440781e-05,
+ "loss": 0.9783,
+ "step": 773
+ },
+ {
+ "epoch": 2.8351648351648353,
+ "grad_norm": 5.72416877746582,
+ "learning_rate": 4.778998778998779e-05,
+ "loss": 0.0399,
+ "step": 774
+ },
+ {
+ "epoch": 2.838827838827839,
+ "grad_norm": 30.503149032592773,
+ "learning_rate": 4.776556776556776e-05,
+ "loss": 0.6465,
+ "step": 775
+ },
+ {
+ "epoch": 2.8424908424908426,
+ "grad_norm": 29.615020751953125,
+ "learning_rate": 4.774114774114774e-05,
+ "loss": 0.5823,
+ "step": 776
+ },
+ {
+ "epoch": 2.8461538461538463,
+ "grad_norm": 49.922611236572266,
+ "learning_rate": 4.771672771672772e-05,
+ "loss": 1.2045,
+ "step": 777
+ },
+ {
+ "epoch": 2.84981684981685,
+ "grad_norm": 23.30948829650879,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.5962,
+ "step": 778
+ },
+ {
+ "epoch": 2.8534798534798536,
+ "grad_norm": 24.784086227416992,
+ "learning_rate": 4.766788766788767e-05,
+ "loss": 0.5702,
+ "step": 779
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 30.03589630126953,
+ "learning_rate": 4.764346764346765e-05,
+ "loss": 0.8644,
+ "step": 780
+ },
+ {
+ "epoch": 2.860805860805861,
+ "grad_norm": 21.079742431640625,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 0.2304,
+ "step": 781
+ },
+ {
+ "epoch": 2.8644688644688645,
+ "grad_norm": 18.438365936279297,
+ "learning_rate": 4.75946275946276e-05,
+ "loss": 0.6457,
+ "step": 782
+ },
+ {
+ "epoch": 2.868131868131868,
+ "grad_norm": 16.265140533447266,
+ "learning_rate": 4.757020757020757e-05,
+ "loss": 0.3693,
+ "step": 783
+ },
+ {
+ "epoch": 2.871794871794872,
+ "grad_norm": 17.526954650878906,
+ "learning_rate": 4.754578754578754e-05,
+ "loss": 0.2614,
+ "step": 784
+ },
+ {
+ "epoch": 2.8754578754578755,
+ "grad_norm": 39.94060134887695,
+ "learning_rate": 4.752136752136752e-05,
+ "loss": 0.2829,
+ "step": 785
+ },
+ {
+ "epoch": 2.879120879120879,
+ "grad_norm": 10.09298324584961,
+ "learning_rate": 4.74969474969475e-05,
+ "loss": 0.1489,
+ "step": 786
+ },
+ {
+ "epoch": 2.8827838827838828,
+ "grad_norm": 29.092544555664062,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6063,
+ "step": 787
+ },
+ {
+ "epoch": 2.8864468864468864,
+ "grad_norm": 30.071422576904297,
+ "learning_rate": 4.744810744810745e-05,
+ "loss": 0.3154,
+ "step": 788
+ },
+ {
+ "epoch": 2.89010989010989,
+ "grad_norm": 26.271251678466797,
+ "learning_rate": 4.742368742368743e-05,
+ "loss": 0.4548,
+ "step": 789
+ },
+ {
+ "epoch": 2.8937728937728937,
+ "grad_norm": 32.386775970458984,
+ "learning_rate": 4.73992673992674e-05,
+ "loss": 0.1872,
+ "step": 790
+ },
+ {
+ "epoch": 2.8974358974358974,
+ "grad_norm": 31.18532943725586,
+ "learning_rate": 4.737484737484738e-05,
+ "loss": 0.847,
+ "step": 791
+ },
+ {
+ "epoch": 2.901098901098901,
+ "grad_norm": 17.924785614013672,
+ "learning_rate": 4.7350427350427356e-05,
+ "loss": 0.1588,
+ "step": 792
+ },
+ {
+ "epoch": 2.9047619047619047,
+ "grad_norm": 16.458614349365234,
+ "learning_rate": 4.732600732600733e-05,
+ "loss": 0.1424,
+ "step": 793
+ },
+ {
+ "epoch": 2.9084249084249083,
+ "grad_norm": 50.29280471801758,
+ "learning_rate": 4.7301587301587306e-05,
+ "loss": 1.5482,
+ "step": 794
+ },
+ {
+ "epoch": 2.912087912087912,
+ "grad_norm": 58.37470245361328,
+ "learning_rate": 4.727716727716728e-05,
+ "loss": 1.8242,
+ "step": 795
+ },
+ {
+ "epoch": 2.9157509157509156,
+ "grad_norm": 32.5267448425293,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 1.1197,
+ "step": 796
+ },
+ {
+ "epoch": 2.9194139194139193,
+ "grad_norm": 43.77764892578125,
+ "learning_rate": 4.722832722832723e-05,
+ "loss": 0.7322,
+ "step": 797
+ },
+ {
+ "epoch": 2.9230769230769234,
+ "grad_norm": 25.303524017333984,
+ "learning_rate": 4.720390720390721e-05,
+ "loss": 0.6557,
+ "step": 798
+ },
+ {
+ "epoch": 2.926739926739927,
+ "grad_norm": 23.90159797668457,
+ "learning_rate": 4.717948717948718e-05,
+ "loss": 0.2669,
+ "step": 799
+ },
+ {
+ "epoch": 2.9304029304029307,
+ "grad_norm": 21.20945930480957,
+ "learning_rate": 4.715506715506716e-05,
+ "loss": 0.3279,
+ "step": 800
+ },
+ {
+ "epoch": 2.9340659340659343,
+ "grad_norm": 28.819482803344727,
+ "learning_rate": 4.713064713064713e-05,
+ "loss": 0.717,
+ "step": 801
+ },
+ {
+ "epoch": 2.937728937728938,
+ "grad_norm": 9.13611125946045,
+ "learning_rate": 4.710622710622711e-05,
+ "loss": 0.1291,
+ "step": 802
+ },
+ {
+ "epoch": 2.9413919413919416,
+ "grad_norm": 22.16252326965332,
+ "learning_rate": 4.7081807081807085e-05,
+ "loss": 0.4406,
+ "step": 803
+ },
+ {
+ "epoch": 2.9450549450549453,
+ "grad_norm": 47.73503112792969,
+ "learning_rate": 4.705738705738706e-05,
+ "loss": 0.6176,
+ "step": 804
+ },
+ {
+ "epoch": 2.948717948717949,
+ "grad_norm": 61.73493576049805,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.581,
+ "step": 805
+ },
+ {
+ "epoch": 2.9523809523809526,
+ "grad_norm": 22.48004722595215,
+ "learning_rate": 4.7008547008547014e-05,
+ "loss": 0.7404,
+ "step": 806
+ },
+ {
+ "epoch": 2.956043956043956,
+ "grad_norm": 54.2432746887207,
+ "learning_rate": 4.6984126984126986e-05,
+ "loss": 1.1522,
+ "step": 807
+ },
+ {
+ "epoch": 2.95970695970696,
+ "grad_norm": 26.221921920776367,
+ "learning_rate": 4.695970695970696e-05,
+ "loss": 0.4869,
+ "step": 808
+ },
+ {
+ "epoch": 2.9633699633699635,
+ "grad_norm": 21.688526153564453,
+ "learning_rate": 4.6935286935286936e-05,
+ "loss": 0.6639,
+ "step": 809
+ },
+ {
+ "epoch": 2.967032967032967,
+ "grad_norm": 5.81218147277832,
+ "learning_rate": 4.691086691086691e-05,
+ "loss": 0.0824,
+ "step": 810
+ },
+ {
+ "epoch": 2.970695970695971,
+ "grad_norm": 39.09580612182617,
+ "learning_rate": 4.6886446886446886e-05,
+ "loss": 1.5035,
+ "step": 811
+ },
+ {
+ "epoch": 2.9743589743589745,
+ "grad_norm": 24.587574005126953,
+ "learning_rate": 4.6862026862026864e-05,
+ "loss": 1.1107,
+ "step": 812
+ },
+ {
+ "epoch": 2.978021978021978,
+ "grad_norm": 25.25336265563965,
+ "learning_rate": 4.6837606837606836e-05,
+ "loss": 0.7764,
+ "step": 813
+ },
+ {
+ "epoch": 2.9816849816849818,
+ "grad_norm": 16.311378479003906,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.4079,
+ "step": 814
+ },
+ {
+ "epoch": 2.9853479853479854,
+ "grad_norm": 19.0888729095459,
+ "learning_rate": 4.678876678876679e-05,
+ "loss": 0.5259,
+ "step": 815
+ },
+ {
+ "epoch": 2.989010989010989,
+ "grad_norm": 24.599462509155273,
+ "learning_rate": 4.6764346764346765e-05,
+ "loss": 0.7475,
+ "step": 816
+ },
+ {
+ "epoch": 2.9926739926739927,
+ "grad_norm": 20.4777889251709,
+ "learning_rate": 4.673992673992674e-05,
+ "loss": 0.356,
+ "step": 817
+ },
+ {
+ "epoch": 2.9963369963369964,
+ "grad_norm": 30.4327449798584,
+ "learning_rate": 4.671550671550672e-05,
+ "loss": 0.7958,
+ "step": 818
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 25.57271385192871,
+ "learning_rate": 4.669108669108669e-05,
+ "loss": 0.3918,
+ "step": 819
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 3.9672563076019287,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 0.0469,
+ "step": 820
+ },
+ {
+ "epoch": 3.0073260073260073,
+ "grad_norm": 6.657567501068115,
+ "learning_rate": 4.664224664224664e-05,
+ "loss": 0.0939,
+ "step": 821
+ },
+ {
+ "epoch": 3.010989010989011,
+ "grad_norm": 12.558409690856934,
+ "learning_rate": 4.6617826617826615e-05,
+ "loss": 0.1578,
+ "step": 822
+ },
+ {
+ "epoch": 3.0146520146520146,
+ "grad_norm": 18.909244537353516,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.3209,
+ "step": 823
+ },
+ {
+ "epoch": 3.0183150183150182,
+ "grad_norm": 10.995687484741211,
+ "learning_rate": 4.656898656898657e-05,
+ "loss": 0.1198,
+ "step": 824
+ },
+ {
+ "epoch": 3.021978021978022,
+ "grad_norm": 16.14252471923828,
+ "learning_rate": 4.6544566544566544e-05,
+ "loss": 0.1431,
+ "step": 825
+ },
+ {
+ "epoch": 3.0256410256410255,
+ "grad_norm": 25.924381256103516,
+ "learning_rate": 4.652014652014652e-05,
+ "loss": 0.3989,
+ "step": 826
+ },
+ {
+ "epoch": 3.029304029304029,
+ "grad_norm": 4.87798547744751,
+ "learning_rate": 4.6495726495726494e-05,
+ "loss": 0.0472,
+ "step": 827
+ },
+ {
+ "epoch": 3.032967032967033,
+ "grad_norm": 15.078110694885254,
+ "learning_rate": 4.647130647130647e-05,
+ "loss": 0.1955,
+ "step": 828
+ },
+ {
+ "epoch": 3.0366300366300365,
+ "grad_norm": 19.74415397644043,
+ "learning_rate": 4.644688644688645e-05,
+ "loss": 0.1593,
+ "step": 829
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 43.4788818359375,
+ "learning_rate": 4.642246642246642e-05,
+ "loss": 0.7917,
+ "step": 830
+ },
+ {
+ "epoch": 3.043956043956044,
+ "grad_norm": 27.122041702270508,
+ "learning_rate": 4.63980463980464e-05,
+ "loss": 0.1693,
+ "step": 831
+ },
+ {
+ "epoch": 3.0476190476190474,
+ "grad_norm": 9.51154899597168,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 0.0806,
+ "step": 832
+ },
+ {
+ "epoch": 3.051282051282051,
+ "grad_norm": 11.48532772064209,
+ "learning_rate": 4.634920634920635e-05,
+ "loss": 0.0815,
+ "step": 833
+ },
+ {
+ "epoch": 3.0549450549450547,
+ "grad_norm": 13.547063827514648,
+ "learning_rate": 4.632478632478632e-05,
+ "loss": 0.0817,
+ "step": 834
+ },
+ {
+ "epoch": 3.0586080586080584,
+ "grad_norm": 24.334409713745117,
+ "learning_rate": 4.63003663003663e-05,
+ "loss": 0.547,
+ "step": 835
+ },
+ {
+ "epoch": 3.062271062271062,
+ "grad_norm": 87.3517837524414,
+ "learning_rate": 4.627594627594627e-05,
+ "loss": 0.6534,
+ "step": 836
+ },
+ {
+ "epoch": 3.065934065934066,
+ "grad_norm": 16.100278854370117,
+ "learning_rate": 4.625152625152625e-05,
+ "loss": 0.2961,
+ "step": 837
+ },
+ {
+ "epoch": 3.06959706959707,
+ "grad_norm": 20.725875854492188,
+ "learning_rate": 4.622710622710623e-05,
+ "loss": 0.1114,
+ "step": 838
+ },
+ {
+ "epoch": 3.0732600732600734,
+ "grad_norm": 53.809722900390625,
+ "learning_rate": 4.62026862026862e-05,
+ "loss": 0.3808,
+ "step": 839
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 3.237959623336792,
+ "learning_rate": 4.617826617826618e-05,
+ "loss": 0.019,
+ "step": 840
+ },
+ {
+ "epoch": 3.0805860805860807,
+ "grad_norm": 69.71659088134766,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 1.0945,
+ "step": 841
+ },
+ {
+ "epoch": 3.0842490842490844,
+ "grad_norm": 31.005935668945312,
+ "learning_rate": 4.612942612942613e-05,
+ "loss": 0.3241,
+ "step": 842
+ },
+ {
+ "epoch": 3.087912087912088,
+ "grad_norm": 66.98394775390625,
+ "learning_rate": 4.610500610500611e-05,
+ "loss": 1.0213,
+ "step": 843
+ },
+ {
+ "epoch": 3.0915750915750917,
+ "grad_norm": 23.54532814025879,
+ "learning_rate": 4.608058608058609e-05,
+ "loss": 0.2188,
+ "step": 844
+ },
+ {
+ "epoch": 3.0952380952380953,
+ "grad_norm": 25.952709197998047,
+ "learning_rate": 4.605616605616606e-05,
+ "loss": 0.4305,
+ "step": 845
+ },
+ {
+ "epoch": 3.098901098901099,
+ "grad_norm": 36.100746154785156,
+ "learning_rate": 4.603174603174604e-05,
+ "loss": 0.6497,
+ "step": 846
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 60.34727478027344,
+ "learning_rate": 4.600732600732601e-05,
+ "loss": 0.3083,
+ "step": 847
+ },
+ {
+ "epoch": 3.1062271062271063,
+ "grad_norm": 35.265167236328125,
+ "learning_rate": 4.598290598290598e-05,
+ "loss": 0.3222,
+ "step": 848
+ },
+ {
+ "epoch": 3.10989010989011,
+ "grad_norm": 19.180070877075195,
+ "learning_rate": 4.595848595848596e-05,
+ "loss": 0.4065,
+ "step": 849
+ },
+ {
+ "epoch": 3.1135531135531136,
+ "grad_norm": 22.92152976989746,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.3998,
+ "step": 850
+ },
+ {
+ "epoch": 3.1172161172161172,
+ "grad_norm": 48.91377639770508,
+ "learning_rate": 4.590964590964591e-05,
+ "loss": 0.7035,
+ "step": 851
+ },
+ {
+ "epoch": 3.120879120879121,
+ "grad_norm": 11.615083694458008,
+ "learning_rate": 4.588522588522589e-05,
+ "loss": 0.3102,
+ "step": 852
+ },
+ {
+ "epoch": 3.1245421245421245,
+ "grad_norm": 23.573801040649414,
+ "learning_rate": 4.586080586080586e-05,
+ "loss": 0.3358,
+ "step": 853
+ },
+ {
+ "epoch": 3.128205128205128,
+ "grad_norm": 16.903776168823242,
+ "learning_rate": 4.583638583638584e-05,
+ "loss": 0.2973,
+ "step": 854
+ },
+ {
+ "epoch": 3.131868131868132,
+ "grad_norm": 6.052688121795654,
+ "learning_rate": 4.5811965811965816e-05,
+ "loss": 0.0671,
+ "step": 855
+ },
+ {
+ "epoch": 3.1355311355311355,
+ "grad_norm": 34.40020751953125,
+ "learning_rate": 4.578754578754579e-05,
+ "loss": 0.508,
+ "step": 856
+ },
+ {
+ "epoch": 3.139194139194139,
+ "grad_norm": 21.39589500427246,
+ "learning_rate": 4.5763125763125766e-05,
+ "loss": 0.0805,
+ "step": 857
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 24.03894805908203,
+ "learning_rate": 4.5738705738705744e-05,
+ "loss": 0.1884,
+ "step": 858
+ },
+ {
+ "epoch": 3.1465201465201464,
+ "grad_norm": 66.53777313232422,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 0.5235,
+ "step": 859
+ },
+ {
+ "epoch": 3.15018315018315,
+ "grad_norm": 33.663490295410156,
+ "learning_rate": 4.568986568986569e-05,
+ "loss": 0.7579,
+ "step": 860
+ },
+ {
+ "epoch": 3.1538461538461537,
+ "grad_norm": 30.173309326171875,
+ "learning_rate": 4.5665445665445666e-05,
+ "loss": 0.2263,
+ "step": 861
+ },
+ {
+ "epoch": 3.1575091575091574,
+ "grad_norm": 37.52082824707031,
+ "learning_rate": 4.564102564102564e-05,
+ "loss": 0.5695,
+ "step": 862
+ },
+ {
+ "epoch": 3.161172161172161,
+ "grad_norm": 38.86849594116211,
+ "learning_rate": 4.5616605616605616e-05,
+ "loss": 0.6981,
+ "step": 863
+ },
+ {
+ "epoch": 3.1648351648351647,
+ "grad_norm": 42.702247619628906,
+ "learning_rate": 4.5592185592185595e-05,
+ "loss": 0.9864,
+ "step": 864
+ },
+ {
+ "epoch": 3.1684981684981683,
+ "grad_norm": 16.60870361328125,
+ "learning_rate": 4.5567765567765566e-05,
+ "loss": 0.1595,
+ "step": 865
+ },
+ {
+ "epoch": 3.172161172161172,
+ "grad_norm": 26.309768676757812,
+ "learning_rate": 4.5543345543345545e-05,
+ "loss": 0.4028,
+ "step": 866
+ },
+ {
+ "epoch": 3.1758241758241756,
+ "grad_norm": 45.7955322265625,
+ "learning_rate": 4.551892551892552e-05,
+ "loss": 1.1258,
+ "step": 867
+ },
+ {
+ "epoch": 3.1794871794871793,
+ "grad_norm": 25.780302047729492,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.4018,
+ "step": 868
+ },
+ {
+ "epoch": 3.183150183150183,
+ "grad_norm": 41.65156555175781,
+ "learning_rate": 4.5470085470085474e-05,
+ "loss": 0.4543,
+ "step": 869
+ },
+ {
+ "epoch": 3.186813186813187,
+ "grad_norm": 56.92537307739258,
+ "learning_rate": 4.544566544566545e-05,
+ "loss": 0.334,
+ "step": 870
+ },
+ {
+ "epoch": 3.1904761904761907,
+ "grad_norm": 19.44786262512207,
+ "learning_rate": 4.5421245421245424e-05,
+ "loss": 0.2855,
+ "step": 871
+ },
+ {
+ "epoch": 3.1941391941391943,
+ "grad_norm": 19.75824546813965,
+ "learning_rate": 4.53968253968254e-05,
+ "loss": 0.2589,
+ "step": 872
+ },
+ {
+ "epoch": 3.197802197802198,
+ "grad_norm": 30.935569763183594,
+ "learning_rate": 4.5372405372405374e-05,
+ "loss": 0.5083,
+ "step": 873
+ },
+ {
+ "epoch": 3.2014652014652016,
+ "grad_norm": 32.59378433227539,
+ "learning_rate": 4.5347985347985345e-05,
+ "loss": 0.6806,
+ "step": 874
+ },
+ {
+ "epoch": 3.2051282051282053,
+ "grad_norm": 32.7809944152832,
+ "learning_rate": 4.5323565323565324e-05,
+ "loss": 0.7094,
+ "step": 875
+ },
+ {
+ "epoch": 3.208791208791209,
+ "grad_norm": 22.95226287841797,
+ "learning_rate": 4.5299145299145296e-05,
+ "loss": 0.3871,
+ "step": 876
+ },
+ {
+ "epoch": 3.2124542124542126,
+ "grad_norm": 13.90613079071045,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.2049,
+ "step": 877
+ },
+ {
+ "epoch": 3.2161172161172162,
+ "grad_norm": 36.79647445678711,
+ "learning_rate": 4.525030525030525e-05,
+ "loss": 0.959,
+ "step": 878
+ },
+ {
+ "epoch": 3.21978021978022,
+ "grad_norm": 16.770553588867188,
+ "learning_rate": 4.5225885225885224e-05,
+ "loss": 0.3061,
+ "step": 879
+ },
+ {
+ "epoch": 3.2234432234432235,
+ "grad_norm": 22.241527557373047,
+ "learning_rate": 4.52014652014652e-05,
+ "loss": 0.1961,
+ "step": 880
+ },
+ {
+ "epoch": 3.227106227106227,
+ "grad_norm": 51.097957611083984,
+ "learning_rate": 4.517704517704518e-05,
+ "loss": 0.5272,
+ "step": 881
+ },
+ {
+ "epoch": 3.230769230769231,
+ "grad_norm": 43.70039749145508,
+ "learning_rate": 4.515262515262515e-05,
+ "loss": 0.6764,
+ "step": 882
+ },
+ {
+ "epoch": 3.2344322344322345,
+ "grad_norm": 30.666664123535156,
+ "learning_rate": 4.512820512820513e-05,
+ "loss": 0.6524,
+ "step": 883
+ },
+ {
+ "epoch": 3.238095238095238,
+ "grad_norm": 16.787954330444336,
+ "learning_rate": 4.510378510378511e-05,
+ "loss": 0.178,
+ "step": 884
+ },
+ {
+ "epoch": 3.241758241758242,
+ "grad_norm": 32.14992904663086,
+ "learning_rate": 4.507936507936508e-05,
+ "loss": 0.6206,
+ "step": 885
+ },
+ {
+ "epoch": 3.2454212454212454,
+ "grad_norm": 24.926103591918945,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.4696,
+ "step": 886
+ },
+ {
+ "epoch": 3.249084249084249,
+ "grad_norm": 31.044967651367188,
+ "learning_rate": 4.503052503052503e-05,
+ "loss": 0.3021,
+ "step": 887
+ },
+ {
+ "epoch": 3.2527472527472527,
+ "grad_norm": 10.355696678161621,
+ "learning_rate": 4.5006105006105e-05,
+ "loss": 0.0784,
+ "step": 888
+ },
+ {
+ "epoch": 3.2564102564102564,
+ "grad_norm": 28.19644546508789,
+ "learning_rate": 4.498168498168498e-05,
+ "loss": 0.234,
+ "step": 889
+ },
+ {
+ "epoch": 3.26007326007326,
+ "grad_norm": 21.245389938354492,
+ "learning_rate": 4.495726495726496e-05,
+ "loss": 0.2895,
+ "step": 890
+ },
+ {
+ "epoch": 3.2637362637362637,
+ "grad_norm": 27.337587356567383,
+ "learning_rate": 4.493284493284493e-05,
+ "loss": 0.4614,
+ "step": 891
+ },
+ {
+ "epoch": 3.2673992673992673,
+ "grad_norm": 37.06135177612305,
+ "learning_rate": 4.490842490842491e-05,
+ "loss": 0.2717,
+ "step": 892
+ },
+ {
+ "epoch": 3.271062271062271,
+ "grad_norm": 26.85171890258789,
+ "learning_rate": 4.488400488400489e-05,
+ "loss": 0.4965,
+ "step": 893
+ },
+ {
+ "epoch": 3.2747252747252746,
+ "grad_norm": 41.79130935668945,
+ "learning_rate": 4.485958485958486e-05,
+ "loss": 0.4209,
+ "step": 894
+ },
+ {
+ "epoch": 3.2783882783882783,
+ "grad_norm": 32.75770950317383,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 0.5126,
+ "step": 895
+ },
+ {
+ "epoch": 3.282051282051282,
+ "grad_norm": 67.75275421142578,
+ "learning_rate": 4.481074481074482e-05,
+ "loss": 0.8257,
+ "step": 896
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 36.773319244384766,
+ "learning_rate": 4.478632478632479e-05,
+ "loss": 1.6113,
+ "step": 897
+ },
+ {
+ "epoch": 3.2893772893772892,
+ "grad_norm": 60.94101333618164,
+ "learning_rate": 4.476190476190476e-05,
+ "loss": 0.7996,
+ "step": 898
+ },
+ {
+ "epoch": 3.293040293040293,
+ "grad_norm": 45.40288162231445,
+ "learning_rate": 4.473748473748474e-05,
+ "loss": 0.7139,
+ "step": 899
+ },
+ {
+ "epoch": 3.2967032967032965,
+ "grad_norm": 27.4019718170166,
+ "learning_rate": 4.471306471306471e-05,
+ "loss": 0.4695,
+ "step": 900
+ },
+ {
+ "epoch": 3.3003663003663,
+ "grad_norm": 20.126493453979492,
+ "learning_rate": 4.468864468864469e-05,
+ "loss": 0.2181,
+ "step": 901
+ },
+ {
+ "epoch": 3.304029304029304,
+ "grad_norm": 37.28034591674805,
+ "learning_rate": 4.466422466422466e-05,
+ "loss": 0.8902,
+ "step": 902
+ },
+ {
+ "epoch": 3.3076923076923075,
+ "grad_norm": 15.40217113494873,
+ "learning_rate": 4.463980463980464e-05,
+ "loss": 0.2428,
+ "step": 903
+ },
+ {
+ "epoch": 3.311355311355311,
+ "grad_norm": 21.924699783325195,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 0.3271,
+ "step": 904
+ },
+ {
+ "epoch": 3.315018315018315,
+ "grad_norm": 29.787410736083984,
+ "learning_rate": 4.459096459096459e-05,
+ "loss": 0.5914,
+ "step": 905
+ },
+ {
+ "epoch": 3.3186813186813184,
+ "grad_norm": 16.91995620727539,
+ "learning_rate": 4.456654456654457e-05,
+ "loss": 0.3442,
+ "step": 906
+ },
+ {
+ "epoch": 3.3223443223443225,
+ "grad_norm": 13.232250213623047,
+ "learning_rate": 4.4542124542124546e-05,
+ "loss": 0.1977,
+ "step": 907
+ },
+ {
+ "epoch": 3.326007326007326,
+ "grad_norm": 25.45724868774414,
+ "learning_rate": 4.451770451770452e-05,
+ "loss": 0.8241,
+ "step": 908
+ },
+ {
+ "epoch": 3.32967032967033,
+ "grad_norm": 20.996292114257812,
+ "learning_rate": 4.4493284493284496e-05,
+ "loss": 0.3154,
+ "step": 909
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 28.150684356689453,
+ "learning_rate": 4.4468864468864475e-05,
+ "loss": 0.4077,
+ "step": 910
+ },
+ {
+ "epoch": 3.336996336996337,
+ "grad_norm": 57.184322357177734,
+ "learning_rate": 4.444444444444444e-05,
+ "loss": 0.5701,
+ "step": 911
+ },
+ {
+ "epoch": 3.340659340659341,
+ "grad_norm": 26.231369018554688,
+ "learning_rate": 4.442002442002442e-05,
+ "loss": 0.4427,
+ "step": 912
+ },
+ {
+ "epoch": 3.3443223443223444,
+ "grad_norm": 32.52253723144531,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 1.014,
+ "step": 913
+ },
+ {
+ "epoch": 3.347985347985348,
+ "grad_norm": 19.39035987854004,
+ "learning_rate": 4.437118437118437e-05,
+ "loss": 0.1567,
+ "step": 914
+ },
+ {
+ "epoch": 3.3516483516483517,
+ "grad_norm": 24.542327880859375,
+ "learning_rate": 4.434676434676435e-05,
+ "loss": 0.5478,
+ "step": 915
+ },
+ {
+ "epoch": 3.3553113553113554,
+ "grad_norm": 46.6158447265625,
+ "learning_rate": 4.4322344322344325e-05,
+ "loss": 0.5636,
+ "step": 916
+ },
+ {
+ "epoch": 3.358974358974359,
+ "grad_norm": 36.008846282958984,
+ "learning_rate": 4.42979242979243e-05,
+ "loss": 0.4401,
+ "step": 917
+ },
+ {
+ "epoch": 3.3626373626373627,
+ "grad_norm": 6.922544956207275,
+ "learning_rate": 4.4273504273504275e-05,
+ "loss": 0.0885,
+ "step": 918
+ },
+ {
+ "epoch": 3.3663003663003663,
+ "grad_norm": 25.707748413085938,
+ "learning_rate": 4.4249084249084254e-05,
+ "loss": 0.3235,
+ "step": 919
+ },
+ {
+ "epoch": 3.36996336996337,
+ "grad_norm": 47.98778533935547,
+ "learning_rate": 4.4224664224664226e-05,
+ "loss": 1.3738,
+ "step": 920
+ },
+ {
+ "epoch": 3.3736263736263736,
+ "grad_norm": 26.64824104309082,
+ "learning_rate": 4.4200244200244204e-05,
+ "loss": 0.8405,
+ "step": 921
+ },
+ {
+ "epoch": 3.3772893772893773,
+ "grad_norm": 30.66206169128418,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.3021,
+ "step": 922
+ },
+ {
+ "epoch": 3.380952380952381,
+ "grad_norm": 33.15909194946289,
+ "learning_rate": 4.4151404151404154e-05,
+ "loss": 0.3064,
+ "step": 923
+ },
+ {
+ "epoch": 3.3846153846153846,
+ "grad_norm": 78.46485137939453,
+ "learning_rate": 4.4126984126984126e-05,
+ "loss": 0.6526,
+ "step": 924
+ },
+ {
+ "epoch": 3.3882783882783882,
+ "grad_norm": 45.584747314453125,
+ "learning_rate": 4.4102564102564104e-05,
+ "loss": 0.9546,
+ "step": 925
+ },
+ {
+ "epoch": 3.391941391941392,
+ "grad_norm": 23.244487762451172,
+ "learning_rate": 4.4078144078144076e-05,
+ "loss": 0.3334,
+ "step": 926
+ },
+ {
+ "epoch": 3.3956043956043955,
+ "grad_norm": 9.296119689941406,
+ "learning_rate": 4.4053724053724054e-05,
+ "loss": 0.1045,
+ "step": 927
+ },
+ {
+ "epoch": 3.399267399267399,
+ "grad_norm": 15.207316398620605,
+ "learning_rate": 4.4029304029304026e-05,
+ "loss": 0.087,
+ "step": 928
+ },
+ {
+ "epoch": 3.402930402930403,
+ "grad_norm": 20.554912567138672,
+ "learning_rate": 4.4004884004884005e-05,
+ "loss": 0.2658,
+ "step": 929
+ },
+ {
+ "epoch": 3.4065934065934065,
+ "grad_norm": 25.304515838623047,
+ "learning_rate": 4.398046398046398e-05,
+ "loss": 0.2862,
+ "step": 930
+ },
+ {
+ "epoch": 3.41025641025641,
+ "grad_norm": 44.320377349853516,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 1.1972,
+ "step": 931
+ },
+ {
+ "epoch": 3.413919413919414,
+ "grad_norm": 21.3024845123291,
+ "learning_rate": 4.393162393162393e-05,
+ "loss": 0.2193,
+ "step": 932
+ },
+ {
+ "epoch": 3.4175824175824174,
+ "grad_norm": 12.274759292602539,
+ "learning_rate": 4.390720390720391e-05,
+ "loss": 0.1033,
+ "step": 933
+ },
+ {
+ "epoch": 3.421245421245421,
+ "grad_norm": 29.188446044921875,
+ "learning_rate": 4.388278388278388e-05,
+ "loss": 0.8143,
+ "step": 934
+ },
+ {
+ "epoch": 3.4249084249084247,
+ "grad_norm": 11.880194664001465,
+ "learning_rate": 4.385836385836386e-05,
+ "loss": 0.0932,
+ "step": 935
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 28.859825134277344,
+ "learning_rate": 4.383394383394384e-05,
+ "loss": 0.6026,
+ "step": 936
+ },
+ {
+ "epoch": 3.4322344322344325,
+ "grad_norm": 25.131824493408203,
+ "learning_rate": 4.3809523809523805e-05,
+ "loss": 0.4023,
+ "step": 937
+ },
+ {
+ "epoch": 3.435897435897436,
+ "grad_norm": 35.04637145996094,
+ "learning_rate": 4.3785103785103783e-05,
+ "loss": 0.7765,
+ "step": 938
+ },
+ {
+ "epoch": 3.4395604395604398,
+ "grad_norm": 15.831666946411133,
+ "learning_rate": 4.376068376068376e-05,
+ "loss": 0.1779,
+ "step": 939
+ },
+ {
+ "epoch": 3.4432234432234434,
+ "grad_norm": 26.455148696899414,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.3165,
+ "step": 940
+ },
+ {
+ "epoch": 3.446886446886447,
+ "grad_norm": 23.840030670166016,
+ "learning_rate": 4.371184371184371e-05,
+ "loss": 0.5363,
+ "step": 941
+ },
+ {
+ "epoch": 3.4505494505494507,
+ "grad_norm": 30.517026901245117,
+ "learning_rate": 4.368742368742369e-05,
+ "loss": 0.422,
+ "step": 942
+ },
+ {
+ "epoch": 3.4542124542124544,
+ "grad_norm": 51.574703216552734,
+ "learning_rate": 4.366300366300366e-05,
+ "loss": 1.5333,
+ "step": 943
+ },
+ {
+ "epoch": 3.457875457875458,
+ "grad_norm": 57.92119216918945,
+ "learning_rate": 4.363858363858364e-05,
+ "loss": 0.5732,
+ "step": 944
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 34.3664436340332,
+ "learning_rate": 4.361416361416362e-05,
+ "loss": 0.5054,
+ "step": 945
+ },
+ {
+ "epoch": 3.4652014652014653,
+ "grad_norm": 14.034111976623535,
+ "learning_rate": 4.358974358974359e-05,
+ "loss": 0.0969,
+ "step": 946
+ },
+ {
+ "epoch": 3.468864468864469,
+ "grad_norm": 15.058267593383789,
+ "learning_rate": 4.356532356532357e-05,
+ "loss": 0.1877,
+ "step": 947
+ },
+ {
+ "epoch": 3.4725274725274726,
+ "grad_norm": 18.598024368286133,
+ "learning_rate": 4.354090354090355e-05,
+ "loss": 0.2378,
+ "step": 948
+ },
+ {
+ "epoch": 3.4761904761904763,
+ "grad_norm": 17.926319122314453,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.2935,
+ "step": 949
+ },
+ {
+ "epoch": 3.47985347985348,
+ "grad_norm": 8.25291633605957,
+ "learning_rate": 4.349206349206349e-05,
+ "loss": 0.0891,
+ "step": 950
+ },
+ {
+ "epoch": 3.4835164835164836,
+ "grad_norm": 26.152061462402344,
+ "learning_rate": 4.346764346764347e-05,
+ "loss": 0.2798,
+ "step": 951
+ },
+ {
+ "epoch": 3.4871794871794872,
+ "grad_norm": 22.669677734375,
+ "learning_rate": 4.344322344322344e-05,
+ "loss": 0.506,
+ "step": 952
+ },
+ {
+ "epoch": 3.490842490842491,
+ "grad_norm": 18.439355850219727,
+ "learning_rate": 4.341880341880342e-05,
+ "loss": 0.3034,
+ "step": 953
+ },
+ {
+ "epoch": 3.4945054945054945,
+ "grad_norm": 30.48084259033203,
+ "learning_rate": 4.339438339438339e-05,
+ "loss": 0.4366,
+ "step": 954
+ },
+ {
+ "epoch": 3.498168498168498,
+ "grad_norm": 51.792381286621094,
+ "learning_rate": 4.336996336996337e-05,
+ "loss": 0.5214,
+ "step": 955
+ },
+ {
+ "epoch": 3.501831501831502,
+ "grad_norm": 44.70718002319336,
+ "learning_rate": 4.334554334554335e-05,
+ "loss": 0.7823,
+ "step": 956
+ },
+ {
+ "epoch": 3.5054945054945055,
+ "grad_norm": 42.00168991088867,
+ "learning_rate": 4.332112332112332e-05,
+ "loss": 0.9207,
+ "step": 957
+ },
+ {
+ "epoch": 3.509157509157509,
+ "grad_norm": 28.97800636291504,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.279,
+ "step": 958
+ },
+ {
+ "epoch": 3.5128205128205128,
+ "grad_norm": 21.902843475341797,
+ "learning_rate": 4.327228327228328e-05,
+ "loss": 0.1969,
+ "step": 959
+ },
+ {
+ "epoch": 3.5164835164835164,
+ "grad_norm": 14.560053825378418,
+ "learning_rate": 4.324786324786325e-05,
+ "loss": 0.0976,
+ "step": 960
+ },
+ {
+ "epoch": 3.52014652014652,
+ "grad_norm": 4.2637104988098145,
+ "learning_rate": 4.322344322344323e-05,
+ "loss": 0.0277,
+ "step": 961
+ },
+ {
+ "epoch": 3.5238095238095237,
+ "grad_norm": 52.4840202331543,
+ "learning_rate": 4.3199023199023205e-05,
+ "loss": 0.2967,
+ "step": 962
+ },
+ {
+ "epoch": 3.5274725274725274,
+ "grad_norm": 48.95661163330078,
+ "learning_rate": 4.317460317460317e-05,
+ "loss": 0.2904,
+ "step": 963
+ },
+ {
+ "epoch": 3.531135531135531,
+ "grad_norm": 79.46379089355469,
+ "learning_rate": 4.315018315018315e-05,
+ "loss": 0.1644,
+ "step": 964
+ },
+ {
+ "epoch": 3.5347985347985347,
+ "grad_norm": 29.678428649902344,
+ "learning_rate": 4.312576312576313e-05,
+ "loss": 0.3498,
+ "step": 965
+ },
+ {
+ "epoch": 3.5384615384615383,
+ "grad_norm": 32.71342086791992,
+ "learning_rate": 4.31013431013431e-05,
+ "loss": 0.3509,
+ "step": 966
+ },
+ {
+ "epoch": 3.542124542124542,
+ "grad_norm": 6.679911136627197,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.0658,
+ "step": 967
+ },
+ {
+ "epoch": 3.5457875457875456,
+ "grad_norm": 19.8692626953125,
+ "learning_rate": 4.3052503052503056e-05,
+ "loss": 0.1626,
+ "step": 968
+ },
+ {
+ "epoch": 3.5494505494505493,
+ "grad_norm": 17.69087791442871,
+ "learning_rate": 4.302808302808303e-05,
+ "loss": 0.2592,
+ "step": 969
+ },
+ {
+ "epoch": 3.553113553113553,
+ "grad_norm": 11.734158515930176,
+ "learning_rate": 4.3003663003663006e-05,
+ "loss": 0.1007,
+ "step": 970
+ },
+ {
+ "epoch": 3.5567765567765566,
+ "grad_norm": 34.51172637939453,
+ "learning_rate": 4.2979242979242984e-05,
+ "loss": 0.2823,
+ "step": 971
+ },
+ {
+ "epoch": 3.5604395604395602,
+ "grad_norm": 15.009514808654785,
+ "learning_rate": 4.2954822954822956e-05,
+ "loss": 0.1203,
+ "step": 972
+ },
+ {
+ "epoch": 3.564102564102564,
+ "grad_norm": 67.92166137695312,
+ "learning_rate": 4.2930402930402934e-05,
+ "loss": 0.396,
+ "step": 973
+ },
+ {
+ "epoch": 3.5677655677655675,
+ "grad_norm": 66.84014129638672,
+ "learning_rate": 4.290598290598291e-05,
+ "loss": 0.6545,
+ "step": 974
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 25.811107635498047,
+ "learning_rate": 4.2881562881562885e-05,
+ "loss": 0.1747,
+ "step": 975
+ },
+ {
+ "epoch": 3.575091575091575,
+ "grad_norm": 100.88753509521484,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 0.3991,
+ "step": 976
+ },
+ {
+ "epoch": 3.578754578754579,
+ "grad_norm": 34.51667785644531,
+ "learning_rate": 4.2832722832722835e-05,
+ "loss": 0.1365,
+ "step": 977
+ },
+ {
+ "epoch": 3.5824175824175826,
+ "grad_norm": 26.852561950683594,
+ "learning_rate": 4.2808302808302806e-05,
+ "loss": 0.3627,
+ "step": 978
+ },
+ {
+ "epoch": 3.586080586080586,
+ "grad_norm": 24.968570709228516,
+ "learning_rate": 4.2783882783882785e-05,
+ "loss": 0.2106,
+ "step": 979
+ },
+ {
+ "epoch": 3.58974358974359,
+ "grad_norm": 27.33326530456543,
+ "learning_rate": 4.2759462759462757e-05,
+ "loss": 0.1758,
+ "step": 980
+ },
+ {
+ "epoch": 3.5934065934065935,
+ "grad_norm": 52.63814926147461,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.601,
+ "step": 981
+ },
+ {
+ "epoch": 3.597069597069597,
+ "grad_norm": 37.77897262573242,
+ "learning_rate": 4.2710622710622713e-05,
+ "loss": 0.5299,
+ "step": 982
+ },
+ {
+ "epoch": 3.600732600732601,
+ "grad_norm": 27.691659927368164,
+ "learning_rate": 4.2686202686202685e-05,
+ "loss": 0.1784,
+ "step": 983
+ },
+ {
+ "epoch": 3.6043956043956045,
+ "grad_norm": 106.33782958984375,
+ "learning_rate": 4.2661782661782664e-05,
+ "loss": 0.8859,
+ "step": 984
+ },
+ {
+ "epoch": 3.608058608058608,
+ "grad_norm": 22.95706558227539,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1611,
+ "step": 985
+ },
+ {
+ "epoch": 3.6117216117216118,
+ "grad_norm": 22.72148895263672,
+ "learning_rate": 4.2612942612942614e-05,
+ "loss": 0.1561,
+ "step": 986
+ },
+ {
+ "epoch": 3.6153846153846154,
+ "grad_norm": 93.37244415283203,
+ "learning_rate": 4.258852258852259e-05,
+ "loss": 0.4287,
+ "step": 987
+ },
+ {
+ "epoch": 3.619047619047619,
+ "grad_norm": 51.54584884643555,
+ "learning_rate": 4.2564102564102564e-05,
+ "loss": 0.6292,
+ "step": 988
+ },
+ {
+ "epoch": 3.6227106227106227,
+ "grad_norm": 61.58243942260742,
+ "learning_rate": 4.2539682539682536e-05,
+ "loss": 1.3205,
+ "step": 989
+ },
+ {
+ "epoch": 3.6263736263736264,
+ "grad_norm": 70.59432220458984,
+ "learning_rate": 4.2515262515262514e-05,
+ "loss": 0.7451,
+ "step": 990
+ },
+ {
+ "epoch": 3.63003663003663,
+ "grad_norm": 76.28730773925781,
+ "learning_rate": 4.249084249084249e-05,
+ "loss": 2.0314,
+ "step": 991
+ },
+ {
+ "epoch": 3.6336996336996337,
+ "grad_norm": 73.5402603149414,
+ "learning_rate": 4.2466422466422464e-05,
+ "loss": 1.6628,
+ "step": 992
+ },
+ {
+ "epoch": 3.6373626373626373,
+ "grad_norm": 75.8978042602539,
+ "learning_rate": 4.244200244200244e-05,
+ "loss": 1.652,
+ "step": 993
+ },
+ {
+ "epoch": 3.641025641025641,
+ "grad_norm": 37.04104232788086,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 1.5356,
+ "step": 994
+ },
+ {
+ "epoch": 3.6446886446886446,
+ "grad_norm": 34.31178283691406,
+ "learning_rate": 4.239316239316239e-05,
+ "loss": 1.1783,
+ "step": 995
+ },
+ {
+ "epoch": 3.6483516483516483,
+ "grad_norm": 22.934877395629883,
+ "learning_rate": 4.236874236874237e-05,
+ "loss": 1.2995,
+ "step": 996
+ },
+ {
+ "epoch": 3.652014652014652,
+ "grad_norm": 30.25251579284668,
+ "learning_rate": 4.234432234432235e-05,
+ "loss": 1.1304,
+ "step": 997
+ },
+ {
+ "epoch": 3.6556776556776556,
+ "grad_norm": 35.082027435302734,
+ "learning_rate": 4.231990231990232e-05,
+ "loss": 1.0827,
+ "step": 998
+ },
+ {
+ "epoch": 3.659340659340659,
+ "grad_norm": 24.526325225830078,
+ "learning_rate": 4.22954822954823e-05,
+ "loss": 0.8716,
+ "step": 999
+ },
+ {
+ "epoch": 3.663003663003663,
+ "grad_norm": 29.882883071899414,
+ "learning_rate": 4.227106227106228e-05,
+ "loss": 0.5432,
+ "step": 1000
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 34.53218078613281,
+ "learning_rate": 4.224664224664224e-05,
+ "loss": 1.2094,
+ "step": 1001
+ },
+ {
+ "epoch": 3.67032967032967,
+ "grad_norm": 22.50905990600586,
+ "learning_rate": 4.222222222222222e-05,
+ "loss": 0.4608,
+ "step": 1002
+ },
+ {
+ "epoch": 3.6739926739926743,
+ "grad_norm": 27.33183479309082,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.7181,
+ "step": 1003
+ },
+ {
+ "epoch": 3.677655677655678,
+ "grad_norm": 50.09929275512695,
+ "learning_rate": 4.217338217338217e-05,
+ "loss": 1.1163,
+ "step": 1004
+ },
+ {
+ "epoch": 3.6813186813186816,
+ "grad_norm": 32.48406982421875,
+ "learning_rate": 4.214896214896215e-05,
+ "loss": 0.7101,
+ "step": 1005
+ },
+ {
+ "epoch": 3.684981684981685,
+ "grad_norm": 5.821015357971191,
+ "learning_rate": 4.212454212454212e-05,
+ "loss": 0.0695,
+ "step": 1006
+ },
+ {
+ "epoch": 3.688644688644689,
+ "grad_norm": 32.04796600341797,
+ "learning_rate": 4.21001221001221e-05,
+ "loss": 0.609,
+ "step": 1007
+ },
+ {
+ "epoch": 3.6923076923076925,
+ "grad_norm": 37.282474517822266,
+ "learning_rate": 4.207570207570208e-05,
+ "loss": 0.873,
+ "step": 1008
+ },
+ {
+ "epoch": 3.695970695970696,
+ "grad_norm": 35.74583435058594,
+ "learning_rate": 4.205128205128205e-05,
+ "loss": 0.7387,
+ "step": 1009
+ },
+ {
+ "epoch": 3.6996336996337,
+ "grad_norm": 74.91361236572266,
+ "learning_rate": 4.202686202686203e-05,
+ "loss": 1.6302,
+ "step": 1010
+ },
+ {
+ "epoch": 3.7032967032967035,
+ "grad_norm": 25.163251876831055,
+ "learning_rate": 4.200244200244201e-05,
+ "loss": 0.3866,
+ "step": 1011
+ },
+ {
+ "epoch": 3.706959706959707,
+ "grad_norm": 34.36520004272461,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.8413,
+ "step": 1012
+ },
+ {
+ "epoch": 3.7106227106227108,
+ "grad_norm": 41.62683868408203,
+ "learning_rate": 4.195360195360196e-05,
+ "loss": 0.4155,
+ "step": 1013
+ },
+ {
+ "epoch": 3.7142857142857144,
+ "grad_norm": 34.24674987792969,
+ "learning_rate": 4.192918192918193e-05,
+ "loss": 0.8327,
+ "step": 1014
+ },
+ {
+ "epoch": 3.717948717948718,
+ "grad_norm": 27.771732330322266,
+ "learning_rate": 4.19047619047619e-05,
+ "loss": 0.4509,
+ "step": 1015
+ },
+ {
+ "epoch": 3.7216117216117217,
+ "grad_norm": 26.55430793762207,
+ "learning_rate": 4.188034188034188e-05,
+ "loss": 0.4851,
+ "step": 1016
+ },
+ {
+ "epoch": 3.7252747252747254,
+ "grad_norm": 34.8384895324707,
+ "learning_rate": 4.185592185592186e-05,
+ "loss": 0.4105,
+ "step": 1017
+ },
+ {
+ "epoch": 3.728937728937729,
+ "grad_norm": 29.447805404663086,
+ "learning_rate": 4.183150183150183e-05,
+ "loss": 0.4129,
+ "step": 1018
+ },
+ {
+ "epoch": 3.7326007326007327,
+ "grad_norm": 66.70004272460938,
+ "learning_rate": 4.180708180708181e-05,
+ "loss": 0.4762,
+ "step": 1019
+ },
+ {
+ "epoch": 3.7362637362637363,
+ "grad_norm": 10.356173515319824,
+ "learning_rate": 4.1782661782661786e-05,
+ "loss": 0.0718,
+ "step": 1020
+ },
+ {
+ "epoch": 3.73992673992674,
+ "grad_norm": 35.98944854736328,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 0.2672,
+ "step": 1021
+ },
+ {
+ "epoch": 3.7435897435897436,
+ "grad_norm": 6.806238651275635,
+ "learning_rate": 4.1733821733821736e-05,
+ "loss": 0.0455,
+ "step": 1022
+ },
+ {
+ "epoch": 3.7472527472527473,
+ "grad_norm": 19.689456939697266,
+ "learning_rate": 4.1709401709401715e-05,
+ "loss": 0.2323,
+ "step": 1023
+ },
+ {
+ "epoch": 3.750915750915751,
+ "grad_norm": 23.971303939819336,
+ "learning_rate": 4.1684981684981687e-05,
+ "loss": 0.1393,
+ "step": 1024
+ },
+ {
+ "epoch": 3.7545787545787546,
+ "grad_norm": 43.26774215698242,
+ "learning_rate": 4.1660561660561665e-05,
+ "loss": 0.7084,
+ "step": 1025
+ },
+ {
+ "epoch": 3.758241758241758,
+ "grad_norm": 36.04475402832031,
+ "learning_rate": 4.1636141636141643e-05,
+ "loss": 0.3782,
+ "step": 1026
+ },
+ {
+ "epoch": 3.761904761904762,
+ "grad_norm": 48.78522491455078,
+ "learning_rate": 4.161172161172161e-05,
+ "loss": 0.7698,
+ "step": 1027
+ },
+ {
+ "epoch": 3.7655677655677655,
+ "grad_norm": 11.876708984375,
+ "learning_rate": 4.158730158730159e-05,
+ "loss": 0.0943,
+ "step": 1028
+ },
+ {
+ "epoch": 3.769230769230769,
+ "grad_norm": 83.1320571899414,
+ "learning_rate": 4.1562881562881565e-05,
+ "loss": 0.8116,
+ "step": 1029
+ },
+ {
+ "epoch": 3.772893772893773,
+ "grad_norm": 22.412723541259766,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2086,
+ "step": 1030
+ },
+ {
+ "epoch": 3.7765567765567765,
+ "grad_norm": 11.011713981628418,
+ "learning_rate": 4.1514041514041515e-05,
+ "loss": 0.1001,
+ "step": 1031
+ },
+ {
+ "epoch": 3.78021978021978,
+ "grad_norm": 21.958040237426758,
+ "learning_rate": 4.148962148962149e-05,
+ "loss": 0.8457,
+ "step": 1032
+ },
+ {
+ "epoch": 3.7838827838827838,
+ "grad_norm": 57.3586540222168,
+ "learning_rate": 4.1465201465201465e-05,
+ "loss": 0.1605,
+ "step": 1033
+ },
+ {
+ "epoch": 3.7875457875457874,
+ "grad_norm": 24.261554718017578,
+ "learning_rate": 4.1440781440781444e-05,
+ "loss": 0.1854,
+ "step": 1034
+ },
+ {
+ "epoch": 3.791208791208791,
+ "grad_norm": 31.09326171875,
+ "learning_rate": 4.1416361416361416e-05,
+ "loss": 0.2874,
+ "step": 1035
+ },
+ {
+ "epoch": 3.7948717948717947,
+ "grad_norm": 8.3728666305542,
+ "learning_rate": 4.1391941391941394e-05,
+ "loss": 0.0496,
+ "step": 1036
+ },
+ {
+ "epoch": 3.7985347985347984,
+ "grad_norm": 47.5240592956543,
+ "learning_rate": 4.136752136752137e-05,
+ "loss": 0.2025,
+ "step": 1037
+ },
+ {
+ "epoch": 3.802197802197802,
+ "grad_norm": 51.25822448730469,
+ "learning_rate": 4.1343101343101344e-05,
+ "loss": 0.714,
+ "step": 1038
+ },
+ {
+ "epoch": 3.8058608058608057,
+ "grad_norm": 91.58492279052734,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 2.2889,
+ "step": 1039
+ },
+ {
+ "epoch": 3.8095238095238093,
+ "grad_norm": 4.206390857696533,
+ "learning_rate": 4.1294261294261294e-05,
+ "loss": 0.024,
+ "step": 1040
+ },
+ {
+ "epoch": 3.813186813186813,
+ "grad_norm": 58.49787139892578,
+ "learning_rate": 4.1269841269841266e-05,
+ "loss": 0.7162,
+ "step": 1041
+ },
+ {
+ "epoch": 3.8168498168498166,
+ "grad_norm": 33.38972091674805,
+ "learning_rate": 4.1245421245421244e-05,
+ "loss": 0.3064,
+ "step": 1042
+ },
+ {
+ "epoch": 3.8205128205128203,
+ "grad_norm": 53.251007080078125,
+ "learning_rate": 4.122100122100122e-05,
+ "loss": 0.7376,
+ "step": 1043
+ },
+ {
+ "epoch": 3.824175824175824,
+ "grad_norm": 28.314645767211914,
+ "learning_rate": 4.1196581196581195e-05,
+ "loss": 0.4608,
+ "step": 1044
+ },
+ {
+ "epoch": 3.8278388278388276,
+ "grad_norm": 538.0653076171875,
+ "learning_rate": 4.117216117216117e-05,
+ "loss": 1.5678,
+ "step": 1045
+ },
+ {
+ "epoch": 3.8315018315018317,
+ "grad_norm": 38.662925720214844,
+ "learning_rate": 4.114774114774115e-05,
+ "loss": 1.1084,
+ "step": 1046
+ },
+ {
+ "epoch": 3.8351648351648353,
+ "grad_norm": 31.877248764038086,
+ "learning_rate": 4.112332112332112e-05,
+ "loss": 0.9947,
+ "step": 1047
+ },
+ {
+ "epoch": 3.838827838827839,
+ "grad_norm": 50.17106628417969,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.8024,
+ "step": 1048
+ },
+ {
+ "epoch": 3.8424908424908426,
+ "grad_norm": 18.851001739501953,
+ "learning_rate": 4.107448107448108e-05,
+ "loss": 0.4245,
+ "step": 1049
+ },
+ {
+ "epoch": 3.8461538461538463,
+ "grad_norm": 35.91590881347656,
+ "learning_rate": 4.105006105006105e-05,
+ "loss": 1.1046,
+ "step": 1050
+ },
+ {
+ "epoch": 3.84981684981685,
+ "grad_norm": 24.618389129638672,
+ "learning_rate": 4.102564102564103e-05,
+ "loss": 0.8167,
+ "step": 1051
+ },
+ {
+ "epoch": 3.8534798534798536,
+ "grad_norm": 27.028446197509766,
+ "learning_rate": 4.100122100122101e-05,
+ "loss": 0.6983,
+ "step": 1052
+ },
+ {
+ "epoch": 3.857142857142857,
+ "grad_norm": 17.247610092163086,
+ "learning_rate": 4.0976800976800974e-05,
+ "loss": 0.4761,
+ "step": 1053
+ },
+ {
+ "epoch": 3.860805860805861,
+ "grad_norm": 27.187416076660156,
+ "learning_rate": 4.095238095238095e-05,
+ "loss": 0.794,
+ "step": 1054
+ },
+ {
+ "epoch": 3.8644688644688645,
+ "grad_norm": 35.990623474121094,
+ "learning_rate": 4.0927960927960924e-05,
+ "loss": 0.7874,
+ "step": 1055
+ },
+ {
+ "epoch": 3.868131868131868,
+ "grad_norm": 168.7575225830078,
+ "learning_rate": 4.09035409035409e-05,
+ "loss": 0.6028,
+ "step": 1056
+ },
+ {
+ "epoch": 3.871794871794872,
+ "grad_norm": 31.459491729736328,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6256,
+ "step": 1057
+ },
+ {
+ "epoch": 3.8754578754578755,
+ "grad_norm": 25.053123474121094,
+ "learning_rate": 4.085470085470085e-05,
+ "loss": 0.3041,
+ "step": 1058
+ },
+ {
+ "epoch": 3.879120879120879,
+ "grad_norm": 56.10730743408203,
+ "learning_rate": 4.083028083028083e-05,
+ "loss": 0.8875,
+ "step": 1059
+ },
+ {
+ "epoch": 3.8827838827838828,
+ "grad_norm": 26.897689819335938,
+ "learning_rate": 4.080586080586081e-05,
+ "loss": 0.5291,
+ "step": 1060
+ },
+ {
+ "epoch": 3.8864468864468864,
+ "grad_norm": 40.36210250854492,
+ "learning_rate": 4.078144078144078e-05,
+ "loss": 1.2323,
+ "step": 1061
+ },
+ {
+ "epoch": 3.89010989010989,
+ "grad_norm": 17.556934356689453,
+ "learning_rate": 4.075702075702076e-05,
+ "loss": 0.0951,
+ "step": 1062
+ },
+ {
+ "epoch": 3.8937728937728937,
+ "grad_norm": 54.6690559387207,
+ "learning_rate": 4.073260073260074e-05,
+ "loss": 0.4311,
+ "step": 1063
+ },
+ {
+ "epoch": 3.8974358974358974,
+ "grad_norm": 27.554750442504883,
+ "learning_rate": 4.070818070818071e-05,
+ "loss": 0.2851,
+ "step": 1064
+ },
+ {
+ "epoch": 3.901098901098901,
+ "grad_norm": 14.667935371398926,
+ "learning_rate": 4.068376068376069e-05,
+ "loss": 0.0866,
+ "step": 1065
+ },
+ {
+ "epoch": 3.9047619047619047,
+ "grad_norm": 39.62594985961914,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.2322,
+ "step": 1066
+ },
+ {
+ "epoch": 3.9084249084249083,
+ "grad_norm": 31.457260131835938,
+ "learning_rate": 4.063492063492063e-05,
+ "loss": 0.2554,
+ "step": 1067
+ },
+ {
+ "epoch": 3.912087912087912,
+ "grad_norm": 52.82997131347656,
+ "learning_rate": 4.061050061050061e-05,
+ "loss": 0.44,
+ "step": 1068
+ },
+ {
+ "epoch": 3.9157509157509156,
+ "grad_norm": 56.15779495239258,
+ "learning_rate": 4.058608058608059e-05,
+ "loss": 0.9419,
+ "step": 1069
+ },
+ {
+ "epoch": 3.9194139194139193,
+ "grad_norm": 59.23240661621094,
+ "learning_rate": 4.056166056166056e-05,
+ "loss": 0.5084,
+ "step": 1070
+ },
+ {
+ "epoch": 3.9230769230769234,
+ "grad_norm": 9.644290924072266,
+ "learning_rate": 4.053724053724054e-05,
+ "loss": 0.0456,
+ "step": 1071
+ },
+ {
+ "epoch": 3.926739926739927,
+ "grad_norm": 24.42845916748047,
+ "learning_rate": 4.051282051282052e-05,
+ "loss": 0.0907,
+ "step": 1072
+ },
+ {
+ "epoch": 3.9304029304029307,
+ "grad_norm": 81.36042785644531,
+ "learning_rate": 4.048840048840049e-05,
+ "loss": 1.0178,
+ "step": 1073
+ },
+ {
+ "epoch": 3.9340659340659343,
+ "grad_norm": 63.134071350097656,
+ "learning_rate": 4.046398046398047e-05,
+ "loss": 1.1125,
+ "step": 1074
+ },
+ {
+ "epoch": 3.937728937728938,
+ "grad_norm": 56.59608840942383,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.4465,
+ "step": 1075
+ },
+ {
+ "epoch": 3.9413919413919416,
+ "grad_norm": 48.51662063598633,
+ "learning_rate": 4.041514041514042e-05,
+ "loss": 0.5054,
+ "step": 1076
+ },
+ {
+ "epoch": 3.9450549450549453,
+ "grad_norm": 50.393524169921875,
+ "learning_rate": 4.0390720390720395e-05,
+ "loss": 0.8157,
+ "step": 1077
+ },
+ {
+ "epoch": 3.948717948717949,
+ "grad_norm": 63.414878845214844,
+ "learning_rate": 4.036630036630037e-05,
+ "loss": 0.9598,
+ "step": 1078
+ },
+ {
+ "epoch": 3.9523809523809526,
+ "grad_norm": 35.72902297973633,
+ "learning_rate": 4.034188034188034e-05,
+ "loss": 0.4764,
+ "step": 1079
+ },
+ {
+ "epoch": 3.956043956043956,
+ "grad_norm": 20.452268600463867,
+ "learning_rate": 4.031746031746032e-05,
+ "loss": 0.191,
+ "step": 1080
+ },
+ {
+ "epoch": 3.95970695970696,
+ "grad_norm": 38.23368453979492,
+ "learning_rate": 4.029304029304029e-05,
+ "loss": 0.5218,
+ "step": 1081
+ },
+ {
+ "epoch": 3.9633699633699635,
+ "grad_norm": 79.35212707519531,
+ "learning_rate": 4.026862026862027e-05,
+ "loss": 1.3695,
+ "step": 1082
+ },
+ {
+ "epoch": 3.967032967032967,
+ "grad_norm": 62.0828742980957,
+ "learning_rate": 4.0244200244200246e-05,
+ "loss": 1.4882,
+ "step": 1083
+ },
+ {
+ "epoch": 3.970695970695971,
+ "grad_norm": 35.413734436035156,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.1966,
+ "step": 1084
+ },
+ {
+ "epoch": 3.9743589743589745,
+ "grad_norm": 18.060728073120117,
+ "learning_rate": 4.0195360195360196e-05,
+ "loss": 0.2902,
+ "step": 1085
+ },
+ {
+ "epoch": 3.978021978021978,
+ "grad_norm": 15.263091087341309,
+ "learning_rate": 4.0170940170940174e-05,
+ "loss": 0.1325,
+ "step": 1086
+ },
+ {
+ "epoch": 3.9816849816849818,
+ "grad_norm": 35.8296012878418,
+ "learning_rate": 4.0146520146520146e-05,
+ "loss": 1.0225,
+ "step": 1087
+ },
+ {
+ "epoch": 3.9853479853479854,
+ "grad_norm": 24.120967864990234,
+ "learning_rate": 4.0122100122100125e-05,
+ "loss": 0.4432,
+ "step": 1088
+ },
+ {
+ "epoch": 3.989010989010989,
+ "grad_norm": 47.371070861816406,
+ "learning_rate": 4.00976800976801e-05,
+ "loss": 0.9703,
+ "step": 1089
+ },
+ {
+ "epoch": 3.9926739926739927,
+ "grad_norm": 44.266082763671875,
+ "learning_rate": 4.0073260073260075e-05,
+ "loss": 0.6652,
+ "step": 1090
+ },
+ {
+ "epoch": 3.9963369963369964,
+ "grad_norm": 22.17586898803711,
+ "learning_rate": 4.0048840048840046e-05,
+ "loss": 0.1324,
+ "step": 1091
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 45.4996337890625,
+ "learning_rate": 4.0024420024420025e-05,
+ "loss": 0.3746,
+ "step": 1092
+ },
+ {
+ "epoch": 4.003663003663004,
+ "grad_norm": 31.747541427612305,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 0.5028,
+ "step": 1093
+ },
+ {
+ "epoch": 4.007326007326007,
+ "grad_norm": 13.460674285888672,
+ "learning_rate": 3.9975579975579975e-05,
+ "loss": 0.088,
+ "step": 1094
+ },
+ {
+ "epoch": 4.010989010989011,
+ "grad_norm": 23.94148826599121,
+ "learning_rate": 3.9951159951159953e-05,
+ "loss": 0.1944,
+ "step": 1095
+ },
+ {
+ "epoch": 4.014652014652015,
+ "grad_norm": 60.94758224487305,
+ "learning_rate": 3.9926739926739925e-05,
+ "loss": 0.555,
+ "step": 1096
+ },
+ {
+ "epoch": 4.018315018315018,
+ "grad_norm": 24.47633934020996,
+ "learning_rate": 3.9902319902319904e-05,
+ "loss": 0.1314,
+ "step": 1097
+ },
+ {
+ "epoch": 4.021978021978022,
+ "grad_norm": 42.690162658691406,
+ "learning_rate": 3.987789987789988e-05,
+ "loss": 0.4734,
+ "step": 1098
+ },
+ {
+ "epoch": 4.0256410256410255,
+ "grad_norm": 69.26956939697266,
+ "learning_rate": 3.9853479853479854e-05,
+ "loss": 1.4256,
+ "step": 1099
+ },
+ {
+ "epoch": 4.029304029304029,
+ "grad_norm": 7.718477725982666,
+ "learning_rate": 3.982905982905983e-05,
+ "loss": 0.0549,
+ "step": 1100
+ },
+ {
+ "epoch": 4.032967032967033,
+ "grad_norm": 60.15462875366211,
+ "learning_rate": 3.980463980463981e-05,
+ "loss": 1.2739,
+ "step": 1101
+ },
+ {
+ "epoch": 4.0366300366300365,
+ "grad_norm": 57.749656677246094,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 1.0691,
+ "step": 1102
+ },
+ {
+ "epoch": 4.04029304029304,
+ "grad_norm": 35.57550811767578,
+ "learning_rate": 3.975579975579976e-05,
+ "loss": 0.5114,
+ "step": 1103
+ },
+ {
+ "epoch": 4.043956043956044,
+ "grad_norm": 58.007694244384766,
+ "learning_rate": 3.973137973137973e-05,
+ "loss": 1.1552,
+ "step": 1104
+ },
+ {
+ "epoch": 4.0476190476190474,
+ "grad_norm": 30.794008255004883,
+ "learning_rate": 3.9706959706959704e-05,
+ "loss": 0.7502,
+ "step": 1105
+ },
+ {
+ "epoch": 4.051282051282051,
+ "grad_norm": 35.88930892944336,
+ "learning_rate": 3.968253968253968e-05,
+ "loss": 0.6965,
+ "step": 1106
+ },
+ {
+ "epoch": 4.054945054945055,
+ "grad_norm": 25.719144821166992,
+ "learning_rate": 3.9658119658119654e-05,
+ "loss": 0.4581,
+ "step": 1107
+ },
+ {
+ "epoch": 4.058608058608058,
+ "grad_norm": 37.397640228271484,
+ "learning_rate": 3.963369963369963e-05,
+ "loss": 1.0719,
+ "step": 1108
+ },
+ {
+ "epoch": 4.062271062271062,
+ "grad_norm": 25.8681640625,
+ "learning_rate": 3.960927960927961e-05,
+ "loss": 0.7,
+ "step": 1109
+ },
+ {
+ "epoch": 4.065934065934066,
+ "grad_norm": 16.983413696289062,
+ "learning_rate": 3.958485958485958e-05,
+ "loss": 0.2394,
+ "step": 1110
+ },
+ {
+ "epoch": 4.069597069597069,
+ "grad_norm": 31.7902889251709,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.5662,
+ "step": 1111
+ },
+ {
+ "epoch": 4.073260073260073,
+ "grad_norm": 37.51417922973633,
+ "learning_rate": 3.953601953601954e-05,
+ "loss": 0.3483,
+ "step": 1112
+ },
+ {
+ "epoch": 4.076923076923077,
+ "grad_norm": 24.01732635498047,
+ "learning_rate": 3.951159951159951e-05,
+ "loss": 0.2527,
+ "step": 1113
+ },
+ {
+ "epoch": 4.08058608058608,
+ "grad_norm": 29.152162551879883,
+ "learning_rate": 3.948717948717949e-05,
+ "loss": 0.4485,
+ "step": 1114
+ },
+ {
+ "epoch": 4.084249084249084,
+ "grad_norm": 31.519155502319336,
+ "learning_rate": 3.946275946275947e-05,
+ "loss": 0.2485,
+ "step": 1115
+ },
+ {
+ "epoch": 4.087912087912088,
+ "grad_norm": 18.462514877319336,
+ "learning_rate": 3.943833943833944e-05,
+ "loss": 0.1057,
+ "step": 1116
+ },
+ {
+ "epoch": 4.091575091575091,
+ "grad_norm": 35.28910827636719,
+ "learning_rate": 3.941391941391941e-05,
+ "loss": 0.3589,
+ "step": 1117
+ },
+ {
+ "epoch": 4.095238095238095,
+ "grad_norm": 47.00394058227539,
+ "learning_rate": 3.938949938949939e-05,
+ "loss": 0.5148,
+ "step": 1118
+ },
+ {
+ "epoch": 4.0989010989010985,
+ "grad_norm": 24.796058654785156,
+ "learning_rate": 3.936507936507936e-05,
+ "loss": 0.2486,
+ "step": 1119
+ },
+ {
+ "epoch": 4.102564102564102,
+ "grad_norm": 27.098758697509766,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.196,
+ "step": 1120
+ },
+ {
+ "epoch": 4.106227106227106,
+ "grad_norm": 59.4343147277832,
+ "learning_rate": 3.931623931623932e-05,
+ "loss": 0.8093,
+ "step": 1121
+ },
+ {
+ "epoch": 4.1098901098901095,
+ "grad_norm": 57.0518684387207,
+ "learning_rate": 3.929181929181929e-05,
+ "loss": 0.6495,
+ "step": 1122
+ },
+ {
+ "epoch": 4.113553113553113,
+ "grad_norm": 42.01070022583008,
+ "learning_rate": 3.926739926739927e-05,
+ "loss": 0.3272,
+ "step": 1123
+ },
+ {
+ "epoch": 4.117216117216117,
+ "grad_norm": 72.11932373046875,
+ "learning_rate": 3.924297924297925e-05,
+ "loss": 1.2542,
+ "step": 1124
+ },
+ {
+ "epoch": 4.1208791208791204,
+ "grad_norm": 13.270249366760254,
+ "learning_rate": 3.921855921855922e-05,
+ "loss": 0.0843,
+ "step": 1125
+ },
+ {
+ "epoch": 4.124542124542124,
+ "grad_norm": 32.058258056640625,
+ "learning_rate": 3.91941391941392e-05,
+ "loss": 0.158,
+ "step": 1126
+ },
+ {
+ "epoch": 4.128205128205128,
+ "grad_norm": 37.67665481567383,
+ "learning_rate": 3.9169719169719176e-05,
+ "loss": 0.3463,
+ "step": 1127
+ },
+ {
+ "epoch": 4.131868131868132,
+ "grad_norm": 98.33348846435547,
+ "learning_rate": 3.914529914529915e-05,
+ "loss": 0.8846,
+ "step": 1128
+ },
+ {
+ "epoch": 4.135531135531136,
+ "grad_norm": 49.11083221435547,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.4124,
+ "step": 1129
+ },
+ {
+ "epoch": 4.13919413919414,
+ "grad_norm": 45.87646484375,
+ "learning_rate": 3.90964590964591e-05,
+ "loss": 0.3594,
+ "step": 1130
+ },
+ {
+ "epoch": 4.142857142857143,
+ "grad_norm": 49.34445571899414,
+ "learning_rate": 3.907203907203907e-05,
+ "loss": 0.1947,
+ "step": 1131
+ },
+ {
+ "epoch": 4.146520146520147,
+ "grad_norm": 8.654282569885254,
+ "learning_rate": 3.904761904761905e-05,
+ "loss": 0.0923,
+ "step": 1132
+ },
+ {
+ "epoch": 4.1501831501831505,
+ "grad_norm": 12.46809196472168,
+ "learning_rate": 3.902319902319902e-05,
+ "loss": 0.0841,
+ "step": 1133
+ },
+ {
+ "epoch": 4.153846153846154,
+ "grad_norm": 33.9839973449707,
+ "learning_rate": 3.8998778998779e-05,
+ "loss": 0.5838,
+ "step": 1134
+ },
+ {
+ "epoch": 4.157509157509158,
+ "grad_norm": 36.68742752075195,
+ "learning_rate": 3.8974358974358976e-05,
+ "loss": 0.5483,
+ "step": 1135
+ },
+ {
+ "epoch": 4.1611721611721615,
+ "grad_norm": 26.862363815307617,
+ "learning_rate": 3.894993894993895e-05,
+ "loss": 0.2464,
+ "step": 1136
+ },
+ {
+ "epoch": 4.164835164835165,
+ "grad_norm": 16.219947814941406,
+ "learning_rate": 3.8925518925518926e-05,
+ "loss": 0.1878,
+ "step": 1137
+ },
+ {
+ "epoch": 4.168498168498169,
+ "grad_norm": 36.86198425292969,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.3656,
+ "step": 1138
+ },
+ {
+ "epoch": 4.172161172161172,
+ "grad_norm": 36.241432189941406,
+ "learning_rate": 3.8876678876678877e-05,
+ "loss": 0.8421,
+ "step": 1139
+ },
+ {
+ "epoch": 4.175824175824176,
+ "grad_norm": 45.81169891357422,
+ "learning_rate": 3.8852258852258855e-05,
+ "loss": 0.6081,
+ "step": 1140
+ },
+ {
+ "epoch": 4.17948717948718,
+ "grad_norm": 30.914037704467773,
+ "learning_rate": 3.8827838827838833e-05,
+ "loss": 0.2975,
+ "step": 1141
+ },
+ {
+ "epoch": 4.183150183150183,
+ "grad_norm": 4.663424968719482,
+ "learning_rate": 3.8803418803418805e-05,
+ "loss": 0.0319,
+ "step": 1142
+ },
+ {
+ "epoch": 4.186813186813187,
+ "grad_norm": 33.163551330566406,
+ "learning_rate": 3.877899877899878e-05,
+ "loss": 0.236,
+ "step": 1143
+ },
+ {
+ "epoch": 4.190476190476191,
+ "grad_norm": 20.820547103881836,
+ "learning_rate": 3.8754578754578755e-05,
+ "loss": 0.1907,
+ "step": 1144
+ },
+ {
+ "epoch": 4.194139194139194,
+ "grad_norm": 65.4993896484375,
+ "learning_rate": 3.873015873015873e-05,
+ "loss": 0.4195,
+ "step": 1145
+ },
+ {
+ "epoch": 4.197802197802198,
+ "grad_norm": 13.253530502319336,
+ "learning_rate": 3.8705738705738705e-05,
+ "loss": 0.1496,
+ "step": 1146
+ },
+ {
+ "epoch": 4.201465201465202,
+ "grad_norm": 18.291889190673828,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 0.1544,
+ "step": 1147
+ },
+ {
+ "epoch": 4.205128205128205,
+ "grad_norm": 32.1517448425293,
+ "learning_rate": 3.8656898656898656e-05,
+ "loss": 0.3317,
+ "step": 1148
+ },
+ {
+ "epoch": 4.208791208791209,
+ "grad_norm": 37.809669494628906,
+ "learning_rate": 3.8632478632478634e-05,
+ "loss": 0.394,
+ "step": 1149
+ },
+ {
+ "epoch": 4.212454212454213,
+ "grad_norm": 113.17266082763672,
+ "learning_rate": 3.860805860805861e-05,
+ "loss": 1.2368,
+ "step": 1150
+ },
+ {
+ "epoch": 4.216117216117216,
+ "grad_norm": 10.35407543182373,
+ "learning_rate": 3.8583638583638584e-05,
+ "loss": 0.0584,
+ "step": 1151
+ },
+ {
+ "epoch": 4.21978021978022,
+ "grad_norm": 56.98881530761719,
+ "learning_rate": 3.855921855921856e-05,
+ "loss": 0.8088,
+ "step": 1152
+ },
+ {
+ "epoch": 4.2234432234432235,
+ "grad_norm": 45.7849006652832,
+ "learning_rate": 3.853479853479854e-05,
+ "loss": 0.6471,
+ "step": 1153
+ },
+ {
+ "epoch": 4.227106227106227,
+ "grad_norm": 43.57515335083008,
+ "learning_rate": 3.851037851037851e-05,
+ "loss": 0.2924,
+ "step": 1154
+ },
+ {
+ "epoch": 4.230769230769231,
+ "grad_norm": 14.98643684387207,
+ "learning_rate": 3.848595848595849e-05,
+ "loss": 0.1108,
+ "step": 1155
+ },
+ {
+ "epoch": 4.2344322344322345,
+ "grad_norm": 27.162513732910156,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.3856,
+ "step": 1156
+ },
+ {
+ "epoch": 4.238095238095238,
+ "grad_norm": 56.45119094848633,
+ "learning_rate": 3.8437118437118435e-05,
+ "loss": 0.6752,
+ "step": 1157
+ },
+ {
+ "epoch": 4.241758241758242,
+ "grad_norm": 15.522347450256348,
+ "learning_rate": 3.841269841269841e-05,
+ "loss": 0.1419,
+ "step": 1158
+ },
+ {
+ "epoch": 4.245421245421245,
+ "grad_norm": 16.31126594543457,
+ "learning_rate": 3.8388278388278385e-05,
+ "loss": 0.1303,
+ "step": 1159
+ },
+ {
+ "epoch": 4.249084249084249,
+ "grad_norm": 12.398606300354004,
+ "learning_rate": 3.836385836385836e-05,
+ "loss": 0.1306,
+ "step": 1160
+ },
+ {
+ "epoch": 4.252747252747253,
+ "grad_norm": 19.660768508911133,
+ "learning_rate": 3.833943833943834e-05,
+ "loss": 0.1554,
+ "step": 1161
+ },
+ {
+ "epoch": 4.256410256410256,
+ "grad_norm": 131.451416015625,
+ "learning_rate": 3.831501831501831e-05,
+ "loss": 0.2774,
+ "step": 1162
+ },
+ {
+ "epoch": 4.26007326007326,
+ "grad_norm": 42.0703125,
+ "learning_rate": 3.829059829059829e-05,
+ "loss": 0.471,
+ "step": 1163
+ },
+ {
+ "epoch": 4.263736263736264,
+ "grad_norm": 52.415096282958984,
+ "learning_rate": 3.826617826617827e-05,
+ "loss": 0.7872,
+ "step": 1164
+ },
+ {
+ "epoch": 4.267399267399267,
+ "grad_norm": 35.990421295166016,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.4495,
+ "step": 1165
+ },
+ {
+ "epoch": 4.271062271062271,
+ "grad_norm": 40.330265045166016,
+ "learning_rate": 3.821733821733822e-05,
+ "loss": 0.4009,
+ "step": 1166
+ },
+ {
+ "epoch": 4.274725274725275,
+ "grad_norm": 42.55587387084961,
+ "learning_rate": 3.81929181929182e-05,
+ "loss": 1.6215,
+ "step": 1167
+ },
+ {
+ "epoch": 4.278388278388278,
+ "grad_norm": 30.704498291015625,
+ "learning_rate": 3.816849816849817e-05,
+ "loss": 0.3539,
+ "step": 1168
+ },
+ {
+ "epoch": 4.282051282051282,
+ "grad_norm": 10.239601135253906,
+ "learning_rate": 3.814407814407814e-05,
+ "loss": 0.0779,
+ "step": 1169
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 37.00144577026367,
+ "learning_rate": 3.811965811965812e-05,
+ "loss": 0.4089,
+ "step": 1170
+ },
+ {
+ "epoch": 4.289377289377289,
+ "grad_norm": 40.18193817138672,
+ "learning_rate": 3.809523809523809e-05,
+ "loss": 0.4854,
+ "step": 1171
+ },
+ {
+ "epoch": 4.293040293040293,
+ "grad_norm": 46.78989028930664,
+ "learning_rate": 3.807081807081807e-05,
+ "loss": 0.5863,
+ "step": 1172
+ },
+ {
+ "epoch": 4.2967032967032965,
+ "grad_norm": 49.5102653503418,
+ "learning_rate": 3.804639804639805e-05,
+ "loss": 1.0118,
+ "step": 1173
+ },
+ {
+ "epoch": 4.3003663003663,
+ "grad_norm": 30.41546058654785,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.2616,
+ "step": 1174
+ },
+ {
+ "epoch": 4.304029304029304,
+ "grad_norm": 41.22653579711914,
+ "learning_rate": 3.7997557997558e-05,
+ "loss": 0.5852,
+ "step": 1175
+ },
+ {
+ "epoch": 4.3076923076923075,
+ "grad_norm": 4.033203125,
+ "learning_rate": 3.797313797313798e-05,
+ "loss": 0.0221,
+ "step": 1176
+ },
+ {
+ "epoch": 4.311355311355311,
+ "grad_norm": 13.03472900390625,
+ "learning_rate": 3.794871794871795e-05,
+ "loss": 0.1499,
+ "step": 1177
+ },
+ {
+ "epoch": 4.315018315018315,
+ "grad_norm": 24.690824508666992,
+ "learning_rate": 3.792429792429793e-05,
+ "loss": 0.2631,
+ "step": 1178
+ },
+ {
+ "epoch": 4.318681318681318,
+ "grad_norm": 32.594451904296875,
+ "learning_rate": 3.7899877899877906e-05,
+ "loss": 0.2988,
+ "step": 1179
+ },
+ {
+ "epoch": 4.322344322344322,
+ "grad_norm": 10.510795593261719,
+ "learning_rate": 3.787545787545788e-05,
+ "loss": 0.0499,
+ "step": 1180
+ },
+ {
+ "epoch": 4.326007326007326,
+ "grad_norm": 65.71479034423828,
+ "learning_rate": 3.785103785103785e-05,
+ "loss": 0.9048,
+ "step": 1181
+ },
+ {
+ "epoch": 4.329670329670329,
+ "grad_norm": 12.129572868347168,
+ "learning_rate": 3.782661782661783e-05,
+ "loss": 0.0629,
+ "step": 1182
+ },
+ {
+ "epoch": 4.333333333333333,
+ "grad_norm": 88.66580200195312,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.8276,
+ "step": 1183
+ },
+ {
+ "epoch": 4.336996336996337,
+ "grad_norm": 35.2215461730957,
+ "learning_rate": 3.777777777777778e-05,
+ "loss": 0.2996,
+ "step": 1184
+ },
+ {
+ "epoch": 4.34065934065934,
+ "grad_norm": 29.870285034179688,
+ "learning_rate": 3.775335775335775e-05,
+ "loss": 0.2152,
+ "step": 1185
+ },
+ {
+ "epoch": 4.344322344322344,
+ "grad_norm": 30.441116333007812,
+ "learning_rate": 3.772893772893773e-05,
+ "loss": 0.6761,
+ "step": 1186
+ },
+ {
+ "epoch": 4.347985347985348,
+ "grad_norm": 22.49298095703125,
+ "learning_rate": 3.770451770451771e-05,
+ "loss": 0.7508,
+ "step": 1187
+ },
+ {
+ "epoch": 4.351648351648351,
+ "grad_norm": 22.43603515625,
+ "learning_rate": 3.768009768009768e-05,
+ "loss": 0.3601,
+ "step": 1188
+ },
+ {
+ "epoch": 4.355311355311355,
+ "grad_norm": 38.21080780029297,
+ "learning_rate": 3.765567765567766e-05,
+ "loss": 0.3769,
+ "step": 1189
+ },
+ {
+ "epoch": 4.358974358974359,
+ "grad_norm": 48.90728759765625,
+ "learning_rate": 3.7631257631257635e-05,
+ "loss": 0.4259,
+ "step": 1190
+ },
+ {
+ "epoch": 4.362637362637362,
+ "grad_norm": 7.331233024597168,
+ "learning_rate": 3.760683760683761e-05,
+ "loss": 0.0697,
+ "step": 1191
+ },
+ {
+ "epoch": 4.366300366300366,
+ "grad_norm": 25.096189498901367,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.2196,
+ "step": 1192
+ },
+ {
+ "epoch": 4.36996336996337,
+ "grad_norm": 53.813209533691406,
+ "learning_rate": 3.7557997557997564e-05,
+ "loss": 0.3785,
+ "step": 1193
+ },
+ {
+ "epoch": 4.373626373626374,
+ "grad_norm": 13.184123039245605,
+ "learning_rate": 3.753357753357753e-05,
+ "loss": 0.1747,
+ "step": 1194
+ },
+ {
+ "epoch": 4.377289377289378,
+ "grad_norm": 1.818351149559021,
+ "learning_rate": 3.750915750915751e-05,
+ "loss": 0.0158,
+ "step": 1195
+ },
+ {
+ "epoch": 4.380952380952381,
+ "grad_norm": 63.21619415283203,
+ "learning_rate": 3.7484737484737486e-05,
+ "loss": 0.2863,
+ "step": 1196
+ },
+ {
+ "epoch": 4.384615384615385,
+ "grad_norm": 32.59927749633789,
+ "learning_rate": 3.746031746031746e-05,
+ "loss": 0.4261,
+ "step": 1197
+ },
+ {
+ "epoch": 4.388278388278389,
+ "grad_norm": 36.5265998840332,
+ "learning_rate": 3.7435897435897436e-05,
+ "loss": 0.8064,
+ "step": 1198
+ },
+ {
+ "epoch": 4.391941391941392,
+ "grad_norm": 47.726905822753906,
+ "learning_rate": 3.7411477411477414e-05,
+ "loss": 0.8884,
+ "step": 1199
+ },
+ {
+ "epoch": 4.395604395604396,
+ "grad_norm": 12.621973037719727,
+ "learning_rate": 3.7387057387057386e-05,
+ "loss": 0.1085,
+ "step": 1200
+ },
+ {
+ "epoch": 4.3992673992674,
+ "grad_norm": 24.7711124420166,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 0.2249,
+ "step": 1201
+ },
+ {
+ "epoch": 4.402930402930403,
+ "grad_norm": 39.19346618652344,
+ "learning_rate": 3.733821733821734e-05,
+ "loss": 0.4065,
+ "step": 1202
+ },
+ {
+ "epoch": 4.406593406593407,
+ "grad_norm": 20.3857421875,
+ "learning_rate": 3.7313797313797315e-05,
+ "loss": 0.1653,
+ "step": 1203
+ },
+ {
+ "epoch": 4.410256410256411,
+ "grad_norm": 58.15717697143555,
+ "learning_rate": 3.728937728937729e-05,
+ "loss": 0.8774,
+ "step": 1204
+ },
+ {
+ "epoch": 4.413919413919414,
+ "grad_norm": 28.05725860595703,
+ "learning_rate": 3.726495726495727e-05,
+ "loss": 0.1695,
+ "step": 1205
+ },
+ {
+ "epoch": 4.417582417582418,
+ "grad_norm": 24.635583877563477,
+ "learning_rate": 3.724053724053724e-05,
+ "loss": 0.4871,
+ "step": 1206
+ },
+ {
+ "epoch": 4.4212454212454215,
+ "grad_norm": 16.8306941986084,
+ "learning_rate": 3.7216117216117215e-05,
+ "loss": 0.0863,
+ "step": 1207
+ },
+ {
+ "epoch": 4.424908424908425,
+ "grad_norm": 16.2359676361084,
+ "learning_rate": 3.719169719169719e-05,
+ "loss": 0.077,
+ "step": 1208
+ },
+ {
+ "epoch": 4.428571428571429,
+ "grad_norm": 31.431425094604492,
+ "learning_rate": 3.7167277167277165e-05,
+ "loss": 0.2815,
+ "step": 1209
+ },
+ {
+ "epoch": 4.4322344322344325,
+ "grad_norm": 31.44464874267578,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.2237,
+ "step": 1210
+ },
+ {
+ "epoch": 4.435897435897436,
+ "grad_norm": 23.390378952026367,
+ "learning_rate": 3.7118437118437115e-05,
+ "loss": 0.1791,
+ "step": 1211
+ },
+ {
+ "epoch": 4.43956043956044,
+ "grad_norm": 48.210079193115234,
+ "learning_rate": 3.7094017094017094e-05,
+ "loss": 0.517,
+ "step": 1212
+ },
+ {
+ "epoch": 4.443223443223443,
+ "grad_norm": 45.35732650756836,
+ "learning_rate": 3.706959706959707e-05,
+ "loss": 0.4638,
+ "step": 1213
+ },
+ {
+ "epoch": 4.446886446886447,
+ "grad_norm": 16.88719367980957,
+ "learning_rate": 3.7045177045177044e-05,
+ "loss": 0.1203,
+ "step": 1214
+ },
+ {
+ "epoch": 4.450549450549451,
+ "grad_norm": 58.36906433105469,
+ "learning_rate": 3.702075702075702e-05,
+ "loss": 0.7366,
+ "step": 1215
+ },
+ {
+ "epoch": 4.454212454212454,
+ "grad_norm": 49.00838088989258,
+ "learning_rate": 3.6996336996337e-05,
+ "loss": 0.739,
+ "step": 1216
+ },
+ {
+ "epoch": 4.457875457875458,
+ "grad_norm": 42.87287521362305,
+ "learning_rate": 3.697191697191697e-05,
+ "loss": 1.3861,
+ "step": 1217
+ },
+ {
+ "epoch": 4.461538461538462,
+ "grad_norm": 44.62813949584961,
+ "learning_rate": 3.694749694749695e-05,
+ "loss": 0.549,
+ "step": 1218
+ },
+ {
+ "epoch": 4.465201465201465,
+ "grad_norm": 6.473313331604004,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.0407,
+ "step": 1219
+ },
+ {
+ "epoch": 4.468864468864469,
+ "grad_norm": 35.04784393310547,
+ "learning_rate": 3.6898656898656894e-05,
+ "loss": 0.3146,
+ "step": 1220
+ },
+ {
+ "epoch": 4.472527472527473,
+ "grad_norm": 44.79425811767578,
+ "learning_rate": 3.687423687423687e-05,
+ "loss": 0.5206,
+ "step": 1221
+ },
+ {
+ "epoch": 4.476190476190476,
+ "grad_norm": 36.52440643310547,
+ "learning_rate": 3.684981684981685e-05,
+ "loss": 0.5977,
+ "step": 1222
+ },
+ {
+ "epoch": 4.47985347985348,
+ "grad_norm": 58.15000915527344,
+ "learning_rate": 3.682539682539682e-05,
+ "loss": 1.0533,
+ "step": 1223
+ },
+ {
+ "epoch": 4.483516483516484,
+ "grad_norm": 32.33371353149414,
+ "learning_rate": 3.68009768009768e-05,
+ "loss": 0.3928,
+ "step": 1224
+ },
+ {
+ "epoch": 4.487179487179487,
+ "grad_norm": 44.501529693603516,
+ "learning_rate": 3.677655677655678e-05,
+ "loss": 0.8471,
+ "step": 1225
+ },
+ {
+ "epoch": 4.490842490842491,
+ "grad_norm": 41.62052536010742,
+ "learning_rate": 3.675213675213675e-05,
+ "loss": 0.7731,
+ "step": 1226
+ },
+ {
+ "epoch": 4.4945054945054945,
+ "grad_norm": 12.638876914978027,
+ "learning_rate": 3.672771672771673e-05,
+ "loss": 0.1219,
+ "step": 1227
+ },
+ {
+ "epoch": 4.498168498168498,
+ "grad_norm": 12.034523010253906,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.158,
+ "step": 1228
+ },
+ {
+ "epoch": 4.501831501831502,
+ "grad_norm": 42.04001235961914,
+ "learning_rate": 3.667887667887668e-05,
+ "loss": 0.8556,
+ "step": 1229
+ },
+ {
+ "epoch": 4.5054945054945055,
+ "grad_norm": 36.28947448730469,
+ "learning_rate": 3.665445665445666e-05,
+ "loss": 0.6569,
+ "step": 1230
+ },
+ {
+ "epoch": 4.509157509157509,
+ "grad_norm": 40.263912200927734,
+ "learning_rate": 3.663003663003664e-05,
+ "loss": 0.7625,
+ "step": 1231
+ },
+ {
+ "epoch": 4.512820512820513,
+ "grad_norm": 23.760005950927734,
+ "learning_rate": 3.660561660561661e-05,
+ "loss": 0.2465,
+ "step": 1232
+ },
+ {
+ "epoch": 4.516483516483516,
+ "grad_norm": 23.589109420776367,
+ "learning_rate": 3.658119658119658e-05,
+ "loss": 0.4408,
+ "step": 1233
+ },
+ {
+ "epoch": 4.52014652014652,
+ "grad_norm": 30.512271881103516,
+ "learning_rate": 3.655677655677655e-05,
+ "loss": 0.8748,
+ "step": 1234
+ },
+ {
+ "epoch": 4.523809523809524,
+ "grad_norm": 8.060181617736816,
+ "learning_rate": 3.653235653235653e-05,
+ "loss": 0.0818,
+ "step": 1235
+ },
+ {
+ "epoch": 4.527472527472527,
+ "grad_norm": 14.353645324707031,
+ "learning_rate": 3.650793650793651e-05,
+ "loss": 0.1899,
+ "step": 1236
+ },
+ {
+ "epoch": 4.531135531135531,
+ "grad_norm": 12.20384693145752,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1618,
+ "step": 1237
+ },
+ {
+ "epoch": 4.534798534798535,
+ "grad_norm": 182.4698028564453,
+ "learning_rate": 3.645909645909646e-05,
+ "loss": 0.9223,
+ "step": 1238
+ },
+ {
+ "epoch": 4.538461538461538,
+ "grad_norm": 33.137081146240234,
+ "learning_rate": 3.643467643467644e-05,
+ "loss": 0.7708,
+ "step": 1239
+ },
+ {
+ "epoch": 4.542124542124542,
+ "grad_norm": 19.895912170410156,
+ "learning_rate": 3.641025641025641e-05,
+ "loss": 0.164,
+ "step": 1240
+ },
+ {
+ "epoch": 4.545787545787546,
+ "grad_norm": 62.816864013671875,
+ "learning_rate": 3.638583638583639e-05,
+ "loss": 1.4675,
+ "step": 1241
+ },
+ {
+ "epoch": 4.549450549450549,
+ "grad_norm": 35.58034896850586,
+ "learning_rate": 3.6361416361416366e-05,
+ "loss": 0.4449,
+ "step": 1242
+ },
+ {
+ "epoch": 4.553113553113553,
+ "grad_norm": 21.993911743164062,
+ "learning_rate": 3.633699633699634e-05,
+ "loss": 0.2302,
+ "step": 1243
+ },
+ {
+ "epoch": 4.556776556776557,
+ "grad_norm": 33.743812561035156,
+ "learning_rate": 3.6312576312576316e-05,
+ "loss": 0.1782,
+ "step": 1244
+ },
+ {
+ "epoch": 4.56043956043956,
+ "grad_norm": 40.135711669921875,
+ "learning_rate": 3.6288156288156294e-05,
+ "loss": 0.7147,
+ "step": 1245
+ },
+ {
+ "epoch": 4.564102564102564,
+ "grad_norm": 2.47517728805542,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0188,
+ "step": 1246
+ },
+ {
+ "epoch": 4.5677655677655675,
+ "grad_norm": 22.023807525634766,
+ "learning_rate": 3.623931623931624e-05,
+ "loss": 0.3182,
+ "step": 1247
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 21.8381290435791,
+ "learning_rate": 3.6214896214896216e-05,
+ "loss": 0.4161,
+ "step": 1248
+ },
+ {
+ "epoch": 4.575091575091575,
+ "grad_norm": 20.989906311035156,
+ "learning_rate": 3.619047619047619e-05,
+ "loss": 0.2972,
+ "step": 1249
+ },
+ {
+ "epoch": 4.5787545787545785,
+ "grad_norm": 75.8060073852539,
+ "learning_rate": 3.6166056166056166e-05,
+ "loss": 0.6194,
+ "step": 1250
+ },
+ {
+ "epoch": 4.582417582417582,
+ "grad_norm": 40.85308074951172,
+ "learning_rate": 3.6141636141636145e-05,
+ "loss": 0.7707,
+ "step": 1251
+ },
+ {
+ "epoch": 4.586080586080586,
+ "grad_norm": 62.22278594970703,
+ "learning_rate": 3.6117216117216117e-05,
+ "loss": 0.6872,
+ "step": 1252
+ },
+ {
+ "epoch": 4.589743589743589,
+ "grad_norm": 30.27143669128418,
+ "learning_rate": 3.6092796092796095e-05,
+ "loss": 0.484,
+ "step": 1253
+ },
+ {
+ "epoch": 4.593406593406593,
+ "grad_norm": 44.08026123046875,
+ "learning_rate": 3.6068376068376073e-05,
+ "loss": 0.8593,
+ "step": 1254
+ },
+ {
+ "epoch": 4.597069597069597,
+ "grad_norm": 22.63222312927246,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.2542,
+ "step": 1255
+ },
+ {
+ "epoch": 4.6007326007326,
+ "grad_norm": 7.024168014526367,
+ "learning_rate": 3.6019536019536024e-05,
+ "loss": 0.0777,
+ "step": 1256
+ },
+ {
+ "epoch": 4.604395604395604,
+ "grad_norm": 24.981502532958984,
+ "learning_rate": 3.5995115995116e-05,
+ "loss": 0.2332,
+ "step": 1257
+ },
+ {
+ "epoch": 4.608058608058608,
+ "grad_norm": 28.929807662963867,
+ "learning_rate": 3.5970695970695974e-05,
+ "loss": 0.3665,
+ "step": 1258
+ },
+ {
+ "epoch": 4.611721611721611,
+ "grad_norm": 36.756683349609375,
+ "learning_rate": 3.5946275946275945e-05,
+ "loss": 1.2777,
+ "step": 1259
+ },
+ {
+ "epoch": 4.615384615384615,
+ "grad_norm": 53.04755783081055,
+ "learning_rate": 3.592185592185592e-05,
+ "loss": 0.3001,
+ "step": 1260
+ },
+ {
+ "epoch": 4.619047619047619,
+ "grad_norm": 39.71099853515625,
+ "learning_rate": 3.5897435897435896e-05,
+ "loss": 0.7756,
+ "step": 1261
+ },
+ {
+ "epoch": 4.622710622710622,
+ "grad_norm": 21.80796241760254,
+ "learning_rate": 3.5873015873015874e-05,
+ "loss": 0.2329,
+ "step": 1262
+ },
+ {
+ "epoch": 4.626373626373626,
+ "grad_norm": 25.909208297729492,
+ "learning_rate": 3.5848595848595846e-05,
+ "loss": 0.5081,
+ "step": 1263
+ },
+ {
+ "epoch": 4.63003663003663,
+ "grad_norm": 46.62733840942383,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.8265,
+ "step": 1264
+ },
+ {
+ "epoch": 4.633699633699633,
+ "grad_norm": 5.689383506774902,
+ "learning_rate": 3.57997557997558e-05,
+ "loss": 0.055,
+ "step": 1265
+ },
+ {
+ "epoch": 4.637362637362637,
+ "grad_norm": 23.30045509338379,
+ "learning_rate": 3.5775335775335774e-05,
+ "loss": 0.3397,
+ "step": 1266
+ },
+ {
+ "epoch": 4.641025641025641,
+ "grad_norm": 15.685534477233887,
+ "learning_rate": 3.575091575091575e-05,
+ "loss": 0.0862,
+ "step": 1267
+ },
+ {
+ "epoch": 4.644688644688645,
+ "grad_norm": 27.56009864807129,
+ "learning_rate": 3.572649572649573e-05,
+ "loss": 0.4751,
+ "step": 1268
+ },
+ {
+ "epoch": 4.648351648351649,
+ "grad_norm": 18.164905548095703,
+ "learning_rate": 3.57020757020757e-05,
+ "loss": 0.1274,
+ "step": 1269
+ },
+ {
+ "epoch": 4.652014652014652,
+ "grad_norm": 18.178728103637695,
+ "learning_rate": 3.567765567765568e-05,
+ "loss": 0.1246,
+ "step": 1270
+ },
+ {
+ "epoch": 4.655677655677656,
+ "grad_norm": 11.308391571044922,
+ "learning_rate": 3.565323565323565e-05,
+ "loss": 0.0937,
+ "step": 1271
+ },
+ {
+ "epoch": 4.65934065934066,
+ "grad_norm": 38.507469177246094,
+ "learning_rate": 3.5628815628815625e-05,
+ "loss": 0.4616,
+ "step": 1272
+ },
+ {
+ "epoch": 4.663003663003663,
+ "grad_norm": 9.642159461975098,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.0772,
+ "step": 1273
+ },
+ {
+ "epoch": 4.666666666666667,
+ "grad_norm": 31.854310989379883,
+ "learning_rate": 3.557997557997558e-05,
+ "loss": 0.2349,
+ "step": 1274
+ },
+ {
+ "epoch": 4.670329670329671,
+ "grad_norm": 53.341617584228516,
+ "learning_rate": 3.555555555555555e-05,
+ "loss": 0.2926,
+ "step": 1275
+ },
+ {
+ "epoch": 4.673992673992674,
+ "grad_norm": 24.003368377685547,
+ "learning_rate": 3.553113553113553e-05,
+ "loss": 0.1689,
+ "step": 1276
+ },
+ {
+ "epoch": 4.677655677655678,
+ "grad_norm": 12.198409080505371,
+ "learning_rate": 3.550671550671551e-05,
+ "loss": 0.1001,
+ "step": 1277
+ },
+ {
+ "epoch": 4.681318681318682,
+ "grad_norm": 56.559051513671875,
+ "learning_rate": 3.548229548229548e-05,
+ "loss": 0.5314,
+ "step": 1278
+ },
+ {
+ "epoch": 4.684981684981685,
+ "grad_norm": 17.89840316772461,
+ "learning_rate": 3.545787545787546e-05,
+ "loss": 0.1258,
+ "step": 1279
+ },
+ {
+ "epoch": 4.688644688644689,
+ "grad_norm": 14.37424087524414,
+ "learning_rate": 3.543345543345544e-05,
+ "loss": 0.0925,
+ "step": 1280
+ },
+ {
+ "epoch": 4.6923076923076925,
+ "grad_norm": 21.21650505065918,
+ "learning_rate": 3.540903540903541e-05,
+ "loss": 0.1541,
+ "step": 1281
+ },
+ {
+ "epoch": 4.695970695970696,
+ "grad_norm": 36.1934814453125,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.403,
+ "step": 1282
+ },
+ {
+ "epoch": 4.6996336996337,
+ "grad_norm": 62.917022705078125,
+ "learning_rate": 3.536019536019537e-05,
+ "loss": 1.2771,
+ "step": 1283
+ },
+ {
+ "epoch": 4.7032967032967035,
+ "grad_norm": 30.238500595092773,
+ "learning_rate": 3.533577533577533e-05,
+ "loss": 0.3149,
+ "step": 1284
+ },
+ {
+ "epoch": 4.706959706959707,
+ "grad_norm": 12.155022621154785,
+ "learning_rate": 3.531135531135531e-05,
+ "loss": 0.0543,
+ "step": 1285
+ },
+ {
+ "epoch": 4.710622710622711,
+ "grad_norm": 39.67718505859375,
+ "learning_rate": 3.528693528693528e-05,
+ "loss": 0.4201,
+ "step": 1286
+ },
+ {
+ "epoch": 4.714285714285714,
+ "grad_norm": 46.620235443115234,
+ "learning_rate": 3.526251526251526e-05,
+ "loss": 0.7735,
+ "step": 1287
+ },
+ {
+ "epoch": 4.717948717948718,
+ "grad_norm": 29.740169525146484,
+ "learning_rate": 3.523809523809524e-05,
+ "loss": 0.4753,
+ "step": 1288
+ },
+ {
+ "epoch": 4.721611721611722,
+ "grad_norm": 17.668439865112305,
+ "learning_rate": 3.521367521367521e-05,
+ "loss": 0.0738,
+ "step": 1289
+ },
+ {
+ "epoch": 4.725274725274725,
+ "grad_norm": 29.107847213745117,
+ "learning_rate": 3.518925518925519e-05,
+ "loss": 0.2967,
+ "step": 1290
+ },
+ {
+ "epoch": 4.728937728937729,
+ "grad_norm": 41.70953369140625,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2407,
+ "step": 1291
+ },
+ {
+ "epoch": 4.732600732600733,
+ "grad_norm": 41.50172805786133,
+ "learning_rate": 3.514041514041514e-05,
+ "loss": 0.5012,
+ "step": 1292
+ },
+ {
+ "epoch": 4.736263736263736,
+ "grad_norm": 10.921927452087402,
+ "learning_rate": 3.511599511599512e-05,
+ "loss": 0.0583,
+ "step": 1293
+ },
+ {
+ "epoch": 4.73992673992674,
+ "grad_norm": 10.986832618713379,
+ "learning_rate": 3.5091575091575096e-05,
+ "loss": 0.1684,
+ "step": 1294
+ },
+ {
+ "epoch": 4.743589743589744,
+ "grad_norm": 77.36996459960938,
+ "learning_rate": 3.506715506715507e-05,
+ "loss": 0.1532,
+ "step": 1295
+ },
+ {
+ "epoch": 4.747252747252747,
+ "grad_norm": 2.912205457687378,
+ "learning_rate": 3.5042735042735046e-05,
+ "loss": 0.0178,
+ "step": 1296
+ },
+ {
+ "epoch": 4.750915750915751,
+ "grad_norm": 7.694264888763428,
+ "learning_rate": 3.501831501831502e-05,
+ "loss": 0.0448,
+ "step": 1297
+ },
+ {
+ "epoch": 4.754578754578755,
+ "grad_norm": 59.40597152709961,
+ "learning_rate": 3.499389499389499e-05,
+ "loss": 0.825,
+ "step": 1298
+ },
+ {
+ "epoch": 4.758241758241758,
+ "grad_norm": 44.394065856933594,
+ "learning_rate": 3.496947496947497e-05,
+ "loss": 0.2582,
+ "step": 1299
+ },
+ {
+ "epoch": 4.761904761904762,
+ "grad_norm": 48.07161331176758,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.5681,
+ "step": 1300
+ },
+ {
+ "epoch": 4.7655677655677655,
+ "grad_norm": 47.763275146484375,
+ "learning_rate": 3.492063492063492e-05,
+ "loss": 0.2289,
+ "step": 1301
+ },
+ {
+ "epoch": 4.769230769230769,
+ "grad_norm": 33.30193328857422,
+ "learning_rate": 3.48962148962149e-05,
+ "loss": 0.2646,
+ "step": 1302
+ },
+ {
+ "epoch": 4.772893772893773,
+ "grad_norm": 62.87331008911133,
+ "learning_rate": 3.4871794871794875e-05,
+ "loss": 0.5135,
+ "step": 1303
+ },
+ {
+ "epoch": 4.7765567765567765,
+ "grad_norm": 57.62127685546875,
+ "learning_rate": 3.484737484737485e-05,
+ "loss": 0.6126,
+ "step": 1304
+ },
+ {
+ "epoch": 4.78021978021978,
+ "grad_norm": 35.42237854003906,
+ "learning_rate": 3.4822954822954825e-05,
+ "loss": 0.2312,
+ "step": 1305
+ },
+ {
+ "epoch": 4.783882783882784,
+ "grad_norm": 38.23964309692383,
+ "learning_rate": 3.4798534798534804e-05,
+ "loss": 0.4366,
+ "step": 1306
+ },
+ {
+ "epoch": 4.787545787545787,
+ "grad_norm": 24.94087028503418,
+ "learning_rate": 3.4774114774114776e-05,
+ "loss": 0.2944,
+ "step": 1307
+ },
+ {
+ "epoch": 4.791208791208791,
+ "grad_norm": 43.400047302246094,
+ "learning_rate": 3.4749694749694754e-05,
+ "loss": 0.4749,
+ "step": 1308
+ },
+ {
+ "epoch": 4.794871794871795,
+ "grad_norm": 82.01946258544922,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.6972,
+ "step": 1309
+ },
+ {
+ "epoch": 4.798534798534798,
+ "grad_norm": 25.38723373413086,
+ "learning_rate": 3.47008547008547e-05,
+ "loss": 0.3361,
+ "step": 1310
+ },
+ {
+ "epoch": 4.802197802197802,
+ "grad_norm": 13.022088050842285,
+ "learning_rate": 3.4676434676434676e-05,
+ "loss": 0.1853,
+ "step": 1311
+ },
+ {
+ "epoch": 4.805860805860806,
+ "grad_norm": 30.806135177612305,
+ "learning_rate": 3.465201465201465e-05,
+ "loss": 0.3196,
+ "step": 1312
+ },
+ {
+ "epoch": 4.809523809523809,
+ "grad_norm": 26.30035972595215,
+ "learning_rate": 3.4627594627594626e-05,
+ "loss": 0.2708,
+ "step": 1313
+ },
+ {
+ "epoch": 4.813186813186813,
+ "grad_norm": 6.557223796844482,
+ "learning_rate": 3.4603174603174604e-05,
+ "loss": 0.0815,
+ "step": 1314
+ },
+ {
+ "epoch": 4.816849816849817,
+ "grad_norm": 33.60557174682617,
+ "learning_rate": 3.4578754578754576e-05,
+ "loss": 0.9938,
+ "step": 1315
+ },
+ {
+ "epoch": 4.82051282051282,
+ "grad_norm": 104.2552719116211,
+ "learning_rate": 3.4554334554334555e-05,
+ "loss": 0.1937,
+ "step": 1316
+ },
+ {
+ "epoch": 4.824175824175824,
+ "grad_norm": 41.3105583190918,
+ "learning_rate": 3.452991452991453e-05,
+ "loss": 0.3856,
+ "step": 1317
+ },
+ {
+ "epoch": 4.827838827838828,
+ "grad_norm": 43.52134323120117,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.4823,
+ "step": 1318
+ },
+ {
+ "epoch": 4.831501831501831,
+ "grad_norm": 29.37596893310547,
+ "learning_rate": 3.448107448107448e-05,
+ "loss": 0.1746,
+ "step": 1319
+ },
+ {
+ "epoch": 4.835164835164835,
+ "grad_norm": 13.94152545928955,
+ "learning_rate": 3.445665445665446e-05,
+ "loss": 0.141,
+ "step": 1320
+ },
+ {
+ "epoch": 4.8388278388278385,
+ "grad_norm": 34.95270538330078,
+ "learning_rate": 3.443223443223443e-05,
+ "loss": 0.2701,
+ "step": 1321
+ },
+ {
+ "epoch": 4.842490842490842,
+ "grad_norm": 64.49109649658203,
+ "learning_rate": 3.440781440781441e-05,
+ "loss": 1.095,
+ "step": 1322
+ },
+ {
+ "epoch": 4.846153846153846,
+ "grad_norm": 61.1287727355957,
+ "learning_rate": 3.4383394383394383e-05,
+ "loss": 0.2083,
+ "step": 1323
+ },
+ {
+ "epoch": 4.8498168498168495,
+ "grad_norm": 62.69855499267578,
+ "learning_rate": 3.4358974358974355e-05,
+ "loss": 0.5077,
+ "step": 1324
+ },
+ {
+ "epoch": 4.853479853479853,
+ "grad_norm": 92.53154754638672,
+ "learning_rate": 3.4334554334554334e-05,
+ "loss": 0.7287,
+ "step": 1325
+ },
+ {
+ "epoch": 4.857142857142857,
+ "grad_norm": 98.1663589477539,
+ "learning_rate": 3.431013431013431e-05,
+ "loss": 1.2834,
+ "step": 1326
+ },
+ {
+ "epoch": 4.860805860805861,
+ "grad_norm": 52.24921417236328,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.8187,
+ "step": 1327
+ },
+ {
+ "epoch": 4.864468864468865,
+ "grad_norm": 60.897544860839844,
+ "learning_rate": 3.426129426129426e-05,
+ "loss": 1.5861,
+ "step": 1328
+ },
+ {
+ "epoch": 4.868131868131869,
+ "grad_norm": 21.70830535888672,
+ "learning_rate": 3.423687423687424e-05,
+ "loss": 0.1459,
+ "step": 1329
+ },
+ {
+ "epoch": 4.871794871794872,
+ "grad_norm": 47.87598419189453,
+ "learning_rate": 3.421245421245421e-05,
+ "loss": 1.0044,
+ "step": 1330
+ },
+ {
+ "epoch": 4.875457875457876,
+ "grad_norm": 172.73670959472656,
+ "learning_rate": 3.418803418803419e-05,
+ "loss": 1.4617,
+ "step": 1331
+ },
+ {
+ "epoch": 4.8791208791208796,
+ "grad_norm": 154.93960571289062,
+ "learning_rate": 3.416361416361417e-05,
+ "loss": 1.7488,
+ "step": 1332
+ },
+ {
+ "epoch": 4.882783882783883,
+ "grad_norm": 73.78408813476562,
+ "learning_rate": 3.413919413919414e-05,
+ "loss": 0.5789,
+ "step": 1333
+ },
+ {
+ "epoch": 4.886446886446887,
+ "grad_norm": 35.67369079589844,
+ "learning_rate": 3.411477411477412e-05,
+ "loss": 0.6101,
+ "step": 1334
+ },
+ {
+ "epoch": 4.8901098901098905,
+ "grad_norm": 54.61326599121094,
+ "learning_rate": 3.40903540903541e-05,
+ "loss": 0.7433,
+ "step": 1335
+ },
+ {
+ "epoch": 4.893772893772894,
+ "grad_norm": 28.492923736572266,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.7661,
+ "step": 1336
+ },
+ {
+ "epoch": 4.897435897435898,
+ "grad_norm": 17.2525634765625,
+ "learning_rate": 3.404151404151404e-05,
+ "loss": 0.2423,
+ "step": 1337
+ },
+ {
+ "epoch": 4.9010989010989015,
+ "grad_norm": 55.46605682373047,
+ "learning_rate": 3.401709401709401e-05,
+ "loss": 0.4419,
+ "step": 1338
+ },
+ {
+ "epoch": 4.904761904761905,
+ "grad_norm": 23.03455352783203,
+ "learning_rate": 3.399267399267399e-05,
+ "loss": 0.3046,
+ "step": 1339
+ },
+ {
+ "epoch": 4.908424908424909,
+ "grad_norm": 20.186574935913086,
+ "learning_rate": 3.396825396825397e-05,
+ "loss": 0.3712,
+ "step": 1340
+ },
+ {
+ "epoch": 4.912087912087912,
+ "grad_norm": 22.702407836914062,
+ "learning_rate": 3.394383394383394e-05,
+ "loss": 0.4481,
+ "step": 1341
+ },
+ {
+ "epoch": 4.915750915750916,
+ "grad_norm": 25.723426818847656,
+ "learning_rate": 3.391941391941392e-05,
+ "loss": 0.1832,
+ "step": 1342
+ },
+ {
+ "epoch": 4.91941391941392,
+ "grad_norm": 18.955692291259766,
+ "learning_rate": 3.38949938949939e-05,
+ "loss": 0.1334,
+ "step": 1343
+ },
+ {
+ "epoch": 4.923076923076923,
+ "grad_norm": 20.29511833190918,
+ "learning_rate": 3.387057387057387e-05,
+ "loss": 0.1811,
+ "step": 1344
+ },
+ {
+ "epoch": 4.926739926739927,
+ "grad_norm": 22.23061752319336,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.2643,
+ "step": 1345
+ },
+ {
+ "epoch": 4.930402930402931,
+ "grad_norm": 52.057132720947266,
+ "learning_rate": 3.382173382173383e-05,
+ "loss": 0.5874,
+ "step": 1346
+ },
+ {
+ "epoch": 4.934065934065934,
+ "grad_norm": 66.5381851196289,
+ "learning_rate": 3.37973137973138e-05,
+ "loss": 0.4993,
+ "step": 1347
+ },
+ {
+ "epoch": 4.937728937728938,
+ "grad_norm": 8.25474739074707,
+ "learning_rate": 3.377289377289378e-05,
+ "loss": 0.0263,
+ "step": 1348
+ },
+ {
+ "epoch": 4.941391941391942,
+ "grad_norm": 31.373722076416016,
+ "learning_rate": 3.374847374847375e-05,
+ "loss": 0.288,
+ "step": 1349
+ },
+ {
+ "epoch": 4.945054945054945,
+ "grad_norm": 51.15471267700195,
+ "learning_rate": 3.372405372405372e-05,
+ "loss": 0.7586,
+ "step": 1350
+ },
+ {
+ "epoch": 4.948717948717949,
+ "grad_norm": 39.163639068603516,
+ "learning_rate": 3.36996336996337e-05,
+ "loss": 1.221,
+ "step": 1351
+ },
+ {
+ "epoch": 4.9523809523809526,
+ "grad_norm": 11.033390998840332,
+ "learning_rate": 3.367521367521368e-05,
+ "loss": 0.069,
+ "step": 1352
+ },
+ {
+ "epoch": 4.956043956043956,
+ "grad_norm": 24.14516830444336,
+ "learning_rate": 3.365079365079365e-05,
+ "loss": 0.6001,
+ "step": 1353
+ },
+ {
+ "epoch": 4.95970695970696,
+ "grad_norm": 36.211891174316406,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.5598,
+ "step": 1354
+ },
+ {
+ "epoch": 4.9633699633699635,
+ "grad_norm": 23.723434448242188,
+ "learning_rate": 3.3601953601953606e-05,
+ "loss": 0.3133,
+ "step": 1355
+ },
+ {
+ "epoch": 4.967032967032967,
+ "grad_norm": 21.853551864624023,
+ "learning_rate": 3.357753357753358e-05,
+ "loss": 0.1974,
+ "step": 1356
+ },
+ {
+ "epoch": 4.970695970695971,
+ "grad_norm": 25.392358779907227,
+ "learning_rate": 3.3553113553113556e-05,
+ "loss": 0.5114,
+ "step": 1357
+ },
+ {
+ "epoch": 4.9743589743589745,
+ "grad_norm": 94.81107330322266,
+ "learning_rate": 3.3528693528693534e-05,
+ "loss": 0.4609,
+ "step": 1358
+ },
+ {
+ "epoch": 4.978021978021978,
+ "grad_norm": 24.487186431884766,
+ "learning_rate": 3.3504273504273506e-05,
+ "loss": 0.6613,
+ "step": 1359
+ },
+ {
+ "epoch": 4.981684981684982,
+ "grad_norm": 18.870473861694336,
+ "learning_rate": 3.3479853479853485e-05,
+ "loss": 0.1229,
+ "step": 1360
+ },
+ {
+ "epoch": 4.985347985347985,
+ "grad_norm": 17.630233764648438,
+ "learning_rate": 3.3455433455433456e-05,
+ "loss": 0.1836,
+ "step": 1361
+ },
+ {
+ "epoch": 4.989010989010989,
+ "grad_norm": 24.850299835205078,
+ "learning_rate": 3.343101343101343e-05,
+ "loss": 0.4499,
+ "step": 1362
+ },
+ {
+ "epoch": 4.992673992673993,
+ "grad_norm": 13.472710609436035,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.2,
+ "step": 1363
+ },
+ {
+ "epoch": 4.996336996336996,
+ "grad_norm": 25.112987518310547,
+ "learning_rate": 3.338217338217338e-05,
+ "loss": 0.2978,
+ "step": 1364
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 20.6419620513916,
+ "learning_rate": 3.3357753357753356e-05,
+ "loss": 0.1711,
+ "step": 1365
+ },
+ {
+ "epoch": 5.003663003663004,
+ "grad_norm": 20.868810653686523,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.1433,
+ "step": 1366
+ },
+ {
+ "epoch": 5.007326007326007,
+ "grad_norm": 15.846084594726562,
+ "learning_rate": 3.3308913308913307e-05,
+ "loss": 0.2174,
+ "step": 1367
+ },
+ {
+ "epoch": 5.010989010989011,
+ "grad_norm": 29.00075912475586,
+ "learning_rate": 3.3284493284493285e-05,
+ "loss": 0.5032,
+ "step": 1368
+ },
+ {
+ "epoch": 5.014652014652015,
+ "grad_norm": 33.520896911621094,
+ "learning_rate": 3.3260073260073264e-05,
+ "loss": 0.4061,
+ "step": 1369
+ },
+ {
+ "epoch": 5.018315018315018,
+ "grad_norm": 12.909339904785156,
+ "learning_rate": 3.3235653235653235e-05,
+ "loss": 0.0953,
+ "step": 1370
+ },
+ {
+ "epoch": 5.021978021978022,
+ "grad_norm": 0.2602078318595886,
+ "learning_rate": 3.3211233211233214e-05,
+ "loss": 0.0012,
+ "step": 1371
+ },
+ {
+ "epoch": 5.0256410256410255,
+ "grad_norm": 38.391422271728516,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 0.1825,
+ "step": 1372
+ },
+ {
+ "epoch": 5.029304029304029,
+ "grad_norm": 70.76541900634766,
+ "learning_rate": 3.3162393162393164e-05,
+ "loss": 0.846,
+ "step": 1373
+ },
+ {
+ "epoch": 5.032967032967033,
+ "grad_norm": 17.12116813659668,
+ "learning_rate": 3.3137973137973135e-05,
+ "loss": 0.0827,
+ "step": 1374
+ },
+ {
+ "epoch": 5.0366300366300365,
+ "grad_norm": 10.847224235534668,
+ "learning_rate": 3.3113553113553114e-05,
+ "loss": 0.0598,
+ "step": 1375
+ },
+ {
+ "epoch": 5.04029304029304,
+ "grad_norm": 31.552082061767578,
+ "learning_rate": 3.3089133089133086e-05,
+ "loss": 0.4466,
+ "step": 1376
+ },
+ {
+ "epoch": 5.043956043956044,
+ "grad_norm": 15.32805061340332,
+ "learning_rate": 3.3064713064713064e-05,
+ "loss": 0.0502,
+ "step": 1377
+ },
+ {
+ "epoch": 5.0476190476190474,
+ "grad_norm": 80.18537139892578,
+ "learning_rate": 3.304029304029304e-05,
+ "loss": 0.7377,
+ "step": 1378
+ },
+ {
+ "epoch": 5.051282051282051,
+ "grad_norm": 11.73173713684082,
+ "learning_rate": 3.3015873015873014e-05,
+ "loss": 0.1129,
+ "step": 1379
+ },
+ {
+ "epoch": 5.054945054945055,
+ "grad_norm": 46.249935150146484,
+ "learning_rate": 3.299145299145299e-05,
+ "loss": 0.5367,
+ "step": 1380
+ },
+ {
+ "epoch": 5.058608058608058,
+ "grad_norm": 9.185178756713867,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.0453,
+ "step": 1381
+ },
+ {
+ "epoch": 5.062271062271062,
+ "grad_norm": 40.83237838745117,
+ "learning_rate": 3.294261294261294e-05,
+ "loss": 0.1428,
+ "step": 1382
+ },
+ {
+ "epoch": 5.065934065934066,
+ "grad_norm": 32.31568908691406,
+ "learning_rate": 3.291819291819292e-05,
+ "loss": 0.3131,
+ "step": 1383
+ },
+ {
+ "epoch": 5.069597069597069,
+ "grad_norm": 5.372808456420898,
+ "learning_rate": 3.28937728937729e-05,
+ "loss": 0.0452,
+ "step": 1384
+ },
+ {
+ "epoch": 5.073260073260073,
+ "grad_norm": 3.0900495052337646,
+ "learning_rate": 3.286935286935287e-05,
+ "loss": 0.0175,
+ "step": 1385
+ },
+ {
+ "epoch": 5.076923076923077,
+ "grad_norm": 25.293724060058594,
+ "learning_rate": 3.284493284493285e-05,
+ "loss": 0.2162,
+ "step": 1386
+ },
+ {
+ "epoch": 5.08058608058608,
+ "grad_norm": 26.231664657592773,
+ "learning_rate": 3.282051282051282e-05,
+ "loss": 0.1764,
+ "step": 1387
+ },
+ {
+ "epoch": 5.084249084249084,
+ "grad_norm": 24.69008445739746,
+ "learning_rate": 3.279609279609279e-05,
+ "loss": 0.1019,
+ "step": 1388
+ },
+ {
+ "epoch": 5.087912087912088,
+ "grad_norm": 12.522343635559082,
+ "learning_rate": 3.277167277167277e-05,
+ "loss": 0.0424,
+ "step": 1389
+ },
+ {
+ "epoch": 5.091575091575091,
+ "grad_norm": 28.68439292907715,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.3441,
+ "step": 1390
+ },
+ {
+ "epoch": 5.095238095238095,
+ "grad_norm": 9.312751770019531,
+ "learning_rate": 3.272283272283272e-05,
+ "loss": 0.0675,
+ "step": 1391
+ },
+ {
+ "epoch": 5.0989010989010985,
+ "grad_norm": 12.041552543640137,
+ "learning_rate": 3.26984126984127e-05,
+ "loss": 0.049,
+ "step": 1392
+ },
+ {
+ "epoch": 5.102564102564102,
+ "grad_norm": 36.706031799316406,
+ "learning_rate": 3.267399267399267e-05,
+ "loss": 0.2947,
+ "step": 1393
+ },
+ {
+ "epoch": 5.106227106227106,
+ "grad_norm": 0.5009213089942932,
+ "learning_rate": 3.264957264957265e-05,
+ "loss": 0.0028,
+ "step": 1394
+ },
+ {
+ "epoch": 5.1098901098901095,
+ "grad_norm": 53.88454818725586,
+ "learning_rate": 3.262515262515263e-05,
+ "loss": 0.5004,
+ "step": 1395
+ },
+ {
+ "epoch": 5.113553113553113,
+ "grad_norm": 11.917198181152344,
+ "learning_rate": 3.26007326007326e-05,
+ "loss": 0.0734,
+ "step": 1396
+ },
+ {
+ "epoch": 5.117216117216117,
+ "grad_norm": 58.02888107299805,
+ "learning_rate": 3.257631257631258e-05,
+ "loss": 0.7099,
+ "step": 1397
+ },
+ {
+ "epoch": 5.1208791208791204,
+ "grad_norm": 18.3216609954834,
+ "learning_rate": 3.255189255189256e-05,
+ "loss": 0.1162,
+ "step": 1398
+ },
+ {
+ "epoch": 5.124542124542124,
+ "grad_norm": 7.598775863647461,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 0.0341,
+ "step": 1399
+ },
+ {
+ "epoch": 5.128205128205128,
+ "grad_norm": 199.40313720703125,
+ "learning_rate": 3.25030525030525e-05,
+ "loss": 0.3829,
+ "step": 1400
+ },
+ {
+ "epoch": 5.131868131868132,
+ "grad_norm": 6.528984546661377,
+ "learning_rate": 3.247863247863248e-05,
+ "loss": 0.041,
+ "step": 1401
+ },
+ {
+ "epoch": 5.135531135531136,
+ "grad_norm": 28.80277442932129,
+ "learning_rate": 3.245421245421245e-05,
+ "loss": 0.3511,
+ "step": 1402
+ },
+ {
+ "epoch": 5.13919413919414,
+ "grad_norm": 5.08656120300293,
+ "learning_rate": 3.242979242979243e-05,
+ "loss": 0.0403,
+ "step": 1403
+ },
+ {
+ "epoch": 5.142857142857143,
+ "grad_norm": 16.86358070373535,
+ "learning_rate": 3.240537240537241e-05,
+ "loss": 0.1676,
+ "step": 1404
+ },
+ {
+ "epoch": 5.146520146520147,
+ "grad_norm": 46.099613189697266,
+ "learning_rate": 3.238095238095238e-05,
+ "loss": 0.8096,
+ "step": 1405
+ },
+ {
+ "epoch": 5.1501831501831505,
+ "grad_norm": 26.01686668395996,
+ "learning_rate": 3.235653235653236e-05,
+ "loss": 0.1283,
+ "step": 1406
+ },
+ {
+ "epoch": 5.153846153846154,
+ "grad_norm": 4.826385498046875,
+ "learning_rate": 3.2332112332112336e-05,
+ "loss": 0.0328,
+ "step": 1407
+ },
+ {
+ "epoch": 5.157509157509158,
+ "grad_norm": 34.697593688964844,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.1306,
+ "step": 1408
+ },
+ {
+ "epoch": 5.1611721611721615,
+ "grad_norm": 21.331661224365234,
+ "learning_rate": 3.2283272283272286e-05,
+ "loss": 0.1302,
+ "step": 1409
+ },
+ {
+ "epoch": 5.164835164835165,
+ "grad_norm": 9.991851806640625,
+ "learning_rate": 3.2258852258852265e-05,
+ "loss": 0.0441,
+ "step": 1410
+ },
+ {
+ "epoch": 5.168498168498169,
+ "grad_norm": 26.641136169433594,
+ "learning_rate": 3.2234432234432237e-05,
+ "loss": 0.0894,
+ "step": 1411
+ },
+ {
+ "epoch": 5.172161172161172,
+ "grad_norm": 24.541366577148438,
+ "learning_rate": 3.2210012210012215e-05,
+ "loss": 0.1026,
+ "step": 1412
+ },
+ {
+ "epoch": 5.175824175824176,
+ "grad_norm": 44.62923049926758,
+ "learning_rate": 3.218559218559218e-05,
+ "loss": 0.1887,
+ "step": 1413
+ },
+ {
+ "epoch": 5.17948717948718,
+ "grad_norm": 19.28236198425293,
+ "learning_rate": 3.216117216117216e-05,
+ "loss": 0.0631,
+ "step": 1414
+ },
+ {
+ "epoch": 5.183150183150183,
+ "grad_norm": 10.39486026763916,
+ "learning_rate": 3.213675213675214e-05,
+ "loss": 0.0614,
+ "step": 1415
+ },
+ {
+ "epoch": 5.186813186813187,
+ "grad_norm": 32.476009368896484,
+ "learning_rate": 3.211233211233211e-05,
+ "loss": 0.2238,
+ "step": 1416
+ },
+ {
+ "epoch": 5.190476190476191,
+ "grad_norm": 9.828605651855469,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.0589,
+ "step": 1417
+ },
+ {
+ "epoch": 5.194139194139194,
+ "grad_norm": 50.0748291015625,
+ "learning_rate": 3.2063492063492065e-05,
+ "loss": 0.8225,
+ "step": 1418
+ },
+ {
+ "epoch": 5.197802197802198,
+ "grad_norm": 31.925779342651367,
+ "learning_rate": 3.203907203907204e-05,
+ "loss": 0.1824,
+ "step": 1419
+ },
+ {
+ "epoch": 5.201465201465202,
+ "grad_norm": 108.24534606933594,
+ "learning_rate": 3.2014652014652016e-05,
+ "loss": 2.3808,
+ "step": 1420
+ },
+ {
+ "epoch": 5.205128205128205,
+ "grad_norm": 54.39910888671875,
+ "learning_rate": 3.1990231990231994e-05,
+ "loss": 0.614,
+ "step": 1421
+ },
+ {
+ "epoch": 5.208791208791209,
+ "grad_norm": 13.70672607421875,
+ "learning_rate": 3.1965811965811966e-05,
+ "loss": 0.0366,
+ "step": 1422
+ },
+ {
+ "epoch": 5.212454212454213,
+ "grad_norm": 19.851043701171875,
+ "learning_rate": 3.1941391941391944e-05,
+ "loss": 0.1847,
+ "step": 1423
+ },
+ {
+ "epoch": 5.216117216117216,
+ "grad_norm": 1.041467547416687,
+ "learning_rate": 3.191697191697192e-05,
+ "loss": 0.0062,
+ "step": 1424
+ },
+ {
+ "epoch": 5.21978021978022,
+ "grad_norm": 10.629105567932129,
+ "learning_rate": 3.1892551892551894e-05,
+ "loss": 0.1058,
+ "step": 1425
+ },
+ {
+ "epoch": 5.2234432234432235,
+ "grad_norm": 25.597496032714844,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.1786,
+ "step": 1426
+ },
+ {
+ "epoch": 5.227106227106227,
+ "grad_norm": 21.409902572631836,
+ "learning_rate": 3.1843711843711844e-05,
+ "loss": 0.1354,
+ "step": 1427
+ },
+ {
+ "epoch": 5.230769230769231,
+ "grad_norm": 252.64599609375,
+ "learning_rate": 3.1819291819291816e-05,
+ "loss": 0.476,
+ "step": 1428
+ },
+ {
+ "epoch": 5.2344322344322345,
+ "grad_norm": 22.15670394897461,
+ "learning_rate": 3.1794871794871795e-05,
+ "loss": 0.2111,
+ "step": 1429
+ },
+ {
+ "epoch": 5.238095238095238,
+ "grad_norm": 37.93739700317383,
+ "learning_rate": 3.177045177045177e-05,
+ "loss": 0.391,
+ "step": 1430
+ },
+ {
+ "epoch": 5.241758241758242,
+ "grad_norm": 25.364606857299805,
+ "learning_rate": 3.1746031746031745e-05,
+ "loss": 0.3365,
+ "step": 1431
+ },
+ {
+ "epoch": 5.245421245421245,
+ "grad_norm": 20.658681869506836,
+ "learning_rate": 3.172161172161172e-05,
+ "loss": 0.2419,
+ "step": 1432
+ },
+ {
+ "epoch": 5.249084249084249,
+ "grad_norm": 11.507100105285645,
+ "learning_rate": 3.16971916971917e-05,
+ "loss": 0.074,
+ "step": 1433
+ },
+ {
+ "epoch": 5.252747252747253,
+ "grad_norm": 32.7891845703125,
+ "learning_rate": 3.167277167277167e-05,
+ "loss": 0.261,
+ "step": 1434
+ },
+ {
+ "epoch": 5.256410256410256,
+ "grad_norm": 10.153932571411133,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 0.0317,
+ "step": 1435
+ },
+ {
+ "epoch": 5.26007326007326,
+ "grad_norm": 16.879608154296875,
+ "learning_rate": 3.162393162393163e-05,
+ "loss": 0.0668,
+ "step": 1436
+ },
+ {
+ "epoch": 5.263736263736264,
+ "grad_norm": 5.040280818939209,
+ "learning_rate": 3.15995115995116e-05,
+ "loss": 0.0197,
+ "step": 1437
+ },
+ {
+ "epoch": 5.267399267399267,
+ "grad_norm": 32.5413818359375,
+ "learning_rate": 3.157509157509158e-05,
+ "loss": 0.2659,
+ "step": 1438
+ },
+ {
+ "epoch": 5.271062271062271,
+ "grad_norm": 54.41200637817383,
+ "learning_rate": 3.1550671550671545e-05,
+ "loss": 0.6863,
+ "step": 1439
+ },
+ {
+ "epoch": 5.274725274725275,
+ "grad_norm": 13.049643516540527,
+ "learning_rate": 3.1526251526251524e-05,
+ "loss": 0.0808,
+ "step": 1440
+ },
+ {
+ "epoch": 5.278388278388278,
+ "grad_norm": 37.76680374145508,
+ "learning_rate": 3.15018315018315e-05,
+ "loss": 0.2917,
+ "step": 1441
+ },
+ {
+ "epoch": 5.282051282051282,
+ "grad_norm": 22.97549057006836,
+ "learning_rate": 3.1477411477411474e-05,
+ "loss": 0.1115,
+ "step": 1442
+ },
+ {
+ "epoch": 5.285714285714286,
+ "grad_norm": 36.935115814208984,
+ "learning_rate": 3.145299145299145e-05,
+ "loss": 0.3719,
+ "step": 1443
+ },
+ {
+ "epoch": 5.289377289377289,
+ "grad_norm": 50.726070404052734,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.2635,
+ "step": 1444
+ },
+ {
+ "epoch": 5.293040293040293,
+ "grad_norm": 31.919862747192383,
+ "learning_rate": 3.14041514041514e-05,
+ "loss": 0.2158,
+ "step": 1445
+ },
+ {
+ "epoch": 5.2967032967032965,
+ "grad_norm": 2.463076114654541,
+ "learning_rate": 3.137973137973138e-05,
+ "loss": 0.0125,
+ "step": 1446
+ },
+ {
+ "epoch": 5.3003663003663,
+ "grad_norm": 12.970477104187012,
+ "learning_rate": 3.135531135531136e-05,
+ "loss": 0.0701,
+ "step": 1447
+ },
+ {
+ "epoch": 5.304029304029304,
+ "grad_norm": 30.649160385131836,
+ "learning_rate": 3.133089133089133e-05,
+ "loss": 0.3443,
+ "step": 1448
+ },
+ {
+ "epoch": 5.3076923076923075,
+ "grad_norm": 50.362281799316406,
+ "learning_rate": 3.130647130647131e-05,
+ "loss": 0.2792,
+ "step": 1449
+ },
+ {
+ "epoch": 5.311355311355311,
+ "grad_norm": 25.041845321655273,
+ "learning_rate": 3.128205128205129e-05,
+ "loss": 0.2127,
+ "step": 1450
+ },
+ {
+ "epoch": 5.315018315018315,
+ "grad_norm": 44.749515533447266,
+ "learning_rate": 3.125763125763126e-05,
+ "loss": 0.5353,
+ "step": 1451
+ },
+ {
+ "epoch": 5.318681318681318,
+ "grad_norm": 66.30032348632812,
+ "learning_rate": 3.123321123321123e-05,
+ "loss": 0.5775,
+ "step": 1452
+ },
+ {
+ "epoch": 5.322344322344322,
+ "grad_norm": 3.905022382736206,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.0229,
+ "step": 1453
+ },
+ {
+ "epoch": 5.326007326007326,
+ "grad_norm": 50.520259857177734,
+ "learning_rate": 3.118437118437118e-05,
+ "loss": 0.6539,
+ "step": 1454
+ },
+ {
+ "epoch": 5.329670329670329,
+ "grad_norm": 12.567275047302246,
+ "learning_rate": 3.115995115995116e-05,
+ "loss": 0.0493,
+ "step": 1455
+ },
+ {
+ "epoch": 5.333333333333333,
+ "grad_norm": 24.11554718017578,
+ "learning_rate": 3.113553113553114e-05,
+ "loss": 0.401,
+ "step": 1456
+ },
+ {
+ "epoch": 5.336996336996337,
+ "grad_norm": 6.885409832000732,
+ "learning_rate": 3.111111111111111e-05,
+ "loss": 0.022,
+ "step": 1457
+ },
+ {
+ "epoch": 5.34065934065934,
+ "grad_norm": 30.46776008605957,
+ "learning_rate": 3.108669108669109e-05,
+ "loss": 0.1968,
+ "step": 1458
+ },
+ {
+ "epoch": 5.344322344322344,
+ "grad_norm": 54.408790588378906,
+ "learning_rate": 3.106227106227107e-05,
+ "loss": 0.3258,
+ "step": 1459
+ },
+ {
+ "epoch": 5.347985347985348,
+ "grad_norm": 43.48060989379883,
+ "learning_rate": 3.103785103785104e-05,
+ "loss": 0.2663,
+ "step": 1460
+ },
+ {
+ "epoch": 5.351648351648351,
+ "grad_norm": 34.339962005615234,
+ "learning_rate": 3.101343101343102e-05,
+ "loss": 0.3313,
+ "step": 1461
+ },
+ {
+ "epoch": 5.355311355311355,
+ "grad_norm": 35.54948806762695,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.2377,
+ "step": 1462
+ },
+ {
+ "epoch": 5.358974358974359,
+ "grad_norm": 30.956071853637695,
+ "learning_rate": 3.096459096459097e-05,
+ "loss": 0.2388,
+ "step": 1463
+ },
+ {
+ "epoch": 5.362637362637362,
+ "grad_norm": 15.458950996398926,
+ "learning_rate": 3.094017094017094e-05,
+ "loss": 0.1196,
+ "step": 1464
+ },
+ {
+ "epoch": 5.366300366300366,
+ "grad_norm": 56.893463134765625,
+ "learning_rate": 3.091575091575091e-05,
+ "loss": 0.5377,
+ "step": 1465
+ },
+ {
+ "epoch": 5.36996336996337,
+ "grad_norm": 31.90789794921875,
+ "learning_rate": 3.089133089133089e-05,
+ "loss": 0.5008,
+ "step": 1466
+ },
+ {
+ "epoch": 5.373626373626374,
+ "grad_norm": 18.772607803344727,
+ "learning_rate": 3.086691086691087e-05,
+ "loss": 0.1838,
+ "step": 1467
+ },
+ {
+ "epoch": 5.377289377289378,
+ "grad_norm": 1.7131195068359375,
+ "learning_rate": 3.084249084249084e-05,
+ "loss": 0.0055,
+ "step": 1468
+ },
+ {
+ "epoch": 5.380952380952381,
+ "grad_norm": 6.398471355438232,
+ "learning_rate": 3.081807081807082e-05,
+ "loss": 0.0309,
+ "step": 1469
+ },
+ {
+ "epoch": 5.384615384615385,
+ "grad_norm": 13.847221374511719,
+ "learning_rate": 3.0793650793650796e-05,
+ "loss": 0.0785,
+ "step": 1470
+ },
+ {
+ "epoch": 5.388278388278389,
+ "grad_norm": 46.000179290771484,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.4114,
+ "step": 1471
+ },
+ {
+ "epoch": 5.391941391941392,
+ "grad_norm": 39.47720718383789,
+ "learning_rate": 3.0744810744810746e-05,
+ "loss": 0.9189,
+ "step": 1472
+ },
+ {
+ "epoch": 5.395604395604396,
+ "grad_norm": 30.588356018066406,
+ "learning_rate": 3.0720390720390724e-05,
+ "loss": 0.372,
+ "step": 1473
+ },
+ {
+ "epoch": 5.3992673992674,
+ "grad_norm": 83.61669921875,
+ "learning_rate": 3.0695970695970696e-05,
+ "loss": 0.6729,
+ "step": 1474
+ },
+ {
+ "epoch": 5.402930402930403,
+ "grad_norm": 14.384758949279785,
+ "learning_rate": 3.0671550671550675e-05,
+ "loss": 0.0825,
+ "step": 1475
+ },
+ {
+ "epoch": 5.406593406593407,
+ "grad_norm": 41.9291877746582,
+ "learning_rate": 3.064713064713065e-05,
+ "loss": 0.2128,
+ "step": 1476
+ },
+ {
+ "epoch": 5.410256410256411,
+ "grad_norm": 31.03643035888672,
+ "learning_rate": 3.062271062271062e-05,
+ "loss": 0.6978,
+ "step": 1477
+ },
+ {
+ "epoch": 5.413919413919414,
+ "grad_norm": 43.225547790527344,
+ "learning_rate": 3.0598290598290596e-05,
+ "loss": 0.6546,
+ "step": 1478
+ },
+ {
+ "epoch": 5.417582417582418,
+ "grad_norm": 37.172611236572266,
+ "learning_rate": 3.0573870573870575e-05,
+ "loss": 0.5024,
+ "step": 1479
+ },
+ {
+ "epoch": 5.4212454212454215,
+ "grad_norm": 52.93882369995117,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.9954,
+ "step": 1480
+ },
+ {
+ "epoch": 5.424908424908425,
+ "grad_norm": 30.838403701782227,
+ "learning_rate": 3.0525030525030525e-05,
+ "loss": 0.2539,
+ "step": 1481
+ },
+ {
+ "epoch": 5.428571428571429,
+ "grad_norm": 8.876139640808105,
+ "learning_rate": 3.0500610500610503e-05,
+ "loss": 0.0635,
+ "step": 1482
+ },
+ {
+ "epoch": 5.4322344322344325,
+ "grad_norm": 14.970293998718262,
+ "learning_rate": 3.0476190476190475e-05,
+ "loss": 0.1337,
+ "step": 1483
+ },
+ {
+ "epoch": 5.435897435897436,
+ "grad_norm": 29.44560432434082,
+ "learning_rate": 3.0451770451770454e-05,
+ "loss": 0.3719,
+ "step": 1484
+ },
+ {
+ "epoch": 5.43956043956044,
+ "grad_norm": 3.793294668197632,
+ "learning_rate": 3.0427350427350432e-05,
+ "loss": 0.0278,
+ "step": 1485
+ },
+ {
+ "epoch": 5.443223443223443,
+ "grad_norm": 37.418731689453125,
+ "learning_rate": 3.0402930402930404e-05,
+ "loss": 0.5153,
+ "step": 1486
+ },
+ {
+ "epoch": 5.446886446886447,
+ "grad_norm": 26.718324661254883,
+ "learning_rate": 3.037851037851038e-05,
+ "loss": 0.388,
+ "step": 1487
+ },
+ {
+ "epoch": 5.450549450549451,
+ "grad_norm": 28.463197708129883,
+ "learning_rate": 3.0354090354090357e-05,
+ "loss": 0.1956,
+ "step": 1488
+ },
+ {
+ "epoch": 5.454212454212454,
+ "grad_norm": 45.390602111816406,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.3694,
+ "step": 1489
+ },
+ {
+ "epoch": 5.457875457875458,
+ "grad_norm": 33.20753860473633,
+ "learning_rate": 3.0305250305250307e-05,
+ "loss": 0.2946,
+ "step": 1490
+ },
+ {
+ "epoch": 5.461538461538462,
+ "grad_norm": 66.42272186279297,
+ "learning_rate": 3.028083028083028e-05,
+ "loss": 0.9082,
+ "step": 1491
+ },
+ {
+ "epoch": 5.465201465201465,
+ "grad_norm": 33.85127258300781,
+ "learning_rate": 3.0256410256410257e-05,
+ "loss": 0.2362,
+ "step": 1492
+ },
+ {
+ "epoch": 5.468864468864469,
+ "grad_norm": 51.019256591796875,
+ "learning_rate": 3.0231990231990233e-05,
+ "loss": 0.5446,
+ "step": 1493
+ },
+ {
+ "epoch": 5.472527472527473,
+ "grad_norm": 30.998769760131836,
+ "learning_rate": 3.0207570207570204e-05,
+ "loss": 0.4739,
+ "step": 1494
+ },
+ {
+ "epoch": 5.476190476190476,
+ "grad_norm": 44.187957763671875,
+ "learning_rate": 3.0183150183150183e-05,
+ "loss": 0.3439,
+ "step": 1495
+ },
+ {
+ "epoch": 5.47985347985348,
+ "grad_norm": 50.70987319946289,
+ "learning_rate": 3.015873015873016e-05,
+ "loss": 0.1625,
+ "step": 1496
+ },
+ {
+ "epoch": 5.483516483516484,
+ "grad_norm": 33.66750717163086,
+ "learning_rate": 3.0134310134310133e-05,
+ "loss": 0.1927,
+ "step": 1497
+ },
+ {
+ "epoch": 5.487179487179487,
+ "grad_norm": 41.02281951904297,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.4102,
+ "step": 1498
+ },
+ {
+ "epoch": 5.490842490842491,
+ "grad_norm": 10.570262908935547,
+ "learning_rate": 3.008547008547009e-05,
+ "loss": 0.0664,
+ "step": 1499
+ },
+ {
+ "epoch": 5.4945054945054945,
+ "grad_norm": 54.08304214477539,
+ "learning_rate": 3.0061050061050058e-05,
+ "loss": 0.9224,
+ "step": 1500
+ },
+ {
+ "epoch": 5.498168498168498,
+ "grad_norm": 67.29845428466797,
+ "learning_rate": 3.0036630036630036e-05,
+ "loss": 0.8804,
+ "step": 1501
+ },
+ {
+ "epoch": 5.501831501831502,
+ "grad_norm": 13.707988739013672,
+ "learning_rate": 3.0012210012210015e-05,
+ "loss": 0.054,
+ "step": 1502
+ },
+ {
+ "epoch": 5.5054945054945055,
+ "grad_norm": 23.2605037689209,
+ "learning_rate": 2.998778998778999e-05,
+ "loss": 0.2343,
+ "step": 1503
+ },
+ {
+ "epoch": 5.509157509157509,
+ "grad_norm": 34.8508186340332,
+ "learning_rate": 2.9963369963369965e-05,
+ "loss": 0.4967,
+ "step": 1504
+ },
+ {
+ "epoch": 5.512820512820513,
+ "grad_norm": 20.457887649536133,
+ "learning_rate": 2.993894993894994e-05,
+ "loss": 0.1206,
+ "step": 1505
+ },
+ {
+ "epoch": 5.516483516483516,
+ "grad_norm": 34.01540756225586,
+ "learning_rate": 2.9914529914529915e-05,
+ "loss": 0.5167,
+ "step": 1506
+ },
+ {
+ "epoch": 5.52014652014652,
+ "grad_norm": 20.356525421142578,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.1363,
+ "step": 1507
+ },
+ {
+ "epoch": 5.523809523809524,
+ "grad_norm": 42.172054290771484,
+ "learning_rate": 2.9865689865689865e-05,
+ "loss": 0.2954,
+ "step": 1508
+ },
+ {
+ "epoch": 5.527472527472527,
+ "grad_norm": 16.814903259277344,
+ "learning_rate": 2.984126984126984e-05,
+ "loss": 0.0987,
+ "step": 1509
+ },
+ {
+ "epoch": 5.531135531135531,
+ "grad_norm": 34.35768508911133,
+ "learning_rate": 2.981684981684982e-05,
+ "loss": 0.215,
+ "step": 1510
+ },
+ {
+ "epoch": 5.534798534798535,
+ "grad_norm": 43.24858474731445,
+ "learning_rate": 2.9792429792429794e-05,
+ "loss": 0.3689,
+ "step": 1511
+ },
+ {
+ "epoch": 5.538461538461538,
+ "grad_norm": 39.85542297363281,
+ "learning_rate": 2.976800976800977e-05,
+ "loss": 0.6229,
+ "step": 1512
+ },
+ {
+ "epoch": 5.542124542124542,
+ "grad_norm": 17.576234817504883,
+ "learning_rate": 2.9743589743589744e-05,
+ "loss": 0.0994,
+ "step": 1513
+ },
+ {
+ "epoch": 5.545787545787546,
+ "grad_norm": 45.81230545043945,
+ "learning_rate": 2.971916971916972e-05,
+ "loss": 0.5225,
+ "step": 1514
+ },
+ {
+ "epoch": 5.549450549450549,
+ "grad_norm": 21.293874740600586,
+ "learning_rate": 2.9694749694749694e-05,
+ "loss": 0.1139,
+ "step": 1515
+ },
+ {
+ "epoch": 5.553113553113553,
+ "grad_norm": 3.8571391105651855,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 0.0257,
+ "step": 1516
+ },
+ {
+ "epoch": 5.556776556776557,
+ "grad_norm": 32.1104736328125,
+ "learning_rate": 2.9645909645909648e-05,
+ "loss": 0.2649,
+ "step": 1517
+ },
+ {
+ "epoch": 5.56043956043956,
+ "grad_norm": 26.141633987426758,
+ "learning_rate": 2.9621489621489623e-05,
+ "loss": 0.2003,
+ "step": 1518
+ },
+ {
+ "epoch": 5.564102564102564,
+ "grad_norm": 44.93999099731445,
+ "learning_rate": 2.9597069597069598e-05,
+ "loss": 0.4019,
+ "step": 1519
+ },
+ {
+ "epoch": 5.5677655677655675,
+ "grad_norm": 10.86503791809082,
+ "learning_rate": 2.9572649572649573e-05,
+ "loss": 0.112,
+ "step": 1520
+ },
+ {
+ "epoch": 5.571428571428571,
+ "grad_norm": 164.05909729003906,
+ "learning_rate": 2.9548229548229548e-05,
+ "loss": 0.7215,
+ "step": 1521
+ },
+ {
+ "epoch": 5.575091575091575,
+ "grad_norm": 39.3042106628418,
+ "learning_rate": 2.9523809523809523e-05,
+ "loss": 0.3393,
+ "step": 1522
+ },
+ {
+ "epoch": 5.5787545787545785,
+ "grad_norm": 28.68779182434082,
+ "learning_rate": 2.94993894993895e-05,
+ "loss": 0.1175,
+ "step": 1523
+ },
+ {
+ "epoch": 5.582417582417582,
+ "grad_norm": 19.018821716308594,
+ "learning_rate": 2.9474969474969477e-05,
+ "loss": 0.1185,
+ "step": 1524
+ },
+ {
+ "epoch": 5.586080586080586,
+ "grad_norm": 32.04712677001953,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.275,
+ "step": 1525
+ },
+ {
+ "epoch": 5.589743589743589,
+ "grad_norm": 51.721744537353516,
+ "learning_rate": 2.9426129426129427e-05,
+ "loss": 0.5128,
+ "step": 1526
+ },
+ {
+ "epoch": 5.593406593406593,
+ "grad_norm": 8.353523254394531,
+ "learning_rate": 2.9401709401709402e-05,
+ "loss": 0.0452,
+ "step": 1527
+ },
+ {
+ "epoch": 5.597069597069597,
+ "grad_norm": 60.5823860168457,
+ "learning_rate": 2.9377289377289377e-05,
+ "loss": 0.7654,
+ "step": 1528
+ },
+ {
+ "epoch": 5.6007326007326,
+ "grad_norm": 39.350582122802734,
+ "learning_rate": 2.9352869352869355e-05,
+ "loss": 0.2384,
+ "step": 1529
+ },
+ {
+ "epoch": 5.604395604395604,
+ "grad_norm": 13.450817108154297,
+ "learning_rate": 2.932844932844933e-05,
+ "loss": 0.045,
+ "step": 1530
+ },
+ {
+ "epoch": 5.608058608058608,
+ "grad_norm": 19.569263458251953,
+ "learning_rate": 2.9304029304029305e-05,
+ "loss": 0.0806,
+ "step": 1531
+ },
+ {
+ "epoch": 5.611721611721611,
+ "grad_norm": 15.188614845275879,
+ "learning_rate": 2.927960927960928e-05,
+ "loss": 0.0639,
+ "step": 1532
+ },
+ {
+ "epoch": 5.615384615384615,
+ "grad_norm": 64.51557922363281,
+ "learning_rate": 2.9255189255189255e-05,
+ "loss": 0.4426,
+ "step": 1533
+ },
+ {
+ "epoch": 5.619047619047619,
+ "grad_norm": 80.56137084960938,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.8589,
+ "step": 1534
+ },
+ {
+ "epoch": 5.622710622710622,
+ "grad_norm": 50.31480407714844,
+ "learning_rate": 2.9206349206349206e-05,
+ "loss": 1.1482,
+ "step": 1535
+ },
+ {
+ "epoch": 5.626373626373626,
+ "grad_norm": 12.077424049377441,
+ "learning_rate": 2.9181929181929184e-05,
+ "loss": 0.0766,
+ "step": 1536
+ },
+ {
+ "epoch": 5.63003663003663,
+ "grad_norm": 58.46347427368164,
+ "learning_rate": 2.915750915750916e-05,
+ "loss": 0.6512,
+ "step": 1537
+ },
+ {
+ "epoch": 5.633699633699633,
+ "grad_norm": 22.6331729888916,
+ "learning_rate": 2.913308913308913e-05,
+ "loss": 0.155,
+ "step": 1538
+ },
+ {
+ "epoch": 5.637362637362637,
+ "grad_norm": 49.88985824584961,
+ "learning_rate": 2.910866910866911e-05,
+ "loss": 0.4947,
+ "step": 1539
+ },
+ {
+ "epoch": 5.641025641025641,
+ "grad_norm": 64.37980651855469,
+ "learning_rate": 2.9084249084249084e-05,
+ "loss": 0.4665,
+ "step": 1540
+ },
+ {
+ "epoch": 5.644688644688645,
+ "grad_norm": 13.715937614440918,
+ "learning_rate": 2.905982905982906e-05,
+ "loss": 0.0937,
+ "step": 1541
+ },
+ {
+ "epoch": 5.648351648351649,
+ "grad_norm": 25.40440559387207,
+ "learning_rate": 2.9035409035409038e-05,
+ "loss": 0.2467,
+ "step": 1542
+ },
+ {
+ "epoch": 5.652014652014652,
+ "grad_norm": 52.885963439941406,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.5018,
+ "step": 1543
+ },
+ {
+ "epoch": 5.655677655677656,
+ "grad_norm": 7.535051345825195,
+ "learning_rate": 2.8986568986568988e-05,
+ "loss": 0.0607,
+ "step": 1544
+ },
+ {
+ "epoch": 5.65934065934066,
+ "grad_norm": 55.79275894165039,
+ "learning_rate": 2.8962148962148963e-05,
+ "loss": 1.0288,
+ "step": 1545
+ },
+ {
+ "epoch": 5.663003663003663,
+ "grad_norm": 21.050024032592773,
+ "learning_rate": 2.8937728937728938e-05,
+ "loss": 0.1987,
+ "step": 1546
+ },
+ {
+ "epoch": 5.666666666666667,
+ "grad_norm": 24.74984359741211,
+ "learning_rate": 2.8913308913308913e-05,
+ "loss": 0.202,
+ "step": 1547
+ },
+ {
+ "epoch": 5.670329670329671,
+ "grad_norm": 15.297272682189941,
+ "learning_rate": 2.8888888888888888e-05,
+ "loss": 0.127,
+ "step": 1548
+ },
+ {
+ "epoch": 5.673992673992674,
+ "grad_norm": 12.198046684265137,
+ "learning_rate": 2.8864468864468867e-05,
+ "loss": 0.115,
+ "step": 1549
+ },
+ {
+ "epoch": 5.677655677655678,
+ "grad_norm": 18.761402130126953,
+ "learning_rate": 2.8840048840048842e-05,
+ "loss": 0.1745,
+ "step": 1550
+ },
+ {
+ "epoch": 5.681318681318682,
+ "grad_norm": 26.97224235534668,
+ "learning_rate": 2.8815628815628813e-05,
+ "loss": 0.2554,
+ "step": 1551
+ },
+ {
+ "epoch": 5.684981684981685,
+ "grad_norm": 9.772692680358887,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.0927,
+ "step": 1552
+ },
+ {
+ "epoch": 5.688644688644689,
+ "grad_norm": 35.73431396484375,
+ "learning_rate": 2.8766788766788767e-05,
+ "loss": 0.4048,
+ "step": 1553
+ },
+ {
+ "epoch": 5.6923076923076925,
+ "grad_norm": 31.94872283935547,
+ "learning_rate": 2.8742368742368742e-05,
+ "loss": 0.5711,
+ "step": 1554
+ },
+ {
+ "epoch": 5.695970695970696,
+ "grad_norm": 45.44688034057617,
+ "learning_rate": 2.871794871794872e-05,
+ "loss": 0.7126,
+ "step": 1555
+ },
+ {
+ "epoch": 5.6996336996337,
+ "grad_norm": 45.74476623535156,
+ "learning_rate": 2.8693528693528696e-05,
+ "loss": 0.933,
+ "step": 1556
+ },
+ {
+ "epoch": 5.7032967032967035,
+ "grad_norm": 19.827136993408203,
+ "learning_rate": 2.866910866910867e-05,
+ "loss": 0.2433,
+ "step": 1557
+ },
+ {
+ "epoch": 5.706959706959707,
+ "grad_norm": 35.981903076171875,
+ "learning_rate": 2.8644688644688646e-05,
+ "loss": 0.3429,
+ "step": 1558
+ },
+ {
+ "epoch": 5.710622710622711,
+ "grad_norm": 19.642629623413086,
+ "learning_rate": 2.862026862026862e-05,
+ "loss": 0.1454,
+ "step": 1559
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 25.960437774658203,
+ "learning_rate": 2.8595848595848596e-05,
+ "loss": 0.2965,
+ "step": 1560
+ },
+ {
+ "epoch": 5.717948717948718,
+ "grad_norm": 49.41150665283203,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.3295,
+ "step": 1561
+ },
+ {
+ "epoch": 5.721611721611722,
+ "grad_norm": 10.984975814819336,
+ "learning_rate": 2.854700854700855e-05,
+ "loss": 0.0879,
+ "step": 1562
+ },
+ {
+ "epoch": 5.725274725274725,
+ "grad_norm": 26.814556121826172,
+ "learning_rate": 2.8522588522588524e-05,
+ "loss": 0.1456,
+ "step": 1563
+ },
+ {
+ "epoch": 5.728937728937729,
+ "grad_norm": 18.65792465209961,
+ "learning_rate": 2.8498168498168496e-05,
+ "loss": 0.161,
+ "step": 1564
+ },
+ {
+ "epoch": 5.732600732600733,
+ "grad_norm": 35.959590911865234,
+ "learning_rate": 2.8473748473748475e-05,
+ "loss": 0.672,
+ "step": 1565
+ },
+ {
+ "epoch": 5.736263736263736,
+ "grad_norm": 78.56996154785156,
+ "learning_rate": 2.844932844932845e-05,
+ "loss": 1.6393,
+ "step": 1566
+ },
+ {
+ "epoch": 5.73992673992674,
+ "grad_norm": 31.604719161987305,
+ "learning_rate": 2.8424908424908425e-05,
+ "loss": 0.5395,
+ "step": 1567
+ },
+ {
+ "epoch": 5.743589743589744,
+ "grad_norm": 14.373411178588867,
+ "learning_rate": 2.8400488400488403e-05,
+ "loss": 0.0688,
+ "step": 1568
+ },
+ {
+ "epoch": 5.747252747252747,
+ "grad_norm": 3.5718555450439453,
+ "learning_rate": 2.8376068376068378e-05,
+ "loss": 0.0161,
+ "step": 1569
+ },
+ {
+ "epoch": 5.750915750915751,
+ "grad_norm": 23.164167404174805,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.2169,
+ "step": 1570
+ },
+ {
+ "epoch": 5.754578754578755,
+ "grad_norm": 33.42869186401367,
+ "learning_rate": 2.8327228327228328e-05,
+ "loss": 0.3731,
+ "step": 1571
+ },
+ {
+ "epoch": 5.758241758241758,
+ "grad_norm": 32.016361236572266,
+ "learning_rate": 2.8302808302808303e-05,
+ "loss": 0.2243,
+ "step": 1572
+ },
+ {
+ "epoch": 5.761904761904762,
+ "grad_norm": 43.50716018676758,
+ "learning_rate": 2.827838827838828e-05,
+ "loss": 0.4229,
+ "step": 1573
+ },
+ {
+ "epoch": 5.7655677655677655,
+ "grad_norm": 4.828849792480469,
+ "learning_rate": 2.8253968253968253e-05,
+ "loss": 0.0295,
+ "step": 1574
+ },
+ {
+ "epoch": 5.769230769230769,
+ "grad_norm": 30.276351928710938,
+ "learning_rate": 2.8229548229548232e-05,
+ "loss": 0.399,
+ "step": 1575
+ },
+ {
+ "epoch": 5.772893772893773,
+ "grad_norm": 17.416358947753906,
+ "learning_rate": 2.8205128205128207e-05,
+ "loss": 0.1529,
+ "step": 1576
+ },
+ {
+ "epoch": 5.7765567765567765,
+ "grad_norm": 39.488468170166016,
+ "learning_rate": 2.818070818070818e-05,
+ "loss": 0.1245,
+ "step": 1577
+ },
+ {
+ "epoch": 5.78021978021978,
+ "grad_norm": 27.775489807128906,
+ "learning_rate": 2.8156288156288157e-05,
+ "loss": 0.1312,
+ "step": 1578
+ },
+ {
+ "epoch": 5.783882783882784,
+ "grad_norm": 35.964717864990234,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.5796,
+ "step": 1579
+ },
+ {
+ "epoch": 5.787545787545787,
+ "grad_norm": 53.15998077392578,
+ "learning_rate": 2.8107448107448107e-05,
+ "loss": 1.2654,
+ "step": 1580
+ },
+ {
+ "epoch": 5.791208791208791,
+ "grad_norm": 22.90069007873535,
+ "learning_rate": 2.8083028083028086e-05,
+ "loss": 0.2162,
+ "step": 1581
+ },
+ {
+ "epoch": 5.794871794871795,
+ "grad_norm": 45.380470275878906,
+ "learning_rate": 2.805860805860806e-05,
+ "loss": 0.4231,
+ "step": 1582
+ },
+ {
+ "epoch": 5.798534798534798,
+ "grad_norm": 32.56012725830078,
+ "learning_rate": 2.8034188034188032e-05,
+ "loss": 0.3711,
+ "step": 1583
+ },
+ {
+ "epoch": 5.802197802197802,
+ "grad_norm": 34.63470458984375,
+ "learning_rate": 2.800976800976801e-05,
+ "loss": 0.5414,
+ "step": 1584
+ },
+ {
+ "epoch": 5.805860805860806,
+ "grad_norm": 48.173797607421875,
+ "learning_rate": 2.7985347985347986e-05,
+ "loss": 1.2363,
+ "step": 1585
+ },
+ {
+ "epoch": 5.809523809523809,
+ "grad_norm": 27.12062644958496,
+ "learning_rate": 2.796092796092796e-05,
+ "loss": 0.4824,
+ "step": 1586
+ },
+ {
+ "epoch": 5.813186813186813,
+ "grad_norm": 23.13554573059082,
+ "learning_rate": 2.7936507936507936e-05,
+ "loss": 0.2321,
+ "step": 1587
+ },
+ {
+ "epoch": 5.816849816849817,
+ "grad_norm": 50.56953430175781,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.2158,
+ "step": 1588
+ },
+ {
+ "epoch": 5.82051282051282,
+ "grad_norm": 20.73900604248047,
+ "learning_rate": 2.788766788766789e-05,
+ "loss": 0.217,
+ "step": 1589
+ },
+ {
+ "epoch": 5.824175824175824,
+ "grad_norm": 17.288028717041016,
+ "learning_rate": 2.786324786324786e-05,
+ "loss": 0.2936,
+ "step": 1590
+ },
+ {
+ "epoch": 5.827838827838828,
+ "grad_norm": 22.067502975463867,
+ "learning_rate": 2.783882783882784e-05,
+ "loss": 0.1906,
+ "step": 1591
+ },
+ {
+ "epoch": 5.831501831501831,
+ "grad_norm": 14.928089141845703,
+ "learning_rate": 2.7814407814407815e-05,
+ "loss": 0.1296,
+ "step": 1592
+ },
+ {
+ "epoch": 5.835164835164835,
+ "grad_norm": 25.669342041015625,
+ "learning_rate": 2.778998778998779e-05,
+ "loss": 0.2475,
+ "step": 1593
+ },
+ {
+ "epoch": 5.8388278388278385,
+ "grad_norm": 20.302515029907227,
+ "learning_rate": 2.776556776556777e-05,
+ "loss": 0.2206,
+ "step": 1594
+ },
+ {
+ "epoch": 5.842490842490842,
+ "grad_norm": 9.004451751708984,
+ "learning_rate": 2.7741147741147743e-05,
+ "loss": 0.0694,
+ "step": 1595
+ },
+ {
+ "epoch": 5.846153846153846,
+ "grad_norm": 7.495925426483154,
+ "learning_rate": 2.7716727716727715e-05,
+ "loss": 0.0481,
+ "step": 1596
+ },
+ {
+ "epoch": 5.8498168498168495,
+ "grad_norm": 11.891450881958008,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0754,
+ "step": 1597
+ },
+ {
+ "epoch": 5.853479853479853,
+ "grad_norm": 27.53200340270996,
+ "learning_rate": 2.766788766788767e-05,
+ "loss": 0.1459,
+ "step": 1598
+ },
+ {
+ "epoch": 5.857142857142857,
+ "grad_norm": 4.103634357452393,
+ "learning_rate": 2.7643467643467644e-05,
+ "loss": 0.0256,
+ "step": 1599
+ },
+ {
+ "epoch": 5.860805860805861,
+ "grad_norm": 30.772586822509766,
+ "learning_rate": 2.761904761904762e-05,
+ "loss": 0.2748,
+ "step": 1600
+ },
+ {
+ "epoch": 5.864468864468865,
+ "grad_norm": 39.70070266723633,
+ "learning_rate": 2.7594627594627597e-05,
+ "loss": 1.3089,
+ "step": 1601
+ },
+ {
+ "epoch": 5.868131868131869,
+ "grad_norm": 54.576236724853516,
+ "learning_rate": 2.7570207570207572e-05,
+ "loss": 0.3549,
+ "step": 1602
+ },
+ {
+ "epoch": 5.871794871794872,
+ "grad_norm": 14.617592811584473,
+ "learning_rate": 2.7545787545787544e-05,
+ "loss": 0.0976,
+ "step": 1603
+ },
+ {
+ "epoch": 5.875457875457876,
+ "grad_norm": 11.900232315063477,
+ "learning_rate": 2.7521367521367522e-05,
+ "loss": 0.0518,
+ "step": 1604
+ },
+ {
+ "epoch": 5.8791208791208796,
+ "grad_norm": 62.00771713256836,
+ "learning_rate": 2.7496947496947497e-05,
+ "loss": 0.2866,
+ "step": 1605
+ },
+ {
+ "epoch": 5.882783882783883,
+ "grad_norm": 51.59067153930664,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 0.3357,
+ "step": 1606
+ },
+ {
+ "epoch": 5.886446886446887,
+ "grad_norm": 61.792476654052734,
+ "learning_rate": 2.744810744810745e-05,
+ "loss": 0.2923,
+ "step": 1607
+ },
+ {
+ "epoch": 5.8901098901098905,
+ "grad_norm": 12.737351417541504,
+ "learning_rate": 2.7423687423687426e-05,
+ "loss": 0.0893,
+ "step": 1608
+ },
+ {
+ "epoch": 5.893772893772894,
+ "grad_norm": 7.451726913452148,
+ "learning_rate": 2.7399267399267398e-05,
+ "loss": 0.044,
+ "step": 1609
+ },
+ {
+ "epoch": 5.897435897435898,
+ "grad_norm": 41.03788757324219,
+ "learning_rate": 2.7374847374847376e-05,
+ "loss": 0.4605,
+ "step": 1610
+ },
+ {
+ "epoch": 5.9010989010989015,
+ "grad_norm": 11.49382209777832,
+ "learning_rate": 2.735042735042735e-05,
+ "loss": 0.0754,
+ "step": 1611
+ },
+ {
+ "epoch": 5.904761904761905,
+ "grad_norm": 15.952816009521484,
+ "learning_rate": 2.7326007326007326e-05,
+ "loss": 0.0748,
+ "step": 1612
+ },
+ {
+ "epoch": 5.908424908424909,
+ "grad_norm": 8.492574691772461,
+ "learning_rate": 2.73015873015873e-05,
+ "loss": 0.0254,
+ "step": 1613
+ },
+ {
+ "epoch": 5.912087912087912,
+ "grad_norm": 17.973997116088867,
+ "learning_rate": 2.727716727716728e-05,
+ "loss": 0.1038,
+ "step": 1614
+ },
+ {
+ "epoch": 5.915750915750916,
+ "grad_norm": 6.881199359893799,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.0186,
+ "step": 1615
+ },
+ {
+ "epoch": 5.91941391941392,
+ "grad_norm": 28.51510238647461,
+ "learning_rate": 2.7228327228327227e-05,
+ "loss": 0.1283,
+ "step": 1616
+ },
+ {
+ "epoch": 5.923076923076923,
+ "grad_norm": 33.539485931396484,
+ "learning_rate": 2.7203907203907205e-05,
+ "loss": 0.6151,
+ "step": 1617
+ },
+ {
+ "epoch": 5.926739926739927,
+ "grad_norm": 57.307823181152344,
+ "learning_rate": 2.717948717948718e-05,
+ "loss": 0.3924,
+ "step": 1618
+ },
+ {
+ "epoch": 5.930402930402931,
+ "grad_norm": 43.010276794433594,
+ "learning_rate": 2.7155067155067155e-05,
+ "loss": 0.3942,
+ "step": 1619
+ },
+ {
+ "epoch": 5.934065934065934,
+ "grad_norm": 26.552478790283203,
+ "learning_rate": 2.7130647130647134e-05,
+ "loss": 0.1961,
+ "step": 1620
+ },
+ {
+ "epoch": 5.937728937728938,
+ "grad_norm": 78.5624008178711,
+ "learning_rate": 2.710622710622711e-05,
+ "loss": 1.0705,
+ "step": 1621
+ },
+ {
+ "epoch": 5.941391941391942,
+ "grad_norm": 37.23006057739258,
+ "learning_rate": 2.708180708180708e-05,
+ "loss": 0.4875,
+ "step": 1622
+ },
+ {
+ "epoch": 5.945054945054945,
+ "grad_norm": 42.23412322998047,
+ "learning_rate": 2.705738705738706e-05,
+ "loss": 0.3795,
+ "step": 1623
+ },
+ {
+ "epoch": 5.948717948717949,
+ "grad_norm": 42.677696228027344,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.3414,
+ "step": 1624
+ },
+ {
+ "epoch": 5.9523809523809526,
+ "grad_norm": 24.182249069213867,
+ "learning_rate": 2.700854700854701e-05,
+ "loss": 0.0814,
+ "step": 1625
+ },
+ {
+ "epoch": 5.956043956043956,
+ "grad_norm": 11.87109088897705,
+ "learning_rate": 2.6984126984126984e-05,
+ "loss": 0.0816,
+ "step": 1626
+ },
+ {
+ "epoch": 5.95970695970696,
+ "grad_norm": 7.575586318969727,
+ "learning_rate": 2.6959706959706962e-05,
+ "loss": 0.049,
+ "step": 1627
+ },
+ {
+ "epoch": 5.9633699633699635,
+ "grad_norm": 4.052019119262695,
+ "learning_rate": 2.6935286935286934e-05,
+ "loss": 0.0276,
+ "step": 1628
+ },
+ {
+ "epoch": 5.967032967032967,
+ "grad_norm": 24.308481216430664,
+ "learning_rate": 2.691086691086691e-05,
+ "loss": 0.2324,
+ "step": 1629
+ },
+ {
+ "epoch": 5.970695970695971,
+ "grad_norm": 32.5918083190918,
+ "learning_rate": 2.6886446886446888e-05,
+ "loss": 0.42,
+ "step": 1630
+ },
+ {
+ "epoch": 5.9743589743589745,
+ "grad_norm": 16.758689880371094,
+ "learning_rate": 2.6862026862026863e-05,
+ "loss": 0.1857,
+ "step": 1631
+ },
+ {
+ "epoch": 5.978021978021978,
+ "grad_norm": 24.96327781677246,
+ "learning_rate": 2.6837606837606838e-05,
+ "loss": 0.3293,
+ "step": 1632
+ },
+ {
+ "epoch": 5.981684981684982,
+ "grad_norm": 7.734143257141113,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 0.0644,
+ "step": 1633
+ },
+ {
+ "epoch": 5.985347985347985,
+ "grad_norm": 49.89662551879883,
+ "learning_rate": 2.678876678876679e-05,
+ "loss": 0.7976,
+ "step": 1634
+ },
+ {
+ "epoch": 5.989010989010989,
+ "grad_norm": 20.55232810974121,
+ "learning_rate": 2.6764346764346763e-05,
+ "loss": 0.1911,
+ "step": 1635
+ },
+ {
+ "epoch": 5.992673992673993,
+ "grad_norm": 11.190897941589355,
+ "learning_rate": 2.673992673992674e-05,
+ "loss": 0.0604,
+ "step": 1636
+ },
+ {
+ "epoch": 5.996336996336996,
+ "grad_norm": 24.896806716918945,
+ "learning_rate": 2.6715506715506716e-05,
+ "loss": 0.2467,
+ "step": 1637
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 39.5569953918457,
+ "learning_rate": 2.669108669108669e-05,
+ "loss": 0.8073,
+ "step": 1638
+ },
+ {
+ "epoch": 6.003663003663004,
+ "grad_norm": 4.203596591949463,
+ "learning_rate": 2.6666666666666667e-05,
+ "loss": 0.0266,
+ "step": 1639
+ },
+ {
+ "epoch": 6.007326007326007,
+ "grad_norm": 6.89768648147583,
+ "learning_rate": 2.6642246642246645e-05,
+ "loss": 0.0664,
+ "step": 1640
+ },
+ {
+ "epoch": 6.010989010989011,
+ "grad_norm": 33.19546890258789,
+ "learning_rate": 2.6617826617826617e-05,
+ "loss": 0.6504,
+ "step": 1641
+ },
+ {
+ "epoch": 6.014652014652015,
+ "grad_norm": 8.577303886413574,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.0715,
+ "step": 1642
+ },
+ {
+ "epoch": 6.018315018315018,
+ "grad_norm": 11.48106861114502,
+ "learning_rate": 2.656898656898657e-05,
+ "loss": 0.0952,
+ "step": 1643
+ },
+ {
+ "epoch": 6.021978021978022,
+ "grad_norm": 16.87290382385254,
+ "learning_rate": 2.6544566544566545e-05,
+ "loss": 0.1156,
+ "step": 1644
+ },
+ {
+ "epoch": 6.0256410256410255,
+ "grad_norm": 5.304442405700684,
+ "learning_rate": 2.652014652014652e-05,
+ "loss": 0.0574,
+ "step": 1645
+ },
+ {
+ "epoch": 6.029304029304029,
+ "grad_norm": 12.058186531066895,
+ "learning_rate": 2.64957264957265e-05,
+ "loss": 0.1013,
+ "step": 1646
+ },
+ {
+ "epoch": 6.032967032967033,
+ "grad_norm": 11.20624828338623,
+ "learning_rate": 2.6471306471306474e-05,
+ "loss": 0.0637,
+ "step": 1647
+ },
+ {
+ "epoch": 6.0366300366300365,
+ "grad_norm": 20.595020294189453,
+ "learning_rate": 2.6446886446886446e-05,
+ "loss": 0.1282,
+ "step": 1648
+ },
+ {
+ "epoch": 6.04029304029304,
+ "grad_norm": 32.712425231933594,
+ "learning_rate": 2.6422466422466424e-05,
+ "loss": 1.0173,
+ "step": 1649
+ },
+ {
+ "epoch": 6.043956043956044,
+ "grad_norm": 31.00687599182129,
+ "learning_rate": 2.63980463980464e-05,
+ "loss": 0.2822,
+ "step": 1650
+ },
+ {
+ "epoch": 6.0476190476190474,
+ "grad_norm": 15.361159324645996,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.08,
+ "step": 1651
+ },
+ {
+ "epoch": 6.051282051282051,
+ "grad_norm": 75.07713317871094,
+ "learning_rate": 2.634920634920635e-05,
+ "loss": 0.3835,
+ "step": 1652
+ },
+ {
+ "epoch": 6.054945054945055,
+ "grad_norm": 28.741546630859375,
+ "learning_rate": 2.6324786324786328e-05,
+ "loss": 0.1257,
+ "step": 1653
+ },
+ {
+ "epoch": 6.058608058608058,
+ "grad_norm": 173.8939971923828,
+ "learning_rate": 2.63003663003663e-05,
+ "loss": 0.0744,
+ "step": 1654
+ },
+ {
+ "epoch": 6.062271062271062,
+ "grad_norm": 8.212196350097656,
+ "learning_rate": 2.6275946275946274e-05,
+ "loss": 0.027,
+ "step": 1655
+ },
+ {
+ "epoch": 6.065934065934066,
+ "grad_norm": 56.60511779785156,
+ "learning_rate": 2.6251526251526253e-05,
+ "loss": 0.7258,
+ "step": 1656
+ },
+ {
+ "epoch": 6.069597069597069,
+ "grad_norm": 14.454882621765137,
+ "learning_rate": 2.6227106227106228e-05,
+ "loss": 0.0762,
+ "step": 1657
+ },
+ {
+ "epoch": 6.073260073260073,
+ "grad_norm": 40.66373062133789,
+ "learning_rate": 2.6202686202686203e-05,
+ "loss": 0.2663,
+ "step": 1658
+ },
+ {
+ "epoch": 6.076923076923077,
+ "grad_norm": 45.68836212158203,
+ "learning_rate": 2.617826617826618e-05,
+ "loss": 0.4244,
+ "step": 1659
+ },
+ {
+ "epoch": 6.08058608058608,
+ "grad_norm": 16.69190788269043,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.1249,
+ "step": 1660
+ },
+ {
+ "epoch": 6.084249084249084,
+ "grad_norm": 58.633358001708984,
+ "learning_rate": 2.6129426129426128e-05,
+ "loss": 0.3699,
+ "step": 1661
+ },
+ {
+ "epoch": 6.087912087912088,
+ "grad_norm": 8.262107849121094,
+ "learning_rate": 2.6105006105006107e-05,
+ "loss": 0.0332,
+ "step": 1662
+ },
+ {
+ "epoch": 6.091575091575091,
+ "grad_norm": 1.7256231307983398,
+ "learning_rate": 2.6080586080586082e-05,
+ "loss": 0.0073,
+ "step": 1663
+ },
+ {
+ "epoch": 6.095238095238095,
+ "grad_norm": 27.97568130493164,
+ "learning_rate": 2.6056166056166057e-05,
+ "loss": 0.3567,
+ "step": 1664
+ },
+ {
+ "epoch": 6.0989010989010985,
+ "grad_norm": 8.167609214782715,
+ "learning_rate": 2.6031746031746032e-05,
+ "loss": 0.0328,
+ "step": 1665
+ },
+ {
+ "epoch": 6.102564102564102,
+ "grad_norm": 8.547285079956055,
+ "learning_rate": 2.600732600732601e-05,
+ "loss": 0.0438,
+ "step": 1666
+ },
+ {
+ "epoch": 6.106227106227106,
+ "grad_norm": 38.85865020751953,
+ "learning_rate": 2.5982905982905982e-05,
+ "loss": 0.3492,
+ "step": 1667
+ },
+ {
+ "epoch": 6.1098901098901095,
+ "grad_norm": 18.36060333251953,
+ "learning_rate": 2.5958485958485957e-05,
+ "loss": 0.0411,
+ "step": 1668
+ },
+ {
+ "epoch": 6.113553113553113,
+ "grad_norm": 8.013274192810059,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.0461,
+ "step": 1669
+ },
+ {
+ "epoch": 6.117216117216117,
+ "grad_norm": 41.88865280151367,
+ "learning_rate": 2.590964590964591e-05,
+ "loss": 0.7209,
+ "step": 1670
+ },
+ {
+ "epoch": 6.1208791208791204,
+ "grad_norm": 93.57958221435547,
+ "learning_rate": 2.5885225885225886e-05,
+ "loss": 0.5563,
+ "step": 1671
+ },
+ {
+ "epoch": 6.124542124542124,
+ "grad_norm": 6.878098964691162,
+ "learning_rate": 2.5860805860805864e-05,
+ "loss": 0.0213,
+ "step": 1672
+ },
+ {
+ "epoch": 6.128205128205128,
+ "grad_norm": 41.09592819213867,
+ "learning_rate": 2.5836385836385836e-05,
+ "loss": 0.5724,
+ "step": 1673
+ },
+ {
+ "epoch": 6.131868131868132,
+ "grad_norm": 8.257637977600098,
+ "learning_rate": 2.581196581196581e-05,
+ "loss": 0.0396,
+ "step": 1674
+ },
+ {
+ "epoch": 6.135531135531136,
+ "grad_norm": 24.022602081298828,
+ "learning_rate": 2.578754578754579e-05,
+ "loss": 0.0623,
+ "step": 1675
+ },
+ {
+ "epoch": 6.13919413919414,
+ "grad_norm": 46.46554946899414,
+ "learning_rate": 2.5763125763125764e-05,
+ "loss": 0.4135,
+ "step": 1676
+ },
+ {
+ "epoch": 6.142857142857143,
+ "grad_norm": 96.42303466796875,
+ "learning_rate": 2.573870573870574e-05,
+ "loss": 0.4724,
+ "step": 1677
+ },
+ {
+ "epoch": 6.146520146520147,
+ "grad_norm": 8.401265144348145,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.0396,
+ "step": 1678
+ },
+ {
+ "epoch": 6.1501831501831505,
+ "grad_norm": 29.346588134765625,
+ "learning_rate": 2.5689865689865693e-05,
+ "loss": 0.1959,
+ "step": 1679
+ },
+ {
+ "epoch": 6.153846153846154,
+ "grad_norm": 4.874574661254883,
+ "learning_rate": 2.5665445665445665e-05,
+ "loss": 0.0295,
+ "step": 1680
+ },
+ {
+ "epoch": 6.157509157509158,
+ "grad_norm": 6.668759346008301,
+ "learning_rate": 2.564102564102564e-05,
+ "loss": 0.0408,
+ "step": 1681
+ },
+ {
+ "epoch": 6.1611721611721615,
+ "grad_norm": 21.22933006286621,
+ "learning_rate": 2.5616605616605618e-05,
+ "loss": 0.1591,
+ "step": 1682
+ },
+ {
+ "epoch": 6.164835164835165,
+ "grad_norm": 2.3441169261932373,
+ "learning_rate": 2.5592185592185593e-05,
+ "loss": 0.0138,
+ "step": 1683
+ },
+ {
+ "epoch": 6.168498168498169,
+ "grad_norm": 31.336048126220703,
+ "learning_rate": 2.5567765567765568e-05,
+ "loss": 0.321,
+ "step": 1684
+ },
+ {
+ "epoch": 6.172161172161172,
+ "grad_norm": 39.17483139038086,
+ "learning_rate": 2.5543345543345547e-05,
+ "loss": 0.5268,
+ "step": 1685
+ },
+ {
+ "epoch": 6.175824175824176,
+ "grad_norm": 6.984042644500732,
+ "learning_rate": 2.551892551892552e-05,
+ "loss": 0.0377,
+ "step": 1686
+ },
+ {
+ "epoch": 6.17948717948718,
+ "grad_norm": 21.946880340576172,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.1557,
+ "step": 1687
+ },
+ {
+ "epoch": 6.183150183150183,
+ "grad_norm": 23.447084426879883,
+ "learning_rate": 2.5470085470085472e-05,
+ "loss": 0.1996,
+ "step": 1688
+ },
+ {
+ "epoch": 6.186813186813187,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 2.5445665445665447e-05,
+ "loss": 0.0327,
+ "step": 1689
+ },
+ {
+ "epoch": 6.190476190476191,
+ "grad_norm": 11.126763343811035,
+ "learning_rate": 2.5421245421245422e-05,
+ "loss": 0.0335,
+ "step": 1690
+ },
+ {
+ "epoch": 6.194139194139194,
+ "grad_norm": 42.23086929321289,
+ "learning_rate": 2.5396825396825397e-05,
+ "loss": 0.3307,
+ "step": 1691
+ },
+ {
+ "epoch": 6.197802197802198,
+ "grad_norm": 26.350086212158203,
+ "learning_rate": 2.5372405372405376e-05,
+ "loss": 0.153,
+ "step": 1692
+ },
+ {
+ "epoch": 6.201465201465202,
+ "grad_norm": 6.667046546936035,
+ "learning_rate": 2.5347985347985347e-05,
+ "loss": 0.011,
+ "step": 1693
+ },
+ {
+ "epoch": 6.205128205128205,
+ "grad_norm": 63.5737190246582,
+ "learning_rate": 2.5323565323565322e-05,
+ "loss": 0.5602,
+ "step": 1694
+ },
+ {
+ "epoch": 6.208791208791209,
+ "grad_norm": 54.20994567871094,
+ "learning_rate": 2.52991452991453e-05,
+ "loss": 0.6584,
+ "step": 1695
+ },
+ {
+ "epoch": 6.212454212454213,
+ "grad_norm": 55.79521942138672,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.5259,
+ "step": 1696
+ },
+ {
+ "epoch": 6.216117216117216,
+ "grad_norm": 65.18093872070312,
+ "learning_rate": 2.525030525030525e-05,
+ "loss": 0.308,
+ "step": 1697
+ },
+ {
+ "epoch": 6.21978021978022,
+ "grad_norm": 9.979923248291016,
+ "learning_rate": 2.522588522588523e-05,
+ "loss": 0.0312,
+ "step": 1698
+ },
+ {
+ "epoch": 6.2234432234432235,
+ "grad_norm": 62.80887222290039,
+ "learning_rate": 2.52014652014652e-05,
+ "loss": 0.3198,
+ "step": 1699
+ },
+ {
+ "epoch": 6.227106227106227,
+ "grad_norm": 63.2298583984375,
+ "learning_rate": 2.5177045177045176e-05,
+ "loss": 0.5223,
+ "step": 1700
+ },
+ {
+ "epoch": 6.230769230769231,
+ "grad_norm": 49.968502044677734,
+ "learning_rate": 2.515262515262515e-05,
+ "loss": 0.5554,
+ "step": 1701
+ },
+ {
+ "epoch": 6.2344322344322345,
+ "grad_norm": 29.190656661987305,
+ "learning_rate": 2.512820512820513e-05,
+ "loss": 0.2286,
+ "step": 1702
+ },
+ {
+ "epoch": 6.238095238095238,
+ "grad_norm": 38.25267028808594,
+ "learning_rate": 2.5103785103785105e-05,
+ "loss": 0.1948,
+ "step": 1703
+ },
+ {
+ "epoch": 6.241758241758242,
+ "grad_norm": 57.620323181152344,
+ "learning_rate": 2.507936507936508e-05,
+ "loss": 0.5533,
+ "step": 1704
+ },
+ {
+ "epoch": 6.245421245421245,
+ "grad_norm": 21.61467170715332,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.0935,
+ "step": 1705
+ },
+ {
+ "epoch": 6.249084249084249,
+ "grad_norm": 19.86629867553711,
+ "learning_rate": 2.503052503052503e-05,
+ "loss": 0.0852,
+ "step": 1706
+ },
+ {
+ "epoch": 6.252747252747253,
+ "grad_norm": 59.41017150878906,
+ "learning_rate": 2.5006105006105005e-05,
+ "loss": 0.5853,
+ "step": 1707
+ },
+ {
+ "epoch": 6.256410256410256,
+ "grad_norm": 24.542570114135742,
+ "learning_rate": 2.4981684981684983e-05,
+ "loss": 0.0935,
+ "step": 1708
+ },
+ {
+ "epoch": 6.26007326007326,
+ "grad_norm": 29.034879684448242,
+ "learning_rate": 2.495726495726496e-05,
+ "loss": 0.1929,
+ "step": 1709
+ },
+ {
+ "epoch": 6.263736263736264,
+ "grad_norm": 17.3880672454834,
+ "learning_rate": 2.4932844932844933e-05,
+ "loss": 0.0927,
+ "step": 1710
+ },
+ {
+ "epoch": 6.267399267399267,
+ "grad_norm": 90.0419692993164,
+ "learning_rate": 2.4908424908424912e-05,
+ "loss": 1.1172,
+ "step": 1711
+ },
+ {
+ "epoch": 6.271062271062271,
+ "grad_norm": 4.710697650909424,
+ "learning_rate": 2.4884004884004884e-05,
+ "loss": 0.0207,
+ "step": 1712
+ },
+ {
+ "epoch": 6.274725274725275,
+ "grad_norm": 95.93651580810547,
+ "learning_rate": 2.485958485958486e-05,
+ "loss": 0.7137,
+ "step": 1713
+ },
+ {
+ "epoch": 6.278388278388278,
+ "grad_norm": 92.31869506835938,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.2076,
+ "step": 1714
+ },
+ {
+ "epoch": 6.282051282051282,
+ "grad_norm": 66.66917419433594,
+ "learning_rate": 2.4810744810744812e-05,
+ "loss": 0.8763,
+ "step": 1715
+ },
+ {
+ "epoch": 6.285714285714286,
+ "grad_norm": 94.52323150634766,
+ "learning_rate": 2.4786324786324787e-05,
+ "loss": 0.5962,
+ "step": 1716
+ },
+ {
+ "epoch": 6.289377289377289,
+ "grad_norm": 31.169715881347656,
+ "learning_rate": 2.4761904761904762e-05,
+ "loss": 0.1906,
+ "step": 1717
+ },
+ {
+ "epoch": 6.293040293040293,
+ "grad_norm": 54.97831726074219,
+ "learning_rate": 2.4737484737484737e-05,
+ "loss": 0.3767,
+ "step": 1718
+ },
+ {
+ "epoch": 6.2967032967032965,
+ "grad_norm": 25.52306365966797,
+ "learning_rate": 2.4713064713064712e-05,
+ "loss": 0.2128,
+ "step": 1719
+ },
+ {
+ "epoch": 6.3003663003663,
+ "grad_norm": 12.478558540344238,
+ "learning_rate": 2.4688644688644688e-05,
+ "loss": 0.0713,
+ "step": 1720
+ },
+ {
+ "epoch": 6.304029304029304,
+ "grad_norm": 27.71872329711914,
+ "learning_rate": 2.4664224664224666e-05,
+ "loss": 0.319,
+ "step": 1721
+ },
+ {
+ "epoch": 6.3076923076923075,
+ "grad_norm": 44.587589263916016,
+ "learning_rate": 2.463980463980464e-05,
+ "loss": 0.1997,
+ "step": 1722
+ },
+ {
+ "epoch": 6.311355311355311,
+ "grad_norm": 11.289876937866211,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 0.0694,
+ "step": 1723
+ },
+ {
+ "epoch": 6.315018315018315,
+ "grad_norm": 47.27211380004883,
+ "learning_rate": 2.4590964590964595e-05,
+ "loss": 0.249,
+ "step": 1724
+ },
+ {
+ "epoch": 6.318681318681318,
+ "grad_norm": 34.143611907958984,
+ "learning_rate": 2.4566544566544566e-05,
+ "loss": 0.3645,
+ "step": 1725
+ },
+ {
+ "epoch": 6.322344322344322,
+ "grad_norm": 33.73476791381836,
+ "learning_rate": 2.454212454212454e-05,
+ "loss": 0.5412,
+ "step": 1726
+ },
+ {
+ "epoch": 6.326007326007326,
+ "grad_norm": 20.03452491760254,
+ "learning_rate": 2.4517704517704516e-05,
+ "loss": 0.0966,
+ "step": 1727
+ },
+ {
+ "epoch": 6.329670329670329,
+ "grad_norm": 39.63338088989258,
+ "learning_rate": 2.4493284493284495e-05,
+ "loss": 0.2953,
+ "step": 1728
+ },
+ {
+ "epoch": 6.333333333333333,
+ "grad_norm": 42.99127960205078,
+ "learning_rate": 2.446886446886447e-05,
+ "loss": 0.5328,
+ "step": 1729
+ },
+ {
+ "epoch": 6.336996336996337,
+ "grad_norm": 18.581249237060547,
+ "learning_rate": 2.4444444444444445e-05,
+ "loss": 0.1095,
+ "step": 1730
+ },
+ {
+ "epoch": 6.34065934065934,
+ "grad_norm": 33.29508590698242,
+ "learning_rate": 2.442002442002442e-05,
+ "loss": 0.1608,
+ "step": 1731
+ },
+ {
+ "epoch": 6.344322344322344,
+ "grad_norm": 103.12726593017578,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.9665,
+ "step": 1732
+ },
+ {
+ "epoch": 6.347985347985348,
+ "grad_norm": 55.45216369628906,
+ "learning_rate": 2.437118437118437e-05,
+ "loss": 0.4441,
+ "step": 1733
+ },
+ {
+ "epoch": 6.351648351648351,
+ "grad_norm": 68.68230438232422,
+ "learning_rate": 2.434676434676435e-05,
+ "loss": 0.6929,
+ "step": 1734
+ },
+ {
+ "epoch": 6.355311355311355,
+ "grad_norm": 99.91059875488281,
+ "learning_rate": 2.4322344322344324e-05,
+ "loss": 0.599,
+ "step": 1735
+ },
+ {
+ "epoch": 6.358974358974359,
+ "grad_norm": 24.994863510131836,
+ "learning_rate": 2.42979242979243e-05,
+ "loss": 0.2212,
+ "step": 1736
+ },
+ {
+ "epoch": 6.362637362637362,
+ "grad_norm": 106.0428466796875,
+ "learning_rate": 2.4273504273504277e-05,
+ "loss": 0.685,
+ "step": 1737
+ },
+ {
+ "epoch": 6.366300366300366,
+ "grad_norm": 37.730712890625,
+ "learning_rate": 2.424908424908425e-05,
+ "loss": 0.0792,
+ "step": 1738
+ },
+ {
+ "epoch": 6.36996336996337,
+ "grad_norm": 44.056556701660156,
+ "learning_rate": 2.4224664224664224e-05,
+ "loss": 0.5062,
+ "step": 1739
+ },
+ {
+ "epoch": 6.373626373626374,
+ "grad_norm": 72.15331268310547,
+ "learning_rate": 2.42002442002442e-05,
+ "loss": 0.7541,
+ "step": 1740
+ },
+ {
+ "epoch": 6.377289377289378,
+ "grad_norm": 151.57752990722656,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 0.9455,
+ "step": 1741
+ },
+ {
+ "epoch": 6.380952380952381,
+ "grad_norm": 62.12364196777344,
+ "learning_rate": 2.4151404151404152e-05,
+ "loss": 0.3055,
+ "step": 1742
+ },
+ {
+ "epoch": 6.384615384615385,
+ "grad_norm": 21.725858688354492,
+ "learning_rate": 2.4126984126984128e-05,
+ "loss": 0.0691,
+ "step": 1743
+ },
+ {
+ "epoch": 6.388278388278389,
+ "grad_norm": 60.754615783691406,
+ "learning_rate": 2.4102564102564103e-05,
+ "loss": 0.5273,
+ "step": 1744
+ },
+ {
+ "epoch": 6.391941391941392,
+ "grad_norm": 63.324684143066406,
+ "learning_rate": 2.4078144078144078e-05,
+ "loss": 0.2735,
+ "step": 1745
+ },
+ {
+ "epoch": 6.395604395604396,
+ "grad_norm": 79.82772064208984,
+ "learning_rate": 2.4053724053724053e-05,
+ "loss": 1.1766,
+ "step": 1746
+ },
+ {
+ "epoch": 6.3992673992674,
+ "grad_norm": 42.69222640991211,
+ "learning_rate": 2.402930402930403e-05,
+ "loss": 0.3417,
+ "step": 1747
+ },
+ {
+ "epoch": 6.402930402930403,
+ "grad_norm": 125.5120849609375,
+ "learning_rate": 2.4004884004884006e-05,
+ "loss": 0.331,
+ "step": 1748
+ },
+ {
+ "epoch": 6.406593406593407,
+ "grad_norm": 61.30012512207031,
+ "learning_rate": 2.398046398046398e-05,
+ "loss": 0.5709,
+ "step": 1749
+ },
+ {
+ "epoch": 6.410256410256411,
+ "grad_norm": 18.139734268188477,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.0671,
+ "step": 1750
+ },
+ {
+ "epoch": 6.413919413919414,
+ "grad_norm": 29.233678817749023,
+ "learning_rate": 2.393162393162393e-05,
+ "loss": 0.2012,
+ "step": 1751
+ },
+ {
+ "epoch": 6.417582417582418,
+ "grad_norm": 6.065537452697754,
+ "learning_rate": 2.3907203907203907e-05,
+ "loss": 0.0362,
+ "step": 1752
+ },
+ {
+ "epoch": 6.4212454212454215,
+ "grad_norm": 27.241317749023438,
+ "learning_rate": 2.388278388278388e-05,
+ "loss": 0.2462,
+ "step": 1753
+ },
+ {
+ "epoch": 6.424908424908425,
+ "grad_norm": 34.21626663208008,
+ "learning_rate": 2.385836385836386e-05,
+ "loss": 0.3341,
+ "step": 1754
+ },
+ {
+ "epoch": 6.428571428571429,
+ "grad_norm": 3.2597031593322754,
+ "learning_rate": 2.3833943833943835e-05,
+ "loss": 0.0159,
+ "step": 1755
+ },
+ {
+ "epoch": 6.4322344322344325,
+ "grad_norm": 44.21895217895508,
+ "learning_rate": 2.380952380952381e-05,
+ "loss": 0.2461,
+ "step": 1756
+ },
+ {
+ "epoch": 6.435897435897436,
+ "grad_norm": 11.0900239944458,
+ "learning_rate": 2.3785103785103785e-05,
+ "loss": 0.0343,
+ "step": 1757
+ },
+ {
+ "epoch": 6.43956043956044,
+ "grad_norm": 33.349464416503906,
+ "learning_rate": 2.376068376068376e-05,
+ "loss": 0.1605,
+ "step": 1758
+ },
+ {
+ "epoch": 6.443223443223443,
+ "grad_norm": 36.584434509277344,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.291,
+ "step": 1759
+ },
+ {
+ "epoch": 6.446886446886447,
+ "grad_norm": 1.5533220767974854,
+ "learning_rate": 2.3711843711843714e-05,
+ "loss": 0.0072,
+ "step": 1760
+ },
+ {
+ "epoch": 6.450549450549451,
+ "grad_norm": 31.38529396057129,
+ "learning_rate": 2.368742368742369e-05,
+ "loss": 0.2211,
+ "step": 1761
+ },
+ {
+ "epoch": 6.454212454212454,
+ "grad_norm": 33.149131774902344,
+ "learning_rate": 2.3663003663003664e-05,
+ "loss": 0.7844,
+ "step": 1762
+ },
+ {
+ "epoch": 6.457875457875458,
+ "grad_norm": 21.318105697631836,
+ "learning_rate": 2.363858363858364e-05,
+ "loss": 0.1297,
+ "step": 1763
+ },
+ {
+ "epoch": 6.461538461538462,
+ "grad_norm": 22.11357879638672,
+ "learning_rate": 2.3614163614163614e-05,
+ "loss": 0.1063,
+ "step": 1764
+ },
+ {
+ "epoch": 6.465201465201465,
+ "grad_norm": 2.4257397651672363,
+ "learning_rate": 2.358974358974359e-05,
+ "loss": 0.0098,
+ "step": 1765
+ },
+ {
+ "epoch": 6.468864468864469,
+ "grad_norm": 11.911495208740234,
+ "learning_rate": 2.3565323565323564e-05,
+ "loss": 0.0386,
+ "step": 1766
+ },
+ {
+ "epoch": 6.472527472527473,
+ "grad_norm": 5.848181247711182,
+ "learning_rate": 2.3540903540903543e-05,
+ "loss": 0.0141,
+ "step": 1767
+ },
+ {
+ "epoch": 6.476190476190476,
+ "grad_norm": 58.96442413330078,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 0.1635,
+ "step": 1768
+ },
+ {
+ "epoch": 6.47985347985348,
+ "grad_norm": 45.464298248291016,
+ "learning_rate": 2.3492063492063493e-05,
+ "loss": 0.5185,
+ "step": 1769
+ },
+ {
+ "epoch": 6.483516483516484,
+ "grad_norm": 363.1459045410156,
+ "learning_rate": 2.3467643467643468e-05,
+ "loss": 0.9437,
+ "step": 1770
+ },
+ {
+ "epoch": 6.487179487179487,
+ "grad_norm": 30.113380432128906,
+ "learning_rate": 2.3443223443223443e-05,
+ "loss": 0.1013,
+ "step": 1771
+ },
+ {
+ "epoch": 6.490842490842491,
+ "grad_norm": 59.738224029541016,
+ "learning_rate": 2.3418803418803418e-05,
+ "loss": 0.7901,
+ "step": 1772
+ },
+ {
+ "epoch": 6.4945054945054945,
+ "grad_norm": 20.25137710571289,
+ "learning_rate": 2.3394383394383396e-05,
+ "loss": 0.2715,
+ "step": 1773
+ },
+ {
+ "epoch": 6.498168498168498,
+ "grad_norm": 36.56110763549805,
+ "learning_rate": 2.336996336996337e-05,
+ "loss": 0.4192,
+ "step": 1774
+ },
+ {
+ "epoch": 6.501831501831502,
+ "grad_norm": 25.077024459838867,
+ "learning_rate": 2.3345543345543347e-05,
+ "loss": 0.0861,
+ "step": 1775
+ },
+ {
+ "epoch": 6.5054945054945055,
+ "grad_norm": 19.396398544311523,
+ "learning_rate": 2.332112332112332e-05,
+ "loss": 0.0352,
+ "step": 1776
+ },
+ {
+ "epoch": 6.509157509157509,
+ "grad_norm": 93.91683197021484,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.1414,
+ "step": 1777
+ },
+ {
+ "epoch": 6.512820512820513,
+ "grad_norm": 30.467477798461914,
+ "learning_rate": 2.3272283272283272e-05,
+ "loss": 0.123,
+ "step": 1778
+ },
+ {
+ "epoch": 6.516483516483516,
+ "grad_norm": 135.5657196044922,
+ "learning_rate": 2.3247863247863247e-05,
+ "loss": 0.9203,
+ "step": 1779
+ },
+ {
+ "epoch": 6.52014652014652,
+ "grad_norm": 66.74224853515625,
+ "learning_rate": 2.3223443223443225e-05,
+ "loss": 1.6109,
+ "step": 1780
+ },
+ {
+ "epoch": 6.523809523809524,
+ "grad_norm": 5.672858238220215,
+ "learning_rate": 2.31990231990232e-05,
+ "loss": 0.0259,
+ "step": 1781
+ },
+ {
+ "epoch": 6.527472527472527,
+ "grad_norm": 116.89350128173828,
+ "learning_rate": 2.3174603174603175e-05,
+ "loss": 0.5468,
+ "step": 1782
+ },
+ {
+ "epoch": 6.531135531135531,
+ "grad_norm": 67.1368637084961,
+ "learning_rate": 2.315018315018315e-05,
+ "loss": 0.2192,
+ "step": 1783
+ },
+ {
+ "epoch": 6.534798534798535,
+ "grad_norm": 23.453842163085938,
+ "learning_rate": 2.3125763125763126e-05,
+ "loss": 0.1637,
+ "step": 1784
+ },
+ {
+ "epoch": 6.538461538461538,
+ "grad_norm": 10.070181846618652,
+ "learning_rate": 2.31013431013431e-05,
+ "loss": 0.0613,
+ "step": 1785
+ },
+ {
+ "epoch": 6.542124542124542,
+ "grad_norm": 76.60414123535156,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 1.1513,
+ "step": 1786
+ },
+ {
+ "epoch": 6.545787545787546,
+ "grad_norm": 28.578702926635742,
+ "learning_rate": 2.3052503052503054e-05,
+ "loss": 0.4436,
+ "step": 1787
+ },
+ {
+ "epoch": 6.549450549450549,
+ "grad_norm": 56.702999114990234,
+ "learning_rate": 2.302808302808303e-05,
+ "loss": 0.3688,
+ "step": 1788
+ },
+ {
+ "epoch": 6.553113553113553,
+ "grad_norm": 97.274658203125,
+ "learning_rate": 2.3003663003663004e-05,
+ "loss": 1.3588,
+ "step": 1789
+ },
+ {
+ "epoch": 6.556776556776557,
+ "grad_norm": 15.371636390686035,
+ "learning_rate": 2.297924297924298e-05,
+ "loss": 0.1227,
+ "step": 1790
+ },
+ {
+ "epoch": 6.56043956043956,
+ "grad_norm": 48.43988800048828,
+ "learning_rate": 2.2954822954822954e-05,
+ "loss": 0.5581,
+ "step": 1791
+ },
+ {
+ "epoch": 6.564102564102564,
+ "grad_norm": 30.510440826416016,
+ "learning_rate": 2.293040293040293e-05,
+ "loss": 0.1888,
+ "step": 1792
+ },
+ {
+ "epoch": 6.5677655677655675,
+ "grad_norm": 34.03535461425781,
+ "learning_rate": 2.2905982905982908e-05,
+ "loss": 0.3731,
+ "step": 1793
+ },
+ {
+ "epoch": 6.571428571428571,
+ "grad_norm": 41.19938659667969,
+ "learning_rate": 2.2881562881562883e-05,
+ "loss": 0.4705,
+ "step": 1794
+ },
+ {
+ "epoch": 6.575091575091575,
+ "grad_norm": 6.060940742492676,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.0586,
+ "step": 1795
+ },
+ {
+ "epoch": 6.5787545787545785,
+ "grad_norm": 19.60703468322754,
+ "learning_rate": 2.2832722832722833e-05,
+ "loss": 0.2046,
+ "step": 1796
+ },
+ {
+ "epoch": 6.582417582417582,
+ "grad_norm": 30.162328720092773,
+ "learning_rate": 2.2808302808302808e-05,
+ "loss": 0.1926,
+ "step": 1797
+ },
+ {
+ "epoch": 6.586080586080586,
+ "grad_norm": 28.184131622314453,
+ "learning_rate": 2.2783882783882783e-05,
+ "loss": 0.4085,
+ "step": 1798
+ },
+ {
+ "epoch": 6.589743589743589,
+ "grad_norm": 28.77677345275879,
+ "learning_rate": 2.275946275946276e-05,
+ "loss": 0.4333,
+ "step": 1799
+ },
+ {
+ "epoch": 6.593406593406593,
+ "grad_norm": 16.47443962097168,
+ "learning_rate": 2.2735042735042737e-05,
+ "loss": 0.1579,
+ "step": 1800
+ },
+ {
+ "epoch": 6.597069597069597,
+ "grad_norm": 24.273569107055664,
+ "learning_rate": 2.2710622710622712e-05,
+ "loss": 0.1917,
+ "step": 1801
+ },
+ {
+ "epoch": 6.6007326007326,
+ "grad_norm": 43.3727912902832,
+ "learning_rate": 2.2686202686202687e-05,
+ "loss": 0.4186,
+ "step": 1802
+ },
+ {
+ "epoch": 6.604395604395604,
+ "grad_norm": 21.321182250976562,
+ "learning_rate": 2.2661782661782662e-05,
+ "loss": 0.187,
+ "step": 1803
+ },
+ {
+ "epoch": 6.608058608058608,
+ "grad_norm": 9.65528678894043,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.0584,
+ "step": 1804
+ },
+ {
+ "epoch": 6.611721611721611,
+ "grad_norm": 43.85563659667969,
+ "learning_rate": 2.2612942612942612e-05,
+ "loss": 0.2249,
+ "step": 1805
+ },
+ {
+ "epoch": 6.615384615384615,
+ "grad_norm": 36.068946838378906,
+ "learning_rate": 2.258852258852259e-05,
+ "loss": 0.8459,
+ "step": 1806
+ },
+ {
+ "epoch": 6.619047619047619,
+ "grad_norm": 37.197776794433594,
+ "learning_rate": 2.2564102564102566e-05,
+ "loss": 0.4026,
+ "step": 1807
+ },
+ {
+ "epoch": 6.622710622710622,
+ "grad_norm": 11.39905071258545,
+ "learning_rate": 2.253968253968254e-05,
+ "loss": 0.0544,
+ "step": 1808
+ },
+ {
+ "epoch": 6.626373626373626,
+ "grad_norm": 6.2379150390625,
+ "learning_rate": 2.2515262515262516e-05,
+ "loss": 0.0342,
+ "step": 1809
+ },
+ {
+ "epoch": 6.63003663003663,
+ "grad_norm": 14.908777236938477,
+ "learning_rate": 2.249084249084249e-05,
+ "loss": 0.1245,
+ "step": 1810
+ },
+ {
+ "epoch": 6.633699633699633,
+ "grad_norm": 47.33977508544922,
+ "learning_rate": 2.2466422466422466e-05,
+ "loss": 0.3771,
+ "step": 1811
+ },
+ {
+ "epoch": 6.637362637362637,
+ "grad_norm": 25.724132537841797,
+ "learning_rate": 2.2442002442002444e-05,
+ "loss": 0.3055,
+ "step": 1812
+ },
+ {
+ "epoch": 6.641025641025641,
+ "grad_norm": 30.99205207824707,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 0.2163,
+ "step": 1813
+ },
+ {
+ "epoch": 6.644688644688645,
+ "grad_norm": 22.741575241088867,
+ "learning_rate": 2.2393162393162394e-05,
+ "loss": 0.136,
+ "step": 1814
+ },
+ {
+ "epoch": 6.648351648351649,
+ "grad_norm": 22.271474838256836,
+ "learning_rate": 2.236874236874237e-05,
+ "loss": 0.2299,
+ "step": 1815
+ },
+ {
+ "epoch": 6.652014652014652,
+ "grad_norm": 51.153072357177734,
+ "learning_rate": 2.2344322344322345e-05,
+ "loss": 0.8646,
+ "step": 1816
+ },
+ {
+ "epoch": 6.655677655677656,
+ "grad_norm": 4.649880409240723,
+ "learning_rate": 2.231990231990232e-05,
+ "loss": 0.0344,
+ "step": 1817
+ },
+ {
+ "epoch": 6.65934065934066,
+ "grad_norm": 2.948399305343628,
+ "learning_rate": 2.2295482295482295e-05,
+ "loss": 0.0128,
+ "step": 1818
+ },
+ {
+ "epoch": 6.663003663003663,
+ "grad_norm": 10.776185035705566,
+ "learning_rate": 2.2271062271062273e-05,
+ "loss": 0.0438,
+ "step": 1819
+ },
+ {
+ "epoch": 6.666666666666667,
+ "grad_norm": 31.777973175048828,
+ "learning_rate": 2.2246642246642248e-05,
+ "loss": 1.3552,
+ "step": 1820
+ },
+ {
+ "epoch": 6.670329670329671,
+ "grad_norm": 44.022377014160156,
+ "learning_rate": 2.222222222222222e-05,
+ "loss": 0.1928,
+ "step": 1821
+ },
+ {
+ "epoch": 6.673992673992674,
+ "grad_norm": 7.014647960662842,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 0.0675,
+ "step": 1822
+ },
+ {
+ "epoch": 6.677655677655678,
+ "grad_norm": 10.964372634887695,
+ "learning_rate": 2.2173382173382173e-05,
+ "loss": 0.0809,
+ "step": 1823
+ },
+ {
+ "epoch": 6.681318681318682,
+ "grad_norm": 42.56317901611328,
+ "learning_rate": 2.214896214896215e-05,
+ "loss": 0.2639,
+ "step": 1824
+ },
+ {
+ "epoch": 6.684981684981685,
+ "grad_norm": 25.33672523498535,
+ "learning_rate": 2.2124542124542127e-05,
+ "loss": 0.294,
+ "step": 1825
+ },
+ {
+ "epoch": 6.688644688644689,
+ "grad_norm": 9.823565483093262,
+ "learning_rate": 2.2100122100122102e-05,
+ "loss": 0.0885,
+ "step": 1826
+ },
+ {
+ "epoch": 6.6923076923076925,
+ "grad_norm": 3.2519893646240234,
+ "learning_rate": 2.2075702075702077e-05,
+ "loss": 0.0208,
+ "step": 1827
+ },
+ {
+ "epoch": 6.695970695970696,
+ "grad_norm": 14.441536903381348,
+ "learning_rate": 2.2051282051282052e-05,
+ "loss": 0.1541,
+ "step": 1828
+ },
+ {
+ "epoch": 6.6996336996337,
+ "grad_norm": 4.128608226776123,
+ "learning_rate": 2.2026862026862027e-05,
+ "loss": 0.03,
+ "step": 1829
+ },
+ {
+ "epoch": 6.7032967032967035,
+ "grad_norm": 13.953630447387695,
+ "learning_rate": 2.2002442002442002e-05,
+ "loss": 0.0781,
+ "step": 1830
+ },
+ {
+ "epoch": 6.706959706959707,
+ "grad_norm": 24.90090560913086,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.33,
+ "step": 1831
+ },
+ {
+ "epoch": 6.710622710622711,
+ "grad_norm": 43.3170051574707,
+ "learning_rate": 2.1953601953601956e-05,
+ "loss": 0.1735,
+ "step": 1832
+ },
+ {
+ "epoch": 6.714285714285714,
+ "grad_norm": 5.82177734375,
+ "learning_rate": 2.192918192918193e-05,
+ "loss": 0.0281,
+ "step": 1833
+ },
+ {
+ "epoch": 6.717948717948718,
+ "grad_norm": 26.415163040161133,
+ "learning_rate": 2.1904761904761903e-05,
+ "loss": 0.4272,
+ "step": 1834
+ },
+ {
+ "epoch": 6.721611721611722,
+ "grad_norm": 40.3553581237793,
+ "learning_rate": 2.188034188034188e-05,
+ "loss": 0.3375,
+ "step": 1835
+ },
+ {
+ "epoch": 6.725274725274725,
+ "grad_norm": 39.16763687133789,
+ "learning_rate": 2.1855921855921856e-05,
+ "loss": 0.881,
+ "step": 1836
+ },
+ {
+ "epoch": 6.728937728937729,
+ "grad_norm": 14.275158882141113,
+ "learning_rate": 2.183150183150183e-05,
+ "loss": 0.0499,
+ "step": 1837
+ },
+ {
+ "epoch": 6.732600732600733,
+ "grad_norm": 40.29611587524414,
+ "learning_rate": 2.180708180708181e-05,
+ "loss": 0.2447,
+ "step": 1838
+ },
+ {
+ "epoch": 6.736263736263736,
+ "grad_norm": 33.86298751831055,
+ "learning_rate": 2.1782661782661785e-05,
+ "loss": 0.2772,
+ "step": 1839
+ },
+ {
+ "epoch": 6.73992673992674,
+ "grad_norm": 34.46928405761719,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.2721,
+ "step": 1840
+ },
+ {
+ "epoch": 6.743589743589744,
+ "grad_norm": 17.7811222076416,
+ "learning_rate": 2.1733821733821735e-05,
+ "loss": 0.0955,
+ "step": 1841
+ },
+ {
+ "epoch": 6.747252747252747,
+ "grad_norm": 33.17821502685547,
+ "learning_rate": 2.170940170940171e-05,
+ "loss": 0.1831,
+ "step": 1842
+ },
+ {
+ "epoch": 6.750915750915751,
+ "grad_norm": 24.910184860229492,
+ "learning_rate": 2.1684981684981685e-05,
+ "loss": 0.1617,
+ "step": 1843
+ },
+ {
+ "epoch": 6.754578754578755,
+ "grad_norm": 28.5413875579834,
+ "learning_rate": 2.166056166056166e-05,
+ "loss": 0.2048,
+ "step": 1844
+ },
+ {
+ "epoch": 6.758241758241758,
+ "grad_norm": 26.866653442382812,
+ "learning_rate": 2.163614163614164e-05,
+ "loss": 0.1637,
+ "step": 1845
+ },
+ {
+ "epoch": 6.761904761904762,
+ "grad_norm": 43.447593688964844,
+ "learning_rate": 2.1611721611721613e-05,
+ "loss": 0.2206,
+ "step": 1846
+ },
+ {
+ "epoch": 6.7655677655677655,
+ "grad_norm": 8.146500587463379,
+ "learning_rate": 2.1587301587301585e-05,
+ "loss": 0.0199,
+ "step": 1847
+ },
+ {
+ "epoch": 6.769230769230769,
+ "grad_norm": 30.458940505981445,
+ "learning_rate": 2.1562881562881564e-05,
+ "loss": 0.0963,
+ "step": 1848
+ },
+ {
+ "epoch": 6.772893772893773,
+ "grad_norm": 1.6412991285324097,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.0097,
+ "step": 1849
+ },
+ {
+ "epoch": 6.7765567765567765,
+ "grad_norm": 22.804906845092773,
+ "learning_rate": 2.1514041514041514e-05,
+ "loss": 0.115,
+ "step": 1850
+ },
+ {
+ "epoch": 6.78021978021978,
+ "grad_norm": 21.790761947631836,
+ "learning_rate": 2.1489621489621492e-05,
+ "loss": 0.1609,
+ "step": 1851
+ },
+ {
+ "epoch": 6.783882783882784,
+ "grad_norm": 56.942420959472656,
+ "learning_rate": 2.1465201465201467e-05,
+ "loss": 0.3725,
+ "step": 1852
+ },
+ {
+ "epoch": 6.787545787545787,
+ "grad_norm": 31.713504791259766,
+ "learning_rate": 2.1440781440781442e-05,
+ "loss": 0.3035,
+ "step": 1853
+ },
+ {
+ "epoch": 6.791208791208791,
+ "grad_norm": 14.83351993560791,
+ "learning_rate": 2.1416361416361417e-05,
+ "loss": 0.0383,
+ "step": 1854
+ },
+ {
+ "epoch": 6.794871794871795,
+ "grad_norm": 28.03726577758789,
+ "learning_rate": 2.1391941391941392e-05,
+ "loss": 0.0432,
+ "step": 1855
+ },
+ {
+ "epoch": 6.798534798534798,
+ "grad_norm": 72.7824478149414,
+ "learning_rate": 2.1367521367521368e-05,
+ "loss": 0.7678,
+ "step": 1856
+ },
+ {
+ "epoch": 6.802197802197802,
+ "grad_norm": 48.0980224609375,
+ "learning_rate": 2.1343101343101343e-05,
+ "loss": 0.7691,
+ "step": 1857
+ },
+ {
+ "epoch": 6.805860805860806,
+ "grad_norm": 44.305519104003906,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.4334,
+ "step": 1858
+ },
+ {
+ "epoch": 6.809523809523809,
+ "grad_norm": 37.26662826538086,
+ "learning_rate": 2.1294261294261296e-05,
+ "loss": 0.5122,
+ "step": 1859
+ },
+ {
+ "epoch": 6.813186813186813,
+ "grad_norm": 11.758150100708008,
+ "learning_rate": 2.1269841269841268e-05,
+ "loss": 0.034,
+ "step": 1860
+ },
+ {
+ "epoch": 6.816849816849817,
+ "grad_norm": 22.28230857849121,
+ "learning_rate": 2.1245421245421246e-05,
+ "loss": 0.1423,
+ "step": 1861
+ },
+ {
+ "epoch": 6.82051282051282,
+ "grad_norm": 15.02229118347168,
+ "learning_rate": 2.122100122100122e-05,
+ "loss": 0.0574,
+ "step": 1862
+ },
+ {
+ "epoch": 6.824175824175824,
+ "grad_norm": 54.3133659362793,
+ "learning_rate": 2.1196581196581196e-05,
+ "loss": 0.7862,
+ "step": 1863
+ },
+ {
+ "epoch": 6.827838827838828,
+ "grad_norm": 14.319539070129395,
+ "learning_rate": 2.1172161172161175e-05,
+ "loss": 0.0509,
+ "step": 1864
+ },
+ {
+ "epoch": 6.831501831501831,
+ "grad_norm": 21.989151000976562,
+ "learning_rate": 2.114774114774115e-05,
+ "loss": 0.1181,
+ "step": 1865
+ },
+ {
+ "epoch": 6.835164835164835,
+ "grad_norm": 35.67295455932617,
+ "learning_rate": 2.112332112332112e-05,
+ "loss": 0.5721,
+ "step": 1866
+ },
+ {
+ "epoch": 6.8388278388278385,
+ "grad_norm": 1.1201294660568237,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.006,
+ "step": 1867
+ },
+ {
+ "epoch": 6.842490842490842,
+ "grad_norm": 55.64126205444336,
+ "learning_rate": 2.1074481074481075e-05,
+ "loss": 0.5155,
+ "step": 1868
+ },
+ {
+ "epoch": 6.846153846153846,
+ "grad_norm": 34.077598571777344,
+ "learning_rate": 2.105006105006105e-05,
+ "loss": 0.2999,
+ "step": 1869
+ },
+ {
+ "epoch": 6.8498168498168495,
+ "grad_norm": 47.34593200683594,
+ "learning_rate": 2.1025641025641025e-05,
+ "loss": 0.5192,
+ "step": 1870
+ },
+ {
+ "epoch": 6.853479853479853,
+ "grad_norm": 15.37938117980957,
+ "learning_rate": 2.1001221001221004e-05,
+ "loss": 0.0647,
+ "step": 1871
+ },
+ {
+ "epoch": 6.857142857142857,
+ "grad_norm": 8.03809928894043,
+ "learning_rate": 2.097680097680098e-05,
+ "loss": 0.0535,
+ "step": 1872
+ },
+ {
+ "epoch": 6.860805860805861,
+ "grad_norm": 34.22372055053711,
+ "learning_rate": 2.095238095238095e-05,
+ "loss": 0.4123,
+ "step": 1873
+ },
+ {
+ "epoch": 6.864468864468865,
+ "grad_norm": 19.66349220275879,
+ "learning_rate": 2.092796092796093e-05,
+ "loss": 0.165,
+ "step": 1874
+ },
+ {
+ "epoch": 6.868131868131869,
+ "grad_norm": 4.448884010314941,
+ "learning_rate": 2.0903540903540904e-05,
+ "loss": 0.0204,
+ "step": 1875
+ },
+ {
+ "epoch": 6.871794871794872,
+ "grad_norm": 7.874554634094238,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.0339,
+ "step": 1876
+ },
+ {
+ "epoch": 6.875457875457876,
+ "grad_norm": 2.1591508388519287,
+ "learning_rate": 2.0854700854700857e-05,
+ "loss": 0.0069,
+ "step": 1877
+ },
+ {
+ "epoch": 6.8791208791208796,
+ "grad_norm": 7.496129512786865,
+ "learning_rate": 2.0830280830280832e-05,
+ "loss": 0.0522,
+ "step": 1878
+ },
+ {
+ "epoch": 6.882783882783883,
+ "grad_norm": 1.867928385734558,
+ "learning_rate": 2.0805860805860804e-05,
+ "loss": 0.0075,
+ "step": 1879
+ },
+ {
+ "epoch": 6.886446886446887,
+ "grad_norm": 6.0440239906311035,
+ "learning_rate": 2.0781440781440783e-05,
+ "loss": 0.0454,
+ "step": 1880
+ },
+ {
+ "epoch": 6.8901098901098905,
+ "grad_norm": 38.901275634765625,
+ "learning_rate": 2.0757020757020758e-05,
+ "loss": 0.1179,
+ "step": 1881
+ },
+ {
+ "epoch": 6.893772893772894,
+ "grad_norm": 36.98682403564453,
+ "learning_rate": 2.0732600732600733e-05,
+ "loss": 0.4722,
+ "step": 1882
+ },
+ {
+ "epoch": 6.897435897435898,
+ "grad_norm": 24.764745712280273,
+ "learning_rate": 2.0708180708180708e-05,
+ "loss": 0.1179,
+ "step": 1883
+ },
+ {
+ "epoch": 6.9010989010989015,
+ "grad_norm": 9.029558181762695,
+ "learning_rate": 2.0683760683760686e-05,
+ "loss": 0.0134,
+ "step": 1884
+ },
+ {
+ "epoch": 6.904761904761905,
+ "grad_norm": 54.04767608642578,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 0.3645,
+ "step": 1885
+ },
+ {
+ "epoch": 6.908424908424909,
+ "grad_norm": 35.74855041503906,
+ "learning_rate": 2.0634920634920633e-05,
+ "loss": 0.5228,
+ "step": 1886
+ },
+ {
+ "epoch": 6.912087912087912,
+ "grad_norm": 18.870223999023438,
+ "learning_rate": 2.061050061050061e-05,
+ "loss": 0.0564,
+ "step": 1887
+ },
+ {
+ "epoch": 6.915750915750916,
+ "grad_norm": 1.4971216917037964,
+ "learning_rate": 2.0586080586080587e-05,
+ "loss": 0.0067,
+ "step": 1888
+ },
+ {
+ "epoch": 6.91941391941392,
+ "grad_norm": 71.35897064208984,
+ "learning_rate": 2.056166056166056e-05,
+ "loss": 0.9147,
+ "step": 1889
+ },
+ {
+ "epoch": 6.923076923076923,
+ "grad_norm": 20.66876220703125,
+ "learning_rate": 2.053724053724054e-05,
+ "loss": 0.0777,
+ "step": 1890
+ },
+ {
+ "epoch": 6.926739926739927,
+ "grad_norm": 12.178057670593262,
+ "learning_rate": 2.0512820512820515e-05,
+ "loss": 0.0682,
+ "step": 1891
+ },
+ {
+ "epoch": 6.930402930402931,
+ "grad_norm": 18.622045516967773,
+ "learning_rate": 2.0488400488400487e-05,
+ "loss": 0.2268,
+ "step": 1892
+ },
+ {
+ "epoch": 6.934065934065934,
+ "grad_norm": 13.028661727905273,
+ "learning_rate": 2.0463980463980462e-05,
+ "loss": 0.0783,
+ "step": 1893
+ },
+ {
+ "epoch": 6.937728937728938,
+ "grad_norm": 52.034603118896484,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.2124,
+ "step": 1894
+ },
+ {
+ "epoch": 6.941391941391942,
+ "grad_norm": 15.498795509338379,
+ "learning_rate": 2.0415140415140415e-05,
+ "loss": 0.1372,
+ "step": 1895
+ },
+ {
+ "epoch": 6.945054945054945,
+ "grad_norm": 4.659972190856934,
+ "learning_rate": 2.039072039072039e-05,
+ "loss": 0.0671,
+ "step": 1896
+ },
+ {
+ "epoch": 6.948717948717949,
+ "grad_norm": 67.44121551513672,
+ "learning_rate": 2.036630036630037e-05,
+ "loss": 0.3543,
+ "step": 1897
+ },
+ {
+ "epoch": 6.9523809523809526,
+ "grad_norm": 55.583770751953125,
+ "learning_rate": 2.0341880341880344e-05,
+ "loss": 0.5827,
+ "step": 1898
+ },
+ {
+ "epoch": 6.956043956043956,
+ "grad_norm": 2.5286853313446045,
+ "learning_rate": 2.0317460317460316e-05,
+ "loss": 0.0093,
+ "step": 1899
+ },
+ {
+ "epoch": 6.95970695970696,
+ "grad_norm": 35.537654876708984,
+ "learning_rate": 2.0293040293040294e-05,
+ "loss": 0.4927,
+ "step": 1900
+ },
+ {
+ "epoch": 6.9633699633699635,
+ "grad_norm": 5.582351207733154,
+ "learning_rate": 2.026862026862027e-05,
+ "loss": 0.0266,
+ "step": 1901
+ },
+ {
+ "epoch": 6.967032967032967,
+ "grad_norm": 24.245107650756836,
+ "learning_rate": 2.0244200244200244e-05,
+ "loss": 0.1652,
+ "step": 1902
+ },
+ {
+ "epoch": 6.970695970695971,
+ "grad_norm": 15.859257698059082,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.0523,
+ "step": 1903
+ },
+ {
+ "epoch": 6.9743589743589745,
+ "grad_norm": 4.049310207366943,
+ "learning_rate": 2.0195360195360198e-05,
+ "loss": 0.016,
+ "step": 1904
+ },
+ {
+ "epoch": 6.978021978021978,
+ "grad_norm": 22.330875396728516,
+ "learning_rate": 2.017094017094017e-05,
+ "loss": 0.0999,
+ "step": 1905
+ },
+ {
+ "epoch": 6.981684981684982,
+ "grad_norm": 5.005560874938965,
+ "learning_rate": 2.0146520146520144e-05,
+ "loss": 0.0186,
+ "step": 1906
+ },
+ {
+ "epoch": 6.985347985347985,
+ "grad_norm": 5.587247848510742,
+ "learning_rate": 2.0122100122100123e-05,
+ "loss": 0.0312,
+ "step": 1907
+ },
+ {
+ "epoch": 6.989010989010989,
+ "grad_norm": 46.75461959838867,
+ "learning_rate": 2.0097680097680098e-05,
+ "loss": 0.2803,
+ "step": 1908
+ },
+ {
+ "epoch": 6.992673992673993,
+ "grad_norm": 9.029139518737793,
+ "learning_rate": 2.0073260073260073e-05,
+ "loss": 0.0437,
+ "step": 1909
+ },
+ {
+ "epoch": 6.996336996336996,
+ "grad_norm": 26.199968338012695,
+ "learning_rate": 2.004884004884005e-05,
+ "loss": 0.4601,
+ "step": 1910
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 2.2140614986419678,
+ "learning_rate": 2.0024420024420023e-05,
+ "loss": 0.0096,
+ "step": 1911
+ },
+ {
+ "epoch": 7.003663003663004,
+ "grad_norm": 52.966732025146484,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.5645,
+ "step": 1912
+ },
+ {
+ "epoch": 7.007326007326007,
+ "grad_norm": 11.818926811218262,
+ "learning_rate": 1.9975579975579977e-05,
+ "loss": 0.1,
+ "step": 1913
+ },
+ {
+ "epoch": 7.010989010989011,
+ "grad_norm": 3.5507917404174805,
+ "learning_rate": 1.9951159951159952e-05,
+ "loss": 0.0124,
+ "step": 1914
+ },
+ {
+ "epoch": 7.014652014652015,
+ "grad_norm": 13.962370872497559,
+ "learning_rate": 1.9926739926739927e-05,
+ "loss": 0.0361,
+ "step": 1915
+ },
+ {
+ "epoch": 7.018315018315018,
+ "grad_norm": 18.855941772460938,
+ "learning_rate": 1.9902319902319905e-05,
+ "loss": 0.1029,
+ "step": 1916
+ },
+ {
+ "epoch": 7.021978021978022,
+ "grad_norm": 25.34268569946289,
+ "learning_rate": 1.987789987789988e-05,
+ "loss": 0.0968,
+ "step": 1917
+ },
+ {
+ "epoch": 7.0256410256410255,
+ "grad_norm": 12.053638458251953,
+ "learning_rate": 1.9853479853479852e-05,
+ "loss": 0.0473,
+ "step": 1918
+ },
+ {
+ "epoch": 7.029304029304029,
+ "grad_norm": 28.66246795654297,
+ "learning_rate": 1.9829059829059827e-05,
+ "loss": 0.477,
+ "step": 1919
+ },
+ {
+ "epoch": 7.032967032967033,
+ "grad_norm": 37.606475830078125,
+ "learning_rate": 1.9804639804639806e-05,
+ "loss": 0.3894,
+ "step": 1920
+ },
+ {
+ "epoch": 7.0366300366300365,
+ "grad_norm": 10.550342559814453,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.031,
+ "step": 1921
+ },
+ {
+ "epoch": 7.04029304029304,
+ "grad_norm": 8.748348236083984,
+ "learning_rate": 1.9755799755799756e-05,
+ "loss": 0.087,
+ "step": 1922
+ },
+ {
+ "epoch": 7.043956043956044,
+ "grad_norm": 16.9587345123291,
+ "learning_rate": 1.9731379731379734e-05,
+ "loss": 0.1271,
+ "step": 1923
+ },
+ {
+ "epoch": 7.0476190476190474,
+ "grad_norm": 64.79300689697266,
+ "learning_rate": 1.9706959706959706e-05,
+ "loss": 0.4748,
+ "step": 1924
+ },
+ {
+ "epoch": 7.051282051282051,
+ "grad_norm": 1.4843182563781738,
+ "learning_rate": 1.968253968253968e-05,
+ "loss": 0.0074,
+ "step": 1925
+ },
+ {
+ "epoch": 7.054945054945055,
+ "grad_norm": 6.48045539855957,
+ "learning_rate": 1.965811965811966e-05,
+ "loss": 0.0312,
+ "step": 1926
+ },
+ {
+ "epoch": 7.058608058608058,
+ "grad_norm": 13.35557746887207,
+ "learning_rate": 1.9633699633699634e-05,
+ "loss": 0.0395,
+ "step": 1927
+ },
+ {
+ "epoch": 7.062271062271062,
+ "grad_norm": 6.710418701171875,
+ "learning_rate": 1.960927960927961e-05,
+ "loss": 0.0237,
+ "step": 1928
+ },
+ {
+ "epoch": 7.065934065934066,
+ "grad_norm": 1.5964992046356201,
+ "learning_rate": 1.9584859584859588e-05,
+ "loss": 0.0069,
+ "step": 1929
+ },
+ {
+ "epoch": 7.069597069597069,
+ "grad_norm": 17.386457443237305,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.1719,
+ "step": 1930
+ },
+ {
+ "epoch": 7.073260073260073,
+ "grad_norm": 9.381852149963379,
+ "learning_rate": 1.9536019536019535e-05,
+ "loss": 0.0274,
+ "step": 1931
+ },
+ {
+ "epoch": 7.076923076923077,
+ "grad_norm": 96.48052978515625,
+ "learning_rate": 1.951159951159951e-05,
+ "loss": 0.9714,
+ "step": 1932
+ },
+ {
+ "epoch": 7.08058608058608,
+ "grad_norm": 9.537943840026855,
+ "learning_rate": 1.9487179487179488e-05,
+ "loss": 0.0608,
+ "step": 1933
+ },
+ {
+ "epoch": 7.084249084249084,
+ "grad_norm": 47.1885986328125,
+ "learning_rate": 1.9462759462759463e-05,
+ "loss": 0.3678,
+ "step": 1934
+ },
+ {
+ "epoch": 7.087912087912088,
+ "grad_norm": 22.831552505493164,
+ "learning_rate": 1.9438339438339438e-05,
+ "loss": 0.1386,
+ "step": 1935
+ },
+ {
+ "epoch": 7.091575091575091,
+ "grad_norm": 7.730359077453613,
+ "learning_rate": 1.9413919413919417e-05,
+ "loss": 0.0286,
+ "step": 1936
+ },
+ {
+ "epoch": 7.095238095238095,
+ "grad_norm": 34.329349517822266,
+ "learning_rate": 1.938949938949939e-05,
+ "loss": 0.2041,
+ "step": 1937
+ },
+ {
+ "epoch": 7.0989010989010985,
+ "grad_norm": 2.7768473625183105,
+ "learning_rate": 1.9365079365079363e-05,
+ "loss": 0.0095,
+ "step": 1938
+ },
+ {
+ "epoch": 7.102564102564102,
+ "grad_norm": 52.868446350097656,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 1.3287,
+ "step": 1939
+ },
+ {
+ "epoch": 7.106227106227106,
+ "grad_norm": 46.30121612548828,
+ "learning_rate": 1.9316239316239317e-05,
+ "loss": 0.6172,
+ "step": 1940
+ },
+ {
+ "epoch": 7.1098901098901095,
+ "grad_norm": 22.829683303833008,
+ "learning_rate": 1.9291819291819292e-05,
+ "loss": 0.2141,
+ "step": 1941
+ },
+ {
+ "epoch": 7.113553113553113,
+ "grad_norm": 5.540363311767578,
+ "learning_rate": 1.926739926739927e-05,
+ "loss": 0.0202,
+ "step": 1942
+ },
+ {
+ "epoch": 7.117216117216117,
+ "grad_norm": 12.821202278137207,
+ "learning_rate": 1.9242979242979246e-05,
+ "loss": 0.0474,
+ "step": 1943
+ },
+ {
+ "epoch": 7.1208791208791204,
+ "grad_norm": 51.50701141357422,
+ "learning_rate": 1.9218559218559217e-05,
+ "loss": 0.2716,
+ "step": 1944
+ },
+ {
+ "epoch": 7.124542124542124,
+ "grad_norm": 22.156648635864258,
+ "learning_rate": 1.9194139194139192e-05,
+ "loss": 0.4693,
+ "step": 1945
+ },
+ {
+ "epoch": 7.128205128205128,
+ "grad_norm": 21.045156478881836,
+ "learning_rate": 1.916971916971917e-05,
+ "loss": 0.471,
+ "step": 1946
+ },
+ {
+ "epoch": 7.131868131868132,
+ "grad_norm": 19.406959533691406,
+ "learning_rate": 1.9145299145299146e-05,
+ "loss": 0.1439,
+ "step": 1947
+ },
+ {
+ "epoch": 7.135531135531136,
+ "grad_norm": 3.8923749923706055,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.0165,
+ "step": 1948
+ },
+ {
+ "epoch": 7.13919413919414,
+ "grad_norm": 19.87603759765625,
+ "learning_rate": 1.90964590964591e-05,
+ "loss": 0.1763,
+ "step": 1949
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 0.5241024494171143,
+ "learning_rate": 1.907203907203907e-05,
+ "loss": 0.0026,
+ "step": 1950
+ },
+ {
+ "epoch": 7.146520146520147,
+ "grad_norm": 3.141636610031128,
+ "learning_rate": 1.9047619047619046e-05,
+ "loss": 0.0217,
+ "step": 1951
+ },
+ {
+ "epoch": 7.1501831501831505,
+ "grad_norm": 7.46498966217041,
+ "learning_rate": 1.9023199023199025e-05,
+ "loss": 0.0125,
+ "step": 1952
+ },
+ {
+ "epoch": 7.153846153846154,
+ "grad_norm": 2.050363779067993,
+ "learning_rate": 1.8998778998779e-05,
+ "loss": 0.0092,
+ "step": 1953
+ },
+ {
+ "epoch": 7.157509157509158,
+ "grad_norm": 65.3537826538086,
+ "learning_rate": 1.8974358974358975e-05,
+ "loss": 0.9234,
+ "step": 1954
+ },
+ {
+ "epoch": 7.1611721611721615,
+ "grad_norm": 39.09166717529297,
+ "learning_rate": 1.8949938949938953e-05,
+ "loss": 0.4183,
+ "step": 1955
+ },
+ {
+ "epoch": 7.164835164835165,
+ "grad_norm": 7.788208961486816,
+ "learning_rate": 1.8925518925518925e-05,
+ "loss": 0.0284,
+ "step": 1956
+ },
+ {
+ "epoch": 7.168498168498169,
+ "grad_norm": 19.53957176208496,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.148,
+ "step": 1957
+ },
+ {
+ "epoch": 7.172161172161172,
+ "grad_norm": 11.077863693237305,
+ "learning_rate": 1.8876678876678875e-05,
+ "loss": 0.0772,
+ "step": 1958
+ },
+ {
+ "epoch": 7.175824175824176,
+ "grad_norm": 10.294413566589355,
+ "learning_rate": 1.8852258852258853e-05,
+ "loss": 0.0278,
+ "step": 1959
+ },
+ {
+ "epoch": 7.17948717948718,
+ "grad_norm": 34.725284576416016,
+ "learning_rate": 1.882783882783883e-05,
+ "loss": 0.194,
+ "step": 1960
+ },
+ {
+ "epoch": 7.183150183150183,
+ "grad_norm": 27.773906707763672,
+ "learning_rate": 1.8803418803418804e-05,
+ "loss": 0.3261,
+ "step": 1961
+ },
+ {
+ "epoch": 7.186813186813187,
+ "grad_norm": 60.96028518676758,
+ "learning_rate": 1.8778998778998782e-05,
+ "loss": 0.5915,
+ "step": 1962
+ },
+ {
+ "epoch": 7.190476190476191,
+ "grad_norm": 9.918408393859863,
+ "learning_rate": 1.8754578754578754e-05,
+ "loss": 0.0428,
+ "step": 1963
+ },
+ {
+ "epoch": 7.194139194139194,
+ "grad_norm": 42.929927825927734,
+ "learning_rate": 1.873015873015873e-05,
+ "loss": 0.3522,
+ "step": 1964
+ },
+ {
+ "epoch": 7.197802197802198,
+ "grad_norm": 33.893463134765625,
+ "learning_rate": 1.8705738705738707e-05,
+ "loss": 0.5049,
+ "step": 1965
+ },
+ {
+ "epoch": 7.201465201465202,
+ "grad_norm": 3.18776273727417,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 0.0204,
+ "step": 1966
+ },
+ {
+ "epoch": 7.205128205128205,
+ "grad_norm": 9.548710823059082,
+ "learning_rate": 1.8656898656898657e-05,
+ "loss": 0.0711,
+ "step": 1967
+ },
+ {
+ "epoch": 7.208791208791209,
+ "grad_norm": 38.94087600708008,
+ "learning_rate": 1.8632478632478636e-05,
+ "loss": 0.5289,
+ "step": 1968
+ },
+ {
+ "epoch": 7.212454212454213,
+ "grad_norm": 5.812004566192627,
+ "learning_rate": 1.8608058608058607e-05,
+ "loss": 0.0224,
+ "step": 1969
+ },
+ {
+ "epoch": 7.216117216117216,
+ "grad_norm": 1.2060245275497437,
+ "learning_rate": 1.8583638583638583e-05,
+ "loss": 0.0077,
+ "step": 1970
+ },
+ {
+ "epoch": 7.21978021978022,
+ "grad_norm": 20.632722854614258,
+ "learning_rate": 1.8559218559218558e-05,
+ "loss": 0.0907,
+ "step": 1971
+ },
+ {
+ "epoch": 7.2234432234432235,
+ "grad_norm": 24.92366600036621,
+ "learning_rate": 1.8534798534798536e-05,
+ "loss": 0.1633,
+ "step": 1972
+ },
+ {
+ "epoch": 7.227106227106227,
+ "grad_norm": 2.3411026000976562,
+ "learning_rate": 1.851037851037851e-05,
+ "loss": 0.0098,
+ "step": 1973
+ },
+ {
+ "epoch": 7.230769230769231,
+ "grad_norm": 30.942848205566406,
+ "learning_rate": 1.8485958485958486e-05,
+ "loss": 0.1813,
+ "step": 1974
+ },
+ {
+ "epoch": 7.2344322344322345,
+ "grad_norm": 12.736541748046875,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.0397,
+ "step": 1975
+ },
+ {
+ "epoch": 7.238095238095238,
+ "grad_norm": 8.892921447753906,
+ "learning_rate": 1.8437118437118436e-05,
+ "loss": 0.0255,
+ "step": 1976
+ },
+ {
+ "epoch": 7.241758241758242,
+ "grad_norm": 36.48339080810547,
+ "learning_rate": 1.841269841269841e-05,
+ "loss": 0.3125,
+ "step": 1977
+ },
+ {
+ "epoch": 7.245421245421245,
+ "grad_norm": 48.35296630859375,
+ "learning_rate": 1.838827838827839e-05,
+ "loss": 0.4951,
+ "step": 1978
+ },
+ {
+ "epoch": 7.249084249084249,
+ "grad_norm": 31.021989822387695,
+ "learning_rate": 1.8363858363858365e-05,
+ "loss": 0.2124,
+ "step": 1979
+ },
+ {
+ "epoch": 7.252747252747253,
+ "grad_norm": 32.49650955200195,
+ "learning_rate": 1.833943833943834e-05,
+ "loss": 0.309,
+ "step": 1980
+ },
+ {
+ "epoch": 7.256410256410256,
+ "grad_norm": 43.47561264038086,
+ "learning_rate": 1.831501831501832e-05,
+ "loss": 0.3206,
+ "step": 1981
+ },
+ {
+ "epoch": 7.26007326007326,
+ "grad_norm": 14.67831802368164,
+ "learning_rate": 1.829059829059829e-05,
+ "loss": 0.0806,
+ "step": 1982
+ },
+ {
+ "epoch": 7.263736263736264,
+ "grad_norm": 23.66496467590332,
+ "learning_rate": 1.8266178266178265e-05,
+ "loss": 0.1769,
+ "step": 1983
+ },
+ {
+ "epoch": 7.267399267399267,
+ "grad_norm": 1.8125004768371582,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.0111,
+ "step": 1984
+ },
+ {
+ "epoch": 7.271062271062271,
+ "grad_norm": 1.3189254999160767,
+ "learning_rate": 1.821733821733822e-05,
+ "loss": 0.0056,
+ "step": 1985
+ },
+ {
+ "epoch": 7.274725274725275,
+ "grad_norm": 47.977203369140625,
+ "learning_rate": 1.8192918192918194e-05,
+ "loss": 0.3898,
+ "step": 1986
+ },
+ {
+ "epoch": 7.278388278388278,
+ "grad_norm": 39.66654968261719,
+ "learning_rate": 1.816849816849817e-05,
+ "loss": 0.4953,
+ "step": 1987
+ },
+ {
+ "epoch": 7.282051282051282,
+ "grad_norm": 24.90619659423828,
+ "learning_rate": 1.8144078144078147e-05,
+ "loss": 0.116,
+ "step": 1988
+ },
+ {
+ "epoch": 7.285714285714286,
+ "grad_norm": 4.373020648956299,
+ "learning_rate": 1.811965811965812e-05,
+ "loss": 0.0263,
+ "step": 1989
+ },
+ {
+ "epoch": 7.289377289377289,
+ "grad_norm": 24.788022994995117,
+ "learning_rate": 1.8095238095238094e-05,
+ "loss": 0.2322,
+ "step": 1990
+ },
+ {
+ "epoch": 7.293040293040293,
+ "grad_norm": 6.417362213134766,
+ "learning_rate": 1.8070818070818072e-05,
+ "loss": 0.0243,
+ "step": 1991
+ },
+ {
+ "epoch": 7.2967032967032965,
+ "grad_norm": 34.0954475402832,
+ "learning_rate": 1.8046398046398047e-05,
+ "loss": 0.6666,
+ "step": 1992
+ },
+ {
+ "epoch": 7.3003663003663,
+ "grad_norm": 5.597110748291016,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.0389,
+ "step": 1993
+ },
+ {
+ "epoch": 7.304029304029304,
+ "grad_norm": 70.55953979492188,
+ "learning_rate": 1.7997557997558e-05,
+ "loss": 0.7335,
+ "step": 1994
+ },
+ {
+ "epoch": 7.3076923076923075,
+ "grad_norm": 17.913522720336914,
+ "learning_rate": 1.7973137973137973e-05,
+ "loss": 0.2307,
+ "step": 1995
+ },
+ {
+ "epoch": 7.311355311355311,
+ "grad_norm": 9.62990665435791,
+ "learning_rate": 1.7948717948717948e-05,
+ "loss": 0.0515,
+ "step": 1996
+ },
+ {
+ "epoch": 7.315018315018315,
+ "grad_norm": 1.333807110786438,
+ "learning_rate": 1.7924297924297923e-05,
+ "loss": 0.0088,
+ "step": 1997
+ },
+ {
+ "epoch": 7.318681318681318,
+ "grad_norm": 12.604703903198242,
+ "learning_rate": 1.78998778998779e-05,
+ "loss": 0.0802,
+ "step": 1998
+ },
+ {
+ "epoch": 7.322344322344322,
+ "grad_norm": 57.309974670410156,
+ "learning_rate": 1.7875457875457876e-05,
+ "loss": 0.738,
+ "step": 1999
+ },
+ {
+ "epoch": 7.326007326007326,
+ "grad_norm": 12.750027656555176,
+ "learning_rate": 1.785103785103785e-05,
+ "loss": 0.0785,
+ "step": 2000
+ },
+ {
+ "epoch": 7.329670329670329,
+ "grad_norm": 39.28510665893555,
+ "learning_rate": 1.7826617826617826e-05,
+ "loss": 0.4609,
+ "step": 2001
+ },
+ {
+ "epoch": 7.333333333333333,
+ "grad_norm": 19.048255920410156,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.1013,
+ "step": 2002
+ },
+ {
+ "epoch": 7.336996336996337,
+ "grad_norm": 50.47089385986328,
+ "learning_rate": 1.7777777777777777e-05,
+ "loss": 0.714,
+ "step": 2003
+ },
+ {
+ "epoch": 7.34065934065934,
+ "grad_norm": 2.6616337299346924,
+ "learning_rate": 1.7753357753357755e-05,
+ "loss": 0.0183,
+ "step": 2004
+ },
+ {
+ "epoch": 7.344322344322344,
+ "grad_norm": 23.130146026611328,
+ "learning_rate": 1.772893772893773e-05,
+ "loss": 0.2,
+ "step": 2005
+ },
+ {
+ "epoch": 7.347985347985348,
+ "grad_norm": 23.108713150024414,
+ "learning_rate": 1.7704517704517705e-05,
+ "loss": 0.2199,
+ "step": 2006
+ },
+ {
+ "epoch": 7.351648351648351,
+ "grad_norm": 14.890787124633789,
+ "learning_rate": 1.7680097680097684e-05,
+ "loss": 0.0872,
+ "step": 2007
+ },
+ {
+ "epoch": 7.355311355311355,
+ "grad_norm": 41.62394714355469,
+ "learning_rate": 1.7655677655677655e-05,
+ "loss": 0.3795,
+ "step": 2008
+ },
+ {
+ "epoch": 7.358974358974359,
+ "grad_norm": 19.252058029174805,
+ "learning_rate": 1.763125763125763e-05,
+ "loss": 0.0822,
+ "step": 2009
+ },
+ {
+ "epoch": 7.362637362637362,
+ "grad_norm": 23.167705535888672,
+ "learning_rate": 1.7606837606837605e-05,
+ "loss": 0.1602,
+ "step": 2010
+ },
+ {
+ "epoch": 7.366300366300366,
+ "grad_norm": 34.01895523071289,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.3295,
+ "step": 2011
+ },
+ {
+ "epoch": 7.36996336996337,
+ "grad_norm": 28.834074020385742,
+ "learning_rate": 1.755799755799756e-05,
+ "loss": 0.1371,
+ "step": 2012
+ },
+ {
+ "epoch": 7.373626373626374,
+ "grad_norm": 13.843847274780273,
+ "learning_rate": 1.7533577533577534e-05,
+ "loss": 0.1339,
+ "step": 2013
+ },
+ {
+ "epoch": 7.377289377289378,
+ "grad_norm": 10.192770957946777,
+ "learning_rate": 1.750915750915751e-05,
+ "loss": 0.058,
+ "step": 2014
+ },
+ {
+ "epoch": 7.380952380952381,
+ "grad_norm": 55.51911544799805,
+ "learning_rate": 1.7484737484737484e-05,
+ "loss": 0.4047,
+ "step": 2015
+ },
+ {
+ "epoch": 7.384615384615385,
+ "grad_norm": 2.7761716842651367,
+ "learning_rate": 1.746031746031746e-05,
+ "loss": 0.0232,
+ "step": 2016
+ },
+ {
+ "epoch": 7.388278388278389,
+ "grad_norm": 7.78446626663208,
+ "learning_rate": 1.7435897435897438e-05,
+ "loss": 0.0781,
+ "step": 2017
+ },
+ {
+ "epoch": 7.391941391941392,
+ "grad_norm": 46.8702507019043,
+ "learning_rate": 1.7411477411477413e-05,
+ "loss": 0.8509,
+ "step": 2018
+ },
+ {
+ "epoch": 7.395604395604396,
+ "grad_norm": 32.83955001831055,
+ "learning_rate": 1.7387057387057388e-05,
+ "loss": 0.2573,
+ "step": 2019
+ },
+ {
+ "epoch": 7.3992673992674,
+ "grad_norm": 40.40720748901367,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.1884,
+ "step": 2020
+ },
+ {
+ "epoch": 7.402930402930403,
+ "grad_norm": 19.889108657836914,
+ "learning_rate": 1.7338217338217338e-05,
+ "loss": 0.0401,
+ "step": 2021
+ },
+ {
+ "epoch": 7.406593406593407,
+ "grad_norm": 23.082000732421875,
+ "learning_rate": 1.7313797313797313e-05,
+ "loss": 0.3323,
+ "step": 2022
+ },
+ {
+ "epoch": 7.410256410256411,
+ "grad_norm": 44.391605377197266,
+ "learning_rate": 1.7289377289377288e-05,
+ "loss": 0.4417,
+ "step": 2023
+ },
+ {
+ "epoch": 7.413919413919414,
+ "grad_norm": 2.9148988723754883,
+ "learning_rate": 1.7264957264957267e-05,
+ "loss": 0.0104,
+ "step": 2024
+ },
+ {
+ "epoch": 7.417582417582418,
+ "grad_norm": 39.043304443359375,
+ "learning_rate": 1.724053724053724e-05,
+ "loss": 0.2819,
+ "step": 2025
+ },
+ {
+ "epoch": 7.4212454212454215,
+ "grad_norm": 47.23966598510742,
+ "learning_rate": 1.7216117216117217e-05,
+ "loss": 0.3823,
+ "step": 2026
+ },
+ {
+ "epoch": 7.424908424908425,
+ "grad_norm": 31.07651710510254,
+ "learning_rate": 1.7191697191697192e-05,
+ "loss": 0.1564,
+ "step": 2027
+ },
+ {
+ "epoch": 7.428571428571429,
+ "grad_norm": 2.0451018810272217,
+ "learning_rate": 1.7167277167277167e-05,
+ "loss": 0.0091,
+ "step": 2028
+ },
+ {
+ "epoch": 7.4322344322344325,
+ "grad_norm": 43.10199737548828,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.2758,
+ "step": 2029
+ },
+ {
+ "epoch": 7.435897435897436,
+ "grad_norm": 9.677335739135742,
+ "learning_rate": 1.711843711843712e-05,
+ "loss": 0.0309,
+ "step": 2030
+ },
+ {
+ "epoch": 7.43956043956044,
+ "grad_norm": 21.8636474609375,
+ "learning_rate": 1.7094017094017095e-05,
+ "loss": 0.141,
+ "step": 2031
+ },
+ {
+ "epoch": 7.443223443223443,
+ "grad_norm": 0.3610914349555969,
+ "learning_rate": 1.706959706959707e-05,
+ "loss": 0.0022,
+ "step": 2032
+ },
+ {
+ "epoch": 7.446886446886447,
+ "grad_norm": 1.5513430833816528,
+ "learning_rate": 1.704517704517705e-05,
+ "loss": 0.0059,
+ "step": 2033
+ },
+ {
+ "epoch": 7.450549450549451,
+ "grad_norm": 0.36708980798721313,
+ "learning_rate": 1.702075702075702e-05,
+ "loss": 0.0018,
+ "step": 2034
+ },
+ {
+ "epoch": 7.454212454212454,
+ "grad_norm": 6.103841781616211,
+ "learning_rate": 1.6996336996336996e-05,
+ "loss": 0.0325,
+ "step": 2035
+ },
+ {
+ "epoch": 7.457875457875458,
+ "grad_norm": 26.09792709350586,
+ "learning_rate": 1.697191697191697e-05,
+ "loss": 0.0481,
+ "step": 2036
+ },
+ {
+ "epoch": 7.461538461538462,
+ "grad_norm": 19.57491111755371,
+ "learning_rate": 1.694749694749695e-05,
+ "loss": 0.0981,
+ "step": 2037
+ },
+ {
+ "epoch": 7.465201465201465,
+ "grad_norm": 6.412461280822754,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.04,
+ "step": 2038
+ },
+ {
+ "epoch": 7.468864468864469,
+ "grad_norm": 0.46989959478378296,
+ "learning_rate": 1.68986568986569e-05,
+ "loss": 0.002,
+ "step": 2039
+ },
+ {
+ "epoch": 7.472527472527473,
+ "grad_norm": 5.42742919921875,
+ "learning_rate": 1.6874236874236874e-05,
+ "loss": 0.0245,
+ "step": 2040
+ },
+ {
+ "epoch": 7.476190476190476,
+ "grad_norm": 5.105862140655518,
+ "learning_rate": 1.684981684981685e-05,
+ "loss": 0.0277,
+ "step": 2041
+ },
+ {
+ "epoch": 7.47985347985348,
+ "grad_norm": 3.4603350162506104,
+ "learning_rate": 1.6825396825396824e-05,
+ "loss": 0.0093,
+ "step": 2042
+ },
+ {
+ "epoch": 7.483516483516484,
+ "grad_norm": 49.75768280029297,
+ "learning_rate": 1.6800976800976803e-05,
+ "loss": 0.7042,
+ "step": 2043
+ },
+ {
+ "epoch": 7.487179487179487,
+ "grad_norm": 51.32642364501953,
+ "learning_rate": 1.6776556776556778e-05,
+ "loss": 0.9967,
+ "step": 2044
+ },
+ {
+ "epoch": 7.490842490842491,
+ "grad_norm": 3.8675732612609863,
+ "learning_rate": 1.6752136752136753e-05,
+ "loss": 0.0153,
+ "step": 2045
+ },
+ {
+ "epoch": 7.4945054945054945,
+ "grad_norm": 36.375526428222656,
+ "learning_rate": 1.6727716727716728e-05,
+ "loss": 0.2771,
+ "step": 2046
+ },
+ {
+ "epoch": 7.498168498168498,
+ "grad_norm": 2.354778528213501,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.0137,
+ "step": 2047
+ },
+ {
+ "epoch": 7.501831501831502,
+ "grad_norm": 46.09824752807617,
+ "learning_rate": 1.6678876678876678e-05,
+ "loss": 0.3772,
+ "step": 2048
+ },
+ {
+ "epoch": 7.5054945054945055,
+ "grad_norm": 42.83018112182617,
+ "learning_rate": 1.6654456654456653e-05,
+ "loss": 0.2655,
+ "step": 2049
+ },
+ {
+ "epoch": 7.509157509157509,
+ "grad_norm": 34.598880767822266,
+ "learning_rate": 1.6630036630036632e-05,
+ "loss": 0.235,
+ "step": 2050
+ },
+ {
+ "epoch": 7.512820512820513,
+ "grad_norm": 2.5649797916412354,
+ "learning_rate": 1.6605616605616607e-05,
+ "loss": 0.0157,
+ "step": 2051
+ },
+ {
+ "epoch": 7.516483516483516,
+ "grad_norm": 15.715023040771484,
+ "learning_rate": 1.6581196581196582e-05,
+ "loss": 0.0861,
+ "step": 2052
+ },
+ {
+ "epoch": 7.52014652014652,
+ "grad_norm": 17.451343536376953,
+ "learning_rate": 1.6556776556776557e-05,
+ "loss": 0.1199,
+ "step": 2053
+ },
+ {
+ "epoch": 7.523809523809524,
+ "grad_norm": 29.217243194580078,
+ "learning_rate": 1.6532356532356532e-05,
+ "loss": 0.3191,
+ "step": 2054
+ },
+ {
+ "epoch": 7.527472527472527,
+ "grad_norm": 5.1904683113098145,
+ "learning_rate": 1.6507936507936507e-05,
+ "loss": 0.0131,
+ "step": 2055
+ },
+ {
+ "epoch": 7.531135531135531,
+ "grad_norm": 4.807910919189453,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.0225,
+ "step": 2056
+ },
+ {
+ "epoch": 7.534798534798535,
+ "grad_norm": 3.9078361988067627,
+ "learning_rate": 1.645909645909646e-05,
+ "loss": 0.014,
+ "step": 2057
+ },
+ {
+ "epoch": 7.538461538461538,
+ "grad_norm": 32.45369338989258,
+ "learning_rate": 1.6434676434676436e-05,
+ "loss": 0.2331,
+ "step": 2058
+ },
+ {
+ "epoch": 7.542124542124542,
+ "grad_norm": 9.129495620727539,
+ "learning_rate": 1.641025641025641e-05,
+ "loss": 0.0328,
+ "step": 2059
+ },
+ {
+ "epoch": 7.545787545787546,
+ "grad_norm": 1.4577407836914062,
+ "learning_rate": 1.6385836385836386e-05,
+ "loss": 0.0062,
+ "step": 2060
+ },
+ {
+ "epoch": 7.549450549450549,
+ "grad_norm": 15.017457008361816,
+ "learning_rate": 1.636141636141636e-05,
+ "loss": 0.0611,
+ "step": 2061
+ },
+ {
+ "epoch": 7.553113553113553,
+ "grad_norm": 39.598941802978516,
+ "learning_rate": 1.6336996336996336e-05,
+ "loss": 0.2892,
+ "step": 2062
+ },
+ {
+ "epoch": 7.556776556776557,
+ "grad_norm": 67.49568176269531,
+ "learning_rate": 1.6312576312576314e-05,
+ "loss": 0.66,
+ "step": 2063
+ },
+ {
+ "epoch": 7.56043956043956,
+ "grad_norm": 32.164634704589844,
+ "learning_rate": 1.628815628815629e-05,
+ "loss": 0.2308,
+ "step": 2064
+ },
+ {
+ "epoch": 7.564102564102564,
+ "grad_norm": 2.058502197265625,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 0.0089,
+ "step": 2065
+ },
+ {
+ "epoch": 7.5677655677655675,
+ "grad_norm": 46.27522659301758,
+ "learning_rate": 1.623931623931624e-05,
+ "loss": 0.259,
+ "step": 2066
+ },
+ {
+ "epoch": 7.571428571428571,
+ "grad_norm": 54.9110221862793,
+ "learning_rate": 1.6214896214896215e-05,
+ "loss": 0.3899,
+ "step": 2067
+ },
+ {
+ "epoch": 7.575091575091575,
+ "grad_norm": 9.964278221130371,
+ "learning_rate": 1.619047619047619e-05,
+ "loss": 0.026,
+ "step": 2068
+ },
+ {
+ "epoch": 7.5787545787545785,
+ "grad_norm": 5.512078762054443,
+ "learning_rate": 1.6166056166056168e-05,
+ "loss": 0.0187,
+ "step": 2069
+ },
+ {
+ "epoch": 7.582417582417582,
+ "grad_norm": 38.90432357788086,
+ "learning_rate": 1.6141636141636143e-05,
+ "loss": 0.0728,
+ "step": 2070
+ },
+ {
+ "epoch": 7.586080586080586,
+ "grad_norm": 11.633467674255371,
+ "learning_rate": 1.6117216117216118e-05,
+ "loss": 0.0383,
+ "step": 2071
+ },
+ {
+ "epoch": 7.589743589743589,
+ "grad_norm": 8.595443725585938,
+ "learning_rate": 1.609279609279609e-05,
+ "loss": 0.0313,
+ "step": 2072
+ },
+ {
+ "epoch": 7.593406593406593,
+ "grad_norm": 2.8875672817230225,
+ "learning_rate": 1.606837606837607e-05,
+ "loss": 0.0102,
+ "step": 2073
+ },
+ {
+ "epoch": 7.597069597069597,
+ "grad_norm": 42.968170166015625,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.1532,
+ "step": 2074
+ },
+ {
+ "epoch": 7.6007326007326,
+ "grad_norm": 1.500887393951416,
+ "learning_rate": 1.601953601953602e-05,
+ "loss": 0.0047,
+ "step": 2075
+ },
+ {
+ "epoch": 7.604395604395604,
+ "grad_norm": 1.2472022771835327,
+ "learning_rate": 1.5995115995115997e-05,
+ "loss": 0.004,
+ "step": 2076
+ },
+ {
+ "epoch": 7.608058608058608,
+ "grad_norm": 1.0480316877365112,
+ "learning_rate": 1.5970695970695972e-05,
+ "loss": 0.0045,
+ "step": 2077
+ },
+ {
+ "epoch": 7.611721611721611,
+ "grad_norm": 37.37439727783203,
+ "learning_rate": 1.5946275946275947e-05,
+ "loss": 0.2399,
+ "step": 2078
+ },
+ {
+ "epoch": 7.615384615384615,
+ "grad_norm": 0.7918564677238464,
+ "learning_rate": 1.5921855921855922e-05,
+ "loss": 0.0032,
+ "step": 2079
+ },
+ {
+ "epoch": 7.619047619047619,
+ "grad_norm": 6.207716941833496,
+ "learning_rate": 1.5897435897435897e-05,
+ "loss": 0.0214,
+ "step": 2080
+ },
+ {
+ "epoch": 7.622710622710622,
+ "grad_norm": 29.516454696655273,
+ "learning_rate": 1.5873015873015872e-05,
+ "loss": 0.3501,
+ "step": 2081
+ },
+ {
+ "epoch": 7.626373626373626,
+ "grad_norm": 2.8200786113739014,
+ "learning_rate": 1.584859584859585e-05,
+ "loss": 0.0057,
+ "step": 2082
+ },
+ {
+ "epoch": 7.63003663003663,
+ "grad_norm": 14.830533981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.0321,
+ "step": 2083
+ },
+ {
+ "epoch": 7.633699633699633,
+ "grad_norm": 34.72395706176758,
+ "learning_rate": 1.57997557997558e-05,
+ "loss": 0.1672,
+ "step": 2084
+ },
+ {
+ "epoch": 7.637362637362637,
+ "grad_norm": 75.48332214355469,
+ "learning_rate": 1.5775335775335773e-05,
+ "loss": 0.44,
+ "step": 2085
+ },
+ {
+ "epoch": 7.641025641025641,
+ "grad_norm": 0.4638623297214508,
+ "learning_rate": 1.575091575091575e-05,
+ "loss": 0.0025,
+ "step": 2086
+ },
+ {
+ "epoch": 7.644688644688645,
+ "grad_norm": 47.44121170043945,
+ "learning_rate": 1.5726495726495726e-05,
+ "loss": 0.2901,
+ "step": 2087
+ },
+ {
+ "epoch": 7.648351648351649,
+ "grad_norm": 31.14560317993164,
+ "learning_rate": 1.57020757020757e-05,
+ "loss": 0.1881,
+ "step": 2088
+ },
+ {
+ "epoch": 7.652014652014652,
+ "grad_norm": 14.16900634765625,
+ "learning_rate": 1.567765567765568e-05,
+ "loss": 0.0609,
+ "step": 2089
+ },
+ {
+ "epoch": 7.655677655677656,
+ "grad_norm": 248.16372680664062,
+ "learning_rate": 1.5653235653235655e-05,
+ "loss": 0.14,
+ "step": 2090
+ },
+ {
+ "epoch": 7.65934065934066,
+ "grad_norm": 78.32206726074219,
+ "learning_rate": 1.562881562881563e-05,
+ "loss": 1.3854,
+ "step": 2091
+ },
+ {
+ "epoch": 7.663003663003663,
+ "grad_norm": 2.940131664276123,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 0.0101,
+ "step": 2092
+ },
+ {
+ "epoch": 7.666666666666667,
+ "grad_norm": 4.624741077423096,
+ "learning_rate": 1.557997557997558e-05,
+ "loss": 0.0168,
+ "step": 2093
+ },
+ {
+ "epoch": 7.670329670329671,
+ "grad_norm": 42.516990661621094,
+ "learning_rate": 1.5555555555555555e-05,
+ "loss": 0.2529,
+ "step": 2094
+ },
+ {
+ "epoch": 7.673992673992674,
+ "grad_norm": 24.555633544921875,
+ "learning_rate": 1.5531135531135533e-05,
+ "loss": 0.1367,
+ "step": 2095
+ },
+ {
+ "epoch": 7.677655677655678,
+ "grad_norm": 35.021644592285156,
+ "learning_rate": 1.550671550671551e-05,
+ "loss": 0.2322,
+ "step": 2096
+ },
+ {
+ "epoch": 7.681318681318682,
+ "grad_norm": 0.8293462991714478,
+ "learning_rate": 1.5482295482295484e-05,
+ "loss": 0.0038,
+ "step": 2097
+ },
+ {
+ "epoch": 7.684981684981685,
+ "grad_norm": 25.26691436767578,
+ "learning_rate": 1.5457875457875455e-05,
+ "loss": 0.1326,
+ "step": 2098
+ },
+ {
+ "epoch": 7.688644688644689,
+ "grad_norm": 46.36896514892578,
+ "learning_rate": 1.5433455433455434e-05,
+ "loss": 0.661,
+ "step": 2099
+ },
+ {
+ "epoch": 7.6923076923076925,
+ "grad_norm": 23.875978469848633,
+ "learning_rate": 1.540903540903541e-05,
+ "loss": 0.1815,
+ "step": 2100
+ },
+ {
+ "epoch": 7.695970695970696,
+ "grad_norm": 14.46264362335205,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.1048,
+ "step": 2101
+ },
+ {
+ "epoch": 7.6996336996337,
+ "grad_norm": 15.445000648498535,
+ "learning_rate": 1.5360195360195362e-05,
+ "loss": 0.0455,
+ "step": 2102
+ },
+ {
+ "epoch": 7.7032967032967035,
+ "grad_norm": 0.21127165853977203,
+ "learning_rate": 1.5335775335775337e-05,
+ "loss": 0.0006,
+ "step": 2103
+ },
+ {
+ "epoch": 7.706959706959707,
+ "grad_norm": 11.099639892578125,
+ "learning_rate": 1.531135531135531e-05,
+ "loss": 0.0598,
+ "step": 2104
+ },
+ {
+ "epoch": 7.710622710622711,
+ "grad_norm": 5.1992950439453125,
+ "learning_rate": 1.5286935286935287e-05,
+ "loss": 0.0204,
+ "step": 2105
+ },
+ {
+ "epoch": 7.714285714285714,
+ "grad_norm": 4.170431613922119,
+ "learning_rate": 1.5262515262515263e-05,
+ "loss": 0.0202,
+ "step": 2106
+ },
+ {
+ "epoch": 7.717948717948718,
+ "grad_norm": 35.86619567871094,
+ "learning_rate": 1.5238095238095238e-05,
+ "loss": 0.2789,
+ "step": 2107
+ },
+ {
+ "epoch": 7.721611721611722,
+ "grad_norm": 39.799415588378906,
+ "learning_rate": 1.5213675213675216e-05,
+ "loss": 0.251,
+ "step": 2108
+ },
+ {
+ "epoch": 7.725274725274725,
+ "grad_norm": 25.5378475189209,
+ "learning_rate": 1.518925518925519e-05,
+ "loss": 0.1284,
+ "step": 2109
+ },
+ {
+ "epoch": 7.728937728937729,
+ "grad_norm": 2.4359946250915527,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.0127,
+ "step": 2110
+ },
+ {
+ "epoch": 7.732600732600733,
+ "grad_norm": 12.041257858276367,
+ "learning_rate": 1.514041514041514e-05,
+ "loss": 0.027,
+ "step": 2111
+ },
+ {
+ "epoch": 7.736263736263736,
+ "grad_norm": 34.67470169067383,
+ "learning_rate": 1.5115995115995116e-05,
+ "loss": 0.2489,
+ "step": 2112
+ },
+ {
+ "epoch": 7.73992673992674,
+ "grad_norm": 2.041276693344116,
+ "learning_rate": 1.5091575091575091e-05,
+ "loss": 0.0071,
+ "step": 2113
+ },
+ {
+ "epoch": 7.743589743589744,
+ "grad_norm": 0.2618583142757416,
+ "learning_rate": 1.5067155067155066e-05,
+ "loss": 0.0015,
+ "step": 2114
+ },
+ {
+ "epoch": 7.747252747252747,
+ "grad_norm": 29.656461715698242,
+ "learning_rate": 1.5042735042735045e-05,
+ "loss": 0.1325,
+ "step": 2115
+ },
+ {
+ "epoch": 7.750915750915751,
+ "grad_norm": 33.18010330200195,
+ "learning_rate": 1.5018315018315018e-05,
+ "loss": 0.2356,
+ "step": 2116
+ },
+ {
+ "epoch": 7.754578754578755,
+ "grad_norm": 17.884321212768555,
+ "learning_rate": 1.4993894993894995e-05,
+ "loss": 0.0953,
+ "step": 2117
+ },
+ {
+ "epoch": 7.758241758241758,
+ "grad_norm": 9.597829818725586,
+ "learning_rate": 1.496947496947497e-05,
+ "loss": 0.051,
+ "step": 2118
+ },
+ {
+ "epoch": 7.761904761904762,
+ "grad_norm": 32.64970397949219,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.2014,
+ "step": 2119
+ },
+ {
+ "epoch": 7.7655677655677655,
+ "grad_norm": 9.97050666809082,
+ "learning_rate": 1.492063492063492e-05,
+ "loss": 0.0659,
+ "step": 2120
+ },
+ {
+ "epoch": 7.769230769230769,
+ "grad_norm": 27.019380569458008,
+ "learning_rate": 1.4896214896214897e-05,
+ "loss": 0.155,
+ "step": 2121
+ },
+ {
+ "epoch": 7.772893772893773,
+ "grad_norm": 21.946569442749023,
+ "learning_rate": 1.4871794871794872e-05,
+ "loss": 0.0974,
+ "step": 2122
+ },
+ {
+ "epoch": 7.7765567765567765,
+ "grad_norm": 17.21709442138672,
+ "learning_rate": 1.4847374847374847e-05,
+ "loss": 0.0764,
+ "step": 2123
+ },
+ {
+ "epoch": 7.78021978021978,
+ "grad_norm": 25.19805335998535,
+ "learning_rate": 1.4822954822954824e-05,
+ "loss": 0.2383,
+ "step": 2124
+ },
+ {
+ "epoch": 7.783882783882784,
+ "grad_norm": 21.493112564086914,
+ "learning_rate": 1.4798534798534799e-05,
+ "loss": 0.1307,
+ "step": 2125
+ },
+ {
+ "epoch": 7.787545787545787,
+ "grad_norm": 7.874645233154297,
+ "learning_rate": 1.4774114774114774e-05,
+ "loss": 0.0303,
+ "step": 2126
+ },
+ {
+ "epoch": 7.791208791208791,
+ "grad_norm": 25.664508819580078,
+ "learning_rate": 1.474969474969475e-05,
+ "loss": 0.1054,
+ "step": 2127
+ },
+ {
+ "epoch": 7.794871794871795,
+ "grad_norm": 45.79121398925781,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.4923,
+ "step": 2128
+ },
+ {
+ "epoch": 7.798534798534798,
+ "grad_norm": 7.350006580352783,
+ "learning_rate": 1.4700854700854701e-05,
+ "loss": 0.0278,
+ "step": 2129
+ },
+ {
+ "epoch": 7.802197802197802,
+ "grad_norm": 3.626199245452881,
+ "learning_rate": 1.4676434676434678e-05,
+ "loss": 0.0184,
+ "step": 2130
+ },
+ {
+ "epoch": 7.805860805860806,
+ "grad_norm": 46.03739547729492,
+ "learning_rate": 1.4652014652014653e-05,
+ "loss": 0.195,
+ "step": 2131
+ },
+ {
+ "epoch": 7.809523809523809,
+ "grad_norm": 1.0704383850097656,
+ "learning_rate": 1.4627594627594628e-05,
+ "loss": 0.0039,
+ "step": 2132
+ },
+ {
+ "epoch": 7.813186813186813,
+ "grad_norm": 46.02214813232422,
+ "learning_rate": 1.4603174603174603e-05,
+ "loss": 0.2533,
+ "step": 2133
+ },
+ {
+ "epoch": 7.816849816849817,
+ "grad_norm": 2.2334794998168945,
+ "learning_rate": 1.457875457875458e-05,
+ "loss": 0.0087,
+ "step": 2134
+ },
+ {
+ "epoch": 7.82051282051282,
+ "grad_norm": 2.7543773651123047,
+ "learning_rate": 1.4554334554334555e-05,
+ "loss": 0.0094,
+ "step": 2135
+ },
+ {
+ "epoch": 7.824175824175824,
+ "grad_norm": 44.24272918701172,
+ "learning_rate": 1.452991452991453e-05,
+ "loss": 0.4251,
+ "step": 2136
+ },
+ {
+ "epoch": 7.827838827838828,
+ "grad_norm": 48.497154235839844,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.2763,
+ "step": 2137
+ },
+ {
+ "epoch": 7.831501831501831,
+ "grad_norm": 38.73664093017578,
+ "learning_rate": 1.4481074481074482e-05,
+ "loss": 0.6482,
+ "step": 2138
+ },
+ {
+ "epoch": 7.835164835164835,
+ "grad_norm": 2.15800142288208,
+ "learning_rate": 1.4456654456654457e-05,
+ "loss": 0.0085,
+ "step": 2139
+ },
+ {
+ "epoch": 7.8388278388278385,
+ "grad_norm": 7.289889812469482,
+ "learning_rate": 1.4432234432234433e-05,
+ "loss": 0.041,
+ "step": 2140
+ },
+ {
+ "epoch": 7.842490842490842,
+ "grad_norm": 39.962310791015625,
+ "learning_rate": 1.4407814407814407e-05,
+ "loss": 0.3403,
+ "step": 2141
+ },
+ {
+ "epoch": 7.846153846153846,
+ "grad_norm": 23.029020309448242,
+ "learning_rate": 1.4383394383394383e-05,
+ "loss": 0.1281,
+ "step": 2142
+ },
+ {
+ "epoch": 7.8498168498168495,
+ "grad_norm": 7.111436367034912,
+ "learning_rate": 1.435897435897436e-05,
+ "loss": 0.0416,
+ "step": 2143
+ },
+ {
+ "epoch": 7.853479853479853,
+ "grad_norm": 7.24738073348999,
+ "learning_rate": 1.4334554334554335e-05,
+ "loss": 0.0159,
+ "step": 2144
+ },
+ {
+ "epoch": 7.857142857142857,
+ "grad_norm": 74.41973876953125,
+ "learning_rate": 1.431013431013431e-05,
+ "loss": 0.4156,
+ "step": 2145
+ },
+ {
+ "epoch": 7.860805860805861,
+ "grad_norm": 1.8928090333938599,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.0088,
+ "step": 2146
+ },
+ {
+ "epoch": 7.864468864468865,
+ "grad_norm": 74.72843170166016,
+ "learning_rate": 1.4261294261294262e-05,
+ "loss": 0.7266,
+ "step": 2147
+ },
+ {
+ "epoch": 7.868131868131869,
+ "grad_norm": 3.2044010162353516,
+ "learning_rate": 1.4236874236874237e-05,
+ "loss": 0.0134,
+ "step": 2148
+ },
+ {
+ "epoch": 7.871794871794872,
+ "grad_norm": 0.9343626499176025,
+ "learning_rate": 1.4212454212454212e-05,
+ "loss": 0.0028,
+ "step": 2149
+ },
+ {
+ "epoch": 7.875457875457876,
+ "grad_norm": 6.980963230133057,
+ "learning_rate": 1.4188034188034189e-05,
+ "loss": 0.011,
+ "step": 2150
+ },
+ {
+ "epoch": 7.8791208791208796,
+ "grad_norm": 1.1645610332489014,
+ "learning_rate": 1.4163614163614164e-05,
+ "loss": 0.005,
+ "step": 2151
+ },
+ {
+ "epoch": 7.882783882783883,
+ "grad_norm": 2.219325065612793,
+ "learning_rate": 1.413919413919414e-05,
+ "loss": 0.0127,
+ "step": 2152
+ },
+ {
+ "epoch": 7.886446886446887,
+ "grad_norm": 3.3467326164245605,
+ "learning_rate": 1.4114774114774116e-05,
+ "loss": 0.0108,
+ "step": 2153
+ },
+ {
+ "epoch": 7.8901098901098905,
+ "grad_norm": 47.454647064208984,
+ "learning_rate": 1.409035409035409e-05,
+ "loss": 0.4106,
+ "step": 2154
+ },
+ {
+ "epoch": 7.893772893772894,
+ "grad_norm": 26.02679443359375,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.1038,
+ "step": 2155
+ },
+ {
+ "epoch": 7.897435897435898,
+ "grad_norm": 56.01240921020508,
+ "learning_rate": 1.4041514041514043e-05,
+ "loss": 0.4808,
+ "step": 2156
+ },
+ {
+ "epoch": 7.9010989010989015,
+ "grad_norm": 3.230677843093872,
+ "learning_rate": 1.4017094017094016e-05,
+ "loss": 0.0123,
+ "step": 2157
+ },
+ {
+ "epoch": 7.904761904761905,
+ "grad_norm": 0.07180067896842957,
+ "learning_rate": 1.3992673992673993e-05,
+ "loss": 0.0004,
+ "step": 2158
+ },
+ {
+ "epoch": 7.908424908424909,
+ "grad_norm": 45.324222564697266,
+ "learning_rate": 1.3968253968253968e-05,
+ "loss": 0.1359,
+ "step": 2159
+ },
+ {
+ "epoch": 7.912087912087912,
+ "grad_norm": 10.703695297241211,
+ "learning_rate": 1.3943833943833945e-05,
+ "loss": 0.0608,
+ "step": 2160
+ },
+ {
+ "epoch": 7.915750915750916,
+ "grad_norm": 18.48207664489746,
+ "learning_rate": 1.391941391941392e-05,
+ "loss": 0.0425,
+ "step": 2161
+ },
+ {
+ "epoch": 7.91941391941392,
+ "grad_norm": 26.093645095825195,
+ "learning_rate": 1.3894993894993895e-05,
+ "loss": 0.1672,
+ "step": 2162
+ },
+ {
+ "epoch": 7.923076923076923,
+ "grad_norm": 41.90341567993164,
+ "learning_rate": 1.3870573870573872e-05,
+ "loss": 0.2344,
+ "step": 2163
+ },
+ {
+ "epoch": 7.926739926739927,
+ "grad_norm": 47.571990966796875,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2271,
+ "step": 2164
+ },
+ {
+ "epoch": 7.930402930402931,
+ "grad_norm": 4.187535762786865,
+ "learning_rate": 1.3821733821733822e-05,
+ "loss": 0.0153,
+ "step": 2165
+ },
+ {
+ "epoch": 7.934065934065934,
+ "grad_norm": 2.7795937061309814,
+ "learning_rate": 1.3797313797313799e-05,
+ "loss": 0.0134,
+ "step": 2166
+ },
+ {
+ "epoch": 7.937728937728938,
+ "grad_norm": 41.12346267700195,
+ "learning_rate": 1.3772893772893772e-05,
+ "loss": 0.2219,
+ "step": 2167
+ },
+ {
+ "epoch": 7.941391941391942,
+ "grad_norm": 35.827301025390625,
+ "learning_rate": 1.3748473748473749e-05,
+ "loss": 0.2477,
+ "step": 2168
+ },
+ {
+ "epoch": 7.945054945054945,
+ "grad_norm": 44.316322326660156,
+ "learning_rate": 1.3724053724053725e-05,
+ "loss": 0.0963,
+ "step": 2169
+ },
+ {
+ "epoch": 7.948717948717949,
+ "grad_norm": 23.085559844970703,
+ "learning_rate": 1.3699633699633699e-05,
+ "loss": 0.1346,
+ "step": 2170
+ },
+ {
+ "epoch": 7.9523809523809526,
+ "grad_norm": 31.379549026489258,
+ "learning_rate": 1.3675213675213676e-05,
+ "loss": 0.1232,
+ "step": 2171
+ },
+ {
+ "epoch": 7.956043956043956,
+ "grad_norm": 14.274428367614746,
+ "learning_rate": 1.365079365079365e-05,
+ "loss": 0.0883,
+ "step": 2172
+ },
+ {
+ "epoch": 7.95970695970696,
+ "grad_norm": 77.79078674316406,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.5814,
+ "step": 2173
+ },
+ {
+ "epoch": 7.9633699633699635,
+ "grad_norm": 16.881986618041992,
+ "learning_rate": 1.3601953601953602e-05,
+ "loss": 0.1247,
+ "step": 2174
+ },
+ {
+ "epoch": 7.967032967032967,
+ "grad_norm": 32.1965217590332,
+ "learning_rate": 1.3577533577533578e-05,
+ "loss": 0.208,
+ "step": 2175
+ },
+ {
+ "epoch": 7.970695970695971,
+ "grad_norm": 4.283143043518066,
+ "learning_rate": 1.3553113553113554e-05,
+ "loss": 0.0182,
+ "step": 2176
+ },
+ {
+ "epoch": 7.9743589743589745,
+ "grad_norm": 51.64984130859375,
+ "learning_rate": 1.352869352869353e-05,
+ "loss": 0.6282,
+ "step": 2177
+ },
+ {
+ "epoch": 7.978021978021978,
+ "grad_norm": 25.30405616760254,
+ "learning_rate": 1.3504273504273504e-05,
+ "loss": 0.1721,
+ "step": 2178
+ },
+ {
+ "epoch": 7.981684981684982,
+ "grad_norm": 35.99342346191406,
+ "learning_rate": 1.3479853479853481e-05,
+ "loss": 0.2065,
+ "step": 2179
+ },
+ {
+ "epoch": 7.985347985347985,
+ "grad_norm": 1.2389482259750366,
+ "learning_rate": 1.3455433455433455e-05,
+ "loss": 0.0046,
+ "step": 2180
+ },
+ {
+ "epoch": 7.989010989010989,
+ "grad_norm": 40.435752868652344,
+ "learning_rate": 1.3431013431013431e-05,
+ "loss": 0.2944,
+ "step": 2181
+ },
+ {
+ "epoch": 7.992673992673993,
+ "grad_norm": 20.92979621887207,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.0686,
+ "step": 2182
+ },
+ {
+ "epoch": 7.996336996336996,
+ "grad_norm": 12.692971229553223,
+ "learning_rate": 1.3382173382173381e-05,
+ "loss": 0.0737,
+ "step": 2183
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 15.363990783691406,
+ "learning_rate": 1.3357753357753358e-05,
+ "loss": 0.0755,
+ "step": 2184
+ },
+ {
+ "epoch": 8.003663003663004,
+ "grad_norm": 39.101654052734375,
+ "learning_rate": 1.3333333333333333e-05,
+ "loss": 0.4135,
+ "step": 2185
+ },
+ {
+ "epoch": 8.007326007326007,
+ "grad_norm": 23.566104888916016,
+ "learning_rate": 1.3308913308913308e-05,
+ "loss": 0.2477,
+ "step": 2186
+ },
+ {
+ "epoch": 8.010989010989011,
+ "grad_norm": 23.69949722290039,
+ "learning_rate": 1.3284493284493285e-05,
+ "loss": 0.0747,
+ "step": 2187
+ },
+ {
+ "epoch": 8.014652014652015,
+ "grad_norm": 40.992549896240234,
+ "learning_rate": 1.326007326007326e-05,
+ "loss": 0.3049,
+ "step": 2188
+ },
+ {
+ "epoch": 8.018315018315018,
+ "grad_norm": 7.42161226272583,
+ "learning_rate": 1.3235653235653237e-05,
+ "loss": 0.022,
+ "step": 2189
+ },
+ {
+ "epoch": 8.021978021978022,
+ "grad_norm": 39.43696594238281,
+ "learning_rate": 1.3211233211233212e-05,
+ "loss": 0.0781,
+ "step": 2190
+ },
+ {
+ "epoch": 8.025641025641026,
+ "grad_norm": 6.539100646972656,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.0225,
+ "step": 2191
+ },
+ {
+ "epoch": 8.02930402930403,
+ "grad_norm": 0.2996901273727417,
+ "learning_rate": 1.3162393162393164e-05,
+ "loss": 0.002,
+ "step": 2192
+ },
+ {
+ "epoch": 8.032967032967033,
+ "grad_norm": 9.589286804199219,
+ "learning_rate": 1.3137973137973137e-05,
+ "loss": 0.0293,
+ "step": 2193
+ },
+ {
+ "epoch": 8.036630036630036,
+ "grad_norm": 5.144655227661133,
+ "learning_rate": 1.3113553113553114e-05,
+ "loss": 0.0256,
+ "step": 2194
+ },
+ {
+ "epoch": 8.04029304029304,
+ "grad_norm": 64.5682144165039,
+ "learning_rate": 1.308913308913309e-05,
+ "loss": 0.4062,
+ "step": 2195
+ },
+ {
+ "epoch": 8.043956043956044,
+ "grad_norm": 35.61048126220703,
+ "learning_rate": 1.3064713064713064e-05,
+ "loss": 0.1856,
+ "step": 2196
+ },
+ {
+ "epoch": 8.047619047619047,
+ "grad_norm": 0.3583362400531769,
+ "learning_rate": 1.3040293040293041e-05,
+ "loss": 0.0012,
+ "step": 2197
+ },
+ {
+ "epoch": 8.051282051282051,
+ "grad_norm": 10.168415069580078,
+ "learning_rate": 1.3015873015873016e-05,
+ "loss": 0.0616,
+ "step": 2198
+ },
+ {
+ "epoch": 8.054945054945055,
+ "grad_norm": 28.49810218811035,
+ "learning_rate": 1.2991452991452991e-05,
+ "loss": 0.1137,
+ "step": 2199
+ },
+ {
+ "epoch": 8.058608058608058,
+ "grad_norm": 5.252911567687988,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 0.0211,
+ "step": 2200
+ },
+ {
+ "epoch": 8.062271062271062,
+ "grad_norm": 19.91984748840332,
+ "learning_rate": 1.2942612942612943e-05,
+ "loss": 0.0983,
+ "step": 2201
+ },
+ {
+ "epoch": 8.065934065934066,
+ "grad_norm": 10.991836547851562,
+ "learning_rate": 1.2918192918192918e-05,
+ "loss": 0.0398,
+ "step": 2202
+ },
+ {
+ "epoch": 8.06959706959707,
+ "grad_norm": 16.97028923034668,
+ "learning_rate": 1.2893772893772895e-05,
+ "loss": 0.0849,
+ "step": 2203
+ },
+ {
+ "epoch": 8.073260073260073,
+ "grad_norm": 11.924320220947266,
+ "learning_rate": 1.286935286935287e-05,
+ "loss": 0.055,
+ "step": 2204
+ },
+ {
+ "epoch": 8.076923076923077,
+ "grad_norm": 16.613285064697266,
+ "learning_rate": 1.2844932844932846e-05,
+ "loss": 0.0765,
+ "step": 2205
+ },
+ {
+ "epoch": 8.08058608058608,
+ "grad_norm": 0.5711580514907837,
+ "learning_rate": 1.282051282051282e-05,
+ "loss": 0.0015,
+ "step": 2206
+ },
+ {
+ "epoch": 8.084249084249084,
+ "grad_norm": 20.53736114501953,
+ "learning_rate": 1.2796092796092797e-05,
+ "loss": 0.101,
+ "step": 2207
+ },
+ {
+ "epoch": 8.087912087912088,
+ "grad_norm": 44.09571838378906,
+ "learning_rate": 1.2771672771672773e-05,
+ "loss": 0.2872,
+ "step": 2208
+ },
+ {
+ "epoch": 8.091575091575091,
+ "grad_norm": 34.870426177978516,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.2131,
+ "step": 2209
+ },
+ {
+ "epoch": 8.095238095238095,
+ "grad_norm": 0.5102387070655823,
+ "learning_rate": 1.2722832722832723e-05,
+ "loss": 0.002,
+ "step": 2210
+ },
+ {
+ "epoch": 8.098901098901099,
+ "grad_norm": 46.13880157470703,
+ "learning_rate": 1.2698412698412699e-05,
+ "loss": 0.3227,
+ "step": 2211
+ },
+ {
+ "epoch": 8.102564102564102,
+ "grad_norm": 30.000337600708008,
+ "learning_rate": 1.2673992673992674e-05,
+ "loss": 0.0992,
+ "step": 2212
+ },
+ {
+ "epoch": 8.106227106227106,
+ "grad_norm": 6.741244316101074,
+ "learning_rate": 1.264957264957265e-05,
+ "loss": 0.0265,
+ "step": 2213
+ },
+ {
+ "epoch": 8.10989010989011,
+ "grad_norm": 2.9291558265686035,
+ "learning_rate": 1.2625152625152625e-05,
+ "loss": 0.011,
+ "step": 2214
+ },
+ {
+ "epoch": 8.113553113553113,
+ "grad_norm": 7.66628360748291,
+ "learning_rate": 1.26007326007326e-05,
+ "loss": 0.0379,
+ "step": 2215
+ },
+ {
+ "epoch": 8.117216117216117,
+ "grad_norm": 14.051595687866211,
+ "learning_rate": 1.2576312576312576e-05,
+ "loss": 0.0789,
+ "step": 2216
+ },
+ {
+ "epoch": 8.12087912087912,
+ "grad_norm": 0.47640302777290344,
+ "learning_rate": 1.2551892551892552e-05,
+ "loss": 0.0015,
+ "step": 2217
+ },
+ {
+ "epoch": 8.124542124542124,
+ "grad_norm": 71.04071807861328,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.4696,
+ "step": 2218
+ },
+ {
+ "epoch": 8.128205128205128,
+ "grad_norm": 85.2886962890625,
+ "learning_rate": 1.2503052503052502e-05,
+ "loss": 0.1978,
+ "step": 2219
+ },
+ {
+ "epoch": 8.131868131868131,
+ "grad_norm": 16.694299697875977,
+ "learning_rate": 1.247863247863248e-05,
+ "loss": 0.1035,
+ "step": 2220
+ },
+ {
+ "epoch": 8.135531135531135,
+ "grad_norm": 38.73305130004883,
+ "learning_rate": 1.2454212454212456e-05,
+ "loss": 0.2607,
+ "step": 2221
+ },
+ {
+ "epoch": 8.139194139194139,
+ "grad_norm": 1.4563415050506592,
+ "learning_rate": 1.242979242979243e-05,
+ "loss": 0.0084,
+ "step": 2222
+ },
+ {
+ "epoch": 8.142857142857142,
+ "grad_norm": 22.82903289794922,
+ "learning_rate": 1.2405372405372406e-05,
+ "loss": 0.0936,
+ "step": 2223
+ },
+ {
+ "epoch": 8.146520146520146,
+ "grad_norm": 0.5167717933654785,
+ "learning_rate": 1.2380952380952381e-05,
+ "loss": 0.0016,
+ "step": 2224
+ },
+ {
+ "epoch": 8.15018315018315,
+ "grad_norm": 13.251680374145508,
+ "learning_rate": 1.2356532356532356e-05,
+ "loss": 0.0454,
+ "step": 2225
+ },
+ {
+ "epoch": 8.153846153846153,
+ "grad_norm": 21.030073165893555,
+ "learning_rate": 1.2332112332112333e-05,
+ "loss": 0.0737,
+ "step": 2226
+ },
+ {
+ "epoch": 8.157509157509157,
+ "grad_norm": 22.005605697631836,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.1456,
+ "step": 2227
+ },
+ {
+ "epoch": 8.16117216117216,
+ "grad_norm": 2.234006404876709,
+ "learning_rate": 1.2283272283272283e-05,
+ "loss": 0.0088,
+ "step": 2228
+ },
+ {
+ "epoch": 8.164835164835164,
+ "grad_norm": 21.61121368408203,
+ "learning_rate": 1.2258852258852258e-05,
+ "loss": 0.0966,
+ "step": 2229
+ },
+ {
+ "epoch": 8.168498168498168,
+ "grad_norm": 24.948705673217773,
+ "learning_rate": 1.2234432234432235e-05,
+ "loss": 0.1154,
+ "step": 2230
+ },
+ {
+ "epoch": 8.172161172161172,
+ "grad_norm": 25.752145767211914,
+ "learning_rate": 1.221001221001221e-05,
+ "loss": 0.2507,
+ "step": 2231
+ },
+ {
+ "epoch": 8.175824175824175,
+ "grad_norm": 58.091697692871094,
+ "learning_rate": 1.2185592185592185e-05,
+ "loss": 0.5921,
+ "step": 2232
+ },
+ {
+ "epoch": 8.179487179487179,
+ "grad_norm": 0.6767385005950928,
+ "learning_rate": 1.2161172161172162e-05,
+ "loss": 0.0029,
+ "step": 2233
+ },
+ {
+ "epoch": 8.183150183150182,
+ "grad_norm": 7.783257007598877,
+ "learning_rate": 1.2136752136752139e-05,
+ "loss": 0.0277,
+ "step": 2234
+ },
+ {
+ "epoch": 8.186813186813186,
+ "grad_norm": 14.877440452575684,
+ "learning_rate": 1.2112332112332112e-05,
+ "loss": 0.0591,
+ "step": 2235
+ },
+ {
+ "epoch": 8.19047619047619,
+ "grad_norm": 59.26154708862305,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 0.7976,
+ "step": 2236
+ },
+ {
+ "epoch": 8.194139194139193,
+ "grad_norm": 0.09101928025484085,
+ "learning_rate": 1.2063492063492064e-05,
+ "loss": 0.0003,
+ "step": 2237
+ },
+ {
+ "epoch": 8.197802197802197,
+ "grad_norm": 7.83564567565918,
+ "learning_rate": 1.2039072039072039e-05,
+ "loss": 0.0266,
+ "step": 2238
+ },
+ {
+ "epoch": 8.2014652014652,
+ "grad_norm": 24.55094337463379,
+ "learning_rate": 1.2014652014652016e-05,
+ "loss": 0.1821,
+ "step": 2239
+ },
+ {
+ "epoch": 8.205128205128204,
+ "grad_norm": 0.18979696929454803,
+ "learning_rate": 1.199023199023199e-05,
+ "loss": 0.0008,
+ "step": 2240
+ },
+ {
+ "epoch": 8.208791208791208,
+ "grad_norm": 11.952847480773926,
+ "learning_rate": 1.1965811965811966e-05,
+ "loss": 0.0236,
+ "step": 2241
+ },
+ {
+ "epoch": 8.212454212454212,
+ "grad_norm": 7.738105773925781,
+ "learning_rate": 1.194139194139194e-05,
+ "loss": 0.0307,
+ "step": 2242
+ },
+ {
+ "epoch": 8.216117216117215,
+ "grad_norm": 37.77316665649414,
+ "learning_rate": 1.1916971916971918e-05,
+ "loss": 0.0926,
+ "step": 2243
+ },
+ {
+ "epoch": 8.219780219780219,
+ "grad_norm": 1.5700554847717285,
+ "learning_rate": 1.1892551892551893e-05,
+ "loss": 0.003,
+ "step": 2244
+ },
+ {
+ "epoch": 8.223443223443223,
+ "grad_norm": 26.529850006103516,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.0936,
+ "step": 2245
+ },
+ {
+ "epoch": 8.227106227106226,
+ "grad_norm": 77.18512725830078,
+ "learning_rate": 1.1843711843711844e-05,
+ "loss": 0.6767,
+ "step": 2246
+ },
+ {
+ "epoch": 8.23076923076923,
+ "grad_norm": 47.92250061035156,
+ "learning_rate": 1.181929181929182e-05,
+ "loss": 0.3669,
+ "step": 2247
+ },
+ {
+ "epoch": 8.234432234432234,
+ "grad_norm": 3.132725477218628,
+ "learning_rate": 1.1794871794871795e-05,
+ "loss": 0.0089,
+ "step": 2248
+ },
+ {
+ "epoch": 8.238095238095237,
+ "grad_norm": 24.75738525390625,
+ "learning_rate": 1.1770451770451771e-05,
+ "loss": 0.0641,
+ "step": 2249
+ },
+ {
+ "epoch": 8.241758241758241,
+ "grad_norm": 9.874589920043945,
+ "learning_rate": 1.1746031746031746e-05,
+ "loss": 0.0372,
+ "step": 2250
+ },
+ {
+ "epoch": 8.245421245421245,
+ "grad_norm": 2.6871144771575928,
+ "learning_rate": 1.1721611721611721e-05,
+ "loss": 0.0072,
+ "step": 2251
+ },
+ {
+ "epoch": 8.249084249084248,
+ "grad_norm": 8.98822021484375,
+ "learning_rate": 1.1697191697191698e-05,
+ "loss": 0.0614,
+ "step": 2252
+ },
+ {
+ "epoch": 8.252747252747252,
+ "grad_norm": 11.431350708007812,
+ "learning_rate": 1.1672771672771673e-05,
+ "loss": 0.0508,
+ "step": 2253
+ },
+ {
+ "epoch": 8.256410256410255,
+ "grad_norm": 37.37540817260742,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2841,
+ "step": 2254
+ },
+ {
+ "epoch": 8.260073260073261,
+ "grad_norm": 12.127150535583496,
+ "learning_rate": 1.1623931623931623e-05,
+ "loss": 0.0633,
+ "step": 2255
+ },
+ {
+ "epoch": 8.263736263736265,
+ "grad_norm": 20.860342025756836,
+ "learning_rate": 1.15995115995116e-05,
+ "loss": 0.0908,
+ "step": 2256
+ },
+ {
+ "epoch": 8.267399267399268,
+ "grad_norm": 14.559013366699219,
+ "learning_rate": 1.1575091575091575e-05,
+ "loss": 0.0419,
+ "step": 2257
+ },
+ {
+ "epoch": 8.271062271062272,
+ "grad_norm": 1.7196027040481567,
+ "learning_rate": 1.155067155067155e-05,
+ "loss": 0.0076,
+ "step": 2258
+ },
+ {
+ "epoch": 8.274725274725276,
+ "grad_norm": 10.175626754760742,
+ "learning_rate": 1.1526251526251527e-05,
+ "loss": 0.0393,
+ "step": 2259
+ },
+ {
+ "epoch": 8.27838827838828,
+ "grad_norm": 49.11803436279297,
+ "learning_rate": 1.1501831501831502e-05,
+ "loss": 0.4574,
+ "step": 2260
+ },
+ {
+ "epoch": 8.282051282051283,
+ "grad_norm": 31.251197814941406,
+ "learning_rate": 1.1477411477411477e-05,
+ "loss": 0.1318,
+ "step": 2261
+ },
+ {
+ "epoch": 8.285714285714286,
+ "grad_norm": 6.921731948852539,
+ "learning_rate": 1.1452991452991454e-05,
+ "loss": 0.0189,
+ "step": 2262
+ },
+ {
+ "epoch": 8.28937728937729,
+ "grad_norm": 12.07050609588623,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 0.0614,
+ "step": 2263
+ },
+ {
+ "epoch": 8.293040293040294,
+ "grad_norm": 18.133323669433594,
+ "learning_rate": 1.1404151404151404e-05,
+ "loss": 0.0673,
+ "step": 2264
+ },
+ {
+ "epoch": 8.296703296703297,
+ "grad_norm": 13.542656898498535,
+ "learning_rate": 1.137973137973138e-05,
+ "loss": 0.067,
+ "step": 2265
+ },
+ {
+ "epoch": 8.300366300366301,
+ "grad_norm": 5.377211093902588,
+ "learning_rate": 1.1355311355311356e-05,
+ "loss": 0.0185,
+ "step": 2266
+ },
+ {
+ "epoch": 8.304029304029305,
+ "grad_norm": 35.056522369384766,
+ "learning_rate": 1.1330891330891331e-05,
+ "loss": 0.2152,
+ "step": 2267
+ },
+ {
+ "epoch": 8.307692307692308,
+ "grad_norm": 9.124246597290039,
+ "learning_rate": 1.1306471306471306e-05,
+ "loss": 0.0288,
+ "step": 2268
+ },
+ {
+ "epoch": 8.311355311355312,
+ "grad_norm": 26.452402114868164,
+ "learning_rate": 1.1282051282051283e-05,
+ "loss": 0.1257,
+ "step": 2269
+ },
+ {
+ "epoch": 8.315018315018316,
+ "grad_norm": 29.298583984375,
+ "learning_rate": 1.1257631257631258e-05,
+ "loss": 0.0647,
+ "step": 2270
+ },
+ {
+ "epoch": 8.31868131868132,
+ "grad_norm": 2.6505391597747803,
+ "learning_rate": 1.1233211233211233e-05,
+ "loss": 0.0077,
+ "step": 2271
+ },
+ {
+ "epoch": 8.322344322344323,
+ "grad_norm": 5.335651397705078,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 0.0104,
+ "step": 2272
+ },
+ {
+ "epoch": 8.326007326007327,
+ "grad_norm": 4.58416748046875,
+ "learning_rate": 1.1184371184371185e-05,
+ "loss": 0.0106,
+ "step": 2273
+ },
+ {
+ "epoch": 8.32967032967033,
+ "grad_norm": 135.4385528564453,
+ "learning_rate": 1.115995115995116e-05,
+ "loss": 0.5163,
+ "step": 2274
+ },
+ {
+ "epoch": 8.333333333333334,
+ "grad_norm": 25.397010803222656,
+ "learning_rate": 1.1135531135531137e-05,
+ "loss": 0.0851,
+ "step": 2275
+ },
+ {
+ "epoch": 8.336996336996338,
+ "grad_norm": 28.56364631652832,
+ "learning_rate": 1.111111111111111e-05,
+ "loss": 0.1686,
+ "step": 2276
+ },
+ {
+ "epoch": 8.340659340659341,
+ "grad_norm": 2.306708812713623,
+ "learning_rate": 1.1086691086691087e-05,
+ "loss": 0.006,
+ "step": 2277
+ },
+ {
+ "epoch": 8.344322344322345,
+ "grad_norm": 56.0256462097168,
+ "learning_rate": 1.1062271062271063e-05,
+ "loss": 0.9074,
+ "step": 2278
+ },
+ {
+ "epoch": 8.347985347985349,
+ "grad_norm": 36.44279861450195,
+ "learning_rate": 1.1037851037851039e-05,
+ "loss": 0.1967,
+ "step": 2279
+ },
+ {
+ "epoch": 8.351648351648352,
+ "grad_norm": 37.51696014404297,
+ "learning_rate": 1.1013431013431014e-05,
+ "loss": 0.192,
+ "step": 2280
+ },
+ {
+ "epoch": 8.355311355311356,
+ "grad_norm": 0.44260093569755554,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.0023,
+ "step": 2281
+ },
+ {
+ "epoch": 8.35897435897436,
+ "grad_norm": 42.92826843261719,
+ "learning_rate": 1.0964590964590965e-05,
+ "loss": 0.283,
+ "step": 2282
+ },
+ {
+ "epoch": 8.362637362637363,
+ "grad_norm": 5.269385814666748,
+ "learning_rate": 1.094017094017094e-05,
+ "loss": 0.0273,
+ "step": 2283
+ },
+ {
+ "epoch": 8.366300366300367,
+ "grad_norm": 22.941513061523438,
+ "learning_rate": 1.0915750915750916e-05,
+ "loss": 0.0738,
+ "step": 2284
+ },
+ {
+ "epoch": 8.36996336996337,
+ "grad_norm": 47.469303131103516,
+ "learning_rate": 1.0891330891330892e-05,
+ "loss": 0.241,
+ "step": 2285
+ },
+ {
+ "epoch": 8.373626373626374,
+ "grad_norm": 17.6788387298584,
+ "learning_rate": 1.0866910866910867e-05,
+ "loss": 0.0661,
+ "step": 2286
+ },
+ {
+ "epoch": 8.377289377289378,
+ "grad_norm": 22.660839080810547,
+ "learning_rate": 1.0842490842490842e-05,
+ "loss": 0.1409,
+ "step": 2287
+ },
+ {
+ "epoch": 8.380952380952381,
+ "grad_norm": 4.898139476776123,
+ "learning_rate": 1.081807081807082e-05,
+ "loss": 0.0236,
+ "step": 2288
+ },
+ {
+ "epoch": 8.384615384615385,
+ "grad_norm": 24.638856887817383,
+ "learning_rate": 1.0793650793650793e-05,
+ "loss": 0.127,
+ "step": 2289
+ },
+ {
+ "epoch": 8.388278388278389,
+ "grad_norm": 30.863998413085938,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2182,
+ "step": 2290
+ },
+ {
+ "epoch": 8.391941391941392,
+ "grad_norm": 0.24884633719921112,
+ "learning_rate": 1.0744810744810746e-05,
+ "loss": 0.0012,
+ "step": 2291
+ },
+ {
+ "epoch": 8.395604395604396,
+ "grad_norm": 40.2337532043457,
+ "learning_rate": 1.0720390720390721e-05,
+ "loss": 0.2617,
+ "step": 2292
+ },
+ {
+ "epoch": 8.3992673992674,
+ "grad_norm": 2.265397787094116,
+ "learning_rate": 1.0695970695970696e-05,
+ "loss": 0.0097,
+ "step": 2293
+ },
+ {
+ "epoch": 8.402930402930403,
+ "grad_norm": 20.82665252685547,
+ "learning_rate": 1.0671550671550671e-05,
+ "loss": 0.2609,
+ "step": 2294
+ },
+ {
+ "epoch": 8.406593406593407,
+ "grad_norm": 12.195377349853516,
+ "learning_rate": 1.0647130647130648e-05,
+ "loss": 0.0769,
+ "step": 2295
+ },
+ {
+ "epoch": 8.41025641025641,
+ "grad_norm": 39.17343521118164,
+ "learning_rate": 1.0622710622710623e-05,
+ "loss": 0.1292,
+ "step": 2296
+ },
+ {
+ "epoch": 8.413919413919414,
+ "grad_norm": 1.4848605394363403,
+ "learning_rate": 1.0598290598290598e-05,
+ "loss": 0.0077,
+ "step": 2297
+ },
+ {
+ "epoch": 8.417582417582418,
+ "grad_norm": 1.0283154249191284,
+ "learning_rate": 1.0573870573870575e-05,
+ "loss": 0.0021,
+ "step": 2298
+ },
+ {
+ "epoch": 8.421245421245422,
+ "grad_norm": 2.7084271907806396,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.0077,
+ "step": 2299
+ },
+ {
+ "epoch": 8.424908424908425,
+ "grad_norm": 15.100653648376465,
+ "learning_rate": 1.0525030525030525e-05,
+ "loss": 0.0352,
+ "step": 2300
+ },
+ {
+ "epoch": 8.428571428571429,
+ "grad_norm": 16.015790939331055,
+ "learning_rate": 1.0500610500610502e-05,
+ "loss": 0.071,
+ "step": 2301
+ },
+ {
+ "epoch": 8.432234432234432,
+ "grad_norm": 73.60521697998047,
+ "learning_rate": 1.0476190476190475e-05,
+ "loss": 0.7214,
+ "step": 2302
+ },
+ {
+ "epoch": 8.435897435897436,
+ "grad_norm": 49.41472625732422,
+ "learning_rate": 1.0451770451770452e-05,
+ "loss": 0.9831,
+ "step": 2303
+ },
+ {
+ "epoch": 8.43956043956044,
+ "grad_norm": 51.6118278503418,
+ "learning_rate": 1.0427350427350429e-05,
+ "loss": 0.2589,
+ "step": 2304
+ },
+ {
+ "epoch": 8.443223443223443,
+ "grad_norm": 9.20317554473877,
+ "learning_rate": 1.0402930402930402e-05,
+ "loss": 0.0274,
+ "step": 2305
+ },
+ {
+ "epoch": 8.446886446886447,
+ "grad_norm": 10.019723892211914,
+ "learning_rate": 1.0378510378510379e-05,
+ "loss": 0.0286,
+ "step": 2306
+ },
+ {
+ "epoch": 8.45054945054945,
+ "grad_norm": 83.85884857177734,
+ "learning_rate": 1.0354090354090354e-05,
+ "loss": 1.2867,
+ "step": 2307
+ },
+ {
+ "epoch": 8.454212454212454,
+ "grad_norm": 6.391974449157715,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.023,
+ "step": 2308
+ },
+ {
+ "epoch": 8.457875457875458,
+ "grad_norm": 18.662921905517578,
+ "learning_rate": 1.0305250305250306e-05,
+ "loss": 0.158,
+ "step": 2309
+ },
+ {
+ "epoch": 8.461538461538462,
+ "grad_norm": 10.009090423583984,
+ "learning_rate": 1.028083028083028e-05,
+ "loss": 0.0401,
+ "step": 2310
+ },
+ {
+ "epoch": 8.465201465201465,
+ "grad_norm": 34.67587661743164,
+ "learning_rate": 1.0256410256410258e-05,
+ "loss": 0.1569,
+ "step": 2311
+ },
+ {
+ "epoch": 8.468864468864469,
+ "grad_norm": 7.593516826629639,
+ "learning_rate": 1.0231990231990231e-05,
+ "loss": 0.0382,
+ "step": 2312
+ },
+ {
+ "epoch": 8.472527472527473,
+ "grad_norm": 14.953495979309082,
+ "learning_rate": 1.0207570207570208e-05,
+ "loss": 0.0311,
+ "step": 2313
+ },
+ {
+ "epoch": 8.476190476190476,
+ "grad_norm": 37.42109680175781,
+ "learning_rate": 1.0183150183150184e-05,
+ "loss": 0.2014,
+ "step": 2314
+ },
+ {
+ "epoch": 8.47985347985348,
+ "grad_norm": 1.2265455722808838,
+ "learning_rate": 1.0158730158730158e-05,
+ "loss": 0.0052,
+ "step": 2315
+ },
+ {
+ "epoch": 8.483516483516484,
+ "grad_norm": 23.93451499938965,
+ "learning_rate": 1.0134310134310135e-05,
+ "loss": 0.1253,
+ "step": 2316
+ },
+ {
+ "epoch": 8.487179487179487,
+ "grad_norm": 6.8919172286987305,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.0287,
+ "step": 2317
+ },
+ {
+ "epoch": 8.49084249084249,
+ "grad_norm": 18.653671264648438,
+ "learning_rate": 1.0085470085470085e-05,
+ "loss": 0.0741,
+ "step": 2318
+ },
+ {
+ "epoch": 8.494505494505495,
+ "grad_norm": 13.255439758300781,
+ "learning_rate": 1.0061050061050061e-05,
+ "loss": 0.131,
+ "step": 2319
+ },
+ {
+ "epoch": 8.498168498168498,
+ "grad_norm": 39.42401885986328,
+ "learning_rate": 1.0036630036630037e-05,
+ "loss": 0.2419,
+ "step": 2320
+ },
+ {
+ "epoch": 8.501831501831502,
+ "grad_norm": 35.404022216796875,
+ "learning_rate": 1.0012210012210012e-05,
+ "loss": 0.2684,
+ "step": 2321
+ },
+ {
+ "epoch": 8.505494505494505,
+ "grad_norm": 28.29785919189453,
+ "learning_rate": 9.987789987789988e-06,
+ "loss": 0.6206,
+ "step": 2322
+ },
+ {
+ "epoch": 8.50915750915751,
+ "grad_norm": 36.69160842895508,
+ "learning_rate": 9.963369963369963e-06,
+ "loss": 0.3753,
+ "step": 2323
+ },
+ {
+ "epoch": 8.512820512820513,
+ "grad_norm": 23.688018798828125,
+ "learning_rate": 9.93894993894994e-06,
+ "loss": 0.1725,
+ "step": 2324
+ },
+ {
+ "epoch": 8.516483516483516,
+ "grad_norm": 3.7074942588806152,
+ "learning_rate": 9.914529914529914e-06,
+ "loss": 0.0237,
+ "step": 2325
+ },
+ {
+ "epoch": 8.52014652014652,
+ "grad_norm": 5.201651573181152,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 0.0267,
+ "step": 2326
+ },
+ {
+ "epoch": 8.523809523809524,
+ "grad_norm": 22.705989837646484,
+ "learning_rate": 9.865689865689867e-06,
+ "loss": 0.1745,
+ "step": 2327
+ },
+ {
+ "epoch": 8.527472527472527,
+ "grad_norm": 0.9097073078155518,
+ "learning_rate": 9.84126984126984e-06,
+ "loss": 0.0061,
+ "step": 2328
+ },
+ {
+ "epoch": 8.531135531135531,
+ "grad_norm": 0.8476067185401917,
+ "learning_rate": 9.816849816849817e-06,
+ "loss": 0.0049,
+ "step": 2329
+ },
+ {
+ "epoch": 8.534798534798535,
+ "grad_norm": 30.6715087890625,
+ "learning_rate": 9.792429792429794e-06,
+ "loss": 0.8089,
+ "step": 2330
+ },
+ {
+ "epoch": 8.538461538461538,
+ "grad_norm": 27.170246124267578,
+ "learning_rate": 9.768009768009767e-06,
+ "loss": 0.1874,
+ "step": 2331
+ },
+ {
+ "epoch": 8.542124542124542,
+ "grad_norm": 2.073500871658325,
+ "learning_rate": 9.743589743589744e-06,
+ "loss": 0.0057,
+ "step": 2332
+ },
+ {
+ "epoch": 8.545787545787546,
+ "grad_norm": 50.97946548461914,
+ "learning_rate": 9.719169719169719e-06,
+ "loss": 0.7219,
+ "step": 2333
+ },
+ {
+ "epoch": 8.54945054945055,
+ "grad_norm": 33.17234420776367,
+ "learning_rate": 9.694749694749694e-06,
+ "loss": 0.2579,
+ "step": 2334
+ },
+ {
+ "epoch": 8.553113553113553,
+ "grad_norm": 21.129179000854492,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.0973,
+ "step": 2335
+ },
+ {
+ "epoch": 8.556776556776557,
+ "grad_norm": 40.60239791870117,
+ "learning_rate": 9.645909645909646e-06,
+ "loss": 0.2773,
+ "step": 2336
+ },
+ {
+ "epoch": 8.56043956043956,
+ "grad_norm": 23.842580795288086,
+ "learning_rate": 9.621489621489623e-06,
+ "loss": 0.4137,
+ "step": 2337
+ },
+ {
+ "epoch": 8.564102564102564,
+ "grad_norm": 30.08527946472168,
+ "learning_rate": 9.597069597069596e-06,
+ "loss": 0.776,
+ "step": 2338
+ },
+ {
+ "epoch": 8.567765567765568,
+ "grad_norm": 26.280099868774414,
+ "learning_rate": 9.572649572649573e-06,
+ "loss": 0.2013,
+ "step": 2339
+ },
+ {
+ "epoch": 8.571428571428571,
+ "grad_norm": 9.810042381286621,
+ "learning_rate": 9.54822954822955e-06,
+ "loss": 0.0456,
+ "step": 2340
+ },
+ {
+ "epoch": 8.575091575091575,
+ "grad_norm": 9.112823486328125,
+ "learning_rate": 9.523809523809523e-06,
+ "loss": 0.0599,
+ "step": 2341
+ },
+ {
+ "epoch": 8.578754578754578,
+ "grad_norm": 32.358306884765625,
+ "learning_rate": 9.4993894993895e-06,
+ "loss": 0.3316,
+ "step": 2342
+ },
+ {
+ "epoch": 8.582417582417582,
+ "grad_norm": 28.62472915649414,
+ "learning_rate": 9.474969474969477e-06,
+ "loss": 0.3418,
+ "step": 2343
+ },
+ {
+ "epoch": 8.586080586080586,
+ "grad_norm": 40.83232879638672,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.498,
+ "step": 2344
+ },
+ {
+ "epoch": 8.58974358974359,
+ "grad_norm": 14.671387672424316,
+ "learning_rate": 9.426129426129427e-06,
+ "loss": 0.0943,
+ "step": 2345
+ },
+ {
+ "epoch": 8.593406593406593,
+ "grad_norm": 17.533994674682617,
+ "learning_rate": 9.401709401709402e-06,
+ "loss": 0.0914,
+ "step": 2346
+ },
+ {
+ "epoch": 8.597069597069597,
+ "grad_norm": 14.264333724975586,
+ "learning_rate": 9.377289377289377e-06,
+ "loss": 0.048,
+ "step": 2347
+ },
+ {
+ "epoch": 8.6007326007326,
+ "grad_norm": 10.327966690063477,
+ "learning_rate": 9.352869352869354e-06,
+ "loss": 0.0533,
+ "step": 2348
+ },
+ {
+ "epoch": 8.604395604395604,
+ "grad_norm": 23.408447265625,
+ "learning_rate": 9.328449328449329e-06,
+ "loss": 0.1076,
+ "step": 2349
+ },
+ {
+ "epoch": 8.608058608058608,
+ "grad_norm": 3.769625663757324,
+ "learning_rate": 9.304029304029304e-06,
+ "loss": 0.0173,
+ "step": 2350
+ },
+ {
+ "epoch": 8.611721611721611,
+ "grad_norm": 11.853968620300293,
+ "learning_rate": 9.279609279609279e-06,
+ "loss": 0.0461,
+ "step": 2351
+ },
+ {
+ "epoch": 8.615384615384615,
+ "grad_norm": 2.211425304412842,
+ "learning_rate": 9.255189255189256e-06,
+ "loss": 0.0074,
+ "step": 2352
+ },
+ {
+ "epoch": 8.619047619047619,
+ "grad_norm": 41.25067138671875,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.5553,
+ "step": 2353
+ },
+ {
+ "epoch": 8.622710622710622,
+ "grad_norm": 23.783859252929688,
+ "learning_rate": 9.206349206349206e-06,
+ "loss": 0.2602,
+ "step": 2354
+ },
+ {
+ "epoch": 8.626373626373626,
+ "grad_norm": 14.278258323669434,
+ "learning_rate": 9.181929181929182e-06,
+ "loss": 0.0603,
+ "step": 2355
+ },
+ {
+ "epoch": 8.63003663003663,
+ "grad_norm": 47.81812286376953,
+ "learning_rate": 9.15750915750916e-06,
+ "loss": 0.3209,
+ "step": 2356
+ },
+ {
+ "epoch": 8.633699633699633,
+ "grad_norm": 29.209117889404297,
+ "learning_rate": 9.133089133089133e-06,
+ "loss": 0.2166,
+ "step": 2357
+ },
+ {
+ "epoch": 8.637362637362637,
+ "grad_norm": 11.6812162399292,
+ "learning_rate": 9.10866910866911e-06,
+ "loss": 0.0394,
+ "step": 2358
+ },
+ {
+ "epoch": 8.64102564102564,
+ "grad_norm": 2.1416890621185303,
+ "learning_rate": 9.084249084249084e-06,
+ "loss": 0.0079,
+ "step": 2359
+ },
+ {
+ "epoch": 8.644688644688644,
+ "grad_norm": 13.363630294799805,
+ "learning_rate": 9.05982905982906e-06,
+ "loss": 0.1215,
+ "step": 2360
+ },
+ {
+ "epoch": 8.648351648351648,
+ "grad_norm": 10.95302963256836,
+ "learning_rate": 9.035409035409036e-06,
+ "loss": 0.0562,
+ "step": 2361
+ },
+ {
+ "epoch": 8.652014652014651,
+ "grad_norm": 2.392416000366211,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.0085,
+ "step": 2362
+ },
+ {
+ "epoch": 8.655677655677655,
+ "grad_norm": 13.83795166015625,
+ "learning_rate": 8.986568986568986e-06,
+ "loss": 0.0777,
+ "step": 2363
+ },
+ {
+ "epoch": 8.659340659340659,
+ "grad_norm": 18.058395385742188,
+ "learning_rate": 8.962148962148961e-06,
+ "loss": 0.0555,
+ "step": 2364
+ },
+ {
+ "epoch": 8.663003663003662,
+ "grad_norm": 2.548462390899658,
+ "learning_rate": 8.937728937728938e-06,
+ "loss": 0.0145,
+ "step": 2365
+ },
+ {
+ "epoch": 8.666666666666666,
+ "grad_norm": 1.9582334756851196,
+ "learning_rate": 8.913308913308913e-06,
+ "loss": 0.0114,
+ "step": 2366
+ },
+ {
+ "epoch": 8.67032967032967,
+ "grad_norm": 12.635466575622559,
+ "learning_rate": 8.888888888888888e-06,
+ "loss": 0.0936,
+ "step": 2367
+ },
+ {
+ "epoch": 8.673992673992673,
+ "grad_norm": 36.309967041015625,
+ "learning_rate": 8.864468864468865e-06,
+ "loss": 0.1474,
+ "step": 2368
+ },
+ {
+ "epoch": 8.677655677655677,
+ "grad_norm": 2.570406675338745,
+ "learning_rate": 8.840048840048842e-06,
+ "loss": 0.0115,
+ "step": 2369
+ },
+ {
+ "epoch": 8.68131868131868,
+ "grad_norm": 33.025535583496094,
+ "learning_rate": 8.815628815628815e-06,
+ "loss": 0.1509,
+ "step": 2370
+ },
+ {
+ "epoch": 8.684981684981684,
+ "grad_norm": 7.150747299194336,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.0128,
+ "step": 2371
+ },
+ {
+ "epoch": 8.688644688644688,
+ "grad_norm": 8.400662422180176,
+ "learning_rate": 8.766788766788767e-06,
+ "loss": 0.0405,
+ "step": 2372
+ },
+ {
+ "epoch": 8.692307692307692,
+ "grad_norm": 50.28904342651367,
+ "learning_rate": 8.742368742368742e-06,
+ "loss": 0.3981,
+ "step": 2373
+ },
+ {
+ "epoch": 8.695970695970695,
+ "grad_norm": 27.415250778198242,
+ "learning_rate": 8.717948717948719e-06,
+ "loss": 0.3461,
+ "step": 2374
+ },
+ {
+ "epoch": 8.699633699633699,
+ "grad_norm": 66.89543914794922,
+ "learning_rate": 8.693528693528694e-06,
+ "loss": 0.2926,
+ "step": 2375
+ },
+ {
+ "epoch": 8.703296703296703,
+ "grad_norm": 23.47862434387207,
+ "learning_rate": 8.669108669108669e-06,
+ "loss": 0.0704,
+ "step": 2376
+ },
+ {
+ "epoch": 8.706959706959706,
+ "grad_norm": 0.5212138891220093,
+ "learning_rate": 8.644688644688644e-06,
+ "loss": 0.0022,
+ "step": 2377
+ },
+ {
+ "epoch": 8.71062271062271,
+ "grad_norm": 11.140594482421875,
+ "learning_rate": 8.62026862026862e-06,
+ "loss": 0.0245,
+ "step": 2378
+ },
+ {
+ "epoch": 8.714285714285714,
+ "grad_norm": 6.0050554275512695,
+ "learning_rate": 8.595848595848596e-06,
+ "loss": 0.0156,
+ "step": 2379
+ },
+ {
+ "epoch": 8.717948717948717,
+ "grad_norm": 24.6923770904541,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.1378,
+ "step": 2380
+ },
+ {
+ "epoch": 8.72161172161172,
+ "grad_norm": 17.33226203918457,
+ "learning_rate": 8.547008547008548e-06,
+ "loss": 0.091,
+ "step": 2381
+ },
+ {
+ "epoch": 8.725274725274724,
+ "grad_norm": 1.3830251693725586,
+ "learning_rate": 8.522588522588524e-06,
+ "loss": 0.0056,
+ "step": 2382
+ },
+ {
+ "epoch": 8.728937728937728,
+ "grad_norm": 2.8836987018585205,
+ "learning_rate": 8.498168498168498e-06,
+ "loss": 0.0094,
+ "step": 2383
+ },
+ {
+ "epoch": 8.732600732600732,
+ "grad_norm": 21.720495223999023,
+ "learning_rate": 8.473748473748475e-06,
+ "loss": 0.0791,
+ "step": 2384
+ },
+ {
+ "epoch": 8.736263736263737,
+ "grad_norm": 23.333284378051758,
+ "learning_rate": 8.44932844932845e-06,
+ "loss": 0.0749,
+ "step": 2385
+ },
+ {
+ "epoch": 8.73992673992674,
+ "grad_norm": 6.694031715393066,
+ "learning_rate": 8.424908424908425e-06,
+ "loss": 0.0322,
+ "step": 2386
+ },
+ {
+ "epoch": 8.743589743589745,
+ "grad_norm": 8.488764762878418,
+ "learning_rate": 8.400488400488401e-06,
+ "loss": 0.0269,
+ "step": 2387
+ },
+ {
+ "epoch": 8.747252747252748,
+ "grad_norm": 0.5612779855728149,
+ "learning_rate": 8.376068376068377e-06,
+ "loss": 0.0021,
+ "step": 2388
+ },
+ {
+ "epoch": 8.750915750915752,
+ "grad_norm": 26.03545379638672,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.1388,
+ "step": 2389
+ },
+ {
+ "epoch": 8.754578754578755,
+ "grad_norm": 1.6444523334503174,
+ "learning_rate": 8.327228327228327e-06,
+ "loss": 0.005,
+ "step": 2390
+ },
+ {
+ "epoch": 8.758241758241759,
+ "grad_norm": 3.910712480545044,
+ "learning_rate": 8.302808302808303e-06,
+ "loss": 0.0166,
+ "step": 2391
+ },
+ {
+ "epoch": 8.761904761904763,
+ "grad_norm": 0.42347079515457153,
+ "learning_rate": 8.278388278388278e-06,
+ "loss": 0.0017,
+ "step": 2392
+ },
+ {
+ "epoch": 8.765567765567766,
+ "grad_norm": 3.2693428993225098,
+ "learning_rate": 8.253968253968254e-06,
+ "loss": 0.0103,
+ "step": 2393
+ },
+ {
+ "epoch": 8.76923076923077,
+ "grad_norm": 11.918498039245605,
+ "learning_rate": 8.22954822954823e-06,
+ "loss": 0.0702,
+ "step": 2394
+ },
+ {
+ "epoch": 8.772893772893774,
+ "grad_norm": 59.99433517456055,
+ "learning_rate": 8.205128205128205e-06,
+ "loss": 0.3443,
+ "step": 2395
+ },
+ {
+ "epoch": 8.776556776556777,
+ "grad_norm": 1.036231279373169,
+ "learning_rate": 8.18070818070818e-06,
+ "loss": 0.0029,
+ "step": 2396
+ },
+ {
+ "epoch": 8.780219780219781,
+ "grad_norm": 0.35836902260780334,
+ "learning_rate": 8.156288156288157e-06,
+ "loss": 0.0011,
+ "step": 2397
+ },
+ {
+ "epoch": 8.783882783882785,
+ "grad_norm": 11.58154010772705,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.0316,
+ "step": 2398
+ },
+ {
+ "epoch": 8.787545787545788,
+ "grad_norm": 54.74607849121094,
+ "learning_rate": 8.107448107448107e-06,
+ "loss": 0.2618,
+ "step": 2399
+ },
+ {
+ "epoch": 8.791208791208792,
+ "grad_norm": 21.049470901489258,
+ "learning_rate": 8.083028083028084e-06,
+ "loss": 0.0536,
+ "step": 2400
+ },
+ {
+ "epoch": 8.794871794871796,
+ "grad_norm": 4.356145858764648,
+ "learning_rate": 8.058608058608059e-06,
+ "loss": 0.0107,
+ "step": 2401
+ },
+ {
+ "epoch": 8.7985347985348,
+ "grad_norm": 6.708774089813232,
+ "learning_rate": 8.034188034188034e-06,
+ "loss": 0.2551,
+ "step": 2402
+ },
+ {
+ "epoch": 8.802197802197803,
+ "grad_norm": 33.899139404296875,
+ "learning_rate": 8.00976800976801e-06,
+ "loss": 0.1179,
+ "step": 2403
+ },
+ {
+ "epoch": 8.805860805860807,
+ "grad_norm": 20.150150299072266,
+ "learning_rate": 7.985347985347986e-06,
+ "loss": 0.0673,
+ "step": 2404
+ },
+ {
+ "epoch": 8.80952380952381,
+ "grad_norm": 0.7458391189575195,
+ "learning_rate": 7.960927960927961e-06,
+ "loss": 0.0015,
+ "step": 2405
+ },
+ {
+ "epoch": 8.813186813186814,
+ "grad_norm": 6.7325663566589355,
+ "learning_rate": 7.936507936507936e-06,
+ "loss": 0.0204,
+ "step": 2406
+ },
+ {
+ "epoch": 8.816849816849818,
+ "grad_norm": 1.79118013381958,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.0046,
+ "step": 2407
+ },
+ {
+ "epoch": 8.820512820512821,
+ "grad_norm": 16.44390106201172,
+ "learning_rate": 7.887667887667886e-06,
+ "loss": 0.0306,
+ "step": 2408
+ },
+ {
+ "epoch": 8.824175824175825,
+ "grad_norm": 0.12592382729053497,
+ "learning_rate": 7.863247863247863e-06,
+ "loss": 0.0006,
+ "step": 2409
+ },
+ {
+ "epoch": 8.827838827838828,
+ "grad_norm": 0.41172507405281067,
+ "learning_rate": 7.83882783882784e-06,
+ "loss": 0.0012,
+ "step": 2410
+ },
+ {
+ "epoch": 8.831501831501832,
+ "grad_norm": 12.583470344543457,
+ "learning_rate": 7.814407814407815e-06,
+ "loss": 0.0472,
+ "step": 2411
+ },
+ {
+ "epoch": 8.835164835164836,
+ "grad_norm": 2.4268991947174072,
+ "learning_rate": 7.78998778998779e-06,
+ "loss": 0.0787,
+ "step": 2412
+ },
+ {
+ "epoch": 8.83882783882784,
+ "grad_norm": 5.501258850097656,
+ "learning_rate": 7.765567765567767e-06,
+ "loss": 0.0161,
+ "step": 2413
+ },
+ {
+ "epoch": 8.842490842490843,
+ "grad_norm": 48.107818603515625,
+ "learning_rate": 7.741147741147742e-06,
+ "loss": 0.3023,
+ "step": 2414
+ },
+ {
+ "epoch": 8.846153846153847,
+ "grad_norm": 9.732619285583496,
+ "learning_rate": 7.716727716727717e-06,
+ "loss": 0.0313,
+ "step": 2415
+ },
+ {
+ "epoch": 8.84981684981685,
+ "grad_norm": 7.970260143280029,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 0.0364,
+ "step": 2416
+ },
+ {
+ "epoch": 8.853479853479854,
+ "grad_norm": 41.191104888916016,
+ "learning_rate": 7.667887667887669e-06,
+ "loss": 0.1678,
+ "step": 2417
+ },
+ {
+ "epoch": 8.857142857142858,
+ "grad_norm": 0.04402902349829674,
+ "learning_rate": 7.643467643467644e-06,
+ "loss": 0.0001,
+ "step": 2418
+ },
+ {
+ "epoch": 8.860805860805861,
+ "grad_norm": 5.091309547424316,
+ "learning_rate": 7.619047619047619e-06,
+ "loss": 0.0185,
+ "step": 2419
+ },
+ {
+ "epoch": 8.864468864468865,
+ "grad_norm": 0.8602111339569092,
+ "learning_rate": 7.594627594627595e-06,
+ "loss": 0.0027,
+ "step": 2420
+ },
+ {
+ "epoch": 8.868131868131869,
+ "grad_norm": 0.5093329548835754,
+ "learning_rate": 7.57020757020757e-06,
+ "loss": 0.0018,
+ "step": 2421
+ },
+ {
+ "epoch": 8.871794871794872,
+ "grad_norm": 1.818582534790039,
+ "learning_rate": 7.545787545787546e-06,
+ "loss": 0.0056,
+ "step": 2422
+ },
+ {
+ "epoch": 8.875457875457876,
+ "grad_norm": 1.239259123802185,
+ "learning_rate": 7.5213675213675224e-06,
+ "loss": 0.0035,
+ "step": 2423
+ },
+ {
+ "epoch": 8.87912087912088,
+ "grad_norm": 1.207359790802002,
+ "learning_rate": 7.4969474969474975e-06,
+ "loss": 0.0044,
+ "step": 2424
+ },
+ {
+ "epoch": 8.882783882783883,
+ "grad_norm": 16.89816665649414,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 0.1432,
+ "step": 2425
+ },
+ {
+ "epoch": 8.886446886446887,
+ "grad_norm": 26.338607788085938,
+ "learning_rate": 7.4481074481074485e-06,
+ "loss": 0.1183,
+ "step": 2426
+ },
+ {
+ "epoch": 8.89010989010989,
+ "grad_norm": 0.16512498259544373,
+ "learning_rate": 7.4236874236874235e-06,
+ "loss": 0.0005,
+ "step": 2427
+ },
+ {
+ "epoch": 8.893772893772894,
+ "grad_norm": 0.37214791774749756,
+ "learning_rate": 7.3992673992673995e-06,
+ "loss": 0.0017,
+ "step": 2428
+ },
+ {
+ "epoch": 8.897435897435898,
+ "grad_norm": 31.914432525634766,
+ "learning_rate": 7.374847374847375e-06,
+ "loss": 0.0725,
+ "step": 2429
+ },
+ {
+ "epoch": 8.901098901098901,
+ "grad_norm": 1.1302192211151123,
+ "learning_rate": 7.3504273504273504e-06,
+ "loss": 0.0042,
+ "step": 2430
+ },
+ {
+ "epoch": 8.904761904761905,
+ "grad_norm": 10.759814262390137,
+ "learning_rate": 7.326007326007326e-06,
+ "loss": 0.0325,
+ "step": 2431
+ },
+ {
+ "epoch": 8.908424908424909,
+ "grad_norm": 10.23229694366455,
+ "learning_rate": 7.301587301587301e-06,
+ "loss": 0.0169,
+ "step": 2432
+ },
+ {
+ "epoch": 8.912087912087912,
+ "grad_norm": 18.52377700805664,
+ "learning_rate": 7.277167277167277e-06,
+ "loss": 0.0873,
+ "step": 2433
+ },
+ {
+ "epoch": 8.915750915750916,
+ "grad_norm": 9.769023895263672,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.0432,
+ "step": 2434
+ },
+ {
+ "epoch": 8.91941391941392,
+ "grad_norm": 5.880816459655762,
+ "learning_rate": 7.228327228327228e-06,
+ "loss": 0.0196,
+ "step": 2435
+ },
+ {
+ "epoch": 8.923076923076923,
+ "grad_norm": 12.387096405029297,
+ "learning_rate": 7.203907203907203e-06,
+ "loss": 0.0246,
+ "step": 2436
+ },
+ {
+ "epoch": 8.926739926739927,
+ "grad_norm": 18.798778533935547,
+ "learning_rate": 7.17948717948718e-06,
+ "loss": 0.1073,
+ "step": 2437
+ },
+ {
+ "epoch": 8.93040293040293,
+ "grad_norm": 2.706861734390259,
+ "learning_rate": 7.155067155067155e-06,
+ "loss": 0.0048,
+ "step": 2438
+ },
+ {
+ "epoch": 8.934065934065934,
+ "grad_norm": 4.111676216125488,
+ "learning_rate": 7.130647130647131e-06,
+ "loss": 0.0107,
+ "step": 2439
+ },
+ {
+ "epoch": 8.937728937728938,
+ "grad_norm": 55.59748077392578,
+ "learning_rate": 7.106227106227106e-06,
+ "loss": 0.2811,
+ "step": 2440
+ },
+ {
+ "epoch": 8.941391941391942,
+ "grad_norm": 10.445195198059082,
+ "learning_rate": 7.081807081807082e-06,
+ "loss": 0.0491,
+ "step": 2441
+ },
+ {
+ "epoch": 8.945054945054945,
+ "grad_norm": 0.049220070242881775,
+ "learning_rate": 7.057387057387058e-06,
+ "loss": 0.0002,
+ "step": 2442
+ },
+ {
+ "epoch": 8.948717948717949,
+ "grad_norm": 2.4076764583587646,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.0075,
+ "step": 2443
+ },
+ {
+ "epoch": 8.952380952380953,
+ "grad_norm": 1.8959174156188965,
+ "learning_rate": 7.008547008547008e-06,
+ "loss": 0.0053,
+ "step": 2444
+ },
+ {
+ "epoch": 8.956043956043956,
+ "grad_norm": 72.28501892089844,
+ "learning_rate": 6.984126984126984e-06,
+ "loss": 0.9784,
+ "step": 2445
+ },
+ {
+ "epoch": 8.95970695970696,
+ "grad_norm": 12.387998580932617,
+ "learning_rate": 6.95970695970696e-06,
+ "loss": 0.0623,
+ "step": 2446
+ },
+ {
+ "epoch": 8.963369963369964,
+ "grad_norm": 80.10337829589844,
+ "learning_rate": 6.935286935286936e-06,
+ "loss": 0.3668,
+ "step": 2447
+ },
+ {
+ "epoch": 8.967032967032967,
+ "grad_norm": 8.527040481567383,
+ "learning_rate": 6.910866910866911e-06,
+ "loss": 0.0308,
+ "step": 2448
+ },
+ {
+ "epoch": 8.97069597069597,
+ "grad_norm": 56.55281066894531,
+ "learning_rate": 6.886446886446886e-06,
+ "loss": 0.567,
+ "step": 2449
+ },
+ {
+ "epoch": 8.974358974358974,
+ "grad_norm": 1.594208836555481,
+ "learning_rate": 6.862026862026863e-06,
+ "loss": 0.0049,
+ "step": 2450
+ },
+ {
+ "epoch": 8.978021978021978,
+ "grad_norm": 0.4573160707950592,
+ "learning_rate": 6.837606837606838e-06,
+ "loss": 0.001,
+ "step": 2451
+ },
+ {
+ "epoch": 8.981684981684982,
+ "grad_norm": 48.936038970947266,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.6475,
+ "step": 2452
+ },
+ {
+ "epoch": 8.985347985347985,
+ "grad_norm": 11.618135452270508,
+ "learning_rate": 6.788766788766789e-06,
+ "loss": 0.0277,
+ "step": 2453
+ },
+ {
+ "epoch": 8.989010989010989,
+ "grad_norm": 2.847616195678711,
+ "learning_rate": 6.764346764346765e-06,
+ "loss": 0.0075,
+ "step": 2454
+ },
+ {
+ "epoch": 8.992673992673993,
+ "grad_norm": 0.4193238317966461,
+ "learning_rate": 6.739926739926741e-06,
+ "loss": 0.0013,
+ "step": 2455
+ },
+ {
+ "epoch": 8.996336996336996,
+ "grad_norm": 18.683883666992188,
+ "learning_rate": 6.715506715506716e-06,
+ "loss": 0.0652,
+ "step": 2456
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 64.50067138671875,
+ "learning_rate": 6.691086691086691e-06,
+ "loss": 0.6786,
+ "step": 2457
+ },
+ {
+ "epoch": 9.003663003663004,
+ "grad_norm": 54.9294319152832,
+ "learning_rate": 6.666666666666667e-06,
+ "loss": 0.6198,
+ "step": 2458
+ },
+ {
+ "epoch": 9.007326007326007,
+ "grad_norm": 55.97196960449219,
+ "learning_rate": 6.6422466422466426e-06,
+ "loss": 0.3798,
+ "step": 2459
+ },
+ {
+ "epoch": 9.010989010989011,
+ "grad_norm": 3.0465450286865234,
+ "learning_rate": 6.6178266178266185e-06,
+ "loss": 0.0123,
+ "step": 2460
+ },
+ {
+ "epoch": 9.014652014652015,
+ "grad_norm": 8.725708961486816,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.0295,
+ "step": 2461
+ },
+ {
+ "epoch": 9.018315018315018,
+ "grad_norm": 24.417634963989258,
+ "learning_rate": 6.568986568986569e-06,
+ "loss": 0.1857,
+ "step": 2462
+ },
+ {
+ "epoch": 9.021978021978022,
+ "grad_norm": 75.99623107910156,
+ "learning_rate": 6.544566544566545e-06,
+ "loss": 0.8702,
+ "step": 2463
+ },
+ {
+ "epoch": 9.025641025641026,
+ "grad_norm": 12.464011192321777,
+ "learning_rate": 6.5201465201465204e-06,
+ "loss": 0.0656,
+ "step": 2464
+ },
+ {
+ "epoch": 9.02930402930403,
+ "grad_norm": 1.5972875356674194,
+ "learning_rate": 6.4957264957264955e-06,
+ "loss": 0.0074,
+ "step": 2465
+ },
+ {
+ "epoch": 9.032967032967033,
+ "grad_norm": 0.38648298382759094,
+ "learning_rate": 6.471306471306471e-06,
+ "loss": 0.0007,
+ "step": 2466
+ },
+ {
+ "epoch": 9.036630036630036,
+ "grad_norm": 55.41832733154297,
+ "learning_rate": 6.446886446886447e-06,
+ "loss": 0.2693,
+ "step": 2467
+ },
+ {
+ "epoch": 9.04029304029304,
+ "grad_norm": 1.8419067859649658,
+ "learning_rate": 6.422466422466423e-06,
+ "loss": 0.008,
+ "step": 2468
+ },
+ {
+ "epoch": 9.043956043956044,
+ "grad_norm": 6.2913498878479,
+ "learning_rate": 6.398046398046398e-06,
+ "loss": 0.0214,
+ "step": 2469
+ },
+ {
+ "epoch": 9.047619047619047,
+ "grad_norm": 16.412883758544922,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.0702,
+ "step": 2470
+ },
+ {
+ "epoch": 9.051282051282051,
+ "grad_norm": 6.313873291015625,
+ "learning_rate": 6.349206349206349e-06,
+ "loss": 0.0191,
+ "step": 2471
+ },
+ {
+ "epoch": 9.054945054945055,
+ "grad_norm": 34.515655517578125,
+ "learning_rate": 6.324786324786325e-06,
+ "loss": 0.1514,
+ "step": 2472
+ },
+ {
+ "epoch": 9.058608058608058,
+ "grad_norm": 0.12106683105230331,
+ "learning_rate": 6.3003663003663e-06,
+ "loss": 0.0005,
+ "step": 2473
+ },
+ {
+ "epoch": 9.062271062271062,
+ "grad_norm": 1.749250888824463,
+ "learning_rate": 6.275946275946276e-06,
+ "loss": 0.006,
+ "step": 2474
+ },
+ {
+ "epoch": 9.065934065934066,
+ "grad_norm": 7.868753433227539,
+ "learning_rate": 6.251526251526251e-06,
+ "loss": 0.043,
+ "step": 2475
+ },
+ {
+ "epoch": 9.06959706959707,
+ "grad_norm": 0.787532389163971,
+ "learning_rate": 6.227106227106228e-06,
+ "loss": 0.002,
+ "step": 2476
+ },
+ {
+ "epoch": 9.073260073260073,
+ "grad_norm": 3.244596004486084,
+ "learning_rate": 6.202686202686203e-06,
+ "loss": 0.0103,
+ "step": 2477
+ },
+ {
+ "epoch": 9.076923076923077,
+ "grad_norm": 7.473750114440918,
+ "learning_rate": 6.178266178266178e-06,
+ "loss": 0.0261,
+ "step": 2478
+ },
+ {
+ "epoch": 9.08058608058608,
+ "grad_norm": 26.390687942504883,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 0.0861,
+ "step": 2479
+ },
+ {
+ "epoch": 9.084249084249084,
+ "grad_norm": 3.37931489944458,
+ "learning_rate": 6.129426129426129e-06,
+ "loss": 0.0161,
+ "step": 2480
+ },
+ {
+ "epoch": 9.087912087912088,
+ "grad_norm": 42.62114334106445,
+ "learning_rate": 6.105006105006105e-06,
+ "loss": 0.1498,
+ "step": 2481
+ },
+ {
+ "epoch": 9.091575091575091,
+ "grad_norm": 6.945065975189209,
+ "learning_rate": 6.080586080586081e-06,
+ "loss": 0.0335,
+ "step": 2482
+ },
+ {
+ "epoch": 9.095238095238095,
+ "grad_norm": 13.789215087890625,
+ "learning_rate": 6.056166056166056e-06,
+ "loss": 0.0344,
+ "step": 2483
+ },
+ {
+ "epoch": 9.098901098901099,
+ "grad_norm": 2.178279161453247,
+ "learning_rate": 6.031746031746032e-06,
+ "loss": 0.0089,
+ "step": 2484
+ },
+ {
+ "epoch": 9.102564102564102,
+ "grad_norm": 61.52828598022461,
+ "learning_rate": 6.007326007326008e-06,
+ "loss": 0.6747,
+ "step": 2485
+ },
+ {
+ "epoch": 9.106227106227106,
+ "grad_norm": 2.6004297733306885,
+ "learning_rate": 5.982905982905983e-06,
+ "loss": 0.015,
+ "step": 2486
+ },
+ {
+ "epoch": 9.10989010989011,
+ "grad_norm": 6.188530921936035,
+ "learning_rate": 5.958485958485959e-06,
+ "loss": 0.019,
+ "step": 2487
+ },
+ {
+ "epoch": 9.113553113553113,
+ "grad_norm": 8.141875267028809,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 0.04,
+ "step": 2488
+ },
+ {
+ "epoch": 9.117216117216117,
+ "grad_norm": 7.30596923828125,
+ "learning_rate": 5.90964590964591e-06,
+ "loss": 0.0223,
+ "step": 2489
+ },
+ {
+ "epoch": 9.12087912087912,
+ "grad_norm": 0.5398825407028198,
+ "learning_rate": 5.885225885225886e-06,
+ "loss": 0.0022,
+ "step": 2490
+ },
+ {
+ "epoch": 9.124542124542124,
+ "grad_norm": 8.664217948913574,
+ "learning_rate": 5.860805860805861e-06,
+ "loss": 0.0277,
+ "step": 2491
+ },
+ {
+ "epoch": 9.128205128205128,
+ "grad_norm": 24.2191162109375,
+ "learning_rate": 5.836385836385837e-06,
+ "loss": 0.1518,
+ "step": 2492
+ },
+ {
+ "epoch": 9.131868131868131,
+ "grad_norm": 8.598712921142578,
+ "learning_rate": 5.811965811965812e-06,
+ "loss": 0.0265,
+ "step": 2493
+ },
+ {
+ "epoch": 9.135531135531135,
+ "grad_norm": 23.29640007019043,
+ "learning_rate": 5.787545787545788e-06,
+ "loss": 0.1272,
+ "step": 2494
+ },
+ {
+ "epoch": 9.139194139194139,
+ "grad_norm": 18.841108322143555,
+ "learning_rate": 5.7631257631257635e-06,
+ "loss": 0.0955,
+ "step": 2495
+ },
+ {
+ "epoch": 9.142857142857142,
+ "grad_norm": 54.528018951416016,
+ "learning_rate": 5.738705738705739e-06,
+ "loss": 0.1903,
+ "step": 2496
+ },
+ {
+ "epoch": 9.146520146520146,
+ "grad_norm": 23.48889923095703,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.0938,
+ "step": 2497
+ },
+ {
+ "epoch": 9.15018315018315,
+ "grad_norm": 5.552438259124756,
+ "learning_rate": 5.68986568986569e-06,
+ "loss": 0.0231,
+ "step": 2498
+ },
+ {
+ "epoch": 9.153846153846153,
+ "grad_norm": 4.428290843963623,
+ "learning_rate": 5.6654456654456655e-06,
+ "loss": 0.01,
+ "step": 2499
+ },
+ {
+ "epoch": 9.157509157509157,
+ "grad_norm": 0.700716495513916,
+ "learning_rate": 5.641025641025641e-06,
+ "loss": 0.0033,
+ "step": 2500
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2500/training_args.bin b/checkpoint-2500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-2500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-2500/zero_to_fp32.py b/checkpoint-2500/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-2500/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from zero 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use deepspeed to recover data, the checkpoints are pickled with DeepSpeed
+# data structures, so deepspeed has to be available in the current python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
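+
+ A minimal usage sketch (mirroring how ``_zero3_merge_trainable_params`` below uses it) ::
+
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ param = tensor.contiguous() # only now is the full fp32 parameter materialized on cpu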
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``.
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
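+
+ A typical usage might be (a minimal sketch; the folder names are placeholders, and the import assumes
+ the copy of this script saved next to the checkpoint is on ``sys.path``) ::
+
+ from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+ # consolidate the ZeRO shards into fp32 weight files, saved as safetensors
+ convert_zero_checkpoint_to_fp32_state_dict("checkpoint-2500", "checkpoint-2500-fp32",
+ safe_serialization=True)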
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach to sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model to cpu
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: the modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info("Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info("Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files "
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be "
+ "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+ "We default to 5GB so that models can run easily on free-tier Google Colab instances "
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-2730/config.json b/checkpoint-2730/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-2730/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-2730/global_step2730/mp_rank_00_model_states.pt b/checkpoint-2730/global_step2730/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..51a9b8a6cd7dae68b4b7d74fa43618256de587ec
--- /dev/null
+++ b/checkpoint-2730/global_step2730/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01dfd78c328b6cfffae9e8db25f722965a2c0d3413259f38a09ad9879a059ed1
+size 2271151845
diff --git a/checkpoint-2730/global_step2730/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-2730/global_step2730/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9a9043e4025dafb14b9372c11293109187bf44cf
--- /dev/null
+++ b/checkpoint-2730/global_step2730/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cbc2bc4cee9bd73ec7b851054a6991d982bcf88f6a67875a9644d4be545c36b
+size 3406552447
diff --git a/checkpoint-2730/global_step2730/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-2730/global_step2730/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..273dd56a33488e298cd639928bcf236bea561662
--- /dev/null
+++ b/checkpoint-2730/global_step2730/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a224b1c6a41ca6220fe6bf9ba0eb99ecb5be7713411fa02d8a81de7f2e19379
+size 3406564543
diff --git a/checkpoint-2730/latest b/checkpoint-2730/latest
new file mode 100644
index 0000000000000000000000000000000000000000..410a427b4e4e73f6f6e16e05a12ba1b4575bbdcb
--- /dev/null
+++ b/checkpoint-2730/latest
@@ -0,0 +1 @@
+global_step2730
\ No newline at end of file
diff --git a/checkpoint-2730/model.safetensors b/checkpoint-2730/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fd71e1e4e431ea10b421ab4c13b03d587531fd19
--- /dev/null
+++ b/checkpoint-2730/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86b0d1f9d6d3bf71028e927de9fa9bbbce7c4147712f9e04eebbd0edf10b9f75
+size 2271071852
diff --git a/checkpoint-2730/rng_state_0.pth b/checkpoint-2730/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a551d1c0ca3f61de06978925d07481204f4ba594
--- /dev/null
+++ b/checkpoint-2730/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78d685bcde15af839fc70a63a75b5db57c906458dc9945d7df59b215aefff5c4
+size 14917
diff --git a/checkpoint-2730/rng_state_1.pth b/checkpoint-2730/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a587fd5d6a37ab3e253d27d03b6da8eef5221513
--- /dev/null
+++ b/checkpoint-2730/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:939be86412ae7dce3247f93d760abc0f2540c712ec29463595e9e8750e81faec
+size 14917
diff --git a/checkpoint-2730/scheduler.pt b/checkpoint-2730/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c83972319233979cabb415c7e59a1dc2ed80feb1
--- /dev/null
+++ b/checkpoint-2730/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b44c513c90683704eab687ac2506acafffaad754cd0ef1b1fd1f6e068e29e272
+size 1465
diff --git a/checkpoint-2730/sentencepiece.bpe.model b/checkpoint-2730/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-2730/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-2730/special_tokens_map.json b/checkpoint-2730/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-2730/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-2730/tokenizer.json b/checkpoint-2730/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-2730/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-2730/tokenizer_config.json b/checkpoint-2730/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-2730/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "model_max_length": 8192,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-2730/trainer_state.json b/checkpoint-2730/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6bd2ab36eac86c0ddddd9597cbbccd56de8d4cf
--- /dev/null
+++ b/checkpoint-2730/trainer_state.json
@@ -0,0 +1,19144 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 2730,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ },
+ {
+ "epoch": 1.8351648351648353,
+ "grad_norm": 26.250751495361328,
+ "learning_rate": 5.445665445665445e-05,
+ "loss": 0.7496,
+ "step": 501
+ },
+ {
+ "epoch": 1.838827838827839,
+ "grad_norm": 19.820999145507812,
+ "learning_rate": 5.443223443223443e-05,
+ "loss": 0.4367,
+ "step": 502
+ },
+ {
+ "epoch": 1.8424908424908426,
+ "grad_norm": 25.09316062927246,
+ "learning_rate": 5.440781440781441e-05,
+ "loss": 0.8584,
+ "step": 503
+ },
+ {
+ "epoch": 1.8461538461538463,
+ "grad_norm": 17.808509826660156,
+ "learning_rate": 5.438339438339438e-05,
+ "loss": 0.3869,
+ "step": 504
+ },
+ {
+ "epoch": 1.84981684981685,
+ "grad_norm": 28.342119216918945,
+ "learning_rate": 5.435897435897436e-05,
+ "loss": 0.8881,
+ "step": 505
+ },
+ {
+ "epoch": 1.8534798534798536,
+ "grad_norm": 33.80287551879883,
+ "learning_rate": 5.433455433455434e-05,
+ "loss": 1.2911,
+ "step": 506
+ },
+ {
+ "epoch": 1.8571428571428572,
+ "grad_norm": 55.428138732910156,
+ "learning_rate": 5.431013431013431e-05,
+ "loss": 0.8934,
+ "step": 507
+ },
+ {
+ "epoch": 1.8608058608058609,
+ "grad_norm": 27.962610244750977,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 0.662,
+ "step": 508
+ },
+ {
+ "epoch": 1.8644688644688645,
+ "grad_norm": 62.84252166748047,
+ "learning_rate": 5.426129426129427e-05,
+ "loss": 1.9216,
+ "step": 509
+ },
+ {
+ "epoch": 1.8681318681318682,
+ "grad_norm": 24.26439666748047,
+ "learning_rate": 5.423687423687424e-05,
+ "loss": 0.2164,
+ "step": 510
+ },
+ {
+ "epoch": 1.8717948717948718,
+ "grad_norm": 50.95674133300781,
+ "learning_rate": 5.421245421245422e-05,
+ "loss": 0.7023,
+ "step": 511
+ },
+ {
+ "epoch": 1.8754578754578755,
+ "grad_norm": 41.17847442626953,
+ "learning_rate": 5.418803418803419e-05,
+ "loss": 1.1081,
+ "step": 512
+ },
+ {
+ "epoch": 1.879120879120879,
+ "grad_norm": 28.701988220214844,
+ "learning_rate": 5.416361416361416e-05,
+ "loss": 0.6519,
+ "step": 513
+ },
+ {
+ "epoch": 1.8827838827838828,
+ "grad_norm": 48.42552947998047,
+ "learning_rate": 5.413919413919414e-05,
+ "loss": 1.5215,
+ "step": 514
+ },
+ {
+ "epoch": 1.8864468864468864,
+ "grad_norm": 19.71268653869629,
+ "learning_rate": 5.411477411477412e-05,
+ "loss": 0.4731,
+ "step": 515
+ },
+ {
+ "epoch": 1.89010989010989,
+ "grad_norm": 68.88224792480469,
+ "learning_rate": 5.409035409035409e-05,
+ "loss": 3.0071,
+ "step": 516
+ },
+ {
+ "epoch": 1.8937728937728937,
+ "grad_norm": 34.33188247680664,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.7014,
+ "step": 517
+ },
+ {
+ "epoch": 1.8974358974358974,
+ "grad_norm": 18.214942932128906,
+ "learning_rate": 5.404151404151404e-05,
+ "loss": 0.2362,
+ "step": 518
+ },
+ {
+ "epoch": 1.901098901098901,
+ "grad_norm": 31.553678512573242,
+ "learning_rate": 5.401709401709402e-05,
+ "loss": 0.5839,
+ "step": 519
+ },
+ {
+ "epoch": 1.9047619047619047,
+ "grad_norm": 15.681426048278809,
+ "learning_rate": 5.3992673992673996e-05,
+ "loss": 0.6039,
+ "step": 520
+ },
+ {
+ "epoch": 1.9084249084249083,
+ "grad_norm": 18.462688446044922,
+ "learning_rate": 5.396825396825397e-05,
+ "loss": 0.5773,
+ "step": 521
+ },
+ {
+ "epoch": 1.912087912087912,
+ "grad_norm": 10.23849105834961,
+ "learning_rate": 5.3943833943833946e-05,
+ "loss": 0.3801,
+ "step": 522
+ },
+ {
+ "epoch": 1.9157509157509156,
+ "grad_norm": 35.680973052978516,
+ "learning_rate": 5.3919413919413925e-05,
+ "loss": 1.2559,
+ "step": 523
+ },
+ {
+ "epoch": 1.9194139194139193,
+ "grad_norm": 23.97362518310547,
+ "learning_rate": 5.3894993894993897e-05,
+ "loss": 0.4112,
+ "step": 524
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 25.785356521606445,
+ "learning_rate": 5.387057387057387e-05,
+ "loss": 0.8993,
+ "step": 525
+ },
+ {
+ "epoch": 1.9267399267399268,
+ "grad_norm": 25.246868133544922,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 0.6534,
+ "step": 526
+ },
+ {
+ "epoch": 1.9304029304029304,
+ "grad_norm": 29.850788116455078,
+ "learning_rate": 5.382173382173382e-05,
+ "loss": 0.52,
+ "step": 527
+ },
+ {
+ "epoch": 1.934065934065934,
+ "grad_norm": 20.702608108520508,
+ "learning_rate": 5.37973137973138e-05,
+ "loss": 0.4093,
+ "step": 528
+ },
+ {
+ "epoch": 1.9377289377289377,
+ "grad_norm": 36.39994812011719,
+ "learning_rate": 5.3772893772893775e-05,
+ "loss": 1.275,
+ "step": 529
+ },
+ {
+ "epoch": 1.9413919413919414,
+ "grad_norm": 27.56822395324707,
+ "learning_rate": 5.374847374847375e-05,
+ "loss": 0.6773,
+ "step": 530
+ },
+ {
+ "epoch": 1.945054945054945,
+ "grad_norm": 26.07769012451172,
+ "learning_rate": 5.3724053724053725e-05,
+ "loss": 0.5373,
+ "step": 531
+ },
+ {
+ "epoch": 1.9487179487179487,
+ "grad_norm": 48.47615051269531,
+ "learning_rate": 5.3699633699633704e-05,
+ "loss": 1.1931,
+ "step": 532
+ },
+ {
+ "epoch": 1.9523809523809523,
+ "grad_norm": 24.416805267333984,
+ "learning_rate": 5.3675213675213675e-05,
+ "loss": 0.4523,
+ "step": 533
+ },
+ {
+ "epoch": 1.9560439560439562,
+ "grad_norm": 56.8088264465332,
+ "learning_rate": 5.3650793650793654e-05,
+ "loss": 1.8992,
+ "step": 534
+ },
+ {
+ "epoch": 1.9597069597069599,
+ "grad_norm": 36.805912017822266,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 1.0743,
+ "step": 535
+ },
+ {
+ "epoch": 1.9633699633699635,
+ "grad_norm": 17.375244140625,
+ "learning_rate": 5.3601953601953604e-05,
+ "loss": 0.3546,
+ "step": 536
+ },
+ {
+ "epoch": 1.9670329670329672,
+ "grad_norm": 35.297767639160156,
+ "learning_rate": 5.357753357753358e-05,
+ "loss": 1.4903,
+ "step": 537
+ },
+ {
+ "epoch": 1.9706959706959708,
+ "grad_norm": 38.64927673339844,
+ "learning_rate": 5.3553113553113554e-05,
+ "loss": 0.9346,
+ "step": 538
+ },
+ {
+ "epoch": 1.9743589743589745,
+ "grad_norm": 23.494552612304688,
+ "learning_rate": 5.3528693528693526e-05,
+ "loss": 0.3677,
+ "step": 539
+ },
+ {
+ "epoch": 1.978021978021978,
+ "grad_norm": 21.8272647857666,
+ "learning_rate": 5.3504273504273504e-05,
+ "loss": 0.591,
+ "step": 540
+ },
+ {
+ "epoch": 1.9816849816849818,
+ "grad_norm": 15.60590934753418,
+ "learning_rate": 5.347985347985348e-05,
+ "loss": 0.3129,
+ "step": 541
+ },
+ {
+ "epoch": 1.9853479853479854,
+ "grad_norm": 23.846555709838867,
+ "learning_rate": 5.3455433455433454e-05,
+ "loss": 0.6108,
+ "step": 542
+ },
+ {
+ "epoch": 1.989010989010989,
+ "grad_norm": 21.743024826049805,
+ "learning_rate": 5.343101343101343e-05,
+ "loss": 1.0541,
+ "step": 543
+ },
+ {
+ "epoch": 1.9926739926739927,
+ "grad_norm": 29.806121826171875,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6088,
+ "step": 544
+ },
+ {
+ "epoch": 1.9963369963369964,
+ "grad_norm": 26.778568267822266,
+ "learning_rate": 5.338217338217338e-05,
+ "loss": 0.5842,
+ "step": 545
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 23.356237411499023,
+ "learning_rate": 5.335775335775336e-05,
+ "loss": 0.4591,
+ "step": 546
+ },
+ {
+ "epoch": 2.0036630036630036,
+ "grad_norm": 17.303443908691406,
+ "learning_rate": 5.333333333333333e-05,
+ "loss": 0.3432,
+ "step": 547
+ },
+ {
+ "epoch": 2.0073260073260073,
+ "grad_norm": 27.082172393798828,
+ "learning_rate": 5.330891330891331e-05,
+ "loss": 0.5156,
+ "step": 548
+ },
+ {
+ "epoch": 2.010989010989011,
+ "grad_norm": 26.520530700683594,
+ "learning_rate": 5.328449328449329e-05,
+ "loss": 0.3989,
+ "step": 549
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 23.737272262573242,
+ "learning_rate": 5.326007326007326e-05,
+ "loss": 0.5484,
+ "step": 550
+ },
+ {
+ "epoch": 2.0183150183150182,
+ "grad_norm": 24.222341537475586,
+ "learning_rate": 5.3235653235653233e-05,
+ "loss": 0.5365,
+ "step": 551
+ },
+ {
+ "epoch": 2.021978021978022,
+ "grad_norm": 29.081924438476562,
+ "learning_rate": 5.321123321123321e-05,
+ "loss": 0.6694,
+ "step": 552
+ },
+ {
+ "epoch": 2.0256410256410255,
+ "grad_norm": 32.419551849365234,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 0.7003,
+ "step": 553
+ },
+ {
+ "epoch": 2.029304029304029,
+ "grad_norm": 42.403709411621094,
+ "learning_rate": 5.316239316239316e-05,
+ "loss": 1.5474,
+ "step": 554
+ },
+ {
+ "epoch": 2.032967032967033,
+ "grad_norm": 17.615140914916992,
+ "learning_rate": 5.313797313797314e-05,
+ "loss": 0.588,
+ "step": 555
+ },
+ {
+ "epoch": 2.0366300366300365,
+ "grad_norm": 14.864067077636719,
+ "learning_rate": 5.311355311355311e-05,
+ "loss": 0.1613,
+ "step": 556
+ },
+ {
+ "epoch": 2.04029304029304,
+ "grad_norm": 20.189815521240234,
+ "learning_rate": 5.308913308913309e-05,
+ "loss": 0.4281,
+ "step": 557
+ },
+ {
+ "epoch": 2.043956043956044,
+ "grad_norm": 28.350017547607422,
+ "learning_rate": 5.306471306471307e-05,
+ "loss": 0.6614,
+ "step": 558
+ },
+ {
+ "epoch": 2.0476190476190474,
+ "grad_norm": 19.987825393676758,
+ "learning_rate": 5.304029304029304e-05,
+ "loss": 0.6906,
+ "step": 559
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 18.6667537689209,
+ "learning_rate": 5.301587301587302e-05,
+ "loss": 0.387,
+ "step": 560
+ },
+ {
+ "epoch": 2.0549450549450547,
+ "grad_norm": 20.930652618408203,
+ "learning_rate": 5.2991452991453e-05,
+ "loss": 0.7157,
+ "step": 561
+ },
+ {
+ "epoch": 2.0586080586080584,
+ "grad_norm": 22.05647087097168,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3256,
+ "step": 562
+ },
+ {
+ "epoch": 2.062271062271062,
+ "grad_norm": 32.66161346435547,
+ "learning_rate": 5.294261294261295e-05,
+ "loss": 1.3013,
+ "step": 563
+ },
+ {
+ "epoch": 2.065934065934066,
+ "grad_norm": 37.43238067626953,
+ "learning_rate": 5.291819291819292e-05,
+ "loss": 0.186,
+ "step": 564
+ },
+ {
+ "epoch": 2.06959706959707,
+ "grad_norm": 32.39999008178711,
+ "learning_rate": 5.289377289377289e-05,
+ "loss": 0.8047,
+ "step": 565
+ },
+ {
+ "epoch": 2.0732600732600734,
+ "grad_norm": 29.727481842041016,
+ "learning_rate": 5.286935286935287e-05,
+ "loss": 0.662,
+ "step": 566
+ },
+ {
+ "epoch": 2.076923076923077,
+ "grad_norm": 16.536264419555664,
+ "learning_rate": 5.284493284493285e-05,
+ "loss": 0.4,
+ "step": 567
+ },
+ {
+ "epoch": 2.0805860805860807,
+ "grad_norm": 23.41500473022461,
+ "learning_rate": 5.282051282051282e-05,
+ "loss": 0.4945,
+ "step": 568
+ },
+ {
+ "epoch": 2.0842490842490844,
+ "grad_norm": 48.842864990234375,
+ "learning_rate": 5.27960927960928e-05,
+ "loss": 0.7584,
+ "step": 569
+ },
+ {
+ "epoch": 2.087912087912088,
+ "grad_norm": 60.06027603149414,
+ "learning_rate": 5.277167277167277e-05,
+ "loss": 0.7179,
+ "step": 570
+ },
+ {
+ "epoch": 2.0915750915750917,
+ "grad_norm": 59.2591552734375,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.4883,
+ "step": 571
+ },
+ {
+ "epoch": 2.0952380952380953,
+ "grad_norm": 14.527932167053223,
+ "learning_rate": 5.272283272283273e-05,
+ "loss": 0.2811,
+ "step": 572
+ },
+ {
+ "epoch": 2.098901098901099,
+ "grad_norm": 16.2915096282959,
+ "learning_rate": 5.26984126984127e-05,
+ "loss": 0.2524,
+ "step": 573
+ },
+ {
+ "epoch": 2.1025641025641026,
+ "grad_norm": 28.938081741333008,
+ "learning_rate": 5.267399267399268e-05,
+ "loss": 0.5138,
+ "step": 574
+ },
+ {
+ "epoch": 2.1062271062271063,
+ "grad_norm": 27.541440963745117,
+ "learning_rate": 5.2649572649572655e-05,
+ "loss": 0.278,
+ "step": 575
+ },
+ {
+ "epoch": 2.10989010989011,
+ "grad_norm": 23.179025650024414,
+ "learning_rate": 5.262515262515263e-05,
+ "loss": 0.1881,
+ "step": 576
+ },
+ {
+ "epoch": 2.1135531135531136,
+ "grad_norm": 42.55375671386719,
+ "learning_rate": 5.26007326007326e-05,
+ "loss": 0.7882,
+ "step": 577
+ },
+ {
+ "epoch": 2.1172161172161172,
+ "grad_norm": 8.902749061584473,
+ "learning_rate": 5.257631257631258e-05,
+ "loss": 0.0611,
+ "step": 578
+ },
+ {
+ "epoch": 2.120879120879121,
+ "grad_norm": 19.483346939086914,
+ "learning_rate": 5.255189255189255e-05,
+ "loss": 0.0978,
+ "step": 579
+ },
+ {
+ "epoch": 2.1245421245421245,
+ "grad_norm": 13.898221969604492,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.0797,
+ "step": 580
+ },
+ {
+ "epoch": 2.128205128205128,
+ "grad_norm": 53.42538833618164,
+ "learning_rate": 5.2503052503052506e-05,
+ "loss": 0.9066,
+ "step": 581
+ },
+ {
+ "epoch": 2.131868131868132,
+ "grad_norm": 38.467891693115234,
+ "learning_rate": 5.247863247863248e-05,
+ "loss": 0.3272,
+ "step": 582
+ },
+ {
+ "epoch": 2.1355311355311355,
+ "grad_norm": 26.421035766601562,
+ "learning_rate": 5.2454212454212456e-05,
+ "loss": 0.6537,
+ "step": 583
+ },
+ {
+ "epoch": 2.139194139194139,
+ "grad_norm": 32.80412292480469,
+ "learning_rate": 5.2429792429792434e-05,
+ "loss": 1.1225,
+ "step": 584
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 26.87016487121582,
+ "learning_rate": 5.2405372405372406e-05,
+ "loss": 0.5749,
+ "step": 585
+ },
+ {
+ "epoch": 2.1465201465201464,
+ "grad_norm": 34.75699234008789,
+ "learning_rate": 5.2380952380952384e-05,
+ "loss": 0.6926,
+ "step": 586
+ },
+ {
+ "epoch": 2.15018315018315,
+ "grad_norm": 61.76310348510742,
+ "learning_rate": 5.235653235653236e-05,
+ "loss": 0.9029,
+ "step": 587
+ },
+ {
+ "epoch": 2.1538461538461537,
+ "grad_norm": 40.86505126953125,
+ "learning_rate": 5.2332112332112335e-05,
+ "loss": 0.5169,
+ "step": 588
+ },
+ {
+ "epoch": 2.1575091575091574,
+ "grad_norm": 16.05042839050293,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 0.5211,
+ "step": 589
+ },
+ {
+ "epoch": 2.161172161172161,
+ "grad_norm": 19.56302261352539,
+ "learning_rate": 5.2283272283272285e-05,
+ "loss": 0.5737,
+ "step": 590
+ },
+ {
+ "epoch": 2.1648351648351647,
+ "grad_norm": 22.311508178710938,
+ "learning_rate": 5.2258852258852256e-05,
+ "loss": 0.4223,
+ "step": 591
+ },
+ {
+ "epoch": 2.1684981684981683,
+ "grad_norm": 21.059213638305664,
+ "learning_rate": 5.2234432234432235e-05,
+ "loss": 0.2285,
+ "step": 592
+ },
+ {
+ "epoch": 2.172161172161172,
+ "grad_norm": 28.82351303100586,
+ "learning_rate": 5.221001221001221e-05,
+ "loss": 0.8438,
+ "step": 593
+ },
+ {
+ "epoch": 2.1758241758241756,
+ "grad_norm": 14.425333023071289,
+ "learning_rate": 5.2185592185592185e-05,
+ "loss": 0.1765,
+ "step": 594
+ },
+ {
+ "epoch": 2.1794871794871793,
+ "grad_norm": 16.967479705810547,
+ "learning_rate": 5.2161172161172163e-05,
+ "loss": 0.2465,
+ "step": 595
+ },
+ {
+ "epoch": 2.183150183150183,
+ "grad_norm": 40.79065704345703,
+ "learning_rate": 5.2136752136752135e-05,
+ "loss": 0.6077,
+ "step": 596
+ },
+ {
+ "epoch": 2.186813186813187,
+ "grad_norm": 22.434715270996094,
+ "learning_rate": 5.2112332112332114e-05,
+ "loss": 0.3748,
+ "step": 597
+ },
+ {
+ "epoch": 2.1904761904761907,
+ "grad_norm": 32.18471908569336,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.5163,
+ "step": 598
+ },
+ {
+ "epoch": 2.1941391941391943,
+ "grad_norm": 20.43740463256836,
+ "learning_rate": 5.2063492063492064e-05,
+ "loss": 0.4116,
+ "step": 599
+ },
+ {
+ "epoch": 2.197802197802198,
+ "grad_norm": 6.528069496154785,
+ "learning_rate": 5.203907203907204e-05,
+ "loss": 0.065,
+ "step": 600
+ },
+ {
+ "epoch": 2.2014652014652016,
+ "grad_norm": 35.0635871887207,
+ "learning_rate": 5.201465201465202e-05,
+ "loss": 1.2288,
+ "step": 601
+ },
+ {
+ "epoch": 2.2051282051282053,
+ "grad_norm": 23.499767303466797,
+ "learning_rate": 5.199023199023199e-05,
+ "loss": 0.49,
+ "step": 602
+ },
+ {
+ "epoch": 2.208791208791209,
+ "grad_norm": 20.234952926635742,
+ "learning_rate": 5.1965811965811964e-05,
+ "loss": 0.231,
+ "step": 603
+ },
+ {
+ "epoch": 2.2124542124542126,
+ "grad_norm": 9.268828392028809,
+ "learning_rate": 5.194139194139194e-05,
+ "loss": 0.0732,
+ "step": 604
+ },
+ {
+ "epoch": 2.2161172161172162,
+ "grad_norm": 52.60474395751953,
+ "learning_rate": 5.1916971916971914e-05,
+ "loss": 0.8766,
+ "step": 605
+ },
+ {
+ "epoch": 2.21978021978022,
+ "grad_norm": 41.86642074584961,
+ "learning_rate": 5.189255189255189e-05,
+ "loss": 0.4743,
+ "step": 606
+ },
+ {
+ "epoch": 2.2234432234432235,
+ "grad_norm": 30.304580688476562,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.4412,
+ "step": 607
+ },
+ {
+ "epoch": 2.227106227106227,
+ "grad_norm": 27.26057243347168,
+ "learning_rate": 5.184371184371184e-05,
+ "loss": 0.3496,
+ "step": 608
+ },
+ {
+ "epoch": 2.230769230769231,
+ "grad_norm": 40.55131149291992,
+ "learning_rate": 5.181929181929182e-05,
+ "loss": 0.7097,
+ "step": 609
+ },
+ {
+ "epoch": 2.2344322344322345,
+ "grad_norm": 61.97871017456055,
+ "learning_rate": 5.17948717948718e-05,
+ "loss": 1.3686,
+ "step": 610
+ },
+ {
+ "epoch": 2.238095238095238,
+ "grad_norm": 38.211700439453125,
+ "learning_rate": 5.177045177045177e-05,
+ "loss": 0.565,
+ "step": 611
+ },
+ {
+ "epoch": 2.241758241758242,
+ "grad_norm": 20.10716438293457,
+ "learning_rate": 5.174603174603175e-05,
+ "loss": 0.3468,
+ "step": 612
+ },
+ {
+ "epoch": 2.2454212454212454,
+ "grad_norm": 23.96891975402832,
+ "learning_rate": 5.172161172161173e-05,
+ "loss": 0.2295,
+ "step": 613
+ },
+ {
+ "epoch": 2.249084249084249,
+ "grad_norm": 10.14421272277832,
+ "learning_rate": 5.16971916971917e-05,
+ "loss": 0.0943,
+ "step": 614
+ },
+ {
+ "epoch": 2.2527472527472527,
+ "grad_norm": 15.786056518554688,
+ "learning_rate": 5.167277167277167e-05,
+ "loss": 0.1213,
+ "step": 615
+ },
+ {
+ "epoch": 2.2564102564102564,
+ "grad_norm": 20.907663345336914,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.235,
+ "step": 616
+ },
+ {
+ "epoch": 2.26007326007326,
+ "grad_norm": 32.149600982666016,
+ "learning_rate": 5.162393162393162e-05,
+ "loss": 0.4807,
+ "step": 617
+ },
+ {
+ "epoch": 2.2637362637362637,
+ "grad_norm": 33.965518951416016,
+ "learning_rate": 5.15995115995116e-05,
+ "loss": 0.4517,
+ "step": 618
+ },
+ {
+ "epoch": 2.2673992673992673,
+ "grad_norm": 49.98363494873047,
+ "learning_rate": 5.157509157509158e-05,
+ "loss": 0.6434,
+ "step": 619
+ },
+ {
+ "epoch": 2.271062271062271,
+ "grad_norm": 14.035831451416016,
+ "learning_rate": 5.155067155067155e-05,
+ "loss": 0.1117,
+ "step": 620
+ },
+ {
+ "epoch": 2.2747252747252746,
+ "grad_norm": 28.84484100341797,
+ "learning_rate": 5.152625152625153e-05,
+ "loss": 0.8002,
+ "step": 621
+ },
+ {
+ "epoch": 2.2783882783882783,
+ "grad_norm": 41.59181594848633,
+ "learning_rate": 5.15018315018315e-05,
+ "loss": 0.4465,
+ "step": 622
+ },
+ {
+ "epoch": 2.282051282051282,
+ "grad_norm": 33.10573196411133,
+ "learning_rate": 5.147741147741148e-05,
+ "loss": 0.5795,
+ "step": 623
+ },
+ {
+ "epoch": 2.2857142857142856,
+ "grad_norm": 34.79928970336914,
+ "learning_rate": 5.145299145299146e-05,
+ "loss": 0.3135,
+ "step": 624
+ },
+ {
+ "epoch": 2.2893772893772892,
+ "grad_norm": 18.095544815063477,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.0961,
+ "step": 625
+ },
+ {
+ "epoch": 2.293040293040293,
+ "grad_norm": 16.55453872680664,
+ "learning_rate": 5.140415140415141e-05,
+ "loss": 0.0868,
+ "step": 626
+ },
+ {
+ "epoch": 2.2967032967032965,
+ "grad_norm": 42.18946075439453,
+ "learning_rate": 5.1379731379731386e-05,
+ "loss": 0.8892,
+ "step": 627
+ },
+ {
+ "epoch": 2.3003663003663,
+ "grad_norm": 54.753448486328125,
+ "learning_rate": 5.135531135531135e-05,
+ "loss": 0.833,
+ "step": 628
+ },
+ {
+ "epoch": 2.304029304029304,
+ "grad_norm": 27.723228454589844,
+ "learning_rate": 5.133089133089133e-05,
+ "loss": 0.2744,
+ "step": 629
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 28.53034019470215,
+ "learning_rate": 5.130647130647131e-05,
+ "loss": 0.1696,
+ "step": 630
+ },
+ {
+ "epoch": 2.311355311355311,
+ "grad_norm": 65.4127426147461,
+ "learning_rate": 5.128205128205128e-05,
+ "loss": 0.9019,
+ "step": 631
+ },
+ {
+ "epoch": 2.315018315018315,
+ "grad_norm": 22.794870376586914,
+ "learning_rate": 5.125763125763126e-05,
+ "loss": 0.1987,
+ "step": 632
+ },
+ {
+ "epoch": 2.3186813186813184,
+ "grad_norm": 29.870113372802734,
+ "learning_rate": 5.1233211233211236e-05,
+ "loss": 0.4816,
+ "step": 633
+ },
+ {
+ "epoch": 2.3223443223443225,
+ "grad_norm": 38.91164779663086,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.7424,
+ "step": 634
+ },
+ {
+ "epoch": 2.326007326007326,
+ "grad_norm": 36.57811737060547,
+ "learning_rate": 5.1184371184371186e-05,
+ "loss": 1.1365,
+ "step": 635
+ },
+ {
+ "epoch": 2.32967032967033,
+ "grad_norm": 31.59128189086914,
+ "learning_rate": 5.1159951159951165e-05,
+ "loss": 0.6167,
+ "step": 636
+ },
+ {
+ "epoch": 2.3333333333333335,
+ "grad_norm": 25.956003189086914,
+ "learning_rate": 5.1135531135531136e-05,
+ "loss": 0.8808,
+ "step": 637
+ },
+ {
+ "epoch": 2.336996336996337,
+ "grad_norm": 38.18582534790039,
+ "learning_rate": 5.1111111111111115e-05,
+ "loss": 0.9417,
+ "step": 638
+ },
+ {
+ "epoch": 2.340659340659341,
+ "grad_norm": 27.436229705810547,
+ "learning_rate": 5.108669108669109e-05,
+ "loss": 0.7539,
+ "step": 639
+ },
+ {
+ "epoch": 2.3443223443223444,
+ "grad_norm": 40.86305618286133,
+ "learning_rate": 5.1062271062271065e-05,
+ "loss": 2.126,
+ "step": 640
+ },
+ {
+ "epoch": 2.347985347985348,
+ "grad_norm": 22.224748611450195,
+ "learning_rate": 5.103785103785104e-05,
+ "loss": 0.9958,
+ "step": 641
+ },
+ {
+ "epoch": 2.3516483516483517,
+ "grad_norm": 19.915552139282227,
+ "learning_rate": 5.1013431013431015e-05,
+ "loss": 1.1045,
+ "step": 642
+ },
+ {
+ "epoch": 2.3553113553113554,
+ "grad_norm": 17.045989990234375,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8906,
+ "step": 643
+ },
+ {
+ "epoch": 2.358974358974359,
+ "grad_norm": 22.106670379638672,
+ "learning_rate": 5.0964590964590965e-05,
+ "loss": 0.9856,
+ "step": 644
+ },
+ {
+ "epoch": 2.3626373626373627,
+ "grad_norm": 17.583837509155273,
+ "learning_rate": 5.0940170940170944e-05,
+ "loss": 0.8328,
+ "step": 645
+ },
+ {
+ "epoch": 2.3663003663003663,
+ "grad_norm": 57.61167526245117,
+ "learning_rate": 5.0915750915750915e-05,
+ "loss": 0.578,
+ "step": 646
+ },
+ {
+ "epoch": 2.36996336996337,
+ "grad_norm": 13.941128730773926,
+ "learning_rate": 5.0891330891330894e-05,
+ "loss": 0.5892,
+ "step": 647
+ },
+ {
+ "epoch": 2.3736263736263736,
+ "grad_norm": 22.38715171813965,
+ "learning_rate": 5.0866910866910866e-05,
+ "loss": 0.7608,
+ "step": 648
+ },
+ {
+ "epoch": 2.3772893772893773,
+ "grad_norm": 22.42316436767578,
+ "learning_rate": 5.0842490842490844e-05,
+ "loss": 0.7923,
+ "step": 649
+ },
+ {
+ "epoch": 2.380952380952381,
+ "grad_norm": 32.75740432739258,
+ "learning_rate": 5.081807081807082e-05,
+ "loss": 1.0798,
+ "step": 650
+ },
+ {
+ "epoch": 2.3846153846153846,
+ "grad_norm": 19.295289993286133,
+ "learning_rate": 5.0793650793650794e-05,
+ "loss": 0.4898,
+ "step": 651
+ },
+ {
+ "epoch": 2.3882783882783882,
+ "grad_norm": 25.849227905273438,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.5557,
+ "step": 652
+ },
+ {
+ "epoch": 2.391941391941392,
+ "grad_norm": 21.321088790893555,
+ "learning_rate": 5.074481074481075e-05,
+ "loss": 0.2743,
+ "step": 653
+ },
+ {
+ "epoch": 2.3956043956043955,
+ "grad_norm": 28.795917510986328,
+ "learning_rate": 5.0720390720390716e-05,
+ "loss": 0.7039,
+ "step": 654
+ },
+ {
+ "epoch": 2.399267399267399,
+ "grad_norm": 19.86751937866211,
+ "learning_rate": 5.0695970695970694e-05,
+ "loss": 0.3155,
+ "step": 655
+ },
+ {
+ "epoch": 2.402930402930403,
+ "grad_norm": 33.3828010559082,
+ "learning_rate": 5.067155067155067e-05,
+ "loss": 1.0696,
+ "step": 656
+ },
+ {
+ "epoch": 2.4065934065934065,
+ "grad_norm": 37.38752746582031,
+ "learning_rate": 5.0647130647130645e-05,
+ "loss": 0.8123,
+ "step": 657
+ },
+ {
+ "epoch": 2.41025641025641,
+ "grad_norm": 29.22795867919922,
+ "learning_rate": 5.062271062271062e-05,
+ "loss": 0.9515,
+ "step": 658
+ },
+ {
+ "epoch": 2.413919413919414,
+ "grad_norm": 41.129981994628906,
+ "learning_rate": 5.05982905982906e-05,
+ "loss": 1.1329,
+ "step": 659
+ },
+ {
+ "epoch": 2.4175824175824174,
+ "grad_norm": 40.985042572021484,
+ "learning_rate": 5.057387057387057e-05,
+ "loss": 0.675,
+ "step": 660
+ },
+ {
+ "epoch": 2.421245421245421,
+ "grad_norm": 33.49393844604492,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 0.9679,
+ "step": 661
+ },
+ {
+ "epoch": 2.4249084249084247,
+ "grad_norm": 28.741533279418945,
+ "learning_rate": 5.052503052503053e-05,
+ "loss": 0.7928,
+ "step": 662
+ },
+ {
+ "epoch": 2.4285714285714284,
+ "grad_norm": 28.89700698852539,
+ "learning_rate": 5.05006105006105e-05,
+ "loss": 0.7594,
+ "step": 663
+ },
+ {
+ "epoch": 2.4322344322344325,
+ "grad_norm": 4.59797477722168,
+ "learning_rate": 5.047619047619048e-05,
+ "loss": 0.0584,
+ "step": 664
+ },
+ {
+ "epoch": 2.435897435897436,
+ "grad_norm": 29.852828979492188,
+ "learning_rate": 5.045177045177046e-05,
+ "loss": 0.614,
+ "step": 665
+ },
+ {
+ "epoch": 2.4395604395604398,
+ "grad_norm": 15.132670402526855,
+ "learning_rate": 5.042735042735043e-05,
+ "loss": 0.2353,
+ "step": 666
+ },
+ {
+ "epoch": 2.4432234432234434,
+ "grad_norm": 23.85403060913086,
+ "learning_rate": 5.04029304029304e-05,
+ "loss": 0.9065,
+ "step": 667
+ },
+ {
+ "epoch": 2.446886446886447,
+ "grad_norm": 12.384196281433105,
+ "learning_rate": 5.037851037851038e-05,
+ "loss": 0.2065,
+ "step": 668
+ },
+ {
+ "epoch": 2.4505494505494507,
+ "grad_norm": 18.347129821777344,
+ "learning_rate": 5.035409035409035e-05,
+ "loss": 0.647,
+ "step": 669
+ },
+ {
+ "epoch": 2.4542124542124544,
+ "grad_norm": 18.645936965942383,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2072,
+ "step": 670
+ },
+ {
+ "epoch": 2.457875457875458,
+ "grad_norm": 9.493071556091309,
+ "learning_rate": 5.03052503052503e-05,
+ "loss": 0.1805,
+ "step": 671
+ },
+ {
+ "epoch": 2.4615384615384617,
+ "grad_norm": 18.552539825439453,
+ "learning_rate": 5.028083028083028e-05,
+ "loss": 0.4078,
+ "step": 672
+ },
+ {
+ "epoch": 2.4652014652014653,
+ "grad_norm": 21.735048294067383,
+ "learning_rate": 5.025641025641026e-05,
+ "loss": 0.4231,
+ "step": 673
+ },
+ {
+ "epoch": 2.468864468864469,
+ "grad_norm": 54.32040023803711,
+ "learning_rate": 5.023199023199023e-05,
+ "loss": 1.3927,
+ "step": 674
+ },
+ {
+ "epoch": 2.4725274725274726,
+ "grad_norm": 26.955970764160156,
+ "learning_rate": 5.020757020757021e-05,
+ "loss": 0.6899,
+ "step": 675
+ },
+ {
+ "epoch": 2.4761904761904763,
+ "grad_norm": 43.423526763916016,
+ "learning_rate": 5.018315018315019e-05,
+ "loss": 1.2084,
+ "step": 676
+ },
+ {
+ "epoch": 2.47985347985348,
+ "grad_norm": 35.98548126220703,
+ "learning_rate": 5.015873015873016e-05,
+ "loss": 1.5047,
+ "step": 677
+ },
+ {
+ "epoch": 2.4835164835164836,
+ "grad_norm": 22.593570709228516,
+ "learning_rate": 5.013431013431014e-05,
+ "loss": 0.6918,
+ "step": 678
+ },
+ {
+ "epoch": 2.4871794871794872,
+ "grad_norm": 21.29257583618164,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.3578,
+ "step": 679
+ },
+ {
+ "epoch": 2.490842490842491,
+ "grad_norm": 21.672088623046875,
+ "learning_rate": 5.008547008547008e-05,
+ "loss": 0.7757,
+ "step": 680
+ },
+ {
+ "epoch": 2.4945054945054945,
+ "grad_norm": 9.625850677490234,
+ "learning_rate": 5.006105006105006e-05,
+ "loss": 0.1329,
+ "step": 681
+ },
+ {
+ "epoch": 2.498168498168498,
+ "grad_norm": 16.92123794555664,
+ "learning_rate": 5.003663003663004e-05,
+ "loss": 0.5599,
+ "step": 682
+ },
+ {
+ "epoch": 2.501831501831502,
+ "grad_norm": 15.665925025939941,
+ "learning_rate": 5.001221001221001e-05,
+ "loss": 0.3099,
+ "step": 683
+ },
+ {
+ "epoch": 2.5054945054945055,
+ "grad_norm": 21.316635131835938,
+ "learning_rate": 4.998778998778999e-05,
+ "loss": 0.5746,
+ "step": 684
+ },
+ {
+ "epoch": 2.509157509157509,
+ "grad_norm": 24.99594497680664,
+ "learning_rate": 4.996336996336997e-05,
+ "loss": 1.1274,
+ "step": 685
+ },
+ {
+ "epoch": 2.5128205128205128,
+ "grad_norm": 29.795175552368164,
+ "learning_rate": 4.993894993894994e-05,
+ "loss": 0.9991,
+ "step": 686
+ },
+ {
+ "epoch": 2.5164835164835164,
+ "grad_norm": 16.337533950805664,
+ "learning_rate": 4.991452991452992e-05,
+ "loss": 0.4101,
+ "step": 687
+ },
+ {
+ "epoch": 2.52014652014652,
+ "grad_norm": 20.065715789794922,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.7786,
+ "step": 688
+ },
+ {
+ "epoch": 2.5238095238095237,
+ "grad_norm": 19.341567993164062,
+ "learning_rate": 4.986568986568987e-05,
+ "loss": 0.4989,
+ "step": 689
+ },
+ {
+ "epoch": 2.5274725274725274,
+ "grad_norm": 14.688420295715332,
+ "learning_rate": 4.9841269841269845e-05,
+ "loss": 0.4081,
+ "step": 690
+ },
+ {
+ "epoch": 2.531135531135531,
+ "grad_norm": 39.346012115478516,
+ "learning_rate": 4.9816849816849824e-05,
+ "loss": 1.7919,
+ "step": 691
+ },
+ {
+ "epoch": 2.5347985347985347,
+ "grad_norm": 21.353286743164062,
+ "learning_rate": 4.9792429792429796e-05,
+ "loss": 0.698,
+ "step": 692
+ },
+ {
+ "epoch": 2.5384615384615383,
+ "grad_norm": 35.96653366088867,
+ "learning_rate": 4.976800976800977e-05,
+ "loss": 1.6584,
+ "step": 693
+ },
+ {
+ "epoch": 2.542124542124542,
+ "grad_norm": 19.14348793029785,
+ "learning_rate": 4.9743589743589746e-05,
+ "loss": 0.885,
+ "step": 694
+ },
+ {
+ "epoch": 2.5457875457875456,
+ "grad_norm": 9.260897636413574,
+ "learning_rate": 4.971916971916972e-05,
+ "loss": 0.1629,
+ "step": 695
+ },
+ {
+ "epoch": 2.5494505494505493,
+ "grad_norm": 18.497526168823242,
+ "learning_rate": 4.9694749694749696e-05,
+ "loss": 0.7242,
+ "step": 696
+ },
+ {
+ "epoch": 2.553113553113553,
+ "grad_norm": 8.879841804504395,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 0.1302,
+ "step": 697
+ },
+ {
+ "epoch": 2.5567765567765566,
+ "grad_norm": 26.34065818786621,
+ "learning_rate": 4.9645909645909646e-05,
+ "loss": 0.7333,
+ "step": 698
+ },
+ {
+ "epoch": 2.5604395604395602,
+ "grad_norm": 15.10546588897705,
+ "learning_rate": 4.9621489621489624e-05,
+ "loss": 0.3119,
+ "step": 699
+ },
+ {
+ "epoch": 2.564102564102564,
+ "grad_norm": 10.68095874786377,
+ "learning_rate": 4.9597069597069596e-05,
+ "loss": 0.2505,
+ "step": 700
+ },
+ {
+ "epoch": 2.5677655677655675,
+ "grad_norm": 29.08888053894043,
+ "learning_rate": 4.9572649572649575e-05,
+ "loss": 0.4286,
+ "step": 701
+ },
+ {
+ "epoch": 2.571428571428571,
+ "grad_norm": 29.939416885375977,
+ "learning_rate": 4.954822954822955e-05,
+ "loss": 1.1529,
+ "step": 702
+ },
+ {
+ "epoch": 2.575091575091575,
+ "grad_norm": 32.78864669799805,
+ "learning_rate": 4.9523809523809525e-05,
+ "loss": 0.9834,
+ "step": 703
+ },
+ {
+ "epoch": 2.578754578754579,
+ "grad_norm": 13.99082088470459,
+ "learning_rate": 4.94993894993895e-05,
+ "loss": 0.1934,
+ "step": 704
+ },
+ {
+ "epoch": 2.5824175824175826,
+ "grad_norm": 31.696718215942383,
+ "learning_rate": 4.9474969474969475e-05,
+ "loss": 0.6881,
+ "step": 705
+ },
+ {
+ "epoch": 2.586080586080586,
+ "grad_norm": 39.26205062866211,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.573,
+ "step": 706
+ },
+ {
+ "epoch": 2.58974358974359,
+ "grad_norm": 42.08647918701172,
+ "learning_rate": 4.9426129426129425e-05,
+ "loss": 1.5935,
+ "step": 707
+ },
+ {
+ "epoch": 2.5934065934065935,
+ "grad_norm": 24.630651473999023,
+ "learning_rate": 4.94017094017094e-05,
+ "loss": 0.7016,
+ "step": 708
+ },
+ {
+ "epoch": 2.597069597069597,
+ "grad_norm": 35.33428192138672,
+ "learning_rate": 4.9377289377289375e-05,
+ "loss": 0.9646,
+ "step": 709
+ },
+ {
+ "epoch": 2.600732600732601,
+ "grad_norm": 21.643918991088867,
+ "learning_rate": 4.9352869352869353e-05,
+ "loss": 0.3679,
+ "step": 710
+ },
+ {
+ "epoch": 2.6043956043956045,
+ "grad_norm": 10.6254301071167,
+ "learning_rate": 4.932844932844933e-05,
+ "loss": 0.1059,
+ "step": 711
+ },
+ {
+ "epoch": 2.608058608058608,
+ "grad_norm": 23.43462562561035,
+ "learning_rate": 4.9304029304029304e-05,
+ "loss": 0.5128,
+ "step": 712
+ },
+ {
+ "epoch": 2.6117216117216118,
+ "grad_norm": 25.748422622680664,
+ "learning_rate": 4.927960927960928e-05,
+ "loss": 0.6154,
+ "step": 713
+ },
+ {
+ "epoch": 2.6153846153846154,
+ "grad_norm": 23.163209915161133,
+ "learning_rate": 4.925518925518926e-05,
+ "loss": 0.3978,
+ "step": 714
+ },
+ {
+ "epoch": 2.619047619047619,
+ "grad_norm": 22.306194305419922,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.3984,
+ "step": 715
+ },
+ {
+ "epoch": 2.6227106227106227,
+ "grad_norm": 48.16558074951172,
+ "learning_rate": 4.920634920634921e-05,
+ "loss": 0.9568,
+ "step": 716
+ },
+ {
+ "epoch": 2.6263736263736264,
+ "grad_norm": 48.76753234863281,
+ "learning_rate": 4.918192918192919e-05,
+ "loss": 0.6579,
+ "step": 717
+ },
+ {
+ "epoch": 2.63003663003663,
+ "grad_norm": 57.938720703125,
+ "learning_rate": 4.9157509157509154e-05,
+ "loss": 1.0926,
+ "step": 718
+ },
+ {
+ "epoch": 2.6336996336996337,
+ "grad_norm": 25.495267868041992,
+ "learning_rate": 4.913308913308913e-05,
+ "loss": 0.3717,
+ "step": 719
+ },
+ {
+ "epoch": 2.6373626373626373,
+ "grad_norm": 20.054609298706055,
+ "learning_rate": 4.910866910866911e-05,
+ "loss": 0.4502,
+ "step": 720
+ },
+ {
+ "epoch": 2.641025641025641,
+ "grad_norm": 23.096263885498047,
+ "learning_rate": 4.908424908424908e-05,
+ "loss": 0.2794,
+ "step": 721
+ },
+ {
+ "epoch": 2.6446886446886446,
+ "grad_norm": 6.073278903961182,
+ "learning_rate": 4.905982905982906e-05,
+ "loss": 0.0519,
+ "step": 722
+ },
+ {
+ "epoch": 2.6483516483516483,
+ "grad_norm": 38.562618255615234,
+ "learning_rate": 4.903540903540903e-05,
+ "loss": 0.8839,
+ "step": 723
+ },
+ {
+ "epoch": 2.652014652014652,
+ "grad_norm": 23.544757843017578,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.3935,
+ "step": 724
+ },
+ {
+ "epoch": 2.6556776556776556,
+ "grad_norm": 22.844032287597656,
+ "learning_rate": 4.898656898656899e-05,
+ "loss": 0.2428,
+ "step": 725
+ },
+ {
+ "epoch": 2.659340659340659,
+ "grad_norm": 11.537687301635742,
+ "learning_rate": 4.896214896214896e-05,
+ "loss": 0.1538,
+ "step": 726
+ },
+ {
+ "epoch": 2.663003663003663,
+ "grad_norm": 59.37337112426758,
+ "learning_rate": 4.893772893772894e-05,
+ "loss": 1.181,
+ "step": 727
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 22.206314086914062,
+ "learning_rate": 4.891330891330892e-05,
+ "loss": 0.4044,
+ "step": 728
+ },
+ {
+ "epoch": 2.67032967032967,
+ "grad_norm": 27.44620132446289,
+ "learning_rate": 4.888888888888889e-05,
+ "loss": 0.585,
+ "step": 729
+ },
+ {
+ "epoch": 2.6739926739926743,
+ "grad_norm": 35.70675277709961,
+ "learning_rate": 4.886446886446887e-05,
+ "loss": 0.6853,
+ "step": 730
+ },
+ {
+ "epoch": 2.677655677655678,
+ "grad_norm": 25.653356552124023,
+ "learning_rate": 4.884004884004884e-05,
+ "loss": 0.6143,
+ "step": 731
+ },
+ {
+ "epoch": 2.6813186813186816,
+ "grad_norm": 24.242090225219727,
+ "learning_rate": 4.881562881562881e-05,
+ "loss": 0.4365,
+ "step": 732
+ },
+ {
+ "epoch": 2.684981684981685,
+ "grad_norm": 25.621902465820312,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.6644,
+ "step": 733
+ },
+ {
+ "epoch": 2.688644688644689,
+ "grad_norm": 14.14786434173584,
+ "learning_rate": 4.876678876678877e-05,
+ "loss": 0.4117,
+ "step": 734
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 37.98638916015625,
+ "learning_rate": 4.874236874236874e-05,
+ "loss": 1.0452,
+ "step": 735
+ },
+ {
+ "epoch": 2.695970695970696,
+ "grad_norm": 23.186302185058594,
+ "learning_rate": 4.871794871794872e-05,
+ "loss": 0.2642,
+ "step": 736
+ },
+ {
+ "epoch": 2.6996336996337,
+ "grad_norm": 27.23651695251465,
+ "learning_rate": 4.86935286935287e-05,
+ "loss": 0.393,
+ "step": 737
+ },
+ {
+ "epoch": 2.7032967032967035,
+ "grad_norm": 36.44395446777344,
+ "learning_rate": 4.866910866910867e-05,
+ "loss": 1.1309,
+ "step": 738
+ },
+ {
+ "epoch": 2.706959706959707,
+ "grad_norm": 9.733710289001465,
+ "learning_rate": 4.864468864468865e-05,
+ "loss": 0.2466,
+ "step": 739
+ },
+ {
+ "epoch": 2.7106227106227108,
+ "grad_norm": 24.727527618408203,
+ "learning_rate": 4.8620268620268626e-05,
+ "loss": 0.46,
+ "step": 740
+ },
+ {
+ "epoch": 2.7142857142857144,
+ "grad_norm": 15.122056007385254,
+ "learning_rate": 4.85958485958486e-05,
+ "loss": 0.3122,
+ "step": 741
+ },
+ {
+ "epoch": 2.717948717948718,
+ "grad_norm": 24.059120178222656,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.2359,
+ "step": 742
+ },
+ {
+ "epoch": 2.7216117216117217,
+ "grad_norm": 7.659122467041016,
+ "learning_rate": 4.8547008547008554e-05,
+ "loss": 0.1212,
+ "step": 743
+ },
+ {
+ "epoch": 2.7252747252747254,
+ "grad_norm": 27.002117156982422,
+ "learning_rate": 4.852258852258852e-05,
+ "loss": 0.7593,
+ "step": 744
+ },
+ {
+ "epoch": 2.728937728937729,
+ "grad_norm": 6.3852009773254395,
+ "learning_rate": 4.84981684981685e-05,
+ "loss": 0.0644,
+ "step": 745
+ },
+ {
+ "epoch": 2.7326007326007327,
+ "grad_norm": 25.574190139770508,
+ "learning_rate": 4.8473748473748476e-05,
+ "loss": 0.7012,
+ "step": 746
+ },
+ {
+ "epoch": 2.7362637362637363,
+ "grad_norm": 15.720768928527832,
+ "learning_rate": 4.844932844932845e-05,
+ "loss": 0.2692,
+ "step": 747
+ },
+ {
+ "epoch": 2.73992673992674,
+ "grad_norm": 25.527997970581055,
+ "learning_rate": 4.8424908424908426e-05,
+ "loss": 0.2648,
+ "step": 748
+ },
+ {
+ "epoch": 2.7435897435897436,
+ "grad_norm": 27.791011810302734,
+ "learning_rate": 4.84004884004884e-05,
+ "loss": 0.6007,
+ "step": 749
+ },
+ {
+ "epoch": 2.7472527472527473,
+ "grad_norm": 20.487640380859375,
+ "learning_rate": 4.8376068376068376e-05,
+ "loss": 0.5715,
+ "step": 750
+ },
+ {
+ "epoch": 2.750915750915751,
+ "grad_norm": 6.386992454528809,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 0.06,
+ "step": 751
+ },
+ {
+ "epoch": 2.7545787545787546,
+ "grad_norm": 13.110812187194824,
+ "learning_rate": 4.8327228327228327e-05,
+ "loss": 0.129,
+ "step": 752
+ },
+ {
+ "epoch": 2.758241758241758,
+ "grad_norm": 26.55845832824707,
+ "learning_rate": 4.8302808302808305e-05,
+ "loss": 0.67,
+ "step": 753
+ },
+ {
+ "epoch": 2.761904761904762,
+ "grad_norm": 38.83135223388672,
+ "learning_rate": 4.8278388278388283e-05,
+ "loss": 1.6656,
+ "step": 754
+ },
+ {
+ "epoch": 2.7655677655677655,
+ "grad_norm": 25.99518585205078,
+ "learning_rate": 4.8253968253968255e-05,
+ "loss": 0.3285,
+ "step": 755
+ },
+ {
+ "epoch": 2.769230769230769,
+ "grad_norm": 17.282081604003906,
+ "learning_rate": 4.8229548229548234e-05,
+ "loss": 0.2217,
+ "step": 756
+ },
+ {
+ "epoch": 2.772893772893773,
+ "grad_norm": 28.849924087524414,
+ "learning_rate": 4.8205128205128205e-05,
+ "loss": 0.7287,
+ "step": 757
+ },
+ {
+ "epoch": 2.7765567765567765,
+ "grad_norm": 45.79567337036133,
+ "learning_rate": 4.818070818070818e-05,
+ "loss": 1.6964,
+ "step": 758
+ },
+ {
+ "epoch": 2.78021978021978,
+ "grad_norm": 15.203421592712402,
+ "learning_rate": 4.8156288156288155e-05,
+ "loss": 0.2351,
+ "step": 759
+ },
+ {
+ "epoch": 2.7838827838827838,
+ "grad_norm": 10.686698913574219,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.1533,
+ "step": 760
+ },
+ {
+ "epoch": 2.7875457875457874,
+ "grad_norm": 24.186473846435547,
+ "learning_rate": 4.8107448107448106e-05,
+ "loss": 1.0973,
+ "step": 761
+ },
+ {
+ "epoch": 2.791208791208791,
+ "grad_norm": 25.378986358642578,
+ "learning_rate": 4.8083028083028084e-05,
+ "loss": 0.5847,
+ "step": 762
+ },
+ {
+ "epoch": 2.7948717948717947,
+ "grad_norm": 20.066482543945312,
+ "learning_rate": 4.805860805860806e-05,
+ "loss": 0.2643,
+ "step": 763
+ },
+ {
+ "epoch": 2.7985347985347984,
+ "grad_norm": 56.11622619628906,
+ "learning_rate": 4.8034188034188034e-05,
+ "loss": 0.6949,
+ "step": 764
+ },
+ {
+ "epoch": 2.802197802197802,
+ "grad_norm": 27.80112648010254,
+ "learning_rate": 4.800976800976801e-05,
+ "loss": 0.5622,
+ "step": 765
+ },
+ {
+ "epoch": 2.8058608058608057,
+ "grad_norm": 30.947532653808594,
+ "learning_rate": 4.798534798534799e-05,
+ "loss": 0.6276,
+ "step": 766
+ },
+ {
+ "epoch": 2.8095238095238093,
+ "grad_norm": 8.91073226928711,
+ "learning_rate": 4.796092796092796e-05,
+ "loss": 0.1302,
+ "step": 767
+ },
+ {
+ "epoch": 2.813186813186813,
+ "grad_norm": 24.65394401550293,
+ "learning_rate": 4.793650793650794e-05,
+ "loss": 0.6811,
+ "step": 768
+ },
+ {
+ "epoch": 2.8168498168498166,
+ "grad_norm": 18.257539749145508,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 0.271,
+ "step": 769
+ },
+ {
+ "epoch": 2.8205128205128203,
+ "grad_norm": 41.41588592529297,
+ "learning_rate": 4.7887667887667884e-05,
+ "loss": 1.4149,
+ "step": 770
+ },
+ {
+ "epoch": 2.824175824175824,
+ "grad_norm": 7.753188610076904,
+ "learning_rate": 4.786324786324786e-05,
+ "loss": 0.0825,
+ "step": 771
+ },
+ {
+ "epoch": 2.8278388278388276,
+ "grad_norm": 208.88290405273438,
+ "learning_rate": 4.783882783882784e-05,
+ "loss": 1.032,
+ "step": 772
+ },
+ {
+ "epoch": 2.8315018315018317,
+ "grad_norm": 31.91672706604004,
+ "learning_rate": 4.781440781440781e-05,
+ "loss": 0.9783,
+ "step": 773
+ },
+ {
+ "epoch": 2.8351648351648353,
+ "grad_norm": 5.72416877746582,
+ "learning_rate": 4.778998778998779e-05,
+ "loss": 0.0399,
+ "step": 774
+ },
+ {
+ "epoch": 2.838827838827839,
+ "grad_norm": 30.503149032592773,
+ "learning_rate": 4.776556776556776e-05,
+ "loss": 0.6465,
+ "step": 775
+ },
+ {
+ "epoch": 2.8424908424908426,
+ "grad_norm": 29.615020751953125,
+ "learning_rate": 4.774114774114774e-05,
+ "loss": 0.5823,
+ "step": 776
+ },
+ {
+ "epoch": 2.8461538461538463,
+ "grad_norm": 49.922611236572266,
+ "learning_rate": 4.771672771672772e-05,
+ "loss": 1.2045,
+ "step": 777
+ },
+ {
+ "epoch": 2.84981684981685,
+ "grad_norm": 23.30948829650879,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.5962,
+ "step": 778
+ },
+ {
+ "epoch": 2.8534798534798536,
+ "grad_norm": 24.784086227416992,
+ "learning_rate": 4.766788766788767e-05,
+ "loss": 0.5702,
+ "step": 779
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 30.03589630126953,
+ "learning_rate": 4.764346764346765e-05,
+ "loss": 0.8644,
+ "step": 780
+ },
+ {
+ "epoch": 2.860805860805861,
+ "grad_norm": 21.079742431640625,
+ "learning_rate": 4.761904761904762e-05,
+ "loss": 0.2304,
+ "step": 781
+ },
+ {
+ "epoch": 2.8644688644688645,
+ "grad_norm": 18.438365936279297,
+ "learning_rate": 4.75946275946276e-05,
+ "loss": 0.6457,
+ "step": 782
+ },
+ {
+ "epoch": 2.868131868131868,
+ "grad_norm": 16.265140533447266,
+ "learning_rate": 4.757020757020757e-05,
+ "loss": 0.3693,
+ "step": 783
+ },
+ {
+ "epoch": 2.871794871794872,
+ "grad_norm": 17.526954650878906,
+ "learning_rate": 4.754578754578754e-05,
+ "loss": 0.2614,
+ "step": 784
+ },
+ {
+ "epoch": 2.8754578754578755,
+ "grad_norm": 39.94060134887695,
+ "learning_rate": 4.752136752136752e-05,
+ "loss": 0.2829,
+ "step": 785
+ },
+ {
+ "epoch": 2.879120879120879,
+ "grad_norm": 10.09298324584961,
+ "learning_rate": 4.74969474969475e-05,
+ "loss": 0.1489,
+ "step": 786
+ },
+ {
+ "epoch": 2.8827838827838828,
+ "grad_norm": 29.092544555664062,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6063,
+ "step": 787
+ },
+ {
+ "epoch": 2.8864468864468864,
+ "grad_norm": 30.071422576904297,
+ "learning_rate": 4.744810744810745e-05,
+ "loss": 0.3154,
+ "step": 788
+ },
+ {
+ "epoch": 2.89010989010989,
+ "grad_norm": 26.271251678466797,
+ "learning_rate": 4.742368742368743e-05,
+ "loss": 0.4548,
+ "step": 789
+ },
+ {
+ "epoch": 2.8937728937728937,
+ "grad_norm": 32.386775970458984,
+ "learning_rate": 4.73992673992674e-05,
+ "loss": 0.1872,
+ "step": 790
+ },
+ {
+ "epoch": 2.8974358974358974,
+ "grad_norm": 31.18532943725586,
+ "learning_rate": 4.737484737484738e-05,
+ "loss": 0.847,
+ "step": 791
+ },
+ {
+ "epoch": 2.901098901098901,
+ "grad_norm": 17.924785614013672,
+ "learning_rate": 4.7350427350427356e-05,
+ "loss": 0.1588,
+ "step": 792
+ },
+ {
+ "epoch": 2.9047619047619047,
+ "grad_norm": 16.458614349365234,
+ "learning_rate": 4.732600732600733e-05,
+ "loss": 0.1424,
+ "step": 793
+ },
+ {
+ "epoch": 2.9084249084249083,
+ "grad_norm": 50.29280471801758,
+ "learning_rate": 4.7301587301587306e-05,
+ "loss": 1.5482,
+ "step": 794
+ },
+ {
+ "epoch": 2.912087912087912,
+ "grad_norm": 58.37470245361328,
+ "learning_rate": 4.727716727716728e-05,
+ "loss": 1.8242,
+ "step": 795
+ },
+ {
+ "epoch": 2.9157509157509156,
+ "grad_norm": 32.5267448425293,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 1.1197,
+ "step": 796
+ },
+ {
+ "epoch": 2.9194139194139193,
+ "grad_norm": 43.77764892578125,
+ "learning_rate": 4.722832722832723e-05,
+ "loss": 0.7322,
+ "step": 797
+ },
+ {
+ "epoch": 2.9230769230769234,
+ "grad_norm": 25.303524017333984,
+ "learning_rate": 4.720390720390721e-05,
+ "loss": 0.6557,
+ "step": 798
+ },
+ {
+ "epoch": 2.926739926739927,
+ "grad_norm": 23.90159797668457,
+ "learning_rate": 4.717948717948718e-05,
+ "loss": 0.2669,
+ "step": 799
+ },
+ {
+ "epoch": 2.9304029304029307,
+ "grad_norm": 21.20945930480957,
+ "learning_rate": 4.715506715506716e-05,
+ "loss": 0.3279,
+ "step": 800
+ },
+ {
+ "epoch": 2.9340659340659343,
+ "grad_norm": 28.819482803344727,
+ "learning_rate": 4.713064713064713e-05,
+ "loss": 0.717,
+ "step": 801
+ },
+ {
+ "epoch": 2.937728937728938,
+ "grad_norm": 9.13611125946045,
+ "learning_rate": 4.710622710622711e-05,
+ "loss": 0.1291,
+ "step": 802
+ },
+ {
+ "epoch": 2.9413919413919416,
+ "grad_norm": 22.16252326965332,
+ "learning_rate": 4.7081807081807085e-05,
+ "loss": 0.4406,
+ "step": 803
+ },
+ {
+ "epoch": 2.9450549450549453,
+ "grad_norm": 47.73503112792969,
+ "learning_rate": 4.705738705738706e-05,
+ "loss": 0.6176,
+ "step": 804
+ },
+ {
+ "epoch": 2.948717948717949,
+ "grad_norm": 61.73493576049805,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.581,
+ "step": 805
+ },
+ {
+ "epoch": 2.9523809523809526,
+ "grad_norm": 22.48004722595215,
+ "learning_rate": 4.7008547008547014e-05,
+ "loss": 0.7404,
+ "step": 806
+ },
+ {
+ "epoch": 2.956043956043956,
+ "grad_norm": 54.2432746887207,
+ "learning_rate": 4.6984126984126986e-05,
+ "loss": 1.1522,
+ "step": 807
+ },
+ {
+ "epoch": 2.95970695970696,
+ "grad_norm": 26.221921920776367,
+ "learning_rate": 4.695970695970696e-05,
+ "loss": 0.4869,
+ "step": 808
+ },
+ {
+ "epoch": 2.9633699633699635,
+ "grad_norm": 21.688526153564453,
+ "learning_rate": 4.6935286935286936e-05,
+ "loss": 0.6639,
+ "step": 809
+ },
+ {
+ "epoch": 2.967032967032967,
+ "grad_norm": 5.81218147277832,
+ "learning_rate": 4.691086691086691e-05,
+ "loss": 0.0824,
+ "step": 810
+ },
+ {
+ "epoch": 2.970695970695971,
+ "grad_norm": 39.09580612182617,
+ "learning_rate": 4.6886446886446886e-05,
+ "loss": 1.5035,
+ "step": 811
+ },
+ {
+ "epoch": 2.9743589743589745,
+ "grad_norm": 24.587574005126953,
+ "learning_rate": 4.6862026862026864e-05,
+ "loss": 1.1107,
+ "step": 812
+ },
+ {
+ "epoch": 2.978021978021978,
+ "grad_norm": 25.25336265563965,
+ "learning_rate": 4.6837606837606836e-05,
+ "loss": 0.7764,
+ "step": 813
+ },
+ {
+ "epoch": 2.9816849816849818,
+ "grad_norm": 16.311378479003906,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.4079,
+ "step": 814
+ },
+ {
+ "epoch": 2.9853479853479854,
+ "grad_norm": 19.0888729095459,
+ "learning_rate": 4.678876678876679e-05,
+ "loss": 0.5259,
+ "step": 815
+ },
+ {
+ "epoch": 2.989010989010989,
+ "grad_norm": 24.599462509155273,
+ "learning_rate": 4.6764346764346765e-05,
+ "loss": 0.7475,
+ "step": 816
+ },
+ {
+ "epoch": 2.9926739926739927,
+ "grad_norm": 20.4777889251709,
+ "learning_rate": 4.673992673992674e-05,
+ "loss": 0.356,
+ "step": 817
+ },
+ {
+ "epoch": 2.9963369963369964,
+ "grad_norm": 30.4327449798584,
+ "learning_rate": 4.671550671550672e-05,
+ "loss": 0.7958,
+ "step": 818
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 25.57271385192871,
+ "learning_rate": 4.669108669108669e-05,
+ "loss": 0.3918,
+ "step": 819
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 3.9672563076019287,
+ "learning_rate": 4.666666666666667e-05,
+ "loss": 0.0469,
+ "step": 820
+ },
+ {
+ "epoch": 3.0073260073260073,
+ "grad_norm": 6.657567501068115,
+ "learning_rate": 4.664224664224664e-05,
+ "loss": 0.0939,
+ "step": 821
+ },
+ {
+ "epoch": 3.010989010989011,
+ "grad_norm": 12.558409690856934,
+ "learning_rate": 4.6617826617826615e-05,
+ "loss": 0.1578,
+ "step": 822
+ },
+ {
+ "epoch": 3.0146520146520146,
+ "grad_norm": 18.909244537353516,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.3209,
+ "step": 823
+ },
+ {
+ "epoch": 3.0183150183150182,
+ "grad_norm": 10.995687484741211,
+ "learning_rate": 4.656898656898657e-05,
+ "loss": 0.1198,
+ "step": 824
+ },
+ {
+ "epoch": 3.021978021978022,
+ "grad_norm": 16.14252471923828,
+ "learning_rate": 4.6544566544566544e-05,
+ "loss": 0.1431,
+ "step": 825
+ },
+ {
+ "epoch": 3.0256410256410255,
+ "grad_norm": 25.924381256103516,
+ "learning_rate": 4.652014652014652e-05,
+ "loss": 0.3989,
+ "step": 826
+ },
+ {
+ "epoch": 3.029304029304029,
+ "grad_norm": 4.87798547744751,
+ "learning_rate": 4.6495726495726494e-05,
+ "loss": 0.0472,
+ "step": 827
+ },
+ {
+ "epoch": 3.032967032967033,
+ "grad_norm": 15.078110694885254,
+ "learning_rate": 4.647130647130647e-05,
+ "loss": 0.1955,
+ "step": 828
+ },
+ {
+ "epoch": 3.0366300366300365,
+ "grad_norm": 19.74415397644043,
+ "learning_rate": 4.644688644688645e-05,
+ "loss": 0.1593,
+ "step": 829
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 43.4788818359375,
+ "learning_rate": 4.642246642246642e-05,
+ "loss": 0.7917,
+ "step": 830
+ },
+ {
+ "epoch": 3.043956043956044,
+ "grad_norm": 27.122041702270508,
+ "learning_rate": 4.63980463980464e-05,
+ "loss": 0.1693,
+ "step": 831
+ },
+ {
+ "epoch": 3.0476190476190474,
+ "grad_norm": 9.51154899597168,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 0.0806,
+ "step": 832
+ },
+ {
+ "epoch": 3.051282051282051,
+ "grad_norm": 11.48532772064209,
+ "learning_rate": 4.634920634920635e-05,
+ "loss": 0.0815,
+ "step": 833
+ },
+ {
+ "epoch": 3.0549450549450547,
+ "grad_norm": 13.547063827514648,
+ "learning_rate": 4.632478632478632e-05,
+ "loss": 0.0817,
+ "step": 834
+ },
+ {
+ "epoch": 3.0586080586080584,
+ "grad_norm": 24.334409713745117,
+ "learning_rate": 4.63003663003663e-05,
+ "loss": 0.547,
+ "step": 835
+ },
+ {
+ "epoch": 3.062271062271062,
+ "grad_norm": 87.3517837524414,
+ "learning_rate": 4.627594627594627e-05,
+ "loss": 0.6534,
+ "step": 836
+ },
+ {
+ "epoch": 3.065934065934066,
+ "grad_norm": 16.100278854370117,
+ "learning_rate": 4.625152625152625e-05,
+ "loss": 0.2961,
+ "step": 837
+ },
+ {
+ "epoch": 3.06959706959707,
+ "grad_norm": 20.725875854492188,
+ "learning_rate": 4.622710622710623e-05,
+ "loss": 0.1114,
+ "step": 838
+ },
+ {
+ "epoch": 3.0732600732600734,
+ "grad_norm": 53.809722900390625,
+ "learning_rate": 4.62026862026862e-05,
+ "loss": 0.3808,
+ "step": 839
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 3.237959623336792,
+ "learning_rate": 4.617826617826618e-05,
+ "loss": 0.019,
+ "step": 840
+ },
+ {
+ "epoch": 3.0805860805860807,
+ "grad_norm": 69.71659088134766,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 1.0945,
+ "step": 841
+ },
+ {
+ "epoch": 3.0842490842490844,
+ "grad_norm": 31.005935668945312,
+ "learning_rate": 4.612942612942613e-05,
+ "loss": 0.3241,
+ "step": 842
+ },
+ {
+ "epoch": 3.087912087912088,
+ "grad_norm": 66.98394775390625,
+ "learning_rate": 4.610500610500611e-05,
+ "loss": 1.0213,
+ "step": 843
+ },
+ {
+ "epoch": 3.0915750915750917,
+ "grad_norm": 23.54532814025879,
+ "learning_rate": 4.608058608058609e-05,
+ "loss": 0.2188,
+ "step": 844
+ },
+ {
+ "epoch": 3.0952380952380953,
+ "grad_norm": 25.952709197998047,
+ "learning_rate": 4.605616605616606e-05,
+ "loss": 0.4305,
+ "step": 845
+ },
+ {
+ "epoch": 3.098901098901099,
+ "grad_norm": 36.100746154785156,
+ "learning_rate": 4.603174603174604e-05,
+ "loss": 0.6497,
+ "step": 846
+ },
+ {
+ "epoch": 3.1025641025641026,
+ "grad_norm": 60.34727478027344,
+ "learning_rate": 4.600732600732601e-05,
+ "loss": 0.3083,
+ "step": 847
+ },
+ {
+ "epoch": 3.1062271062271063,
+ "grad_norm": 35.265167236328125,
+ "learning_rate": 4.598290598290598e-05,
+ "loss": 0.3222,
+ "step": 848
+ },
+ {
+ "epoch": 3.10989010989011,
+ "grad_norm": 19.180070877075195,
+ "learning_rate": 4.595848595848596e-05,
+ "loss": 0.4065,
+ "step": 849
+ },
+ {
+ "epoch": 3.1135531135531136,
+ "grad_norm": 22.92152976989746,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.3998,
+ "step": 850
+ },
+ {
+ "epoch": 3.1172161172161172,
+ "grad_norm": 48.91377639770508,
+ "learning_rate": 4.590964590964591e-05,
+ "loss": 0.7035,
+ "step": 851
+ },
+ {
+ "epoch": 3.120879120879121,
+ "grad_norm": 11.615083694458008,
+ "learning_rate": 4.588522588522589e-05,
+ "loss": 0.3102,
+ "step": 852
+ },
+ {
+ "epoch": 3.1245421245421245,
+ "grad_norm": 23.573801040649414,
+ "learning_rate": 4.586080586080586e-05,
+ "loss": 0.3358,
+ "step": 853
+ },
+ {
+ "epoch": 3.128205128205128,
+ "grad_norm": 16.903776168823242,
+ "learning_rate": 4.583638583638584e-05,
+ "loss": 0.2973,
+ "step": 854
+ },
+ {
+ "epoch": 3.131868131868132,
+ "grad_norm": 6.052688121795654,
+ "learning_rate": 4.5811965811965816e-05,
+ "loss": 0.0671,
+ "step": 855
+ },
+ {
+ "epoch": 3.1355311355311355,
+ "grad_norm": 34.40020751953125,
+ "learning_rate": 4.578754578754579e-05,
+ "loss": 0.508,
+ "step": 856
+ },
+ {
+ "epoch": 3.139194139194139,
+ "grad_norm": 21.39589500427246,
+ "learning_rate": 4.5763125763125766e-05,
+ "loss": 0.0805,
+ "step": 857
+ },
+ {
+ "epoch": 3.142857142857143,
+ "grad_norm": 24.03894805908203,
+ "learning_rate": 4.5738705738705744e-05,
+ "loss": 0.1884,
+ "step": 858
+ },
+ {
+ "epoch": 3.1465201465201464,
+ "grad_norm": 66.53777313232422,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 0.5235,
+ "step": 859
+ },
+ {
+ "epoch": 3.15018315018315,
+ "grad_norm": 33.663490295410156,
+ "learning_rate": 4.568986568986569e-05,
+ "loss": 0.7579,
+ "step": 860
+ },
+ {
+ "epoch": 3.1538461538461537,
+ "grad_norm": 30.173309326171875,
+ "learning_rate": 4.5665445665445666e-05,
+ "loss": 0.2263,
+ "step": 861
+ },
+ {
+ "epoch": 3.1575091575091574,
+ "grad_norm": 37.52082824707031,
+ "learning_rate": 4.564102564102564e-05,
+ "loss": 0.5695,
+ "step": 862
+ },
+ {
+ "epoch": 3.161172161172161,
+ "grad_norm": 38.86849594116211,
+ "learning_rate": 4.5616605616605616e-05,
+ "loss": 0.6981,
+ "step": 863
+ },
+ {
+ "epoch": 3.1648351648351647,
+ "grad_norm": 42.702247619628906,
+ "learning_rate": 4.5592185592185595e-05,
+ "loss": 0.9864,
+ "step": 864
+ },
+ {
+ "epoch": 3.1684981684981683,
+ "grad_norm": 16.60870361328125,
+ "learning_rate": 4.5567765567765566e-05,
+ "loss": 0.1595,
+ "step": 865
+ },
+ {
+ "epoch": 3.172161172161172,
+ "grad_norm": 26.309768676757812,
+ "learning_rate": 4.5543345543345545e-05,
+ "loss": 0.4028,
+ "step": 866
+ },
+ {
+ "epoch": 3.1758241758241756,
+ "grad_norm": 45.7955322265625,
+ "learning_rate": 4.551892551892552e-05,
+ "loss": 1.1258,
+ "step": 867
+ },
+ {
+ "epoch": 3.1794871794871793,
+ "grad_norm": 25.780302047729492,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.4018,
+ "step": 868
+ },
+ {
+ "epoch": 3.183150183150183,
+ "grad_norm": 41.65156555175781,
+ "learning_rate": 4.5470085470085474e-05,
+ "loss": 0.4543,
+ "step": 869
+ },
+ {
+ "epoch": 3.186813186813187,
+ "grad_norm": 56.92537307739258,
+ "learning_rate": 4.544566544566545e-05,
+ "loss": 0.334,
+ "step": 870
+ },
+ {
+ "epoch": 3.1904761904761907,
+ "grad_norm": 19.44786262512207,
+ "learning_rate": 4.5421245421245424e-05,
+ "loss": 0.2855,
+ "step": 871
+ },
+ {
+ "epoch": 3.1941391941391943,
+ "grad_norm": 19.75824546813965,
+ "learning_rate": 4.53968253968254e-05,
+ "loss": 0.2589,
+ "step": 872
+ },
+ {
+ "epoch": 3.197802197802198,
+ "grad_norm": 30.935569763183594,
+ "learning_rate": 4.5372405372405374e-05,
+ "loss": 0.5083,
+ "step": 873
+ },
+ {
+ "epoch": 3.2014652014652016,
+ "grad_norm": 32.59378433227539,
+ "learning_rate": 4.5347985347985345e-05,
+ "loss": 0.6806,
+ "step": 874
+ },
+ {
+ "epoch": 3.2051282051282053,
+ "grad_norm": 32.7809944152832,
+ "learning_rate": 4.5323565323565324e-05,
+ "loss": 0.7094,
+ "step": 875
+ },
+ {
+ "epoch": 3.208791208791209,
+ "grad_norm": 22.95226287841797,
+ "learning_rate": 4.5299145299145296e-05,
+ "loss": 0.3871,
+ "step": 876
+ },
+ {
+ "epoch": 3.2124542124542126,
+ "grad_norm": 13.90613079071045,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.2049,
+ "step": 877
+ },
+ {
+ "epoch": 3.2161172161172162,
+ "grad_norm": 36.79647445678711,
+ "learning_rate": 4.525030525030525e-05,
+ "loss": 0.959,
+ "step": 878
+ },
+ {
+ "epoch": 3.21978021978022,
+ "grad_norm": 16.770553588867188,
+ "learning_rate": 4.5225885225885224e-05,
+ "loss": 0.3061,
+ "step": 879
+ },
+ {
+ "epoch": 3.2234432234432235,
+ "grad_norm": 22.241527557373047,
+ "learning_rate": 4.52014652014652e-05,
+ "loss": 0.1961,
+ "step": 880
+ },
+ {
+ "epoch": 3.227106227106227,
+ "grad_norm": 51.097957611083984,
+ "learning_rate": 4.517704517704518e-05,
+ "loss": 0.5272,
+ "step": 881
+ },
+ {
+ "epoch": 3.230769230769231,
+ "grad_norm": 43.70039749145508,
+ "learning_rate": 4.515262515262515e-05,
+ "loss": 0.6764,
+ "step": 882
+ },
+ {
+ "epoch": 3.2344322344322345,
+ "grad_norm": 30.666664123535156,
+ "learning_rate": 4.512820512820513e-05,
+ "loss": 0.6524,
+ "step": 883
+ },
+ {
+ "epoch": 3.238095238095238,
+ "grad_norm": 16.787954330444336,
+ "learning_rate": 4.510378510378511e-05,
+ "loss": 0.178,
+ "step": 884
+ },
+ {
+ "epoch": 3.241758241758242,
+ "grad_norm": 32.14992904663086,
+ "learning_rate": 4.507936507936508e-05,
+ "loss": 0.6206,
+ "step": 885
+ },
+ {
+ "epoch": 3.2454212454212454,
+ "grad_norm": 24.926103591918945,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.4696,
+ "step": 886
+ },
+ {
+ "epoch": 3.249084249084249,
+ "grad_norm": 31.044967651367188,
+ "learning_rate": 4.503052503052503e-05,
+ "loss": 0.3021,
+ "step": 887
+ },
+ {
+ "epoch": 3.2527472527472527,
+ "grad_norm": 10.355696678161621,
+ "learning_rate": 4.5006105006105e-05,
+ "loss": 0.0784,
+ "step": 888
+ },
+ {
+ "epoch": 3.2564102564102564,
+ "grad_norm": 28.19644546508789,
+ "learning_rate": 4.498168498168498e-05,
+ "loss": 0.234,
+ "step": 889
+ },
+ {
+ "epoch": 3.26007326007326,
+ "grad_norm": 21.245389938354492,
+ "learning_rate": 4.495726495726496e-05,
+ "loss": 0.2895,
+ "step": 890
+ },
+ {
+ "epoch": 3.2637362637362637,
+ "grad_norm": 27.337587356567383,
+ "learning_rate": 4.493284493284493e-05,
+ "loss": 0.4614,
+ "step": 891
+ },
+ {
+ "epoch": 3.2673992673992673,
+ "grad_norm": 37.06135177612305,
+ "learning_rate": 4.490842490842491e-05,
+ "loss": 0.2717,
+ "step": 892
+ },
+ {
+ "epoch": 3.271062271062271,
+ "grad_norm": 26.85171890258789,
+ "learning_rate": 4.488400488400489e-05,
+ "loss": 0.4965,
+ "step": 893
+ },
+ {
+ "epoch": 3.2747252747252746,
+ "grad_norm": 41.79130935668945,
+ "learning_rate": 4.485958485958486e-05,
+ "loss": 0.4209,
+ "step": 894
+ },
+ {
+ "epoch": 3.2783882783882783,
+ "grad_norm": 32.75770950317383,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 0.5126,
+ "step": 895
+ },
+ {
+ "epoch": 3.282051282051282,
+ "grad_norm": 67.75275421142578,
+ "learning_rate": 4.481074481074482e-05,
+ "loss": 0.8257,
+ "step": 896
+ },
+ {
+ "epoch": 3.2857142857142856,
+ "grad_norm": 36.773319244384766,
+ "learning_rate": 4.478632478632479e-05,
+ "loss": 1.6113,
+ "step": 897
+ },
+ {
+ "epoch": 3.2893772893772892,
+ "grad_norm": 60.94101333618164,
+ "learning_rate": 4.476190476190476e-05,
+ "loss": 0.7996,
+ "step": 898
+ },
+ {
+ "epoch": 3.293040293040293,
+ "grad_norm": 45.40288162231445,
+ "learning_rate": 4.473748473748474e-05,
+ "loss": 0.7139,
+ "step": 899
+ },
+ {
+ "epoch": 3.2967032967032965,
+ "grad_norm": 27.4019718170166,
+ "learning_rate": 4.471306471306471e-05,
+ "loss": 0.4695,
+ "step": 900
+ },
+ {
+ "epoch": 3.3003663003663,
+ "grad_norm": 20.126493453979492,
+ "learning_rate": 4.468864468864469e-05,
+ "loss": 0.2181,
+ "step": 901
+ },
+ {
+ "epoch": 3.304029304029304,
+ "grad_norm": 37.28034591674805,
+ "learning_rate": 4.466422466422466e-05,
+ "loss": 0.8902,
+ "step": 902
+ },
+ {
+ "epoch": 3.3076923076923075,
+ "grad_norm": 15.40217113494873,
+ "learning_rate": 4.463980463980464e-05,
+ "loss": 0.2428,
+ "step": 903
+ },
+ {
+ "epoch": 3.311355311355311,
+ "grad_norm": 21.924699783325195,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 0.3271,
+ "step": 904
+ },
+ {
+ "epoch": 3.315018315018315,
+ "grad_norm": 29.787410736083984,
+ "learning_rate": 4.459096459096459e-05,
+ "loss": 0.5914,
+ "step": 905
+ },
+ {
+ "epoch": 3.3186813186813184,
+ "grad_norm": 16.91995620727539,
+ "learning_rate": 4.456654456654457e-05,
+ "loss": 0.3442,
+ "step": 906
+ },
+ {
+ "epoch": 3.3223443223443225,
+ "grad_norm": 13.232250213623047,
+ "learning_rate": 4.4542124542124546e-05,
+ "loss": 0.1977,
+ "step": 907
+ },
+ {
+ "epoch": 3.326007326007326,
+ "grad_norm": 25.45724868774414,
+ "learning_rate": 4.451770451770452e-05,
+ "loss": 0.8241,
+ "step": 908
+ },
+ {
+ "epoch": 3.32967032967033,
+ "grad_norm": 20.996292114257812,
+ "learning_rate": 4.4493284493284496e-05,
+ "loss": 0.3154,
+ "step": 909
+ },
+ {
+ "epoch": 3.3333333333333335,
+ "grad_norm": 28.150684356689453,
+ "learning_rate": 4.4468864468864475e-05,
+ "loss": 0.4077,
+ "step": 910
+ },
+ {
+ "epoch": 3.336996336996337,
+ "grad_norm": 57.184322357177734,
+ "learning_rate": 4.444444444444444e-05,
+ "loss": 0.5701,
+ "step": 911
+ },
+ {
+ "epoch": 3.340659340659341,
+ "grad_norm": 26.231369018554688,
+ "learning_rate": 4.442002442002442e-05,
+ "loss": 0.4427,
+ "step": 912
+ },
+ {
+ "epoch": 3.3443223443223444,
+ "grad_norm": 32.52253723144531,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 1.014,
+ "step": 913
+ },
+ {
+ "epoch": 3.347985347985348,
+ "grad_norm": 19.39035987854004,
+ "learning_rate": 4.437118437118437e-05,
+ "loss": 0.1567,
+ "step": 914
+ },
+ {
+ "epoch": 3.3516483516483517,
+ "grad_norm": 24.542327880859375,
+ "learning_rate": 4.434676434676435e-05,
+ "loss": 0.5478,
+ "step": 915
+ },
+ {
+ "epoch": 3.3553113553113554,
+ "grad_norm": 46.6158447265625,
+ "learning_rate": 4.4322344322344325e-05,
+ "loss": 0.5636,
+ "step": 916
+ },
+ {
+ "epoch": 3.358974358974359,
+ "grad_norm": 36.008846282958984,
+ "learning_rate": 4.42979242979243e-05,
+ "loss": 0.4401,
+ "step": 917
+ },
+ {
+ "epoch": 3.3626373626373627,
+ "grad_norm": 6.922544956207275,
+ "learning_rate": 4.4273504273504275e-05,
+ "loss": 0.0885,
+ "step": 918
+ },
+ {
+ "epoch": 3.3663003663003663,
+ "grad_norm": 25.707748413085938,
+ "learning_rate": 4.4249084249084254e-05,
+ "loss": 0.3235,
+ "step": 919
+ },
+ {
+ "epoch": 3.36996336996337,
+ "grad_norm": 47.98778533935547,
+ "learning_rate": 4.4224664224664226e-05,
+ "loss": 1.3738,
+ "step": 920
+ },
+ {
+ "epoch": 3.3736263736263736,
+ "grad_norm": 26.64824104309082,
+ "learning_rate": 4.4200244200244204e-05,
+ "loss": 0.8405,
+ "step": 921
+ },
+ {
+ "epoch": 3.3772893772893773,
+ "grad_norm": 30.66206169128418,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.3021,
+ "step": 922
+ },
+ {
+ "epoch": 3.380952380952381,
+ "grad_norm": 33.15909194946289,
+ "learning_rate": 4.4151404151404154e-05,
+ "loss": 0.3064,
+ "step": 923
+ },
+ {
+ "epoch": 3.3846153846153846,
+ "grad_norm": 78.46485137939453,
+ "learning_rate": 4.4126984126984126e-05,
+ "loss": 0.6526,
+ "step": 924
+ },
+ {
+ "epoch": 3.3882783882783882,
+ "grad_norm": 45.584747314453125,
+ "learning_rate": 4.4102564102564104e-05,
+ "loss": 0.9546,
+ "step": 925
+ },
+ {
+ "epoch": 3.391941391941392,
+ "grad_norm": 23.244487762451172,
+ "learning_rate": 4.4078144078144076e-05,
+ "loss": 0.3334,
+ "step": 926
+ },
+ {
+ "epoch": 3.3956043956043955,
+ "grad_norm": 9.296119689941406,
+ "learning_rate": 4.4053724053724054e-05,
+ "loss": 0.1045,
+ "step": 927
+ },
+ {
+ "epoch": 3.399267399267399,
+ "grad_norm": 15.207316398620605,
+ "learning_rate": 4.4029304029304026e-05,
+ "loss": 0.087,
+ "step": 928
+ },
+ {
+ "epoch": 3.402930402930403,
+ "grad_norm": 20.554912567138672,
+ "learning_rate": 4.4004884004884005e-05,
+ "loss": 0.2658,
+ "step": 929
+ },
+ {
+ "epoch": 3.4065934065934065,
+ "grad_norm": 25.304515838623047,
+ "learning_rate": 4.398046398046398e-05,
+ "loss": 0.2862,
+ "step": 930
+ },
+ {
+ "epoch": 3.41025641025641,
+ "grad_norm": 44.320377349853516,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 1.1972,
+ "step": 931
+ },
+ {
+ "epoch": 3.413919413919414,
+ "grad_norm": 21.3024845123291,
+ "learning_rate": 4.393162393162393e-05,
+ "loss": 0.2193,
+ "step": 932
+ },
+ {
+ "epoch": 3.4175824175824174,
+ "grad_norm": 12.274759292602539,
+ "learning_rate": 4.390720390720391e-05,
+ "loss": 0.1033,
+ "step": 933
+ },
+ {
+ "epoch": 3.421245421245421,
+ "grad_norm": 29.188446044921875,
+ "learning_rate": 4.388278388278388e-05,
+ "loss": 0.8143,
+ "step": 934
+ },
+ {
+ "epoch": 3.4249084249084247,
+ "grad_norm": 11.880194664001465,
+ "learning_rate": 4.385836385836386e-05,
+ "loss": 0.0932,
+ "step": 935
+ },
+ {
+ "epoch": 3.4285714285714284,
+ "grad_norm": 28.859825134277344,
+ "learning_rate": 4.383394383394384e-05,
+ "loss": 0.6026,
+ "step": 936
+ },
+ {
+ "epoch": 3.4322344322344325,
+ "grad_norm": 25.131824493408203,
+ "learning_rate": 4.3809523809523805e-05,
+ "loss": 0.4023,
+ "step": 937
+ },
+ {
+ "epoch": 3.435897435897436,
+ "grad_norm": 35.04637145996094,
+ "learning_rate": 4.3785103785103783e-05,
+ "loss": 0.7765,
+ "step": 938
+ },
+ {
+ "epoch": 3.4395604395604398,
+ "grad_norm": 15.831666946411133,
+ "learning_rate": 4.376068376068376e-05,
+ "loss": 0.1779,
+ "step": 939
+ },
+ {
+ "epoch": 3.4432234432234434,
+ "grad_norm": 26.455148696899414,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.3165,
+ "step": 940
+ },
+ {
+ "epoch": 3.446886446886447,
+ "grad_norm": 23.840030670166016,
+ "learning_rate": 4.371184371184371e-05,
+ "loss": 0.5363,
+ "step": 941
+ },
+ {
+ "epoch": 3.4505494505494507,
+ "grad_norm": 30.517026901245117,
+ "learning_rate": 4.368742368742369e-05,
+ "loss": 0.422,
+ "step": 942
+ },
+ {
+ "epoch": 3.4542124542124544,
+ "grad_norm": 51.574703216552734,
+ "learning_rate": 4.366300366300366e-05,
+ "loss": 1.5333,
+ "step": 943
+ },
+ {
+ "epoch": 3.457875457875458,
+ "grad_norm": 57.92119216918945,
+ "learning_rate": 4.363858363858364e-05,
+ "loss": 0.5732,
+ "step": 944
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 34.3664436340332,
+ "learning_rate": 4.361416361416362e-05,
+ "loss": 0.5054,
+ "step": 945
+ },
+ {
+ "epoch": 3.4652014652014653,
+ "grad_norm": 14.034111976623535,
+ "learning_rate": 4.358974358974359e-05,
+ "loss": 0.0969,
+ "step": 946
+ },
+ {
+ "epoch": 3.468864468864469,
+ "grad_norm": 15.058267593383789,
+ "learning_rate": 4.356532356532357e-05,
+ "loss": 0.1877,
+ "step": 947
+ },
+ {
+ "epoch": 3.4725274725274726,
+ "grad_norm": 18.598024368286133,
+ "learning_rate": 4.354090354090355e-05,
+ "loss": 0.2378,
+ "step": 948
+ },
+ {
+ "epoch": 3.4761904761904763,
+ "grad_norm": 17.926319122314453,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.2935,
+ "step": 949
+ },
+ {
+ "epoch": 3.47985347985348,
+ "grad_norm": 8.25291633605957,
+ "learning_rate": 4.349206349206349e-05,
+ "loss": 0.0891,
+ "step": 950
+ },
+ {
+ "epoch": 3.4835164835164836,
+ "grad_norm": 26.152061462402344,
+ "learning_rate": 4.346764346764347e-05,
+ "loss": 0.2798,
+ "step": 951
+ },
+ {
+ "epoch": 3.4871794871794872,
+ "grad_norm": 22.669677734375,
+ "learning_rate": 4.344322344322344e-05,
+ "loss": 0.506,
+ "step": 952
+ },
+ {
+ "epoch": 3.490842490842491,
+ "grad_norm": 18.439355850219727,
+ "learning_rate": 4.341880341880342e-05,
+ "loss": 0.3034,
+ "step": 953
+ },
+ {
+ "epoch": 3.4945054945054945,
+ "grad_norm": 30.48084259033203,
+ "learning_rate": 4.339438339438339e-05,
+ "loss": 0.4366,
+ "step": 954
+ },
+ {
+ "epoch": 3.498168498168498,
+ "grad_norm": 51.792381286621094,
+ "learning_rate": 4.336996336996337e-05,
+ "loss": 0.5214,
+ "step": 955
+ },
+ {
+ "epoch": 3.501831501831502,
+ "grad_norm": 44.70718002319336,
+ "learning_rate": 4.334554334554335e-05,
+ "loss": 0.7823,
+ "step": 956
+ },
+ {
+ "epoch": 3.5054945054945055,
+ "grad_norm": 42.00168991088867,
+ "learning_rate": 4.332112332112332e-05,
+ "loss": 0.9207,
+ "step": 957
+ },
+ {
+ "epoch": 3.509157509157509,
+ "grad_norm": 28.97800636291504,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.279,
+ "step": 958
+ },
+ {
+ "epoch": 3.5128205128205128,
+ "grad_norm": 21.902843475341797,
+ "learning_rate": 4.327228327228328e-05,
+ "loss": 0.1969,
+ "step": 959
+ },
+ {
+ "epoch": 3.5164835164835164,
+ "grad_norm": 14.560053825378418,
+ "learning_rate": 4.324786324786325e-05,
+ "loss": 0.0976,
+ "step": 960
+ },
+ {
+ "epoch": 3.52014652014652,
+ "grad_norm": 4.2637104988098145,
+ "learning_rate": 4.322344322344323e-05,
+ "loss": 0.0277,
+ "step": 961
+ },
+ {
+ "epoch": 3.5238095238095237,
+ "grad_norm": 52.4840202331543,
+ "learning_rate": 4.3199023199023205e-05,
+ "loss": 0.2967,
+ "step": 962
+ },
+ {
+ "epoch": 3.5274725274725274,
+ "grad_norm": 48.95661163330078,
+ "learning_rate": 4.317460317460317e-05,
+ "loss": 0.2904,
+ "step": 963
+ },
+ {
+ "epoch": 3.531135531135531,
+ "grad_norm": 79.46379089355469,
+ "learning_rate": 4.315018315018315e-05,
+ "loss": 0.1644,
+ "step": 964
+ },
+ {
+ "epoch": 3.5347985347985347,
+ "grad_norm": 29.678428649902344,
+ "learning_rate": 4.312576312576313e-05,
+ "loss": 0.3498,
+ "step": 965
+ },
+ {
+ "epoch": 3.5384615384615383,
+ "grad_norm": 32.71342086791992,
+ "learning_rate": 4.31013431013431e-05,
+ "loss": 0.3509,
+ "step": 966
+ },
+ {
+ "epoch": 3.542124542124542,
+ "grad_norm": 6.679911136627197,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.0658,
+ "step": 967
+ },
+ {
+ "epoch": 3.5457875457875456,
+ "grad_norm": 19.8692626953125,
+ "learning_rate": 4.3052503052503056e-05,
+ "loss": 0.1626,
+ "step": 968
+ },
+ {
+ "epoch": 3.5494505494505493,
+ "grad_norm": 17.69087791442871,
+ "learning_rate": 4.302808302808303e-05,
+ "loss": 0.2592,
+ "step": 969
+ },
+ {
+ "epoch": 3.553113553113553,
+ "grad_norm": 11.734158515930176,
+ "learning_rate": 4.3003663003663006e-05,
+ "loss": 0.1007,
+ "step": 970
+ },
+ {
+ "epoch": 3.5567765567765566,
+ "grad_norm": 34.51172637939453,
+ "learning_rate": 4.2979242979242984e-05,
+ "loss": 0.2823,
+ "step": 971
+ },
+ {
+ "epoch": 3.5604395604395602,
+ "grad_norm": 15.009514808654785,
+ "learning_rate": 4.2954822954822956e-05,
+ "loss": 0.1203,
+ "step": 972
+ },
+ {
+ "epoch": 3.564102564102564,
+ "grad_norm": 67.92166137695312,
+ "learning_rate": 4.2930402930402934e-05,
+ "loss": 0.396,
+ "step": 973
+ },
+ {
+ "epoch": 3.5677655677655675,
+ "grad_norm": 66.84014129638672,
+ "learning_rate": 4.290598290598291e-05,
+ "loss": 0.6545,
+ "step": 974
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 25.811107635498047,
+ "learning_rate": 4.2881562881562885e-05,
+ "loss": 0.1747,
+ "step": 975
+ },
+ {
+ "epoch": 3.575091575091575,
+ "grad_norm": 100.88753509521484,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 0.3991,
+ "step": 976
+ },
+ {
+ "epoch": 3.578754578754579,
+ "grad_norm": 34.51667785644531,
+ "learning_rate": 4.2832722832722835e-05,
+ "loss": 0.1365,
+ "step": 977
+ },
+ {
+ "epoch": 3.5824175824175826,
+ "grad_norm": 26.852561950683594,
+ "learning_rate": 4.2808302808302806e-05,
+ "loss": 0.3627,
+ "step": 978
+ },
+ {
+ "epoch": 3.586080586080586,
+ "grad_norm": 24.968570709228516,
+ "learning_rate": 4.2783882783882785e-05,
+ "loss": 0.2106,
+ "step": 979
+ },
+ {
+ "epoch": 3.58974358974359,
+ "grad_norm": 27.33326530456543,
+ "learning_rate": 4.2759462759462757e-05,
+ "loss": 0.1758,
+ "step": 980
+ },
+ {
+ "epoch": 3.5934065934065935,
+ "grad_norm": 52.63814926147461,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.601,
+ "step": 981
+ },
+ {
+ "epoch": 3.597069597069597,
+ "grad_norm": 37.77897262573242,
+ "learning_rate": 4.2710622710622713e-05,
+ "loss": 0.5299,
+ "step": 982
+ },
+ {
+ "epoch": 3.600732600732601,
+ "grad_norm": 27.691659927368164,
+ "learning_rate": 4.2686202686202685e-05,
+ "loss": 0.1784,
+ "step": 983
+ },
+ {
+ "epoch": 3.6043956043956045,
+ "grad_norm": 106.33782958984375,
+ "learning_rate": 4.2661782661782664e-05,
+ "loss": 0.8859,
+ "step": 984
+ },
+ {
+ "epoch": 3.608058608058608,
+ "grad_norm": 22.95706558227539,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1611,
+ "step": 985
+ },
+ {
+ "epoch": 3.6117216117216118,
+ "grad_norm": 22.72148895263672,
+ "learning_rate": 4.2612942612942614e-05,
+ "loss": 0.1561,
+ "step": 986
+ },
+ {
+ "epoch": 3.6153846153846154,
+ "grad_norm": 93.37244415283203,
+ "learning_rate": 4.258852258852259e-05,
+ "loss": 0.4287,
+ "step": 987
+ },
+ {
+ "epoch": 3.619047619047619,
+ "grad_norm": 51.54584884643555,
+ "learning_rate": 4.2564102564102564e-05,
+ "loss": 0.6292,
+ "step": 988
+ },
+ {
+ "epoch": 3.6227106227106227,
+ "grad_norm": 61.58243942260742,
+ "learning_rate": 4.2539682539682536e-05,
+ "loss": 1.3205,
+ "step": 989
+ },
+ {
+ "epoch": 3.6263736263736264,
+ "grad_norm": 70.59432220458984,
+ "learning_rate": 4.2515262515262514e-05,
+ "loss": 0.7451,
+ "step": 990
+ },
+ {
+ "epoch": 3.63003663003663,
+ "grad_norm": 76.28730773925781,
+ "learning_rate": 4.249084249084249e-05,
+ "loss": 2.0314,
+ "step": 991
+ },
+ {
+ "epoch": 3.6336996336996337,
+ "grad_norm": 73.5402603149414,
+ "learning_rate": 4.2466422466422464e-05,
+ "loss": 1.6628,
+ "step": 992
+ },
+ {
+ "epoch": 3.6373626373626373,
+ "grad_norm": 75.8978042602539,
+ "learning_rate": 4.244200244200244e-05,
+ "loss": 1.652,
+ "step": 993
+ },
+ {
+ "epoch": 3.641025641025641,
+ "grad_norm": 37.04104232788086,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 1.5356,
+ "step": 994
+ },
+ {
+ "epoch": 3.6446886446886446,
+ "grad_norm": 34.31178283691406,
+ "learning_rate": 4.239316239316239e-05,
+ "loss": 1.1783,
+ "step": 995
+ },
+ {
+ "epoch": 3.6483516483516483,
+ "grad_norm": 22.934877395629883,
+ "learning_rate": 4.236874236874237e-05,
+ "loss": 1.2995,
+ "step": 996
+ },
+ {
+ "epoch": 3.652014652014652,
+ "grad_norm": 30.25251579284668,
+ "learning_rate": 4.234432234432235e-05,
+ "loss": 1.1304,
+ "step": 997
+ },
+ {
+ "epoch": 3.6556776556776556,
+ "grad_norm": 35.082027435302734,
+ "learning_rate": 4.231990231990232e-05,
+ "loss": 1.0827,
+ "step": 998
+ },
+ {
+ "epoch": 3.659340659340659,
+ "grad_norm": 24.526325225830078,
+ "learning_rate": 4.22954822954823e-05,
+ "loss": 0.8716,
+ "step": 999
+ },
+ {
+ "epoch": 3.663003663003663,
+ "grad_norm": 29.882883071899414,
+ "learning_rate": 4.227106227106228e-05,
+ "loss": 0.5432,
+ "step": 1000
+ },
+ {
+ "epoch": 3.6666666666666665,
+ "grad_norm": 34.53218078613281,
+ "learning_rate": 4.224664224664224e-05,
+ "loss": 1.2094,
+ "step": 1001
+ },
+ {
+ "epoch": 3.67032967032967,
+ "grad_norm": 22.50905990600586,
+ "learning_rate": 4.222222222222222e-05,
+ "loss": 0.4608,
+ "step": 1002
+ },
+ {
+ "epoch": 3.6739926739926743,
+ "grad_norm": 27.33183479309082,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.7181,
+ "step": 1003
+ },
+ {
+ "epoch": 3.677655677655678,
+ "grad_norm": 50.09929275512695,
+ "learning_rate": 4.217338217338217e-05,
+ "loss": 1.1163,
+ "step": 1004
+ },
+ {
+ "epoch": 3.6813186813186816,
+ "grad_norm": 32.48406982421875,
+ "learning_rate": 4.214896214896215e-05,
+ "loss": 0.7101,
+ "step": 1005
+ },
+ {
+ "epoch": 3.684981684981685,
+ "grad_norm": 5.821015357971191,
+ "learning_rate": 4.212454212454212e-05,
+ "loss": 0.0695,
+ "step": 1006
+ },
+ {
+ "epoch": 3.688644688644689,
+ "grad_norm": 32.04796600341797,
+ "learning_rate": 4.21001221001221e-05,
+ "loss": 0.609,
+ "step": 1007
+ },
+ {
+ "epoch": 3.6923076923076925,
+ "grad_norm": 37.282474517822266,
+ "learning_rate": 4.207570207570208e-05,
+ "loss": 0.873,
+ "step": 1008
+ },
+ {
+ "epoch": 3.695970695970696,
+ "grad_norm": 35.74583435058594,
+ "learning_rate": 4.205128205128205e-05,
+ "loss": 0.7387,
+ "step": 1009
+ },
+ {
+ "epoch": 3.6996336996337,
+ "grad_norm": 74.91361236572266,
+ "learning_rate": 4.202686202686203e-05,
+ "loss": 1.6302,
+ "step": 1010
+ },
+ {
+ "epoch": 3.7032967032967035,
+ "grad_norm": 25.163251876831055,
+ "learning_rate": 4.200244200244201e-05,
+ "loss": 0.3866,
+ "step": 1011
+ },
+ {
+ "epoch": 3.706959706959707,
+ "grad_norm": 34.36520004272461,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.8413,
+ "step": 1012
+ },
+ {
+ "epoch": 3.7106227106227108,
+ "grad_norm": 41.62683868408203,
+ "learning_rate": 4.195360195360196e-05,
+ "loss": 0.4155,
+ "step": 1013
+ },
+ {
+ "epoch": 3.7142857142857144,
+ "grad_norm": 34.24674987792969,
+ "learning_rate": 4.192918192918193e-05,
+ "loss": 0.8327,
+ "step": 1014
+ },
+ {
+ "epoch": 3.717948717948718,
+ "grad_norm": 27.771732330322266,
+ "learning_rate": 4.19047619047619e-05,
+ "loss": 0.4509,
+ "step": 1015
+ },
+ {
+ "epoch": 3.7216117216117217,
+ "grad_norm": 26.55430793762207,
+ "learning_rate": 4.188034188034188e-05,
+ "loss": 0.4851,
+ "step": 1016
+ },
+ {
+ "epoch": 3.7252747252747254,
+ "grad_norm": 34.8384895324707,
+ "learning_rate": 4.185592185592186e-05,
+ "loss": 0.4105,
+ "step": 1017
+ },
+ {
+ "epoch": 3.728937728937729,
+ "grad_norm": 29.447805404663086,
+ "learning_rate": 4.183150183150183e-05,
+ "loss": 0.4129,
+ "step": 1018
+ },
+ {
+ "epoch": 3.7326007326007327,
+ "grad_norm": 66.70004272460938,
+ "learning_rate": 4.180708180708181e-05,
+ "loss": 0.4762,
+ "step": 1019
+ },
+ {
+ "epoch": 3.7362637362637363,
+ "grad_norm": 10.356173515319824,
+ "learning_rate": 4.1782661782661786e-05,
+ "loss": 0.0718,
+ "step": 1020
+ },
+ {
+ "epoch": 3.73992673992674,
+ "grad_norm": 35.98944854736328,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 0.2672,
+ "step": 1021
+ },
+ {
+ "epoch": 3.7435897435897436,
+ "grad_norm": 6.806238651275635,
+ "learning_rate": 4.1733821733821736e-05,
+ "loss": 0.0455,
+ "step": 1022
+ },
+ {
+ "epoch": 3.7472527472527473,
+ "grad_norm": 19.689456939697266,
+ "learning_rate": 4.1709401709401715e-05,
+ "loss": 0.2323,
+ "step": 1023
+ },
+ {
+ "epoch": 3.750915750915751,
+ "grad_norm": 23.971303939819336,
+ "learning_rate": 4.1684981684981687e-05,
+ "loss": 0.1393,
+ "step": 1024
+ },
+ {
+ "epoch": 3.7545787545787546,
+ "grad_norm": 43.26774215698242,
+ "learning_rate": 4.1660561660561665e-05,
+ "loss": 0.7084,
+ "step": 1025
+ },
+ {
+ "epoch": 3.758241758241758,
+ "grad_norm": 36.04475402832031,
+ "learning_rate": 4.1636141636141643e-05,
+ "loss": 0.3782,
+ "step": 1026
+ },
+ {
+ "epoch": 3.761904761904762,
+ "grad_norm": 48.78522491455078,
+ "learning_rate": 4.161172161172161e-05,
+ "loss": 0.7698,
+ "step": 1027
+ },
+ {
+ "epoch": 3.7655677655677655,
+ "grad_norm": 11.876708984375,
+ "learning_rate": 4.158730158730159e-05,
+ "loss": 0.0943,
+ "step": 1028
+ },
+ {
+ "epoch": 3.769230769230769,
+ "grad_norm": 83.1320571899414,
+ "learning_rate": 4.1562881562881565e-05,
+ "loss": 0.8116,
+ "step": 1029
+ },
+ {
+ "epoch": 3.772893772893773,
+ "grad_norm": 22.412723541259766,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2086,
+ "step": 1030
+ },
+ {
+ "epoch": 3.7765567765567765,
+ "grad_norm": 11.011713981628418,
+ "learning_rate": 4.1514041514041515e-05,
+ "loss": 0.1001,
+ "step": 1031
+ },
+ {
+ "epoch": 3.78021978021978,
+ "grad_norm": 21.958040237426758,
+ "learning_rate": 4.148962148962149e-05,
+ "loss": 0.8457,
+ "step": 1032
+ },
+ {
+ "epoch": 3.7838827838827838,
+ "grad_norm": 57.3586540222168,
+ "learning_rate": 4.1465201465201465e-05,
+ "loss": 0.1605,
+ "step": 1033
+ },
+ {
+ "epoch": 3.7875457875457874,
+ "grad_norm": 24.261554718017578,
+ "learning_rate": 4.1440781440781444e-05,
+ "loss": 0.1854,
+ "step": 1034
+ },
+ {
+ "epoch": 3.791208791208791,
+ "grad_norm": 31.09326171875,
+ "learning_rate": 4.1416361416361416e-05,
+ "loss": 0.2874,
+ "step": 1035
+ },
+ {
+ "epoch": 3.7948717948717947,
+ "grad_norm": 8.3728666305542,
+ "learning_rate": 4.1391941391941394e-05,
+ "loss": 0.0496,
+ "step": 1036
+ },
+ {
+ "epoch": 3.7985347985347984,
+ "grad_norm": 47.5240592956543,
+ "learning_rate": 4.136752136752137e-05,
+ "loss": 0.2025,
+ "step": 1037
+ },
+ {
+ "epoch": 3.802197802197802,
+ "grad_norm": 51.25822448730469,
+ "learning_rate": 4.1343101343101344e-05,
+ "loss": 0.714,
+ "step": 1038
+ },
+ {
+ "epoch": 3.8058608058608057,
+ "grad_norm": 91.58492279052734,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 2.2889,
+ "step": 1039
+ },
+ {
+ "epoch": 3.8095238095238093,
+ "grad_norm": 4.206390857696533,
+ "learning_rate": 4.1294261294261294e-05,
+ "loss": 0.024,
+ "step": 1040
+ },
+ {
+ "epoch": 3.813186813186813,
+ "grad_norm": 58.49787139892578,
+ "learning_rate": 4.1269841269841266e-05,
+ "loss": 0.7162,
+ "step": 1041
+ },
+ {
+ "epoch": 3.8168498168498166,
+ "grad_norm": 33.38972091674805,
+ "learning_rate": 4.1245421245421244e-05,
+ "loss": 0.3064,
+ "step": 1042
+ },
+ {
+ "epoch": 3.8205128205128203,
+ "grad_norm": 53.251007080078125,
+ "learning_rate": 4.122100122100122e-05,
+ "loss": 0.7376,
+ "step": 1043
+ },
+ {
+ "epoch": 3.824175824175824,
+ "grad_norm": 28.314645767211914,
+ "learning_rate": 4.1196581196581195e-05,
+ "loss": 0.4608,
+ "step": 1044
+ },
+ {
+ "epoch": 3.8278388278388276,
+ "grad_norm": 538.0653076171875,
+ "learning_rate": 4.117216117216117e-05,
+ "loss": 1.5678,
+ "step": 1045
+ },
+ {
+ "epoch": 3.8315018315018317,
+ "grad_norm": 38.662925720214844,
+ "learning_rate": 4.114774114774115e-05,
+ "loss": 1.1084,
+ "step": 1046
+ },
+ {
+ "epoch": 3.8351648351648353,
+ "grad_norm": 31.877248764038086,
+ "learning_rate": 4.112332112332112e-05,
+ "loss": 0.9947,
+ "step": 1047
+ },
+ {
+ "epoch": 3.838827838827839,
+ "grad_norm": 50.17106628417969,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.8024,
+ "step": 1048
+ },
+ {
+ "epoch": 3.8424908424908426,
+ "grad_norm": 18.851001739501953,
+ "learning_rate": 4.107448107448108e-05,
+ "loss": 0.4245,
+ "step": 1049
+ },
+ {
+ "epoch": 3.8461538461538463,
+ "grad_norm": 35.91590881347656,
+ "learning_rate": 4.105006105006105e-05,
+ "loss": 1.1046,
+ "step": 1050
+ },
+ {
+ "epoch": 3.84981684981685,
+ "grad_norm": 24.618389129638672,
+ "learning_rate": 4.102564102564103e-05,
+ "loss": 0.8167,
+ "step": 1051
+ },
+ {
+ "epoch": 3.8534798534798536,
+ "grad_norm": 27.028446197509766,
+ "learning_rate": 4.100122100122101e-05,
+ "loss": 0.6983,
+ "step": 1052
+ },
+ {
+ "epoch": 3.857142857142857,
+ "grad_norm": 17.247610092163086,
+ "learning_rate": 4.0976800976800974e-05,
+ "loss": 0.4761,
+ "step": 1053
+ },
+ {
+ "epoch": 3.860805860805861,
+ "grad_norm": 27.187416076660156,
+ "learning_rate": 4.095238095238095e-05,
+ "loss": 0.794,
+ "step": 1054
+ },
+ {
+ "epoch": 3.8644688644688645,
+ "grad_norm": 35.990623474121094,
+ "learning_rate": 4.0927960927960924e-05,
+ "loss": 0.7874,
+ "step": 1055
+ },
+ {
+ "epoch": 3.868131868131868,
+ "grad_norm": 168.7575225830078,
+ "learning_rate": 4.09035409035409e-05,
+ "loss": 0.6028,
+ "step": 1056
+ },
+ {
+ "epoch": 3.871794871794872,
+ "grad_norm": 31.459491729736328,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6256,
+ "step": 1057
+ },
+ {
+ "epoch": 3.8754578754578755,
+ "grad_norm": 25.053123474121094,
+ "learning_rate": 4.085470085470085e-05,
+ "loss": 0.3041,
+ "step": 1058
+ },
+ {
+ "epoch": 3.879120879120879,
+ "grad_norm": 56.10730743408203,
+ "learning_rate": 4.083028083028083e-05,
+ "loss": 0.8875,
+ "step": 1059
+ },
+ {
+ "epoch": 3.8827838827838828,
+ "grad_norm": 26.897689819335938,
+ "learning_rate": 4.080586080586081e-05,
+ "loss": 0.5291,
+ "step": 1060
+ },
+ {
+ "epoch": 3.8864468864468864,
+ "grad_norm": 40.36210250854492,
+ "learning_rate": 4.078144078144078e-05,
+ "loss": 1.2323,
+ "step": 1061
+ },
+ {
+ "epoch": 3.89010989010989,
+ "grad_norm": 17.556934356689453,
+ "learning_rate": 4.075702075702076e-05,
+ "loss": 0.0951,
+ "step": 1062
+ },
+ {
+ "epoch": 3.8937728937728937,
+ "grad_norm": 54.6690559387207,
+ "learning_rate": 4.073260073260074e-05,
+ "loss": 0.4311,
+ "step": 1063
+ },
+ {
+ "epoch": 3.8974358974358974,
+ "grad_norm": 27.554750442504883,
+ "learning_rate": 4.070818070818071e-05,
+ "loss": 0.2851,
+ "step": 1064
+ },
+ {
+ "epoch": 3.901098901098901,
+ "grad_norm": 14.667935371398926,
+ "learning_rate": 4.068376068376069e-05,
+ "loss": 0.0866,
+ "step": 1065
+ },
+ {
+ "epoch": 3.9047619047619047,
+ "grad_norm": 39.62594985961914,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.2322,
+ "step": 1066
+ },
+ {
+ "epoch": 3.9084249084249083,
+ "grad_norm": 31.457260131835938,
+ "learning_rate": 4.063492063492063e-05,
+ "loss": 0.2554,
+ "step": 1067
+ },
+ {
+ "epoch": 3.912087912087912,
+ "grad_norm": 52.82997131347656,
+ "learning_rate": 4.061050061050061e-05,
+ "loss": 0.44,
+ "step": 1068
+ },
+ {
+ "epoch": 3.9157509157509156,
+ "grad_norm": 56.15779495239258,
+ "learning_rate": 4.058608058608059e-05,
+ "loss": 0.9419,
+ "step": 1069
+ },
+ {
+ "epoch": 3.9194139194139193,
+ "grad_norm": 59.23240661621094,
+ "learning_rate": 4.056166056166056e-05,
+ "loss": 0.5084,
+ "step": 1070
+ },
+ {
+ "epoch": 3.9230769230769234,
+ "grad_norm": 9.644290924072266,
+ "learning_rate": 4.053724053724054e-05,
+ "loss": 0.0456,
+ "step": 1071
+ },
+ {
+ "epoch": 3.926739926739927,
+ "grad_norm": 24.42845916748047,
+ "learning_rate": 4.051282051282052e-05,
+ "loss": 0.0907,
+ "step": 1072
+ },
+ {
+ "epoch": 3.9304029304029307,
+ "grad_norm": 81.36042785644531,
+ "learning_rate": 4.048840048840049e-05,
+ "loss": 1.0178,
+ "step": 1073
+ },
+ {
+ "epoch": 3.9340659340659343,
+ "grad_norm": 63.134071350097656,
+ "learning_rate": 4.046398046398047e-05,
+ "loss": 1.1125,
+ "step": 1074
+ },
+ {
+ "epoch": 3.937728937728938,
+ "grad_norm": 56.59608840942383,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.4465,
+ "step": 1075
+ },
+ {
+ "epoch": 3.9413919413919416,
+ "grad_norm": 48.51662063598633,
+ "learning_rate": 4.041514041514042e-05,
+ "loss": 0.5054,
+ "step": 1076
+ },
+ {
+ "epoch": 3.9450549450549453,
+ "grad_norm": 50.393524169921875,
+ "learning_rate": 4.0390720390720395e-05,
+ "loss": 0.8157,
+ "step": 1077
+ },
+ {
+ "epoch": 3.948717948717949,
+ "grad_norm": 63.414878845214844,
+ "learning_rate": 4.036630036630037e-05,
+ "loss": 0.9598,
+ "step": 1078
+ },
+ {
+ "epoch": 3.9523809523809526,
+ "grad_norm": 35.72902297973633,
+ "learning_rate": 4.034188034188034e-05,
+ "loss": 0.4764,
+ "step": 1079
+ },
+ {
+ "epoch": 3.956043956043956,
+ "grad_norm": 20.452268600463867,
+ "learning_rate": 4.031746031746032e-05,
+ "loss": 0.191,
+ "step": 1080
+ },
+ {
+ "epoch": 3.95970695970696,
+ "grad_norm": 38.23368453979492,
+ "learning_rate": 4.029304029304029e-05,
+ "loss": 0.5218,
+ "step": 1081
+ },
+ {
+ "epoch": 3.9633699633699635,
+ "grad_norm": 79.35212707519531,
+ "learning_rate": 4.026862026862027e-05,
+ "loss": 1.3695,
+ "step": 1082
+ },
+ {
+ "epoch": 3.967032967032967,
+ "grad_norm": 62.0828742980957,
+ "learning_rate": 4.0244200244200246e-05,
+ "loss": 1.4882,
+ "step": 1083
+ },
+ {
+ "epoch": 3.970695970695971,
+ "grad_norm": 35.413734436035156,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.1966,
+ "step": 1084
+ },
+ {
+ "epoch": 3.9743589743589745,
+ "grad_norm": 18.060728073120117,
+ "learning_rate": 4.0195360195360196e-05,
+ "loss": 0.2902,
+ "step": 1085
+ },
+ {
+ "epoch": 3.978021978021978,
+ "grad_norm": 15.263091087341309,
+ "learning_rate": 4.0170940170940174e-05,
+ "loss": 0.1325,
+ "step": 1086
+ },
+ {
+ "epoch": 3.9816849816849818,
+ "grad_norm": 35.8296012878418,
+ "learning_rate": 4.0146520146520146e-05,
+ "loss": 1.0225,
+ "step": 1087
+ },
+ {
+ "epoch": 3.9853479853479854,
+ "grad_norm": 24.120967864990234,
+ "learning_rate": 4.0122100122100125e-05,
+ "loss": 0.4432,
+ "step": 1088
+ },
+ {
+ "epoch": 3.989010989010989,
+ "grad_norm": 47.371070861816406,
+ "learning_rate": 4.00976800976801e-05,
+ "loss": 0.9703,
+ "step": 1089
+ },
+ {
+ "epoch": 3.9926739926739927,
+ "grad_norm": 44.266082763671875,
+ "learning_rate": 4.0073260073260075e-05,
+ "loss": 0.6652,
+ "step": 1090
+ },
+ {
+ "epoch": 3.9963369963369964,
+ "grad_norm": 22.17586898803711,
+ "learning_rate": 4.0048840048840046e-05,
+ "loss": 0.1324,
+ "step": 1091
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 45.4996337890625,
+ "learning_rate": 4.0024420024420025e-05,
+ "loss": 0.3746,
+ "step": 1092
+ },
+ {
+ "epoch": 4.003663003663004,
+ "grad_norm": 31.747541427612305,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 0.5028,
+ "step": 1093
+ },
+ {
+ "epoch": 4.007326007326007,
+ "grad_norm": 13.460674285888672,
+ "learning_rate": 3.9975579975579975e-05,
+ "loss": 0.088,
+ "step": 1094
+ },
+ {
+ "epoch": 4.010989010989011,
+ "grad_norm": 23.94148826599121,
+ "learning_rate": 3.9951159951159953e-05,
+ "loss": 0.1944,
+ "step": 1095
+ },
+ {
+ "epoch": 4.014652014652015,
+ "grad_norm": 60.94758224487305,
+ "learning_rate": 3.9926739926739925e-05,
+ "loss": 0.555,
+ "step": 1096
+ },
+ {
+ "epoch": 4.018315018315018,
+ "grad_norm": 24.47633934020996,
+ "learning_rate": 3.9902319902319904e-05,
+ "loss": 0.1314,
+ "step": 1097
+ },
+ {
+ "epoch": 4.021978021978022,
+ "grad_norm": 42.690162658691406,
+ "learning_rate": 3.987789987789988e-05,
+ "loss": 0.4734,
+ "step": 1098
+ },
+ {
+ "epoch": 4.0256410256410255,
+ "grad_norm": 69.26956939697266,
+ "learning_rate": 3.9853479853479854e-05,
+ "loss": 1.4256,
+ "step": 1099
+ },
+ {
+ "epoch": 4.029304029304029,
+ "grad_norm": 7.718477725982666,
+ "learning_rate": 3.982905982905983e-05,
+ "loss": 0.0549,
+ "step": 1100
+ },
+ {
+ "epoch": 4.032967032967033,
+ "grad_norm": 60.15462875366211,
+ "learning_rate": 3.980463980463981e-05,
+ "loss": 1.2739,
+ "step": 1101
+ },
+ {
+ "epoch": 4.0366300366300365,
+ "grad_norm": 57.749656677246094,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 1.0691,
+ "step": 1102
+ },
+ {
+ "epoch": 4.04029304029304,
+ "grad_norm": 35.57550811767578,
+ "learning_rate": 3.975579975579976e-05,
+ "loss": 0.5114,
+ "step": 1103
+ },
+ {
+ "epoch": 4.043956043956044,
+ "grad_norm": 58.007694244384766,
+ "learning_rate": 3.973137973137973e-05,
+ "loss": 1.1552,
+ "step": 1104
+ },
+ {
+ "epoch": 4.0476190476190474,
+ "grad_norm": 30.794008255004883,
+ "learning_rate": 3.9706959706959704e-05,
+ "loss": 0.7502,
+ "step": 1105
+ },
+ {
+ "epoch": 4.051282051282051,
+ "grad_norm": 35.88930892944336,
+ "learning_rate": 3.968253968253968e-05,
+ "loss": 0.6965,
+ "step": 1106
+ },
+ {
+ "epoch": 4.054945054945055,
+ "grad_norm": 25.719144821166992,
+ "learning_rate": 3.9658119658119654e-05,
+ "loss": 0.4581,
+ "step": 1107
+ },
+ {
+ "epoch": 4.058608058608058,
+ "grad_norm": 37.397640228271484,
+ "learning_rate": 3.963369963369963e-05,
+ "loss": 1.0719,
+ "step": 1108
+ },
+ {
+ "epoch": 4.062271062271062,
+ "grad_norm": 25.8681640625,
+ "learning_rate": 3.960927960927961e-05,
+ "loss": 0.7,
+ "step": 1109
+ },
+ {
+ "epoch": 4.065934065934066,
+ "grad_norm": 16.983413696289062,
+ "learning_rate": 3.958485958485958e-05,
+ "loss": 0.2394,
+ "step": 1110
+ },
+ {
+ "epoch": 4.069597069597069,
+ "grad_norm": 31.7902889251709,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.5662,
+ "step": 1111
+ },
+ {
+ "epoch": 4.073260073260073,
+ "grad_norm": 37.51417922973633,
+ "learning_rate": 3.953601953601954e-05,
+ "loss": 0.3483,
+ "step": 1112
+ },
+ {
+ "epoch": 4.076923076923077,
+ "grad_norm": 24.01732635498047,
+ "learning_rate": 3.951159951159951e-05,
+ "loss": 0.2527,
+ "step": 1113
+ },
+ {
+ "epoch": 4.08058608058608,
+ "grad_norm": 29.152162551879883,
+ "learning_rate": 3.948717948717949e-05,
+ "loss": 0.4485,
+ "step": 1114
+ },
+ {
+ "epoch": 4.084249084249084,
+ "grad_norm": 31.519155502319336,
+ "learning_rate": 3.946275946275947e-05,
+ "loss": 0.2485,
+ "step": 1115
+ },
+ {
+ "epoch": 4.087912087912088,
+ "grad_norm": 18.462514877319336,
+ "learning_rate": 3.943833943833944e-05,
+ "loss": 0.1057,
+ "step": 1116
+ },
+ {
+ "epoch": 4.091575091575091,
+ "grad_norm": 35.28910827636719,
+ "learning_rate": 3.941391941391941e-05,
+ "loss": 0.3589,
+ "step": 1117
+ },
+ {
+ "epoch": 4.095238095238095,
+ "grad_norm": 47.00394058227539,
+ "learning_rate": 3.938949938949939e-05,
+ "loss": 0.5148,
+ "step": 1118
+ },
+ {
+ "epoch": 4.0989010989010985,
+ "grad_norm": 24.796058654785156,
+ "learning_rate": 3.936507936507936e-05,
+ "loss": 0.2486,
+ "step": 1119
+ },
+ {
+ "epoch": 4.102564102564102,
+ "grad_norm": 27.098758697509766,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.196,
+ "step": 1120
+ },
+ {
+ "epoch": 4.106227106227106,
+ "grad_norm": 59.4343147277832,
+ "learning_rate": 3.931623931623932e-05,
+ "loss": 0.8093,
+ "step": 1121
+ },
+ {
+ "epoch": 4.1098901098901095,
+ "grad_norm": 57.0518684387207,
+ "learning_rate": 3.929181929181929e-05,
+ "loss": 0.6495,
+ "step": 1122
+ },
+ {
+ "epoch": 4.113553113553113,
+ "grad_norm": 42.01070022583008,
+ "learning_rate": 3.926739926739927e-05,
+ "loss": 0.3272,
+ "step": 1123
+ },
+ {
+ "epoch": 4.117216117216117,
+ "grad_norm": 72.11932373046875,
+ "learning_rate": 3.924297924297925e-05,
+ "loss": 1.2542,
+ "step": 1124
+ },
+ {
+ "epoch": 4.1208791208791204,
+ "grad_norm": 13.270249366760254,
+ "learning_rate": 3.921855921855922e-05,
+ "loss": 0.0843,
+ "step": 1125
+ },
+ {
+ "epoch": 4.124542124542124,
+ "grad_norm": 32.058258056640625,
+ "learning_rate": 3.91941391941392e-05,
+ "loss": 0.158,
+ "step": 1126
+ },
+ {
+ "epoch": 4.128205128205128,
+ "grad_norm": 37.67665481567383,
+ "learning_rate": 3.9169719169719176e-05,
+ "loss": 0.3463,
+ "step": 1127
+ },
+ {
+ "epoch": 4.131868131868132,
+ "grad_norm": 98.33348846435547,
+ "learning_rate": 3.914529914529915e-05,
+ "loss": 0.8846,
+ "step": 1128
+ },
+ {
+ "epoch": 4.135531135531136,
+ "grad_norm": 49.11083221435547,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.4124,
+ "step": 1129
+ },
+ {
+ "epoch": 4.13919413919414,
+ "grad_norm": 45.87646484375,
+ "learning_rate": 3.90964590964591e-05,
+ "loss": 0.3594,
+ "step": 1130
+ },
+ {
+ "epoch": 4.142857142857143,
+ "grad_norm": 49.34445571899414,
+ "learning_rate": 3.907203907203907e-05,
+ "loss": 0.1947,
+ "step": 1131
+ },
+ {
+ "epoch": 4.146520146520147,
+ "grad_norm": 8.654282569885254,
+ "learning_rate": 3.904761904761905e-05,
+ "loss": 0.0923,
+ "step": 1132
+ },
+ {
+ "epoch": 4.1501831501831505,
+ "grad_norm": 12.46809196472168,
+ "learning_rate": 3.902319902319902e-05,
+ "loss": 0.0841,
+ "step": 1133
+ },
+ {
+ "epoch": 4.153846153846154,
+ "grad_norm": 33.9839973449707,
+ "learning_rate": 3.8998778998779e-05,
+ "loss": 0.5838,
+ "step": 1134
+ },
+ {
+ "epoch": 4.157509157509158,
+ "grad_norm": 36.68742752075195,
+ "learning_rate": 3.8974358974358976e-05,
+ "loss": 0.5483,
+ "step": 1135
+ },
+ {
+ "epoch": 4.1611721611721615,
+ "grad_norm": 26.862363815307617,
+ "learning_rate": 3.894993894993895e-05,
+ "loss": 0.2464,
+ "step": 1136
+ },
+ {
+ "epoch": 4.164835164835165,
+ "grad_norm": 16.219947814941406,
+ "learning_rate": 3.8925518925518926e-05,
+ "loss": 0.1878,
+ "step": 1137
+ },
+ {
+ "epoch": 4.168498168498169,
+ "grad_norm": 36.86198425292969,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.3656,
+ "step": 1138
+ },
+ {
+ "epoch": 4.172161172161172,
+ "grad_norm": 36.241432189941406,
+ "learning_rate": 3.8876678876678877e-05,
+ "loss": 0.8421,
+ "step": 1139
+ },
+ {
+ "epoch": 4.175824175824176,
+ "grad_norm": 45.81169891357422,
+ "learning_rate": 3.8852258852258855e-05,
+ "loss": 0.6081,
+ "step": 1140
+ },
+ {
+ "epoch": 4.17948717948718,
+ "grad_norm": 30.914037704467773,
+ "learning_rate": 3.8827838827838833e-05,
+ "loss": 0.2975,
+ "step": 1141
+ },
+ {
+ "epoch": 4.183150183150183,
+ "grad_norm": 4.663424968719482,
+ "learning_rate": 3.8803418803418805e-05,
+ "loss": 0.0319,
+ "step": 1142
+ },
+ {
+ "epoch": 4.186813186813187,
+ "grad_norm": 33.163551330566406,
+ "learning_rate": 3.877899877899878e-05,
+ "loss": 0.236,
+ "step": 1143
+ },
+ {
+ "epoch": 4.190476190476191,
+ "grad_norm": 20.820547103881836,
+ "learning_rate": 3.8754578754578755e-05,
+ "loss": 0.1907,
+ "step": 1144
+ },
+ {
+ "epoch": 4.194139194139194,
+ "grad_norm": 65.4993896484375,
+ "learning_rate": 3.873015873015873e-05,
+ "loss": 0.4195,
+ "step": 1145
+ },
+ {
+ "epoch": 4.197802197802198,
+ "grad_norm": 13.253530502319336,
+ "learning_rate": 3.8705738705738705e-05,
+ "loss": 0.1496,
+ "step": 1146
+ },
+ {
+ "epoch": 4.201465201465202,
+ "grad_norm": 18.291889190673828,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 0.1544,
+ "step": 1147
+ },
+ {
+ "epoch": 4.205128205128205,
+ "grad_norm": 32.1517448425293,
+ "learning_rate": 3.8656898656898656e-05,
+ "loss": 0.3317,
+ "step": 1148
+ },
+ {
+ "epoch": 4.208791208791209,
+ "grad_norm": 37.809669494628906,
+ "learning_rate": 3.8632478632478634e-05,
+ "loss": 0.394,
+ "step": 1149
+ },
+ {
+ "epoch": 4.212454212454213,
+ "grad_norm": 113.17266082763672,
+ "learning_rate": 3.860805860805861e-05,
+ "loss": 1.2368,
+ "step": 1150
+ },
+ {
+ "epoch": 4.216117216117216,
+ "grad_norm": 10.35407543182373,
+ "learning_rate": 3.8583638583638584e-05,
+ "loss": 0.0584,
+ "step": 1151
+ },
+ {
+ "epoch": 4.21978021978022,
+ "grad_norm": 56.98881530761719,
+ "learning_rate": 3.855921855921856e-05,
+ "loss": 0.8088,
+ "step": 1152
+ },
+ {
+ "epoch": 4.2234432234432235,
+ "grad_norm": 45.7849006652832,
+ "learning_rate": 3.853479853479854e-05,
+ "loss": 0.6471,
+ "step": 1153
+ },
+ {
+ "epoch": 4.227106227106227,
+ "grad_norm": 43.57515335083008,
+ "learning_rate": 3.851037851037851e-05,
+ "loss": 0.2924,
+ "step": 1154
+ },
+ {
+ "epoch": 4.230769230769231,
+ "grad_norm": 14.98643684387207,
+ "learning_rate": 3.848595848595849e-05,
+ "loss": 0.1108,
+ "step": 1155
+ },
+ {
+ "epoch": 4.2344322344322345,
+ "grad_norm": 27.162513732910156,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.3856,
+ "step": 1156
+ },
+ {
+ "epoch": 4.238095238095238,
+ "grad_norm": 56.45119094848633,
+ "learning_rate": 3.8437118437118435e-05,
+ "loss": 0.6752,
+ "step": 1157
+ },
+ {
+ "epoch": 4.241758241758242,
+ "grad_norm": 15.522347450256348,
+ "learning_rate": 3.841269841269841e-05,
+ "loss": 0.1419,
+ "step": 1158
+ },
+ {
+ "epoch": 4.245421245421245,
+ "grad_norm": 16.31126594543457,
+ "learning_rate": 3.8388278388278385e-05,
+ "loss": 0.1303,
+ "step": 1159
+ },
+ {
+ "epoch": 4.249084249084249,
+ "grad_norm": 12.398606300354004,
+ "learning_rate": 3.836385836385836e-05,
+ "loss": 0.1306,
+ "step": 1160
+ },
+ {
+ "epoch": 4.252747252747253,
+ "grad_norm": 19.660768508911133,
+ "learning_rate": 3.833943833943834e-05,
+ "loss": 0.1554,
+ "step": 1161
+ },
+ {
+ "epoch": 4.256410256410256,
+ "grad_norm": 131.451416015625,
+ "learning_rate": 3.831501831501831e-05,
+ "loss": 0.2774,
+ "step": 1162
+ },
+ {
+ "epoch": 4.26007326007326,
+ "grad_norm": 42.0703125,
+ "learning_rate": 3.829059829059829e-05,
+ "loss": 0.471,
+ "step": 1163
+ },
+ {
+ "epoch": 4.263736263736264,
+ "grad_norm": 52.415096282958984,
+ "learning_rate": 3.826617826617827e-05,
+ "loss": 0.7872,
+ "step": 1164
+ },
+ {
+ "epoch": 4.267399267399267,
+ "grad_norm": 35.990421295166016,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.4495,
+ "step": 1165
+ },
+ {
+ "epoch": 4.271062271062271,
+ "grad_norm": 40.330265045166016,
+ "learning_rate": 3.821733821733822e-05,
+ "loss": 0.4009,
+ "step": 1166
+ },
+ {
+ "epoch": 4.274725274725275,
+ "grad_norm": 42.55587387084961,
+ "learning_rate": 3.81929181929182e-05,
+ "loss": 1.6215,
+ "step": 1167
+ },
+ {
+ "epoch": 4.278388278388278,
+ "grad_norm": 30.704498291015625,
+ "learning_rate": 3.816849816849817e-05,
+ "loss": 0.3539,
+ "step": 1168
+ },
+ {
+ "epoch": 4.282051282051282,
+ "grad_norm": 10.239601135253906,
+ "learning_rate": 3.814407814407814e-05,
+ "loss": 0.0779,
+ "step": 1169
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 37.00144577026367,
+ "learning_rate": 3.811965811965812e-05,
+ "loss": 0.4089,
+ "step": 1170
+ },
+ {
+ "epoch": 4.289377289377289,
+ "grad_norm": 40.18193817138672,
+ "learning_rate": 3.809523809523809e-05,
+ "loss": 0.4854,
+ "step": 1171
+ },
+ {
+ "epoch": 4.293040293040293,
+ "grad_norm": 46.78989028930664,
+ "learning_rate": 3.807081807081807e-05,
+ "loss": 0.5863,
+ "step": 1172
+ },
+ {
+ "epoch": 4.2967032967032965,
+ "grad_norm": 49.5102653503418,
+ "learning_rate": 3.804639804639805e-05,
+ "loss": 1.0118,
+ "step": 1173
+ },
+ {
+ "epoch": 4.3003663003663,
+ "grad_norm": 30.41546058654785,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.2616,
+ "step": 1174
+ },
+ {
+ "epoch": 4.304029304029304,
+ "grad_norm": 41.22653579711914,
+ "learning_rate": 3.7997557997558e-05,
+ "loss": 0.5852,
+ "step": 1175
+ },
+ {
+ "epoch": 4.3076923076923075,
+ "grad_norm": 4.033203125,
+ "learning_rate": 3.797313797313798e-05,
+ "loss": 0.0221,
+ "step": 1176
+ },
+ {
+ "epoch": 4.311355311355311,
+ "grad_norm": 13.03472900390625,
+ "learning_rate": 3.794871794871795e-05,
+ "loss": 0.1499,
+ "step": 1177
+ },
+ {
+ "epoch": 4.315018315018315,
+ "grad_norm": 24.690824508666992,
+ "learning_rate": 3.792429792429793e-05,
+ "loss": 0.2631,
+ "step": 1178
+ },
+ {
+ "epoch": 4.318681318681318,
+ "grad_norm": 32.594451904296875,
+ "learning_rate": 3.7899877899877906e-05,
+ "loss": 0.2988,
+ "step": 1179
+ },
+ {
+ "epoch": 4.322344322344322,
+ "grad_norm": 10.510795593261719,
+ "learning_rate": 3.787545787545788e-05,
+ "loss": 0.0499,
+ "step": 1180
+ },
+ {
+ "epoch": 4.326007326007326,
+ "grad_norm": 65.71479034423828,
+ "learning_rate": 3.785103785103785e-05,
+ "loss": 0.9048,
+ "step": 1181
+ },
+ {
+ "epoch": 4.329670329670329,
+ "grad_norm": 12.129572868347168,
+ "learning_rate": 3.782661782661783e-05,
+ "loss": 0.0629,
+ "step": 1182
+ },
+ {
+ "epoch": 4.333333333333333,
+ "grad_norm": 88.66580200195312,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.8276,
+ "step": 1183
+ },
+ {
+ "epoch": 4.336996336996337,
+ "grad_norm": 35.2215461730957,
+ "learning_rate": 3.777777777777778e-05,
+ "loss": 0.2996,
+ "step": 1184
+ },
+ {
+ "epoch": 4.34065934065934,
+ "grad_norm": 29.870285034179688,
+ "learning_rate": 3.775335775335775e-05,
+ "loss": 0.2152,
+ "step": 1185
+ },
+ {
+ "epoch": 4.344322344322344,
+ "grad_norm": 30.441116333007812,
+ "learning_rate": 3.772893772893773e-05,
+ "loss": 0.6761,
+ "step": 1186
+ },
+ {
+ "epoch": 4.347985347985348,
+ "grad_norm": 22.49298095703125,
+ "learning_rate": 3.770451770451771e-05,
+ "loss": 0.7508,
+ "step": 1187
+ },
+ {
+ "epoch": 4.351648351648351,
+ "grad_norm": 22.43603515625,
+ "learning_rate": 3.768009768009768e-05,
+ "loss": 0.3601,
+ "step": 1188
+ },
+ {
+ "epoch": 4.355311355311355,
+ "grad_norm": 38.21080780029297,
+ "learning_rate": 3.765567765567766e-05,
+ "loss": 0.3769,
+ "step": 1189
+ },
+ {
+ "epoch": 4.358974358974359,
+ "grad_norm": 48.90728759765625,
+ "learning_rate": 3.7631257631257635e-05,
+ "loss": 0.4259,
+ "step": 1190
+ },
+ {
+ "epoch": 4.362637362637362,
+ "grad_norm": 7.331233024597168,
+ "learning_rate": 3.760683760683761e-05,
+ "loss": 0.0697,
+ "step": 1191
+ },
+ {
+ "epoch": 4.366300366300366,
+ "grad_norm": 25.096189498901367,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.2196,
+ "step": 1192
+ },
+ {
+ "epoch": 4.36996336996337,
+ "grad_norm": 53.813209533691406,
+ "learning_rate": 3.7557997557997564e-05,
+ "loss": 0.3785,
+ "step": 1193
+ },
+ {
+ "epoch": 4.373626373626374,
+ "grad_norm": 13.184123039245605,
+ "learning_rate": 3.753357753357753e-05,
+ "loss": 0.1747,
+ "step": 1194
+ },
+ {
+ "epoch": 4.377289377289378,
+ "grad_norm": 1.818351149559021,
+ "learning_rate": 3.750915750915751e-05,
+ "loss": 0.0158,
+ "step": 1195
+ },
+ {
+ "epoch": 4.380952380952381,
+ "grad_norm": 63.21619415283203,
+ "learning_rate": 3.7484737484737486e-05,
+ "loss": 0.2863,
+ "step": 1196
+ },
+ {
+ "epoch": 4.384615384615385,
+ "grad_norm": 32.59927749633789,
+ "learning_rate": 3.746031746031746e-05,
+ "loss": 0.4261,
+ "step": 1197
+ },
+ {
+ "epoch": 4.388278388278389,
+ "grad_norm": 36.5265998840332,
+ "learning_rate": 3.7435897435897436e-05,
+ "loss": 0.8064,
+ "step": 1198
+ },
+ {
+ "epoch": 4.391941391941392,
+ "grad_norm": 47.726905822753906,
+ "learning_rate": 3.7411477411477414e-05,
+ "loss": 0.8884,
+ "step": 1199
+ },
+ {
+ "epoch": 4.395604395604396,
+ "grad_norm": 12.621973037719727,
+ "learning_rate": 3.7387057387057386e-05,
+ "loss": 0.1085,
+ "step": 1200
+ },
+ {
+ "epoch": 4.3992673992674,
+ "grad_norm": 24.7711124420166,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 0.2249,
+ "step": 1201
+ },
+ {
+ "epoch": 4.402930402930403,
+ "grad_norm": 39.19346618652344,
+ "learning_rate": 3.733821733821734e-05,
+ "loss": 0.4065,
+ "step": 1202
+ },
+ {
+ "epoch": 4.406593406593407,
+ "grad_norm": 20.3857421875,
+ "learning_rate": 3.7313797313797315e-05,
+ "loss": 0.1653,
+ "step": 1203
+ },
+ {
+ "epoch": 4.410256410256411,
+ "grad_norm": 58.15717697143555,
+ "learning_rate": 3.728937728937729e-05,
+ "loss": 0.8774,
+ "step": 1204
+ },
+ {
+ "epoch": 4.413919413919414,
+ "grad_norm": 28.05725860595703,
+ "learning_rate": 3.726495726495727e-05,
+ "loss": 0.1695,
+ "step": 1205
+ },
+ {
+ "epoch": 4.417582417582418,
+ "grad_norm": 24.635583877563477,
+ "learning_rate": 3.724053724053724e-05,
+ "loss": 0.4871,
+ "step": 1206
+ },
+ {
+ "epoch": 4.4212454212454215,
+ "grad_norm": 16.8306941986084,
+ "learning_rate": 3.7216117216117215e-05,
+ "loss": 0.0863,
+ "step": 1207
+ },
+ {
+ "epoch": 4.424908424908425,
+ "grad_norm": 16.2359676361084,
+ "learning_rate": 3.719169719169719e-05,
+ "loss": 0.077,
+ "step": 1208
+ },
+ {
+ "epoch": 4.428571428571429,
+ "grad_norm": 31.431425094604492,
+ "learning_rate": 3.7167277167277165e-05,
+ "loss": 0.2815,
+ "step": 1209
+ },
+ {
+ "epoch": 4.4322344322344325,
+ "grad_norm": 31.44464874267578,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.2237,
+ "step": 1210
+ },
+ {
+ "epoch": 4.435897435897436,
+ "grad_norm": 23.390378952026367,
+ "learning_rate": 3.7118437118437115e-05,
+ "loss": 0.1791,
+ "step": 1211
+ },
+ {
+ "epoch": 4.43956043956044,
+ "grad_norm": 48.210079193115234,
+ "learning_rate": 3.7094017094017094e-05,
+ "loss": 0.517,
+ "step": 1212
+ },
+ {
+ "epoch": 4.443223443223443,
+ "grad_norm": 45.35732650756836,
+ "learning_rate": 3.706959706959707e-05,
+ "loss": 0.4638,
+ "step": 1213
+ },
+ {
+ "epoch": 4.446886446886447,
+ "grad_norm": 16.88719367980957,
+ "learning_rate": 3.7045177045177044e-05,
+ "loss": 0.1203,
+ "step": 1214
+ },
+ {
+ "epoch": 4.450549450549451,
+ "grad_norm": 58.36906433105469,
+ "learning_rate": 3.702075702075702e-05,
+ "loss": 0.7366,
+ "step": 1215
+ },
+ {
+ "epoch": 4.454212454212454,
+ "grad_norm": 49.00838088989258,
+ "learning_rate": 3.6996336996337e-05,
+ "loss": 0.739,
+ "step": 1216
+ },
+ {
+ "epoch": 4.457875457875458,
+ "grad_norm": 42.87287521362305,
+ "learning_rate": 3.697191697191697e-05,
+ "loss": 1.3861,
+ "step": 1217
+ },
+ {
+ "epoch": 4.461538461538462,
+ "grad_norm": 44.62813949584961,
+ "learning_rate": 3.694749694749695e-05,
+ "loss": 0.549,
+ "step": 1218
+ },
+ {
+ "epoch": 4.465201465201465,
+ "grad_norm": 6.473313331604004,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.0407,
+ "step": 1219
+ },
+ {
+ "epoch": 4.468864468864469,
+ "grad_norm": 35.04784393310547,
+ "learning_rate": 3.6898656898656894e-05,
+ "loss": 0.3146,
+ "step": 1220
+ },
+ {
+ "epoch": 4.472527472527473,
+ "grad_norm": 44.79425811767578,
+ "learning_rate": 3.687423687423687e-05,
+ "loss": 0.5206,
+ "step": 1221
+ },
+ {
+ "epoch": 4.476190476190476,
+ "grad_norm": 36.52440643310547,
+ "learning_rate": 3.684981684981685e-05,
+ "loss": 0.5977,
+ "step": 1222
+ },
+ {
+ "epoch": 4.47985347985348,
+ "grad_norm": 58.15000915527344,
+ "learning_rate": 3.682539682539682e-05,
+ "loss": 1.0533,
+ "step": 1223
+ },
+ {
+ "epoch": 4.483516483516484,
+ "grad_norm": 32.33371353149414,
+ "learning_rate": 3.68009768009768e-05,
+ "loss": 0.3928,
+ "step": 1224
+ },
+ {
+ "epoch": 4.487179487179487,
+ "grad_norm": 44.501529693603516,
+ "learning_rate": 3.677655677655678e-05,
+ "loss": 0.8471,
+ "step": 1225
+ },
+ {
+ "epoch": 4.490842490842491,
+ "grad_norm": 41.62052536010742,
+ "learning_rate": 3.675213675213675e-05,
+ "loss": 0.7731,
+ "step": 1226
+ },
+ {
+ "epoch": 4.4945054945054945,
+ "grad_norm": 12.638876914978027,
+ "learning_rate": 3.672771672771673e-05,
+ "loss": 0.1219,
+ "step": 1227
+ },
+ {
+ "epoch": 4.498168498168498,
+ "grad_norm": 12.034523010253906,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.158,
+ "step": 1228
+ },
+ {
+ "epoch": 4.501831501831502,
+ "grad_norm": 42.04001235961914,
+ "learning_rate": 3.667887667887668e-05,
+ "loss": 0.8556,
+ "step": 1229
+ },
+ {
+ "epoch": 4.5054945054945055,
+ "grad_norm": 36.28947448730469,
+ "learning_rate": 3.665445665445666e-05,
+ "loss": 0.6569,
+ "step": 1230
+ },
+ {
+ "epoch": 4.509157509157509,
+ "grad_norm": 40.263912200927734,
+ "learning_rate": 3.663003663003664e-05,
+ "loss": 0.7625,
+ "step": 1231
+ },
+ {
+ "epoch": 4.512820512820513,
+ "grad_norm": 23.760005950927734,
+ "learning_rate": 3.660561660561661e-05,
+ "loss": 0.2465,
+ "step": 1232
+ },
+ {
+ "epoch": 4.516483516483516,
+ "grad_norm": 23.589109420776367,
+ "learning_rate": 3.658119658119658e-05,
+ "loss": 0.4408,
+ "step": 1233
+ },
+ {
+ "epoch": 4.52014652014652,
+ "grad_norm": 30.512271881103516,
+ "learning_rate": 3.655677655677655e-05,
+ "loss": 0.8748,
+ "step": 1234
+ },
+ {
+ "epoch": 4.523809523809524,
+ "grad_norm": 8.060181617736816,
+ "learning_rate": 3.653235653235653e-05,
+ "loss": 0.0818,
+ "step": 1235
+ },
+ {
+ "epoch": 4.527472527472527,
+ "grad_norm": 14.353645324707031,
+ "learning_rate": 3.650793650793651e-05,
+ "loss": 0.1899,
+ "step": 1236
+ },
+ {
+ "epoch": 4.531135531135531,
+ "grad_norm": 12.20384693145752,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1618,
+ "step": 1237
+ },
+ {
+ "epoch": 4.534798534798535,
+ "grad_norm": 182.4698028564453,
+ "learning_rate": 3.645909645909646e-05,
+ "loss": 0.9223,
+ "step": 1238
+ },
+ {
+ "epoch": 4.538461538461538,
+ "grad_norm": 33.137081146240234,
+ "learning_rate": 3.643467643467644e-05,
+ "loss": 0.7708,
+ "step": 1239
+ },
+ {
+ "epoch": 4.542124542124542,
+ "grad_norm": 19.895912170410156,
+ "learning_rate": 3.641025641025641e-05,
+ "loss": 0.164,
+ "step": 1240
+ },
+ {
+ "epoch": 4.545787545787546,
+ "grad_norm": 62.816864013671875,
+ "learning_rate": 3.638583638583639e-05,
+ "loss": 1.4675,
+ "step": 1241
+ },
+ {
+ "epoch": 4.549450549450549,
+ "grad_norm": 35.58034896850586,
+ "learning_rate": 3.6361416361416366e-05,
+ "loss": 0.4449,
+ "step": 1242
+ },
+ {
+ "epoch": 4.553113553113553,
+ "grad_norm": 21.993911743164062,
+ "learning_rate": 3.633699633699634e-05,
+ "loss": 0.2302,
+ "step": 1243
+ },
+ {
+ "epoch": 4.556776556776557,
+ "grad_norm": 33.743812561035156,
+ "learning_rate": 3.6312576312576316e-05,
+ "loss": 0.1782,
+ "step": 1244
+ },
+ {
+ "epoch": 4.56043956043956,
+ "grad_norm": 40.135711669921875,
+ "learning_rate": 3.6288156288156294e-05,
+ "loss": 0.7147,
+ "step": 1245
+ },
+ {
+ "epoch": 4.564102564102564,
+ "grad_norm": 2.47517728805542,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0188,
+ "step": 1246
+ },
+ {
+ "epoch": 4.5677655677655675,
+ "grad_norm": 22.023807525634766,
+ "learning_rate": 3.623931623931624e-05,
+ "loss": 0.3182,
+ "step": 1247
+ },
+ {
+ "epoch": 4.571428571428571,
+ "grad_norm": 21.8381290435791,
+ "learning_rate": 3.6214896214896216e-05,
+ "loss": 0.4161,
+ "step": 1248
+ },
+ {
+ "epoch": 4.575091575091575,
+ "grad_norm": 20.989906311035156,
+ "learning_rate": 3.619047619047619e-05,
+ "loss": 0.2972,
+ "step": 1249
+ },
+ {
+ "epoch": 4.5787545787545785,
+ "grad_norm": 75.8060073852539,
+ "learning_rate": 3.6166056166056166e-05,
+ "loss": 0.6194,
+ "step": 1250
+ },
+ {
+ "epoch": 4.582417582417582,
+ "grad_norm": 40.85308074951172,
+ "learning_rate": 3.6141636141636145e-05,
+ "loss": 0.7707,
+ "step": 1251
+ },
+ {
+ "epoch": 4.586080586080586,
+ "grad_norm": 62.22278594970703,
+ "learning_rate": 3.6117216117216117e-05,
+ "loss": 0.6872,
+ "step": 1252
+ },
+ {
+ "epoch": 4.589743589743589,
+ "grad_norm": 30.27143669128418,
+ "learning_rate": 3.6092796092796095e-05,
+ "loss": 0.484,
+ "step": 1253
+ },
+ {
+ "epoch": 4.593406593406593,
+ "grad_norm": 44.08026123046875,
+ "learning_rate": 3.6068376068376073e-05,
+ "loss": 0.8593,
+ "step": 1254
+ },
+ {
+ "epoch": 4.597069597069597,
+ "grad_norm": 22.63222312927246,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.2542,
+ "step": 1255
+ },
+ {
+ "epoch": 4.6007326007326,
+ "grad_norm": 7.024168014526367,
+ "learning_rate": 3.6019536019536024e-05,
+ "loss": 0.0777,
+ "step": 1256
+ },
+ {
+ "epoch": 4.604395604395604,
+ "grad_norm": 24.981502532958984,
+ "learning_rate": 3.5995115995116e-05,
+ "loss": 0.2332,
+ "step": 1257
+ },
+ {
+ "epoch": 4.608058608058608,
+ "grad_norm": 28.929807662963867,
+ "learning_rate": 3.5970695970695974e-05,
+ "loss": 0.3665,
+ "step": 1258
+ },
+ {
+ "epoch": 4.611721611721611,
+ "grad_norm": 36.756683349609375,
+ "learning_rate": 3.5946275946275945e-05,
+ "loss": 1.2777,
+ "step": 1259
+ },
+ {
+ "epoch": 4.615384615384615,
+ "grad_norm": 53.04755783081055,
+ "learning_rate": 3.592185592185592e-05,
+ "loss": 0.3001,
+ "step": 1260
+ },
+ {
+ "epoch": 4.619047619047619,
+ "grad_norm": 39.71099853515625,
+ "learning_rate": 3.5897435897435896e-05,
+ "loss": 0.7756,
+ "step": 1261
+ },
+ {
+ "epoch": 4.622710622710622,
+ "grad_norm": 21.80796241760254,
+ "learning_rate": 3.5873015873015874e-05,
+ "loss": 0.2329,
+ "step": 1262
+ },
+ {
+ "epoch": 4.626373626373626,
+ "grad_norm": 25.909208297729492,
+ "learning_rate": 3.5848595848595846e-05,
+ "loss": 0.5081,
+ "step": 1263
+ },
+ {
+ "epoch": 4.63003663003663,
+ "grad_norm": 46.62733840942383,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.8265,
+ "step": 1264
+ },
+ {
+ "epoch": 4.633699633699633,
+ "grad_norm": 5.689383506774902,
+ "learning_rate": 3.57997557997558e-05,
+ "loss": 0.055,
+ "step": 1265
+ },
+ {
+ "epoch": 4.637362637362637,
+ "grad_norm": 23.30045509338379,
+ "learning_rate": 3.5775335775335774e-05,
+ "loss": 0.3397,
+ "step": 1266
+ },
+ {
+ "epoch": 4.641025641025641,
+ "grad_norm": 15.685534477233887,
+ "learning_rate": 3.575091575091575e-05,
+ "loss": 0.0862,
+ "step": 1267
+ },
+ {
+ "epoch": 4.644688644688645,
+ "grad_norm": 27.56009864807129,
+ "learning_rate": 3.572649572649573e-05,
+ "loss": 0.4751,
+ "step": 1268
+ },
+ {
+ "epoch": 4.648351648351649,
+ "grad_norm": 18.164905548095703,
+ "learning_rate": 3.57020757020757e-05,
+ "loss": 0.1274,
+ "step": 1269
+ },
+ {
+ "epoch": 4.652014652014652,
+ "grad_norm": 18.178728103637695,
+ "learning_rate": 3.567765567765568e-05,
+ "loss": 0.1246,
+ "step": 1270
+ },
+ {
+ "epoch": 4.655677655677656,
+ "grad_norm": 11.308391571044922,
+ "learning_rate": 3.565323565323565e-05,
+ "loss": 0.0937,
+ "step": 1271
+ },
+ {
+ "epoch": 4.65934065934066,
+ "grad_norm": 38.507469177246094,
+ "learning_rate": 3.5628815628815625e-05,
+ "loss": 0.4616,
+ "step": 1272
+ },
+ {
+ "epoch": 4.663003663003663,
+ "grad_norm": 9.642159461975098,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.0772,
+ "step": 1273
+ },
+ {
+ "epoch": 4.666666666666667,
+ "grad_norm": 31.854310989379883,
+ "learning_rate": 3.557997557997558e-05,
+ "loss": 0.2349,
+ "step": 1274
+ },
+ {
+ "epoch": 4.670329670329671,
+ "grad_norm": 53.341617584228516,
+ "learning_rate": 3.555555555555555e-05,
+ "loss": 0.2926,
+ "step": 1275
+ },
+ {
+ "epoch": 4.673992673992674,
+ "grad_norm": 24.003368377685547,
+ "learning_rate": 3.553113553113553e-05,
+ "loss": 0.1689,
+ "step": 1276
+ },
+ {
+ "epoch": 4.677655677655678,
+ "grad_norm": 12.198409080505371,
+ "learning_rate": 3.550671550671551e-05,
+ "loss": 0.1001,
+ "step": 1277
+ },
+ {
+ "epoch": 4.681318681318682,
+ "grad_norm": 56.559051513671875,
+ "learning_rate": 3.548229548229548e-05,
+ "loss": 0.5314,
+ "step": 1278
+ },
+ {
+ "epoch": 4.684981684981685,
+ "grad_norm": 17.89840316772461,
+ "learning_rate": 3.545787545787546e-05,
+ "loss": 0.1258,
+ "step": 1279
+ },
+ {
+ "epoch": 4.688644688644689,
+ "grad_norm": 14.37424087524414,
+ "learning_rate": 3.543345543345544e-05,
+ "loss": 0.0925,
+ "step": 1280
+ },
+ {
+ "epoch": 4.6923076923076925,
+ "grad_norm": 21.21650505065918,
+ "learning_rate": 3.540903540903541e-05,
+ "loss": 0.1541,
+ "step": 1281
+ },
+ {
+ "epoch": 4.695970695970696,
+ "grad_norm": 36.1934814453125,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.403,
+ "step": 1282
+ },
+ {
+ "epoch": 4.6996336996337,
+ "grad_norm": 62.917022705078125,
+ "learning_rate": 3.536019536019537e-05,
+ "loss": 1.2771,
+ "step": 1283
+ },
+ {
+ "epoch": 4.7032967032967035,
+ "grad_norm": 30.238500595092773,
+ "learning_rate": 3.533577533577533e-05,
+ "loss": 0.3149,
+ "step": 1284
+ },
+ {
+ "epoch": 4.706959706959707,
+ "grad_norm": 12.155022621154785,
+ "learning_rate": 3.531135531135531e-05,
+ "loss": 0.0543,
+ "step": 1285
+ },
+ {
+ "epoch": 4.710622710622711,
+ "grad_norm": 39.67718505859375,
+ "learning_rate": 3.528693528693528e-05,
+ "loss": 0.4201,
+ "step": 1286
+ },
+ {
+ "epoch": 4.714285714285714,
+ "grad_norm": 46.620235443115234,
+ "learning_rate": 3.526251526251526e-05,
+ "loss": 0.7735,
+ "step": 1287
+ },
+ {
+ "epoch": 4.717948717948718,
+ "grad_norm": 29.740169525146484,
+ "learning_rate": 3.523809523809524e-05,
+ "loss": 0.4753,
+ "step": 1288
+ },
+ {
+ "epoch": 4.721611721611722,
+ "grad_norm": 17.668439865112305,
+ "learning_rate": 3.521367521367521e-05,
+ "loss": 0.0738,
+ "step": 1289
+ },
+ {
+ "epoch": 4.725274725274725,
+ "grad_norm": 29.107847213745117,
+ "learning_rate": 3.518925518925519e-05,
+ "loss": 0.2967,
+ "step": 1290
+ },
+ {
+ "epoch": 4.728937728937729,
+ "grad_norm": 41.70953369140625,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2407,
+ "step": 1291
+ },
+ {
+ "epoch": 4.732600732600733,
+ "grad_norm": 41.50172805786133,
+ "learning_rate": 3.514041514041514e-05,
+ "loss": 0.5012,
+ "step": 1292
+ },
+ {
+ "epoch": 4.736263736263736,
+ "grad_norm": 10.921927452087402,
+ "learning_rate": 3.511599511599512e-05,
+ "loss": 0.0583,
+ "step": 1293
+ },
+ {
+ "epoch": 4.73992673992674,
+ "grad_norm": 10.986832618713379,
+ "learning_rate": 3.5091575091575096e-05,
+ "loss": 0.1684,
+ "step": 1294
+ },
+ {
+ "epoch": 4.743589743589744,
+ "grad_norm": 77.36996459960938,
+ "learning_rate": 3.506715506715507e-05,
+ "loss": 0.1532,
+ "step": 1295
+ },
+ {
+ "epoch": 4.747252747252747,
+ "grad_norm": 2.912205457687378,
+ "learning_rate": 3.5042735042735046e-05,
+ "loss": 0.0178,
+ "step": 1296
+ },
+ {
+ "epoch": 4.750915750915751,
+ "grad_norm": 7.694264888763428,
+ "learning_rate": 3.501831501831502e-05,
+ "loss": 0.0448,
+ "step": 1297
+ },
+ {
+ "epoch": 4.754578754578755,
+ "grad_norm": 59.40597152709961,
+ "learning_rate": 3.499389499389499e-05,
+ "loss": 0.825,
+ "step": 1298
+ },
+ {
+ "epoch": 4.758241758241758,
+ "grad_norm": 44.394065856933594,
+ "learning_rate": 3.496947496947497e-05,
+ "loss": 0.2582,
+ "step": 1299
+ },
+ {
+ "epoch": 4.761904761904762,
+ "grad_norm": 48.07161331176758,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.5681,
+ "step": 1300
+ },
+ {
+ "epoch": 4.7655677655677655,
+ "grad_norm": 47.763275146484375,
+ "learning_rate": 3.492063492063492e-05,
+ "loss": 0.2289,
+ "step": 1301
+ },
+ {
+ "epoch": 4.769230769230769,
+ "grad_norm": 33.30193328857422,
+ "learning_rate": 3.48962148962149e-05,
+ "loss": 0.2646,
+ "step": 1302
+ },
+ {
+ "epoch": 4.772893772893773,
+ "grad_norm": 62.87331008911133,
+ "learning_rate": 3.4871794871794875e-05,
+ "loss": 0.5135,
+ "step": 1303
+ },
+ {
+ "epoch": 4.7765567765567765,
+ "grad_norm": 57.62127685546875,
+ "learning_rate": 3.484737484737485e-05,
+ "loss": 0.6126,
+ "step": 1304
+ },
+ {
+ "epoch": 4.78021978021978,
+ "grad_norm": 35.42237854003906,
+ "learning_rate": 3.4822954822954825e-05,
+ "loss": 0.2312,
+ "step": 1305
+ },
+ {
+ "epoch": 4.783882783882784,
+ "grad_norm": 38.23964309692383,
+ "learning_rate": 3.4798534798534804e-05,
+ "loss": 0.4366,
+ "step": 1306
+ },
+ {
+ "epoch": 4.787545787545787,
+ "grad_norm": 24.94087028503418,
+ "learning_rate": 3.4774114774114776e-05,
+ "loss": 0.2944,
+ "step": 1307
+ },
+ {
+ "epoch": 4.791208791208791,
+ "grad_norm": 43.400047302246094,
+ "learning_rate": 3.4749694749694754e-05,
+ "loss": 0.4749,
+ "step": 1308
+ },
+ {
+ "epoch": 4.794871794871795,
+ "grad_norm": 82.01946258544922,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.6972,
+ "step": 1309
+ },
+ {
+ "epoch": 4.798534798534798,
+ "grad_norm": 25.38723373413086,
+ "learning_rate": 3.47008547008547e-05,
+ "loss": 0.3361,
+ "step": 1310
+ },
+ {
+ "epoch": 4.802197802197802,
+ "grad_norm": 13.022088050842285,
+ "learning_rate": 3.4676434676434676e-05,
+ "loss": 0.1853,
+ "step": 1311
+ },
+ {
+ "epoch": 4.805860805860806,
+ "grad_norm": 30.806135177612305,
+ "learning_rate": 3.465201465201465e-05,
+ "loss": 0.3196,
+ "step": 1312
+ },
+ {
+ "epoch": 4.809523809523809,
+ "grad_norm": 26.30035972595215,
+ "learning_rate": 3.4627594627594626e-05,
+ "loss": 0.2708,
+ "step": 1313
+ },
+ {
+ "epoch": 4.813186813186813,
+ "grad_norm": 6.557223796844482,
+ "learning_rate": 3.4603174603174604e-05,
+ "loss": 0.0815,
+ "step": 1314
+ },
+ {
+ "epoch": 4.816849816849817,
+ "grad_norm": 33.60557174682617,
+ "learning_rate": 3.4578754578754576e-05,
+ "loss": 0.9938,
+ "step": 1315
+ },
+ {
+ "epoch": 4.82051282051282,
+ "grad_norm": 104.2552719116211,
+ "learning_rate": 3.4554334554334555e-05,
+ "loss": 0.1937,
+ "step": 1316
+ },
+ {
+ "epoch": 4.824175824175824,
+ "grad_norm": 41.3105583190918,
+ "learning_rate": 3.452991452991453e-05,
+ "loss": 0.3856,
+ "step": 1317
+ },
+ {
+ "epoch": 4.827838827838828,
+ "grad_norm": 43.52134323120117,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.4823,
+ "step": 1318
+ },
+ {
+ "epoch": 4.831501831501831,
+ "grad_norm": 29.37596893310547,
+ "learning_rate": 3.448107448107448e-05,
+ "loss": 0.1746,
+ "step": 1319
+ },
+ {
+ "epoch": 4.835164835164835,
+ "grad_norm": 13.94152545928955,
+ "learning_rate": 3.445665445665446e-05,
+ "loss": 0.141,
+ "step": 1320
+ },
+ {
+ "epoch": 4.8388278388278385,
+ "grad_norm": 34.95270538330078,
+ "learning_rate": 3.443223443223443e-05,
+ "loss": 0.2701,
+ "step": 1321
+ },
+ {
+ "epoch": 4.842490842490842,
+ "grad_norm": 64.49109649658203,
+ "learning_rate": 3.440781440781441e-05,
+ "loss": 1.095,
+ "step": 1322
+ },
+ {
+ "epoch": 4.846153846153846,
+ "grad_norm": 61.1287727355957,
+ "learning_rate": 3.4383394383394383e-05,
+ "loss": 0.2083,
+ "step": 1323
+ },
+ {
+ "epoch": 4.8498168498168495,
+ "grad_norm": 62.69855499267578,
+ "learning_rate": 3.4358974358974355e-05,
+ "loss": 0.5077,
+ "step": 1324
+ },
+ {
+ "epoch": 4.853479853479853,
+ "grad_norm": 92.53154754638672,
+ "learning_rate": 3.4334554334554334e-05,
+ "loss": 0.7287,
+ "step": 1325
+ },
+ {
+ "epoch": 4.857142857142857,
+ "grad_norm": 98.1663589477539,
+ "learning_rate": 3.431013431013431e-05,
+ "loss": 1.2834,
+ "step": 1326
+ },
+ {
+ "epoch": 4.860805860805861,
+ "grad_norm": 52.24921417236328,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.8187,
+ "step": 1327
+ },
+ {
+ "epoch": 4.864468864468865,
+ "grad_norm": 60.897544860839844,
+ "learning_rate": 3.426129426129426e-05,
+ "loss": 1.5861,
+ "step": 1328
+ },
+ {
+ "epoch": 4.868131868131869,
+ "grad_norm": 21.70830535888672,
+ "learning_rate": 3.423687423687424e-05,
+ "loss": 0.1459,
+ "step": 1329
+ },
+ {
+ "epoch": 4.871794871794872,
+ "grad_norm": 47.87598419189453,
+ "learning_rate": 3.421245421245421e-05,
+ "loss": 1.0044,
+ "step": 1330
+ },
+ {
+ "epoch": 4.875457875457876,
+ "grad_norm": 172.73670959472656,
+ "learning_rate": 3.418803418803419e-05,
+ "loss": 1.4617,
+ "step": 1331
+ },
+ {
+ "epoch": 4.8791208791208796,
+ "grad_norm": 154.93960571289062,
+ "learning_rate": 3.416361416361417e-05,
+ "loss": 1.7488,
+ "step": 1332
+ },
+ {
+ "epoch": 4.882783882783883,
+ "grad_norm": 73.78408813476562,
+ "learning_rate": 3.413919413919414e-05,
+ "loss": 0.5789,
+ "step": 1333
+ },
+ {
+ "epoch": 4.886446886446887,
+ "grad_norm": 35.67369079589844,
+ "learning_rate": 3.411477411477412e-05,
+ "loss": 0.6101,
+ "step": 1334
+ },
+ {
+ "epoch": 4.8901098901098905,
+ "grad_norm": 54.61326599121094,
+ "learning_rate": 3.40903540903541e-05,
+ "loss": 0.7433,
+ "step": 1335
+ },
+ {
+ "epoch": 4.893772893772894,
+ "grad_norm": 28.492923736572266,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.7661,
+ "step": 1336
+ },
+ {
+ "epoch": 4.897435897435898,
+ "grad_norm": 17.2525634765625,
+ "learning_rate": 3.404151404151404e-05,
+ "loss": 0.2423,
+ "step": 1337
+ },
+ {
+ "epoch": 4.9010989010989015,
+ "grad_norm": 55.46605682373047,
+ "learning_rate": 3.401709401709401e-05,
+ "loss": 0.4419,
+ "step": 1338
+ },
+ {
+ "epoch": 4.904761904761905,
+ "grad_norm": 23.03455352783203,
+ "learning_rate": 3.399267399267399e-05,
+ "loss": 0.3046,
+ "step": 1339
+ },
+ {
+ "epoch": 4.908424908424909,
+ "grad_norm": 20.186574935913086,
+ "learning_rate": 3.396825396825397e-05,
+ "loss": 0.3712,
+ "step": 1340
+ },
+ {
+ "epoch": 4.912087912087912,
+ "grad_norm": 22.702407836914062,
+ "learning_rate": 3.394383394383394e-05,
+ "loss": 0.4481,
+ "step": 1341
+ },
+ {
+ "epoch": 4.915750915750916,
+ "grad_norm": 25.723426818847656,
+ "learning_rate": 3.391941391941392e-05,
+ "loss": 0.1832,
+ "step": 1342
+ },
+ {
+ "epoch": 4.91941391941392,
+ "grad_norm": 18.955692291259766,
+ "learning_rate": 3.38949938949939e-05,
+ "loss": 0.1334,
+ "step": 1343
+ },
+ {
+ "epoch": 4.923076923076923,
+ "grad_norm": 20.29511833190918,
+ "learning_rate": 3.387057387057387e-05,
+ "loss": 0.1811,
+ "step": 1344
+ },
+ {
+ "epoch": 4.926739926739927,
+ "grad_norm": 22.23061752319336,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.2643,
+ "step": 1345
+ },
+ {
+ "epoch": 4.930402930402931,
+ "grad_norm": 52.057132720947266,
+ "learning_rate": 3.382173382173383e-05,
+ "loss": 0.5874,
+ "step": 1346
+ },
+ {
+ "epoch": 4.934065934065934,
+ "grad_norm": 66.5381851196289,
+ "learning_rate": 3.37973137973138e-05,
+ "loss": 0.4993,
+ "step": 1347
+ },
+ {
+ "epoch": 4.937728937728938,
+ "grad_norm": 8.25474739074707,
+ "learning_rate": 3.377289377289378e-05,
+ "loss": 0.0263,
+ "step": 1348
+ },
+ {
+ "epoch": 4.941391941391942,
+ "grad_norm": 31.373722076416016,
+ "learning_rate": 3.374847374847375e-05,
+ "loss": 0.288,
+ "step": 1349
+ },
+ {
+ "epoch": 4.945054945054945,
+ "grad_norm": 51.15471267700195,
+ "learning_rate": 3.372405372405372e-05,
+ "loss": 0.7586,
+ "step": 1350
+ },
+ {
+ "epoch": 4.948717948717949,
+ "grad_norm": 39.163639068603516,
+ "learning_rate": 3.36996336996337e-05,
+ "loss": 1.221,
+ "step": 1351
+ },
+ {
+ "epoch": 4.9523809523809526,
+ "grad_norm": 11.033390998840332,
+ "learning_rate": 3.367521367521368e-05,
+ "loss": 0.069,
+ "step": 1352
+ },
+ {
+ "epoch": 4.956043956043956,
+ "grad_norm": 24.14516830444336,
+ "learning_rate": 3.365079365079365e-05,
+ "loss": 0.6001,
+ "step": 1353
+ },
+ {
+ "epoch": 4.95970695970696,
+ "grad_norm": 36.211891174316406,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.5598,
+ "step": 1354
+ },
+ {
+ "epoch": 4.9633699633699635,
+ "grad_norm": 23.723434448242188,
+ "learning_rate": 3.3601953601953606e-05,
+ "loss": 0.3133,
+ "step": 1355
+ },
+ {
+ "epoch": 4.967032967032967,
+ "grad_norm": 21.853551864624023,
+ "learning_rate": 3.357753357753358e-05,
+ "loss": 0.1974,
+ "step": 1356
+ },
+ {
+ "epoch": 4.970695970695971,
+ "grad_norm": 25.392358779907227,
+ "learning_rate": 3.3553113553113556e-05,
+ "loss": 0.5114,
+ "step": 1357
+ },
+ {
+ "epoch": 4.9743589743589745,
+ "grad_norm": 94.81107330322266,
+ "learning_rate": 3.3528693528693534e-05,
+ "loss": 0.4609,
+ "step": 1358
+ },
+ {
+ "epoch": 4.978021978021978,
+ "grad_norm": 24.487186431884766,
+ "learning_rate": 3.3504273504273506e-05,
+ "loss": 0.6613,
+ "step": 1359
+ },
+ {
+ "epoch": 4.981684981684982,
+ "grad_norm": 18.870473861694336,
+ "learning_rate": 3.3479853479853485e-05,
+ "loss": 0.1229,
+ "step": 1360
+ },
+ {
+ "epoch": 4.985347985347985,
+ "grad_norm": 17.630233764648438,
+ "learning_rate": 3.3455433455433456e-05,
+ "loss": 0.1836,
+ "step": 1361
+ },
+ {
+ "epoch": 4.989010989010989,
+ "grad_norm": 24.850299835205078,
+ "learning_rate": 3.343101343101343e-05,
+ "loss": 0.4499,
+ "step": 1362
+ },
+ {
+ "epoch": 4.992673992673993,
+ "grad_norm": 13.472710609436035,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.2,
+ "step": 1363
+ },
+ {
+ "epoch": 4.996336996336996,
+ "grad_norm": 25.112987518310547,
+ "learning_rate": 3.338217338217338e-05,
+ "loss": 0.2978,
+ "step": 1364
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 20.6419620513916,
+ "learning_rate": 3.3357753357753356e-05,
+ "loss": 0.1711,
+ "step": 1365
+ },
+ {
+ "epoch": 5.003663003663004,
+ "grad_norm": 20.868810653686523,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.1433,
+ "step": 1366
+ },
+ {
+ "epoch": 5.007326007326007,
+ "grad_norm": 15.846084594726562,
+ "learning_rate": 3.3308913308913307e-05,
+ "loss": 0.2174,
+ "step": 1367
+ },
+ {
+ "epoch": 5.010989010989011,
+ "grad_norm": 29.00075912475586,
+ "learning_rate": 3.3284493284493285e-05,
+ "loss": 0.5032,
+ "step": 1368
+ },
+ {
+ "epoch": 5.014652014652015,
+ "grad_norm": 33.520896911621094,
+ "learning_rate": 3.3260073260073264e-05,
+ "loss": 0.4061,
+ "step": 1369
+ },
+ {
+ "epoch": 5.018315018315018,
+ "grad_norm": 12.909339904785156,
+ "learning_rate": 3.3235653235653235e-05,
+ "loss": 0.0953,
+ "step": 1370
+ },
+ {
+ "epoch": 5.021978021978022,
+ "grad_norm": 0.2602078318595886,
+ "learning_rate": 3.3211233211233214e-05,
+ "loss": 0.0012,
+ "step": 1371
+ },
+ {
+ "epoch": 5.0256410256410255,
+ "grad_norm": 38.391422271728516,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 0.1825,
+ "step": 1372
+ },
+ {
+ "epoch": 5.029304029304029,
+ "grad_norm": 70.76541900634766,
+ "learning_rate": 3.3162393162393164e-05,
+ "loss": 0.846,
+ "step": 1373
+ },
+ {
+ "epoch": 5.032967032967033,
+ "grad_norm": 17.12116813659668,
+ "learning_rate": 3.3137973137973135e-05,
+ "loss": 0.0827,
+ "step": 1374
+ },
+ {
+ "epoch": 5.0366300366300365,
+ "grad_norm": 10.847224235534668,
+ "learning_rate": 3.3113553113553114e-05,
+ "loss": 0.0598,
+ "step": 1375
+ },
+ {
+ "epoch": 5.04029304029304,
+ "grad_norm": 31.552082061767578,
+ "learning_rate": 3.3089133089133086e-05,
+ "loss": 0.4466,
+ "step": 1376
+ },
+ {
+ "epoch": 5.043956043956044,
+ "grad_norm": 15.32805061340332,
+ "learning_rate": 3.3064713064713064e-05,
+ "loss": 0.0502,
+ "step": 1377
+ },
+ {
+ "epoch": 5.0476190476190474,
+ "grad_norm": 80.18537139892578,
+ "learning_rate": 3.304029304029304e-05,
+ "loss": 0.7377,
+ "step": 1378
+ },
+ {
+ "epoch": 5.051282051282051,
+ "grad_norm": 11.73173713684082,
+ "learning_rate": 3.3015873015873014e-05,
+ "loss": 0.1129,
+ "step": 1379
+ },
+ {
+ "epoch": 5.054945054945055,
+ "grad_norm": 46.249935150146484,
+ "learning_rate": 3.299145299145299e-05,
+ "loss": 0.5367,
+ "step": 1380
+ },
+ {
+ "epoch": 5.058608058608058,
+ "grad_norm": 9.185178756713867,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.0453,
+ "step": 1381
+ },
+ {
+ "epoch": 5.062271062271062,
+ "grad_norm": 40.83237838745117,
+ "learning_rate": 3.294261294261294e-05,
+ "loss": 0.1428,
+ "step": 1382
+ },
+ {
+ "epoch": 5.065934065934066,
+ "grad_norm": 32.31568908691406,
+ "learning_rate": 3.291819291819292e-05,
+ "loss": 0.3131,
+ "step": 1383
+ },
+ {
+ "epoch": 5.069597069597069,
+ "grad_norm": 5.372808456420898,
+ "learning_rate": 3.28937728937729e-05,
+ "loss": 0.0452,
+ "step": 1384
+ },
+ {
+ "epoch": 5.073260073260073,
+ "grad_norm": 3.0900495052337646,
+ "learning_rate": 3.286935286935287e-05,
+ "loss": 0.0175,
+ "step": 1385
+ },
+ {
+ "epoch": 5.076923076923077,
+ "grad_norm": 25.293724060058594,
+ "learning_rate": 3.284493284493285e-05,
+ "loss": 0.2162,
+ "step": 1386
+ },
+ {
+ "epoch": 5.08058608058608,
+ "grad_norm": 26.231664657592773,
+ "learning_rate": 3.282051282051282e-05,
+ "loss": 0.1764,
+ "step": 1387
+ },
+ {
+ "epoch": 5.084249084249084,
+ "grad_norm": 24.69008445739746,
+ "learning_rate": 3.279609279609279e-05,
+ "loss": 0.1019,
+ "step": 1388
+ },
+ {
+ "epoch": 5.087912087912088,
+ "grad_norm": 12.522343635559082,
+ "learning_rate": 3.277167277167277e-05,
+ "loss": 0.0424,
+ "step": 1389
+ },
+ {
+ "epoch": 5.091575091575091,
+ "grad_norm": 28.68439292907715,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.3441,
+ "step": 1390
+ },
+ {
+ "epoch": 5.095238095238095,
+ "grad_norm": 9.312751770019531,
+ "learning_rate": 3.272283272283272e-05,
+ "loss": 0.0675,
+ "step": 1391
+ },
+ {
+ "epoch": 5.0989010989010985,
+ "grad_norm": 12.041552543640137,
+ "learning_rate": 3.26984126984127e-05,
+ "loss": 0.049,
+ "step": 1392
+ },
+ {
+ "epoch": 5.102564102564102,
+ "grad_norm": 36.706031799316406,
+ "learning_rate": 3.267399267399267e-05,
+ "loss": 0.2947,
+ "step": 1393
+ },
+ {
+ "epoch": 5.106227106227106,
+ "grad_norm": 0.5009213089942932,
+ "learning_rate": 3.264957264957265e-05,
+ "loss": 0.0028,
+ "step": 1394
+ },
+ {
+ "epoch": 5.1098901098901095,
+ "grad_norm": 53.88454818725586,
+ "learning_rate": 3.262515262515263e-05,
+ "loss": 0.5004,
+ "step": 1395
+ },
+ {
+ "epoch": 5.113553113553113,
+ "grad_norm": 11.917198181152344,
+ "learning_rate": 3.26007326007326e-05,
+ "loss": 0.0734,
+ "step": 1396
+ },
+ {
+ "epoch": 5.117216117216117,
+ "grad_norm": 58.02888107299805,
+ "learning_rate": 3.257631257631258e-05,
+ "loss": 0.7099,
+ "step": 1397
+ },
+ {
+ "epoch": 5.1208791208791204,
+ "grad_norm": 18.3216609954834,
+ "learning_rate": 3.255189255189256e-05,
+ "loss": 0.1162,
+ "step": 1398
+ },
+ {
+ "epoch": 5.124542124542124,
+ "grad_norm": 7.598775863647461,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 0.0341,
+ "step": 1399
+ },
+ {
+ "epoch": 5.128205128205128,
+ "grad_norm": 199.40313720703125,
+ "learning_rate": 3.25030525030525e-05,
+ "loss": 0.3829,
+ "step": 1400
+ },
+ {
+ "epoch": 5.131868131868132,
+ "grad_norm": 6.528984546661377,
+ "learning_rate": 3.247863247863248e-05,
+ "loss": 0.041,
+ "step": 1401
+ },
+ {
+ "epoch": 5.135531135531136,
+ "grad_norm": 28.80277442932129,
+ "learning_rate": 3.245421245421245e-05,
+ "loss": 0.3511,
+ "step": 1402
+ },
+ {
+ "epoch": 5.13919413919414,
+ "grad_norm": 5.08656120300293,
+ "learning_rate": 3.242979242979243e-05,
+ "loss": 0.0403,
+ "step": 1403
+ },
+ {
+ "epoch": 5.142857142857143,
+ "grad_norm": 16.86358070373535,
+ "learning_rate": 3.240537240537241e-05,
+ "loss": 0.1676,
+ "step": 1404
+ },
+ {
+ "epoch": 5.146520146520147,
+ "grad_norm": 46.099613189697266,
+ "learning_rate": 3.238095238095238e-05,
+ "loss": 0.8096,
+ "step": 1405
+ },
+ {
+ "epoch": 5.1501831501831505,
+ "grad_norm": 26.01686668395996,
+ "learning_rate": 3.235653235653236e-05,
+ "loss": 0.1283,
+ "step": 1406
+ },
+ {
+ "epoch": 5.153846153846154,
+ "grad_norm": 4.826385498046875,
+ "learning_rate": 3.2332112332112336e-05,
+ "loss": 0.0328,
+ "step": 1407
+ },
+ {
+ "epoch": 5.157509157509158,
+ "grad_norm": 34.697593688964844,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.1306,
+ "step": 1408
+ },
+ {
+ "epoch": 5.1611721611721615,
+ "grad_norm": 21.331661224365234,
+ "learning_rate": 3.2283272283272286e-05,
+ "loss": 0.1302,
+ "step": 1409
+ },
+ {
+ "epoch": 5.164835164835165,
+ "grad_norm": 9.991851806640625,
+ "learning_rate": 3.2258852258852265e-05,
+ "loss": 0.0441,
+ "step": 1410
+ },
+ {
+ "epoch": 5.168498168498169,
+ "grad_norm": 26.641136169433594,
+ "learning_rate": 3.2234432234432237e-05,
+ "loss": 0.0894,
+ "step": 1411
+ },
+ {
+ "epoch": 5.172161172161172,
+ "grad_norm": 24.541366577148438,
+ "learning_rate": 3.2210012210012215e-05,
+ "loss": 0.1026,
+ "step": 1412
+ },
+ {
+ "epoch": 5.175824175824176,
+ "grad_norm": 44.62923049926758,
+ "learning_rate": 3.218559218559218e-05,
+ "loss": 0.1887,
+ "step": 1413
+ },
+ {
+ "epoch": 5.17948717948718,
+ "grad_norm": 19.28236198425293,
+ "learning_rate": 3.216117216117216e-05,
+ "loss": 0.0631,
+ "step": 1414
+ },
+ {
+ "epoch": 5.183150183150183,
+ "grad_norm": 10.39486026763916,
+ "learning_rate": 3.213675213675214e-05,
+ "loss": 0.0614,
+ "step": 1415
+ },
+ {
+ "epoch": 5.186813186813187,
+ "grad_norm": 32.476009368896484,
+ "learning_rate": 3.211233211233211e-05,
+ "loss": 0.2238,
+ "step": 1416
+ },
+ {
+ "epoch": 5.190476190476191,
+ "grad_norm": 9.828605651855469,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.0589,
+ "step": 1417
+ },
+ {
+ "epoch": 5.194139194139194,
+ "grad_norm": 50.0748291015625,
+ "learning_rate": 3.2063492063492065e-05,
+ "loss": 0.8225,
+ "step": 1418
+ },
+ {
+ "epoch": 5.197802197802198,
+ "grad_norm": 31.925779342651367,
+ "learning_rate": 3.203907203907204e-05,
+ "loss": 0.1824,
+ "step": 1419
+ },
+ {
+ "epoch": 5.201465201465202,
+ "grad_norm": 108.24534606933594,
+ "learning_rate": 3.2014652014652016e-05,
+ "loss": 2.3808,
+ "step": 1420
+ },
+ {
+ "epoch": 5.205128205128205,
+ "grad_norm": 54.39910888671875,
+ "learning_rate": 3.1990231990231994e-05,
+ "loss": 0.614,
+ "step": 1421
+ },
+ {
+ "epoch": 5.208791208791209,
+ "grad_norm": 13.70672607421875,
+ "learning_rate": 3.1965811965811966e-05,
+ "loss": 0.0366,
+ "step": 1422
+ },
+ {
+ "epoch": 5.212454212454213,
+ "grad_norm": 19.851043701171875,
+ "learning_rate": 3.1941391941391944e-05,
+ "loss": 0.1847,
+ "step": 1423
+ },
+ {
+ "epoch": 5.216117216117216,
+ "grad_norm": 1.041467547416687,
+ "learning_rate": 3.191697191697192e-05,
+ "loss": 0.0062,
+ "step": 1424
+ },
+ {
+ "epoch": 5.21978021978022,
+ "grad_norm": 10.629105567932129,
+ "learning_rate": 3.1892551892551894e-05,
+ "loss": 0.1058,
+ "step": 1425
+ },
+ {
+ "epoch": 5.2234432234432235,
+ "grad_norm": 25.597496032714844,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.1786,
+ "step": 1426
+ },
+ {
+ "epoch": 5.227106227106227,
+ "grad_norm": 21.409902572631836,
+ "learning_rate": 3.1843711843711844e-05,
+ "loss": 0.1354,
+ "step": 1427
+ },
+ {
+ "epoch": 5.230769230769231,
+ "grad_norm": 252.64599609375,
+ "learning_rate": 3.1819291819291816e-05,
+ "loss": 0.476,
+ "step": 1428
+ },
+ {
+ "epoch": 5.2344322344322345,
+ "grad_norm": 22.15670394897461,
+ "learning_rate": 3.1794871794871795e-05,
+ "loss": 0.2111,
+ "step": 1429
+ },
+ {
+ "epoch": 5.238095238095238,
+ "grad_norm": 37.93739700317383,
+ "learning_rate": 3.177045177045177e-05,
+ "loss": 0.391,
+ "step": 1430
+ },
+ {
+ "epoch": 5.241758241758242,
+ "grad_norm": 25.364606857299805,
+ "learning_rate": 3.1746031746031745e-05,
+ "loss": 0.3365,
+ "step": 1431
+ },
+ {
+ "epoch": 5.245421245421245,
+ "grad_norm": 20.658681869506836,
+ "learning_rate": 3.172161172161172e-05,
+ "loss": 0.2419,
+ "step": 1432
+ },
+ {
+ "epoch": 5.249084249084249,
+ "grad_norm": 11.507100105285645,
+ "learning_rate": 3.16971916971917e-05,
+ "loss": 0.074,
+ "step": 1433
+ },
+ {
+ "epoch": 5.252747252747253,
+ "grad_norm": 32.7891845703125,
+ "learning_rate": 3.167277167277167e-05,
+ "loss": 0.261,
+ "step": 1434
+ },
+ {
+ "epoch": 5.256410256410256,
+ "grad_norm": 10.153932571411133,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 0.0317,
+ "step": 1435
+ },
+ {
+ "epoch": 5.26007326007326,
+ "grad_norm": 16.879608154296875,
+ "learning_rate": 3.162393162393163e-05,
+ "loss": 0.0668,
+ "step": 1436
+ },
+ {
+ "epoch": 5.263736263736264,
+ "grad_norm": 5.040280818939209,
+ "learning_rate": 3.15995115995116e-05,
+ "loss": 0.0197,
+ "step": 1437
+ },
+ {
+ "epoch": 5.267399267399267,
+ "grad_norm": 32.5413818359375,
+ "learning_rate": 3.157509157509158e-05,
+ "loss": 0.2659,
+ "step": 1438
+ },
+ {
+ "epoch": 5.271062271062271,
+ "grad_norm": 54.41200637817383,
+ "learning_rate": 3.1550671550671545e-05,
+ "loss": 0.6863,
+ "step": 1439
+ },
+ {
+ "epoch": 5.274725274725275,
+ "grad_norm": 13.049643516540527,
+ "learning_rate": 3.1526251526251524e-05,
+ "loss": 0.0808,
+ "step": 1440
+ },
+ {
+ "epoch": 5.278388278388278,
+ "grad_norm": 37.76680374145508,
+ "learning_rate": 3.15018315018315e-05,
+ "loss": 0.2917,
+ "step": 1441
+ },
+ {
+ "epoch": 5.282051282051282,
+ "grad_norm": 22.97549057006836,
+ "learning_rate": 3.1477411477411474e-05,
+ "loss": 0.1115,
+ "step": 1442
+ },
+ {
+ "epoch": 5.285714285714286,
+ "grad_norm": 36.935115814208984,
+ "learning_rate": 3.145299145299145e-05,
+ "loss": 0.3719,
+ "step": 1443
+ },
+ {
+ "epoch": 5.289377289377289,
+ "grad_norm": 50.726070404052734,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.2635,
+ "step": 1444
+ },
+ {
+ "epoch": 5.293040293040293,
+ "grad_norm": 31.919862747192383,
+ "learning_rate": 3.14041514041514e-05,
+ "loss": 0.2158,
+ "step": 1445
+ },
+ {
+ "epoch": 5.2967032967032965,
+ "grad_norm": 2.463076114654541,
+ "learning_rate": 3.137973137973138e-05,
+ "loss": 0.0125,
+ "step": 1446
+ },
+ {
+ "epoch": 5.3003663003663,
+ "grad_norm": 12.970477104187012,
+ "learning_rate": 3.135531135531136e-05,
+ "loss": 0.0701,
+ "step": 1447
+ },
+ {
+ "epoch": 5.304029304029304,
+ "grad_norm": 30.649160385131836,
+ "learning_rate": 3.133089133089133e-05,
+ "loss": 0.3443,
+ "step": 1448
+ },
+ {
+ "epoch": 5.3076923076923075,
+ "grad_norm": 50.362281799316406,
+ "learning_rate": 3.130647130647131e-05,
+ "loss": 0.2792,
+ "step": 1449
+ },
+ {
+ "epoch": 5.311355311355311,
+ "grad_norm": 25.041845321655273,
+ "learning_rate": 3.128205128205129e-05,
+ "loss": 0.2127,
+ "step": 1450
+ },
+ {
+ "epoch": 5.315018315018315,
+ "grad_norm": 44.749515533447266,
+ "learning_rate": 3.125763125763126e-05,
+ "loss": 0.5353,
+ "step": 1451
+ },
+ {
+ "epoch": 5.318681318681318,
+ "grad_norm": 66.30032348632812,
+ "learning_rate": 3.123321123321123e-05,
+ "loss": 0.5775,
+ "step": 1452
+ },
+ {
+ "epoch": 5.322344322344322,
+ "grad_norm": 3.905022382736206,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.0229,
+ "step": 1453
+ },
+ {
+ "epoch": 5.326007326007326,
+ "grad_norm": 50.520259857177734,
+ "learning_rate": 3.118437118437118e-05,
+ "loss": 0.6539,
+ "step": 1454
+ },
+ {
+ "epoch": 5.329670329670329,
+ "grad_norm": 12.567275047302246,
+ "learning_rate": 3.115995115995116e-05,
+ "loss": 0.0493,
+ "step": 1455
+ },
+ {
+ "epoch": 5.333333333333333,
+ "grad_norm": 24.11554718017578,
+ "learning_rate": 3.113553113553114e-05,
+ "loss": 0.401,
+ "step": 1456
+ },
+ {
+ "epoch": 5.336996336996337,
+ "grad_norm": 6.885409832000732,
+ "learning_rate": 3.111111111111111e-05,
+ "loss": 0.022,
+ "step": 1457
+ },
+ {
+ "epoch": 5.34065934065934,
+ "grad_norm": 30.46776008605957,
+ "learning_rate": 3.108669108669109e-05,
+ "loss": 0.1968,
+ "step": 1458
+ },
+ {
+ "epoch": 5.344322344322344,
+ "grad_norm": 54.408790588378906,
+ "learning_rate": 3.106227106227107e-05,
+ "loss": 0.3258,
+ "step": 1459
+ },
+ {
+ "epoch": 5.347985347985348,
+ "grad_norm": 43.48060989379883,
+ "learning_rate": 3.103785103785104e-05,
+ "loss": 0.2663,
+ "step": 1460
+ },
+ {
+ "epoch": 5.351648351648351,
+ "grad_norm": 34.339962005615234,
+ "learning_rate": 3.101343101343102e-05,
+ "loss": 0.3313,
+ "step": 1461
+ },
+ {
+ "epoch": 5.355311355311355,
+ "grad_norm": 35.54948806762695,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.2377,
+ "step": 1462
+ },
+ {
+ "epoch": 5.358974358974359,
+ "grad_norm": 30.956071853637695,
+ "learning_rate": 3.096459096459097e-05,
+ "loss": 0.2388,
+ "step": 1463
+ },
+ {
+ "epoch": 5.362637362637362,
+ "grad_norm": 15.458950996398926,
+ "learning_rate": 3.094017094017094e-05,
+ "loss": 0.1196,
+ "step": 1464
+ },
+ {
+ "epoch": 5.366300366300366,
+ "grad_norm": 56.893463134765625,
+ "learning_rate": 3.091575091575091e-05,
+ "loss": 0.5377,
+ "step": 1465
+ },
+ {
+ "epoch": 5.36996336996337,
+ "grad_norm": 31.90789794921875,
+ "learning_rate": 3.089133089133089e-05,
+ "loss": 0.5008,
+ "step": 1466
+ },
+ {
+ "epoch": 5.373626373626374,
+ "grad_norm": 18.772607803344727,
+ "learning_rate": 3.086691086691087e-05,
+ "loss": 0.1838,
+ "step": 1467
+ },
+ {
+ "epoch": 5.377289377289378,
+ "grad_norm": 1.7131195068359375,
+ "learning_rate": 3.084249084249084e-05,
+ "loss": 0.0055,
+ "step": 1468
+ },
+ {
+ "epoch": 5.380952380952381,
+ "grad_norm": 6.398471355438232,
+ "learning_rate": 3.081807081807082e-05,
+ "loss": 0.0309,
+ "step": 1469
+ },
+ {
+ "epoch": 5.384615384615385,
+ "grad_norm": 13.847221374511719,
+ "learning_rate": 3.0793650793650796e-05,
+ "loss": 0.0785,
+ "step": 1470
+ },
+ {
+ "epoch": 5.388278388278389,
+ "grad_norm": 46.000179290771484,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.4114,
+ "step": 1471
+ },
+ {
+ "epoch": 5.391941391941392,
+ "grad_norm": 39.47720718383789,
+ "learning_rate": 3.0744810744810746e-05,
+ "loss": 0.9189,
+ "step": 1472
+ },
+ {
+ "epoch": 5.395604395604396,
+ "grad_norm": 30.588356018066406,
+ "learning_rate": 3.0720390720390724e-05,
+ "loss": 0.372,
+ "step": 1473
+ },
+ {
+ "epoch": 5.3992673992674,
+ "grad_norm": 83.61669921875,
+ "learning_rate": 3.0695970695970696e-05,
+ "loss": 0.6729,
+ "step": 1474
+ },
+ {
+ "epoch": 5.402930402930403,
+ "grad_norm": 14.384758949279785,
+ "learning_rate": 3.0671550671550675e-05,
+ "loss": 0.0825,
+ "step": 1475
+ },
+ {
+ "epoch": 5.406593406593407,
+ "grad_norm": 41.9291877746582,
+ "learning_rate": 3.064713064713065e-05,
+ "loss": 0.2128,
+ "step": 1476
+ },
+ {
+ "epoch": 5.410256410256411,
+ "grad_norm": 31.03643035888672,
+ "learning_rate": 3.062271062271062e-05,
+ "loss": 0.6978,
+ "step": 1477
+ },
+ {
+ "epoch": 5.413919413919414,
+ "grad_norm": 43.225547790527344,
+ "learning_rate": 3.0598290598290596e-05,
+ "loss": 0.6546,
+ "step": 1478
+ },
+ {
+ "epoch": 5.417582417582418,
+ "grad_norm": 37.172611236572266,
+ "learning_rate": 3.0573870573870575e-05,
+ "loss": 0.5024,
+ "step": 1479
+ },
+ {
+ "epoch": 5.4212454212454215,
+ "grad_norm": 52.93882369995117,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.9954,
+ "step": 1480
+ },
+ {
+ "epoch": 5.424908424908425,
+ "grad_norm": 30.838403701782227,
+ "learning_rate": 3.0525030525030525e-05,
+ "loss": 0.2539,
+ "step": 1481
+ },
+ {
+ "epoch": 5.428571428571429,
+ "grad_norm": 8.876139640808105,
+ "learning_rate": 3.0500610500610503e-05,
+ "loss": 0.0635,
+ "step": 1482
+ },
+ {
+ "epoch": 5.4322344322344325,
+ "grad_norm": 14.970293998718262,
+ "learning_rate": 3.0476190476190475e-05,
+ "loss": 0.1337,
+ "step": 1483
+ },
+ {
+ "epoch": 5.435897435897436,
+ "grad_norm": 29.44560432434082,
+ "learning_rate": 3.0451770451770454e-05,
+ "loss": 0.3719,
+ "step": 1484
+ },
+ {
+ "epoch": 5.43956043956044,
+ "grad_norm": 3.793294668197632,
+ "learning_rate": 3.0427350427350432e-05,
+ "loss": 0.0278,
+ "step": 1485
+ },
+ {
+ "epoch": 5.443223443223443,
+ "grad_norm": 37.418731689453125,
+ "learning_rate": 3.0402930402930404e-05,
+ "loss": 0.5153,
+ "step": 1486
+ },
+ {
+ "epoch": 5.446886446886447,
+ "grad_norm": 26.718324661254883,
+ "learning_rate": 3.037851037851038e-05,
+ "loss": 0.388,
+ "step": 1487
+ },
+ {
+ "epoch": 5.450549450549451,
+ "grad_norm": 28.463197708129883,
+ "learning_rate": 3.0354090354090357e-05,
+ "loss": 0.1956,
+ "step": 1488
+ },
+ {
+ "epoch": 5.454212454212454,
+ "grad_norm": 45.390602111816406,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.3694,
+ "step": 1489
+ },
+ {
+ "epoch": 5.457875457875458,
+ "grad_norm": 33.20753860473633,
+ "learning_rate": 3.0305250305250307e-05,
+ "loss": 0.2946,
+ "step": 1490
+ },
+ {
+ "epoch": 5.461538461538462,
+ "grad_norm": 66.42272186279297,
+ "learning_rate": 3.028083028083028e-05,
+ "loss": 0.9082,
+ "step": 1491
+ },
+ {
+ "epoch": 5.465201465201465,
+ "grad_norm": 33.85127258300781,
+ "learning_rate": 3.0256410256410257e-05,
+ "loss": 0.2362,
+ "step": 1492
+ },
+ {
+ "epoch": 5.468864468864469,
+ "grad_norm": 51.019256591796875,
+ "learning_rate": 3.0231990231990233e-05,
+ "loss": 0.5446,
+ "step": 1493
+ },
+ {
+ "epoch": 5.472527472527473,
+ "grad_norm": 30.998769760131836,
+ "learning_rate": 3.0207570207570204e-05,
+ "loss": 0.4739,
+ "step": 1494
+ },
+ {
+ "epoch": 5.476190476190476,
+ "grad_norm": 44.187957763671875,
+ "learning_rate": 3.0183150183150183e-05,
+ "loss": 0.3439,
+ "step": 1495
+ },
+ {
+ "epoch": 5.47985347985348,
+ "grad_norm": 50.70987319946289,
+ "learning_rate": 3.015873015873016e-05,
+ "loss": 0.1625,
+ "step": 1496
+ },
+ {
+ "epoch": 5.483516483516484,
+ "grad_norm": 33.66750717163086,
+ "learning_rate": 3.0134310134310133e-05,
+ "loss": 0.1927,
+ "step": 1497
+ },
+ {
+ "epoch": 5.487179487179487,
+ "grad_norm": 41.02281951904297,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.4102,
+ "step": 1498
+ },
+ {
+ "epoch": 5.490842490842491,
+ "grad_norm": 10.570262908935547,
+ "learning_rate": 3.008547008547009e-05,
+ "loss": 0.0664,
+ "step": 1499
+ },
+ {
+ "epoch": 5.4945054945054945,
+ "grad_norm": 54.08304214477539,
+ "learning_rate": 3.0061050061050058e-05,
+ "loss": 0.9224,
+ "step": 1500
+ },
+ {
+ "epoch": 5.498168498168498,
+ "grad_norm": 67.29845428466797,
+ "learning_rate": 3.0036630036630036e-05,
+ "loss": 0.8804,
+ "step": 1501
+ },
+ {
+ "epoch": 5.501831501831502,
+ "grad_norm": 13.707988739013672,
+ "learning_rate": 3.0012210012210015e-05,
+ "loss": 0.054,
+ "step": 1502
+ },
+ {
+ "epoch": 5.5054945054945055,
+ "grad_norm": 23.2605037689209,
+ "learning_rate": 2.998778998778999e-05,
+ "loss": 0.2343,
+ "step": 1503
+ },
+ {
+ "epoch": 5.509157509157509,
+ "grad_norm": 34.8508186340332,
+ "learning_rate": 2.9963369963369965e-05,
+ "loss": 0.4967,
+ "step": 1504
+ },
+ {
+ "epoch": 5.512820512820513,
+ "grad_norm": 20.457887649536133,
+ "learning_rate": 2.993894993894994e-05,
+ "loss": 0.1206,
+ "step": 1505
+ },
+ {
+ "epoch": 5.516483516483516,
+ "grad_norm": 34.01540756225586,
+ "learning_rate": 2.9914529914529915e-05,
+ "loss": 0.5167,
+ "step": 1506
+ },
+ {
+ "epoch": 5.52014652014652,
+ "grad_norm": 20.356525421142578,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.1363,
+ "step": 1507
+ },
+ {
+ "epoch": 5.523809523809524,
+ "grad_norm": 42.172054290771484,
+ "learning_rate": 2.9865689865689865e-05,
+ "loss": 0.2954,
+ "step": 1508
+ },
+ {
+ "epoch": 5.527472527472527,
+ "grad_norm": 16.814903259277344,
+ "learning_rate": 2.984126984126984e-05,
+ "loss": 0.0987,
+ "step": 1509
+ },
+ {
+ "epoch": 5.531135531135531,
+ "grad_norm": 34.35768508911133,
+ "learning_rate": 2.981684981684982e-05,
+ "loss": 0.215,
+ "step": 1510
+ },
+ {
+ "epoch": 5.534798534798535,
+ "grad_norm": 43.24858474731445,
+ "learning_rate": 2.9792429792429794e-05,
+ "loss": 0.3689,
+ "step": 1511
+ },
+ {
+ "epoch": 5.538461538461538,
+ "grad_norm": 39.85542297363281,
+ "learning_rate": 2.976800976800977e-05,
+ "loss": 0.6229,
+ "step": 1512
+ },
+ {
+ "epoch": 5.542124542124542,
+ "grad_norm": 17.576234817504883,
+ "learning_rate": 2.9743589743589744e-05,
+ "loss": 0.0994,
+ "step": 1513
+ },
+ {
+ "epoch": 5.545787545787546,
+ "grad_norm": 45.81230545043945,
+ "learning_rate": 2.971916971916972e-05,
+ "loss": 0.5225,
+ "step": 1514
+ },
+ {
+ "epoch": 5.549450549450549,
+ "grad_norm": 21.293874740600586,
+ "learning_rate": 2.9694749694749694e-05,
+ "loss": 0.1139,
+ "step": 1515
+ },
+ {
+ "epoch": 5.553113553113553,
+ "grad_norm": 3.8571391105651855,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 0.0257,
+ "step": 1516
+ },
+ {
+ "epoch": 5.556776556776557,
+ "grad_norm": 32.1104736328125,
+ "learning_rate": 2.9645909645909648e-05,
+ "loss": 0.2649,
+ "step": 1517
+ },
+ {
+ "epoch": 5.56043956043956,
+ "grad_norm": 26.141633987426758,
+ "learning_rate": 2.9621489621489623e-05,
+ "loss": 0.2003,
+ "step": 1518
+ },
+ {
+ "epoch": 5.564102564102564,
+ "grad_norm": 44.93999099731445,
+ "learning_rate": 2.9597069597069598e-05,
+ "loss": 0.4019,
+ "step": 1519
+ },
+ {
+ "epoch": 5.5677655677655675,
+ "grad_norm": 10.86503791809082,
+ "learning_rate": 2.9572649572649573e-05,
+ "loss": 0.112,
+ "step": 1520
+ },
+ {
+ "epoch": 5.571428571428571,
+ "grad_norm": 164.05909729003906,
+ "learning_rate": 2.9548229548229548e-05,
+ "loss": 0.7215,
+ "step": 1521
+ },
+ {
+ "epoch": 5.575091575091575,
+ "grad_norm": 39.3042106628418,
+ "learning_rate": 2.9523809523809523e-05,
+ "loss": 0.3393,
+ "step": 1522
+ },
+ {
+ "epoch": 5.5787545787545785,
+ "grad_norm": 28.68779182434082,
+ "learning_rate": 2.94993894993895e-05,
+ "loss": 0.1175,
+ "step": 1523
+ },
+ {
+ "epoch": 5.582417582417582,
+ "grad_norm": 19.018821716308594,
+ "learning_rate": 2.9474969474969477e-05,
+ "loss": 0.1185,
+ "step": 1524
+ },
+ {
+ "epoch": 5.586080586080586,
+ "grad_norm": 32.04712677001953,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.275,
+ "step": 1525
+ },
+ {
+ "epoch": 5.589743589743589,
+ "grad_norm": 51.721744537353516,
+ "learning_rate": 2.9426129426129427e-05,
+ "loss": 0.5128,
+ "step": 1526
+ },
+ {
+ "epoch": 5.593406593406593,
+ "grad_norm": 8.353523254394531,
+ "learning_rate": 2.9401709401709402e-05,
+ "loss": 0.0452,
+ "step": 1527
+ },
+ {
+ "epoch": 5.597069597069597,
+ "grad_norm": 60.5823860168457,
+ "learning_rate": 2.9377289377289377e-05,
+ "loss": 0.7654,
+ "step": 1528
+ },
+ {
+ "epoch": 5.6007326007326,
+ "grad_norm": 39.350582122802734,
+ "learning_rate": 2.9352869352869355e-05,
+ "loss": 0.2384,
+ "step": 1529
+ },
+ {
+ "epoch": 5.604395604395604,
+ "grad_norm": 13.450817108154297,
+ "learning_rate": 2.932844932844933e-05,
+ "loss": 0.045,
+ "step": 1530
+ },
+ {
+ "epoch": 5.608058608058608,
+ "grad_norm": 19.569263458251953,
+ "learning_rate": 2.9304029304029305e-05,
+ "loss": 0.0806,
+ "step": 1531
+ },
+ {
+ "epoch": 5.611721611721611,
+ "grad_norm": 15.188614845275879,
+ "learning_rate": 2.927960927960928e-05,
+ "loss": 0.0639,
+ "step": 1532
+ },
+ {
+ "epoch": 5.615384615384615,
+ "grad_norm": 64.51557922363281,
+ "learning_rate": 2.9255189255189255e-05,
+ "loss": 0.4426,
+ "step": 1533
+ },
+ {
+ "epoch": 5.619047619047619,
+ "grad_norm": 80.56137084960938,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.8589,
+ "step": 1534
+ },
+ {
+ "epoch": 5.622710622710622,
+ "grad_norm": 50.31480407714844,
+ "learning_rate": 2.9206349206349206e-05,
+ "loss": 1.1482,
+ "step": 1535
+ },
+ {
+ "epoch": 5.626373626373626,
+ "grad_norm": 12.077424049377441,
+ "learning_rate": 2.9181929181929184e-05,
+ "loss": 0.0766,
+ "step": 1536
+ },
+ {
+ "epoch": 5.63003663003663,
+ "grad_norm": 58.46347427368164,
+ "learning_rate": 2.915750915750916e-05,
+ "loss": 0.6512,
+ "step": 1537
+ },
+ {
+ "epoch": 5.633699633699633,
+ "grad_norm": 22.6331729888916,
+ "learning_rate": 2.913308913308913e-05,
+ "loss": 0.155,
+ "step": 1538
+ },
+ {
+ "epoch": 5.637362637362637,
+ "grad_norm": 49.88985824584961,
+ "learning_rate": 2.910866910866911e-05,
+ "loss": 0.4947,
+ "step": 1539
+ },
+ {
+ "epoch": 5.641025641025641,
+ "grad_norm": 64.37980651855469,
+ "learning_rate": 2.9084249084249084e-05,
+ "loss": 0.4665,
+ "step": 1540
+ },
+ {
+ "epoch": 5.644688644688645,
+ "grad_norm": 13.715937614440918,
+ "learning_rate": 2.905982905982906e-05,
+ "loss": 0.0937,
+ "step": 1541
+ },
+ {
+ "epoch": 5.648351648351649,
+ "grad_norm": 25.40440559387207,
+ "learning_rate": 2.9035409035409038e-05,
+ "loss": 0.2467,
+ "step": 1542
+ },
+ {
+ "epoch": 5.652014652014652,
+ "grad_norm": 52.885963439941406,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.5018,
+ "step": 1543
+ },
+ {
+ "epoch": 5.655677655677656,
+ "grad_norm": 7.535051345825195,
+ "learning_rate": 2.8986568986568988e-05,
+ "loss": 0.0607,
+ "step": 1544
+ },
+ {
+ "epoch": 5.65934065934066,
+ "grad_norm": 55.79275894165039,
+ "learning_rate": 2.8962148962148963e-05,
+ "loss": 1.0288,
+ "step": 1545
+ },
+ {
+ "epoch": 5.663003663003663,
+ "grad_norm": 21.050024032592773,
+ "learning_rate": 2.8937728937728938e-05,
+ "loss": 0.1987,
+ "step": 1546
+ },
+ {
+ "epoch": 5.666666666666667,
+ "grad_norm": 24.74984359741211,
+ "learning_rate": 2.8913308913308913e-05,
+ "loss": 0.202,
+ "step": 1547
+ },
+ {
+ "epoch": 5.670329670329671,
+ "grad_norm": 15.297272682189941,
+ "learning_rate": 2.8888888888888888e-05,
+ "loss": 0.127,
+ "step": 1548
+ },
+ {
+ "epoch": 5.673992673992674,
+ "grad_norm": 12.198046684265137,
+ "learning_rate": 2.8864468864468867e-05,
+ "loss": 0.115,
+ "step": 1549
+ },
+ {
+ "epoch": 5.677655677655678,
+ "grad_norm": 18.761402130126953,
+ "learning_rate": 2.8840048840048842e-05,
+ "loss": 0.1745,
+ "step": 1550
+ },
+ {
+ "epoch": 5.681318681318682,
+ "grad_norm": 26.97224235534668,
+ "learning_rate": 2.8815628815628813e-05,
+ "loss": 0.2554,
+ "step": 1551
+ },
+ {
+ "epoch": 5.684981684981685,
+ "grad_norm": 9.772692680358887,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.0927,
+ "step": 1552
+ },
+ {
+ "epoch": 5.688644688644689,
+ "grad_norm": 35.73431396484375,
+ "learning_rate": 2.8766788766788767e-05,
+ "loss": 0.4048,
+ "step": 1553
+ },
+ {
+ "epoch": 5.6923076923076925,
+ "grad_norm": 31.94872283935547,
+ "learning_rate": 2.8742368742368742e-05,
+ "loss": 0.5711,
+ "step": 1554
+ },
+ {
+ "epoch": 5.695970695970696,
+ "grad_norm": 45.44688034057617,
+ "learning_rate": 2.871794871794872e-05,
+ "loss": 0.7126,
+ "step": 1555
+ },
+ {
+ "epoch": 5.6996336996337,
+ "grad_norm": 45.74476623535156,
+ "learning_rate": 2.8693528693528696e-05,
+ "loss": 0.933,
+ "step": 1556
+ },
+ {
+ "epoch": 5.7032967032967035,
+ "grad_norm": 19.827136993408203,
+ "learning_rate": 2.866910866910867e-05,
+ "loss": 0.2433,
+ "step": 1557
+ },
+ {
+ "epoch": 5.706959706959707,
+ "grad_norm": 35.981903076171875,
+ "learning_rate": 2.8644688644688646e-05,
+ "loss": 0.3429,
+ "step": 1558
+ },
+ {
+ "epoch": 5.710622710622711,
+ "grad_norm": 19.642629623413086,
+ "learning_rate": 2.862026862026862e-05,
+ "loss": 0.1454,
+ "step": 1559
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 25.960437774658203,
+ "learning_rate": 2.8595848595848596e-05,
+ "loss": 0.2965,
+ "step": 1560
+ },
+ {
+ "epoch": 5.717948717948718,
+ "grad_norm": 49.41150665283203,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.3295,
+ "step": 1561
+ },
+ {
+ "epoch": 5.721611721611722,
+ "grad_norm": 10.984975814819336,
+ "learning_rate": 2.854700854700855e-05,
+ "loss": 0.0879,
+ "step": 1562
+ },
+ {
+ "epoch": 5.725274725274725,
+ "grad_norm": 26.814556121826172,
+ "learning_rate": 2.8522588522588524e-05,
+ "loss": 0.1456,
+ "step": 1563
+ },
+ {
+ "epoch": 5.728937728937729,
+ "grad_norm": 18.65792465209961,
+ "learning_rate": 2.8498168498168496e-05,
+ "loss": 0.161,
+ "step": 1564
+ },
+ {
+ "epoch": 5.732600732600733,
+ "grad_norm": 35.959590911865234,
+ "learning_rate": 2.8473748473748475e-05,
+ "loss": 0.672,
+ "step": 1565
+ },
+ {
+ "epoch": 5.736263736263736,
+ "grad_norm": 78.56996154785156,
+ "learning_rate": 2.844932844932845e-05,
+ "loss": 1.6393,
+ "step": 1566
+ },
+ {
+ "epoch": 5.73992673992674,
+ "grad_norm": 31.604719161987305,
+ "learning_rate": 2.8424908424908425e-05,
+ "loss": 0.5395,
+ "step": 1567
+ },
+ {
+ "epoch": 5.743589743589744,
+ "grad_norm": 14.373411178588867,
+ "learning_rate": 2.8400488400488403e-05,
+ "loss": 0.0688,
+ "step": 1568
+ },
+ {
+ "epoch": 5.747252747252747,
+ "grad_norm": 3.5718555450439453,
+ "learning_rate": 2.8376068376068378e-05,
+ "loss": 0.0161,
+ "step": 1569
+ },
+ {
+ "epoch": 5.750915750915751,
+ "grad_norm": 23.164167404174805,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.2169,
+ "step": 1570
+ },
+ {
+ "epoch": 5.754578754578755,
+ "grad_norm": 33.42869186401367,
+ "learning_rate": 2.8327228327228328e-05,
+ "loss": 0.3731,
+ "step": 1571
+ },
+ {
+ "epoch": 5.758241758241758,
+ "grad_norm": 32.016361236572266,
+ "learning_rate": 2.8302808302808303e-05,
+ "loss": 0.2243,
+ "step": 1572
+ },
+ {
+ "epoch": 5.761904761904762,
+ "grad_norm": 43.50716018676758,
+ "learning_rate": 2.827838827838828e-05,
+ "loss": 0.4229,
+ "step": 1573
+ },
+ {
+ "epoch": 5.7655677655677655,
+ "grad_norm": 4.828849792480469,
+ "learning_rate": 2.8253968253968253e-05,
+ "loss": 0.0295,
+ "step": 1574
+ },
+ {
+ "epoch": 5.769230769230769,
+ "grad_norm": 30.276351928710938,
+ "learning_rate": 2.8229548229548232e-05,
+ "loss": 0.399,
+ "step": 1575
+ },
+ {
+ "epoch": 5.772893772893773,
+ "grad_norm": 17.416358947753906,
+ "learning_rate": 2.8205128205128207e-05,
+ "loss": 0.1529,
+ "step": 1576
+ },
+ {
+ "epoch": 5.7765567765567765,
+ "grad_norm": 39.488468170166016,
+ "learning_rate": 2.818070818070818e-05,
+ "loss": 0.1245,
+ "step": 1577
+ },
+ {
+ "epoch": 5.78021978021978,
+ "grad_norm": 27.775489807128906,
+ "learning_rate": 2.8156288156288157e-05,
+ "loss": 0.1312,
+ "step": 1578
+ },
+ {
+ "epoch": 5.783882783882784,
+ "grad_norm": 35.964717864990234,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.5796,
+ "step": 1579
+ },
+ {
+ "epoch": 5.787545787545787,
+ "grad_norm": 53.15998077392578,
+ "learning_rate": 2.8107448107448107e-05,
+ "loss": 1.2654,
+ "step": 1580
+ },
+ {
+ "epoch": 5.791208791208791,
+ "grad_norm": 22.90069007873535,
+ "learning_rate": 2.8083028083028086e-05,
+ "loss": 0.2162,
+ "step": 1581
+ },
+ {
+ "epoch": 5.794871794871795,
+ "grad_norm": 45.380470275878906,
+ "learning_rate": 2.805860805860806e-05,
+ "loss": 0.4231,
+ "step": 1582
+ },
+ {
+ "epoch": 5.798534798534798,
+ "grad_norm": 32.56012725830078,
+ "learning_rate": 2.8034188034188032e-05,
+ "loss": 0.3711,
+ "step": 1583
+ },
+ {
+ "epoch": 5.802197802197802,
+ "grad_norm": 34.63470458984375,
+ "learning_rate": 2.800976800976801e-05,
+ "loss": 0.5414,
+ "step": 1584
+ },
+ {
+ "epoch": 5.805860805860806,
+ "grad_norm": 48.173797607421875,
+ "learning_rate": 2.7985347985347986e-05,
+ "loss": 1.2363,
+ "step": 1585
+ },
+ {
+ "epoch": 5.809523809523809,
+ "grad_norm": 27.12062644958496,
+ "learning_rate": 2.796092796092796e-05,
+ "loss": 0.4824,
+ "step": 1586
+ },
+ {
+ "epoch": 5.813186813186813,
+ "grad_norm": 23.13554573059082,
+ "learning_rate": 2.7936507936507936e-05,
+ "loss": 0.2321,
+ "step": 1587
+ },
+ {
+ "epoch": 5.816849816849817,
+ "grad_norm": 50.56953430175781,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.2158,
+ "step": 1588
+ },
+ {
+ "epoch": 5.82051282051282,
+ "grad_norm": 20.73900604248047,
+ "learning_rate": 2.788766788766789e-05,
+ "loss": 0.217,
+ "step": 1589
+ },
+ {
+ "epoch": 5.824175824175824,
+ "grad_norm": 17.288028717041016,
+ "learning_rate": 2.786324786324786e-05,
+ "loss": 0.2936,
+ "step": 1590
+ },
+ {
+ "epoch": 5.827838827838828,
+ "grad_norm": 22.067502975463867,
+ "learning_rate": 2.783882783882784e-05,
+ "loss": 0.1906,
+ "step": 1591
+ },
+ {
+ "epoch": 5.831501831501831,
+ "grad_norm": 14.928089141845703,
+ "learning_rate": 2.7814407814407815e-05,
+ "loss": 0.1296,
+ "step": 1592
+ },
+ {
+ "epoch": 5.835164835164835,
+ "grad_norm": 25.669342041015625,
+ "learning_rate": 2.778998778998779e-05,
+ "loss": 0.2475,
+ "step": 1593
+ },
+ {
+ "epoch": 5.8388278388278385,
+ "grad_norm": 20.302515029907227,
+ "learning_rate": 2.776556776556777e-05,
+ "loss": 0.2206,
+ "step": 1594
+ },
+ {
+ "epoch": 5.842490842490842,
+ "grad_norm": 9.004451751708984,
+ "learning_rate": 2.7741147741147743e-05,
+ "loss": 0.0694,
+ "step": 1595
+ },
+ {
+ "epoch": 5.846153846153846,
+ "grad_norm": 7.495925426483154,
+ "learning_rate": 2.7716727716727715e-05,
+ "loss": 0.0481,
+ "step": 1596
+ },
+ {
+ "epoch": 5.8498168498168495,
+ "grad_norm": 11.891450881958008,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0754,
+ "step": 1597
+ },
+ {
+ "epoch": 5.853479853479853,
+ "grad_norm": 27.53200340270996,
+ "learning_rate": 2.766788766788767e-05,
+ "loss": 0.1459,
+ "step": 1598
+ },
+ {
+ "epoch": 5.857142857142857,
+ "grad_norm": 4.103634357452393,
+ "learning_rate": 2.7643467643467644e-05,
+ "loss": 0.0256,
+ "step": 1599
+ },
+ {
+ "epoch": 5.860805860805861,
+ "grad_norm": 30.772586822509766,
+ "learning_rate": 2.761904761904762e-05,
+ "loss": 0.2748,
+ "step": 1600
+ },
+ {
+ "epoch": 5.864468864468865,
+ "grad_norm": 39.70070266723633,
+ "learning_rate": 2.7594627594627597e-05,
+ "loss": 1.3089,
+ "step": 1601
+ },
+ {
+ "epoch": 5.868131868131869,
+ "grad_norm": 54.576236724853516,
+ "learning_rate": 2.7570207570207572e-05,
+ "loss": 0.3549,
+ "step": 1602
+ },
+ {
+ "epoch": 5.871794871794872,
+ "grad_norm": 14.617592811584473,
+ "learning_rate": 2.7545787545787544e-05,
+ "loss": 0.0976,
+ "step": 1603
+ },
+ {
+ "epoch": 5.875457875457876,
+ "grad_norm": 11.900232315063477,
+ "learning_rate": 2.7521367521367522e-05,
+ "loss": 0.0518,
+ "step": 1604
+ },
+ {
+ "epoch": 5.8791208791208796,
+ "grad_norm": 62.00771713256836,
+ "learning_rate": 2.7496947496947497e-05,
+ "loss": 0.2866,
+ "step": 1605
+ },
+ {
+ "epoch": 5.882783882783883,
+ "grad_norm": 51.59067153930664,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 0.3357,
+ "step": 1606
+ },
+ {
+ "epoch": 5.886446886446887,
+ "grad_norm": 61.792476654052734,
+ "learning_rate": 2.744810744810745e-05,
+ "loss": 0.2923,
+ "step": 1607
+ },
+ {
+ "epoch": 5.8901098901098905,
+ "grad_norm": 12.737351417541504,
+ "learning_rate": 2.7423687423687426e-05,
+ "loss": 0.0893,
+ "step": 1608
+ },
+ {
+ "epoch": 5.893772893772894,
+ "grad_norm": 7.451726913452148,
+ "learning_rate": 2.7399267399267398e-05,
+ "loss": 0.044,
+ "step": 1609
+ },
+ {
+ "epoch": 5.897435897435898,
+ "grad_norm": 41.03788757324219,
+ "learning_rate": 2.7374847374847376e-05,
+ "loss": 0.4605,
+ "step": 1610
+ },
+ {
+ "epoch": 5.9010989010989015,
+ "grad_norm": 11.49382209777832,
+ "learning_rate": 2.735042735042735e-05,
+ "loss": 0.0754,
+ "step": 1611
+ },
+ {
+ "epoch": 5.904761904761905,
+ "grad_norm": 15.952816009521484,
+ "learning_rate": 2.7326007326007326e-05,
+ "loss": 0.0748,
+ "step": 1612
+ },
+ {
+ "epoch": 5.908424908424909,
+ "grad_norm": 8.492574691772461,
+ "learning_rate": 2.73015873015873e-05,
+ "loss": 0.0254,
+ "step": 1613
+ },
+ {
+ "epoch": 5.912087912087912,
+ "grad_norm": 17.973997116088867,
+ "learning_rate": 2.727716727716728e-05,
+ "loss": 0.1038,
+ "step": 1614
+ },
+ {
+ "epoch": 5.915750915750916,
+ "grad_norm": 6.881199359893799,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.0186,
+ "step": 1615
+ },
+ {
+ "epoch": 5.91941391941392,
+ "grad_norm": 28.51510238647461,
+ "learning_rate": 2.7228327228327227e-05,
+ "loss": 0.1283,
+ "step": 1616
+ },
+ {
+ "epoch": 5.923076923076923,
+ "grad_norm": 33.539485931396484,
+ "learning_rate": 2.7203907203907205e-05,
+ "loss": 0.6151,
+ "step": 1617
+ },
+ {
+ "epoch": 5.926739926739927,
+ "grad_norm": 57.307823181152344,
+ "learning_rate": 2.717948717948718e-05,
+ "loss": 0.3924,
+ "step": 1618
+ },
+ {
+ "epoch": 5.930402930402931,
+ "grad_norm": 43.010276794433594,
+ "learning_rate": 2.7155067155067155e-05,
+ "loss": 0.3942,
+ "step": 1619
+ },
+ {
+ "epoch": 5.934065934065934,
+ "grad_norm": 26.552478790283203,
+ "learning_rate": 2.7130647130647134e-05,
+ "loss": 0.1961,
+ "step": 1620
+ },
+ {
+ "epoch": 5.937728937728938,
+ "grad_norm": 78.5624008178711,
+ "learning_rate": 2.710622710622711e-05,
+ "loss": 1.0705,
+ "step": 1621
+ },
+ {
+ "epoch": 5.941391941391942,
+ "grad_norm": 37.23006057739258,
+ "learning_rate": 2.708180708180708e-05,
+ "loss": 0.4875,
+ "step": 1622
+ },
+ {
+ "epoch": 5.945054945054945,
+ "grad_norm": 42.23412322998047,
+ "learning_rate": 2.705738705738706e-05,
+ "loss": 0.3795,
+ "step": 1623
+ },
+ {
+ "epoch": 5.948717948717949,
+ "grad_norm": 42.677696228027344,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.3414,
+ "step": 1624
+ },
+ {
+ "epoch": 5.9523809523809526,
+ "grad_norm": 24.182249069213867,
+ "learning_rate": 2.700854700854701e-05,
+ "loss": 0.0814,
+ "step": 1625
+ },
+ {
+ "epoch": 5.956043956043956,
+ "grad_norm": 11.87109088897705,
+ "learning_rate": 2.6984126984126984e-05,
+ "loss": 0.0816,
+ "step": 1626
+ },
+ {
+ "epoch": 5.95970695970696,
+ "grad_norm": 7.575586318969727,
+ "learning_rate": 2.6959706959706962e-05,
+ "loss": 0.049,
+ "step": 1627
+ },
+ {
+ "epoch": 5.9633699633699635,
+ "grad_norm": 4.052019119262695,
+ "learning_rate": 2.6935286935286934e-05,
+ "loss": 0.0276,
+ "step": 1628
+ },
+ {
+ "epoch": 5.967032967032967,
+ "grad_norm": 24.308481216430664,
+ "learning_rate": 2.691086691086691e-05,
+ "loss": 0.2324,
+ "step": 1629
+ },
+ {
+ "epoch": 5.970695970695971,
+ "grad_norm": 32.5918083190918,
+ "learning_rate": 2.6886446886446888e-05,
+ "loss": 0.42,
+ "step": 1630
+ },
+ {
+ "epoch": 5.9743589743589745,
+ "grad_norm": 16.758689880371094,
+ "learning_rate": 2.6862026862026863e-05,
+ "loss": 0.1857,
+ "step": 1631
+ },
+ {
+ "epoch": 5.978021978021978,
+ "grad_norm": 24.96327781677246,
+ "learning_rate": 2.6837606837606838e-05,
+ "loss": 0.3293,
+ "step": 1632
+ },
+ {
+ "epoch": 5.981684981684982,
+ "grad_norm": 7.734143257141113,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 0.0644,
+ "step": 1633
+ },
+ {
+ "epoch": 5.985347985347985,
+ "grad_norm": 49.89662551879883,
+ "learning_rate": 2.678876678876679e-05,
+ "loss": 0.7976,
+ "step": 1634
+ },
+ {
+ "epoch": 5.989010989010989,
+ "grad_norm": 20.55232810974121,
+ "learning_rate": 2.6764346764346763e-05,
+ "loss": 0.1911,
+ "step": 1635
+ },
+ {
+ "epoch": 5.992673992673993,
+ "grad_norm": 11.190897941589355,
+ "learning_rate": 2.673992673992674e-05,
+ "loss": 0.0604,
+ "step": 1636
+ },
+ {
+ "epoch": 5.996336996336996,
+ "grad_norm": 24.896806716918945,
+ "learning_rate": 2.6715506715506716e-05,
+ "loss": 0.2467,
+ "step": 1637
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 39.5569953918457,
+ "learning_rate": 2.669108669108669e-05,
+ "loss": 0.8073,
+ "step": 1638
+ },
+ {
+ "epoch": 6.003663003663004,
+ "grad_norm": 4.203596591949463,
+ "learning_rate": 2.6666666666666667e-05,
+ "loss": 0.0266,
+ "step": 1639
+ },
+ {
+ "epoch": 6.007326007326007,
+ "grad_norm": 6.89768648147583,
+ "learning_rate": 2.6642246642246645e-05,
+ "loss": 0.0664,
+ "step": 1640
+ },
+ {
+ "epoch": 6.010989010989011,
+ "grad_norm": 33.19546890258789,
+ "learning_rate": 2.6617826617826617e-05,
+ "loss": 0.6504,
+ "step": 1641
+ },
+ {
+ "epoch": 6.014652014652015,
+ "grad_norm": 8.577303886413574,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.0715,
+ "step": 1642
+ },
+ {
+ "epoch": 6.018315018315018,
+ "grad_norm": 11.48106861114502,
+ "learning_rate": 2.656898656898657e-05,
+ "loss": 0.0952,
+ "step": 1643
+ },
+ {
+ "epoch": 6.021978021978022,
+ "grad_norm": 16.87290382385254,
+ "learning_rate": 2.6544566544566545e-05,
+ "loss": 0.1156,
+ "step": 1644
+ },
+ {
+ "epoch": 6.0256410256410255,
+ "grad_norm": 5.304442405700684,
+ "learning_rate": 2.652014652014652e-05,
+ "loss": 0.0574,
+ "step": 1645
+ },
+ {
+ "epoch": 6.029304029304029,
+ "grad_norm": 12.058186531066895,
+ "learning_rate": 2.64957264957265e-05,
+ "loss": 0.1013,
+ "step": 1646
+ },
+ {
+ "epoch": 6.032967032967033,
+ "grad_norm": 11.20624828338623,
+ "learning_rate": 2.6471306471306474e-05,
+ "loss": 0.0637,
+ "step": 1647
+ },
+ {
+ "epoch": 6.0366300366300365,
+ "grad_norm": 20.595020294189453,
+ "learning_rate": 2.6446886446886446e-05,
+ "loss": 0.1282,
+ "step": 1648
+ },
+ {
+ "epoch": 6.04029304029304,
+ "grad_norm": 32.712425231933594,
+ "learning_rate": 2.6422466422466424e-05,
+ "loss": 1.0173,
+ "step": 1649
+ },
+ {
+ "epoch": 6.043956043956044,
+ "grad_norm": 31.00687599182129,
+ "learning_rate": 2.63980463980464e-05,
+ "loss": 0.2822,
+ "step": 1650
+ },
+ {
+ "epoch": 6.0476190476190474,
+ "grad_norm": 15.361159324645996,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.08,
+ "step": 1651
+ },
+ {
+ "epoch": 6.051282051282051,
+ "grad_norm": 75.07713317871094,
+ "learning_rate": 2.634920634920635e-05,
+ "loss": 0.3835,
+ "step": 1652
+ },
+ {
+ "epoch": 6.054945054945055,
+ "grad_norm": 28.741546630859375,
+ "learning_rate": 2.6324786324786328e-05,
+ "loss": 0.1257,
+ "step": 1653
+ },
+ {
+ "epoch": 6.058608058608058,
+ "grad_norm": 173.8939971923828,
+ "learning_rate": 2.63003663003663e-05,
+ "loss": 0.0744,
+ "step": 1654
+ },
+ {
+ "epoch": 6.062271062271062,
+ "grad_norm": 8.212196350097656,
+ "learning_rate": 2.6275946275946274e-05,
+ "loss": 0.027,
+ "step": 1655
+ },
+ {
+ "epoch": 6.065934065934066,
+ "grad_norm": 56.60511779785156,
+ "learning_rate": 2.6251526251526253e-05,
+ "loss": 0.7258,
+ "step": 1656
+ },
+ {
+ "epoch": 6.069597069597069,
+ "grad_norm": 14.454882621765137,
+ "learning_rate": 2.6227106227106228e-05,
+ "loss": 0.0762,
+ "step": 1657
+ },
+ {
+ "epoch": 6.073260073260073,
+ "grad_norm": 40.66373062133789,
+ "learning_rate": 2.6202686202686203e-05,
+ "loss": 0.2663,
+ "step": 1658
+ },
+ {
+ "epoch": 6.076923076923077,
+ "grad_norm": 45.68836212158203,
+ "learning_rate": 2.617826617826618e-05,
+ "loss": 0.4244,
+ "step": 1659
+ },
+ {
+ "epoch": 6.08058608058608,
+ "grad_norm": 16.69190788269043,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.1249,
+ "step": 1660
+ },
+ {
+ "epoch": 6.084249084249084,
+ "grad_norm": 58.633358001708984,
+ "learning_rate": 2.6129426129426128e-05,
+ "loss": 0.3699,
+ "step": 1661
+ },
+ {
+ "epoch": 6.087912087912088,
+ "grad_norm": 8.262107849121094,
+ "learning_rate": 2.6105006105006107e-05,
+ "loss": 0.0332,
+ "step": 1662
+ },
+ {
+ "epoch": 6.091575091575091,
+ "grad_norm": 1.7256231307983398,
+ "learning_rate": 2.6080586080586082e-05,
+ "loss": 0.0073,
+ "step": 1663
+ },
+ {
+ "epoch": 6.095238095238095,
+ "grad_norm": 27.97568130493164,
+ "learning_rate": 2.6056166056166057e-05,
+ "loss": 0.3567,
+ "step": 1664
+ },
+ {
+ "epoch": 6.0989010989010985,
+ "grad_norm": 8.167609214782715,
+ "learning_rate": 2.6031746031746032e-05,
+ "loss": 0.0328,
+ "step": 1665
+ },
+ {
+ "epoch": 6.102564102564102,
+ "grad_norm": 8.547285079956055,
+ "learning_rate": 2.600732600732601e-05,
+ "loss": 0.0438,
+ "step": 1666
+ },
+ {
+ "epoch": 6.106227106227106,
+ "grad_norm": 38.85865020751953,
+ "learning_rate": 2.5982905982905982e-05,
+ "loss": 0.3492,
+ "step": 1667
+ },
+ {
+ "epoch": 6.1098901098901095,
+ "grad_norm": 18.36060333251953,
+ "learning_rate": 2.5958485958485957e-05,
+ "loss": 0.0411,
+ "step": 1668
+ },
+ {
+ "epoch": 6.113553113553113,
+ "grad_norm": 8.013274192810059,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.0461,
+ "step": 1669
+ },
+ {
+ "epoch": 6.117216117216117,
+ "grad_norm": 41.88865280151367,
+ "learning_rate": 2.590964590964591e-05,
+ "loss": 0.7209,
+ "step": 1670
+ },
+ {
+ "epoch": 6.1208791208791204,
+ "grad_norm": 93.57958221435547,
+ "learning_rate": 2.5885225885225886e-05,
+ "loss": 0.5563,
+ "step": 1671
+ },
+ {
+ "epoch": 6.124542124542124,
+ "grad_norm": 6.878098964691162,
+ "learning_rate": 2.5860805860805864e-05,
+ "loss": 0.0213,
+ "step": 1672
+ },
+ {
+ "epoch": 6.128205128205128,
+ "grad_norm": 41.09592819213867,
+ "learning_rate": 2.5836385836385836e-05,
+ "loss": 0.5724,
+ "step": 1673
+ },
+ {
+ "epoch": 6.131868131868132,
+ "grad_norm": 8.257637977600098,
+ "learning_rate": 2.581196581196581e-05,
+ "loss": 0.0396,
+ "step": 1674
+ },
+ {
+ "epoch": 6.135531135531136,
+ "grad_norm": 24.022602081298828,
+ "learning_rate": 2.578754578754579e-05,
+ "loss": 0.0623,
+ "step": 1675
+ },
+ {
+ "epoch": 6.13919413919414,
+ "grad_norm": 46.46554946899414,
+ "learning_rate": 2.5763125763125764e-05,
+ "loss": 0.4135,
+ "step": 1676
+ },
+ {
+ "epoch": 6.142857142857143,
+ "grad_norm": 96.42303466796875,
+ "learning_rate": 2.573870573870574e-05,
+ "loss": 0.4724,
+ "step": 1677
+ },
+ {
+ "epoch": 6.146520146520147,
+ "grad_norm": 8.401265144348145,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.0396,
+ "step": 1678
+ },
+ {
+ "epoch": 6.1501831501831505,
+ "grad_norm": 29.346588134765625,
+ "learning_rate": 2.5689865689865693e-05,
+ "loss": 0.1959,
+ "step": 1679
+ },
+ {
+ "epoch": 6.153846153846154,
+ "grad_norm": 4.874574661254883,
+ "learning_rate": 2.5665445665445665e-05,
+ "loss": 0.0295,
+ "step": 1680
+ },
+ {
+ "epoch": 6.157509157509158,
+ "grad_norm": 6.668759346008301,
+ "learning_rate": 2.564102564102564e-05,
+ "loss": 0.0408,
+ "step": 1681
+ },
+ {
+ "epoch": 6.1611721611721615,
+ "grad_norm": 21.22933006286621,
+ "learning_rate": 2.5616605616605618e-05,
+ "loss": 0.1591,
+ "step": 1682
+ },
+ {
+ "epoch": 6.164835164835165,
+ "grad_norm": 2.3441169261932373,
+ "learning_rate": 2.5592185592185593e-05,
+ "loss": 0.0138,
+ "step": 1683
+ },
+ {
+ "epoch": 6.168498168498169,
+ "grad_norm": 31.336048126220703,
+ "learning_rate": 2.5567765567765568e-05,
+ "loss": 0.321,
+ "step": 1684
+ },
+ {
+ "epoch": 6.172161172161172,
+ "grad_norm": 39.17483139038086,
+ "learning_rate": 2.5543345543345547e-05,
+ "loss": 0.5268,
+ "step": 1685
+ },
+ {
+ "epoch": 6.175824175824176,
+ "grad_norm": 6.984042644500732,
+ "learning_rate": 2.551892551892552e-05,
+ "loss": 0.0377,
+ "step": 1686
+ },
+ {
+ "epoch": 6.17948717948718,
+ "grad_norm": 21.946880340576172,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.1557,
+ "step": 1687
+ },
+ {
+ "epoch": 6.183150183150183,
+ "grad_norm": 23.447084426879883,
+ "learning_rate": 2.5470085470085472e-05,
+ "loss": 0.1996,
+ "step": 1688
+ },
+ {
+ "epoch": 6.186813186813187,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 2.5445665445665447e-05,
+ "loss": 0.0327,
+ "step": 1689
+ },
+ {
+ "epoch": 6.190476190476191,
+ "grad_norm": 11.126763343811035,
+ "learning_rate": 2.5421245421245422e-05,
+ "loss": 0.0335,
+ "step": 1690
+ },
+ {
+ "epoch": 6.194139194139194,
+ "grad_norm": 42.23086929321289,
+ "learning_rate": 2.5396825396825397e-05,
+ "loss": 0.3307,
+ "step": 1691
+ },
+ {
+ "epoch": 6.197802197802198,
+ "grad_norm": 26.350086212158203,
+ "learning_rate": 2.5372405372405376e-05,
+ "loss": 0.153,
+ "step": 1692
+ },
+ {
+ "epoch": 6.201465201465202,
+ "grad_norm": 6.667046546936035,
+ "learning_rate": 2.5347985347985347e-05,
+ "loss": 0.011,
+ "step": 1693
+ },
+ {
+ "epoch": 6.205128205128205,
+ "grad_norm": 63.5737190246582,
+ "learning_rate": 2.5323565323565322e-05,
+ "loss": 0.5602,
+ "step": 1694
+ },
+ {
+ "epoch": 6.208791208791209,
+ "grad_norm": 54.20994567871094,
+ "learning_rate": 2.52991452991453e-05,
+ "loss": 0.6584,
+ "step": 1695
+ },
+ {
+ "epoch": 6.212454212454213,
+ "grad_norm": 55.79521942138672,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.5259,
+ "step": 1696
+ },
+ {
+ "epoch": 6.216117216117216,
+ "grad_norm": 65.18093872070312,
+ "learning_rate": 2.525030525030525e-05,
+ "loss": 0.308,
+ "step": 1697
+ },
+ {
+ "epoch": 6.21978021978022,
+ "grad_norm": 9.979923248291016,
+ "learning_rate": 2.522588522588523e-05,
+ "loss": 0.0312,
+ "step": 1698
+ },
+ {
+ "epoch": 6.2234432234432235,
+ "grad_norm": 62.80887222290039,
+ "learning_rate": 2.52014652014652e-05,
+ "loss": 0.3198,
+ "step": 1699
+ },
+ {
+ "epoch": 6.227106227106227,
+ "grad_norm": 63.2298583984375,
+ "learning_rate": 2.5177045177045176e-05,
+ "loss": 0.5223,
+ "step": 1700
+ },
+ {
+ "epoch": 6.230769230769231,
+ "grad_norm": 49.968502044677734,
+ "learning_rate": 2.515262515262515e-05,
+ "loss": 0.5554,
+ "step": 1701
+ },
+ {
+ "epoch": 6.2344322344322345,
+ "grad_norm": 29.190656661987305,
+ "learning_rate": 2.512820512820513e-05,
+ "loss": 0.2286,
+ "step": 1702
+ },
+ {
+ "epoch": 6.238095238095238,
+ "grad_norm": 38.25267028808594,
+ "learning_rate": 2.5103785103785105e-05,
+ "loss": 0.1948,
+ "step": 1703
+ },
+ {
+ "epoch": 6.241758241758242,
+ "grad_norm": 57.620323181152344,
+ "learning_rate": 2.507936507936508e-05,
+ "loss": 0.5533,
+ "step": 1704
+ },
+ {
+ "epoch": 6.245421245421245,
+ "grad_norm": 21.61467170715332,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.0935,
+ "step": 1705
+ },
+ {
+ "epoch": 6.249084249084249,
+ "grad_norm": 19.86629867553711,
+ "learning_rate": 2.503052503052503e-05,
+ "loss": 0.0852,
+ "step": 1706
+ },
+ {
+ "epoch": 6.252747252747253,
+ "grad_norm": 59.41017150878906,
+ "learning_rate": 2.5006105006105005e-05,
+ "loss": 0.5853,
+ "step": 1707
+ },
+ {
+ "epoch": 6.256410256410256,
+ "grad_norm": 24.542570114135742,
+ "learning_rate": 2.4981684981684983e-05,
+ "loss": 0.0935,
+ "step": 1708
+ },
+ {
+ "epoch": 6.26007326007326,
+ "grad_norm": 29.034879684448242,
+ "learning_rate": 2.495726495726496e-05,
+ "loss": 0.1929,
+ "step": 1709
+ },
+ {
+ "epoch": 6.263736263736264,
+ "grad_norm": 17.3880672454834,
+ "learning_rate": 2.4932844932844933e-05,
+ "loss": 0.0927,
+ "step": 1710
+ },
+ {
+ "epoch": 6.267399267399267,
+ "grad_norm": 90.0419692993164,
+ "learning_rate": 2.4908424908424912e-05,
+ "loss": 1.1172,
+ "step": 1711
+ },
+ {
+ "epoch": 6.271062271062271,
+ "grad_norm": 4.710697650909424,
+ "learning_rate": 2.4884004884004884e-05,
+ "loss": 0.0207,
+ "step": 1712
+ },
+ {
+ "epoch": 6.274725274725275,
+ "grad_norm": 95.93651580810547,
+ "learning_rate": 2.485958485958486e-05,
+ "loss": 0.7137,
+ "step": 1713
+ },
+ {
+ "epoch": 6.278388278388278,
+ "grad_norm": 92.31869506835938,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.2076,
+ "step": 1714
+ },
+ {
+ "epoch": 6.282051282051282,
+ "grad_norm": 66.66917419433594,
+ "learning_rate": 2.4810744810744812e-05,
+ "loss": 0.8763,
+ "step": 1715
+ },
+ {
+ "epoch": 6.285714285714286,
+ "grad_norm": 94.52323150634766,
+ "learning_rate": 2.4786324786324787e-05,
+ "loss": 0.5962,
+ "step": 1716
+ },
+ {
+ "epoch": 6.289377289377289,
+ "grad_norm": 31.169715881347656,
+ "learning_rate": 2.4761904761904762e-05,
+ "loss": 0.1906,
+ "step": 1717
+ },
+ {
+ "epoch": 6.293040293040293,
+ "grad_norm": 54.97831726074219,
+ "learning_rate": 2.4737484737484737e-05,
+ "loss": 0.3767,
+ "step": 1718
+ },
+ {
+ "epoch": 6.2967032967032965,
+ "grad_norm": 25.52306365966797,
+ "learning_rate": 2.4713064713064712e-05,
+ "loss": 0.2128,
+ "step": 1719
+ },
+ {
+ "epoch": 6.3003663003663,
+ "grad_norm": 12.478558540344238,
+ "learning_rate": 2.4688644688644688e-05,
+ "loss": 0.0713,
+ "step": 1720
+ },
+ {
+ "epoch": 6.304029304029304,
+ "grad_norm": 27.71872329711914,
+ "learning_rate": 2.4664224664224666e-05,
+ "loss": 0.319,
+ "step": 1721
+ },
+ {
+ "epoch": 6.3076923076923075,
+ "grad_norm": 44.587589263916016,
+ "learning_rate": 2.463980463980464e-05,
+ "loss": 0.1997,
+ "step": 1722
+ },
+ {
+ "epoch": 6.311355311355311,
+ "grad_norm": 11.289876937866211,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 0.0694,
+ "step": 1723
+ },
+ {
+ "epoch": 6.315018315018315,
+ "grad_norm": 47.27211380004883,
+ "learning_rate": 2.4590964590964595e-05,
+ "loss": 0.249,
+ "step": 1724
+ },
+ {
+ "epoch": 6.318681318681318,
+ "grad_norm": 34.143611907958984,
+ "learning_rate": 2.4566544566544566e-05,
+ "loss": 0.3645,
+ "step": 1725
+ },
+ {
+ "epoch": 6.322344322344322,
+ "grad_norm": 33.73476791381836,
+ "learning_rate": 2.454212454212454e-05,
+ "loss": 0.5412,
+ "step": 1726
+ },
+ {
+ "epoch": 6.326007326007326,
+ "grad_norm": 20.03452491760254,
+ "learning_rate": 2.4517704517704516e-05,
+ "loss": 0.0966,
+ "step": 1727
+ },
+ {
+ "epoch": 6.329670329670329,
+ "grad_norm": 39.63338088989258,
+ "learning_rate": 2.4493284493284495e-05,
+ "loss": 0.2953,
+ "step": 1728
+ },
+ {
+ "epoch": 6.333333333333333,
+ "grad_norm": 42.99127960205078,
+ "learning_rate": 2.446886446886447e-05,
+ "loss": 0.5328,
+ "step": 1729
+ },
+ {
+ "epoch": 6.336996336996337,
+ "grad_norm": 18.581249237060547,
+ "learning_rate": 2.4444444444444445e-05,
+ "loss": 0.1095,
+ "step": 1730
+ },
+ {
+ "epoch": 6.34065934065934,
+ "grad_norm": 33.29508590698242,
+ "learning_rate": 2.442002442002442e-05,
+ "loss": 0.1608,
+ "step": 1731
+ },
+ {
+ "epoch": 6.344322344322344,
+ "grad_norm": 103.12726593017578,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.9665,
+ "step": 1732
+ },
+ {
+ "epoch": 6.347985347985348,
+ "grad_norm": 55.45216369628906,
+ "learning_rate": 2.437118437118437e-05,
+ "loss": 0.4441,
+ "step": 1733
+ },
+ {
+ "epoch": 6.351648351648351,
+ "grad_norm": 68.68230438232422,
+ "learning_rate": 2.434676434676435e-05,
+ "loss": 0.6929,
+ "step": 1734
+ },
+ {
+ "epoch": 6.355311355311355,
+ "grad_norm": 99.91059875488281,
+ "learning_rate": 2.4322344322344324e-05,
+ "loss": 0.599,
+ "step": 1735
+ },
+ {
+ "epoch": 6.358974358974359,
+ "grad_norm": 24.994863510131836,
+ "learning_rate": 2.42979242979243e-05,
+ "loss": 0.2212,
+ "step": 1736
+ },
+ {
+ "epoch": 6.362637362637362,
+ "grad_norm": 106.0428466796875,
+ "learning_rate": 2.4273504273504277e-05,
+ "loss": 0.685,
+ "step": 1737
+ },
+ {
+ "epoch": 6.366300366300366,
+ "grad_norm": 37.730712890625,
+ "learning_rate": 2.424908424908425e-05,
+ "loss": 0.0792,
+ "step": 1738
+ },
+ {
+ "epoch": 6.36996336996337,
+ "grad_norm": 44.056556701660156,
+ "learning_rate": 2.4224664224664224e-05,
+ "loss": 0.5062,
+ "step": 1739
+ },
+ {
+ "epoch": 6.373626373626374,
+ "grad_norm": 72.15331268310547,
+ "learning_rate": 2.42002442002442e-05,
+ "loss": 0.7541,
+ "step": 1740
+ },
+ {
+ "epoch": 6.377289377289378,
+ "grad_norm": 151.57752990722656,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 0.9455,
+ "step": 1741
+ },
+ {
+ "epoch": 6.380952380952381,
+ "grad_norm": 62.12364196777344,
+ "learning_rate": 2.4151404151404152e-05,
+ "loss": 0.3055,
+ "step": 1742
+ },
+ {
+ "epoch": 6.384615384615385,
+ "grad_norm": 21.725858688354492,
+ "learning_rate": 2.4126984126984128e-05,
+ "loss": 0.0691,
+ "step": 1743
+ },
+ {
+ "epoch": 6.388278388278389,
+ "grad_norm": 60.754615783691406,
+ "learning_rate": 2.4102564102564103e-05,
+ "loss": 0.5273,
+ "step": 1744
+ },
+ {
+ "epoch": 6.391941391941392,
+ "grad_norm": 63.324684143066406,
+ "learning_rate": 2.4078144078144078e-05,
+ "loss": 0.2735,
+ "step": 1745
+ },
+ {
+ "epoch": 6.395604395604396,
+ "grad_norm": 79.82772064208984,
+ "learning_rate": 2.4053724053724053e-05,
+ "loss": 1.1766,
+ "step": 1746
+ },
+ {
+ "epoch": 6.3992673992674,
+ "grad_norm": 42.69222640991211,
+ "learning_rate": 2.402930402930403e-05,
+ "loss": 0.3417,
+ "step": 1747
+ },
+ {
+ "epoch": 6.402930402930403,
+ "grad_norm": 125.5120849609375,
+ "learning_rate": 2.4004884004884006e-05,
+ "loss": 0.331,
+ "step": 1748
+ },
+ {
+ "epoch": 6.406593406593407,
+ "grad_norm": 61.30012512207031,
+ "learning_rate": 2.398046398046398e-05,
+ "loss": 0.5709,
+ "step": 1749
+ },
+ {
+ "epoch": 6.410256410256411,
+ "grad_norm": 18.139734268188477,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.0671,
+ "step": 1750
+ },
+ {
+ "epoch": 6.413919413919414,
+ "grad_norm": 29.233678817749023,
+ "learning_rate": 2.393162393162393e-05,
+ "loss": 0.2012,
+ "step": 1751
+ },
+ {
+ "epoch": 6.417582417582418,
+ "grad_norm": 6.065537452697754,
+ "learning_rate": 2.3907203907203907e-05,
+ "loss": 0.0362,
+ "step": 1752
+ },
+ {
+ "epoch": 6.4212454212454215,
+ "grad_norm": 27.241317749023438,
+ "learning_rate": 2.388278388278388e-05,
+ "loss": 0.2462,
+ "step": 1753
+ },
+ {
+ "epoch": 6.424908424908425,
+ "grad_norm": 34.21626663208008,
+ "learning_rate": 2.385836385836386e-05,
+ "loss": 0.3341,
+ "step": 1754
+ },
+ {
+ "epoch": 6.428571428571429,
+ "grad_norm": 3.2597031593322754,
+ "learning_rate": 2.3833943833943835e-05,
+ "loss": 0.0159,
+ "step": 1755
+ },
+ {
+ "epoch": 6.4322344322344325,
+ "grad_norm": 44.21895217895508,
+ "learning_rate": 2.380952380952381e-05,
+ "loss": 0.2461,
+ "step": 1756
+ },
+ {
+ "epoch": 6.435897435897436,
+ "grad_norm": 11.0900239944458,
+ "learning_rate": 2.3785103785103785e-05,
+ "loss": 0.0343,
+ "step": 1757
+ },
+ {
+ "epoch": 6.43956043956044,
+ "grad_norm": 33.349464416503906,
+ "learning_rate": 2.376068376068376e-05,
+ "loss": 0.1605,
+ "step": 1758
+ },
+ {
+ "epoch": 6.443223443223443,
+ "grad_norm": 36.584434509277344,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.291,
+ "step": 1759
+ },
+ {
+ "epoch": 6.446886446886447,
+ "grad_norm": 1.5533220767974854,
+ "learning_rate": 2.3711843711843714e-05,
+ "loss": 0.0072,
+ "step": 1760
+ },
+ {
+ "epoch": 6.450549450549451,
+ "grad_norm": 31.38529396057129,
+ "learning_rate": 2.368742368742369e-05,
+ "loss": 0.2211,
+ "step": 1761
+ },
+ {
+ "epoch": 6.454212454212454,
+ "grad_norm": 33.149131774902344,
+ "learning_rate": 2.3663003663003664e-05,
+ "loss": 0.7844,
+ "step": 1762
+ },
+ {
+ "epoch": 6.457875457875458,
+ "grad_norm": 21.318105697631836,
+ "learning_rate": 2.363858363858364e-05,
+ "loss": 0.1297,
+ "step": 1763
+ },
+ {
+ "epoch": 6.461538461538462,
+ "grad_norm": 22.11357879638672,
+ "learning_rate": 2.3614163614163614e-05,
+ "loss": 0.1063,
+ "step": 1764
+ },
+ {
+ "epoch": 6.465201465201465,
+ "grad_norm": 2.4257397651672363,
+ "learning_rate": 2.358974358974359e-05,
+ "loss": 0.0098,
+ "step": 1765
+ },
+ {
+ "epoch": 6.468864468864469,
+ "grad_norm": 11.911495208740234,
+ "learning_rate": 2.3565323565323564e-05,
+ "loss": 0.0386,
+ "step": 1766
+ },
+ {
+ "epoch": 6.472527472527473,
+ "grad_norm": 5.848181247711182,
+ "learning_rate": 2.3540903540903543e-05,
+ "loss": 0.0141,
+ "step": 1767
+ },
+ {
+ "epoch": 6.476190476190476,
+ "grad_norm": 58.96442413330078,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 0.1635,
+ "step": 1768
+ },
+ {
+ "epoch": 6.47985347985348,
+ "grad_norm": 45.464298248291016,
+ "learning_rate": 2.3492063492063493e-05,
+ "loss": 0.5185,
+ "step": 1769
+ },
+ {
+ "epoch": 6.483516483516484,
+ "grad_norm": 363.1459045410156,
+ "learning_rate": 2.3467643467643468e-05,
+ "loss": 0.9437,
+ "step": 1770
+ },
+ {
+ "epoch": 6.487179487179487,
+ "grad_norm": 30.113380432128906,
+ "learning_rate": 2.3443223443223443e-05,
+ "loss": 0.1013,
+ "step": 1771
+ },
+ {
+ "epoch": 6.490842490842491,
+ "grad_norm": 59.738224029541016,
+ "learning_rate": 2.3418803418803418e-05,
+ "loss": 0.7901,
+ "step": 1772
+ },
+ {
+ "epoch": 6.4945054945054945,
+ "grad_norm": 20.25137710571289,
+ "learning_rate": 2.3394383394383396e-05,
+ "loss": 0.2715,
+ "step": 1773
+ },
+ {
+ "epoch": 6.498168498168498,
+ "grad_norm": 36.56110763549805,
+ "learning_rate": 2.336996336996337e-05,
+ "loss": 0.4192,
+ "step": 1774
+ },
+ {
+ "epoch": 6.501831501831502,
+ "grad_norm": 25.077024459838867,
+ "learning_rate": 2.3345543345543347e-05,
+ "loss": 0.0861,
+ "step": 1775
+ },
+ {
+ "epoch": 6.5054945054945055,
+ "grad_norm": 19.396398544311523,
+ "learning_rate": 2.332112332112332e-05,
+ "loss": 0.0352,
+ "step": 1776
+ },
+ {
+ "epoch": 6.509157509157509,
+ "grad_norm": 93.91683197021484,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.1414,
+ "step": 1777
+ },
+ {
+ "epoch": 6.512820512820513,
+ "grad_norm": 30.467477798461914,
+ "learning_rate": 2.3272283272283272e-05,
+ "loss": 0.123,
+ "step": 1778
+ },
+ {
+ "epoch": 6.516483516483516,
+ "grad_norm": 135.5657196044922,
+ "learning_rate": 2.3247863247863247e-05,
+ "loss": 0.9203,
+ "step": 1779
+ },
+ {
+ "epoch": 6.52014652014652,
+ "grad_norm": 66.74224853515625,
+ "learning_rate": 2.3223443223443225e-05,
+ "loss": 1.6109,
+ "step": 1780
+ },
+ {
+ "epoch": 6.523809523809524,
+ "grad_norm": 5.672858238220215,
+ "learning_rate": 2.31990231990232e-05,
+ "loss": 0.0259,
+ "step": 1781
+ },
+ {
+ "epoch": 6.527472527472527,
+ "grad_norm": 116.89350128173828,
+ "learning_rate": 2.3174603174603175e-05,
+ "loss": 0.5468,
+ "step": 1782
+ },
+ {
+ "epoch": 6.531135531135531,
+ "grad_norm": 67.1368637084961,
+ "learning_rate": 2.315018315018315e-05,
+ "loss": 0.2192,
+ "step": 1783
+ },
+ {
+ "epoch": 6.534798534798535,
+ "grad_norm": 23.453842163085938,
+ "learning_rate": 2.3125763125763126e-05,
+ "loss": 0.1637,
+ "step": 1784
+ },
+ {
+ "epoch": 6.538461538461538,
+ "grad_norm": 10.070181846618652,
+ "learning_rate": 2.31013431013431e-05,
+ "loss": 0.0613,
+ "step": 1785
+ },
+ {
+ "epoch": 6.542124542124542,
+ "grad_norm": 76.60414123535156,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 1.1513,
+ "step": 1786
+ },
+ {
+ "epoch": 6.545787545787546,
+ "grad_norm": 28.578702926635742,
+ "learning_rate": 2.3052503052503054e-05,
+ "loss": 0.4436,
+ "step": 1787
+ },
+ {
+ "epoch": 6.549450549450549,
+ "grad_norm": 56.702999114990234,
+ "learning_rate": 2.302808302808303e-05,
+ "loss": 0.3688,
+ "step": 1788
+ },
+ {
+ "epoch": 6.553113553113553,
+ "grad_norm": 97.274658203125,
+ "learning_rate": 2.3003663003663004e-05,
+ "loss": 1.3588,
+ "step": 1789
+ },
+ {
+ "epoch": 6.556776556776557,
+ "grad_norm": 15.371636390686035,
+ "learning_rate": 2.297924297924298e-05,
+ "loss": 0.1227,
+ "step": 1790
+ },
+ {
+ "epoch": 6.56043956043956,
+ "grad_norm": 48.43988800048828,
+ "learning_rate": 2.2954822954822954e-05,
+ "loss": 0.5581,
+ "step": 1791
+ },
+ {
+ "epoch": 6.564102564102564,
+ "grad_norm": 30.510440826416016,
+ "learning_rate": 2.293040293040293e-05,
+ "loss": 0.1888,
+ "step": 1792
+ },
+ {
+ "epoch": 6.5677655677655675,
+ "grad_norm": 34.03535461425781,
+ "learning_rate": 2.2905982905982908e-05,
+ "loss": 0.3731,
+ "step": 1793
+ },
+ {
+ "epoch": 6.571428571428571,
+ "grad_norm": 41.19938659667969,
+ "learning_rate": 2.2881562881562883e-05,
+ "loss": 0.4705,
+ "step": 1794
+ },
+ {
+ "epoch": 6.575091575091575,
+ "grad_norm": 6.060940742492676,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.0586,
+ "step": 1795
+ },
+ {
+ "epoch": 6.5787545787545785,
+ "grad_norm": 19.60703468322754,
+ "learning_rate": 2.2832722832722833e-05,
+ "loss": 0.2046,
+ "step": 1796
+ },
+ {
+ "epoch": 6.582417582417582,
+ "grad_norm": 30.162328720092773,
+ "learning_rate": 2.2808302808302808e-05,
+ "loss": 0.1926,
+ "step": 1797
+ },
+ {
+ "epoch": 6.586080586080586,
+ "grad_norm": 28.184131622314453,
+ "learning_rate": 2.2783882783882783e-05,
+ "loss": 0.4085,
+ "step": 1798
+ },
+ {
+ "epoch": 6.589743589743589,
+ "grad_norm": 28.77677345275879,
+ "learning_rate": 2.275946275946276e-05,
+ "loss": 0.4333,
+ "step": 1799
+ },
+ {
+ "epoch": 6.593406593406593,
+ "grad_norm": 16.47443962097168,
+ "learning_rate": 2.2735042735042737e-05,
+ "loss": 0.1579,
+ "step": 1800
+ },
+ {
+ "epoch": 6.597069597069597,
+ "grad_norm": 24.273569107055664,
+ "learning_rate": 2.2710622710622712e-05,
+ "loss": 0.1917,
+ "step": 1801
+ },
+ {
+ "epoch": 6.6007326007326,
+ "grad_norm": 43.3727912902832,
+ "learning_rate": 2.2686202686202687e-05,
+ "loss": 0.4186,
+ "step": 1802
+ },
+ {
+ "epoch": 6.604395604395604,
+ "grad_norm": 21.321182250976562,
+ "learning_rate": 2.2661782661782662e-05,
+ "loss": 0.187,
+ "step": 1803
+ },
+ {
+ "epoch": 6.608058608058608,
+ "grad_norm": 9.65528678894043,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.0584,
+ "step": 1804
+ },
+ {
+ "epoch": 6.611721611721611,
+ "grad_norm": 43.85563659667969,
+ "learning_rate": 2.2612942612942612e-05,
+ "loss": 0.2249,
+ "step": 1805
+ },
+ {
+ "epoch": 6.615384615384615,
+ "grad_norm": 36.068946838378906,
+ "learning_rate": 2.258852258852259e-05,
+ "loss": 0.8459,
+ "step": 1806
+ },
+ {
+ "epoch": 6.619047619047619,
+ "grad_norm": 37.197776794433594,
+ "learning_rate": 2.2564102564102566e-05,
+ "loss": 0.4026,
+ "step": 1807
+ },
+ {
+ "epoch": 6.622710622710622,
+ "grad_norm": 11.39905071258545,
+ "learning_rate": 2.253968253968254e-05,
+ "loss": 0.0544,
+ "step": 1808
+ },
+ {
+ "epoch": 6.626373626373626,
+ "grad_norm": 6.2379150390625,
+ "learning_rate": 2.2515262515262516e-05,
+ "loss": 0.0342,
+ "step": 1809
+ },
+ {
+ "epoch": 6.63003663003663,
+ "grad_norm": 14.908777236938477,
+ "learning_rate": 2.249084249084249e-05,
+ "loss": 0.1245,
+ "step": 1810
+ },
+ {
+ "epoch": 6.633699633699633,
+ "grad_norm": 47.33977508544922,
+ "learning_rate": 2.2466422466422466e-05,
+ "loss": 0.3771,
+ "step": 1811
+ },
+ {
+ "epoch": 6.637362637362637,
+ "grad_norm": 25.724132537841797,
+ "learning_rate": 2.2442002442002444e-05,
+ "loss": 0.3055,
+ "step": 1812
+ },
+ {
+ "epoch": 6.641025641025641,
+ "grad_norm": 30.99205207824707,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 0.2163,
+ "step": 1813
+ },
+ {
+ "epoch": 6.644688644688645,
+ "grad_norm": 22.741575241088867,
+ "learning_rate": 2.2393162393162394e-05,
+ "loss": 0.136,
+ "step": 1814
+ },
+ {
+ "epoch": 6.648351648351649,
+ "grad_norm": 22.271474838256836,
+ "learning_rate": 2.236874236874237e-05,
+ "loss": 0.2299,
+ "step": 1815
+ },
+ {
+ "epoch": 6.652014652014652,
+ "grad_norm": 51.153072357177734,
+ "learning_rate": 2.2344322344322345e-05,
+ "loss": 0.8646,
+ "step": 1816
+ },
+ {
+ "epoch": 6.655677655677656,
+ "grad_norm": 4.649880409240723,
+ "learning_rate": 2.231990231990232e-05,
+ "loss": 0.0344,
+ "step": 1817
+ },
+ {
+ "epoch": 6.65934065934066,
+ "grad_norm": 2.948399305343628,
+ "learning_rate": 2.2295482295482295e-05,
+ "loss": 0.0128,
+ "step": 1818
+ },
+ {
+ "epoch": 6.663003663003663,
+ "grad_norm": 10.776185035705566,
+ "learning_rate": 2.2271062271062273e-05,
+ "loss": 0.0438,
+ "step": 1819
+ },
+ {
+ "epoch": 6.666666666666667,
+ "grad_norm": 31.777973175048828,
+ "learning_rate": 2.2246642246642248e-05,
+ "loss": 1.3552,
+ "step": 1820
+ },
+ {
+ "epoch": 6.670329670329671,
+ "grad_norm": 44.022377014160156,
+ "learning_rate": 2.222222222222222e-05,
+ "loss": 0.1928,
+ "step": 1821
+ },
+ {
+ "epoch": 6.673992673992674,
+ "grad_norm": 7.014647960662842,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 0.0675,
+ "step": 1822
+ },
+ {
+ "epoch": 6.677655677655678,
+ "grad_norm": 10.964372634887695,
+ "learning_rate": 2.2173382173382173e-05,
+ "loss": 0.0809,
+ "step": 1823
+ },
+ {
+ "epoch": 6.681318681318682,
+ "grad_norm": 42.56317901611328,
+ "learning_rate": 2.214896214896215e-05,
+ "loss": 0.2639,
+ "step": 1824
+ },
+ {
+ "epoch": 6.684981684981685,
+ "grad_norm": 25.33672523498535,
+ "learning_rate": 2.2124542124542127e-05,
+ "loss": 0.294,
+ "step": 1825
+ },
+ {
+ "epoch": 6.688644688644689,
+ "grad_norm": 9.823565483093262,
+ "learning_rate": 2.2100122100122102e-05,
+ "loss": 0.0885,
+ "step": 1826
+ },
+ {
+ "epoch": 6.6923076923076925,
+ "grad_norm": 3.2519893646240234,
+ "learning_rate": 2.2075702075702077e-05,
+ "loss": 0.0208,
+ "step": 1827
+ },
+ {
+ "epoch": 6.695970695970696,
+ "grad_norm": 14.441536903381348,
+ "learning_rate": 2.2051282051282052e-05,
+ "loss": 0.1541,
+ "step": 1828
+ },
+ {
+ "epoch": 6.6996336996337,
+ "grad_norm": 4.128608226776123,
+ "learning_rate": 2.2026862026862027e-05,
+ "loss": 0.03,
+ "step": 1829
+ },
+ {
+ "epoch": 6.7032967032967035,
+ "grad_norm": 13.953630447387695,
+ "learning_rate": 2.2002442002442002e-05,
+ "loss": 0.0781,
+ "step": 1830
+ },
+ {
+ "epoch": 6.706959706959707,
+ "grad_norm": 24.90090560913086,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.33,
+ "step": 1831
+ },
+ {
+ "epoch": 6.710622710622711,
+ "grad_norm": 43.3170051574707,
+ "learning_rate": 2.1953601953601956e-05,
+ "loss": 0.1735,
+ "step": 1832
+ },
+ {
+ "epoch": 6.714285714285714,
+ "grad_norm": 5.82177734375,
+ "learning_rate": 2.192918192918193e-05,
+ "loss": 0.0281,
+ "step": 1833
+ },
+ {
+ "epoch": 6.717948717948718,
+ "grad_norm": 26.415163040161133,
+ "learning_rate": 2.1904761904761903e-05,
+ "loss": 0.4272,
+ "step": 1834
+ },
+ {
+ "epoch": 6.721611721611722,
+ "grad_norm": 40.3553581237793,
+ "learning_rate": 2.188034188034188e-05,
+ "loss": 0.3375,
+ "step": 1835
+ },
+ {
+ "epoch": 6.725274725274725,
+ "grad_norm": 39.16763687133789,
+ "learning_rate": 2.1855921855921856e-05,
+ "loss": 0.881,
+ "step": 1836
+ },
+ {
+ "epoch": 6.728937728937729,
+ "grad_norm": 14.275158882141113,
+ "learning_rate": 2.183150183150183e-05,
+ "loss": 0.0499,
+ "step": 1837
+ },
+ {
+ "epoch": 6.732600732600733,
+ "grad_norm": 40.29611587524414,
+ "learning_rate": 2.180708180708181e-05,
+ "loss": 0.2447,
+ "step": 1838
+ },
+ {
+ "epoch": 6.736263736263736,
+ "grad_norm": 33.86298751831055,
+ "learning_rate": 2.1782661782661785e-05,
+ "loss": 0.2772,
+ "step": 1839
+ },
+ {
+ "epoch": 6.73992673992674,
+ "grad_norm": 34.46928405761719,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.2721,
+ "step": 1840
+ },
+ {
+ "epoch": 6.743589743589744,
+ "grad_norm": 17.7811222076416,
+ "learning_rate": 2.1733821733821735e-05,
+ "loss": 0.0955,
+ "step": 1841
+ },
+ {
+ "epoch": 6.747252747252747,
+ "grad_norm": 33.17821502685547,
+ "learning_rate": 2.170940170940171e-05,
+ "loss": 0.1831,
+ "step": 1842
+ },
+ {
+ "epoch": 6.750915750915751,
+ "grad_norm": 24.910184860229492,
+ "learning_rate": 2.1684981684981685e-05,
+ "loss": 0.1617,
+ "step": 1843
+ },
+ {
+ "epoch": 6.754578754578755,
+ "grad_norm": 28.5413875579834,
+ "learning_rate": 2.166056166056166e-05,
+ "loss": 0.2048,
+ "step": 1844
+ },
+ {
+ "epoch": 6.758241758241758,
+ "grad_norm": 26.866653442382812,
+ "learning_rate": 2.163614163614164e-05,
+ "loss": 0.1637,
+ "step": 1845
+ },
+ {
+ "epoch": 6.761904761904762,
+ "grad_norm": 43.447593688964844,
+ "learning_rate": 2.1611721611721613e-05,
+ "loss": 0.2206,
+ "step": 1846
+ },
+ {
+ "epoch": 6.7655677655677655,
+ "grad_norm": 8.146500587463379,
+ "learning_rate": 2.1587301587301585e-05,
+ "loss": 0.0199,
+ "step": 1847
+ },
+ {
+ "epoch": 6.769230769230769,
+ "grad_norm": 30.458940505981445,
+ "learning_rate": 2.1562881562881564e-05,
+ "loss": 0.0963,
+ "step": 1848
+ },
+ {
+ "epoch": 6.772893772893773,
+ "grad_norm": 1.6412991285324097,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.0097,
+ "step": 1849
+ },
+ {
+ "epoch": 6.7765567765567765,
+ "grad_norm": 22.804906845092773,
+ "learning_rate": 2.1514041514041514e-05,
+ "loss": 0.115,
+ "step": 1850
+ },
+ {
+ "epoch": 6.78021978021978,
+ "grad_norm": 21.790761947631836,
+ "learning_rate": 2.1489621489621492e-05,
+ "loss": 0.1609,
+ "step": 1851
+ },
+ {
+ "epoch": 6.783882783882784,
+ "grad_norm": 56.942420959472656,
+ "learning_rate": 2.1465201465201467e-05,
+ "loss": 0.3725,
+ "step": 1852
+ },
+ {
+ "epoch": 6.787545787545787,
+ "grad_norm": 31.713504791259766,
+ "learning_rate": 2.1440781440781442e-05,
+ "loss": 0.3035,
+ "step": 1853
+ },
+ {
+ "epoch": 6.791208791208791,
+ "grad_norm": 14.83351993560791,
+ "learning_rate": 2.1416361416361417e-05,
+ "loss": 0.0383,
+ "step": 1854
+ },
+ {
+ "epoch": 6.794871794871795,
+ "grad_norm": 28.03726577758789,
+ "learning_rate": 2.1391941391941392e-05,
+ "loss": 0.0432,
+ "step": 1855
+ },
+ {
+ "epoch": 6.798534798534798,
+ "grad_norm": 72.7824478149414,
+ "learning_rate": 2.1367521367521368e-05,
+ "loss": 0.7678,
+ "step": 1856
+ },
+ {
+ "epoch": 6.802197802197802,
+ "grad_norm": 48.0980224609375,
+ "learning_rate": 2.1343101343101343e-05,
+ "loss": 0.7691,
+ "step": 1857
+ },
+ {
+ "epoch": 6.805860805860806,
+ "grad_norm": 44.305519104003906,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.4334,
+ "step": 1858
+ },
+ {
+ "epoch": 6.809523809523809,
+ "grad_norm": 37.26662826538086,
+ "learning_rate": 2.1294261294261296e-05,
+ "loss": 0.5122,
+ "step": 1859
+ },
+ {
+ "epoch": 6.813186813186813,
+ "grad_norm": 11.758150100708008,
+ "learning_rate": 2.1269841269841268e-05,
+ "loss": 0.034,
+ "step": 1860
+ },
+ {
+ "epoch": 6.816849816849817,
+ "grad_norm": 22.28230857849121,
+ "learning_rate": 2.1245421245421246e-05,
+ "loss": 0.1423,
+ "step": 1861
+ },
+ {
+ "epoch": 6.82051282051282,
+ "grad_norm": 15.02229118347168,
+ "learning_rate": 2.122100122100122e-05,
+ "loss": 0.0574,
+ "step": 1862
+ },
+ {
+ "epoch": 6.824175824175824,
+ "grad_norm": 54.3133659362793,
+ "learning_rate": 2.1196581196581196e-05,
+ "loss": 0.7862,
+ "step": 1863
+ },
+ {
+ "epoch": 6.827838827838828,
+ "grad_norm": 14.319539070129395,
+ "learning_rate": 2.1172161172161175e-05,
+ "loss": 0.0509,
+ "step": 1864
+ },
+ {
+ "epoch": 6.831501831501831,
+ "grad_norm": 21.989151000976562,
+ "learning_rate": 2.114774114774115e-05,
+ "loss": 0.1181,
+ "step": 1865
+ },
+ {
+ "epoch": 6.835164835164835,
+ "grad_norm": 35.67295455932617,
+ "learning_rate": 2.112332112332112e-05,
+ "loss": 0.5721,
+ "step": 1866
+ },
+ {
+ "epoch": 6.8388278388278385,
+ "grad_norm": 1.1201294660568237,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.006,
+ "step": 1867
+ },
+ {
+ "epoch": 6.842490842490842,
+ "grad_norm": 55.64126205444336,
+ "learning_rate": 2.1074481074481075e-05,
+ "loss": 0.5155,
+ "step": 1868
+ },
+ {
+ "epoch": 6.846153846153846,
+ "grad_norm": 34.077598571777344,
+ "learning_rate": 2.105006105006105e-05,
+ "loss": 0.2999,
+ "step": 1869
+ },
+ {
+ "epoch": 6.8498168498168495,
+ "grad_norm": 47.34593200683594,
+ "learning_rate": 2.1025641025641025e-05,
+ "loss": 0.5192,
+ "step": 1870
+ },
+ {
+ "epoch": 6.853479853479853,
+ "grad_norm": 15.37938117980957,
+ "learning_rate": 2.1001221001221004e-05,
+ "loss": 0.0647,
+ "step": 1871
+ },
+ {
+ "epoch": 6.857142857142857,
+ "grad_norm": 8.03809928894043,
+ "learning_rate": 2.097680097680098e-05,
+ "loss": 0.0535,
+ "step": 1872
+ },
+ {
+ "epoch": 6.860805860805861,
+ "grad_norm": 34.22372055053711,
+ "learning_rate": 2.095238095238095e-05,
+ "loss": 0.4123,
+ "step": 1873
+ },
+ {
+ "epoch": 6.864468864468865,
+ "grad_norm": 19.66349220275879,
+ "learning_rate": 2.092796092796093e-05,
+ "loss": 0.165,
+ "step": 1874
+ },
+ {
+ "epoch": 6.868131868131869,
+ "grad_norm": 4.448884010314941,
+ "learning_rate": 2.0903540903540904e-05,
+ "loss": 0.0204,
+ "step": 1875
+ },
+ {
+ "epoch": 6.871794871794872,
+ "grad_norm": 7.874554634094238,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.0339,
+ "step": 1876
+ },
+ {
+ "epoch": 6.875457875457876,
+ "grad_norm": 2.1591508388519287,
+ "learning_rate": 2.0854700854700857e-05,
+ "loss": 0.0069,
+ "step": 1877
+ },
+ {
+ "epoch": 6.8791208791208796,
+ "grad_norm": 7.496129512786865,
+ "learning_rate": 2.0830280830280832e-05,
+ "loss": 0.0522,
+ "step": 1878
+ },
+ {
+ "epoch": 6.882783882783883,
+ "grad_norm": 1.867928385734558,
+ "learning_rate": 2.0805860805860804e-05,
+ "loss": 0.0075,
+ "step": 1879
+ },
+ {
+ "epoch": 6.886446886446887,
+ "grad_norm": 6.0440239906311035,
+ "learning_rate": 2.0781440781440783e-05,
+ "loss": 0.0454,
+ "step": 1880
+ },
+ {
+ "epoch": 6.8901098901098905,
+ "grad_norm": 38.901275634765625,
+ "learning_rate": 2.0757020757020758e-05,
+ "loss": 0.1179,
+ "step": 1881
+ },
+ {
+ "epoch": 6.893772893772894,
+ "grad_norm": 36.98682403564453,
+ "learning_rate": 2.0732600732600733e-05,
+ "loss": 0.4722,
+ "step": 1882
+ },
+ {
+ "epoch": 6.897435897435898,
+ "grad_norm": 24.764745712280273,
+ "learning_rate": 2.0708180708180708e-05,
+ "loss": 0.1179,
+ "step": 1883
+ },
+ {
+ "epoch": 6.9010989010989015,
+ "grad_norm": 9.029558181762695,
+ "learning_rate": 2.0683760683760686e-05,
+ "loss": 0.0134,
+ "step": 1884
+ },
+ {
+ "epoch": 6.904761904761905,
+ "grad_norm": 54.04767608642578,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 0.3645,
+ "step": 1885
+ },
+ {
+ "epoch": 6.908424908424909,
+ "grad_norm": 35.74855041503906,
+ "learning_rate": 2.0634920634920633e-05,
+ "loss": 0.5228,
+ "step": 1886
+ },
+ {
+ "epoch": 6.912087912087912,
+ "grad_norm": 18.870223999023438,
+ "learning_rate": 2.061050061050061e-05,
+ "loss": 0.0564,
+ "step": 1887
+ },
+ {
+ "epoch": 6.915750915750916,
+ "grad_norm": 1.4971216917037964,
+ "learning_rate": 2.0586080586080587e-05,
+ "loss": 0.0067,
+ "step": 1888
+ },
+ {
+ "epoch": 6.91941391941392,
+ "grad_norm": 71.35897064208984,
+ "learning_rate": 2.056166056166056e-05,
+ "loss": 0.9147,
+ "step": 1889
+ },
+ {
+ "epoch": 6.923076923076923,
+ "grad_norm": 20.66876220703125,
+ "learning_rate": 2.053724053724054e-05,
+ "loss": 0.0777,
+ "step": 1890
+ },
+ {
+ "epoch": 6.926739926739927,
+ "grad_norm": 12.178057670593262,
+ "learning_rate": 2.0512820512820515e-05,
+ "loss": 0.0682,
+ "step": 1891
+ },
+ {
+ "epoch": 6.930402930402931,
+ "grad_norm": 18.622045516967773,
+ "learning_rate": 2.0488400488400487e-05,
+ "loss": 0.2268,
+ "step": 1892
+ },
+ {
+ "epoch": 6.934065934065934,
+ "grad_norm": 13.028661727905273,
+ "learning_rate": 2.0463980463980462e-05,
+ "loss": 0.0783,
+ "step": 1893
+ },
+ {
+ "epoch": 6.937728937728938,
+ "grad_norm": 52.034603118896484,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.2124,
+ "step": 1894
+ },
+ {
+ "epoch": 6.941391941391942,
+ "grad_norm": 15.498795509338379,
+ "learning_rate": 2.0415140415140415e-05,
+ "loss": 0.1372,
+ "step": 1895
+ },
+ {
+ "epoch": 6.945054945054945,
+ "grad_norm": 4.659972190856934,
+ "learning_rate": 2.039072039072039e-05,
+ "loss": 0.0671,
+ "step": 1896
+ },
+ {
+ "epoch": 6.948717948717949,
+ "grad_norm": 67.44121551513672,
+ "learning_rate": 2.036630036630037e-05,
+ "loss": 0.3543,
+ "step": 1897
+ },
+ {
+ "epoch": 6.9523809523809526,
+ "grad_norm": 55.583770751953125,
+ "learning_rate": 2.0341880341880344e-05,
+ "loss": 0.5827,
+ "step": 1898
+ },
+ {
+ "epoch": 6.956043956043956,
+ "grad_norm": 2.5286853313446045,
+ "learning_rate": 2.0317460317460316e-05,
+ "loss": 0.0093,
+ "step": 1899
+ },
+ {
+ "epoch": 6.95970695970696,
+ "grad_norm": 35.537654876708984,
+ "learning_rate": 2.0293040293040294e-05,
+ "loss": 0.4927,
+ "step": 1900
+ },
+ {
+ "epoch": 6.9633699633699635,
+ "grad_norm": 5.582351207733154,
+ "learning_rate": 2.026862026862027e-05,
+ "loss": 0.0266,
+ "step": 1901
+ },
+ {
+ "epoch": 6.967032967032967,
+ "grad_norm": 24.245107650756836,
+ "learning_rate": 2.0244200244200244e-05,
+ "loss": 0.1652,
+ "step": 1902
+ },
+ {
+ "epoch": 6.970695970695971,
+ "grad_norm": 15.859257698059082,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.0523,
+ "step": 1903
+ },
+ {
+ "epoch": 6.9743589743589745,
+ "grad_norm": 4.049310207366943,
+ "learning_rate": 2.0195360195360198e-05,
+ "loss": 0.016,
+ "step": 1904
+ },
+ {
+ "epoch": 6.978021978021978,
+ "grad_norm": 22.330875396728516,
+ "learning_rate": 2.017094017094017e-05,
+ "loss": 0.0999,
+ "step": 1905
+ },
+ {
+ "epoch": 6.981684981684982,
+ "grad_norm": 5.005560874938965,
+ "learning_rate": 2.0146520146520144e-05,
+ "loss": 0.0186,
+ "step": 1906
+ },
+ {
+ "epoch": 6.985347985347985,
+ "grad_norm": 5.587247848510742,
+ "learning_rate": 2.0122100122100123e-05,
+ "loss": 0.0312,
+ "step": 1907
+ },
+ {
+ "epoch": 6.989010989010989,
+ "grad_norm": 46.75461959838867,
+ "learning_rate": 2.0097680097680098e-05,
+ "loss": 0.2803,
+ "step": 1908
+ },
+ {
+ "epoch": 6.992673992673993,
+ "grad_norm": 9.029139518737793,
+ "learning_rate": 2.0073260073260073e-05,
+ "loss": 0.0437,
+ "step": 1909
+ },
+ {
+ "epoch": 6.996336996336996,
+ "grad_norm": 26.199968338012695,
+ "learning_rate": 2.004884004884005e-05,
+ "loss": 0.4601,
+ "step": 1910
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 2.2140614986419678,
+ "learning_rate": 2.0024420024420023e-05,
+ "loss": 0.0096,
+ "step": 1911
+ },
+ {
+ "epoch": 7.003663003663004,
+ "grad_norm": 52.966732025146484,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.5645,
+ "step": 1912
+ },
+ {
+ "epoch": 7.007326007326007,
+ "grad_norm": 11.818926811218262,
+ "learning_rate": 1.9975579975579977e-05,
+ "loss": 0.1,
+ "step": 1913
+ },
+ {
+ "epoch": 7.010989010989011,
+ "grad_norm": 3.5507917404174805,
+ "learning_rate": 1.9951159951159952e-05,
+ "loss": 0.0124,
+ "step": 1914
+ },
+ {
+ "epoch": 7.014652014652015,
+ "grad_norm": 13.962370872497559,
+ "learning_rate": 1.9926739926739927e-05,
+ "loss": 0.0361,
+ "step": 1915
+ },
+ {
+ "epoch": 7.018315018315018,
+ "grad_norm": 18.855941772460938,
+ "learning_rate": 1.9902319902319905e-05,
+ "loss": 0.1029,
+ "step": 1916
+ },
+ {
+ "epoch": 7.021978021978022,
+ "grad_norm": 25.34268569946289,
+ "learning_rate": 1.987789987789988e-05,
+ "loss": 0.0968,
+ "step": 1917
+ },
+ {
+ "epoch": 7.0256410256410255,
+ "grad_norm": 12.053638458251953,
+ "learning_rate": 1.9853479853479852e-05,
+ "loss": 0.0473,
+ "step": 1918
+ },
+ {
+ "epoch": 7.029304029304029,
+ "grad_norm": 28.66246795654297,
+ "learning_rate": 1.9829059829059827e-05,
+ "loss": 0.477,
+ "step": 1919
+ },
+ {
+ "epoch": 7.032967032967033,
+ "grad_norm": 37.606475830078125,
+ "learning_rate": 1.9804639804639806e-05,
+ "loss": 0.3894,
+ "step": 1920
+ },
+ {
+ "epoch": 7.0366300366300365,
+ "grad_norm": 10.550342559814453,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.031,
+ "step": 1921
+ },
+ {
+ "epoch": 7.04029304029304,
+ "grad_norm": 8.748348236083984,
+ "learning_rate": 1.9755799755799756e-05,
+ "loss": 0.087,
+ "step": 1922
+ },
+ {
+ "epoch": 7.043956043956044,
+ "grad_norm": 16.9587345123291,
+ "learning_rate": 1.9731379731379734e-05,
+ "loss": 0.1271,
+ "step": 1923
+ },
+ {
+ "epoch": 7.0476190476190474,
+ "grad_norm": 64.79300689697266,
+ "learning_rate": 1.9706959706959706e-05,
+ "loss": 0.4748,
+ "step": 1924
+ },
+ {
+ "epoch": 7.051282051282051,
+ "grad_norm": 1.4843182563781738,
+ "learning_rate": 1.968253968253968e-05,
+ "loss": 0.0074,
+ "step": 1925
+ },
+ {
+ "epoch": 7.054945054945055,
+ "grad_norm": 6.48045539855957,
+ "learning_rate": 1.965811965811966e-05,
+ "loss": 0.0312,
+ "step": 1926
+ },
+ {
+ "epoch": 7.058608058608058,
+ "grad_norm": 13.35557746887207,
+ "learning_rate": 1.9633699633699634e-05,
+ "loss": 0.0395,
+ "step": 1927
+ },
+ {
+ "epoch": 7.062271062271062,
+ "grad_norm": 6.710418701171875,
+ "learning_rate": 1.960927960927961e-05,
+ "loss": 0.0237,
+ "step": 1928
+ },
+ {
+ "epoch": 7.065934065934066,
+ "grad_norm": 1.5964992046356201,
+ "learning_rate": 1.9584859584859588e-05,
+ "loss": 0.0069,
+ "step": 1929
+ },
+ {
+ "epoch": 7.069597069597069,
+ "grad_norm": 17.386457443237305,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.1719,
+ "step": 1930
+ },
+ {
+ "epoch": 7.073260073260073,
+ "grad_norm": 9.381852149963379,
+ "learning_rate": 1.9536019536019535e-05,
+ "loss": 0.0274,
+ "step": 1931
+ },
+ {
+ "epoch": 7.076923076923077,
+ "grad_norm": 96.48052978515625,
+ "learning_rate": 1.951159951159951e-05,
+ "loss": 0.9714,
+ "step": 1932
+ },
+ {
+ "epoch": 7.08058608058608,
+ "grad_norm": 9.537943840026855,
+ "learning_rate": 1.9487179487179488e-05,
+ "loss": 0.0608,
+ "step": 1933
+ },
+ {
+ "epoch": 7.084249084249084,
+ "grad_norm": 47.1885986328125,
+ "learning_rate": 1.9462759462759463e-05,
+ "loss": 0.3678,
+ "step": 1934
+ },
+ {
+ "epoch": 7.087912087912088,
+ "grad_norm": 22.831552505493164,
+ "learning_rate": 1.9438339438339438e-05,
+ "loss": 0.1386,
+ "step": 1935
+ },
+ {
+ "epoch": 7.091575091575091,
+ "grad_norm": 7.730359077453613,
+ "learning_rate": 1.9413919413919417e-05,
+ "loss": 0.0286,
+ "step": 1936
+ },
+ {
+ "epoch": 7.095238095238095,
+ "grad_norm": 34.329349517822266,
+ "learning_rate": 1.938949938949939e-05,
+ "loss": 0.2041,
+ "step": 1937
+ },
+ {
+ "epoch": 7.0989010989010985,
+ "grad_norm": 2.7768473625183105,
+ "learning_rate": 1.9365079365079363e-05,
+ "loss": 0.0095,
+ "step": 1938
+ },
+ {
+ "epoch": 7.102564102564102,
+ "grad_norm": 52.868446350097656,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 1.3287,
+ "step": 1939
+ },
+ {
+ "epoch": 7.106227106227106,
+ "grad_norm": 46.30121612548828,
+ "learning_rate": 1.9316239316239317e-05,
+ "loss": 0.6172,
+ "step": 1940
+ },
+ {
+ "epoch": 7.1098901098901095,
+ "grad_norm": 22.829683303833008,
+ "learning_rate": 1.9291819291819292e-05,
+ "loss": 0.2141,
+ "step": 1941
+ },
+ {
+ "epoch": 7.113553113553113,
+ "grad_norm": 5.540363311767578,
+ "learning_rate": 1.926739926739927e-05,
+ "loss": 0.0202,
+ "step": 1942
+ },
+ {
+ "epoch": 7.117216117216117,
+ "grad_norm": 12.821202278137207,
+ "learning_rate": 1.9242979242979246e-05,
+ "loss": 0.0474,
+ "step": 1943
+ },
+ {
+ "epoch": 7.1208791208791204,
+ "grad_norm": 51.50701141357422,
+ "learning_rate": 1.9218559218559217e-05,
+ "loss": 0.2716,
+ "step": 1944
+ },
+ {
+ "epoch": 7.124542124542124,
+ "grad_norm": 22.156648635864258,
+ "learning_rate": 1.9194139194139192e-05,
+ "loss": 0.4693,
+ "step": 1945
+ },
+ {
+ "epoch": 7.128205128205128,
+ "grad_norm": 21.045156478881836,
+ "learning_rate": 1.916971916971917e-05,
+ "loss": 0.471,
+ "step": 1946
+ },
+ {
+ "epoch": 7.131868131868132,
+ "grad_norm": 19.406959533691406,
+ "learning_rate": 1.9145299145299146e-05,
+ "loss": 0.1439,
+ "step": 1947
+ },
+ {
+ "epoch": 7.135531135531136,
+ "grad_norm": 3.8923749923706055,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.0165,
+ "step": 1948
+ },
+ {
+ "epoch": 7.13919413919414,
+ "grad_norm": 19.87603759765625,
+ "learning_rate": 1.90964590964591e-05,
+ "loss": 0.1763,
+ "step": 1949
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 0.5241024494171143,
+ "learning_rate": 1.907203907203907e-05,
+ "loss": 0.0026,
+ "step": 1950
+ },
+ {
+ "epoch": 7.146520146520147,
+ "grad_norm": 3.141636610031128,
+ "learning_rate": 1.9047619047619046e-05,
+ "loss": 0.0217,
+ "step": 1951
+ },
+ {
+ "epoch": 7.1501831501831505,
+ "grad_norm": 7.46498966217041,
+ "learning_rate": 1.9023199023199025e-05,
+ "loss": 0.0125,
+ "step": 1952
+ },
+ {
+ "epoch": 7.153846153846154,
+ "grad_norm": 2.050363779067993,
+ "learning_rate": 1.8998778998779e-05,
+ "loss": 0.0092,
+ "step": 1953
+ },
+ {
+ "epoch": 7.157509157509158,
+ "grad_norm": 65.3537826538086,
+ "learning_rate": 1.8974358974358975e-05,
+ "loss": 0.9234,
+ "step": 1954
+ },
+ {
+ "epoch": 7.1611721611721615,
+ "grad_norm": 39.09166717529297,
+ "learning_rate": 1.8949938949938953e-05,
+ "loss": 0.4183,
+ "step": 1955
+ },
+ {
+ "epoch": 7.164835164835165,
+ "grad_norm": 7.788208961486816,
+ "learning_rate": 1.8925518925518925e-05,
+ "loss": 0.0284,
+ "step": 1956
+ },
+ {
+ "epoch": 7.168498168498169,
+ "grad_norm": 19.53957176208496,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.148,
+ "step": 1957
+ },
+ {
+ "epoch": 7.172161172161172,
+ "grad_norm": 11.077863693237305,
+ "learning_rate": 1.8876678876678875e-05,
+ "loss": 0.0772,
+ "step": 1958
+ },
+ {
+ "epoch": 7.175824175824176,
+ "grad_norm": 10.294413566589355,
+ "learning_rate": 1.8852258852258853e-05,
+ "loss": 0.0278,
+ "step": 1959
+ },
+ {
+ "epoch": 7.17948717948718,
+ "grad_norm": 34.725284576416016,
+ "learning_rate": 1.882783882783883e-05,
+ "loss": 0.194,
+ "step": 1960
+ },
+ {
+ "epoch": 7.183150183150183,
+ "grad_norm": 27.773906707763672,
+ "learning_rate": 1.8803418803418804e-05,
+ "loss": 0.3261,
+ "step": 1961
+ },
+ {
+ "epoch": 7.186813186813187,
+ "grad_norm": 60.96028518676758,
+ "learning_rate": 1.8778998778998782e-05,
+ "loss": 0.5915,
+ "step": 1962
+ },
+ {
+ "epoch": 7.190476190476191,
+ "grad_norm": 9.918408393859863,
+ "learning_rate": 1.8754578754578754e-05,
+ "loss": 0.0428,
+ "step": 1963
+ },
+ {
+ "epoch": 7.194139194139194,
+ "grad_norm": 42.929927825927734,
+ "learning_rate": 1.873015873015873e-05,
+ "loss": 0.3522,
+ "step": 1964
+ },
+ {
+ "epoch": 7.197802197802198,
+ "grad_norm": 33.893463134765625,
+ "learning_rate": 1.8705738705738707e-05,
+ "loss": 0.5049,
+ "step": 1965
+ },
+ {
+ "epoch": 7.201465201465202,
+ "grad_norm": 3.18776273727417,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 0.0204,
+ "step": 1966
+ },
+ {
+ "epoch": 7.205128205128205,
+ "grad_norm": 9.548710823059082,
+ "learning_rate": 1.8656898656898657e-05,
+ "loss": 0.0711,
+ "step": 1967
+ },
+ {
+ "epoch": 7.208791208791209,
+ "grad_norm": 38.94087600708008,
+ "learning_rate": 1.8632478632478636e-05,
+ "loss": 0.5289,
+ "step": 1968
+ },
+ {
+ "epoch": 7.212454212454213,
+ "grad_norm": 5.812004566192627,
+ "learning_rate": 1.8608058608058607e-05,
+ "loss": 0.0224,
+ "step": 1969
+ },
+ {
+ "epoch": 7.216117216117216,
+ "grad_norm": 1.2060245275497437,
+ "learning_rate": 1.8583638583638583e-05,
+ "loss": 0.0077,
+ "step": 1970
+ },
+ {
+ "epoch": 7.21978021978022,
+ "grad_norm": 20.632722854614258,
+ "learning_rate": 1.8559218559218558e-05,
+ "loss": 0.0907,
+ "step": 1971
+ },
+ {
+ "epoch": 7.2234432234432235,
+ "grad_norm": 24.92366600036621,
+ "learning_rate": 1.8534798534798536e-05,
+ "loss": 0.1633,
+ "step": 1972
+ },
+ {
+ "epoch": 7.227106227106227,
+ "grad_norm": 2.3411026000976562,
+ "learning_rate": 1.851037851037851e-05,
+ "loss": 0.0098,
+ "step": 1973
+ },
+ {
+ "epoch": 7.230769230769231,
+ "grad_norm": 30.942848205566406,
+ "learning_rate": 1.8485958485958486e-05,
+ "loss": 0.1813,
+ "step": 1974
+ },
+ {
+ "epoch": 7.2344322344322345,
+ "grad_norm": 12.736541748046875,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.0397,
+ "step": 1975
+ },
+ {
+ "epoch": 7.238095238095238,
+ "grad_norm": 8.892921447753906,
+ "learning_rate": 1.8437118437118436e-05,
+ "loss": 0.0255,
+ "step": 1976
+ },
+ {
+ "epoch": 7.241758241758242,
+ "grad_norm": 36.48339080810547,
+ "learning_rate": 1.841269841269841e-05,
+ "loss": 0.3125,
+ "step": 1977
+ },
+ {
+ "epoch": 7.245421245421245,
+ "grad_norm": 48.35296630859375,
+ "learning_rate": 1.838827838827839e-05,
+ "loss": 0.4951,
+ "step": 1978
+ },
+ {
+ "epoch": 7.249084249084249,
+ "grad_norm": 31.021989822387695,
+ "learning_rate": 1.8363858363858365e-05,
+ "loss": 0.2124,
+ "step": 1979
+ },
+ {
+ "epoch": 7.252747252747253,
+ "grad_norm": 32.49650955200195,
+ "learning_rate": 1.833943833943834e-05,
+ "loss": 0.309,
+ "step": 1980
+ },
+ {
+ "epoch": 7.256410256410256,
+ "grad_norm": 43.47561264038086,
+ "learning_rate": 1.831501831501832e-05,
+ "loss": 0.3206,
+ "step": 1981
+ },
+ {
+ "epoch": 7.26007326007326,
+ "grad_norm": 14.67831802368164,
+ "learning_rate": 1.829059829059829e-05,
+ "loss": 0.0806,
+ "step": 1982
+ },
+ {
+ "epoch": 7.263736263736264,
+ "grad_norm": 23.66496467590332,
+ "learning_rate": 1.8266178266178265e-05,
+ "loss": 0.1769,
+ "step": 1983
+ },
+ {
+ "epoch": 7.267399267399267,
+ "grad_norm": 1.8125004768371582,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.0111,
+ "step": 1984
+ },
+ {
+ "epoch": 7.271062271062271,
+ "grad_norm": 1.3189254999160767,
+ "learning_rate": 1.821733821733822e-05,
+ "loss": 0.0056,
+ "step": 1985
+ },
+ {
+ "epoch": 7.274725274725275,
+ "grad_norm": 47.977203369140625,
+ "learning_rate": 1.8192918192918194e-05,
+ "loss": 0.3898,
+ "step": 1986
+ },
+ {
+ "epoch": 7.278388278388278,
+ "grad_norm": 39.66654968261719,
+ "learning_rate": 1.816849816849817e-05,
+ "loss": 0.4953,
+ "step": 1987
+ },
+ {
+ "epoch": 7.282051282051282,
+ "grad_norm": 24.90619659423828,
+ "learning_rate": 1.8144078144078147e-05,
+ "loss": 0.116,
+ "step": 1988
+ },
+ {
+ "epoch": 7.285714285714286,
+ "grad_norm": 4.373020648956299,
+ "learning_rate": 1.811965811965812e-05,
+ "loss": 0.0263,
+ "step": 1989
+ },
+ {
+ "epoch": 7.289377289377289,
+ "grad_norm": 24.788022994995117,
+ "learning_rate": 1.8095238095238094e-05,
+ "loss": 0.2322,
+ "step": 1990
+ },
+ {
+ "epoch": 7.293040293040293,
+ "grad_norm": 6.417362213134766,
+ "learning_rate": 1.8070818070818072e-05,
+ "loss": 0.0243,
+ "step": 1991
+ },
+ {
+ "epoch": 7.2967032967032965,
+ "grad_norm": 34.0954475402832,
+ "learning_rate": 1.8046398046398047e-05,
+ "loss": 0.6666,
+ "step": 1992
+ },
+ {
+ "epoch": 7.3003663003663,
+ "grad_norm": 5.597110748291016,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.0389,
+ "step": 1993
+ },
+ {
+ "epoch": 7.304029304029304,
+ "grad_norm": 70.55953979492188,
+ "learning_rate": 1.7997557997558e-05,
+ "loss": 0.7335,
+ "step": 1994
+ },
+ {
+ "epoch": 7.3076923076923075,
+ "grad_norm": 17.913522720336914,
+ "learning_rate": 1.7973137973137973e-05,
+ "loss": 0.2307,
+ "step": 1995
+ },
+ {
+ "epoch": 7.311355311355311,
+ "grad_norm": 9.62990665435791,
+ "learning_rate": 1.7948717948717948e-05,
+ "loss": 0.0515,
+ "step": 1996
+ },
+ {
+ "epoch": 7.315018315018315,
+ "grad_norm": 1.333807110786438,
+ "learning_rate": 1.7924297924297923e-05,
+ "loss": 0.0088,
+ "step": 1997
+ },
+ {
+ "epoch": 7.318681318681318,
+ "grad_norm": 12.604703903198242,
+ "learning_rate": 1.78998778998779e-05,
+ "loss": 0.0802,
+ "step": 1998
+ },
+ {
+ "epoch": 7.322344322344322,
+ "grad_norm": 57.309974670410156,
+ "learning_rate": 1.7875457875457876e-05,
+ "loss": 0.738,
+ "step": 1999
+ },
+ {
+ "epoch": 7.326007326007326,
+ "grad_norm": 12.750027656555176,
+ "learning_rate": 1.785103785103785e-05,
+ "loss": 0.0785,
+ "step": 2000
+ },
+ {
+ "epoch": 7.329670329670329,
+ "grad_norm": 39.28510665893555,
+ "learning_rate": 1.7826617826617826e-05,
+ "loss": 0.4609,
+ "step": 2001
+ },
+ {
+ "epoch": 7.333333333333333,
+ "grad_norm": 19.048255920410156,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.1013,
+ "step": 2002
+ },
+ {
+ "epoch": 7.336996336996337,
+ "grad_norm": 50.47089385986328,
+ "learning_rate": 1.7777777777777777e-05,
+ "loss": 0.714,
+ "step": 2003
+ },
+ {
+ "epoch": 7.34065934065934,
+ "grad_norm": 2.6616337299346924,
+ "learning_rate": 1.7753357753357755e-05,
+ "loss": 0.0183,
+ "step": 2004
+ },
+ {
+ "epoch": 7.344322344322344,
+ "grad_norm": 23.130146026611328,
+ "learning_rate": 1.772893772893773e-05,
+ "loss": 0.2,
+ "step": 2005
+ },
+ {
+ "epoch": 7.347985347985348,
+ "grad_norm": 23.108713150024414,
+ "learning_rate": 1.7704517704517705e-05,
+ "loss": 0.2199,
+ "step": 2006
+ },
+ {
+ "epoch": 7.351648351648351,
+ "grad_norm": 14.890787124633789,
+ "learning_rate": 1.7680097680097684e-05,
+ "loss": 0.0872,
+ "step": 2007
+ },
+ {
+ "epoch": 7.355311355311355,
+ "grad_norm": 41.62394714355469,
+ "learning_rate": 1.7655677655677655e-05,
+ "loss": 0.3795,
+ "step": 2008
+ },
+ {
+ "epoch": 7.358974358974359,
+ "grad_norm": 19.252058029174805,
+ "learning_rate": 1.763125763125763e-05,
+ "loss": 0.0822,
+ "step": 2009
+ },
+ {
+ "epoch": 7.362637362637362,
+ "grad_norm": 23.167705535888672,
+ "learning_rate": 1.7606837606837605e-05,
+ "loss": 0.1602,
+ "step": 2010
+ },
+ {
+ "epoch": 7.366300366300366,
+ "grad_norm": 34.01895523071289,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.3295,
+ "step": 2011
+ },
+ {
+ "epoch": 7.36996336996337,
+ "grad_norm": 28.834074020385742,
+ "learning_rate": 1.755799755799756e-05,
+ "loss": 0.1371,
+ "step": 2012
+ },
+ {
+ "epoch": 7.373626373626374,
+ "grad_norm": 13.843847274780273,
+ "learning_rate": 1.7533577533577534e-05,
+ "loss": 0.1339,
+ "step": 2013
+ },
+ {
+ "epoch": 7.377289377289378,
+ "grad_norm": 10.192770957946777,
+ "learning_rate": 1.750915750915751e-05,
+ "loss": 0.058,
+ "step": 2014
+ },
+ {
+ "epoch": 7.380952380952381,
+ "grad_norm": 55.51911544799805,
+ "learning_rate": 1.7484737484737484e-05,
+ "loss": 0.4047,
+ "step": 2015
+ },
+ {
+ "epoch": 7.384615384615385,
+ "grad_norm": 2.7761716842651367,
+ "learning_rate": 1.746031746031746e-05,
+ "loss": 0.0232,
+ "step": 2016
+ },
+ {
+ "epoch": 7.388278388278389,
+ "grad_norm": 7.78446626663208,
+ "learning_rate": 1.7435897435897438e-05,
+ "loss": 0.0781,
+ "step": 2017
+ },
+ {
+ "epoch": 7.391941391941392,
+ "grad_norm": 46.8702507019043,
+ "learning_rate": 1.7411477411477413e-05,
+ "loss": 0.8509,
+ "step": 2018
+ },
+ {
+ "epoch": 7.395604395604396,
+ "grad_norm": 32.83955001831055,
+ "learning_rate": 1.7387057387057388e-05,
+ "loss": 0.2573,
+ "step": 2019
+ },
+ {
+ "epoch": 7.3992673992674,
+ "grad_norm": 40.40720748901367,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.1884,
+ "step": 2020
+ },
+ {
+ "epoch": 7.402930402930403,
+ "grad_norm": 19.889108657836914,
+ "learning_rate": 1.7338217338217338e-05,
+ "loss": 0.0401,
+ "step": 2021
+ },
+ {
+ "epoch": 7.406593406593407,
+ "grad_norm": 23.082000732421875,
+ "learning_rate": 1.7313797313797313e-05,
+ "loss": 0.3323,
+ "step": 2022
+ },
+ {
+ "epoch": 7.410256410256411,
+ "grad_norm": 44.391605377197266,
+ "learning_rate": 1.7289377289377288e-05,
+ "loss": 0.4417,
+ "step": 2023
+ },
+ {
+ "epoch": 7.413919413919414,
+ "grad_norm": 2.9148988723754883,
+ "learning_rate": 1.7264957264957267e-05,
+ "loss": 0.0104,
+ "step": 2024
+ },
+ {
+ "epoch": 7.417582417582418,
+ "grad_norm": 39.043304443359375,
+ "learning_rate": 1.724053724053724e-05,
+ "loss": 0.2819,
+ "step": 2025
+ },
+ {
+ "epoch": 7.4212454212454215,
+ "grad_norm": 47.23966598510742,
+ "learning_rate": 1.7216117216117217e-05,
+ "loss": 0.3823,
+ "step": 2026
+ },
+ {
+ "epoch": 7.424908424908425,
+ "grad_norm": 31.07651710510254,
+ "learning_rate": 1.7191697191697192e-05,
+ "loss": 0.1564,
+ "step": 2027
+ },
+ {
+ "epoch": 7.428571428571429,
+ "grad_norm": 2.0451018810272217,
+ "learning_rate": 1.7167277167277167e-05,
+ "loss": 0.0091,
+ "step": 2028
+ },
+ {
+ "epoch": 7.4322344322344325,
+ "grad_norm": 43.10199737548828,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.2758,
+ "step": 2029
+ },
+ {
+ "epoch": 7.435897435897436,
+ "grad_norm": 9.677335739135742,
+ "learning_rate": 1.711843711843712e-05,
+ "loss": 0.0309,
+ "step": 2030
+ },
+ {
+ "epoch": 7.43956043956044,
+ "grad_norm": 21.8636474609375,
+ "learning_rate": 1.7094017094017095e-05,
+ "loss": 0.141,
+ "step": 2031
+ },
+ {
+ "epoch": 7.443223443223443,
+ "grad_norm": 0.3610914349555969,
+ "learning_rate": 1.706959706959707e-05,
+ "loss": 0.0022,
+ "step": 2032
+ },
+ {
+ "epoch": 7.446886446886447,
+ "grad_norm": 1.5513430833816528,
+ "learning_rate": 1.704517704517705e-05,
+ "loss": 0.0059,
+ "step": 2033
+ },
+ {
+ "epoch": 7.450549450549451,
+ "grad_norm": 0.36708980798721313,
+ "learning_rate": 1.702075702075702e-05,
+ "loss": 0.0018,
+ "step": 2034
+ },
+ {
+ "epoch": 7.454212454212454,
+ "grad_norm": 6.103841781616211,
+ "learning_rate": 1.6996336996336996e-05,
+ "loss": 0.0325,
+ "step": 2035
+ },
+ {
+ "epoch": 7.457875457875458,
+ "grad_norm": 26.09792709350586,
+ "learning_rate": 1.697191697191697e-05,
+ "loss": 0.0481,
+ "step": 2036
+ },
+ {
+ "epoch": 7.461538461538462,
+ "grad_norm": 19.57491111755371,
+ "learning_rate": 1.694749694749695e-05,
+ "loss": 0.0981,
+ "step": 2037
+ },
+ {
+ "epoch": 7.465201465201465,
+ "grad_norm": 6.412461280822754,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.04,
+ "step": 2038
+ },
+ {
+ "epoch": 7.468864468864469,
+ "grad_norm": 0.46989959478378296,
+ "learning_rate": 1.68986568986569e-05,
+ "loss": 0.002,
+ "step": 2039
+ },
+ {
+ "epoch": 7.472527472527473,
+ "grad_norm": 5.42742919921875,
+ "learning_rate": 1.6874236874236874e-05,
+ "loss": 0.0245,
+ "step": 2040
+ },
+ {
+ "epoch": 7.476190476190476,
+ "grad_norm": 5.105862140655518,
+ "learning_rate": 1.684981684981685e-05,
+ "loss": 0.0277,
+ "step": 2041
+ },
+ {
+ "epoch": 7.47985347985348,
+ "grad_norm": 3.4603350162506104,
+ "learning_rate": 1.6825396825396824e-05,
+ "loss": 0.0093,
+ "step": 2042
+ },
+ {
+ "epoch": 7.483516483516484,
+ "grad_norm": 49.75768280029297,
+ "learning_rate": 1.6800976800976803e-05,
+ "loss": 0.7042,
+ "step": 2043
+ },
+ {
+ "epoch": 7.487179487179487,
+ "grad_norm": 51.32642364501953,
+ "learning_rate": 1.6776556776556778e-05,
+ "loss": 0.9967,
+ "step": 2044
+ },
+ {
+ "epoch": 7.490842490842491,
+ "grad_norm": 3.8675732612609863,
+ "learning_rate": 1.6752136752136753e-05,
+ "loss": 0.0153,
+ "step": 2045
+ },
+ {
+ "epoch": 7.4945054945054945,
+ "grad_norm": 36.375526428222656,
+ "learning_rate": 1.6727716727716728e-05,
+ "loss": 0.2771,
+ "step": 2046
+ },
+ {
+ "epoch": 7.498168498168498,
+ "grad_norm": 2.354778528213501,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.0137,
+ "step": 2047
+ },
+ {
+ "epoch": 7.501831501831502,
+ "grad_norm": 46.09824752807617,
+ "learning_rate": 1.6678876678876678e-05,
+ "loss": 0.3772,
+ "step": 2048
+ },
+ {
+ "epoch": 7.5054945054945055,
+ "grad_norm": 42.83018112182617,
+ "learning_rate": 1.6654456654456653e-05,
+ "loss": 0.2655,
+ "step": 2049
+ },
+ {
+ "epoch": 7.509157509157509,
+ "grad_norm": 34.598880767822266,
+ "learning_rate": 1.6630036630036632e-05,
+ "loss": 0.235,
+ "step": 2050
+ },
+ {
+ "epoch": 7.512820512820513,
+ "grad_norm": 2.5649797916412354,
+ "learning_rate": 1.6605616605616607e-05,
+ "loss": 0.0157,
+ "step": 2051
+ },
+ {
+ "epoch": 7.516483516483516,
+ "grad_norm": 15.715023040771484,
+ "learning_rate": 1.6581196581196582e-05,
+ "loss": 0.0861,
+ "step": 2052
+ },
+ {
+ "epoch": 7.52014652014652,
+ "grad_norm": 17.451343536376953,
+ "learning_rate": 1.6556776556776557e-05,
+ "loss": 0.1199,
+ "step": 2053
+ },
+ {
+ "epoch": 7.523809523809524,
+ "grad_norm": 29.217243194580078,
+ "learning_rate": 1.6532356532356532e-05,
+ "loss": 0.3191,
+ "step": 2054
+ },
+ {
+ "epoch": 7.527472527472527,
+ "grad_norm": 5.1904683113098145,
+ "learning_rate": 1.6507936507936507e-05,
+ "loss": 0.0131,
+ "step": 2055
+ },
+ {
+ "epoch": 7.531135531135531,
+ "grad_norm": 4.807910919189453,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.0225,
+ "step": 2056
+ },
+ {
+ "epoch": 7.534798534798535,
+ "grad_norm": 3.9078361988067627,
+ "learning_rate": 1.645909645909646e-05,
+ "loss": 0.014,
+ "step": 2057
+ },
+ {
+ "epoch": 7.538461538461538,
+ "grad_norm": 32.45369338989258,
+ "learning_rate": 1.6434676434676436e-05,
+ "loss": 0.2331,
+ "step": 2058
+ },
+ {
+ "epoch": 7.542124542124542,
+ "grad_norm": 9.129495620727539,
+ "learning_rate": 1.641025641025641e-05,
+ "loss": 0.0328,
+ "step": 2059
+ },
+ {
+ "epoch": 7.545787545787546,
+ "grad_norm": 1.4577407836914062,
+ "learning_rate": 1.6385836385836386e-05,
+ "loss": 0.0062,
+ "step": 2060
+ },
+ {
+ "epoch": 7.549450549450549,
+ "grad_norm": 15.017457008361816,
+ "learning_rate": 1.636141636141636e-05,
+ "loss": 0.0611,
+ "step": 2061
+ },
+ {
+ "epoch": 7.553113553113553,
+ "grad_norm": 39.598941802978516,
+ "learning_rate": 1.6336996336996336e-05,
+ "loss": 0.2892,
+ "step": 2062
+ },
+ {
+ "epoch": 7.556776556776557,
+ "grad_norm": 67.49568176269531,
+ "learning_rate": 1.6312576312576314e-05,
+ "loss": 0.66,
+ "step": 2063
+ },
+ {
+ "epoch": 7.56043956043956,
+ "grad_norm": 32.164634704589844,
+ "learning_rate": 1.628815628815629e-05,
+ "loss": 0.2308,
+ "step": 2064
+ },
+ {
+ "epoch": 7.564102564102564,
+ "grad_norm": 2.058502197265625,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 0.0089,
+ "step": 2065
+ },
+ {
+ "epoch": 7.5677655677655675,
+ "grad_norm": 46.27522659301758,
+ "learning_rate": 1.623931623931624e-05,
+ "loss": 0.259,
+ "step": 2066
+ },
+ {
+ "epoch": 7.571428571428571,
+ "grad_norm": 54.9110221862793,
+ "learning_rate": 1.6214896214896215e-05,
+ "loss": 0.3899,
+ "step": 2067
+ },
+ {
+ "epoch": 7.575091575091575,
+ "grad_norm": 9.964278221130371,
+ "learning_rate": 1.619047619047619e-05,
+ "loss": 0.026,
+ "step": 2068
+ },
+ {
+ "epoch": 7.5787545787545785,
+ "grad_norm": 5.512078762054443,
+ "learning_rate": 1.6166056166056168e-05,
+ "loss": 0.0187,
+ "step": 2069
+ },
+ {
+ "epoch": 7.582417582417582,
+ "grad_norm": 38.90432357788086,
+ "learning_rate": 1.6141636141636143e-05,
+ "loss": 0.0728,
+ "step": 2070
+ },
+ {
+ "epoch": 7.586080586080586,
+ "grad_norm": 11.633467674255371,
+ "learning_rate": 1.6117216117216118e-05,
+ "loss": 0.0383,
+ "step": 2071
+ },
+ {
+ "epoch": 7.589743589743589,
+ "grad_norm": 8.595443725585938,
+ "learning_rate": 1.609279609279609e-05,
+ "loss": 0.0313,
+ "step": 2072
+ },
+ {
+ "epoch": 7.593406593406593,
+ "grad_norm": 2.8875672817230225,
+ "learning_rate": 1.606837606837607e-05,
+ "loss": 0.0102,
+ "step": 2073
+ },
+ {
+ "epoch": 7.597069597069597,
+ "grad_norm": 42.968170166015625,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.1532,
+ "step": 2074
+ },
+ {
+ "epoch": 7.6007326007326,
+ "grad_norm": 1.500887393951416,
+ "learning_rate": 1.601953601953602e-05,
+ "loss": 0.0047,
+ "step": 2075
+ },
+ {
+ "epoch": 7.604395604395604,
+ "grad_norm": 1.2472022771835327,
+ "learning_rate": 1.5995115995115997e-05,
+ "loss": 0.004,
+ "step": 2076
+ },
+ {
+ "epoch": 7.608058608058608,
+ "grad_norm": 1.0480316877365112,
+ "learning_rate": 1.5970695970695972e-05,
+ "loss": 0.0045,
+ "step": 2077
+ },
+ {
+ "epoch": 7.611721611721611,
+ "grad_norm": 37.37439727783203,
+ "learning_rate": 1.5946275946275947e-05,
+ "loss": 0.2399,
+ "step": 2078
+ },
+ {
+ "epoch": 7.615384615384615,
+ "grad_norm": 0.7918564677238464,
+ "learning_rate": 1.5921855921855922e-05,
+ "loss": 0.0032,
+ "step": 2079
+ },
+ {
+ "epoch": 7.619047619047619,
+ "grad_norm": 6.207716941833496,
+ "learning_rate": 1.5897435897435897e-05,
+ "loss": 0.0214,
+ "step": 2080
+ },
+ {
+ "epoch": 7.622710622710622,
+ "grad_norm": 29.516454696655273,
+ "learning_rate": 1.5873015873015872e-05,
+ "loss": 0.3501,
+ "step": 2081
+ },
+ {
+ "epoch": 7.626373626373626,
+ "grad_norm": 2.8200786113739014,
+ "learning_rate": 1.584859584859585e-05,
+ "loss": 0.0057,
+ "step": 2082
+ },
+ {
+ "epoch": 7.63003663003663,
+ "grad_norm": 14.830533981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.0321,
+ "step": 2083
+ },
+ {
+ "epoch": 7.633699633699633,
+ "grad_norm": 34.72395706176758,
+ "learning_rate": 1.57997557997558e-05,
+ "loss": 0.1672,
+ "step": 2084
+ },
+ {
+ "epoch": 7.637362637362637,
+ "grad_norm": 75.48332214355469,
+ "learning_rate": 1.5775335775335773e-05,
+ "loss": 0.44,
+ "step": 2085
+ },
+ {
+ "epoch": 7.641025641025641,
+ "grad_norm": 0.4638623297214508,
+ "learning_rate": 1.575091575091575e-05,
+ "loss": 0.0025,
+ "step": 2086
+ },
+ {
+ "epoch": 7.644688644688645,
+ "grad_norm": 47.44121170043945,
+ "learning_rate": 1.5726495726495726e-05,
+ "loss": 0.2901,
+ "step": 2087
+ },
+ {
+ "epoch": 7.648351648351649,
+ "grad_norm": 31.14560317993164,
+ "learning_rate": 1.57020757020757e-05,
+ "loss": 0.1881,
+ "step": 2088
+ },
+ {
+ "epoch": 7.652014652014652,
+ "grad_norm": 14.16900634765625,
+ "learning_rate": 1.567765567765568e-05,
+ "loss": 0.0609,
+ "step": 2089
+ },
+ {
+ "epoch": 7.655677655677656,
+ "grad_norm": 248.16372680664062,
+ "learning_rate": 1.5653235653235655e-05,
+ "loss": 0.14,
+ "step": 2090
+ },
+ {
+ "epoch": 7.65934065934066,
+ "grad_norm": 78.32206726074219,
+ "learning_rate": 1.562881562881563e-05,
+ "loss": 1.3854,
+ "step": 2091
+ },
+ {
+ "epoch": 7.663003663003663,
+ "grad_norm": 2.940131664276123,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 0.0101,
+ "step": 2092
+ },
+ {
+ "epoch": 7.666666666666667,
+ "grad_norm": 4.624741077423096,
+ "learning_rate": 1.557997557997558e-05,
+ "loss": 0.0168,
+ "step": 2093
+ },
+ {
+ "epoch": 7.670329670329671,
+ "grad_norm": 42.516990661621094,
+ "learning_rate": 1.5555555555555555e-05,
+ "loss": 0.2529,
+ "step": 2094
+ },
+ {
+ "epoch": 7.673992673992674,
+ "grad_norm": 24.555633544921875,
+ "learning_rate": 1.5531135531135533e-05,
+ "loss": 0.1367,
+ "step": 2095
+ },
+ {
+ "epoch": 7.677655677655678,
+ "grad_norm": 35.021644592285156,
+ "learning_rate": 1.550671550671551e-05,
+ "loss": 0.2322,
+ "step": 2096
+ },
+ {
+ "epoch": 7.681318681318682,
+ "grad_norm": 0.8293462991714478,
+ "learning_rate": 1.5482295482295484e-05,
+ "loss": 0.0038,
+ "step": 2097
+ },
+ {
+ "epoch": 7.684981684981685,
+ "grad_norm": 25.26691436767578,
+ "learning_rate": 1.5457875457875455e-05,
+ "loss": 0.1326,
+ "step": 2098
+ },
+ {
+ "epoch": 7.688644688644689,
+ "grad_norm": 46.36896514892578,
+ "learning_rate": 1.5433455433455434e-05,
+ "loss": 0.661,
+ "step": 2099
+ },
+ {
+ "epoch": 7.6923076923076925,
+ "grad_norm": 23.875978469848633,
+ "learning_rate": 1.540903540903541e-05,
+ "loss": 0.1815,
+ "step": 2100
+ },
+ {
+ "epoch": 7.695970695970696,
+ "grad_norm": 14.46264362335205,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.1048,
+ "step": 2101
+ },
+ {
+ "epoch": 7.6996336996337,
+ "grad_norm": 15.445000648498535,
+ "learning_rate": 1.5360195360195362e-05,
+ "loss": 0.0455,
+ "step": 2102
+ },
+ {
+ "epoch": 7.7032967032967035,
+ "grad_norm": 0.21127165853977203,
+ "learning_rate": 1.5335775335775337e-05,
+ "loss": 0.0006,
+ "step": 2103
+ },
+ {
+ "epoch": 7.706959706959707,
+ "grad_norm": 11.099639892578125,
+ "learning_rate": 1.531135531135531e-05,
+ "loss": 0.0598,
+ "step": 2104
+ },
+ {
+ "epoch": 7.710622710622711,
+ "grad_norm": 5.1992950439453125,
+ "learning_rate": 1.5286935286935287e-05,
+ "loss": 0.0204,
+ "step": 2105
+ },
+ {
+ "epoch": 7.714285714285714,
+ "grad_norm": 4.170431613922119,
+ "learning_rate": 1.5262515262515263e-05,
+ "loss": 0.0202,
+ "step": 2106
+ },
+ {
+ "epoch": 7.717948717948718,
+ "grad_norm": 35.86619567871094,
+ "learning_rate": 1.5238095238095238e-05,
+ "loss": 0.2789,
+ "step": 2107
+ },
+ {
+ "epoch": 7.721611721611722,
+ "grad_norm": 39.799415588378906,
+ "learning_rate": 1.5213675213675216e-05,
+ "loss": 0.251,
+ "step": 2108
+ },
+ {
+ "epoch": 7.725274725274725,
+ "grad_norm": 25.5378475189209,
+ "learning_rate": 1.518925518925519e-05,
+ "loss": 0.1284,
+ "step": 2109
+ },
+ {
+ "epoch": 7.728937728937729,
+ "grad_norm": 2.4359946250915527,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.0127,
+ "step": 2110
+ },
+ {
+ "epoch": 7.732600732600733,
+ "grad_norm": 12.041257858276367,
+ "learning_rate": 1.514041514041514e-05,
+ "loss": 0.027,
+ "step": 2111
+ },
+ {
+ "epoch": 7.736263736263736,
+ "grad_norm": 34.67470169067383,
+ "learning_rate": 1.5115995115995116e-05,
+ "loss": 0.2489,
+ "step": 2112
+ },
+ {
+ "epoch": 7.73992673992674,
+ "grad_norm": 2.041276693344116,
+ "learning_rate": 1.5091575091575091e-05,
+ "loss": 0.0071,
+ "step": 2113
+ },
+ {
+ "epoch": 7.743589743589744,
+ "grad_norm": 0.2618583142757416,
+ "learning_rate": 1.5067155067155066e-05,
+ "loss": 0.0015,
+ "step": 2114
+ },
+ {
+ "epoch": 7.747252747252747,
+ "grad_norm": 29.656461715698242,
+ "learning_rate": 1.5042735042735045e-05,
+ "loss": 0.1325,
+ "step": 2115
+ },
+ {
+ "epoch": 7.750915750915751,
+ "grad_norm": 33.18010330200195,
+ "learning_rate": 1.5018315018315018e-05,
+ "loss": 0.2356,
+ "step": 2116
+ },
+ {
+ "epoch": 7.754578754578755,
+ "grad_norm": 17.884321212768555,
+ "learning_rate": 1.4993894993894995e-05,
+ "loss": 0.0953,
+ "step": 2117
+ },
+ {
+ "epoch": 7.758241758241758,
+ "grad_norm": 9.597829818725586,
+ "learning_rate": 1.496947496947497e-05,
+ "loss": 0.051,
+ "step": 2118
+ },
+ {
+ "epoch": 7.761904761904762,
+ "grad_norm": 32.64970397949219,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.2014,
+ "step": 2119
+ },
+ {
+ "epoch": 7.7655677655677655,
+ "grad_norm": 9.97050666809082,
+ "learning_rate": 1.492063492063492e-05,
+ "loss": 0.0659,
+ "step": 2120
+ },
+ {
+ "epoch": 7.769230769230769,
+ "grad_norm": 27.019380569458008,
+ "learning_rate": 1.4896214896214897e-05,
+ "loss": 0.155,
+ "step": 2121
+ },
+ {
+ "epoch": 7.772893772893773,
+ "grad_norm": 21.946569442749023,
+ "learning_rate": 1.4871794871794872e-05,
+ "loss": 0.0974,
+ "step": 2122
+ },
+ {
+ "epoch": 7.7765567765567765,
+ "grad_norm": 17.21709442138672,
+ "learning_rate": 1.4847374847374847e-05,
+ "loss": 0.0764,
+ "step": 2123
+ },
+ {
+ "epoch": 7.78021978021978,
+ "grad_norm": 25.19805335998535,
+ "learning_rate": 1.4822954822954824e-05,
+ "loss": 0.2383,
+ "step": 2124
+ },
+ {
+ "epoch": 7.783882783882784,
+ "grad_norm": 21.493112564086914,
+ "learning_rate": 1.4798534798534799e-05,
+ "loss": 0.1307,
+ "step": 2125
+ },
+ {
+ "epoch": 7.787545787545787,
+ "grad_norm": 7.874645233154297,
+ "learning_rate": 1.4774114774114774e-05,
+ "loss": 0.0303,
+ "step": 2126
+ },
+ {
+ "epoch": 7.791208791208791,
+ "grad_norm": 25.664508819580078,
+ "learning_rate": 1.474969474969475e-05,
+ "loss": 0.1054,
+ "step": 2127
+ },
+ {
+ "epoch": 7.794871794871795,
+ "grad_norm": 45.79121398925781,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.4923,
+ "step": 2128
+ },
+ {
+ "epoch": 7.798534798534798,
+ "grad_norm": 7.350006580352783,
+ "learning_rate": 1.4700854700854701e-05,
+ "loss": 0.0278,
+ "step": 2129
+ },
+ {
+ "epoch": 7.802197802197802,
+ "grad_norm": 3.626199245452881,
+ "learning_rate": 1.4676434676434678e-05,
+ "loss": 0.0184,
+ "step": 2130
+ },
+ {
+ "epoch": 7.805860805860806,
+ "grad_norm": 46.03739547729492,
+ "learning_rate": 1.4652014652014653e-05,
+ "loss": 0.195,
+ "step": 2131
+ },
+ {
+ "epoch": 7.809523809523809,
+ "grad_norm": 1.0704383850097656,
+ "learning_rate": 1.4627594627594628e-05,
+ "loss": 0.0039,
+ "step": 2132
+ },
+ {
+ "epoch": 7.813186813186813,
+ "grad_norm": 46.02214813232422,
+ "learning_rate": 1.4603174603174603e-05,
+ "loss": 0.2533,
+ "step": 2133
+ },
+ {
+ "epoch": 7.816849816849817,
+ "grad_norm": 2.2334794998168945,
+ "learning_rate": 1.457875457875458e-05,
+ "loss": 0.0087,
+ "step": 2134
+ },
+ {
+ "epoch": 7.82051282051282,
+ "grad_norm": 2.7543773651123047,
+ "learning_rate": 1.4554334554334555e-05,
+ "loss": 0.0094,
+ "step": 2135
+ },
+ {
+ "epoch": 7.824175824175824,
+ "grad_norm": 44.24272918701172,
+ "learning_rate": 1.452991452991453e-05,
+ "loss": 0.4251,
+ "step": 2136
+ },
+ {
+ "epoch": 7.827838827838828,
+ "grad_norm": 48.497154235839844,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.2763,
+ "step": 2137
+ },
+ {
+ "epoch": 7.831501831501831,
+ "grad_norm": 38.73664093017578,
+ "learning_rate": 1.4481074481074482e-05,
+ "loss": 0.6482,
+ "step": 2138
+ },
+ {
+ "epoch": 7.835164835164835,
+ "grad_norm": 2.15800142288208,
+ "learning_rate": 1.4456654456654457e-05,
+ "loss": 0.0085,
+ "step": 2139
+ },
+ {
+ "epoch": 7.8388278388278385,
+ "grad_norm": 7.289889812469482,
+ "learning_rate": 1.4432234432234433e-05,
+ "loss": 0.041,
+ "step": 2140
+ },
+ {
+ "epoch": 7.842490842490842,
+ "grad_norm": 39.962310791015625,
+ "learning_rate": 1.4407814407814407e-05,
+ "loss": 0.3403,
+ "step": 2141
+ },
+ {
+ "epoch": 7.846153846153846,
+ "grad_norm": 23.029020309448242,
+ "learning_rate": 1.4383394383394383e-05,
+ "loss": 0.1281,
+ "step": 2142
+ },
+ {
+ "epoch": 7.8498168498168495,
+ "grad_norm": 7.111436367034912,
+ "learning_rate": 1.435897435897436e-05,
+ "loss": 0.0416,
+ "step": 2143
+ },
+ {
+ "epoch": 7.853479853479853,
+ "grad_norm": 7.24738073348999,
+ "learning_rate": 1.4334554334554335e-05,
+ "loss": 0.0159,
+ "step": 2144
+ },
+ {
+ "epoch": 7.857142857142857,
+ "grad_norm": 74.41973876953125,
+ "learning_rate": 1.431013431013431e-05,
+ "loss": 0.4156,
+ "step": 2145
+ },
+ {
+ "epoch": 7.860805860805861,
+ "grad_norm": 1.8928090333938599,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.0088,
+ "step": 2146
+ },
+ {
+ "epoch": 7.864468864468865,
+ "grad_norm": 74.72843170166016,
+ "learning_rate": 1.4261294261294262e-05,
+ "loss": 0.7266,
+ "step": 2147
+ },
+ {
+ "epoch": 7.868131868131869,
+ "grad_norm": 3.2044010162353516,
+ "learning_rate": 1.4236874236874237e-05,
+ "loss": 0.0134,
+ "step": 2148
+ },
+ {
+ "epoch": 7.871794871794872,
+ "grad_norm": 0.9343626499176025,
+ "learning_rate": 1.4212454212454212e-05,
+ "loss": 0.0028,
+ "step": 2149
+ },
+ {
+ "epoch": 7.875457875457876,
+ "grad_norm": 6.980963230133057,
+ "learning_rate": 1.4188034188034189e-05,
+ "loss": 0.011,
+ "step": 2150
+ },
+ {
+ "epoch": 7.8791208791208796,
+ "grad_norm": 1.1645610332489014,
+ "learning_rate": 1.4163614163614164e-05,
+ "loss": 0.005,
+ "step": 2151
+ },
+ {
+ "epoch": 7.882783882783883,
+ "grad_norm": 2.219325065612793,
+ "learning_rate": 1.413919413919414e-05,
+ "loss": 0.0127,
+ "step": 2152
+ },
+ {
+ "epoch": 7.886446886446887,
+ "grad_norm": 3.3467326164245605,
+ "learning_rate": 1.4114774114774116e-05,
+ "loss": 0.0108,
+ "step": 2153
+ },
+ {
+ "epoch": 7.8901098901098905,
+ "grad_norm": 47.454647064208984,
+ "learning_rate": 1.409035409035409e-05,
+ "loss": 0.4106,
+ "step": 2154
+ },
+ {
+ "epoch": 7.893772893772894,
+ "grad_norm": 26.02679443359375,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.1038,
+ "step": 2155
+ },
+ {
+ "epoch": 7.897435897435898,
+ "grad_norm": 56.01240921020508,
+ "learning_rate": 1.4041514041514043e-05,
+ "loss": 0.4808,
+ "step": 2156
+ },
+ {
+ "epoch": 7.9010989010989015,
+ "grad_norm": 3.230677843093872,
+ "learning_rate": 1.4017094017094016e-05,
+ "loss": 0.0123,
+ "step": 2157
+ },
+ {
+ "epoch": 7.904761904761905,
+ "grad_norm": 0.07180067896842957,
+ "learning_rate": 1.3992673992673993e-05,
+ "loss": 0.0004,
+ "step": 2158
+ },
+ {
+ "epoch": 7.908424908424909,
+ "grad_norm": 45.324222564697266,
+ "learning_rate": 1.3968253968253968e-05,
+ "loss": 0.1359,
+ "step": 2159
+ },
+ {
+ "epoch": 7.912087912087912,
+ "grad_norm": 10.703695297241211,
+ "learning_rate": 1.3943833943833945e-05,
+ "loss": 0.0608,
+ "step": 2160
+ },
+ {
+ "epoch": 7.915750915750916,
+ "grad_norm": 18.48207664489746,
+ "learning_rate": 1.391941391941392e-05,
+ "loss": 0.0425,
+ "step": 2161
+ },
+ {
+ "epoch": 7.91941391941392,
+ "grad_norm": 26.093645095825195,
+ "learning_rate": 1.3894993894993895e-05,
+ "loss": 0.1672,
+ "step": 2162
+ },
+ {
+ "epoch": 7.923076923076923,
+ "grad_norm": 41.90341567993164,
+ "learning_rate": 1.3870573870573872e-05,
+ "loss": 0.2344,
+ "step": 2163
+ },
+ {
+ "epoch": 7.926739926739927,
+ "grad_norm": 47.571990966796875,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2271,
+ "step": 2164
+ },
+ {
+ "epoch": 7.930402930402931,
+ "grad_norm": 4.187535762786865,
+ "learning_rate": 1.3821733821733822e-05,
+ "loss": 0.0153,
+ "step": 2165
+ },
+ {
+ "epoch": 7.934065934065934,
+ "grad_norm": 2.7795937061309814,
+ "learning_rate": 1.3797313797313799e-05,
+ "loss": 0.0134,
+ "step": 2166
+ },
+ {
+ "epoch": 7.937728937728938,
+ "grad_norm": 41.12346267700195,
+ "learning_rate": 1.3772893772893772e-05,
+ "loss": 0.2219,
+ "step": 2167
+ },
+ {
+ "epoch": 7.941391941391942,
+ "grad_norm": 35.827301025390625,
+ "learning_rate": 1.3748473748473749e-05,
+ "loss": 0.2477,
+ "step": 2168
+ },
+ {
+ "epoch": 7.945054945054945,
+ "grad_norm": 44.316322326660156,
+ "learning_rate": 1.3724053724053725e-05,
+ "loss": 0.0963,
+ "step": 2169
+ },
+ {
+ "epoch": 7.948717948717949,
+ "grad_norm": 23.085559844970703,
+ "learning_rate": 1.3699633699633699e-05,
+ "loss": 0.1346,
+ "step": 2170
+ },
+ {
+ "epoch": 7.9523809523809526,
+ "grad_norm": 31.379549026489258,
+ "learning_rate": 1.3675213675213676e-05,
+ "loss": 0.1232,
+ "step": 2171
+ },
+ {
+ "epoch": 7.956043956043956,
+ "grad_norm": 14.274428367614746,
+ "learning_rate": 1.365079365079365e-05,
+ "loss": 0.0883,
+ "step": 2172
+ },
+ {
+ "epoch": 7.95970695970696,
+ "grad_norm": 77.79078674316406,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.5814,
+ "step": 2173
+ },
+ {
+ "epoch": 7.9633699633699635,
+ "grad_norm": 16.881986618041992,
+ "learning_rate": 1.3601953601953602e-05,
+ "loss": 0.1247,
+ "step": 2174
+ },
+ {
+ "epoch": 7.967032967032967,
+ "grad_norm": 32.1965217590332,
+ "learning_rate": 1.3577533577533578e-05,
+ "loss": 0.208,
+ "step": 2175
+ },
+ {
+ "epoch": 7.970695970695971,
+ "grad_norm": 4.283143043518066,
+ "learning_rate": 1.3553113553113554e-05,
+ "loss": 0.0182,
+ "step": 2176
+ },
+ {
+ "epoch": 7.9743589743589745,
+ "grad_norm": 51.64984130859375,
+ "learning_rate": 1.352869352869353e-05,
+ "loss": 0.6282,
+ "step": 2177
+ },
+ {
+ "epoch": 7.978021978021978,
+ "grad_norm": 25.30405616760254,
+ "learning_rate": 1.3504273504273504e-05,
+ "loss": 0.1721,
+ "step": 2178
+ },
+ {
+ "epoch": 7.981684981684982,
+ "grad_norm": 35.99342346191406,
+ "learning_rate": 1.3479853479853481e-05,
+ "loss": 0.2065,
+ "step": 2179
+ },
+ {
+ "epoch": 7.985347985347985,
+ "grad_norm": 1.2389482259750366,
+ "learning_rate": 1.3455433455433455e-05,
+ "loss": 0.0046,
+ "step": 2180
+ },
+ {
+ "epoch": 7.989010989010989,
+ "grad_norm": 40.435752868652344,
+ "learning_rate": 1.3431013431013431e-05,
+ "loss": 0.2944,
+ "step": 2181
+ },
+ {
+ "epoch": 7.992673992673993,
+ "grad_norm": 20.92979621887207,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.0686,
+ "step": 2182
+ },
+ {
+ "epoch": 7.996336996336996,
+ "grad_norm": 12.692971229553223,
+ "learning_rate": 1.3382173382173381e-05,
+ "loss": 0.0737,
+ "step": 2183
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 15.363990783691406,
+ "learning_rate": 1.3357753357753358e-05,
+ "loss": 0.0755,
+ "step": 2184
+ },
+ {
+ "epoch": 8.003663003663004,
+ "grad_norm": 39.101654052734375,
+ "learning_rate": 1.3333333333333333e-05,
+ "loss": 0.4135,
+ "step": 2185
+ },
+ {
+ "epoch": 8.007326007326007,
+ "grad_norm": 23.566104888916016,
+ "learning_rate": 1.3308913308913308e-05,
+ "loss": 0.2477,
+ "step": 2186
+ },
+ {
+ "epoch": 8.010989010989011,
+ "grad_norm": 23.69949722290039,
+ "learning_rate": 1.3284493284493285e-05,
+ "loss": 0.0747,
+ "step": 2187
+ },
+ {
+ "epoch": 8.014652014652015,
+ "grad_norm": 40.992549896240234,
+ "learning_rate": 1.326007326007326e-05,
+ "loss": 0.3049,
+ "step": 2188
+ },
+ {
+ "epoch": 8.018315018315018,
+ "grad_norm": 7.42161226272583,
+ "learning_rate": 1.3235653235653237e-05,
+ "loss": 0.022,
+ "step": 2189
+ },
+ {
+ "epoch": 8.021978021978022,
+ "grad_norm": 39.43696594238281,
+ "learning_rate": 1.3211233211233212e-05,
+ "loss": 0.0781,
+ "step": 2190
+ },
+ {
+ "epoch": 8.025641025641026,
+ "grad_norm": 6.539100646972656,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.0225,
+ "step": 2191
+ },
+ {
+ "epoch": 8.02930402930403,
+ "grad_norm": 0.2996901273727417,
+ "learning_rate": 1.3162393162393164e-05,
+ "loss": 0.002,
+ "step": 2192
+ },
+ {
+ "epoch": 8.032967032967033,
+ "grad_norm": 9.589286804199219,
+ "learning_rate": 1.3137973137973137e-05,
+ "loss": 0.0293,
+ "step": 2193
+ },
+ {
+ "epoch": 8.036630036630036,
+ "grad_norm": 5.144655227661133,
+ "learning_rate": 1.3113553113553114e-05,
+ "loss": 0.0256,
+ "step": 2194
+ },
+ {
+ "epoch": 8.04029304029304,
+ "grad_norm": 64.5682144165039,
+ "learning_rate": 1.308913308913309e-05,
+ "loss": 0.4062,
+ "step": 2195
+ },
+ {
+ "epoch": 8.043956043956044,
+ "grad_norm": 35.61048126220703,
+ "learning_rate": 1.3064713064713064e-05,
+ "loss": 0.1856,
+ "step": 2196
+ },
+ {
+ "epoch": 8.047619047619047,
+ "grad_norm": 0.3583362400531769,
+ "learning_rate": 1.3040293040293041e-05,
+ "loss": 0.0012,
+ "step": 2197
+ },
+ {
+ "epoch": 8.051282051282051,
+ "grad_norm": 10.168415069580078,
+ "learning_rate": 1.3015873015873016e-05,
+ "loss": 0.0616,
+ "step": 2198
+ },
+ {
+ "epoch": 8.054945054945055,
+ "grad_norm": 28.49810218811035,
+ "learning_rate": 1.2991452991452991e-05,
+ "loss": 0.1137,
+ "step": 2199
+ },
+ {
+ "epoch": 8.058608058608058,
+ "grad_norm": 5.252911567687988,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 0.0211,
+ "step": 2200
+ },
+ {
+ "epoch": 8.062271062271062,
+ "grad_norm": 19.91984748840332,
+ "learning_rate": 1.2942612942612943e-05,
+ "loss": 0.0983,
+ "step": 2201
+ },
+ {
+ "epoch": 8.065934065934066,
+ "grad_norm": 10.991836547851562,
+ "learning_rate": 1.2918192918192918e-05,
+ "loss": 0.0398,
+ "step": 2202
+ },
+ {
+ "epoch": 8.06959706959707,
+ "grad_norm": 16.97028923034668,
+ "learning_rate": 1.2893772893772895e-05,
+ "loss": 0.0849,
+ "step": 2203
+ },
+ {
+ "epoch": 8.073260073260073,
+ "grad_norm": 11.924320220947266,
+ "learning_rate": 1.286935286935287e-05,
+ "loss": 0.055,
+ "step": 2204
+ },
+ {
+ "epoch": 8.076923076923077,
+ "grad_norm": 16.613285064697266,
+ "learning_rate": 1.2844932844932846e-05,
+ "loss": 0.0765,
+ "step": 2205
+ },
+ {
+ "epoch": 8.08058608058608,
+ "grad_norm": 0.5711580514907837,
+ "learning_rate": 1.282051282051282e-05,
+ "loss": 0.0015,
+ "step": 2206
+ },
+ {
+ "epoch": 8.084249084249084,
+ "grad_norm": 20.53736114501953,
+ "learning_rate": 1.2796092796092797e-05,
+ "loss": 0.101,
+ "step": 2207
+ },
+ {
+ "epoch": 8.087912087912088,
+ "grad_norm": 44.09571838378906,
+ "learning_rate": 1.2771672771672773e-05,
+ "loss": 0.2872,
+ "step": 2208
+ },
+ {
+ "epoch": 8.091575091575091,
+ "grad_norm": 34.870426177978516,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.2131,
+ "step": 2209
+ },
+ {
+ "epoch": 8.095238095238095,
+ "grad_norm": 0.5102387070655823,
+ "learning_rate": 1.2722832722832723e-05,
+ "loss": 0.002,
+ "step": 2210
+ },
+ {
+ "epoch": 8.098901098901099,
+ "grad_norm": 46.13880157470703,
+ "learning_rate": 1.2698412698412699e-05,
+ "loss": 0.3227,
+ "step": 2211
+ },
+ {
+ "epoch": 8.102564102564102,
+ "grad_norm": 30.000337600708008,
+ "learning_rate": 1.2673992673992674e-05,
+ "loss": 0.0992,
+ "step": 2212
+ },
+ {
+ "epoch": 8.106227106227106,
+ "grad_norm": 6.741244316101074,
+ "learning_rate": 1.264957264957265e-05,
+ "loss": 0.0265,
+ "step": 2213
+ },
+ {
+ "epoch": 8.10989010989011,
+ "grad_norm": 2.9291558265686035,
+ "learning_rate": 1.2625152625152625e-05,
+ "loss": 0.011,
+ "step": 2214
+ },
+ {
+ "epoch": 8.113553113553113,
+ "grad_norm": 7.66628360748291,
+ "learning_rate": 1.26007326007326e-05,
+ "loss": 0.0379,
+ "step": 2215
+ },
+ {
+ "epoch": 8.117216117216117,
+ "grad_norm": 14.051595687866211,
+ "learning_rate": 1.2576312576312576e-05,
+ "loss": 0.0789,
+ "step": 2216
+ },
+ {
+ "epoch": 8.12087912087912,
+ "grad_norm": 0.47640302777290344,
+ "learning_rate": 1.2551892551892552e-05,
+ "loss": 0.0015,
+ "step": 2217
+ },
+ {
+ "epoch": 8.124542124542124,
+ "grad_norm": 71.04071807861328,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.4696,
+ "step": 2218
+ },
+ {
+ "epoch": 8.128205128205128,
+ "grad_norm": 85.2886962890625,
+ "learning_rate": 1.2503052503052502e-05,
+ "loss": 0.1978,
+ "step": 2219
+ },
+ {
+ "epoch": 8.131868131868131,
+ "grad_norm": 16.694299697875977,
+ "learning_rate": 1.247863247863248e-05,
+ "loss": 0.1035,
+ "step": 2220
+ },
+ {
+ "epoch": 8.135531135531135,
+ "grad_norm": 38.73305130004883,
+ "learning_rate": 1.2454212454212456e-05,
+ "loss": 0.2607,
+ "step": 2221
+ },
+ {
+ "epoch": 8.139194139194139,
+ "grad_norm": 1.4563415050506592,
+ "learning_rate": 1.242979242979243e-05,
+ "loss": 0.0084,
+ "step": 2222
+ },
+ {
+ "epoch": 8.142857142857142,
+ "grad_norm": 22.82903289794922,
+ "learning_rate": 1.2405372405372406e-05,
+ "loss": 0.0936,
+ "step": 2223
+ },
+ {
+ "epoch": 8.146520146520146,
+ "grad_norm": 0.5167717933654785,
+ "learning_rate": 1.2380952380952381e-05,
+ "loss": 0.0016,
+ "step": 2224
+ },
+ {
+ "epoch": 8.15018315018315,
+ "grad_norm": 13.251680374145508,
+ "learning_rate": 1.2356532356532356e-05,
+ "loss": 0.0454,
+ "step": 2225
+ },
+ {
+ "epoch": 8.153846153846153,
+ "grad_norm": 21.030073165893555,
+ "learning_rate": 1.2332112332112333e-05,
+ "loss": 0.0737,
+ "step": 2226
+ },
+ {
+ "epoch": 8.157509157509157,
+ "grad_norm": 22.005605697631836,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.1456,
+ "step": 2227
+ },
+ {
+ "epoch": 8.16117216117216,
+ "grad_norm": 2.234006404876709,
+ "learning_rate": 1.2283272283272283e-05,
+ "loss": 0.0088,
+ "step": 2228
+ },
+ {
+ "epoch": 8.164835164835164,
+ "grad_norm": 21.61121368408203,
+ "learning_rate": 1.2258852258852258e-05,
+ "loss": 0.0966,
+ "step": 2229
+ },
+ {
+ "epoch": 8.168498168498168,
+ "grad_norm": 24.948705673217773,
+ "learning_rate": 1.2234432234432235e-05,
+ "loss": 0.1154,
+ "step": 2230
+ },
+ {
+ "epoch": 8.172161172161172,
+ "grad_norm": 25.752145767211914,
+ "learning_rate": 1.221001221001221e-05,
+ "loss": 0.2507,
+ "step": 2231
+ },
+ {
+ "epoch": 8.175824175824175,
+ "grad_norm": 58.091697692871094,
+ "learning_rate": 1.2185592185592185e-05,
+ "loss": 0.5921,
+ "step": 2232
+ },
+ {
+ "epoch": 8.179487179487179,
+ "grad_norm": 0.6767385005950928,
+ "learning_rate": 1.2161172161172162e-05,
+ "loss": 0.0029,
+ "step": 2233
+ },
+ {
+ "epoch": 8.183150183150182,
+ "grad_norm": 7.783257007598877,
+ "learning_rate": 1.2136752136752139e-05,
+ "loss": 0.0277,
+ "step": 2234
+ },
+ {
+ "epoch": 8.186813186813186,
+ "grad_norm": 14.877440452575684,
+ "learning_rate": 1.2112332112332112e-05,
+ "loss": 0.0591,
+ "step": 2235
+ },
+ {
+ "epoch": 8.19047619047619,
+ "grad_norm": 59.26154708862305,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 0.7976,
+ "step": 2236
+ },
+ {
+ "epoch": 8.194139194139193,
+ "grad_norm": 0.09101928025484085,
+ "learning_rate": 1.2063492063492064e-05,
+ "loss": 0.0003,
+ "step": 2237
+ },
+ {
+ "epoch": 8.197802197802197,
+ "grad_norm": 7.83564567565918,
+ "learning_rate": 1.2039072039072039e-05,
+ "loss": 0.0266,
+ "step": 2238
+ },
+ {
+ "epoch": 8.2014652014652,
+ "grad_norm": 24.55094337463379,
+ "learning_rate": 1.2014652014652016e-05,
+ "loss": 0.1821,
+ "step": 2239
+ },
+ {
+ "epoch": 8.205128205128204,
+ "grad_norm": 0.18979696929454803,
+ "learning_rate": 1.199023199023199e-05,
+ "loss": 0.0008,
+ "step": 2240
+ },
+ {
+ "epoch": 8.208791208791208,
+ "grad_norm": 11.952847480773926,
+ "learning_rate": 1.1965811965811966e-05,
+ "loss": 0.0236,
+ "step": 2241
+ },
+ {
+ "epoch": 8.212454212454212,
+ "grad_norm": 7.738105773925781,
+ "learning_rate": 1.194139194139194e-05,
+ "loss": 0.0307,
+ "step": 2242
+ },
+ {
+ "epoch": 8.216117216117215,
+ "grad_norm": 37.77316665649414,
+ "learning_rate": 1.1916971916971918e-05,
+ "loss": 0.0926,
+ "step": 2243
+ },
+ {
+ "epoch": 8.219780219780219,
+ "grad_norm": 1.5700554847717285,
+ "learning_rate": 1.1892551892551893e-05,
+ "loss": 0.003,
+ "step": 2244
+ },
+ {
+ "epoch": 8.223443223443223,
+ "grad_norm": 26.529850006103516,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.0936,
+ "step": 2245
+ },
+ {
+ "epoch": 8.227106227106226,
+ "grad_norm": 77.18512725830078,
+ "learning_rate": 1.1843711843711844e-05,
+ "loss": 0.6767,
+ "step": 2246
+ },
+ {
+ "epoch": 8.23076923076923,
+ "grad_norm": 47.92250061035156,
+ "learning_rate": 1.181929181929182e-05,
+ "loss": 0.3669,
+ "step": 2247
+ },
+ {
+ "epoch": 8.234432234432234,
+ "grad_norm": 3.132725477218628,
+ "learning_rate": 1.1794871794871795e-05,
+ "loss": 0.0089,
+ "step": 2248
+ },
+ {
+ "epoch": 8.238095238095237,
+ "grad_norm": 24.75738525390625,
+ "learning_rate": 1.1770451770451771e-05,
+ "loss": 0.0641,
+ "step": 2249
+ },
+ {
+ "epoch": 8.241758241758241,
+ "grad_norm": 9.874589920043945,
+ "learning_rate": 1.1746031746031746e-05,
+ "loss": 0.0372,
+ "step": 2250
+ },
+ {
+ "epoch": 8.245421245421245,
+ "grad_norm": 2.6871144771575928,
+ "learning_rate": 1.1721611721611721e-05,
+ "loss": 0.0072,
+ "step": 2251
+ },
+ {
+ "epoch": 8.249084249084248,
+ "grad_norm": 8.98822021484375,
+ "learning_rate": 1.1697191697191698e-05,
+ "loss": 0.0614,
+ "step": 2252
+ },
+ {
+ "epoch": 8.252747252747252,
+ "grad_norm": 11.431350708007812,
+ "learning_rate": 1.1672771672771673e-05,
+ "loss": 0.0508,
+ "step": 2253
+ },
+ {
+ "epoch": 8.256410256410255,
+ "grad_norm": 37.37540817260742,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2841,
+ "step": 2254
+ },
+ {
+ "epoch": 8.260073260073261,
+ "grad_norm": 12.127150535583496,
+ "learning_rate": 1.1623931623931623e-05,
+ "loss": 0.0633,
+ "step": 2255
+ },
+ {
+ "epoch": 8.263736263736265,
+ "grad_norm": 20.860342025756836,
+ "learning_rate": 1.15995115995116e-05,
+ "loss": 0.0908,
+ "step": 2256
+ },
+ {
+ "epoch": 8.267399267399268,
+ "grad_norm": 14.559013366699219,
+ "learning_rate": 1.1575091575091575e-05,
+ "loss": 0.0419,
+ "step": 2257
+ },
+ {
+ "epoch": 8.271062271062272,
+ "grad_norm": 1.7196027040481567,
+ "learning_rate": 1.155067155067155e-05,
+ "loss": 0.0076,
+ "step": 2258
+ },
+ {
+ "epoch": 8.274725274725276,
+ "grad_norm": 10.175626754760742,
+ "learning_rate": 1.1526251526251527e-05,
+ "loss": 0.0393,
+ "step": 2259
+ },
+ {
+ "epoch": 8.27838827838828,
+ "grad_norm": 49.11803436279297,
+ "learning_rate": 1.1501831501831502e-05,
+ "loss": 0.4574,
+ "step": 2260
+ },
+ {
+ "epoch": 8.282051282051283,
+ "grad_norm": 31.251197814941406,
+ "learning_rate": 1.1477411477411477e-05,
+ "loss": 0.1318,
+ "step": 2261
+ },
+ {
+ "epoch": 8.285714285714286,
+ "grad_norm": 6.921731948852539,
+ "learning_rate": 1.1452991452991454e-05,
+ "loss": 0.0189,
+ "step": 2262
+ },
+ {
+ "epoch": 8.28937728937729,
+ "grad_norm": 12.07050609588623,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 0.0614,
+ "step": 2263
+ },
+ {
+ "epoch": 8.293040293040294,
+ "grad_norm": 18.133323669433594,
+ "learning_rate": 1.1404151404151404e-05,
+ "loss": 0.0673,
+ "step": 2264
+ },
+ {
+ "epoch": 8.296703296703297,
+ "grad_norm": 13.542656898498535,
+ "learning_rate": 1.137973137973138e-05,
+ "loss": 0.067,
+ "step": 2265
+ },
+ {
+ "epoch": 8.300366300366301,
+ "grad_norm": 5.377211093902588,
+ "learning_rate": 1.1355311355311356e-05,
+ "loss": 0.0185,
+ "step": 2266
+ },
+ {
+ "epoch": 8.304029304029305,
+ "grad_norm": 35.056522369384766,
+ "learning_rate": 1.1330891330891331e-05,
+ "loss": 0.2152,
+ "step": 2267
+ },
+ {
+ "epoch": 8.307692307692308,
+ "grad_norm": 9.124246597290039,
+ "learning_rate": 1.1306471306471306e-05,
+ "loss": 0.0288,
+ "step": 2268
+ },
+ {
+ "epoch": 8.311355311355312,
+ "grad_norm": 26.452402114868164,
+ "learning_rate": 1.1282051282051283e-05,
+ "loss": 0.1257,
+ "step": 2269
+ },
+ {
+ "epoch": 8.315018315018316,
+ "grad_norm": 29.298583984375,
+ "learning_rate": 1.1257631257631258e-05,
+ "loss": 0.0647,
+ "step": 2270
+ },
+ {
+ "epoch": 8.31868131868132,
+ "grad_norm": 2.6505391597747803,
+ "learning_rate": 1.1233211233211233e-05,
+ "loss": 0.0077,
+ "step": 2271
+ },
+ {
+ "epoch": 8.322344322344323,
+ "grad_norm": 5.335651397705078,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 0.0104,
+ "step": 2272
+ },
+ {
+ "epoch": 8.326007326007327,
+ "grad_norm": 4.58416748046875,
+ "learning_rate": 1.1184371184371185e-05,
+ "loss": 0.0106,
+ "step": 2273
+ },
+ {
+ "epoch": 8.32967032967033,
+ "grad_norm": 135.4385528564453,
+ "learning_rate": 1.115995115995116e-05,
+ "loss": 0.5163,
+ "step": 2274
+ },
+ {
+ "epoch": 8.333333333333334,
+ "grad_norm": 25.397010803222656,
+ "learning_rate": 1.1135531135531137e-05,
+ "loss": 0.0851,
+ "step": 2275
+ },
+ {
+ "epoch": 8.336996336996338,
+ "grad_norm": 28.56364631652832,
+ "learning_rate": 1.111111111111111e-05,
+ "loss": 0.1686,
+ "step": 2276
+ },
+ {
+ "epoch": 8.340659340659341,
+ "grad_norm": 2.306708812713623,
+ "learning_rate": 1.1086691086691087e-05,
+ "loss": 0.006,
+ "step": 2277
+ },
+ {
+ "epoch": 8.344322344322345,
+ "grad_norm": 56.0256462097168,
+ "learning_rate": 1.1062271062271063e-05,
+ "loss": 0.9074,
+ "step": 2278
+ },
+ {
+ "epoch": 8.347985347985349,
+ "grad_norm": 36.44279861450195,
+ "learning_rate": 1.1037851037851039e-05,
+ "loss": 0.1967,
+ "step": 2279
+ },
+ {
+ "epoch": 8.351648351648352,
+ "grad_norm": 37.51696014404297,
+ "learning_rate": 1.1013431013431014e-05,
+ "loss": 0.192,
+ "step": 2280
+ },
+ {
+ "epoch": 8.355311355311356,
+ "grad_norm": 0.44260093569755554,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.0023,
+ "step": 2281
+ },
+ {
+ "epoch": 8.35897435897436,
+ "grad_norm": 42.92826843261719,
+ "learning_rate": 1.0964590964590965e-05,
+ "loss": 0.283,
+ "step": 2282
+ },
+ {
+ "epoch": 8.362637362637363,
+ "grad_norm": 5.269385814666748,
+ "learning_rate": 1.094017094017094e-05,
+ "loss": 0.0273,
+ "step": 2283
+ },
+ {
+ "epoch": 8.366300366300367,
+ "grad_norm": 22.941513061523438,
+ "learning_rate": 1.0915750915750916e-05,
+ "loss": 0.0738,
+ "step": 2284
+ },
+ {
+ "epoch": 8.36996336996337,
+ "grad_norm": 47.469303131103516,
+ "learning_rate": 1.0891330891330892e-05,
+ "loss": 0.241,
+ "step": 2285
+ },
+ {
+ "epoch": 8.373626373626374,
+ "grad_norm": 17.6788387298584,
+ "learning_rate": 1.0866910866910867e-05,
+ "loss": 0.0661,
+ "step": 2286
+ },
+ {
+ "epoch": 8.377289377289378,
+ "grad_norm": 22.660839080810547,
+ "learning_rate": 1.0842490842490842e-05,
+ "loss": 0.1409,
+ "step": 2287
+ },
+ {
+ "epoch": 8.380952380952381,
+ "grad_norm": 4.898139476776123,
+ "learning_rate": 1.081807081807082e-05,
+ "loss": 0.0236,
+ "step": 2288
+ },
+ {
+ "epoch": 8.384615384615385,
+ "grad_norm": 24.638856887817383,
+ "learning_rate": 1.0793650793650793e-05,
+ "loss": 0.127,
+ "step": 2289
+ },
+ {
+ "epoch": 8.388278388278389,
+ "grad_norm": 30.863998413085938,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2182,
+ "step": 2290
+ },
+ {
+ "epoch": 8.391941391941392,
+ "grad_norm": 0.24884633719921112,
+ "learning_rate": 1.0744810744810746e-05,
+ "loss": 0.0012,
+ "step": 2291
+ },
+ {
+ "epoch": 8.395604395604396,
+ "grad_norm": 40.2337532043457,
+ "learning_rate": 1.0720390720390721e-05,
+ "loss": 0.2617,
+ "step": 2292
+ },
+ {
+ "epoch": 8.3992673992674,
+ "grad_norm": 2.265397787094116,
+ "learning_rate": 1.0695970695970696e-05,
+ "loss": 0.0097,
+ "step": 2293
+ },
+ {
+ "epoch": 8.402930402930403,
+ "grad_norm": 20.82665252685547,
+ "learning_rate": 1.0671550671550671e-05,
+ "loss": 0.2609,
+ "step": 2294
+ },
+ {
+ "epoch": 8.406593406593407,
+ "grad_norm": 12.195377349853516,
+ "learning_rate": 1.0647130647130648e-05,
+ "loss": 0.0769,
+ "step": 2295
+ },
+ {
+ "epoch": 8.41025641025641,
+ "grad_norm": 39.17343521118164,
+ "learning_rate": 1.0622710622710623e-05,
+ "loss": 0.1292,
+ "step": 2296
+ },
+ {
+ "epoch": 8.413919413919414,
+ "grad_norm": 1.4848605394363403,
+ "learning_rate": 1.0598290598290598e-05,
+ "loss": 0.0077,
+ "step": 2297
+ },
+ {
+ "epoch": 8.417582417582418,
+ "grad_norm": 1.0283154249191284,
+ "learning_rate": 1.0573870573870575e-05,
+ "loss": 0.0021,
+ "step": 2298
+ },
+ {
+ "epoch": 8.421245421245422,
+ "grad_norm": 2.7084271907806396,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.0077,
+ "step": 2299
+ },
+ {
+ "epoch": 8.424908424908425,
+ "grad_norm": 15.100653648376465,
+ "learning_rate": 1.0525030525030525e-05,
+ "loss": 0.0352,
+ "step": 2300
+ },
+ {
+ "epoch": 8.428571428571429,
+ "grad_norm": 16.015790939331055,
+ "learning_rate": 1.0500610500610502e-05,
+ "loss": 0.071,
+ "step": 2301
+ },
+ {
+ "epoch": 8.432234432234432,
+ "grad_norm": 73.60521697998047,
+ "learning_rate": 1.0476190476190475e-05,
+ "loss": 0.7214,
+ "step": 2302
+ },
+ {
+ "epoch": 8.435897435897436,
+ "grad_norm": 49.41472625732422,
+ "learning_rate": 1.0451770451770452e-05,
+ "loss": 0.9831,
+ "step": 2303
+ },
+ {
+ "epoch": 8.43956043956044,
+ "grad_norm": 51.6118278503418,
+ "learning_rate": 1.0427350427350429e-05,
+ "loss": 0.2589,
+ "step": 2304
+ },
+ {
+ "epoch": 8.443223443223443,
+ "grad_norm": 9.20317554473877,
+ "learning_rate": 1.0402930402930402e-05,
+ "loss": 0.0274,
+ "step": 2305
+ },
+ {
+ "epoch": 8.446886446886447,
+ "grad_norm": 10.019723892211914,
+ "learning_rate": 1.0378510378510379e-05,
+ "loss": 0.0286,
+ "step": 2306
+ },
+ {
+ "epoch": 8.45054945054945,
+ "grad_norm": 83.85884857177734,
+ "learning_rate": 1.0354090354090354e-05,
+ "loss": 1.2867,
+ "step": 2307
+ },
+ {
+ "epoch": 8.454212454212454,
+ "grad_norm": 6.391974449157715,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.023,
+ "step": 2308
+ },
+ {
+ "epoch": 8.457875457875458,
+ "grad_norm": 18.662921905517578,
+ "learning_rate": 1.0305250305250306e-05,
+ "loss": 0.158,
+ "step": 2309
+ },
+ {
+ "epoch": 8.461538461538462,
+ "grad_norm": 10.009090423583984,
+ "learning_rate": 1.028083028083028e-05,
+ "loss": 0.0401,
+ "step": 2310
+ },
+ {
+ "epoch": 8.465201465201465,
+ "grad_norm": 34.67587661743164,
+ "learning_rate": 1.0256410256410258e-05,
+ "loss": 0.1569,
+ "step": 2311
+ },
+ {
+ "epoch": 8.468864468864469,
+ "grad_norm": 7.593516826629639,
+ "learning_rate": 1.0231990231990231e-05,
+ "loss": 0.0382,
+ "step": 2312
+ },
+ {
+ "epoch": 8.472527472527473,
+ "grad_norm": 14.953495979309082,
+ "learning_rate": 1.0207570207570208e-05,
+ "loss": 0.0311,
+ "step": 2313
+ },
+ {
+ "epoch": 8.476190476190476,
+ "grad_norm": 37.42109680175781,
+ "learning_rate": 1.0183150183150184e-05,
+ "loss": 0.2014,
+ "step": 2314
+ },
+ {
+ "epoch": 8.47985347985348,
+ "grad_norm": 1.2265455722808838,
+ "learning_rate": 1.0158730158730158e-05,
+ "loss": 0.0052,
+ "step": 2315
+ },
+ {
+ "epoch": 8.483516483516484,
+ "grad_norm": 23.93451499938965,
+ "learning_rate": 1.0134310134310135e-05,
+ "loss": 0.1253,
+ "step": 2316
+ },
+ {
+ "epoch": 8.487179487179487,
+ "grad_norm": 6.8919172286987305,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.0287,
+ "step": 2317
+ },
+ {
+ "epoch": 8.49084249084249,
+ "grad_norm": 18.653671264648438,
+ "learning_rate": 1.0085470085470085e-05,
+ "loss": 0.0741,
+ "step": 2318
+ },
+ {
+ "epoch": 8.494505494505495,
+ "grad_norm": 13.255439758300781,
+ "learning_rate": 1.0061050061050061e-05,
+ "loss": 0.131,
+ "step": 2319
+ },
+ {
+ "epoch": 8.498168498168498,
+ "grad_norm": 39.42401885986328,
+ "learning_rate": 1.0036630036630037e-05,
+ "loss": 0.2419,
+ "step": 2320
+ },
+ {
+ "epoch": 8.501831501831502,
+ "grad_norm": 35.404022216796875,
+ "learning_rate": 1.0012210012210012e-05,
+ "loss": 0.2684,
+ "step": 2321
+ },
+ {
+ "epoch": 8.505494505494505,
+ "grad_norm": 28.29785919189453,
+ "learning_rate": 9.987789987789988e-06,
+ "loss": 0.6206,
+ "step": 2322
+ },
+ {
+ "epoch": 8.50915750915751,
+ "grad_norm": 36.69160842895508,
+ "learning_rate": 9.963369963369963e-06,
+ "loss": 0.3753,
+ "step": 2323
+ },
+ {
+ "epoch": 8.512820512820513,
+ "grad_norm": 23.688018798828125,
+ "learning_rate": 9.93894993894994e-06,
+ "loss": 0.1725,
+ "step": 2324
+ },
+ {
+ "epoch": 8.516483516483516,
+ "grad_norm": 3.7074942588806152,
+ "learning_rate": 9.914529914529914e-06,
+ "loss": 0.0237,
+ "step": 2325
+ },
+ {
+ "epoch": 8.52014652014652,
+ "grad_norm": 5.201651573181152,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 0.0267,
+ "step": 2326
+ },
+ {
+ "epoch": 8.523809523809524,
+ "grad_norm": 22.705989837646484,
+ "learning_rate": 9.865689865689867e-06,
+ "loss": 0.1745,
+ "step": 2327
+ },
+ {
+ "epoch": 8.527472527472527,
+ "grad_norm": 0.9097073078155518,
+ "learning_rate": 9.84126984126984e-06,
+ "loss": 0.0061,
+ "step": 2328
+ },
+ {
+ "epoch": 8.531135531135531,
+ "grad_norm": 0.8476067185401917,
+ "learning_rate": 9.816849816849817e-06,
+ "loss": 0.0049,
+ "step": 2329
+ },
+ {
+ "epoch": 8.534798534798535,
+ "grad_norm": 30.6715087890625,
+ "learning_rate": 9.792429792429794e-06,
+ "loss": 0.8089,
+ "step": 2330
+ },
+ {
+ "epoch": 8.538461538461538,
+ "grad_norm": 27.170246124267578,
+ "learning_rate": 9.768009768009767e-06,
+ "loss": 0.1874,
+ "step": 2331
+ },
+ {
+ "epoch": 8.542124542124542,
+ "grad_norm": 2.073500871658325,
+ "learning_rate": 9.743589743589744e-06,
+ "loss": 0.0057,
+ "step": 2332
+ },
+ {
+ "epoch": 8.545787545787546,
+ "grad_norm": 50.97946548461914,
+ "learning_rate": 9.719169719169719e-06,
+ "loss": 0.7219,
+ "step": 2333
+ },
+ {
+ "epoch": 8.54945054945055,
+ "grad_norm": 33.17234420776367,
+ "learning_rate": 9.694749694749694e-06,
+ "loss": 0.2579,
+ "step": 2334
+ },
+ {
+ "epoch": 8.553113553113553,
+ "grad_norm": 21.129179000854492,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.0973,
+ "step": 2335
+ },
+ {
+ "epoch": 8.556776556776557,
+ "grad_norm": 40.60239791870117,
+ "learning_rate": 9.645909645909646e-06,
+ "loss": 0.2773,
+ "step": 2336
+ },
+ {
+ "epoch": 8.56043956043956,
+ "grad_norm": 23.842580795288086,
+ "learning_rate": 9.621489621489623e-06,
+ "loss": 0.4137,
+ "step": 2337
+ },
+ {
+ "epoch": 8.564102564102564,
+ "grad_norm": 30.08527946472168,
+ "learning_rate": 9.597069597069596e-06,
+ "loss": 0.776,
+ "step": 2338
+ },
+ {
+ "epoch": 8.567765567765568,
+ "grad_norm": 26.280099868774414,
+ "learning_rate": 9.572649572649573e-06,
+ "loss": 0.2013,
+ "step": 2339
+ },
+ {
+ "epoch": 8.571428571428571,
+ "grad_norm": 9.810042381286621,
+ "learning_rate": 9.54822954822955e-06,
+ "loss": 0.0456,
+ "step": 2340
+ },
+ {
+ "epoch": 8.575091575091575,
+ "grad_norm": 9.112823486328125,
+ "learning_rate": 9.523809523809523e-06,
+ "loss": 0.0599,
+ "step": 2341
+ },
+ {
+ "epoch": 8.578754578754578,
+ "grad_norm": 32.358306884765625,
+ "learning_rate": 9.4993894993895e-06,
+ "loss": 0.3316,
+ "step": 2342
+ },
+ {
+ "epoch": 8.582417582417582,
+ "grad_norm": 28.62472915649414,
+ "learning_rate": 9.474969474969477e-06,
+ "loss": 0.3418,
+ "step": 2343
+ },
+ {
+ "epoch": 8.586080586080586,
+ "grad_norm": 40.83232879638672,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.498,
+ "step": 2344
+ },
+ {
+ "epoch": 8.58974358974359,
+ "grad_norm": 14.671387672424316,
+ "learning_rate": 9.426129426129427e-06,
+ "loss": 0.0943,
+ "step": 2345
+ },
+ {
+ "epoch": 8.593406593406593,
+ "grad_norm": 17.533994674682617,
+ "learning_rate": 9.401709401709402e-06,
+ "loss": 0.0914,
+ "step": 2346
+ },
+ {
+ "epoch": 8.597069597069597,
+ "grad_norm": 14.264333724975586,
+ "learning_rate": 9.377289377289377e-06,
+ "loss": 0.048,
+ "step": 2347
+ },
+ {
+ "epoch": 8.6007326007326,
+ "grad_norm": 10.327966690063477,
+ "learning_rate": 9.352869352869354e-06,
+ "loss": 0.0533,
+ "step": 2348
+ },
+ {
+ "epoch": 8.604395604395604,
+ "grad_norm": 23.408447265625,
+ "learning_rate": 9.328449328449329e-06,
+ "loss": 0.1076,
+ "step": 2349
+ },
+ {
+ "epoch": 8.608058608058608,
+ "grad_norm": 3.769625663757324,
+ "learning_rate": 9.304029304029304e-06,
+ "loss": 0.0173,
+ "step": 2350
+ },
+ {
+ "epoch": 8.611721611721611,
+ "grad_norm": 11.853968620300293,
+ "learning_rate": 9.279609279609279e-06,
+ "loss": 0.0461,
+ "step": 2351
+ },
+ {
+ "epoch": 8.615384615384615,
+ "grad_norm": 2.211425304412842,
+ "learning_rate": 9.255189255189256e-06,
+ "loss": 0.0074,
+ "step": 2352
+ },
+ {
+ "epoch": 8.619047619047619,
+ "grad_norm": 41.25067138671875,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.5553,
+ "step": 2353
+ },
+ {
+ "epoch": 8.622710622710622,
+ "grad_norm": 23.783859252929688,
+ "learning_rate": 9.206349206349206e-06,
+ "loss": 0.2602,
+ "step": 2354
+ },
+ {
+ "epoch": 8.626373626373626,
+ "grad_norm": 14.278258323669434,
+ "learning_rate": 9.181929181929182e-06,
+ "loss": 0.0603,
+ "step": 2355
+ },
+ {
+ "epoch": 8.63003663003663,
+ "grad_norm": 47.81812286376953,
+ "learning_rate": 9.15750915750916e-06,
+ "loss": 0.3209,
+ "step": 2356
+ },
+ {
+ "epoch": 8.633699633699633,
+ "grad_norm": 29.209117889404297,
+ "learning_rate": 9.133089133089133e-06,
+ "loss": 0.2166,
+ "step": 2357
+ },
+ {
+ "epoch": 8.637362637362637,
+ "grad_norm": 11.6812162399292,
+ "learning_rate": 9.10866910866911e-06,
+ "loss": 0.0394,
+ "step": 2358
+ },
+ {
+ "epoch": 8.64102564102564,
+ "grad_norm": 2.1416890621185303,
+ "learning_rate": 9.084249084249084e-06,
+ "loss": 0.0079,
+ "step": 2359
+ },
+ {
+ "epoch": 8.644688644688644,
+ "grad_norm": 13.363630294799805,
+ "learning_rate": 9.05982905982906e-06,
+ "loss": 0.1215,
+ "step": 2360
+ },
+ {
+ "epoch": 8.648351648351648,
+ "grad_norm": 10.95302963256836,
+ "learning_rate": 9.035409035409036e-06,
+ "loss": 0.0562,
+ "step": 2361
+ },
+ {
+ "epoch": 8.652014652014651,
+ "grad_norm": 2.392416000366211,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.0085,
+ "step": 2362
+ },
+ {
+ "epoch": 8.655677655677655,
+ "grad_norm": 13.83795166015625,
+ "learning_rate": 8.986568986568986e-06,
+ "loss": 0.0777,
+ "step": 2363
+ },
+ {
+ "epoch": 8.659340659340659,
+ "grad_norm": 18.058395385742188,
+ "learning_rate": 8.962148962148961e-06,
+ "loss": 0.0555,
+ "step": 2364
+ },
+ {
+ "epoch": 8.663003663003662,
+ "grad_norm": 2.548462390899658,
+ "learning_rate": 8.937728937728938e-06,
+ "loss": 0.0145,
+ "step": 2365
+ },
+ {
+ "epoch": 8.666666666666666,
+ "grad_norm": 1.9582334756851196,
+ "learning_rate": 8.913308913308913e-06,
+ "loss": 0.0114,
+ "step": 2366
+ },
+ {
+ "epoch": 8.67032967032967,
+ "grad_norm": 12.635466575622559,
+ "learning_rate": 8.888888888888888e-06,
+ "loss": 0.0936,
+ "step": 2367
+ },
+ {
+ "epoch": 8.673992673992673,
+ "grad_norm": 36.309967041015625,
+ "learning_rate": 8.864468864468865e-06,
+ "loss": 0.1474,
+ "step": 2368
+ },
+ {
+ "epoch": 8.677655677655677,
+ "grad_norm": 2.570406675338745,
+ "learning_rate": 8.840048840048842e-06,
+ "loss": 0.0115,
+ "step": 2369
+ },
+ {
+ "epoch": 8.68131868131868,
+ "grad_norm": 33.025535583496094,
+ "learning_rate": 8.815628815628815e-06,
+ "loss": 0.1509,
+ "step": 2370
+ },
+ {
+ "epoch": 8.684981684981684,
+ "grad_norm": 7.150747299194336,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.0128,
+ "step": 2371
+ },
+ {
+ "epoch": 8.688644688644688,
+ "grad_norm": 8.400662422180176,
+ "learning_rate": 8.766788766788767e-06,
+ "loss": 0.0405,
+ "step": 2372
+ },
+ {
+ "epoch": 8.692307692307692,
+ "grad_norm": 50.28904342651367,
+ "learning_rate": 8.742368742368742e-06,
+ "loss": 0.3981,
+ "step": 2373
+ },
+ {
+ "epoch": 8.695970695970695,
+ "grad_norm": 27.415250778198242,
+ "learning_rate": 8.717948717948719e-06,
+ "loss": 0.3461,
+ "step": 2374
+ },
+ {
+ "epoch": 8.699633699633699,
+ "grad_norm": 66.89543914794922,
+ "learning_rate": 8.693528693528694e-06,
+ "loss": 0.2926,
+ "step": 2375
+ },
+ {
+ "epoch": 8.703296703296703,
+ "grad_norm": 23.47862434387207,
+ "learning_rate": 8.669108669108669e-06,
+ "loss": 0.0704,
+ "step": 2376
+ },
+ {
+ "epoch": 8.706959706959706,
+ "grad_norm": 0.5212138891220093,
+ "learning_rate": 8.644688644688644e-06,
+ "loss": 0.0022,
+ "step": 2377
+ },
+ {
+ "epoch": 8.71062271062271,
+ "grad_norm": 11.140594482421875,
+ "learning_rate": 8.62026862026862e-06,
+ "loss": 0.0245,
+ "step": 2378
+ },
+ {
+ "epoch": 8.714285714285714,
+ "grad_norm": 6.0050554275512695,
+ "learning_rate": 8.595848595848596e-06,
+ "loss": 0.0156,
+ "step": 2379
+ },
+ {
+ "epoch": 8.717948717948717,
+ "grad_norm": 24.6923770904541,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.1378,
+ "step": 2380
+ },
+ {
+ "epoch": 8.72161172161172,
+ "grad_norm": 17.33226203918457,
+ "learning_rate": 8.547008547008548e-06,
+ "loss": 0.091,
+ "step": 2381
+ },
+ {
+ "epoch": 8.725274725274724,
+ "grad_norm": 1.3830251693725586,
+ "learning_rate": 8.522588522588524e-06,
+ "loss": 0.0056,
+ "step": 2382
+ },
+ {
+ "epoch": 8.728937728937728,
+ "grad_norm": 2.8836987018585205,
+ "learning_rate": 8.498168498168498e-06,
+ "loss": 0.0094,
+ "step": 2383
+ },
+ {
+ "epoch": 8.732600732600732,
+ "grad_norm": 21.720495223999023,
+ "learning_rate": 8.473748473748475e-06,
+ "loss": 0.0791,
+ "step": 2384
+ },
+ {
+ "epoch": 8.736263736263737,
+ "grad_norm": 23.333284378051758,
+ "learning_rate": 8.44932844932845e-06,
+ "loss": 0.0749,
+ "step": 2385
+ },
+ {
+ "epoch": 8.73992673992674,
+ "grad_norm": 6.694031715393066,
+ "learning_rate": 8.424908424908425e-06,
+ "loss": 0.0322,
+ "step": 2386
+ },
+ {
+ "epoch": 8.743589743589745,
+ "grad_norm": 8.488764762878418,
+ "learning_rate": 8.400488400488401e-06,
+ "loss": 0.0269,
+ "step": 2387
+ },
+ {
+ "epoch": 8.747252747252748,
+ "grad_norm": 0.5612779855728149,
+ "learning_rate": 8.376068376068377e-06,
+ "loss": 0.0021,
+ "step": 2388
+ },
+ {
+ "epoch": 8.750915750915752,
+ "grad_norm": 26.03545379638672,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.1388,
+ "step": 2389
+ },
+ {
+ "epoch": 8.754578754578755,
+ "grad_norm": 1.6444523334503174,
+ "learning_rate": 8.327228327228327e-06,
+ "loss": 0.005,
+ "step": 2390
+ },
+ {
+ "epoch": 8.758241758241759,
+ "grad_norm": 3.910712480545044,
+ "learning_rate": 8.302808302808303e-06,
+ "loss": 0.0166,
+ "step": 2391
+ },
+ {
+ "epoch": 8.761904761904763,
+ "grad_norm": 0.42347079515457153,
+ "learning_rate": 8.278388278388278e-06,
+ "loss": 0.0017,
+ "step": 2392
+ },
+ {
+ "epoch": 8.765567765567766,
+ "grad_norm": 3.2693428993225098,
+ "learning_rate": 8.253968253968254e-06,
+ "loss": 0.0103,
+ "step": 2393
+ },
+ {
+ "epoch": 8.76923076923077,
+ "grad_norm": 11.918498039245605,
+ "learning_rate": 8.22954822954823e-06,
+ "loss": 0.0702,
+ "step": 2394
+ },
+ {
+ "epoch": 8.772893772893774,
+ "grad_norm": 59.99433517456055,
+ "learning_rate": 8.205128205128205e-06,
+ "loss": 0.3443,
+ "step": 2395
+ },
+ {
+ "epoch": 8.776556776556777,
+ "grad_norm": 1.036231279373169,
+ "learning_rate": 8.18070818070818e-06,
+ "loss": 0.0029,
+ "step": 2396
+ },
+ {
+ "epoch": 8.780219780219781,
+ "grad_norm": 0.35836902260780334,
+ "learning_rate": 8.156288156288157e-06,
+ "loss": 0.0011,
+ "step": 2397
+ },
+ {
+ "epoch": 8.783882783882785,
+ "grad_norm": 11.58154010772705,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.0316,
+ "step": 2398
+ },
+ {
+ "epoch": 8.787545787545788,
+ "grad_norm": 54.74607849121094,
+ "learning_rate": 8.107448107448107e-06,
+ "loss": 0.2618,
+ "step": 2399
+ },
+ {
+ "epoch": 8.791208791208792,
+ "grad_norm": 21.049470901489258,
+ "learning_rate": 8.083028083028084e-06,
+ "loss": 0.0536,
+ "step": 2400
+ },
+ {
+ "epoch": 8.794871794871796,
+ "grad_norm": 4.356145858764648,
+ "learning_rate": 8.058608058608059e-06,
+ "loss": 0.0107,
+ "step": 2401
+ },
+ {
+ "epoch": 8.7985347985348,
+ "grad_norm": 6.708774089813232,
+ "learning_rate": 8.034188034188034e-06,
+ "loss": 0.2551,
+ "step": 2402
+ },
+ {
+ "epoch": 8.802197802197803,
+ "grad_norm": 33.899139404296875,
+ "learning_rate": 8.00976800976801e-06,
+ "loss": 0.1179,
+ "step": 2403
+ },
+ {
+ "epoch": 8.805860805860807,
+ "grad_norm": 20.150150299072266,
+ "learning_rate": 7.985347985347986e-06,
+ "loss": 0.0673,
+ "step": 2404
+ },
+ {
+ "epoch": 8.80952380952381,
+ "grad_norm": 0.7458391189575195,
+ "learning_rate": 7.960927960927961e-06,
+ "loss": 0.0015,
+ "step": 2405
+ },
+ {
+ "epoch": 8.813186813186814,
+ "grad_norm": 6.7325663566589355,
+ "learning_rate": 7.936507936507936e-06,
+ "loss": 0.0204,
+ "step": 2406
+ },
+ {
+ "epoch": 8.816849816849818,
+ "grad_norm": 1.79118013381958,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.0046,
+ "step": 2407
+ },
+ {
+ "epoch": 8.820512820512821,
+ "grad_norm": 16.44390106201172,
+ "learning_rate": 7.887667887667886e-06,
+ "loss": 0.0306,
+ "step": 2408
+ },
+ {
+ "epoch": 8.824175824175825,
+ "grad_norm": 0.12592382729053497,
+ "learning_rate": 7.863247863247863e-06,
+ "loss": 0.0006,
+ "step": 2409
+ },
+ {
+ "epoch": 8.827838827838828,
+ "grad_norm": 0.41172507405281067,
+ "learning_rate": 7.83882783882784e-06,
+ "loss": 0.0012,
+ "step": 2410
+ },
+ {
+ "epoch": 8.831501831501832,
+ "grad_norm": 12.583470344543457,
+ "learning_rate": 7.814407814407815e-06,
+ "loss": 0.0472,
+ "step": 2411
+ },
+ {
+ "epoch": 8.835164835164836,
+ "grad_norm": 2.4268991947174072,
+ "learning_rate": 7.78998778998779e-06,
+ "loss": 0.0787,
+ "step": 2412
+ },
+ {
+ "epoch": 8.83882783882784,
+ "grad_norm": 5.501258850097656,
+ "learning_rate": 7.765567765567767e-06,
+ "loss": 0.0161,
+ "step": 2413
+ },
+ {
+ "epoch": 8.842490842490843,
+ "grad_norm": 48.107818603515625,
+ "learning_rate": 7.741147741147742e-06,
+ "loss": 0.3023,
+ "step": 2414
+ },
+ {
+ "epoch": 8.846153846153847,
+ "grad_norm": 9.732619285583496,
+ "learning_rate": 7.716727716727717e-06,
+ "loss": 0.0313,
+ "step": 2415
+ },
+ {
+ "epoch": 8.84981684981685,
+ "grad_norm": 7.970260143280029,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 0.0364,
+ "step": 2416
+ },
+ {
+ "epoch": 8.853479853479854,
+ "grad_norm": 41.191104888916016,
+ "learning_rate": 7.667887667887669e-06,
+ "loss": 0.1678,
+ "step": 2417
+ },
+ {
+ "epoch": 8.857142857142858,
+ "grad_norm": 0.04402902349829674,
+ "learning_rate": 7.643467643467644e-06,
+ "loss": 0.0001,
+ "step": 2418
+ },
+ {
+ "epoch": 8.860805860805861,
+ "grad_norm": 5.091309547424316,
+ "learning_rate": 7.619047619047619e-06,
+ "loss": 0.0185,
+ "step": 2419
+ },
+ {
+ "epoch": 8.864468864468865,
+ "grad_norm": 0.8602111339569092,
+ "learning_rate": 7.594627594627595e-06,
+ "loss": 0.0027,
+ "step": 2420
+ },
+ {
+ "epoch": 8.868131868131869,
+ "grad_norm": 0.5093329548835754,
+ "learning_rate": 7.57020757020757e-06,
+ "loss": 0.0018,
+ "step": 2421
+ },
+ {
+ "epoch": 8.871794871794872,
+ "grad_norm": 1.818582534790039,
+ "learning_rate": 7.545787545787546e-06,
+ "loss": 0.0056,
+ "step": 2422
+ },
+ {
+ "epoch": 8.875457875457876,
+ "grad_norm": 1.239259123802185,
+ "learning_rate": 7.5213675213675224e-06,
+ "loss": 0.0035,
+ "step": 2423
+ },
+ {
+ "epoch": 8.87912087912088,
+ "grad_norm": 1.207359790802002,
+ "learning_rate": 7.4969474969474975e-06,
+ "loss": 0.0044,
+ "step": 2424
+ },
+ {
+ "epoch": 8.882783882783883,
+ "grad_norm": 16.89816665649414,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 0.1432,
+ "step": 2425
+ },
+ {
+ "epoch": 8.886446886446887,
+ "grad_norm": 26.338607788085938,
+ "learning_rate": 7.4481074481074485e-06,
+ "loss": 0.1183,
+ "step": 2426
+ },
+ {
+ "epoch": 8.89010989010989,
+ "grad_norm": 0.16512498259544373,
+ "learning_rate": 7.4236874236874235e-06,
+ "loss": 0.0005,
+ "step": 2427
+ },
+ {
+ "epoch": 8.893772893772894,
+ "grad_norm": 0.37214791774749756,
+ "learning_rate": 7.3992673992673995e-06,
+ "loss": 0.0017,
+ "step": 2428
+ },
+ {
+ "epoch": 8.897435897435898,
+ "grad_norm": 31.914432525634766,
+ "learning_rate": 7.374847374847375e-06,
+ "loss": 0.0725,
+ "step": 2429
+ },
+ {
+ "epoch": 8.901098901098901,
+ "grad_norm": 1.1302192211151123,
+ "learning_rate": 7.3504273504273504e-06,
+ "loss": 0.0042,
+ "step": 2430
+ },
+ {
+ "epoch": 8.904761904761905,
+ "grad_norm": 10.759814262390137,
+ "learning_rate": 7.326007326007326e-06,
+ "loss": 0.0325,
+ "step": 2431
+ },
+ {
+ "epoch": 8.908424908424909,
+ "grad_norm": 10.23229694366455,
+ "learning_rate": 7.301587301587301e-06,
+ "loss": 0.0169,
+ "step": 2432
+ },
+ {
+ "epoch": 8.912087912087912,
+ "grad_norm": 18.52377700805664,
+ "learning_rate": 7.277167277167277e-06,
+ "loss": 0.0873,
+ "step": 2433
+ },
+ {
+ "epoch": 8.915750915750916,
+ "grad_norm": 9.769023895263672,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.0432,
+ "step": 2434
+ },
+ {
+ "epoch": 8.91941391941392,
+ "grad_norm": 5.880816459655762,
+ "learning_rate": 7.228327228327228e-06,
+ "loss": 0.0196,
+ "step": 2435
+ },
+ {
+ "epoch": 8.923076923076923,
+ "grad_norm": 12.387096405029297,
+ "learning_rate": 7.203907203907203e-06,
+ "loss": 0.0246,
+ "step": 2436
+ },
+ {
+ "epoch": 8.926739926739927,
+ "grad_norm": 18.798778533935547,
+ "learning_rate": 7.17948717948718e-06,
+ "loss": 0.1073,
+ "step": 2437
+ },
+ {
+ "epoch": 8.93040293040293,
+ "grad_norm": 2.706861734390259,
+ "learning_rate": 7.155067155067155e-06,
+ "loss": 0.0048,
+ "step": 2438
+ },
+ {
+ "epoch": 8.934065934065934,
+ "grad_norm": 4.111676216125488,
+ "learning_rate": 7.130647130647131e-06,
+ "loss": 0.0107,
+ "step": 2439
+ },
+ {
+ "epoch": 8.937728937728938,
+ "grad_norm": 55.59748077392578,
+ "learning_rate": 7.106227106227106e-06,
+ "loss": 0.2811,
+ "step": 2440
+ },
+ {
+ "epoch": 8.941391941391942,
+ "grad_norm": 10.445195198059082,
+ "learning_rate": 7.081807081807082e-06,
+ "loss": 0.0491,
+ "step": 2441
+ },
+ {
+ "epoch": 8.945054945054945,
+ "grad_norm": 0.049220070242881775,
+ "learning_rate": 7.057387057387058e-06,
+ "loss": 0.0002,
+ "step": 2442
+ },
+ {
+ "epoch": 8.948717948717949,
+ "grad_norm": 2.4076764583587646,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.0075,
+ "step": 2443
+ },
+ {
+ "epoch": 8.952380952380953,
+ "grad_norm": 1.8959174156188965,
+ "learning_rate": 7.008547008547008e-06,
+ "loss": 0.0053,
+ "step": 2444
+ },
+ {
+ "epoch": 8.956043956043956,
+ "grad_norm": 72.28501892089844,
+ "learning_rate": 6.984126984126984e-06,
+ "loss": 0.9784,
+ "step": 2445
+ },
+ {
+ "epoch": 8.95970695970696,
+ "grad_norm": 12.387998580932617,
+ "learning_rate": 6.95970695970696e-06,
+ "loss": 0.0623,
+ "step": 2446
+ },
+ {
+ "epoch": 8.963369963369964,
+ "grad_norm": 80.10337829589844,
+ "learning_rate": 6.935286935286936e-06,
+ "loss": 0.3668,
+ "step": 2447
+ },
+ {
+ "epoch": 8.967032967032967,
+ "grad_norm": 8.527040481567383,
+ "learning_rate": 6.910866910866911e-06,
+ "loss": 0.0308,
+ "step": 2448
+ },
+ {
+ "epoch": 8.97069597069597,
+ "grad_norm": 56.55281066894531,
+ "learning_rate": 6.886446886446886e-06,
+ "loss": 0.567,
+ "step": 2449
+ },
+ {
+ "epoch": 8.974358974358974,
+ "grad_norm": 1.594208836555481,
+ "learning_rate": 6.862026862026863e-06,
+ "loss": 0.0049,
+ "step": 2450
+ },
+ {
+ "epoch": 8.978021978021978,
+ "grad_norm": 0.4573160707950592,
+ "learning_rate": 6.837606837606838e-06,
+ "loss": 0.001,
+ "step": 2451
+ },
+ {
+ "epoch": 8.981684981684982,
+ "grad_norm": 48.936038970947266,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.6475,
+ "step": 2452
+ },
+ {
+ "epoch": 8.985347985347985,
+ "grad_norm": 11.618135452270508,
+ "learning_rate": 6.788766788766789e-06,
+ "loss": 0.0277,
+ "step": 2453
+ },
+ {
+ "epoch": 8.989010989010989,
+ "grad_norm": 2.847616195678711,
+ "learning_rate": 6.764346764346765e-06,
+ "loss": 0.0075,
+ "step": 2454
+ },
+ {
+ "epoch": 8.992673992673993,
+ "grad_norm": 0.4193238317966461,
+ "learning_rate": 6.739926739926741e-06,
+ "loss": 0.0013,
+ "step": 2455
+ },
+ {
+ "epoch": 8.996336996336996,
+ "grad_norm": 18.683883666992188,
+ "learning_rate": 6.715506715506716e-06,
+ "loss": 0.0652,
+ "step": 2456
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 64.50067138671875,
+ "learning_rate": 6.691086691086691e-06,
+ "loss": 0.6786,
+ "step": 2457
+ },
+ {
+ "epoch": 9.003663003663004,
+ "grad_norm": 54.9294319152832,
+ "learning_rate": 6.666666666666667e-06,
+ "loss": 0.6198,
+ "step": 2458
+ },
+ {
+ "epoch": 9.007326007326007,
+ "grad_norm": 55.97196960449219,
+ "learning_rate": 6.6422466422466426e-06,
+ "loss": 0.3798,
+ "step": 2459
+ },
+ {
+ "epoch": 9.010989010989011,
+ "grad_norm": 3.0465450286865234,
+ "learning_rate": 6.6178266178266185e-06,
+ "loss": 0.0123,
+ "step": 2460
+ },
+ {
+ "epoch": 9.014652014652015,
+ "grad_norm": 8.725708961486816,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.0295,
+ "step": 2461
+ },
+ {
+ "epoch": 9.018315018315018,
+ "grad_norm": 24.417634963989258,
+ "learning_rate": 6.568986568986569e-06,
+ "loss": 0.1857,
+ "step": 2462
+ },
+ {
+ "epoch": 9.021978021978022,
+ "grad_norm": 75.99623107910156,
+ "learning_rate": 6.544566544566545e-06,
+ "loss": 0.8702,
+ "step": 2463
+ },
+ {
+ "epoch": 9.025641025641026,
+ "grad_norm": 12.464011192321777,
+ "learning_rate": 6.5201465201465204e-06,
+ "loss": 0.0656,
+ "step": 2464
+ },
+ {
+ "epoch": 9.02930402930403,
+ "grad_norm": 1.5972875356674194,
+ "learning_rate": 6.4957264957264955e-06,
+ "loss": 0.0074,
+ "step": 2465
+ },
+ {
+ "epoch": 9.032967032967033,
+ "grad_norm": 0.38648298382759094,
+ "learning_rate": 6.471306471306471e-06,
+ "loss": 0.0007,
+ "step": 2466
+ },
+ {
+ "epoch": 9.036630036630036,
+ "grad_norm": 55.41832733154297,
+ "learning_rate": 6.446886446886447e-06,
+ "loss": 0.2693,
+ "step": 2467
+ },
+ {
+ "epoch": 9.04029304029304,
+ "grad_norm": 1.8419067859649658,
+ "learning_rate": 6.422466422466423e-06,
+ "loss": 0.008,
+ "step": 2468
+ },
+ {
+ "epoch": 9.043956043956044,
+ "grad_norm": 6.2913498878479,
+ "learning_rate": 6.398046398046398e-06,
+ "loss": 0.0214,
+ "step": 2469
+ },
+ {
+ "epoch": 9.047619047619047,
+ "grad_norm": 16.412883758544922,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.0702,
+ "step": 2470
+ },
+ {
+ "epoch": 9.051282051282051,
+ "grad_norm": 6.313873291015625,
+ "learning_rate": 6.349206349206349e-06,
+ "loss": 0.0191,
+ "step": 2471
+ },
+ {
+ "epoch": 9.054945054945055,
+ "grad_norm": 34.515655517578125,
+ "learning_rate": 6.324786324786325e-06,
+ "loss": 0.1514,
+ "step": 2472
+ },
+ {
+ "epoch": 9.058608058608058,
+ "grad_norm": 0.12106683105230331,
+ "learning_rate": 6.3003663003663e-06,
+ "loss": 0.0005,
+ "step": 2473
+ },
+ {
+ "epoch": 9.062271062271062,
+ "grad_norm": 1.749250888824463,
+ "learning_rate": 6.275946275946276e-06,
+ "loss": 0.006,
+ "step": 2474
+ },
+ {
+ "epoch": 9.065934065934066,
+ "grad_norm": 7.868753433227539,
+ "learning_rate": 6.251526251526251e-06,
+ "loss": 0.043,
+ "step": 2475
+ },
+ {
+ "epoch": 9.06959706959707,
+ "grad_norm": 0.787532389163971,
+ "learning_rate": 6.227106227106228e-06,
+ "loss": 0.002,
+ "step": 2476
+ },
+ {
+ "epoch": 9.073260073260073,
+ "grad_norm": 3.244596004486084,
+ "learning_rate": 6.202686202686203e-06,
+ "loss": 0.0103,
+ "step": 2477
+ },
+ {
+ "epoch": 9.076923076923077,
+ "grad_norm": 7.473750114440918,
+ "learning_rate": 6.178266178266178e-06,
+ "loss": 0.0261,
+ "step": 2478
+ },
+ {
+ "epoch": 9.08058608058608,
+ "grad_norm": 26.390687942504883,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 0.0861,
+ "step": 2479
+ },
+ {
+ "epoch": 9.084249084249084,
+ "grad_norm": 3.37931489944458,
+ "learning_rate": 6.129426129426129e-06,
+ "loss": 0.0161,
+ "step": 2480
+ },
+ {
+ "epoch": 9.087912087912088,
+ "grad_norm": 42.62114334106445,
+ "learning_rate": 6.105006105006105e-06,
+ "loss": 0.1498,
+ "step": 2481
+ },
+ {
+ "epoch": 9.091575091575091,
+ "grad_norm": 6.945065975189209,
+ "learning_rate": 6.080586080586081e-06,
+ "loss": 0.0335,
+ "step": 2482
+ },
+ {
+ "epoch": 9.095238095238095,
+ "grad_norm": 13.789215087890625,
+ "learning_rate": 6.056166056166056e-06,
+ "loss": 0.0344,
+ "step": 2483
+ },
+ {
+ "epoch": 9.098901098901099,
+ "grad_norm": 2.178279161453247,
+ "learning_rate": 6.031746031746032e-06,
+ "loss": 0.0089,
+ "step": 2484
+ },
+ {
+ "epoch": 9.102564102564102,
+ "grad_norm": 61.52828598022461,
+ "learning_rate": 6.007326007326008e-06,
+ "loss": 0.6747,
+ "step": 2485
+ },
+ {
+ "epoch": 9.106227106227106,
+ "grad_norm": 2.6004297733306885,
+ "learning_rate": 5.982905982905983e-06,
+ "loss": 0.015,
+ "step": 2486
+ },
+ {
+ "epoch": 9.10989010989011,
+ "grad_norm": 6.188530921936035,
+ "learning_rate": 5.958485958485959e-06,
+ "loss": 0.019,
+ "step": 2487
+ },
+ {
+ "epoch": 9.113553113553113,
+ "grad_norm": 8.141875267028809,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 0.04,
+ "step": 2488
+ },
+ {
+ "epoch": 9.117216117216117,
+ "grad_norm": 7.30596923828125,
+ "learning_rate": 5.90964590964591e-06,
+ "loss": 0.0223,
+ "step": 2489
+ },
+ {
+ "epoch": 9.12087912087912,
+ "grad_norm": 0.5398825407028198,
+ "learning_rate": 5.885225885225886e-06,
+ "loss": 0.0022,
+ "step": 2490
+ },
+ {
+ "epoch": 9.124542124542124,
+ "grad_norm": 8.664217948913574,
+ "learning_rate": 5.860805860805861e-06,
+ "loss": 0.0277,
+ "step": 2491
+ },
+ {
+ "epoch": 9.128205128205128,
+ "grad_norm": 24.2191162109375,
+ "learning_rate": 5.836385836385837e-06,
+ "loss": 0.1518,
+ "step": 2492
+ },
+ {
+ "epoch": 9.131868131868131,
+ "grad_norm": 8.598712921142578,
+ "learning_rate": 5.811965811965812e-06,
+ "loss": 0.0265,
+ "step": 2493
+ },
+ {
+ "epoch": 9.135531135531135,
+ "grad_norm": 23.29640007019043,
+ "learning_rate": 5.787545787545788e-06,
+ "loss": 0.1272,
+ "step": 2494
+ },
+ {
+ "epoch": 9.139194139194139,
+ "grad_norm": 18.841108322143555,
+ "learning_rate": 5.7631257631257635e-06,
+ "loss": 0.0955,
+ "step": 2495
+ },
+ {
+ "epoch": 9.142857142857142,
+ "grad_norm": 54.528018951416016,
+ "learning_rate": 5.738705738705739e-06,
+ "loss": 0.1903,
+ "step": 2496
+ },
+ {
+ "epoch": 9.146520146520146,
+ "grad_norm": 23.48889923095703,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.0938,
+ "step": 2497
+ },
+ {
+ "epoch": 9.15018315018315,
+ "grad_norm": 5.552438259124756,
+ "learning_rate": 5.68986568986569e-06,
+ "loss": 0.0231,
+ "step": 2498
+ },
+ {
+ "epoch": 9.153846153846153,
+ "grad_norm": 4.428290843963623,
+ "learning_rate": 5.6654456654456655e-06,
+ "loss": 0.01,
+ "step": 2499
+ },
+ {
+ "epoch": 9.157509157509157,
+ "grad_norm": 0.700716495513916,
+ "learning_rate": 5.641025641025641e-06,
+ "loss": 0.0033,
+ "step": 2500
+ },
+ {
+ "epoch": 9.16117216117216,
+ "grad_norm": 3.372562885284424,
+ "learning_rate": 5.6166056166056165e-06,
+ "loss": 0.0107,
+ "step": 2501
+ },
+ {
+ "epoch": 9.164835164835164,
+ "grad_norm": 7.765186786651611,
+ "learning_rate": 5.592185592185592e-06,
+ "loss": 0.0259,
+ "step": 2502
+ },
+ {
+ "epoch": 9.168498168498168,
+ "grad_norm": 10.317693710327148,
+ "learning_rate": 5.567765567765568e-06,
+ "loss": 0.0339,
+ "step": 2503
+ },
+ {
+ "epoch": 9.172161172161172,
+ "grad_norm": 23.525001525878906,
+ "learning_rate": 5.543345543345543e-06,
+ "loss": 0.1518,
+ "step": 2504
+ },
+ {
+ "epoch": 9.175824175824175,
+ "grad_norm": 22.77937889099121,
+ "learning_rate": 5.518925518925519e-06,
+ "loss": 0.0839,
+ "step": 2505
+ },
+ {
+ "epoch": 9.179487179487179,
+ "grad_norm": 11.377626419067383,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.0381,
+ "step": 2506
+ },
+ {
+ "epoch": 9.183150183150182,
+ "grad_norm": 62.58002853393555,
+ "learning_rate": 5.47008547008547e-06,
+ "loss": 0.6765,
+ "step": 2507
+ },
+ {
+ "epoch": 9.186813186813186,
+ "grad_norm": 0.11700604110956192,
+ "learning_rate": 5.445665445665446e-06,
+ "loss": 0.0005,
+ "step": 2508
+ },
+ {
+ "epoch": 9.19047619047619,
+ "grad_norm": 48.127662658691406,
+ "learning_rate": 5.421245421245421e-06,
+ "loss": 0.2048,
+ "step": 2509
+ },
+ {
+ "epoch": 9.194139194139193,
+ "grad_norm": 30.58054542541504,
+ "learning_rate": 5.396825396825396e-06,
+ "loss": 0.4033,
+ "step": 2510
+ },
+ {
+ "epoch": 9.197802197802197,
+ "grad_norm": 0.17868542671203613,
+ "learning_rate": 5.372405372405373e-06,
+ "loss": 0.0008,
+ "step": 2511
+ },
+ {
+ "epoch": 9.2014652014652,
+ "grad_norm": 5.950721263885498,
+ "learning_rate": 5.347985347985348e-06,
+ "loss": 0.0111,
+ "step": 2512
+ },
+ {
+ "epoch": 9.205128205128204,
+ "grad_norm": 48.85601806640625,
+ "learning_rate": 5.323565323565324e-06,
+ "loss": 0.1712,
+ "step": 2513
+ },
+ {
+ "epoch": 9.208791208791208,
+ "grad_norm": 29.88921356201172,
+ "learning_rate": 5.299145299145299e-06,
+ "loss": 0.1477,
+ "step": 2514
+ },
+ {
+ "epoch": 9.212454212454212,
+ "grad_norm": 1.6933594942092896,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.0075,
+ "step": 2515
+ },
+ {
+ "epoch": 9.216117216117215,
+ "grad_norm": 10.35281753540039,
+ "learning_rate": 5.250305250305251e-06,
+ "loss": 0.0389,
+ "step": 2516
+ },
+ {
+ "epoch": 9.219780219780219,
+ "grad_norm": 52.530269622802734,
+ "learning_rate": 5.225885225885226e-06,
+ "loss": 0.754,
+ "step": 2517
+ },
+ {
+ "epoch": 9.223443223443223,
+ "grad_norm": 32.8208122253418,
+ "learning_rate": 5.201465201465201e-06,
+ "loss": 0.3333,
+ "step": 2518
+ },
+ {
+ "epoch": 9.227106227106226,
+ "grad_norm": 1.3080699443817139,
+ "learning_rate": 5.177045177045177e-06,
+ "loss": 0.0049,
+ "step": 2519
+ },
+ {
+ "epoch": 9.23076923076923,
+ "grad_norm": 17.936071395874023,
+ "learning_rate": 5.152625152625153e-06,
+ "loss": 0.0636,
+ "step": 2520
+ },
+ {
+ "epoch": 9.234432234432234,
+ "grad_norm": 0.08917564153671265,
+ "learning_rate": 5.128205128205129e-06,
+ "loss": 0.0003,
+ "step": 2521
+ },
+ {
+ "epoch": 9.238095238095237,
+ "grad_norm": 42.1187629699707,
+ "learning_rate": 5.103785103785104e-06,
+ "loss": 0.9348,
+ "step": 2522
+ },
+ {
+ "epoch": 9.241758241758241,
+ "grad_norm": 0.9316588640213013,
+ "learning_rate": 5.079365079365079e-06,
+ "loss": 0.0028,
+ "step": 2523
+ },
+ {
+ "epoch": 9.245421245421245,
+ "grad_norm": 5.581846714019775,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.0237,
+ "step": 2524
+ },
+ {
+ "epoch": 9.249084249084248,
+ "grad_norm": 0.30131420493125916,
+ "learning_rate": 5.030525030525031e-06,
+ "loss": 0.0009,
+ "step": 2525
+ },
+ {
+ "epoch": 9.252747252747252,
+ "grad_norm": 32.7791862487793,
+ "learning_rate": 5.006105006105006e-06,
+ "loss": 0.1743,
+ "step": 2526
+ },
+ {
+ "epoch": 9.256410256410255,
+ "grad_norm": 4.903104782104492,
+ "learning_rate": 4.981684981684982e-06,
+ "loss": 0.0104,
+ "step": 2527
+ },
+ {
+ "epoch": 9.260073260073261,
+ "grad_norm": 19.023868560791016,
+ "learning_rate": 4.957264957264957e-06,
+ "loss": 0.0561,
+ "step": 2528
+ },
+ {
+ "epoch": 9.263736263736265,
+ "grad_norm": 3.8107199668884277,
+ "learning_rate": 4.9328449328449335e-06,
+ "loss": 0.0099,
+ "step": 2529
+ },
+ {
+ "epoch": 9.267399267399268,
+ "grad_norm": 18.885719299316406,
+ "learning_rate": 4.908424908424909e-06,
+ "loss": 0.0538,
+ "step": 2530
+ },
+ {
+ "epoch": 9.271062271062272,
+ "grad_norm": 5.979161739349365,
+ "learning_rate": 4.884004884004884e-06,
+ "loss": 0.0209,
+ "step": 2531
+ },
+ {
+ "epoch": 9.274725274725276,
+ "grad_norm": 4.547204494476318,
+ "learning_rate": 4.8595848595848596e-06,
+ "loss": 0.0156,
+ "step": 2532
+ },
+ {
+ "epoch": 9.27838827838828,
+ "grad_norm": 3.088197708129883,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 0.0048,
+ "step": 2533
+ },
+ {
+ "epoch": 9.282051282051283,
+ "grad_norm": 8.822454452514648,
+ "learning_rate": 4.810744810744811e-06,
+ "loss": 0.0301,
+ "step": 2534
+ },
+ {
+ "epoch": 9.285714285714286,
+ "grad_norm": 0.07323496788740158,
+ "learning_rate": 4.7863247863247865e-06,
+ "loss": 0.0003,
+ "step": 2535
+ },
+ {
+ "epoch": 9.28937728937729,
+ "grad_norm": 41.7569694519043,
+ "learning_rate": 4.7619047619047615e-06,
+ "loss": 0.2709,
+ "step": 2536
+ },
+ {
+ "epoch": 9.293040293040294,
+ "grad_norm": 6.304474353790283,
+ "learning_rate": 4.737484737484738e-06,
+ "loss": 0.0311,
+ "step": 2537
+ },
+ {
+ "epoch": 9.296703296703297,
+ "grad_norm": 62.788997650146484,
+ "learning_rate": 4.713064713064713e-06,
+ "loss": 0.669,
+ "step": 2538
+ },
+ {
+ "epoch": 9.300366300366301,
+ "grad_norm": 26.603708267211914,
+ "learning_rate": 4.688644688644688e-06,
+ "loss": 0.4077,
+ "step": 2539
+ },
+ {
+ "epoch": 9.304029304029305,
+ "grad_norm": 1.8226062059402466,
+ "learning_rate": 4.664224664224664e-06,
+ "loss": 0.0083,
+ "step": 2540
+ },
+ {
+ "epoch": 9.307692307692308,
+ "grad_norm": 1.0840901136398315,
+ "learning_rate": 4.639804639804639e-06,
+ "loss": 0.0022,
+ "step": 2541
+ },
+ {
+ "epoch": 9.311355311355312,
+ "grad_norm": 0.7872227430343628,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.0037,
+ "step": 2542
+ },
+ {
+ "epoch": 9.315018315018316,
+ "grad_norm": 12.286794662475586,
+ "learning_rate": 4.590964590964591e-06,
+ "loss": 0.0395,
+ "step": 2543
+ },
+ {
+ "epoch": 9.31868131868132,
+ "grad_norm": 47.97056579589844,
+ "learning_rate": 4.566544566544566e-06,
+ "loss": 0.2777,
+ "step": 2544
+ },
+ {
+ "epoch": 9.322344322344323,
+ "grad_norm": 62.515010833740234,
+ "learning_rate": 4.542124542124542e-06,
+ "loss": 0.417,
+ "step": 2545
+ },
+ {
+ "epoch": 9.326007326007327,
+ "grad_norm": 9.020209312438965,
+ "learning_rate": 4.517704517704518e-06,
+ "loss": 0.0283,
+ "step": 2546
+ },
+ {
+ "epoch": 9.32967032967033,
+ "grad_norm": 8.966110229492188,
+ "learning_rate": 4.493284493284493e-06,
+ "loss": 0.0419,
+ "step": 2547
+ },
+ {
+ "epoch": 9.333333333333334,
+ "grad_norm": 2.7591960430145264,
+ "learning_rate": 4.468864468864469e-06,
+ "loss": 0.0101,
+ "step": 2548
+ },
+ {
+ "epoch": 9.336996336996338,
+ "grad_norm": 0.7941232323646545,
+ "learning_rate": 4.444444444444444e-06,
+ "loss": 0.0027,
+ "step": 2549
+ },
+ {
+ "epoch": 9.340659340659341,
+ "grad_norm": 3.809084177017212,
+ "learning_rate": 4.420024420024421e-06,
+ "loss": 0.0077,
+ "step": 2550
+ },
+ {
+ "epoch": 9.344322344322345,
+ "grad_norm": 15.51340389251709,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.1073,
+ "step": 2551
+ },
+ {
+ "epoch": 9.347985347985349,
+ "grad_norm": 0.07044784724712372,
+ "learning_rate": 4.371184371184371e-06,
+ "loss": 0.0003,
+ "step": 2552
+ },
+ {
+ "epoch": 9.351648351648352,
+ "grad_norm": 37.41777420043945,
+ "learning_rate": 4.346764346764347e-06,
+ "loss": 0.2386,
+ "step": 2553
+ },
+ {
+ "epoch": 9.355311355311356,
+ "grad_norm": 37.003570556640625,
+ "learning_rate": 4.322344322344322e-06,
+ "loss": 0.3759,
+ "step": 2554
+ },
+ {
+ "epoch": 9.35897435897436,
+ "grad_norm": 22.988313674926758,
+ "learning_rate": 4.297924297924298e-06,
+ "loss": 0.1126,
+ "step": 2555
+ },
+ {
+ "epoch": 9.362637362637363,
+ "grad_norm": 0.8595256805419922,
+ "learning_rate": 4.273504273504274e-06,
+ "loss": 0.0037,
+ "step": 2556
+ },
+ {
+ "epoch": 9.366300366300367,
+ "grad_norm": 5.932793140411377,
+ "learning_rate": 4.249084249084249e-06,
+ "loss": 0.0362,
+ "step": 2557
+ },
+ {
+ "epoch": 9.36996336996337,
+ "grad_norm": 3.5106499195098877,
+ "learning_rate": 4.224664224664225e-06,
+ "loss": 0.0115,
+ "step": 2558
+ },
+ {
+ "epoch": 9.373626373626374,
+ "grad_norm": 1.531368613243103,
+ "learning_rate": 4.200244200244201e-06,
+ "loss": 0.0078,
+ "step": 2559
+ },
+ {
+ "epoch": 9.377289377289378,
+ "grad_norm": 2.6112287044525146,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.0078,
+ "step": 2560
+ },
+ {
+ "epoch": 9.380952380952381,
+ "grad_norm": 14.049304008483887,
+ "learning_rate": 4.151404151404152e-06,
+ "loss": 0.0416,
+ "step": 2561
+ },
+ {
+ "epoch": 9.384615384615385,
+ "grad_norm": 40.526161193847656,
+ "learning_rate": 4.126984126984127e-06,
+ "loss": 0.199,
+ "step": 2562
+ },
+ {
+ "epoch": 9.388278388278389,
+ "grad_norm": 11.9779634475708,
+ "learning_rate": 4.102564102564103e-06,
+ "loss": 0.0334,
+ "step": 2563
+ },
+ {
+ "epoch": 9.391941391941392,
+ "grad_norm": 50.550819396972656,
+ "learning_rate": 4.078144078144079e-06,
+ "loss": 0.6283,
+ "step": 2564
+ },
+ {
+ "epoch": 9.395604395604396,
+ "grad_norm": 12.796805381774902,
+ "learning_rate": 4.053724053724054e-06,
+ "loss": 0.0478,
+ "step": 2565
+ },
+ {
+ "epoch": 9.3992673992674,
+ "grad_norm": 50.15115737915039,
+ "learning_rate": 4.0293040293040296e-06,
+ "loss": 0.5339,
+ "step": 2566
+ },
+ {
+ "epoch": 9.402930402930403,
+ "grad_norm": 4.983883857727051,
+ "learning_rate": 4.004884004884005e-06,
+ "loss": 0.0089,
+ "step": 2567
+ },
+ {
+ "epoch": 9.406593406593407,
+ "grad_norm": 6.172858238220215,
+ "learning_rate": 3.9804639804639805e-06,
+ "loss": 0.0202,
+ "step": 2568
+ },
+ {
+ "epoch": 9.41025641025641,
+ "grad_norm": 35.42179870605469,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.268,
+ "step": 2569
+ },
+ {
+ "epoch": 9.413919413919414,
+ "grad_norm": 12.001964569091797,
+ "learning_rate": 3.9316239316239315e-06,
+ "loss": 0.0516,
+ "step": 2570
+ },
+ {
+ "epoch": 9.417582417582418,
+ "grad_norm": 40.64314651489258,
+ "learning_rate": 3.9072039072039074e-06,
+ "loss": 0.3076,
+ "step": 2571
+ },
+ {
+ "epoch": 9.421245421245422,
+ "grad_norm": 0.95157790184021,
+ "learning_rate": 3.882783882783883e-06,
+ "loss": 0.0039,
+ "step": 2572
+ },
+ {
+ "epoch": 9.424908424908425,
+ "grad_norm": 12.725324630737305,
+ "learning_rate": 3.858363858363858e-06,
+ "loss": 0.0529,
+ "step": 2573
+ },
+ {
+ "epoch": 9.428571428571429,
+ "grad_norm": 7.514665603637695,
+ "learning_rate": 3.833943833943834e-06,
+ "loss": 0.0308,
+ "step": 2574
+ },
+ {
+ "epoch": 9.432234432234432,
+ "grad_norm": 2.3982863426208496,
+ "learning_rate": 3.8095238095238094e-06,
+ "loss": 0.0058,
+ "step": 2575
+ },
+ {
+ "epoch": 9.435897435897436,
+ "grad_norm": 20.93942642211914,
+ "learning_rate": 3.785103785103785e-06,
+ "loss": 0.1107,
+ "step": 2576
+ },
+ {
+ "epoch": 9.43956043956044,
+ "grad_norm": 58.94899368286133,
+ "learning_rate": 3.7606837606837612e-06,
+ "loss": 0.152,
+ "step": 2577
+ },
+ {
+ "epoch": 9.443223443223443,
+ "grad_norm": 0.7720767855644226,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 0.0029,
+ "step": 2578
+ },
+ {
+ "epoch": 9.446886446886447,
+ "grad_norm": 19.965377807617188,
+ "learning_rate": 3.7118437118437118e-06,
+ "loss": 0.0761,
+ "step": 2579
+ },
+ {
+ "epoch": 9.45054945054945,
+ "grad_norm": 4.148466110229492,
+ "learning_rate": 3.6874236874236877e-06,
+ "loss": 0.0216,
+ "step": 2580
+ },
+ {
+ "epoch": 9.454212454212454,
+ "grad_norm": 52.55584716796875,
+ "learning_rate": 3.663003663003663e-06,
+ "loss": 0.2195,
+ "step": 2581
+ },
+ {
+ "epoch": 9.457875457875458,
+ "grad_norm": 0.05151841789484024,
+ "learning_rate": 3.6385836385836387e-06,
+ "loss": 0.0002,
+ "step": 2582
+ },
+ {
+ "epoch": 9.461538461538462,
+ "grad_norm": 3.748384714126587,
+ "learning_rate": 3.614163614163614e-06,
+ "loss": 0.0152,
+ "step": 2583
+ },
+ {
+ "epoch": 9.465201465201465,
+ "grad_norm": 27.1336669921875,
+ "learning_rate": 3.58974358974359e-06,
+ "loss": 0.1158,
+ "step": 2584
+ },
+ {
+ "epoch": 9.468864468864469,
+ "grad_norm": 19.104896545410156,
+ "learning_rate": 3.5653235653235655e-06,
+ "loss": 0.0845,
+ "step": 2585
+ },
+ {
+ "epoch": 9.472527472527473,
+ "grad_norm": 3.337369203567505,
+ "learning_rate": 3.540903540903541e-06,
+ "loss": 0.0137,
+ "step": 2586
+ },
+ {
+ "epoch": 9.476190476190476,
+ "grad_norm": 56.447086334228516,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.6262,
+ "step": 2587
+ },
+ {
+ "epoch": 9.47985347985348,
+ "grad_norm": 8.920942306518555,
+ "learning_rate": 3.492063492063492e-06,
+ "loss": 0.0346,
+ "step": 2588
+ },
+ {
+ "epoch": 9.483516483516484,
+ "grad_norm": 31.762073516845703,
+ "learning_rate": 3.467643467643468e-06,
+ "loss": 0.1586,
+ "step": 2589
+ },
+ {
+ "epoch": 9.487179487179487,
+ "grad_norm": 0.17851080000400543,
+ "learning_rate": 3.443223443223443e-06,
+ "loss": 0.0008,
+ "step": 2590
+ },
+ {
+ "epoch": 9.49084249084249,
+ "grad_norm": 45.99188995361328,
+ "learning_rate": 3.418803418803419e-06,
+ "loss": 0.2865,
+ "step": 2591
+ },
+ {
+ "epoch": 9.494505494505495,
+ "grad_norm": 14.874516487121582,
+ "learning_rate": 3.3943833943833944e-06,
+ "loss": 0.0655,
+ "step": 2592
+ },
+ {
+ "epoch": 9.498168498168498,
+ "grad_norm": 61.91463088989258,
+ "learning_rate": 3.3699633699633703e-06,
+ "loss": 0.2476,
+ "step": 2593
+ },
+ {
+ "epoch": 9.501831501831502,
+ "grad_norm": 7.143148422241211,
+ "learning_rate": 3.3455433455433454e-06,
+ "loss": 0.0287,
+ "step": 2594
+ },
+ {
+ "epoch": 9.505494505494505,
+ "grad_norm": 19.79998016357422,
+ "learning_rate": 3.3211233211233213e-06,
+ "loss": 0.0467,
+ "step": 2595
+ },
+ {
+ "epoch": 9.50915750915751,
+ "grad_norm": 8.070464134216309,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.0225,
+ "step": 2596
+ },
+ {
+ "epoch": 9.512820512820513,
+ "grad_norm": 1.80027174949646,
+ "learning_rate": 3.2722832722832727e-06,
+ "loss": 0.0082,
+ "step": 2597
+ },
+ {
+ "epoch": 9.516483516483516,
+ "grad_norm": 33.25511932373047,
+ "learning_rate": 3.2478632478632477e-06,
+ "loss": 0.2057,
+ "step": 2598
+ },
+ {
+ "epoch": 9.52014652014652,
+ "grad_norm": 18.330320358276367,
+ "learning_rate": 3.2234432234432237e-06,
+ "loss": 0.1071,
+ "step": 2599
+ },
+ {
+ "epoch": 9.523809523809524,
+ "grad_norm": 16.037355422973633,
+ "learning_rate": 3.199023199023199e-06,
+ "loss": 0.0652,
+ "step": 2600
+ },
+ {
+ "epoch": 9.527472527472527,
+ "grad_norm": 3.849982500076294,
+ "learning_rate": 3.1746031746031746e-06,
+ "loss": 0.015,
+ "step": 2601
+ },
+ {
+ "epoch": 9.531135531135531,
+ "grad_norm": 18.308284759521484,
+ "learning_rate": 3.15018315018315e-06,
+ "loss": 0.0645,
+ "step": 2602
+ },
+ {
+ "epoch": 9.534798534798535,
+ "grad_norm": 0.3144608736038208,
+ "learning_rate": 3.1257631257631256e-06,
+ "loss": 0.0016,
+ "step": 2603
+ },
+ {
+ "epoch": 9.538461538461538,
+ "grad_norm": 29.68560791015625,
+ "learning_rate": 3.1013431013431015e-06,
+ "loss": 0.1218,
+ "step": 2604
+ },
+ {
+ "epoch": 9.542124542124542,
+ "grad_norm": 3.620643138885498,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.0168,
+ "step": 2605
+ },
+ {
+ "epoch": 9.545787545787546,
+ "grad_norm": 4.140717029571533,
+ "learning_rate": 3.0525030525030525e-06,
+ "loss": 0.014,
+ "step": 2606
+ },
+ {
+ "epoch": 9.54945054945055,
+ "grad_norm": 2.3041791915893555,
+ "learning_rate": 3.028083028083028e-06,
+ "loss": 0.0091,
+ "step": 2607
+ },
+ {
+ "epoch": 9.553113553113553,
+ "grad_norm": 28.394710540771484,
+ "learning_rate": 3.003663003663004e-06,
+ "loss": 0.1327,
+ "step": 2608
+ },
+ {
+ "epoch": 9.556776556776557,
+ "grad_norm": 2.544114828109741,
+ "learning_rate": 2.9792429792429794e-06,
+ "loss": 0.0137,
+ "step": 2609
+ },
+ {
+ "epoch": 9.56043956043956,
+ "grad_norm": 23.328161239624023,
+ "learning_rate": 2.954822954822955e-06,
+ "loss": 0.1358,
+ "step": 2610
+ },
+ {
+ "epoch": 9.564102564102564,
+ "grad_norm": 1.2700049877166748,
+ "learning_rate": 2.9304029304029304e-06,
+ "loss": 0.0052,
+ "step": 2611
+ },
+ {
+ "epoch": 9.567765567765568,
+ "grad_norm": 39.445613861083984,
+ "learning_rate": 2.905982905982906e-06,
+ "loss": 0.3688,
+ "step": 2612
+ },
+ {
+ "epoch": 9.571428571428571,
+ "grad_norm": 2.7372241020202637,
+ "learning_rate": 2.8815628815628818e-06,
+ "loss": 0.0106,
+ "step": 2613
+ },
+ {
+ "epoch": 9.575091575091575,
+ "grad_norm": 9.760733604431152,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.0346,
+ "step": 2614
+ },
+ {
+ "epoch": 9.578754578754578,
+ "grad_norm": 24.24974822998047,
+ "learning_rate": 2.8327228327228327e-06,
+ "loss": 0.1375,
+ "step": 2615
+ },
+ {
+ "epoch": 9.582417582417582,
+ "grad_norm": 28.413419723510742,
+ "learning_rate": 2.8083028083028082e-06,
+ "loss": 0.1033,
+ "step": 2616
+ },
+ {
+ "epoch": 9.586080586080586,
+ "grad_norm": 23.164695739746094,
+ "learning_rate": 2.783882783882784e-06,
+ "loss": 0.1317,
+ "step": 2617
+ },
+ {
+ "epoch": 9.58974358974359,
+ "grad_norm": 24.03489112854004,
+ "learning_rate": 2.7594627594627596e-06,
+ "loss": 0.0697,
+ "step": 2618
+ },
+ {
+ "epoch": 9.593406593406593,
+ "grad_norm": 38.185237884521484,
+ "learning_rate": 2.735042735042735e-06,
+ "loss": 0.2214,
+ "step": 2619
+ },
+ {
+ "epoch": 9.597069597069597,
+ "grad_norm": 6.269185543060303,
+ "learning_rate": 2.7106227106227106e-06,
+ "loss": 0.0425,
+ "step": 2620
+ },
+ {
+ "epoch": 9.6007326007326,
+ "grad_norm": 49.141212463378906,
+ "learning_rate": 2.6862026862026865e-06,
+ "loss": 0.8936,
+ "step": 2621
+ },
+ {
+ "epoch": 9.604395604395604,
+ "grad_norm": 15.392929077148438,
+ "learning_rate": 2.661782661782662e-06,
+ "loss": 0.0873,
+ "step": 2622
+ },
+ {
+ "epoch": 9.608058608058608,
+ "grad_norm": 0.15730391442775726,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.0005,
+ "step": 2623
+ },
+ {
+ "epoch": 9.611721611721611,
+ "grad_norm": 0.5650401711463928,
+ "learning_rate": 2.612942612942613e-06,
+ "loss": 0.0029,
+ "step": 2624
+ },
+ {
+ "epoch": 9.615384615384615,
+ "grad_norm": 39.7634162902832,
+ "learning_rate": 2.5885225885225885e-06,
+ "loss": 0.9392,
+ "step": 2625
+ },
+ {
+ "epoch": 9.619047619047619,
+ "grad_norm": 2.412442207336426,
+ "learning_rate": 2.5641025641025644e-06,
+ "loss": 0.0112,
+ "step": 2626
+ },
+ {
+ "epoch": 9.622710622710622,
+ "grad_norm": 1.3341130018234253,
+ "learning_rate": 2.5396825396825395e-06,
+ "loss": 0.0052,
+ "step": 2627
+ },
+ {
+ "epoch": 9.626373626373626,
+ "grad_norm": 2.404193639755249,
+ "learning_rate": 2.5152625152625154e-06,
+ "loss": 0.0141,
+ "step": 2628
+ },
+ {
+ "epoch": 9.63003663003663,
+ "grad_norm": 31.471454620361328,
+ "learning_rate": 2.490842490842491e-06,
+ "loss": 0.163,
+ "step": 2629
+ },
+ {
+ "epoch": 9.633699633699633,
+ "grad_norm": 1.0943841934204102,
+ "learning_rate": 2.4664224664224668e-06,
+ "loss": 0.0045,
+ "step": 2630
+ },
+ {
+ "epoch": 9.637362637362637,
+ "grad_norm": 1.806687355041504,
+ "learning_rate": 2.442002442002442e-06,
+ "loss": 0.0045,
+ "step": 2631
+ },
+ {
+ "epoch": 9.64102564102564,
+ "grad_norm": 27.9985294342041,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.528,
+ "step": 2632
+ },
+ {
+ "epoch": 9.644688644688644,
+ "grad_norm": 15.279302597045898,
+ "learning_rate": 2.3931623931623932e-06,
+ "loss": 0.0732,
+ "step": 2633
+ },
+ {
+ "epoch": 9.648351648351648,
+ "grad_norm": 7.075009822845459,
+ "learning_rate": 2.368742368742369e-06,
+ "loss": 0.0267,
+ "step": 2634
+ },
+ {
+ "epoch": 9.652014652014651,
+ "grad_norm": 10.250161170959473,
+ "learning_rate": 2.344322344322344e-06,
+ "loss": 0.0546,
+ "step": 2635
+ },
+ {
+ "epoch": 9.655677655677655,
+ "grad_norm": 3.6995301246643066,
+ "learning_rate": 2.3199023199023197e-06,
+ "loss": 0.0181,
+ "step": 2636
+ },
+ {
+ "epoch": 9.659340659340659,
+ "grad_norm": 0.07929769903421402,
+ "learning_rate": 2.2954822954822956e-06,
+ "loss": 0.0003,
+ "step": 2637
+ },
+ {
+ "epoch": 9.663003663003662,
+ "grad_norm": 2.5525121688842773,
+ "learning_rate": 2.271062271062271e-06,
+ "loss": 0.0153,
+ "step": 2638
+ },
+ {
+ "epoch": 9.666666666666666,
+ "grad_norm": 5.717019557952881,
+ "learning_rate": 2.2466422466422466e-06,
+ "loss": 0.0337,
+ "step": 2639
+ },
+ {
+ "epoch": 9.67032967032967,
+ "grad_norm": 38.69899368286133,
+ "learning_rate": 2.222222222222222e-06,
+ "loss": 0.4106,
+ "step": 2640
+ },
+ {
+ "epoch": 9.673992673992673,
+ "grad_norm": 44.96628189086914,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.25,
+ "step": 2641
+ },
+ {
+ "epoch": 9.677655677655677,
+ "grad_norm": 0.5954161882400513,
+ "learning_rate": 2.1733821733821735e-06,
+ "loss": 0.0015,
+ "step": 2642
+ },
+ {
+ "epoch": 9.68131868131868,
+ "grad_norm": 0.4261475205421448,
+ "learning_rate": 2.148962148962149e-06,
+ "loss": 0.0016,
+ "step": 2643
+ },
+ {
+ "epoch": 9.684981684981684,
+ "grad_norm": 14.589736938476562,
+ "learning_rate": 2.1245421245421245e-06,
+ "loss": 0.0879,
+ "step": 2644
+ },
+ {
+ "epoch": 9.688644688644688,
+ "grad_norm": 32.692874908447266,
+ "learning_rate": 2.1001221001221004e-06,
+ "loss": 0.1608,
+ "step": 2645
+ },
+ {
+ "epoch": 9.692307692307692,
+ "grad_norm": 18.50358009338379,
+ "learning_rate": 2.075702075702076e-06,
+ "loss": 0.0805,
+ "step": 2646
+ },
+ {
+ "epoch": 9.695970695970695,
+ "grad_norm": 3.718752145767212,
+ "learning_rate": 2.0512820512820513e-06,
+ "loss": 0.014,
+ "step": 2647
+ },
+ {
+ "epoch": 9.699633699633699,
+ "grad_norm": 15.605087280273438,
+ "learning_rate": 2.026862026862027e-06,
+ "loss": 0.0602,
+ "step": 2648
+ },
+ {
+ "epoch": 9.703296703296703,
+ "grad_norm": 3.935739040374756,
+ "learning_rate": 2.0024420024420023e-06,
+ "loss": 0.012,
+ "step": 2649
+ },
+ {
+ "epoch": 9.706959706959706,
+ "grad_norm": 33.90616226196289,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 0.2279,
+ "step": 2650
+ },
+ {
+ "epoch": 9.71062271062271,
+ "grad_norm": 13.841948509216309,
+ "learning_rate": 1.9536019536019537e-06,
+ "loss": 0.0349,
+ "step": 2651
+ },
+ {
+ "epoch": 9.714285714285714,
+ "grad_norm": 13.099137306213379,
+ "learning_rate": 1.929181929181929e-06,
+ "loss": 0.0598,
+ "step": 2652
+ },
+ {
+ "epoch": 9.717948717948717,
+ "grad_norm": 26.72678565979004,
+ "learning_rate": 1.9047619047619047e-06,
+ "loss": 0.094,
+ "step": 2653
+ },
+ {
+ "epoch": 9.72161172161172,
+ "grad_norm": 0.567373514175415,
+ "learning_rate": 1.8803418803418806e-06,
+ "loss": 0.0022,
+ "step": 2654
+ },
+ {
+ "epoch": 9.725274725274724,
+ "grad_norm": 16.621074676513672,
+ "learning_rate": 1.8559218559218559e-06,
+ "loss": 0.0439,
+ "step": 2655
+ },
+ {
+ "epoch": 9.728937728937728,
+ "grad_norm": 3.6618309020996094,
+ "learning_rate": 1.8315018315018316e-06,
+ "loss": 0.0147,
+ "step": 2656
+ },
+ {
+ "epoch": 9.732600732600732,
+ "grad_norm": 2.858623504638672,
+ "learning_rate": 1.807081807081807e-06,
+ "loss": 0.0073,
+ "step": 2657
+ },
+ {
+ "epoch": 9.736263736263737,
+ "grad_norm": 9.052343368530273,
+ "learning_rate": 1.7826617826617828e-06,
+ "loss": 0.0292,
+ "step": 2658
+ },
+ {
+ "epoch": 9.73992673992674,
+ "grad_norm": 28.919038772583008,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.1734,
+ "step": 2659
+ },
+ {
+ "epoch": 9.743589743589745,
+ "grad_norm": 0.4150852560997009,
+ "learning_rate": 1.733821733821734e-06,
+ "loss": 0.0014,
+ "step": 2660
+ },
+ {
+ "epoch": 9.747252747252748,
+ "grad_norm": 1.8463596105575562,
+ "learning_rate": 1.7094017094017095e-06,
+ "loss": 0.0082,
+ "step": 2661
+ },
+ {
+ "epoch": 9.750915750915752,
+ "grad_norm": 1.7587751150131226,
+ "learning_rate": 1.6849816849816852e-06,
+ "loss": 0.0059,
+ "step": 2662
+ },
+ {
+ "epoch": 9.754578754578755,
+ "grad_norm": 22.40215301513672,
+ "learning_rate": 1.6605616605616606e-06,
+ "loss": 0.253,
+ "step": 2663
+ },
+ {
+ "epoch": 9.758241758241759,
+ "grad_norm": 30.525720596313477,
+ "learning_rate": 1.6361416361416363e-06,
+ "loss": 0.1751,
+ "step": 2664
+ },
+ {
+ "epoch": 9.761904761904763,
+ "grad_norm": 17.739097595214844,
+ "learning_rate": 1.6117216117216118e-06,
+ "loss": 0.0545,
+ "step": 2665
+ },
+ {
+ "epoch": 9.765567765567766,
+ "grad_norm": 9.031868934631348,
+ "learning_rate": 1.5873015873015873e-06,
+ "loss": 0.0289,
+ "step": 2666
+ },
+ {
+ "epoch": 9.76923076923077,
+ "grad_norm": 1.6399365663528442,
+ "learning_rate": 1.5628815628815628e-06,
+ "loss": 0.0066,
+ "step": 2667
+ },
+ {
+ "epoch": 9.772893772893774,
+ "grad_norm": 6.3039703369140625,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.0284,
+ "step": 2668
+ },
+ {
+ "epoch": 9.776556776556777,
+ "grad_norm": 16.56195831298828,
+ "learning_rate": 1.514041514041514e-06,
+ "loss": 0.0506,
+ "step": 2669
+ },
+ {
+ "epoch": 9.780219780219781,
+ "grad_norm": 50.48940658569336,
+ "learning_rate": 1.4896214896214897e-06,
+ "loss": 0.7348,
+ "step": 2670
+ },
+ {
+ "epoch": 9.783882783882785,
+ "grad_norm": 56.05281066894531,
+ "learning_rate": 1.4652014652014652e-06,
+ "loss": 0.2116,
+ "step": 2671
+ },
+ {
+ "epoch": 9.787545787545788,
+ "grad_norm": 13.636507987976074,
+ "learning_rate": 1.4407814407814409e-06,
+ "loss": 0.0464,
+ "step": 2672
+ },
+ {
+ "epoch": 9.791208791208792,
+ "grad_norm": 24.965688705444336,
+ "learning_rate": 1.4163614163614164e-06,
+ "loss": 0.1001,
+ "step": 2673
+ },
+ {
+ "epoch": 9.794871794871796,
+ "grad_norm": 2.3229403495788574,
+ "learning_rate": 1.391941391941392e-06,
+ "loss": 0.0095,
+ "step": 2674
+ },
+ {
+ "epoch": 9.7985347985348,
+ "grad_norm": 43.538116455078125,
+ "learning_rate": 1.3675213675213676e-06,
+ "loss": 0.0976,
+ "step": 2675
+ },
+ {
+ "epoch": 9.802197802197803,
+ "grad_norm": 12.537564277648926,
+ "learning_rate": 1.3431013431013433e-06,
+ "loss": 0.0648,
+ "step": 2676
+ },
+ {
+ "epoch": 9.805860805860807,
+ "grad_norm": 1.1308547258377075,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 0.0041,
+ "step": 2677
+ },
+ {
+ "epoch": 9.80952380952381,
+ "grad_norm": 0.39147478342056274,
+ "learning_rate": 1.2942612942612942e-06,
+ "loss": 0.0012,
+ "step": 2678
+ },
+ {
+ "epoch": 9.813186813186814,
+ "grad_norm": 0.8738119602203369,
+ "learning_rate": 1.2698412698412697e-06,
+ "loss": 0.0031,
+ "step": 2679
+ },
+ {
+ "epoch": 9.816849816849818,
+ "grad_norm": 76.26824951171875,
+ "learning_rate": 1.2454212454212454e-06,
+ "loss": 0.5945,
+ "step": 2680
+ },
+ {
+ "epoch": 9.820512820512821,
+ "grad_norm": 7.169777870178223,
+ "learning_rate": 1.221001221001221e-06,
+ "loss": 0.0352,
+ "step": 2681
+ },
+ {
+ "epoch": 9.824175824175825,
+ "grad_norm": 23.889244079589844,
+ "learning_rate": 1.1965811965811966e-06,
+ "loss": 0.075,
+ "step": 2682
+ },
+ {
+ "epoch": 9.827838827838828,
+ "grad_norm": 34.23906707763672,
+ "learning_rate": 1.172161172161172e-06,
+ "loss": 0.1658,
+ "step": 2683
+ },
+ {
+ "epoch": 9.831501831501832,
+ "grad_norm": 11.047301292419434,
+ "learning_rate": 1.1477411477411478e-06,
+ "loss": 0.027,
+ "step": 2684
+ },
+ {
+ "epoch": 9.835164835164836,
+ "grad_norm": 32.673614501953125,
+ "learning_rate": 1.1233211233211233e-06,
+ "loss": 0.136,
+ "step": 2685
+ },
+ {
+ "epoch": 9.83882783882784,
+ "grad_norm": 2.088254690170288,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.005,
+ "step": 2686
+ },
+ {
+ "epoch": 9.842490842490843,
+ "grad_norm": 22.962482452392578,
+ "learning_rate": 1.0744810744810745e-06,
+ "loss": 0.0928,
+ "step": 2687
+ },
+ {
+ "epoch": 9.846153846153847,
+ "grad_norm": 0.890580952167511,
+ "learning_rate": 1.0500610500610502e-06,
+ "loss": 0.0049,
+ "step": 2688
+ },
+ {
+ "epoch": 9.84981684981685,
+ "grad_norm": 54.203060150146484,
+ "learning_rate": 1.0256410256410257e-06,
+ "loss": 0.5689,
+ "step": 2689
+ },
+ {
+ "epoch": 9.853479853479854,
+ "grad_norm": 3.851505994796753,
+ "learning_rate": 1.0012210012210012e-06,
+ "loss": 0.0151,
+ "step": 2690
+ },
+ {
+ "epoch": 9.857142857142858,
+ "grad_norm": 0.47577881813049316,
+ "learning_rate": 9.768009768009769e-07,
+ "loss": 0.0019,
+ "step": 2691
+ },
+ {
+ "epoch": 9.860805860805861,
+ "grad_norm": 22.646617889404297,
+ "learning_rate": 9.523809523809523e-07,
+ "loss": 0.0957,
+ "step": 2692
+ },
+ {
+ "epoch": 9.864468864468865,
+ "grad_norm": 3.797091245651245,
+ "learning_rate": 9.279609279609279e-07,
+ "loss": 0.019,
+ "step": 2693
+ },
+ {
+ "epoch": 9.868131868131869,
+ "grad_norm": 14.5469970703125,
+ "learning_rate": 9.035409035409035e-07,
+ "loss": 0.0558,
+ "step": 2694
+ },
+ {
+ "epoch": 9.871794871794872,
+ "grad_norm": 0.9983586668968201,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.0031,
+ "step": 2695
+ },
+ {
+ "epoch": 9.875457875457876,
+ "grad_norm": 24.32516098022461,
+ "learning_rate": 8.547008547008547e-07,
+ "loss": 0.0742,
+ "step": 2696
+ },
+ {
+ "epoch": 9.87912087912088,
+ "grad_norm": 30.38161277770996,
+ "learning_rate": 8.302808302808303e-07,
+ "loss": 0.1136,
+ "step": 2697
+ },
+ {
+ "epoch": 9.882783882783883,
+ "grad_norm": 11.00711441040039,
+ "learning_rate": 8.058608058608059e-07,
+ "loss": 0.0493,
+ "step": 2698
+ },
+ {
+ "epoch": 9.886446886446887,
+ "grad_norm": 64.27474975585938,
+ "learning_rate": 7.814407814407814e-07,
+ "loss": 1.0419,
+ "step": 2699
+ },
+ {
+ "epoch": 9.89010989010989,
+ "grad_norm": 1.8479113578796387,
+ "learning_rate": 7.57020757020757e-07,
+ "loss": 0.0072,
+ "step": 2700
+ },
+ {
+ "epoch": 9.893772893772894,
+ "grad_norm": 37.80733871459961,
+ "learning_rate": 7.326007326007326e-07,
+ "loss": 0.2632,
+ "step": 2701
+ },
+ {
+ "epoch": 9.897435897435898,
+ "grad_norm": 1.2681913375854492,
+ "learning_rate": 7.081807081807082e-07,
+ "loss": 0.0061,
+ "step": 2702
+ },
+ {
+ "epoch": 9.901098901098901,
+ "grad_norm": 25.79681968688965,
+ "learning_rate": 6.837606837606838e-07,
+ "loss": 0.16,
+ "step": 2703
+ },
+ {
+ "epoch": 9.904761904761905,
+ "grad_norm": 21.561281204223633,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.0859,
+ "step": 2704
+ },
+ {
+ "epoch": 9.908424908424909,
+ "grad_norm": 9.584563255310059,
+ "learning_rate": 6.349206349206349e-07,
+ "loss": 0.0586,
+ "step": 2705
+ },
+ {
+ "epoch": 9.912087912087912,
+ "grad_norm": 0.8793169260025024,
+ "learning_rate": 6.105006105006105e-07,
+ "loss": 0.0038,
+ "step": 2706
+ },
+ {
+ "epoch": 9.915750915750916,
+ "grad_norm": 1.0741767883300781,
+ "learning_rate": 5.86080586080586e-07,
+ "loss": 0.0034,
+ "step": 2707
+ },
+ {
+ "epoch": 9.91941391941392,
+ "grad_norm": 4.130448341369629,
+ "learning_rate": 5.616605616605616e-07,
+ "loss": 0.0169,
+ "step": 2708
+ },
+ {
+ "epoch": 9.923076923076923,
+ "grad_norm": 14.90553092956543,
+ "learning_rate": 5.372405372405372e-07,
+ "loss": 0.075,
+ "step": 2709
+ },
+ {
+ "epoch": 9.926739926739927,
+ "grad_norm": 3.487302303314209,
+ "learning_rate": 5.128205128205128e-07,
+ "loss": 0.0118,
+ "step": 2710
+ },
+ {
+ "epoch": 9.93040293040293,
+ "grad_norm": 0.1407042294740677,
+ "learning_rate": 4.884004884004884e-07,
+ "loss": 0.0007,
+ "step": 2711
+ },
+ {
+ "epoch": 9.934065934065934,
+ "grad_norm": 0.7696022987365723,
+ "learning_rate": 4.6398046398046397e-07,
+ "loss": 0.0027,
+ "step": 2712
+ },
+ {
+ "epoch": 9.937728937728938,
+ "grad_norm": 1.4584400653839111,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.0063,
+ "step": 2713
+ },
+ {
+ "epoch": 9.941391941391942,
+ "grad_norm": 2.271784782409668,
+ "learning_rate": 4.1514041514041516e-07,
+ "loss": 0.0091,
+ "step": 2714
+ },
+ {
+ "epoch": 9.945054945054945,
+ "grad_norm": 1.5582953691482544,
+ "learning_rate": 3.907203907203907e-07,
+ "loss": 0.0049,
+ "step": 2715
+ },
+ {
+ "epoch": 9.948717948717949,
+ "grad_norm": 26.0163631439209,
+ "learning_rate": 3.663003663003663e-07,
+ "loss": 0.0794,
+ "step": 2716
+ },
+ {
+ "epoch": 9.952380952380953,
+ "grad_norm": 0.42155659198760986,
+ "learning_rate": 3.418803418803419e-07,
+ "loss": 0.0015,
+ "step": 2717
+ },
+ {
+ "epoch": 9.956043956043956,
+ "grad_norm": 30.18914031982422,
+ "learning_rate": 3.1746031746031743e-07,
+ "loss": 0.3479,
+ "step": 2718
+ },
+ {
+ "epoch": 9.95970695970696,
+ "grad_norm": 15.960806846618652,
+ "learning_rate": 2.93040293040293e-07,
+ "loss": 0.0544,
+ "step": 2719
+ },
+ {
+ "epoch": 9.963369963369964,
+ "grad_norm": 7.016077518463135,
+ "learning_rate": 2.686202686202686e-07,
+ "loss": 0.0192,
+ "step": 2720
+ },
+ {
+ "epoch": 9.967032967032967,
+ "grad_norm": 28.954301834106445,
+ "learning_rate": 2.442002442002442e-07,
+ "loss": 0.0955,
+ "step": 2721
+ },
+ {
+ "epoch": 9.97069597069597,
+ "grad_norm": 0.9817501902580261,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.0044,
+ "step": 2722
+ },
+ {
+ "epoch": 9.974358974358974,
+ "grad_norm": 18.653146743774414,
+ "learning_rate": 1.9536019536019535e-07,
+ "loss": 0.0724,
+ "step": 2723
+ },
+ {
+ "epoch": 9.978021978021978,
+ "grad_norm": 21.797466278076172,
+ "learning_rate": 1.7094017094017095e-07,
+ "loss": 0.2351,
+ "step": 2724
+ },
+ {
+ "epoch": 9.981684981684982,
+ "grad_norm": 29.776206970214844,
+ "learning_rate": 1.465201465201465e-07,
+ "loss": 0.107,
+ "step": 2725
+ },
+ {
+ "epoch": 9.985347985347985,
+ "grad_norm": 1.1500120162963867,
+ "learning_rate": 1.221001221001221e-07,
+ "loss": 0.0034,
+ "step": 2726
+ },
+ {
+ "epoch": 9.989010989010989,
+ "grad_norm": 0.2573654353618622,
+ "learning_rate": 9.768009768009768e-08,
+ "loss": 0.0008,
+ "step": 2727
+ },
+ {
+ "epoch": 9.992673992673993,
+ "grad_norm": 10.110895156860352,
+ "learning_rate": 7.326007326007326e-08,
+ "loss": 0.0412,
+ "step": 2728
+ },
+ {
+ "epoch": 9.996336996336996,
+ "grad_norm": 17.363428115844727,
+ "learning_rate": 4.884004884004884e-08,
+ "loss": 0.0552,
+ "step": 2729
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 0.05963617190718651,
+ "learning_rate": 2.442002442002442e-08,
+ "loss": 0.0002,
+ "step": 2730
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-2730/training_args.bin b/checkpoint-2730/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-2730/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-2730/zero_to_fp32.py b/checkpoint-2730/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-2730/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top-level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
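+#
+# For the checkpoints in this repository, a hypothetical invocation (assuming the usual
+# DeepSpeed layout, i.e. a `global_stepNNNN` tag folder next to this script) might be:
+# python checkpoint-2730/zero_to_fp32.py checkpoint-2730 checkpoint-2730-fp32/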
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+ buffers: dict()
+ param_shapes: dict()
+ shared_params: list
+ ds_version: int
+ frozen_param_shapes: dict()
+ frozen_param_fragments: dict()
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
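+# e.g. (illustrative) sorted(["rank10", "rank2"], key=natural_keys) -> ["rank2", "rank10"]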
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+ # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights
+ # and also handle the case where it was already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
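+ # Illustrative example (assumed values): with world_size=2, align_to == 4, so an
+ # offset of 10 would be rounded up to zero2_align(10) == 12.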
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
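+
+# Worked example (illustrative, assumed values): with unpartitioned_numel=10 and world_size=4,
+# remainder=2, so padding_numel=2 and partitioned_numel=ceil(10/4)=3; 4 ranks * 3 numels
+# = 12 = 10 real elements + 2 padding elements.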
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
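+ # e.g. (illustrative) [{"a": shape_a}, {"b": shape_b}] -> {"a": shape_a, "b": shape_b}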
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # an out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+ Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+ for name, lazy_tensor in state_dict.items():
+ tensor = lazy_tensor.contiguous() # to cpu
+ print(name, tensor)
+ # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+ # a memory-efficient approach to sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+ 1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+ - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+ Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/checkpoint-500/config.json b/checkpoint-500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/checkpoint-500/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/checkpoint-500/global_step500/mp_rank_00_model_states.pt b/checkpoint-500/global_step500/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac290a84cee7e64333e1961cbcc18532e66824ac
--- /dev/null
+++ b/checkpoint-500/global_step500/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e553e256c1875ffa6548d0c27fdf2ae8b09e7632744bb5aa9f1da02759499e0
+size 2271151845
diff --git a/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2ce5d4634259d83573ee2db7b9644e4dd3f4b0e0
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8a245e42a6f66938db13d0ffc509ba86736f4534760d9ffc1c0a5e490d56899
+size 3406552447
diff --git a/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a2a7966d93a9a47c5fe3f27bd9729d6715051a59
--- /dev/null
+++ b/checkpoint-500/global_step500/zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2f6957e3ae3c9246c4ced69f349986b516d7d477a51bf7b42c3744c9ece982f
+size 3406564543
diff --git a/checkpoint-500/latest b/checkpoint-500/latest
new file mode 100644
index 0000000000000000000000000000000000000000..f0b47ce15fff9a01b2a416a473b2148085048a50
--- /dev/null
+++ b/checkpoint-500/latest
@@ -0,0 +1 @@
+global_step500
\ No newline at end of file
diff --git a/checkpoint-500/model.safetensors b/checkpoint-500/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..20b9b45e6321db4a94e880fd1edf9a552b282098
--- /dev/null
+++ b/checkpoint-500/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d74a031fb90325a076810af6cdb0410465222c0de27f9ca63c0731ca126688c
+size 2271071852
diff --git a/checkpoint-500/rng_state_0.pth b/checkpoint-500/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ddb4ef33819ce24b35f7389820fc1b7abee59dcc
--- /dev/null
+++ b/checkpoint-500/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de1b49824e85c223f0ddb468dd85e8d77bede38208a5b2049e602e74916ee32b
+size 14917
diff --git a/checkpoint-500/rng_state_1.pth b/checkpoint-500/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..46aa1b17bbf3b71f61578c9832db5c108001614e
--- /dev/null
+++ b/checkpoint-500/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93e8f5f2fcc28f0366878cf74da1c692266afeb8da10922ff53cc2d2d9eed639
+size 14917
diff --git a/checkpoint-500/scheduler.pt b/checkpoint-500/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b397b1f487f0a918cdf88126a37695323ba27eca
--- /dev/null
+++ b/checkpoint-500/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8688dba30012c47dd21b80aa3e88c6f3a3c1e199dc0c1c6320ec8d918aafc38
+size 1465
diff --git a/checkpoint-500/sentencepiece.bpe.model b/checkpoint-500/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/checkpoint-500/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/checkpoint-500/special_tokens_map.json b/checkpoint-500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/checkpoint-500/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/checkpoint-500/tokenizer.json b/checkpoint-500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/checkpoint-500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/checkpoint-500/tokenizer_config.json b/checkpoint-500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/checkpoint-500/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "model_max_length": 8192,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": ""
+}
diff --git a/checkpoint-500/trainer_state.json b/checkpoint-500/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f8d14cb1d5f4d4e3cf6c2a014f2f179ceff15b7
--- /dev/null
+++ b/checkpoint-500/trainer_state.json
@@ -0,0 +1,3534 @@
+{
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.8315018315018317,
+ "eval_steps": 500,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.003663003663003663,
+ "grad_norm": 33.24192428588867,
+ "learning_rate": 0.0,
+ "loss": 0.9555,
+ "step": 1
+ },
+ {
+ "epoch": 0.007326007326007326,
+ "grad_norm": 23.005327224731445,
+ "learning_rate": 2.1978021978021978e-07,
+ "loss": 0.7557,
+ "step": 2
+ },
+ {
+ "epoch": 0.01098901098901099,
+ "grad_norm": 12.516372680664062,
+ "learning_rate": 4.3956043956043957e-07,
+ "loss": 0.2322,
+ "step": 3
+ },
+ {
+ "epoch": 0.014652014652014652,
+ "grad_norm": 22.350322723388672,
+ "learning_rate": 6.593406593406594e-07,
+ "loss": 0.5263,
+ "step": 4
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 37.14425277709961,
+ "learning_rate": 8.791208791208791e-07,
+ "loss": 0.547,
+ "step": 5
+ },
+ {
+ "epoch": 0.02197802197802198,
+ "grad_norm": 27.73367691040039,
+ "learning_rate": 1.098901098901099e-06,
+ "loss": 0.5922,
+ "step": 6
+ },
+ {
+ "epoch": 0.02564102564102564,
+ "grad_norm": 28.463964462280273,
+ "learning_rate": 1.3186813186813187e-06,
+ "loss": 1.0195,
+ "step": 7
+ },
+ {
+ "epoch": 0.029304029304029304,
+ "grad_norm": 12.688858032226562,
+ "learning_rate": 1.5384615384615385e-06,
+ "loss": 0.1519,
+ "step": 8
+ },
+ {
+ "epoch": 0.03296703296703297,
+ "grad_norm": 24.222930908203125,
+ "learning_rate": 1.7582417582417583e-06,
+ "loss": 0.8008,
+ "step": 9
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 22.45709800720215,
+ "learning_rate": 1.9780219780219782e-06,
+ "loss": 1.1024,
+ "step": 10
+ },
+ {
+ "epoch": 0.040293040293040296,
+ "grad_norm": 23.01483917236328,
+ "learning_rate": 2.197802197802198e-06,
+ "loss": 0.3072,
+ "step": 11
+ },
+ {
+ "epoch": 0.04395604395604396,
+ "grad_norm": 24.276216506958008,
+ "learning_rate": 2.4175824175824177e-06,
+ "loss": 0.8937,
+ "step": 12
+ },
+ {
+ "epoch": 0.047619047619047616,
+ "grad_norm": 24.501638412475586,
+ "learning_rate": 2.6373626373626375e-06,
+ "loss": 0.3748,
+ "step": 13
+ },
+ {
+ "epoch": 0.05128205128205128,
+ "grad_norm": 11.965837478637695,
+ "learning_rate": 2.8571428571428573e-06,
+ "loss": 0.2221,
+ "step": 14
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 8.884313583374023,
+ "learning_rate": 3.076923076923077e-06,
+ "loss": 0.1682,
+ "step": 15
+ },
+ {
+ "epoch": 0.05860805860805861,
+ "grad_norm": 13.486218452453613,
+ "learning_rate": 3.2967032967032968e-06,
+ "loss": 0.3324,
+ "step": 16
+ },
+ {
+ "epoch": 0.06227106227106227,
+ "grad_norm": 29.47451400756836,
+ "learning_rate": 3.5164835164835165e-06,
+ "loss": 0.9247,
+ "step": 17
+ },
+ {
+ "epoch": 0.06593406593406594,
+ "grad_norm": 38.8739128112793,
+ "learning_rate": 3.7362637362637363e-06,
+ "loss": 1.3591,
+ "step": 18
+ },
+ {
+ "epoch": 0.0695970695970696,
+ "grad_norm": 24.181066513061523,
+ "learning_rate": 3.9560439560439565e-06,
+ "loss": 0.4257,
+ "step": 19
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 18.25806427001953,
+ "learning_rate": 4.175824175824176e-06,
+ "loss": 0.3534,
+ "step": 20
+ },
+ {
+ "epoch": 0.07692307692307693,
+ "grad_norm": 4.121458053588867,
+ "learning_rate": 4.395604395604396e-06,
+ "loss": 0.0459,
+ "step": 21
+ },
+ {
+ "epoch": 0.08058608058608059,
+ "grad_norm": 17.89643096923828,
+ "learning_rate": 4.615384615384616e-06,
+ "loss": 0.3707,
+ "step": 22
+ },
+ {
+ "epoch": 0.08424908424908426,
+ "grad_norm": 43.25539016723633,
+ "learning_rate": 4.8351648351648355e-06,
+ "loss": 1.139,
+ "step": 23
+ },
+ {
+ "epoch": 0.08791208791208792,
+ "grad_norm": 19.56612205505371,
+ "learning_rate": 5.054945054945056e-06,
+ "loss": 0.3819,
+ "step": 24
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 18.20578956604004,
+ "learning_rate": 5.274725274725275e-06,
+ "loss": 0.516,
+ "step": 25
+ },
+ {
+ "epoch": 0.09523809523809523,
+ "grad_norm": 23.16927146911621,
+ "learning_rate": 5.494505494505494e-06,
+ "loss": 0.7161,
+ "step": 26
+ },
+ {
+ "epoch": 0.0989010989010989,
+ "grad_norm": 10.449734687805176,
+ "learning_rate": 5.7142857142857145e-06,
+ "loss": 0.3049,
+ "step": 27
+ },
+ {
+ "epoch": 0.10256410256410256,
+ "grad_norm": 33.13974380493164,
+ "learning_rate": 5.934065934065934e-06,
+ "loss": 1.0178,
+ "step": 28
+ },
+ {
+ "epoch": 0.10622710622710622,
+ "grad_norm": 34.373470306396484,
+ "learning_rate": 6.153846153846154e-06,
+ "loss": 1.0162,
+ "step": 29
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 22.710988998413086,
+ "learning_rate": 6.373626373626373e-06,
+ "loss": 0.5866,
+ "step": 30
+ },
+ {
+ "epoch": 0.11355311355311355,
+ "grad_norm": 23.314502716064453,
+ "learning_rate": 6.5934065934065935e-06,
+ "loss": 0.6159,
+ "step": 31
+ },
+ {
+ "epoch": 0.11721611721611722,
+ "grad_norm": 23.481319427490234,
+ "learning_rate": 6.813186813186814e-06,
+ "loss": 0.5441,
+ "step": 32
+ },
+ {
+ "epoch": 0.12087912087912088,
+ "grad_norm": 35.16271209716797,
+ "learning_rate": 7.032967032967033e-06,
+ "loss": 0.9091,
+ "step": 33
+ },
+ {
+ "epoch": 0.12454212454212454,
+ "grad_norm": 32.2298698425293,
+ "learning_rate": 7.252747252747253e-06,
+ "loss": 0.5156,
+ "step": 34
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 36.708953857421875,
+ "learning_rate": 7.4725274725274726e-06,
+ "loss": 1.5839,
+ "step": 35
+ },
+ {
+ "epoch": 0.13186813186813187,
+ "grad_norm": 34.64887619018555,
+ "learning_rate": 7.692307692307692e-06,
+ "loss": 1.2861,
+ "step": 36
+ },
+ {
+ "epoch": 0.13553113553113552,
+ "grad_norm": 20.94220733642578,
+ "learning_rate": 7.912087912087913e-06,
+ "loss": 0.5027,
+ "step": 37
+ },
+ {
+ "epoch": 0.1391941391941392,
+ "grad_norm": 30.93832015991211,
+ "learning_rate": 8.131868131868132e-06,
+ "loss": 0.3584,
+ "step": 38
+ },
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 19.195362091064453,
+ "learning_rate": 8.351648351648352e-06,
+ "loss": 0.6912,
+ "step": 39
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 21.054162979125977,
+ "learning_rate": 8.571428571428571e-06,
+ "loss": 0.8027,
+ "step": 40
+ },
+ {
+ "epoch": 0.15018315018315018,
+ "grad_norm": 16.64535903930664,
+ "learning_rate": 8.791208791208792e-06,
+ "loss": 0.3004,
+ "step": 41
+ },
+ {
+ "epoch": 0.15384615384615385,
+ "grad_norm": 12.1064453125,
+ "learning_rate": 9.010989010989011e-06,
+ "loss": 0.2158,
+ "step": 42
+ },
+ {
+ "epoch": 0.1575091575091575,
+ "grad_norm": 16.20220947265625,
+ "learning_rate": 9.230769230769232e-06,
+ "loss": 0.4137,
+ "step": 43
+ },
+ {
+ "epoch": 0.16117216117216118,
+ "grad_norm": 25.698654174804688,
+ "learning_rate": 9.45054945054945e-06,
+ "loss": 0.7716,
+ "step": 44
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 7.480422019958496,
+ "learning_rate": 9.670329670329671e-06,
+ "loss": 0.1046,
+ "step": 45
+ },
+ {
+ "epoch": 0.1684981684981685,
+ "grad_norm": 38.25539016723633,
+ "learning_rate": 9.89010989010989e-06,
+ "loss": 1.3913,
+ "step": 46
+ },
+ {
+ "epoch": 0.17216117216117216,
+ "grad_norm": 24.113954544067383,
+ "learning_rate": 1.0109890109890111e-05,
+ "loss": 0.4632,
+ "step": 47
+ },
+ {
+ "epoch": 0.17582417582417584,
+ "grad_norm": 22.136140823364258,
+ "learning_rate": 1.032967032967033e-05,
+ "loss": 0.6634,
+ "step": 48
+ },
+ {
+ "epoch": 0.1794871794871795,
+ "grad_norm": 19.417444229125977,
+ "learning_rate": 1.054945054945055e-05,
+ "loss": 0.3991,
+ "step": 49
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 13.265430450439453,
+ "learning_rate": 1.076923076923077e-05,
+ "loss": 0.2613,
+ "step": 50
+ },
+ {
+ "epoch": 0.18681318681318682,
+ "grad_norm": 25.118703842163086,
+ "learning_rate": 1.0989010989010989e-05,
+ "loss": 0.9231,
+ "step": 51
+ },
+ {
+ "epoch": 0.19047619047619047,
+ "grad_norm": 34.06997299194336,
+ "learning_rate": 1.120879120879121e-05,
+ "loss": 1.5809,
+ "step": 52
+ },
+ {
+ "epoch": 0.19413919413919414,
+ "grad_norm": 40.32486343383789,
+ "learning_rate": 1.1428571428571429e-05,
+ "loss": 1.4601,
+ "step": 53
+ },
+ {
+ "epoch": 0.1978021978021978,
+ "grad_norm": 18.847017288208008,
+ "learning_rate": 1.1648351648351648e-05,
+ "loss": 0.2345,
+ "step": 54
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 37.98270034790039,
+ "learning_rate": 1.1868131868131868e-05,
+ "loss": 0.9792,
+ "step": 55
+ },
+ {
+ "epoch": 0.20512820512820512,
+ "grad_norm": 35.72782897949219,
+ "learning_rate": 1.2087912087912089e-05,
+ "loss": 1.1561,
+ "step": 56
+ },
+ {
+ "epoch": 0.2087912087912088,
+ "grad_norm": 18.577186584472656,
+ "learning_rate": 1.2307692307692308e-05,
+ "loss": 0.5577,
+ "step": 57
+ },
+ {
+ "epoch": 0.21245421245421245,
+ "grad_norm": 23.086456298828125,
+ "learning_rate": 1.2527472527472529e-05,
+ "loss": 0.5807,
+ "step": 58
+ },
+ {
+ "epoch": 0.21611721611721613,
+ "grad_norm": 20.053525924682617,
+ "learning_rate": 1.2747252747252747e-05,
+ "loss": 0.7024,
+ "step": 59
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 22.25934410095215,
+ "learning_rate": 1.2967032967032968e-05,
+ "loss": 1.1033,
+ "step": 60
+ },
+ {
+ "epoch": 0.22344322344322345,
+ "grad_norm": 17.981454849243164,
+ "learning_rate": 1.3186813186813187e-05,
+ "loss": 0.2774,
+ "step": 61
+ },
+ {
+ "epoch": 0.2271062271062271,
+ "grad_norm": 11.286524772644043,
+ "learning_rate": 1.3406593406593408e-05,
+ "loss": 0.1802,
+ "step": 62
+ },
+ {
+ "epoch": 0.23076923076923078,
+ "grad_norm": 25.822996139526367,
+ "learning_rate": 1.3626373626373627e-05,
+ "loss": 0.651,
+ "step": 63
+ },
+ {
+ "epoch": 0.23443223443223443,
+ "grad_norm": 16.457286834716797,
+ "learning_rate": 1.3846153846153847e-05,
+ "loss": 0.2946,
+ "step": 64
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 26.712799072265625,
+ "learning_rate": 1.4065934065934066e-05,
+ "loss": 0.7763,
+ "step": 65
+ },
+ {
+ "epoch": 0.24175824175824176,
+ "grad_norm": 21.4671630859375,
+ "learning_rate": 1.4285714285714285e-05,
+ "loss": 0.4132,
+ "step": 66
+ },
+ {
+ "epoch": 0.2454212454212454,
+ "grad_norm": 21.834922790527344,
+ "learning_rate": 1.4505494505494506e-05,
+ "loss": 0.6544,
+ "step": 67
+ },
+ {
+ "epoch": 0.2490842490842491,
+ "grad_norm": 15.396453857421875,
+ "learning_rate": 1.4725274725274726e-05,
+ "loss": 0.2426,
+ "step": 68
+ },
+ {
+ "epoch": 0.25274725274725274,
+ "grad_norm": 8.851480484008789,
+ "learning_rate": 1.4945054945054945e-05,
+ "loss": 0.125,
+ "step": 69
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 22.21581268310547,
+ "learning_rate": 1.5164835164835164e-05,
+ "loss": 0.2585,
+ "step": 70
+ },
+ {
+ "epoch": 0.2600732600732601,
+ "grad_norm": 23.589736938476562,
+ "learning_rate": 1.5384615384615384e-05,
+ "loss": 0.386,
+ "step": 71
+ },
+ {
+ "epoch": 0.26373626373626374,
+ "grad_norm": 51.82280731201172,
+ "learning_rate": 1.5604395604395605e-05,
+ "loss": 1.1802,
+ "step": 72
+ },
+ {
+ "epoch": 0.2673992673992674,
+ "grad_norm": 36.43033981323242,
+ "learning_rate": 1.5824175824175826e-05,
+ "loss": 0.5574,
+ "step": 73
+ },
+ {
+ "epoch": 0.27106227106227104,
+ "grad_norm": 46.151885986328125,
+ "learning_rate": 1.6043956043956043e-05,
+ "loss": 0.9113,
+ "step": 74
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 34.090213775634766,
+ "learning_rate": 1.6263736263736265e-05,
+ "loss": 1.2161,
+ "step": 75
+ },
+ {
+ "epoch": 0.2783882783882784,
+ "grad_norm": 15.469125747680664,
+ "learning_rate": 1.6483516483516486e-05,
+ "loss": 0.1833,
+ "step": 76
+ },
+ {
+ "epoch": 0.28205128205128205,
+ "grad_norm": 26.77261734008789,
+ "learning_rate": 1.6703296703296703e-05,
+ "loss": 0.4095,
+ "step": 77
+ },
+ {
+ "epoch": 0.2857142857142857,
+ "grad_norm": 8.46114444732666,
+ "learning_rate": 1.6923076923076924e-05,
+ "loss": 0.0724,
+ "step": 78
+ },
+ {
+ "epoch": 0.2893772893772894,
+ "grad_norm": 7.954617500305176,
+ "learning_rate": 1.7142857142857142e-05,
+ "loss": 0.057,
+ "step": 79
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 32.47618103027344,
+ "learning_rate": 1.7362637362637366e-05,
+ "loss": 0.8099,
+ "step": 80
+ },
+ {
+ "epoch": 0.2967032967032967,
+ "grad_norm": 34.506927490234375,
+ "learning_rate": 1.7582417582417584e-05,
+ "loss": 0.5867,
+ "step": 81
+ },
+ {
+ "epoch": 0.30036630036630035,
+ "grad_norm": 18.276355743408203,
+ "learning_rate": 1.78021978021978e-05,
+ "loss": 0.4387,
+ "step": 82
+ },
+ {
+ "epoch": 0.304029304029304,
+ "grad_norm": 35.61729431152344,
+ "learning_rate": 1.8021978021978023e-05,
+ "loss": 0.9711,
+ "step": 83
+ },
+ {
+ "epoch": 0.3076923076923077,
+ "grad_norm": 14.001388549804688,
+ "learning_rate": 1.824175824175824e-05,
+ "loss": 0.1431,
+ "step": 84
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 27.521188735961914,
+ "learning_rate": 1.8461538461538465e-05,
+ "loss": 0.3686,
+ "step": 85
+ },
+ {
+ "epoch": 0.315018315018315,
+ "grad_norm": 38.0133171081543,
+ "learning_rate": 1.8681318681318682e-05,
+ "loss": 1.3866,
+ "step": 86
+ },
+ {
+ "epoch": 0.31868131868131866,
+ "grad_norm": 30.895553588867188,
+ "learning_rate": 1.89010989010989e-05,
+ "loss": 0.6676,
+ "step": 87
+ },
+ {
+ "epoch": 0.32234432234432236,
+ "grad_norm": 26.165082931518555,
+ "learning_rate": 1.912087912087912e-05,
+ "loss": 0.4763,
+ "step": 88
+ },
+ {
+ "epoch": 0.326007326007326,
+ "grad_norm": 25.6451473236084,
+ "learning_rate": 1.9340659340659342e-05,
+ "loss": 0.6921,
+ "step": 89
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 31.52683448791504,
+ "learning_rate": 1.9560439560439563e-05,
+ "loss": 0.8449,
+ "step": 90
+ },
+ {
+ "epoch": 0.3333333333333333,
+ "grad_norm": 27.559072494506836,
+ "learning_rate": 1.978021978021978e-05,
+ "loss": 0.9726,
+ "step": 91
+ },
+ {
+ "epoch": 0.336996336996337,
+ "grad_norm": 38.23103713989258,
+ "learning_rate": 1.9999999999999998e-05,
+ "loss": 0.2568,
+ "step": 92
+ },
+ {
+ "epoch": 0.34065934065934067,
+ "grad_norm": 28.575313568115234,
+ "learning_rate": 2.0219780219780223e-05,
+ "loss": 0.7039,
+ "step": 93
+ },
+ {
+ "epoch": 0.3443223443223443,
+ "grad_norm": 31.54847526550293,
+ "learning_rate": 2.043956043956044e-05,
+ "loss": 0.835,
+ "step": 94
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 34.27505111694336,
+ "learning_rate": 2.065934065934066e-05,
+ "loss": 1.0304,
+ "step": 95
+ },
+ {
+ "epoch": 0.3516483516483517,
+ "grad_norm": 23.972553253173828,
+ "learning_rate": 2.087912087912088e-05,
+ "loss": 0.775,
+ "step": 96
+ },
+ {
+ "epoch": 0.3553113553113553,
+ "grad_norm": 18.46526527404785,
+ "learning_rate": 2.10989010989011e-05,
+ "loss": 0.2856,
+ "step": 97
+ },
+ {
+ "epoch": 0.358974358974359,
+ "grad_norm": 22.087251663208008,
+ "learning_rate": 2.131868131868132e-05,
+ "loss": 0.6849,
+ "step": 98
+ },
+ {
+ "epoch": 0.3626373626373626,
+ "grad_norm": 13.144533157348633,
+ "learning_rate": 2.153846153846154e-05,
+ "loss": 0.2766,
+ "step": 99
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 14.740280151367188,
+ "learning_rate": 2.175824175824176e-05,
+ "loss": 0.27,
+ "step": 100
+ },
+ {
+ "epoch": 0.36996336996337,
+ "grad_norm": 17.15272331237793,
+ "learning_rate": 2.1978021978021977e-05,
+ "loss": 0.446,
+ "step": 101
+ },
+ {
+ "epoch": 0.37362637362637363,
+ "grad_norm": 45.865509033203125,
+ "learning_rate": 2.21978021978022e-05,
+ "loss": 2.4265,
+ "step": 102
+ },
+ {
+ "epoch": 0.3772893772893773,
+ "grad_norm": 22.298274993896484,
+ "learning_rate": 2.241758241758242e-05,
+ "loss": 1.5021,
+ "step": 103
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 20.314172744750977,
+ "learning_rate": 2.2637362637362637e-05,
+ "loss": 0.508,
+ "step": 104
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 11.217910766601562,
+ "learning_rate": 2.2857142857142858e-05,
+ "loss": 0.2282,
+ "step": 105
+ },
+ {
+ "epoch": 0.3882783882783883,
+ "grad_norm": 21.36184310913086,
+ "learning_rate": 2.307692307692308e-05,
+ "loss": 0.4684,
+ "step": 106
+ },
+ {
+ "epoch": 0.39194139194139194,
+ "grad_norm": 12.759861946105957,
+ "learning_rate": 2.3296703296703297e-05,
+ "loss": 0.3076,
+ "step": 107
+ },
+ {
+ "epoch": 0.3956043956043956,
+ "grad_norm": 24.42287254333496,
+ "learning_rate": 2.3516483516483518e-05,
+ "loss": 1.3607,
+ "step": 108
+ },
+ {
+ "epoch": 0.3992673992673993,
+ "grad_norm": 13.014902114868164,
+ "learning_rate": 2.3736263736263735e-05,
+ "loss": 0.4984,
+ "step": 109
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 12.8681640625,
+ "learning_rate": 2.395604395604396e-05,
+ "loss": 0.4529,
+ "step": 110
+ },
+ {
+ "epoch": 0.4065934065934066,
+ "grad_norm": 21.19939422607422,
+ "learning_rate": 2.4175824175824177e-05,
+ "loss": 1.0197,
+ "step": 111
+ },
+ {
+ "epoch": 0.41025641025641024,
+ "grad_norm": 20.60430145263672,
+ "learning_rate": 2.4395604395604395e-05,
+ "loss": 0.5367,
+ "step": 112
+ },
+ {
+ "epoch": 0.4139194139194139,
+ "grad_norm": 34.49782943725586,
+ "learning_rate": 2.4615384615384616e-05,
+ "loss": 1.9045,
+ "step": 113
+ },
+ {
+ "epoch": 0.4175824175824176,
+ "grad_norm": 28.380966186523438,
+ "learning_rate": 2.4835164835164834e-05,
+ "loss": 0.9019,
+ "step": 114
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 18.234045028686523,
+ "learning_rate": 2.5054945054945058e-05,
+ "loss": 0.5529,
+ "step": 115
+ },
+ {
+ "epoch": 0.4249084249084249,
+ "grad_norm": 18.759784698486328,
+ "learning_rate": 2.5274725274725276e-05,
+ "loss": 0.85,
+ "step": 116
+ },
+ {
+ "epoch": 0.42857142857142855,
+ "grad_norm": 15.784387588500977,
+ "learning_rate": 2.5494505494505493e-05,
+ "loss": 0.429,
+ "step": 117
+ },
+ {
+ "epoch": 0.43223443223443225,
+ "grad_norm": 23.149036407470703,
+ "learning_rate": 2.5714285714285714e-05,
+ "loss": 0.8784,
+ "step": 118
+ },
+ {
+ "epoch": 0.4358974358974359,
+ "grad_norm": 18.77080535888672,
+ "learning_rate": 2.5934065934065935e-05,
+ "loss": 0.537,
+ "step": 119
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 24.311708450317383,
+ "learning_rate": 2.6153846153846157e-05,
+ "loss": 0.74,
+ "step": 120
+ },
+ {
+ "epoch": 0.4432234432234432,
+ "grad_norm": 15.09874439239502,
+ "learning_rate": 2.6373626373626374e-05,
+ "loss": 0.2978,
+ "step": 121
+ },
+ {
+ "epoch": 0.4468864468864469,
+ "grad_norm": 19.65829086303711,
+ "learning_rate": 2.6593406593406592e-05,
+ "loss": 0.8287,
+ "step": 122
+ },
+ {
+ "epoch": 0.45054945054945056,
+ "grad_norm": 21.237165451049805,
+ "learning_rate": 2.6813186813186816e-05,
+ "loss": 1.1967,
+ "step": 123
+ },
+ {
+ "epoch": 0.4542124542124542,
+ "grad_norm": 25.737913131713867,
+ "learning_rate": 2.7032967032967034e-05,
+ "loss": 0.9414,
+ "step": 124
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 22.84954833984375,
+ "learning_rate": 2.7252747252747255e-05,
+ "loss": 0.398,
+ "step": 125
+ },
+ {
+ "epoch": 0.46153846153846156,
+ "grad_norm": 35.505027770996094,
+ "learning_rate": 2.7472527472527473e-05,
+ "loss": 1.0497,
+ "step": 126
+ },
+ {
+ "epoch": 0.4652014652014652,
+ "grad_norm": 6.610748291015625,
+ "learning_rate": 2.7692307692307694e-05,
+ "loss": 0.0491,
+ "step": 127
+ },
+ {
+ "epoch": 0.46886446886446886,
+ "grad_norm": 33.34388732910156,
+ "learning_rate": 2.7912087912087915e-05,
+ "loss": 0.8991,
+ "step": 128
+ },
+ {
+ "epoch": 0.4725274725274725,
+ "grad_norm": 17.098581314086914,
+ "learning_rate": 2.8131868131868132e-05,
+ "loss": 0.3217,
+ "step": 129
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 11.438309669494629,
+ "learning_rate": 2.8351648351648353e-05,
+ "loss": 0.4301,
+ "step": 130
+ },
+ {
+ "epoch": 0.47985347985347987,
+ "grad_norm": 25.803213119506836,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 0.8937,
+ "step": 131
+ },
+ {
+ "epoch": 0.4835164835164835,
+ "grad_norm": 16.61037826538086,
+ "learning_rate": 2.8791208791208792e-05,
+ "loss": 0.3603,
+ "step": 132
+ },
+ {
+ "epoch": 0.48717948717948717,
+ "grad_norm": 21.329975128173828,
+ "learning_rate": 2.9010989010989013e-05,
+ "loss": 0.4332,
+ "step": 133
+ },
+ {
+ "epoch": 0.4908424908424908,
+ "grad_norm": 24.83706283569336,
+ "learning_rate": 2.923076923076923e-05,
+ "loss": 0.3967,
+ "step": 134
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 8.3758544921875,
+ "learning_rate": 2.945054945054945e-05,
+ "loss": 0.1197,
+ "step": 135
+ },
+ {
+ "epoch": 0.4981684981684982,
+ "grad_norm": 31.096702575683594,
+ "learning_rate": 2.9670329670329673e-05,
+ "loss": 2.2867,
+ "step": 136
+ },
+ {
+ "epoch": 0.5018315018315018,
+ "grad_norm": 17.094390869140625,
+ "learning_rate": 2.989010989010989e-05,
+ "loss": 0.3064,
+ "step": 137
+ },
+ {
+ "epoch": 0.5054945054945055,
+ "grad_norm": 23.401243209838867,
+ "learning_rate": 3.010989010989011e-05,
+ "loss": 0.9779,
+ "step": 138
+ },
+ {
+ "epoch": 0.5091575091575091,
+ "grad_norm": 19.55811309814453,
+ "learning_rate": 3.032967032967033e-05,
+ "loss": 0.5665,
+ "step": 139
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 18.668622970581055,
+ "learning_rate": 3.0549450549450547e-05,
+ "loss": 0.7068,
+ "step": 140
+ },
+ {
+ "epoch": 0.5164835164835165,
+ "grad_norm": 9.49342155456543,
+ "learning_rate": 3.076923076923077e-05,
+ "loss": 0.2228,
+ "step": 141
+ },
+ {
+ "epoch": 0.5201465201465202,
+ "grad_norm": 17.131006240844727,
+ "learning_rate": 3.0989010989010995e-05,
+ "loss": 0.8947,
+ "step": 142
+ },
+ {
+ "epoch": 0.5238095238095238,
+ "grad_norm": 14.087484359741211,
+ "learning_rate": 3.120879120879121e-05,
+ "loss": 0.4394,
+ "step": 143
+ },
+ {
+ "epoch": 0.5274725274725275,
+ "grad_norm": 14.246976852416992,
+ "learning_rate": 3.142857142857143e-05,
+ "loss": 0.7608,
+ "step": 144
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 27.454071044921875,
+ "learning_rate": 3.164835164835165e-05,
+ "loss": 1.8982,
+ "step": 145
+ },
+ {
+ "epoch": 0.5347985347985348,
+ "grad_norm": 8.580923080444336,
+ "learning_rate": 3.1868131868131866e-05,
+ "loss": 0.2199,
+ "step": 146
+ },
+ {
+ "epoch": 0.5384615384615384,
+ "grad_norm": 12.200552940368652,
+ "learning_rate": 3.208791208791209e-05,
+ "loss": 0.4007,
+ "step": 147
+ },
+ {
+ "epoch": 0.5421245421245421,
+ "grad_norm": 11.350752830505371,
+ "learning_rate": 3.230769230769231e-05,
+ "loss": 0.5359,
+ "step": 148
+ },
+ {
+ "epoch": 0.5457875457875457,
+ "grad_norm": 21.45020866394043,
+ "learning_rate": 3.252747252747253e-05,
+ "loss": 1.4639,
+ "step": 149
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 29.84933090209961,
+ "learning_rate": 3.274725274725274e-05,
+ "loss": 0.8764,
+ "step": 150
+ },
+ {
+ "epoch": 0.5531135531135531,
+ "grad_norm": 14.899048805236816,
+ "learning_rate": 3.296703296703297e-05,
+ "loss": 0.3817,
+ "step": 151
+ },
+ {
+ "epoch": 0.5567765567765568,
+ "grad_norm": 14.95295238494873,
+ "learning_rate": 3.318681318681319e-05,
+ "loss": 1.0153,
+ "step": 152
+ },
+ {
+ "epoch": 0.5604395604395604,
+ "grad_norm": 13.904314994812012,
+ "learning_rate": 3.3406593406593406e-05,
+ "loss": 0.9891,
+ "step": 153
+ },
+ {
+ "epoch": 0.5641025641025641,
+ "grad_norm": 14.465546607971191,
+ "learning_rate": 3.362637362637363e-05,
+ "loss": 0.4935,
+ "step": 154
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 15.22211742401123,
+ "learning_rate": 3.384615384615385e-05,
+ "loss": 0.4973,
+ "step": 155
+ },
+ {
+ "epoch": 0.5714285714285714,
+ "grad_norm": 19.977941513061523,
+ "learning_rate": 3.406593406593406e-05,
+ "loss": 0.5768,
+ "step": 156
+ },
+ {
+ "epoch": 0.575091575091575,
+ "grad_norm": 21.778785705566406,
+ "learning_rate": 3.4285714285714284e-05,
+ "loss": 0.541,
+ "step": 157
+ },
+ {
+ "epoch": 0.5787545787545788,
+ "grad_norm": 7.957052707672119,
+ "learning_rate": 3.4505494505494505e-05,
+ "loss": 0.1676,
+ "step": 158
+ },
+ {
+ "epoch": 0.5824175824175825,
+ "grad_norm": 10.105476379394531,
+ "learning_rate": 3.472527472527473e-05,
+ "loss": 0.14,
+ "step": 159
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 13.895249366760254,
+ "learning_rate": 3.494505494505495e-05,
+ "loss": 0.2135,
+ "step": 160
+ },
+ {
+ "epoch": 0.5897435897435898,
+ "grad_norm": 15.14104175567627,
+ "learning_rate": 3.516483516483517e-05,
+ "loss": 0.2299,
+ "step": 161
+ },
+ {
+ "epoch": 0.5934065934065934,
+ "grad_norm": 27.537504196166992,
+ "learning_rate": 3.538461538461539e-05,
+ "loss": 0.4517,
+ "step": 162
+ },
+ {
+ "epoch": 0.5970695970695971,
+ "grad_norm": 22.290597915649414,
+ "learning_rate": 3.56043956043956e-05,
+ "loss": 0.2144,
+ "step": 163
+ },
+ {
+ "epoch": 0.6007326007326007,
+ "grad_norm": 24.176603317260742,
+ "learning_rate": 3.5824175824175824e-05,
+ "loss": 0.4184,
+ "step": 164
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 43.716552734375,
+ "learning_rate": 3.6043956043956045e-05,
+ "loss": 0.7672,
+ "step": 165
+ },
+ {
+ "epoch": 0.608058608058608,
+ "grad_norm": 5.516793727874756,
+ "learning_rate": 3.626373626373626e-05,
+ "loss": 0.0332,
+ "step": 166
+ },
+ {
+ "epoch": 0.6117216117216118,
+ "grad_norm": 13.202600479125977,
+ "learning_rate": 3.648351648351648e-05,
+ "loss": 0.1388,
+ "step": 167
+ },
+ {
+ "epoch": 0.6153846153846154,
+ "grad_norm": 8.389626502990723,
+ "learning_rate": 3.670329670329671e-05,
+ "loss": 0.0284,
+ "step": 168
+ },
+ {
+ "epoch": 0.6190476190476191,
+ "grad_norm": 11.500190734863281,
+ "learning_rate": 3.692307692307693e-05,
+ "loss": 0.1778,
+ "step": 169
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 49.76407241821289,
+ "learning_rate": 3.7142857142857143e-05,
+ "loss": 0.8075,
+ "step": 170
+ },
+ {
+ "epoch": 0.6263736263736264,
+ "grad_norm": 49.758705139160156,
+ "learning_rate": 3.7362637362637365e-05,
+ "loss": 1.3106,
+ "step": 171
+ },
+ {
+ "epoch": 0.63003663003663,
+ "grad_norm": 7.655544281005859,
+ "learning_rate": 3.7582417582417586e-05,
+ "loss": 0.1362,
+ "step": 172
+ },
+ {
+ "epoch": 0.6336996336996337,
+ "grad_norm": 29.778133392333984,
+ "learning_rate": 3.78021978021978e-05,
+ "loss": 0.2411,
+ "step": 173
+ },
+ {
+ "epoch": 0.6373626373626373,
+ "grad_norm": 23.79543113708496,
+ "learning_rate": 3.802197802197802e-05,
+ "loss": 0.5665,
+ "step": 174
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 25.333166122436523,
+ "learning_rate": 3.824175824175824e-05,
+ "loss": 0.5821,
+ "step": 175
+ },
+ {
+ "epoch": 0.6446886446886447,
+ "grad_norm": 38.367759704589844,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 1.1098,
+ "step": 176
+ },
+ {
+ "epoch": 0.6483516483516484,
+ "grad_norm": 31.53361701965332,
+ "learning_rate": 3.8681318681318684e-05,
+ "loss": 1.5399,
+ "step": 177
+ },
+ {
+ "epoch": 0.652014652014652,
+ "grad_norm": 8.453901290893555,
+ "learning_rate": 3.8901098901098905e-05,
+ "loss": 0.1327,
+ "step": 178
+ },
+ {
+ "epoch": 0.6556776556776557,
+ "grad_norm": 32.465980529785156,
+ "learning_rate": 3.9120879120879126e-05,
+ "loss": 0.8133,
+ "step": 179
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 21.503114700317383,
+ "learning_rate": 3.934065934065934e-05,
+ "loss": 0.2472,
+ "step": 180
+ },
+ {
+ "epoch": 0.663003663003663,
+ "grad_norm": 28.240659713745117,
+ "learning_rate": 3.956043956043956e-05,
+ "loss": 0.4718,
+ "step": 181
+ },
+ {
+ "epoch": 0.6666666666666666,
+ "grad_norm": 6.919331073760986,
+ "learning_rate": 3.978021978021978e-05,
+ "loss": 0.0947,
+ "step": 182
+ },
+ {
+ "epoch": 0.6703296703296703,
+ "grad_norm": 20.96783447265625,
+ "learning_rate": 3.9999999999999996e-05,
+ "loss": 1.1602,
+ "step": 183
+ },
+ {
+ "epoch": 0.673992673992674,
+ "grad_norm": 17.967914581298828,
+ "learning_rate": 4.021978021978022e-05,
+ "loss": 0.3684,
+ "step": 184
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 29.837678909301758,
+ "learning_rate": 4.0439560439560445e-05,
+ "loss": 0.5452,
+ "step": 185
+ },
+ {
+ "epoch": 0.6813186813186813,
+ "grad_norm": 37.0803108215332,
+ "learning_rate": 4.065934065934066e-05,
+ "loss": 0.5983,
+ "step": 186
+ },
+ {
+ "epoch": 0.684981684981685,
+ "grad_norm": 23.339448928833008,
+ "learning_rate": 4.087912087912088e-05,
+ "loss": 0.6255,
+ "step": 187
+ },
+ {
+ "epoch": 0.6886446886446886,
+ "grad_norm": 13.779767036437988,
+ "learning_rate": 4.10989010989011e-05,
+ "loss": 0.3705,
+ "step": 188
+ },
+ {
+ "epoch": 0.6923076923076923,
+ "grad_norm": 15.792436599731445,
+ "learning_rate": 4.131868131868132e-05,
+ "loss": 0.4128,
+ "step": 189
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 14.106623649597168,
+ "learning_rate": 4.153846153846154e-05,
+ "loss": 0.2914,
+ "step": 190
+ },
+ {
+ "epoch": 0.6996336996336996,
+ "grad_norm": 34.428951263427734,
+ "learning_rate": 4.175824175824176e-05,
+ "loss": 1.2232,
+ "step": 191
+ },
+ {
+ "epoch": 0.7032967032967034,
+ "grad_norm": 15.847033500671387,
+ "learning_rate": 4.197802197802198e-05,
+ "loss": 0.4129,
+ "step": 192
+ },
+ {
+ "epoch": 0.706959706959707,
+ "grad_norm": 17.834794998168945,
+ "learning_rate": 4.21978021978022e-05,
+ "loss": 0.4158,
+ "step": 193
+ },
+ {
+ "epoch": 0.7106227106227107,
+ "grad_norm": 29.807823181152344,
+ "learning_rate": 4.241758241758242e-05,
+ "loss": 0.9741,
+ "step": 194
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 15.9482421875,
+ "learning_rate": 4.263736263736264e-05,
+ "loss": 0.1953,
+ "step": 195
+ },
+ {
+ "epoch": 0.717948717948718,
+ "grad_norm": 37.89487075805664,
+ "learning_rate": 4.2857142857142856e-05,
+ "loss": 1.1018,
+ "step": 196
+ },
+ {
+ "epoch": 0.7216117216117216,
+ "grad_norm": 24.060779571533203,
+ "learning_rate": 4.307692307692308e-05,
+ "loss": 0.4774,
+ "step": 197
+ },
+ {
+ "epoch": 0.7252747252747253,
+ "grad_norm": 18.701725006103516,
+ "learning_rate": 4.32967032967033e-05,
+ "loss": 0.2641,
+ "step": 198
+ },
+ {
+ "epoch": 0.7289377289377289,
+ "grad_norm": 32.18348693847656,
+ "learning_rate": 4.351648351648352e-05,
+ "loss": 0.6958,
+ "step": 199
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 16.504337310791016,
+ "learning_rate": 4.3736263736263734e-05,
+ "loss": 0.1933,
+ "step": 200
+ },
+ {
+ "epoch": 0.7362637362637363,
+ "grad_norm": 34.5928840637207,
+ "learning_rate": 4.3956043956043955e-05,
+ "loss": 0.3712,
+ "step": 201
+ },
+ {
+ "epoch": 0.73992673992674,
+ "grad_norm": 47.998512268066406,
+ "learning_rate": 4.417582417582418e-05,
+ "loss": 1.4578,
+ "step": 202
+ },
+ {
+ "epoch": 0.7435897435897436,
+ "grad_norm": 29.871829986572266,
+ "learning_rate": 4.43956043956044e-05,
+ "loss": 0.7628,
+ "step": 203
+ },
+ {
+ "epoch": 0.7472527472527473,
+ "grad_norm": 53.70481491088867,
+ "learning_rate": 4.461538461538462e-05,
+ "loss": 1.4017,
+ "step": 204
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 58.087646484375,
+ "learning_rate": 4.483516483516484e-05,
+ "loss": 1.3168,
+ "step": 205
+ },
+ {
+ "epoch": 0.7545787545787546,
+ "grad_norm": 44.62531280517578,
+ "learning_rate": 4.505494505494505e-05,
+ "loss": 0.8959,
+ "step": 206
+ },
+ {
+ "epoch": 0.7582417582417582,
+ "grad_norm": 18.427953720092773,
+ "learning_rate": 4.5274725274725274e-05,
+ "loss": 0.4202,
+ "step": 207
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 32.799434661865234,
+ "learning_rate": 4.5494505494505495e-05,
+ "loss": 0.5432,
+ "step": 208
+ },
+ {
+ "epoch": 0.7655677655677655,
+ "grad_norm": 22.136354446411133,
+ "learning_rate": 4.5714285714285716e-05,
+ "loss": 1.0474,
+ "step": 209
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 14.09807014465332,
+ "learning_rate": 4.593406593406593e-05,
+ "loss": 0.4048,
+ "step": 210
+ },
+ {
+ "epoch": 0.7728937728937729,
+ "grad_norm": 16.818132400512695,
+ "learning_rate": 4.615384615384616e-05,
+ "loss": 0.4772,
+ "step": 211
+ },
+ {
+ "epoch": 0.7765567765567766,
+ "grad_norm": 36.87644577026367,
+ "learning_rate": 4.637362637362638e-05,
+ "loss": 1.0203,
+ "step": 212
+ },
+ {
+ "epoch": 0.7802197802197802,
+ "grad_norm": 23.279033660888672,
+ "learning_rate": 4.6593406593406593e-05,
+ "loss": 0.8223,
+ "step": 213
+ },
+ {
+ "epoch": 0.7838827838827839,
+ "grad_norm": 21.23172378540039,
+ "learning_rate": 4.6813186813186814e-05,
+ "loss": 0.6838,
+ "step": 214
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 15.129582405090332,
+ "learning_rate": 4.7032967032967035e-05,
+ "loss": 0.3939,
+ "step": 215
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 38.20903778076172,
+ "learning_rate": 4.725274725274725e-05,
+ "loss": 0.4395,
+ "step": 216
+ },
+ {
+ "epoch": 0.7948717948717948,
+ "grad_norm": 23.428571701049805,
+ "learning_rate": 4.747252747252747e-05,
+ "loss": 0.6657,
+ "step": 217
+ },
+ {
+ "epoch": 0.7985347985347986,
+ "grad_norm": 15.892741203308105,
+ "learning_rate": 4.769230769230769e-05,
+ "loss": 0.3867,
+ "step": 218
+ },
+ {
+ "epoch": 0.8021978021978022,
+ "grad_norm": 44.7977180480957,
+ "learning_rate": 4.791208791208792e-05,
+ "loss": 1.4335,
+ "step": 219
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 18.13700294494629,
+ "learning_rate": 4.8131868131868134e-05,
+ "loss": 0.3965,
+ "step": 220
+ },
+ {
+ "epoch": 0.8095238095238095,
+ "grad_norm": 23.00497817993164,
+ "learning_rate": 4.8351648351648355e-05,
+ "loss": 1.1319,
+ "step": 221
+ },
+ {
+ "epoch": 0.8131868131868132,
+ "grad_norm": 27.63648796081543,
+ "learning_rate": 4.8571428571428576e-05,
+ "loss": 0.7782,
+ "step": 222
+ },
+ {
+ "epoch": 0.8168498168498168,
+ "grad_norm": 23.91630744934082,
+ "learning_rate": 4.879120879120879e-05,
+ "loss": 0.7277,
+ "step": 223
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 27.157682418823242,
+ "learning_rate": 4.901098901098901e-05,
+ "loss": 0.8309,
+ "step": 224
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 20.686105728149414,
+ "learning_rate": 4.923076923076923e-05,
+ "loss": 0.4645,
+ "step": 225
+ },
+ {
+ "epoch": 0.8278388278388278,
+ "grad_norm": 18.44706916809082,
+ "learning_rate": 4.9450549450549446e-05,
+ "loss": 0.6298,
+ "step": 226
+ },
+ {
+ "epoch": 0.8315018315018315,
+ "grad_norm": 34.66194152832031,
+ "learning_rate": 4.967032967032967e-05,
+ "loss": 1.3282,
+ "step": 227
+ },
+ {
+ "epoch": 0.8351648351648352,
+ "grad_norm": 26.68456268310547,
+ "learning_rate": 4.9890109890109895e-05,
+ "loss": 0.8652,
+ "step": 228
+ },
+ {
+ "epoch": 0.8388278388278388,
+ "grad_norm": 18.36819839477539,
+ "learning_rate": 5.0109890109890116e-05,
+ "loss": 0.425,
+ "step": 229
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 10.212838172912598,
+ "learning_rate": 5.032967032967033e-05,
+ "loss": 0.2183,
+ "step": 230
+ },
+ {
+ "epoch": 0.8461538461538461,
+ "grad_norm": 28.40265464782715,
+ "learning_rate": 5.054945054945055e-05,
+ "loss": 1.6894,
+ "step": 231
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 48.70882797241211,
+ "learning_rate": 5.076923076923077e-05,
+ "loss": 0.8564,
+ "step": 232
+ },
+ {
+ "epoch": 0.8534798534798534,
+ "grad_norm": 38.576541900634766,
+ "learning_rate": 5.098901098901099e-05,
+ "loss": 0.8013,
+ "step": 233
+ },
+ {
+ "epoch": 0.8571428571428571,
+ "grad_norm": 20.17264747619629,
+ "learning_rate": 5.120879120879121e-05,
+ "loss": 0.4553,
+ "step": 234
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 33.383182525634766,
+ "learning_rate": 5.142857142857143e-05,
+ "loss": 0.9591,
+ "step": 235
+ },
+ {
+ "epoch": 0.8644688644688645,
+ "grad_norm": 22.734106063842773,
+ "learning_rate": 5.164835164835165e-05,
+ "loss": 0.589,
+ "step": 236
+ },
+ {
+ "epoch": 0.8681318681318682,
+ "grad_norm": 19.77442741394043,
+ "learning_rate": 5.186813186813187e-05,
+ "loss": 0.7066,
+ "step": 237
+ },
+ {
+ "epoch": 0.8717948717948718,
+ "grad_norm": 32.36431884765625,
+ "learning_rate": 5.208791208791209e-05,
+ "loss": 0.8878,
+ "step": 238
+ },
+ {
+ "epoch": 0.8754578754578755,
+ "grad_norm": 37.60574722290039,
+ "learning_rate": 5.230769230769231e-05,
+ "loss": 1.0034,
+ "step": 239
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 28.051666259765625,
+ "learning_rate": 5.252747252747253e-05,
+ "loss": 0.9695,
+ "step": 240
+ },
+ {
+ "epoch": 0.8827838827838828,
+ "grad_norm": 31.55886459350586,
+ "learning_rate": 5.274725274725275e-05,
+ "loss": 0.5416,
+ "step": 241
+ },
+ {
+ "epoch": 0.8864468864468864,
+ "grad_norm": 17.856632232666016,
+ "learning_rate": 5.296703296703297e-05,
+ "loss": 0.3647,
+ "step": 242
+ },
+ {
+ "epoch": 0.8901098901098901,
+ "grad_norm": 42.52962112426758,
+ "learning_rate": 5.3186813186813184e-05,
+ "loss": 1.3661,
+ "step": 243
+ },
+ {
+ "epoch": 0.8937728937728938,
+ "grad_norm": 26.439769744873047,
+ "learning_rate": 5.3406593406593405e-05,
+ "loss": 0.6629,
+ "step": 244
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 37.46576690673828,
+ "learning_rate": 5.362637362637363e-05,
+ "loss": 0.9631,
+ "step": 245
+ },
+ {
+ "epoch": 0.9010989010989011,
+ "grad_norm": 29.706708908081055,
+ "learning_rate": 5.384615384615385e-05,
+ "loss": 1.0034,
+ "step": 246
+ },
+ {
+ "epoch": 0.9047619047619048,
+ "grad_norm": 33.62871551513672,
+ "learning_rate": 5.406593406593407e-05,
+ "loss": 0.8036,
+ "step": 247
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 41.97051239013672,
+ "learning_rate": 5.428571428571429e-05,
+ "loss": 1.309,
+ "step": 248
+ },
+ {
+ "epoch": 0.9120879120879121,
+ "grad_norm": 37.57841110229492,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 1.2444,
+ "step": 249
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 21.220727920532227,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.6556,
+ "step": 250
+ },
+ {
+ "epoch": 0.9194139194139194,
+ "grad_norm": 19.963764190673828,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.7328,
+ "step": 251
+ },
+ {
+ "epoch": 0.9230769230769231,
+ "grad_norm": 21.196062088012695,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.5752,
+ "step": 252
+ },
+ {
+ "epoch": 0.9267399267399268,
+ "grad_norm": 23.587268829345703,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.4801,
+ "step": 253
+ },
+ {
+ "epoch": 0.9304029304029304,
+ "grad_norm": 16.09604263305664,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.4795,
+ "step": 254
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 22.61296272277832,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.5807,
+ "step": 255
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 28.715890884399414,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 1.3141,
+ "step": 256
+ },
+ {
+ "epoch": 0.9413919413919414,
+ "grad_norm": 37.11213684082031,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.7168,
+ "step": 257
+ },
+ {
+ "epoch": 0.945054945054945,
+ "grad_norm": 13.693246841430664,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.3207,
+ "step": 258
+ },
+ {
+ "epoch": 0.9487179487179487,
+ "grad_norm": 18.186216354370117,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.6265,
+ "step": 259
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 23.68426513671875,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5226,
+ "step": 260
+ },
+ {
+ "epoch": 0.9560439560439561,
+ "grad_norm": 19.154836654663086,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 1.0116,
+ "step": 261
+ },
+ {
+ "epoch": 0.9597069597069597,
+ "grad_norm": 17.64719009399414,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.5992,
+ "step": 262
+ },
+ {
+ "epoch": 0.9633699633699634,
+ "grad_norm": 25.542757034301758,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.8129,
+ "step": 263
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 25.94204330444336,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.2194,
+ "step": 264
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 13.693342208862305,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.2565,
+ "step": 265
+ },
+ {
+ "epoch": 0.9743589743589743,
+ "grad_norm": 20.760122299194336,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 0.4023,
+ "step": 266
+ },
+ {
+ "epoch": 0.978021978021978,
+ "grad_norm": 20.00895118713379,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.2468,
+ "step": 267
+ },
+ {
+ "epoch": 0.9816849816849816,
+ "grad_norm": 25.56069564819336,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 0.5648,
+ "step": 268
+ },
+ {
+ "epoch": 0.9853479853479854,
+ "grad_norm": 38.19970703125,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.544,
+ "step": 269
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 37.63619613647461,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.7556,
+ "step": 270
+ },
+ {
+ "epoch": 0.9926739926739927,
+ "grad_norm": 10.586868286132812,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.1003,
+ "step": 271
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 17.579208374023438,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 0.2931,
+ "step": 272
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 24.657121658325195,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 0.2372,
+ "step": 273
+ },
+ {
+ "epoch": 1.0036630036630036,
+ "grad_norm": 29.52134895324707,
+ "learning_rate": 6e-05,
+ "loss": 0.5077,
+ "step": 274
+ },
+ {
+ "epoch": 1.0073260073260073,
+ "grad_norm": 51.900062561035156,
+ "learning_rate": 5.997557997557998e-05,
+ "loss": 0.4404,
+ "step": 275
+ },
+ {
+ "epoch": 1.010989010989011,
+ "grad_norm": 18.682769775390625,
+ "learning_rate": 5.995115995115995e-05,
+ "loss": 0.2405,
+ "step": 276
+ },
+ {
+ "epoch": 1.0146520146520146,
+ "grad_norm": 87.95014953613281,
+ "learning_rate": 5.992673992673993e-05,
+ "loss": 2.8585,
+ "step": 277
+ },
+ {
+ "epoch": 1.0183150183150182,
+ "grad_norm": 67.03990936279297,
+ "learning_rate": 5.990231990231991e-05,
+ "loss": 0.9746,
+ "step": 278
+ },
+ {
+ "epoch": 1.021978021978022,
+ "grad_norm": 47.63545227050781,
+ "learning_rate": 5.987789987789988e-05,
+ "loss": 0.241,
+ "step": 279
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 33.62876892089844,
+ "learning_rate": 5.985347985347986e-05,
+ "loss": 1.0003,
+ "step": 280
+ },
+ {
+ "epoch": 1.0293040293040292,
+ "grad_norm": 30.26620864868164,
+ "learning_rate": 5.982905982905983e-05,
+ "loss": 0.7767,
+ "step": 281
+ },
+ {
+ "epoch": 1.032967032967033,
+ "grad_norm": 33.785770416259766,
+ "learning_rate": 5.98046398046398e-05,
+ "loss": 0.899,
+ "step": 282
+ },
+ {
+ "epoch": 1.0366300366300367,
+ "grad_norm": 33.753849029541016,
+ "learning_rate": 5.978021978021978e-05,
+ "loss": 1.8225,
+ "step": 283
+ },
+ {
+ "epoch": 1.0402930402930404,
+ "grad_norm": 16.58989143371582,
+ "learning_rate": 5.975579975579976e-05,
+ "loss": 0.6211,
+ "step": 284
+ },
+ {
+ "epoch": 1.043956043956044,
+ "grad_norm": 23.08768653869629,
+ "learning_rate": 5.973137973137973e-05,
+ "loss": 0.7541,
+ "step": 285
+ },
+ {
+ "epoch": 1.0476190476190477,
+ "grad_norm": 24.57805824279785,
+ "learning_rate": 5.970695970695971e-05,
+ "loss": 0.8278,
+ "step": 286
+ },
+ {
+ "epoch": 1.0512820512820513,
+ "grad_norm": 25.1593017578125,
+ "learning_rate": 5.968253968253968e-05,
+ "loss": 0.6932,
+ "step": 287
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 29.984054565429688,
+ "learning_rate": 5.965811965811966e-05,
+ "loss": 0.6987,
+ "step": 288
+ },
+ {
+ "epoch": 1.0586080586080586,
+ "grad_norm": 28.183151245117188,
+ "learning_rate": 5.963369963369964e-05,
+ "loss": 0.8771,
+ "step": 289
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 15.349969863891602,
+ "learning_rate": 5.960927960927961e-05,
+ "loss": 0.2906,
+ "step": 290
+ },
+ {
+ "epoch": 1.065934065934066,
+ "grad_norm": 17.618196487426758,
+ "learning_rate": 5.958485958485959e-05,
+ "loss": 0.595,
+ "step": 291
+ },
+ {
+ "epoch": 1.0695970695970696,
+ "grad_norm": 40.537925720214844,
+ "learning_rate": 5.9560439560439566e-05,
+ "loss": 1.3881,
+ "step": 292
+ },
+ {
+ "epoch": 1.0732600732600732,
+ "grad_norm": 41.12261962890625,
+ "learning_rate": 5.953601953601954e-05,
+ "loss": 0.5402,
+ "step": 293
+ },
+ {
+ "epoch": 1.0769230769230769,
+ "grad_norm": 38.4654655456543,
+ "learning_rate": 5.951159951159951e-05,
+ "loss": 0.3097,
+ "step": 294
+ },
+ {
+ "epoch": 1.0805860805860805,
+ "grad_norm": 34.19886016845703,
+ "learning_rate": 5.948717948717949e-05,
+ "loss": 1.0228,
+ "step": 295
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 19.727413177490234,
+ "learning_rate": 5.946275946275946e-05,
+ "loss": 0.1755,
+ "step": 296
+ },
+ {
+ "epoch": 1.0879120879120878,
+ "grad_norm": 33.413352966308594,
+ "learning_rate": 5.943833943833944e-05,
+ "loss": 0.8087,
+ "step": 297
+ },
+ {
+ "epoch": 1.0915750915750915,
+ "grad_norm": 29.848875045776367,
+ "learning_rate": 5.941391941391942e-05,
+ "loss": 0.673,
+ "step": 298
+ },
+ {
+ "epoch": 1.0952380952380953,
+ "grad_norm": 18.643922805786133,
+ "learning_rate": 5.938949938949939e-05,
+ "loss": 0.4759,
+ "step": 299
+ },
+ {
+ "epoch": 1.098901098901099,
+ "grad_norm": 28.923099517822266,
+ "learning_rate": 5.936507936507937e-05,
+ "loss": 0.6555,
+ "step": 300
+ },
+ {
+ "epoch": 1.1025641025641026,
+ "grad_norm": 26.4990177154541,
+ "learning_rate": 5.9340659340659345e-05,
+ "loss": 0.4679,
+ "step": 301
+ },
+ {
+ "epoch": 1.1062271062271063,
+ "grad_norm": 43.54881286621094,
+ "learning_rate": 5.931623931623932e-05,
+ "loss": 1.0861,
+ "step": 302
+ },
+ {
+ "epoch": 1.10989010989011,
+ "grad_norm": 32.66098403930664,
+ "learning_rate": 5.9291819291819295e-05,
+ "loss": 0.677,
+ "step": 303
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 43.79314422607422,
+ "learning_rate": 5.9267399267399274e-05,
+ "loss": 0.8883,
+ "step": 304
+ },
+ {
+ "epoch": 1.1172161172161172,
+ "grad_norm": 44.49085235595703,
+ "learning_rate": 5.9242979242979245e-05,
+ "loss": 0.9553,
+ "step": 305
+ },
+ {
+ "epoch": 1.120879120879121,
+ "grad_norm": 31.713787078857422,
+ "learning_rate": 5.9218559218559224e-05,
+ "loss": 0.6352,
+ "step": 306
+ },
+ {
+ "epoch": 1.1245421245421245,
+ "grad_norm": 19.930402755737305,
+ "learning_rate": 5.9194139194139196e-05,
+ "loss": 0.7023,
+ "step": 307
+ },
+ {
+ "epoch": 1.1282051282051282,
+ "grad_norm": 20.157196044921875,
+ "learning_rate": 5.916971916971917e-05,
+ "loss": 0.6241,
+ "step": 308
+ },
+ {
+ "epoch": 1.1318681318681318,
+ "grad_norm": 26.819135665893555,
+ "learning_rate": 5.9145299145299146e-05,
+ "loss": 0.4788,
+ "step": 309
+ },
+ {
+ "epoch": 1.1355311355311355,
+ "grad_norm": 24.948625564575195,
+ "learning_rate": 5.9120879120879124e-05,
+ "loss": 0.698,
+ "step": 310
+ },
+ {
+ "epoch": 1.1391941391941391,
+ "grad_norm": 15.883389472961426,
+ "learning_rate": 5.9096459096459096e-05,
+ "loss": 0.3325,
+ "step": 311
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 25.214584350585938,
+ "learning_rate": 5.9072039072039074e-05,
+ "loss": 0.4776,
+ "step": 312
+ },
+ {
+ "epoch": 1.1465201465201464,
+ "grad_norm": 27.4523983001709,
+ "learning_rate": 5.9047619047619046e-05,
+ "loss": 0.6155,
+ "step": 313
+ },
+ {
+ "epoch": 1.15018315018315,
+ "grad_norm": 48.60593795776367,
+ "learning_rate": 5.9023199023199024e-05,
+ "loss": 1.7225,
+ "step": 314
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 27.19314193725586,
+ "learning_rate": 5.8998778998779e-05,
+ "loss": 0.6805,
+ "step": 315
+ },
+ {
+ "epoch": 1.1575091575091574,
+ "grad_norm": 44.678768157958984,
+ "learning_rate": 5.8974358974358975e-05,
+ "loss": 0.5721,
+ "step": 316
+ },
+ {
+ "epoch": 1.1611721611721613,
+ "grad_norm": 12.109644889831543,
+ "learning_rate": 5.894993894993895e-05,
+ "loss": 0.1079,
+ "step": 317
+ },
+ {
+ "epoch": 1.164835164835165,
+ "grad_norm": 45.254730224609375,
+ "learning_rate": 5.892551892551893e-05,
+ "loss": 1.1492,
+ "step": 318
+ },
+ {
+ "epoch": 1.1684981684981686,
+ "grad_norm": 65.83439636230469,
+ "learning_rate": 5.89010989010989e-05,
+ "loss": 0.7049,
+ "step": 319
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 43.5418586730957,
+ "learning_rate": 5.8876678876678875e-05,
+ "loss": 0.4628,
+ "step": 320
+ },
+ {
+ "epoch": 1.1758241758241759,
+ "grad_norm": 137.285400390625,
+ "learning_rate": 5.885225885225885e-05,
+ "loss": 1.4227,
+ "step": 321
+ },
+ {
+ "epoch": 1.1794871794871795,
+ "grad_norm": 42.895565032958984,
+ "learning_rate": 5.8827838827838825e-05,
+ "loss": 0.4264,
+ "step": 322
+ },
+ {
+ "epoch": 1.1831501831501832,
+ "grad_norm": 10.602986335754395,
+ "learning_rate": 5.8803418803418803e-05,
+ "loss": 0.0494,
+ "step": 323
+ },
+ {
+ "epoch": 1.1868131868131868,
+ "grad_norm": 103.92290496826172,
+ "learning_rate": 5.877899877899878e-05,
+ "loss": 2.0111,
+ "step": 324
+ },
+ {
+ "epoch": 1.1904761904761905,
+ "grad_norm": 36.497764587402344,
+ "learning_rate": 5.8754578754578754e-05,
+ "loss": 0.4768,
+ "step": 325
+ },
+ {
+ "epoch": 1.1941391941391941,
+ "grad_norm": 45.52228546142578,
+ "learning_rate": 5.873015873015873e-05,
+ "loss": 0.994,
+ "step": 326
+ },
+ {
+ "epoch": 1.1978021978021978,
+ "grad_norm": 24.81894302368164,
+ "learning_rate": 5.870573870573871e-05,
+ "loss": 0.5563,
+ "step": 327
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 49.82950210571289,
+ "learning_rate": 5.868131868131868e-05,
+ "loss": 1.5448,
+ "step": 328
+ },
+ {
+ "epoch": 1.205128205128205,
+ "grad_norm": 23.945913314819336,
+ "learning_rate": 5.865689865689866e-05,
+ "loss": 0.5256,
+ "step": 329
+ },
+ {
+ "epoch": 1.2087912087912087,
+ "grad_norm": 20.63251304626465,
+ "learning_rate": 5.863247863247864e-05,
+ "loss": 0.3698,
+ "step": 330
+ },
+ {
+ "epoch": 1.2124542124542124,
+ "grad_norm": 32.270328521728516,
+ "learning_rate": 5.860805860805861e-05,
+ "loss": 0.3518,
+ "step": 331
+ },
+ {
+ "epoch": 1.2161172161172162,
+ "grad_norm": 32.445716857910156,
+ "learning_rate": 5.858363858363858e-05,
+ "loss": 0.857,
+ "step": 332
+ },
+ {
+ "epoch": 1.2197802197802199,
+ "grad_norm": 59.69521713256836,
+ "learning_rate": 5.855921855921856e-05,
+ "loss": 1.3786,
+ "step": 333
+ },
+ {
+ "epoch": 1.2234432234432235,
+ "grad_norm": 32.79878234863281,
+ "learning_rate": 5.853479853479853e-05,
+ "loss": 0.7648,
+ "step": 334
+ },
+ {
+ "epoch": 1.2271062271062272,
+ "grad_norm": 26.749393463134766,
+ "learning_rate": 5.851037851037851e-05,
+ "loss": 0.4723,
+ "step": 335
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 40.744102478027344,
+ "learning_rate": 5.848595848595849e-05,
+ "loss": 1.0543,
+ "step": 336
+ },
+ {
+ "epoch": 1.2344322344322345,
+ "grad_norm": 34.2275505065918,
+ "learning_rate": 5.846153846153846e-05,
+ "loss": 0.4533,
+ "step": 337
+ },
+ {
+ "epoch": 1.2380952380952381,
+ "grad_norm": 49.648136138916016,
+ "learning_rate": 5.843711843711844e-05,
+ "loss": 1.2112,
+ "step": 338
+ },
+ {
+ "epoch": 1.2417582417582418,
+ "grad_norm": 64.69720458984375,
+ "learning_rate": 5.841269841269841e-05,
+ "loss": 1.2234,
+ "step": 339
+ },
+ {
+ "epoch": 1.2454212454212454,
+ "grad_norm": 16.81964111328125,
+ "learning_rate": 5.838827838827839e-05,
+ "loss": 0.297,
+ "step": 340
+ },
+ {
+ "epoch": 1.249084249084249,
+ "grad_norm": 17.393678665161133,
+ "learning_rate": 5.836385836385837e-05,
+ "loss": 0.2504,
+ "step": 341
+ },
+ {
+ "epoch": 1.2527472527472527,
+ "grad_norm": 64.2254409790039,
+ "learning_rate": 5.833943833943834e-05,
+ "loss": 1.3656,
+ "step": 342
+ },
+ {
+ "epoch": 1.2564102564102564,
+ "grad_norm": 48.991249084472656,
+ "learning_rate": 5.831501831501832e-05,
+ "loss": 1.0819,
+ "step": 343
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 22.78063201904297,
+ "learning_rate": 5.82905982905983e-05,
+ "loss": 0.1792,
+ "step": 344
+ },
+ {
+ "epoch": 1.2637362637362637,
+ "grad_norm": 35.463233947753906,
+ "learning_rate": 5.826617826617826e-05,
+ "loss": 0.5663,
+ "step": 345
+ },
+ {
+ "epoch": 1.2673992673992673,
+ "grad_norm": 54.528953552246094,
+ "learning_rate": 5.824175824175824e-05,
+ "loss": 1.5814,
+ "step": 346
+ },
+ {
+ "epoch": 1.271062271062271,
+ "grad_norm": 44.60401916503906,
+ "learning_rate": 5.821733821733822e-05,
+ "loss": 0.6471,
+ "step": 347
+ },
+ {
+ "epoch": 1.2747252747252746,
+ "grad_norm": 2.6468827724456787,
+ "learning_rate": 5.819291819291819e-05,
+ "loss": 0.0288,
+ "step": 348
+ },
+ {
+ "epoch": 1.2783882783882783,
+ "grad_norm": 21.465364456176758,
+ "learning_rate": 5.816849816849817e-05,
+ "loss": 0.5259,
+ "step": 349
+ },
+ {
+ "epoch": 1.282051282051282,
+ "grad_norm": 51.20866012573242,
+ "learning_rate": 5.814407814407815e-05,
+ "loss": 0.8054,
+ "step": 350
+ },
+ {
+ "epoch": 1.2857142857142856,
+ "grad_norm": 33.52774429321289,
+ "learning_rate": 5.811965811965812e-05,
+ "loss": 0.494,
+ "step": 351
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 39.15644836425781,
+ "learning_rate": 5.80952380952381e-05,
+ "loss": 1.6315,
+ "step": 352
+ },
+ {
+ "epoch": 1.293040293040293,
+ "grad_norm": 24.35202407836914,
+ "learning_rate": 5.8070818070818076e-05,
+ "loss": 0.6189,
+ "step": 353
+ },
+ {
+ "epoch": 1.2967032967032968,
+ "grad_norm": 39.99496841430664,
+ "learning_rate": 5.804639804639805e-05,
+ "loss": 1.2323,
+ "step": 354
+ },
+ {
+ "epoch": 1.3003663003663004,
+ "grad_norm": 26.282432556152344,
+ "learning_rate": 5.8021978021978026e-05,
+ "loss": 0.5383,
+ "step": 355
+ },
+ {
+ "epoch": 1.304029304029304,
+ "grad_norm": 36.909969329833984,
+ "learning_rate": 5.7997557997558004e-05,
+ "loss": 1.6886,
+ "step": 356
+ },
+ {
+ "epoch": 1.3076923076923077,
+ "grad_norm": 18.90056037902832,
+ "learning_rate": 5.7973137973137976e-05,
+ "loss": 0.7226,
+ "step": 357
+ },
+ {
+ "epoch": 1.3113553113553114,
+ "grad_norm": 21.10304832458496,
+ "learning_rate": 5.794871794871795e-05,
+ "loss": 0.8914,
+ "step": 358
+ },
+ {
+ "epoch": 1.315018315018315,
+ "grad_norm": 18.380769729614258,
+ "learning_rate": 5.7924297924297926e-05,
+ "loss": 1.4304,
+ "step": 359
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 17.992050170898438,
+ "learning_rate": 5.78998778998779e-05,
+ "loss": 1.0023,
+ "step": 360
+ },
+ {
+ "epoch": 1.3223443223443223,
+ "grad_norm": 17.944400787353516,
+ "learning_rate": 5.7875457875457876e-05,
+ "loss": 0.7734,
+ "step": 361
+ },
+ {
+ "epoch": 1.326007326007326,
+ "grad_norm": 19.117143630981445,
+ "learning_rate": 5.7851037851037855e-05,
+ "loss": 0.6923,
+ "step": 362
+ },
+ {
+ "epoch": 1.3296703296703296,
+ "grad_norm": 21.4644718170166,
+ "learning_rate": 5.7826617826617826e-05,
+ "loss": 0.666,
+ "step": 363
+ },
+ {
+ "epoch": 1.3333333333333333,
+ "grad_norm": 25.951030731201172,
+ "learning_rate": 5.7802197802197805e-05,
+ "loss": 1.522,
+ "step": 364
+ },
+ {
+ "epoch": 1.3369963369963371,
+ "grad_norm": 32.20412063598633,
+ "learning_rate": 5.7777777777777776e-05,
+ "loss": 1.5771,
+ "step": 365
+ },
+ {
+ "epoch": 1.3406593406593408,
+ "grad_norm": 26.847576141357422,
+ "learning_rate": 5.7753357753357755e-05,
+ "loss": 1.3427,
+ "step": 366
+ },
+ {
+ "epoch": 1.3443223443223444,
+ "grad_norm": 18.596710205078125,
+ "learning_rate": 5.772893772893773e-05,
+ "loss": 0.5533,
+ "step": 367
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 23.6543025970459,
+ "learning_rate": 5.7704517704517705e-05,
+ "loss": 0.581,
+ "step": 368
+ },
+ {
+ "epoch": 1.3516483516483517,
+ "grad_norm": 13.732353210449219,
+ "learning_rate": 5.7680097680097684e-05,
+ "loss": 0.1908,
+ "step": 369
+ },
+ {
+ "epoch": 1.3553113553113554,
+ "grad_norm": 21.231159210205078,
+ "learning_rate": 5.765567765567766e-05,
+ "loss": 0.5858,
+ "step": 370
+ },
+ {
+ "epoch": 1.358974358974359,
+ "grad_norm": 18.647363662719727,
+ "learning_rate": 5.763125763125763e-05,
+ "loss": 0.6205,
+ "step": 371
+ },
+ {
+ "epoch": 1.3626373626373627,
+ "grad_norm": 20.302942276000977,
+ "learning_rate": 5.7606837606837605e-05,
+ "loss": 0.3637,
+ "step": 372
+ },
+ {
+ "epoch": 1.3663003663003663,
+ "grad_norm": 18.72137451171875,
+ "learning_rate": 5.7582417582417584e-05,
+ "loss": 0.2262,
+ "step": 373
+ },
+ {
+ "epoch": 1.36996336996337,
+ "grad_norm": 32.225738525390625,
+ "learning_rate": 5.7557997557997555e-05,
+ "loss": 0.5696,
+ "step": 374
+ },
+ {
+ "epoch": 1.3736263736263736,
+ "grad_norm": 21.453779220581055,
+ "learning_rate": 5.7533577533577534e-05,
+ "loss": 0.3533,
+ "step": 375
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 26.601511001586914,
+ "learning_rate": 5.750915750915751e-05,
+ "loss": 0.438,
+ "step": 376
+ },
+ {
+ "epoch": 1.380952380952381,
+ "grad_norm": 49.10448455810547,
+ "learning_rate": 5.7484737484737484e-05,
+ "loss": 0.6742,
+ "step": 377
+ },
+ {
+ "epoch": 1.3846153846153846,
+ "grad_norm": 51.251136779785156,
+ "learning_rate": 5.746031746031746e-05,
+ "loss": 0.7096,
+ "step": 378
+ },
+ {
+ "epoch": 1.3882783882783882,
+ "grad_norm": 35.14614486694336,
+ "learning_rate": 5.743589743589744e-05,
+ "loss": 1.5348,
+ "step": 379
+ },
+ {
+ "epoch": 1.3919413919413919,
+ "grad_norm": 58.83134078979492,
+ "learning_rate": 5.741147741147741e-05,
+ "loss": 1.303,
+ "step": 380
+ },
+ {
+ "epoch": 1.3956043956043955,
+ "grad_norm": 34.27029800415039,
+ "learning_rate": 5.738705738705739e-05,
+ "loss": 0.3682,
+ "step": 381
+ },
+ {
+ "epoch": 1.3992673992673992,
+ "grad_norm": 59.508628845214844,
+ "learning_rate": 5.736263736263737e-05,
+ "loss": 0.6489,
+ "step": 382
+ },
+ {
+ "epoch": 1.4029304029304028,
+ "grad_norm": 24.804059982299805,
+ "learning_rate": 5.733821733821734e-05,
+ "loss": 0.325,
+ "step": 383
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 20.69612693786621,
+ "learning_rate": 5.731379731379731e-05,
+ "loss": 0.1529,
+ "step": 384
+ },
+ {
+ "epoch": 1.4102564102564101,
+ "grad_norm": 29.134044647216797,
+ "learning_rate": 5.728937728937729e-05,
+ "loss": 0.8694,
+ "step": 385
+ },
+ {
+ "epoch": 1.4139194139194138,
+ "grad_norm": 37.44430923461914,
+ "learning_rate": 5.726495726495726e-05,
+ "loss": 0.9174,
+ "step": 386
+ },
+ {
+ "epoch": 1.4175824175824177,
+ "grad_norm": 36.84721755981445,
+ "learning_rate": 5.724053724053724e-05,
+ "loss": 0.3522,
+ "step": 387
+ },
+ {
+ "epoch": 1.4212454212454213,
+ "grad_norm": 44.15989685058594,
+ "learning_rate": 5.721611721611722e-05,
+ "loss": 1.4677,
+ "step": 388
+ },
+ {
+ "epoch": 1.424908424908425,
+ "grad_norm": 16.73012351989746,
+ "learning_rate": 5.719169719169719e-05,
+ "loss": 0.1621,
+ "step": 389
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 35.41815185546875,
+ "learning_rate": 5.716727716727717e-05,
+ "loss": 0.6702,
+ "step": 390
+ },
+ {
+ "epoch": 1.4322344322344323,
+ "grad_norm": 19.04936408996582,
+ "learning_rate": 5.714285714285714e-05,
+ "loss": 0.1845,
+ "step": 391
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 22.89434242248535,
+ "learning_rate": 5.711843711843712e-05,
+ "loss": 0.5694,
+ "step": 392
+ },
+ {
+ "epoch": 1.4395604395604396,
+ "grad_norm": 22.125951766967773,
+ "learning_rate": 5.70940170940171e-05,
+ "loss": 0.821,
+ "step": 393
+ },
+ {
+ "epoch": 1.4432234432234432,
+ "grad_norm": 37.83376693725586,
+ "learning_rate": 5.706959706959707e-05,
+ "loss": 0.4658,
+ "step": 394
+ },
+ {
+ "epoch": 1.4468864468864469,
+ "grad_norm": 38.37764358520508,
+ "learning_rate": 5.704517704517705e-05,
+ "loss": 0.4146,
+ "step": 395
+ },
+ {
+ "epoch": 1.4505494505494505,
+ "grad_norm": 21.50092315673828,
+ "learning_rate": 5.702075702075703e-05,
+ "loss": 0.5044,
+ "step": 396
+ },
+ {
+ "epoch": 1.4542124542124542,
+ "grad_norm": 20.02173614501953,
+ "learning_rate": 5.699633699633699e-05,
+ "loss": 0.4955,
+ "step": 397
+ },
+ {
+ "epoch": 1.4578754578754578,
+ "grad_norm": 21.474336624145508,
+ "learning_rate": 5.697191697191697e-05,
+ "loss": 0.3818,
+ "step": 398
+ },
+ {
+ "epoch": 1.4615384615384617,
+ "grad_norm": 22.903839111328125,
+ "learning_rate": 5.694749694749695e-05,
+ "loss": 0.7603,
+ "step": 399
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 20.22893524169922,
+ "learning_rate": 5.692307692307692e-05,
+ "loss": 0.5612,
+ "step": 400
+ },
+ {
+ "epoch": 1.468864468864469,
+ "grad_norm": 32.34550857543945,
+ "learning_rate": 5.68986568986569e-05,
+ "loss": 0.4659,
+ "step": 401
+ },
+ {
+ "epoch": 1.4725274725274726,
+ "grad_norm": 49.979034423828125,
+ "learning_rate": 5.687423687423688e-05,
+ "loss": 0.6784,
+ "step": 402
+ },
+ {
+ "epoch": 1.4761904761904763,
+ "grad_norm": 79.79581451416016,
+ "learning_rate": 5.684981684981685e-05,
+ "loss": 0.9404,
+ "step": 403
+ },
+ {
+ "epoch": 1.47985347985348,
+ "grad_norm": 17.678560256958008,
+ "learning_rate": 5.682539682539683e-05,
+ "loss": 0.1675,
+ "step": 404
+ },
+ {
+ "epoch": 1.4835164835164836,
+ "grad_norm": 21.246519088745117,
+ "learning_rate": 5.6800976800976806e-05,
+ "loss": 0.2428,
+ "step": 405
+ },
+ {
+ "epoch": 1.4871794871794872,
+ "grad_norm": 34.815452575683594,
+ "learning_rate": 5.677655677655678e-05,
+ "loss": 0.3925,
+ "step": 406
+ },
+ {
+ "epoch": 1.4908424908424909,
+ "grad_norm": 73.8591079711914,
+ "learning_rate": 5.6752136752136756e-05,
+ "loss": 1.3163,
+ "step": 407
+ },
+ {
+ "epoch": 1.4945054945054945,
+ "grad_norm": 66.63922882080078,
+ "learning_rate": 5.6727716727716735e-05,
+ "loss": 0.9653,
+ "step": 408
+ },
+ {
+ "epoch": 1.4981684981684982,
+ "grad_norm": 52.39488220214844,
+ "learning_rate": 5.6703296703296706e-05,
+ "loss": 0.9322,
+ "step": 409
+ },
+ {
+ "epoch": 1.5018315018315018,
+ "grad_norm": 13.078998565673828,
+ "learning_rate": 5.667887667887668e-05,
+ "loss": 0.1168,
+ "step": 410
+ },
+ {
+ "epoch": 1.5054945054945055,
+ "grad_norm": 41.32448959350586,
+ "learning_rate": 5.6654456654456657e-05,
+ "loss": 0.9296,
+ "step": 411
+ },
+ {
+ "epoch": 1.5091575091575091,
+ "grad_norm": 26.448543548583984,
+ "learning_rate": 5.663003663003663e-05,
+ "loss": 0.5474,
+ "step": 412
+ },
+ {
+ "epoch": 1.5128205128205128,
+ "grad_norm": 29.58432960510254,
+ "learning_rate": 5.660561660561661e-05,
+ "loss": 0.6573,
+ "step": 413
+ },
+ {
+ "epoch": 1.5164835164835164,
+ "grad_norm": 28.568214416503906,
+ "learning_rate": 5.6581196581196585e-05,
+ "loss": 0.9223,
+ "step": 414
+ },
+ {
+ "epoch": 1.52014652014652,
+ "grad_norm": 31.92661476135254,
+ "learning_rate": 5.655677655677656e-05,
+ "loss": 1.0601,
+ "step": 415
+ },
+ {
+ "epoch": 1.5238095238095237,
+ "grad_norm": 31.934263229370117,
+ "learning_rate": 5.6532356532356535e-05,
+ "loss": 0.6288,
+ "step": 416
+ },
+ {
+ "epoch": 1.5274725274725274,
+ "grad_norm": 21.51350975036621,
+ "learning_rate": 5.650793650793651e-05,
+ "loss": 0.7378,
+ "step": 417
+ },
+ {
+ "epoch": 1.531135531135531,
+ "grad_norm": 19.010095596313477,
+ "learning_rate": 5.6483516483516485e-05,
+ "loss": 0.7792,
+ "step": 418
+ },
+ {
+ "epoch": 1.5347985347985347,
+ "grad_norm": 21.7001895904541,
+ "learning_rate": 5.6459096459096464e-05,
+ "loss": 0.7885,
+ "step": 419
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 21.400882720947266,
+ "learning_rate": 5.6434676434676436e-05,
+ "loss": 0.942,
+ "step": 420
+ },
+ {
+ "epoch": 1.542124542124542,
+ "grad_norm": 30.14664649963379,
+ "learning_rate": 5.6410256410256414e-05,
+ "loss": 0.7675,
+ "step": 421
+ },
+ {
+ "epoch": 1.5457875457875456,
+ "grad_norm": 33.25088882446289,
+ "learning_rate": 5.6385836385836386e-05,
+ "loss": 1.1349,
+ "step": 422
+ },
+ {
+ "epoch": 1.5494505494505495,
+ "grad_norm": 22.923208236694336,
+ "learning_rate": 5.636141636141636e-05,
+ "loss": 0.7145,
+ "step": 423
+ },
+ {
+ "epoch": 1.5531135531135531,
+ "grad_norm": 20.00519371032715,
+ "learning_rate": 5.6336996336996336e-05,
+ "loss": 0.5107,
+ "step": 424
+ },
+ {
+ "epoch": 1.5567765567765568,
+ "grad_norm": 21.95383071899414,
+ "learning_rate": 5.6312576312576314e-05,
+ "loss": 0.7836,
+ "step": 425
+ },
+ {
+ "epoch": 1.5604395604395604,
+ "grad_norm": 27.24031639099121,
+ "learning_rate": 5.6288156288156286e-05,
+ "loss": 0.4955,
+ "step": 426
+ },
+ {
+ "epoch": 1.564102564102564,
+ "grad_norm": 45.48428726196289,
+ "learning_rate": 5.6263736263736264e-05,
+ "loss": 1.016,
+ "step": 427
+ },
+ {
+ "epoch": 1.5677655677655677,
+ "grad_norm": 20.055965423583984,
+ "learning_rate": 5.623931623931624e-05,
+ "loss": 0.325,
+ "step": 428
+ },
+ {
+ "epoch": 1.5714285714285714,
+ "grad_norm": 22.020767211914062,
+ "learning_rate": 5.6214896214896215e-05,
+ "loss": 0.45,
+ "step": 429
+ },
+ {
+ "epoch": 1.575091575091575,
+ "grad_norm": 32.608741760253906,
+ "learning_rate": 5.619047619047619e-05,
+ "loss": 0.6561,
+ "step": 430
+ },
+ {
+ "epoch": 1.578754578754579,
+ "grad_norm": 38.14396667480469,
+ "learning_rate": 5.616605616605617e-05,
+ "loss": 0.6387,
+ "step": 431
+ },
+ {
+ "epoch": 1.5824175824175826,
+ "grad_norm": 26.266948699951172,
+ "learning_rate": 5.614163614163614e-05,
+ "loss": 0.5593,
+ "step": 432
+ },
+ {
+ "epoch": 1.5860805860805862,
+ "grad_norm": 16.37360954284668,
+ "learning_rate": 5.611721611721612e-05,
+ "loss": 0.1591,
+ "step": 433
+ },
+ {
+ "epoch": 1.5897435897435899,
+ "grad_norm": 21.9448299407959,
+ "learning_rate": 5.60927960927961e-05,
+ "loss": 0.2129,
+ "step": 434
+ },
+ {
+ "epoch": 1.5934065934065935,
+ "grad_norm": 30.096052169799805,
+ "learning_rate": 5.6068376068376065e-05,
+ "loss": 0.3384,
+ "step": 435
+ },
+ {
+ "epoch": 1.5970695970695972,
+ "grad_norm": 40.15864181518555,
+ "learning_rate": 5.604395604395604e-05,
+ "loss": 0.5181,
+ "step": 436
+ },
+ {
+ "epoch": 1.6007326007326008,
+ "grad_norm": 63.40933609008789,
+ "learning_rate": 5.601953601953602e-05,
+ "loss": 0.8834,
+ "step": 437
+ },
+ {
+ "epoch": 1.6043956043956045,
+ "grad_norm": 40.0787353515625,
+ "learning_rate": 5.5995115995115993e-05,
+ "loss": 0.437,
+ "step": 438
+ },
+ {
+ "epoch": 1.6080586080586081,
+ "grad_norm": 40.136863708496094,
+ "learning_rate": 5.597069597069597e-05,
+ "loss": 0.4834,
+ "step": 439
+ },
+ {
+ "epoch": 1.6117216117216118,
+ "grad_norm": 27.898317337036133,
+ "learning_rate": 5.594627594627595e-05,
+ "loss": 0.4862,
+ "step": 440
+ },
+ {
+ "epoch": 1.6153846153846154,
+ "grad_norm": 31.5762882232666,
+ "learning_rate": 5.592185592185592e-05,
+ "loss": 0.1878,
+ "step": 441
+ },
+ {
+ "epoch": 1.619047619047619,
+ "grad_norm": 88.90093994140625,
+ "learning_rate": 5.58974358974359e-05,
+ "loss": 1.3343,
+ "step": 442
+ },
+ {
+ "epoch": 1.6227106227106227,
+ "grad_norm": 57.7340202331543,
+ "learning_rate": 5.587301587301587e-05,
+ "loss": 0.3032,
+ "step": 443
+ },
+ {
+ "epoch": 1.6263736263736264,
+ "grad_norm": 57.28425979614258,
+ "learning_rate": 5.584859584859585e-05,
+ "loss": 1.3972,
+ "step": 444
+ },
+ {
+ "epoch": 1.63003663003663,
+ "grad_norm": 39.866302490234375,
+ "learning_rate": 5.582417582417583e-05,
+ "loss": 0.4026,
+ "step": 445
+ },
+ {
+ "epoch": 1.6336996336996337,
+ "grad_norm": 41.72932815551758,
+ "learning_rate": 5.57997557997558e-05,
+ "loss": 0.5407,
+ "step": 446
+ },
+ {
+ "epoch": 1.6373626373626373,
+ "grad_norm": 60.77634811401367,
+ "learning_rate": 5.577533577533578e-05,
+ "loss": 0.8581,
+ "step": 447
+ },
+ {
+ "epoch": 1.641025641025641,
+ "grad_norm": 28.382030487060547,
+ "learning_rate": 5.575091575091575e-05,
+ "loss": 0.3759,
+ "step": 448
+ },
+ {
+ "epoch": 1.6446886446886446,
+ "grad_norm": 62.1085205078125,
+ "learning_rate": 5.572649572649572e-05,
+ "loss": 1.0749,
+ "step": 449
+ },
+ {
+ "epoch": 1.6483516483516483,
+ "grad_norm": 41.8302001953125,
+ "learning_rate": 5.57020757020757e-05,
+ "loss": 0.5884,
+ "step": 450
+ },
+ {
+ "epoch": 1.652014652014652,
+ "grad_norm": 24.128931045532227,
+ "learning_rate": 5.567765567765568e-05,
+ "loss": 0.6113,
+ "step": 451
+ },
+ {
+ "epoch": 1.6556776556776556,
+ "grad_norm": 19.634384155273438,
+ "learning_rate": 5.565323565323565e-05,
+ "loss": 0.3902,
+ "step": 452
+ },
+ {
+ "epoch": 1.6593406593406592,
+ "grad_norm": 18.17875099182129,
+ "learning_rate": 5.562881562881563e-05,
+ "loss": 0.3137,
+ "step": 453
+ },
+ {
+ "epoch": 1.6630036630036629,
+ "grad_norm": 39.68446731567383,
+ "learning_rate": 5.560439560439561e-05,
+ "loss": 0.7587,
+ "step": 454
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 29.387836456298828,
+ "learning_rate": 5.557997557997558e-05,
+ "loss": 0.6397,
+ "step": 455
+ },
+ {
+ "epoch": 1.6703296703296702,
+ "grad_norm": 19.08424949645996,
+ "learning_rate": 5.555555555555556e-05,
+ "loss": 0.2484,
+ "step": 456
+ },
+ {
+ "epoch": 1.673992673992674,
+ "grad_norm": 36.07701873779297,
+ "learning_rate": 5.553113553113554e-05,
+ "loss": 0.8587,
+ "step": 457
+ },
+ {
+ "epoch": 1.6776556776556777,
+ "grad_norm": 52.062339782714844,
+ "learning_rate": 5.550671550671551e-05,
+ "loss": 1.6675,
+ "step": 458
+ },
+ {
+ "epoch": 1.6813186813186813,
+ "grad_norm": 45.415687561035156,
+ "learning_rate": 5.548229548229549e-05,
+ "loss": 1.653,
+ "step": 459
+ },
+ {
+ "epoch": 1.684981684981685,
+ "grad_norm": 31.457420349121094,
+ "learning_rate": 5.5457875457875465e-05,
+ "loss": 0.4578,
+ "step": 460
+ },
+ {
+ "epoch": 1.6886446886446886,
+ "grad_norm": 33.14665603637695,
+ "learning_rate": 5.543345543345543e-05,
+ "loss": 1.3327,
+ "step": 461
+ },
+ {
+ "epoch": 1.6923076923076923,
+ "grad_norm": 25.720529556274414,
+ "learning_rate": 5.540903540903541e-05,
+ "loss": 0.5,
+ "step": 462
+ },
+ {
+ "epoch": 1.695970695970696,
+ "grad_norm": 23.71514129638672,
+ "learning_rate": 5.538461538461539e-05,
+ "loss": 0.434,
+ "step": 463
+ },
+ {
+ "epoch": 1.6996336996336996,
+ "grad_norm": 45.231746673583984,
+ "learning_rate": 5.536019536019536e-05,
+ "loss": 0.9448,
+ "step": 464
+ },
+ {
+ "epoch": 1.7032967032967035,
+ "grad_norm": 17.44647789001465,
+ "learning_rate": 5.533577533577534e-05,
+ "loss": 0.3183,
+ "step": 465
+ },
+ {
+ "epoch": 1.7069597069597071,
+ "grad_norm": 18.627901077270508,
+ "learning_rate": 5.531135531135531e-05,
+ "loss": 0.4137,
+ "step": 466
+ },
+ {
+ "epoch": 1.7106227106227108,
+ "grad_norm": 45.57220458984375,
+ "learning_rate": 5.528693528693529e-05,
+ "loss": 1.0096,
+ "step": 467
+ },
+ {
+ "epoch": 1.7142857142857144,
+ "grad_norm": 27.329822540283203,
+ "learning_rate": 5.5262515262515266e-05,
+ "loss": 0.5416,
+ "step": 468
+ },
+ {
+ "epoch": 1.717948717948718,
+ "grad_norm": 46.70027160644531,
+ "learning_rate": 5.523809523809524e-05,
+ "loss": 0.983,
+ "step": 469
+ },
+ {
+ "epoch": 1.7216117216117217,
+ "grad_norm": 32.47868728637695,
+ "learning_rate": 5.5213675213675216e-05,
+ "loss": 1.5687,
+ "step": 470
+ },
+ {
+ "epoch": 1.7252747252747254,
+ "grad_norm": 16.49342155456543,
+ "learning_rate": 5.5189255189255194e-05,
+ "loss": 0.3101,
+ "step": 471
+ },
+ {
+ "epoch": 1.728937728937729,
+ "grad_norm": 26.58381462097168,
+ "learning_rate": 5.5164835164835166e-05,
+ "loss": 0.7027,
+ "step": 472
+ },
+ {
+ "epoch": 1.7326007326007327,
+ "grad_norm": 17.435213088989258,
+ "learning_rate": 5.5140415140415144e-05,
+ "loss": 0.3958,
+ "step": 473
+ },
+ {
+ "epoch": 1.7362637362637363,
+ "grad_norm": 19.37874412536621,
+ "learning_rate": 5.5115995115995116e-05,
+ "loss": 0.3979,
+ "step": 474
+ },
+ {
+ "epoch": 1.73992673992674,
+ "grad_norm": 16.509248733520508,
+ "learning_rate": 5.509157509157509e-05,
+ "loss": 0.5121,
+ "step": 475
+ },
+ {
+ "epoch": 1.7435897435897436,
+ "grad_norm": 9.653852462768555,
+ "learning_rate": 5.5067155067155066e-05,
+ "loss": 0.1386,
+ "step": 476
+ },
+ {
+ "epoch": 1.7472527472527473,
+ "grad_norm": 26.486963272094727,
+ "learning_rate": 5.5042735042735045e-05,
+ "loss": 1.0307,
+ "step": 477
+ },
+ {
+ "epoch": 1.750915750915751,
+ "grad_norm": 17.766828536987305,
+ "learning_rate": 5.5018315018315016e-05,
+ "loss": 0.278,
+ "step": 478
+ },
+ {
+ "epoch": 1.7545787545787546,
+ "grad_norm": 12.930633544921875,
+ "learning_rate": 5.4993894993894995e-05,
+ "loss": 0.1487,
+ "step": 479
+ },
+ {
+ "epoch": 1.7582417582417582,
+ "grad_norm": 44.64267349243164,
+ "learning_rate": 5.496947496947497e-05,
+ "loss": 0.7036,
+ "step": 480
+ },
+ {
+ "epoch": 1.7619047619047619,
+ "grad_norm": 17.474651336669922,
+ "learning_rate": 5.4945054945054945e-05,
+ "loss": 0.1666,
+ "step": 481
+ },
+ {
+ "epoch": 1.7655677655677655,
+ "grad_norm": 48.3519401550293,
+ "learning_rate": 5.4920634920634923e-05,
+ "loss": 0.6157,
+ "step": 482
+ },
+ {
+ "epoch": 1.7692307692307692,
+ "grad_norm": 18.429521560668945,
+ "learning_rate": 5.48962148962149e-05,
+ "loss": 0.2588,
+ "step": 483
+ },
+ {
+ "epoch": 1.7728937728937728,
+ "grad_norm": 66.73760986328125,
+ "learning_rate": 5.4871794871794874e-05,
+ "loss": 0.654,
+ "step": 484
+ },
+ {
+ "epoch": 1.7765567765567765,
+ "grad_norm": 53.831539154052734,
+ "learning_rate": 5.484737484737485e-05,
+ "loss": 0.7538,
+ "step": 485
+ },
+ {
+ "epoch": 1.7802197802197801,
+ "grad_norm": 52.023895263671875,
+ "learning_rate": 5.482295482295483e-05,
+ "loss": 1.6623,
+ "step": 486
+ },
+ {
+ "epoch": 1.7838827838827838,
+ "grad_norm": 38.4475212097168,
+ "learning_rate": 5.4798534798534795e-05,
+ "loss": 0.5079,
+ "step": 487
+ },
+ {
+ "epoch": 1.7875457875457874,
+ "grad_norm": 25.642650604248047,
+ "learning_rate": 5.4774114774114774e-05,
+ "loss": 0.3825,
+ "step": 488
+ },
+ {
+ "epoch": 1.791208791208791,
+ "grad_norm": 57.916900634765625,
+ "learning_rate": 5.474969474969475e-05,
+ "loss": 0.9583,
+ "step": 489
+ },
+ {
+ "epoch": 1.7948717948717947,
+ "grad_norm": 39.23340606689453,
+ "learning_rate": 5.4725274725274724e-05,
+ "loss": 0.4724,
+ "step": 490
+ },
+ {
+ "epoch": 1.7985347985347986,
+ "grad_norm": 24.188661575317383,
+ "learning_rate": 5.47008547008547e-05,
+ "loss": 0.4471,
+ "step": 491
+ },
+ {
+ "epoch": 1.8021978021978022,
+ "grad_norm": 68.73822021484375,
+ "learning_rate": 5.4676434676434674e-05,
+ "loss": 0.6618,
+ "step": 492
+ },
+ {
+ "epoch": 1.8058608058608059,
+ "grad_norm": 26.382184982299805,
+ "learning_rate": 5.465201465201465e-05,
+ "loss": 0.5835,
+ "step": 493
+ },
+ {
+ "epoch": 1.8095238095238095,
+ "grad_norm": 31.758886337280273,
+ "learning_rate": 5.462759462759463e-05,
+ "loss": 0.622,
+ "step": 494
+ },
+ {
+ "epoch": 1.8131868131868132,
+ "grad_norm": 26.657405853271484,
+ "learning_rate": 5.46031746031746e-05,
+ "loss": 0.6003,
+ "step": 495
+ },
+ {
+ "epoch": 1.8168498168498168,
+ "grad_norm": 31.248491287231445,
+ "learning_rate": 5.457875457875458e-05,
+ "loss": 0.4929,
+ "step": 496
+ },
+ {
+ "epoch": 1.8205128205128205,
+ "grad_norm": 53.82766342163086,
+ "learning_rate": 5.455433455433456e-05,
+ "loss": 2.0716,
+ "step": 497
+ },
+ {
+ "epoch": 1.8241758241758241,
+ "grad_norm": 46.39777374267578,
+ "learning_rate": 5.452991452991453e-05,
+ "loss": 1.6767,
+ "step": 498
+ },
+ {
+ "epoch": 1.8278388278388278,
+ "grad_norm": 39.58620071411133,
+ "learning_rate": 5.450549450549451e-05,
+ "loss": 0.8274,
+ "step": 499
+ },
+ {
+ "epoch": 1.8315018315018317,
+ "grad_norm": 29.395286560058594,
+ "learning_rate": 5.448107448107448e-05,
+ "loss": 1.1441,
+ "step": 500
+ }
+ ],
+ "logging_steps": 1.0,
+ "max_steps": 2730,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/checkpoint-500/training_args.bin b/checkpoint-500/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/checkpoint-500/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249
diff --git a/checkpoint-500/zero_to_fp32.py b/checkpoint-500/zero_to_fp32.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04
--- /dev/null
+++ b/checkpoint-500/zero_to_fp32.py
@@ -0,0 +1,760 @@
+#!/usr/bin/env python
+
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+# the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+# application.
+#
+# example:
+# python zero_to_fp32.py . output_dir/
+# or
+# python zero_to_fp32.py . output_dir/ --safe_serialization
+
+import argparse
+import torch
+import glob
+import math
+import os
+import re
+import gc
+import json
+import numpy as np
+from tqdm import tqdm
+from collections import OrderedDict
+from dataclasses import dataclass
+
+# While this script doesn't use DeepSpeed to recover data, the checkpoints are pickled with
+# DeepSpeed data structures, so DeepSpeed has to be available in the current Python environment.
+from deepspeed.utils import logger
+from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+@dataclass
+class zero_model_state:
+    buffers: dict
+    param_shapes: dict
+    shared_params: list
+    ds_version: int
+    frozen_param_shapes: dict
+    frozen_param_fragments: dict
+
+
+debug = 0
+
+# load to cpu
+device = torch.device('cpu')
+
+
+def atoi(text):
+ return int(text) if text.isdigit() else text
+
+
+def natural_keys(text):
+ '''
+ alist.sort(key=natural_keys) sorts in human order
+ http://nedbatchelder.com/blog/200712/human_sorting.html
+ (See Toothy's implementation in the comments)
+ '''
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+def get_model_state_file(checkpoint_dir, zero_stage):
+ if not os.path.isdir(checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+ # there should be only one file
+ if zero_stage <= 2:
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+ elif zero_stage == 3:
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+ if not os.path.exists(file):
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+ return file
+
+
+def get_checkpoint_files(checkpoint_dir, glob_pattern):
+ # XXX: need to test that this simple glob rule works for multi-node setup too
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+ if len(ckpt_files) == 0:
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+ return ckpt_files
+
+
+def get_optim_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+def get_model_state_files(checkpoint_dir):
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+def parse_model_states(files):
+ zero_model_states = []
+ for file in files:
+ state_dict = torch.load(file, map_location=device, weights_only=False)
+
+ if BUFFER_NAMES not in state_dict:
+ raise ValueError(f"{file} is not a model state checkpoint")
+ buffer_names = state_dict[BUFFER_NAMES]
+ if debug:
+ print("Found buffers:", buffer_names)
+
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+ param_shapes = state_dict[PARAM_SHAPES]
+
+ # collect parameters that are included in param_shapes
+ param_names = []
+ for s in param_shapes:
+ for name in s.keys():
+ param_names.append(name)
+
+ # update with frozen parameters
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+ if frozen_param_shapes is not None:
+ if debug:
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+ param_names += list(frozen_param_shapes.keys())
+
+ # handle shared params
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+ ds_version = state_dict.get(DS_VERSION, None)
+
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+ z_model_state = zero_model_state(buffers=buffers,
+ param_shapes=param_shapes,
+ shared_params=shared_params,
+ ds_version=ds_version,
+ frozen_param_shapes=frozen_param_shapes,
+ frozen_param_fragments=frozen_param_fragments)
+ zero_model_states.append(z_model_state)
+
+ return zero_model_states
+
+
+def parse_optim_states(files, ds_checkpoint_dir):
+ total_files = len(files)
+ state_dicts = []
+ for f in tqdm(files, desc='Loading checkpoint shards'):
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
+        # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights,
+        # and also handle the case where they were already removed by another helper script
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+ state_dicts.append(state_dict)
+
+    if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
+ # use the max of the partition_count to get the dp world_size.
+
+ if type(world_size) is list:
+ world_size = max(world_size)
+
+ if world_size != total_files:
+ raise ValueError(
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ )
+
+ # the groups are named differently in each stage
+ if zero_stage <= 2:
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+ elif zero_stage == 3:
+ fp32_groups_key = FP32_FLAT_GROUPS
+ else:
+ raise ValueError(f"unknown zero stage {zero_stage}")
+
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+ return zero_stage, world_size, fp32_flat_groups
+
+
+def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
+ """
+ Returns fp32 state_dict reconstructed from ds checkpoint
+
+ Args:
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+ """
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+ optim_files = get_optim_files(ds_checkpoint_dir)
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+ model_files = get_model_state_files(ds_checkpoint_dir)
+
+ zero_model_states = parse_model_states(model_files)
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+ if zero_stage <= 2:
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+ elif zero_stage == 3:
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters)
+
+
+def _zero2_merge_frozen_params(state_dict, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+ if debug:
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ state_dict[name] = frozen_param_fragments[name]
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _has_callable(obj, fn):
+ attr = getattr(obj, fn, None)
+ return callable(attr)
+
+
+def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+
+ # Reconstruction protocol:
+ #
+ # XXX: document this
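+    #
+    # (Sketch, inferred from the code below: in ZeRO-2 each rank stores one contiguous
+    #  partition of every flattened fp32 param group; concatenating the per-rank partitions
+    #  restores the full flat vectors, and individual params are then sliced back out in
+    #  order using the recorded shapes, allowing for the 2*world_size alignment padding.)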
+
+ if debug:
+ for i in range(world_size):
+ for j in range(len(fp32_flat_groups[0])):
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+ # XXX: memory usage doubles here (zero2)
+ num_param_groups = len(fp32_flat_groups[0])
+ merged_single_partition_of_fp32_groups = []
+ for i in range(num_param_groups):
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+ avail_numel = sum(
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+ if debug:
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+ # not asserting if there is a mismatch due to possible padding
+ print(f"Have {avail_numel} numels to process.")
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ total_numel = 0
+ total_params = 0
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+ offset = 0
+ avail_numel = full_single_fp32_vector.numel()
+ for name, shape in shapes.items():
+
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
+ total_numel += unpartitioned_numel
+ total_params += 1
+
+ if debug:
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+ offset += unpartitioned_numel
+
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+ # live optimizer object, so we are checking that the numbers are within the right range
+ align_to = 2 * world_size
+
+ def zero2_align(x):
+ return align_to * math.ceil(x / align_to)
+
+ if debug:
+ print(f"original offset={offset}, avail_numel={avail_numel}")
+
+ offset = zero2_align(offset)
+ avail_numel = zero2_align(avail_numel)
+
+ if debug:
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+ remainder = unpartitioned_numel % world_size
+ padding_numel = (world_size - remainder) if remainder else 0
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+ return partitioned_numel, padding_numel
+
+
+def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+ return
+
+ if debug:
+ for i in range(world_size):
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+ wanted_params = len(frozen_param_shapes)
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+ print(f'Frozen params: Have {avail_numel} numels to process.')
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+ total_params = 0
+ total_numel = 0
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
+ total_params += 1
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+class GatheredTensor:
+ """
+ A pseudo tensor that collects partitioned weights.
+ It is more memory efficient when there are multiple groups.
+ """
+
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
+ self.flat_groups = flat_groups
+ self.flat_groups_offset = flat_groups_offset
+ self.offset = offset
+ self.partitioned_numel = partitioned_numel
+ self.shape = shape
+ self.dtype = self.flat_groups[0][0].dtype
+
+ def contiguous(self):
+ """
+ Merge partitioned weights from flat_groups into a single tensor.
+ """
+ end_idx = self.offset + self.partitioned_numel
+ world_size = len(self.flat_groups)
+ pad_flat_param_chunks = []
+
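+        # flat_groups_offset holds cumulative numels (prefix sums) of the param groups on one
+        # rank (built in _zero3_merge_trainable_params), so the [offset, end_idx) window of this
+        # param can be mapped below to the group(s) and local slices that hold its shard.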
+ for rank_i in range(world_size):
+ # for each rank, we need to collect weights from related group/groups
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
+ start_group_id = None
+ end_group_id = None
+ for group_id in range(len(self.flat_groups_offset)):
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
+ start_group_id = group_id
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
+ end_group_id = group_id
+ break
+ # collect weights from related group/groups
+ for group_id in range(start_group_id, end_group_id + 1):
+ flat_tensor = flat_groups_at_rank_i[group_id]
+ start_offset = self.offset - self.flat_groups_offset[group_id]
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
+
+ # collect weights from all ranks
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
+ return param
+
+
+def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+ param_shapes = zero_model_states[0].param_shapes
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
+
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+ # param, re-consolidating each param, while dealing with padding if any
+
+ # merge list of dicts, preserving order
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+ if debug:
+ for i in range(world_size):
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+ wanted_params = len(param_shapes)
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+ # not asserting if there is a mismatch due to possible padding
+ avail_numel = fp32_flat_groups[0].numel() * world_size
+ print(f"Trainable params: Have {avail_numel} numels to process.")
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+ # params
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+ # out-of-core computing solution
+ offset = 0
+ total_numel = 0
+ total_params = 0
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
+ unpartitioned_numel = shape.numel()
+ total_numel += unpartitioned_numel
+ total_params += 1
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+ if debug:
+ print(
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+ )
+
+ # memory efficient tensor
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
+ state_dict[name] = tensor
+ offset += partitioned_numel
+
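+    # `offset` accumulated per-rank (partitioned) numels, so scale it by world_size before
+    # comparing it against avail_numel, which counts elements across all ranks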
+ offset *= world_size
+
+ # Sanity check
+ if offset != avail_numel:
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+ exclude_frozen_parameters):
+ state_dict = OrderedDict()
+
+ # buffers
+ buffers = zero_model_states[0].buffers
+ state_dict.update(buffers)
+ if debug:
+ print(f"added {len(buffers)} buffers")
+
+ if not exclude_frozen_parameters:
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+ # recover shared parameters
+ for pair in zero_model_states[0].shared_params:
+ if pair[1] in state_dict:
+ state_dict[pair[0]] = state_dict[pair[1]]
+
+ return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+ """
+ Convert state_dict of GatheredTensor to torch tensor
+ """
+ torch_state_dict = {}
+ converted_tensors = {}
+ for name, tensor in state_dict.items():
+ tensor_id = id(tensor)
+ if tensor_id in converted_tensors: # shared tensors
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+ torch_state_dict[name] = shared_tensor
+ else:
+ converted_tensors[tensor_id] = name
+ if return_empty_tensor:
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+ else:
+ torch_state_dict[name] = tensor.contiguous()
+ return torch_state_dict
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag=None,
+ exclude_frozen_parameters=False,
+ lazy_mode=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+ via a model hub.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the 'latest' file, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
+        - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+          Convert a pseudo tensor to a torch tensor by calling ``.contiguous()``
+
+ Returns:
+ - pytorch ``state_dict``
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ # do the training and checkpoint saving
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+ model = model.cpu() # move to cpu
+ model.load_state_dict(state_dict)
+ # submit to model hub or save the model to share with others
+
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
+ application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+ the checkpoint. Or you can load state_dict in lazy mode ::
+
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+        for name, lazy_tensor in state_dict.items():
+            tensor = lazy_tensor.contiguous() # to cpu
+            print(name, tensor)
+            # del tensor to release memory if it is no longer in use
+ """
+ if tag is None:
+ latest_path = os.path.join(checkpoint_dir, 'latest')
+ if os.path.isfile(latest_path):
+ with open(latest_path, 'r') as fd:
+ tag = fd.read().strip()
+ else:
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+ if not os.path.isdir(ds_checkpoint_dir):
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+ if lazy_mode:
+ return state_dict
+ else:
+ return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+ output_dir,
+ max_shard_size="5GB",
+ safe_serialization=False,
+ tag=None,
+ exclude_frozen_parameters=False):
+ """
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+ Args:
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+ - ``exclude_frozen_parameters``: exclude frozen parameters
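+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+        # illustrative paths; pass safe_serialization=True to write safetensors shards instead of .bin
+        convert_zero_checkpoint_to_fp32_state_dict('path/checkpoint-12', 'path/checkpoint-12-output/')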
+ """
+
+ # Dependency pre-check
+ if safe_serialization:
+ try:
+ from safetensors.torch import save_file
+ except ImportError:
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
+ raise
+ if max_shard_size is not None:
+ try:
+ from huggingface_hub import split_torch_state_dict_into_shards
+ except ImportError:
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+ raise
+
+ # Convert zero checkpoint to state_dict
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+ tag,
+ exclude_frozen_parameters,
+ lazy_mode=True)
+
+ # Shard the model if it is too big.
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+ if max_shard_size is not None:
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach for sharding
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+ filename_pattern=filename_pattern,
+ max_shard_size=max_shard_size)
+ else:
+ from collections import namedtuple
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+ state_dict_split = StateDictSplit(is_sharded=False,
+ filename_to_tensors={weights_name: list(state_dict.keys())})
+
+ # Save the model by shard
+ os.makedirs(output_dir, exist_ok=True)
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+ shard_state_dict = to_torch_tensor(shard_state_dict)
+ output_path = os.path.join(output_dir, shard_file)
+ if safe_serialization:
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+ else:
+ torch.save(shard_state_dict, output_path)
+ # release the memory of current shard
+ for tensor_name in list(shard_state_dict.keys()):
+ del state_dict[tensor_name]
+ del shard_state_dict[tensor_name]
+ del shard_state_dict
+ gc.collect()
+
+ # Save index if sharded
+ if state_dict_split.is_sharded:
+ index = {
+ "metadata": state_dict_split.metadata,
+ "weight_map": state_dict_split.tensor_to_filename,
+ }
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+ save_index_file = os.path.join(output_dir, save_index_file)
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+
+def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+ """
+    1. Put the provided model on the CPU
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+ 3. Load it into the provided model
+
+ Args:
+ - ``model``: the model object to update
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+ Returns:
+        - ``model``: modified model
+
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
+    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+ conveniently placed for you in the checkpoint folder.
+
+ A typical usage might be ::
+
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+ # submit to model hub or save the model to share with others
+
+    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+ """
+ logger.info(f"Extracting fp32 weights")
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+ logger.info(f"Overwriting model with fp32 weights")
+ model = model.cpu()
+ model.load_state_dict(state_dict, strict=False)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument("checkpoint_dir",
+ type=str,
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+ parser.add_argument("output_dir",
+ type=str,
+ help="directory to the pytorch fp32 state_dict output files"
+ "(e.g. path/checkpoint-12-output/)")
+ parser.add_argument(
+ "--max_shard_size",
+ type=str,
+ default="5GB",
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
+ "without CPU OOM issues.")
+ parser.add_argument(
+ "--safe_serialization",
+ default=False,
+ action='store_true',
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+ parser.add_argument("-t",
+ "--tag",
+ type=str,
+ default=None,
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+ args = parser.parse_args()
+
+ debug = args.debug
+
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+ args.output_dir,
+ max_shard_size=args.max_shard_size,
+ safe_serialization=args.safe_serialization,
+ tag=args.tag,
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
diff --git a/config.json b/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40aa0a10ec7958e160bf07f2feca405387c8b288
--- /dev/null
+++ b/config.json
@@ -0,0 +1,33 @@
+{
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "label2id": {
+ "LABEL_0": 0
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 8194,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "output_past": true,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.54.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 250002
+}
diff --git a/model.safetensors b/model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fd71e1e4e431ea10b421ab4c13b03d587531fd19
--- /dev/null
+++ b/model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86b0d1f9d6d3bf71028e927de9fa9bbbce7c4147712f9e04eebbd0edf10b9f75
+size 2271071852
diff --git a/sentencepiece.bpe.model b/sentencepiece.bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..7a3f40a75f870bc1f21700cd414dc2acc431583c
--- /dev/null
+++ b/sentencepiece.bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
diff --git a/special_tokens_map.json b/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1879d702821e753ffe4245048eee415d54a9385
--- /dev/null
+++ b/special_tokens_map.json
@@ -0,0 +1,51 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/tokenizer.json b/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..322d084f75a19f4fec0fc0b5f351be9a3dfefa3e
--- /dev/null
+++ b/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50ec628ce274af8429e5aa0c573e737ef2db1c2acd3b2dd51362a33c3a534f99
+size 17082999
diff --git a/tokenizer_config.json b/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..95bd7c849ee6a47d5c92805af18d187239c1ba4a
--- /dev/null
+++ b/tokenizer_config.json
@@ -0,0 +1,56 @@
+{
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "250001": {
+ "content": "",
+ "lstrip": true,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "",
+ "eos_token": "",
+ "extra_special_tokens": {},
+ "mask_token": "",
+ "model_max_length": 8192,
+ "pad_token": "",
+ "sep_token": "",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "XLMRobertaTokenizer",
+ "unk_token": ""
+}
diff --git a/training_args.bin b/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..efd73451f8808ee6551f09598ece18ffd5afe9a8
--- /dev/null
+++ b/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9433d412d81580f751a4a8cdb904f13acd11bf72c98d8dd9b40ffc47b121468f
+size 7249