diff --git a/checkpoint-272/config.json b/checkpoint-272/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..321049cbcea926f12a79cc40f4a19a8f17cd8ed3
--- /dev/null
+++ b/checkpoint-272/config.json
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "meta-math/MetaMath-Mistral-7B",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.2",
+  "use_cache": false,
+  "vocab_size": 32001
+}
diff --git a/checkpoint-272/generation_config.json b/checkpoint-272/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..282b497efd8f276cf9270e576fb79be429aebcdc
--- /dev/null
+++ b/checkpoint-272/generation_config.json
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "do_sample": true,
+  "eos_token_id": 2,
+  "transformers_version": "4.38.2"
+}
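The two JSON files above fully describe the checkpoint: a 32-layer Mistral-style decoder fine-tuned from meta-math/MetaMath-Mistral-7B, with grouped-query attention (32 query heads over 8 key/value heads), bfloat16 weights, a 32001-entry vocabulary, and default sampling enabled for generation. A minimal sketch of how such a directory is typically consumed with the Hugging Face transformers API; the local path is an assumption, not part of the diff:

# Sketch: loading this checkpoint with transformers (hypothetical local path).
import torch
from transformers import AutoConfig, AutoModelForCausalLM

ckpt_dir = "checkpoint-272"  # assumed to be the directory added by this diff

config = AutoConfig.from_pretrained(ckpt_dir)   # parses config.json above
assert config.num_key_value_heads == 8          # grouped-query attention

model = AutoModelForCausalLM.from_pretrained(
    ckpt_dir,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
)
model.config.use_cache = True    # the config disables the KV cache for training; re-enable for inference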
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..48c558d06b0c8563d8a151508450eafb89b679c8
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e31849038fa9cd4678f9b66e0fd5c35fc041a94b8b208c80fd609d43b0126a0
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bba45e44964c92746c74cd7ad0e6943c82b1d9ab
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf6fa0ee61cc028e89dd431ca628d68f10a1f95a7ff5ed098166ecc8f6d8c1f7
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..83c158c74a284d518a56cc2259b70303c72580d8
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac6663dd6e69cfb0a3eea1233a785100ecbe6a9f90463a7f4d8fc505fdbbce3b
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e07690224db07e3520e9e4e32bdaa61a38fbf4ad
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4112230565592bb524050869c6efe110642e4ed541727329587ea0adb1f119e
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..707789acec4a023012313d749018f9bd95328ff0
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f062872fdddf3358ba89f0e2c081d0665fe65398f61e674b2bb8ff363748c302
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..787bf555682c4946cfba1c1da7665cf0baf03cc9
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55c346ffddb70182661cdf51a8bc119e315e3aed15d66d7130adfb1f268320ae
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ccbf28bb7c25bd09aa2e75a6c8d1c9f836b0d68f
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8376e6c211faf63973ea5506550505a6e4ab80119df71bce7c81e8301a07331b
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..94f28cac74a908ce605b0afa8053b01ff4416c37
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f090e3cda29d6cdd54eb5b30634166223f1f2036143772c25b6456a05bfce39
+size 4831618059
diff --git a/checkpoint-272/global_step272/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt b/checkpoint-272/global_step272/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..740903966d479ad5305927c6e6b5617ced5b3b95
--- /dev/null
+++ b/checkpoint-272/global_step272/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e92caf0a4a936c842a455a9a7dfb1f8b5f82f5adaa1b3c327e8da76f5ac5ad70
+size 4831618059
diff --git a/checkpoint-272/global_step272/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_0_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8f3ccfa90455ce87f67539861610539c05d23bb9
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_0_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:280bfa97e4d83b87d6b6e0bd40e16e960075c0f7cc87d31a7841a3ee3639f30a
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_1_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..eee70053d8691ac8935a22bce6bb1183d01e5248
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_1_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e8cb4abe6afe1bd7dba3a8b7485c585f50fdb8f2f6c91c8d63f094c6048859c
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_2_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..748286a3f0e37264dd5ffb534f6d02e0c1c5800d
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_2_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d52d693fc7723ad49cc2f0672dc16bb568676ee6305230603aa5e256824d6e6
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_3_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2f85e5028881e378b88c5ae0f6bb592515f8ab2a
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_3_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:327184ae8ca4973115be4df9d8909ab4309b4c7d5786289ef8b4c20fd2fb41b7
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_4_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..96b013e4261115593cc4e1256a8af3bd05175453
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_4_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65f34e4df94e1fe86860c5ad6f589b08b34935929479a9b75cf4567ec42986a5
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_5_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..17e25b3d86c3ab33781f4fb506701b8010fcc9ef
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_5_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab500a98d6713dc68032e36a46e64cecdc6539c9a62b6b61040628613f0f81e8
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_6_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..69f3516f6db2828f550747a14ba7dd8c49a7199b
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_6_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d5a4e70c3f97c371795ec0366e88e65ccd7799ec2152fe13ddfd24fdc027ab0
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_7_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c2dc26921050c284fb31180f1cd026318e9625ff
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_7_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9df33f762b6e113595c5d0bf3d434930fa902b91a8a8eaa7fb0e94bef7670fd
+size 153829
diff --git a/checkpoint-272/global_step272/zero_pp_rank_8_mp_rank_00_model_states.pt b/checkpoint-272/global_step272/zero_pp_rank_8_mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c5312fdd9e58e6eecd064912845a040947a042a2
--- /dev/null
+++ b/checkpoint-272/global_step272/zero_pp_rank_8_mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21bf5d06b3a94429b3b6c0c6acfce9e344c07b8bf1311da209791490c130b1c4
+size 153829
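The global_step272 directory above holds the DeepSpeed ZeRO training state: nine ~4.8 GB bf16 optimizer-state partitions and nine small module-metadata files, one pair per data-parallel rank, so this run trained across nine ranks. The consolidated model weights are uploaded separately as the safetensors shards below; the ZeRO partitions are only needed to resume training, or to reassemble fp32 weights when no merged copy exists. A sketch of that consolidation, assuming DeepSpeed is installed and the standard helper applies to this layout:

# Sketch: merging ZeRO-partitioned states into one fp32 state dict.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "checkpoint-272" is assumed local; its "latest" file tags global_step272.
state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-272")
print(sum(p.numel() for p in state_dict.values()))  # roughly 7.2B parameters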
diff --git a/checkpoint-272/latest b/checkpoint-272/latest
new file mode 100644
index 0000000000000000000000000000000000000000..27efce4d2a5eafcfb9852baec1b0d40102e85f5d
--- /dev/null
+++ b/checkpoint-272/latest
@@ -0,0 +1 @@
+global_step272
\ No newline at end of file
diff --git a/checkpoint-272/model-00001-of-00003.safetensors b/checkpoint-272/model-00001-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b1c0c2b944d347ff856b40039558ffa6e9c9f290
--- /dev/null
+++ b/checkpoint-272/model-00001-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb7ddd132c950151879ee704033773a1c08f22fedfbe2459a71cf1304378ddad
+size 4943170528
diff --git a/checkpoint-272/model-00002-of-00003.safetensors b/checkpoint-272/model-00002-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..28b5746d5e66444a50e7518b8b0331c06beb5e7a
--- /dev/null
+++ b/checkpoint-272/model-00002-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:254fae62a9850c1250d558ce0c0a152cbf3843311738cf4ef96d0b9eb71c8ba0
+size 4999819336
diff --git a/checkpoint-272/model-00003-of-00003.safetensors b/checkpoint-272/model-00003-of-00003.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..425c3a07a909809b59c0241e43b0f88242e536db
--- /dev/null
+++ b/checkpoint-272/model-00003-of-00003.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5b4497b7b6358ed1de5f189caf947738698ebcf00c3dec230c973c0552e5d86
+size 4540524536
diff --git a/checkpoint-272/model.safetensors.index.json b/checkpoint-272/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..74703d23d7ed329df7a6abebb508ca436906cacb
--- /dev/null
+++ b/checkpoint-272/model.safetensors.index.json
@@ -0,0 +1,298 @@
+{
+  "metadata": {
+    "total_size": 14483480576
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00003-of-00003.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.norm.weight": "model-00003-of-00003.safetensors"
+  }
+}
diff --git a/checkpoint-272/rng_state_0.pth b/checkpoint-272/rng_state_0.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f99aa16a815eef6c03aa1f771715d2113e3ddcf6
--- /dev/null
+++ b/checkpoint-272/rng_state_0.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e37b5dbacf124b1514a121af5a0ce2c5a8e77be83bf19ae649a665a468082d28
+size 16240
diff --git a/checkpoint-272/rng_state_1.pth b/checkpoint-272/rng_state_1.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0ec0714e09bf2b78ff9dfb54f839d29262893976
--- /dev/null
+++ b/checkpoint-272/rng_state_1.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cef1e45867cf45a884341d3d1df4a7485b45b65e7ef081206135e62bcccb42f5
+size 16240
diff --git a/checkpoint-272/rng_state_2.pth b/checkpoint-272/rng_state_2.pth
new file mode 100644
index 0000000000000000000000000000000000000000..81cae168d16c3911e3031e4fd9a49afceea8ce7c
--- /dev/null
+++ b/checkpoint-272/rng_state_2.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4c6bfccb1c88b7ba35a635a24b890be2e0af719772c1d99cd0a5ba42ef608ec
+size 16240
diff --git a/checkpoint-272/rng_state_3.pth b/checkpoint-272/rng_state_3.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9e9b13c355b731f09381dbcd5993691aed311640
--- /dev/null
+++ b/checkpoint-272/rng_state_3.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7430906032884979d0dae96997913bf4abe89d78b37987bb6dfdce3fed39b2a9
+size 16240
diff --git a/checkpoint-272/rng_state_4.pth b/checkpoint-272/rng_state_4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..479098e9e352834b81eb11852e4628cb031fd3b5
--- /dev/null
+++ b/checkpoint-272/rng_state_4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d01f98d61eec8827743e7fec29e83ca6ecdd540e8d277817dce7fc06a97b258
+size 16240
diff --git a/checkpoint-272/rng_state_5.pth b/checkpoint-272/rng_state_5.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9c372e50dc8b49096544026f8c631b8d32f1e0dd
--- /dev/null
+++ b/checkpoint-272/rng_state_5.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:794f06b07218837f68fb7b5fe84665c13dc6a5180f685b6d8e6b4365ee8470bf
+size 16240
diff --git a/checkpoint-272/rng_state_6.pth b/checkpoint-272/rng_state_6.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0187808d69d4219e61ebbe348e1125942bbe7576
--- /dev/null
+++ b/checkpoint-272/rng_state_6.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:406f4ad8fafa642cbfe4d8b4fd81a4a4c339ce8fed12fd4ced0b9ccd483ad18f
+size 16240
diff --git a/checkpoint-272/rng_state_7.pth b/checkpoint-272/rng_state_7.pth
new file mode 100644
index 0000000000000000000000000000000000000000..92377208a01063595163d4a058c1b3c1c77c77b5
--- /dev/null
+++ b/checkpoint-272/rng_state_7.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa49f77dfa366a04d42761a422f906b99bb3991a7119ec4d497a4cd6a129c4e4
+size 16240
diff --git a/checkpoint-272/rng_state_8.pth b/checkpoint-272/rng_state_8.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7d36f325fcb1d54e00fd8095d35f4dd6e815c121
--- /dev/null
+++ b/checkpoint-272/rng_state_8.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52aeb24997fb0b3fdd2c038ceb9e0a217724db63ac5cb47bb06bab9354d5be3c
+size 16240
diff --git a/checkpoint-272/scheduler.pt b/checkpoint-272/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cb790b606eb56004763bd78336da002be6130ec5
--- /dev/null
+++ b/checkpoint-272/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f321f2f0ea6e36dc3550ed5e4455f04e5d7636ce96621025506fa529386c2b11
+size 1064
diff --git a/checkpoint-272/trainer_state.json b/checkpoint-272/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..973426e233d5480860b65796c81bfcbdfaffc52c
--- /dev/null
+++ b/checkpoint-272/trainer_state.json
@@ -0,0 +1,1965 @@
+{
+  "best_metric": 0.22680288553237915,
+  "best_model_checkpoint": "./EulerMath-Mistral-7B-model/checkpoint-272",
+  "epoch": 0.9990817263544536,
+  "eval_steps": 68,
+  "global_step": 272,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0,
+      "grad_norm": 19.19068191513093,
+      "learning_rate": 5.000000000000001e-07,
+      "loss": 0.707,
+      "step": 1
+    },
+    {
+      "epoch": 0.0,
+      "eval_loss": 0.9060535430908203,
+      "eval_runtime": 1745.9683,
+      "eval_samples_per_second": 1.324,
+      "eval_steps_per_second": 0.074,
+      "step": 1
+    },
+    {
+      "epoch": 0.01,
+      "grad_norm": 20.035932532601844,
+      "learning_rate": 1.0000000000000002e-06,
+      "loss": 0.7236,
+      "step": 2
+    },
+    {
+      "epoch": 0.01,
+      "grad_norm": 19.31513317860667,
+      "learning_rate": 1.5e-06,
+      "loss": 0.7201,
+      "step": 3
+    },
+    {
+      "epoch": 0.01,
+      "grad_norm": 16.561326930760348,
+      "learning_rate": 2.0000000000000003e-06,
+      "loss": 0.6717,
+      "step": 4
+    },
+    {
+      "epoch": 0.02,
+      "grad_norm": 9.069275733221579,
+      "learning_rate": 2.5e-06,
+      "loss": 0.573,
+      "step": 5
+    },
+    {
+      "epoch": 0.02,
+      "grad_norm": 6.0702110208300475,
+      "learning_rate": 3e-06,
+      "loss": 0.4965,
+      "step": 6
+    },
+    {
+      "epoch": 0.03,
+      "grad_norm": 6.5389430446896055,
+      "learning_rate": 3.5e-06,
+      "loss": 0.5093,
+      "step": 7
+    },
+    {
+      "epoch": 0.03,
+      "grad_norm": 7.709934958779789,
+      "learning_rate": 4.000000000000001e-06,
+      "loss": 0.524,
+      "step": 8
+    },
+    {
+      "epoch": 0.03,
+      "grad_norm": 6.1640217934257135,
+      "learning_rate": 4.5e-06,
+      "loss": 0.503,
+      "step": 9
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 4.079182690080823,
+      "learning_rate": 5e-06,
+      "loss": 0.4787,
+      "step": 10
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 4.269731620276111,
+      "learning_rate": 4.999956736067563e-06,
+      "loss": 0.4545,
+      "step": 11
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 4.059214670786909,
+      "learning_rate": 4.999826945767665e-06,
+      "loss": 0.4638,
+      "step": 12
+    },
+    {
+      "epoch": 0.05,
+      "grad_norm": 3.583247385116129,
+      "learning_rate": 4.9996106335924965e-06,
+      "loss": 0.4396,
+      "step": 13
+    },
+    {
+      "epoch": 0.05,
+      "grad_norm": 3.2077663599892405,
+      "learning_rate": 4.999307807028872e-06,
+      "loss": 0.4287,
+      "step": 14
+    },
+    {
+      "epoch": 0.06,
+      "grad_norm": 2.3678816023894513,
+      "learning_rate":
4.998918476557964e-06, + "loss": 0.4169, + "step": 15 + }, + { + "epoch": 0.06, + "grad_norm": 1.9925263681909064, + "learning_rate": 4.998442655654946e-06, + "loss": 0.4099, + "step": 16 + }, + { + "epoch": 0.06, + "grad_norm": 1.7706573910428134, + "learning_rate": 4.997880360788527e-06, + "loss": 0.4003, + "step": 17 + }, + { + "epoch": 0.07, + "grad_norm": 1.6789390301868525, + "learning_rate": 4.997231611420374e-06, + "loss": 0.399, + "step": 18 + }, + { + "epoch": 0.07, + "grad_norm": 1.5622054221426698, + "learning_rate": 4.996496430004446e-06, + "loss": 0.3885, + "step": 19 + }, + { + "epoch": 0.07, + "grad_norm": 1.5663787846468284, + "learning_rate": 4.995674841986217e-06, + "loss": 0.3987, + "step": 20 + }, + { + "epoch": 0.08, + "grad_norm": 1.4502330087611721, + "learning_rate": 4.994766875801789e-06, + "loss": 0.3962, + "step": 21 + }, + { + "epoch": 0.08, + "grad_norm": 1.4188997099391882, + "learning_rate": 4.993772562876909e-06, + "loss": 0.3845, + "step": 22 + }, + { + "epoch": 0.08, + "grad_norm": 1.4360806887465898, + "learning_rate": 4.992691937625892e-06, + "loss": 0.3764, + "step": 23 + }, + { + "epoch": 0.09, + "grad_norm": 1.4216582090099372, + "learning_rate": 4.991525037450412e-06, + "loss": 0.3712, + "step": 24 + }, + { + "epoch": 0.09, + "grad_norm": 1.2856499279799387, + "learning_rate": 4.990271902738223e-06, + "loss": 0.3603, + "step": 25 + }, + { + "epoch": 0.1, + "grad_norm": 1.247117404577534, + "learning_rate": 4.988932576861754e-06, + "loss": 0.3652, + "step": 26 + }, + { + "epoch": 0.1, + "grad_norm": 1.3197850379000642, + "learning_rate": 4.987507106176606e-06, + "loss": 0.371, + "step": 27 + }, + { + "epoch": 0.1, + "grad_norm": 1.243400495941476, + "learning_rate": 4.985995540019956e-06, + "loss": 0.3599, + "step": 28 + }, + { + "epoch": 0.11, + "grad_norm": 1.3278566257982103, + "learning_rate": 4.984397930708838e-06, + "loss": 0.3594, + "step": 29 + }, + { + "epoch": 0.11, + "grad_norm": 1.337022527470652, + "learning_rate": 4.982714333538344e-06, + "loss": 0.3477, + "step": 30 + }, + { + "epoch": 0.11, + "grad_norm": 1.2099362672151601, + "learning_rate": 4.980944806779698e-06, + "loss": 0.3425, + "step": 31 + }, + { + "epoch": 0.12, + "grad_norm": 1.2110593150023343, + "learning_rate": 4.979089411678252e-06, + "loss": 0.3567, + "step": 32 + }, + { + "epoch": 0.12, + "grad_norm": 1.2334965596913852, + "learning_rate": 4.977148212451354e-06, + "loss": 0.3526, + "step": 33 + }, + { + "epoch": 0.12, + "grad_norm": 1.1687161424016368, + "learning_rate": 4.975121276286136e-06, + "loss": 0.3496, + "step": 34 + }, + { + "epoch": 0.13, + "grad_norm": 1.1881954676378432, + "learning_rate": 4.973008673337181e-06, + "loss": 0.3321, + "step": 35 + }, + { + "epoch": 0.13, + "grad_norm": 1.2174270605971114, + "learning_rate": 4.970810476724097e-06, + "loss": 0.3446, + "step": 36 + }, + { + "epoch": 0.14, + "grad_norm": 1.1609330509652702, + "learning_rate": 4.968526762528988e-06, + "loss": 0.341, + "step": 37 + }, + { + "epoch": 0.14, + "grad_norm": 1.2149352568793006, + "learning_rate": 4.9661576097938205e-06, + "loss": 0.3459, + "step": 38 + }, + { + "epoch": 0.14, + "grad_norm": 1.1885081900677397, + "learning_rate": 4.963703100517684e-06, + "loss": 0.3425, + "step": 39 + }, + { + "epoch": 0.15, + "grad_norm": 1.113235885075549, + "learning_rate": 4.961163319653959e-06, + "loss": 0.339, + "step": 40 + }, + { + "epoch": 0.15, + "grad_norm": 1.0983562726057154, + "learning_rate": 4.958538355107369e-06, + "loss": 0.3298, + "step": 41 + }, + { + "epoch": 0.15, 
+ "grad_norm": 1.1594289217865181, + "learning_rate": 4.955828297730949e-06, + "loss": 0.3187, + "step": 42 + }, + { + "epoch": 0.16, + "grad_norm": 1.1714548911644644, + "learning_rate": 4.953033241322887e-06, + "loss": 0.3373, + "step": 43 + }, + { + "epoch": 0.16, + "grad_norm": 1.1450397323165031, + "learning_rate": 4.950153282623289e-06, + "loss": 0.3232, + "step": 44 + }, + { + "epoch": 0.17, + "grad_norm": 1.1526363934692334, + "learning_rate": 4.947188521310827e-06, + "loss": 0.3243, + "step": 45 + }, + { + "epoch": 0.17, + "grad_norm": 1.2175235837438554, + "learning_rate": 4.944139059999286e-06, + "loss": 0.3252, + "step": 46 + }, + { + "epoch": 0.17, + "grad_norm": 1.099789045296574, + "learning_rate": 4.941005004234019e-06, + "loss": 0.3178, + "step": 47 + }, + { + "epoch": 0.18, + "grad_norm": 1.2219677196886505, + "learning_rate": 4.937786462488284e-06, + "loss": 0.3185, + "step": 48 + }, + { + "epoch": 0.18, + "grad_norm": 1.1806399387287625, + "learning_rate": 4.9344835461595016e-06, + "loss": 0.3131, + "step": 49 + }, + { + "epoch": 0.18, + "grad_norm": 1.1320527868188186, + "learning_rate": 4.93109636956539e-06, + "loss": 0.3198, + "step": 50 + }, + { + "epoch": 0.19, + "grad_norm": 1.2551253674231917, + "learning_rate": 4.927625049940013e-06, + "loss": 0.3063, + "step": 51 + }, + { + "epoch": 0.19, + "grad_norm": 1.1131050315591549, + "learning_rate": 4.9240697074297205e-06, + "loss": 0.3192, + "step": 52 + }, + { + "epoch": 0.19, + "grad_norm": 1.218025833644298, + "learning_rate": 4.920430465088992e-06, + "loss": 0.3083, + "step": 53 + }, + { + "epoch": 0.2, + "grad_norm": 1.090531576651011, + "learning_rate": 4.916707448876173e-06, + "loss": 0.3076, + "step": 54 + }, + { + "epoch": 0.2, + "grad_norm": 1.1865422414756877, + "learning_rate": 4.912900787649124e-06, + "loss": 0.3155, + "step": 55 + }, + { + "epoch": 0.21, + "grad_norm": 1.1236405558973956, + "learning_rate": 4.909010613160751e-06, + "loss": 0.306, + "step": 56 + }, + { + "epoch": 0.21, + "grad_norm": 1.222805799933775, + "learning_rate": 4.90503706005445e-06, + "loss": 0.3054, + "step": 57 + }, + { + "epoch": 0.21, + "grad_norm": 1.179814726076065, + "learning_rate": 4.900980265859449e-06, + "loss": 0.309, + "step": 58 + }, + { + "epoch": 0.22, + "grad_norm": 1.155763655177263, + "learning_rate": 4.896840370986042e-06, + "loss": 0.2974, + "step": 59 + }, + { + "epoch": 0.22, + "grad_norm": 1.1687171308842221, + "learning_rate": 4.892617518720737e-06, + "loss": 0.3018, + "step": 60 + }, + { + "epoch": 0.22, + "grad_norm": 1.2240587320323661, + "learning_rate": 4.88831185522129e-06, + "loss": 0.3066, + "step": 61 + }, + { + "epoch": 0.23, + "grad_norm": 1.1042960875500205, + "learning_rate": 4.883923529511646e-06, + "loss": 0.2977, + "step": 62 + }, + { + "epoch": 0.23, + "grad_norm": 1.1885949614868223, + "learning_rate": 4.87945269347679e-06, + "loss": 0.3087, + "step": 63 + }, + { + "epoch": 0.24, + "grad_norm": 1.1420656757477574, + "learning_rate": 4.874899501857477e-06, + "loss": 0.2904, + "step": 64 + }, + { + "epoch": 0.24, + "grad_norm": 1.1453980260713446, + "learning_rate": 4.87026411224489e-06, + "loss": 0.306, + "step": 65 + }, + { + "epoch": 0.24, + "grad_norm": 1.2729287210416769, + "learning_rate": 4.865546685075174e-06, + "loss": 0.2938, + "step": 66 + }, + { + "epoch": 0.25, + "grad_norm": 1.2052792222072466, + "learning_rate": 4.860747383623889e-06, + "loss": 0.2977, + "step": 67 + }, + { + "epoch": 0.25, + "grad_norm": 1.2657508580603682, + "learning_rate": 4.85586637400036e-06, + 
"loss": 0.3011, + "step": 68 + }, + { + "epoch": 0.25, + "eval_loss": 0.32630813121795654, + "eval_runtime": 1744.5857, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 68 + }, + { + "epoch": 0.25, + "grad_norm": 1.1832834131492187, + "learning_rate": 4.85090382514192e-06, + "loss": 0.2972, + "step": 69 + }, + { + "epoch": 0.26, + "grad_norm": 1.255475532117491, + "learning_rate": 4.845859908808074e-06, + "loss": 0.302, + "step": 70 + }, + { + "epoch": 0.26, + "grad_norm": 1.298818409489401, + "learning_rate": 4.8407347995745465e-06, + "loss": 0.2935, + "step": 71 + }, + { + "epoch": 0.26, + "grad_norm": 1.3499885398461409, + "learning_rate": 4.8355286748272405e-06, + "loss": 0.295, + "step": 72 + }, + { + "epoch": 0.27, + "grad_norm": 1.3446382549398914, + "learning_rate": 4.830241714756099e-06, + "loss": 0.2824, + "step": 73 + }, + { + "epoch": 0.27, + "grad_norm": 1.2082987304246777, + "learning_rate": 4.8248741023488705e-06, + "loss": 0.3026, + "step": 74 + }, + { + "epoch": 0.28, + "grad_norm": 1.3432457490726049, + "learning_rate": 4.81942602338477e-06, + "loss": 0.2985, + "step": 75 + }, + { + "epoch": 0.28, + "grad_norm": 1.170337150254348, + "learning_rate": 4.813897666428054e-06, + "loss": 0.2969, + "step": 76 + }, + { + "epoch": 0.28, + "grad_norm": 1.339414484466056, + "learning_rate": 4.808289222821491e-06, + "loss": 0.2985, + "step": 77 + }, + { + "epoch": 0.29, + "grad_norm": 1.1944077580462804, + "learning_rate": 4.802600886679743e-06, + "loss": 0.2852, + "step": 78 + }, + { + "epoch": 0.29, + "grad_norm": 1.357246876413576, + "learning_rate": 4.79683285488264e-06, + "loss": 0.2904, + "step": 79 + }, + { + "epoch": 0.29, + "grad_norm": 1.4115119936533302, + "learning_rate": 4.790985327068376e-06, + "loss": 0.3079, + "step": 80 + }, + { + "epoch": 0.3, + "grad_norm": 1.285315536324781, + "learning_rate": 4.7850585056265866e-06, + "loss": 0.2816, + "step": 81 + }, + { + "epoch": 0.3, + "grad_norm": 1.3631452273406317, + "learning_rate": 4.779052595691355e-06, + "loss": 0.2865, + "step": 82 + }, + { + "epoch": 0.3, + "grad_norm": 1.196518391890594, + "learning_rate": 4.772967805134106e-06, + "loss": 0.2793, + "step": 83 + }, + { + "epoch": 0.31, + "grad_norm": 1.2485622601747421, + "learning_rate": 4.766804344556414e-06, + "loss": 0.2827, + "step": 84 + }, + { + "epoch": 0.31, + "grad_norm": 1.2945099002171803, + "learning_rate": 4.7605624272827125e-06, + "loss": 0.2854, + "step": 85 + }, + { + "epoch": 0.32, + "grad_norm": 1.224576498812201, + "learning_rate": 4.754242269352911e-06, + "loss": 0.2875, + "step": 86 + }, + { + "epoch": 0.32, + "grad_norm": 1.2535747430861524, + "learning_rate": 4.747844089514919e-06, + "loss": 0.2807, + "step": 87 + }, + { + "epoch": 0.32, + "grad_norm": 1.171951212608294, + "learning_rate": 4.741368109217072e-06, + "loss": 0.2761, + "step": 88 + }, + { + "epoch": 0.33, + "grad_norm": 1.2123280755320154, + "learning_rate": 4.734814552600469e-06, + "loss": 0.2832, + "step": 89 + }, + { + "epoch": 0.33, + "grad_norm": 1.1358700523339582, + "learning_rate": 4.728183646491215e-06, + "loss": 0.2871, + "step": 90 + }, + { + "epoch": 0.33, + "grad_norm": 1.1484698203958048, + "learning_rate": 4.721475620392567e-06, + "loss": 0.2806, + "step": 91 + }, + { + "epoch": 0.34, + "grad_norm": 1.1887290775946084, + "learning_rate": 4.714690706477e-06, + "loss": 0.2858, + "step": 92 + }, + { + "epoch": 0.34, + "grad_norm": 1.1568061250650739, + "learning_rate": 4.707829139578156e-06, + "loss": 0.2888, + "step": 93 + }, + { + 
"epoch": 0.35, + "grad_norm": 1.176832058354239, + "learning_rate": 4.700891157182729e-06, + "loss": 0.2829, + "step": 94 + }, + { + "epoch": 0.35, + "grad_norm": 1.138549309431515, + "learning_rate": 4.693876999422241e-06, + "loss": 0.2763, + "step": 95 + }, + { + "epoch": 0.35, + "grad_norm": 1.1479926100837645, + "learning_rate": 4.68678690906473e-06, + "loss": 0.2686, + "step": 96 + }, + { + "epoch": 0.36, + "grad_norm": 1.1771516377197246, + "learning_rate": 4.679621131506347e-06, + "loss": 0.2814, + "step": 97 + }, + { + "epoch": 0.36, + "grad_norm": 1.2184996974539424, + "learning_rate": 4.672379914762867e-06, + "loss": 0.2822, + "step": 98 + }, + { + "epoch": 0.36, + "grad_norm": 1.1792108348242942, + "learning_rate": 4.665063509461098e-06, + "loss": 0.282, + "step": 99 + }, + { + "epoch": 0.37, + "grad_norm": 1.2850683815489914, + "learning_rate": 4.657672168830211e-06, + "loss": 0.2776, + "step": 100 + }, + { + "epoch": 0.37, + "grad_norm": 1.2508897770511975, + "learning_rate": 4.650206148692977e-06, + "loss": 0.2787, + "step": 101 + }, + { + "epoch": 0.37, + "grad_norm": 1.2031990746786907, + "learning_rate": 4.642665707456908e-06, + "loss": 0.2719, + "step": 102 + }, + { + "epoch": 0.38, + "grad_norm": 1.1842474930123255, + "learning_rate": 4.635051106105316e-06, + "loss": 0.2732, + "step": 103 + }, + { + "epoch": 0.38, + "grad_norm": 1.2596970412015132, + "learning_rate": 4.627362608188281e-06, + "loss": 0.2731, + "step": 104 + }, + { + "epoch": 0.39, + "grad_norm": 1.4294759311096437, + "learning_rate": 4.619600479813524e-06, + "loss": 0.2738, + "step": 105 + }, + { + "epoch": 0.39, + "grad_norm": 1.31619095423113, + "learning_rate": 4.6117649896372055e-06, + "loss": 0.2764, + "step": 106 + }, + { + "epoch": 0.39, + "grad_norm": 1.2349728666776751, + "learning_rate": 4.6038564088546185e-06, + "loss": 0.2722, + "step": 107 + }, + { + "epoch": 0.4, + "grad_norm": 1.2418477065252158, + "learning_rate": 4.5958750111908065e-06, + "loss": 0.271, + "step": 108 + }, + { + "epoch": 0.4, + "grad_norm": 1.3529322240859796, + "learning_rate": 4.587821072891089e-06, + "loss": 0.276, + "step": 109 + }, + { + "epoch": 0.4, + "grad_norm": 1.2671711562594927, + "learning_rate": 4.579694872711501e-06, + "loss": 0.2706, + "step": 110 + }, + { + "epoch": 0.41, + "grad_norm": 1.238356873891121, + "learning_rate": 4.571496691909142e-06, + "loss": 0.2749, + "step": 111 + }, + { + "epoch": 0.41, + "grad_norm": 1.2059912760303926, + "learning_rate": 4.563226814232444e-06, + "loss": 0.2676, + "step": 112 + }, + { + "epoch": 0.42, + "grad_norm": 1.1876458610423755, + "learning_rate": 4.554885525911351e-06, + "loss": 0.2743, + "step": 113 + }, + { + "epoch": 0.42, + "grad_norm": 1.1715592937521375, + "learning_rate": 4.54647311564741e-06, + "loss": 0.2734, + "step": 114 + }, + { + "epoch": 0.42, + "grad_norm": 1.236329928620471, + "learning_rate": 4.53798987460378e-06, + "loss": 0.2855, + "step": 115 + }, + { + "epoch": 0.43, + "grad_norm": 1.1717820999866062, + "learning_rate": 4.529436096395157e-06, + "loss": 0.2699, + "step": 116 + }, + { + "epoch": 0.43, + "grad_norm": 1.3490101744641771, + "learning_rate": 4.520812077077604e-06, + "loss": 0.2731, + "step": 117 + }, + { + "epoch": 0.43, + "grad_norm": 1.192962777526519, + "learning_rate": 4.512118115138315e-06, + "loss": 0.2719, + "step": 118 + }, + { + "epoch": 0.44, + "grad_norm": 1.2384657820337475, + "learning_rate": 4.5033545114852734e-06, + "loss": 0.2647, + "step": 119 + }, + { + "epoch": 0.44, + "grad_norm": 1.2128578058956592, + 
"learning_rate": 4.494521569436845e-06, + "loss": 0.2615, + "step": 120 + }, + { + "epoch": 0.44, + "grad_norm": 1.3237640584842072, + "learning_rate": 4.485619594711278e-06, + "loss": 0.2663, + "step": 121 + }, + { + "epoch": 0.45, + "grad_norm": 1.2691929068372239, + "learning_rate": 4.476648895416116e-06, + "loss": 0.2614, + "step": 122 + }, + { + "epoch": 0.45, + "grad_norm": 1.2606618599832538, + "learning_rate": 4.467609782037543e-06, + "loss": 0.2606, + "step": 123 + }, + { + "epoch": 0.46, + "grad_norm": 1.3048381409549332, + "learning_rate": 4.4585025674296315e-06, + "loss": 0.2601, + "step": 124 + }, + { + "epoch": 0.46, + "grad_norm": 1.3022768451107203, + "learning_rate": 4.449327566803515e-06, + "loss": 0.2683, + "step": 125 + }, + { + "epoch": 0.46, + "grad_norm": 1.3820289309230962, + "learning_rate": 4.44008509771648e-06, + "loss": 0.2681, + "step": 126 + }, + { + "epoch": 0.47, + "grad_norm": 1.2802354999925132, + "learning_rate": 4.430775480060973e-06, + "loss": 0.2648, + "step": 127 + }, + { + "epoch": 0.47, + "grad_norm": 1.3242106497833372, + "learning_rate": 4.4213990360535274e-06, + "loss": 0.268, + "step": 128 + }, + { + "epoch": 0.47, + "grad_norm": 1.3009976864959876, + "learning_rate": 4.411956090223618e-06, + "loss": 0.2662, + "step": 129 + }, + { + "epoch": 0.48, + "grad_norm": 1.3212829688401424, + "learning_rate": 4.4024469694024194e-06, + "loss": 0.2605, + "step": 130 + }, + { + "epoch": 0.48, + "grad_norm": 1.2123869956343973, + "learning_rate": 4.3928720027115015e-06, + "loss": 0.2604, + "step": 131 + }, + { + "epoch": 0.48, + "grad_norm": 1.284537459167204, + "learning_rate": 4.383231521551432e-06, + "loss": 0.2593, + "step": 132 + }, + { + "epoch": 0.49, + "grad_norm": 1.443338680183996, + "learning_rate": 4.373525859590313e-06, + "loss": 0.2561, + "step": 133 + }, + { + "epoch": 0.49, + "grad_norm": 1.2809230468289576, + "learning_rate": 4.3637553527522265e-06, + "loss": 0.2599, + "step": 134 + }, + { + "epoch": 0.5, + "grad_norm": 1.3669470609932883, + "learning_rate": 4.3539203392056114e-06, + "loss": 0.2587, + "step": 135 + }, + { + "epoch": 0.5, + "grad_norm": 1.4112940230474231, + "learning_rate": 4.3440211593515556e-06, + "loss": 0.2585, + "step": 136 + }, + { + "epoch": 0.5, + "eval_loss": 0.28355109691619873, + "eval_runtime": 1744.5175, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 136 + }, + { + "epoch": 0.5, + "grad_norm": 1.3061396480876788, + "learning_rate": 4.33405815581202e-06, + "loss": 0.2549, + "step": 137 + }, + { + "epoch": 0.51, + "grad_norm": 1.46460991921356, + "learning_rate": 4.324031673417971e-06, + "loss": 0.2639, + "step": 138 + }, + { + "epoch": 0.51, + "grad_norm": 1.211168578821325, + "learning_rate": 4.313942059197457e-06, + "loss": 0.2581, + "step": 139 + }, + { + "epoch": 0.51, + "grad_norm": 1.4657150585182341, + "learning_rate": 4.303789662363587e-06, + "loss": 0.2616, + "step": 140 + }, + { + "epoch": 0.52, + "grad_norm": 1.4251800081691455, + "learning_rate": 4.29357483430245e-06, + "loss": 0.2668, + "step": 141 + }, + { + "epoch": 0.52, + "grad_norm": 1.3599666478045191, + "learning_rate": 4.283297928560951e-06, + "loss": 0.2598, + "step": 142 + }, + { + "epoch": 0.53, + "grad_norm": 1.6103346253156021, + "learning_rate": 4.272959300834574e-06, + "loss": 0.2656, + "step": 143 + }, + { + "epoch": 0.53, + "grad_norm": 1.2184694580930981, + "learning_rate": 4.262559308955072e-06, + "loss": 0.2546, + "step": 144 + }, + { + "epoch": 0.53, + "grad_norm": 1.3362006281948362, + 
"learning_rate": 4.252098312878083e-06, + "loss": 0.2557, + "step": 145 + }, + { + "epoch": 0.54, + "grad_norm": 1.3369296531115935, + "learning_rate": 4.241576674670668e-06, + "loss": 0.2568, + "step": 146 + }, + { + "epoch": 0.54, + "grad_norm": 1.4747872641188995, + "learning_rate": 4.230994758498783e-06, + "loss": 0.2564, + "step": 147 + }, + { + "epoch": 0.54, + "grad_norm": 1.60778480089848, + "learning_rate": 4.220352930614672e-06, + "loss": 0.2573, + "step": 148 + }, + { + "epoch": 0.55, + "grad_norm": 1.188044808018822, + "learning_rate": 4.209651559344195e-06, + "loss": 0.2525, + "step": 149 + }, + { + "epoch": 0.55, + "grad_norm": 1.5856639134844415, + "learning_rate": 4.198891015074074e-06, + "loss": 0.2647, + "step": 150 + }, + { + "epoch": 0.55, + "grad_norm": 1.2859262024596512, + "learning_rate": 4.1880716702390764e-06, + "loss": 0.2471, + "step": 151 + }, + { + "epoch": 0.56, + "grad_norm": 1.4653590828956073, + "learning_rate": 4.177193899309127e-06, + "loss": 0.2575, + "step": 152 + }, + { + "epoch": 0.56, + "grad_norm": 1.1821237121686685, + "learning_rate": 4.166258078776342e-06, + "loss": 0.2493, + "step": 153 + }, + { + "epoch": 0.57, + "grad_norm": 1.575597475848357, + "learning_rate": 4.155264587142002e-06, + "loss": 0.2537, + "step": 154 + }, + { + "epoch": 0.57, + "grad_norm": 1.2702085752651588, + "learning_rate": 4.144213804903449e-06, + "loss": 0.2493, + "step": 155 + }, + { + "epoch": 0.57, + "grad_norm": 1.5026735427361002, + "learning_rate": 4.133106114540923e-06, + "loss": 0.2505, + "step": 156 + }, + { + "epoch": 0.58, + "grad_norm": 1.5297903686100347, + "learning_rate": 4.121941900504316e-06, + "loss": 0.2472, + "step": 157 + }, + { + "epoch": 0.58, + "grad_norm": 1.25258373375573, + "learning_rate": 4.110721549199866e-06, + "loss": 0.2487, + "step": 158 + }, + { + "epoch": 0.58, + "grad_norm": 1.5941545034573665, + "learning_rate": 4.099445448976793e-06, + "loss": 0.2497, + "step": 159 + }, + { + "epoch": 0.59, + "grad_norm": 1.3096080921873048, + "learning_rate": 4.088113990113846e-06, + "loss": 0.2439, + "step": 160 + }, + { + "epoch": 0.59, + "grad_norm": 1.6950266606195492, + "learning_rate": 4.076727564805803e-06, + "loss": 0.2538, + "step": 161 + }, + { + "epoch": 0.6, + "grad_norm": 1.440485526817555, + "learning_rate": 4.065286567149891e-06, + "loss": 0.2613, + "step": 162 + }, + { + "epoch": 0.6, + "grad_norm": 1.606032223752871, + "learning_rate": 4.0537913931321495e-06, + "loss": 0.2505, + "step": 163 + }, + { + "epoch": 0.6, + "grad_norm": 1.5319951141665498, + "learning_rate": 4.042242440613724e-06, + "loss": 0.256, + "step": 164 + }, + { + "epoch": 0.61, + "grad_norm": 1.3468098768373629, + "learning_rate": 4.030640109317096e-06, + "loss": 0.2424, + "step": 165 + }, + { + "epoch": 0.61, + "grad_norm": 1.6652562481471478, + "learning_rate": 4.018984800812248e-06, + "loss": 0.2396, + "step": 166 + }, + { + "epoch": 0.61, + "grad_norm": 1.302975081280886, + "learning_rate": 4.007276918502763e-06, + "loss": 0.2462, + "step": 167 + }, + { + "epoch": 0.62, + "grad_norm": 1.623125313268604, + "learning_rate": 3.995516867611865e-06, + "loss": 0.256, + "step": 168 + }, + { + "epoch": 0.62, + "grad_norm": 1.3069782036585045, + "learning_rate": 3.983705055168391e-06, + "loss": 0.2518, + "step": 169 + }, + { + "epoch": 0.62, + "grad_norm": 1.6527449270834242, + "learning_rate": 3.971841889992706e-06, + "loss": 0.2544, + "step": 170 + }, + { + "epoch": 0.63, + "grad_norm": 1.3586948189643275, + "learning_rate": 3.959927782682551e-06, + "loss": 0.2491, 
+ "step": 171 + }, + { + "epoch": 0.63, + "grad_norm": 1.3440233460948727, + "learning_rate": 3.947963145598833e-06, + "loss": 0.2516, + "step": 172 + }, + { + "epoch": 0.64, + "grad_norm": 1.3389168317613516, + "learning_rate": 3.935948392851354e-06, + "loss": 0.2541, + "step": 173 + }, + { + "epoch": 0.64, + "grad_norm": 1.3142664585396417, + "learning_rate": 3.923883940284472e-06, + "loss": 0.2508, + "step": 174 + }, + { + "epoch": 0.64, + "grad_norm": 1.2767521320981983, + "learning_rate": 3.911770205462717e-06, + "loss": 0.2479, + "step": 175 + }, + { + "epoch": 0.65, + "grad_norm": 1.3281972191838929, + "learning_rate": 3.899607607656334e-06, + "loss": 0.2501, + "step": 176 + }, + { + "epoch": 0.65, + "grad_norm": 1.3793116543581005, + "learning_rate": 3.887396567826769e-06, + "loss": 0.2454, + "step": 177 + }, + { + "epoch": 0.65, + "grad_norm": 1.3293987156576104, + "learning_rate": 3.875137508612104e-06, + "loss": 0.249, + "step": 178 + }, + { + "epoch": 0.66, + "grad_norm": 1.4957835845929142, + "learning_rate": 3.862830854312427e-06, + "loss": 0.2445, + "step": 179 + }, + { + "epoch": 0.66, + "grad_norm": 1.2804679875446887, + "learning_rate": 3.850477030875147e-06, + "loss": 0.2411, + "step": 180 + }, + { + "epoch": 0.66, + "grad_norm": 1.5611119218300138, + "learning_rate": 3.838076465880248e-06, + "loss": 0.237, + "step": 181 + }, + { + "epoch": 0.67, + "grad_norm": 1.3387338916825537, + "learning_rate": 3.825629588525498e-06, + "loss": 0.2429, + "step": 182 + }, + { + "epoch": 0.67, + "grad_norm": 1.5091720406707172, + "learning_rate": 3.813136829611583e-06, + "loss": 0.2428, + "step": 183 + }, + { + "epoch": 0.68, + "grad_norm": 1.359116281666385, + "learning_rate": 3.8005986215272056e-06, + "loss": 0.2543, + "step": 184 + }, + { + "epoch": 0.68, + "grad_norm": 1.4094254259139338, + "learning_rate": 3.7880153982341167e-06, + "loss": 0.2502, + "step": 185 + }, + { + "epoch": 0.68, + "grad_norm": 1.2806047483095333, + "learning_rate": 3.7753875952520943e-06, + "loss": 0.2431, + "step": 186 + }, + { + "epoch": 0.69, + "grad_norm": 1.409218880016104, + "learning_rate": 3.7627156496438686e-06, + "loss": 0.2463, + "step": 187 + }, + { + "epoch": 0.69, + "grad_norm": 1.2466244404207094, + "learning_rate": 3.7500000000000005e-06, + "loss": 0.2372, + "step": 188 + }, + { + "epoch": 0.69, + "grad_norm": 1.4192484726979884, + "learning_rate": 3.7372410864236954e-06, + "loss": 0.2396, + "step": 189 + }, + { + "epoch": 0.7, + "grad_norm": 1.3260879207799772, + "learning_rate": 3.7244393505155713e-06, + "loss": 0.241, + "step": 190 + }, + { + "epoch": 0.7, + "grad_norm": 1.6407257220698948, + "learning_rate": 3.7115952353583804e-06, + "loss": 0.2552, + "step": 191 + }, + { + "epoch": 0.71, + "grad_norm": 1.4113760059054485, + "learning_rate": 3.6987091855016667e-06, + "loss": 0.2513, + "step": 192 + }, + { + "epoch": 0.71, + "grad_norm": 1.3008883773347888, + "learning_rate": 3.6857816469463806e-06, + "loss": 0.2361, + "step": 193 + }, + { + "epoch": 0.71, + "grad_norm": 1.3040857591494066, + "learning_rate": 3.6728130671294485e-06, + "loss": 0.2491, + "step": 194 + }, + { + "epoch": 0.72, + "grad_norm": 1.2543618451342111, + "learning_rate": 3.6598038949082777e-06, + "loss": 0.2309, + "step": 195 + }, + { + "epoch": 0.72, + "grad_norm": 1.3944108707435374, + "learning_rate": 3.6467545805452266e-06, + "loss": 0.2426, + "step": 196 + }, + { + "epoch": 0.72, + "grad_norm": 1.301851485207592, + "learning_rate": 3.6336655756920198e-06, + "loss": 0.2421, + "step": 197 + }, + { + "epoch": 
0.73, + "grad_norm": 1.3562155385998595, + "learning_rate": 3.620537333374114e-06, + "loss": 0.2406, + "step": 198 + }, + { + "epoch": 0.73, + "grad_norm": 1.4263666275672418, + "learning_rate": 3.6073703079750204e-06, + "loss": 0.2418, + "step": 199 + }, + { + "epoch": 0.73, + "grad_norm": 1.2767612877970262, + "learning_rate": 3.594164955220577e-06, + "loss": 0.2353, + "step": 200 + }, + { + "epoch": 0.74, + "grad_norm": 1.3349267171117716, + "learning_rate": 3.5809217321631745e-06, + "loss": 0.2348, + "step": 201 + }, + { + "epoch": 0.74, + "grad_norm": 1.2217693484408796, + "learning_rate": 3.5676410971659404e-06, + "loss": 0.2287, + "step": 202 + }, + { + "epoch": 0.75, + "grad_norm": 1.4554473054976789, + "learning_rate": 3.5543235098868702e-06, + "loss": 0.241, + "step": 203 + }, + { + "epoch": 0.75, + "grad_norm": 1.184805169962002, + "learning_rate": 3.5409694312629193e-06, + "loss": 0.2352, + "step": 204 + }, + { + "epoch": 0.75, + "eval_loss": 0.25444912910461426, + "eval_runtime": 1745.7708, + "eval_samples_per_second": 1.324, + "eval_steps_per_second": 0.074, + "step": 204 + }, + { + "epoch": 0.75, + "grad_norm": 1.2973792749867632, + "learning_rate": 3.527579323494055e-06, + "loss": 0.2404, + "step": 205 + }, + { + "epoch": 0.76, + "grad_norm": 1.390330195755624, + "learning_rate": 3.5141536500272494e-06, + "loss": 0.2397, + "step": 206 + }, + { + "epoch": 0.76, + "grad_norm": 1.2415077962351395, + "learning_rate": 3.5006928755404467e-06, + "loss": 0.2296, + "step": 207 + }, + { + "epoch": 0.76, + "grad_norm": 1.3223264932925407, + "learning_rate": 3.4871974659264786e-06, + "loss": 0.2332, + "step": 208 + }, + { + "epoch": 0.77, + "grad_norm": 1.4376836200586416, + "learning_rate": 3.473667888276935e-06, + "loss": 0.2361, + "step": 209 + }, + { + "epoch": 0.77, + "grad_norm": 1.2495709137167788, + "learning_rate": 3.4601046108660036e-06, + "loss": 0.2351, + "step": 210 + }, + { + "epoch": 0.78, + "grad_norm": 1.4449247677336339, + "learning_rate": 3.446508103134259e-06, + "loss": 0.2373, + "step": 211 + }, + { + "epoch": 0.78, + "grad_norm": 1.3961526866418432, + "learning_rate": 3.4328788356724135e-06, + "loss": 0.2383, + "step": 212 + }, + { + "epoch": 0.78, + "grad_norm": 1.2766356071702671, + "learning_rate": 3.419217280205032e-06, + "loss": 0.2348, + "step": 213 + }, + { + "epoch": 0.79, + "grad_norm": 1.2201985305952152, + "learning_rate": 3.4055239095742067e-06, + "loss": 0.236, + "step": 214 + }, + { + "epoch": 0.79, + "grad_norm": 1.3670381437866368, + "learning_rate": 3.3917991977231855e-06, + "loss": 0.228, + "step": 215 + }, + { + "epoch": 0.79, + "grad_norm": 1.2724648753569285, + "learning_rate": 3.378043619679974e-06, + "loss": 0.2386, + "step": 216 + }, + { + "epoch": 0.8, + "grad_norm": 1.2826844172302947, + "learning_rate": 3.364257651540891e-06, + "loss": 0.2366, + "step": 217 + }, + { + "epoch": 0.8, + "grad_norm": 1.1767059777022655, + "learning_rate": 3.3504417704540925e-06, + "loss": 0.2251, + "step": 218 + }, + { + "epoch": 0.8, + "grad_norm": 1.3111513963454882, + "learning_rate": 3.3365964546030544e-06, + "loss": 0.2396, + "step": 219 + }, + { + "epoch": 0.81, + "grad_norm": 1.2617225478707708, + "learning_rate": 3.322722183190025e-06, + "loss": 0.2412, + "step": 220 + }, + { + "epoch": 0.81, + "grad_norm": 1.2183220743609309, + "learning_rate": 3.308819436419437e-06, + "loss": 0.2276, + "step": 221 + }, + { + "epoch": 0.82, + "grad_norm": 1.31561824749082, + "learning_rate": 3.2948886954812877e-06, + "loss": 0.2404, + "step": 222 + }, + { + "epoch": 
0.82, + "grad_norm": 1.250087552624437, + "learning_rate": 3.280930442534486e-06, + "loss": 0.2263, + "step": 223 + }, + { + "epoch": 0.82, + "grad_norm": 1.2524310598377044, + "learning_rate": 3.26694516069016e-06, + "loss": 0.2368, + "step": 224 + }, + { + "epoch": 0.83, + "grad_norm": 1.3487266981725987, + "learning_rate": 3.252933333994942e-06, + "loss": 0.2243, + "step": 225 + }, + { + "epoch": 0.83, + "grad_norm": 1.2427013509424278, + "learning_rate": 3.238895447414211e-06, + "loss": 0.2366, + "step": 226 + }, + { + "epoch": 0.83, + "grad_norm": 1.268723527146989, + "learning_rate": 3.2248319868153067e-06, + "loss": 0.2262, + "step": 227 + }, + { + "epoch": 0.84, + "grad_norm": 1.2476040692827028, + "learning_rate": 3.210743438950718e-06, + "loss": 0.234, + "step": 228 + }, + { + "epoch": 0.84, + "grad_norm": 1.2944243964732431, + "learning_rate": 3.196630291441231e-06, + "loss": 0.2261, + "step": 229 + }, + { + "epoch": 0.84, + "grad_norm": 1.2348938264581308, + "learning_rate": 3.182493032759053e-06, + "loss": 0.2368, + "step": 230 + }, + { + "epoch": 0.85, + "grad_norm": 1.3877133957904717, + "learning_rate": 3.168332152210909e-06, + "loss": 0.2342, + "step": 231 + }, + { + "epoch": 0.85, + "grad_norm": 1.2088837041711673, + "learning_rate": 3.154148139921102e-06, + "loss": 0.222, + "step": 232 + }, + { + "epoch": 0.86, + "grad_norm": 1.4750513048080165, + "learning_rate": 3.1399414868145506e-06, + "loss": 0.2301, + "step": 233 + }, + { + "epoch": 0.86, + "grad_norm": 1.2097458338635088, + "learning_rate": 3.1257126845998e-06, + "loss": 0.2365, + "step": 234 + }, + { + "epoch": 0.86, + "grad_norm": 1.3570468614316236, + "learning_rate": 3.1114622257520004e-06, + "loss": 0.2275, + "step": 235 + }, + { + "epoch": 0.87, + "grad_norm": 1.2331713108579336, + "learning_rate": 3.0971906034958616e-06, + "loss": 0.2193, + "step": 236 + }, + { + "epoch": 0.87, + "grad_norm": 1.330924002893457, + "learning_rate": 3.0828983117885856e-06, + "loss": 0.2258, + "step": 237 + }, + { + "epoch": 0.87, + "grad_norm": 1.2713775149937143, + "learning_rate": 3.0685858453027668e-06, + "loss": 0.2287, + "step": 238 + }, + { + "epoch": 0.88, + "grad_norm": 1.3460227514964078, + "learning_rate": 3.05425369940927e-06, + "loss": 0.2268, + "step": 239 + }, + { + "epoch": 0.88, + "grad_norm": 1.3124465221253792, + "learning_rate": 3.0399023701600903e-06, + "loss": 0.2237, + "step": 240 + }, + { + "epoch": 0.89, + "grad_norm": 1.2621420000416141, + "learning_rate": 3.0255323542711784e-06, + "loss": 0.221, + "step": 241 + }, + { + "epoch": 0.89, + "grad_norm": 1.3207975689997922, + "learning_rate": 3.011144149105251e-06, + "loss": 0.2177, + "step": 242 + }, + { + "epoch": 0.89, + "grad_norm": 1.3364690610440046, + "learning_rate": 2.996738252654577e-06, + "loss": 0.2266, + "step": 243 + }, + { + "epoch": 0.9, + "grad_norm": 1.3069082882086795, + "learning_rate": 2.9823151635237424e-06, + "loss": 0.2274, + "step": 244 + }, + { + "epoch": 0.9, + "grad_norm": 1.402608898892496, + "learning_rate": 2.9678753809123884e-06, + "loss": 0.233, + "step": 245 + }, + { + "epoch": 0.9, + "grad_norm": 1.3349783439901974, + "learning_rate": 2.9534194045979397e-06, + "loss": 0.2198, + "step": 246 + }, + { + "epoch": 0.91, + "grad_norm": 1.3319911413244738, + "learning_rate": 2.938947734918302e-06, + "loss": 0.2241, + "step": 247 + }, + { + "epoch": 0.91, + "grad_norm": 1.2836113523110935, + "learning_rate": 2.924460872754547e-06, + "loss": 0.2247, + "step": 248 + }, + { + "epoch": 0.91, + "grad_norm": 1.3420053396118825, + 
"learning_rate": 2.9099593195135743e-06, + "loss": 0.2245, + "step": 249 + }, + { + "epoch": 0.92, + "grad_norm": 1.3018957576647208, + "learning_rate": 2.8954435771107604e-06, + "loss": 0.2198, + "step": 250 + }, + { + "epoch": 0.92, + "grad_norm": 1.493108819116986, + "learning_rate": 2.8809141479525843e-06, + "loss": 0.2261, + "step": 251 + }, + { + "epoch": 0.93, + "grad_norm": 1.2240817395656585, + "learning_rate": 2.8663715349192388e-06, + "loss": 0.2182, + "step": 252 + }, + { + "epoch": 0.93, + "grad_norm": 1.3972966685231503, + "learning_rate": 2.8518162413472266e-06, + "loss": 0.2289, + "step": 253 + }, + { + "epoch": 0.93, + "grad_norm": 1.3158850314947335, + "learning_rate": 2.8372487710119374e-06, + "loss": 0.2286, + "step": 254 + }, + { + "epoch": 0.94, + "grad_norm": 1.295772538693981, + "learning_rate": 2.8226696281102134e-06, + "loss": 0.2157, + "step": 255 + }, + { + "epoch": 0.94, + "grad_norm": 1.34085577207588, + "learning_rate": 2.8080793172428965e-06, + "loss": 0.2223, + "step": 256 + }, + { + "epoch": 0.94, + "grad_norm": 1.3610764715193495, + "learning_rate": 2.7934783433973672e-06, + "loss": 0.2227, + "step": 257 + }, + { + "epoch": 0.95, + "grad_norm": 1.2629712566442401, + "learning_rate": 2.778867211930061e-06, + "loss": 0.2263, + "step": 258 + }, + { + "epoch": 0.95, + "grad_norm": 1.2782582856568219, + "learning_rate": 2.764246428548983e-06, + "loss": 0.2234, + "step": 259 + }, + { + "epoch": 0.96, + "grad_norm": 1.2621019245043847, + "learning_rate": 2.7496164992961995e-06, + "loss": 0.2177, + "step": 260 + }, + { + "epoch": 0.96, + "grad_norm": 1.2033350046761524, + "learning_rate": 2.7349779305303263e-06, + "loss": 0.2226, + "step": 261 + }, + { + "epoch": 0.96, + "grad_norm": 1.361220136423699, + "learning_rate": 2.720331228909005e-06, + "loss": 0.2179, + "step": 262 + }, + { + "epoch": 0.97, + "grad_norm": 1.3715434561254194, + "learning_rate": 2.7056769013713623e-06, + "loss": 0.2231, + "step": 263 + }, + { + "epoch": 0.97, + "grad_norm": 1.1330086039392537, + "learning_rate": 2.691015455120468e-06, + "loss": 0.2164, + "step": 264 + }, + { + "epoch": 0.97, + "grad_norm": 1.2694263709270768, + "learning_rate": 2.6763473976057776e-06, + "loss": 0.2127, + "step": 265 + }, + { + "epoch": 0.98, + "grad_norm": 1.3274231972419466, + "learning_rate": 2.6616732365055713e-06, + "loss": 0.2092, + "step": 266 + }, + { + "epoch": 0.98, + "grad_norm": 1.276485394682339, + "learning_rate": 2.64699347970938e-06, + "loss": 0.2206, + "step": 267 + }, + { + "epoch": 0.98, + "grad_norm": 1.33640777595863, + "learning_rate": 2.6323086353004077e-06, + "loss": 0.2201, + "step": 268 + }, + { + "epoch": 0.99, + "grad_norm": 1.2867150222472765, + "learning_rate": 2.6176192115379494e-06, + "loss": 0.2176, + "step": 269 + }, + { + "epoch": 0.99, + "grad_norm": 1.220258552427881, + "learning_rate": 2.602925716839795e-06, + "loss": 0.2131, + "step": 270 + }, + { + "epoch": 1.0, + "grad_norm": 1.3301323985426015, + "learning_rate": 2.588228659764632e-06, + "loss": 0.2244, + "step": 271 + }, + { + "epoch": 1.0, + "grad_norm": 1.2313785507924382, + "learning_rate": 2.573528548994449e-06, + "loss": 0.2192, + "step": 272 + }, + { + "epoch": 1.0, + "eval_loss": 0.22680288553237915, + "eval_runtime": 1744.6696, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 272 + } + ], + "logging_steps": 1, + "max_steps": 544, + "num_input_tokens_seen": 0, + "num_train_epochs": 2, + "save_steps": 272, + "total_flos": 256045146439680.0, + "train_batch_size": 2, + 
"trial_name": null, + "trial_params": null +} diff --git a/checkpoint-272/training_args.bin b/checkpoint-272/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..57d371d207333f3981c73912981eb12ae9766a94 --- /dev/null +++ b/checkpoint-272/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01a4c76e5fdc09ec01dc7e8ead7778553f5e617c35ba83b4354ef7a547fbf2ae +size 7352 diff --git a/checkpoint-272/zero_to_fp32.py b/checkpoint-272/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..49b846633d6eb1e836e34681e44033581f4edb7b --- /dev/null +++ b/checkpoint-272/zero_to_fp32.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: python zero_to_fp32.py . pytorch_model.bin + +import argparse +import torch +import glob +import math +import os +import re +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + 
zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + + total_files = len(files) + state_dicts = [] + for f in files: + state_dict = torch.load(f, map_location=device) + # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights, + # and also handle the case where they were already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2, each param group can have a different partition_count, since data parallelism for expert + # parameters can differ from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. " + "Possibly an old checkpoint was overwritten, or a checkpoint didn't get saved by one or more processes." 
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + if zero_stage <= 2: + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + elif zero_stage == 3: + # if there is more than one param group, there will be multiple flattened tensors - one + # flattened tensor per group - for simplicity merge them into a single tensor + # + # XXX: could make the script more memory efficient for when there are multiple groups - it + # will require matching the sub-lists of param_shapes for each param group flattened tensor + + fp32_flat_groups = [ + torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) + ] + + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in 
range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = fp32_flat_groups[0].numel() * world_size + # Reconstruction protocol: For zero3 we need to zip the 
partitions together at the boundary of each + param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # an out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + for name, shape in param_shapes.items(): + + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # XXX: memory usage doubles here + state_dict[name] = torch.cat( + tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), + 0).narrow(0, 0, unpartitioned_numel).view(shape) + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): + """ + Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to read the tag from the 'latest' file, e.g., ``global_step14`` + + Returns: + - pytorch ``state_dict`` + + Note: this approach may not work if your application doesn't have sufficient free CPU memory, and + you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
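+ For the offline conversion, run e.g. ``python zero_to_fp32.py . pytorch_model.bin`` from the checkpoint folder. 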
+ + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application, i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): + """ + Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + """ + + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + print(f"Saving fp32 state dict to {output_file}") + torch.save(state_dict, output_file) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model on cpu + 2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model``: the modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context + of the same application, i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument( + "output_file", + type=str, + help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag) diff --git a/checkpoint-544/config.json b/checkpoint-544/config.json new file mode 100644 index 0000000000000000000000000000000000000000..321049cbcea926f12a79cc40f4a19a8f17cd8ed3 --- /dev/null +++ b/checkpoint-544/config.json @@ -0,0 +1,26 @@ +{ + "_name_or_path": "meta-math/MetaMath-Mistral-7B", + "architectures": [ + "MistralForCausalLM" + ], + "attention_dropout": 0.0, + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "max_position_embeddings": 32768, + "model_type": "mistral", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "rms_norm_eps": 1e-05, + "rope_theta": 10000.0, + "sliding_window": 4096, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.38.2", + "use_cache": false, + "vocab_size": 32001 +} diff --git a/checkpoint-544/generation_config.json b/checkpoint-544/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..282b497efd8f276cf9270e576fb79be429aebcdc --- /dev/null +++ b/checkpoint-544/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 1, + "do_sample": true, + "eos_token_id": 2, + "transformers_version": "4.38.2" +} diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..31b176dae291ae13cba28bd75948a34c32cd3fe1 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0b635a6f6c93873bb79a1f6f7e80dcca3787ce0fda8d4098c2d40359e2fa073 +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..a7a6df0ec91a4ab77fd719fc45c21d895063bf08 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b42f03ac86e8e2a33c86c2e5202e8c4acdd6dc200c2f8c9a6c8e50f0318529df +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 
100644 index 0000000000000000000000000000000000000000..5702d8e320d5216a45de35ffcfc0408bf795c673 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c92391ed04ed2926f1f28fdc573122425756b249bc2d21b2851b78baea89cd3b +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..46cc8b9c84d293856e5f904edf4b1528faeb3d67 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b2be3bc10a6a7a2dc37c52a0587e3fc56976e3e13b2298ddde6af69826afeeb +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..667fb25f27de14c10a685c8eaee6afa2afe119d5 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fdf1bf6bfe56ee728ffa31b8604f170adb5a5980a8b24f4aa662dfcd471d4f4 +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..bec3fa54700974078cb880972f3216d9f3339444 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3684e3dd7f9c957ee35cc90c43f7ff56a82ab875b36af71f12eb184e60b603c3 +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..e4214982067b454233fe9de284ed572cf35bbd28 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78c2672a287caac96da9b241ec70d12afbb0cf5d4540829c5f52d7fff6fa98a8 +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..075d42e52a43db8368f225961f0bb4b3897e41fb --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed24fdc0dc351ee4c1ad70b88af052fbf700353644b65b86afd6910ce918f61e +size 4831618059 diff --git a/checkpoint-544/global_step544/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt b/checkpoint-544/global_step544/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d2c3a0ae26f2713b9d9b216d0664c8a21867a091 --- /dev/null +++ b/checkpoint-544/global_step544/bf16_zero_pp_rank_8_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ebb6b14e237e3d7ee8c339b87f536906ab23894cfd3c6ef4496c89e4053394a +size 4831618059 diff --git 
a/checkpoint-544/global_step544/zero_pp_rank_0_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_0_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..e9491fda3b53d9778653c8cb661b979e33ab178a --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_0_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05f780232f1fbb656afded0ffeeb734c028ac6960f56536fb5bb144e06343358 +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_1_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_1_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9f9ef97cb8ee18e334e5085283dec977fb34880c --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_1_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48a5b6b7778eb6a68da63199dc8352775fd08e079f73af513a5ba376dd96d5af +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_2_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_2_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..833f388211d15106b40b019578c3df8b7ee637dd --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_2_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c66492b398fffa1b9caa9895f8cb9f70cefc538b86b06e76605ae64bf13c0b6 +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_3_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_3_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2bd2b038640ba52ce685528a364eac2fa8961e48 --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_3_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b5ecc08be2fc70d900f9ec52c9e5223471a97394ce547b4d6557bfe4877409b +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_4_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_4_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9ea278e5acb9c1c6fce1965cd2fc24d7f674371a --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_4_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:889be38f2c127d967dbc35f8de436faa0221294ea80d60dcca67fa06cf9cc53d +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_5_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_5_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f3617545d6867516c407dde1407479e862399641 --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_5_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bfb3480e6a34733d75286ebaf0abe92cdc4312608d5c65d97fca0994cd72b97 +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_6_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_6_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..542cd37720fd830453188b05d110a8a3ff64b544 --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_6_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b27822ebb1f00163963411a26dc737791e986a57a4547282f4f4915199396450 
+size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_7_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_7_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..050dd6980400b428da8d3531074fbed4714ac239 --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_7_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7e041ca9c19d047d71d38796731a923f812bfb66687db4c5e7b4ede59fa3729 +size 153829 diff --git a/checkpoint-544/global_step544/zero_pp_rank_8_mp_rank_00_model_states.pt b/checkpoint-544/global_step544/zero_pp_rank_8_mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0ee65729bb837f2a0f415abd02fcd6e16fb89fbe --- /dev/null +++ b/checkpoint-544/global_step544/zero_pp_rank_8_mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65f33b04d75a718db78cabadc16edfc0e2f73b405a018827241bc7193392ad85 +size 153829 diff --git a/checkpoint-544/latest b/checkpoint-544/latest new file mode 100644 index 0000000000000000000000000000000000000000..606df2a525126f7e381b691c76048c61744888b9 --- /dev/null +++ b/checkpoint-544/latest @@ -0,0 +1 @@ +global_step544 \ No newline at end of file diff --git a/checkpoint-544/model-00001-of-00003.safetensors b/checkpoint-544/model-00001-of-00003.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..603a8bcacbc0ef1a79431167166b301e7d3f2e8a --- /dev/null +++ b/checkpoint-544/model-00001-of-00003.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e6645954961b8991f249065609b6491bf175453e49211f0ca8ee2fbf8ffeb7 +size 4943170528 diff --git a/checkpoint-544/model-00002-of-00003.safetensors b/checkpoint-544/model-00002-of-00003.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9bbba5240dcc14409bc4b565b18005740cb2887e --- /dev/null +++ b/checkpoint-544/model-00002-of-00003.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:445c2dd56bda6dbe8914dcc5f16947ac46290e9d906f8566f9c0867481212964 +size 4999819336 diff --git a/checkpoint-544/model-00003-of-00003.safetensors b/checkpoint-544/model-00003-of-00003.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..b669e1c3b19f8d92611497b8980f6bf36bade125 --- /dev/null +++ b/checkpoint-544/model-00003-of-00003.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be5900b554d420f18e739a39543dc322439881329fbd19177f398f008c1e3a31 +size 4540524536 diff --git a/checkpoint-544/model.safetensors.index.json b/checkpoint-544/model.safetensors.index.json new file mode 100644 index 0000000000000000000000000000000000000000..74703d23d7ed329df7a6abebb508ca436906cacb --- /dev/null +++ b/checkpoint-544/model.safetensors.index.json @@ -0,0 +1,298 @@ +{ + "metadata": { + "total_size": 14483480576 + }, + "weight_map": { + "lm_head.weight": "model-00003-of-00003.safetensors", + "model.embed_tokens.weight": "model-00001-of-00003.safetensors", + "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + 
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + 
"model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + 
"model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors", + "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + 
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors", + "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors", + 
"model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + 
"model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors", + "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors", + "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.7.self_attn.v_proj.weight": 
"model-00001-of-00003.safetensors", + "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors", + "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors", + "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors", + "model.norm.weight": "model-00003-of-00003.safetensors" + } +} diff --git a/checkpoint-544/rng_state_0.pth b/checkpoint-544/rng_state_0.pth new file mode 100644 index 0000000000000000000000000000000000000000..2ec221bf648e2b334c5bdcb3a0ec2177020a22f3 --- /dev/null +++ b/checkpoint-544/rng_state_0.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8babca61ef4b87d8f5fcc3fd60cdc16d8236102ca8c9f0a354428eaf65a9b716 +size 16240 diff --git a/checkpoint-544/rng_state_1.pth b/checkpoint-544/rng_state_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..b7fe88913675041099e40945a9b1da714861a34a --- /dev/null +++ b/checkpoint-544/rng_state_1.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65498317cec9c6f487784e1606ebbf4bd3cfc0fca2fbd036c5781cc9fbac5aed +size 16240 diff --git a/checkpoint-544/rng_state_2.pth b/checkpoint-544/rng_state_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..236138c165e216c5f0f086024d241580bd4f6ba7 --- /dev/null +++ b/checkpoint-544/rng_state_2.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e869972f75f904a5f4aaac6c7bdc68f44bcd88cc03e0adfa9900b1940a02f80 +size 16240 diff --git a/checkpoint-544/rng_state_3.pth b/checkpoint-544/rng_state_3.pth new file mode 100644 index 0000000000000000000000000000000000000000..e2f2309166746c8562886eb4961188e7873e6d62 --- /dev/null +++ b/checkpoint-544/rng_state_3.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab42e6c2cb499329ff3233905f5cc063640903c3b883ce739bd3565f4102aa8e +size 16240 diff --git a/checkpoint-544/rng_state_4.pth b/checkpoint-544/rng_state_4.pth new file mode 100644 index 0000000000000000000000000000000000000000..0825eb67e41bb36ce71277c84ef6c3217e301936 --- /dev/null +++ b/checkpoint-544/rng_state_4.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f1715a4544ef117b0c1178643d0308900a43ec1d29be6d03c129428ced3b1ff +size 16240 diff --git a/checkpoint-544/rng_state_5.pth b/checkpoint-544/rng_state_5.pth new file 
mode 100644 index 0000000000000000000000000000000000000000..eb2191cb838d1fd089dc229c72f4b25a1d3d1927 --- /dev/null +++ b/checkpoint-544/rng_state_5.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a16ec6ddf9755b15a77b9238188f9c32f3e8a5e03dd587b67071a79c8b1884b6 +size 16240 diff --git a/checkpoint-544/rng_state_6.pth b/checkpoint-544/rng_state_6.pth new file mode 100644 index 0000000000000000000000000000000000000000..bb6c017f4a04cd72af43a7900b678a5b8fd03484 --- /dev/null +++ b/checkpoint-544/rng_state_6.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c35ac73f225656fcff46b792d0d1e71472ea3eaacaaa8a3275332734fbcb7047 +size 16240 diff --git a/checkpoint-544/rng_state_7.pth b/checkpoint-544/rng_state_7.pth new file mode 100644 index 0000000000000000000000000000000000000000..60eb3128dcfd0eb3fdc7408923acde54994e0d4e --- /dev/null +++ b/checkpoint-544/rng_state_7.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fa16a4d5de1688654ba6dba457a1fc80677b018eedf80243cf5e6217dfef49d +size 16240 diff --git a/checkpoint-544/rng_state_8.pth b/checkpoint-544/rng_state_8.pth new file mode 100644 index 0000000000000000000000000000000000000000..9b0e0e1d62333f8c99c0fa7674a2757d7a17dd40 --- /dev/null +++ b/checkpoint-544/rng_state_8.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0881763109dff036dc4a0a10e5161eb1f17f4075a9ef4de9b15909419202d86 +size 16240 diff --git a/checkpoint-544/scheduler.pt b/checkpoint-544/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..c898c6ac118b96458c399d2770eee1e365f80205 --- /dev/null +++ b/checkpoint-544/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed7f579a1fee249d0ea7d887e908b99603fac01d957d1e1cd90904dde9e6c139 +size 1064 diff --git a/checkpoint-544/trainer_state.json b/checkpoint-544/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..29a31371c199d281bfeaa5e1cc0cb12ebe998270 --- /dev/null +++ b/checkpoint-544/trainer_state.json @@ -0,0 +1,3901 @@ +{ + "best_metric": 0.19557544589042664, + "best_model_checkpoint": "./EulerMath-Mistral-7B-model/checkpoint-544", + "epoch": 1.982552800734619, + "eval_steps": 68, + "global_step": 544, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0, + "grad_norm": 19.19068191513093, + "learning_rate": 5.000000000000001e-07, + "loss": 0.707, + "step": 1 + }, + { + "epoch": 0.0, + "eval_loss": 0.9060535430908203, + "eval_runtime": 1745.9683, + "eval_samples_per_second": 1.324, + "eval_steps_per_second": 0.074, + "step": 1 + }, + { + "epoch": 0.01, + "grad_norm": 20.035932532601844, + "learning_rate": 1.0000000000000002e-06, + "loss": 0.7236, + "step": 2 + }, + { + "epoch": 0.01, + "grad_norm": 19.31513317860667, + "learning_rate": 1.5e-06, + "loss": 0.7201, + "step": 3 + }, + { + "epoch": 0.01, + "grad_norm": 16.561326930760348, + "learning_rate": 2.0000000000000003e-06, + "loss": 0.6717, + "step": 4 + }, + { + "epoch": 0.02, + "grad_norm": 9.069275733221579, + "learning_rate": 2.5e-06, + "loss": 0.573, + "step": 5 + }, + { + "epoch": 0.02, + "grad_norm": 6.0702110208300475, + "learning_rate": 3e-06, + "loss": 0.4965, + "step": 6 + }, + { + "epoch": 0.03, + "grad_norm": 6.5389430446896055, + "learning_rate": 3.5e-06, + "loss": 0.5093, + "step": 7 + }, + { + "epoch": 0.03, + "grad_norm": 7.709934958779789, + "learning_rate": 4.000000000000001e-06, + 
"loss": 0.524, + "step": 8 + }, + { + "epoch": 0.03, + "grad_norm": 6.1640217934257135, + "learning_rate": 4.5e-06, + "loss": 0.503, + "step": 9 + }, + { + "epoch": 0.04, + "grad_norm": 4.079182690080823, + "learning_rate": 5e-06, + "loss": 0.4787, + "step": 10 + }, + { + "epoch": 0.04, + "grad_norm": 4.269731620276111, + "learning_rate": 4.999956736067563e-06, + "loss": 0.4545, + "step": 11 + }, + { + "epoch": 0.04, + "grad_norm": 4.059214670786909, + "learning_rate": 4.999826945767665e-06, + "loss": 0.4638, + "step": 12 + }, + { + "epoch": 0.05, + "grad_norm": 3.583247385116129, + "learning_rate": 4.9996106335924965e-06, + "loss": 0.4396, + "step": 13 + }, + { + "epoch": 0.05, + "grad_norm": 3.2077663599892405, + "learning_rate": 4.999307807028872e-06, + "loss": 0.4287, + "step": 14 + }, + { + "epoch": 0.06, + "grad_norm": 2.3678816023894513, + "learning_rate": 4.998918476557964e-06, + "loss": 0.4169, + "step": 15 + }, + { + "epoch": 0.06, + "grad_norm": 1.9925263681909064, + "learning_rate": 4.998442655654946e-06, + "loss": 0.4099, + "step": 16 + }, + { + "epoch": 0.06, + "grad_norm": 1.7706573910428134, + "learning_rate": 4.997880360788527e-06, + "loss": 0.4003, + "step": 17 + }, + { + "epoch": 0.07, + "grad_norm": 1.6789390301868525, + "learning_rate": 4.997231611420374e-06, + "loss": 0.399, + "step": 18 + }, + { + "epoch": 0.07, + "grad_norm": 1.5622054221426698, + "learning_rate": 4.996496430004446e-06, + "loss": 0.3885, + "step": 19 + }, + { + "epoch": 0.07, + "grad_norm": 1.5663787846468284, + "learning_rate": 4.995674841986217e-06, + "loss": 0.3987, + "step": 20 + }, + { + "epoch": 0.08, + "grad_norm": 1.4502330087611721, + "learning_rate": 4.994766875801789e-06, + "loss": 0.3962, + "step": 21 + }, + { + "epoch": 0.08, + "grad_norm": 1.4188997099391882, + "learning_rate": 4.993772562876909e-06, + "loss": 0.3845, + "step": 22 + }, + { + "epoch": 0.08, + "grad_norm": 1.4360806887465898, + "learning_rate": 4.992691937625892e-06, + "loss": 0.3764, + "step": 23 + }, + { + "epoch": 0.09, + "grad_norm": 1.4216582090099372, + "learning_rate": 4.991525037450412e-06, + "loss": 0.3712, + "step": 24 + }, + { + "epoch": 0.09, + "grad_norm": 1.2856499279799387, + "learning_rate": 4.990271902738223e-06, + "loss": 0.3603, + "step": 25 + }, + { + "epoch": 0.1, + "grad_norm": 1.247117404577534, + "learning_rate": 4.988932576861754e-06, + "loss": 0.3652, + "step": 26 + }, + { + "epoch": 0.1, + "grad_norm": 1.3197850379000642, + "learning_rate": 4.987507106176606e-06, + "loss": 0.371, + "step": 27 + }, + { + "epoch": 0.1, + "grad_norm": 1.243400495941476, + "learning_rate": 4.985995540019956e-06, + "loss": 0.3599, + "step": 28 + }, + { + "epoch": 0.11, + "grad_norm": 1.3278566257982103, + "learning_rate": 4.984397930708838e-06, + "loss": 0.3594, + "step": 29 + }, + { + "epoch": 0.11, + "grad_norm": 1.337022527470652, + "learning_rate": 4.982714333538344e-06, + "loss": 0.3477, + "step": 30 + }, + { + "epoch": 0.11, + "grad_norm": 1.2099362672151601, + "learning_rate": 4.980944806779698e-06, + "loss": 0.3425, + "step": 31 + }, + { + "epoch": 0.12, + "grad_norm": 1.2110593150023343, + "learning_rate": 4.979089411678252e-06, + "loss": 0.3567, + "step": 32 + }, + { + "epoch": 0.12, + "grad_norm": 1.2334965596913852, + "learning_rate": 4.977148212451354e-06, + "loss": 0.3526, + "step": 33 + }, + { + "epoch": 0.12, + "grad_norm": 1.1687161424016368, + "learning_rate": 4.975121276286136e-06, + "loss": 0.3496, + "step": 34 + }, + { + "epoch": 0.13, + "grad_norm": 1.1881954676378432, + "learning_rate": 
4.973008673337181e-06, + "loss": 0.3321, + "step": 35 + }, + { + "epoch": 0.13, + "grad_norm": 1.2174270605971114, + "learning_rate": 4.970810476724097e-06, + "loss": 0.3446, + "step": 36 + }, + { + "epoch": 0.14, + "grad_norm": 1.1609330509652702, + "learning_rate": 4.968526762528988e-06, + "loss": 0.341, + "step": 37 + }, + { + "epoch": 0.14, + "grad_norm": 1.2149352568793006, + "learning_rate": 4.9661576097938205e-06, + "loss": 0.3459, + "step": 38 + }, + { + "epoch": 0.14, + "grad_norm": 1.1885081900677397, + "learning_rate": 4.963703100517684e-06, + "loss": 0.3425, + "step": 39 + }, + { + "epoch": 0.15, + "grad_norm": 1.113235885075549, + "learning_rate": 4.961163319653959e-06, + "loss": 0.339, + "step": 40 + }, + { + "epoch": 0.15, + "grad_norm": 1.0983562726057154, + "learning_rate": 4.958538355107369e-06, + "loss": 0.3298, + "step": 41 + }, + { + "epoch": 0.15, + "grad_norm": 1.1594289217865181, + "learning_rate": 4.955828297730949e-06, + "loss": 0.3187, + "step": 42 + }, + { + "epoch": 0.16, + "grad_norm": 1.1714548911644644, + "learning_rate": 4.953033241322887e-06, + "loss": 0.3373, + "step": 43 + }, + { + "epoch": 0.16, + "grad_norm": 1.1450397323165031, + "learning_rate": 4.950153282623289e-06, + "loss": 0.3232, + "step": 44 + }, + { + "epoch": 0.17, + "grad_norm": 1.1526363934692334, + "learning_rate": 4.947188521310827e-06, + "loss": 0.3243, + "step": 45 + }, + { + "epoch": 0.17, + "grad_norm": 1.2175235837438554, + "learning_rate": 4.944139059999286e-06, + "loss": 0.3252, + "step": 46 + }, + { + "epoch": 0.17, + "grad_norm": 1.099789045296574, + "learning_rate": 4.941005004234019e-06, + "loss": 0.3178, + "step": 47 + }, + { + "epoch": 0.18, + "grad_norm": 1.2219677196886505, + "learning_rate": 4.937786462488284e-06, + "loss": 0.3185, + "step": 48 + }, + { + "epoch": 0.18, + "grad_norm": 1.1806399387287625, + "learning_rate": 4.9344835461595016e-06, + "loss": 0.3131, + "step": 49 + }, + { + "epoch": 0.18, + "grad_norm": 1.1320527868188186, + "learning_rate": 4.93109636956539e-06, + "loss": 0.3198, + "step": 50 + }, + { + "epoch": 0.19, + "grad_norm": 1.2551253674231917, + "learning_rate": 4.927625049940013e-06, + "loss": 0.3063, + "step": 51 + }, + { + "epoch": 0.19, + "grad_norm": 1.1131050315591549, + "learning_rate": 4.9240697074297205e-06, + "loss": 0.3192, + "step": 52 + }, + { + "epoch": 0.19, + "grad_norm": 1.218025833644298, + "learning_rate": 4.920430465088992e-06, + "loss": 0.3083, + "step": 53 + }, + { + "epoch": 0.2, + "grad_norm": 1.090531576651011, + "learning_rate": 4.916707448876173e-06, + "loss": 0.3076, + "step": 54 + }, + { + "epoch": 0.2, + "grad_norm": 1.1865422414756877, + "learning_rate": 4.912900787649124e-06, + "loss": 0.3155, + "step": 55 + }, + { + "epoch": 0.21, + "grad_norm": 1.1236405558973956, + "learning_rate": 4.909010613160751e-06, + "loss": 0.306, + "step": 56 + }, + { + "epoch": 0.21, + "grad_norm": 1.222805799933775, + "learning_rate": 4.90503706005445e-06, + "loss": 0.3054, + "step": 57 + }, + { + "epoch": 0.21, + "grad_norm": 1.179814726076065, + "learning_rate": 4.900980265859449e-06, + "loss": 0.309, + "step": 58 + }, + { + "epoch": 0.22, + "grad_norm": 1.155763655177263, + "learning_rate": 4.896840370986042e-06, + "loss": 0.2974, + "step": 59 + }, + { + "epoch": 0.22, + "grad_norm": 1.1687171308842221, + "learning_rate": 4.892617518720737e-06, + "loss": 0.3018, + "step": 60 + }, + { + "epoch": 0.22, + "grad_norm": 1.2240587320323661, + "learning_rate": 4.88831185522129e-06, + "loss": 0.3066, + "step": 61 + }, + { + "epoch": 0.23, + 
"grad_norm": 1.1042960875500205, + "learning_rate": 4.883923529511646e-06, + "loss": 0.2977, + "step": 62 + }, + { + "epoch": 0.23, + "grad_norm": 1.1885949614868223, + "learning_rate": 4.87945269347679e-06, + "loss": 0.3087, + "step": 63 + }, + { + "epoch": 0.24, + "grad_norm": 1.1420656757477574, + "learning_rate": 4.874899501857477e-06, + "loss": 0.2904, + "step": 64 + }, + { + "epoch": 0.24, + "grad_norm": 1.1453980260713446, + "learning_rate": 4.87026411224489e-06, + "loss": 0.306, + "step": 65 + }, + { + "epoch": 0.24, + "grad_norm": 1.2729287210416769, + "learning_rate": 4.865546685075174e-06, + "loss": 0.2938, + "step": 66 + }, + { + "epoch": 0.25, + "grad_norm": 1.2052792222072466, + "learning_rate": 4.860747383623889e-06, + "loss": 0.2977, + "step": 67 + }, + { + "epoch": 0.25, + "grad_norm": 1.2657508580603682, + "learning_rate": 4.85586637400036e-06, + "loss": 0.3011, + "step": 68 + }, + { + "epoch": 0.25, + "eval_loss": 0.32630813121795654, + "eval_runtime": 1744.5857, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 68 + }, + { + "epoch": 0.25, + "grad_norm": 1.1832834131492187, + "learning_rate": 4.85090382514192e-06, + "loss": 0.2972, + "step": 69 + }, + { + "epoch": 0.26, + "grad_norm": 1.255475532117491, + "learning_rate": 4.845859908808074e-06, + "loss": 0.302, + "step": 70 + }, + { + "epoch": 0.26, + "grad_norm": 1.298818409489401, + "learning_rate": 4.8407347995745465e-06, + "loss": 0.2935, + "step": 71 + }, + { + "epoch": 0.26, + "grad_norm": 1.3499885398461409, + "learning_rate": 4.8355286748272405e-06, + "loss": 0.295, + "step": 72 + }, + { + "epoch": 0.27, + "grad_norm": 1.3446382549398914, + "learning_rate": 4.830241714756099e-06, + "loss": 0.2824, + "step": 73 + }, + { + "epoch": 0.27, + "grad_norm": 1.2082987304246777, + "learning_rate": 4.8248741023488705e-06, + "loss": 0.3026, + "step": 74 + }, + { + "epoch": 0.28, + "grad_norm": 1.3432457490726049, + "learning_rate": 4.81942602338477e-06, + "loss": 0.2985, + "step": 75 + }, + { + "epoch": 0.28, + "grad_norm": 1.170337150254348, + "learning_rate": 4.813897666428054e-06, + "loss": 0.2969, + "step": 76 + }, + { + "epoch": 0.28, + "grad_norm": 1.339414484466056, + "learning_rate": 4.808289222821491e-06, + "loss": 0.2985, + "step": 77 + }, + { + "epoch": 0.29, + "grad_norm": 1.1944077580462804, + "learning_rate": 4.802600886679743e-06, + "loss": 0.2852, + "step": 78 + }, + { + "epoch": 0.29, + "grad_norm": 1.357246876413576, + "learning_rate": 4.79683285488264e-06, + "loss": 0.2904, + "step": 79 + }, + { + "epoch": 0.29, + "grad_norm": 1.4115119936533302, + "learning_rate": 4.790985327068376e-06, + "loss": 0.3079, + "step": 80 + }, + { + "epoch": 0.3, + "grad_norm": 1.285315536324781, + "learning_rate": 4.7850585056265866e-06, + "loss": 0.2816, + "step": 81 + }, + { + "epoch": 0.3, + "grad_norm": 1.3631452273406317, + "learning_rate": 4.779052595691355e-06, + "loss": 0.2865, + "step": 82 + }, + { + "epoch": 0.3, + "grad_norm": 1.196518391890594, + "learning_rate": 4.772967805134106e-06, + "loss": 0.2793, + "step": 83 + }, + { + "epoch": 0.31, + "grad_norm": 1.2485622601747421, + "learning_rate": 4.766804344556414e-06, + "loss": 0.2827, + "step": 84 + }, + { + "epoch": 0.31, + "grad_norm": 1.2945099002171803, + "learning_rate": 4.7605624272827125e-06, + "loss": 0.2854, + "step": 85 + }, + { + "epoch": 0.32, + "grad_norm": 1.224576498812201, + "learning_rate": 4.754242269352911e-06, + "loss": 0.2875, + "step": 86 + }, + { + "epoch": 0.32, + "grad_norm": 1.2535747430861524, + 
"learning_rate": 4.747844089514919e-06, + "loss": 0.2807, + "step": 87 + }, + { + "epoch": 0.32, + "grad_norm": 1.171951212608294, + "learning_rate": 4.741368109217072e-06, + "loss": 0.2761, + "step": 88 + }, + { + "epoch": 0.33, + "grad_norm": 1.2123280755320154, + "learning_rate": 4.734814552600469e-06, + "loss": 0.2832, + "step": 89 + }, + { + "epoch": 0.33, + "grad_norm": 1.1358700523339582, + "learning_rate": 4.728183646491215e-06, + "loss": 0.2871, + "step": 90 + }, + { + "epoch": 0.33, + "grad_norm": 1.1484698203958048, + "learning_rate": 4.721475620392567e-06, + "loss": 0.2806, + "step": 91 + }, + { + "epoch": 0.34, + "grad_norm": 1.1887290775946084, + "learning_rate": 4.714690706477e-06, + "loss": 0.2858, + "step": 92 + }, + { + "epoch": 0.34, + "grad_norm": 1.1568061250650739, + "learning_rate": 4.707829139578156e-06, + "loss": 0.2888, + "step": 93 + }, + { + "epoch": 0.35, + "grad_norm": 1.176832058354239, + "learning_rate": 4.700891157182729e-06, + "loss": 0.2829, + "step": 94 + }, + { + "epoch": 0.35, + "grad_norm": 1.138549309431515, + "learning_rate": 4.693876999422241e-06, + "loss": 0.2763, + "step": 95 + }, + { + "epoch": 0.35, + "grad_norm": 1.1479926100837645, + "learning_rate": 4.68678690906473e-06, + "loss": 0.2686, + "step": 96 + }, + { + "epoch": 0.36, + "grad_norm": 1.1771516377197246, + "learning_rate": 4.679621131506347e-06, + "loss": 0.2814, + "step": 97 + }, + { + "epoch": 0.36, + "grad_norm": 1.2184996974539424, + "learning_rate": 4.672379914762867e-06, + "loss": 0.2822, + "step": 98 + }, + { + "epoch": 0.36, + "grad_norm": 1.1792108348242942, + "learning_rate": 4.665063509461098e-06, + "loss": 0.282, + "step": 99 + }, + { + "epoch": 0.37, + "grad_norm": 1.2850683815489914, + "learning_rate": 4.657672168830211e-06, + "loss": 0.2776, + "step": 100 + }, + { + "epoch": 0.37, + "grad_norm": 1.2508897770511975, + "learning_rate": 4.650206148692977e-06, + "loss": 0.2787, + "step": 101 + }, + { + "epoch": 0.37, + "grad_norm": 1.2031990746786907, + "learning_rate": 4.642665707456908e-06, + "loss": 0.2719, + "step": 102 + }, + { + "epoch": 0.38, + "grad_norm": 1.1842474930123255, + "learning_rate": 4.635051106105316e-06, + "loss": 0.2732, + "step": 103 + }, + { + "epoch": 0.38, + "grad_norm": 1.2596970412015132, + "learning_rate": 4.627362608188281e-06, + "loss": 0.2731, + "step": 104 + }, + { + "epoch": 0.39, + "grad_norm": 1.4294759311096437, + "learning_rate": 4.619600479813524e-06, + "loss": 0.2738, + "step": 105 + }, + { + "epoch": 0.39, + "grad_norm": 1.31619095423113, + "learning_rate": 4.6117649896372055e-06, + "loss": 0.2764, + "step": 106 + }, + { + "epoch": 0.39, + "grad_norm": 1.2349728666776751, + "learning_rate": 4.6038564088546185e-06, + "loss": 0.2722, + "step": 107 + }, + { + "epoch": 0.4, + "grad_norm": 1.2418477065252158, + "learning_rate": 4.5958750111908065e-06, + "loss": 0.271, + "step": 108 + }, + { + "epoch": 0.4, + "grad_norm": 1.3529322240859796, + "learning_rate": 4.587821072891089e-06, + "loss": 0.276, + "step": 109 + }, + { + "epoch": 0.4, + "grad_norm": 1.2671711562594927, + "learning_rate": 4.579694872711501e-06, + "loss": 0.2706, + "step": 110 + }, + { + "epoch": 0.41, + "grad_norm": 1.238356873891121, + "learning_rate": 4.571496691909142e-06, + "loss": 0.2749, + "step": 111 + }, + { + "epoch": 0.41, + "grad_norm": 1.2059912760303926, + "learning_rate": 4.563226814232444e-06, + "loss": 0.2676, + "step": 112 + }, + { + "epoch": 0.42, + "grad_norm": 1.1876458610423755, + "learning_rate": 4.554885525911351e-06, + "loss": 0.2743, + "step": 
113 + }, + { + "epoch": 0.42, + "grad_norm": 1.1715592937521375, + "learning_rate": 4.54647311564741e-06, + "loss": 0.2734, + "step": 114 + }, + { + "epoch": 0.42, + "grad_norm": 1.236329928620471, + "learning_rate": 4.53798987460378e-06, + "loss": 0.2855, + "step": 115 + }, + { + "epoch": 0.43, + "grad_norm": 1.1717820999866062, + "learning_rate": 4.529436096395157e-06, + "loss": 0.2699, + "step": 116 + }, + { + "epoch": 0.43, + "grad_norm": 1.3490101744641771, + "learning_rate": 4.520812077077604e-06, + "loss": 0.2731, + "step": 117 + }, + { + "epoch": 0.43, + "grad_norm": 1.192962777526519, + "learning_rate": 4.512118115138315e-06, + "loss": 0.2719, + "step": 118 + }, + { + "epoch": 0.44, + "grad_norm": 1.2384657820337475, + "learning_rate": 4.5033545114852734e-06, + "loss": 0.2647, + "step": 119 + }, + { + "epoch": 0.44, + "grad_norm": 1.2128578058956592, + "learning_rate": 4.494521569436845e-06, + "loss": 0.2615, + "step": 120 + }, + { + "epoch": 0.44, + "grad_norm": 1.3237640584842072, + "learning_rate": 4.485619594711278e-06, + "loss": 0.2663, + "step": 121 + }, + { + "epoch": 0.45, + "grad_norm": 1.2691929068372239, + "learning_rate": 4.476648895416116e-06, + "loss": 0.2614, + "step": 122 + }, + { + "epoch": 0.45, + "grad_norm": 1.2606618599832538, + "learning_rate": 4.467609782037543e-06, + "loss": 0.2606, + "step": 123 + }, + { + "epoch": 0.46, + "grad_norm": 1.3048381409549332, + "learning_rate": 4.4585025674296315e-06, + "loss": 0.2601, + "step": 124 + }, + { + "epoch": 0.46, + "grad_norm": 1.3022768451107203, + "learning_rate": 4.449327566803515e-06, + "loss": 0.2683, + "step": 125 + }, + { + "epoch": 0.46, + "grad_norm": 1.3820289309230962, + "learning_rate": 4.44008509771648e-06, + "loss": 0.2681, + "step": 126 + }, + { + "epoch": 0.47, + "grad_norm": 1.2802354999925132, + "learning_rate": 4.430775480060973e-06, + "loss": 0.2648, + "step": 127 + }, + { + "epoch": 0.47, + "grad_norm": 1.3242106497833372, + "learning_rate": 4.4213990360535274e-06, + "loss": 0.268, + "step": 128 + }, + { + "epoch": 0.47, + "grad_norm": 1.3009976864959876, + "learning_rate": 4.411956090223618e-06, + "loss": 0.2662, + "step": 129 + }, + { + "epoch": 0.48, + "grad_norm": 1.3212829688401424, + "learning_rate": 4.4024469694024194e-06, + "loss": 0.2605, + "step": 130 + }, + { + "epoch": 0.48, + "grad_norm": 1.2123869956343973, + "learning_rate": 4.3928720027115015e-06, + "loss": 0.2604, + "step": 131 + }, + { + "epoch": 0.48, + "grad_norm": 1.284537459167204, + "learning_rate": 4.383231521551432e-06, + "loss": 0.2593, + "step": 132 + }, + { + "epoch": 0.49, + "grad_norm": 1.443338680183996, + "learning_rate": 4.373525859590313e-06, + "loss": 0.2561, + "step": 133 + }, + { + "epoch": 0.49, + "grad_norm": 1.2809230468289576, + "learning_rate": 4.3637553527522265e-06, + "loss": 0.2599, + "step": 134 + }, + { + "epoch": 0.5, + "grad_norm": 1.3669470609932883, + "learning_rate": 4.3539203392056114e-06, + "loss": 0.2587, + "step": 135 + }, + { + "epoch": 0.5, + "grad_norm": 1.4112940230474231, + "learning_rate": 4.3440211593515556e-06, + "loss": 0.2585, + "step": 136 + }, + { + "epoch": 0.5, + "eval_loss": 0.28355109691619873, + "eval_runtime": 1744.5175, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 136 + }, + { + "epoch": 0.5, + "grad_norm": 1.3061396480876788, + "learning_rate": 4.33405815581202e-06, + "loss": 0.2549, + "step": 137 + }, + { + "epoch": 0.51, + "grad_norm": 1.46460991921356, + "learning_rate": 4.324031673417971e-06, + "loss": 0.2639, + "step": 138 + }, + 
{ + "epoch": 0.51, + "grad_norm": 1.211168578821325, + "learning_rate": 4.313942059197457e-06, + "loss": 0.2581, + "step": 139 + }, + { + "epoch": 0.51, + "grad_norm": 1.4657150585182341, + "learning_rate": 4.303789662363587e-06, + "loss": 0.2616, + "step": 140 + }, + { + "epoch": 0.52, + "grad_norm": 1.4251800081691455, + "learning_rate": 4.29357483430245e-06, + "loss": 0.2668, + "step": 141 + }, + { + "epoch": 0.52, + "grad_norm": 1.3599666478045191, + "learning_rate": 4.283297928560951e-06, + "loss": 0.2598, + "step": 142 + }, + { + "epoch": 0.53, + "grad_norm": 1.6103346253156021, + "learning_rate": 4.272959300834574e-06, + "loss": 0.2656, + "step": 143 + }, + { + "epoch": 0.53, + "grad_norm": 1.2184694580930981, + "learning_rate": 4.262559308955072e-06, + "loss": 0.2546, + "step": 144 + }, + { + "epoch": 0.53, + "grad_norm": 1.3362006281948362, + "learning_rate": 4.252098312878083e-06, + "loss": 0.2557, + "step": 145 + }, + { + "epoch": 0.54, + "grad_norm": 1.3369296531115935, + "learning_rate": 4.241576674670668e-06, + "loss": 0.2568, + "step": 146 + }, + { + "epoch": 0.54, + "grad_norm": 1.4747872641188995, + "learning_rate": 4.230994758498783e-06, + "loss": 0.2564, + "step": 147 + }, + { + "epoch": 0.54, + "grad_norm": 1.60778480089848, + "learning_rate": 4.220352930614672e-06, + "loss": 0.2573, + "step": 148 + }, + { + "epoch": 0.55, + "grad_norm": 1.188044808018822, + "learning_rate": 4.209651559344195e-06, + "loss": 0.2525, + "step": 149 + }, + { + "epoch": 0.55, + "grad_norm": 1.5856639134844415, + "learning_rate": 4.198891015074074e-06, + "loss": 0.2647, + "step": 150 + }, + { + "epoch": 0.55, + "grad_norm": 1.2859262024596512, + "learning_rate": 4.1880716702390764e-06, + "loss": 0.2471, + "step": 151 + }, + { + "epoch": 0.56, + "grad_norm": 1.4653590828956073, + "learning_rate": 4.177193899309127e-06, + "loss": 0.2575, + "step": 152 + }, + { + "epoch": 0.56, + "grad_norm": 1.1821237121686685, + "learning_rate": 4.166258078776342e-06, + "loss": 0.2493, + "step": 153 + }, + { + "epoch": 0.57, + "grad_norm": 1.575597475848357, + "learning_rate": 4.155264587142002e-06, + "loss": 0.2537, + "step": 154 + }, + { + "epoch": 0.57, + "grad_norm": 1.2702085752651588, + "learning_rate": 4.144213804903449e-06, + "loss": 0.2493, + "step": 155 + }, + { + "epoch": 0.57, + "grad_norm": 1.5026735427361002, + "learning_rate": 4.133106114540923e-06, + "loss": 0.2505, + "step": 156 + }, + { + "epoch": 0.58, + "grad_norm": 1.5297903686100347, + "learning_rate": 4.121941900504316e-06, + "loss": 0.2472, + "step": 157 + }, + { + "epoch": 0.58, + "grad_norm": 1.25258373375573, + "learning_rate": 4.110721549199866e-06, + "loss": 0.2487, + "step": 158 + }, + { + "epoch": 0.58, + "grad_norm": 1.5941545034573665, + "learning_rate": 4.099445448976793e-06, + "loss": 0.2497, + "step": 159 + }, + { + "epoch": 0.59, + "grad_norm": 1.3096080921873048, + "learning_rate": 4.088113990113846e-06, + "loss": 0.2439, + "step": 160 + }, + { + "epoch": 0.59, + "grad_norm": 1.6950266606195492, + "learning_rate": 4.076727564805803e-06, + "loss": 0.2538, + "step": 161 + }, + { + "epoch": 0.6, + "grad_norm": 1.440485526817555, + "learning_rate": 4.065286567149891e-06, + "loss": 0.2613, + "step": 162 + }, + { + "epoch": 0.6, + "grad_norm": 1.606032223752871, + "learning_rate": 4.0537913931321495e-06, + "loss": 0.2505, + "step": 163 + }, + { + "epoch": 0.6, + "grad_norm": 1.5319951141665498, + "learning_rate": 4.042242440613724e-06, + "loss": 0.256, + "step": 164 + }, + { + "epoch": 0.61, + "grad_norm": 1.3468098768373629, + 
"learning_rate": 4.030640109317096e-06, + "loss": 0.2424, + "step": 165 + }, + { + "epoch": 0.61, + "grad_norm": 1.6652562481471478, + "learning_rate": 4.018984800812248e-06, + "loss": 0.2396, + "step": 166 + }, + { + "epoch": 0.61, + "grad_norm": 1.302975081280886, + "learning_rate": 4.007276918502763e-06, + "loss": 0.2462, + "step": 167 + }, + { + "epoch": 0.62, + "grad_norm": 1.623125313268604, + "learning_rate": 3.995516867611865e-06, + "loss": 0.256, + "step": 168 + }, + { + "epoch": 0.62, + "grad_norm": 1.3069782036585045, + "learning_rate": 3.983705055168391e-06, + "loss": 0.2518, + "step": 169 + }, + { + "epoch": 0.62, + "grad_norm": 1.6527449270834242, + "learning_rate": 3.971841889992706e-06, + "loss": 0.2544, + "step": 170 + }, + { + "epoch": 0.63, + "grad_norm": 1.3586948189643275, + "learning_rate": 3.959927782682551e-06, + "loss": 0.2491, + "step": 171 + }, + { + "epoch": 0.63, + "grad_norm": 1.3440233460948727, + "learning_rate": 3.947963145598833e-06, + "loss": 0.2516, + "step": 172 + }, + { + "epoch": 0.64, + "grad_norm": 1.3389168317613516, + "learning_rate": 3.935948392851354e-06, + "loss": 0.2541, + "step": 173 + }, + { + "epoch": 0.64, + "grad_norm": 1.3142664585396417, + "learning_rate": 3.923883940284472e-06, + "loss": 0.2508, + "step": 174 + }, + { + "epoch": 0.64, + "grad_norm": 1.2767521320981983, + "learning_rate": 3.911770205462717e-06, + "loss": 0.2479, + "step": 175 + }, + { + "epoch": 0.65, + "grad_norm": 1.3281972191838929, + "learning_rate": 3.899607607656334e-06, + "loss": 0.2501, + "step": 176 + }, + { + "epoch": 0.65, + "grad_norm": 1.3793116543581005, + "learning_rate": 3.887396567826769e-06, + "loss": 0.2454, + "step": 177 + }, + { + "epoch": 0.65, + "grad_norm": 1.3293987156576104, + "learning_rate": 3.875137508612104e-06, + "loss": 0.249, + "step": 178 + }, + { + "epoch": 0.66, + "grad_norm": 1.4957835845929142, + "learning_rate": 3.862830854312427e-06, + "loss": 0.2445, + "step": 179 + }, + { + "epoch": 0.66, + "grad_norm": 1.2804679875446887, + "learning_rate": 3.850477030875147e-06, + "loss": 0.2411, + "step": 180 + }, + { + "epoch": 0.66, + "grad_norm": 1.5611119218300138, + "learning_rate": 3.838076465880248e-06, + "loss": 0.237, + "step": 181 + }, + { + "epoch": 0.67, + "grad_norm": 1.3387338916825537, + "learning_rate": 3.825629588525498e-06, + "loss": 0.2429, + "step": 182 + }, + { + "epoch": 0.67, + "grad_norm": 1.5091720406707172, + "learning_rate": 3.813136829611583e-06, + "loss": 0.2428, + "step": 183 + }, + { + "epoch": 0.68, + "grad_norm": 1.359116281666385, + "learning_rate": 3.8005986215272056e-06, + "loss": 0.2543, + "step": 184 + }, + { + "epoch": 0.68, + "grad_norm": 1.4094254259139338, + "learning_rate": 3.7880153982341167e-06, + "loss": 0.2502, + "step": 185 + }, + { + "epoch": 0.68, + "grad_norm": 1.2806047483095333, + "learning_rate": 3.7753875952520943e-06, + "loss": 0.2431, + "step": 186 + }, + { + "epoch": 0.69, + "grad_norm": 1.409218880016104, + "learning_rate": 3.7627156496438686e-06, + "loss": 0.2463, + "step": 187 + }, + { + "epoch": 0.69, + "grad_norm": 1.2466244404207094, + "learning_rate": 3.7500000000000005e-06, + "loss": 0.2372, + "step": 188 + }, + { + "epoch": 0.69, + "grad_norm": 1.4192484726979884, + "learning_rate": 3.7372410864236954e-06, + "loss": 0.2396, + "step": 189 + }, + { + "epoch": 0.7, + "grad_norm": 1.3260879207799772, + "learning_rate": 3.7244393505155713e-06, + "loss": 0.241, + "step": 190 + }, + { + "epoch": 0.7, + "grad_norm": 1.6407257220698948, + "learning_rate": 3.7115952353583804e-06, + 
"loss": 0.2552, + "step": 191 + }, + { + "epoch": 0.71, + "grad_norm": 1.4113760059054485, + "learning_rate": 3.6987091855016667e-06, + "loss": 0.2513, + "step": 192 + }, + { + "epoch": 0.71, + "grad_norm": 1.3008883773347888, + "learning_rate": 3.6857816469463806e-06, + "loss": 0.2361, + "step": 193 + }, + { + "epoch": 0.71, + "grad_norm": 1.3040857591494066, + "learning_rate": 3.6728130671294485e-06, + "loss": 0.2491, + "step": 194 + }, + { + "epoch": 0.72, + "grad_norm": 1.2543618451342111, + "learning_rate": 3.6598038949082777e-06, + "loss": 0.2309, + "step": 195 + }, + { + "epoch": 0.72, + "grad_norm": 1.3944108707435374, + "learning_rate": 3.6467545805452266e-06, + "loss": 0.2426, + "step": 196 + }, + { + "epoch": 0.72, + "grad_norm": 1.301851485207592, + "learning_rate": 3.6336655756920198e-06, + "loss": 0.2421, + "step": 197 + }, + { + "epoch": 0.73, + "grad_norm": 1.3562155385998595, + "learning_rate": 3.620537333374114e-06, + "loss": 0.2406, + "step": 198 + }, + { + "epoch": 0.73, + "grad_norm": 1.4263666275672418, + "learning_rate": 3.6073703079750204e-06, + "loss": 0.2418, + "step": 199 + }, + { + "epoch": 0.73, + "grad_norm": 1.2767612877970262, + "learning_rate": 3.594164955220577e-06, + "loss": 0.2353, + "step": 200 + }, + { + "epoch": 0.74, + "grad_norm": 1.3349267171117716, + "learning_rate": 3.5809217321631745e-06, + "loss": 0.2348, + "step": 201 + }, + { + "epoch": 0.74, + "grad_norm": 1.2217693484408796, + "learning_rate": 3.5676410971659404e-06, + "loss": 0.2287, + "step": 202 + }, + { + "epoch": 0.75, + "grad_norm": 1.4554473054976789, + "learning_rate": 3.5543235098868702e-06, + "loss": 0.241, + "step": 203 + }, + { + "epoch": 0.75, + "grad_norm": 1.184805169962002, + "learning_rate": 3.5409694312629193e-06, + "loss": 0.2352, + "step": 204 + }, + { + "epoch": 0.75, + "eval_loss": 0.25444912910461426, + "eval_runtime": 1745.7708, + "eval_samples_per_second": 1.324, + "eval_steps_per_second": 0.074, + "step": 204 + }, + { + "epoch": 0.75, + "grad_norm": 1.2973792749867632, + "learning_rate": 3.527579323494055e-06, + "loss": 0.2404, + "step": 205 + }, + { + "epoch": 0.76, + "grad_norm": 1.390330195755624, + "learning_rate": 3.5141536500272494e-06, + "loss": 0.2397, + "step": 206 + }, + { + "epoch": 0.76, + "grad_norm": 1.2415077962351395, + "learning_rate": 3.5006928755404467e-06, + "loss": 0.2296, + "step": 207 + }, + { + "epoch": 0.76, + "grad_norm": 1.3223264932925407, + "learning_rate": 3.4871974659264786e-06, + "loss": 0.2332, + "step": 208 + }, + { + "epoch": 0.77, + "grad_norm": 1.4376836200586416, + "learning_rate": 3.473667888276935e-06, + "loss": 0.2361, + "step": 209 + }, + { + "epoch": 0.77, + "grad_norm": 1.2495709137167788, + "learning_rate": 3.4601046108660036e-06, + "loss": 0.2351, + "step": 210 + }, + { + "epoch": 0.78, + "grad_norm": 1.4449247677336339, + "learning_rate": 3.446508103134259e-06, + "loss": 0.2373, + "step": 211 + }, + { + "epoch": 0.78, + "grad_norm": 1.3961526866418432, + "learning_rate": 3.4328788356724135e-06, + "loss": 0.2383, + "step": 212 + }, + { + "epoch": 0.78, + "grad_norm": 1.2766356071702671, + "learning_rate": 3.419217280205032e-06, + "loss": 0.2348, + "step": 213 + }, + { + "epoch": 0.79, + "grad_norm": 1.2201985305952152, + "learning_rate": 3.4055239095742067e-06, + "loss": 0.236, + "step": 214 + }, + { + "epoch": 0.79, + "grad_norm": 1.3670381437866368, + "learning_rate": 3.3917991977231855e-06, + "loss": 0.228, + "step": 215 + }, + { + "epoch": 0.79, + "grad_norm": 1.2724648753569285, + "learning_rate": 
3.378043619679974e-06, + "loss": 0.2386, + "step": 216 + }, + { + "epoch": 0.8, + "grad_norm": 1.2826844172302947, + "learning_rate": 3.364257651540891e-06, + "loss": 0.2366, + "step": 217 + }, + { + "epoch": 0.8, + "grad_norm": 1.1767059777022655, + "learning_rate": 3.3504417704540925e-06, + "loss": 0.2251, + "step": 218 + }, + { + "epoch": 0.8, + "grad_norm": 1.3111513963454882, + "learning_rate": 3.3365964546030544e-06, + "loss": 0.2396, + "step": 219 + }, + { + "epoch": 0.81, + "grad_norm": 1.2617225478707708, + "learning_rate": 3.322722183190025e-06, + "loss": 0.2412, + "step": 220 + }, + { + "epoch": 0.81, + "grad_norm": 1.2183220743609309, + "learning_rate": 3.308819436419437e-06, + "loss": 0.2276, + "step": 221 + }, + { + "epoch": 0.82, + "grad_norm": 1.31561824749082, + "learning_rate": 3.2948886954812877e-06, + "loss": 0.2404, + "step": 222 + }, + { + "epoch": 0.82, + "grad_norm": 1.250087552624437, + "learning_rate": 3.280930442534486e-06, + "loss": 0.2263, + "step": 223 + }, + { + "epoch": 0.82, + "grad_norm": 1.2524310598377044, + "learning_rate": 3.26694516069016e-06, + "loss": 0.2368, + "step": 224 + }, + { + "epoch": 0.83, + "grad_norm": 1.3487266981725987, + "learning_rate": 3.252933333994942e-06, + "loss": 0.2243, + "step": 225 + }, + { + "epoch": 0.83, + "grad_norm": 1.2427013509424278, + "learning_rate": 3.238895447414211e-06, + "loss": 0.2366, + "step": 226 + }, + { + "epoch": 0.83, + "grad_norm": 1.268723527146989, + "learning_rate": 3.2248319868153067e-06, + "loss": 0.2262, + "step": 227 + }, + { + "epoch": 0.84, + "grad_norm": 1.2476040692827028, + "learning_rate": 3.210743438950718e-06, + "loss": 0.234, + "step": 228 + }, + { + "epoch": 0.84, + "grad_norm": 1.2944243964732431, + "learning_rate": 3.196630291441231e-06, + "loss": 0.2261, + "step": 229 + }, + { + "epoch": 0.84, + "grad_norm": 1.2348938264581308, + "learning_rate": 3.182493032759053e-06, + "loss": 0.2368, + "step": 230 + }, + { + "epoch": 0.85, + "grad_norm": 1.3877133957904717, + "learning_rate": 3.168332152210909e-06, + "loss": 0.2342, + "step": 231 + }, + { + "epoch": 0.85, + "grad_norm": 1.2088837041711673, + "learning_rate": 3.154148139921102e-06, + "loss": 0.222, + "step": 232 + }, + { + "epoch": 0.86, + "grad_norm": 1.4750513048080165, + "learning_rate": 3.1399414868145506e-06, + "loss": 0.2301, + "step": 233 + }, + { + "epoch": 0.86, + "grad_norm": 1.2097458338635088, + "learning_rate": 3.1257126845998e-06, + "loss": 0.2365, + "step": 234 + }, + { + "epoch": 0.86, + "grad_norm": 1.3570468614316236, + "learning_rate": 3.1114622257520004e-06, + "loss": 0.2275, + "step": 235 + }, + { + "epoch": 0.87, + "grad_norm": 1.2331713108579336, + "learning_rate": 3.0971906034958616e-06, + "loss": 0.2193, + "step": 236 + }, + { + "epoch": 0.87, + "grad_norm": 1.330924002893457, + "learning_rate": 3.0828983117885856e-06, + "loss": 0.2258, + "step": 237 + }, + { + "epoch": 0.87, + "grad_norm": 1.2713775149937143, + "learning_rate": 3.0685858453027668e-06, + "loss": 0.2287, + "step": 238 + }, + { + "epoch": 0.88, + "grad_norm": 1.3460227514964078, + "learning_rate": 3.05425369940927e-06, + "loss": 0.2268, + "step": 239 + }, + { + "epoch": 0.88, + "grad_norm": 1.3124465221253792, + "learning_rate": 3.0399023701600903e-06, + "loss": 0.2237, + "step": 240 + }, + { + "epoch": 0.89, + "grad_norm": 1.2621420000416141, + "learning_rate": 3.0255323542711784e-06, + "loss": 0.221, + "step": 241 + }, + { + "epoch": 0.89, + "grad_norm": 1.3207975689997922, + "learning_rate": 3.011144149105251e-06, + "loss": 0.2177, + 
"step": 242 + }, + { + "epoch": 0.89, + "grad_norm": 1.3364690610440046, + "learning_rate": 2.996738252654577e-06, + "loss": 0.2266, + "step": 243 + }, + { + "epoch": 0.9, + "grad_norm": 1.3069082882086795, + "learning_rate": 2.9823151635237424e-06, + "loss": 0.2274, + "step": 244 + }, + { + "epoch": 0.9, + "grad_norm": 1.402608898892496, + "learning_rate": 2.9678753809123884e-06, + "loss": 0.233, + "step": 245 + }, + { + "epoch": 0.9, + "grad_norm": 1.3349783439901974, + "learning_rate": 2.9534194045979397e-06, + "loss": 0.2198, + "step": 246 + }, + { + "epoch": 0.91, + "grad_norm": 1.3319911413244738, + "learning_rate": 2.938947734918302e-06, + "loss": 0.2241, + "step": 247 + }, + { + "epoch": 0.91, + "grad_norm": 1.2836113523110935, + "learning_rate": 2.924460872754547e-06, + "loss": 0.2247, + "step": 248 + }, + { + "epoch": 0.91, + "grad_norm": 1.3420053396118825, + "learning_rate": 2.9099593195135743e-06, + "loss": 0.2245, + "step": 249 + }, + { + "epoch": 0.92, + "grad_norm": 1.3018957576647208, + "learning_rate": 2.8954435771107604e-06, + "loss": 0.2198, + "step": 250 + }, + { + "epoch": 0.92, + "grad_norm": 1.493108819116986, + "learning_rate": 2.8809141479525843e-06, + "loss": 0.2261, + "step": 251 + }, + { + "epoch": 0.93, + "grad_norm": 1.2240817395656585, + "learning_rate": 2.8663715349192388e-06, + "loss": 0.2182, + "step": 252 + }, + { + "epoch": 0.93, + "grad_norm": 1.3972966685231503, + "learning_rate": 2.8518162413472266e-06, + "loss": 0.2289, + "step": 253 + }, + { + "epoch": 0.93, + "grad_norm": 1.3158850314947335, + "learning_rate": 2.8372487710119374e-06, + "loss": 0.2286, + "step": 254 + }, + { + "epoch": 0.94, + "grad_norm": 1.295772538693981, + "learning_rate": 2.8226696281102134e-06, + "loss": 0.2157, + "step": 255 + }, + { + "epoch": 0.94, + "grad_norm": 1.34085577207588, + "learning_rate": 2.8080793172428965e-06, + "loss": 0.2223, + "step": 256 + }, + { + "epoch": 0.94, + "grad_norm": 1.3610764715193495, + "learning_rate": 2.7934783433973672e-06, + "loss": 0.2227, + "step": 257 + }, + { + "epoch": 0.95, + "grad_norm": 1.2629712566442401, + "learning_rate": 2.778867211930061e-06, + "loss": 0.2263, + "step": 258 + }, + { + "epoch": 0.95, + "grad_norm": 1.2782582856568219, + "learning_rate": 2.764246428548983e-06, + "loss": 0.2234, + "step": 259 + }, + { + "epoch": 0.96, + "grad_norm": 1.2621019245043847, + "learning_rate": 2.7496164992961995e-06, + "loss": 0.2177, + "step": 260 + }, + { + "epoch": 0.96, + "grad_norm": 1.2033350046761524, + "learning_rate": 2.7349779305303263e-06, + "loss": 0.2226, + "step": 261 + }, + { + "epoch": 0.96, + "grad_norm": 1.361220136423699, + "learning_rate": 2.720331228909005e-06, + "loss": 0.2179, + "step": 262 + }, + { + "epoch": 0.97, + "grad_norm": 1.3715434561254194, + "learning_rate": 2.7056769013713623e-06, + "loss": 0.2231, + "step": 263 + }, + { + "epoch": 0.97, + "grad_norm": 1.1330086039392537, + "learning_rate": 2.691015455120468e-06, + "loss": 0.2164, + "step": 264 + }, + { + "epoch": 0.97, + "grad_norm": 1.2694263709270768, + "learning_rate": 2.6763473976057776e-06, + "loss": 0.2127, + "step": 265 + }, + { + "epoch": 0.98, + "grad_norm": 1.3274231972419466, + "learning_rate": 2.6616732365055713e-06, + "loss": 0.2092, + "step": 266 + }, + { + "epoch": 0.98, + "grad_norm": 1.276485394682339, + "learning_rate": 2.64699347970938e-06, + "loss": 0.2206, + "step": 267 + }, + { + "epoch": 0.98, + "grad_norm": 1.33640777595863, + "learning_rate": 2.6323086353004077e-06, + "loss": 0.2201, + "step": 268 + }, + { + "epoch": 0.99, + 
"grad_norm": 1.2867150222472765, + "learning_rate": 2.6176192115379494e-06, + "loss": 0.2176, + "step": 269 + }, + { + "epoch": 0.99, + "grad_norm": 1.220258552427881, + "learning_rate": 2.602925716839795e-06, + "loss": 0.2131, + "step": 270 + }, + { + "epoch": 1.0, + "grad_norm": 1.3301323985426015, + "learning_rate": 2.588228659764632e-06, + "loss": 0.2244, + "step": 271 + }, + { + "epoch": 1.0, + "grad_norm": 1.2313785507924382, + "learning_rate": 2.573528548994449e-06, + "loss": 0.2192, + "step": 272 + }, + { + "epoch": 1.0, + "eval_loss": 0.22680288553237915, + "eval_runtime": 1744.6696, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 272 + }, + { + "epoch": 1.0, + "grad_norm": 1.2609355191620695, + "learning_rate": 2.5588258933169248e-06, + "loss": 0.2179, + "step": 273 + }, + { + "epoch": 1.01, + "grad_norm": 1.3297110273345063, + "learning_rate": 2.544121201607822e-06, + "loss": 0.224, + "step": 274 + }, + { + "epoch": 1.01, + "grad_norm": 1.342809587498978, + "learning_rate": 2.529414982813371e-06, + "loss": 0.2184, + "step": 275 + }, + { + "epoch": 1.01, + "grad_norm": 1.1924689638641053, + "learning_rate": 2.5147077459326556e-06, + "loss": 0.2068, + "step": 276 + }, + { + "epoch": 1.0, + "grad_norm": 1.6157951810655014, + "learning_rate": 2.5e-06, + "loss": 0.1933, + "step": 277 + }, + { + "epoch": 1.01, + "grad_norm": 1.651876874652974, + "learning_rate": 2.485292254067345e-06, + "loss": 0.1689, + "step": 278 + }, + { + "epoch": 1.01, + "grad_norm": 1.5010520510421532, + "learning_rate": 2.47058501718663e-06, + "loss": 0.1654, + "step": 279 + }, + { + "epoch": 1.01, + "grad_norm": 1.8858303977250737, + "learning_rate": 2.455878798392179e-06, + "loss": 0.1655, + "step": 280 + }, + { + "epoch": 1.02, + "grad_norm": 1.456869066446747, + "learning_rate": 2.441174106683076e-06, + "loss": 0.1678, + "step": 281 + }, + { + "epoch": 1.02, + "grad_norm": 1.4462287949555628, + "learning_rate": 2.4264714510055517e-06, + "loss": 0.1665, + "step": 282 + }, + { + "epoch": 1.02, + "grad_norm": 1.5646273900304237, + "learning_rate": 2.411771340235369e-06, + "loss": 0.1658, + "step": 283 + }, + { + "epoch": 1.03, + "grad_norm": 1.488477859974886, + "learning_rate": 2.397074283160206e-06, + "loss": 0.1686, + "step": 284 + }, + { + "epoch": 1.03, + "grad_norm": 1.4574537402645513, + "learning_rate": 2.38238078846205e-06, + "loss": 0.1601, + "step": 285 + }, + { + "epoch": 1.03, + "grad_norm": 1.6434048093135507, + "learning_rate": 2.3676913646995923e-06, + "loss": 0.1582, + "step": 286 + }, + { + "epoch": 1.04, + "grad_norm": 1.6322883890612716, + "learning_rate": 2.353006520290621e-06, + "loss": 0.1623, + "step": 287 + }, + { + "epoch": 1.04, + "grad_norm": 1.4784654340551553, + "learning_rate": 2.338326763494429e-06, + "loss": 0.1628, + "step": 288 + }, + { + "epoch": 1.05, + "grad_norm": 1.3965236916476, + "learning_rate": 2.3236526023942224e-06, + "loss": 0.1622, + "step": 289 + }, + { + "epoch": 1.05, + "grad_norm": 1.507049801043257, + "learning_rate": 2.308984544879533e-06, + "loss": 0.1642, + "step": 290 + }, + { + "epoch": 1.05, + "grad_norm": 1.4166245505260515, + "learning_rate": 2.294323098628639e-06, + "loss": 0.1587, + "step": 291 + }, + { + "epoch": 1.06, + "grad_norm": 1.4067647816276172, + "learning_rate": 2.2796687710909966e-06, + "loss": 0.1626, + "step": 292 + }, + { + "epoch": 1.06, + "grad_norm": 1.3560693211555064, + "learning_rate": 2.265022069469675e-06, + "loss": 0.166, + "step": 293 + }, + { + "epoch": 1.06, + "grad_norm": 
1.5995341036267872, + "learning_rate": 2.250383500703802e-06, + "loss": 0.1598, + "step": 294 + }, + { + "epoch": 1.07, + "grad_norm": 1.3503496270546655, + "learning_rate": 2.235753571451018e-06, + "loss": 0.1601, + "step": 295 + }, + { + "epoch": 1.07, + "grad_norm": 1.3891549915195316, + "learning_rate": 2.2211327880699392e-06, + "loss": 0.1661, + "step": 296 + }, + { + "epoch": 1.08, + "grad_norm": 1.360245129967798, + "learning_rate": 2.206521656602633e-06, + "loss": 0.164, + "step": 297 + }, + { + "epoch": 1.08, + "grad_norm": 1.299865787786878, + "learning_rate": 2.191920682757104e-06, + "loss": 0.161, + "step": 298 + }, + { + "epoch": 1.08, + "grad_norm": 1.3615875639579715, + "learning_rate": 2.1773303718897874e-06, + "loss": 0.1637, + "step": 299 + }, + { + "epoch": 1.09, + "grad_norm": 1.3028069612639404, + "learning_rate": 2.162751228988063e-06, + "loss": 0.1704, + "step": 300 + }, + { + "epoch": 1.09, + "grad_norm": 1.4541962542353601, + "learning_rate": 2.148183758652774e-06, + "loss": 0.1662, + "step": 301 + }, + { + "epoch": 1.09, + "grad_norm": 1.3897324274811969, + "learning_rate": 2.1336284650807616e-06, + "loss": 0.1652, + "step": 302 + }, + { + "epoch": 1.1, + "grad_norm": 1.423934057079935, + "learning_rate": 2.1190858520474166e-06, + "loss": 0.155, + "step": 303 + }, + { + "epoch": 1.1, + "grad_norm": 8.913932406854636, + "learning_rate": 2.1045564228892404e-06, + "loss": 0.1584, + "step": 304 + }, + { + "epoch": 1.1, + "grad_norm": 1.4488278716858565, + "learning_rate": 2.090040680486426e-06, + "loss": 0.1575, + "step": 305 + }, + { + "epoch": 1.11, + "grad_norm": 1.3322793715481207, + "learning_rate": 2.075539127245454e-06, + "loss": 0.1589, + "step": 306 + }, + { + "epoch": 1.11, + "grad_norm": 1.408746176643782, + "learning_rate": 2.0610522650816985e-06, + "loss": 0.1678, + "step": 307 + }, + { + "epoch": 1.12, + "grad_norm": 1.3733647706065368, + "learning_rate": 2.04658059540206e-06, + "loss": 0.1608, + "step": 308 + }, + { + "epoch": 1.12, + "grad_norm": 1.3509502932095994, + "learning_rate": 2.0321246190876116e-06, + "loss": 0.1629, + "step": 309 + }, + { + "epoch": 1.12, + "grad_norm": 1.5755500012182067, + "learning_rate": 2.017684836476258e-06, + "loss": 0.1591, + "step": 310 + }, + { + "epoch": 1.13, + "grad_norm": 1.3782160856950585, + "learning_rate": 2.0032617473454228e-06, + "loss": 0.1608, + "step": 311 + }, + { + "epoch": 1.13, + "grad_norm": 1.3725154258171277, + "learning_rate": 1.9888558508947496e-06, + "loss": 0.1602, + "step": 312 + }, + { + "epoch": 1.13, + "grad_norm": 1.4709716658016592, + "learning_rate": 1.9744676457288225e-06, + "loss": 0.1546, + "step": 313 + }, + { + "epoch": 1.14, + "grad_norm": 1.2823795003075644, + "learning_rate": 1.960097629839911e-06, + "loss": 0.1578, + "step": 314 + }, + { + "epoch": 1.14, + "grad_norm": 1.3699610089071859, + "learning_rate": 1.945746300590731e-06, + "loss": 0.162, + "step": 315 + }, + { + "epoch": 1.15, + "grad_norm": 1.3005171017134662, + "learning_rate": 1.9314141546972345e-06, + "loss": 0.1589, + "step": 316 + }, + { + "epoch": 1.15, + "grad_norm": 1.398353083209785, + "learning_rate": 1.9171016882114156e-06, + "loss": 0.1618, + "step": 317 + }, + { + "epoch": 1.15, + "grad_norm": 1.2844646362570225, + "learning_rate": 1.9028093965041394e-06, + "loss": 0.1578, + "step": 318 + }, + { + "epoch": 1.16, + "grad_norm": 1.3303581432805507, + "learning_rate": 1.8885377742480005e-06, + "loss": 0.1564, + "step": 319 + }, + { + "epoch": 1.16, + "grad_norm": 1.3220471151533941, + "learning_rate": 
1.8742873154002007e-06, + "loss": 0.1622, + "step": 320 + }, + { + "epoch": 1.16, + "grad_norm": 1.368830484779579, + "learning_rate": 1.8600585131854502e-06, + "loss": 0.1647, + "step": 321 + }, + { + "epoch": 1.17, + "grad_norm": 1.3615900513424675, + "learning_rate": 1.8458518600788988e-06, + "loss": 0.1643, + "step": 322 + }, + { + "epoch": 1.17, + "grad_norm": 1.3362363005894682, + "learning_rate": 1.8316678477890914e-06, + "loss": 0.1578, + "step": 323 + }, + { + "epoch": 1.17, + "grad_norm": 1.3517462392489898, + "learning_rate": 1.8175069672409476e-06, + "loss": 0.1582, + "step": 324 + }, + { + "epoch": 1.18, + "grad_norm": 1.2669852402541302, + "learning_rate": 1.8033697085587698e-06, + "loss": 0.1683, + "step": 325 + }, + { + "epoch": 1.18, + "grad_norm": 1.2939043985754843, + "learning_rate": 1.789256561049283e-06, + "loss": 0.1623, + "step": 326 + }, + { + "epoch": 1.19, + "grad_norm": 1.2926084659921688, + "learning_rate": 1.7751680131846943e-06, + "loss": 0.1539, + "step": 327 + }, + { + "epoch": 1.19, + "grad_norm": 1.353797079547449, + "learning_rate": 1.7611045525857902e-06, + "loss": 0.1568, + "step": 328 + }, + { + "epoch": 1.19, + "grad_norm": 1.359447656597316, + "learning_rate": 1.7470666660050587e-06, + "loss": 0.1575, + "step": 329 + }, + { + "epoch": 1.2, + "grad_norm": 1.389993699299018, + "learning_rate": 1.7330548393098406e-06, + "loss": 0.1583, + "step": 330 + }, + { + "epoch": 1.2, + "grad_norm": 1.3663634701100151, + "learning_rate": 1.7190695574655147e-06, + "loss": 0.1664, + "step": 331 + }, + { + "epoch": 1.2, + "grad_norm": 1.2758470273344145, + "learning_rate": 1.7051113045187123e-06, + "loss": 0.1524, + "step": 332 + }, + { + "epoch": 1.21, + "grad_norm": 1.3092369590446187, + "learning_rate": 1.6911805635805633e-06, + "loss": 0.1589, + "step": 333 + }, + { + "epoch": 1.21, + "grad_norm": 1.3136599262285558, + "learning_rate": 1.677277816809975e-06, + "loss": 0.1612, + "step": 334 + }, + { + "epoch": 1.21, + "grad_norm": 1.326031926879103, + "learning_rate": 1.6634035453969458e-06, + "loss": 0.1618, + "step": 335 + }, + { + "epoch": 1.22, + "grad_norm": 1.4491107406555894, + "learning_rate": 1.6495582295459081e-06, + "loss": 0.1622, + "step": 336 + }, + { + "epoch": 1.22, + "grad_norm": 1.3131791827166341, + "learning_rate": 1.635742348459109e-06, + "loss": 0.1582, + "step": 337 + }, + { + "epoch": 1.23, + "grad_norm": 1.287364602164134, + "learning_rate": 1.6219563803200273e-06, + "loss": 0.1555, + "step": 338 + }, + { + "epoch": 1.23, + "grad_norm": 1.2888477607809152, + "learning_rate": 1.6082008022768153e-06, + "loss": 0.1548, + "step": 339 + }, + { + "epoch": 1.23, + "grad_norm": 1.2550597457172734, + "learning_rate": 1.5944760904257944e-06, + "loss": 0.1527, + "step": 340 + }, + { + "epoch": 1.23, + "eval_loss": 0.2143905907869339, + "eval_runtime": 1743.8656, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 340 + }, + { + "epoch": 1.24, + "grad_norm": 1.3070014388815419, + "learning_rate": 1.5807827197949689e-06, + "loss": 0.165, + "step": 341 + }, + { + "epoch": 1.24, + "grad_norm": 1.3870115691039606, + "learning_rate": 1.5671211643275878e-06, + "loss": 0.1573, + "step": 342 + }, + { + "epoch": 1.24, + "grad_norm": 1.3222626065467171, + "learning_rate": 1.5534918968657423e-06, + "loss": 0.1576, + "step": 343 + }, + { + "epoch": 1.25, + "grad_norm": 1.3125309007704298, + "learning_rate": 1.5398953891339972e-06, + "loss": 0.1546, + "step": 344 + }, + { + "epoch": 1.25, + "grad_norm": 1.339807181653631, + 
"learning_rate": 1.5263321117230657e-06, + "loss": 0.1636, + "step": 345 + }, + { + "epoch": 1.26, + "grad_norm": 1.3747371643432478, + "learning_rate": 1.5128025340735223e-06, + "loss": 0.1602, + "step": 346 + }, + { + "epoch": 1.26, + "grad_norm": 1.3032518105225548, + "learning_rate": 1.4993071244595537e-06, + "loss": 0.1589, + "step": 347 + }, + { + "epoch": 1.26, + "grad_norm": 1.3506075750291142, + "learning_rate": 1.485846349972751e-06, + "loss": 0.1555, + "step": 348 + }, + { + "epoch": 1.27, + "grad_norm": 1.2563180389632984, + "learning_rate": 1.4724206765059456e-06, + "loss": 0.1505, + "step": 349 + }, + { + "epoch": 1.27, + "grad_norm": 1.269046692347184, + "learning_rate": 1.4590305687370811e-06, + "loss": 0.1555, + "step": 350 + }, + { + "epoch": 1.27, + "grad_norm": 1.2877604643227054, + "learning_rate": 1.445676490113131e-06, + "loss": 0.1533, + "step": 351 + }, + { + "epoch": 1.28, + "grad_norm": 1.2662579209503562, + "learning_rate": 1.4323589028340598e-06, + "loss": 0.1575, + "step": 352 + }, + { + "epoch": 1.28, + "grad_norm": 1.3429930753255879, + "learning_rate": 1.419078267836826e-06, + "loss": 0.1564, + "step": 353 + }, + { + "epoch": 1.28, + "grad_norm": 1.26706017644121, + "learning_rate": 1.4058350447794236e-06, + "loss": 0.156, + "step": 354 + }, + { + "epoch": 1.29, + "grad_norm": 1.257905613838671, + "learning_rate": 1.3926296920249796e-06, + "loss": 0.1537, + "step": 355 + }, + { + "epoch": 1.29, + "grad_norm": 1.2400776485949667, + "learning_rate": 1.3794626666258868e-06, + "loss": 0.1542, + "step": 356 + }, + { + "epoch": 1.3, + "grad_norm": 1.2702834346904048, + "learning_rate": 1.3663344243079806e-06, + "loss": 0.1497, + "step": 357 + }, + { + "epoch": 1.3, + "grad_norm": 1.2713710589635325, + "learning_rate": 1.3532454194547734e-06, + "loss": 0.1545, + "step": 358 + }, + { + "epoch": 1.3, + "grad_norm": 1.312880775402093, + "learning_rate": 1.340196105091723e-06, + "loss": 0.1587, + "step": 359 + }, + { + "epoch": 1.31, + "grad_norm": 1.277443114809593, + "learning_rate": 1.3271869328705517e-06, + "loss": 0.156, + "step": 360 + }, + { + "epoch": 1.31, + "grad_norm": 1.2782436668274384, + "learning_rate": 1.314218353053619e-06, + "loss": 0.1539, + "step": 361 + }, + { + "epoch": 1.31, + "grad_norm": 1.2624650220990674, + "learning_rate": 1.3012908144983352e-06, + "loss": 0.1508, + "step": 362 + }, + { + "epoch": 1.32, + "grad_norm": 1.2864430571271352, + "learning_rate": 1.2884047646416206e-06, + "loss": 0.149, + "step": 363 + }, + { + "epoch": 1.32, + "grad_norm": 1.2878378599372577, + "learning_rate": 1.2755606494844294e-06, + "loss": 0.1543, + "step": 364 + }, + { + "epoch": 1.33, + "grad_norm": 1.3273087656238196, + "learning_rate": 1.262758913576307e-06, + "loss": 0.1635, + "step": 365 + }, + { + "epoch": 1.33, + "grad_norm": 1.3063597147867572, + "learning_rate": 1.2500000000000007e-06, + "loss": 0.1572, + "step": 366 + }, + { + "epoch": 1.33, + "grad_norm": 1.3265916320932594, + "learning_rate": 1.2372843503561318e-06, + "loss": 0.1527, + "step": 367 + }, + { + "epoch": 1.34, + "grad_norm": 1.3468965129904442, + "learning_rate": 1.2246124047479074e-06, + "loss": 0.1614, + "step": 368 + }, + { + "epoch": 1.34, + "grad_norm": 1.2940794181817707, + "learning_rate": 1.211984601765884e-06, + "loss": 0.1515, + "step": 369 + }, + { + "epoch": 1.34, + "grad_norm": 1.2501130213094063, + "learning_rate": 1.1994013784727948e-06, + "loss": 0.1493, + "step": 370 + }, + { + "epoch": 1.35, + "grad_norm": 1.3053031029097866, + "learning_rate": 
1.1868631703884184e-06, + "loss": 0.1506, + "step": 371 + }, + { + "epoch": 1.35, + "grad_norm": 1.3099092397845344, + "learning_rate": 1.174370411474503e-06, + "loss": 0.1529, + "step": 372 + }, + { + "epoch": 1.35, + "grad_norm": 1.2578167758256806, + "learning_rate": 1.161923534119752e-06, + "loss": 0.1509, + "step": 373 + }, + { + "epoch": 1.36, + "grad_norm": 1.2667497206763518, + "learning_rate": 1.1495229691248543e-06, + "loss": 0.1531, + "step": 374 + }, + { + "epoch": 1.36, + "grad_norm": 1.2377112087308577, + "learning_rate": 1.1371691456875736e-06, + "loss": 0.1496, + "step": 375 + }, + { + "epoch": 1.37, + "grad_norm": 1.2918429870359907, + "learning_rate": 1.1248624913878966e-06, + "loss": 0.1543, + "step": 376 + }, + { + "epoch": 1.37, + "grad_norm": 1.1975180926719193, + "learning_rate": 1.1126034321732325e-06, + "loss": 0.1469, + "step": 377 + }, + { + "epoch": 1.37, + "grad_norm": 1.252929398509232, + "learning_rate": 1.1003923923436671e-06, + "loss": 0.1486, + "step": 378 + }, + { + "epoch": 1.38, + "grad_norm": 1.3379674585704076, + "learning_rate": 1.088229794537283e-06, + "loss": 0.1526, + "step": 379 + }, + { + "epoch": 1.38, + "grad_norm": 1.2231138295761887, + "learning_rate": 1.0761160597155288e-06, + "loss": 0.1502, + "step": 380 + }, + { + "epoch": 1.38, + "grad_norm": 1.2544323260744303, + "learning_rate": 1.0640516071486467e-06, + "loss": 0.155, + "step": 381 + }, + { + "epoch": 1.39, + "grad_norm": 1.233432960445612, + "learning_rate": 1.0520368544011661e-06, + "loss": 0.1474, + "step": 382 + }, + { + "epoch": 1.39, + "grad_norm": 1.2440122786519763, + "learning_rate": 1.040072217317449e-06, + "loss": 0.1519, + "step": 383 + }, + { + "epoch": 1.39, + "grad_norm": 1.2480740637023224, + "learning_rate": 1.028158110007294e-06, + "loss": 0.1458, + "step": 384 + }, + { + "epoch": 1.4, + "grad_norm": 1.2432976861644531, + "learning_rate": 1.0162949448316089e-06, + "loss": 0.1479, + "step": 385 + }, + { + "epoch": 1.4, + "grad_norm": 1.2964192758509852, + "learning_rate": 1.0044831323881358e-06, + "loss": 0.1475, + "step": 386 + }, + { + "epoch": 1.41, + "grad_norm": 1.2757240206848581, + "learning_rate": 9.927230814972382e-07, + "loss": 0.1498, + "step": 387 + }, + { + "epoch": 1.41, + "grad_norm": 1.2901319142522056, + "learning_rate": 9.81015199187753e-07, + "loss": 0.1538, + "step": 388 + }, + { + "epoch": 1.41, + "grad_norm": 1.2630303590794087, + "learning_rate": 9.693598906829046e-07, + "loss": 0.1502, + "step": 389 + }, + { + "epoch": 1.42, + "grad_norm": 1.2729600112496837, + "learning_rate": 9.577575593862776e-07, + "loss": 0.1544, + "step": 390 + }, + { + "epoch": 1.42, + "grad_norm": 1.2770225702977753, + "learning_rate": 9.462086068678519e-07, + "loss": 0.151, + "step": 391 + }, + { + "epoch": 1.42, + "grad_norm": 1.220801959366837, + "learning_rate": 9.347134328501098e-07, + "loss": 0.1607, + "step": 392 + }, + { + "epoch": 1.43, + "grad_norm": 1.2290254840672323, + "learning_rate": 9.232724351941979e-07, + "loss": 0.159, + "step": 393 + }, + { + "epoch": 1.43, + "grad_norm": 1.322655155228603, + "learning_rate": 9.118860098861538e-07, + "loss": 0.1551, + "step": 394 + }, + { + "epoch": 1.44, + "grad_norm": 1.2186422910767156, + "learning_rate": 9.005545510232069e-07, + "loss": 0.1509, + "step": 395 + }, + { + "epoch": 1.44, + "grad_norm": 1.257647431526692, + "learning_rate": 8.892784508001343e-07, + "loss": 0.1468, + "step": 396 + }, + { + "epoch": 1.44, + "grad_norm": 1.2726496065869601, + "learning_rate": 8.78058099495685e-07, + "loss": 0.1543, + 
"step": 397 + }, + { + "epoch": 1.45, + "grad_norm": 1.2121038785428775, + "learning_rate": 8.668938854590764e-07, + "loss": 0.1499, + "step": 398 + }, + { + "epoch": 1.45, + "grad_norm": 1.29305260381573, + "learning_rate": 8.55786195096551e-07, + "loss": 0.1502, + "step": 399 + }, + { + "epoch": 1.45, + "grad_norm": 1.2602014430877129, + "learning_rate": 8.44735412857999e-07, + "loss": 0.151, + "step": 400 + }, + { + "epoch": 1.46, + "grad_norm": 1.236251101824342, + "learning_rate": 8.337419212236586e-07, + "loss": 0.1508, + "step": 401 + }, + { + "epoch": 1.46, + "grad_norm": 1.2539688041578283, + "learning_rate": 8.228061006908738e-07, + "loss": 0.1451, + "step": 402 + }, + { + "epoch": 1.46, + "grad_norm": 1.2111443760013914, + "learning_rate": 8.119283297609238e-07, + "loss": 0.1495, + "step": 403 + }, + { + "epoch": 1.47, + "grad_norm": 1.2738687910572695, + "learning_rate": 8.011089849259263e-07, + "loss": 0.1551, + "step": 404 + }, + { + "epoch": 1.47, + "grad_norm": 1.208326230509236, + "learning_rate": 7.903484406558055e-07, + "loss": 0.1494, + "step": 405 + }, + { + "epoch": 1.48, + "grad_norm": 1.268018040214494, + "learning_rate": 7.796470693853281e-07, + "loss": 0.1564, + "step": 406 + }, + { + "epoch": 1.48, + "grad_norm": 1.2362997453724565, + "learning_rate": 7.690052415012175e-07, + "loss": 0.1519, + "step": 407 + }, + { + "epoch": 1.48, + "grad_norm": 1.2071685150586688, + "learning_rate": 7.584233253293327e-07, + "loss": 0.1452, + "step": 408 + }, + { + "epoch": 1.48, + "eval_loss": 0.20317326486110687, + "eval_runtime": 1745.6342, + "eval_samples_per_second": 1.324, + "eval_steps_per_second": 0.074, + "step": 408 + }, + { + "epoch": 1.49, + "grad_norm": 1.346278884056406, + "learning_rate": 7.479016871219174e-07, + "loss": 0.1535, + "step": 409 + }, + { + "epoch": 1.49, + "grad_norm": 1.2480834034819637, + "learning_rate": 7.374406910449277e-07, + "loss": 0.1481, + "step": 410 + }, + { + "epoch": 1.49, + "grad_norm": 1.2888697856384739, + "learning_rate": 7.270406991654275e-07, + "loss": 0.1522, + "step": 411 + }, + { + "epoch": 1.5, + "grad_norm": 1.2193538871316398, + "learning_rate": 7.167020714390502e-07, + "loss": 0.1482, + "step": 412 + }, + { + "epoch": 1.5, + "grad_norm": 1.2479509388584915, + "learning_rate": 7.064251656975504e-07, + "loss": 0.1464, + "step": 413 + }, + { + "epoch": 1.51, + "grad_norm": 1.2474534794862786, + "learning_rate": 6.962103376364141e-07, + "loss": 0.1524, + "step": 414 + }, + { + "epoch": 1.51, + "grad_norm": 1.2250593567603887, + "learning_rate": 6.860579408025436e-07, + "loss": 0.1439, + "step": 415 + }, + { + "epoch": 1.51, + "grad_norm": 1.2677994690123586, + "learning_rate": 6.759683265820294e-07, + "loss": 0.1525, + "step": 416 + }, + { + "epoch": 1.52, + "grad_norm": 1.2583921961249045, + "learning_rate": 6.659418441879817e-07, + "loss": 0.1504, + "step": 417 + }, + { + "epoch": 1.52, + "grad_norm": 1.231037560593916, + "learning_rate": 6.559788406484446e-07, + "loss": 0.1473, + "step": 418 + }, + { + "epoch": 1.52, + "grad_norm": 1.3011695714953009, + "learning_rate": 6.46079660794389e-07, + "loss": 0.1539, + "step": 419 + }, + { + "epoch": 1.53, + "grad_norm": 1.2086771462916839, + "learning_rate": 6.36244647247774e-07, + "loss": 0.1467, + "step": 420 + }, + { + "epoch": 1.53, + "grad_norm": 1.2557794947188656, + "learning_rate": 6.264741404096875e-07, + "loss": 0.1432, + "step": 421 + }, + { + "epoch": 1.53, + "grad_norm": 1.235054043308426, + "learning_rate": 6.167684784485681e-07, + "loss": 0.153, + "step": 422 + }, + { 
+ "epoch": 1.54, + "grad_norm": 1.2302380084740911, + "learning_rate": 6.071279972884997e-07, + "loss": 0.1432, + "step": 423 + }, + { + "epoch": 1.54, + "grad_norm": 1.2718618453856045, + "learning_rate": 5.975530305975808e-07, + "loss": 0.1457, + "step": 424 + }, + { + "epoch": 1.55, + "grad_norm": 1.2808499453387527, + "learning_rate": 5.880439097763821e-07, + "loss": 0.1513, + "step": 425 + }, + { + "epoch": 1.55, + "grad_norm": 1.238430737692288, + "learning_rate": 5.786009639464729e-07, + "loss": 0.1441, + "step": 426 + }, + { + "epoch": 1.55, + "grad_norm": 1.217824530917093, + "learning_rate": 5.692245199390281e-07, + "loss": 0.1496, + "step": 427 + }, + { + "epoch": 1.56, + "grad_norm": 1.313018954972511, + "learning_rate": 5.599149022835201e-07, + "loss": 0.1548, + "step": 428 + }, + { + "epoch": 1.56, + "grad_norm": 1.1743198424886485, + "learning_rate": 5.506724331964852e-07, + "loss": 0.1377, + "step": 429 + }, + { + "epoch": 1.56, + "grad_norm": 1.2024637841254677, + "learning_rate": 5.414974325703687e-07, + "loss": 0.1421, + "step": 430 + }, + { + "epoch": 1.57, + "grad_norm": 1.2168864150330052, + "learning_rate": 5.323902179624571e-07, + "loss": 0.1447, + "step": 431 + }, + { + "epoch": 1.57, + "grad_norm": 1.2575833758604005, + "learning_rate": 5.233511045838846e-07, + "loss": 0.1506, + "step": 432 + }, + { + "epoch": 1.57, + "grad_norm": 1.23638504568709, + "learning_rate": 5.143804052887228e-07, + "loss": 0.1435, + "step": 433 + }, + { + "epoch": 1.58, + "grad_norm": 1.200927376547426, + "learning_rate": 5.054784305631547e-07, + "loss": 0.148, + "step": 434 + }, + { + "epoch": 1.58, + "grad_norm": 1.212598226936373, + "learning_rate": 4.966454885147271e-07, + "loss": 0.1531, + "step": 435 + }, + { + "epoch": 1.59, + "grad_norm": 1.2391297277249578, + "learning_rate": 4.878818848616861e-07, + "loss": 0.1501, + "step": 436 + }, + { + "epoch": 1.59, + "grad_norm": 1.2347871913401802, + "learning_rate": 4.791879229223965e-07, + "loss": 0.1511, + "step": 437 + }, + { + "epoch": 1.59, + "grad_norm": 1.1946271149354053, + "learning_rate": 4.70563903604844e-07, + "loss": 0.1433, + "step": 438 + }, + { + "epoch": 1.6, + "grad_norm": 1.1786859198567885, + "learning_rate": 4.620101253962206e-07, + "loss": 0.1438, + "step": 439 + }, + { + "epoch": 1.6, + "grad_norm": 1.2164629314133888, + "learning_rate": 4.5352688435259084e-07, + "loss": 0.1465, + "step": 440 + }, + { + "epoch": 1.6, + "grad_norm": 1.1987369779528, + "learning_rate": 4.451144740886498e-07, + "loss": 0.1427, + "step": 441 + }, + { + "epoch": 1.61, + "grad_norm": 1.1745044456257632, + "learning_rate": 4.3677318576755693e-07, + "loss": 0.1452, + "step": 442 + }, + { + "epoch": 1.61, + "grad_norm": 1.2015030673130942, + "learning_rate": 4.285033080908588e-07, + "loss": 0.146, + "step": 443 + }, + { + "epoch": 1.62, + "grad_norm": 1.1926886810652166, + "learning_rate": 4.2030512728849946e-07, + "loss": 0.1466, + "step": 444 + }, + { + "epoch": 1.62, + "grad_norm": 1.2129835230208534, + "learning_rate": 4.1217892710891134e-07, + "loss": 0.1502, + "step": 445 + }, + { + "epoch": 1.62, + "grad_norm": 1.2812952282309764, + "learning_rate": 4.0412498880919417e-07, + "loss": 0.1495, + "step": 446 + }, + { + "epoch": 1.63, + "grad_norm": 1.2263165816713995, + "learning_rate": 3.9614359114538204e-07, + "loss": 0.137, + "step": 447 + }, + { + "epoch": 1.63, + "grad_norm": 1.2391702560018432, + "learning_rate": 3.882350103627952e-07, + "loss": 0.1478, + "step": 448 + }, + { + "epoch": 1.63, + "grad_norm": 1.2343948335632742, + 
"learning_rate": 3.803995201864763e-07, + "loss": 0.1416, + "step": 449 + }, + { + "epoch": 1.64, + "grad_norm": 1.2579508783896658, + "learning_rate": 3.726373918117196e-07, + "loss": 0.1479, + "step": 450 + }, + { + "epoch": 1.64, + "grad_norm": 1.2359295790578557, + "learning_rate": 3.649488938946844e-07, + "loss": 0.1465, + "step": 451 + }, + { + "epoch": 1.64, + "grad_norm": 1.2369737532237948, + "learning_rate": 3.5733429254309253e-07, + "loss": 0.1393, + "step": 452 + }, + { + "epoch": 1.65, + "grad_norm": 1.232300381116893, + "learning_rate": 3.497938513070234e-07, + "loss": 0.1466, + "step": 453 + }, + { + "epoch": 1.65, + "grad_norm": 1.2103992230159202, + "learning_rate": 3.4232783116978976e-07, + "loss": 0.1468, + "step": 454 + }, + { + "epoch": 1.66, + "grad_norm": 1.175611690299674, + "learning_rate": 3.3493649053890325e-07, + "loss": 0.1422, + "step": 455 + }, + { + "epoch": 1.66, + "grad_norm": 1.232453266775988, + "learning_rate": 3.276200852371339e-07, + "loss": 0.1501, + "step": 456 + }, + { + "epoch": 1.66, + "grad_norm": 1.1629870452205142, + "learning_rate": 3.203788684936535e-07, + "loss": 0.1409, + "step": 457 + }, + { + "epoch": 1.67, + "grad_norm": 1.1970563544760915, + "learning_rate": 3.13213090935271e-07, + "loss": 0.138, + "step": 458 + }, + { + "epoch": 1.67, + "grad_norm": 1.1900136131482124, + "learning_rate": 3.0612300057775934e-07, + "loss": 0.1499, + "step": 459 + }, + { + "epoch": 1.67, + "grad_norm": 1.217041014292307, + "learning_rate": 2.9910884281727225e-07, + "loss": 0.1484, + "step": 460 + }, + { + "epoch": 1.68, + "grad_norm": 1.2302492319561962, + "learning_rate": 2.921708604218454e-07, + "loss": 0.1413, + "step": 461 + }, + { + "epoch": 1.68, + "grad_norm": 1.2328210089050602, + "learning_rate": 2.853092935230009e-07, + "loss": 0.148, + "step": 462 + }, + { + "epoch": 1.69, + "grad_norm": 1.151860914936698, + "learning_rate": 2.785243796074333e-07, + "loss": 0.1363, + "step": 463 + }, + { + "epoch": 1.69, + "grad_norm": 1.2454166718871302, + "learning_rate": 2.7181635350878645e-07, + "loss": 0.1476, + "step": 464 + }, + { + "epoch": 1.69, + "grad_norm": 1.1704259555104832, + "learning_rate": 2.651854473995319e-07, + "loss": 0.1438, + "step": 465 + }, + { + "epoch": 1.7, + "grad_norm": 1.2056185431547946, + "learning_rate": 2.5863189078292913e-07, + "loss": 0.1491, + "step": 466 + }, + { + "epoch": 1.7, + "grad_norm": 1.2058871996577587, + "learning_rate": 2.521559104850815e-07, + "loss": 0.1407, + "step": 467 + }, + { + "epoch": 1.7, + "grad_norm": 1.214594006375769, + "learning_rate": 2.4575773064708904e-07, + "loss": 0.1451, + "step": 468 + }, + { + "epoch": 1.71, + "grad_norm": 1.21306124146432, + "learning_rate": 2.3943757271728816e-07, + "loss": 0.147, + "step": 469 + }, + { + "epoch": 1.71, + "grad_norm": 1.2551011043687805, + "learning_rate": 2.331956554435863e-07, + "loss": 0.1502, + "step": 470 + }, + { + "epoch": 1.71, + "grad_norm": 1.1678659805171299, + "learning_rate": 2.2703219486589434e-07, + "loss": 0.1448, + "step": 471 + }, + { + "epoch": 1.72, + "grad_norm": 1.195393936649886, + "learning_rate": 2.2094740430864569e-07, + "loss": 0.1454, + "step": 472 + }, + { + "epoch": 1.72, + "grad_norm": 1.1601308911930628, + "learning_rate": 2.1494149437341377e-07, + "loss": 0.1415, + "step": 473 + }, + { + "epoch": 1.73, + "grad_norm": 1.2073272533882111, + "learning_rate": 2.0901467293162448e-07, + "loss": 0.1462, + "step": 474 + }, + { + "epoch": 1.73, + "grad_norm": 1.2058672701679227, + "learning_rate": 2.0316714511736002e-07, + 
"loss": 0.1434, + "step": 475 + }, + { + "epoch": 1.73, + "grad_norm": 1.2242817630659004, + "learning_rate": 1.9739911332025796e-07, + "loss": 0.144, + "step": 476 + }, + { + "epoch": 1.73, + "eval_loss": 0.1969931423664093, + "eval_runtime": 1744.8052, + "eval_samples_per_second": 1.325, + "eval_steps_per_second": 0.074, + "step": 476 + }, + { + "epoch": 1.74, + "grad_norm": 1.2178435451927976, + "learning_rate": 1.9171077717850955e-07, + "loss": 0.1481, + "step": 477 + }, + { + "epoch": 1.74, + "grad_norm": 1.1877313865288135, + "learning_rate": 1.861023335719475e-07, + "loss": 0.1423, + "step": 478 + }, + { + "epoch": 1.74, + "grad_norm": 1.1722658735456841, + "learning_rate": 1.805739766152309e-07, + "loss": 0.1451, + "step": 479 + }, + { + "epoch": 1.75, + "grad_norm": 1.2929779609593208, + "learning_rate": 1.7512589765112998e-07, + "loss": 0.1534, + "step": 480 + }, + { + "epoch": 1.75, + "grad_norm": 1.1696523903876856, + "learning_rate": 1.6975828524390116e-07, + "loss": 0.1424, + "step": 481 + }, + { + "epoch": 1.75, + "grad_norm": 1.2354949943062012, + "learning_rate": 1.6447132517276005e-07, + "loss": 0.1475, + "step": 482 + }, + { + "epoch": 1.76, + "grad_norm": 1.1918146343547897, + "learning_rate": 1.5926520042545385e-07, + "loss": 0.1497, + "step": 483 + }, + { + "epoch": 1.76, + "grad_norm": 1.1867136093995747, + "learning_rate": 1.5414009119192635e-07, + "loss": 0.1471, + "step": 484 + }, + { + "epoch": 1.77, + "grad_norm": 1.2129514214834805, + "learning_rate": 1.4909617485808077e-07, + "loss": 0.1491, + "step": 485 + }, + { + "epoch": 1.77, + "grad_norm": 1.1739798762450462, + "learning_rate": 1.441336259996412e-07, + "loss": 0.1457, + "step": 486 + }, + { + "epoch": 1.77, + "grad_norm": 1.1996141212437654, + "learning_rate": 1.392526163761107e-07, + "loss": 0.148, + "step": 487 + }, + { + "epoch": 1.78, + "grad_norm": 1.188713566292959, + "learning_rate": 1.3445331492482617e-07, + "loss": 0.1402, + "step": 488 + }, + { + "epoch": 1.78, + "grad_norm": 1.1632542773939891, + "learning_rate": 1.2973588775511026e-07, + "loss": 0.1442, + "step": 489 + }, + { + "epoch": 1.78, + "grad_norm": 1.1541487583998058, + "learning_rate": 1.2510049814252302e-07, + "loss": 0.1351, + "step": 490 + }, + { + "epoch": 1.79, + "grad_norm": 1.1856032676442905, + "learning_rate": 1.2054730652321127e-07, + "loss": 0.1441, + "step": 491 + }, + { + "epoch": 1.79, + "grad_norm": 1.2087592729180825, + "learning_rate": 1.1607647048835463e-07, + "loss": 0.1442, + "step": 492 + }, + { + "epoch": 1.8, + "grad_norm": 1.1754537896803356, + "learning_rate": 1.1168814477871132e-07, + "loss": 0.1488, + "step": 493 + }, + { + "epoch": 1.8, + "grad_norm": 1.1873953173260194, + "learning_rate": 1.0738248127926343e-07, + "loss": 0.1418, + "step": 494 + }, + { + "epoch": 1.8, + "grad_norm": 1.1873470171545304, + "learning_rate": 1.0315962901395804e-07, + "loss": 0.1435, + "step": 495 + }, + { + "epoch": 1.81, + "grad_norm": 1.2347280084616006, + "learning_rate": 9.901973414055188e-08, + "loss": 0.1468, + "step": 496 + }, + { + "epoch": 1.81, + "grad_norm": 1.1993545224998636, + "learning_rate": 9.496293994555067e-08, + "loss": 0.1441, + "step": 497 + }, + { + "epoch": 1.81, + "grad_norm": 1.1712070785768458, + "learning_rate": 9.098938683924974e-08, + "loss": 0.1415, + "step": 498 + }, + { + "epoch": 1.82, + "grad_norm": 1.1845736378772493, + "learning_rate": 8.709921235087598e-08, + "loss": 0.1408, + "step": 499 + }, + { + "epoch": 1.82, + "grad_norm": 1.1393418670546167, + "learning_rate": 
8.329255112382666e-08, + "loss": 0.1405, + "step": 500 + }, + { + "epoch": 1.82, + "grad_norm": 1.1777279904642142, + "learning_rate": 7.956953491100872e-08, + "loss": 0.1438, + "step": 501 + }, + { + "epoch": 1.83, + "grad_norm": 1.1530863211839957, + "learning_rate": 7.593029257027956e-08, + "loss": 0.1409, + "step": 502 + }, + { + "epoch": 1.83, + "grad_norm": 1.1709920792646966, + "learning_rate": 7.23749500599874e-08, + "loss": 0.1421, + "step": 503 + }, + { + "epoch": 1.84, + "grad_norm": 1.1637406875875316, + "learning_rate": 6.890363043461051e-08, + "loss": 0.1438, + "step": 504 + }, + { + "epoch": 1.84, + "grad_norm": 1.1912276846910155, + "learning_rate": 6.551645384049898e-08, + "loss": 0.1421, + "step": 505 + }, + { + "epoch": 1.84, + "grad_norm": 1.1799385095168429, + "learning_rate": 6.221353751171666e-08, + "loss": 0.1481, + "step": 506 + }, + { + "epoch": 1.85, + "grad_norm": 1.179099784689955, + "learning_rate": 5.8994995765982166e-08, + "loss": 0.1405, + "step": 507 + }, + { + "epoch": 1.85, + "grad_norm": 1.1613286990849088, + "learning_rate": 5.5860940000714016e-08, + "loss": 0.1415, + "step": 508 + }, + { + "epoch": 1.85, + "grad_norm": 1.1703680815211537, + "learning_rate": 5.281147868917369e-08, + "loss": 0.1396, + "step": 509 + }, + { + "epoch": 1.86, + "grad_norm": 1.1546772402387957, + "learning_rate": 4.984671737671143e-08, + "loss": 0.1432, + "step": 510 + }, + { + "epoch": 1.86, + "grad_norm": 1.1889024572352718, + "learning_rate": 4.6966758677113865e-08, + "loss": 0.1481, + "step": 511 + }, + { + "epoch": 1.87, + "grad_norm": 1.184835084634113, + "learning_rate": 4.4171702269051874e-08, + "loss": 0.1392, + "step": 512 + }, + { + "epoch": 1.87, + "grad_norm": 1.1939670509454507, + "learning_rate": 4.146164489263055e-08, + "loss": 0.1423, + "step": 513 + }, + { + "epoch": 1.87, + "grad_norm": 1.152354164012095, + "learning_rate": 3.88366803460416e-08, + "loss": 0.144, + "step": 514 + }, + { + "epoch": 1.88, + "grad_norm": 1.198608847020095, + "learning_rate": 3.629689948231624e-08, + "loss": 0.1455, + "step": 515 + }, + { + "epoch": 1.88, + "grad_norm": 1.1541466216184588, + "learning_rate": 3.3842390206180186e-08, + "loss": 0.1433, + "step": 516 + }, + { + "epoch": 1.88, + "grad_norm": 1.1731774238817725, + "learning_rate": 3.147323747101222e-08, + "loss": 0.1425, + "step": 517 + }, + { + "epoch": 1.89, + "grad_norm": 1.183496832152116, + "learning_rate": 2.9189523275903743e-08, + "loss": 0.1383, + "step": 518 + }, + { + "epoch": 1.89, + "grad_norm": 1.15889113953881, + "learning_rate": 2.6991326662819674e-08, + "loss": 0.1408, + "step": 519 + }, + { + "epoch": 1.89, + "grad_norm": 1.146505237790243, + "learning_rate": 2.487872371386424e-08, + "loss": 0.1406, + "step": 520 + }, + { + "epoch": 1.9, + "grad_norm": 1.1645336235315122, + "learning_rate": 2.2851787548646143e-08, + "loss": 0.1432, + "step": 521 + }, + { + "epoch": 1.9, + "grad_norm": 1.1254425886768031, + "learning_rate": 2.0910588321748915e-08, + "loss": 0.1391, + "step": 522 + }, + { + "epoch": 1.91, + "grad_norm": 1.198578151082183, + "learning_rate": 1.9055193220302582e-08, + "loss": 0.1432, + "step": 523 + }, + { + "epoch": 1.91, + "grad_norm": 1.1756617221424746, + "learning_rate": 1.728566646165747e-08, + "loss": 0.1397, + "step": 524 + }, + { + "epoch": 1.91, + "grad_norm": 1.1694962671017113, + "learning_rate": 1.560206929116237e-08, + "loss": 0.1426, + "step": 525 + }, + { + "epoch": 1.92, + "grad_norm": 1.1590713418748508, + "learning_rate": 1.4004459980045127e-08, + "loss": 0.1367, + 
"step": 526 + }, + { + "epoch": 1.92, + "grad_norm": 1.2336300671854394, + "learning_rate": 1.2492893823394248e-08, + "loss": 0.1463, + "step": 527 + }, + { + "epoch": 1.92, + "grad_norm": 1.1697007816662468, + "learning_rate": 1.1067423138247103e-08, + "loss": 0.1483, + "step": 528 + }, + { + "epoch": 1.93, + "grad_norm": 1.1582767175877697, + "learning_rate": 9.728097261777202e-09, + "loss": 0.1395, + "step": 529 + }, + { + "epoch": 1.93, + "grad_norm": 1.1955747448497502, + "learning_rate": 8.47496254958835e-09, + "loss": 0.1472, + "step": 530 + }, + { + "epoch": 1.93, + "grad_norm": 1.18078514121399, + "learning_rate": 7.3080623741086935e-09, + "loss": 0.1424, + "step": 531 + }, + { + "epoch": 1.94, + "grad_norm": 1.1815591933113088, + "learning_rate": 6.2274371230905405e-09, + "loss": 0.142, + "step": 532 + }, + { + "epoch": 1.94, + "grad_norm": 1.2085213019586194, + "learning_rate": 5.233124198212036e-09, + "loss": 0.147, + "step": 533 + }, + { + "epoch": 1.95, + "grad_norm": 1.184479260760234, + "learning_rate": 4.325158013783193e-09, + "loss": 0.1443, + "step": 534 + }, + { + "epoch": 1.95, + "grad_norm": 1.1472257541841997, + "learning_rate": 3.503569995554068e-09, + "loss": 0.1395, + "step": 535 + }, + { + "epoch": 1.95, + "grad_norm": 1.134153422962893, + "learning_rate": 2.7683885796273014e-09, + "loss": 0.1438, + "step": 536 + }, + { + "epoch": 1.96, + "grad_norm": 1.1654823194655364, + "learning_rate": 2.1196392114744556e-09, + "loss": 0.1456, + "step": 537 + }, + { + "epoch": 1.96, + "grad_norm": 1.2267159569729837, + "learning_rate": 1.5573443450545012e-09, + "loss": 0.1496, + "step": 538 + }, + { + "epoch": 1.96, + "grad_norm": 1.1986955603206728, + "learning_rate": 1.0815234420369358e-09, + "loss": 0.1471, + "step": 539 + }, + { + "epoch": 1.97, + "grad_norm": 1.158924207587925, + "learning_rate": 6.921929711287134e-10, + "loss": 0.1417, + "step": 540 + }, + { + "epoch": 1.97, + "grad_norm": 1.2069843316534727, + "learning_rate": 3.8936640750358856e-10, + "loss": 0.1453, + "step": 541 + }, + { + "epoch": 1.98, + "grad_norm": 1.167188164531165, + "learning_rate": 1.7305423233554553e-10, + "loss": 0.1438, + "step": 542 + }, + { + "epoch": 1.98, + "grad_norm": 1.1760519543485166, + "learning_rate": 4.3263932437420665e-11, + "loss": 0.1481, + "step": 543 + }, + { + "epoch": 1.98, + "grad_norm": 1.1726856432077697, + "learning_rate": 0.0, + "loss": 0.1441, + "step": 544 + }, + { + "epoch": 1.98, + "eval_loss": 0.19557544589042664, + "eval_runtime": 1745.4847, + "eval_samples_per_second": 1.324, + "eval_steps_per_second": 0.074, + "step": 544 + } + ], + "logging_steps": 1, + "max_steps": 544, + "num_input_tokens_seen": 0, + "num_train_epochs": 2, + "save_steps": 272, + "total_flos": 512325844992000.0, + "train_batch_size": 2, + "trial_name": null, + "trial_params": null +} diff --git a/checkpoint-544/training_args.bin b/checkpoint-544/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..57d371d207333f3981c73912981eb12ae9766a94 --- /dev/null +++ b/checkpoint-544/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01a4c76e5fdc09ec01dc7e8ead7778553f5e617c35ba83b4354ef7a547fbf2ae +size 7352 diff --git a/checkpoint-544/zero_to_fp32.py b/checkpoint-544/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..49b846633d6eb1e836e34681e44033581f4edb7b --- /dev/null +++ b/checkpoint-544/zero_to_fp32.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: python zero_to_fp32.py . pytorch_model.bin + +import argparse +import torch +import glob +import math +import os +import re +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, the checkpoints are pickled with +# DeepSpeed data structures, so deepspeed has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict + param_shapes: dict + shared_params: list + ds_version: int + frozen_param_shapes: dict + frozen_param_fragments: dict + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if
frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + + total_files = len(files) + state_dicts = [] + for f in files: + state_dict = torch.load(f, map_location=device) + # immediately discard the two potentially huge optimizer states, since we only care about the fp32 master weights, + # and also handle the case where they were already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have a different partition_count, as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + if zero_stage <= 2: + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + elif zero_stage == 3: + # if there is more than one param group, there will be multiple flattened tensors - one + # flattened tensor per group - for simplicity merge them into a single tensor + # + # XXX: could make the script more memory efficient for when there are multiple groups - it + # will require matching the sub-lists of param_shapes for each param group flattened tensor + + fp32_flat_groups = [ + torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) + ] + + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in 
range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = fp32_flat_groups[0].numel() * world_size + # Reconstruction protocol: For zero3 we need to zip the 
partitions together at the boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + for name, shape in param_shapes.items(): + + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # XXX: memory usage doubles here + state_dict[name] = torch.cat( + tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), + 0).narrow(0, 0, unpartitioned_numel).view(shape) + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14`` + + Returns: + - pytorch ``state_dict`` + + Note: this approach may not work if your application doesn't have sufficient free CPU memory, and + you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint.
+ + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + """ + + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + print(f"Saving fp32 state dict to {output_file}") + torch.save(state_dict, output_file) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model on cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model``: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note that once this has run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument( + "output_file", + type=str, + help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)