IvanHU committed on
Commit b0b7ea8 · verified · 1 Parent(s): 895548e

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. config.json +72 -0
  2. configuration_minicpm.py +202 -0
  3. global_step29187/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  4. global_step29187/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt +3 -0
  5. global_step29187/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt +3 -0
  6. global_step29187/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt +3 -0
  7. global_step29187/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt +3 -0
  8. global_step29187/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt +3 -0
  9. global_step29187/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt +3 -0
  10. global_step29187/bf16_zero_pp_rank_16_mp_rank_00_optim_states.pt +3 -0
  11. global_step29187/bf16_zero_pp_rank_17_mp_rank_00_optim_states.pt +3 -0
  12. global_step29187/bf16_zero_pp_rank_18_mp_rank_00_optim_states.pt +3 -0
  13. global_step29187/bf16_zero_pp_rank_19_mp_rank_00_optim_states.pt +3 -0
  14. global_step29187/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  15. global_step29187/bf16_zero_pp_rank_20_mp_rank_00_optim_states.pt +3 -0
  16. global_step29187/bf16_zero_pp_rank_21_mp_rank_00_optim_states.pt +3 -0
  17. global_step29187/bf16_zero_pp_rank_22_mp_rank_00_optim_states.pt +3 -0
  18. global_step29187/bf16_zero_pp_rank_23_mp_rank_00_optim_states.pt +3 -0
  19. global_step29187/bf16_zero_pp_rank_24_mp_rank_00_optim_states.pt +3 -0
  20. global_step29187/bf16_zero_pp_rank_25_mp_rank_00_optim_states.pt +3 -0
  21. global_step29187/bf16_zero_pp_rank_26_mp_rank_00_optim_states.pt +3 -0
  22. global_step29187/bf16_zero_pp_rank_27_mp_rank_00_optim_states.pt +3 -0
  23. global_step29187/bf16_zero_pp_rank_28_mp_rank_00_optim_states.pt +3 -0
  24. global_step29187/bf16_zero_pp_rank_29_mp_rank_00_optim_states.pt +3 -0
  25. global_step29187/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  26. global_step29187/bf16_zero_pp_rank_30_mp_rank_00_optim_states.pt +3 -0
  27. global_step29187/bf16_zero_pp_rank_31_mp_rank_00_optim_states.pt +3 -0
  28. global_step29187/bf16_zero_pp_rank_32_mp_rank_00_optim_states.pt +3 -0
  29. global_step29187/bf16_zero_pp_rank_33_mp_rank_00_optim_states.pt +3 -0
  30. global_step29187/bf16_zero_pp_rank_34_mp_rank_00_optim_states.pt +3 -0
  31. global_step29187/bf16_zero_pp_rank_35_mp_rank_00_optim_states.pt +3 -0
  32. global_step29187/bf16_zero_pp_rank_36_mp_rank_00_optim_states.pt +3 -0
  33. global_step29187/bf16_zero_pp_rank_37_mp_rank_00_optim_states.pt +3 -0
  34. global_step29187/bf16_zero_pp_rank_38_mp_rank_00_optim_states.pt +3 -0
  35. global_step29187/bf16_zero_pp_rank_39_mp_rank_00_optim_states.pt +3 -0
  36. global_step29187/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  37. global_step29187/bf16_zero_pp_rank_40_mp_rank_00_optim_states.pt +3 -0
  38. global_step29187/bf16_zero_pp_rank_41_mp_rank_00_optim_states.pt +3 -0
  39. global_step29187/bf16_zero_pp_rank_42_mp_rank_00_optim_states.pt +3 -0
  40. global_step29187/bf16_zero_pp_rank_43_mp_rank_00_optim_states.pt +3 -0
  41. global_step29187/bf16_zero_pp_rank_44_mp_rank_00_optim_states.pt +3 -0
  42. global_step29187/bf16_zero_pp_rank_45_mp_rank_00_optim_states.pt +3 -0
  43. global_step29187/bf16_zero_pp_rank_46_mp_rank_00_optim_states.pt +3 -0
  44. global_step29187/bf16_zero_pp_rank_47_mp_rank_00_optim_states.pt +3 -0
  45. global_step29187/bf16_zero_pp_rank_48_mp_rank_00_optim_states.pt +3 -0
  46. global_step29187/bf16_zero_pp_rank_49_mp_rank_00_optim_states.pt +3 -0
  47. global_step29187/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  48. global_step29187/bf16_zero_pp_rank_50_mp_rank_00_optim_states.pt +3 -0
  49. global_step29187/bf16_zero_pp_rank_51_mp_rank_00_optim_states.pt +3 -0
  50. global_step29187/bf16_zero_pp_rank_52_mp_rank_00_optim_states.pt +3 -0
config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "_name_or_path": "/fs/archive/share/yulan/data/aa_mini/output/minicpm-2B-final-stage19/checkpoint-184750",
+   "architectures": [
+     "MiniCPMForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_minicpm.MiniCPMConfig",
+     "AutoModel": "modeling_minicpm.MiniCPMModel",
+     "AutoModelForCausalLM": "modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSeq2SeqLM": "modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSequenceClassification": "modeling_minicpm.MiniCPMForSequenceClassification"
+   },
+   "attention_bias": true,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "dim_model_base": 1920,
+   "dim_model_base_attn": 64,
+   "dim_model_base_init": null,
+   "dim_model_base_lmh": 1,
+   "dim_model_base_logits": 1920.0,
+   "dim_model_base_lr": 256.0,
+   "down_proj_alpha": 0.03450327796711771,
+   "embed_tokens_alpha": 1,
+   "embedding_ln": false,
+   "embedding_rmsln": false,
+   "eos_token_id": 2,
+   "gate_up_proj_alpha": 0.3651483716701107,
+   "gradient_checkpointing_step": 11,
+   "hidden_act": "silu",
+   "hidden_size": 1920,
+   "hidden_states_shrink": 0.18708286933869706,
+   "init_scale_o": 1,
+   "initializer_range": 5e-05,
+   "input_layernorm_alpha": 1.0,
+   "intermediate_size": 4800,
+   "k_proj_alpha": 0.3651483716701107,
+   "layer_norm_eps": 1e-06,
+   "lm_head_alpha": 1.0,
+   "ln_scale": 1,
+   "max_position_embeddings": 4096,
+   "model_reproduce": "transformer",
+   "model_type": "minicpm",
+   "norm_alpha": 1.0,
+   "num_attention_heads": 30,
+   "num_epochs_trained_before_this_epoch": 18.0,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 6,
+   "num_steps_trained_before_this_epoch": 175608,
+   "o_proj_alpha": 0.03450327796711771,
+   "post_attention_layernorm_alpha": 1.0,
+   "q_proj_alpha": 0.3651483716701107,
+   "qk_layernorm": false,
+   "rms_norm_eps": 1e-06,
+   "rms_type": "llama",
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "scale_emb": 10.0,
+   "shrink_alpha": 1,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.44.2",
+   "use_cache": true,
+   "use_emb_alpha": true,
+   "use_liger": true,
+   "use_norm_alpha": false,
+   "use_sliding_window": false,
+   "v_proj_alpha": 0.3651483716701107,
+   "vocab_size": 99000,
+   "wesar_weights": false,
+   "z_loss": 0.0001
+ }
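
Note that `auto_map` above routes the `Auto*` loader classes to the custom `configuration_minicpm.py` / `modeling_minicpm.py` modules shipped in this commit, so loading the checkpoint from the Hub requires `trust_remote_code=True`. A minimal sketch; the repo id below is a placeholder, not taken from this commit:

```python
# Hedged sketch: load this checkpoint through the custom code registered in auto_map.
# "IvanHU/some-repo" is a placeholder repo id, not confirmed by this commit.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "IvanHU/some-repo"  # placeholder

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.hidden_size, config.num_hidden_layers)  # 1920, 56 per this config.json

model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,
)
```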
configuration_minicpm.py ADDED
@@ -0,0 +1,202 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """MiniCPM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ MINICPM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class MiniCPMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate a
+     MiniCPM model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of MiniCPM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the MiniCPM model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`MiniCPMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             The number of key/value heads used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified, defaults to `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value
+             is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+             Whether to tie the input and output word embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+             is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on
+             how these scaling strategies behave:
+             https://www.reddit.com/r/LocalMiniCPM/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
+             is an experimental feature, subject to breaking API changes in future versions.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import MiniCPMModel, MiniCPMConfig
+
+     >>> # Initializing a MiniCPM minicpm-7b style configuration
+     >>> configuration = MiniCPMConfig()
+
+     >>> # Initializing a model from the minicpm-7b style configuration
+     >>> model = MiniCPMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "minicpm"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=True,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         scale_emb=1,
+         dim_model_base=1,
+         scale_depth=1,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.scale_emb = scale_emb
+         self.dim_model_base = dim_model_base
+         self.scale_depth = scale_depth
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+         try:
+             # Prefer FlashAttention 2 when the flash_attn package is installed.
+             import flash_attn  # noqa: F401
+             self._attn_implementation = "flash_attention_2"
+         except ImportError:
+             pass
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
global_step29187/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:281a83ac9253ef0b882ab1ae088ce5da00fbf8e2c7d0b2d5c48227a7630548af
+ size 478761714
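
The `global_step29187/*.pt` entries in this commit are Git LFS pointers to DeepSpeed ZeRO optimizer-state shards, one per data-parallel rank (~479 MB each). If the downloaded checkpoint preserves DeepSpeed's full layout (model-state files alongside these optimizer shards, which this truncated 50-file view does not show), the shards can typically be merged into a single fp32 state dict with DeepSpeed's `zero_to_fp32` utility; a hedged sketch:

```python
# Hedged sketch: consolidate DeepSpeed ZeRO shards into one fp32 state dict.
# Assumes a complete DeepSpeed checkpoint directory, i.e. global_step29187/
# also holds the *_model_states.pt files not listed in this commit view.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "./checkpoint",          # directory containing global_step29187/
    tag="global_step29187",  # which global step to consolidate
)
torch.save(state_dict, "pytorch_model_fp32.bin")
```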
global_step29187/bf16_zero_pp_rank_10_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bff1f35dd9627cc268f3717a11de1d9af22cbdb05ebb16a97882cb4bee9064c2
+ size 478761666
global_step29187/bf16_zero_pp_rank_11_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfe3f54f323d9b98ec45beaa71f619d0c72b46aabb1c335dc3f5d59cf5fa5823
+ size 478761666
global_step29187/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46a880acd960869c6b38f085b4470d5377941eb232dd7f7a22077eb1a7560676
+ size 478761666
global_step29187/bf16_zero_pp_rank_13_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e988699fbb0be6202c2620e17a3fca43be8c9a82cf882d664968a142f888226d
+ size 478761666
global_step29187/bf16_zero_pp_rank_14_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:feee0bed0ed67f574cba0d90c265e773bc9d057fe872060b3c11abd945a1d283
+ size 478761666
global_step29187/bf16_zero_pp_rank_15_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32137b1cd476099b5da47dfe7dd7ba772d35c134bf899443bc1920f8d7266ece
+ size 478761666
global_step29187/bf16_zero_pp_rank_16_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:835ad89ce8e829c6e0d61a8c1d8a89d72783d8918d51a44bd95e002a9293ecf1
+ size 478761666
global_step29187/bf16_zero_pp_rank_17_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56c09761d3db7dc8f10f49127a5b8e1e70fce1eb739a0dd4fc9d47ce2a1a588c
+ size 478761666
global_step29187/bf16_zero_pp_rank_18_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72c870a40b86acde5e790220a6b7698ff20c5a5b51926df5ffcca661d7ba560d
+ size 478761666
global_step29187/bf16_zero_pp_rank_19_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceaaf2109a55709d7ee4d2555b23ec36e310b5b2fda6bc654f756ef767942e5a
+ size 478761666
global_step29187/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82314eae5c11a0194f9a5258f811b379b3cad6b92a3f872f0ec73f1ac3346786
+ size 478761650
global_step29187/bf16_zero_pp_rank_20_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0089ea08a1215d25d8f58f5fde73399ffc7f7cb033ef0610af9e5fd7e8873c49
+ size 478761666
global_step29187/bf16_zero_pp_rank_21_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:775e3e2312762f3f69618a9d46833a1463c2cb166fab306a8cdf9002ffb40c57
+ size 478761666
global_step29187/bf16_zero_pp_rank_22_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d526a89cd63029f1c83776abea2c1f4e65dbf4c5e6d197cd3de07ad85a2f5c2a
+ size 478761666
global_step29187/bf16_zero_pp_rank_23_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63cd1e79b4ed2f27a50ba2cffc43de0181ecf63e03cfc6116e6fc8adb2ae56e9
+ size 478761666
global_step29187/bf16_zero_pp_rank_24_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95a3cd5a7307cc0a627dacb485161479364ff34dbfd1d389c97d82bbe59bc598
+ size 478761666
global_step29187/bf16_zero_pp_rank_25_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:374c89318effe6a0cd7196899943eb7cf97bf1fb0f036d86762b0f18e76da418
+ size 478761666
global_step29187/bf16_zero_pp_rank_26_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2f0e714c1b86f5575b3f56d7d995fb51d36fe4dcec9c178a7b70eae040cd96e
+ size 478761666
global_step29187/bf16_zero_pp_rank_27_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:494e8492c5a7233961e0586a76ef4ec6d74c1399ec8e21cd72c2db3d6ec3851c
+ size 478761666
global_step29187/bf16_zero_pp_rank_28_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3b72bd017736c2c903e3552f3e2e81fb26456ff077012c4db3fb22e10fe2209
+ size 478761666
global_step29187/bf16_zero_pp_rank_29_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:538afeb2a62a35e0ab6e9570e834d8f161545afe6734d14bcbc206cb5a857a68
+ size 478761666
global_step29187/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6682ef595565d189034d4cd5a256ac19fcb4616dadba08871d841c9324266e8
+ size 478761650
global_step29187/bf16_zero_pp_rank_30_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcea6aa67ac002b791c7a95c9acfdce753d46241920dd78f645a0da1e45377c0
+ size 478761666
global_step29187/bf16_zero_pp_rank_31_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:699c4386e153f53a03c87357956658b7611ccb6bf36d55dc9e385b65f504755e
+ size 478761666
global_step29187/bf16_zero_pp_rank_32_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c58d2b95d1aef2e1b7e0fe04e442f1c06b0767a1afdf260d3e5168be97ab488
+ size 478761666
global_step29187/bf16_zero_pp_rank_33_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02bf4fc8d1b33330ab3ae1e8833f4c8c1f518ae7ab45709b112a0cc1f210dc19
+ size 478761666
global_step29187/bf16_zero_pp_rank_34_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6888b8b24bf655daf11bf1df7ce6ff5049f37c9f2500e05cfda450d8961e6b32
+ size 478761666
global_step29187/bf16_zero_pp_rank_35_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a71fda8a830a3c72ac76892e85dcda128d2d07c11c9dde4182e91e6ac15a955f
+ size 478761666
global_step29187/bf16_zero_pp_rank_36_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03b928269a1ad1adcf7a851170b0426c31911fd8d121bf18dbaa318097a88334
+ size 478761666
global_step29187/bf16_zero_pp_rank_37_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e7c28eacbaa112d9d05ae8143b24981a080fa8b1d8874e43057ed485f954ee5
+ size 478761666
global_step29187/bf16_zero_pp_rank_38_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:089c77ade5c339df46a7ee880185e8f531260899cadba64b4ed9bb4e26943abb
+ size 478761666
global_step29187/bf16_zero_pp_rank_39_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc965dd6a38d8f175503bd9cd0672e97cd83221e3b8f8a44c7288f0bbe879e2e
+ size 478761666
global_step29187/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa0c3055cb601ad85d610db08e9af810e971109a693d9576fdb9b1bd1478172f
+ size 478761650
global_step29187/bf16_zero_pp_rank_40_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d336858598b0a1209ff03dd61fd7e6598227daf1ff2139758032dcd0c03d451a
+ size 478761666
global_step29187/bf16_zero_pp_rank_41_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5de2adf18432ebb75bf70cce170151af92d78b7862d2fd06e7a5dd7d86d50c4
+ size 478761666
global_step29187/bf16_zero_pp_rank_42_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29d35ffcb243826d0f0c59a745d24a36ec36214af0ebd7ee501ace9dc0957a6c
+ size 478761666
global_step29187/bf16_zero_pp_rank_43_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:948df9faa58e3c6d8efb0d5649ec5f795030061c2a08f4ad1d2ebb5eba45bb0c
+ size 478761666
global_step29187/bf16_zero_pp_rank_44_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5f82723d9a23d19bff39682c51cf60e8161604dec7f370c6b3fb189c1168da8
+ size 478761666
global_step29187/bf16_zero_pp_rank_45_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a437f053286f3c7e861c61b65534576d4a0a5d71a7b6a163a1de3a192c856404
+ size 478761666
global_step29187/bf16_zero_pp_rank_46_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bac5d61f5fbf07e7e9affa02182acc5558bf5ae286069af4f19e21c4db9c3d6
+ size 478761666
global_step29187/bf16_zero_pp_rank_47_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:540b0a9147c05fa9dcba81f868e594e5cd53dc7fa1cf0ae86dccf451c5a52012
+ size 478761666
global_step29187/bf16_zero_pp_rank_48_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3112c739bd72ab77c4f1d65a437ed0312b2fa0b20577ab546d16b184270623c
+ size 478761666
global_step29187/bf16_zero_pp_rank_49_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32e35aec2f65a070160d63609ab2bd9070e33dbe00723a2ce5bf5d40ec5ff8d1
+ size 478761666
global_step29187/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ff5fa7be77d817d9109dcc3f49996ab2a1075b669a87a0b238fcd33b8521005
+ size 478761650
global_step29187/bf16_zero_pp_rank_50_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cce9b61a3964e81f3978283a038b961995dbc0c1bc5fef887d2f358df5243013
+ size 478761666
global_step29187/bf16_zero_pp_rank_51_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:689721478cc14cef9de7db4ec0a2c68d68d57b6ae7f24adc848b59eec6b50d26
+ size 478761666
global_step29187/bf16_zero_pp_rank_52_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43f6e485119521aeee0de41a39faa5b4f8fd0ed201756da367316268eb8b323c
+ size 478761666