intervitens committed on
Commit 77efb6d · verified · 1 Parent(s): c604174

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. chat_template.jinja +70 -0
  3. config.json +46 -0
  4. configuration_glm4_shared_moe.py +236 -0
  5. generation_config.json +8 -0
  6. model-00001-of-00179.safetensors +3 -0
  7. model-00002-of-00179-new.safetensors +3 -0
  8. model-00002-of-00179-old.safetensors +3 -0
  9. model-00002-of-00179.safetensors +3 -0
  10. model-00003-of-00179.safetensors +3 -0
  11. model-00004-of-00179.safetensors +3 -0
  12. model-00005-of-00179.safetensors +3 -0
  13. model-00006-of-00179.safetensors +3 -0
  14. model-00007-of-00179.safetensors +3 -0
  15. model-00008-of-00179.safetensors +3 -0
  16. model-00009-of-00179.safetensors +3 -0
  17. model-00010-of-00179.safetensors +3 -0
  18. model-00011-of-00179.safetensors +3 -0
  19. model-00012-of-00179.safetensors +3 -0
  20. model-00013-of-00179.safetensors +3 -0
  21. model-00014-of-00179.safetensors +3 -0
  22. model-00015-of-00179.safetensors +3 -0
  23. model-00016-of-00179.safetensors +3 -0
  24. model-00017-of-00179.safetensors +3 -0
  25. model-00018-of-00179.safetensors +3 -0
  26. model-00019-of-00179.safetensors +3 -0
  27. model-00020-of-00179.safetensors +3 -0
  28. model-00021-of-00179.safetensors +3 -0
  29. model-00022-of-00179.safetensors +3 -0
  30. model-00023-of-00179.safetensors +3 -0
  31. model-00024-of-00179.safetensors +3 -0
  32. model-00025-of-00179.safetensors +3 -0
  33. model-00026-of-00179.safetensors +3 -0
  34. model-00027-of-00179.safetensors +3 -0
  35. model-00028-of-00179.safetensors +3 -0
  36. model-00029-of-00179.safetensors +3 -0
  37. model-00030-of-00179.safetensors +3 -0
  38. model-00031-of-00179.safetensors +3 -0
  39. model-00032-of-00179.safetensors +3 -0
  40. model-00033-of-00179.safetensors +3 -0
  41. model-00034-of-00179.safetensors +3 -0
  42. model-00035-of-00179.safetensors +3 -0
  43. model-00036-of-00179.safetensors +3 -0
  44. model-00037-of-00179.safetensors +3 -0
  45. model-00038-of-00179.safetensors +3 -0
  46. model-00039-of-00179.safetensors +3 -0
  47. model-00040-of-00179.safetensors +3 -0
  48. model-00041-of-00179.safetensors +3 -0
  49. model-00042-of-00179.safetensors +3 -0
  50. model-00043-of-00179.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,70 @@
+ [gMASK]
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
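
For reference, a minimal sketch of rendering this template via `tokenizer.apply_chat_template`; the tool definition is hypothetical, and loading from `"."` assumes a local checkout of this repo:

```python
# Minimal sketch, not part of the commit: render the chat template above.
from transformers import AutoTokenizer

# "." assumes the current directory is a checkout of this repo.
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)

# Hypothetical tool definition, used only to exercise the tools branch.
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Berlin?"},
]

# Emits the [gMASK] prefix, a <|im_start|>system block containing the
# <tools> signatures, the conversation, and a trailing <|im_start|>assistant\n.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, add_generation_prompt=True, tokenize=False
)
print(prompt)
```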
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "architectures": [
+     "Glm4SharedMoeForCausalLM"
+   ],
+   "attention_bias": true,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_glm4_shared_moe.Glm4SharedMoeConfig",
+     "AutoModel": "modeling_glm4_shared_moe.Glm4SharedMoeModel",
+     "AutoModelForCausalLM": "modeling_glm4_shared_moe.Glm4SharedMoeForCausalLM"
+   },
+   "dtype": "bfloat16",
+   "eos_token_id": [
+     151366
+   ],
+   "first_k_dense_replace": 3,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 12288,
+   "max_position_embeddings": 131072,
+   "model_type": "glm4_shared_moe",
+   "moe_intermediate_size": 1536,
+   "n_group": 1,
+   "n_routed_experts": 160,
+   "n_shared_experts": 1,
+   "norm_topk_prob": true,
+   "num_attention_heads": 96,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 92,
+   "num_key_value_heads": 8,
+   "num_nextn_predict_layers": 1,
+   "pad_token_id": 151329,
+   "partial_rotary_factor": 0.5,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "routed_scaling_factor": 2.5,
+   "tie_word_embeddings": false,
+   "topk_group": 1,
+   "transformers_version": "4.56.1",
+   "use_cache": true,
+   "use_qk_norm": true,
+   "vocab_size": 151552
+ }
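
Given the `auto_map` entries above, a hedged sketch of loading this config through `AutoConfig` (again assuming a local checkout of the repo):

```python
# Sketch: load the custom config class registered via auto_map above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".", trust_remote_code=True)

# Routing summary from the values above: each token activates 8 of 160
# routed experts plus 1 shared expert; the first 3 layers stay dense.
print(config.model_type)             # glm4_shared_moe
print(config.num_hidden_layers)      # 92
print(config.n_routed_experts)       # 160
print(config.num_experts_per_tok)    # 8
print(config.first_k_dense_replace)  # 3
```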
configuration_glm4_shared_moe.py ADDED
@@ -0,0 +1,236 @@
+ # coding=utf-8
+ # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+
+
+ class Glm4SharedMoeConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Glm4MoeModel`]. It is used to instantiate a
+     Glm4Moe model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of [THUDM/GLM-4-100B-A10B](https://huggingface.co/THUDM/GLM-4-100B-A10B).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151552):
+             Vocabulary size of the Glm4Moe model. Defines the number of different tokens that can be represented by
+             the `input_ids` passed when calling [`Glm4MoeModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 10944):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 46):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 96):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         partial_rotary_factor (`float`, *optional*, defaults to 0.5):
+             The fraction of the attention head dimension that rotary position embeddings are applied to.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 131072):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
+             type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update
+             this value accordingly.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to the value recommended by the implementation, using
+                     the `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`list[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the
+                     hidden size divided by the number of attention heads divided by 2.
+                 `long_factor` (`list[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the
+                     hidden size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         moe_intermediate_size (`int`, *optional*, defaults to 1408):
+             Intermediate size of the routed experts.
+         num_experts_per_tok (`int`, *optional*, defaults to 8):
+             Number of experts selected per token.
+         n_shared_experts (`int`, *optional*, defaults to 1):
+             Number of shared experts.
+         n_routed_experts (`int`, *optional*, defaults to 128):
+             Number of routed experts.
+         routed_scaling_factor (`float`, *optional*, defaults to 1.0):
+             Scaling factor for routed experts.
+         n_group (`int`, *optional*, defaults to 1):
+             Number of groups for routed experts.
+         topk_group (`int`, *optional*, defaults to 1):
+             Number of selected groups for each token (the experts selected for a token are drawn only from
+             `topk_group` groups).
+         first_k_dense_replace (`int`, *optional*, defaults to 1):
+             Number of dense layers at the start of the model, i.e. the first k layers in
+             embed -> dense -> dense -> ... -> dense -> moe -> moe ... -> lm_head are dense.
+         norm_topk_prob (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the top-k probabilities.
+         use_qk_norm (`bool`, *optional*, defaults to `False`):
+             Whether to use query-key normalization in the attention.
+
+     ```python
+     >>> from transformers import Glm4MoeModel, Glm4MoeConfig
+
+     >>> # Initializing a Glm4Moe style configuration
+     >>> configuration = Glm4MoeConfig()
+
+     >>> # Initializing a model from the GLM-4-MOE-100B-A10B style configuration
+     >>> model = Glm4MoeModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "glm4_shared_moe"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     # Default tensor parallel plan for base model `Glm4Moe`
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.experts.*.gate_proj": "colwise",
+         "layers.*.mlp.experts.*.up_proj": "colwise",
+         "layers.*.mlp.experts.*.down_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size=151552,
+         hidden_size=4096,
+         intermediate_size=10944,
+         num_hidden_layers=46,
+         num_attention_heads=96,
+         partial_rotary_factor=0.5,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         max_position_embeddings=131072,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         moe_intermediate_size=1408,
+         num_experts_per_tok=8,
+         n_shared_experts=1,
+         n_routed_experts=128,
+         routed_scaling_factor=1.0,
+         n_group=1,
+         topk_group=1,
+         first_k_dense_replace=1,
+         norm_topk_prob=True,
+         use_qk_norm=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.partial_rotary_factor = partial_rotary_factor
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, move it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         # MoE arguments
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_experts_per_tok = num_experts_per_tok
+         self.n_group = n_group
+         self.topk_group = topk_group
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.routed_scaling_factor = routed_scaling_factor
+         self.first_k_dense_replace = first_k_dense_replace
+         self.norm_topk_prob = norm_topk_prob
+         self.use_qk_norm = use_qk_norm
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["Glm4SharedMoeConfig"]
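
As a usage sketch (assuming this file is importable from the working directory), config.json above corresponds to constructing the class directly with its non-default values:

```python
# Sketch: rebuild the checkpoint's configuration by hand, using only
# keyword arguments defined in Glm4SharedMoeConfig.__init__ above.
from configuration_glm4_shared_moe import Glm4SharedMoeConfig

config = Glm4SharedMoeConfig(
    hidden_size=5120,
    intermediate_size=12288,
    num_hidden_layers=92,
    rope_theta=1000000,
    attention_bias=True,
    moe_intermediate_size=1536,
    n_routed_experts=160,
    routed_scaling_factor=2.5,
    first_k_dense_replace=3,
    use_qk_norm=True,
)
assert config.model_type == "glm4_shared_moe"
assert config.num_experts_per_tok == 8  # default already matches config.json
```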
generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": [
+     151366
+   ],
+   "pad_token_id": 151329,
+   "transformers_version": "4.56.1"
+ }
model-00001-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79123a69917ffccee68aa1a970f806f06da209e1d5b4f970dc0bb66b901f095b
+ size 5033164960
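
The safetensors entries here are git-lfs pointer stubs rather than the weights themselves; each records the LFS spec version, a sha256 digest, and the blob size in bytes. A small illustrative parser for this format (hypothetical helper, not part of the commit):

```python
# Sketch: parse a git-lfs pointer file like the stub above.
def parse_lfs_pointer(text: str) -> dict:
    # Pointer files are "key value" lines; oid is "sha256:<hex digest>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {
        "version": fields["version"],
        "hash_algo": algo,  # e.g. "sha256"
        "digest": digest,
        "size_bytes": int(fields["size"]),
    }

info = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:79123a69917ffccee68aa1a970f806f06da209e1d5b4f970dc0bb66b901f095b\n"
    "size 5033164960\n"
)
assert info["size_bytes"] == 5033164960  # ~5.0 GB shard
```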
model-00002-of-00179-new.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60778aba79c8a88a43209961dd8005348ee2eacd8095f0051e5c10ddb5a1d52a
+ size 3775057728
model-00002-of-00179-old.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d8359b168dd5e58949d8fab8afec44083406e7548b7ce0718275304059a139d
+ size 3775057728
model-00002-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60778aba79c8a88a43209961dd8005348ee2eacd8095f0051e5c10ddb5a1d52a
+ size 3775057728
model-00003-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74134635135a8354cb4b5fde05c4289b630b11d06aa8194fe61da4107cf9ed48
+ size 5033164960
model-00004-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53e65f3aa6f5e21bd78e6a97be4f4c2a052a7b0f345b4bd7c7dca79b59035797
+ size 2838088784
model-00005-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f223d5bddd4ffd4e350f4f72898daa5be8978f7ba5542509003ecd4995ff1cbc
+ size 5033164960
model-00006-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb228e8fc3e514b7ab873d83c42446fd6ca5802cb1e1c2cd989e889c48ae64c7
+ size 2838088784
model-00007-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1ab9932adb791f016bff6b93c8528f8f74996b52e8117cc92181dfb5b77d61f
+ size 5033164960
model-00008-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b28697156d43819307ee1d0fa02d779f2ffcf3c934f44f46b2759a282a9bf6a2
+ size 2838088784
model-00009-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a5b9b574f4e6349e1384199d3b1af8d7351408c4dcf462234c598da3557b4ed
+ size 5033164960
model-00010-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c04863d35b35ad8701adfde10a594218c6388374227f9a0fffc9d64295e8421a
+ size 2838088784
model-00011-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cada358281601b2a8dd353eecbbe4ae1b9e5c0567fb9f93641a7f401d5d4827
+ size 5033164960
model-00012-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d07f3e6d40c0c0a83eb9136496e9ed81cf8c7c5b4f847537d5fb8f6c31c6e7f
+ size 2838088784
model-00013-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c33e6433919c4fe1cfb646e4348824fc6b6c186c6e654a4db5a93d8312b79c10
+ size 5033164960
model-00014-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd6bdc77d90429a72bc3bb0901697288303d5547a95d5732f1ea0e668e6df32
+ size 2838088784
model-00015-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31c2c22d335c1e69cbc541105201d22253b9f9090b5f04fda4c4c989b6c6f117
+ size 5033164960
model-00016-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53c060b9471911f3090e9aa29f5e7f8f3f8b76b1ba62a4ddf8d2476628f6e0a0
+ size 2838088768
model-00017-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45732ef503e5d5fa9281c9ed188c7d4b0d233b93e9d29f7d12a4369d23c16368
+ size 5033164960
model-00018-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dcfa47f48486787a6095b9e47837b5847b6d82a216f7492f3ff66d6d599d7ca
+ size 2838088800
model-00019-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a8e42f6f1875402c286d97853ab3f56a17f90a757e94818e266ae66e0dd4023
+ size 5033164960
model-00020-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56b6f0fc39c2b33c03f24dea8c0634ec6b3aa0004a994c82da6cd1956a09ac77
+ size 2838088800
model-00021-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39940622f90bc8a9fac270bc35323166671287eef0da9b5aef838254397a9bef
+ size 5033164960
model-00022-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d72b815999fa325eb4128947f386ff15ca13c5d93463439d8c7899439da4e590
+ size 2838088800
model-00023-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef86f6bdbd45444b2048eafb5a11f5eb99a0532fde854ba87e4c831d730a536b
+ size 5033164960
model-00024-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cd21e7b8ecb47fcad5b2bf4488c7e5ebb49a633f1b3ba54a2ddc8bfe905638d
+ size 2838088800
model-00025-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95467ee13559bd01f13c549cc7d7df54fad1a8d821e0c772221fb9cdff3e7206
+ size 5033164960
model-00026-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69249e2a9d3b66ffed534186ec0c731efdca91b08c334b04b6a5418a9899e853
+ size 2838088800
model-00027-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a3ac1f7cbe726da00a26b0fa95f08c311cff820cf2dab70747c05c1feaa2089
+ size 5033164960
model-00028-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53f205d269d8e2d0127bea2abe87c6539eebf4d91e15c9d080cb1f9d5c155cd3
+ size 2838088800
model-00029-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8483710c95a36e8a6f16da9492bcd46fc3005e58a0fd138dc9b602fb4a10db7f
+ size 5033164960
model-00030-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60520a7607729bfef0e7e85564939a8bbbd3fcc6b2615a7e0d0da9749e714fed
+ size 2838088800
model-00031-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44d538e1845ffb15b706515aade1ef52b3dd62886d98f614f75658c8ba124aef
+ size 5033164960
model-00032-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73ad00956c3bdda229c76d342730c6f4c1b29fb797e7f31dc23d663790b4d6e2
+ size 2838088800
model-00033-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89e1516229fd5865daa475b497036d90f81a0bd44900d2cd966acd75a78e24f2
+ size 5033164960
model-00034-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c11ee29e0f6b5c24cbe93dd0719f56fa23786c5dc45c0516ee17ccf276fe3de
+ size 2838088800
model-00035-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87328f785b040caa7b7c79f53a678a7f0620d1b9082b7a38d064133238ec1606
+ size 5033164960
model-00036-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1674afc7afa0c344cdc968470b8e1f1d9555a9aa817e2a2ee6e78a193641aea
+ size 2838088800
model-00037-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:821a059310d43afb674191953d59e2e1d13d84eec728dde57811092ad8d44ffb
+ size 5033164960
model-00038-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:602e9f7ac5502f8ac9ea0c76fdbc7c87b41002ea1a792ddfa5da2de2b193deca
+ size 2838088800
model-00039-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c811f06d0f869499e01bdd758e492f8fd1e004f33fa88b5b6779b9c65ed937c4
+ size 5033164960
model-00040-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e098fff38872c1b865aad1caf20041b9f498ab09eac7a018e764445f3032ae61
+ size 2838088800
model-00041-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2796a01ff697af30fbd028f32dd695ba5eda2b340b74868d7aa5a741e65efc9b
+ size 5033164960
model-00042-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:778ca06925b5df03eafddca304769d82868c7655e1b9dc3ededc19ce7d371288
+ size 2838088800
model-00043-of-00179.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29943ccb5570849225a1386f72066dd3e4e3ea8a7bbf2890ef428dfc4f246f95
+ size 5033164960