cmeka committed
Commit 58ec182 · verified · parent: b2ce5d4

Upload lcpp-seedvr.patch

Use in place of "lcpp.patch" here:
https://github.com/city96/ComfyUI-GGUF/tree/main/tools
Apply it to a llama.cpp checkout (e.g. with git apply) and build the quantization tool as described in that repo's tools README; this version adds the seedvr architecture on top of the image/video model support in the upstream patch.

Files changed (1)
  1. lcpp-seedvr.patch +472 -0
lcpp-seedvr.patch ADDED
@@ -0,0 +1,472 @@
+ diff --git a/common/common.cpp b/common/common.cpp
+ index a8eebb68..db9498aa 100644
+ --- a/common/common.cpp
+ +++ b/common/common.cpp
+ @@ -13,6 +13,7 @@
+ #include <algorithm>
+ #include <cinttypes>
+ #include <climits>
+ +#include <chrono>
+ #include <cmath>
+ #include <codecvt>
+ #include <cstdarg>
+ diff --git a/common/log.cpp b/common/log.cpp
+ index 04c7c0ed..83e2a7d9 100644
+ --- a/common/log.cpp
+ +++ b/common/log.cpp
+ @@ -4,6 +4,7 @@
+ #include <cstdarg>
+ #include <cstdio>
+ #include <mutex>
+ +#include <chrono>
+ #include <sstream>
+ #include <thread>
+ #include <vector>
+ diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
+ index de3c706f..0267c1fa 100644
+ --- a/ggml/include/ggml.h
+ +++ b/ggml/include/ggml.h
+ @@ -223,7 +223,7 @@
+ #define GGML_MAX_OP_PARAMS 64
+
+ #ifndef GGML_MAX_NAME
+ -# define GGML_MAX_NAME 64
+ +# define GGML_MAX_NAME 128
+ #endif
+
+ #define GGML_DEFAULT_N_THREADS 4
+ @@ -2449,6 +2449,7 @@ extern "C" {
+
+ // manage tensor info
+ GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
+ + GGML_API void gguf_set_tensor_ndim(struct gguf_context * ctx, const char * name, int n_dim);
+ GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
+ GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
+
+ diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
+ index b16c462f..6d1568f1 100644
+ --- a/ggml/src/ggml.c
+ +++ b/ggml/src/ggml.c
+ @@ -22960,6 +22960,14 @@ void gguf_add_tensor(
+ ctx->header.n_tensors++;
+ }
+
+ +void gguf_set_tensor_ndim(struct gguf_context * ctx, const char * name, const int n_dim) {
+ + const int idx = gguf_find_tensor(ctx, name);
+ + if (idx < 0) {
+ + GGML_ABORT("tensor not found");
+ + }
+ + ctx->infos[idx].n_dims = n_dim;
+ +}
+ +
+ void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
+ const int idx = gguf_find_tensor(ctx, name);
+ if (idx < 0) {
+ diff --git a/src/llama.cpp b/src/llama.cpp
+ index 24e1f1f0..bac36971 100644
+ --- a/src/llama.cpp
+ +++ b/src/llama.cpp
+ @@ -205,6 +205,17 @@ enum llm_arch {
+ LLM_ARCH_GRANITE,
+ LLM_ARCH_GRANITE_MOE,
+ LLM_ARCH_CHAMELEON,
+ + LLM_ARCH_FLUX,
+ + LLM_ARCH_SD1,
+ + LLM_ARCH_SDXL,
+ + LLM_ARCH_SD3,
+ + LLM_ARCH_AURA,
+ + LLM_ARCH_LTXV,
+ + LLM_ARCH_HYVID,
+ + LLM_ARCH_WAN,
+ + LLM_ARCH_HIDREAM,
+ + LLM_ARCH_COSMOS,
+ + LLM_ARCH_SEEDVR,
+ LLM_ARCH_UNKNOWN,
+ };
+
+ @@ -258,6 +269,17 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
+ { LLM_ARCH_GRANITE, "granite" },
+ { LLM_ARCH_GRANITE_MOE, "granitemoe" },
+ { LLM_ARCH_CHAMELEON, "chameleon" },
+ + { LLM_ARCH_FLUX, "flux" },
+ + { LLM_ARCH_SD1, "sd1" },
+ + { LLM_ARCH_SDXL, "sdxl" },
+ + { LLM_ARCH_SD3, "sd3" },
+ + { LLM_ARCH_AURA, "aura" },
+ + { LLM_ARCH_LTXV, "ltxv" },
+ + { LLM_ARCH_HYVID, "hyvid" },
+ + { LLM_ARCH_WAN, "wan" },
+ + { LLM_ARCH_HIDREAM, "hidream" },
+ + { LLM_ARCH_COSMOS, "cosmos" },
+ + { LLM_ARCH_SEEDVR, "seedvr" },
+ { LLM_ARCH_UNKNOWN, "(unknown)" },
+ };
+
+ @@ -1531,6 +1553,17 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+ },
+ },
+ + { LLM_ARCH_FLUX, {}},
+ + { LLM_ARCH_SD1, {}},
+ + { LLM_ARCH_SDXL, {}},
+ + { LLM_ARCH_SD3, {}},
+ + { LLM_ARCH_AURA, {}},
+ + { LLM_ARCH_LTXV, {}},
+ + { LLM_ARCH_HYVID, {}},
+ + { LLM_ARCH_WAN, {}},
+ + { LLM_ARCH_HIDREAM, {}},
+ + { LLM_ARCH_COSMOS, {}},
+ + { LLM_ARCH_SEEDVR, {}},
+ {
+ LLM_ARCH_UNKNOWN,
+ {
+ @@ -5403,6 +5436,25 @@ static void llm_load_hparams(
+ // get general kv
+ ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
+
+ + // Disable LLM metadata for image models
+ + switch (model.arch) {
+ + case LLM_ARCH_FLUX:
+ + case LLM_ARCH_SD1:
+ + case LLM_ARCH_SDXL:
+ + case LLM_ARCH_SD3:
+ + case LLM_ARCH_AURA:
+ + case LLM_ARCH_LTXV:
+ + case LLM_ARCH_HYVID:
+ + case LLM_ARCH_WAN:
+ + case LLM_ARCH_HIDREAM:
+ + case LLM_ARCH_COSMOS:
+ + case LLM_ARCH_SEEDVR:
+ + model.ftype = ml.ftype;
+ + return;
+ + default:
+ + break;
+ + }
+ +
+ // get hparams kv
+ ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
+
+ @@ -18016,6 +18068,132 @@ static void llama_tensor_dequantize_internal(
+ workers.clear();
+ }
+
+ +static ggml_type img_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+ + // Special function for quantizing image model tensors
+ + const std::string name = ggml_get_name(tensor);
+ + const llm_arch arch = qs.model.arch;
+ +
+ + // Sanity check
+ + if (
+ + (name.find("model.diffusion_model.") != std::string::npos) ||
+ + (name.find("first_stage_model.") != std::string::npos) ||
+ + (name.find("single_transformer_blocks.") != std::string::npos) ||
+ + (name.find("joint_transformer_blocks.") != std::string::npos)
+ + ) {
+ + throw std::runtime_error("Invalid input GGUF file. This is not a supported UNET model");
+ + }
+ +
+ + // Unsupported quant types - exclude all IQ quants for now
+ + if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
+ + ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
+ + ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
+ + ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
+ + ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
+ + ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_Q4_0_4_4 ||
+ + ftype == LLAMA_FTYPE_MOSTLY_Q4_0_4_8 || ftype == LLAMA_FTYPE_MOSTLY_Q4_0_8_8) {
+ + throw std::runtime_error("Invalid quantization type for image model (Not supported)");
+ + }
+ +
+ + if ( // Rules for to_v attention
+ + (name.find("attn_v.weight") != std::string::npos) ||
+ + (name.find(".to_v.weight") != std::string::npos) ||
+ + (name.find(".v.weight") != std::string::npos) ||
+ + (name.find(".attn.w1v.weight") != std::string::npos) ||
+ + (name.find(".attn.w2v.weight") != std::string::npos) ||
+ + (name.find("_attn.v_proj.weight") != std::string::npos)
+ + ){
+ + if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+ + new_type = GGML_TYPE_Q3_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+ + new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+ + new_type = GGML_TYPE_Q5_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
+ + new_type = GGML_TYPE_Q6_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) {
+ + new_type = GGML_TYPE_Q5_K;
+ + }
+ + ++qs.i_attention_wv;
+ + } else if ( // Rules for fused qkv attention
+ + (name.find("attn_qkv.weight") != std::string::npos) ||
+ + (name.find("attn.qkv.weight") != std::string::npos)
+ + ) {
+ + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+ + new_type = GGML_TYPE_Q4_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
+ + new_type = GGML_TYPE_Q5_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
+ + new_type = GGML_TYPE_Q6_K;
+ + }
+ + } else if ( // Rules for ffn
+ + (name.find("ffn_down") != std::string::npos) ||
+ + ((name.find("experts.") != std::string::npos) && (name.find(".w2.weight") != std::string::npos)) ||
+ + (name.find(".ffn.2.weight") != std::string::npos) || // is this even the right way around?
+ + (name.find(".ff.net.2.weight") != std::string::npos) ||
+ + (name.find(".mlp.layer2.weight") != std::string::npos) ||
+ + (name.find(".adaln_modulation_mlp.2.weight") != std::string::npos)
+ + ) {
+ + // TODO: add back `layer_info` with some model specific logic + logic further down
+ + if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+ + new_type = GGML_TYPE_Q4_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+ + new_type = GGML_TYPE_Q5_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S) {
+ + new_type = GGML_TYPE_Q5_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
+ + new_type = GGML_TYPE_Q6_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) {
+ + new_type = GGML_TYPE_Q6_K;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_0) {
+ + new_type = GGML_TYPE_Q4_1;
+ + }
+ + else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_0) {
+ + new_type = GGML_TYPE_Q5_1;
+ + }
+ + ++qs.i_ffn_down;
+ + }
+ +
+ + // Sanity check for row shape
+ + bool convert_incompatible_tensor = false;
+ + if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
+ + new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
+ + int nx = tensor->ne[0];
+ + int ny = tensor->ne[1];
+ + if (nx % QK_K != 0) {
+ + LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
+ + convert_incompatible_tensor = true;
+ + } else {
+ + ++qs.n_k_quantized;
+ + }
+ + }
+ + if (convert_incompatible_tensor) {
+ + // TODO: Possibly reenable this in the future
+ + // switch (new_type) {
+ + // case GGML_TYPE_Q2_K:
+ + // case GGML_TYPE_Q3_K:
+ + // case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
+ + // case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
+ + // case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
+ + // default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
+ + // }
+ + new_type = GGML_TYPE_F16;
+ + LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
+ + ++qs.n_fallback;
+ + }
+ + return new_type;
+ +}
+ +
+ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+ const std::string name = ggml_get_name(tensor);
+
+ @@ -18513,7 +18691,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+ if (llama_model_has_encoder(&model)) {
+ n_attn_layer *= 3;
+ }
+ - GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+ + if (model.arch != LLM_ARCH_HYVID) { // TODO: Check why this fails
+ + GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+ + }
+ }
+
+ size_t total_size_org = 0;
+ @@ -18547,6 +18727,51 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+ ctx_outs[i_split] = gguf_init_empty();
+ }
+ gguf_add_tensor(ctx_outs[i_split], tensor);
+ + // SD3 pos_embed needs special fix as first dim is 1, which gets truncated here
+ + if (model.arch == LLM_ARCH_SD3) {
+ + const std::string name = ggml_get_name(tensor);
+ + if (name == "pos_embed" && tensor->ne[2] == 1) {
+ + const int n_dim = 3;
+ + gguf_set_tensor_ndim(ctx_outs[i_split], "pos_embed", n_dim);
+ + LLAMA_LOG_INFO("\n%s: Correcting pos_embed shape for SD3: [key:%s]\n", __func__, tensor->name);
+ + }
+ + }
+ + // same goes for auraflow
+ + if (model.arch == LLM_ARCH_AURA) {
+ + const std::string name = ggml_get_name(tensor);
+ + if (name == "positional_encoding" && tensor->ne[2] == 1) {
+ + const int n_dim = 3;
+ + gguf_set_tensor_ndim(ctx_outs[i_split], "positional_encoding", n_dim);
+ + LLAMA_LOG_INFO("\n%s: Correcting positional_encoding shape for AuraFlow: [key:%s]\n", __func__, tensor->name);
+ + }
+ + if (name == "register_tokens" && tensor->ne[2] == 1) {
+ + const int n_dim = 3;
+ + gguf_set_tensor_ndim(ctx_outs[i_split], "register_tokens", n_dim);
+ + LLAMA_LOG_INFO("\n%s: Correcting register_tokens shape for AuraFlow: [key:%s]\n", __func__, tensor->name);
+ + }
+ + }
+ + // conv3d fails due to max dims - unsure what to do here as we never even reach this check
+ + if (model.arch == LLM_ARCH_HYVID) {
+ + const std::string name = ggml_get_name(tensor);
+ + if (name == "img_in.proj.weight" && tensor->ne[5] != 1 ) {
+ + throw std::runtime_error("img_in.proj.weight size failed for HyVid");
+ + }
+ + }
+ + // All the modulation layers also have dim1, and I think conv3d fails here too but we segfaul way before that...
+ + if (model.arch == LLM_ARCH_WAN) {
+ + const std::string name = ggml_get_name(tensor);
+ + if (name.find(".modulation") != std::string::npos && tensor->ne[2] == 1) {
+ + const int n_dim = 3;
+ + gguf_set_tensor_ndim(ctx_outs[i_split], tensor->name, n_dim);
+ + LLAMA_LOG_INFO("\n%s: Correcting shape for Wan: [key:%s]\n", __func__, tensor->name);
+ + }
+ + // FLF2V model only
+ + if (name == "img_emb.emb_pos") {
+ + const int n_dim = 3;
+ + gguf_set_tensor_ndim(ctx_outs[i_split], tensor->name, n_dim);
+ + LLAMA_LOG_INFO("\n%s: Correcting shape for Wan FLF2V: [key:%s]\n", __func__, tensor->name);
+ + }
+ + }
+ }
+
+ // Set split info if needed
+ @@ -18647,6 +18872,109 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+ // do not quantize relative position bias (T5)
+ quantize &= name.find("attn_rel_b.weight") == std::string::npos;
+
+ + // rules for image models
+ + bool image_model = false;
+ + if (model.arch == LLM_ARCH_FLUX) {
+ + image_model = true;
+ + quantize &= name.find("txt_in.") == std::string::npos;
+ + quantize &= name.find("img_in.") == std::string::npos;
+ + quantize &= name.find("time_in.") == std::string::npos;
+ + quantize &= name.find("vector_in.") == std::string::npos;
+ + quantize &= name.find("guidance_in.") == std::string::npos;
+ + quantize &= name.find("final_layer.") == std::string::npos;
+ + }
+ + if (model.arch == LLM_ARCH_SD1 || model.arch == LLM_ARCH_SDXL) {
+ + image_model = true;
+ + quantize &= name.find("class_embedding.") == std::string::npos;
+ + quantize &= name.find("time_embedding.") == std::string::npos;
+ + quantize &= name.find("add_embedding.") == std::string::npos;
+ + quantize &= name.find("time_embed.") == std::string::npos;
+ + quantize &= name.find("label_emb.") == std::string::npos;
+ + quantize &= name.find("conv_in.") == std::string::npos;
+ + quantize &= name.find("conv_out.") == std::string::npos;
+ + quantize &= name != "input_blocks.0.0.weight";
+ + quantize &= name != "out.2.weight";
+ + }
+ + if (model.arch == LLM_ARCH_SD3) {
+ + image_model = true;
+ + quantize &= name.find("final_layer.") == std::string::npos;
+ + quantize &= name.find("time_text_embed.") == std::string::npos;
+ + quantize &= name.find("context_embedder.") == std::string::npos;
+ + quantize &= name.find("t_embedder.") == std::string::npos;
+ + quantize &= name.find("y_embedder.") == std::string::npos;
+ + quantize &= name.find("x_embedder.") == std::string::npos;
+ + quantize &= name != "proj_out.weight";
+ + quantize &= name != "pos_embed";
+ + }
+ + if (model.arch == LLM_ARCH_AURA) {
+ + image_model = true;
+ + quantize &= name.find("t_embedder.") == std::string::npos;
+ + quantize &= name.find("init_x_linear.") == std::string::npos;
+ + quantize &= name != "modF.1.weight";
+ + quantize &= name != "cond_seq_linear.weight";
+ + quantize &= name != "final_linear.weight";
+ + quantize &= name != "final_linear.weight";
+ + quantize &= name != "positional_encoding";
+ + quantize &= name != "register_tokens";
+ + }
+ + if (model.arch == LLM_ARCH_LTXV) {
+ + image_model = true;
+ + quantize &= name.find("adaln_single.") == std::string::npos;
+ + quantize &= name.find("caption_projection.") == std::string::npos;
+ + quantize &= name.find("patchify_proj.") == std::string::npos;
+ + quantize &= name.find("proj_out.") == std::string::npos;
+ + quantize &= name.find("scale_shift_table") == std::string::npos; // last block too
+ + }
+ + if (model.arch == LLM_ARCH_HYVID) {
+ + image_model = true;
+ + quantize &= name.find("txt_in.") == std::string::npos;
+ + quantize &= name.find("img_in.") == std::string::npos;
+ + quantize &= name.find("time_in.") == std::string::npos;
+ + quantize &= name.find("vector_in.") == std::string::npos;
+ + quantize &= name.find("guidance_in.") == std::string::npos;
+ + quantize &= name.find("final_layer.") == std::string::npos;
+ + }
+ + if (model.arch == LLM_ARCH_WAN) {
+ + image_model = true;
+ + quantize &= name.find("modulation.") == std::string::npos;
+ + quantize &= name.find("patch_embedding.") == std::string::npos;
+ + quantize &= name.find("text_embedding.") == std::string::npos;
+ + quantize &= name.find("time_projection.") == std::string::npos;
+ + quantize &= name.find("time_embedding.") == std::string::npos;
+ + quantize &= name.find("img_emb.") == std::string::npos;
+ + quantize &= name.find("head.") == std::string::npos;
+ + }
+ + if (model.arch == LLM_ARCH_HIDREAM) {
+ + image_model = true;
+ + quantize &= name.find("p_embedder.") == std::string::npos;
+ + quantize &= name.find("t_embedder.") == std::string::npos;
+ + quantize &= name.find("x_embedder.") == std::string::npos;
+ + quantize &= name.find("final_layer.") == std::string::npos;
+ + quantize &= name.find(".ff_i.gate.weight") == std::string::npos;
+ + quantize &= name.find("caption_projection.") == std::string::npos;
+ + }
+ + if (model.arch == LLM_ARCH_COSMOS) {
+ + image_model = true;
+ + quantize &= name.find("p_embedder.") == std::string::npos;
+ + quantize &= name.find("t_embedder.") == std::string::npos;
+ + quantize &= name.find("t_embedding_norm.") == std::string::npos;
+ + quantize &= name.find("x_embedder.") == std::string::npos;
+ + quantize &= name.find("pos_embedder.") == std::string::npos;
+ + quantize &= name.find("final_layer.") == std::string::npos;
+ + }
+ + if (model.arch == LLM_ARCH_SEEDVR) {
+ + image_model = true;
+ + quantize &= name.find("emb_in.") == std::string::npos;
+ + quantize &= name.find("txt_in.") == std::string::npos;
+ + quantize &= name.find("vid_in.") == std::string::npos;
+ + quantize &= name.find("vid_out.") == std::string::npos;
+ + quantize &= name.find(".ada.") == std::string::npos;
+ + }
+ + // ignore 3D/4D tensors for image models as the code was never meant to handle these
+ + if (image_model) {
+ + quantize &= ggml_n_dims(tensor) == 2;
+ + }
+ +
+ enum ggml_type new_type;
+ void * new_data;
+ size_t new_size;
+ @@ -18655,6 +18983,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+ new_type = default_type;
+
+ // get more optimal quantization type based on the tensor shape, layer, etc.
+ + if (image_model) {
+ + new_type = img_tensor_get_type(qs, new_type, tensor, ftype);
+ + } else {
+ if (!params->pure && ggml_is_quantized(default_type)) {
+ new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
+ }
+ @@ -18664,6 +18995,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+ if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
+ new_type = params->output_tensor_type;
+ }
+ + }
+
+ + // If we've decided to quantize to the same type the tensor is already
+ + // in then there's nothing to do.
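
For reference, a minimal sketch of how the gguf_set_tensor_ndim() call introduced by this patch is used. This is an illustration, not part of the patch: the 64 x 4096 x 1 shape and the "pos_embed.gguf" filename are made-up stand-ins. gguf_add_tensor() records the tensor rank with trailing size-1 dimensions trimmed (via ggml_n_dims()), so a 3-D tensor whose last dimension is 1 would otherwise land in the GGUF as 2-D; gguf_set_tensor_ndim() forces the stored rank back, which is what the quantizer above does for SD3 "pos_embed", AuraFlow "positional_encoding"/"register_tokens", and Wan ".modulation" tensors.

// sketch.c - assumes a llama.cpp/ggml tree with this patch applied
#include "ggml.h"

int main(void) {
    // small scratch context for one tensor
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // hypothetical stand-in for a positional-embedding tensor: ne = [64, 4096, 1]
    struct ggml_tensor * pos = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 64, 4096, 1);
    ggml_set_name(pos, "pos_embed");
    ggml_set_f32(pos, 0.0f); // fill with placeholder data

    struct gguf_context * gctx = gguf_init_empty();
    gguf_add_tensor(gctx, pos);                 // rank recorded as 2: the trailing 1 is trimmed
    gguf_set_tensor_ndim(gctx, "pos_embed", 3); // restore the intended 3-D shape on disk

    gguf_write_to_file(gctx, "pos_embed.gguf", false);

    gguf_free(gctx);
    ggml_free(ctx);
    return 0;
}

Note that the call only rewrites the rank stored in the tensor-info header; it does not move any data, which is why the quantizer guards each use with a check that the trailing dimension really is 1.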