{
  "_class_name": "FluxTransformer2DModel",
  "_diffusers_version": "0.33.1",
  "_name_or_path": "F:/T2ITrainer/flux_models/krea/",
  "attention_head_dim": 128,
  "axes_dims_rope": [
    16,
    56,
    56
  ],
  "guidance_embeds": true,
  "in_channels": 64,
  "joint_attention_dim": 4096,
  "num_attention_heads": 24,
  "num_layers": 19,
  "num_single_layers": 38,
  "out_channels": null,
  "patch_size": 1,
  "pooled_projection_dim": 768,
  "quantization_config": {
    "_load_in_4bit": true,
    "_load_in_8bit": false,
    "bnb_4bit_compute_dtype": "float32",
    "bnb_4bit_quant_storage": "uint8",
    "bnb_4bit_quant_type": "nf4",
    "bnb_4bit_use_double_quant": false,
    "llm_int8_enable_fp32_cpu_offload": false,
    "llm_int8_has_fp16_weight": false,
    "llm_int8_skip_modules": null,
    "llm_int8_threshold": 6.0,
    "load_in_4bit": true,
    "load_in_8bit": false,
    "quant_method": "bitsandbytes"
  }
}
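
For context, the `quantization_config` block above maps directly onto diffusers' `BitsAndBytesConfig`. Below is a minimal loading sketch, assuming `diffusers` (>= 0.31, which added bitsandbytes support) and `bitsandbytes` are installed; the local path is taken from `_name_or_path` above and is an assumption about your layout, not part of the config's contract.

```python
import torch
from diffusers import BitsAndBytesConfig, FluxTransformer2DModel

# Mirror the quantization_config block above: 4-bit NF4 weights,
# float32 compute dtype, double quantization disabled.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float32,
    bnb_4bit_use_double_quant=False,
)

# Quantize the transformer weights on load. The path comes from
# "_name_or_path" and may differ locally; for a checkpoint that was
# already saved with this config.json, from_pretrained can also pick
# up the embedded quantization_config on its own.
transformer = FluxTransformer2DModel.from_pretrained(
    "F:/T2ITrainer/flux_models/krea/",
    quantization_config=quant_config,
    torch_dtype=torch.float32,
)
```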