{
"_class_name": "AutoencoderKL",
"_commit_hash": null,
"_diffusers_version": "0.32.2",
"_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--Jingya--pixart_sigma_pipe_xl_2_512_ms/snapshots/ee9e1f15aa5e778132655fab637943a6bc71a27c/vae",
"_use_default_values": [
"latents_std",
"shift_factor",
"latents_mean",
"use_quant_conv",
"mid_block_add_attention",
"use_post_quant_conv"
],
"act_fn": "silu",
"block_out_channels": [
128,
256,
512,
512
],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D"
],
"force_upcast": false,
"in_channels": 3,
"latent_channels": 4,
"latents_mean": null,
"latents_std": null,
"layers_per_block": 2,
"mid_block_add_attention": true,
"neuron": {
"auto_cast": "none",
"auto_cast_type": "bf16",
"compiler_type": "neuronx-cc",
"compiler_version": "2.16.372.0+4a9b2326",
"dynamic_batch_size": false,
"inline_weights_to_neff": true,
"input_names": [
"latent_sample"
],
"model_type": "vae-decoder",
"optlevel": "2",
"output_attentions": false,
"output_hidden_states": false,
"output_names": [
"sample"
],
"static_batch_size": 1,
"static_height": 64,
"static_num_channels": 4,
"static_width": 64,
"tensor_parallel_size": 1
},
"norm_num_groups": 32,
"out_channels": 3,
"sample_size": 512,
"scaling_factor": 0.13025,
"shift_factor": null,
"task": "semantic-segmentation",
"transformers_version": null,
"up_block_types": [
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D",
"UpDecoderBlock2D"
],
"use_post_quant_conv": true,
"use_quant_conv": true
}