This LoRA is trained on the Wan2.1 14B I2V 720p model. To use it with Diffusers, install diffusers from source:
pip install git+https://github.com/huggingface/diffusers.git
import torch
from diffusers.utils import export_to_video, load_image
from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
from transformers import CLIPVisionModel
import numpy as np
model_id = "Wan-AI/Wan2.1-I2V-14B-720P-Diffusers"
image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32)
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanImageToVideoPipeline.from_pretrained(model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16)
pipe.to("cuda")
pipe.load_lora_weights("valiantcat/Wan2.1-Mecha-LoRA")
pipe.enable_model_cpu_offload()  # optional: offloads modules to CPU for low-VRAM environments (can be used instead of pipe.to("cuda"))
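# English gloss of the Chinese prompt below:
# "gangtiexia, keep the background unchanged; this person starts transforming into a red mecha;
#  during the transformation a mecha face mask appears and covers the face; once the transformation
#  is complete, the person walks forward."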
prompt = "gangtiexia,背景保持不变,这个人开始变身红色机甲,变身过程中出现机甲面罩遮住脸部,变身完成之后这个人向前走."
image = load_image("https://huggingface.co/valiantcat/Wan2.1-Mecha-LoRA/resolve/main/result/test.jpg")
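# Snap the output resolution to multiples of (VAE spatial scale factor x transformer patch size)
# while roughly preserving the target pixel area and the source image's aspect ratio.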
max_area = 512 * 768
aspect_ratio = image.height / image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
image = image.resize((width, height))
output = pipe(
image=image,
prompt=prompt,
height=height,
width=width,
num_frames=81,
guidance_scale=5.0,
num_inference_steps=25
).frames[0]
export_to_video(output, "output.mp4", fps=16)  # 81 frames at 16 fps is roughly a 5-second clip
The key trigger phrase is: gangtiexia
For best results, follow the prompt structure of the example above and simply replace the color (红色, "red", in the example prompt) with whatever color you want the person's mecha to transform into.
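As a minimal sketch of that structure (it reuses the Chinese phrasing of the example prompt above with only the color swapped; mecha_prompt is a hypothetical helper, not part of the model card):

# Hypothetical helper: builds a prompt in the structure above with a chosen mecha color.
def mecha_prompt(color: str) -> str:
    return (
        "gangtiexia,背景保持不变,这个人开始变身" + color
        + "机甲,变身过程中出现机甲面罩遮住脸部,变身完成之后这个人向前走."
    )

prompt = mecha_prompt("蓝色")  # blue mecha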
Base model: Wan-AI/Wan2.1-I2V-14B-720P