imnotednamode committed
Commit: f29e25b
Parent: 3507f0d

Update README.md

Files changed (1):
  1. README.md +2 -2
README.md CHANGED
@@ -28,7 +28,7 @@ To use:
 from diffusers import MochiPipeline, MochiTransformer3DModel
 from diffusers.utils import export_to_video
 transformer = MochiTransformer3DModel.from_pretrained("imnotednamode/mochi-1-preview-mix-nf4-small", torch_dtype=torch.bfloat16)
-pipe = MochiPipeline.from_pretrained("mochi-1-diffusers", torch_dtype=torch.bfloat16, transformer=transformer)
+pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.bfloat16, transformer=transformer)
 pipe.enable_model_cpu_offload()
 pipe.enable_vae_tiling()
 frames = pipe("A camera follows a squirrel running around on a tree branch", num_inference_steps=100, guidance_scale=4.5, height=480, width=848, num_frames=161).frames[0]
@@ -43,6 +43,6 @@ from diffusers import MochiPipeline, MochiTransformer3DModel, BitsAndBytesConfig
 import torch
 quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4", llm_int8_skip_modules=["final_layer", "x_embedder.proj", "t_embedder", "pos_frequencies", "t5"])
 # Please convert mochi to diffusers first
-transformer = MochiTransformer3DModel.from_pretrained("mochi-1-diffusers", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16)
+transformer = MochiTransformer3DModel.from_pretrained("genmo/mochi-1-preview", variant="refs/pr/18", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16)
 transformer.save_pretrained("mochi-1-preview-nf4")
 ```
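Put together, the updated inference snippet from the first hunk runs roughly as below. This is a minimal sketch of the post-change README code; the `export_to_video` output path ("mochi.mp4") and fps value are illustrative assumptions, not taken from the diff.

```python
import torch
from diffusers import MochiPipeline, MochiTransformer3DModel
from diffusers.utils import export_to_video

# Load the pre-quantized NF4 transformer, then drop it into the full pipeline.
transformer = MochiTransformer3DModel.from_pretrained(
    "imnotednamode/mochi-1-preview-mix-nf4-small", torch_dtype=torch.bfloat16
)
pipe = MochiPipeline.from_pretrained(
    "genmo/mochi-1-preview", torch_dtype=torch.bfloat16, transformer=transformer
)

# Keep peak VRAM down: offload idle submodules to CPU and decode the VAE in tiles.
pipe.enable_model_cpu_offload()
pipe.enable_vae_tiling()

frames = pipe(
    "A camera follows a squirrel running around on a tree branch",
    num_inference_steps=100,
    guidance_scale=4.5,
    height=480,
    width=848,
    num_frames=161,
).frames[0]

# Output filename and fps are illustrative, not specified in the diff.
export_to_video(frames, "mochi.mp4", fps=30)
```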
 
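The second hunk is the quantization step that produces such an NF4 transformer. Below is a self-contained sketch of it, assuming `mochi-1-diffusers` is a hypothetical local directory holding Diffusers-format Mochi weights (per the README's note to convert Mochi to Diffusers first); the committed README instead loads the transformer from `genmo/mochi-1-preview` with `variant="refs/pr/18"`.

```python
import torch
from diffusers import BitsAndBytesConfig, MochiTransformer3DModel

# NF4 config from the README: skip quantization for the numerically sensitive layers.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4",
    llm_int8_skip_modules=["final_layer", "x_embedder.proj", "t_embedder", "pos_frequencies", "t5"],
)

# "mochi-1-diffusers" is a hypothetical local path to a converted checkpoint.
transformer = MochiTransformer3DModel.from_pretrained(
    "mochi-1-diffusers",
    subfolder="transformer",
    quantization_config=quantization_config,
    torch_dtype=torch.bfloat16,
)

# Save the quantized weights so they can be reloaded without re-quantizing.
transformer.save_pretrained("mochi-1-preview-nf4")
```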