Carlexxx committed on
Commit 3351ca9 · 1 Parent(s): 1dcffa2

heloo terakeos Aduc1.0

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. LICENSE +25 -0
  2. README.md +45 -5
  3. app.py +559 -0
  4. appv0.py +633 -0
  5. configs/ltxv-13b-0.9.7-dev.yaml +34 -0
  6. configs/ltxv-13b-0.9.7-distilled.yaml +28 -0
  7. configs/ltxv-13b-0.9.8-dev-fp8.yaml +34 -0
  8. configs/ltxv-13b-0.9.8-dev.yaml +34 -0
  9. configs/ltxv-13b-0.9.8-distilled-fp8.yaml +29 -0
  10. configs/ltxv-13b-0.9.8-distilled.yaml +29 -0
  11. configs/ltxv-2b-0.9.1.yaml +17 -0
  12. configs/ltxv-2b-0.9.5.yaml +17 -0
  13. configs/ltxv-2b-0.9.6-dev.yaml +17 -0
  14. configs/ltxv-2b-0.9.6-distilled.yaml +16 -0
  15. configs/ltxv-2b-0.9.8-distilled-fp8.yaml +28 -0
  16. configs/ltxv-2b-0.9.8-distilled.yaml +28 -0
  17. configs/ltxv-2b-0.9.yaml +17 -0
  18. dreamo/dreamo_pipeline.py +507 -0
  19. dreamo/transformer.py +187 -0
  20. dreamo/utils.py +232 -0
  21. dreamo_helpers.py +123 -0
  22. inference.py +774 -0
  23. ltx_video/__init__.py +0 -0
  24. ltx_video/models/__init__.py +0 -0
  25. ltx_video/models/autoencoders/__init__.py +0 -0
  26. ltx_video/models/autoencoders/causal_conv3d.py +63 -0
  27. ltx_video/models/autoencoders/causal_video_autoencoder.py +1398 -0
  28. ltx_video/models/autoencoders/conv_nd_factory.py +90 -0
  29. ltx_video/models/autoencoders/dual_conv3d.py +217 -0
  30. ltx_video/models/autoencoders/latent_upsampler.py +203 -0
  31. ltx_video/models/autoencoders/pixel_norm.py +12 -0
  32. ltx_video/models/autoencoders/pixel_shuffle.py +33 -0
  33. ltx_video/models/autoencoders/vae.py +380 -0
  34. ltx_video/models/autoencoders/vae_encode.py +247 -0
  35. ltx_video/models/autoencoders/video_autoencoder.py +1045 -0
  36. ltx_video/models/transformers/__init__.py +0 -0
  37. ltx_video/models/transformers/attention.py +1264 -0
  38. ltx_video/models/transformers/embeddings.py +129 -0
  39. ltx_video/models/transformers/symmetric_patchifier.py +84 -0
  40. ltx_video/models/transformers/transformer3d.py +507 -0
  41. ltx_video/pipelines/__init__.py +0 -0
  42. ltx_video/pipelines/ai_studio_code (11).py +157 -0
  43. ltx_video/pipelines/crf_compressor.py +50 -0
  44. ltx_video/pipelines/pipeline_ltx_video.py +1903 -0
  45. ltx_video/schedulers/__init__.py +0 -0
  46. ltx_video/schedulers/rf.py +386 -0
  47. ltx_video/utils/__init__.py +0 -0
  48. ltx_video/utils/diffusers_config_mapping.py +174 -0
  49. ltx_video/utils/prompt_enhance_utils.py +226 -0
  50. ltx_video/utils/skip_layer_strategy.py +8 -0
LICENSE ADDED
@@ -0,0 +1,25 @@
+ # Euia-AducSdr: Uma implementação aberta e funcional da arquitetura ADUC-SDR para geração de vídeo coerente.
+ # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos
+ #
+ # Contato:
+ # Carlos Rodrigues dos Santos
+ # Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025
+ #
+ # Repositórios e Projetos Relacionados:
+ # GitHub: https://github.com/carlex22/Aduc-sdr
+ # Hugging Face (Ltx-SuperTime-60Secondos): https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/
+ # Hugging Face (Novinho): https://huggingface.co/spaces/Carlexxx/Novinho/
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Affero General Public License as published by
+ # the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ #
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ # GNU Affero General Public License for more details.
+ #
+ # You should have received a copy of the GNU Affero General Public License
+ # along with this program. If not, see <https://www.gnu.org/licenses/>.
README.md CHANGED
@@ -1,12 +1,52 @@
  ---
- title: ADUC-Sdr Gemini Drem0 Ltx Video60seconds
- emoji: 🌍
- colorFrom: red
- colorTo: pink
  sdk: gradio
  sdk_version: 5.42.0
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
  ---
+ title: Euia-AducSdr
+ emoji: 🎬
+ colorFrom: indigo
+ colorTo: purple
  sdk: gradio
  sdk_version: 5.42.0
  app_file: app.py
  pinned: false
  ---

+ ### 🇧🇷 Português
+
+ Uma implementação aberta e funcional da arquitetura ADUC-SDR (Arquitetura de Unificação Compositiva - Escala Dinâmica e Resiliente), projetada para a geração de vídeo coerente de longa duração. Este projeto materializa os princípios de fragmentação, navegação geométrica e um mecanismo de "eco causal 4bits memoria" para garantir a continuidade física e narrativa em sequências de vídeo geradas por múltiplos modelos de IA.
+
+ **Licença:** Este projeto é licenciado sob os termos da **GNU Affero General Public License v3.0**. Isto significa que se você usar este software (ou qualquer trabalho derivado) para fornecer um serviço através de uma rede, você é **obrigado a disponibilizar o código-fonte completo** da sua versão para os usuários desse serviço.
+
+ - **Copyright (C) 4 de Agosto de 2025, Carlos Rodrigues dos Santos**
+ - Uma cópia completa da licença pode ser encontrada no arquivo [LICENSE](LICENSE).
+
+ ---
+
+ ### 🇬🇧 English
+
+ An open and functional implementation of ADUC-SDR (Architecture for Compositive Unification - Dynamic and Resilient Scaling), designed for long-form coherent video generation. This project materializes the principles of fragmentation, geometric navigation, and a "causal echo 4-bit memory" mechanism to ensure physical and narrative continuity in video sequences generated by multiple AI models.
+
+ **License:** This project is licensed under the terms of the **GNU Affero General Public License v3.0**. This means that if you use this software (or any derivative work) to provide a service over a network, you are **required to make the complete source code** of your version available to the users of that service.
+
+ - **Copyright (C) August 4, 2025, Carlos Rodrigues dos Santos**
+ - A full copy of the license can be found in the [LICENSE](LICENSE) file.
+
+ ---
+
+ ### 🇪🇸 Español
+
+ Una implementación abierta y funcional de la arquitectura ADUC-SDR (Arquitectura de Unificación Compositiva - Escala Dinámica y Resiliente), diseñada para la generación de video coherente de larga duración. Este proyecto materializa los principios de fragmentación, navegación geométrica y un mecanismo de "eco causal 4bits memoria" para garantizar la continuidad física y narrativa en secuencias de video generadas por múltiples modelos de IA.
+
+ **Licencia:** Este proyecto está licenciado bajo los términos de la **Licencia Pública General Affero de GNU v3.0**. Esto significa que si usted utiliza este software (o cualquier obra derivada) para proporcionar un servicio a través de una red, está **obligado a ofrecer el código fuente completo** de su versión a los usuarios de dicho servicio.
+
+ - **Copyright (C) 4 de Agosto de 2025, Carlos Rodrigues dos Santos**
+ - Puede encontrar una copia completa de la licencia en el archivo [LICENSE](LICENSE).
+
+ ---
+
+ ### Contact / Contato / Contacto
+
+ - **Author / Autor:** Carlos Rodrigues dos Santos
+ - **Email:** [email protected]
+ - **GitHub:** [https://github.com/carlex22/Aduc-sdr](https://github.com/carlex22/Aduc-sdr)
+ - **Hugging Face Spaces:**
+ - [Ltx-SuperTime-60Secondos](https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/)
+ - [Novinho](https://huggingface.co/spaces/Carlexxx/Novinho/)
app.py ADDED
@@ -0,0 +1,559 @@
+ # Euia-AducSdr: Uma implementação aberta e funcional da arquitetura ADUC-SDR para geração de vídeo coerente.
+ # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos
+ #
+ # Contato:
+ # Carlos Rodrigues dos Santos
+ # Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025
+ #
+ # Repositórios e Projetos Relacionados:
+ # GitHub: https://github.com/carlex22/Aduc-sdr
+ # Hugging Face: https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/
+ # Hugging Face: https://huggingface.co/spaces/Carlexxx/Novinho/
+ #
+ # Este programa é software livre: você pode redistribuí-lo e/ou modificá-lo
+ # sob os termos da Licença Pública Geral Affero da GNU como publicada pela
+ # Free Software Foundation, seja a versão 3 da Licença, ou
+ # (a seu critério) qualquer versão posterior.
+ #
+ # Este programa é distribuído na esperança de que seja útil,
+ # mas SEM QUALQUER GARANTIA; sem mesmo a garantia implícita de
+ # COMERCIALIZAÇÃO ou ADEQUAÇÃO A UM DETERMINADO FIM. Consulte a
+ # Licença Pública Geral Affero da GNU para mais detalhes.
+ #
+ # Você deve ter recebido uma cópia da Licença Pública Geral Affero da GNU
+ # junto com este programa. Se não, veja <https://www.gnu.org/licenses/>.
+
+ # --- app.py (NOVINHO-4.4: O Piloto de Testes - Vetor de Frames) ---
+
+ # --- Ato 1: A Convocação da Orquestra (Importações) ---
+ import gradio as gr
+ import torch
+ import os
+ import yaml
+ from PIL import Image, ImageOps
+ import shutil
+ import gc
+ import subprocess
+ import google.generativeai as genai
+ import numpy as np
+ import imageio
+ from pathlib import Path
+ import huggingface_hub
+ import json
+ import time
+ from typing import Union, List
+
+ from inference import create_ltx_video_pipeline, load_image_to_tensor_with_resize_and_crop, ConditioningItem, calculate_padding
+ from dreamo_helpers import dreamo_generator_singleton
+
+ # --- Ato 2: A Preparação do Palco (Configurações) ---
+ config_file_path = "configs/ltxv-13b-0.9.8-distilled.yaml"
+ with open(config_file_path, "r") as file: PIPELINE_CONFIG_YAML = yaml.safe_load(file)
+
+ LTX_REPO = "Lightricks/LTX-Video"
+ models_dir = "downloaded_models_gradio_cpu_init"
+ Path(models_dir).mkdir(parents=True, exist_ok=True)
+ WORKSPACE_DIR = "aduc_workspace"
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
+
+ VIDEO_FPS = 36
+ VIDEO_DURATION_SECONDS = 4
+ VIDEO_TOTAL_FRAMES = VIDEO_DURATION_SECONDS * VIDEO_FPS
+ CONVERGENCE_FRAMES = 8
+ TARGET_RESOLUTION = 720
+ MAX_REFS = 4
+
+ print("Baixando e criando pipelines LTX na CPU...")
+ distilled_model_actual_path = huggingface_hub.hf_hub_download(repo_id=LTX_REPO, filename=PIPELINE_CONFIG_YAML["checkpoint_path"], local_dir=models_dir, local_dir_use_symlinks=False)
+ pipeline_instance = create_ltx_video_pipeline(
+ ckpt_path=distilled_model_actual_path,
+ precision=PIPELINE_CONFIG_YAML["precision"],
+ text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
+ sampler=PIPELINE_CONFIG_YAML["sampler"],
+ device='cpu'
+ )
+ print("Modelos LTX prontos (na CPU).")
+
+
+ # --- Ato 3: As Partituras dos Músicos (Funções Corrigidas e Documentadas) ---
+
+ def load_conditioning_tensor(media_path: str, height: int, width: int) -> torch.Tensor:
+ if not media_path: raise ValueError("Caminho da mídia de condicionamento não pode ser nulo.")
+ return load_image_to_tensor_with_resize_and_crop(media_path, height, width)
+
+ def run_ltx_animation(current_fragment_index, motion_prompt, conditioning_items_data, width, height, seed, cfg, progress=gr.Progress()):
+ progress(0, desc=f"[TECPIX 5000] Filmando Cena {current_fragment_index}...");
+ output_path = os.path.join(WORKSPACE_DIR, f"fragment_{current_fragment_index}.mp4"); target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ try:
+ pipeline_instance.to(target_device)
+ conditioning_items = []
+ for (path, start_frame, strength) in conditioning_items_data:
+ tensor = load_conditioning_tensor(path, height, width)
+ conditioning_items.append(ConditioningItem(tensor.to(target_device), start_frame, strength))
+
+ n_val = round((float(VIDEO_TOTAL_FRAMES) - 1.0) / 8.0); actual_num_frames = int(n_val * 8 + 1)
+ padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32
+ padding_vals = calculate_padding(height, width, padded_h, padded_w)
+ for cond_item in conditioning_items: cond_item.media_item = torch.nn.functional.pad(cond_item.media_item, padding_vals)
+ kwargs = {"prompt": motion_prompt, "negative_prompt": "blurry, distorted, bad quality, artifacts", "height": padded_h, "width": padded_w, "num_frames": actual_num_frames, "frame_rate": VIDEO_FPS, "generator": torch.Generator(device=target_device).manual_seed(int(seed) + current_fragment_index), "output_type": "pt", "guidance_scale": float(cfg), "timesteps": PIPELINE_CONFIG_YAML.get("first_pass", {}).get("timesteps"), "conditioning_items": conditioning_items, "decode_timestep": PIPELINE_CONFIG_YAML.get("decode_timestep"), "decode_noise_scale": PIPELINE_CONFIG_YAML.get("decode_noise_scale"), "stochastic_sampling": PIPELINE_CONFIG_YAML.get("stochastic_sampling"), "image_cond_noise_scale": 0.15, "is_video": True, "vae_per_channel_normalize": True, "mixed_precision": (PIPELINE_CONFIG_YAML.get("precision") == "mixed_precision"), "offload_to_cpu": False, "enhance_prompt": False}
+ result_tensor = pipeline_instance(**kwargs).images
+ pad_l, pad_r, pad_t, pad_b = map(int, padding_vals); slice_h = -pad_b if pad_b > 0 else None; slice_w = -pad_r if pad_r > 0 else None
+ cropped_tensor = result_tensor[:, :, :VIDEO_TOTAL_FRAMES, pad_t:slice_h, pad_l:slice_w]; video_np = (cropped_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy() * 255).astype(np.uint8)
+ with imageio.get_writer(output_path, fps=VIDEO_FPS, codec='libx264', quality=8) as writer:
+ for i, frame in enumerate(video_np): progress(i / len(video_np), desc=f"Renderizando frame {i+1}/{len(video_np)}..."); writer.append_data(frame)
+ return output_path
+ finally:
+ pipeline_instance.to('cpu'); gc.collect()
+ if torch.cuda.is_available(): torch.cuda.empty_cache()
+
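Editor's note: the frame-count and padding arithmetic inside `run_ltx_animation` is easier to check with concrete numbers. The snippet below is a reading aid only (not part of this commit); it simply replays the same expressions with the module's constants (VIDEO_TOTAL_FRAMES = 4 * 36 = 144, TARGET_RESOLUTION = 720). The generated clip is later cropped back to VIDEO_TOTAL_FRAMES frames.

```python
# Illustrative check of run_ltx_animation's arithmetic (assumes the module's defaults).
VIDEO_TOTAL_FRAMES = 4 * 36   # VIDEO_DURATION_SECONDS * VIDEO_FPS = 144
height = width = 720          # TARGET_RESOLUTION

# LTX expects a frame count of the form 8*n + 1, so 144 requested frames become 145.
n_val = round((float(VIDEO_TOTAL_FRAMES) - 1.0) / 8.0)   # round(143 / 8) = 18
actual_num_frames = int(n_val * 8 + 1)                   # 145

# Spatial dimensions are padded up to the next multiple of 32: 720 -> 736.
padded_h = ((height - 1) // 32 + 1) * 32
padded_w = ((width - 1) // 32 + 1) * 32

print(actual_num_frames, padded_h, padded_w)  # 145 736 736
```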
+ def process_image_to_square(image_path: str, size: int = TARGET_RESOLUTION) -> str:
+ if not image_path or not os.path.exists(image_path): return None
+ try:
+ img = Image.open(image_path).convert("RGB")
+ img_square = ImageOps.fit(img, (size, size), Image.Resampling.LANCZOS)
+ output_filename = f"initial_ref_{size}x{size}.png"
+ output_path = os.path.join(WORKSPACE_DIR, output_filename)
+ img_square.save(output_path)
+ return output_path
+ except Exception as e: raise gr.Error(f"Falha ao processar a imagem de referência: {e}")
+
+ def get_static_scenes_storyboard(num_fragments: int, prompt: str, initial_image_path: str):
+ if not initial_image_path: raise gr.Error("Por favor, forneça uma imagem de referência inicial.")
+ if not GEMINI_API_KEY: raise gr.Error("Chave da API Gemini não configurada!")
+ genai.configure(api_key=GEMINI_API_KEY)
+ prompt_file = "prompts/photographer_prompt.txt"
+ with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f: template = f.read()
+ director_prompt = template.format(user_prompt=prompt, num_fragments=int(num_fragments))
+ model = genai.GenerativeModel('gemini-2.0-flash'); img = Image.open(initial_image_path)
+ response = model.generate_content([director_prompt, img])
+ try:
+ cleaned_response = response.text.strip().replace("```json", "").replace("```", "")
+ storyboard_data = json.loads(cleaned_response)
+ return storyboard_data.get("scene_storyboard", [])
+ except Exception as e: raise gr.Error(f"O Sonhador (Gemini) falhou ao criar o roteiro: {e}. Resposta: {response.text}")
+
+ def run_keyframe_generation(storyboard, initial_ref_image_path, *reference_args):
+ # ... (código inalterado) ...
+ if not storyboard:
+ raise gr.Error("Nenhum roteiro para gerar imagens-chave.")
+ if not initial_ref_image_path or not os.path.exists(initial_ref_image_path):
+ raise gr.Error("A imagem de referência principal é obrigatória para iniciar a pintura.")
+
+ num_total_refs = MAX_REFS + 1
+ ref_paths = list(reference_args[:num_total_refs])
+ ref_tasks = list(reference_args[num_total_refs:])
+
+ with Image.open(initial_ref_image_path) as img:
+ width, height = img.size
+ width, height = (width // 32) * 32, (height // 32) * 32
+
+ keyframe_paths = []
+ log_history = ""
+
+ try:
+ dreamo_generator_singleton.to_gpu()
+
+ log_history += f"Pintando Keyframe Inicial (Cena 1/{len(storyboard)})...\n"
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths)}
+
+ references_for_first_frame = []
+ references_for_first_frame.append({'image_np': np.array(Image.open(initial_ref_image_path).convert("RGB")), 'task': 'ip'})
+ log_history += f" - Usando imagem de referência principal '{os.path.basename(initial_ref_image_path)}' (Tarefa: ip)\n"
+
+ for j in range(1, num_total_refs):
+ aux_path, aux_task = ref_paths[j], ref_tasks[j]
+ if aux_path and os.path.exists(aux_path):
+ references_for_first_frame.append({'image_np': np.array(Image.open(aux_path).convert("RGB")), 'task': aux_task})
+ log_history += f" - Usando ref. auxiliar: {os.path.basename(aux_path)} (Tarefa: {aux_task})\n"
+
+ first_prompt = storyboard[0]
+ output_path = os.path.join(WORKSPACE_DIR, "keyframe_1.png")
+ image = dreamo_generator_singleton.generate_image_with_gpu_management(
+ reference_items=references_for_first_frame, prompt=first_prompt, width=width, height=height
+ )
+ image.save(output_path)
+ keyframe_paths.append(output_path)
+ current_ref_image_path = output_path
+
+ for i, prompt in enumerate(storyboard[1:], start=1):
+ log_history += f"\nPintando Cena Sequencial {i+1}/{len(storyboard)}...\n"
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths)}
+
+ reference_items_for_dreamo = []
+ sequential_ref_task = ref_tasks[0]
+ reference_items_for_dreamo.append({'image_np': np.array(Image.open(current_ref_image_path).convert("RGB")), 'task': sequential_ref_task})
+ log_history += f" - Usando ref. sequencial: {os.path.basename(current_ref_image_path)} (Tarefa: {sequential_ref_task})\n"
+
+ for j in range(1, num_total_refs):
+ aux_path, aux_task = ref_paths[j], ref_tasks[j]
+ if aux_path and os.path.exists(aux_path):
+ reference_items_for_dreamo.append({'image_np': np.array(Image.open(aux_path).convert("RGB")), 'task': aux_task})
+ log_history += f" - Usando ref. auxiliar: {os.path.basename(aux_path)} (Tarefa: {aux_task})\n"
+
+ output_path = os.path.join(WORKSPACE_DIR, f"keyframe_{i+1}.png")
+ image = dreamo_generator_singleton.generate_image_with_gpu_management(
+ reference_items=reference_items_for_dreamo, prompt=prompt, width=width, height=height
+ )
+ image.save(output_path)
+ keyframe_paths.append(output_path)
+ current_ref_image_path = output_path
+
+ except Exception as e:
+ raise gr.Error(f"O Pintor (DreamO) encontrou um erro: {e}")
+ finally:
+ dreamo_generator_singleton.to_cpu()
+
+ log_history += "\nPintura de todos os keyframes concluída.\n"
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths), keyframe_images_state: keyframe_paths}
+
+ ####
+ # Gera um prompt de movimento para uma transição.
+ # Agora é flexível: aceita uma única imagem de partida ou uma lista de frames de contexto.
+ ####
+ def get_single_motion_prompt(user_prompt: str, story_history: str, start_media_paths: Union[str, List[str]], end_keyframe_path: str, prompt_filename: str):
+ if not GEMINI_API_KEY:
+ raise gr.Error("Chave da API Gemini não configurada!")
+
+ if isinstance(start_media_paths, str):
+ start_media_paths = [start_media_paths]
+
+ uploaded_files = []
+ try:
+ genai.configure(api_key=GEMINI_API_KEY)
+ model = genai.GenerativeModel('gemini-2.0-flash')
+
+ for path in start_media_paths:
+ print(f"Cineasta: Fazendo upload do arquivo de contexto '{path}'...")
+ file_to_upload = genai.upload_file(path)
+
+ print(f"Cineasta: Aguardando arquivo '{file_to_upload.name}' ficar ATIVO...")
+ timeout_seconds = 180
+ start_time = time.time()
+
+ while file_to_upload.state.name == "PROCESSING":
+ if time.time() - start_time > timeout_seconds:
+ raise TimeoutError(f"Tempo de espera para '{file_to_upload.name}' excedido.")
+ time.sleep(2)
+ file_to_upload = genai.get_file(name=file_to_upload.name)
+
+ if file_to_upload.state.name != "ACTIVE":
+ raise gr.Error(f"O arquivo de mídia '{file_to_upload.name}' falhou no processamento. Estado: {file_to_upload.state.name}")
+
+ uploaded_files.append(file_to_upload)
+
+ print(f"Cineasta: Todos os {len(uploaded_files)} arquivos de contexto estão ATIVOS. Gerando prompt...")
+ end_media = Image.open(end_keyframe_path)
+
+ prompt_file_path = os.path.join(os.path.dirname(__file__), "prompts", prompt_filename)
+ with open(prompt_file_path, "r", encoding="utf-8") as f:
+ template = f.read()
+
+ director_prompt = template.format(user_prompt=user_prompt, story_history=story_history)
+
+ model_contents = [director_prompt] + uploaded_files + [end_media]
+ response = model.generate_content(model_contents)
+
+ cleaned_text = response.text.strip().replace("```json", "").replace("```", "")
+ motion_data = json.loads(cleaned_text)
+ return motion_data.get("motion_prompt", "")
+
+ except Exception as e:
+ response_text = getattr(e, 'text', 'Nenhuma resposta de texto disponível.')
+ raise gr.Error(f"O Cineasta (Gemini) falhou ao criar o prompt de movimento: {e}. Resposta: {response_text}")
+
+ finally:
+ for f in uploaded_files:
+ try:
+ genai.delete_file(f.name)
+ except Exception as delete_e:
+ print(f"Aviso: Falha ao deletar o arquivo temporário {f.name} da API Gemini. Erro: {delete_e}")
+
+ ####
+ # NOVA FUNÇÃO: Extrai os 3 frames de contexto (vetor de movimento) de um vídeo.
+ ####
+ def extract_context_frames(input_video_path: str, fragment_index: int) -> List[str]:
+ print(f"Editor: Extraindo vetor de frames do fragmento {fragment_index}...")
+ output_paths = []
+ try:
+ command_probe = f"ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of default=noprint_wrappers=1:nokey=1 \"{input_video_path}\""
+ result_probe = subprocess.run(command_probe, shell=True, check=True, capture_output=True, text=True)
+ total_frames = int(result_probe.stdout.strip())
+
+ if total_frames <= CONVERGENCE_FRAMES:
+ # Se o vídeo for muito curto, apenas retorna o último frame 3 vezes.
+ frame_indices = [total_frames - 1] * 3
+ else:
+ # Frames a 8 e a 4 posições antes do final, e o próprio frame final.
+ frame_indices = [total_frames - 1 - CONVERGENCE_FRAMES, total_frames - 1 - (CONVERGENCE_FRAMES // 2), total_frames - 1]
+
+ for i, frame_idx in enumerate(frame_indices):
+ output_path = os.path.join(WORKSPACE_DIR, f"context_{fragment_index}_frame_{i+1}.png")
+ command_extract = f"ffmpeg -y -v error -i \"{input_video_path}\" -vf \"select='eq(n,{frame_idx})'\" -frames:v 1 \"{output_path}\""
+ subprocess.run(command_extract, shell=True, check=True)
+ output_paths.append(output_path)
+
+ print(f"Editor: Vetor de frames extraído para: {output_paths}")
+ return output_paths
+ except Exception as e:
+ error_message = f"Editor Mágico (FFmpeg) falhou ao extrair os frames de contexto: {e}"
+ if hasattr(e, 'stderr'): error_message += f"\nDetalhes: {e.stderr}"
+ raise gr.Error(error_message)
+
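Editor's note: with the default constants (144-frame fragments, CONVERGENCE_FRAMES = 8), the "frame vector" selected by `extract_context_frames` works out to the frames 8 and 4 positions before the end plus the final frame. A minimal check, assuming a 144-frame clip:

```python
# Illustrative only: which indices extract_context_frames picks for a 144-frame fragment.
total_frames = 144
CONVERGENCE_FRAMES = 8
frame_indices = [total_frames - 1 - CONVERGENCE_FRAMES,
                 total_frames - 1 - (CONVERGENCE_FRAMES // 2),
                 total_frames - 1]
print(frame_indices)  # [135, 139, 143]
```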
+ ####
+ # Orquestra a produção de todos os fragmentos de vídeo com a nova lógica de vetor de frames.
+ ####
+ def run_video_production(prompt_geral, keyframe_image_paths, scene_storyboard, seed, cfg, progress=gr.Progress()):
+ if not keyframe_image_paths or len(keyframe_image_paths) < 2:
+ raise gr.Error("Pinte pelo menos 2 keyframes na Etapa 2 para produzir as transições.")
+
+ log_history = "\n--- FASE 3/4: A Câmera e o Cineasta estão filmando em sequência just-in-time...\n"
+ yield {production_log_output: log_history, video_gallery_glitch: []}
+
+ video_fragments = []
+ start_media_for_prompt = keyframe_image_paths[0]
+ previous_media_for_ltx = keyframe_image_paths[0]
+
+ story_history = ""
+ with Image.open(keyframe_image_paths[0]) as img:
+ width, height = img.size
+
+ num_transitions = len(keyframe_image_paths) - 1
+ for i in range(num_transitions):
+ end_keyframe_path = keyframe_image_paths[i+1]
+ is_first_fragment = (i == 0)
+ fragment_num = i + 1
+
+ progress(i / num_transitions, desc=f"Planejando Fragmento {fragment_num}/{num_transitions}")
+
+ log_history += f"\n--- FRAGMENTO {fragment_num} ---\n"
+ log_history += "Cineasta (Gemini) está analisando o contexto de movimento...\n"
+ yield {production_log_output: log_history}
+
+ if is_first_fragment:
+ prompt_filename_to_use = "director_motion_prompt.txt"
+ story_history = f"A história começa com a transição da cena '{scene_storyboard[0]}' para '{scene_storyboard[1]}'."
+ else:
+ prompt_filename_to_use = "director_motion_prompt_vector.txt"
+ story_history += f"\n- Em seguida, a cena muda de '{scene_storyboard[i]}' para '{scene_storyboard[i+1]}'."
+
+ current_motion_prompt = get_single_motion_prompt(prompt_geral, story_history, start_media_for_prompt, end_keyframe_path, prompt_filename_to_use)
+
+ log_history += f"Instrução do Cineasta ({prompt_filename_to_use}): '{current_motion_prompt}'\n"
+ log_history += f"Filmando transição de '{os.path.basename(previous_media_for_ltx)}' para '{os.path.basename(end_keyframe_path)}'...\n"
+ yield {production_log_output: log_history}
+
+ # LTX ainda usa apenas uma imagem de partida (o último frame do vídeo anterior)
+ end_frame_index = VIDEO_TOTAL_FRAMES - CONVERGENCE_FRAMES
+ conditioning_items_data = [(previous_media_for_ltx, 0, 1.0), (end_keyframe_path, end_frame_index, 1.0)]
+
+ fragment_path = run_ltx_animation(fragment_num, current_motion_prompt, conditioning_items_data, width, height, seed, cfg, progress)
+ video_fragments.append(fragment_path)
+
+ log_history += f"Fragmento {fragment_num} filmado. Preparando contexto para a próxima cena...\n"
+ yield {production_log_output: log_history, video_gallery_glitch: video_fragments}
+
+ # Prepara as entradas para a PRÓXIMA iteração
+ context_frames = extract_context_frames(fragment_path, fragment_num)
+ start_media_for_prompt = context_frames # Gemini usará os 3 frames
+ previous_media_for_ltx = context_frames[-1] # LTX usará apenas o último frame
+
+ log_history += "\nFilmagem de todos os fragmentos de transição concluída.\n"
+ progress(1.0, desc="Produção Concluída.")
+ yield {production_log_output: log_history, video_gallery_glitch: video_fragments, fragment_list_state: video_fragments}
+
+ def concatenate_and_trim_masterpiece(fragment_paths: list, progress=gr.Progress()):
+ # ... (código inalterado) ...
+ if not fragment_paths: raise gr.Error("Nenhum fragmento de vídeo para concatenar.")
+ progress(0.2, desc="Aparando fragmentos para transições suaves...");
+ trimmed_dir = os.path.join(WORKSPACE_DIR, "trimmed"); os.makedirs(trimmed_dir, exist_ok=True)
+ paths_for_concat = []
+ try:
+ for i, path in enumerate(fragment_paths):
+ if i == len(fragment_paths) - 1:
+ paths_for_concat.append(path)
+ continue
+
+ trimmed_path = os.path.join(trimmed_dir, f"fragment_{i}_trimmed.mp4")
+ probe_cmd = f"ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of default=noprint_wrappers=1:nokey=1 \"{path}\""
+ result = subprocess.run(probe_cmd, shell=True, check=True, capture_output=True, text=True)
+ total_frames = int(result.stdout.strip())
+ frames_to_keep = total_frames - CONVERGENCE_FRAMES
+ if frames_to_keep <= 0:
+ shutil.copyfile(path, trimmed_path)
+ paths_for_concat.append(trimmed_path)
+ continue
+
+ trim_cmd = f"ffmpeg -y -v error -i \"{path}\" -vf \"select='lt(n,{frames_to_keep})'\" -c:v libx264 -preset ultrafast -an \"{trimmed_path}\""
+ subprocess.run(trim_cmd, shell=True, check=True, capture_output=True, text=True)
+ paths_for_concat.append(trimmed_path)
+
+ progress(0.6, desc="Montando a obra-prima final...")
+ list_file_path = os.path.join(WORKSPACE_DIR, "concat_list.txt"); final_output_path = os.path.join(WORKSPACE_DIR, "obra_prima_final.mp4")
+ with open(list_file_path, "w") as f:
+ for p in paths_for_concat: f.write(f"file '{os.path.abspath(p)}'\n")
+ concat_cmd = f"ffmpeg -y -v error -f concat -safe 0 -i \"{list_file_path}\" -c copy \"{final_output_path}\""
+ subprocess.run(concat_cmd, shell=True, check=True, capture_output=True, text=True)
+ return final_output_path
+ except (subprocess.CalledProcessError, ValueError) as e:
+ error_message = f"FFmpeg falhou durante a pós-produção (corte e concatenação): {e}"
+ if hasattr(e, 'stderr'): error_message += f"\nDetalhes do erro do FFmpeg: {e.stderr}"
+ raise gr.Error(error_message)
+
+
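Editor's note: the trim-and-concat step drops the CONVERGENCE_FRAMES-long overlap from every fragment except the last, so the "echo" region is not duplicated in the final cut. A rough length estimate, assuming every fragment comes out at VIDEO_TOTAL_FRAMES = 144 frames and 36 fps (illustrative only):

```python
# Illustrative estimate of the assembled video length (not part of the app).
FPS, FRAGMENT_FRAMES, CONVERGENCE_FRAMES = 36, 144, 8

def final_length_seconds(num_fragments: int) -> float:
    # Every fragment but the last is trimmed to FRAGMENT_FRAMES - CONVERGENCE_FRAMES frames.
    total_frames = (num_fragments - 1) * (FRAGMENT_FRAMES - CONVERGENCE_FRAMES) + FRAGMENT_FRAMES
    return total_frames / FPS

print(final_length_seconds(3))  # (2*136 + 144) / 36 = 416/36 ≈ 11.6 seconds
```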
+ # --- Ato 5: A Interface com o Mundo ---
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+ gr.Markdown("# NOVINHO-4.4 (Piloto de Testes - Vetor de Frames)\n*By Carlex & Gemini & DreamO*")
+
+ # ... (Interface inalterada) ...
+ if os.path.exists(WORKSPACE_DIR): shutil.rmtree(WORKSPACE_DIR)
+ os.makedirs(WORKSPACE_DIR)
+ Path("examples").mkdir(exist_ok=True)
+
+ scene_storyboard_state = gr.State([])
+ keyframe_images_state = gr.State([])
+ fragment_list_state = gr.State([])
+ prompt_geral_state = gr.State("")
+ processed_ref_path_state = gr.State("")
+ visible_references_state = gr.State(0)
+
+ # --- ETAPA 1: O ROTEIRO ---
+ gr.Markdown("--- \n ## ETAPA 1: O ROTEIRO (Sonhador)")
+ with gr.Row():
+ with gr.Column(scale=1):
+ prompt_input = gr.Textbox(label="Ideia Geral (Prompt)")
+ num_fragments_input = gr.Slider(2, 10, 4, step=1, label="Número de Cenas")
+ image_input = gr.Image(type="filepath", label=f"Imagem de Referência Principal (será {TARGET_RESOLUTION}x{TARGET_RESOLUTION})")
+ director_button = gr.Button("▶️ 1. Gerar Roteiro de Cenas", variant="primary")
+ with gr.Column(scale=2):
+ storyboard_to_show = gr.JSON(label="Roteiro de Cenas Gerado")
+
+ # --- ETAPA 2: OS KEYFRAMES ---
+ gr.Markdown("--- \n ## ETAPA 2: OS KEYFRAMES (Pintor)")
+ with gr.Row():
+ with gr.Column(scale=2):
+ gr.Markdown("### Controles do Pintor (DreamO)")
+ gr.Markdown("**Tarefas:** `style` (estilo), `ip` (conteúdo), `id` (identidade).")
+ ref_image_inputs, ref_task_inputs, aux_ref_rows = [], [], []
+ with gr.Group():
+ with gr.Row():
+ ref_image_inputs.append(gr.Image(label="Referência Sequencial (Automática)", type="filepath", interactive=False))
+ ref_task_inputs.append(gr.Dropdown(choices=["ip", "id", "style"], value="style", label="Tarefa Seq."))
+ for i in range(MAX_REFS):
+ with gr.Row(visible=False) as ref_row_aux:
+ ref_image_inputs.append(gr.Image(label=f"Ref. Auxiliar {i+1}", type="filepath"))
+ ref_task_inputs.append(gr.Dropdown(choices=["ip", "id", "style"], value="ip", label=f"Tarefa Aux. {i+1}"))
+ aux_ref_rows.append(ref_row_aux)
+ with gr.Row():
+ add_ref_button = gr.Button("➕ Add Ref.")
+ remove_ref_button = gr.Button("➖ Rem. Ref.")
+ photographer_button = gr.Button("▶️ 2. Pintar Imagens-Chave", variant="primary")
+ with gr.Column(scale=1):
+ keyframe_log_output = gr.Textbox(label="Diário de Bordo do Pintor", lines=15, interactive=False)
+ keyframe_gallery_output = gr.Gallery(label="Imagens-Chave Pintadas", object_fit="contain", height="auto", type="filepath")
+
+ # --- ETAPA 3: A PRODUÇÃO ---
+ gr.Markdown("--- \n ## ETAPA 3: A PRODUÇÃO (Cineasta e Câmera)")
+ with gr.Row():
+ with gr.Column(scale=1):
+ with gr.Row():
+ seed_number = gr.Number(42, label="Seed")
+ cfg_slider = gr.Slider(1.0, 10.0, 2.5, step=0.1, label="CFG")
+ animator_button = gr.Button("▶️ 3. Produzir Cenas em Vídeo", variant="primary")
+ production_log_output = gr.Textbox(label="Diário de Bordo da Produção", lines=10, interactive=False)
+ with gr.Column(scale=1):
+ video_gallery_glitch = gr.Gallery(label="Fragmentos Gerados (com sobreposição)", object_fit="contain", height="auto", type="video")
+
+ # --- ETAPA 4: PÓS-PRODUÇÃO ---
+ gr.Markdown(f"--- \n ## ETAPA 4: PÓS-PRODUÇÃO (Editor)")
+ editor_button = gr.Button("▶️ 4. Montar Vídeo Final", variant="primary")
+ final_video_output = gr.Video(label="A Obra-Prima Final", width=TARGET_RESOLUTION)
+
+ # --- Rodapé Filosófico ---
+ gr.Markdown(
+ """
+ ---
+ ### A Arquitetura ADUC-SDR: O Esquema Matemático
+ A geração de vídeo é governada por uma função seccional que define como cada fragmento (`V_i`) é criado, operando em dois regimes distintos:
+
+ ---
+ #### **FÓRMULA 1: O FRAGMENTO INICIAL (Gênesis, `i=1`)**
+ *Define a criação do primeiro clipe a partir de imagens estáticas.*
+
+ **Planejamento:** `P_1 = Γ_initial( K_1, K_2, P_geral )`
+
+ **Execução:** `V_1 = Ψ( { (K_1, F_start), (K_2, F_end) }, P_1 )`
+
+ ---
+ #### **FÓRMULA 2: A CADEIA CAUSAL (Momentum, `i > 1`)**
+ *Define a criação dos fragmentos subsequentes, garantindo a continuidade através do "eco".*
+
+ **Destilação:** `C_(i-1) = Δ(V_(i-1))`
+
+ **Planejamento:** `P_i = Γ_transition( C_(i-1), K_(i+1), P_geral, H_(i-1) )`
+
+ **Execução:** `V_i = Ψ( { (C_(i-1), F_start), (K_(i+1), F_end) }, P_i )`
+
+ ---
+ #### **Componentes (O Léxico da Arquitetura):**
+ - **`V_i`**: Fragmento de Vídeo
+ - **`K_i`**: Keyframe (Imagem Estática)
+ - **`C_i`**: "Eco" Causal (Clipe de Vídeo)
+ - **`P_i`**: Prompt de Movimento
+ - **`P_geral`**: Prompt Geral (Intenção do Diretor)
+ - **`H_i`**: Histórico Narrativo
+ - **`Γ`**: Cineasta (Gerador de Prompt)
+ - **`Ψ`**: Câmera (Gerador de Vídeo)
+ - **`Δ`**: Editor (Extrator de "Eco")
+ - **`F_start`, `F_end`**: Constantes de Frame (Âncoras Temporais)
+ """
+ )
+
+
+ # --- Ato 6: A Regência (Lógica de Conexão dos Botões) ---
+ def update_reference_visibility(current_count, action):
+ if action == "add": new_count = min(MAX_REFS, current_count + 1)
+ else: new_count = max(0, current_count - 1)
+ return [new_count] + [gr.update(visible=(i < new_count)) for i in range(MAX_REFS)]
+
+ add_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("add")], outputs=[visible_references_state] + aux_ref_rows)
+ remove_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("remove")], outputs=[visible_references_state] + aux_ref_rows)
+
+ director_button.click(
+ fn=get_static_scenes_storyboard,
+ inputs=[num_fragments_input, prompt_input, image_input],
+ outputs=[scene_storyboard_state]
+ ).success(
+ fn=lambda s, p: (s, p),
+ inputs=[scene_storyboard_state, prompt_input],
+ outputs=[storyboard_to_show, prompt_geral_state]
+ ).success(
+ fn=process_image_to_square,
+ inputs=[image_input],
+ outputs=[processed_ref_path_state]
+ ).success(
+ fn=lambda p: p,
+ inputs=[processed_ref_path_state],
+ outputs=[ref_image_inputs[0]]
+ )
+
+ photographer_button.click(
+ fn=run_keyframe_generation,
+ inputs=[scene_storyboard_state, processed_ref_path_state] + ref_image_inputs + ref_task_inputs,
+ outputs=[keyframe_log_output, keyframe_gallery_output, keyframe_images_state]
+ )
+
+ animator_button.click(
+ fn=run_video_production,
+ inputs=[prompt_geral_state, keyframe_images_state, scene_storyboard_state, seed_number, cfg_slider],
+ outputs=[production_log_output, video_gallery_glitch, fragment_list_state]
+ )
+
+ editor_button.click(
+ fn=concatenate_and_trim_masterpiece,
+ inputs=[fragment_list_state],
+ outputs=[final_video_output]
+ )
+
+ if __name__ == "__main__":
+ demo.queue().launch(server_name="0.0.0.0", share=True)
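Editor's note: the "Esquema Matemático" embedded in the UI footer above maps directly onto `run_video_production`. The sketch below is a reading aid only (not part of this commit), restating that loop in terms of the Γ/Ψ/Δ operators. The parameter names `generate_motion_prompt`, `render_fragment` and `extract_echo` are placeholders standing in for `get_single_motion_prompt`, `run_ltx_animation` and `extract_context_frames`; they are not functions defined in this repository.

```python
# Illustrative sketch of the ADUC-SDR loop described in the footer markdown.
# Γ (Cineasta) -> generate_motion_prompt, Ψ (Câmera) -> render_fragment, Δ (Editor) -> extract_echo.
def produce_video(keyframes, storyboard, prompt_geral,
                  generate_motion_prompt, render_fragment, extract_echo,
                  total_frames=144, convergence_frames=8):
    fragments, history = [], ""
    start_context = [keyframes[0]]   # Fórmula 1: the genesis fragment starts from K_1 itself
    start_anchor = keyframes[0]
    for i in range(len(keyframes) - 1):
        end_keyframe = keyframes[i + 1]
        history += f" {storyboard[i]} -> {storyboard[i + 1]}."
        # Planejamento: P_i = Γ(contexto, K_(i+1), P_geral, H)
        motion_prompt = generate_motion_prompt(prompt_geral, history, start_context, end_keyframe)
        # Execução: V_i = Ψ({(âncora, F_start), (K_(i+1), F_end)}, P_i)
        anchors = [(start_anchor, 0, 1.0), (end_keyframe, total_frames - convergence_frames, 1.0)]
        fragment = render_fragment(i + 1, motion_prompt, anchors)
        fragments.append(fragment)
        # Destilação: C_i = Δ(V_i) — the "echo" that seeds the next iteration
        echo_frames = extract_echo(fragment)
        start_context, start_anchor = echo_frames, echo_frames[-1]
    return fragments
```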
appv0.py ADDED
@@ -0,0 +1,633 @@
+ # Euia-AducSdr: Uma implementação aberta e funcional da arquitetura ADUC-SDR para geração de vídeo coerente.
+ # Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos
+ #
+ # Contato:
+ # Carlos Rodrigues dos Santos
+ # Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025
+ #
+ # Repositórios e Projetos Relacionados:
+ # GitHub: https://github.com/carlex22/Aduc-sdr
+ # Hugging Face: https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/
+ # Hugging Face: https://huggingface.co/spaces/Carlexxx/Novinho/
+ #
+ # Este programa é software livre: você pode redistribuí-lo e/ou modificá-lo
+ # sob os termos da Licença Pública Geral Affero da GNU como publicada pela
+ # Free Software Foundation, seja a versão 3 da Licença, ou
+ # (a seu critério) qualquer versão posterior.
+ #
+ # Este programa é distribuído na esperança de que seja útil,
+ # mas SEM QUALQUER GARANTIA; sem mesmo a garantia implícita de
+ # COMERCIALIZAÇÃO ou ADEQUAÇÃO A UM DETERMINADO FIM. Consulte a
+ # Licença Pública Geral Affero da GNU para mais detalhes.
+ #
+ # Você deve ter recebido uma cópia da Licença Pública Geral Affero da GNU
+ # junto com este programa. Se não, veja <https://www.gnu.org/licenses/>.
+
+ # --- app.py (NOVINHO-4.0: O Piloto de Testes - Documentação Externa) ---
+
+ # --- Ato 1: A Convocação da Orquestra (Importações) ---
+ import gradio as gr
+ import torch
+ import os
+ import yaml
+ from PIL import Image, ImageOps
+ import shutil
+ import gc
+ import subprocess
+ import google.generativeai as genai
+ import numpy as np
+ import imageio
+ from pathlib import Path
+ import huggingface_hub
+ import json
+ import time
+
+ from inference import create_ltx_video_pipeline, load_image_to_tensor_with_resize_and_crop, ConditioningItem, calculate_padding
+ from dreamo_helpers import dreamo_generator_singleton
+
+ # --- Ato 2: A Preparação do Palco (Configurações) ---
+ config_file_path = "configs/ltxv-13b-0.9.8-distilled.yaml"
+ with open(config_file_path, "r") as file: PIPELINE_CONFIG_YAML = yaml.safe_load(file)
+
+ LTX_REPO = "Lightricks/LTX-Video"
+ models_dir = "downloaded_models_gradio_cpu_init"
+ Path(models_dir).mkdir(parents=True, exist_ok=True)
+ WORKSPACE_DIR = "aduc_workspace"
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
+
+ VIDEO_FPS = 36
+ VIDEO_DURATION_SECONDS = 4
+ VIDEO_TOTAL_FRAMES = VIDEO_DURATION_SECONDS * VIDEO_FPS
+ CONVERGENCE_FRAMES = 8
+ TARGET_RESOLUTION = 720
+ MAX_REFS = 4
+
+ print("Baixando e criando pipelines LTX na CPU...")
+ distilled_model_actual_path = huggingface_hub.hf_hub_download(repo_id=LTX_REPO, filename=PIPELINE_CONFIG_YAML["checkpoint_path"], local_dir=models_dir, local_dir_use_symlinks=False)
+ pipeline_instance = create_ltx_video_pipeline(
+ ckpt_path=distilled_model_actual_path,
+ precision=PIPELINE_CONFIG_YAML["precision"],
+ text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
+ sampler=PIPELINE_CONFIG_YAML["sampler"],
+ device='cpu'
+ )
+ print("Modelos LTX prontos (na CPU).")
+
+
+ # --- Ato 3: As Partituras dos Músicos (Funções Corrigidas e Documentadas) ---
+
+ ####
+ # Carrega uma mídia (imagem ou vídeo) e a converte em um tensor Pytorch.
+ # Esta função prepara as imagens de condicionamento (início e fim) para o pipeline LTX.
+ # Se a entrada for um vídeo, extrai e usa apenas o seu primeiro frame.
+ # Args:
+ # media_path: O caminho para o arquivo de imagem ou vídeo.
+ # height: A altura alvo do tensor.
+ # width: A largura alvo do tensor.
+ # Returns:
+ # Um tensor Pytorch pronto para ser usado como condicionamento.
+ ####
+ def load_conditioning_tensor(media_path: str, height: int, width: int) -> torch.Tensor:
+ if not media_path: raise ValueError("Caminho da mídia de condicionamento não pode ser nulo.")
+ lower_path = media_path.lower()
+ if lower_path.endswith(('.png', '.jpg', '.jpeg')):
+ return load_image_to_tensor_with_resize_and_crop(media_path, height, width)
+ elif lower_path.endswith('.mp4'):
+ try:
+ with imageio.get_reader(media_path) as reader:
+ first_frame = reader.get_data(0)
+ image = Image.fromarray(first_frame).convert("RGB")
+ return load_image_to_tensor_with_resize_and_crop(image, height, width)
+ except Exception as e:
+ raise gr.Error(f"Falha ao ler o primeiro frame do vídeo '{media_path}': {e}")
+ else:
+ raise gr.Error(f"Formato de arquivo de condicionamento não suportado: {media_path}")
+
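Editor's note: as the docstring above states, this earlier version accepts either an image or a video as a conditioning source, and a video contributes only its first frame. A minimal usage sketch, run inside this module (file names are hypothetical, for illustration only):

```python
# Hypothetical paths under the module's workspace, shown only to illustrate both call forms.
keyframe_tensor = load_conditioning_tensor("aduc_workspace/keyframe_1.png", 720, 720)
echo_tensor = load_conditioning_tensor("aduc_workspace/fragment_1.mp4", 720, 720)  # uses the clip's first frame
```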
107
+ ####
108
+ # Executa o pipeline LTX para gerar um único fragmento de vídeo.
109
+ # Atua como a "Câmera" do sistema. Recebe um ponto de partida e um ponto de
110
+ # chegada (em `conditioning_items_data`), uma instrução de movimento (`motion_prompt`),
111
+ # e renderiza o clipe de vídeo correspondente. Gerencia o uso da GPU.
112
+ # Args:
113
+ # current_fragment_index: O número da cena atual, usado para o nome do arquivo e seed.
114
+ # motion_prompt: A instrução do "Cineasta" (Gemini) sobre como a câmera deve se mover.
115
+ # conditioning_items_data: Lista de tuplas contendo os caminhos das mídias de
116
+ # condicionamento (início e fim), seus frames de início e força.
117
+ # width, height: Dimensões do vídeo.
118
+ # seed, cfg: Parâmetros de geração do LTX.
119
+ # Returns:
120
+ # O caminho para o arquivo de vídeo .mp4 gerado.
121
+ ####
122
+ def run_ltx_animation(current_fragment_index, motion_prompt, conditioning_items_data, width, height, seed, cfg, progress=gr.Progress()):
123
+ progress(0, desc=f"[TECPIX 5000] Filmando Cena {current_fragment_index}...");
124
+ output_path = os.path.join(WORKSPACE_DIR, f"fragment_{current_fragment_index}.mp4"); target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
125
+ try:
126
+ pipeline_instance.to(target_device)
127
+ conditioning_items = []
128
+ for (path, start_frame, strength) in conditioning_items_data:
129
+ tensor = load_conditioning_tensor(path, height, width)
130
+ conditioning_items.append(ConditioningItem(tensor.to(target_device), start_frame, strength))
131
+
132
+ n_val = round((float(VIDEO_TOTAL_FRAMES) - 1.0) / 8.0); actual_num_frames = int(n_val * 8 + 1)
133
+ padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32
134
+ padding_vals = calculate_padding(height, width, padded_h, padded_w)
135
+ for cond_item in conditioning_items: cond_item.media_item = torch.nn.functional.pad(cond_item.media_item, padding_vals)
136
+ kwargs = {"prompt": motion_prompt, "negative_prompt": "blurry, distorted, bad quality, artifacts", "height": padded_h, "width": padded_w, "num_frames": actual_num_frames, "frame_rate": VIDEO_FPS, "generator": torch.Generator(device=target_device).manual_seed(int(seed) + current_fragment_index), "output_type": "pt", "guidance_scale": float(cfg), "timesteps": PIPELINE_CONFIG_YAML.get("first_pass", {}).get("timesteps"), "conditioning_items": conditioning_items, "decode_timestep": PIPELINE_CONFIG_YAML.get("decode_timestep"), "decode_noise_scale": PIPELINE_CONFIG_YAML.get("decode_noise_scale"), "stochastic_sampling": PIPELINE_CONFIG_YAML.get("stochastic_sampling"), "image_cond_noise_scale": 0.15, "is_video": True, "vae_per_channel_normalize": True, "mixed_precision": (PIPELINE_CONFIG_YAML.get("precision") == "mixed_precision"), "offload_to_cpu": False, "enhance_prompt": False}
137
+ result_tensor = pipeline_instance(**kwargs).images
138
+ pad_l, pad_r, pad_t, pad_b = map(int, padding_vals); slice_h = -pad_b if pad_b > 0 else None; slice_w = -pad_r if pad_r > 0 else None
139
+ cropped_tensor = result_tensor[:, :, :VIDEO_TOTAL_FRAMES, pad_t:slice_h, pad_l:slice_w]; video_np = (cropped_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy() * 255).astype(np.uint8)
140
+ with imageio.get_writer(output_path, fps=VIDEO_FPS, codec='libx264', quality=8) as writer:
141
+ for i, frame in enumerate(video_np): progress(i / len(video_np), desc=f"Renderizando frame {i+1}/{len(video_np)}..."); writer.append_data(frame)
142
+ return output_path
143
+ finally:
144
+ pipeline_instance.to('cpu'); gc.collect()
145
+ if torch.cuda.is_available(): torch.cuda.empty_cache()
146
+
147
+ ####
148
+ # Processa a imagem de referência inicial do usuário, tornando-a quadrada.
149
+ # Garante que a imagem de referência principal tenha as dimensões corretas
150
+ # (ex: 720x720) antes de ser usada no pipeline, evitando distorções.
151
+ # Args:
152
+ # image_path: Caminho da imagem original.
153
+ # size: A dimensão alvo (altura e largura).
154
+ # Returns:
155
+ # O caminho para a nova imagem quadrada salva no workspace.
156
+ ####
157
+ def process_image_to_square(image_path: str, size: int = TARGET_RESOLUTION) -> str:
158
+ if not image_path or not os.path.exists(image_path): return None
159
+ try:
160
+ img = Image.open(image_path).convert("RGB")
161
+ img_square = ImageOps.fit(img, (size, size), Image.Resampling.LANCZOS)
162
+ output_filename = f"initial_ref_{size}x{size}.png"
163
+ output_path = os.path.join(WORKSPACE_DIR, output_filename)
164
+ img_square.save(output_path)
165
+ return output_path
166
+ except Exception as e: raise gr.Error(f"Falha ao processar a imagem de referência: {e}")
167
+
168
+ ####
169
+ # Gera o roteiro de cenas estáticas (storyboard) usando Gemini.
170
+ # Atua como o "Sonhador" ou "Fotógrafo". Analisa a ideia geral do usuário
171
+ # e a imagem de referência para criar uma sequência de descrições de cenas
172
+ # que formarão a base para a geração dos keyframes.
173
+ # Args:
174
+ # num_fragments: O número de cenas a serem criadas.
175
+ # prompt: A ideia geral da história.
176
+ # initial_image_path: Caminho para a imagem de referência visual.
177
+ # Returns:
178
+ # Uma lista de strings, onde cada string é a descrição de uma cena.
179
+ ####
180
+ def get_static_scenes_storyboard(num_fragments: int, prompt: str, initial_image_path: str):
181
+ if not initial_image_path: raise gr.Error("Por favor, forneça uma imagem de referência inicial.")
182
+ if not GEMINI_API_KEY: raise gr.Error("Chave da API Gemini não configurada!")
183
+ genai.configure(api_key=GEMINI_API_KEY)
184
+ prompt_file = "prompts/photographer_prompt.txt"
185
+ with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f: template = f.read()
186
+ director_prompt = template.format(user_prompt=prompt, num_fragments=int(num_fragments))
187
+ model = genai.GenerativeModel('gemini-2.0-flash'); img = Image.open(initial_image_path)
188
+ response = model.generate_content([director_prompt, img])
189
+ try:
190
+ cleaned_response = response.text.strip().replace("```json", "").replace("```", "")
191
+ storyboard_data = json.loads(cleaned_response)
192
+ return storyboard_data.get("scene_storyboard", [])
193
+ except Exception as e: raise gr.Error(f"O Sonhador (Gemini) falhou ao criar o roteiro: {e}. Resposta: {response.text}")
194
+
195
+ ####
196
+ # Gera todas as imagens-chave (keyframes) para a história usando DreamO.
197
+ # Atua como o "Pintor". Itera sobre o roteiro gerado pelo "Sonhador" e pinta
198
+ # uma imagem estática para cada cena. Opera em um modo sequencial:
199
+ # 1. O Keyframe 1 é gerado com base na imagem de referência do usuário.
200
+ # 2. Os Keyframes 2, 3, ... são gerados usando o keyframe anterior como
201
+ # referência principal, garantindo a continuidade visual.
202
+ # Args:
203
+ # storyboard: A lista de prompts de cena do "Sonhador".
204
+ # initial_ref_image_path: O caminho para a imagem de referência principal do usuário.
205
+ # *reference_args: Argumentos variáveis da UI contendo referências auxiliares e suas tarefas.
206
+ # Yields:
207
+ # Dicionários para atualizar a UI (log e galeria) progressivamente.
208
+ ####
209
+ def run_keyframe_generation(storyboard, initial_ref_image_path, *reference_args):
210
+ if not storyboard:
211
+ raise gr.Error("Nenhum roteiro para gerar imagens-chave.")
212
+ if not initial_ref_image_path or not os.path.exists(initial_ref_image_path):
213
+ raise gr.Error("A imagem de referência principal é obrigatória para iniciar a pintura.")
214
+
215
+ num_total_refs = MAX_REFS + 1
216
+ ref_paths = list(reference_args[:num_total_refs])
217
+ ref_tasks = list(reference_args[num_total_refs:])
218
+
219
+ with Image.open(initial_ref_image_path) as img:
220
+ width, height = img.size
221
+ width, height = (width // 32) * 32, (height // 32) * 32
222
+
223
+ keyframe_paths = []
224
+ log_history = ""
225
+
226
+ try:
227
+ dreamo_generator_singleton.to_gpu()
228
+
229
+ log_history += f"Pintando Keyframe Inicial (Cena 1/{len(storyboard)})...\n"
230
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths)}
231
+
232
+ references_for_first_frame = []
233
+ references_for_first_frame.append({'image_np': np.array(Image.open(initial_ref_image_path).convert("RGB")), 'task': 'ip'})
234
+ log_history += f" - Usando imagem de referência principal '{os.path.basename(initial_ref_image_path)}' (Tarefa: ip)\n"
235
+
236
+ for j in range(1, num_total_refs):
237
+ aux_path, aux_task = ref_paths[j], ref_tasks[j]
238
+ if aux_path and os.path.exists(aux_path):
239
+ references_for_first_frame.append({'image_np': np.array(Image.open(aux_path).convert("RGB")), 'task': aux_task})
240
+ log_history += f" - Usando ref. auxiliar: {os.path.basename(aux_path)} (Tarefa: {aux_task})\n"
241
+
242
+ first_prompt = storyboard[0]
243
+ output_path = os.path.join(WORKSPACE_DIR, "keyframe_1.png")
244
+ image = dreamo_generator_singleton.generate_image_with_gpu_management(
245
+ reference_items=references_for_first_frame, prompt=first_prompt, width=width, height=height
246
+ )
247
+ image.save(output_path)
248
+ keyframe_paths.append(output_path)
249
+ current_ref_image_path = output_path
250
+
251
+ for i, prompt in enumerate(storyboard[1:], start=1):
252
+ log_history += f"\nPintando Cena Sequencial {i+1}/{len(storyboard)}...\n"
253
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths)}
254
+
255
+ reference_items_for_dreamo = []
256
+ sequential_ref_task = ref_tasks[0]
257
+ reference_items_for_dreamo.append({'image_np': np.array(Image.open(current_ref_image_path).convert("RGB")), 'task': sequential_ref_task})
258
+ log_history += f" - Usando ref. sequencial: {os.path.basename(current_ref_image_path)} (Tarefa: {sequential_ref_task})\n"
259
+
260
+ for j in range(1, num_total_refs):
261
+ aux_path, aux_task = ref_paths[j], ref_tasks[j]
262
+ if aux_path and os.path.exists(aux_path):
263
+ reference_items_for_dreamo.append({'image_np': np.array(Image.open(aux_path).convert("RGB")), 'task': aux_task})
264
+ log_history += f" - Usando ref. auxiliar: {os.path.basename(aux_path)} (Tarefa: {aux_task})\n"
265
+
266
+ output_path = os.path.join(WORKSPACE_DIR, f"keyframe_{i+1}.png")
267
+ image = dreamo_generator_singleton.generate_image_with_gpu_management(
268
+ reference_items=reference_items_for_dreamo, prompt=prompt, width=width, height=height
269
+ )
270
+ image.save(output_path)
271
+ keyframe_paths.append(output_path)
272
+ current_ref_image_path = output_path
273
+
274
+ except Exception as e:
275
+ raise gr.Error(f"O Pintor (DreamO) encontrou um erro: {e}")
276
+ finally:
277
+ dreamo_generator_singleton.to_cpu()
278
+
279
+ log_history += "\nPintura de todos os keyframes concluída.\n"
280
+ yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=keyframe_paths), keyframe_images_state: keyframe_paths}
281
+
282
+ ####
283
+ # Gera um único prompt de movimento para uma transição, usando Gemini.
284
+ # Atua como a "consciência" do Cineasta. Analisa uma mídia de partida e uma
285
+ # imagem de chegada para descrever como a câmera deve se mover.
286
+ # - Lógica Bifocal: Usa um `prompt_filename` para decidir se a transição é
287
+ # de imagem-para-imagem (a primeira) ou de vídeo-para-imagem (as seguintes).
288
+ # - Espera Ativa: Aguarda o arquivo de mídia enviado à API do Google atingir
289
+ # o estado 'ACTIVE' antes de usá-lo, evitando condições de corrida.
290
+ # Args:
291
+ # user_prompt: A ideia geral da história.
292
+ # story_history: O histórico de cenas e movimentos anteriores.
293
+ # start_media_path: O caminho para a mídia de partida (imagem K1 ou vídeo de eco).
294
+ # end_keyframe_path: O caminho para a imagem-alvo (K2, K3, ...).
295
+ # prompt_filename: O nome do arquivo de prompt a ser usado ('...initial.txt' ou '...transition.txt').
296
+ # Returns:
297
+ # Uma string contendo a instrução de movimento (motion prompt).
298
+ ####
299
+ def get_single_motion_prompt(user_prompt: str, story_history: str, start_media_path: str, end_keyframe_path: str, prompt_filename: str):
300
+ if not GEMINI_API_KEY:
301
+ raise gr.Error("Chave da API Gemini não configurada!")
302
+
303
+ uploaded_file = None
304
+ try:
305
+ genai.configure(api_key=GEMINI_API_KEY)
306
+ model = genai.GenerativeModel('gemini-2.0-flash')
307
+
308
+ print(f"Cineasta: Fazendo upload do arquivo de contexto '{start_media_path}'...")
309
+ file_to_upload = genai.upload_file(start_media_path)
310
+
311
+ print(f"Cineasta: Aguardando arquivo '{file_to_upload.name}' ficar ATIVO...")
312
+ timeout_seconds = 180
313
+ start_time = time.time()
314
+
315
+ while file_to_upload.state.name == "PROCESSING":
316
+ if time.time() - start_time > timeout_seconds:
317
+ genai.delete_file(name=file_to_upload.name)
318
+ raise TimeoutError(f"Tempo de espera para o processamento do arquivo '{file_to_upload.name}' excedido.")
319
+
320
+ time.sleep(5)
321
+ file_to_upload = genai.get_file(name=file_to_upload.name)
322
+
323
+ if file_to_upload.state.name != "ACTIVE":
324
+ raise gr.Error(f"O arquivo de mídia '{file_to_upload.name}' não pôde ser processado. Estado final: {file_to_upload.state.name}")
325
+
326
+ print(f"Cineasta: Arquivo '{file_to_upload.name}' está ATIVO. Gerando prompt...")
327
+ uploaded_file = file_to_upload
328
+
329
+ end_media = Image.open(end_keyframe_path)
330
+
331
+ prompt_file_path = os.path.join(os.path.dirname(__file__), "prompts", prompt_filename)
332
+ with open(prompt_file_path, "r", encoding="utf-8") as f:
333
+ template = f.read()
334
+
335
+ director_prompt = template.format(user_prompt=user_prompt, story_history=story_history)
336
+
337
+ model_contents = [director_prompt, uploaded_file, end_media]
338
+ response = model.generate_content(model_contents)
339
+
340
+ cleaned_text = response.text.strip()
341
+ if cleaned_text.startswith("```json"):
342
+ cleaned_text = cleaned_text[len("```json"):].strip()
343
+ if cleaned_text.endswith("```"):
344
+ cleaned_text = cleaned_text[:-len("```")].strip()
345
+
346
+ try:
347
+ motion_data = json.loads(cleaned_text)
348
+ final_prompt = motion_data.get("motion_prompt", "")
349
+ if not final_prompt:
350
+ raise ValueError("Prompt de movimento vazio no JSON.")
351
+ return final_prompt
352
+ except (json.JSONDecodeError, ValueError):
353
+ return cleaned_text.replace("\"", "").replace("{", "").replace("}", "").replace("motion_prompt:", "").strip()
354
+
355
+ except Exception as e:
356
+ response_text = getattr(e, 'text', 'Nenhuma resposta de texto disponível.')
357
+ raise gr.Error(f"O Cineasta (Gemini) falhou ao criar o prompt de movimento: {e}. Resposta: {response_text}")
358
+
359
+ finally:
360
+ if uploaded_file:
361
+ try:
362
+ genai.delete_file(uploaded_file.name)
363
+ except Exception as delete_e:
364
+ print(f"Aviso: Falha ao deletar o arquivo temporário {uploaded_file.name} da API Gemini. Erro: {delete_e}")
365
+
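+ # Editorial illustration (hedged, not consumed anywhere): the Cineasta is expected to answer with a
+ # single JSON object such as
+ #     {"motion_prompt": "slow dolly-in while the camera tilts up toward the subject"}
+ # anything else falls back to the plain-text cleanup performed above.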
366
+ ####
367
+ # Orchestrates the production of all video fragments.
368
+ # Implements the core logic of the ADUC-SDR architecture for video generation:
369
+ # 1. Fragment 1: generated from the transition from Keyframe 1 to Keyframe 2.
370
+ # Uses the initial animation prompt ('director_motion_prompt.txt').
371
+ # 2. Subsequent fragments: generated from the "echo" (convergence clip)
372
+ # of the previous video toward the next keyframe in the sequence. They use the transition prompt
373
+ # ('director_motion_prompt_transition.txt').
374
+ # The loop runs N-1 times for N keyframes, producing every transition.
375
+ # Args:
376
+ # prompt_geral: The overall story idea.
377
+ # keyframe_image_paths: List of paths to all generated keyframes.
378
+ # scene_storyboard: List with the description of each scene.
379
+ # seed, cfg: LTX generation parameters.
380
+ # Yields:
381
+ # Dictionaries that progressively update the UI (log and video gallery).
382
+ ####
383
+ def run_video_production(prompt_geral, keyframe_image_paths, scene_storyboard, seed, cfg, progress=gr.Progress()):
384
+ if not keyframe_image_paths or len(keyframe_image_paths) < 2:
385
+ raise gr.Error("Pinte pelo menos 2 keyframes na Etapa 2 para produzir as transições.")
386
+
387
+ log_history = "\n--- FASE 3/4: A Câmera e o Cineasta estão filmando em sequência just-in-time...\n"
388
+ yield {production_log_output: log_history, video_gallery_glitch: []}
389
+
390
+ video_fragments = []
391
+
392
+ previous_media_path = keyframe_image_paths[0]
393
+
394
+ story_history = ""
395
+ with Image.open(keyframe_image_paths[0]) as img:
396
+ width, height = img.size
397
+
398
+ num_transitions = len(keyframe_image_paths) - 1
399
+ for i in range(num_transitions):
400
+ start_media_path = previous_media_path
401
+ end_keyframe_path = keyframe_image_paths[i+1]
402
+
403
+ is_first_fragment = (i == 0)
404
+
405
+ fragment_num = i + 1
406
+ progress(i / num_transitions, desc=f"Planejando e Filmando Fragmento {fragment_num}/{num_transitions}")
407
+
408
+ log_history += f"\n--- FRAGMENTO {fragment_num} ---\n"
409
+ log_history += "Cineasta (Gemini) está analisando a cena anterior e a próxima...\n"
410
+ yield {production_log_output: log_history}
411
+
412
+ if is_first_fragment:
413
+ prompt_filename_to_use = "director_motion_prompt.txt"
414
+ story_history = f"A história começa com a transição da cena '{scene_storyboard[0]}' para '{scene_storyboard[1]}'."
415
+ else:
416
+ prompt_filename_to_use = "director_motion_prompt_transition.txt"
417
+ story_history += f"\n- Em seguida, a cena muda de '{scene_storyboard[i]}' para '{scene_storyboard[i+1]}'."
418
+
419
+ current_motion_prompt = get_single_motion_prompt(prompt_geral, story_history, start_media_path, end_keyframe_path, prompt_filename_to_use)
420
+
421
+ log_history += f"Instrução do Cineasta ({prompt_filename_to_use}): '{current_motion_prompt}'\n"
422
+ log_history += f"Filmando transição de '{os.path.basename(start_media_path)}' para '{os.path.basename(end_keyframe_path)}'...\n"
423
+ yield {production_log_output: log_history}
424
+
425
+ end_frame_index = VIDEO_TOTAL_FRAMES - CONVERGENCE_FRAMES
426
+ conditioning_items_data = [(start_media_path, 0, 1.0), (end_keyframe_path, end_frame_index, 1.0)]
427
+
428
+ fragment_path = run_ltx_animation(fragment_num, current_motion_prompt, conditioning_items_data, width, height, seed, cfg, progress)
429
+ video_fragments.append(fragment_path)
430
+
431
+ log_history += f"Fragmento {fragment_num} filmado. Extraindo memória física para a próxima cena...\n"
432
+ yield {production_log_output: log_history, video_gallery_glitch: video_fragments}
433
+
434
+ previous_media_path = extract_final_frames_video(fragment_path, fragment_num, CONVERGENCE_FRAMES)
435
+
436
+ log_history += "\nFilmagem de todos os fragmentos de transição concluída.\n"
437
+ progress(1.0, desc="Produção Concluída.")
438
+ yield {production_log_output: log_history, video_gallery_glitch: video_fragments, fragment_list_state: video_fragments}
439
+
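+ # Editorial sketch (hedged): the conditioning contract passed to run_ltx_animation above is a list of
+ # (media_path, first_frame_index, strength) tuples, e.g.
+ #     [("<start keyframe or echo clip>", 0, 1.0),
+ #      ("<target keyframe>", VIDEO_TOTAL_FRAMES - CONVERGENCE_FRAMES, 1.0)]
+ # so the start medium anchors frame 0 and the target keyframe anchors the convergence window at the end.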
440
+ ####
441
+ # Extracts the last N frames of a video to build the "convergence clip", or "echo".
442
+ # This is the "Distillation" step of the ADUC-SDR architecture. The resulting clip
443
+ # serves as the starting point (Causal Context) for the next animation,
444
+ # guaranteeing continuity of motion. Uses FFmpeg for the extraction.
445
+ # Args:
446
+ # input_video_path: The full video from which to extract the frames.
447
+ # fragment_index: The fragment number, used to name the output file.
448
+ # num_frames: The number of frames to extract from the end of the video.
449
+ # Returns:
450
+ # The path to the generated .mp4 convergence clip.
451
+ ####
452
+ def extract_final_frames_video(input_video_path: str, fragment_index: int, num_frames: int):
453
+ output_video_path = os.path.join(WORKSPACE_DIR, f"convergence_clip_{fragment_index}.mp4")
454
+ if not os.path.exists(input_video_path): raise gr.Error(f"Erro Interno: Vídeo de entrada para extração não encontrado: {input_video_path}")
455
+ try:
456
+ command_probe = f"ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of default=noprint_wrappers=1:nokey=1 \"{input_video_path}\""
457
+ result_probe = subprocess.run(command_probe, shell=True, check=True, capture_output=True, text=True); total_frames = int(result_probe.stdout.strip())
458
+ start_frame_index = total_frames - num_frames
459
+ if start_frame_index < 0:
460
+ shutil.copyfile(input_video_path, output_video_path); return output_video_path
461
+ command_extract = f"ffmpeg -y -v error -i \"{input_video_path}\" -vf \"select='gte(n,{start_frame_index})'\" -c:v libx264 -preset ultrafast -an \"{output_video_path}\""
462
+ subprocess.run(command_extract, shell=True, check=True, capture_output=True, text=True); return output_video_path
463
+ except (subprocess.CalledProcessError, ValueError) as e:
464
+ error_message = f"Editor Mágico (FFmpeg) falhou ao extrair o clipe de convergência: {e}"
465
+ if hasattr(e, 'stderr'): error_message += f"\nDetalhes: {e.stderr}"
466
+ raise gr.Error(error_message)
467
+
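+ # Editorial worked example (hedged, illustrative numbers only): for a 48-frame fragment and an
+ # 8-frame echo, start_frame_index = 40 and the command above reduces to
+ #     ffmpeg -y -v error -i fragment.mp4 -vf "select='gte(n,40)'" -c:v libx264 -preset ultrafast -an clip.mp4
+ # which keeps only frames 40..47 as the causal context for the next animation.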
468
+ ####
469
+ # Performs post-production, joining all video fragments into the final piece.
470
+ # Acts as the "Editor". The process involves:
471
+ # 1. Trim: removes the `CONVERGENCE_FRAMES` from the end of each fragment
472
+ # (except the last one) to create smooth transitions without overlap.
473
+ # 2. Concatenate: joins the trimmed fragments into a single continuous video.
474
+ # Uses FFmpeg for both operations.
475
+ # Args:
476
+ # fragment_paths: A list with the paths to every generated video fragment.
477
+ # Returns:
478
+ # The path to the final video, "obra_prima_final.mp4".
479
+ ####
480
+ def concatenate_and_trim_masterpiece(fragment_paths: list, progress=gr.Progress()):
481
+ if not fragment_paths: raise gr.Error("Nenhum fragmento de vídeo para concatenar.")
482
+ progress(0.2, desc="Aparando fragmentos para transições suaves...");
483
+ trimmed_dir = os.path.join(WORKSPACE_DIR, "trimmed"); os.makedirs(trimmed_dir, exist_ok=True)
484
+ paths_for_concat = []
485
+ try:
486
+ for i, path in enumerate(fragment_paths):
487
+ if i == len(fragment_paths) - 1:
488
+ paths_for_concat.append(path)
489
+ continue
490
+
491
+ trimmed_path = os.path.join(trimmed_dir, f"fragment_{i}_trimmed.mp4")
492
+ probe_cmd = f"ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of default=noprint_wrappers=1:nokey=1 \"{path}\""
493
+ result = subprocess.run(probe_cmd, shell=True, check=True, capture_output=True, text=True)
494
+ total_frames = int(result.stdout.strip())
495
+ frames_to_keep = total_frames - CONVERGENCE_FRAMES
496
+ if frames_to_keep <= 0:
497
+ shutil.copyfile(path, trimmed_path)
498
+ paths_for_concat.append(trimmed_path)
499
+ continue
500
+
501
+ trim_cmd = f"ffmpeg -y -v error -i \"{path}\" -vf \"select='lt(n,{frames_to_keep})'\" -c:v libx264 -preset ultrafast -an \"{trimmed_path}\""
502
+ subprocess.run(trim_cmd, shell=True, check=True, capture_output=True, text=True)
503
+ paths_for_concat.append(trimmed_path)
504
+
505
+ progress(0.6, desc="Montando a obra-prima final...")
506
+ list_file_path = os.path.join(WORKSPACE_DIR, "concat_list.txt"); final_output_path = os.path.join(WORKSPACE_DIR, "obra_prima_final.mp4")
507
+ with open(list_file_path, "w") as f:
508
+ for p in paths_for_concat: f.write(f"file '{os.path.abspath(p)}'\n")
509
+ concat_cmd = f"ffmpeg -y -v error -f concat -safe 0 -i \"{list_file_path}\" -c copy \"{final_output_path}\""
510
+ subprocess.run(concat_cmd, shell=True, check=True, capture_output=True, text=True)
511
+ return final_output_path
512
+ except (subprocess.CalledProcessError, ValueError) as e:
513
+ error_message = f"FFmpeg falhou durante a pós-produção (corte e concatenação): {e}"
514
+ if hasattr(e, 'stderr'): error_message += f"\nDetalhes do erro do FFmpeg: {e.stderr}"
515
+ raise gr.Error(error_message)
516
+
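+ # Editorial illustration (hedged, placeholder paths): the concat_list.txt written above follows the
+ # FFmpeg concat-demuxer format, one absolute path per line, with the last fragment kept untrimmed:
+ #     file '/abs/path/trimmed/fragment_0_trimmed.mp4'
+ #     file '/abs/path/trimmed/fragment_1_trimmed.mp4'
+ #     file '/abs/path/fragment_2.mp4'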
517
+ # --- Act 5: The Interface with the World (The Restored and Improved UI) ---
518
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
519
+ gr.Markdown("# NOVINHO-4.0 (Piloto de Testes - Documentação Externa)\n*By Carlex & Gemini & DreamO*")
520
+
521
+ if os.path.exists(WORKSPACE_DIR): shutil.rmtree(WORKSPACE_DIR)
522
+ os.makedirs(WORKSPACE_DIR)
523
+ Path("examples").mkdir(exist_ok=True)
524
+
525
+ scene_storyboard_state = gr.State([])
526
+ keyframe_images_state = gr.State([])
527
+ fragment_list_state = gr.State([])
528
+ prompt_geral_state = gr.State("")
529
+ processed_ref_path_state = gr.State("")
530
+ visible_references_state = gr.State(0)
531
+
532
+ # --- STAGE 1: THE SCRIPT ---
533
+ gr.Markdown("--- \n ## ETAPA 1: O ROTEIRO (Sonhador)")
534
+ with gr.Row():
535
+ with gr.Column(scale=1):
536
+ prompt_input = gr.Textbox(label="Ideia Geral (Prompt)")
537
+ num_fragments_input = gr.Slider(2, 10, 4, step=1, label="Número de Cenas")
538
+ image_input = gr.Image(type="filepath", label=f"Imagem de Referência Principal (será {TARGET_RESOLUTION}x{TARGET_RESOLUTION})")
539
+ director_button = gr.Button("▶️ 1. Gerar Roteiro de Cenas", variant="primary")
540
+ with gr.Column(scale=2):
541
+ storyboard_to_show = gr.JSON(label="Roteiro de Cenas Gerado")
542
+
543
+ # --- STAGE 2: THE KEYFRAMES ---
544
+ gr.Markdown("--- \n ## ETAPA 2: OS KEYFRAMES (Pintor)")
545
+ with gr.Row():
546
+ with gr.Column(scale=2):
547
+ gr.Markdown("### Controles do Pintor (DreamO)")
548
+ gr.Markdown("**Tarefas:** `style` (estilo), `ip` (conteúdo), `id` (identidade).")
549
+ ref_image_inputs, ref_task_inputs, aux_ref_rows = [], [], []
550
+ with gr.Group():
551
+ with gr.Row():
552
+ ref_image_inputs.append(gr.Image(label="Referência Sequencial (Automática)", type="filepath", interactive=False))
553
+ ref_task_inputs.append(gr.Dropdown(choices=["ip", "id", "style"], value="style", label="Tarefa Seq."))
554
+ for i in range(MAX_REFS):
555
+ with gr.Row(visible=False) as ref_row_aux:
556
+ ref_image_inputs.append(gr.Image(label=f"Ref. Auxiliar {i+1}", type="filepath"))
557
+ ref_task_inputs.append(gr.Dropdown(choices=["ip", "id", "style"], value="ip", label=f"Tarefa Aux. {i+1}"))
558
+ aux_ref_rows.append(ref_row_aux)
559
+ with gr.Row():
560
+ add_ref_button = gr.Button("➕ Add Ref.")
561
+ remove_ref_button = gr.Button("➖ Rem. Ref.")
562
+ photographer_button = gr.Button("▶️ 2. Pintar Imagens-Chave", variant="primary")
563
+ with gr.Column(scale=1):
564
+ keyframe_log_output = gr.Textbox(label="Diário de Bordo do Pintor", lines=15, interactive=False)
565
+ keyframe_gallery_output = gr.Gallery(label="Imagens-Chave Pintadas", object_fit="contain", height="auto", type="filepath")
566
+
567
+ # --- STAGE 3: THE PRODUCTION ---
568
+ gr.Markdown("--- \n ## ETAPA 3: A PRODUÇÃO (Cineasta e Câmera)")
569
+ with gr.Row():
570
+ with gr.Column(scale=1):
571
+ with gr.Row():
572
+ seed_number = gr.Number(42, label="Seed")
573
+ cfg_slider = gr.Slider(1.0, 10.0, 2.5, step=0.1, label="CFG")
574
+ animator_button = gr.Button("▶️ 3. Produzir Cenas em Vídeo", variant="primary")
575
+ production_log_output = gr.Textbox(label="Diário de Bordo da Produção", lines=10, interactive=False)
576
+ with gr.Column(scale=1):
577
+ video_gallery_glitch = gr.Gallery(label="Fragmentos Gerados (com sobreposição)", object_fit="contain", height="auto", type="video")
578
+
579
+ # --- STAGE 4: POST-PRODUCTION ---
580
+ gr.Markdown(f"--- \n ## ETAPA 4: PÓS-PRODUÇÃO (Editor)")
581
+ editor_button = gr.Button("▶️ 4. Montar Vídeo Final", variant="primary")
582
+ final_video_output = gr.Video(label="A Obra-Prima Final", width=TARGET_RESOLUTION)
583
+
584
+ # --- Philosophical Footer ---
585
+ gr.Markdown("--- \n ### A Filosofia por Trás: A Arquitetura ADUC-SDR")
586
+
587
+ # --- Act 6: The Conducting (Button Wiring Logic) ---
588
+ def update_reference_visibility(current_count, action):
589
+ if action == "add": new_count = min(MAX_REFS, current_count + 1)
590
+ else: new_count = max(0, current_count - 1)
591
+ return [new_count] + [gr.update(visible=(i < new_count)) for i in range(MAX_REFS)]
592
+
593
+ add_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("add")], outputs=[visible_references_state] + aux_ref_rows)
594
+ remove_ref_button.click(fn=update_reference_visibility, inputs=[visible_references_state, gr.State("remove")], outputs=[visible_references_state] + aux_ref_rows)
595
+
596
+ director_button.click(
597
+ fn=get_static_scenes_storyboard,
598
+ inputs=[num_fragments_input, prompt_input, image_input],
599
+ outputs=[scene_storyboard_state]
600
+ ).success(
601
+ fn=lambda s, p: (s, p),
602
+ inputs=[scene_storyboard_state, prompt_input],
603
+ outputs=[storyboard_to_show, prompt_geral_state]
604
+ ).success(
605
+ fn=process_image_to_square,
606
+ inputs=[image_input],
607
+ outputs=[processed_ref_path_state]
608
+ ).success(
609
+ fn=lambda p: p,
610
+ inputs=[processed_ref_path_state],
611
+ outputs=[ref_image_inputs[0]]
612
+ )
613
+
614
+ photographer_button.click(
615
+ fn=run_keyframe_generation,
616
+ inputs=[scene_storyboard_state, processed_ref_path_state] + ref_image_inputs + ref_task_inputs,
617
+ outputs=[keyframe_log_output, keyframe_gallery_output, keyframe_images_state]
618
+ )
619
+
620
+ animator_button.click(
621
+ fn=run_video_production,
622
+ inputs=[prompt_geral_state, keyframe_images_state, scene_storyboard_state, seed_number, cfg_slider],
623
+ outputs=[production_log_output, video_gallery_glitch, fragment_list_state]
624
+ )
625
+
626
+ editor_button.click(
627
+ fn=concatenate_and_trim_masterpiece,
628
+ inputs=[fragment_list_state],
629
+ outputs=[final_video_output]
630
+ )
631
+
632
+ if __name__ == "__main__":
633
+ demo.queue().launch(server_name="0.0.0.0", share=True)
configs/ltxv-13b-0.9.7-dev.yaml ADDED
@@ -0,0 +1,34 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.7-dev.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "bfloat16"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ guidance_scale: [1, 1, 6, 8, 6, 1, 1]
18
+ stg_scale: [0, 0, 4, 4, 4, 2, 1]
19
+ rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
20
+ guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
21
+ skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
22
+ num_inference_steps: 30
23
+ skip_final_inference_steps: 3
24
+ cfg_star_rescale: true
25
+
26
+ second_pass:
27
+ guidance_scale: [1]
28
+ stg_scale: [1]
29
+ rescaling_scale: [1]
30
+ guidance_timesteps: [1.0]
31
+ skip_block_list: [27]
32
+ num_inference_steps: 30
33
+ skip_initial_inference_steps: 17
34
+ cfg_star_rescale: true
configs/ltxv-13b-0.9.7-distilled.yaml ADDED
@@ -0,0 +1,28 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.7-distilled.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "bfloat16"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
18
+ guidance_scale: 1
19
+ stg_scale: 0
20
+ rescaling_scale: 1
21
+ skip_block_list: [42]
22
+
23
+ second_pass:
24
+ timesteps: [0.9094, 0.7250, 0.4219]
25
+ guidance_scale: 1
26
+ stg_scale: 0
27
+ rescaling_scale: 1
28
+ skip_block_list: [42]
configs/ltxv-13b-0.9.8-dev-fp8.yaml ADDED
@@ -0,0 +1,34 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.8-dev-fp8.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ guidance_scale: [1, 1, 6, 8, 6, 1, 1]
18
+ stg_scale: [0, 0, 4, 4, 4, 2, 1]
19
+ rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
20
+ guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
21
+ skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
22
+ num_inference_steps: 30
23
+ skip_final_inference_steps: 3
24
+ cfg_star_rescale: true
25
+
26
+ second_pass:
27
+ guidance_scale: [1]
28
+ stg_scale: [1]
29
+ rescaling_scale: [1]
30
+ guidance_timesteps: [1.0]
31
+ skip_block_list: [27]
32
+ num_inference_steps: 30
33
+ skip_initial_inference_steps: 17
34
+ cfg_star_rescale: true
configs/ltxv-13b-0.9.8-dev.yaml ADDED
@@ -0,0 +1,34 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.8-dev.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "bfloat16"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ guidance_scale: [1, 1, 6, 8, 6, 1, 1]
18
+ stg_scale: [0, 0, 4, 4, 4, 2, 1]
19
+ rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1]
20
+ guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180]
21
+ skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]]
22
+ num_inference_steps: 30
23
+ skip_final_inference_steps: 3
24
+ cfg_star_rescale: true
25
+
26
+ second_pass:
27
+ guidance_scale: [1]
28
+ stg_scale: [1]
29
+ rescaling_scale: [1]
30
+ guidance_timesteps: [1.0]
31
+ skip_block_list: [27]
32
+ num_inference_steps: 30
33
+ skip_initial_inference_steps: 17
34
+ cfg_star_rescale: true
configs/ltxv-13b-0.9.8-distilled-fp8.yaml ADDED
@@ -0,0 +1,29 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.8-distilled-fp8.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
18
+ guidance_scale: 1
19
+ stg_scale: 0
20
+ rescaling_scale: 1
21
+ skip_block_list: [42]
22
+
23
+ second_pass:
24
+ timesteps: [0.9094, 0.7250, 0.4219]
25
+ guidance_scale: 1
26
+ stg_scale: 0
27
+ rescaling_scale: 1
28
+ skip_block_list: [42]
29
+ tone_map_compression_ratio: 0.6
configs/ltxv-13b-0.9.8-distilled.yaml ADDED
@@ -0,0 +1,29 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-13b-0.9.8-distilled.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "bfloat16"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
18
+ guidance_scale: 1
19
+ stg_scale: 0
20
+ rescaling_scale: 1
21
+ skip_block_list: [42]
22
+
23
+ second_pass:
24
+ timesteps: [0.9094, 0.7250, 0.4219]
25
+ guidance_scale: 1
26
+ stg_scale: 0
27
+ rescaling_scale: 1
28
+ skip_block_list: [42]
29
+ tone_map_compression_ratio: 0.6
configs/ltxv-2b-0.9.1.yaml ADDED
@@ -0,0 +1,17 @@
1
+ pipeline_type: base
2
+ checkpoint_path: "ltx-video-2b-v0.9.1.safetensors"
3
+ guidance_scale: 3
4
+ stg_scale: 1
5
+ rescaling_scale: 0.7
6
+ skip_block_list: [19]
7
+ num_inference_steps: 40
8
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
9
+ decode_timestep: 0.05
10
+ decode_noise_scale: 0.025
11
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
12
+ precision: "bfloat16"
13
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
14
+ prompt_enhancement_words_threshold: 120
15
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
16
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
17
+ stochastic_sampling: false
configs/ltxv-2b-0.9.5.yaml ADDED
@@ -0,0 +1,17 @@
1
+ pipeline_type: base
2
+ checkpoint_path: "ltx-video-2b-v0.9.5.safetensors"
3
+ guidance_scale: 3
4
+ stg_scale: 1
5
+ rescaling_scale: 0.7
6
+ skip_block_list: [19]
7
+ num_inference_steps: 40
8
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
9
+ decode_timestep: 0.05
10
+ decode_noise_scale: 0.025
11
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
12
+ precision: "bfloat16"
13
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
14
+ prompt_enhancement_words_threshold: 120
15
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
16
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
17
+ stochastic_sampling: false
configs/ltxv-2b-0.9.6-dev.yaml ADDED
@@ -0,0 +1,17 @@
1
+ pipeline_type: base
2
+ checkpoint_path: "ltxv-2b-0.9.6-dev-04-25.safetensors"
3
+ guidance_scale: 3
4
+ stg_scale: 1
5
+ rescaling_scale: 0.7
6
+ skip_block_list: [19]
7
+ num_inference_steps: 40
8
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
9
+ decode_timestep: 0.05
10
+ decode_noise_scale: 0.025
11
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
12
+ precision: "bfloat16"
13
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
14
+ prompt_enhancement_words_threshold: 120
15
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
16
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
17
+ stochastic_sampling: false
configs/ltxv-2b-0.9.6-distilled.yaml ADDED
@@ -0,0 +1,16 @@
1
+ pipeline_type: base
2
+ checkpoint_path: "ltxv-2b-0.9.6-distilled-04-25.safetensors"
3
+ guidance_scale: 1
4
+ stg_scale: 0
5
+ rescaling_scale: 1
6
+ num_inference_steps: 8
7
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
8
+ decode_timestep: 0.05
9
+ decode_noise_scale: 0.025
10
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
11
+ precision: "bfloat16"
12
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
13
+ prompt_enhancement_words_threshold: 120
14
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
15
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
16
+ stochastic_sampling: true
configs/ltxv-2b-0.9.8-distilled-fp8.yaml ADDED
@@ -0,0 +1,28 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-2b-0.9.8-distilled-fp8.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
18
+ guidance_scale: 1
19
+ stg_scale: 0
20
+ rescaling_scale: 1
21
+ skip_block_list: [42]
22
+
23
+ second_pass:
24
+ timesteps: [0.9094, 0.7250, 0.4219]
25
+ guidance_scale: 1
26
+ stg_scale: 0
27
+ rescaling_scale: 1
28
+ skip_block_list: [42]
configs/ltxv-2b-0.9.8-distilled.yaml ADDED
@@ -0,0 +1,28 @@
1
+ pipeline_type: multi-scale
2
+ checkpoint_path: "ltxv-2b-0.9.8-distilled.safetensors"
3
+ downscale_factor: 0.6666666
4
+ spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors"
5
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
6
+ decode_timestep: 0.05
7
+ decode_noise_scale: 0.025
8
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
9
+ precision: "bfloat16"
10
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
11
+ prompt_enhancement_words_threshold: 120
12
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
13
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
14
+ stochastic_sampling: false
15
+
16
+ first_pass:
17
+ timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250]
18
+ guidance_scale: 1
19
+ stg_scale: 0
20
+ rescaling_scale: 1
21
+ skip_block_list: [42]
22
+
23
+ second_pass:
24
+ timesteps: [0.9094, 0.7250, 0.4219]
25
+ guidance_scale: 1
26
+ stg_scale: 0
27
+ rescaling_scale: 1
28
+ skip_block_list: [42]
configs/ltxv-2b-0.9.yaml ADDED
@@ -0,0 +1,17 @@
1
+ pipeline_type: base
2
+ checkpoint_path: "ltx-video-2b-v0.9.safetensors"
3
+ guidance_scale: 3
4
+ stg_scale: 1
5
+ rescaling_scale: 0.7
6
+ skip_block_list: [19]
7
+ num_inference_steps: 40
8
+ stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block"
9
+ decode_timestep: 0.05
10
+ decode_noise_scale: 0.025
11
+ text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS"
12
+ precision: "bfloat16"
13
+ sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint"
14
+ prompt_enhancement_words_threshold: 120
15
+ prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0"
16
+ prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct"
17
+ stochastic_sampling: false
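Editorial note: the YAML presets above are plain data consumed by the LTX-Video pipeline code in this repository. A minimal, hedged sketch of how one of them could be inspected (assumes PyYAML is installed; file name taken from the configs above):

    import yaml  # assumption: PyYAML is available in the environment

    with open("configs/ltxv-13b-0.9.8-distilled.yaml", "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)

    print(cfg["pipeline_type"])            # "multi-scale"
    print(cfg["checkpoint_path"])          # "ltxv-13b-0.9.8-distilled.safetensors"
    print(cfg["first_pass"]["timesteps"])  # per-pass sampler settings listed above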
dreamo/dreamo_pipeline.py ADDED
@@ -0,0 +1,507 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ # Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import diffusers
19
+ import numpy as np
20
+ import torch
21
+ import torch.nn as nn
22
+ from diffusers import FluxPipeline
23
+ from diffusers.pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps
24
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
25
+ from einops import repeat
26
+ from huggingface_hub import hf_hub_download
27
+ from safetensors.torch import load_file
28
+
29
+ from dreamo.transformer import flux_transformer_forward
30
+ from dreamo.utils import convert_flux_lora_to_diffusers
31
+
32
+ diffusers.models.transformers.transformer_flux.FluxTransformer2DModel.forward = flux_transformer_forward
33
+
34
+
35
+ def get_task_embedding_idx(task):
36
+ return 0
37
+
38
+
39
+ class DreamOPipeline(FluxPipeline):
40
+ def __init__(self, scheduler, vae, text_encoder, tokenizer, text_encoder_2, tokenizer_2, transformer):
41
+ super().__init__(scheduler, vae, text_encoder, tokenizer, text_encoder_2, tokenizer_2, transformer)
42
+ self.t5_embedding = nn.Embedding(10, 4096)
43
+ self.task_embedding = nn.Embedding(2, 3072)
44
+ self.idx_embedding = nn.Embedding(10, 3072)
45
+
46
+ def load_dreamo_model(self, device, use_turbo=True, version='v1.1'):
47
+ # download models and load file
48
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo.safetensors', local_dir='models')
49
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_cfg_distill.safetensors', local_dir='models')
50
+ if version == 'v1':
51
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_quality_lora_pos.safetensors',
52
+ local_dir='models')
53
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_quality_lora_neg.safetensors',
54
+ local_dir='models')
55
+ quality_lora_pos = load_file('models/dreamo_quality_lora_pos.safetensors')
56
+ quality_lora_neg = load_file('models/dreamo_quality_lora_neg.safetensors')
57
+ elif version == 'v1.1':
58
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='v1.1/dreamo_sft_lora.safetensors', local_dir='models')
59
+ hf_hub_download(repo_id='ByteDance/DreamO', filename='v1.1/dreamo_dpo_lora.safetensors', local_dir='models')
60
+ sft_lora = load_file('models/v1.1/dreamo_sft_lora.safetensors')
61
+ dpo_lora = load_file('models/v1.1/dreamo_dpo_lora.safetensors')
62
+ else:
63
+ raise ValueError(f'there is no {version}')
64
+ dreamo_lora = load_file('models/dreamo.safetensors')
65
+ cfg_distill_lora = load_file('models/dreamo_cfg_distill.safetensors')
66
+
67
+ # load embedding
68
+ self.t5_embedding.weight.data = dreamo_lora.pop('dreamo_t5_embedding.weight')[-10:]
69
+ self.task_embedding.weight.data = dreamo_lora.pop('dreamo_task_embedding.weight')
70
+ self.idx_embedding.weight.data = dreamo_lora.pop('dreamo_idx_embedding.weight')
71
+ self._prepare_t5()
72
+
73
+ # main lora
74
+ dreamo_diffuser_lora = convert_flux_lora_to_diffusers(dreamo_lora)
75
+ adapter_names = ['dreamo']
76
+ adapter_weights = [1]
77
+ self.load_lora_weights(dreamo_diffuser_lora, adapter_name='dreamo')
78
+
79
+ # cfg lora to avoid true image cfg
80
+ cfg_diffuser_lora = convert_flux_lora_to_diffusers(cfg_distill_lora)
81
+ self.load_lora_weights(cfg_diffuser_lora, adapter_name='cfg')
82
+ adapter_names.append('cfg')
83
+ adapter_weights.append(1)
84
+
85
+ # turbo lora to speed up (from 25+ step to 12 step)
86
+ if use_turbo:
87
+ self.load_lora_weights(
88
+ hf_hub_download(
89
+ "alimama-creative/FLUX.1-Turbo-Alpha", "diffusion_pytorch_model.safetensors", local_dir='models'
90
+ ),
91
+ adapter_name='turbo',
92
+ )
93
+ adapter_names.append('turbo')
94
+ adapter_weights.append(1)
95
+
96
+ if version == 'v1':
97
+ # quality loras, one pos, one neg
98
+ quality_lora_pos = convert_flux_lora_to_diffusers(quality_lora_pos)
99
+ self.load_lora_weights(quality_lora_pos, adapter_name='quality_pos')
100
+ adapter_names.append('quality_pos')
101
+ adapter_weights.append(0.15)
102
+ quality_lora_neg = convert_flux_lora_to_diffusers(quality_lora_neg)
103
+ self.load_lora_weights(quality_lora_neg, adapter_name='quality_neg')
104
+ adapter_names.append('quality_neg')
105
+ adapter_weights.append(-0.8)
106
+ elif version == 'v1.1':
107
+ self.load_lora_weights(sft_lora, adapter_name='sft_lora')
108
+ adapter_names.append('sft_lora')
109
+ adapter_weights.append(1)
110
+ self.load_lora_weights(dpo_lora, adapter_name='dpo_lora')
111
+ adapter_names.append('dpo_lora')
112
+ adapter_weights.append(1.25)
113
+
114
+ self.set_adapters(adapter_names, adapter_weights)
115
+ self.fuse_lora(adapter_names=adapter_names, lora_scale=1)
116
+ self.unload_lora_weights()
117
+
118
+ self.t5_embedding = self.t5_embedding.to(device)
119
+ self.task_embedding = self.task_embedding.to(device)
120
+ self.idx_embedding = self.idx_embedding.to(device)
121
+
122
+ def _prepare_t5(self):
123
+ self.text_encoder_2.resize_token_embeddings(len(self.tokenizer_2))
124
+ num_new_token = 10
125
+ new_token_list = [f"[ref#{i}]" for i in range(1, 10)] + ["[res]"]
126
+ self.tokenizer_2.add_tokens(new_token_list, special_tokens=False)
127
+ self.text_encoder_2.resize_token_embeddings(len(self.tokenizer_2))
128
+ input_embedding = self.text_encoder_2.get_input_embeddings().weight.data
129
+ input_embedding[-num_new_token:] = self.t5_embedding.weight.data
130
+
131
+ @staticmethod
132
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype, start_height=0, start_width=0):
133
+ latent_image_ids = torch.zeros(height // 2, width // 2, 3)
134
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + start_height
135
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + start_width
136
+
137
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
138
+
139
+ latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1)
140
+ latent_image_ids = latent_image_ids.reshape(
141
+ batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels
142
+ )
143
+
144
+ return latent_image_ids.to(device=device, dtype=dtype)
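+ # Editorial note (hedged reading): the start_height/start_width offsets above shift the per-patch
+ # positional ids of each reference image so its tokens occupy a coordinate region distinct from the
+ # target latent; the "style" variant below instead assigns one constant id per reference.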
145
+
146
+ @staticmethod
147
+ def _prepare_style_latent_image_ids(batch_size, height, width, device, dtype, start_height=0, start_width=0):
148
+ latent_image_ids = torch.zeros(height // 2, width // 2, 3)
149
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + start_height
150
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + start_width
151
+
152
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
153
+
154
+ latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1)
155
+ latent_image_ids = latent_image_ids.reshape(
156
+ batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels
157
+ )
158
+
159
+ return latent_image_ids.to(device=device, dtype=dtype)
160
+
161
+ @torch.no_grad()
162
+ def __call__(
163
+ self,
164
+ prompt: Union[str, List[str]] = None,
165
+ prompt_2: Optional[Union[str, List[str]]] = None,
166
+ negative_prompt: Union[str, List[str]] = None,
167
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
168
+ true_cfg_scale: float = 1.0,
169
+ true_cfg_start_step: int = 1,
170
+ true_cfg_end_step: int = 1,
171
+ height: Optional[int] = None,
172
+ width: Optional[int] = None,
173
+ num_inference_steps: int = 28,
174
+ sigmas: Optional[List[float]] = None,
175
+ guidance_scale: float = 3.5,
176
+ neg_guidance_scale: float = 3.5,
177
+ num_images_per_prompt: Optional[int] = 1,
178
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
179
+ latents: Optional[torch.FloatTensor] = None,
180
+ prompt_embeds: Optional[torch.FloatTensor] = None,
181
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
182
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
183
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
184
+ output_type: Optional[str] = "pil",
185
+ return_dict: bool = True,
186
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
187
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
188
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
189
+ max_sequence_length: int = 512,
190
+ ref_conds=None,
191
+ first_step_guidance_scale=3.5,
192
+ ):
193
+ r"""
194
+ Function invoked when calling the pipeline for generation.
195
+
196
+ Args:
197
+ prompt (`str` or `List[str]`, *optional*):
198
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
199
+ instead.
200
+ prompt_2 (`str` or `List[str]`, *optional*):
201
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
202
+ will be used instead.
203
+ negative_prompt (`str` or `List[str]`, *optional*):
204
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
205
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is
206
+ not greater than `1`).
207
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
208
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
209
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
210
+ true_cfg_scale (`float`, *optional*, defaults to 1.0):
211
+ When > 1.0 and a `negative_prompt` is provided, enables true classifier-free guidance.
212
+ height (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
213
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
214
+ width (`int`, *optional*, defaults to self.default_sample_size * self.vae_scale_factor):
215
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
216
+ num_inference_steps (`int`, *optional*, defaults to 28):
217
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
218
+ expense of slower inference.
219
+ sigmas (`List[float]`, *optional*):
220
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
221
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
222
+ will be used.
223
+ guidance_scale (`float`, *optional*, defaults to 3.5):
224
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
225
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
226
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
227
+ 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
228
+ usually at the expense of lower image quality.
229
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
230
+ The number of images to generate per prompt.
231
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
232
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
233
+ to make generation deterministic.
234
+ latents (`torch.FloatTensor`, *optional*):
235
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
236
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
237
+ tensor will be generated by sampling using the supplied random `generator`.
238
+ prompt_embeds (`torch.FloatTensor`, *optional*):
239
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
240
+ provided, text embeddings will be generated from `prompt` input argument.
241
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
242
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
243
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
244
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
245
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
246
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
247
+ argument.
248
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
249
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
250
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
251
+ input argument.
252
+ output_type (`str`, *optional*, defaults to `"pil"`):
253
+ The output format of the generated image. Choose between
254
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
255
+ return_dict (`bool`, *optional*, defaults to `True`):
256
+ Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
257
+ joint_attention_kwargs (`dict`, *optional*):
258
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
259
+ `self.processor` in
260
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
261
+ callback_on_step_end (`Callable`, *optional*):
262
+ A function that is called at the end of each denoising step during inference. The function is called
263
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
264
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
265
+ `callback_on_step_end_tensor_inputs`.
266
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
267
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
268
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
269
+ `._callback_tensor_inputs` attribute of your pipeline class.
270
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
271
+
272
+ Examples:
273
+
274
+ Returns:
275
+ [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
276
+ is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
277
+ images.
278
+ """
279
+
280
+ height = height or self.default_sample_size * self.vae_scale_factor
281
+ width = width or self.default_sample_size * self.vae_scale_factor
282
+
283
+ # 1. Check inputs. Raise error if not correct
284
+ self.check_inputs(
285
+ prompt,
286
+ prompt_2,
287
+ height,
288
+ width,
289
+ prompt_embeds=prompt_embeds,
290
+ pooled_prompt_embeds=pooled_prompt_embeds,
291
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
292
+ max_sequence_length=max_sequence_length,
293
+ )
294
+
295
+ self._guidance_scale = guidance_scale
296
+ self._joint_attention_kwargs = joint_attention_kwargs
297
+ self._current_timestep = None
298
+ self._interrupt = False
299
+
300
+ # 2. Define call parameters
301
+ if prompt is not None and isinstance(prompt, str):
302
+ batch_size = 1
303
+ elif prompt is not None and isinstance(prompt, list):
304
+ batch_size = len(prompt)
305
+ else:
306
+ batch_size = prompt_embeds.shape[0]
307
+
308
+ device = self._execution_device
309
+
310
+ lora_scale = (
311
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
312
+ )
313
+ has_neg_prompt = negative_prompt is not None or (
314
+ negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None
315
+ )
316
+ do_true_cfg = true_cfg_scale > 1 and has_neg_prompt
317
+ (
318
+ prompt_embeds,
319
+ pooled_prompt_embeds,
320
+ text_ids,
321
+ ) = self.encode_prompt(
322
+ prompt=prompt,
323
+ prompt_2=prompt_2,
324
+ prompt_embeds=prompt_embeds,
325
+ pooled_prompt_embeds=pooled_prompt_embeds,
326
+ device=device,
327
+ num_images_per_prompt=num_images_per_prompt,
328
+ max_sequence_length=max_sequence_length,
329
+ lora_scale=lora_scale,
330
+ )
331
+ if do_true_cfg:
332
+ (
333
+ negative_prompt_embeds,
334
+ negative_pooled_prompt_embeds,
335
+ _,
336
+ ) = self.encode_prompt(
337
+ prompt=negative_prompt,
338
+ prompt_2=negative_prompt_2,
339
+ prompt_embeds=negative_prompt_embeds,
340
+ pooled_prompt_embeds=negative_pooled_prompt_embeds,
341
+ device=device,
342
+ num_images_per_prompt=num_images_per_prompt,
343
+ max_sequence_length=max_sequence_length,
344
+ lora_scale=lora_scale,
345
+ )
346
+
347
+ # 4. Prepare latent variables
348
+ num_channels_latents = self.transformer.config.in_channels // 4
349
+ latents, latent_image_ids = self.prepare_latents(
350
+ batch_size * num_images_per_prompt,
351
+ num_channels_latents,
352
+ height,
353
+ width,
354
+ prompt_embeds.dtype,
355
+ device,
356
+ generator,
357
+ latents,
358
+ )
359
+
360
+ # 4.1 concat ref tokens to latent
361
+ origin_img_len = latents.shape[1]
362
+ embeddings = repeat(self.task_embedding.weight[1], "c -> n l c", n=batch_size, l=origin_img_len)
363
+ ref_latents = []
364
+ ref_latent_image_idss = []
365
+ start_height = height // 16
366
+ start_width = width // 16
367
+ for ref_cond in ref_conds:
368
+ img = ref_cond['img'] # [b, 3, h, w], range [-1, 1]
369
+ task = ref_cond['task']
370
+ idx = ref_cond['idx']
371
+
372
+ # encode ref with VAE
373
+ img = img.to(latents)
374
+ ref_latent = self.vae.encode(img).latent_dist.sample()
375
+ ref_latent = (ref_latent - self.vae.config.shift_factor) * self.vae.config.scaling_factor
376
+ cur_height = ref_latent.shape[2]
377
+ cur_width = ref_latent.shape[3]
378
+ ref_latent = self._pack_latents(ref_latent, batch_size, num_channels_latents, cur_height, cur_width)
379
+ ref_latent_image_ids = self._prepare_latent_image_ids(
380
+ batch_size, cur_height, cur_width, device, prompt_embeds.dtype, start_height, start_width
381
+ )
382
+ start_height += cur_height // 2
383
+ start_width += cur_width // 2
384
+
385
+ # prepare task_idx_embedding
386
+ task_idx = get_task_embedding_idx(task)
387
+ cur_task_embedding = repeat(
388
+ self.task_embedding.weight[task_idx], "c -> n l c", n=batch_size, l=ref_latent.shape[1]
389
+ )
390
+ cur_idx_embedding = repeat(
391
+ self.idx_embedding.weight[idx], "c -> n l c", n=batch_size, l=ref_latent.shape[1]
392
+ )
393
+ cur_embedding = cur_task_embedding + cur_idx_embedding
394
+
395
+ # concat ref to latent
396
+ embeddings = torch.cat([embeddings, cur_embedding], dim=1)
397
+ ref_latents.append(ref_latent)
398
+ ref_latent_image_idss.append(ref_latent_image_ids)
399
+
400
+ # 5. Prepare timesteps
401
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
402
+ image_seq_len = latents.shape[1]
403
+ mu = calculate_shift(
404
+ image_seq_len,
405
+ self.scheduler.config.get("base_image_seq_len", 256),
406
+ self.scheduler.config.get("max_image_seq_len", 4096),
407
+ self.scheduler.config.get("base_shift", 0.5),
408
+ self.scheduler.config.get("max_shift", 1.15),
409
+ )
410
+ timesteps, num_inference_steps = retrieve_timesteps(
411
+ self.scheduler,
412
+ num_inference_steps,
413
+ device,
414
+ sigmas=sigmas,
415
+ mu=mu,
416
+ )
417
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
418
+ self._num_timesteps = len(timesteps)
419
+
420
+ # handle guidance
421
+ if self.transformer.config.guidance_embeds:
422
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
423
+ guidance = guidance.expand(latents.shape[0])
424
+ else:
425
+ guidance = None
426
+ neg_guidance = torch.full([1], neg_guidance_scale, device=device, dtype=torch.float32)
427
+ neg_guidance = neg_guidance.expand(latents.shape[0])
428
+ first_step_guidance = torch.full([1], first_step_guidance_scale, device=device, dtype=torch.float32)
429
+
430
+ if self.joint_attention_kwargs is None:
431
+ self._joint_attention_kwargs = {}
432
+
433
+ # 6. Denoising loop
434
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
435
+ for i, t in enumerate(timesteps):
436
+ if self.interrupt:
437
+ continue
438
+
439
+ self._current_timestep = t
440
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
441
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
442
+
443
+ noise_pred = self.transformer(
444
+ hidden_states=torch.cat((latents, *ref_latents), dim=1),
445
+ timestep=timestep / 1000,
446
+ guidance=guidance if i > 0 else first_step_guidance,
447
+ pooled_projections=pooled_prompt_embeds,
448
+ encoder_hidden_states=prompt_embeds,
449
+ txt_ids=text_ids,
450
+ img_ids=torch.cat((latent_image_ids, *ref_latent_image_idss), dim=1),
451
+ joint_attention_kwargs=self.joint_attention_kwargs,
452
+ return_dict=False,
453
+ embeddings=embeddings,
454
+ )[0][:, :origin_img_len]
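+ # Editorial note (hedged): the reference latents are appended along the token axis, so the transformer
+ # attends jointly over target and reference tokens; only the first origin_img_len tokens (the target
+ # image) are kept from the prediction above.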
455
+
456
+ if do_true_cfg and i >= true_cfg_start_step and i < true_cfg_end_step:
457
+ neg_noise_pred = self.transformer(
458
+ hidden_states=latents,
459
+ timestep=timestep / 1000,
460
+ guidance=neg_guidance,
461
+ pooled_projections=negative_pooled_prompt_embeds,
462
+ encoder_hidden_states=negative_prompt_embeds,
463
+ txt_ids=text_ids,
464
+ img_ids=latent_image_ids,
465
+ joint_attention_kwargs=self.joint_attention_kwargs,
466
+ return_dict=False,
467
+ )[0]
468
+ noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
469
+
470
+ # compute the previous noisy sample x_t -> x_t-1
471
+ latents_dtype = latents.dtype
472
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
473
+
474
+ if latents.dtype != latents_dtype and torch.backends.mps.is_available():
475
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
476
+ latents = latents.to(latents_dtype)
477
+
478
+ if callback_on_step_end is not None:
479
+ callback_kwargs = {}
480
+ for k in callback_on_step_end_tensor_inputs:
481
+ callback_kwargs[k] = locals()[k]
482
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
483
+
484
+ latents = callback_outputs.pop("latents", latents)
485
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
486
+
487
+ # call the callback, if provided
488
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
489
+ progress_bar.update()
490
+
491
+ self._current_timestep = None
492
+
493
+ if output_type == "latent":
494
+ image = latents
495
+ else:
496
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
497
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
498
+ image = self.vae.decode(latents, return_dict=False)[0]
499
+ image = self.image_processor.postprocess(image, output_type=output_type)
500
+
501
+ # Offload all models
502
+ self.maybe_free_model_hooks()
503
+
504
+ if not return_dict:
505
+ return (image,)
506
+
507
+ return FluxPipelineOutput(images=image)
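
Note: the denoising loop above blends the conditional and negative predictions with a true classifier-free-guidance step. A minimal, self-contained sketch of that blending formula (tensor shapes and the guidance scale are illustrative assumptions, not values taken from the pipeline):

    import torch

    # Stand-ins for the two transformer outputs computed inside the loop.
    noise_pred = torch.randn(1, 4096, 64)      # prediction with the positive prompt + references
    neg_noise_pred = torch.randn(1, 4096, 64)  # prediction with the negative prompt only

    true_cfg_scale = 3.5  # assumed value; the real scale is a pipeline argument

    # Same formula as in the loop: start from the negative prediction and move toward the positive one.
    guided = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)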
dreamo/transformer.py ADDED
@@ -0,0 +1,187 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ # Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
21
+ from diffusers.utils import (
22
+ USE_PEFT_BACKEND,
23
+ logging,
24
+ scale_lora_layers,
25
+ unscale_lora_layers,
26
+ )
27
+
28
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
29
+
30
+
31
+ def flux_transformer_forward(
32
+ self,
33
+ hidden_states: torch.Tensor,
34
+ encoder_hidden_states: torch.Tensor = None,
35
+ pooled_projections: torch.Tensor = None,
36
+ timestep: torch.LongTensor = None,
37
+ img_ids: torch.Tensor = None,
38
+ txt_ids: torch.Tensor = None,
39
+ guidance: torch.Tensor = None,
40
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
41
+ controlnet_block_samples=None,
42
+ controlnet_single_block_samples=None,
43
+ return_dict: bool = True,
44
+ controlnet_blocks_repeat: bool = False,
45
+ embeddings: torch.Tensor = None,
46
+ ) -> Union[torch.Tensor, Transformer2DModelOutput]:
47
+ """
48
+ The [`FluxTransformer2DModel`] forward method.
49
+
50
+ Args:
51
+ hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`):
52
+ Input `hidden_states`.
53
+ encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`):
54
+ Conditional embeddings (embeddings computed from the input conditions such as prompts) to use.
55
+ pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected
56
+ from the embeddings of input conditions.
57
+ timestep ( `torch.LongTensor`):
58
+ Used to indicate denoising step.
59
+ block_controlnet_hidden_states: (`list` of `torch.Tensor`):
60
+ A list of tensors that if specified are added to the residuals of transformer blocks.
61
+ joint_attention_kwargs (`dict`, *optional*):
62
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
63
+ `self.processor` in
64
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
65
+ return_dict (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
67
+ tuple.
68
+
69
+ Returns:
70
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
71
+ `tuple` where the first element is the sample tensor.
72
+ """
73
+ if joint_attention_kwargs is not None:
74
+ joint_attention_kwargs = joint_attention_kwargs.copy()
75
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
76
+ else:
77
+ lora_scale = 1.0
78
+
79
+ if USE_PEFT_BACKEND:
80
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
81
+ scale_lora_layers(self, lora_scale)
82
+ else:
83
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
84
+ logger.warning(
85
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
86
+ )
87
+
88
+ hidden_states = self.x_embedder(hidden_states)
89
+ # add task and idx embedding
90
+ if embeddings is not None:
91
+ hidden_states = hidden_states + embeddings
92
+
93
+ timestep = timestep.to(hidden_states.dtype) * 1000
94
+ guidance = guidance.to(hidden_states.dtype) * 1000 if guidance is not None else None
95
+
96
+ temb = (
97
+ self.time_text_embed(timestep, pooled_projections)
98
+ if guidance is None
99
+ else self.time_text_embed(timestep, guidance, pooled_projections)
100
+ )
101
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
102
+
103
+ if txt_ids.ndim == 3:
104
+ # logger.warning(
105
+ # "Passing `txt_ids` 3d torch.Tensor is deprecated."
106
+ # "Please remove the batch dimension and pass it as a 2d torch Tensor"
107
+ # )
108
+ txt_ids = txt_ids[0]
109
+ if img_ids.ndim == 3:
110
+ # logger.warning(
111
+ # "Passing `img_ids` 3d torch.Tensor is deprecated."
112
+ # "Please remove the batch dimension and pass it as a 2d torch Tensor"
113
+ # )
114
+ img_ids = img_ids[0]
115
+
116
+ ids = torch.cat((txt_ids, img_ids), dim=0)
117
+ image_rotary_emb = self.pos_embed(ids)
118
+
119
+ for index_block, block in enumerate(self.transformer_blocks):
120
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
121
+ encoder_hidden_states, hidden_states = self._gradient_checkpointing_func(
122
+ block,
123
+ hidden_states,
124
+ encoder_hidden_states,
125
+ temb,
126
+ image_rotary_emb,
127
+ )
128
+
129
+ else:
130
+ encoder_hidden_states, hidden_states = block(
131
+ hidden_states=hidden_states,
132
+ encoder_hidden_states=encoder_hidden_states,
133
+ temb=temb,
134
+ image_rotary_emb=image_rotary_emb,
135
+ joint_attention_kwargs=joint_attention_kwargs,
136
+ )
137
+
138
+ # controlnet residual
139
+ if controlnet_block_samples is not None:
140
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
141
+ interval_control = int(np.ceil(interval_control))
142
+ # For Xlabs ControlNet.
143
+ if controlnet_blocks_repeat:
144
+ hidden_states = hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
145
+ else:
146
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
147
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
148
+
149
+ for index_block, block in enumerate(self.single_transformer_blocks):
150
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
151
+ hidden_states = self._gradient_checkpointing_func(
152
+ block,
153
+ hidden_states,
154
+ temb,
155
+ image_rotary_emb,
156
+ )
157
+
158
+ else:
159
+ hidden_states = block(
160
+ hidden_states=hidden_states,
161
+ temb=temb,
162
+ image_rotary_emb=image_rotary_emb,
163
+ joint_attention_kwargs=joint_attention_kwargs,
164
+ )
165
+
166
+ # controlnet residual
167
+ if controlnet_single_block_samples is not None:
168
+ interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
169
+ interval_control = int(np.ceil(interval_control))
170
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
171
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...]
172
+ + controlnet_single_block_samples[index_block // interval_control]
173
+ )
174
+
175
+ hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
176
+
177
+ hidden_states = self.norm_out(hidden_states, temb)
178
+ output = self.proj_out(hidden_states)
179
+
180
+ if USE_PEFT_BACKEND:
181
+ # remove `lora_scale` from each PEFT layer
182
+ unscale_lora_layers(self, lora_scale)
183
+
184
+ if not return_dict:
185
+ return (output,)
186
+
187
+ return Transformer2DModelOutput(sample=output)
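
Note: this module defines a drop-in replacement for the diffusers FluxTransformer2DModel forward that accepts an extra `embeddings` tensor. The file does not show how the function is attached to a model; one plausible way, sketched here as an assumption rather than the pipeline's actual wiring, is to bind it onto a loaded instance with `types.MethodType`:

    import types

    from diffusers import FluxTransformer2DModel
    from dreamo.transformer import flux_transformer_forward

    # Load the stock transformer (model id and subfolder are illustrative).
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev", subfolder="transformer"
    )

    # Bind the patched forward so calls to transformer(...) accept the extra `embeddings` keyword.
    transformer.forward = types.MethodType(flux_transformer_forward, transformer)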
dreamo/utils.py ADDED
@@ -0,0 +1,232 @@
1
+ # Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ import re
17
+
18
+ import cv2
19
+ import numpy as np
20
+ import torch
21
+ from torchvision.utils import make_grid
22
+
23
+
24
+ # from basicsr
25
+ def img2tensor(imgs, bgr2rgb=True, float32=True):
26
+ """Numpy array to tensor.
27
+
28
+ Args:
29
+ imgs (list[ndarray] | ndarray): Input images.
30
+ bgr2rgb (bool): Whether to change bgr to rgb.
31
+ float32 (bool): Whether to change to float32.
32
+
33
+ Returns:
34
+ list[tensor] | tensor: Tensor images. If returned results only have
35
+ one element, just return tensor.
36
+ """
37
+
38
+ def _totensor(img, bgr2rgb, float32):
39
+ if img.shape[2] == 3 and bgr2rgb:
40
+ if img.dtype == 'float64':
41
+ img = img.astype('float32')
42
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
43
+ img = torch.from_numpy(img.transpose(2, 0, 1))
44
+ if float32:
45
+ img = img.float()
46
+ return img
47
+
48
+ if isinstance(imgs, list):
49
+ return [_totensor(img, bgr2rgb, float32) for img in imgs]
50
+ return _totensor(imgs, bgr2rgb, float32)
51
+
52
+
53
+ def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
54
+ """Convert torch Tensors into image numpy arrays.
55
+
56
+ After clamping to [min, max], values will be normalized to [0, 1].
57
+
58
+ Args:
59
+ tensor (Tensor or list[Tensor]): Accept shapes:
60
+ 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
61
+ 2) 3D Tensor of shape (3/1 x H x W);
62
+ 3) 2D Tensor of shape (H x W).
63
+ Tensor channel should be in RGB order.
64
+ rgb2bgr (bool): Whether to change rgb to bgr.
65
+ out_type (numpy type): output types. If ``np.uint8``, transform outputs
66
+ to uint8 type with range [0, 255]; otherwise, float type with
67
+ range [0, 1]. Default: ``np.uint8``.
68
+ min_max (tuple[int]): min and max values for clamp.
69
+
70
+ Returns:
71
+ (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
72
+ shape (H x W). The channel order is BGR.
73
+ """
74
+ if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
75
+ raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
76
+
77
+ if torch.is_tensor(tensor):
78
+ tensor = [tensor]
79
+ result = []
80
+ for _tensor in tensor:
81
+ _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
82
+ _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
83
+
84
+ n_dim = _tensor.dim()
85
+ if n_dim == 4:
86
+ img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
87
+ img_np = img_np.transpose(1, 2, 0)
88
+ if rgb2bgr:
89
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
90
+ elif n_dim == 3:
91
+ img_np = _tensor.numpy()
92
+ img_np = img_np.transpose(1, 2, 0)
93
+ if img_np.shape[2] == 1: # gray image
94
+ img_np = np.squeeze(img_np, axis=2)
95
+ else:
96
+ if rgb2bgr:
97
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
98
+ elif n_dim == 2:
99
+ img_np = _tensor.numpy()
100
+ else:
101
+ raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
102
+ if out_type == np.uint8:
103
+ # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
104
+ img_np = (img_np * 255.0).round()
105
+ img_np = img_np.astype(out_type)
106
+ result.append(img_np)
107
+ if len(result) == 1:
108
+ result = result[0]
109
+ return result
110
+
111
+
112
+ def resize_numpy_image_area(image, area=512 * 512):
113
+ h, w = image.shape[:2]
114
+ k = math.sqrt(area / (h * w))
115
+ h = int(h * k) - (int(h * k) % 16)
116
+ w = int(w * k) - (int(w * k) % 16)
117
+ image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
118
+ return image
119
+
120
+ def resize_numpy_image_long(image, long_edge=768):
121
+ h, w = image.shape[:2]
122
+ if max(h, w) <= long_edge:
123
+ return image
124
+ k = long_edge / max(h, w)
125
+ h = int(h * k)
126
+ w = int(w * k)
127
+ image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
128
+ return image
129
+
130
+
131
+ # reference: https://github.com/huggingface/diffusers/pull/9295/files
132
+ def convert_flux_lora_to_diffusers(old_state_dict):
133
+ new_state_dict = {}
134
+ orig_keys = list(old_state_dict.keys())
135
+
136
+ def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
137
+ down_weight = sds_sd.pop(sds_key)
138
+ up_weight = sds_sd.pop(sds_key.replace(".down.weight", ".up.weight"))
139
+
140
+ # calculate dims if not provided
141
+ num_splits = len(ait_keys)
142
+ if dims is None:
143
+ dims = [up_weight.shape[0] // num_splits] * num_splits
144
+ else:
145
+ assert sum(dims) == up_weight.shape[0]
146
+
147
+ # make ai-toolkit weight
148
+ ait_down_keys = [k + ".lora_A.weight" for k in ait_keys]
149
+ ait_up_keys = [k + ".lora_B.weight" for k in ait_keys]
150
+
151
+ # down_weight is copied to each split
152
+ ait_sd.update({k: down_weight for k in ait_down_keys})
153
+
154
+ # up_weight is split to each split
155
+ ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416
156
+
157
+ for old_key in orig_keys:
158
+ # Handle double_blocks
159
+ if 'double_blocks' in old_key:
160
+ block_num = re.search(r"double_blocks_(\d+)", old_key).group(1)
161
+ new_key = f"transformer.transformer_blocks.{block_num}"
162
+
163
+ if "proj_lora1" in old_key:
164
+ new_key += ".attn.to_out.0"
165
+ elif "proj_lora2" in old_key:
166
+ new_key += ".attn.to_add_out"
167
+ elif "qkv_lora2" in old_key and "up" not in old_key:
168
+ handle_qkv(
169
+ old_state_dict,
170
+ new_state_dict,
171
+ old_key,
172
+ [
173
+ f"transformer.transformer_blocks.{block_num}.attn.add_q_proj",
174
+ f"transformer.transformer_blocks.{block_num}.attn.add_k_proj",
175
+ f"transformer.transformer_blocks.{block_num}.attn.add_v_proj",
176
+ ],
177
+ )
178
+ # continue
179
+ elif "qkv_lora1" in old_key and "up" not in old_key:
180
+ handle_qkv(
181
+ old_state_dict,
182
+ new_state_dict,
183
+ old_key,
184
+ [
185
+ f"transformer.transformer_blocks.{block_num}.attn.to_q",
186
+ f"transformer.transformer_blocks.{block_num}.attn.to_k",
187
+ f"transformer.transformer_blocks.{block_num}.attn.to_v",
188
+ ],
189
+ )
190
+ # continue
191
+
192
+ if "down" in old_key:
193
+ new_key += ".lora_A.weight"
194
+ elif "up" in old_key:
195
+ new_key += ".lora_B.weight"
196
+
197
+ # Handle single_blocks
198
+ elif 'single_blocks' in old_key:
199
+ block_num = re.search(r"single_blocks_(\d+)", old_key).group(1)
200
+ new_key = f"transformer.single_transformer_blocks.{block_num}"
201
+
202
+ if "proj_lora" in old_key:
203
+ new_key += ".proj_out"
204
+ elif "qkv_lora" in old_key and "up" not in old_key:
205
+ handle_qkv(
206
+ old_state_dict,
207
+ new_state_dict,
208
+ old_key,
209
+ [
210
+ f"transformer.single_transformer_blocks.{block_num}.attn.to_q",
211
+ f"transformer.single_transformer_blocks.{block_num}.attn.to_k",
212
+ f"transformer.single_transformer_blocks.{block_num}.attn.to_v",
213
+ ],
214
+ )
215
+
216
+ if "down" in old_key:
217
+ new_key += ".lora_A.weight"
218
+ elif "up" in old_key:
219
+ new_key += ".lora_B.weight"
220
+
221
+ else:
222
+ # Handle other potential key patterns here
223
+ new_key = old_key
224
+
225
+ # Since we already handle qkv above.
226
+ if "qkv" not in old_key and 'embedding' not in old_key:
227
+ new_state_dict[new_key] = old_state_dict.pop(old_key)
228
+
229
+ # if len(old_state_dict) > 0:
230
+ # raise ValueError(f"`old_state_dict` should be at this point but has: {list(old_state_dict.keys())}.")
231
+
232
+ return new_state_dict
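
Note: a short usage sketch of the image helpers above (the input array is synthetic and only for illustration):

    import numpy as np

    from dreamo.utils import img2tensor, resize_numpy_image_area, tensor2img

    # A dummy RGB reference image; in practice this comes from a user upload.
    image = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8)

    # Resize so the area is roughly 512*512 with both sides divisible by 16.
    image = resize_numpy_image_area(image, area=512 * 512)

    # Round-trip through the tensor helpers used elsewhere in the repo.
    tensor = img2tensor(image, bgr2rgb=False).unsqueeze(0) / 255.0  # (1, 3, H, W) in [0, 1]
    back = tensor2img(tensor, rgb2bgr=False, min_max=(0, 1))        # uint8 H x W x 3 again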
dreamo_helpers.py ADDED
@@ -0,0 +1,123 @@
1
+ # dreamo_helpers.py
2
+ # Service module for DreamO, with memory management and support for a dynamic list of references.
3
+
4
+ import os
5
+ import cv2
6
+ import torch
7
+ import numpy as np
8
+ from PIL import Image
9
+ import huggingface_hub
10
+ import gc
11
+ from facexlib.utils.face_restoration_helper import FaceRestoreHelper
12
+ from torchvision.transforms.functional import normalize
13
+ from dreamo.dreamo_pipeline import DreamOPipeline
14
+ from dreamo.utils import img2tensor, tensor2img
15
+ from tools import BEN2
16
+
17
+ class Generator:
18
+ def __init__(self):
19
+ self.cpu_device = torch.device('cpu')
20
+ self.gpu_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
21
+
22
+ print("Carregando modelos DreamO para a CPU...")
23
+ model_root = 'black-forest-labs/FLUX.1-dev'
24
+ self.dreamo_pipeline = DreamOPipeline.from_pretrained(model_root, torch_dtype=torch.bfloat16)
25
+ self.dreamo_pipeline.load_dreamo_model(self.cpu_device, use_turbo=True)
26
+
27
+ self.bg_rm_model = BEN2.BEN_Base().to(self.cpu_device).eval()
28
+ huggingface_hub.hf_hub_download(repo_id='PramaLLC/BEN2', filename='BEN2_Base.pth', local_dir='models')
29
+ self.bg_rm_model.loadcheckpoints('models/BEN2_Base.pth')
30
+
31
+ self.face_helper = FaceRestoreHelper(
32
+ upscale_factor=1, face_size=512, crop_ratio=(1, 1),
33
+ det_model='retinaface_resnet50', save_ext='png', device=self.cpu_device,
34
+ )
35
+ print("Modelos DreamO prontos (na CPU).")
36
+
37
+ def to_gpu(self):
38
+ if self.gpu_device.type == 'cpu': return
39
+ print("Movendo modelos DreamO para a GPU...")
40
+ self.dreamo_pipeline.to(self.gpu_device)
41
+ self.bg_rm_model.to(self.gpu_device)
42
+ self.face_helper.device = self.gpu_device
43
+ self.dreamo_pipeline.t5_embedding.to(self.gpu_device)
44
+ self.dreamo_pipeline.task_embedding.to(self.gpu_device)
45
+ self.dreamo_pipeline.idx_embedding.to(self.gpu_device)
46
+ if hasattr(self.face_helper, 'face_parse'): self.face_helper.face_parse.to(self.gpu_device)
47
+ if hasattr(self.face_helper, 'face_det'): self.face_helper.face_det.to(self.gpu_device)
48
+ print("Modelos DreamO na GPU.")
49
+
50
+ def to_cpu(self):
51
+ if self.gpu_device.type == 'cpu': return
52
+ print("Descarregando modelos DreamO da GPU...")
53
+ self.dreamo_pipeline.to(self.cpu_device)
54
+ self.bg_rm_model.to(self.cpu_device)
55
+ self.face_helper.device = self.cpu_device
56
+ self.dreamo_pipeline.t5_embedding.to(self.cpu_device)
57
+ self.dreamo_pipeline.task_embedding.to(self.cpu_device)
58
+ self.dreamo_pipeline.idx_embedding.to(self.cpu_device)
59
+ if hasattr(self.face_helper, 'face_det'): self.face_helper.face_det.to(self.cpu_device)
60
+ if hasattr(self.face_helper, 'face_parse'): self.face_helper.face_parse.to(self.cpu_device)
61
+ gc.collect()
62
+ if torch.cuda.is_available(): torch.cuda.empty_cache()
63
+
64
+ @torch.inference_mode()
65
+ # <<<<< MODIFICAÇÃO PRINCIPAL: Aceita uma lista de dicionários de referência >>>>>
66
+ def generate_image_with_gpu_management(self, reference_items, prompt, width, height):
67
+ ref_conds = []
68
+
69
+ for idx, item in enumerate(reference_items):
70
+ ref_image_np = item.get('image_np')
71
+ ref_task = item.get('task')
72
+
73
+ if ref_image_np is not None:
74
+ if ref_task == "id":
75
+ ref_image = self.get_align_face(ref_image_np)
76
+ elif ref_task != "style":
77
+ ref_image = self.bg_rm_model.inference(Image.fromarray(ref_image_np))
78
+ else: # Style uses the original image
79
+ ref_image = ref_image_np
80
+
81
+ ref_image_tensor = img2tensor(np.array(ref_image), bgr2rgb=False).unsqueeze(0) / 255.0
82
+ ref_image_tensor = (2 * ref_image_tensor - 1.0).to(self.gpu_device, dtype=torch.bfloat16)
83
+
84
+ # The DreamO model expects indices starting at 1
85
+ ref_conds.append({'img': ref_image_tensor, 'task': ref_task, 'idx': idx + 1})
86
+
87
+ image = self.dreamo_pipeline(
88
+ prompt=prompt,
89
+ width=width,
90
+ height=height,
91
+ num_inference_steps=12,
92
+ guidance_scale=4.5,
93
+ ref_conds=ref_conds,
94
+ generator=torch.Generator(device="cpu").manual_seed(42)
95
+ ).images[0]
96
+ return image
97
+
98
+ @torch.no_grad()
99
+ def get_align_face(self, img):
100
+ # ... (logic unchanged)
101
+ self.face_helper.clean_all()
102
+ image_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
103
+ self.face_helper.read_image(image_bgr)
104
+ self.face_helper.get_face_landmarks_5(only_center_face=True)
105
+ self.face_helper.align_warp_face()
106
+ if len(self.face_helper.cropped_faces) == 0: return None
107
+ align_face = self.face_helper.cropped_faces[0]
108
+ input_tensor = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0
109
+ input_tensor = input_tensor.to(self.gpu_device)
110
+ parsing_out = self.face_helper.face_parse(normalize(input_tensor, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0]
111
+ parsing_out = parsing_out.argmax(dim=1, keepdim=True)
112
+ bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
113
+ bg = sum(parsing_out == i for i in bg_label).bool()
114
+ white_image = torch.ones_like(input_tensor)
115
+ face_features_image = torch.where(bg, white_image, input_tensor)
116
+ return tensor2img(face_features_image, rgb2bgr=False)
117
+
118
+ # --- Singleton instance ---
119
+ print("Initializing the Scene Painter (DreamO Helper)...")
120
+ hf_token = os.getenv('HF_TOKEN')
121
+ if hf_token: huggingface_hub.login(token=hf_token)
122
+ dreamo_generator_singleton = Generator()
123
+ print("Pintor de Cenas (DreamO Helper) pronto.")
inference.py ADDED
@@ -0,0 +1,774 @@
1
+ import argparse
2
+ import os
3
+ import random
4
+ from datetime import datetime
5
+ from pathlib import Path
6
+ from diffusers.utils import logging
7
+ from typing import Optional, List, Union
8
+ import yaml
9
+
10
+ import imageio
11
+ import json
12
+ import numpy as np
13
+ import torch
14
+ import cv2
15
+ from safetensors import safe_open
16
+ from PIL import Image
17
+ from transformers import (
18
+ T5EncoderModel,
19
+ T5Tokenizer,
20
+ AutoModelForCausalLM,
21
+ AutoProcessor,
22
+ AutoTokenizer,
23
+ )
24
+ from huggingface_hub import hf_hub_download
25
+
26
+ from ltx_video.models.autoencoders.causal_video_autoencoder import (
27
+ CausalVideoAutoencoder,
28
+ )
29
+ from ltx_video.models.transformers.symmetric_patchifier import SymmetricPatchifier
30
+ from ltx_video.models.transformers.transformer3d import Transformer3DModel
31
+ from ltx_video.pipelines.pipeline_ltx_video import (
32
+ ConditioningItem,
33
+ LTXVideoPipeline,
34
+ LTXMultiScalePipeline,
35
+ )
36
+ from ltx_video.schedulers.rf import RectifiedFlowScheduler
37
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
38
+ from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
39
+ import ltx_video.pipelines.crf_compressor as crf_compressor
40
+
41
+ MAX_HEIGHT = 720
42
+ MAX_WIDTH = 1280
43
+ MAX_NUM_FRAMES = 257
44
+
45
+ logger = logging.get_logger("LTX-Video")
46
+
47
+
48
+ def get_total_gpu_memory():
49
+ if torch.cuda.is_available():
50
+ total_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
51
+ return total_memory
52
+ return 44
53
+
54
+
55
+ def get_device():
56
+ if torch.cuda.is_available():
57
+ return "cuda"
58
+ elif torch.backends.mps.is_available():
59
+ return "mps"
60
+ return "cuda"
61
+
62
+
63
+ def load_image_to_tensor_with_resize_and_crop(
64
+ image_input: Union[str, Image.Image],
65
+ target_height: int = 512,
66
+ target_width: int = 768,
67
+ just_crop: bool = False,
68
+ ) -> torch.Tensor:
69
+ """Load and process an image into a tensor.
70
+
71
+ Args:
72
+ image_input: Either a file path (str) or a PIL Image object
73
+ target_height: Desired height of output tensor
74
+ target_width: Desired width of output tensor
75
+ just_crop: If True, only crop the image to the target size without resizing
76
+ """
77
+ if isinstance(image_input, str):
78
+ image = Image.open(image_input).convert("RGB")
79
+ elif isinstance(image_input, Image.Image):
80
+ image = image_input
81
+ else:
82
+ raise ValueError("image_input must be either a file path or a PIL Image object")
83
+
84
+ input_width, input_height = image.size
85
+ aspect_ratio_target = target_width / target_height
86
+ aspect_ratio_frame = input_width / input_height
87
+ if aspect_ratio_frame > aspect_ratio_target:
88
+ new_width = int(input_height * aspect_ratio_target)
89
+ new_height = input_height
90
+ x_start = (input_width - new_width) // 2
91
+ y_start = 0
92
+ else:
93
+ new_width = input_width
94
+ new_height = int(input_width / aspect_ratio_target)
95
+ x_start = 0
96
+ y_start = (input_height - new_height) // 2
97
+
98
+ image = image.crop((x_start, y_start, x_start + new_width, y_start + new_height))
99
+ if not just_crop:
100
+ image = image.resize((target_width, target_height))
101
+
102
+ image = np.array(image)
103
+ image = cv2.GaussianBlur(image, (3, 3), 0)
104
+ frame_tensor = torch.from_numpy(image).float()
105
+ frame_tensor = crf_compressor.compress(frame_tensor / 255.0) * 255.0
106
+ frame_tensor = frame_tensor.permute(2, 0, 1)
107
+ frame_tensor = (frame_tensor / 127.5) - 1.0
108
+ # Create 5D tensor: (batch_size=1, channels=3, num_frames=1, height, width)
109
+ return frame_tensor.unsqueeze(0).unsqueeze(2)
110
+
111
+
112
+ def calculate_padding(
113
+ source_height: int, source_width: int, target_height: int, target_width: int
114
+ ) -> tuple[int, int, int, int]:
115
+
116
+ # Calculate total padding needed
117
+ pad_height = target_height - source_height
118
+ pad_width = target_width - source_width
119
+
120
+ # Calculate padding for each side
121
+ pad_top = pad_height // 2
122
+ pad_bottom = pad_height - pad_top # Handles odd padding
123
+ pad_left = pad_width // 2
124
+ pad_right = pad_width - pad_left # Handles odd padding
125
+
126
+ # Return padded tensor
127
+ # Padding format is (left, right, top, bottom)
128
+ padding = (pad_left, pad_right, pad_top, pad_bottom)
129
+ return padding
130
+
131
+
132
+ def convert_prompt_to_filename(text: str, max_len: int = 20) -> str:
133
+ # Remove non-letters and convert to lowercase
134
+ clean_text = "".join(
135
+ char.lower() for char in text if char.isalpha() or char.isspace()
136
+ )
137
+
138
+ # Split into words
139
+ words = clean_text.split()
140
+
141
+ # Build result string keeping track of length
142
+ result = []
143
+ current_length = 0
144
+
145
+ for word in words:
146
+ # Add word length plus 1 for underscore (except for first word)
147
+ new_length = current_length + len(word)
148
+
149
+ if new_length <= max_len:
150
+ result.append(word)
151
+ current_length += len(word)
152
+ else:
153
+ break
154
+
155
+ return "-".join(result)
156
+
157
+
158
+ # Generate output video name
159
+ def get_unique_filename(
160
+ base: str,
161
+ ext: str,
162
+ prompt: str,
163
+ seed: int,
164
+ resolution: tuple[int, int, int],
165
+ dir: Path,
166
+ endswith=None,
167
+ index_range=1000,
168
+ ) -> Path:
169
+ base_filename = f"{base}_{convert_prompt_to_filename(prompt, max_len=30)}_{seed}_{resolution[0]}x{resolution[1]}x{resolution[2]}"
170
+ for i in range(index_range):
171
+ filename = dir / f"{base_filename}_{i}{endswith if endswith else ''}{ext}"
172
+ if not os.path.exists(filename):
173
+ return filename
174
+ raise FileExistsError(
175
+ f"Could not find a unique filename after {index_range} attempts."
176
+ )
177
+
178
+
179
+ def seed_everething(seed: int):
180
+ random.seed(seed)
181
+ np.random.seed(seed)
182
+ torch.manual_seed(seed)
183
+ if torch.cuda.is_available():
184
+ torch.cuda.manual_seed(seed)
185
+ if torch.backends.mps.is_available():
186
+ torch.mps.manual_seed(seed)
187
+
188
+
189
+ def main():
190
+ parser = argparse.ArgumentParser(
191
+ description="Load models from separate directories and run the pipeline."
192
+ )
193
+
194
+ # Directories
195
+ parser.add_argument(
196
+ "--output_path",
197
+ type=str,
198
+ default=None,
199
+ help="Path to the folder to save output video, if None will save in outputs/ directory.",
200
+ )
201
+ parser.add_argument("--seed", type=int, default="171198")
202
+
203
+ # Pipeline parameters
204
+ parser.add_argument(
205
+ "--num_images_per_prompt",
206
+ type=int,
207
+ default=1,
208
+ help="Number of images per prompt",
209
+ )
210
+ parser.add_argument(
211
+ "--image_cond_noise_scale",
212
+ type=float,
213
+ default=0.15,
214
+ help="Amount of noise to add to the conditioned image",
215
+ )
216
+ parser.add_argument(
217
+ "--height",
218
+ type=int,
219
+ default=704,
220
+ help="Height of the output video frames. Optional if an input image provided.",
221
+ )
222
+ parser.add_argument(
223
+ "--width",
224
+ type=int,
225
+ default=1216,
226
+ help="Width of the output video frames. If None will infer from input image.",
227
+ )
228
+ parser.add_argument(
229
+ "--num_frames",
230
+ type=int,
231
+ default=121,
232
+ help="Number of frames to generate in the output video",
233
+ )
234
+ parser.add_argument(
235
+ "--frame_rate", type=int, default=30, help="Frame rate for the output video"
236
+ )
237
+ parser.add_argument(
238
+ "--device",
239
+ default=None,
240
+ help="Device to run inference on. If not specified, will automatically detect and use CUDA or MPS if available, else CPU.",
241
+ )
242
+ parser.add_argument(
243
+ "--pipeline_config",
244
+ type=str,
245
+ default="configs/ltxv-13b-0.9.7-dev.yaml",
246
+ help="The path to the config file for the pipeline, which contains the parameters for the pipeline",
247
+ )
248
+
249
+ # Prompts
250
+ parser.add_argument(
251
+ "--prompt",
252
+ type=str,
253
+ help="Text prompt to guide generation",
254
+ )
255
+ parser.add_argument(
256
+ "--negative_prompt",
257
+ type=str,
258
+ default="worst quality, inconsistent motion, blurry, jittery, distorted",
259
+ help="Negative prompt for undesired features",
260
+ )
261
+
262
+ parser.add_argument(
263
+ "--offload_to_cpu",
264
+ action="store_true",
265
+ help="Offloading unnecessary computations to CPU.",
266
+ )
267
+
268
+ # video-to-video arguments:
269
+ parser.add_argument(
270
+ "--input_media_path",
271
+ type=str,
272
+ default=None,
273
+ help="Path to the input video (or imaage) to be modified using the video-to-video pipeline",
274
+ )
275
+
276
+ # Conditioning arguments
277
+ parser.add_argument(
278
+ "--conditioning_media_paths",
279
+ type=str,
280
+ nargs="*",
281
+ help="List of paths to conditioning media (images or videos). Each path will be used as a conditioning item.",
282
+ )
283
+ parser.add_argument(
284
+ "--conditioning_strengths",
285
+ type=float,
286
+ nargs="*",
287
+ help="List of conditioning strengths (between 0 and 1) for each conditioning item. Must match the number of conditioning items.",
288
+ )
289
+ parser.add_argument(
290
+ "--conditioning_start_frames",
291
+ type=int,
292
+ nargs="*",
293
+ help="List of frame indices where each conditioning item should be applied. Must match the number of conditioning items.",
294
+ )
295
+
296
+ args = parser.parse_args()
297
+ logger.warning(f"Running generation with arguments: {args}")
298
+ infer(**vars(args))
299
+
300
+
301
+ def create_ltx_video_pipeline(
302
+ ckpt_path: str,
303
+ precision: str,
304
+ text_encoder_model_name_or_path: str,
305
+ sampler: Optional[str] = None,
306
+ device: Optional[str] = None,
307
+ enhance_prompt: bool = False,
308
+ prompt_enhancer_image_caption_model_name_or_path: Optional[str] = None,
309
+ prompt_enhancer_llm_model_name_or_path: Optional[str] = None,
310
+ ) -> LTXVideoPipeline:
311
+ ckpt_path = Path(ckpt_path)
312
+ assert os.path.exists(
313
+ ckpt_path
314
+ ), f"Ckpt path provided (--ckpt_path) {ckpt_path} does not exist"
315
+
316
+ with safe_open(ckpt_path, framework="pt") as f:
317
+ metadata = f.metadata()
318
+ config_str = metadata.get("config")
319
+ configs = json.loads(config_str)
320
+ allowed_inference_steps = configs.get("allowed_inference_steps", None)
321
+
322
+ vae = CausalVideoAutoencoder.from_pretrained(ckpt_path)
323
+ transformer = Transformer3DModel.from_pretrained(ckpt_path)
324
+
325
+ # Use constructor if sampler is specified, otherwise use from_pretrained
326
+ if sampler == "from_checkpoint" or not sampler:
327
+ scheduler = RectifiedFlowScheduler.from_pretrained(ckpt_path)
328
+ else:
329
+ scheduler = RectifiedFlowScheduler(
330
+ sampler=("Uniform" if sampler.lower() == "uniform" else "LinearQuadratic")
331
+ )
332
+
333
+ text_encoder = T5EncoderModel.from_pretrained(
334
+ text_encoder_model_name_or_path, subfolder="text_encoder"
335
+ )
336
+ patchifier = SymmetricPatchifier(patch_size=1)
337
+ tokenizer = T5Tokenizer.from_pretrained(
338
+ text_encoder_model_name_or_path, subfolder="tokenizer"
339
+ )
340
+
341
+ transformer = transformer.to(device)
342
+ vae = vae.to(device)
343
+ text_encoder = text_encoder.to(device)
344
+
345
+ if enhance_prompt:
346
+ prompt_enhancer_image_caption_model = AutoModelForCausalLM.from_pretrained(
347
+ prompt_enhancer_image_caption_model_name_or_path, trust_remote_code=True
348
+ )
349
+ prompt_enhancer_image_caption_processor = AutoProcessor.from_pretrained(
350
+ prompt_enhancer_image_caption_model_name_or_path, trust_remote_code=True
351
+ )
352
+ prompt_enhancer_llm_model = AutoModelForCausalLM.from_pretrained(
353
+ prompt_enhancer_llm_model_name_or_path,
354
+ torch_dtype="bfloat16",
355
+ )
356
+ prompt_enhancer_llm_tokenizer = AutoTokenizer.from_pretrained(
357
+ prompt_enhancer_llm_model_name_or_path,
358
+ )
359
+ else:
360
+ prompt_enhancer_image_caption_model = None
361
+ prompt_enhancer_image_caption_processor = None
362
+ prompt_enhancer_llm_model = None
363
+ prompt_enhancer_llm_tokenizer = None
364
+
365
+ vae = vae.to(torch.bfloat16)
366
+ if precision == "bfloat16" and transformer.dtype != torch.bfloat16:
367
+ transformer = transformer.to(torch.bfloat16)
368
+ text_encoder = text_encoder.to(torch.bfloat16)
369
+
370
+ # Use submodels for the pipeline
371
+ submodel_dict = {
372
+ "transformer": transformer,
373
+ "patchifier": patchifier,
374
+ "text_encoder": text_encoder,
375
+ "tokenizer": tokenizer,
376
+ "scheduler": scheduler,
377
+ "vae": vae,
378
+ "prompt_enhancer_image_caption_model": prompt_enhancer_image_caption_model,
379
+ "prompt_enhancer_image_caption_processor": prompt_enhancer_image_caption_processor,
380
+ "prompt_enhancer_llm_model": prompt_enhancer_llm_model,
381
+ "prompt_enhancer_llm_tokenizer": prompt_enhancer_llm_tokenizer,
382
+ "allowed_inference_steps": allowed_inference_steps,
383
+ }
384
+
385
+ pipeline = LTXVideoPipeline(**submodel_dict)
386
+ pipeline = pipeline.to(device)
387
+ return pipeline
388
+
389
+
390
+ def create_latent_upsampler(latent_upsampler_model_path: str, device: str):
391
+ latent_upsampler = LatentUpsampler.from_pretrained(latent_upsampler_model_path)
392
+ latent_upsampler.to(device)
393
+ latent_upsampler.eval()
394
+ return latent_upsampler
395
+
396
+
397
+ def infer(
398
+ output_path: Optional[str],
399
+ seed: int,
400
+ pipeline_config: str,
401
+ image_cond_noise_scale: float,
402
+ height: Optional[int],
403
+ width: Optional[int],
404
+ num_frames: int,
405
+ frame_rate: int,
406
+ prompt: str,
407
+ negative_prompt: str,
408
+ offload_to_cpu: bool,
409
+ input_media_path: Optional[str] = None,
410
+ conditioning_media_paths: Optional[List[str]] = None,
411
+ conditioning_strengths: Optional[List[float]] = None,
412
+ conditioning_start_frames: Optional[List[int]] = None,
413
+ device: Optional[str] = None,
414
+ **kwargs,
415
+ ):
416
+ # check if pipeline_config is a file
417
+ if not os.path.isfile(pipeline_config):
418
+ raise ValueError(f"Pipeline config file {pipeline_config} does not exist")
419
+ with open(pipeline_config, "r") as f:
420
+ pipeline_config = yaml.safe_load(f)
421
+
422
+ models_dir = "MODEL_DIR"
423
+
424
+ ltxv_model_name_or_path = pipeline_config["checkpoint_path"]
425
+ if not os.path.isfile(ltxv_model_name_or_path):
426
+ ltxv_model_path = hf_hub_download(
427
+ repo_id="Lightricks/LTX-Video",
428
+ filename=ltxv_model_name_or_path,
429
+ local_dir=models_dir,
430
+ repo_type="model",
431
+ )
432
+ else:
433
+ ltxv_model_path = ltxv_model_name_or_path
434
+
435
+ spatial_upscaler_model_name_or_path = pipeline_config.get(
436
+ "spatial_upscaler_model_path"
437
+ )
438
+ if spatial_upscaler_model_name_or_path and not os.path.isfile(
439
+ spatial_upscaler_model_name_or_path
440
+ ):
441
+ spatial_upscaler_model_path = hf_hub_download(
442
+ repo_id="Lightricks/LTX-Video",
443
+ filename=spatial_upscaler_model_name_or_path,
444
+ local_dir=models_dir,
445
+ repo_type="model",
446
+ )
447
+ else:
448
+ spatial_upscaler_model_path = spatial_upscaler_model_name_or_path
449
+
450
+ if kwargs.get("input_image_path", None):
451
+ logger.warning(
452
+ "Please use conditioning_media_paths instead of input_image_path."
453
+ )
454
+ assert not conditioning_media_paths and not conditioning_start_frames
455
+ conditioning_media_paths = [kwargs["input_image_path"]]
456
+ conditioning_start_frames = [0]
457
+
458
+ # Validate conditioning arguments
459
+ if conditioning_media_paths:
460
+ # Use default strengths of 1.0
461
+ if not conditioning_strengths:
462
+ conditioning_strengths = [1.0] * len(conditioning_media_paths)
463
+ if not conditioning_start_frames:
464
+ raise ValueError(
465
+ "If `conditioning_media_paths` is provided, "
466
+ "`conditioning_start_frames` must also be provided"
467
+ )
468
+ if len(conditioning_media_paths) != len(conditioning_strengths) or len(
469
+ conditioning_media_paths
470
+ ) != len(conditioning_start_frames):
471
+ raise ValueError(
472
+ "`conditioning_media_paths`, `conditioning_strengths`, "
473
+ "and `conditioning_start_frames` must have the same length"
474
+ )
475
+ if any(s < 0 or s > 1 for s in conditioning_strengths):
476
+ raise ValueError("All conditioning strengths must be between 0 and 1")
477
+ if any(f < 0 or f >= num_frames for f in conditioning_start_frames):
478
+ raise ValueError(
479
+ f"All conditioning start frames must be between 0 and {num_frames-1}"
480
+ )
481
+
482
+ seed_everething(seed)
483
+ if offload_to_cpu and not torch.cuda.is_available():
484
+ logger.warning(
485
+ "offload_to_cpu is set to True, but offloading will not occur since the model is already running on CPU."
486
+ )
487
+ offload_to_cpu = False
488
+ else:
489
+ offload_to_cpu = offload_to_cpu and get_total_gpu_memory() < 30
490
+
491
+ output_dir = (
492
+ Path(output_path)
493
+ if output_path
494
+ else Path(f"outputs/{datetime.today().strftime('%Y-%m-%d')}")
495
+ )
496
+ output_dir.mkdir(parents=True, exist_ok=True)
497
+
498
+ # Adjust dimensions to be divisible by 32 and num_frames to be (N * 8 + 1)
499
+ height_padded = ((height - 1) // 32 + 1) * 32
500
+ width_padded = ((width - 1) // 32 + 1) * 32
501
+ num_frames_padded = ((num_frames - 2) // 8 + 1) * 8 + 1
502
+
503
+ padding = calculate_padding(height, width, height_padded, width_padded)
504
+
505
+ logger.warning(
506
+ f"Padded dimensions: {height_padded}x{width_padded}x{num_frames_padded}"
507
+ )
508
+
509
+ prompt_enhancement_words_threshold = pipeline_config[
510
+ "prompt_enhancement_words_threshold"
511
+ ]
512
+
513
+ prompt_word_count = len(prompt.split())
514
+ enhance_prompt = (
515
+ prompt_enhancement_words_threshold > 0
516
+ and prompt_word_count < prompt_enhancement_words_threshold
517
+ )
518
+
519
+ if prompt_enhancement_words_threshold > 0 and not enhance_prompt:
520
+ logger.info(
521
+ f"Prompt has {prompt_word_count} words, which exceeds the threshold of {prompt_enhancement_words_threshold}. Prompt enhancement disabled."
522
+ )
523
+
524
+ precision = pipeline_config["precision"]
525
+ text_encoder_model_name_or_path = pipeline_config["text_encoder_model_name_or_path"]
526
+ sampler = pipeline_config["sampler"]
527
+ prompt_enhancer_image_caption_model_name_or_path = pipeline_config[
528
+ "prompt_enhancer_image_caption_model_name_or_path"
529
+ ]
530
+ prompt_enhancer_llm_model_name_or_path = pipeline_config[
531
+ "prompt_enhancer_llm_model_name_or_path"
532
+ ]
533
+
534
+ pipeline = create_ltx_video_pipeline(
535
+ ckpt_path=ltxv_model_path,
536
+ precision=precision,
537
+ text_encoder_model_name_or_path=text_encoder_model_name_or_path,
538
+ sampler=sampler,
539
+ device=kwargs.get("device", get_device()),
540
+ enhance_prompt=enhance_prompt,
541
+ prompt_enhancer_image_caption_model_name_or_path=prompt_enhancer_image_caption_model_name_or_path,
542
+ prompt_enhancer_llm_model_name_or_path=prompt_enhancer_llm_model_name_or_path,
543
+ )
544
+
545
+ if pipeline_config.get("pipeline_type", None) == "multi-scale":
546
+ if not spatial_upscaler_model_path:
547
+ raise ValueError(
548
+ "spatial upscaler model path is missing from pipeline config file and is required for multi-scale rendering"
549
+ )
550
+ latent_upsampler = create_latent_upsampler(
551
+ spatial_upscaler_model_path, pipeline.device
552
+ )
553
+ pipeline = LTXMultiScalePipeline(pipeline, latent_upsampler=latent_upsampler)
554
+
555
+ media_item = None
556
+ if input_media_path:
557
+ media_item = load_media_file(
558
+ media_path=input_media_path,
559
+ height=height,
560
+ width=width,
561
+ max_frames=num_frames_padded,
562
+ padding=padding,
563
+ )
564
+
565
+ conditioning_items = (
566
+ prepare_conditioning(
567
+ conditioning_media_paths=conditioning_media_paths,
568
+ conditioning_strengths=conditioning_strengths,
569
+ conditioning_start_frames=conditioning_start_frames,
570
+ height=height,
571
+ width=width,
572
+ num_frames=num_frames,
573
+ padding=padding,
574
+ pipeline=pipeline,
575
+ )
576
+ if conditioning_media_paths
577
+ else None
578
+ )
579
+
580
+ stg_mode = pipeline_config.get("stg_mode", "attention_values")
581
+ del pipeline_config["stg_mode"]
582
+ if stg_mode.lower() == "stg_av" or stg_mode.lower() == "attention_values":
583
+ skip_layer_strategy = SkipLayerStrategy.AttentionValues
584
+ elif stg_mode.lower() == "stg_as" or stg_mode.lower() == "attention_skip":
585
+ skip_layer_strategy = SkipLayerStrategy.AttentionSkip
586
+ elif stg_mode.lower() == "stg_r" or stg_mode.lower() == "residual":
587
+ skip_layer_strategy = SkipLayerStrategy.Residual
588
+ elif stg_mode.lower() == "stg_t" or stg_mode.lower() == "transformer_block":
589
+ skip_layer_strategy = SkipLayerStrategy.TransformerBlock
590
+ else:
591
+ raise ValueError(f"Invalid spatiotemporal guidance mode: {stg_mode}")
592
+
593
+ # Prepare input for the pipeline
594
+ sample = {
595
+ "prompt": prompt,
596
+ "prompt_attention_mask": None,
597
+ "negative_prompt": negative_prompt,
598
+ "negative_prompt_attention_mask": None,
599
+ }
600
+
601
+ device = device or get_device()
602
+ generator = torch.Generator(device=device).manual_seed(seed)
603
+
604
+ images = pipeline(
605
+ **pipeline_config,
606
+ skip_layer_strategy=skip_layer_strategy,
607
+ generator=generator,
608
+ output_type="pt",
609
+ callback_on_step_end=None,
610
+ height=height_padded,
611
+ width=width_padded,
612
+ num_frames=num_frames_padded,
613
+ frame_rate=frame_rate,
614
+ **sample,
615
+ media_items=media_item,
616
+ conditioning_items=conditioning_items,
617
+ is_video=True,
618
+ vae_per_channel_normalize=True,
619
+ image_cond_noise_scale=image_cond_noise_scale,
620
+ mixed_precision=(precision == "mixed_precision"),
621
+ offload_to_cpu=offload_to_cpu,
622
+ device=device,
623
+ enhance_prompt=enhance_prompt,
624
+ ).images
625
+
626
+ # Crop the padded images to the desired resolution and number of frames
627
+ (pad_left, pad_right, pad_top, pad_bottom) = padding
628
+ pad_bottom = -pad_bottom
629
+ pad_right = -pad_right
630
+ if pad_bottom == 0:
631
+ pad_bottom = images.shape[3]
632
+ if pad_right == 0:
633
+ pad_right = images.shape[4]
634
+ images = images[:, :, :num_frames, pad_top:pad_bottom, pad_left:pad_right]
635
+
636
+ for i in range(images.shape[0]):
637
+ # Gathering from B, C, F, H, W to C, F, H, W and then permuting to F, H, W, C
638
+ video_np = images[i].permute(1, 2, 3, 0).cpu().float().numpy()
639
+ # Unnormalizing images to [0, 255] range
640
+ video_np = (video_np * 255).astype(np.uint8)
641
+ fps = frame_rate
642
+ height, width = video_np.shape[1:3]
643
+ # In case a single image is generated
644
+ if video_np.shape[0] == 1:
645
+ output_filename = get_unique_filename(
646
+ f"image_output_{i}",
647
+ ".png",
648
+ prompt=prompt,
649
+ seed=seed,
650
+ resolution=(height, width, num_frames),
651
+ dir=output_dir,
652
+ )
653
+ imageio.imwrite(output_filename, video_np[0])
654
+ else:
655
+ output_filename = get_unique_filename(
656
+ f"video_output_{i}",
657
+ ".mp4",
658
+ prompt=prompt,
659
+ seed=seed,
660
+ resolution=(height, width, num_frames),
661
+ dir=output_dir,
662
+ )
663
+
664
+ # Write video
665
+ with imageio.get_writer(output_filename, fps=fps) as video:
666
+ for frame in video_np:
667
+ video.append_data(frame)
668
+
669
+ logger.warning(f"Output saved to {output_filename}")
670
+
671
+
672
+ def prepare_conditioning(
673
+ conditioning_media_paths: List[str],
674
+ conditioning_strengths: List[float],
675
+ conditioning_start_frames: List[int],
676
+ height: int,
677
+ width: int,
678
+ num_frames: int,
679
+ padding: tuple[int, int, int, int],
680
+ pipeline: LTXVideoPipeline,
681
+ ) -> Optional[List[ConditioningItem]]:
682
+ """Prepare conditioning items based on input media paths and their parameters.
683
+
684
+ Args:
685
+ conditioning_media_paths: List of paths to conditioning media (images or videos)
686
+ conditioning_strengths: List of conditioning strengths for each media item
687
+ conditioning_start_frames: List of frame indices where each item should be applied
688
+ height: Height of the output frames
689
+ width: Width of the output frames
690
+ num_frames: Number of frames in the output video
691
+ padding: Padding to apply to the frames
692
+ pipeline: LTXVideoPipeline object used for condition video trimming
693
+
694
+ Returns:
695
+ A list of ConditioningItem objects.
696
+ """
697
+ conditioning_items = []
698
+ for path, strength, start_frame in zip(
699
+ conditioning_media_paths, conditioning_strengths, conditioning_start_frames
700
+ ):
701
+ num_input_frames = orig_num_input_frames = get_media_num_frames(path)
702
+ if hasattr(pipeline, "trim_conditioning_sequence") and callable(
703
+ getattr(pipeline, "trim_conditioning_sequence")
704
+ ):
705
+ num_input_frames = pipeline.trim_conditioning_sequence(
706
+ start_frame, orig_num_input_frames, num_frames
707
+ )
708
+ if num_input_frames < orig_num_input_frames:
709
+ logger.warning(
710
+ f"Trimming conditioning video {path} from {orig_num_input_frames} to {num_input_frames} frames."
711
+ )
712
+
713
+ media_tensor = load_media_file(
714
+ media_path=path,
715
+ height=height,
716
+ width=width,
717
+ max_frames=num_input_frames,
718
+ padding=padding,
719
+ just_crop=True,
720
+ )
721
+ conditioning_items.append(ConditioningItem(media_tensor, start_frame, strength))
722
+ return conditioning_items
723
+
724
+
725
+ def get_media_num_frames(media_path: str) -> int:
726
+ is_video = any(
727
+ media_path.lower().endswith(ext) for ext in [".mp4", ".avi", ".mov", ".mkv"]
728
+ )
729
+ num_frames = 1
730
+ if is_video:
731
+ reader = imageio.get_reader(media_path)
732
+ num_frames = reader.count_frames()
733
+ reader.close()
734
+ return num_frames
735
+
736
+
737
+ def load_media_file(
738
+ media_path: str,
739
+ height: int,
740
+ width: int,
741
+ max_frames: int,
742
+ padding: tuple[int, int, int, int],
743
+ just_crop: bool = False,
744
+ ) -> torch.Tensor:
745
+ is_video = any(
746
+ media_path.lower().endswith(ext) for ext in [".mp4", ".avi", ".mov", ".mkv"]
747
+ )
748
+ if is_video:
749
+ reader = imageio.get_reader(media_path)
750
+ num_input_frames = min(reader.count_frames(), max_frames)
751
+
752
+ # Read and preprocess the relevant frames from the video file.
753
+ frames = []
754
+ for i in range(num_input_frames):
755
+ frame = Image.fromarray(reader.get_data(i))
756
+ frame_tensor = load_image_to_tensor_with_resize_and_crop(
757
+ frame, height, width, just_crop=just_crop
758
+ )
759
+ frame_tensor = torch.nn.functional.pad(frame_tensor, padding)
760
+ frames.append(frame_tensor)
761
+ reader.close()
762
+
763
+ # Stack frames along the temporal dimension
764
+ media_tensor = torch.cat(frames, dim=2)
765
+ else: # Input image
766
+ media_tensor = load_image_to_tensor_with_resize_and_crop(
767
+ media_path, height, width, just_crop=just_crop
768
+ )
769
+ media_tensor = torch.nn.functional.pad(media_tensor, padding)
770
+ return media_tensor
771
+
772
+
773
+ if __name__ == "__main__":
774
+ main()
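
Note: besides the CLI entry point, the module can be driven programmatically by calling infer() with the same keyword arguments main() collects from argparse. A hedged sketch with illustrative values (the config path is one of the files added in this commit; prompt and sizes are placeholders):

    from inference import infer

    infer(
        output_path=None,  # defaults to outputs/<date>/
        seed=171198,
        pipeline_config="configs/ltxv-13b-0.9.7-dev.yaml",
        image_cond_noise_scale=0.15,
        height=704,
        width=1216,
        num_frames=121,
        frame_rate=30,
        prompt="a slow dolly shot of a lighthouse at dusk",
        negative_prompt="worst quality, inconsistent motion, blurry, jittery, distorted",
        offload_to_cpu=False,
    )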
ltx_video/__init__.py ADDED
File without changes
ltx_video/models/__init__.py ADDED
File without changes
ltx_video/models/autoencoders/__init__.py ADDED
File without changes
ltx_video/models/autoencoders/causal_conv3d.py ADDED
@@ -0,0 +1,63 @@
1
+ from typing import Tuple, Union
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ class CausalConv3d(nn.Module):
8
+ def __init__(
9
+ self,
10
+ in_channels,
11
+ out_channels,
12
+ kernel_size: int = 3,
13
+ stride: Union[int, Tuple[int]] = 1,
14
+ dilation: int = 1,
15
+ groups: int = 1,
16
+ spatial_padding_mode: str = "zeros",
17
+ **kwargs,
18
+ ):
19
+ super().__init__()
20
+
21
+ self.in_channels = in_channels
22
+ self.out_channels = out_channels
23
+
24
+ kernel_size = (kernel_size, kernel_size, kernel_size)
25
+ self.time_kernel_size = kernel_size[0]
26
+
27
+ dilation = (dilation, 1, 1)
28
+
29
+ height_pad = kernel_size[1] // 2
30
+ width_pad = kernel_size[2] // 2
31
+ padding = (0, height_pad, width_pad)
32
+
33
+ self.conv = nn.Conv3d(
34
+ in_channels,
35
+ out_channels,
36
+ kernel_size,
37
+ stride=stride,
38
+ dilation=dilation,
39
+ padding=padding,
40
+ padding_mode=spatial_padding_mode,
41
+ groups=groups,
42
+ )
43
+
44
+ def forward(self, x, causal: bool = True):
45
+ if causal:
46
+ first_frame_pad = x[:, :, :1, :, :].repeat(
47
+ (1, 1, self.time_kernel_size - 1, 1, 1)
48
+ )
49
+ x = torch.concatenate((first_frame_pad, x), dim=2)
50
+ else:
51
+ first_frame_pad = x[:, :, :1, :, :].repeat(
52
+ (1, 1, (self.time_kernel_size - 1) // 2, 1, 1)
53
+ )
54
+ last_frame_pad = x[:, :, -1:, :, :].repeat(
55
+ (1, 1, (self.time_kernel_size - 1) // 2, 1, 1)
56
+ )
57
+ x = torch.concatenate((first_frame_pad, x, last_frame_pad), dim=2)
58
+ x = self.conv(x)
59
+ return x
60
+
61
+ @property
62
+ def weight(self):
63
+ return self.conv.weight
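
Note: in the causal branch the first frame is repeated time_kernel_size - 1 times before the convolution, so a clip with T input frames keeps T output frames and frame t never sees later frames. A small sketch (channel counts and clip length are arbitrary):

    import torch

    from ltx_video.models.autoencoders.causal_conv3d import CausalConv3d

    conv = CausalConv3d(in_channels=3, out_channels=8, kernel_size=3, stride=1)

    x = torch.randn(1, 3, 16, 64, 64)  # (batch, channels, frames, height, width)
    y = conv(x, causal=True)

    # Two copies of the first frame are prepended, so a temporal kernel of 3
    # with no temporal padding still yields 16 output frames.
    print(y.shape)  # torch.Size([1, 8, 16, 64, 64])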
ltx_video/models/autoencoders/causal_video_autoencoder.py ADDED
@@ -0,0 +1,1398 @@
1
+ import json
2
+ import os
3
+ from functools import partial
4
+ from types import SimpleNamespace
5
+ from typing import Any, Mapping, Optional, Tuple, Union, List
6
+ from pathlib import Path
7
+
8
+ import torch
9
+ import numpy as np
10
+ from einops import rearrange
11
+ from torch import nn
12
+ from diffusers.utils import logging
13
+ import torch.nn.functional as F
14
+ from diffusers.models.embeddings import PixArtAlphaCombinedTimestepSizeEmbeddings
15
+ from safetensors import safe_open
16
+
17
+
18
+ from ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd, make_linear_nd
19
+ from ltx_video.models.autoencoders.pixel_norm import PixelNorm
20
+ from ltx_video.models.autoencoders.pixel_shuffle import PixelShuffleND
21
+ from ltx_video.models.autoencoders.vae import AutoencoderKLWrapper
22
+ from ltx_video.models.transformers.attention import Attention
23
+ from ltx_video.utils.diffusers_config_mapping import (
24
+ diffusers_and_ours_config_mapping,
25
+ make_hashable_key,
26
+ VAE_KEYS_RENAME_DICT,
27
+ )
28
+
29
+ PER_CHANNEL_STATISTICS_PREFIX = "per_channel_statistics."
30
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
+
32
+
33
+ class CausalVideoAutoencoder(AutoencoderKLWrapper):
34
+ @classmethod
35
+ def from_pretrained(
36
+ cls,
37
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
38
+ *args,
39
+ **kwargs,
40
+ ):
41
+ pretrained_model_name_or_path = Path(pretrained_model_name_or_path)
42
+ if (
43
+ pretrained_model_name_or_path.is_dir()
44
+ and (pretrained_model_name_or_path / "autoencoder.pth").exists()
45
+ ):
46
+ config_local_path = pretrained_model_name_or_path / "config.json"
47
+ config = cls.load_config(config_local_path, **kwargs)
48
+
49
+ model_local_path = pretrained_model_name_or_path / "autoencoder.pth"
50
+ state_dict = torch.load(model_local_path, map_location=torch.device("cpu"))
51
+
52
+ statistics_local_path = (
53
+ pretrained_model_name_or_path / "per_channel_statistics.json"
54
+ )
55
+ if statistics_local_path.exists():
56
+ with open(statistics_local_path, "r") as file:
57
+ data = json.load(file)
58
+ transposed_data = list(zip(*data["data"]))
59
+ data_dict = {
60
+ col: torch.tensor(vals)
61
+ for col, vals in zip(data["columns"], transposed_data)
62
+ }
63
+ std_of_means = data_dict["std-of-means"]
64
+ mean_of_means = data_dict.get(
65
+ "mean-of-means", torch.zeros_like(data_dict["std-of-means"])
66
+ )
67
+ state_dict[f"{PER_CHANNEL_STATISTICS_PREFIX}std-of-means"] = (
68
+ std_of_means
69
+ )
70
+ state_dict[f"{PER_CHANNEL_STATISTICS_PREFIX}mean-of-means"] = (
71
+ mean_of_means
72
+ )
73
+
74
+ elif pretrained_model_name_or_path.is_dir():
75
+ config_path = pretrained_model_name_or_path / "vae" / "config.json"
76
+ with open(config_path, "r") as f:
77
+ config = make_hashable_key(json.load(f))
78
+
79
+ assert config in diffusers_and_ours_config_mapping, (
80
+ "Provided diffusers checkpoint config for VAE is not suppported. "
81
+ "We only support diffusers configs found in Lightricks/LTX-Video."
82
+ )
83
+
84
+ config = diffusers_and_ours_config_mapping[config]
85
+
86
+ state_dict_path = (
87
+ pretrained_model_name_or_path
88
+ / "vae"
89
+ / "diffusion_pytorch_model.safetensors"
90
+ )
91
+
92
+ state_dict = {}
93
+ with safe_open(state_dict_path, framework="pt", device="cpu") as f:
94
+ for k in f.keys():
95
+ state_dict[k] = f.get_tensor(k)
96
+ for key in list(state_dict.keys()):
97
+ new_key = key
98
+ for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items():
99
+ new_key = new_key.replace(replace_key, rename_key)
100
+
101
+ state_dict[new_key] = state_dict.pop(key)
102
+
103
+ elif pretrained_model_name_or_path.is_file() and str(
104
+ pretrained_model_name_or_path
105
+ ).endswith(".safetensors"):
106
+ state_dict = {}
107
+ with safe_open(
108
+ pretrained_model_name_or_path, framework="pt", device="cpu"
109
+ ) as f:
110
+ metadata = f.metadata()
111
+ for k in f.keys():
112
+ state_dict[k] = f.get_tensor(k)
113
+ configs = json.loads(metadata["config"])
114
+ config = configs["vae"]
115
+
116
+ video_vae = cls.from_config(config)
117
+ if "torch_dtype" in kwargs:
118
+ video_vae.to(kwargs["torch_dtype"])
119
+ video_vae.load_state_dict(state_dict)
120
+ return video_vae
121
+
122
+ @staticmethod
123
+ def from_config(config):
124
+ assert (
125
+ config["_class_name"] == "CausalVideoAutoencoder"
126
+ ), "config must have _class_name=CausalVideoAutoencoder"
127
+ if isinstance(config["dims"], list):
128
+ config["dims"] = tuple(config["dims"])
129
+
130
+ assert config["dims"] in [2, 3, (2, 1)], "dims must be 2, 3 or (2, 1)"
131
+
132
+ double_z = config.get("double_z", True)
133
+ latent_log_var = config.get(
134
+ "latent_log_var", "per_channel" if double_z else "none"
135
+ )
136
+ use_quant_conv = config.get("use_quant_conv", True)
137
+ normalize_latent_channels = config.get("normalize_latent_channels", False)
138
+
139
+ if use_quant_conv and latent_log_var in ["uniform", "constant"]:
140
+ raise ValueError(
141
+ f"latent_log_var={latent_log_var} requires use_quant_conv=False"
142
+ )
143
+
144
+ encoder = Encoder(
145
+ dims=config["dims"],
146
+ in_channels=config.get("in_channels", 3),
147
+ out_channels=config["latent_channels"],
148
+ blocks=config.get("encoder_blocks", config.get("blocks")),
149
+ patch_size=config.get("patch_size", 1),
150
+ latent_log_var=latent_log_var,
151
+ norm_layer=config.get("norm_layer", "group_norm"),
152
+ base_channels=config.get("encoder_base_channels", 128),
153
+ spatial_padding_mode=config.get("spatial_padding_mode", "zeros"),
154
+ )
155
+
156
+ decoder = Decoder(
157
+ dims=config["dims"],
158
+ in_channels=config["latent_channels"],
159
+ out_channels=config.get("out_channels", 3),
160
+ blocks=config.get("decoder_blocks", config.get("blocks")),
161
+ patch_size=config.get("patch_size", 1),
162
+ norm_layer=config.get("norm_layer", "group_norm"),
163
+ causal=config.get("causal_decoder", False),
164
+ timestep_conditioning=config.get("timestep_conditioning", False),
165
+ base_channels=config.get("decoder_base_channels", 128),
166
+ spatial_padding_mode=config.get("spatial_padding_mode", "zeros"),
167
+ )
168
+
169
+ dims = config["dims"]
170
+ return CausalVideoAutoencoder(
171
+ encoder=encoder,
172
+ decoder=decoder,
173
+ latent_channels=config["latent_channels"],
174
+ dims=dims,
175
+ use_quant_conv=use_quant_conv,
176
+ normalize_latent_channels=normalize_latent_channels,
177
+ )
178
+
179
+ @property
180
+ def config(self):
181
+ return SimpleNamespace(
182
+ _class_name="CausalVideoAutoencoder",
183
+ dims=self.dims,
184
+ in_channels=self.encoder.conv_in.in_channels // self.encoder.patch_size**2,
185
+ out_channels=self.decoder.conv_out.out_channels
186
+ // self.decoder.patch_size**2,
187
+ latent_channels=self.decoder.conv_in.in_channels,
188
+ encoder_blocks=self.encoder.blocks_desc,
189
+ decoder_blocks=self.decoder.blocks_desc,
190
+ scaling_factor=1.0,
191
+ norm_layer=self.encoder.norm_layer,
192
+ patch_size=self.encoder.patch_size,
193
+ latent_log_var=self.encoder.latent_log_var,
194
+ use_quant_conv=self.use_quant_conv,
195
+ causal_decoder=self.decoder.causal,
196
+ timestep_conditioning=self.decoder.timestep_conditioning,
197
+ normalize_latent_channels=self.normalize_latent_channels,
198
+ )
199
+
200
+ @property
201
+ def is_video_supported(self):
202
+ """
203
+ Check if the model supports video inputs of shape (B, C, F, H, W). Otherwise, the model only supports 2D images.
204
+ """
205
+ return self.dims != 2
206
+
207
+ @property
208
+ def spatial_downscale_factor(self):
209
+ return (
210
+ 2
211
+ ** len(
212
+ [
213
+ block
214
+ for block in self.encoder.blocks_desc
215
+ if block[0]
216
+ in [
217
+ "compress_space",
218
+ "compress_all",
219
+ "compress_all_res",
220
+ "compress_space_res",
221
+ ]
222
+ ]
223
+ )
224
+ * self.encoder.patch_size
225
+ )
226
+
227
+ @property
228
+ def temporal_downscale_factor(self):
229
+ return 2 ** len(
230
+ [
231
+ block
232
+ for block in self.encoder.blocks_desc
233
+ if block[0]
234
+ in [
235
+ "compress_time",
236
+ "compress_all",
237
+ "compress_all_res",
238
+ "compress_time_res",
239
+ ]
240
+ ]
241
+ )
242
+
243
+ def to_json_string(self) -> str:
244
+ import json
245
+
246
+ return json.dumps(self.config.__dict__)
247
+
248
+ def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
249
+ if any([key.startswith("vae.") for key in state_dict.keys()]):
250
+ state_dict = {
251
+ key.replace("vae.", ""): value
252
+ for key, value in state_dict.items()
253
+ if key.startswith("vae.")
254
+ }
255
+ ckpt_state_dict = {
256
+ key: value
257
+ for key, value in state_dict.items()
258
+ if not key.startswith(PER_CHANNEL_STATISTICS_PREFIX)
259
+ }
260
+
261
+ model_keys = set(name for name, _ in self.named_modules())
262
+
263
+ key_mapping = {
264
+ ".resnets.": ".res_blocks.",
265
+ "downsamplers.0": "downsample",
266
+ "upsamplers.0": "upsample",
267
+ }
268
+ converted_state_dict = {}
269
+ for key, value in ckpt_state_dict.items():
270
+ for k, v in key_mapping.items():
271
+ key = key.replace(k, v)
272
+
273
+ key_prefix = ".".join(key.split(".")[:-1])
274
+ if "norm" in key and key_prefix not in model_keys:
275
+ logger.info(
276
+ f"Removing key {key} from state_dict as it is not present in the model"
277
+ )
278
+ continue
279
+
280
+ converted_state_dict[key] = value
281
+
282
+ super().load_state_dict(converted_state_dict, strict=strict)
283
+
284
+ data_dict = {
285
+ key.removeprefix(PER_CHANNEL_STATISTICS_PREFIX): value
286
+ for key, value in state_dict.items()
287
+ if key.startswith(PER_CHANNEL_STATISTICS_PREFIX)
288
+ }
289
+ if len(data_dict) > 0:
290
+ self.register_buffer("std_of_means", data_dict["std-of-means"])
291
+ self.register_buffer(
292
+ "mean_of_means",
293
+ data_dict.get(
294
+ "mean-of-means", torch.zeros_like(data_dict["std-of-means"])
295
+ ),
296
+ )
297
+
298
+ def last_layer(self):
299
+ if hasattr(self.decoder, "conv_out"):
300
+ if isinstance(self.decoder.conv_out, nn.Sequential):
301
+ last_layer = self.decoder.conv_out[-1]
302
+ else:
303
+ last_layer = self.decoder.conv_out
304
+ else:
305
+ last_layer = self.decoder.layers[-1]
306
+ return last_layer
307
+
308
+ def set_use_tpu_flash_attention(self):
309
+ for block in self.decoder.up_blocks:
310
+ if isinstance(block, UNetMidBlock3D) and block.attention_blocks:
311
+ for attention_block in block.attention_blocks:
312
+ attention_block.set_use_tpu_flash_attention()
313
+
314
+
315
+ class Encoder(nn.Module):
316
+ r"""
317
+ The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.
318
+
319
+ Args:
320
+ dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3):
321
+ The number of dimensions to use in convolutions.
322
+ in_channels (`int`, *optional*, defaults to 3):
323
+ The number of input channels.
324
+ out_channels (`int`, *optional*, defaults to 3):
325
+ The number of output channels.
326
+ blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`):
327
+ The blocks to use. Each block is a tuple of the block name and the number of layers.
328
+ base_channels (`int`, *optional*, defaults to 128):
329
+ The number of output channels for the first convolutional layer.
330
+ norm_num_groups (`int`, *optional*, defaults to 32):
331
+ The number of groups for normalization.
332
+ patch_size (`int`, *optional*, defaults to 1):
333
+ The patch size to use. Should be a power of 2.
334
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
335
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
336
+ latent_log_var (`str`, *optional*, defaults to `per_channel`):
337
+ How the log-variance of the latent distribution is parameterized. Can be either `per_channel`, `uniform`, `constant` or `none`.
338
+ """
339
+
340
+ def __init__(
341
+ self,
342
+ dims: Union[int, Tuple[int, int]] = 3,
343
+ in_channels: int = 3,
344
+ out_channels: int = 3,
345
+ blocks: List[Tuple[str, int | dict]] = [("res_x", 1)],
346
+ base_channels: int = 128,
347
+ norm_num_groups: int = 32,
348
+ patch_size: Union[int, Tuple[int]] = 1,
349
+ norm_layer: str = "group_norm", # group_norm, pixel_norm
350
+ latent_log_var: str = "per_channel",
351
+ spatial_padding_mode: str = "zeros",
352
+ ):
353
+ super().__init__()
354
+ self.patch_size = patch_size
355
+ self.norm_layer = norm_layer
356
+ self.latent_channels = out_channels
357
+ self.latent_log_var = latent_log_var
358
+ self.blocks_desc = blocks
359
+
360
+ in_channels = in_channels * patch_size**2
361
+ output_channel = base_channels
362
+
363
+ self.conv_in = make_conv_nd(
364
+ dims=dims,
365
+ in_channels=in_channels,
366
+ out_channels=output_channel,
367
+ kernel_size=3,
368
+ stride=1,
369
+ padding=1,
370
+ causal=True,
371
+ spatial_padding_mode=spatial_padding_mode,
372
+ )
373
+
374
+ self.down_blocks = nn.ModuleList([])
375
+
376
+ for block_name, block_params in blocks:
377
+ input_channel = output_channel
378
+ if isinstance(block_params, int):
379
+ block_params = {"num_layers": block_params}
380
+
381
+ if block_name == "res_x":
382
+ block = UNetMidBlock3D(
383
+ dims=dims,
384
+ in_channels=input_channel,
385
+ num_layers=block_params["num_layers"],
386
+ resnet_eps=1e-6,
387
+ resnet_groups=norm_num_groups,
388
+ norm_layer=norm_layer,
389
+ spatial_padding_mode=spatial_padding_mode,
390
+ )
391
+ elif block_name == "res_x_y":
392
+ output_channel = block_params.get("multiplier", 2) * output_channel
393
+ block = ResnetBlock3D(
394
+ dims=dims,
395
+ in_channels=input_channel,
396
+ out_channels=output_channel,
397
+ eps=1e-6,
398
+ groups=norm_num_groups,
399
+ norm_layer=norm_layer,
400
+ spatial_padding_mode=spatial_padding_mode,
401
+ )
402
+ elif block_name == "compress_time":
403
+ block = make_conv_nd(
404
+ dims=dims,
405
+ in_channels=input_channel,
406
+ out_channels=output_channel,
407
+ kernel_size=3,
408
+ stride=(2, 1, 1),
409
+ causal=True,
410
+ spatial_padding_mode=spatial_padding_mode,
411
+ )
412
+ elif block_name == "compress_space":
413
+ block = make_conv_nd(
414
+ dims=dims,
415
+ in_channels=input_channel,
416
+ out_channels=output_channel,
417
+ kernel_size=3,
418
+ stride=(1, 2, 2),
419
+ causal=True,
420
+ spatial_padding_mode=spatial_padding_mode,
421
+ )
422
+ elif block_name == "compress_all":
423
+ block = make_conv_nd(
424
+ dims=dims,
425
+ in_channels=input_channel,
426
+ out_channels=output_channel,
427
+ kernel_size=3,
428
+ stride=(2, 2, 2),
429
+ causal=True,
430
+ spatial_padding_mode=spatial_padding_mode,
431
+ )
432
+ elif block_name == "compress_all_x_y":
433
+ output_channel = block_params.get("multiplier", 2) * output_channel
434
+ block = make_conv_nd(
435
+ dims=dims,
436
+ in_channels=input_channel,
437
+ out_channels=output_channel,
438
+ kernel_size=3,
439
+ stride=(2, 2, 2),
440
+ causal=True,
441
+ spatial_padding_mode=spatial_padding_mode,
442
+ )
443
+ elif block_name == "compress_all_res":
444
+ output_channel = block_params.get("multiplier", 2) * output_channel
445
+ block = SpaceToDepthDownsample(
446
+ dims=dims,
447
+ in_channels=input_channel,
448
+ out_channels=output_channel,
449
+ stride=(2, 2, 2),
450
+ spatial_padding_mode=spatial_padding_mode,
451
+ )
452
+ elif block_name == "compress_space_res":
453
+ output_channel = block_params.get("multiplier", 2) * output_channel
454
+ block = SpaceToDepthDownsample(
455
+ dims=dims,
456
+ in_channels=input_channel,
457
+ out_channels=output_channel,
458
+ stride=(1, 2, 2),
459
+ spatial_padding_mode=spatial_padding_mode,
460
+ )
461
+ elif block_name == "compress_time_res":
462
+ output_channel = block_params.get("multiplier", 2) * output_channel
463
+ block = SpaceToDepthDownsample(
464
+ dims=dims,
465
+ in_channels=input_channel,
466
+ out_channels=output_channel,
467
+ stride=(2, 1, 1),
468
+ spatial_padding_mode=spatial_padding_mode,
469
+ )
470
+ else:
471
+ raise ValueError(f"unknown block: {block_name}")
472
+
473
+ self.down_blocks.append(block)
474
+
475
+ # out
476
+ if norm_layer == "group_norm":
477
+ self.conv_norm_out = nn.GroupNorm(
478
+ num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6
479
+ )
480
+ elif norm_layer == "pixel_norm":
481
+ self.conv_norm_out = PixelNorm()
482
+ elif norm_layer == "layer_norm":
483
+ self.conv_norm_out = LayerNorm(output_channel, eps=1e-6)
484
+
485
+ self.conv_act = nn.SiLU()
486
+
487
+ conv_out_channels = out_channels
488
+ if latent_log_var == "per_channel":
489
+ conv_out_channels *= 2
490
+ elif latent_log_var == "uniform":
491
+ conv_out_channels += 1
492
+ elif latent_log_var == "constant":
493
+ conv_out_channels += 1
494
+ elif latent_log_var != "none":
495
+ raise ValueError(f"Invalid latent_log_var: {latent_log_var}")
496
+ self.conv_out = make_conv_nd(
497
+ dims,
498
+ output_channel,
499
+ conv_out_channels,
500
+ 3,
501
+ padding=1,
502
+ causal=True,
503
+ spatial_padding_mode=spatial_padding_mode,
504
+ )
505
+
506
+ self.gradient_checkpointing = False
507
+
508
+ def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor:
509
+ r"""The forward method of the `Encoder` class."""
510
+
511
+ sample = patchify(sample, patch_size_hw=self.patch_size, patch_size_t=1)
512
+ sample = self.conv_in(sample)
513
+
514
+ checkpoint_fn = (
515
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
516
+ if self.gradient_checkpointing and self.training
517
+ else lambda x: x
518
+ )
519
+
520
+ for down_block in self.down_blocks:
521
+ sample = checkpoint_fn(down_block)(sample)
522
+
523
+ sample = self.conv_norm_out(sample)
524
+ sample = self.conv_act(sample)
525
+ sample = self.conv_out(sample)
526
+
527
+ if self.latent_log_var == "uniform":
528
+ last_channel = sample[:, -1:, ...]
529
+ num_dims = sample.dim()
530
+
531
+ if num_dims == 4:
532
+ # For shape (B, C, H, W)
533
+ repeated_last_channel = last_channel.repeat(
534
+ 1, sample.shape[1] - 2, 1, 1
535
+ )
536
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
537
+ elif num_dims == 5:
538
+ # For shape (B, C, F, H, W)
539
+ repeated_last_channel = last_channel.repeat(
540
+ 1, sample.shape[1] - 2, 1, 1, 1
541
+ )
542
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
543
+ else:
544
+ raise ValueError(f"Invalid input shape: {sample.shape}")
545
+ elif self.latent_log_var == "constant":
546
+ sample = sample[:, :-1, ...]
547
+ approx_ln_0 = (
548
+ -30
549
+ ) # this is the minimal clamp value in DiagonalGaussianDistribution objects
550
+ sample = torch.cat(
551
+ [sample, torch.ones_like(sample, device=sample.device) * approx_ln_0],
552
+ dim=1,
553
+ )
554
+
555
+ return sample
556
+
557
+
558
+ class Decoder(nn.Module):
559
+ r"""
560
+ The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.
561
+
562
+ Args:
563
+ dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3):
564
+ The number of dimensions to use in convolutions.
565
+ in_channels (`int`, *optional*, defaults to 3):
566
+ The number of input channels.
567
+ out_channels (`int`, *optional*, defaults to 3):
568
+ The number of output channels.
569
+ blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`):
570
+ The blocks to use. Each block is a tuple of the block name and the number of layers.
571
+ base_channels (`int`, *optional*, defaults to 128):
572
+ The number of output channels for the first convolutional layer.
573
+ norm_num_groups (`int`, *optional*, defaults to 32):
574
+ The number of groups for normalization.
575
+ patch_size (`int`, *optional*, defaults to 1):
576
+ The patch size to use. Should be a power of 2.
577
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
578
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
579
+ causal (`bool`, *optional*, defaults to `True`):
580
+ Whether to use causal convolutions or not.
581
+ """
582
+
583
+ def __init__(
584
+ self,
585
+ dims,
586
+ in_channels: int = 3,
587
+ out_channels: int = 3,
588
+ blocks: List[Tuple[str, int | dict]] = [("res_x", 1)],
589
+ base_channels: int = 128,
590
+ layers_per_block: int = 2,
591
+ norm_num_groups: int = 32,
592
+ patch_size: int = 1,
593
+ norm_layer: str = "group_norm",
594
+ causal: bool = True,
595
+ timestep_conditioning: bool = False,
596
+ spatial_padding_mode: str = "zeros",
597
+ ):
598
+ super().__init__()
599
+ self.patch_size = patch_size
600
+ self.layers_per_block = layers_per_block
601
+ out_channels = out_channels * patch_size**2
602
+ self.causal = causal
603
+ self.blocks_desc = blocks
604
+
605
+ # Compute output channel to be product of all channel-multiplier blocks
606
+ output_channel = base_channels
607
+ for block_name, block_params in list(reversed(blocks)):
608
+ block_params = block_params if isinstance(block_params, dict) else {}
609
+ if block_name == "res_x_y":
610
+ output_channel = output_channel * block_params.get("multiplier", 2)
611
+ if block_name.startswith("compress"):
612
+ output_channel = output_channel * block_params.get("multiplier", 1)
613
+
614
+ self.conv_in = make_conv_nd(
615
+ dims,
616
+ in_channels,
617
+ output_channel,
618
+ kernel_size=3,
619
+ stride=1,
620
+ padding=1,
621
+ causal=True,
622
+ spatial_padding_mode=spatial_padding_mode,
623
+ )
624
+
625
+ self.up_blocks = nn.ModuleList([])
626
+
627
+ for block_name, block_params in list(reversed(blocks)):
628
+ input_channel = output_channel
629
+ if isinstance(block_params, int):
630
+ block_params = {"num_layers": block_params}
631
+
632
+ if block_name == "res_x":
633
+ block = UNetMidBlock3D(
634
+ dims=dims,
635
+ in_channels=input_channel,
636
+ num_layers=block_params["num_layers"],
637
+ resnet_eps=1e-6,
638
+ resnet_groups=norm_num_groups,
639
+ norm_layer=norm_layer,
640
+ inject_noise=block_params.get("inject_noise", False),
641
+ timestep_conditioning=timestep_conditioning,
642
+ spatial_padding_mode=spatial_padding_mode,
643
+ )
644
+ elif block_name == "attn_res_x":
645
+ block = UNetMidBlock3D(
646
+ dims=dims,
647
+ in_channels=input_channel,
648
+ num_layers=block_params["num_layers"],
649
+ resnet_groups=norm_num_groups,
650
+ norm_layer=norm_layer,
651
+ inject_noise=block_params.get("inject_noise", False),
652
+ timestep_conditioning=timestep_conditioning,
653
+ attention_head_dim=block_params["attention_head_dim"],
654
+ spatial_padding_mode=spatial_padding_mode,
655
+ )
656
+ elif block_name == "res_x_y":
657
+ output_channel = output_channel // block_params.get("multiplier", 2)
658
+ block = ResnetBlock3D(
659
+ dims=dims,
660
+ in_channels=input_channel,
661
+ out_channels=output_channel,
662
+ eps=1e-6,
663
+ groups=norm_num_groups,
664
+ norm_layer=norm_layer,
665
+ inject_noise=block_params.get("inject_noise", False),
666
+ timestep_conditioning=False,
667
+ spatial_padding_mode=spatial_padding_mode,
668
+ )
669
+ elif block_name == "compress_time":
670
+ block = DepthToSpaceUpsample(
671
+ dims=dims,
672
+ in_channels=input_channel,
673
+ stride=(2, 1, 1),
674
+ spatial_padding_mode=spatial_padding_mode,
675
+ )
676
+ elif block_name == "compress_space":
677
+ block = DepthToSpaceUpsample(
678
+ dims=dims,
679
+ in_channels=input_channel,
680
+ stride=(1, 2, 2),
681
+ spatial_padding_mode=spatial_padding_mode,
682
+ )
683
+ elif block_name == "compress_all":
684
+ output_channel = output_channel // block_params.get("multiplier", 1)
685
+ block = DepthToSpaceUpsample(
686
+ dims=dims,
687
+ in_channels=input_channel,
688
+ stride=(2, 2, 2),
689
+ residual=block_params.get("residual", False),
690
+ out_channels_reduction_factor=block_params.get("multiplier", 1),
691
+ spatial_padding_mode=spatial_padding_mode,
692
+ )
693
+ else:
694
+ raise ValueError(f"unknown layer: {block_name}")
695
+
696
+ self.up_blocks.append(block)
697
+
698
+ if norm_layer == "group_norm":
699
+ self.conv_norm_out = nn.GroupNorm(
700
+ num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6
701
+ )
702
+ elif norm_layer == "pixel_norm":
703
+ self.conv_norm_out = PixelNorm()
704
+ elif norm_layer == "layer_norm":
705
+ self.conv_norm_out = LayerNorm(output_channel, eps=1e-6)
706
+
707
+ self.conv_act = nn.SiLU()
708
+ self.conv_out = make_conv_nd(
709
+ dims,
710
+ output_channel,
711
+ out_channels,
712
+ 3,
713
+ padding=1,
714
+ causal=True,
715
+ spatial_padding_mode=spatial_padding_mode,
716
+ )
717
+
718
+ self.gradient_checkpointing = False
719
+
720
+ self.timestep_conditioning = timestep_conditioning
721
+
722
+ if timestep_conditioning:
723
+ self.timestep_scale_multiplier = nn.Parameter(
724
+ torch.tensor(1000.0, dtype=torch.float32)
725
+ )
726
+ self.last_time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(
727
+ output_channel * 2, 0
728
+ )
729
+ self.last_scale_shift_table = nn.Parameter(
730
+ torch.randn(2, output_channel) / output_channel**0.5
731
+ )
732
+
733
+ def forward(
734
+ self,
735
+ sample: torch.FloatTensor,
736
+ target_shape,
737
+ timestep: Optional[torch.Tensor] = None,
738
+ ) -> torch.FloatTensor:
739
+ r"""The forward method of the `Decoder` class."""
740
+ assert target_shape is not None, "target_shape must be provided"
741
+ batch_size = sample.shape[0]
742
+
743
+ sample = self.conv_in(sample, causal=self.causal)
744
+
745
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
746
+
747
+ checkpoint_fn = (
748
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
749
+ if self.gradient_checkpointing and self.training
750
+ else lambda x: x
751
+ )
752
+
753
+ sample = sample.to(upscale_dtype)
754
+
755
+ if self.timestep_conditioning:
756
+ assert (
757
+ timestep is not None
758
+ ), "should pass timestep with timestep_conditioning=True"
759
+ scaled_timestep = timestep * self.timestep_scale_multiplier
760
+
761
+ for up_block in self.up_blocks:
762
+ if self.timestep_conditioning and isinstance(up_block, UNetMidBlock3D):
763
+ sample = checkpoint_fn(up_block)(
764
+ sample, causal=self.causal, timestep=scaled_timestep
765
+ )
766
+ else:
767
+ sample = checkpoint_fn(up_block)(sample, causal=self.causal)
768
+
769
+ sample = self.conv_norm_out(sample)
770
+
771
+ if self.timestep_conditioning:
772
+ embedded_timestep = self.last_time_embedder(
773
+ timestep=scaled_timestep.flatten(),
774
+ resolution=None,
775
+ aspect_ratio=None,
776
+ batch_size=sample.shape[0],
777
+ hidden_dtype=sample.dtype,
778
+ )
779
+ embedded_timestep = embedded_timestep.view(
780
+ batch_size, embedded_timestep.shape[-1], 1, 1, 1
781
+ )
782
+ ada_values = self.last_scale_shift_table[
783
+ None, ..., None, None, None
784
+ ] + embedded_timestep.reshape(
785
+ batch_size,
786
+ 2,
787
+ -1,
788
+ embedded_timestep.shape[-3],
789
+ embedded_timestep.shape[-2],
790
+ embedded_timestep.shape[-1],
791
+ )
792
+ shift, scale = ada_values.unbind(dim=1)
793
+ sample = sample * (1 + scale) + shift
794
+
795
+ sample = self.conv_act(sample)
796
+ sample = self.conv_out(sample, causal=self.causal)
797
+
798
+ sample = unpatchify(sample, patch_size_hw=self.patch_size, patch_size_t=1)
799
+
800
+ return sample
801
+
802
+
803
+ class UNetMidBlock3D(nn.Module):
804
+ """
805
+ A 3D UNet mid-block [`UNetMidBlock3D`] with multiple residual blocks.
806
+
807
+ Args:
808
+ in_channels (`int`): The number of input channels.
809
+ dropout (`float`, *optional*, defaults to 0.0): The dropout rate.
810
+ num_layers (`int`, *optional*, defaults to 1): The number of residual blocks.
811
+ resnet_eps (`float`, *optional*, defaults to 1e-6): The epsilon value for the resnet blocks.
812
+ resnet_groups (`int`, *optional*, defaults to 32):
813
+ The number of groups to use in the group normalization layers of the resnet blocks.
814
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
815
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
816
+ inject_noise (`bool`, *optional*, defaults to `False`):
817
+ Whether to inject noise into the hidden states.
818
+ timestep_conditioning (`bool`, *optional*, defaults to `False`):
819
+ Whether to condition the hidden states on the timestep.
820
+ attention_head_dim (`int`, *optional*, defaults to -1):
821
+ The dimension of the attention head. If -1, no attention is used.
822
+
823
+ Returns:
824
+ `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size,
825
+ in_channels, frames, height, width)`.
826
+
827
+ """
828
+
829
+ def __init__(
830
+ self,
831
+ dims: Union[int, Tuple[int, int]],
832
+ in_channels: int,
833
+ dropout: float = 0.0,
834
+ num_layers: int = 1,
835
+ resnet_eps: float = 1e-6,
836
+ resnet_groups: int = 32,
837
+ norm_layer: str = "group_norm",
838
+ inject_noise: bool = False,
839
+ timestep_conditioning: bool = False,
840
+ attention_head_dim: int = -1,
841
+ spatial_padding_mode: str = "zeros",
842
+ ):
843
+ super().__init__()
844
+ resnet_groups = (
845
+ resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
846
+ )
847
+ self.timestep_conditioning = timestep_conditioning
848
+
849
+ if timestep_conditioning:
850
+ self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings(
851
+ in_channels * 4, 0
852
+ )
853
+
854
+ self.res_blocks = nn.ModuleList(
855
+ [
856
+ ResnetBlock3D(
857
+ dims=dims,
858
+ in_channels=in_channels,
859
+ out_channels=in_channels,
860
+ eps=resnet_eps,
861
+ groups=resnet_groups,
862
+ dropout=dropout,
863
+ norm_layer=norm_layer,
864
+ inject_noise=inject_noise,
865
+ timestep_conditioning=timestep_conditioning,
866
+ spatial_padding_mode=spatial_padding_mode,
867
+ )
868
+ for _ in range(num_layers)
869
+ ]
870
+ )
871
+
872
+ self.attention_blocks = None
873
+
874
+ if attention_head_dim > 0:
875
+ if attention_head_dim > in_channels:
876
+ raise ValueError(
877
+ "attention_head_dim must be less than or equal to in_channels"
878
+ )
879
+
880
+ self.attention_blocks = nn.ModuleList(
881
+ [
882
+ Attention(
883
+ query_dim=in_channels,
884
+ heads=in_channels // attention_head_dim,
885
+ dim_head=attention_head_dim,
886
+ bias=True,
887
+ out_bias=True,
888
+ qk_norm="rms_norm",
889
+ residual_connection=True,
890
+ )
891
+ for _ in range(num_layers)
892
+ ]
893
+ )
894
+
895
+ def forward(
896
+ self,
897
+ hidden_states: torch.FloatTensor,
898
+ causal: bool = True,
899
+ timestep: Optional[torch.Tensor] = None,
900
+ ) -> torch.FloatTensor:
901
+ timestep_embed = None
902
+ if self.timestep_conditioning:
903
+ assert (
904
+ timestep is not None
905
+ ), "should pass timestep with timestep_conditioning=True"
906
+ batch_size = hidden_states.shape[0]
907
+ timestep_embed = self.time_embedder(
908
+ timestep=timestep.flatten(),
909
+ resolution=None,
910
+ aspect_ratio=None,
911
+ batch_size=batch_size,
912
+ hidden_dtype=hidden_states.dtype,
913
+ )
914
+ timestep_embed = timestep_embed.view(
915
+ batch_size, timestep_embed.shape[-1], 1, 1, 1
916
+ )
917
+
918
+ if self.attention_blocks:
919
+ for resnet, attention in zip(self.res_blocks, self.attention_blocks):
920
+ hidden_states = resnet(
921
+ hidden_states, causal=causal, timestep=timestep_embed
922
+ )
923
+
924
+ # Reshape the hidden states to be (batch_size, frames * height * width, channel)
925
+ batch_size, channel, frames, height, width = hidden_states.shape
926
+ hidden_states = hidden_states.view(
927
+ batch_size, channel, frames * height * width
928
+ ).transpose(1, 2)
929
+
930
+ if attention.use_tpu_flash_attention:
931
+ # Pad the second dimension to be divisible by block_k_major (block in flash attention)
932
+ seq_len = hidden_states.shape[1]
933
+ block_k_major = 512
934
+ pad_len = (block_k_major - seq_len % block_k_major) % block_k_major
935
+ if pad_len > 0:
936
+ hidden_states = F.pad(
937
+ hidden_states, (0, 0, 0, pad_len), "constant", 0
938
+ )
939
+
940
+ # Create a mask with ones for the original sequence length and zeros for the padded indexes
941
+ mask = torch.ones(
942
+ (hidden_states.shape[0], seq_len),
943
+ device=hidden_states.device,
944
+ dtype=hidden_states.dtype,
945
+ )
946
+ if pad_len > 0:
947
+ mask = F.pad(mask, (0, pad_len), "constant", 0)
948
+
949
+ hidden_states = attention(
950
+ hidden_states,
951
+ attention_mask=(
952
+ None if not attention.use_tpu_flash_attention else mask
953
+ ),
954
+ )
955
+
956
+ if attention.use_tpu_flash_attention:
957
+ # Remove the padding
958
+ if pad_len > 0:
959
+ hidden_states = hidden_states[:, :-pad_len, :]
960
+
961
+ # Reshape the hidden states back to (batch_size, channel, frames, height, width)
962
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
963
+ batch_size, channel, frames, height, width
964
+ )
965
+ else:
966
+ for resnet in self.res_blocks:
967
+ hidden_states = resnet(
968
+ hidden_states, causal=causal, timestep=timestep_embed
969
+ )
970
+
971
+ return hidden_states
972
+
973
+
974
+ class SpaceToDepthDownsample(nn.Module):
975
+ def __init__(self, dims, in_channels, out_channels, stride, spatial_padding_mode):
976
+ super().__init__()
977
+ self.stride = stride
978
+ self.group_size = in_channels * np.prod(stride) // out_channels
979
+ self.conv = make_conv_nd(
980
+ dims=dims,
981
+ in_channels=in_channels,
982
+ out_channels=out_channels // np.prod(stride),
983
+ kernel_size=3,
984
+ stride=1,
985
+ causal=True,
986
+ spatial_padding_mode=spatial_padding_mode,
987
+ )
988
+
989
+ def forward(self, x, causal: bool = True):
990
+ if self.stride[0] == 2:
991
+ x = torch.cat(
992
+ [x[:, :, :1, :, :], x], dim=2
993
+ ) # duplicate first frames for padding
994
+
995
+ # skip connection
996
+ x_in = rearrange(
997
+ x,
998
+ "b c (d p1) (h p2) (w p3) -> b (c p1 p2 p3) d h w",
999
+ p1=self.stride[0],
1000
+ p2=self.stride[1],
1001
+ p3=self.stride[2],
1002
+ )
1003
+ x_in = rearrange(x_in, "b (c g) d h w -> b c g d h w", g=self.group_size)
1004
+ x_in = x_in.mean(dim=2)
1005
+
1006
+ # conv
1007
+ x = self.conv(x, causal=causal)
1008
+ x = rearrange(
1009
+ x,
1010
+ "b c (d p1) (h p2) (w p3) -> b (c p1 p2 p3) d h w",
1011
+ p1=self.stride[0],
1012
+ p2=self.stride[1],
1013
+ p3=self.stride[2],
1014
+ )
1015
+
1016
+ x = x + x_in
1017
+
1018
+ return x
1019
+
1020
+
1021
+ class DepthToSpaceUpsample(nn.Module):
1022
+ def __init__(
1023
+ self,
1024
+ dims,
1025
+ in_channels,
1026
+ stride,
1027
+ residual=False,
1028
+ out_channels_reduction_factor=1,
1029
+ spatial_padding_mode="zeros",
1030
+ ):
1031
+ super().__init__()
1032
+ self.stride = stride
1033
+ self.out_channels = (
1034
+ np.prod(stride) * in_channels // out_channels_reduction_factor
1035
+ )
1036
+ self.conv = make_conv_nd(
1037
+ dims=dims,
1038
+ in_channels=in_channels,
1039
+ out_channels=self.out_channels,
1040
+ kernel_size=3,
1041
+ stride=1,
1042
+ causal=True,
1043
+ spatial_padding_mode=spatial_padding_mode,
1044
+ )
1045
+ self.pixel_shuffle = PixelShuffleND(dims=dims, upscale_factors=stride)
1046
+ self.residual = residual
1047
+ self.out_channels_reduction_factor = out_channels_reduction_factor
1048
+
1049
+ def forward(self, x, causal: bool = True):
1050
+ if self.residual:
1051
+ # Reshape and duplicate the input to match the output shape
1052
+ x_in = self.pixel_shuffle(x)
1053
+ num_repeat = np.prod(self.stride) // self.out_channels_reduction_factor
1054
+ x_in = x_in.repeat(1, num_repeat, 1, 1, 1)
1055
+ if self.stride[0] == 2:
1056
+ x_in = x_in[:, :, 1:, :, :]
1057
+ x = self.conv(x, causal=causal)
1058
+ x = self.pixel_shuffle(x)
1059
+ if self.stride[0] == 2:
1060
+ x = x[:, :, 1:, :, :]
1061
+ if self.residual:
1062
+ x = x + x_in
1063
+ return x
1064
+
1065
+
1066
+ class LayerNorm(nn.Module):
1067
+ def __init__(self, dim, eps, elementwise_affine=True) -> None:
1068
+ super().__init__()
1069
+ self.norm = nn.LayerNorm(dim, eps=eps, elementwise_affine=elementwise_affine)
1070
+
1071
+ def forward(self, x):
1072
+ x = rearrange(x, "b c d h w -> b d h w c")
1073
+ x = self.norm(x)
1074
+ x = rearrange(x, "b d h w c -> b c d h w")
1075
+ return x
1076
+
1077
+
1078
+ class ResnetBlock3D(nn.Module):
1079
+ r"""
1080
+ A Resnet block.
1081
+
1082
+ Parameters:
1083
+ in_channels (`int`): The number of channels in the input.
1084
+ out_channels (`int`, *optional*, default to be `None`):
1085
+ The number of output channels for the first conv layer. If None, same as `in_channels`.
1086
+ dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
1087
+ groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
1088
+ eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
1089
+ """
1090
+
1091
+ def __init__(
1092
+ self,
1093
+ dims: Union[int, Tuple[int, int]],
1094
+ in_channels: int,
1095
+ out_channels: Optional[int] = None,
1096
+ dropout: float = 0.0,
1097
+ groups: int = 32,
1098
+ eps: float = 1e-6,
1099
+ norm_layer: str = "group_norm",
1100
+ inject_noise: bool = False,
1101
+ timestep_conditioning: bool = False,
1102
+ spatial_padding_mode: str = "zeros",
1103
+ ):
1104
+ super().__init__()
1105
+ self.in_channels = in_channels
1106
+ out_channels = in_channels if out_channels is None else out_channels
1107
+ self.out_channels = out_channels
1108
+ self.inject_noise = inject_noise
1109
+
1110
+ if norm_layer == "group_norm":
1111
+ self.norm1 = nn.GroupNorm(
1112
+ num_groups=groups, num_channels=in_channels, eps=eps, affine=True
1113
+ )
1114
+ elif norm_layer == "pixel_norm":
1115
+ self.norm1 = PixelNorm()
1116
+ elif norm_layer == "layer_norm":
1117
+ self.norm1 = LayerNorm(in_channels, eps=eps, elementwise_affine=True)
1118
+
1119
+ self.non_linearity = nn.SiLU()
1120
+
1121
+ self.conv1 = make_conv_nd(
1122
+ dims,
1123
+ in_channels,
1124
+ out_channels,
1125
+ kernel_size=3,
1126
+ stride=1,
1127
+ padding=1,
1128
+ causal=True,
1129
+ spatial_padding_mode=spatial_padding_mode,
1130
+ )
1131
+
1132
+ if inject_noise:
1133
+ self.per_channel_scale1 = nn.Parameter(torch.zeros((in_channels, 1, 1)))
1134
+
1135
+ if norm_layer == "group_norm":
1136
+ self.norm2 = nn.GroupNorm(
1137
+ num_groups=groups, num_channels=out_channels, eps=eps, affine=True
1138
+ )
1139
+ elif norm_layer == "pixel_norm":
1140
+ self.norm2 = PixelNorm()
1141
+ elif norm_layer == "layer_norm":
1142
+ self.norm2 = LayerNorm(out_channels, eps=eps, elementwise_affine=True)
1143
+
1144
+ self.dropout = torch.nn.Dropout(dropout)
1145
+
1146
+ self.conv2 = make_conv_nd(
1147
+ dims,
1148
+ out_channels,
1149
+ out_channels,
1150
+ kernel_size=3,
1151
+ stride=1,
1152
+ padding=1,
1153
+ causal=True,
1154
+ spatial_padding_mode=spatial_padding_mode,
1155
+ )
1156
+
1157
+ if inject_noise:
1158
+ self.per_channel_scale2 = nn.Parameter(torch.zeros((in_channels, 1, 1)))
1159
+
1160
+ self.conv_shortcut = (
1161
+ make_linear_nd(
1162
+ dims=dims, in_channels=in_channels, out_channels=out_channels
1163
+ )
1164
+ if in_channels != out_channels
1165
+ else nn.Identity()
1166
+ )
1167
+
1168
+ self.norm3 = (
1169
+ LayerNorm(in_channels, eps=eps, elementwise_affine=True)
1170
+ if in_channels != out_channels
1171
+ else nn.Identity()
1172
+ )
1173
+
1174
+ self.timestep_conditioning = timestep_conditioning
1175
+
1176
+ if timestep_conditioning:
1177
+ self.scale_shift_table = nn.Parameter(
1178
+ torch.randn(4, in_channels) / in_channels**0.5
1179
+ )
1180
+
1181
+ def _feed_spatial_noise(
1182
+ self, hidden_states: torch.FloatTensor, per_channel_scale: torch.FloatTensor
1183
+ ) -> torch.FloatTensor:
1184
+ spatial_shape = hidden_states.shape[-2:]
1185
+ device = hidden_states.device
1186
+ dtype = hidden_states.dtype
1187
+
1188
+ # similar to the "explicit noise inputs" method in style-gan
1189
+ spatial_noise = torch.randn(spatial_shape, device=device, dtype=dtype)[None]
1190
+ scaled_noise = (spatial_noise * per_channel_scale)[None, :, None, ...]
1191
+ hidden_states = hidden_states + scaled_noise
1192
+
1193
+ return hidden_states
1194
+
1195
+ def forward(
1196
+ self,
1197
+ input_tensor: torch.FloatTensor,
1198
+ causal: bool = True,
1199
+ timestep: Optional[torch.Tensor] = None,
1200
+ ) -> torch.FloatTensor:
1201
+ hidden_states = input_tensor
1202
+ batch_size = hidden_states.shape[0]
1203
+
1204
+ hidden_states = self.norm1(hidden_states)
1205
+ if self.timestep_conditioning:
1206
+ assert (
1207
+ timestep is not None
1208
+ ), "should pass timestep with timestep_conditioning=True"
1209
+ ada_values = self.scale_shift_table[
1210
+ None, ..., None, None, None
1211
+ ] + timestep.reshape(
1212
+ batch_size,
1213
+ 4,
1214
+ -1,
1215
+ timestep.shape[-3],
1216
+ timestep.shape[-2],
1217
+ timestep.shape[-1],
1218
+ )
1219
+ shift1, scale1, shift2, scale2 = ada_values.unbind(dim=1)
1220
+
1221
+ hidden_states = hidden_states * (1 + scale1) + shift1
1222
+
1223
+ hidden_states = self.non_linearity(hidden_states)
1224
+
1225
+ hidden_states = self.conv1(hidden_states, causal=causal)
1226
+
1227
+ if self.inject_noise:
1228
+ hidden_states = self._feed_spatial_noise(
1229
+ hidden_states, self.per_channel_scale1
1230
+ )
1231
+
1232
+ hidden_states = self.norm2(hidden_states)
1233
+
1234
+ if self.timestep_conditioning:
1235
+ hidden_states = hidden_states * (1 + scale2) + shift2
1236
+
1237
+ hidden_states = self.non_linearity(hidden_states)
1238
+
1239
+ hidden_states = self.dropout(hidden_states)
1240
+
1241
+ hidden_states = self.conv2(hidden_states, causal=causal)
1242
+
1243
+ if self.inject_noise:
1244
+ hidden_states = self._feed_spatial_noise(
1245
+ hidden_states, self.per_channel_scale2
1246
+ )
1247
+
1248
+ input_tensor = self.norm3(input_tensor)
1249
+
1250
+ batch_size = input_tensor.shape[0]
1251
+
1252
+ input_tensor = self.conv_shortcut(input_tensor)
1253
+
1254
+ output_tensor = input_tensor + hidden_states
1255
+
1256
+ return output_tensor
1257
+
1258
+
1259
+ def patchify(x, patch_size_hw, patch_size_t=1):
1260
+ if patch_size_hw == 1 and patch_size_t == 1:
1261
+ return x
1262
+ if x.dim() == 4:
1263
+ x = rearrange(
1264
+ x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size_hw, r=patch_size_hw
1265
+ )
1266
+ elif x.dim() == 5:
1267
+ x = rearrange(
1268
+ x,
1269
+ "b c (f p) (h q) (w r) -> b (c p r q) f h w",
1270
+ p=patch_size_t,
1271
+ q=patch_size_hw,
1272
+ r=patch_size_hw,
1273
+ )
1274
+ else:
1275
+ raise ValueError(f"Invalid input shape: {x.shape}")
1276
+
1277
+ return x
1278
+
1279
+
1280
+ def unpatchify(x, patch_size_hw, patch_size_t=1):
1281
+ if patch_size_hw == 1 and patch_size_t == 1:
1282
+ return x
1283
+
1284
+ if x.dim() == 4:
1285
+ x = rearrange(
1286
+ x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size_hw, r=patch_size_hw
1287
+ )
1288
+ elif x.dim() == 5:
1289
+ x = rearrange(
1290
+ x,
1291
+ "b (c p r q) f h w -> b c (f p) (h q) (w r)",
1292
+ p=patch_size_t,
1293
+ q=patch_size_hw,
1294
+ r=patch_size_hw,
1295
+ )
1296
+
1297
+ return x
1298
+
1299
+
1300
+ def create_video_autoencoder_demo_config(
1301
+ latent_channels: int = 64,
1302
+ ):
1303
+ encoder_blocks = [
1304
+ ("res_x", {"num_layers": 2}),
1305
+ ("compress_space_res", {"multiplier": 2}),
1306
+ ("compress_time_res", {"multiplier": 2}),
1307
+ ("compress_all_res", {"multiplier": 2}),
1308
+ ("compress_all_res", {"multiplier": 2}),
1309
+ ("res_x", {"num_layers": 1}),
1310
+ ]
1311
+ decoder_blocks = [
1312
+ ("res_x", {"num_layers": 2, "inject_noise": False}),
1313
+ ("compress_all", {"residual": True, "multiplier": 2}),
1314
+ ("compress_all", {"residual": True, "multiplier": 2}),
1315
+ ("compress_all", {"residual": True, "multiplier": 2}),
1316
+ ("res_x", {"num_layers": 2, "inject_noise": False}),
1317
+ ]
1318
+ return {
1319
+ "_class_name": "CausalVideoAutoencoder",
1320
+ "dims": 3,
1321
+ "encoder_blocks": encoder_blocks,
1322
+ "decoder_blocks": decoder_blocks,
1323
+ "latent_channels": latent_channels,
1324
+ "norm_layer": "pixel_norm",
1325
+ "patch_size": 4,
1326
+ "latent_log_var": "uniform",
1327
+ "use_quant_conv": False,
1328
+ "causal_decoder": False,
1329
+ "timestep_conditioning": True,
1330
+ "spatial_padding_mode": "replicate",
1331
+ }
1332
+
1333
+
1334
+ def test_vae_patchify_unpatchify():
1335
+ import torch
1336
+
1337
+ x = torch.randn(2, 3, 8, 64, 64)
1338
+ x_patched = patchify(x, patch_size_hw=4, patch_size_t=4)
1339
+ x_unpatched = unpatchify(x_patched, patch_size_hw=4, patch_size_t=4)
1340
+ assert torch.allclose(x, x_unpatched)
1341
+
1342
+
1343
+ def demo_video_autoencoder_forward_backward():
1344
+ # Configuration for the VideoAutoencoder
1345
+ config = create_video_autoencoder_demo_config()
1346
+
1347
+ # Instantiate the VideoAutoencoder with the specified configuration
1348
+ video_autoencoder = CausalVideoAutoencoder.from_config(config)
1349
+
1350
+ print(video_autoencoder)
1351
+ video_autoencoder.eval()
1352
+ # Print the total number of parameters in the video autoencoder
1353
+ total_params = sum(p.numel() for p in video_autoencoder.parameters())
1354
+ print(f"Total number of parameters in VideoAutoencoder: {total_params:,}")
1355
+
1356
+ # Create a mock input tensor simulating a batch of videos
1357
+ # Shape: (batch_size, channels, depth, height, width)
1358
+ # E.g., 2 videos, each with 3 color channels, 17 frames, and 64x64 pixels per frame
1359
+ input_videos = torch.randn(2, 3, 17, 64, 64)
1360
+
1361
+ # Forward pass: encode and decode the input videos
1362
+ latent = video_autoencoder.encode(input_videos).latent_dist.mode()
1363
+ print(f"input shape={input_videos.shape}")
1364
+ print(f"latent shape={latent.shape}")
1365
+
1366
+ timestep = torch.ones(input_videos.shape[0]) * 0.1
1367
+ reconstructed_videos = video_autoencoder.decode(
1368
+ latent, target_shape=input_videos.shape, timestep=timestep
1369
+ ).sample
1370
+
1371
+ print(f"reconstructed shape={reconstructed_videos.shape}")
1372
+
1373
+ # Validate that single image gets treated the same way as first frame
1374
+ input_image = input_videos[:, :, :1, :, :]
1375
+ image_latent = video_autoencoder.encode(input_image).latent_dist.mode()
1376
+ _ = video_autoencoder.decode(
1377
+ image_latent, target_shape=image_latent.shape, timestep=timestep
1378
+ ).sample
1379
+
1380
+ first_frame_latent = latent[:, :, :1, :, :]
1381
+
1382
+ assert torch.allclose(image_latent, first_frame_latent, atol=1e-6)
1383
+ # assert torch.allclose(reconstructed_image, reconstructed_videos[:, :, :1, :, :], atol=1e-6)
1384
+ # assert torch.allclose(image_latent, first_frame_latent, atol=1e-6)
1385
+ # assert (reconstructed_image == reconstructed_videos[:, :, :1, :, :]).all()
1386
+
1387
+ # Calculate the loss (e.g., mean squared error)
1388
+ loss = torch.nn.functional.mse_loss(input_videos, reconstructed_videos)
1389
+
1390
+ # Perform backward pass
1391
+ loss.backward()
1392
+
1393
+ print(f"Demo completed with loss: {loss.item()}")
1394
+
1395
+
1396
+ # Call the demo function to run the forward and backward pass
1397
+ if __name__ == "__main__":
1398
+ demo_video_autoencoder_forward_backward()
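Beyond the built-in demo above, a minimal sketch (random weights, CPU) of how the demo block list maps onto latent geometry; the shapes below follow from `create_video_autoencoder_demo_config` in this file and are illustrative only, not a description of the released checkpoints.

    import torch
    from ltx_video.models.autoencoders.causal_video_autoencoder import (
        CausalVideoAutoencoder,
        create_video_autoencoder_demo_config,
    )

    config = create_video_autoencoder_demo_config(latent_channels=64)
    vae = CausalVideoAutoencoder.from_config(config).eval()

    # patch_size=4 plus three spatial compressions -> 4 * 2**3 = 32x spatial downscale;
    # three temporal compressions -> 2**3 = 8x temporal downscale.
    print(vae.spatial_downscale_factor, vae.temporal_downscale_factor)  # 32 8

    with torch.no_grad():
        video = torch.randn(1, 3, 17, 64, 64)       # (B, C, F, H, W); 17 = 8 * 2 + 1 (causal)
        latent = vae.encode(video).latent_dist.mode()
        print(latent.shape)                          # expected: torch.Size([1, 64, 3, 2, 2])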
ltx_video/models/autoencoders/conv_nd_factory.py ADDED
@@ -0,0 +1,90 @@
1
+ from typing import Tuple, Union
2
+
3
+ import torch
4
+
5
+ from ltx_video.models.autoencoders.dual_conv3d import DualConv3d
6
+ from ltx_video.models.autoencoders.causal_conv3d import CausalConv3d
7
+
8
+
9
+ def make_conv_nd(
10
+ dims: Union[int, Tuple[int, int]],
11
+ in_channels: int,
12
+ out_channels: int,
13
+ kernel_size: int,
14
+ stride=1,
15
+ padding=0,
16
+ dilation=1,
17
+ groups=1,
18
+ bias=True,
19
+ causal=False,
20
+ spatial_padding_mode="zeros",
21
+ temporal_padding_mode="zeros",
22
+ ):
23
+ if not (spatial_padding_mode == temporal_padding_mode or causal):
24
+ raise NotImplementedError("spatial and temporal padding modes must be equal")
25
+ if dims == 2:
26
+ return torch.nn.Conv2d(
27
+ in_channels=in_channels,
28
+ out_channels=out_channels,
29
+ kernel_size=kernel_size,
30
+ stride=stride,
31
+ padding=padding,
32
+ dilation=dilation,
33
+ groups=groups,
34
+ bias=bias,
35
+ padding_mode=spatial_padding_mode,
36
+ )
37
+ elif dims == 3:
38
+ if causal:
39
+ return CausalConv3d(
40
+ in_channels=in_channels,
41
+ out_channels=out_channels,
42
+ kernel_size=kernel_size,
43
+ stride=stride,
44
+ padding=padding,
45
+ dilation=dilation,
46
+ groups=groups,
47
+ bias=bias,
48
+ spatial_padding_mode=spatial_padding_mode,
49
+ )
50
+ return torch.nn.Conv3d(
51
+ in_channels=in_channels,
52
+ out_channels=out_channels,
53
+ kernel_size=kernel_size,
54
+ stride=stride,
55
+ padding=padding,
56
+ dilation=dilation,
57
+ groups=groups,
58
+ bias=bias,
59
+ padding_mode=spatial_padding_mode,
60
+ )
61
+ elif dims == (2, 1):
62
+ return DualConv3d(
63
+ in_channels=in_channels,
64
+ out_channels=out_channels,
65
+ kernel_size=kernel_size,
66
+ stride=stride,
67
+ padding=padding,
68
+ bias=bias,
69
+ padding_mode=spatial_padding_mode,
70
+ )
71
+ else:
72
+ raise ValueError(f"unsupported dimensions: {dims}")
73
+
74
+
75
+ def make_linear_nd(
76
+ dims: int,
77
+ in_channels: int,
78
+ out_channels: int,
79
+ bias=True,
80
+ ):
81
+ if dims == 2:
82
+ return torch.nn.Conv2d(
83
+ in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias
84
+ )
85
+ elif dims == 3 or dims == (2, 1):
86
+ return torch.nn.Conv3d(
87
+ in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias
88
+ )
89
+ else:
90
+ raise ValueError(f"unsupported dimensions: {dims}")
ltx_video/models/autoencoders/dual_conv3d.py ADDED
@@ -0,0 +1,217 @@
1
+ import math
2
+ from typing import Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from einops import rearrange
8
+
9
+
10
+ class DualConv3d(nn.Module):
11
+ def __init__(
12
+ self,
13
+ in_channels,
14
+ out_channels,
15
+ kernel_size,
16
+ stride: Union[int, Tuple[int, int, int]] = 1,
17
+ padding: Union[int, Tuple[int, int, int]] = 0,
18
+ dilation: Union[int, Tuple[int, int, int]] = 1,
19
+ groups=1,
20
+ bias=True,
21
+ padding_mode="zeros",
22
+ ):
23
+ super(DualConv3d, self).__init__()
24
+
25
+ self.in_channels = in_channels
26
+ self.out_channels = out_channels
27
+ self.padding_mode = padding_mode
28
+ # Ensure kernel_size, stride, padding, and dilation are tuples of length 3
29
+ if isinstance(kernel_size, int):
30
+ kernel_size = (kernel_size, kernel_size, kernel_size)
31
+ if kernel_size == (1, 1, 1):
32
+ raise ValueError(
33
+ "kernel_size must be greater than 1. Use make_linear_nd instead."
34
+ )
35
+ if isinstance(stride, int):
36
+ stride = (stride, stride, stride)
37
+ if isinstance(padding, int):
38
+ padding = (padding, padding, padding)
39
+ if isinstance(dilation, int):
40
+ dilation = (dilation, dilation, dilation)
41
+
42
+ # Set parameters for convolutions
43
+ self.groups = groups
44
+ self.bias = bias
45
+
46
+ # Define the size of the channels after the first convolution
47
+ intermediate_channels = (
48
+ out_channels if in_channels < out_channels else in_channels
49
+ )
50
+
51
+ # Define parameters for the first convolution
52
+ self.weight1 = nn.Parameter(
53
+ torch.Tensor(
54
+ intermediate_channels,
55
+ in_channels // groups,
56
+ 1,
57
+ kernel_size[1],
58
+ kernel_size[2],
59
+ )
60
+ )
61
+ self.stride1 = (1, stride[1], stride[2])
62
+ self.padding1 = (0, padding[1], padding[2])
63
+ self.dilation1 = (1, dilation[1], dilation[2])
64
+ if bias:
65
+ self.bias1 = nn.Parameter(torch.Tensor(intermediate_channels))
66
+ else:
67
+ self.register_parameter("bias1", None)
68
+
69
+ # Define parameters for the second convolution
70
+ self.weight2 = nn.Parameter(
71
+ torch.Tensor(
72
+ out_channels, intermediate_channels // groups, kernel_size[0], 1, 1
73
+ )
74
+ )
75
+ self.stride2 = (stride[0], 1, 1)
76
+ self.padding2 = (padding[0], 0, 0)
77
+ self.dilation2 = (dilation[0], 1, 1)
78
+ if bias:
79
+ self.bias2 = nn.Parameter(torch.Tensor(out_channels))
80
+ else:
81
+ self.register_parameter("bias2", None)
82
+
83
+ # Initialize weights and biases
84
+ self.reset_parameters()
85
+
86
+ def reset_parameters(self):
87
+ nn.init.kaiming_uniform_(self.weight1, a=math.sqrt(5))
88
+ nn.init.kaiming_uniform_(self.weight2, a=math.sqrt(5))
89
+ if self.bias:
90
+ fan_in1, _ = nn.init._calculate_fan_in_and_fan_out(self.weight1)
91
+ bound1 = 1 / math.sqrt(fan_in1)
92
+ nn.init.uniform_(self.bias1, -bound1, bound1)
93
+ fan_in2, _ = nn.init._calculate_fan_in_and_fan_out(self.weight2)
94
+ bound2 = 1 / math.sqrt(fan_in2)
95
+ nn.init.uniform_(self.bias2, -bound2, bound2)
96
+
97
+ def forward(self, x, use_conv3d=False, skip_time_conv=False):
98
+ if use_conv3d:
99
+ return self.forward_with_3d(x=x, skip_time_conv=skip_time_conv)
100
+ else:
101
+ return self.forward_with_2d(x=x, skip_time_conv=skip_time_conv)
102
+
103
+ def forward_with_3d(self, x, skip_time_conv):
104
+ # First convolution
105
+ x = F.conv3d(
106
+ x,
107
+ self.weight1,
108
+ self.bias1,
109
+ self.stride1,
110
+ self.padding1,
111
+ self.dilation1,
112
+ self.groups,
113
+ padding_mode=self.padding_mode,
114
+ )
115
+
116
+ if skip_time_conv:
117
+ return x
118
+
119
+ # Second convolution
120
+ x = F.conv3d(
121
+ x,
122
+ self.weight2,
123
+ self.bias2,
124
+ self.stride2,
125
+ self.padding2,
126
+ self.dilation2,
127
+ self.groups,
128
+ padding_mode=self.padding_mode,
129
+ )
130
+
131
+ return x
132
+
133
+ def forward_with_2d(self, x, skip_time_conv):
134
+ b, c, d, h, w = x.shape
135
+
136
+ # First 2D convolution
137
+ x = rearrange(x, "b c d h w -> (b d) c h w")
138
+ # Squeeze the depth dimension out of weight1 since it's 1
139
+ weight1 = self.weight1.squeeze(2)
140
+ # Select stride, padding, and dilation for the 2D convolution
141
+ stride1 = (self.stride1[1], self.stride1[2])
142
+ padding1 = (self.padding1[1], self.padding1[2])
143
+ dilation1 = (self.dilation1[1], self.dilation1[2])
144
+ x = F.conv2d(
145
+ x,
146
+ weight1,
147
+ self.bias1,
148
+ stride1,
149
+ padding1,
150
+ dilation1,
151
+ self.groups,
152
+ padding_mode=self.padding_mode,
153
+ )
154
+
155
+ _, _, h, w = x.shape
156
+
157
+ if skip_time_conv:
158
+ x = rearrange(x, "(b d) c h w -> b c d h w", b=b)
159
+ return x
160
+
161
+ # Second convolution which is essentially treated as a 1D convolution across the 'd' dimension
162
+ x = rearrange(x, "(b d) c h w -> (b h w) c d", b=b)
163
+
164
+ # Reshape weight2 to match the expected dimensions for conv1d
165
+ weight2 = self.weight2.squeeze(-1).squeeze(-1)
166
+ # Use only the relevant dimension for stride, padding, and dilation for the 1D convolution
167
+ stride2 = self.stride2[0]
168
+ padding2 = self.padding2[0]
169
+ dilation2 = self.dilation2[0]
170
+ x = F.conv1d(
171
+ x,
172
+ weight2,
173
+ self.bias2,
174
+ stride2,
175
+ padding2,
176
+ dilation2,
177
+ self.groups,
178
+ padding_mode=self.padding_mode,
179
+ )
180
+ x = rearrange(x, "(b h w) c d -> b c d h w", b=b, h=h, w=w)
181
+
182
+ return x
183
+
184
+ @property
185
+ def weight(self):
186
+ return self.weight2
187
+
188
+
189
+ def test_dual_conv3d_consistency():
190
+ # Initialize parameters
191
+ in_channels = 3
192
+ out_channels = 5
193
+ kernel_size = (3, 3, 3)
194
+ stride = (2, 2, 2)
195
+ padding = (1, 1, 1)
196
+
197
+ # Create an instance of the DualConv3d class
198
+ dual_conv3d = DualConv3d(
199
+ in_channels=in_channels,
200
+ out_channels=out_channels,
201
+ kernel_size=kernel_size,
202
+ stride=stride,
203
+ padding=padding,
204
+ bias=True,
205
+ )
206
+
207
+ # Example input tensor
208
+ test_input = torch.randn(1, 3, 10, 10, 10)
209
+
210
+ # Perform forward passes with both 3D and 2D settings
211
+ output_conv3d = dual_conv3d(test_input, use_conv3d=True)
212
+ output_2d = dual_conv3d(test_input, use_conv3d=False)
213
+
214
+ # Assert that the outputs from both methods are sufficiently close
215
+ assert torch.allclose(
216
+ output_conv3d, output_2d, atol=1e-6
217
+ ), "Outputs are not consistent between 3D and 2D convolutions."
ltx_video/models/autoencoders/latent_upsampler.py ADDED
@@ -0,0 +1,203 @@
1
+ from typing import Optional, Union
2
+ from pathlib import Path
3
+ import os
4
+ import json
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from einops import rearrange
9
+ from diffusers import ConfigMixin, ModelMixin
10
+ from safetensors.torch import safe_open
11
+
12
+ from ltx_video.models.autoencoders.pixel_shuffle import PixelShuffleND
13
+
14
+
15
+ class ResBlock(nn.Module):
16
+ def __init__(
17
+ self, channels: int, mid_channels: Optional[int] = None, dims: int = 3
18
+ ):
19
+ super().__init__()
20
+ if mid_channels is None:
21
+ mid_channels = channels
22
+
23
+ Conv = nn.Conv2d if dims == 2 else nn.Conv3d
24
+
25
+ self.conv1 = Conv(channels, mid_channels, kernel_size=3, padding=1)
26
+ self.norm1 = nn.GroupNorm(32, mid_channels)
27
+ self.conv2 = Conv(mid_channels, channels, kernel_size=3, padding=1)
28
+ self.norm2 = nn.GroupNorm(32, channels)
29
+ self.activation = nn.SiLU()
30
+
31
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
32
+ residual = x
33
+ x = self.conv1(x)
34
+ x = self.norm1(x)
35
+ x = self.activation(x)
36
+ x = self.conv2(x)
37
+ x = self.norm2(x)
38
+ x = self.activation(x + residual)
39
+ return x
40
+
41
+
42
+ class LatentUpsampler(ModelMixin, ConfigMixin):
43
+ """
44
+ Model to spatially upsample VAE latents.
45
+
46
+ Args:
47
+ in_channels (`int`): Number of channels in the input latent
48
+ mid_channels (`int`): Number of channels in the middle layers
49
+ num_blocks_per_stage (`int`): Number of ResBlocks to use in each stage (pre/post upsampling)
50
+ dims (`int`): Number of dimensions for convolutions (2 or 3)
51
+ spatial_upsample (`bool`): Whether to spatially upsample the latent
52
+ temporal_upsample (`bool`): Whether to temporally upsample the latent
53
+ """
54
+
55
+ def __init__(
56
+ self,
57
+ in_channels: int = 128,
58
+ mid_channels: int = 512,
59
+ num_blocks_per_stage: int = 4,
60
+ dims: int = 3,
61
+ spatial_upsample: bool = True,
62
+ temporal_upsample: bool = False,
63
+ ):
64
+ super().__init__()
65
+
66
+ self.in_channels = in_channels
67
+ self.mid_channels = mid_channels
68
+ self.num_blocks_per_stage = num_blocks_per_stage
69
+ self.dims = dims
70
+ self.spatial_upsample = spatial_upsample
71
+ self.temporal_upsample = temporal_upsample
72
+
73
+ Conv = nn.Conv2d if dims == 2 else nn.Conv3d
74
+
75
+ self.initial_conv = Conv(in_channels, mid_channels, kernel_size=3, padding=1)
76
+ self.initial_norm = nn.GroupNorm(32, mid_channels)
77
+ self.initial_activation = nn.SiLU()
78
+
79
+ self.res_blocks = nn.ModuleList(
80
+ [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)]
81
+ )
82
+
83
+ if spatial_upsample and temporal_upsample:
84
+ self.upsampler = nn.Sequential(
85
+ nn.Conv3d(mid_channels, 8 * mid_channels, kernel_size=3, padding=1),
86
+ PixelShuffleND(3),
87
+ )
88
+ elif spatial_upsample:
89
+ self.upsampler = nn.Sequential(
90
+ nn.Conv2d(mid_channels, 4 * mid_channels, kernel_size=3, padding=1),
91
+ PixelShuffleND(2),
92
+ )
93
+ elif temporal_upsample:
94
+ self.upsampler = nn.Sequential(
95
+ nn.Conv3d(mid_channels, 2 * mid_channels, kernel_size=3, padding=1),
96
+ PixelShuffleND(1),
97
+ )
98
+ else:
99
+ raise ValueError(
100
+ "Either spatial_upsample or temporal_upsample must be True"
101
+ )
102
+
103
+ self.post_upsample_res_blocks = nn.ModuleList(
104
+ [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)]
105
+ )
106
+
107
+ self.final_conv = Conv(mid_channels, in_channels, kernel_size=3, padding=1)
108
+
109
+ def forward(self, latent: torch.Tensor) -> torch.Tensor:
110
+ b, c, f, h, w = latent.shape
111
+
112
+ if self.dims == 2:
113
+ x = rearrange(latent, "b c f h w -> (b f) c h w")
114
+ x = self.initial_conv(x)
115
+ x = self.initial_norm(x)
116
+ x = self.initial_activation(x)
117
+
118
+ for block in self.res_blocks:
119
+ x = block(x)
120
+
121
+ x = self.upsampler(x)
122
+
123
+ for block in self.post_upsample_res_blocks:
124
+ x = block(x)
125
+
126
+ x = self.final_conv(x)
127
+ x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f)
128
+ else:
129
+ x = self.initial_conv(latent)
130
+ x = self.initial_norm(x)
131
+ x = self.initial_activation(x)
132
+
133
+ for block in self.res_blocks:
134
+ x = block(x)
135
+
136
+ if self.temporal_upsample:
137
+ x = self.upsampler(x)
138
+ x = x[:, :, 1:, :, :]
139
+ else:
140
+ x = rearrange(x, "b c f h w -> (b f) c h w")
141
+ x = self.upsampler(x)
142
+ x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f)
143
+
144
+ for block in self.post_upsample_res_blocks:
145
+ x = block(x)
146
+
147
+ x = self.final_conv(x)
148
+
149
+ return x
150
+
151
+ @classmethod
152
+ def from_config(cls, config):
153
+ return cls(
154
+ in_channels=config.get("in_channels", 4),
155
+ mid_channels=config.get("mid_channels", 128),
156
+ num_blocks_per_stage=config.get("num_blocks_per_stage", 4),
157
+ dims=config.get("dims", 2),
158
+ spatial_upsample=config.get("spatial_upsample", True),
159
+ temporal_upsample=config.get("temporal_upsample", False),
160
+ )
161
+
162
+ def config(self):
163
+ return {
164
+ "_class_name": "LatentUpsampler",
165
+ "in_channels": self.in_channels,
166
+ "mid_channels": self.mid_channels,
167
+ "num_blocks_per_stage": self.num_blocks_per_stage,
168
+ "dims": self.dims,
169
+ "spatial_upsample": self.spatial_upsample,
170
+ "temporal_upsample": self.temporal_upsample,
171
+ }
172
+
173
+ @classmethod
174
+ def from_pretrained(
175
+ cls,
176
+ pretrained_model_path: Optional[Union[str, os.PathLike]],
177
+ *args,
178
+ **kwargs,
179
+ ):
180
+ pretrained_model_path = Path(pretrained_model_path)
181
+ if pretrained_model_path.is_file() and str(pretrained_model_path).endswith(
182
+ ".safetensors"
183
+ ):
184
+ state_dict = {}
185
+ with safe_open(pretrained_model_path, framework="pt", device="cpu") as f:
186
+ metadata = f.metadata()
187
+ for k in f.keys():
188
+ state_dict[k] = f.get_tensor(k)
189
+ config = json.loads(metadata["config"])
190
+ with torch.device("meta"):
191
+ latent_upsampler = LatentUpsampler.from_config(config)
192
+ latent_upsampler.load_state_dict(state_dict, assign=True)
193
+ return latent_upsampler
194
+
195
+
196
+ if __name__ == "__main__":
197
+ latent_upsampler = LatentUpsampler(num_blocks_per_stage=4, dims=3)
198
+ print(latent_upsampler)
199
+ total_params = sum(p.numel() for p in latent_upsampler.parameters())
200
+ print(f"Total number of parameters: {total_params:,}")
201
+ latent = torch.randn(1, 128, 9, 16, 16)
202
+ upsampled_latent = latent_upsampler(latent)
203
+ print(f"Upsampled latent shape: {upsampled_latent.shape}")
ltx_video/models/autoencoders/pixel_norm.py ADDED
@@ -0,0 +1,12 @@
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class PixelNorm(nn.Module):
6
+ def __init__(self, dim=1, eps=1e-8):
7
+ super(PixelNorm, self).__init__()
8
+ self.dim = dim
9
+ self.eps = eps
10
+
11
+ def forward(self, x):
12
+ return x / torch.sqrt(torch.mean(x**2, dim=self.dim, keepdim=True) + self.eps)
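PixelNorm rescales every position to (approximately) unit RMS across the chosen dimension, channels by default. A minimal sketch of that property; the import path matches the file added above:

import torch
from ltx_video.models.autoencoders.pixel_norm import PixelNorm

x = torch.randn(2, 16, 4, 8, 8)             # (batch, channels, frames, height, width)
y = PixelNorm(dim=1)(x)
rms = y.pow(2).mean(dim=1).sqrt()           # per-position RMS over the channel dimension
print(torch.allclose(rms, torch.ones_like(rms), atol=1e-3))  # True (up to eps)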
ltx_video/models/autoencoders/pixel_shuffle.py ADDED
@@ -0,0 +1,33 @@
1
+ import torch.nn as nn
2
+ from einops import rearrange
3
+
4
+
5
+ class PixelShuffleND(nn.Module):
6
+ def __init__(self, dims, upscale_factors=(2, 2, 2)):
7
+ super().__init__()
8
+ assert dims in [1, 2, 3], "dims must be 1, 2, or 3"
9
+ self.dims = dims
10
+ self.upscale_factors = upscale_factors
11
+
12
+ def forward(self, x):
13
+ if self.dims == 3:
14
+ return rearrange(
15
+ x,
16
+ "b (c p1 p2 p3) d h w -> b c (d p1) (h p2) (w p3)",
17
+ p1=self.upscale_factors[0],
18
+ p2=self.upscale_factors[1],
19
+ p3=self.upscale_factors[2],
20
+ )
21
+ elif self.dims == 2:
22
+ return rearrange(
23
+ x,
24
+ "b (c p1 p2) h w -> b c (h p1) (w p2)",
25
+ p1=self.upscale_factors[0],
26
+ p2=self.upscale_factors[1],
27
+ )
28
+ elif self.dims == 1:
29
+ return rearrange(
30
+ x,
31
+ "b (c p1) f h w -> b c (f p1) h w",
32
+ p1=self.upscale_factors[0],
33
+ )
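PixelShuffleND trades channel groups for spatial and/or temporal resolution via the rearranges above. A minimal shape sketch using the import path added in this commit:

import torch
from ltx_video.models.autoencoders.pixel_shuffle import PixelShuffleND

# dims=2: fold a factor of 4 out of the channels into 2x height and 2x width.
x = torch.randn(1, 4 * 64, 16, 16)
print(PixelShuffleND(2)(x).shape)   # torch.Size([1, 64, 32, 32])

# dims=3: fold a factor of 8 out of the channels into 2x frames, height and width.
v = torch.randn(1, 8 * 64, 4, 16, 16)
print(PixelShuffleND(3)(v).shape)   # torch.Size([1, 64, 8, 32, 32])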
ltx_video/models/autoencoders/vae.py ADDED
@@ -0,0 +1,380 @@
1
+ from typing import Optional, Union
2
+
3
+ import torch
4
+ import inspect
5
+ import math
6
+ import torch.nn as nn
7
+ from diffusers import ConfigMixin, ModelMixin
8
+ from diffusers.models.autoencoders.vae import (
9
+ DecoderOutput,
10
+ DiagonalGaussianDistribution,
11
+ )
12
+ from diffusers.models.modeling_outputs import AutoencoderKLOutput
13
+ from ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd
14
+
15
+
16
+ class AutoencoderKLWrapper(ModelMixin, ConfigMixin):
17
+ """Variational Autoencoder (VAE) model with KL loss.
18
+
19
+ VAE from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma and Max Welling.
20
+ This model is a wrapper around an encoder and a decoder, and it adds a KL loss term to the reconstruction loss.
21
+
22
+ Args:
23
+ encoder (`nn.Module`):
24
+ Encoder module.
25
+ decoder (`nn.Module`):
26
+ Decoder module.
27
+ latent_channels (`int`, *optional*, defaults to 4):
28
+ Number of latent channels.
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ encoder: nn.Module,
34
+ decoder: nn.Module,
35
+ latent_channels: int = 4,
36
+ dims: int = 2,
37
+ sample_size=512,
38
+ use_quant_conv: bool = True,
39
+ normalize_latent_channels: bool = False,
40
+ ):
41
+ super().__init__()
42
+
43
+ # pass init params to Encoder
44
+ self.encoder = encoder
45
+ self.use_quant_conv = use_quant_conv
46
+ self.normalize_latent_channels = normalize_latent_channels
47
+
48
+ # pass init params to Decoder
49
+ quant_dims = 2 if dims == 2 else 3
50
+ self.decoder = decoder
51
+ if use_quant_conv:
52
+ self.quant_conv = make_conv_nd(
53
+ quant_dims, 2 * latent_channels, 2 * latent_channels, 1
54
+ )
55
+ self.post_quant_conv = make_conv_nd(
56
+ quant_dims, latent_channels, latent_channels, 1
57
+ )
58
+ else:
59
+ self.quant_conv = nn.Identity()
60
+ self.post_quant_conv = nn.Identity()
61
+
62
+ if normalize_latent_channels:
63
+ if dims == 2:
64
+ self.latent_norm_out = nn.BatchNorm2d(latent_channels, affine=False)
65
+ else:
66
+ self.latent_norm_out = nn.BatchNorm3d(latent_channels, affine=False)
67
+ else:
68
+ self.latent_norm_out = nn.Identity()
69
+ self.use_z_tiling = False
70
+ self.use_hw_tiling = False
71
+ self.dims = dims
72
+ self.z_sample_size = 1
73
+
74
+ self.decoder_params = inspect.signature(self.decoder.forward).parameters
75
+
76
+ # only relevant if vae tiling is enabled
77
+ self.set_tiling_params(sample_size=sample_size, overlap_factor=0.25)
78
+
79
+ def set_tiling_params(self, sample_size: int = 512, overlap_factor: float = 0.25):
80
+ self.tile_sample_min_size = sample_size
81
+ num_blocks = len(self.encoder.down_blocks)
82
+ self.tile_latent_min_size = int(sample_size / (2 ** (num_blocks - 1)))
83
+ self.tile_overlap_factor = overlap_factor
84
+
85
+ def enable_z_tiling(self, z_sample_size: int = 8):
86
+ r"""
87
+ Enable tiling during VAE decoding.
88
+
89
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
90
+ steps. This is useful to save some memory and allow larger batch sizes.
91
+ """
92
+ self.use_z_tiling = z_sample_size > 1
93
+ self.z_sample_size = z_sample_size
94
+ assert (
95
+ z_sample_size % 8 == 0 or z_sample_size == 1
96
+ ), f"z_sample_size must be a multiple of 8 or 1. Got {z_sample_size}."
97
+
98
+ def disable_z_tiling(self):
99
+ r"""
100
+ Disable tiling during VAE decoding. If `enable_z_tiling` was previously invoked, this method will go back to computing
101
+ decoding in one step.
102
+ """
103
+ self.use_z_tiling = False
104
+
105
+ def enable_hw_tiling(self):
106
+ r"""
107
+ Enable tiling during VAE decoding along the height and width dimension.
108
+ """
109
+ self.use_hw_tiling = True
110
+
111
+ def disable_hw_tiling(self):
112
+ r"""
113
+ Disable tiling during VAE decoding along the height and width dimension.
114
+ """
115
+ self.use_hw_tiling = False
116
+
117
+ def _hw_tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
118
+ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
119
+ blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
120
+ row_limit = self.tile_latent_min_size - blend_extent
121
+
122
+ # Split the image into 512x512 tiles and encode them separately.
123
+ rows = []
124
+ for i in range(0, x.shape[3], overlap_size):
125
+ row = []
126
+ for j in range(0, x.shape[4], overlap_size):
127
+ tile = x[
128
+ :,
129
+ :,
130
+ :,
131
+ i : i + self.tile_sample_min_size,
132
+ j : j + self.tile_sample_min_size,
133
+ ]
134
+ tile = self.encoder(tile)
135
+ tile = self.quant_conv(tile)
136
+ row.append(tile)
137
+ rows.append(row)
138
+ result_rows = []
139
+ for i, row in enumerate(rows):
140
+ result_row = []
141
+ for j, tile in enumerate(row):
142
+ # blend the above tile and the left tile
143
+ # to the current tile and add the current tile to the result row
144
+ if i > 0:
145
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
146
+ if j > 0:
147
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
148
+ result_row.append(tile[:, :, :, :row_limit, :row_limit])
149
+ result_rows.append(torch.cat(result_row, dim=4))
150
+
151
+ moments = torch.cat(result_rows, dim=3)
152
+ return moments
153
+
154
+ def blend_z(
155
+ self, a: torch.Tensor, b: torch.Tensor, blend_extent: int
156
+ ) -> torch.Tensor:
157
+ blend_extent = min(a.shape[2], b.shape[2], blend_extent)
158
+ for z in range(blend_extent):
159
+ b[:, :, z, :, :] = a[:, :, -blend_extent + z, :, :] * (
160
+ 1 - z / blend_extent
161
+ ) + b[:, :, z, :, :] * (z / blend_extent)
162
+ return b
163
+
164
+ def blend_v(
165
+ self, a: torch.Tensor, b: torch.Tensor, blend_extent: int
166
+ ) -> torch.Tensor:
167
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
168
+ for y in range(blend_extent):
169
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (
170
+ 1 - y / blend_extent
171
+ ) + b[:, :, :, y, :] * (y / blend_extent)
172
+ return b
173
+
174
+ def blend_h(
175
+ self, a: torch.Tensor, b: torch.Tensor, blend_extent: int
176
+ ) -> torch.Tensor:
177
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
178
+ for x in range(blend_extent):
179
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (
180
+ 1 - x / blend_extent
181
+ ) + b[:, :, :, :, x] * (x / blend_extent)
182
+ return b
183
+
184
+ def _hw_tiled_decode(self, z: torch.FloatTensor, target_shape):
185
+ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
186
+ blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
187
+ row_limit = self.tile_sample_min_size - blend_extent
188
+ tile_target_shape = (
189
+ *target_shape[:3],
190
+ self.tile_sample_min_size,
191
+ self.tile_sample_min_size,
192
+ )
193
+ # Split z into overlapping 64x64 tiles and decode them separately.
194
+ # The tiles have an overlap to avoid seams between tiles.
195
+ rows = []
196
+ for i in range(0, z.shape[3], overlap_size):
197
+ row = []
198
+ for j in range(0, z.shape[4], overlap_size):
199
+ tile = z[
200
+ :,
201
+ :,
202
+ :,
203
+ i : i + self.tile_latent_min_size,
204
+ j : j + self.tile_latent_min_size,
205
+ ]
206
+ tile = self.post_quant_conv(tile)
207
+ decoded = self.decoder(tile, target_shape=tile_target_shape)
208
+ row.append(decoded)
209
+ rows.append(row)
210
+ result_rows = []
211
+ for i, row in enumerate(rows):
212
+ result_row = []
213
+ for j, tile in enumerate(row):
214
+ # blend the above tile and the left tile
215
+ # to the current tile and add the current tile to the result row
216
+ if i > 0:
217
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
218
+ if j > 0:
219
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
220
+ result_row.append(tile[:, :, :, :row_limit, :row_limit])
221
+ result_rows.append(torch.cat(result_row, dim=4))
222
+
223
+ dec = torch.cat(result_rows, dim=3)
224
+ return dec
225
+
226
+ def encode(
227
+ self, z: torch.FloatTensor, return_dict: bool = True
228
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
229
+ if self.use_z_tiling and z.shape[2] > self.z_sample_size > 1:
230
+ num_splits = z.shape[2] // self.z_sample_size
231
+ sizes = [self.z_sample_size] * num_splits
232
+ sizes = (
233
+ sizes + [z.shape[2] - sum(sizes)]
234
+ if z.shape[2] - sum(sizes) > 0
235
+ else sizes
236
+ )
237
+ tiles = z.split(sizes, dim=2)
238
+ moments_tiles = [
239
+ (
240
+ self._hw_tiled_encode(z_tile, return_dict)
241
+ if self.use_hw_tiling
242
+ else self._encode(z_tile)
243
+ )
244
+ for z_tile in tiles
245
+ ]
246
+ moments = torch.cat(moments_tiles, dim=2)
247
+
248
+ else:
249
+ moments = (
250
+ self._hw_tiled_encode(z, return_dict)
251
+ if self.use_hw_tiling
252
+ else self._encode(z)
253
+ )
254
+
255
+ posterior = DiagonalGaussianDistribution(moments)
256
+ if not return_dict:
257
+ return (posterior,)
258
+
259
+ return AutoencoderKLOutput(latent_dist=posterior)
260
+
261
+ def _normalize_latent_channels(self, z: torch.FloatTensor) -> torch.FloatTensor:
262
+ if isinstance(self.latent_norm_out, nn.BatchNorm3d):
263
+ _, c, _, _, _ = z.shape
264
+ z = torch.cat(
265
+ [
266
+ self.latent_norm_out(z[:, : c // 2, :, :, :]),
267
+ z[:, c // 2 :, :, :, :],
268
+ ],
269
+ dim=1,
270
+ )
271
+ elif isinstance(self.latent_norm_out, nn.BatchNorm2d):
272
+ raise NotImplementedError("BatchNorm2d not supported")
273
+ return z
274
+
275
+ def _unnormalize_latent_channels(self, z: torch.FloatTensor) -> torch.FloatTensor:
276
+ if isinstance(self.latent_norm_out, nn.BatchNorm3d):
277
+ running_mean = self.latent_norm_out.running_mean.view(1, -1, 1, 1, 1)
278
+ running_var = self.latent_norm_out.running_var.view(1, -1, 1, 1, 1)
279
+ eps = self.latent_norm_out.eps
280
+
281
+ z = z * torch.sqrt(running_var + eps) + running_mean
282
+ elif isinstance(self.latent_norm_out, nn.BatchNorm2d):
283
+ raise NotImplementedError("BatchNorm2d not supported")
284
+ return z
285
+
286
+ def _encode(self, x: torch.FloatTensor) -> AutoencoderKLOutput:
287
+ h = self.encoder(x)
288
+ moments = self.quant_conv(h)
289
+ moments = self._normalize_latent_channels(moments)
290
+ return moments
291
+
292
+ def _decode(
293
+ self,
294
+ z: torch.FloatTensor,
295
+ target_shape=None,
296
+ timestep: Optional[torch.Tensor] = None,
297
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
298
+ z = self._unnormalize_latent_channels(z)
299
+ z = self.post_quant_conv(z)
300
+ if "timestep" in self.decoder_params:
301
+ dec = self.decoder(z, target_shape=target_shape, timestep=timestep)
302
+ else:
303
+ dec = self.decoder(z, target_shape=target_shape)
304
+ return dec
305
+
306
+ def decode(
307
+ self,
308
+ z: torch.FloatTensor,
309
+ return_dict: bool = True,
310
+ target_shape=None,
311
+ timestep: Optional[torch.Tensor] = None,
312
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
313
+ assert target_shape is not None, "target_shape must be provided for decoding"
314
+ if self.use_z_tiling and z.shape[2] > self.z_sample_size > 1:
315
+ reduction_factor = int(
316
+ self.encoder.patch_size_t
317
+ * 2
318
+ ** (
319
+ len(self.encoder.down_blocks)
320
+ - 1
321
+ - math.sqrt(self.encoder.patch_size)
322
+ )
323
+ )
324
+ split_size = self.z_sample_size // reduction_factor
325
+ num_splits = z.shape[2] // split_size
326
+
327
+ # copy target shape, and divide frame dimension (=2) by the context size
328
+ target_shape_split = list(target_shape)
329
+ target_shape_split[2] = target_shape[2] // num_splits
330
+
331
+ decoded_tiles = [
332
+ (
333
+ self._hw_tiled_decode(z_tile, target_shape_split)
334
+ if self.use_hw_tiling
335
+ else self._decode(z_tile, target_shape=target_shape_split)
336
+ )
337
+ for z_tile in torch.tensor_split(z, num_splits, dim=2)
338
+ ]
339
+ decoded = torch.cat(decoded_tiles, dim=2)
340
+ else:
341
+ decoded = (
342
+ self._hw_tiled_decode(z, target_shape)
343
+ if self.use_hw_tiling
344
+ else self._decode(z, target_shape=target_shape, timestep=timestep)
345
+ )
346
+
347
+ if not return_dict:
348
+ return (decoded,)
349
+
350
+ return DecoderOutput(sample=decoded)
351
+
352
+ def forward(
353
+ self,
354
+ sample: torch.FloatTensor,
355
+ sample_posterior: bool = False,
356
+ return_dict: bool = True,
357
+ generator: Optional[torch.Generator] = None,
358
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
359
+ r"""
360
+ Args:
361
+ sample (`torch.FloatTensor`): Input sample.
362
+ sample_posterior (`bool`, *optional*, defaults to `False`):
363
+ Whether to sample from the posterior.
364
+ return_dict (`bool`, *optional*, defaults to `True`):
365
+ Whether to return a [`DecoderOutput`] instead of a plain tuple.
366
+ generator (`torch.Generator`, *optional*):
367
+ Generator used to sample from the posterior.
368
+ """
369
+ x = sample
370
+ posterior = self.encode(x).latent_dist
371
+ if sample_posterior:
372
+ z = posterior.sample(generator=generator)
373
+ else:
374
+ z = posterior.mode()
375
+ dec = self.decode(z, target_shape=sample.shape).sample
376
+
377
+ if not return_dict:
378
+ return (dec,)
379
+
380
+ return DecoderOutput(sample=dec)
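The blend_z / blend_v / blend_h helpers above hide tile seams with a linear cross-fade over the overlap region. A 1-D sketch of the same ramp, for illustration only (not part of the file):

import torch

def blend_1d(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    # The left tile `a` fades out while the right tile `b` fades in over the overlap.
    blend_extent = min(a.shape[-1], b.shape[-1], blend_extent)
    for i in range(blend_extent):
        w = i / blend_extent
        b[..., i] = a[..., -blend_extent + i] * (1 - w) + b[..., i] * w
    return b

left = torch.ones(8)    # tile that ends at 1.0
right = torch.zeros(8)  # tile that starts at 0.0
print(blend_1d(left, right, 4))  # tensor([1.0000, 0.7500, 0.5000, 0.2500, 0.0000, 0.0000, 0.0000, 0.0000])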
ltx_video/models/autoencoders/vae_encode.py ADDED
@@ -0,0 +1,247 @@
1
+ from typing import Tuple
2
+ import torch
3
+ from diffusers import AutoencoderKL
4
+ from einops import rearrange
5
+ from torch import Tensor
6
+
7
+
8
+ from ltx_video.models.autoencoders.causal_video_autoencoder import (
9
+ CausalVideoAutoencoder,
10
+ )
11
+ from ltx_video.models.autoencoders.video_autoencoder import (
12
+ Downsample3D,
13
+ VideoAutoencoder,
14
+ )
15
+
16
+ try:
17
+ import torch_xla.core.xla_model as xm
18
+ except ImportError:
19
+ xm = None
20
+
21
+
22
+ def vae_encode(
23
+ media_items: Tensor,
24
+ vae: AutoencoderKL,
25
+ split_size: int = 1,
26
+ vae_per_channel_normalize=False,
27
+ ) -> Tensor:
28
+ """
29
+ Encodes media items (images or videos) into latent representations using a specified VAE model.
30
+ The function supports processing batches of images or video frames and can handle the processing
31
+ in smaller sub-batches if needed.
32
+
33
+ Args:
34
+ media_items (Tensor): A torch Tensor containing the media items to encode. The expected
35
+ shape is (batch_size, channels, height, width) for images or (batch_size, channels,
36
+ frames, height, width) for videos.
37
+ vae (AutoencoderKL): An instance of the `AutoencoderKL` class from the `diffusers` library,
38
+ pre-configured and loaded with the appropriate model weights.
39
+ split_size (int, optional): The number of sub-batches to split the input batch into for encoding.
40
+ If set to more than 1, the input media items are processed in smaller batches according to
41
+ this value. Defaults to 1, which processes all items in a single batch.
42
+
43
+ Returns:
44
+ Tensor: A torch Tensor of the encoded latent representations. The shape of the tensor is adjusted
45
+ to match the input shape, scaled by the model's configuration.
46
+
47
+ Examples:
48
+ >>> import torch
49
+ >>> from diffusers import AutoencoderKL
50
+ >>> vae = AutoencoderKL.from_pretrained('your-model-name')
51
+ >>> images = torch.rand(10, 3, 8, 256, 256) # Example tensor with 10 videos of 8 frames.
52
+ >>> latents = vae_encode(images, vae)
53
+ >>> print(latents.shape) # Output shape will depend on the model's latent configuration.
54
+
55
+ Note:
56
+ In the case of a video, the function encodes the media item frame by frame.
57
+ """
58
+ is_video_shaped = media_items.dim() == 5
59
+ batch_size, channels = media_items.shape[0:2]
60
+
61
+ if channels != 3:
62
+ raise ValueError(f"Expects tensors with 3 channels, got {channels}.")
63
+
64
+ if is_video_shaped and not isinstance(
65
+ vae, (VideoAutoencoder, CausalVideoAutoencoder)
66
+ ):
67
+ media_items = rearrange(media_items, "b c n h w -> (b n) c h w")
68
+ if split_size > 1:
69
+ if len(media_items) % split_size != 0:
70
+ raise ValueError(
71
+ "Error: The batch size must be divisible by 'train.vae_bs_split"
72
+ )
73
+ encode_bs = len(media_items) // split_size
74
+ # latents = [vae.encode(image_batch).latent_dist.sample() for image_batch in media_items.split(encode_bs)]
75
+ latents = []
76
+ if media_items.device.type == "xla":
77
+ xm.mark_step()
78
+ for image_batch in media_items.split(encode_bs):
79
+ latents.append(vae.encode(image_batch).latent_dist.sample())
80
+ if media_items.device.type == "xla":
81
+ xm.mark_step()
82
+ latents = torch.cat(latents, dim=0)
83
+ else:
84
+ latents = vae.encode(media_items).latent_dist.sample()
85
+
86
+ latents = normalize_latents(latents, vae, vae_per_channel_normalize)
87
+ if is_video_shaped and not isinstance(
88
+ vae, (VideoAutoencoder, CausalVideoAutoencoder)
89
+ ):
90
+ latents = rearrange(latents, "(b n) c h w -> b c n h w", b=batch_size)
91
+ return latents
92
+
93
+
94
+ def vae_decode(
95
+ latents: Tensor,
96
+ vae: AutoencoderKL,
97
+ is_video: bool = True,
98
+ split_size: int = 1,
99
+ vae_per_channel_normalize=False,
100
+ timestep=None,
101
+ ) -> Tensor:
102
+ is_video_shaped = latents.dim() == 5
103
+ batch_size = latents.shape[0]
104
+
105
+ if is_video_shaped and not isinstance(
106
+ vae, (VideoAutoencoder, CausalVideoAutoencoder)
107
+ ):
108
+ latents = rearrange(latents, "b c n h w -> (b n) c h w")
109
+ if split_size > 1:
110
+ if len(latents) % split_size != 0:
111
+ raise ValueError(
112
+ "Error: The batch size must be divisible by 'train.vae_bs_split"
113
+ )
114
+ encode_bs = len(latents) // split_size
115
+ image_batch = [
116
+ _run_decoder(
117
+ latent_batch, vae, is_video, vae_per_channel_normalize, timestep
118
+ )
119
+ for latent_batch in latents.split(encode_bs)
120
+ ]
121
+ images = torch.cat(image_batch, dim=0)
122
+ else:
123
+ images = _run_decoder(
124
+ latents, vae, is_video, vae_per_channel_normalize, timestep
125
+ )
126
+
127
+ if is_video_shaped and not isinstance(
128
+ vae, (VideoAutoencoder, CausalVideoAutoencoder)
129
+ ):
130
+ images = rearrange(images, "(b n) c h w -> b c n h w", b=batch_size)
131
+ return images
132
+
133
+
134
+ def _run_decoder(
135
+ latents: Tensor,
136
+ vae: AutoencoderKL,
137
+ is_video: bool,
138
+ vae_per_channel_normalize=False,
139
+ timestep=None,
140
+ ) -> Tensor:
141
+ if isinstance(vae, (VideoAutoencoder, CausalVideoAutoencoder)):
142
+ *_, fl, hl, wl = latents.shape
143
+ temporal_scale, spatial_scale, _ = get_vae_size_scale_factor(vae)
144
+ latents = latents.to(vae.dtype)
145
+ vae_decode_kwargs = {}
146
+ if timestep is not None:
147
+ vae_decode_kwargs["timestep"] = timestep
148
+ image = vae.decode(
149
+ un_normalize_latents(latents, vae, vae_per_channel_normalize),
150
+ return_dict=False,
151
+ target_shape=(
152
+ 1,
153
+ 3,
154
+ fl * temporal_scale if is_video else 1,
155
+ hl * spatial_scale,
156
+ wl * spatial_scale,
157
+ ),
158
+ **vae_decode_kwargs,
159
+ )[0]
160
+ else:
161
+ image = vae.decode(
162
+ un_normalize_latents(latents, vae, vae_per_channel_normalize),
163
+ return_dict=False,
164
+ )[0]
165
+ return image
166
+
167
+
168
+ def get_vae_size_scale_factor(vae: AutoencoderKL) -> Tuple[int, int, int]:
169
+ if isinstance(vae, CausalVideoAutoencoder):
170
+ spatial = vae.spatial_downscale_factor
171
+ temporal = vae.temporal_downscale_factor
172
+ else:
173
+ down_blocks = len(
174
+ [
175
+ block
176
+ for block in vae.encoder.down_blocks
177
+ if isinstance(block.downsample, Downsample3D)
178
+ ]
179
+ )
180
+ spatial = vae.config.patch_size * 2**down_blocks
181
+ temporal = (
182
+ vae.config.patch_size_t * 2**down_blocks
183
+ if isinstance(vae, VideoAutoencoder)
184
+ else 1
185
+ )
186
+
187
+ return (temporal, spatial, spatial)
188
+
189
+
190
+ def latent_to_pixel_coords(
191
+ latent_coords: Tensor, vae: AutoencoderKL, causal_fix: bool = False
192
+ ) -> Tensor:
193
+ """
194
+ Converts latent coordinates to pixel coordinates by scaling them according to the VAE's
195
+ configuration.
196
+
197
+ Args:
198
+ latent_coords (Tensor): A tensor of shape [batch_size, 3, num_latents]
199
+ containing the latent corner coordinates of each token.
200
+ vae (AutoencoderKL): The VAE model
201
+ causal_fix (bool): Whether to take into account the different temporal scale
202
+ of the first frame. Default = False for backwards compatibility.
203
+ Returns:
204
+ Tensor: A tensor of pixel coordinates corresponding to the input latent coordinates.
205
+ """
206
+
207
+ scale_factors = get_vae_size_scale_factor(vae)
208
+ causal_fix = isinstance(vae, CausalVideoAutoencoder) and causal_fix
209
+ pixel_coords = latent_to_pixel_coords_from_factors(
210
+ latent_coords, scale_factors, causal_fix
211
+ )
212
+ return pixel_coords
213
+
214
+
215
+ def latent_to_pixel_coords_from_factors(
216
+ latent_coords: Tensor, scale_factors: Tuple, causal_fix: bool = False
217
+ ) -> Tensor:
218
+ pixel_coords = (
219
+ latent_coords
220
+ * torch.tensor(scale_factors, device=latent_coords.device)[None, :, None]
221
+ )
222
+ if causal_fix:
223
+ # Fix temporal scale for first frame to 1 due to causality
224
+ pixel_coords[:, 0] = (pixel_coords[:, 0] + 1 - scale_factors[0]).clamp(min=0)
225
+ return pixel_coords
226
+
227
+
228
+ def normalize_latents(
229
+ latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False
230
+ ) -> Tensor:
231
+ return (
232
+ (latents - vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1))
233
+ / vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
234
+ if vae_per_channel_normalize
235
+ else latents * vae.config.scaling_factor
236
+ )
237
+
238
+
239
+ def un_normalize_latents(
240
+ latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False
241
+ ) -> Tensor:
242
+ return (
243
+ latents * vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
244
+ + vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
245
+ if vae_per_channel_normalize
246
+ else latents / vae.config.scaling_factor
247
+ )
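latent_to_pixel_coords_from_factors scales per-axis latent token coordinates by the VAE downscale factors, with an optional causal correction on the temporal axis. A small numeric sketch; the (8, 32, 32) factors are illustrative values, not read from any config:

import torch
from ltx_video.models.autoencoders.vae_encode import latent_to_pixel_coords_from_factors

latent_coords = torch.tensor([[[0, 1, 2],    # latent frame index of each token
                               [0, 0, 1],    # latent row of each token
                               [0, 1, 0]]])  # latent column of each token
scale_factors = (8, 32, 32)                  # (temporal, height, width) downscale factors

print(latent_to_pixel_coords_from_factors(latent_coords, scale_factors))
# -> [[[0, 8, 16], [0, 0, 32], [0, 32, 0]]]
print(latent_to_pixel_coords_from_factors(latent_coords, scale_factors, causal_fix=True))
# -> [[[0, 1, 9], [0, 0, 32], [0, 32, 0]]]  (the first frame is clamped to pixel frame 0)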
ltx_video/models/autoencoders/video_autoencoder.py ADDED
@@ -0,0 +1,1045 @@
1
+ import json
2
+ import os
3
+ from functools import partial
4
+ from types import SimpleNamespace
5
+ from typing import Any, Mapping, Optional, Tuple, Union
6
+
7
+ import torch
8
+ from einops import rearrange
9
+ from torch import nn
10
+ from torch.nn import functional
11
+
12
+ from diffusers.utils import logging
13
+
14
+ from ltx_video.utils.torch_utils import Identity
15
+ from ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd, make_linear_nd
16
+ from ltx_video.models.autoencoders.pixel_norm import PixelNorm
17
+ from ltx_video.models.autoencoders.vae import AutoencoderKLWrapper
18
+
19
+ logger = logging.get_logger(__name__)
20
+
21
+
22
+ class VideoAutoencoder(AutoencoderKLWrapper):
23
+ @classmethod
24
+ def from_pretrained(
25
+ cls,
26
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
27
+ *args,
28
+ **kwargs,
29
+ ):
30
+ config_local_path = pretrained_model_name_or_path / "config.json"
31
+ config = cls.load_config(config_local_path, **kwargs)
32
+ video_vae = cls.from_config(config)
33
+ video_vae.to(kwargs["torch_dtype"])
34
+
35
+ model_local_path = pretrained_model_name_or_path / "autoencoder.pth"
36
+ ckpt_state_dict = torch.load(model_local_path)
37
+ video_vae.load_state_dict(ckpt_state_dict)
38
+
39
+ statistics_local_path = (
40
+ pretrained_model_name_or_path / "per_channel_statistics.json"
41
+ )
42
+ if statistics_local_path.exists():
43
+ with open(statistics_local_path, "r") as file:
44
+ data = json.load(file)
45
+ transposed_data = list(zip(*data["data"]))
46
+ data_dict = {
47
+ col: torch.tensor(vals)
48
+ for col, vals in zip(data["columns"], transposed_data)
49
+ }
50
+ video_vae.register_buffer("std_of_means", data_dict["std-of-means"])
51
+ video_vae.register_buffer(
52
+ "mean_of_means",
53
+ data_dict.get(
54
+ "mean-of-means", torch.zeros_like(data_dict["std-of-means"])
55
+ ),
56
+ )
57
+
58
+ return video_vae
59
+
60
+ @staticmethod
61
+ def from_config(config):
62
+ assert (
63
+ config["_class_name"] == "VideoAutoencoder"
64
+ ), "config must have _class_name=VideoAutoencoder"
65
+ if isinstance(config["dims"], list):
66
+ config["dims"] = tuple(config["dims"])
67
+
68
+ assert config["dims"] in [2, 3, (2, 1)], "dims must be 2, 3 or (2, 1)"
69
+
70
+ double_z = config.get("double_z", True)
71
+ latent_log_var = config.get(
72
+ "latent_log_var", "per_channel" if double_z else "none"
73
+ )
74
+ use_quant_conv = config.get("use_quant_conv", True)
75
+
76
+ if use_quant_conv and latent_log_var == "uniform":
77
+ raise ValueError("uniform latent_log_var requires use_quant_conv=False")
78
+
79
+ encoder = Encoder(
80
+ dims=config["dims"],
81
+ in_channels=config.get("in_channels", 3),
82
+ out_channels=config["latent_channels"],
83
+ block_out_channels=config["block_out_channels"],
84
+ patch_size=config.get("patch_size", 1),
85
+ latent_log_var=latent_log_var,
86
+ norm_layer=config.get("norm_layer", "group_norm"),
87
+ patch_size_t=config.get("patch_size_t", config.get("patch_size", 1)),
88
+ add_channel_padding=config.get("add_channel_padding", False),
89
+ )
90
+
91
+ decoder = Decoder(
92
+ dims=config["dims"],
93
+ in_channels=config["latent_channels"],
94
+ out_channels=config.get("out_channels", 3),
95
+ block_out_channels=config["block_out_channels"],
96
+ patch_size=config.get("patch_size", 1),
97
+ norm_layer=config.get("norm_layer", "group_norm"),
98
+ patch_size_t=config.get("patch_size_t", config.get("patch_size", 1)),
99
+ add_channel_padding=config.get("add_channel_padding", False),
100
+ )
101
+
102
+ dims = config["dims"]
103
+ return VideoAutoencoder(
104
+ encoder=encoder,
105
+ decoder=decoder,
106
+ latent_channels=config["latent_channels"],
107
+ dims=dims,
108
+ use_quant_conv=use_quant_conv,
109
+ )
110
+
111
+ @property
112
+ def config(self):
113
+ return SimpleNamespace(
114
+ _class_name="VideoAutoencoder",
115
+ dims=self.dims,
116
+ in_channels=self.encoder.conv_in.in_channels
117
+ // (self.encoder.patch_size_t * self.encoder.patch_size**2),
118
+ out_channels=self.decoder.conv_out.out_channels
119
+ // (self.decoder.patch_size_t * self.decoder.patch_size**2),
120
+ latent_channels=self.decoder.conv_in.in_channels,
121
+ block_out_channels=[
122
+ self.encoder.down_blocks[i].res_blocks[-1].conv1.out_channels
123
+ for i in range(len(self.encoder.down_blocks))
124
+ ],
125
+ scaling_factor=1.0,
126
+ norm_layer=self.encoder.norm_layer,
127
+ patch_size=self.encoder.patch_size,
128
+ latent_log_var=self.encoder.latent_log_var,
129
+ use_quant_conv=self.use_quant_conv,
130
+ patch_size_t=self.encoder.patch_size_t,
131
+ add_channel_padding=self.encoder.add_channel_padding,
132
+ )
133
+
134
+ @property
135
+ def is_video_supported(self):
136
+ """
137
+ Check if the model supports video inputs of shape (B, C, F, H, W). Otherwise, the model only supports 2D images.
138
+ """
139
+ return self.dims != 2
140
+
141
+ @property
142
+ def downscale_factor(self):
143
+ return self.encoder.downsample_factor
144
+
145
+ def to_json_string(self) -> str:
146
+ import json
147
+
148
+ return json.dumps(self.config.__dict__)
149
+
150
+ def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
151
+ model_keys = set(name for name, _ in self.named_parameters())
152
+
153
+ key_mapping = {
154
+ ".resnets.": ".res_blocks.",
155
+ "downsamplers.0": "downsample",
156
+ "upsamplers.0": "upsample",
157
+ }
158
+
159
+ converted_state_dict = {}
160
+ for key, value in state_dict.items():
161
+ for k, v in key_mapping.items():
162
+ key = key.replace(k, v)
163
+
164
+ if "norm" in key and key not in model_keys:
165
+ logger.info(
166
+ f"Removing key {key} from state_dict as it is not present in the model"
167
+ )
168
+ continue
169
+
170
+ converted_state_dict[key] = value
171
+
172
+ super().load_state_dict(converted_state_dict, strict=strict)
173
+
174
+ def last_layer(self):
175
+ if hasattr(self.decoder, "conv_out"):
176
+ if isinstance(self.decoder.conv_out, nn.Sequential):
177
+ last_layer = self.decoder.conv_out[-1]
178
+ else:
179
+ last_layer = self.decoder.conv_out
180
+ else:
181
+ last_layer = self.decoder.layers[-1]
182
+ return last_layer
183
+
184
+
185
+ class Encoder(nn.Module):
186
+ r"""
187
+ The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.
188
+
189
+ Args:
190
+ in_channels (`int`, *optional*, defaults to 3):
191
+ The number of input channels.
192
+ out_channels (`int`, *optional*, defaults to 3):
193
+ The number of output channels.
194
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
195
+ The number of output channels for each block.
196
+ layers_per_block (`int`, *optional*, defaults to 2):
197
+ The number of layers per block.
198
+ norm_num_groups (`int`, *optional*, defaults to 32):
199
+ The number of groups for normalization.
200
+ patch_size (`int`, *optional*, defaults to 1):
201
+ The patch size to use. Should be a power of 2.
202
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
203
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
204
+ latent_log_var (`str`, *optional*, defaults to `per_channel`):
205
+ How the log variance is represented. Can be either `per_channel`, `uniform`, or `none`.
206
+ """
207
+
208
+ def __init__(
209
+ self,
210
+ dims: Union[int, Tuple[int, int]] = 3,
211
+ in_channels: int = 3,
212
+ out_channels: int = 3,
213
+ block_out_channels: Tuple[int, ...] = (64,),
214
+ layers_per_block: int = 2,
215
+ norm_num_groups: int = 32,
216
+ patch_size: Union[int, Tuple[int]] = 1,
217
+ norm_layer: str = "group_norm", # group_norm, pixel_norm
218
+ latent_log_var: str = "per_channel",
219
+ patch_size_t: Optional[int] = None,
220
+ add_channel_padding: Optional[bool] = False,
221
+ ):
222
+ super().__init__()
223
+ self.patch_size = patch_size
224
+ self.patch_size_t = patch_size_t if patch_size_t is not None else patch_size
225
+ self.add_channel_padding = add_channel_padding
226
+ self.layers_per_block = layers_per_block
227
+ self.norm_layer = norm_layer
228
+ self.latent_channels = out_channels
229
+ self.latent_log_var = latent_log_var
230
+ if add_channel_padding:
231
+ in_channels = in_channels * self.patch_size**3
232
+ else:
233
+ in_channels = in_channels * self.patch_size_t * self.patch_size**2
234
+ self.in_channels = in_channels
235
+ output_channel = block_out_channels[0]
236
+
237
+ self.conv_in = make_conv_nd(
238
+ dims=dims,
239
+ in_channels=in_channels,
240
+ out_channels=output_channel,
241
+ kernel_size=3,
242
+ stride=1,
243
+ padding=1,
244
+ )
245
+
246
+ self.down_blocks = nn.ModuleList([])
247
+
248
+ for i in range(len(block_out_channels)):
249
+ input_channel = output_channel
250
+ output_channel = block_out_channels[i]
251
+ is_final_block = i == len(block_out_channels) - 1
252
+
253
+ down_block = DownEncoderBlock3D(
254
+ dims=dims,
255
+ in_channels=input_channel,
256
+ out_channels=output_channel,
257
+ num_layers=self.layers_per_block,
258
+ add_downsample=not is_final_block and 2**i >= patch_size,
259
+ resnet_eps=1e-6,
260
+ downsample_padding=0,
261
+ resnet_groups=norm_num_groups,
262
+ norm_layer=norm_layer,
263
+ )
264
+ self.down_blocks.append(down_block)
265
+
266
+ self.mid_block = UNetMidBlock3D(
267
+ dims=dims,
268
+ in_channels=block_out_channels[-1],
269
+ num_layers=self.layers_per_block,
270
+ resnet_eps=1e-6,
271
+ resnet_groups=norm_num_groups,
272
+ norm_layer=norm_layer,
273
+ )
274
+
275
+ # out
276
+ if norm_layer == "group_norm":
277
+ self.conv_norm_out = nn.GroupNorm(
278
+ num_channels=block_out_channels[-1],
279
+ num_groups=norm_num_groups,
280
+ eps=1e-6,
281
+ )
282
+ elif norm_layer == "pixel_norm":
283
+ self.conv_norm_out = PixelNorm()
284
+ self.conv_act = nn.SiLU()
285
+
286
+ conv_out_channels = out_channels
287
+ if latent_log_var == "per_channel":
288
+ conv_out_channels *= 2
289
+ elif latent_log_var == "uniform":
290
+ conv_out_channels += 1
291
+ elif latent_log_var != "none":
292
+ raise ValueError(f"Invalid latent_log_var: {latent_log_var}")
293
+ self.conv_out = make_conv_nd(
294
+ dims, block_out_channels[-1], conv_out_channels, 3, padding=1
295
+ )
296
+
297
+ self.gradient_checkpointing = False
298
+
299
+ @property
300
+ def downscale_factor(self):
301
+ return (
302
+ 2
303
+ ** len(
304
+ [
305
+ block
306
+ for block in self.down_blocks
307
+ if isinstance(block.downsample, Downsample3D)
308
+ ]
309
+ )
310
+ * self.patch_size
311
+ )
312
+
313
+ def forward(
314
+ self, sample: torch.FloatTensor, return_features=False
315
+ ) -> torch.FloatTensor:
316
+ r"""The forward method of the `Encoder` class."""
317
+
318
+ downsample_in_time = sample.shape[2] != 1
319
+
320
+ # patchify
321
+ patch_size_t = self.patch_size_t if downsample_in_time else 1
322
+ sample = patchify(
323
+ sample,
324
+ patch_size_hw=self.patch_size,
325
+ patch_size_t=patch_size_t,
326
+ add_channel_padding=self.add_channel_padding,
327
+ )
328
+
329
+ sample = self.conv_in(sample)
330
+
331
+ checkpoint_fn = (
332
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
333
+ if self.gradient_checkpointing and self.training
334
+ else lambda x: x
335
+ )
336
+
337
+ if return_features:
338
+ features = []
339
+ for down_block in self.down_blocks:
340
+ sample = checkpoint_fn(down_block)(
341
+ sample, downsample_in_time=downsample_in_time
342
+ )
343
+ if return_features:
344
+ features.append(sample)
345
+
346
+ sample = checkpoint_fn(self.mid_block)(sample)
347
+
348
+ # post-process
349
+ sample = self.conv_norm_out(sample)
350
+ sample = self.conv_act(sample)
351
+ sample = self.conv_out(sample)
352
+
353
+ if self.latent_log_var == "uniform":
354
+ last_channel = sample[:, -1:, ...]
355
+ num_dims = sample.dim()
356
+
357
+ if num_dims == 4:
358
+ # For shape (B, C, H, W)
359
+ repeated_last_channel = last_channel.repeat(
360
+ 1, sample.shape[1] - 2, 1, 1
361
+ )
362
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
363
+ elif num_dims == 5:
364
+ # For shape (B, C, F, H, W)
365
+ repeated_last_channel = last_channel.repeat(
366
+ 1, sample.shape[1] - 2, 1, 1, 1
367
+ )
368
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
369
+ else:
370
+ raise ValueError(f"Invalid input shape: {sample.shape}")
371
+
372
+ if return_features:
373
+ features.append(sample[:, : self.latent_channels, ...])
374
+ return sample, features
375
+ return sample
376
+
377
+
378
+ class Decoder(nn.Module):
379
+ r"""
380
+ The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.
381
+
382
+ Args:
383
+ in_channels (`int`, *optional*, defaults to 3):
384
+ The number of input channels.
385
+ out_channels (`int`, *optional*, defaults to 3):
386
+ The number of output channels.
387
+ block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`):
388
+ The number of output channels for each block.
389
+ layers_per_block (`int`, *optional*, defaults to 2):
390
+ The number of layers per block.
391
+ norm_num_groups (`int`, *optional*, defaults to 32):
392
+ The number of groups for normalization.
393
+ patch_size (`int`, *optional*, defaults to 1):
394
+ The patch size to use. Should be a power of 2.
395
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
396
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
397
+ """
398
+
399
+ def __init__(
400
+ self,
401
+ dims,
402
+ in_channels: int = 3,
403
+ out_channels: int = 3,
404
+ block_out_channels: Tuple[int, ...] = (64,),
405
+ layers_per_block: int = 2,
406
+ norm_num_groups: int = 32,
407
+ patch_size: int = 1,
408
+ norm_layer: str = "group_norm",
409
+ patch_size_t: Optional[int] = None,
410
+ add_channel_padding: Optional[bool] = False,
411
+ ):
412
+ super().__init__()
413
+ self.patch_size = patch_size
414
+ self.patch_size_t = patch_size_t if patch_size_t is not None else patch_size
415
+ self.add_channel_padding = add_channel_padding
416
+ self.layers_per_block = layers_per_block
417
+ if add_channel_padding:
418
+ out_channels = out_channels * self.patch_size**3
419
+ else:
420
+ out_channels = out_channels * self.patch_size_t * self.patch_size**2
421
+ self.out_channels = out_channels
422
+
423
+ self.conv_in = make_conv_nd(
424
+ dims,
425
+ in_channels,
426
+ block_out_channels[-1],
427
+ kernel_size=3,
428
+ stride=1,
429
+ padding=1,
430
+ )
431
+
432
+ self.mid_block = None
433
+ self.up_blocks = nn.ModuleList([])
434
+
435
+ self.mid_block = UNetMidBlock3D(
436
+ dims=dims,
437
+ in_channels=block_out_channels[-1],
438
+ num_layers=self.layers_per_block,
439
+ resnet_eps=1e-6,
440
+ resnet_groups=norm_num_groups,
441
+ norm_layer=norm_layer,
442
+ )
443
+
444
+ reversed_block_out_channels = list(reversed(block_out_channels))
445
+ output_channel = reversed_block_out_channels[0]
446
+ for i in range(len(reversed_block_out_channels)):
447
+ prev_output_channel = output_channel
448
+ output_channel = reversed_block_out_channels[i]
449
+
450
+ is_final_block = i == len(block_out_channels) - 1
451
+
452
+ up_block = UpDecoderBlock3D(
453
+ dims=dims,
454
+ num_layers=self.layers_per_block + 1,
455
+ in_channels=prev_output_channel,
456
+ out_channels=output_channel,
457
+ add_upsample=not is_final_block
458
+ and 2 ** (len(block_out_channels) - i - 1) > patch_size,
459
+ resnet_eps=1e-6,
460
+ resnet_groups=norm_num_groups,
461
+ norm_layer=norm_layer,
462
+ )
463
+ self.up_blocks.append(up_block)
464
+
465
+ if norm_layer == "group_norm":
466
+ self.conv_norm_out = nn.GroupNorm(
467
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6
468
+ )
469
+ elif norm_layer == "pixel_norm":
470
+ self.conv_norm_out = PixelNorm()
471
+
472
+ self.conv_act = nn.SiLU()
473
+ self.conv_out = make_conv_nd(
474
+ dims, block_out_channels[0], out_channels, 3, padding=1
475
+ )
476
+
477
+ self.gradient_checkpointing = False
478
+
479
+ def forward(self, sample: torch.FloatTensor, target_shape) -> torch.FloatTensor:
480
+ r"""The forward method of the `Decoder` class."""
481
+ assert target_shape is not None, "target_shape must be provided"
482
+ upsample_in_time = sample.shape[2] < target_shape[2]
483
+
484
+ sample = self.conv_in(sample)
485
+
486
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
487
+
488
+ checkpoint_fn = (
489
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
490
+ if self.gradient_checkpointing and self.training
491
+ else lambda x: x
492
+ )
493
+
494
+ sample = checkpoint_fn(self.mid_block)(sample)
495
+ sample = sample.to(upscale_dtype)
496
+
497
+ for up_block in self.up_blocks:
498
+ sample = checkpoint_fn(up_block)(sample, upsample_in_time=upsample_in_time)
499
+
500
+ # post-process
501
+ sample = self.conv_norm_out(sample)
502
+ sample = self.conv_act(sample)
503
+ sample = self.conv_out(sample)
504
+
505
+ # un-patchify
506
+ patch_size_t = self.patch_size_t if upsample_in_time else 1
507
+ sample = unpatchify(
508
+ sample,
509
+ patch_size_hw=self.patch_size,
510
+ patch_size_t=patch_size_t,
511
+ add_channel_padding=self.add_channel_padding,
512
+ )
513
+
514
+ return sample
515
+
516
+
517
+ class DownEncoderBlock3D(nn.Module):
518
+ def __init__(
519
+ self,
520
+ dims: Union[int, Tuple[int, int]],
521
+ in_channels: int,
522
+ out_channels: int,
523
+ dropout: float = 0.0,
524
+ num_layers: int = 1,
525
+ resnet_eps: float = 1e-6,
526
+ resnet_groups: int = 32,
527
+ add_downsample: bool = True,
528
+ downsample_padding: int = 1,
529
+ norm_layer: str = "group_norm",
530
+ ):
531
+ super().__init__()
532
+ res_blocks = []
533
+
534
+ for i in range(num_layers):
535
+ in_channels = in_channels if i == 0 else out_channels
536
+ res_blocks.append(
537
+ ResnetBlock3D(
538
+ dims=dims,
539
+ in_channels=in_channels,
540
+ out_channels=out_channels,
541
+ eps=resnet_eps,
542
+ groups=resnet_groups,
543
+ dropout=dropout,
544
+ norm_layer=norm_layer,
545
+ )
546
+ )
547
+
548
+ self.res_blocks = nn.ModuleList(res_blocks)
549
+
550
+ if add_downsample:
551
+ self.downsample = Downsample3D(
552
+ dims,
553
+ out_channels,
554
+ out_channels=out_channels,
555
+ padding=downsample_padding,
556
+ )
557
+ else:
558
+ self.downsample = Identity()
559
+
560
+ def forward(
561
+ self, hidden_states: torch.FloatTensor, downsample_in_time
562
+ ) -> torch.FloatTensor:
563
+ for resnet in self.res_blocks:
564
+ hidden_states = resnet(hidden_states)
565
+
566
+ hidden_states = self.downsample(
567
+ hidden_states, downsample_in_time=downsample_in_time
568
+ )
569
+
570
+ return hidden_states
571
+
572
+
573
+ class UNetMidBlock3D(nn.Module):
574
+ """
575
+ A 3D UNet mid-block [`UNetMidBlock3D`] with multiple residual blocks.
576
+
577
+ Args:
578
+ in_channels (`int`): The number of input channels.
579
+ dropout (`float`, *optional*, defaults to 0.0): The dropout rate.
580
+ num_layers (`int`, *optional*, defaults to 1): The number of residual blocks.
581
+ resnet_eps (`float`, *optional*, defaults to 1e-6): The epsilon value for the resnet blocks.
582
+ resnet_groups (`int`, *optional*, defaults to 32):
583
+ The number of groups to use in the group normalization layers of the resnet blocks.
584
+
585
+ Returns:
586
+ `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size,
587
+ in_channels, frames, height, width)`.
588
+
589
+ """
590
+
591
+ def __init__(
592
+ self,
593
+ dims: Union[int, Tuple[int, int]],
594
+ in_channels: int,
595
+ dropout: float = 0.0,
596
+ num_layers: int = 1,
597
+ resnet_eps: float = 1e-6,
598
+ resnet_groups: int = 32,
599
+ norm_layer: str = "group_norm",
600
+ ):
601
+ super().__init__()
602
+ resnet_groups = (
603
+ resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
604
+ )
605
+
606
+ self.res_blocks = nn.ModuleList(
607
+ [
608
+ ResnetBlock3D(
609
+ dims=dims,
610
+ in_channels=in_channels,
611
+ out_channels=in_channels,
612
+ eps=resnet_eps,
613
+ groups=resnet_groups,
614
+ dropout=dropout,
615
+ norm_layer=norm_layer,
616
+ )
617
+ for _ in range(num_layers)
618
+ ]
619
+ )
620
+
621
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
622
+ for resnet in self.res_blocks:
623
+ hidden_states = resnet(hidden_states)
624
+
625
+ return hidden_states
626
+
627
+
628
+ class UpDecoderBlock3D(nn.Module):
629
+ def __init__(
630
+ self,
631
+ dims: Union[int, Tuple[int, int]],
632
+ in_channels: int,
633
+ out_channels: int,
634
+ resolution_idx: Optional[int] = None,
635
+ dropout: float = 0.0,
636
+ num_layers: int = 1,
637
+ resnet_eps: float = 1e-6,
638
+ resnet_groups: int = 32,
639
+ add_upsample: bool = True,
640
+ norm_layer: str = "group_norm",
641
+ ):
642
+ super().__init__()
643
+ res_blocks = []
644
+
645
+ for i in range(num_layers):
646
+ input_channels = in_channels if i == 0 else out_channels
647
+
648
+ res_blocks.append(
649
+ ResnetBlock3D(
650
+ dims=dims,
651
+ in_channels=input_channels,
652
+ out_channels=out_channels,
653
+ eps=resnet_eps,
654
+ groups=resnet_groups,
655
+ dropout=dropout,
656
+ norm_layer=norm_layer,
657
+ )
658
+ )
659
+
660
+ self.res_blocks = nn.ModuleList(res_blocks)
661
+
662
+ if add_upsample:
663
+ self.upsample = Upsample3D(
664
+ dims=dims, channels=out_channels, out_channels=out_channels
665
+ )
666
+ else:
667
+ self.upsample = Identity()
668
+
669
+ self.resolution_idx = resolution_idx
670
+
671
+ def forward(
672
+ self, hidden_states: torch.FloatTensor, upsample_in_time=True
673
+ ) -> torch.FloatTensor:
674
+ for resnet in self.res_blocks:
675
+ hidden_states = resnet(hidden_states)
676
+
677
+ hidden_states = self.upsample(hidden_states, upsample_in_time=upsample_in_time)
678
+
679
+ return hidden_states
680
+
681
+
682
+ class ResnetBlock3D(nn.Module):
683
+ r"""
684
+ A Resnet block.
685
+
686
+ Parameters:
687
+ in_channels (`int`): The number of channels in the input.
688
+ out_channels (`int`, *optional*, default to be `None`):
689
+ The number of output channels for the first conv layer. If None, same as `in_channels`.
690
+ dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
691
+ groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
692
+ eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
693
+ """
694
+
695
+ def __init__(
696
+ self,
697
+ dims: Union[int, Tuple[int, int]],
698
+ in_channels: int,
699
+ out_channels: Optional[int] = None,
700
+ conv_shortcut: bool = False,
701
+ dropout: float = 0.0,
702
+ groups: int = 32,
703
+ eps: float = 1e-6,
704
+ norm_layer: str = "group_norm",
705
+ ):
706
+ super().__init__()
707
+ self.in_channels = in_channels
708
+ out_channels = in_channels if out_channels is None else out_channels
709
+ self.out_channels = out_channels
710
+ self.use_conv_shortcut = conv_shortcut
711
+
712
+ if norm_layer == "group_norm":
713
+ self.norm1 = torch.nn.GroupNorm(
714
+ num_groups=groups, num_channels=in_channels, eps=eps, affine=True
715
+ )
716
+ elif norm_layer == "pixel_norm":
717
+ self.norm1 = PixelNorm()
718
+
719
+ self.non_linearity = nn.SiLU()
720
+
721
+ self.conv1 = make_conv_nd(
722
+ dims, in_channels, out_channels, kernel_size=3, stride=1, padding=1
723
+ )
724
+
725
+ if norm_layer == "group_norm":
726
+ self.norm2 = torch.nn.GroupNorm(
727
+ num_groups=groups, num_channels=out_channels, eps=eps, affine=True
728
+ )
729
+ elif norm_layer == "pixel_norm":
730
+ self.norm2 = PixelNorm()
731
+
732
+ self.dropout = torch.nn.Dropout(dropout)
733
+
734
+ self.conv2 = make_conv_nd(
735
+ dims, out_channels, out_channels, kernel_size=3, stride=1, padding=1
736
+ )
737
+
738
+ self.conv_shortcut = (
739
+ make_linear_nd(
740
+ dims=dims, in_channels=in_channels, out_channels=out_channels
741
+ )
742
+ if in_channels != out_channels
743
+ else nn.Identity()
744
+ )
745
+
746
+ def forward(
747
+ self,
748
+ input_tensor: torch.FloatTensor,
749
+ ) -> torch.FloatTensor:
750
+ hidden_states = input_tensor
751
+
752
+ hidden_states = self.norm1(hidden_states)
753
+
754
+ hidden_states = self.non_linearity(hidden_states)
755
+
756
+ hidden_states = self.conv1(hidden_states)
757
+
758
+ hidden_states = self.norm2(hidden_states)
759
+
760
+ hidden_states = self.non_linearity(hidden_states)
761
+
762
+ hidden_states = self.dropout(hidden_states)
763
+
764
+ hidden_states = self.conv2(hidden_states)
765
+
766
+ input_tensor = self.conv_shortcut(input_tensor)
767
+
768
+ output_tensor = input_tensor + hidden_states
769
+
770
+ return output_tensor
771
+
772
+
773
+ class Downsample3D(nn.Module):
774
+ def __init__(
775
+ self,
776
+ dims,
777
+ in_channels: int,
778
+ out_channels: int,
779
+ kernel_size: int = 3,
780
+ padding: int = 1,
781
+ ):
782
+ super().__init__()
783
+ stride: int = 2
784
+ self.padding = padding
785
+ self.in_channels = in_channels
786
+ self.dims = dims
787
+ self.conv = make_conv_nd(
788
+ dims=dims,
789
+ in_channels=in_channels,
790
+ out_channels=out_channels,
791
+ kernel_size=kernel_size,
792
+ stride=stride,
793
+ padding=padding,
794
+ )
795
+
796
+ def forward(self, x, downsample_in_time=True):
797
+ conv = self.conv
798
+ if self.padding == 0:
799
+ if self.dims == 2:
800
+ padding = (0, 1, 0, 1)
801
+ else:
802
+ padding = (0, 1, 0, 1, 0, 1 if downsample_in_time else 0)
803
+
804
+ x = functional.pad(x, padding, mode="constant", value=0)
805
+
806
+ if self.dims == (2, 1) and not downsample_in_time:
807
+ return conv(x, skip_time_conv=True)
808
+
809
+ return conv(x)
810
+
811
+
812
+ class Upsample3D(nn.Module):
813
+ """
814
+ An upsampling layer for 3D tensors of shape (B, C, D, H, W).
815
+
816
+ :param channels: channels in the inputs and outputs.
817
+ """
818
+
819
+ def __init__(self, dims, channels, out_channels=None):
820
+ super().__init__()
821
+ self.dims = dims
822
+ self.channels = channels
823
+ self.out_channels = out_channels or channels
824
+ self.conv = make_conv_nd(
825
+ dims, channels, out_channels, kernel_size=3, padding=1, bias=True
826
+ )
827
+
828
+ def forward(self, x, upsample_in_time):
829
+ if self.dims == 2:
830
+ x = functional.interpolate(
831
+ x, (x.shape[2] * 2, x.shape[3] * 2), mode="nearest"
832
+ )
833
+ else:
834
+ time_scale_factor = 2 if upsample_in_time else 1
835
+ # print("before:", x.shape)
836
+ b, c, d, h, w = x.shape
837
+ x = rearrange(x, "b c d h w -> (b d) c h w")
838
+ # height and width interpolate
839
+ x = functional.interpolate(
840
+ x, (x.shape[2] * 2, x.shape[3] * 2), mode="nearest"
841
+ )
842
+ _, _, h, w = x.shape
843
+
844
+ if not upsample_in_time and self.dims == (2, 1):
845
+ x = rearrange(x, "(b d) c h w -> b c d h w ", b=b, h=h, w=w)
846
+ return self.conv(x, skip_time_conv=True)
847
+
848
+ # Second upsampling: the temporal axis, essentially treated as a 1D convolution across the 'd' dimension
849
+ x = rearrange(x, "(b d) c h w -> (b h w) c 1 d", b=b)
850
+
851
+ # (b h w) c 1 d
852
+ new_d = x.shape[-1] * time_scale_factor
853
+ x = functional.interpolate(x, (1, new_d), mode="nearest")
854
+ # (b h w) c 1 new_d
855
+ x = rearrange(
856
+ x, "(b h w) c 1 new_d -> b c new_d h w", b=b, h=h, w=w, new_d=new_d
857
+ )
858
+ # b c d h w
859
+
860
+ # x = functional.interpolate(
861
+ # x, (x.shape[2] * time_scale_factor, x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
862
+ # )
863
+ # print("after:", x.shape)
864
+
865
+ return self.conv(x)
866
+
867
+
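For reference, a minimal standalone sketch of the two-stage upsampling performed in Upsample3D.forward above (spatial 2x per frame, then temporal interpolation across the 'd' axis), using plain torch/einops and omitting the final convolution; the helper name and shapes are illustrative only, not part of the library API.

import torch
import torch.nn.functional as F
from einops import rearrange

def upsample_video_nearest(x: torch.Tensor, upsample_in_time: bool = True) -> torch.Tensor:
    # x: (b, c, d, h, w) -> spatial 2x and optional temporal 2x, nearest-neighbour only
    b, c, d, h, w = x.shape
    x = rearrange(x, "b c d h w -> (b d) c h w")
    x = F.interpolate(x, scale_factor=2, mode="nearest")   # height/width doubled per frame
    _, _, h, w = x.shape
    x = rearrange(x, "(b d) c h w -> (b h w) c 1 d", b=b)
    new_d = d * (2 if upsample_in_time else 1)
    x = F.interpolate(x, (1, new_d), mode="nearest")        # depth (optionally) doubled
    return rearrange(x, "(b h w) c 1 d -> b c d h w", b=b, h=h, w=w)

# e.g. a (1, 8, 4, 16, 16) tensor becomes (1, 8, 8, 32, 32)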
868
+ def patchify(x, patch_size_hw, patch_size_t=1, add_channel_padding=False):
869
+ if patch_size_hw == 1 and patch_size_t == 1:
870
+ return x
871
+ if x.dim() == 4:
872
+ x = rearrange(
873
+ x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size_hw, r=patch_size_hw
874
+ )
875
+ elif x.dim() == 5:
876
+ x = rearrange(
877
+ x,
878
+ "b c (f p) (h q) (w r) -> b (c p r q) f h w",
879
+ p=patch_size_t,
880
+ q=patch_size_hw,
881
+ r=patch_size_hw,
882
+ )
883
+ else:
884
+ raise ValueError(f"Invalid input shape: {x.shape}")
885
+
886
+ if (
887
+ (x.dim() == 5)
888
+ and (patch_size_hw > patch_size_t)
889
+ and (patch_size_t > 1 or add_channel_padding)
890
+ ):
891
+ channels_to_pad = x.shape[1] * (patch_size_hw // patch_size_t) - x.shape[1]
892
+ padding_zeros = torch.zeros(
893
+ x.shape[0],
894
+ channels_to_pad,
895
+ x.shape[2],
896
+ x.shape[3],
897
+ x.shape[4],
898
+ device=x.device,
899
+ dtype=x.dtype,
900
+ )
901
+ x = torch.cat([padding_zeros, x], dim=1)
902
+
903
+ return x
904
+
905
+
906
+ def unpatchify(x, patch_size_hw, patch_size_t=1, add_channel_padding=False):
907
+ if patch_size_hw == 1 and patch_size_t == 1:
908
+ return x
909
+
910
+ if (
911
+ (x.dim() == 5)
912
+ and (patch_size_hw > patch_size_t)
913
+ and (patch_size_t > 1 or add_channel_padding)
914
+ ):
915
+ channels_to_keep = int(x.shape[1] * (patch_size_t / patch_size_hw))
916
+ x = x[:, :channels_to_keep, :, :, :]
917
+
918
+ if x.dim() == 4:
919
+ x = rearrange(
920
+ x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size_hw, r=patch_size_hw
921
+ )
922
+ elif x.dim() == 5:
923
+ x = rearrange(
924
+ x,
925
+ "b (c p r q) f h w -> b c (f p) (h q) (w r)",
926
+ p=patch_size_t,
927
+ q=patch_size_hw,
928
+ r=patch_size_hw,
929
+ )
930
+
931
+ return x
932
+
933
+
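As a quick illustration of the patchify shape arithmetic above (spatial and temporal patches are folded into the channel axis), a hedged standalone sketch using einops directly; the sizes are arbitrary examples:

import torch
from einops import rearrange

x = torch.randn(2, 3, 8, 64, 64)  # (b, c, f, h, w)
p, q = 4, 4                        # patch_size_t, patch_size_hw
x_p = rearrange(x, "b c (f p) (h q) (w r) -> b (c p r q) f h w", p=p, q=q, r=q)
# channels grow by p * q * q while frames, height and width shrink accordingly
assert x_p.shape == (2, 3 * p * q * q, 8 // p, 64 // q, 64 // q)  # (2, 192, 2, 16, 16)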
934
+ def create_video_autoencoder_config(
935
+ latent_channels: int = 4,
936
+ ):
937
+ config = {
938
+ "_class_name": "VideoAutoencoder",
939
+ "dims": (
940
+ 2,
941
+ 1,
942
+ ), # 2 for Conv2d, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d
943
+ "in_channels": 3, # Number of input color channels (e.g., RGB)
944
+ "out_channels": 3, # Number of output color channels
945
+ "latent_channels": latent_channels, # Number of channels in the latent space representation
946
+ "block_out_channels": [
947
+ 128,
948
+ 256,
949
+ 512,
950
+ 512,
951
+ ], # Number of output channels of each encoder / decoder inner block
952
+ "patch_size": 1,
953
+ }
954
+
955
+ return config
956
+
957
+
958
+ def create_video_autoencoder_pathify4x4x4_config(
959
+ latent_channels: int = 4,
960
+ ):
961
+ config = {
962
+ "_class_name": "VideoAutoencoder",
963
+ "dims": (
964
+ 2,
965
+ 1,
966
+ ), # 2 for Conv2d, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d
967
+ "in_channels": 3, # Number of input color channels (e.g., RGB)
968
+ "out_channels": 3, # Number of output color channels
969
+ "latent_channels": latent_channels, # Number of channels in the latent space representation
970
+ "block_out_channels": [512]
971
+ * 4, # Number of output channels of each encoder / decoder inner block
972
+ "patch_size": 4,
973
+ "latent_log_var": "uniform",
974
+ }
975
+
976
+ return config
977
+
978
+
979
+ def create_video_autoencoder_pathify4x4_config(
980
+ latent_channels: int = 4,
981
+ ):
982
+ config = {
983
+ "_class_name": "VideoAutoencoder",
984
+ "dims": 2, # 2 for Conv2, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d
985
+ "in_channels": 3, # Number of input color channels (e.g., RGB)
986
+ "out_channels": 3, # Number of output color channels
987
+ "latent_channels": latent_channels, # Number of channels in the latent space representation
988
+ "block_out_channels": [512]
989
+ * 4, # Number of output channels of each encoder / decoder inner block
990
+ "patch_size": 4,
991
+ "norm_layer": "pixel_norm",
992
+ }
993
+
994
+ return config
995
+
996
+
997
+ def test_vae_patchify_unpatchify():
998
+ import torch
999
+
1000
+ x = torch.randn(2, 3, 8, 64, 64)
1001
+ x_patched = patchify(x, patch_size_hw=4, patch_size_t=4)
1002
+ x_unpatched = unpatchify(x_patched, patch_size_hw=4, patch_size_t=4)
1003
+ assert torch.allclose(x, x_unpatched)
1004
+
1005
+
1006
+ def demo_video_autoencoder_forward_backward():
1007
+ # Configuration for the VideoAutoencoder
1008
+ config = create_video_autoencoder_pathify4x4x4_config()
1009
+
1010
+ # Instantiate the VideoAutoencoder with the specified configuration
1011
+ video_autoencoder = VideoAutoencoder.from_config(config)
1012
+
1013
+ print(video_autoencoder)
1014
+
1015
+ # Print the total number of parameters in the video autoencoder
1016
+ total_params = sum(p.numel() for p in video_autoencoder.parameters())
1017
+ print(f"Total number of parameters in VideoAutoencoder: {total_params:,}")
1018
+
1019
+ # Create a mock input tensor simulating a batch of videos
1020
+ # Shape: (batch_size, channels, depth, height, width)
1021
+ # E.g., 2 videos, each with 3 color channels, 8 frames, and 64x64 pixels per frame
1022
+ input_videos = torch.randn(2, 3, 8, 64, 64)
1023
+
1024
+ # Forward pass: encode and decode the input videos
1025
+ latent = video_autoencoder.encode(input_videos).latent_dist.mode()
1026
+ print(f"input shape={input_videos.shape}")
1027
+ print(f"latent shape={latent.shape}")
1028
+ reconstructed_videos = video_autoencoder.decode(
1029
+ latent, target_shape=input_videos.shape
1030
+ ).sample
1031
+
1032
+ print(f"reconstructed shape={reconstructed_videos.shape}")
1033
+
1034
+ # Calculate the loss (e.g., mean squared error)
1035
+ loss = torch.nn.functional.mse_loss(input_videos, reconstructed_videos)
1036
+
1037
+ # Perform backward pass
1038
+ loss.backward()
1039
+
1040
+ print(f"Demo completed with loss: {loss.item()}")
1041
+
1042
+
1043
+ # Ensure to call the demo function to execute the forward and backward pass
1044
+ if __name__ == "__main__":
1045
+ demo_video_autoencoder_forward_backward()
ltx_video/models/transformers/__init__.py ADDED
File without changes
ltx_video/models/transformers/attention.py ADDED
@@ -0,0 +1,1264 @@
1
+ import inspect
2
+ from importlib import import_module
3
+ from typing import Any, Dict, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
8
+ from diffusers.models.attention import _chunked_feed_forward
9
+ from diffusers.models.attention_processor import (
10
+ LoRAAttnAddedKVProcessor,
11
+ LoRAAttnProcessor,
12
+ LoRAAttnProcessor2_0,
13
+ LoRAXFormersAttnProcessor,
14
+ SpatialNorm,
15
+ )
16
+ from diffusers.models.lora import LoRACompatibleLinear
17
+ from diffusers.models.normalization import RMSNorm
18
+ from diffusers.utils import deprecate, logging
19
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
20
+ from einops import rearrange
21
+ from torch import nn
22
+
23
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
24
+
25
+ try:
26
+ from torch_xla.experimental.custom_kernel import flash_attention
27
+ except ImportError:
28
+ # Workaround for automatic tests: this function is currently patched manually
29
+ # into the torch_xla lib when the container is set up
30
+ pass
31
+
32
+ # code adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ @maybe_allow_in_graph
38
+ class BasicTransformerBlock(nn.Module):
39
+ r"""
40
+ A basic Transformer block.
41
+
42
+ Parameters:
43
+ dim (`int`): The number of channels in the input and output.
44
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
45
+ attention_head_dim (`int`): The number of channels in each head.
46
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
47
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
48
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
49
+ num_embeds_ada_norm (:
50
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
51
+ attention_bias (:
52
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
53
+ only_cross_attention (`bool`, *optional*):
54
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
55
+ double_self_attention (`bool`, *optional*):
56
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
57
+ upcast_attention (`bool`, *optional*):
58
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
59
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
60
+ Whether to use learnable elementwise affine parameters for normalization.
61
+ qk_norm (`str`, *optional*, defaults to None):
62
+ Set to 'layer_norm' or `rms_norm` to perform query and key normalization.
63
+ adaptive_norm (`str`, *optional*, defaults to `"single_scale_shift"`):
64
+ The type of adaptive norm to use. Can be `"single_scale_shift"`, `"single_scale"` or "none".
65
+ standardization_norm (`str`, *optional*, defaults to `"layer_norm"`):
66
+ The type of pre-normalization to use. Can be `"layer_norm"` or `"rms_norm"`.
67
+ final_dropout (`bool` *optional*, defaults to False):
68
+ Whether to apply a final dropout after the last feed-forward layer.
69
+ attention_type (`str`, *optional*, defaults to `"default"`):
70
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
71
+ positional_embeddings (`str`, *optional*, defaults to `None`):
72
+ The type of positional embeddings to apply to.
73
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
74
+ The maximum number of positional embeddings to apply.
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ dim: int,
80
+ num_attention_heads: int,
81
+ attention_head_dim: int,
82
+ dropout=0.0,
83
+ cross_attention_dim: Optional[int] = None,
84
+ activation_fn: str = "geglu",
85
+ num_embeds_ada_norm: Optional[int] = None, # pylint: disable=unused-argument
86
+ attention_bias: bool = False,
87
+ only_cross_attention: bool = False,
88
+ double_self_attention: bool = False,
89
+ upcast_attention: bool = False,
90
+ norm_elementwise_affine: bool = True,
91
+ adaptive_norm: str = "single_scale_shift", # 'single_scale_shift', 'single_scale' or 'none'
92
+ standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm'
93
+ norm_eps: float = 1e-5,
94
+ qk_norm: Optional[str] = None,
95
+ final_dropout: bool = False,
96
+ attention_type: str = "default", # pylint: disable=unused-argument
97
+ ff_inner_dim: Optional[int] = None,
98
+ ff_bias: bool = True,
99
+ attention_out_bias: bool = True,
100
+ use_tpu_flash_attention: bool = False,
101
+ use_rope: bool = False,
102
+ ):
103
+ super().__init__()
104
+ self.only_cross_attention = only_cross_attention
105
+ self.use_tpu_flash_attention = use_tpu_flash_attention
106
+ self.adaptive_norm = adaptive_norm
107
+
108
+ assert standardization_norm in ["layer_norm", "rms_norm"]
109
+ assert adaptive_norm in ["single_scale_shift", "single_scale", "none"]
110
+
111
+ make_norm_layer = (
112
+ nn.LayerNorm if standardization_norm == "layer_norm" else RMSNorm
113
+ )
114
+
115
+ # Define 3 blocks. Each block has its own normalization layer.
116
+ # 1. Self-Attn
117
+ self.norm1 = make_norm_layer(
118
+ dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps
119
+ )
120
+
121
+ self.attn1 = Attention(
122
+ query_dim=dim,
123
+ heads=num_attention_heads,
124
+ dim_head=attention_head_dim,
125
+ dropout=dropout,
126
+ bias=attention_bias,
127
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
128
+ upcast_attention=upcast_attention,
129
+ out_bias=attention_out_bias,
130
+ use_tpu_flash_attention=use_tpu_flash_attention,
131
+ qk_norm=qk_norm,
132
+ use_rope=use_rope,
133
+ )
134
+
135
+ # 2. Cross-Attn
136
+ if cross_attention_dim is not None or double_self_attention:
137
+ self.attn2 = Attention(
138
+ query_dim=dim,
139
+ cross_attention_dim=(
140
+ cross_attention_dim if not double_self_attention else None
141
+ ),
142
+ heads=num_attention_heads,
143
+ dim_head=attention_head_dim,
144
+ dropout=dropout,
145
+ bias=attention_bias,
146
+ upcast_attention=upcast_attention,
147
+ out_bias=attention_out_bias,
148
+ use_tpu_flash_attention=use_tpu_flash_attention,
149
+ qk_norm=qk_norm,
150
+ use_rope=use_rope,
151
+ ) # is self-attn if encoder_hidden_states is none
152
+
153
+ if adaptive_norm == "none":
154
+ self.attn2_norm = make_norm_layer(
155
+ dim, norm_eps, norm_elementwise_affine
156
+ )
157
+ else:
158
+ self.attn2 = None
159
+ self.attn2_norm = None
160
+
161
+ self.norm2 = make_norm_layer(dim, norm_eps, norm_elementwise_affine)
162
+
163
+ # 3. Feed-forward
164
+ self.ff = FeedForward(
165
+ dim,
166
+ dropout=dropout,
167
+ activation_fn=activation_fn,
168
+ final_dropout=final_dropout,
169
+ inner_dim=ff_inner_dim,
170
+ bias=ff_bias,
171
+ )
172
+
173
+ # 5. Scale-shift for PixArt-Alpha.
174
+ if adaptive_norm != "none":
175
+ num_ada_params = 4 if adaptive_norm == "single_scale" else 6
176
+ self.scale_shift_table = nn.Parameter(
177
+ torch.randn(num_ada_params, dim) / dim**0.5
178
+ )
179
+
180
+ # let chunk size default to None
181
+ self._chunk_size = None
182
+ self._chunk_dim = 0
183
+
184
+ def set_use_tpu_flash_attention(self):
185
+ r"""
186
+ Sets the flag on this object and propagates it down to its children. The flag enforces the use of the TPU
187
+ attention kernel.
188
+ """
189
+ self.use_tpu_flash_attention = True
190
+ self.attn1.set_use_tpu_flash_attention()
191
+ self.attn2.set_use_tpu_flash_attention()
192
+
193
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
194
+ # Sets chunk feed-forward
195
+ self._chunk_size = chunk_size
196
+ self._chunk_dim = dim
197
+
198
+ def forward(
199
+ self,
200
+ hidden_states: torch.FloatTensor,
201
+ freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
202
+ attention_mask: Optional[torch.FloatTensor] = None,
203
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
204
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
205
+ timestep: Optional[torch.LongTensor] = None,
206
+ cross_attention_kwargs: Dict[str, Any] = None,
207
+ class_labels: Optional[torch.LongTensor] = None,
208
+ skip_layer_mask: Optional[torch.Tensor] = None,
209
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
210
+ ) -> torch.FloatTensor:
211
+ if cross_attention_kwargs is not None:
212
+ if cross_attention_kwargs.get("scale", None) is not None:
213
+ logger.warning(
214
+ "Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored."
215
+ )
216
+
217
+ # Notice that normalization is always applied before the real computation in the following blocks.
218
+ # 0. Self-Attention
219
+ batch_size = hidden_states.shape[0]
220
+
221
+ original_hidden_states = hidden_states
222
+
223
+ norm_hidden_states = self.norm1(hidden_states)
224
+
225
+ # Apply ada_norm_single
226
+ if self.adaptive_norm in ["single_scale_shift", "single_scale"]:
227
+ assert timestep.ndim == 3 # [batch, 1 or num_tokens, embedding_dim]
228
+ num_ada_params = self.scale_shift_table.shape[0]
229
+ ada_values = self.scale_shift_table[None, None] + timestep.reshape(
230
+ batch_size, timestep.shape[1], num_ada_params, -1
231
+ )
232
+ if self.adaptive_norm == "single_scale_shift":
233
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
234
+ ada_values.unbind(dim=2)
235
+ )
236
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
237
+ else:
238
+ scale_msa, gate_msa, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
239
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa)
240
+ elif self.adaptive_norm == "none":
241
+ scale_msa, gate_msa, scale_mlp, gate_mlp = None, None, None, None
242
+ else:
243
+ raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}")
244
+
245
+ norm_hidden_states = norm_hidden_states.squeeze(
246
+ 1
247
+ ) # TODO: Check if this is needed
248
+
249
+ # 1. Prepare GLIGEN inputs
250
+ cross_attention_kwargs = (
251
+ cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
252
+ )
253
+
254
+ attn_output = self.attn1(
255
+ norm_hidden_states,
256
+ freqs_cis=freqs_cis,
257
+ encoder_hidden_states=(
258
+ encoder_hidden_states if self.only_cross_attention else None
259
+ ),
260
+ attention_mask=attention_mask,
261
+ skip_layer_mask=skip_layer_mask,
262
+ skip_layer_strategy=skip_layer_strategy,
263
+ **cross_attention_kwargs,
264
+ )
265
+ if gate_msa is not None:
266
+ attn_output = gate_msa * attn_output
267
+
268
+ hidden_states = attn_output + hidden_states
269
+ if hidden_states.ndim == 4:
270
+ hidden_states = hidden_states.squeeze(1)
271
+
272
+ # 3. Cross-Attention
273
+ if self.attn2 is not None:
274
+ if self.adaptive_norm == "none":
275
+ attn_input = self.attn2_norm(hidden_states)
276
+ else:
277
+ attn_input = hidden_states
278
+ attn_output = self.attn2(
279
+ attn_input,
280
+ freqs_cis=freqs_cis,
281
+ encoder_hidden_states=encoder_hidden_states,
282
+ attention_mask=encoder_attention_mask,
283
+ **cross_attention_kwargs,
284
+ )
285
+ hidden_states = attn_output + hidden_states
286
+
287
+ # 4. Feed-forward
288
+ norm_hidden_states = self.norm2(hidden_states)
289
+ if self.adaptive_norm == "single_scale_shift":
290
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
291
+ elif self.adaptive_norm == "single_scale":
292
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp)
293
+ elif self.adaptive_norm == "none":
294
+ pass
295
+ else:
296
+ raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}")
297
+
298
+ if self._chunk_size is not None:
299
+ # "feed_forward_chunk_size" can be used to save memory
300
+ ff_output = _chunked_feed_forward(
301
+ self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size
302
+ )
303
+ else:
304
+ ff_output = self.ff(norm_hidden_states)
305
+ if gate_mlp is not None:
306
+ ff_output = gate_mlp * ff_output
307
+
308
+ hidden_states = ff_output + hidden_states
309
+ if hidden_states.ndim == 4:
310
+ hidden_states = hidden_states.squeeze(1)
311
+
312
+ if (
313
+ skip_layer_mask is not None
314
+ and skip_layer_strategy == SkipLayerStrategy.TransformerBlock
315
+ ):
316
+ skip_layer_mask = skip_layer_mask.view(-1, 1, 1)
317
+ hidden_states = hidden_states * skip_layer_mask + original_hidden_states * (
318
+ 1.0 - skip_layer_mask
319
+ )
320
+
321
+ return hidden_states
322
+
323
+
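To make the "single_scale_shift" adaptive-norm path in BasicTransformerBlock.forward above concrete, here is a minimal standalone sketch of how the learned table and a per-token timestep embedding are combined into shift/scale/gate values; the dimensions are illustrative, not taken from a real config:

import torch

batch, tokens, dim, num_ada_params = 2, 16, 64, 6
scale_shift_table = torch.randn(num_ada_params, dim) / dim**0.5   # learned parameter
timestep_emb = torch.randn(batch, 1, num_ada_params * dim)        # from the timestep embedder

ada = scale_shift_table[None, None] + timestep_emb.reshape(batch, 1, num_ada_params, dim)
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada.unbind(dim=2)

x = torch.randn(batch, tokens, dim)
x_mod = x * (1 + scale_msa) + shift_msa   # modulated input to self-attention
# the attention output would then be multiplied by gate_msa before the residual add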
324
+ @maybe_allow_in_graph
325
+ class Attention(nn.Module):
326
+ r"""
327
+ A cross attention layer.
328
+
329
+ Parameters:
330
+ query_dim (`int`):
331
+ The number of channels in the query.
332
+ cross_attention_dim (`int`, *optional*):
333
+ The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
334
+ heads (`int`, *optional*, defaults to 8):
335
+ The number of heads to use for multi-head attention.
336
+ dim_head (`int`, *optional*, defaults to 64):
337
+ The number of channels in each head.
338
+ dropout (`float`, *optional*, defaults to 0.0):
339
+ The dropout probability to use.
340
+ bias (`bool`, *optional*, defaults to False):
341
+ Set to `True` for the query, key, and value linear layers to contain a bias parameter.
342
+ upcast_attention (`bool`, *optional*, defaults to False):
343
+ Set to `True` to upcast the attention computation to `float32`.
344
+ upcast_softmax (`bool`, *optional*, defaults to False):
345
+ Set to `True` to upcast the softmax computation to `float32`.
346
+ cross_attention_norm (`str`, *optional*, defaults to `None`):
347
+ The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.
348
+ cross_attention_norm_num_groups (`int`, *optional*, defaults to 32):
349
+ The number of groups to use for the group norm in the cross attention.
350
+ added_kv_proj_dim (`int`, *optional*, defaults to `None`):
351
+ The number of channels to use for the added key and value projections. If `None`, no projection is used.
352
+ norm_num_groups (`int`, *optional*, defaults to `None`):
353
+ The number of groups to use for the group norm in the attention.
354
+ spatial_norm_dim (`int`, *optional*, defaults to `None`):
355
+ The number of channels to use for the spatial normalization.
356
+ out_bias (`bool`, *optional*, defaults to `True`):
357
+ Set to `True` to use a bias in the output linear layer.
358
+ scale_qk (`bool`, *optional*, defaults to `True`):
359
+ Set to `True` to scale the query and key by `1 / sqrt(dim_head)`.
360
+ qk_norm (`str`, *optional*, defaults to None):
361
+ Set to 'layer_norm' or `rms_norm` to perform query and key normalization.
362
+ only_cross_attention (`bool`, *optional*, defaults to `False`):
363
+ Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if
364
+ `added_kv_proj_dim` is not `None`.
365
+ eps (`float`, *optional*, defaults to 1e-5):
366
+ An additional value added to the denominator in group normalization that is used for numerical stability.
367
+ rescale_output_factor (`float`, *optional*, defaults to 1.0):
368
+ A factor to rescale the output by dividing it with this value.
369
+ residual_connection (`bool`, *optional*, defaults to `False`):
370
+ Set to `True` to add the residual connection to the output.
371
+ _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`):
372
+ Set to `True` if the attention block is loaded from a deprecated state dict.
373
+ processor (`AttnProcessor`, *optional*, defaults to `None`):
374
+ The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and
375
+ `AttnProcessor` otherwise.
376
+ """
377
+
378
+ def __init__(
379
+ self,
380
+ query_dim: int,
381
+ cross_attention_dim: Optional[int] = None,
382
+ heads: int = 8,
383
+ dim_head: int = 64,
384
+ dropout: float = 0.0,
385
+ bias: bool = False,
386
+ upcast_attention: bool = False,
387
+ upcast_softmax: bool = False,
388
+ cross_attention_norm: Optional[str] = None,
389
+ cross_attention_norm_num_groups: int = 32,
390
+ added_kv_proj_dim: Optional[int] = None,
391
+ norm_num_groups: Optional[int] = None,
392
+ spatial_norm_dim: Optional[int] = None,
393
+ out_bias: bool = True,
394
+ scale_qk: bool = True,
395
+ qk_norm: Optional[str] = None,
396
+ only_cross_attention: bool = False,
397
+ eps: float = 1e-5,
398
+ rescale_output_factor: float = 1.0,
399
+ residual_connection: bool = False,
400
+ _from_deprecated_attn_block: bool = False,
401
+ processor: Optional["AttnProcessor"] = None,
402
+ out_dim: int = None,
403
+ use_tpu_flash_attention: bool = False,
404
+ use_rope: bool = False,
405
+ ):
406
+ super().__init__()
407
+ self.inner_dim = out_dim if out_dim is not None else dim_head * heads
408
+ self.query_dim = query_dim
409
+ self.use_bias = bias
410
+ self.is_cross_attention = cross_attention_dim is not None
411
+ self.cross_attention_dim = (
412
+ cross_attention_dim if cross_attention_dim is not None else query_dim
413
+ )
414
+ self.upcast_attention = upcast_attention
415
+ self.upcast_softmax = upcast_softmax
416
+ self.rescale_output_factor = rescale_output_factor
417
+ self.residual_connection = residual_connection
418
+ self.dropout = dropout
419
+ self.fused_projections = False
420
+ self.out_dim = out_dim if out_dim is not None else query_dim
421
+ self.use_tpu_flash_attention = use_tpu_flash_attention
422
+ self.use_rope = use_rope
423
+
424
+ # we make use of this private variable to know whether this class is loaded
425
+ # with a deprecated state dict so that we can convert it on the fly
426
+ self._from_deprecated_attn_block = _from_deprecated_attn_block
427
+
428
+ self.scale_qk = scale_qk
429
+ self.scale = dim_head**-0.5 if self.scale_qk else 1.0
430
+
431
+ if qk_norm is None:
432
+ self.q_norm = nn.Identity()
433
+ self.k_norm = nn.Identity()
434
+ elif qk_norm == "rms_norm":
435
+ self.q_norm = RMSNorm(dim_head * heads, eps=1e-5)
436
+ self.k_norm = RMSNorm(dim_head * heads, eps=1e-5)
437
+ elif qk_norm == "layer_norm":
438
+ self.q_norm = nn.LayerNorm(dim_head * heads, eps=1e-5)
439
+ self.k_norm = nn.LayerNorm(dim_head * heads, eps=1e-5)
440
+ else:
441
+ raise ValueError(f"Unsupported qk_norm method: {qk_norm}")
442
+
443
+ self.heads = out_dim // dim_head if out_dim is not None else heads
444
+ # for slice_size > 0 the attention score computation
445
+ # is split across the batch axis to save memory
446
+ # You can set slice_size with `set_attention_slice`
447
+ self.sliceable_head_dim = heads
448
+
449
+ self.added_kv_proj_dim = added_kv_proj_dim
450
+ self.only_cross_attention = only_cross_attention
451
+
452
+ if self.added_kv_proj_dim is None and self.only_cross_attention:
453
+ raise ValueError(
454
+ "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
455
+ )
456
+
457
+ if norm_num_groups is not None:
458
+ self.group_norm = nn.GroupNorm(
459
+ num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True
460
+ )
461
+ else:
462
+ self.group_norm = None
463
+
464
+ if spatial_norm_dim is not None:
465
+ self.spatial_norm = SpatialNorm(
466
+ f_channels=query_dim, zq_channels=spatial_norm_dim
467
+ )
468
+ else:
469
+ self.spatial_norm = None
470
+
471
+ if cross_attention_norm is None:
472
+ self.norm_cross = None
473
+ elif cross_attention_norm == "layer_norm":
474
+ self.norm_cross = nn.LayerNorm(self.cross_attention_dim)
475
+ elif cross_attention_norm == "group_norm":
476
+ if self.added_kv_proj_dim is not None:
477
+ # The given `encoder_hidden_states` are initially of shape
478
+ # (batch_size, seq_len, added_kv_proj_dim) before being projected
479
+ # to (batch_size, seq_len, cross_attention_dim). The norm is applied
480
+ # before the projection, so we need to use `added_kv_proj_dim` as
481
+ # the number of channels for the group norm.
482
+ norm_cross_num_channels = added_kv_proj_dim
483
+ else:
484
+ norm_cross_num_channels = self.cross_attention_dim
485
+
486
+ self.norm_cross = nn.GroupNorm(
487
+ num_channels=norm_cross_num_channels,
488
+ num_groups=cross_attention_norm_num_groups,
489
+ eps=1e-5,
490
+ affine=True,
491
+ )
492
+ else:
493
+ raise ValueError(
494
+ f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'"
495
+ )
496
+
497
+ linear_cls = nn.Linear
498
+
499
+ self.linear_cls = linear_cls
500
+ self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)
501
+
502
+ if not self.only_cross_attention:
503
+ # only relevant for the `AddedKVProcessor` classes
504
+ self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
505
+ self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
506
+ else:
507
+ self.to_k = None
508
+ self.to_v = None
509
+
510
+ if self.added_kv_proj_dim is not None:
511
+ self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
512
+ self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
513
+
514
+ self.to_out = nn.ModuleList([])
515
+ self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias))
516
+ self.to_out.append(nn.Dropout(dropout))
517
+
518
+ # set attention processor
519
+ # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
520
+ # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
521
+ # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
522
+ if processor is None:
523
+ processor = AttnProcessor2_0()
524
+ self.set_processor(processor)
525
+
526
+ def set_use_tpu_flash_attention(self):
527
+ r"""
528
+ Sets the flag on this object. The flag enforces the use of the TPU attention kernel.
529
+ """
530
+ self.use_tpu_flash_attention = True
531
+
532
+ def set_processor(self, processor: "AttnProcessor") -> None:
533
+ r"""
534
+ Set the attention processor to use.
535
+
536
+ Args:
537
+ processor (`AttnProcessor`):
538
+ The attention processor to use.
539
+ """
540
+ # if current processor is in `self._modules` and if passed `processor` is not, we need to
541
+ # pop `processor` from `self._modules`
542
+ if (
543
+ hasattr(self, "processor")
544
+ and isinstance(self.processor, torch.nn.Module)
545
+ and not isinstance(processor, torch.nn.Module)
546
+ ):
547
+ logger.info(
548
+ f"You are removing possibly trained weights of {self.processor} with {processor}"
549
+ )
550
+ self._modules.pop("processor")
551
+
552
+ self.processor = processor
553
+
554
+ def get_processor(
555
+ self, return_deprecated_lora: bool = False
556
+ ) -> "AttentionProcessor": # noqa: F821
557
+ r"""
558
+ Get the attention processor in use.
559
+
560
+ Args:
561
+ return_deprecated_lora (`bool`, *optional*, defaults to `False`):
562
+ Set to `True` to return the deprecated LoRA attention processor.
563
+
564
+ Returns:
565
+ "AttentionProcessor": The attention processor in use.
566
+ """
567
+ if not return_deprecated_lora:
568
+ return self.processor
569
+
570
+ # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible
571
+ # serialization format for LoRA Attention Processors. It should be deleted once the integration
572
+ # with PEFT is completed.
573
+ is_lora_activated = {
574
+ name: module.lora_layer is not None
575
+ for name, module in self.named_modules()
576
+ if hasattr(module, "lora_layer")
577
+ }
578
+
579
+ # 1. if no layer has a LoRA activated we can return the processor as usual
580
+ if not any(is_lora_activated.values()):
581
+ return self.processor
582
+
583
+ # LoRA is not applied to `add_k_proj` or `add_v_proj`, so drop them from the check
584
+ is_lora_activated.pop("add_k_proj", None)
585
+ is_lora_activated.pop("add_v_proj", None)
586
+ # 2. else it is not possible that only some layers have LoRA activated
587
+ if not all(is_lora_activated.values()):
588
+ raise ValueError(
589
+ f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}"
590
+ )
591
+
592
+ # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor
593
+ non_lora_processor_cls_name = self.processor.__class__.__name__
594
+ lora_processor_cls = getattr(
595
+ import_module(__name__), "LoRA" + non_lora_processor_cls_name
596
+ )
597
+
598
+ hidden_size = self.inner_dim
599
+
600
+ # now create a LoRA attention processor from the LoRA layers
601
+ if lora_processor_cls in [
602
+ LoRAAttnProcessor,
603
+ LoRAAttnProcessor2_0,
604
+ LoRAXFormersAttnProcessor,
605
+ ]:
606
+ kwargs = {
607
+ "cross_attention_dim": self.cross_attention_dim,
608
+ "rank": self.to_q.lora_layer.rank,
609
+ "network_alpha": self.to_q.lora_layer.network_alpha,
610
+ "q_rank": self.to_q.lora_layer.rank,
611
+ "q_hidden_size": self.to_q.lora_layer.out_features,
612
+ "k_rank": self.to_k.lora_layer.rank,
613
+ "k_hidden_size": self.to_k.lora_layer.out_features,
614
+ "v_rank": self.to_v.lora_layer.rank,
615
+ "v_hidden_size": self.to_v.lora_layer.out_features,
616
+ "out_rank": self.to_out[0].lora_layer.rank,
617
+ "out_hidden_size": self.to_out[0].lora_layer.out_features,
618
+ }
619
+
620
+ if hasattr(self.processor, "attention_op"):
621
+ kwargs["attention_op"] = self.processor.attention_op
622
+
623
+ lora_processor = lora_processor_cls(hidden_size, **kwargs)
624
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
625
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
626
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
627
+ lora_processor.to_out_lora.load_state_dict(
628
+ self.to_out[0].lora_layer.state_dict()
629
+ )
630
+ elif lora_processor_cls == LoRAAttnAddedKVProcessor:
631
+ lora_processor = lora_processor_cls(
632
+ hidden_size,
633
+ cross_attention_dim=self.add_k_proj.weight.shape[0],
634
+ rank=self.to_q.lora_layer.rank,
635
+ network_alpha=self.to_q.lora_layer.network_alpha,
636
+ )
637
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
638
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
639
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
640
+ lora_processor.to_out_lora.load_state_dict(
641
+ self.to_out[0].lora_layer.state_dict()
642
+ )
643
+
644
+ # only save if used
645
+ if self.add_k_proj.lora_layer is not None:
646
+ lora_processor.add_k_proj_lora.load_state_dict(
647
+ self.add_k_proj.lora_layer.state_dict()
648
+ )
649
+ lora_processor.add_v_proj_lora.load_state_dict(
650
+ self.add_v_proj.lora_layer.state_dict()
651
+ )
652
+ else:
653
+ lora_processor.add_k_proj_lora = None
654
+ lora_processor.add_v_proj_lora = None
655
+ else:
656
+ raise ValueError(f"{lora_processor_cls} does not exist.")
657
+
658
+ return lora_processor
659
+
660
+ def forward(
661
+ self,
662
+ hidden_states: torch.FloatTensor,
663
+ freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
664
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
665
+ attention_mask: Optional[torch.FloatTensor] = None,
666
+ skip_layer_mask: Optional[torch.Tensor] = None,
667
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
668
+ **cross_attention_kwargs,
669
+ ) -> torch.Tensor:
670
+ r"""
671
+ The forward method of the `Attention` class.
672
+
673
+ Args:
674
+ hidden_states (`torch.Tensor`):
675
+ The hidden states of the query.
676
+ encoder_hidden_states (`torch.Tensor`, *optional*):
677
+ The hidden states of the encoder.
678
+ attention_mask (`torch.Tensor`, *optional*):
679
+ The attention mask to use. If `None`, no mask is applied.
680
+ skip_layer_mask (`torch.Tensor`, *optional*):
681
+ The skip layer mask to use. If `None`, no mask is applied.
682
+ skip_layer_strategy (`SkipLayerStrategy`, *optional*, defaults to `None`):
683
+ Controls which layers to skip for spatiotemporal guidance.
684
+ **cross_attention_kwargs:
685
+ Additional keyword arguments to pass along to the cross attention.
686
+
687
+ Returns:
688
+ `torch.Tensor`: The output of the attention layer.
689
+ """
690
+ # The `Attention` class can call different attention processors / attention functions
691
+ # here we simply pass along all tensors to the selected processor class
692
+ # For standard processors that are defined here, `**cross_attention_kwargs` is empty
693
+
694
+ attn_parameters = set(
695
+ inspect.signature(self.processor.__call__).parameters.keys()
696
+ )
697
+ unused_kwargs = [
698
+ k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters
699
+ ]
700
+ if len(unused_kwargs) > 0:
701
+ logger.warning(
702
+ f"cross_attention_kwargs {unused_kwargs} are not expected by"
703
+ f" {self.processor.__class__.__name__} and will be ignored."
704
+ )
705
+ cross_attention_kwargs = {
706
+ k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters
707
+ }
708
+
709
+ return self.processor(
710
+ self,
711
+ hidden_states,
712
+ freqs_cis=freqs_cis,
713
+ encoder_hidden_states=encoder_hidden_states,
714
+ attention_mask=attention_mask,
715
+ skip_layer_mask=skip_layer_mask,
716
+ skip_layer_strategy=skip_layer_strategy,
717
+ **cross_attention_kwargs,
718
+ )
719
+
720
+ def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
721
+ r"""
722
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
723
+ is the number of heads initialized while constructing the `Attention` class.
724
+
725
+ Args:
726
+ tensor (`torch.Tensor`): The tensor to reshape.
727
+
728
+ Returns:
729
+ `torch.Tensor`: The reshaped tensor.
730
+ """
731
+ head_size = self.heads
732
+ batch_size, seq_len, dim = tensor.shape
733
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
734
+ tensor = tensor.permute(0, 2, 1, 3).reshape(
735
+ batch_size // head_size, seq_len, dim * head_size
736
+ )
737
+ return tensor
738
+
739
+ def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
740
+ r"""
741
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]`. `heads` is
742
+ the number of heads initialized while constructing the `Attention` class.
743
+
744
+ Args:
745
+ tensor (`torch.Tensor`): The tensor to reshape.
746
+ out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
747
+ reshaped to `[batch_size * heads, seq_len, dim // heads]`.
748
+
749
+ Returns:
750
+ `torch.Tensor`: The reshaped tensor.
751
+ """
752
+
753
+ head_size = self.heads
754
+ if tensor.ndim == 3:
755
+ batch_size, seq_len, dim = tensor.shape
756
+ extra_dim = 1
757
+ else:
758
+ batch_size, extra_dim, seq_len, dim = tensor.shape
759
+ tensor = tensor.reshape(
760
+ batch_size, seq_len * extra_dim, head_size, dim // head_size
761
+ )
762
+ tensor = tensor.permute(0, 2, 1, 3)
763
+
764
+ if out_dim == 3:
765
+ tensor = tensor.reshape(
766
+ batch_size * head_size, seq_len * extra_dim, dim // head_size
767
+ )
768
+
769
+ return tensor
770
+
771
+ def get_attention_scores(
772
+ self,
773
+ query: torch.Tensor,
774
+ key: torch.Tensor,
775
+ attention_mask: torch.Tensor = None,
776
+ ) -> torch.Tensor:
777
+ r"""
778
+ Compute the attention scores.
779
+
780
+ Args:
781
+ query (`torch.Tensor`): The query tensor.
782
+ key (`torch.Tensor`): The key tensor.
783
+ attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
784
+
785
+ Returns:
786
+ `torch.Tensor`: The attention probabilities/scores.
787
+ """
788
+ dtype = query.dtype
789
+ if self.upcast_attention:
790
+ query = query.float()
791
+ key = key.float()
792
+
793
+ if attention_mask is None:
794
+ baddbmm_input = torch.empty(
795
+ query.shape[0],
796
+ query.shape[1],
797
+ key.shape[1],
798
+ dtype=query.dtype,
799
+ device=query.device,
800
+ )
801
+ beta = 0
802
+ else:
803
+ baddbmm_input = attention_mask
804
+ beta = 1
805
+
806
+ attention_scores = torch.baddbmm(
807
+ baddbmm_input,
808
+ query,
809
+ key.transpose(-1, -2),
810
+ beta=beta,
811
+ alpha=self.scale,
812
+ )
813
+ del baddbmm_input
814
+
815
+ if self.upcast_softmax:
816
+ attention_scores = attention_scores.float()
817
+
818
+ attention_probs = attention_scores.softmax(dim=-1)
819
+ del attention_scores
820
+
821
+ attention_probs = attention_probs.to(dtype)
822
+
823
+ return attention_probs
824
+
825
+ def prepare_attention_mask(
826
+ self,
827
+ attention_mask: torch.Tensor,
828
+ target_length: int,
829
+ batch_size: int,
830
+ out_dim: int = 3,
831
+ ) -> torch.Tensor:
832
+ r"""
833
+ Prepare the attention mask for the attention computation.
834
+
835
+ Args:
836
+ attention_mask (`torch.Tensor`):
837
+ The attention mask to prepare.
838
+ target_length (`int`):
839
+ The target length of the attention mask. This is the length of the attention mask after padding.
840
+ batch_size (`int`):
841
+ The batch size, which is used to repeat the attention mask.
842
+ out_dim (`int`, *optional*, defaults to `3`):
843
+ The output dimension of the attention mask. Can be either `3` or `4`.
844
+
845
+ Returns:
846
+ `torch.Tensor`: The prepared attention mask.
847
+ """
848
+ head_size = self.heads
849
+ if attention_mask is None:
850
+ return attention_mask
851
+
852
+ current_length: int = attention_mask.shape[-1]
853
+ if current_length != target_length:
854
+ if attention_mask.device.type == "mps":
855
+ # HACK: MPS: Does not support padding by greater than dimension of input tensor.
856
+ # Instead, we can manually construct the padding tensor.
857
+ padding_shape = (
858
+ attention_mask.shape[0],
859
+ attention_mask.shape[1],
860
+ target_length,
861
+ )
862
+ padding = torch.zeros(
863
+ padding_shape,
864
+ dtype=attention_mask.dtype,
865
+ device=attention_mask.device,
866
+ )
867
+ attention_mask = torch.cat([attention_mask, padding], dim=2)
868
+ else:
869
+ # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
870
+ # we want to instead pad by (0, remaining_length), where remaining_length is:
871
+ # remaining_length: int = target_length - current_length
872
+ # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
873
+ attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
874
+
875
+ if out_dim == 3:
876
+ if attention_mask.shape[0] < batch_size * head_size:
877
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
878
+ elif out_dim == 4:
879
+ attention_mask = attention_mask.unsqueeze(1)
880
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=1)
881
+
882
+ return attention_mask
883
+
884
+ def norm_encoder_hidden_states(
885
+ self, encoder_hidden_states: torch.Tensor
886
+ ) -> torch.Tensor:
887
+ r"""
888
+ Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
889
+ `Attention` class.
890
+
891
+ Args:
892
+ encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.
893
+
894
+ Returns:
895
+ `torch.Tensor`: The normalized encoder hidden states.
896
+ """
897
+ assert (
898
+ self.norm_cross is not None
899
+ ), "self.norm_cross must be defined to call self.norm_encoder_hidden_states"
900
+
901
+ if isinstance(self.norm_cross, nn.LayerNorm):
902
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
903
+ elif isinstance(self.norm_cross, nn.GroupNorm):
904
+ # Group norm norms along the channels dimension and expects
905
+ # input to be in the shape of (N, C, *). In this case, we want
906
+ # to norm along the hidden dimension, so we need to move
907
+ # (batch_size, sequence_length, hidden_size) ->
908
+ # (batch_size, hidden_size, sequence_length)
909
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
910
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
911
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
912
+ else:
913
+ assert False
914
+
915
+ return encoder_hidden_states
916
+
917
+ @staticmethod
918
+ def apply_rotary_emb(
919
+ input_tensor: torch.Tensor,
920
+ freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor],
921
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
922
+ cos_freqs = freqs_cis[0]
923
+ sin_freqs = freqs_cis[1]
924
+
925
+ t_dup = rearrange(input_tensor, "... (d r) -> ... d r", r=2)
926
+ t1, t2 = t_dup.unbind(dim=-1)
927
+ t_dup = torch.stack((-t2, t1), dim=-1)
928
+ input_tensor_rot = rearrange(t_dup, "... d r -> ... (d r)")
929
+
930
+ out = input_tensor * cos_freqs + input_tensor_rot * sin_freqs
931
+
932
+ return out
933
+
934
+
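A small, self-contained sketch of the rotary-embedding application implemented in apply_rotary_emb above; the frequency construction here is a generic illustration, not the exact scheme used by the model's positional-embedding code:

import torch
from einops import rearrange

def rotate_pairs(t: torch.Tensor) -> torch.Tensor:
    # (..., 2k): pairwise rotation (x1, x2) -> (-x2, x1), matching apply_rotary_emb
    t = rearrange(t, "... (d r) -> ... d r", r=2)
    t1, t2 = t.unbind(dim=-1)
    return rearrange(torch.stack((-t2, t1), dim=-1), "... d r -> ... (d r)")

seq, dim = 8, 16
pos = torch.arange(seq, dtype=torch.float32)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))  # (dim/2,)
angles = pos[:, None] * inv_freq[None, :]                            # (seq, dim/2)
cos = angles.cos().repeat_interleave(2, dim=-1)                      # (seq, dim)
sin = angles.sin().repeat_interleave(2, dim=-1)

q = torch.randn(seq, dim)
q_rot = q * cos + rotate_pairs(q) * sin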
935
+ class AttnProcessor2_0:
936
+ r"""
937
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
938
+ """
939
+
940
+ def __init__(self):
941
+ pass
942
+
943
+ def __call__(
944
+ self,
945
+ attn: Attention,
946
+ hidden_states: torch.FloatTensor,
947
+ freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor],
948
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
949
+ attention_mask: Optional[torch.FloatTensor] = None,
950
+ temb: Optional[torch.FloatTensor] = None,
951
+ skip_layer_mask: Optional[torch.FloatTensor] = None,
952
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
953
+ *args,
954
+ **kwargs,
955
+ ) -> torch.FloatTensor:
956
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
957
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
958
+ deprecate("scale", "1.0.0", deprecation_message)
959
+
960
+ residual = hidden_states
961
+ if attn.spatial_norm is not None:
962
+ hidden_states = attn.spatial_norm(hidden_states, temb)
963
+
964
+ input_ndim = hidden_states.ndim
965
+
966
+ if input_ndim == 4:
967
+ batch_size, channel, height, width = hidden_states.shape
968
+ hidden_states = hidden_states.view(
969
+ batch_size, channel, height * width
970
+ ).transpose(1, 2)
971
+
972
+ batch_size, sequence_length, _ = (
973
+ hidden_states.shape
974
+ if encoder_hidden_states is None
975
+ else encoder_hidden_states.shape
976
+ )
977
+
978
+ if skip_layer_mask is not None:
979
+ skip_layer_mask = skip_layer_mask.reshape(batch_size, 1, 1)
980
+
981
+ if (attention_mask is not None) and (not attn.use_tpu_flash_attention):
982
+ attention_mask = attn.prepare_attention_mask(
983
+ attention_mask, sequence_length, batch_size
984
+ )
985
+ # scaled_dot_product_attention expects attention_mask shape to be
986
+ # (batch, heads, source_length, target_length)
987
+ attention_mask = attention_mask.view(
988
+ batch_size, attn.heads, -1, attention_mask.shape[-1]
989
+ )
990
+
991
+ if attn.group_norm is not None:
992
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
993
+ 1, 2
994
+ )
995
+
996
+ query = attn.to_q(hidden_states)
997
+ query = attn.q_norm(query)
998
+
999
+ if encoder_hidden_states is not None:
1000
+ if attn.norm_cross:
1001
+ encoder_hidden_states = attn.norm_encoder_hidden_states(
1002
+ encoder_hidden_states
1003
+ )
1004
+ key = attn.to_k(encoder_hidden_states)
1005
+ key = attn.k_norm(key)
1006
+ else: # if no context provided do self-attention
1007
+ encoder_hidden_states = hidden_states
1008
+ key = attn.to_k(hidden_states)
1009
+ key = attn.k_norm(key)
1010
+ if attn.use_rope:
1011
+ key = attn.apply_rotary_emb(key, freqs_cis)
1012
+ query = attn.apply_rotary_emb(query, freqs_cis)
1013
+
1014
+ value = attn.to_v(encoder_hidden_states)
1015
+ value_for_stg = value
1016
+
1017
+ inner_dim = key.shape[-1]
1018
+ head_dim = inner_dim // attn.heads
1019
+
1020
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1021
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1022
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
1023
+
1024
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
1025
+
1026
+ if attn.use_tpu_flash_attention: # use tpu attention offload 'flash attention'
1027
+ q_segment_indexes = None
1028
+ if (
1029
+ attention_mask is not None
1030
+ ): # if a mask is required, both segment-id fields need to be set
1031
+ # attention_mask = torch.squeeze(attention_mask).to(torch.float32)
1032
+ attention_mask = attention_mask.to(torch.float32)
1033
+ q_segment_indexes = torch.ones(
1034
+ batch_size, query.shape[2], device=query.device, dtype=torch.float32
1035
+ )
1036
+ assert (
1037
+ attention_mask.shape[1] == key.shape[2]
1038
+ ), f"ERROR: KEY SHAPE must be same as attention mask [{key.shape[2]}, {attention_mask.shape[1]}]"
1039
+
1040
+ assert (
1041
+ query.shape[2] % 128 == 0
1042
+ ), f"ERROR: QUERY SHAPE must be divisible by 128 (TPU limitation) [{query.shape[2]}]"
1043
+ assert (
1044
+ key.shape[2] % 128 == 0
1045
+ ), f"ERROR: KEY SHAPE must be divisible by 128 (TPU limitation) [{key.shape[2]}]"
1046
+
1047
+ # run the TPU kernel implemented in jax with pallas
1048
+ hidden_states_a = flash_attention(
1049
+ q=query,
1050
+ k=key,
1051
+ v=value,
1052
+ q_segment_ids=q_segment_indexes,
1053
+ kv_segment_ids=attention_mask,
1054
+ sm_scale=attn.scale,
1055
+ )
1056
+ else:
1057
+ hidden_states_a = F.scaled_dot_product_attention(
1058
+ query,
1059
+ key,
1060
+ value,
1061
+ attn_mask=attention_mask,
1062
+ dropout_p=0.0,
1063
+ is_causal=False,
1064
+ )
1065
+
1066
+ hidden_states_a = hidden_states_a.transpose(1, 2).reshape(
1067
+ batch_size, -1, attn.heads * head_dim
1068
+ )
1069
+ hidden_states_a = hidden_states_a.to(query.dtype)
1070
+
1071
+ if (
1072
+ skip_layer_mask is not None
1073
+ and skip_layer_strategy == SkipLayerStrategy.AttentionSkip
1074
+ ):
1075
+ hidden_states = hidden_states_a * skip_layer_mask + hidden_states * (
1076
+ 1.0 - skip_layer_mask
1077
+ )
1078
+ elif (
1079
+ skip_layer_mask is not None
1080
+ and skip_layer_strategy == SkipLayerStrategy.AttentionValues
1081
+ ):
1082
+ hidden_states = hidden_states_a * skip_layer_mask + value_for_stg * (
1083
+ 1.0 - skip_layer_mask
1084
+ )
1085
+ else:
1086
+ hidden_states = hidden_states_a
1087
+
1088
+ # linear proj
1089
+ hidden_states = attn.to_out[0](hidden_states)
1090
+ # dropout
1091
+ hidden_states = attn.to_out[1](hidden_states)
1092
+
1093
+ if input_ndim == 4:
1094
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
1095
+ batch_size, channel, height, width
1096
+ )
1097
+ if (
1098
+ skip_layer_mask is not None
1099
+ and skip_layer_strategy == SkipLayerStrategy.Residual
1100
+ ):
1101
+ skip_layer_mask = skip_layer_mask.reshape(batch_size, 1, 1, 1)
1102
+
1103
+ if attn.residual_connection:
1104
+ if (
1105
+ skip_layer_mask is not None
1106
+ and skip_layer_strategy == SkipLayerStrategy.Residual
1107
+ ):
1108
+ hidden_states = hidden_states + residual * skip_layer_mask
1109
+ else:
1110
+ hidden_states = hidden_states + residual
1111
+
1112
+ hidden_states = hidden_states / attn.rescale_output_factor
1113
+
1114
+ return hidden_states
1115
+
1116
+
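For orientation, a minimal sketch of the core reshape-and-SDPA path that AttnProcessor2_0 performs (multi-head split, fused attention, merge back); the projection layers, masks and rotary embeddings are omitted, and the sizes are arbitrary:

import torch
import torch.nn.functional as F

batch, seq, heads, head_dim = 2, 32, 4, 16
q = torch.randn(batch, seq, heads * head_dim)
k = torch.randn(batch, seq, heads * head_dim)
v = torch.randn(batch, seq, heads * head_dim)

# (b, seq, heads*head_dim) -> (b, heads, seq, head_dim)
q, k, v = (t.view(batch, -1, heads, head_dim).transpose(1, 2) for t in (q, k, v))

out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)

# (b, heads, seq, head_dim) -> (b, seq, heads*head_dim)
out = out.transpose(1, 2).reshape(batch, -1, heads * head_dim)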
1117
+ class AttnProcessor:
1118
+ r"""
1119
+ Default processor for performing attention-related computations.
1120
+ """
1121
+
1122
+ def __call__(
1123
+ self,
1124
+ attn: Attention,
1125
+ hidden_states: torch.FloatTensor,
1126
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
1127
+ attention_mask: Optional[torch.FloatTensor] = None,
1128
+ temb: Optional[torch.FloatTensor] = None,
1129
+ *args,
1130
+ **kwargs,
1131
+ ) -> torch.Tensor:
1132
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
1133
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
1134
+ deprecate("scale", "1.0.0", deprecation_message)
1135
+
1136
+ residual = hidden_states
1137
+
1138
+ if attn.spatial_norm is not None:
1139
+ hidden_states = attn.spatial_norm(hidden_states, temb)
1140
+
1141
+ input_ndim = hidden_states.ndim
1142
+
1143
+ if input_ndim == 4:
1144
+ batch_size, channel, height, width = hidden_states.shape
1145
+ hidden_states = hidden_states.view(
1146
+ batch_size, channel, height * width
1147
+ ).transpose(1, 2)
1148
+
1149
+ batch_size, sequence_length, _ = (
1150
+ hidden_states.shape
1151
+ if encoder_hidden_states is None
1152
+ else encoder_hidden_states.shape
1153
+ )
1154
+ attention_mask = attn.prepare_attention_mask(
1155
+ attention_mask, sequence_length, batch_size
1156
+ )
1157
+
1158
+ if attn.group_norm is not None:
1159
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
1160
+ 1, 2
1161
+ )
1162
+
1163
+ query = attn.to_q(hidden_states)
1164
+
1165
+ if encoder_hidden_states is None:
1166
+ encoder_hidden_states = hidden_states
1167
+ elif attn.norm_cross:
1168
+ encoder_hidden_states = attn.norm_encoder_hidden_states(
1169
+ encoder_hidden_states
1170
+ )
1171
+
1172
+ key = attn.to_k(encoder_hidden_states)
1173
+ value = attn.to_v(encoder_hidden_states)
1174
+
1175
+ query = attn.head_to_batch_dim(query)
1176
+ key = attn.head_to_batch_dim(key)
1177
+ value = attn.head_to_batch_dim(value)
1178
+
1179
+ query = attn.q_norm(query)
1180
+ key = attn.k_norm(key)
1181
+
1182
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
1183
+ hidden_states = torch.bmm(attention_probs, value)
1184
+ hidden_states = attn.batch_to_head_dim(hidden_states)
1185
+
1186
+ # linear proj
1187
+ hidden_states = attn.to_out[0](hidden_states)
1188
+ # dropout
1189
+ hidden_states = attn.to_out[1](hidden_states)
1190
+
1191
+ if input_ndim == 4:
1192
+ hidden_states = hidden_states.transpose(-1, -2).reshape(
1193
+ batch_size, channel, height, width
1194
+ )
1195
+
1196
+ if attn.residual_connection:
1197
+ hidden_states = hidden_states + residual
1198
+
1199
+ hidden_states = hidden_states / attn.rescale_output_factor
1200
+
1201
+ return hidden_states
1202
+
1203
+
1204
+ class FeedForward(nn.Module):
1205
+ r"""
1206
+ A feed-forward layer.
1207
+
1208
+ Parameters:
1209
+ dim (`int`): The number of channels in the input.
1210
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
1211
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
1212
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
1213
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
1214
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
1215
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
1216
+ """
1217
+
1218
+ def __init__(
1219
+ self,
1220
+ dim: int,
1221
+ dim_out: Optional[int] = None,
1222
+ mult: int = 4,
1223
+ dropout: float = 0.0,
1224
+ activation_fn: str = "geglu",
1225
+ final_dropout: bool = False,
1226
+ inner_dim=None,
1227
+ bias: bool = True,
1228
+ ):
1229
+ super().__init__()
1230
+ if inner_dim is None:
1231
+ inner_dim = int(dim * mult)
1232
+ dim_out = dim_out if dim_out is not None else dim
1233
+ linear_cls = nn.Linear
1234
+
1235
+ if activation_fn == "gelu":
1236
+ act_fn = GELU(dim, inner_dim, bias=bias)
1237
+ elif activation_fn == "gelu-approximate":
1238
+ act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
1239
+ elif activation_fn == "geglu":
1240
+ act_fn = GEGLU(dim, inner_dim, bias=bias)
1241
+ elif activation_fn == "geglu-approximate":
1242
+ act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
1243
+ else:
1244
+ raise ValueError(f"Unsupported activation function: {activation_fn}")
1245
+
1246
+ self.net = nn.ModuleList([])
1247
+ # project in
1248
+ self.net.append(act_fn)
1249
+ # project dropout
1250
+ self.net.append(nn.Dropout(dropout))
1251
+ # project out
1252
+ self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
1253
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
1254
+ if final_dropout:
1255
+ self.net.append(nn.Dropout(dropout))
1256
+
1257
+ def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
1258
+ compatible_cls = (GEGLU, LoRACompatibleLinear)
1259
+ for module in self.net:
1260
+ if isinstance(module, compatible_cls):
1261
+ hidden_states = module(hidden_states, scale)
1262
+ else:
1263
+ hidden_states = module(hidden_states)
1264
+ return hidden_states
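The FeedForward block above projects dim to an inner width (mult * dim by default), applies the chosen gated activation, and projects back to dim_out. A simplified, self-contained stand-in (not the class above) that shows the GEGLU variant's shape flow:

import torch
from torch import nn
import torch.nn.functional as F

class TinyGEGLUFeedForward(nn.Module):
    # Simplified stand-in for the GEGLU path: dim -> mult*dim (gated) -> dim.
    def __init__(self, dim: int, mult: int = 4, dropout: float = 0.0):
        super().__init__()
        self.proj_in = nn.Linear(dim, dim * mult * 2)  # GEGLU needs twice the hidden width
        self.dropout = nn.Dropout(dropout)
        self.proj_out = nn.Linear(dim * mult, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, gate = self.proj_in(x).chunk(2, dim=-1)
        return self.proj_out(self.dropout(hidden * F.gelu(gate)))

x = torch.randn(2, 16, 64)
print(TinyGEGLUFeedForward(64)(x).shape)  # torch.Size([2, 16, 64])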
ltx_video/models/transformers/embeddings.py ADDED
@@ -0,0 +1,129 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py
2
+ import math
3
+
4
+ import numpy as np
5
+ import torch
6
+ from einops import rearrange
7
+ from torch import nn
8
+
9
+
10
+ def get_timestep_embedding(
11
+ timesteps: torch.Tensor,
12
+ embedding_dim: int,
13
+ flip_sin_to_cos: bool = False,
14
+ downscale_freq_shift: float = 1,
15
+ scale: float = 1,
16
+ max_period: int = 10000,
17
+ ):
18
+ """
19
+ This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings.
20
+
21
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
22
+ These may be fractional.
23
+ :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the
24
+ embeddings. :return: an [N x dim] Tensor of positional embeddings.
25
+ """
26
+ assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array"
27
+
28
+ half_dim = embedding_dim // 2
29
+ exponent = -math.log(max_period) * torch.arange(
30
+ start=0, end=half_dim, dtype=torch.float32, device=timesteps.device
31
+ )
32
+ exponent = exponent / (half_dim - downscale_freq_shift)
33
+
34
+ emb = torch.exp(exponent)
35
+ emb = timesteps[:, None].float() * emb[None, :]
36
+
37
+ # scale embeddings
38
+ emb = scale * emb
39
+
40
+ # concat sine and cosine embeddings
41
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1)
42
+
43
+ # flip sine and cosine embeddings
44
+ if flip_sin_to_cos:
45
+ emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1)
46
+
47
+ # zero pad
48
+ if embedding_dim % 2 == 1:
49
+ emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
50
+ return emb
51
+
52
+
53
+ def get_3d_sincos_pos_embed(embed_dim, grid, w, h, f):
54
+ """
55
+ grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or
56
+ [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
57
+ """
58
+ grid = rearrange(grid, "c (f h w) -> c f h w", h=h, w=w)
59
+ grid = rearrange(grid, "c f h w -> c h w f", h=h, w=w)
60
+ grid = grid.reshape([3, 1, w, h, f])
61
+ pos_embed = get_3d_sincos_pos_embed_from_grid(embed_dim, grid)
62
+ pos_embed = pos_embed.transpose(1, 0, 2, 3)
63
+ return rearrange(pos_embed, "h w f c -> (f h w) c")
64
+
65
+
66
+ def get_3d_sincos_pos_embed_from_grid(embed_dim, grid):
67
+ if embed_dim % 3 != 0:
68
+ raise ValueError("embed_dim must be divisible by 3")
69
+
70
+ # use half of dimensions to encode grid_h
71
+ emb_f = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[0]) # (H*W*T, D/3)
72
+ emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[1]) # (H*W*T, D/3)
73
+ emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[2]) # (H*W*T, D/3)
74
+
75
+ emb = np.concatenate([emb_h, emb_w, emb_f], axis=-1) # (H*W*T, D)
76
+ return emb
77
+
78
+
79
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
80
+ """
81
+ embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
82
+ """
83
+ if embed_dim % 2 != 0:
84
+ raise ValueError("embed_dim must be divisible by 2")
85
+
86
+ omega = np.arange(embed_dim // 2, dtype=np.float64)
87
+ omega /= embed_dim / 2.0
88
+ omega = 1.0 / 10000**omega # (D/2,)
89
+
90
+ pos_shape = pos.shape
91
+
92
+ pos = pos.reshape(-1)
93
+ out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
94
+ out = out.reshape([*pos_shape, -1])[0]
95
+
96
+ emb_sin = np.sin(out) # (M, D/2)
97
+ emb_cos = np.cos(out) # (M, D/2)
98
+
99
+ emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (M, D)
100
+ return emb
101
+
102
+
103
+ class SinusoidalPositionalEmbedding(nn.Module):
104
+ """Apply positional information to a sequence of embeddings.
105
+
106
+ Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to
107
+ them
108
+
109
+ Args:
110
+ embed_dim: (int): Dimension of the positional embedding.
111
+ max_seq_length: Maximum sequence length to apply positional embeddings
112
+
113
+ """
114
+
115
+ def __init__(self, embed_dim: int, max_seq_length: int = 32):
116
+ super().__init__()
117
+ position = torch.arange(max_seq_length).unsqueeze(1)
118
+ div_term = torch.exp(
119
+ torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)
120
+ )
121
+ pe = torch.zeros(1, max_seq_length, embed_dim)
122
+ pe[0, :, 0::2] = torch.sin(position * div_term)
123
+ pe[0, :, 1::2] = torch.cos(position * div_term)
124
+ self.register_buffer("pe", pe)
125
+
126
+ def forward(self, x):
127
+ _, seq_length, _ = x.shape
128
+ x = x + self.pe[:, :seq_length]
129
+ return x
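get_timestep_embedding builds an [N, embedding_dim] sinusoidal table, sines in the first half and cosines in the second (swapped when flip_sin_to_cos is set). A standalone shape check that mirrors the default arguments of the function above rather than importing it:

import math
import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000):
    # Mirrors the default path above: exp-spaced frequencies, then [sin | cos].
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / (half - 1))
    args = timesteps[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

emb = sinusoidal_timestep_embedding(torch.tensor([0, 250, 999]), 128)
print(emb.shape)  # torch.Size([3, 128])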
ltx_video/models/transformers/symmetric_patchifier.py ADDED
@@ -0,0 +1,84 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Tuple
3
+
4
+ import torch
5
+ from diffusers.configuration_utils import ConfigMixin
6
+ from einops import rearrange
7
+ from torch import Tensor
8
+
9
+
10
+ class Patchifier(ConfigMixin, ABC):
11
+ def __init__(self, patch_size: int):
12
+ super().__init__()
13
+ self._patch_size = (1, patch_size, patch_size)
14
+
15
+ @abstractmethod
16
+ def patchify(self, latents: Tensor) -> Tuple[Tensor, Tensor]:
17
+ raise NotImplementedError("Patchify method not implemented")
18
+
19
+ @abstractmethod
20
+ def unpatchify(
21
+ self,
22
+ latents: Tensor,
23
+ output_height: int,
24
+ output_width: int,
25
+ out_channels: int,
26
+ ) -> Tuple[Tensor, Tensor]:
27
+ pass
28
+
29
+ @property
30
+ def patch_size(self):
31
+ return self._patch_size
32
+
33
+ def get_latent_coords(
34
+ self, latent_num_frames, latent_height, latent_width, batch_size, device
35
+ ):
36
+ """
37
+ Return a tensor of shape [batch_size, 3, num_patches] containing the
38
+ top-left corner latent coordinates of each latent patch.
39
+ The tensor is repeated for each batch element.
40
+ """
41
+ latent_sample_coords = torch.meshgrid(
42
+ torch.arange(0, latent_num_frames, self._patch_size[0], device=device),
43
+ torch.arange(0, latent_height, self._patch_size[1], device=device),
44
+ torch.arange(0, latent_width, self._patch_size[2], device=device),
45
+ )
46
+ latent_sample_coords = torch.stack(latent_sample_coords, dim=0)
47
+ latent_coords = latent_sample_coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
48
+ latent_coords = rearrange(
49
+ latent_coords, "b c f h w -> b c (f h w)", b=batch_size
50
+ )
51
+ return latent_coords
52
+
53
+
54
+ class SymmetricPatchifier(Patchifier):
55
+ def patchify(self, latents: Tensor) -> Tuple[Tensor, Tensor]:
56
+ b, _, f, h, w = latents.shape
57
+ latent_coords = self.get_latent_coords(f, h, w, b, latents.device)
58
+ latents = rearrange(
59
+ latents,
60
+ "b c (f p1) (h p2) (w p3) -> b (f h w) (c p1 p2 p3)",
61
+ p1=self._patch_size[0],
62
+ p2=self._patch_size[1],
63
+ p3=self._patch_size[2],
64
+ )
65
+ return latents, latent_coords
66
+
67
+ def unpatchify(
68
+ self,
69
+ latents: Tensor,
70
+ output_height: int,
71
+ output_width: int,
72
+ out_channels: int,
73
+ ) -> Tuple[Tensor, Tensor]:
74
+ output_height = output_height // self._patch_size[1]
75
+ output_width = output_width // self._patch_size[2]
76
+ latents = rearrange(
77
+ latents,
78
+ "b (f h w) (c p q) -> b c f (h p) (w q)",
79
+ h=output_height,
80
+ w=output_width,
81
+ p=self._patch_size[1],
82
+ q=self._patch_size[2],
83
+ )
84
+ return latents
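With the default patch size the temporal/spatial patches are 1x1x1, so patchify simply flattens (f, h, w) into a token axis and unpatchify reverses it. A rearrange-only sketch of that round trip (shapes are placeholders), assuming einops is installed:

import torch
from einops import rearrange

latents = torch.randn(1, 128, 8, 16, 16)                  # (b, c, f, h, w)
tokens = rearrange(latents, "b c f h w -> b (f h w) c")   # (1, 2048, 128): one token per latent voxel
restored = rearrange(tokens, "b (f h w) c -> b c f h w", f=8, h=16, w=16)
assert torch.equal(latents, restored)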
ltx_video/models/transformers/transformer3d.py ADDED
@@ -0,0 +1,507 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/v0.26.3/src/diffusers/models/transformers/transformer_2d.py
2
+ import math
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional, Union
5
+ import os
6
+ import json
7
+ import glob
8
+ from pathlib import Path
9
+
10
+ import torch
11
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
12
+ from diffusers.models.embeddings import PixArtAlphaTextProjection
13
+ from diffusers.models.modeling_utils import ModelMixin
14
+ from diffusers.models.normalization import AdaLayerNormSingle
15
+ from diffusers.utils import BaseOutput, is_torch_version
16
+ from diffusers.utils import logging
17
+ from torch import nn
18
+ from safetensors import safe_open
19
+
20
+
21
+ from ltx_video.models.transformers.attention import BasicTransformerBlock
22
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
23
+
24
+ from ltx_video.utils.diffusers_config_mapping import (
25
+ diffusers_and_ours_config_mapping,
26
+ make_hashable_key,
27
+ TRANSFORMER_KEYS_RENAME_DICT,
28
+ )
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ @dataclass
35
+ class Transformer3DModelOutput(BaseOutput):
36
+ """
37
+ The output of [`Transformer2DModel`].
38
+
39
+ Args:
40
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
41
+ The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
42
+ distributions for the unnoised latent pixels.
43
+ """
44
+
45
+ sample: torch.FloatTensor
46
+
47
+
48
+ class Transformer3DModel(ModelMixin, ConfigMixin):
49
+ _supports_gradient_checkpointing = True
50
+
51
+ @register_to_config
52
+ def __init__(
53
+ self,
54
+ num_attention_heads: int = 16,
55
+ attention_head_dim: int = 88,
56
+ in_channels: Optional[int] = None,
57
+ out_channels: Optional[int] = None,
58
+ num_layers: int = 1,
59
+ dropout: float = 0.0,
60
+ norm_num_groups: int = 32,
61
+ cross_attention_dim: Optional[int] = None,
62
+ attention_bias: bool = False,
63
+ num_vector_embeds: Optional[int] = None,
64
+ activation_fn: str = "geglu",
65
+ num_embeds_ada_norm: Optional[int] = None,
66
+ use_linear_projection: bool = False,
67
+ only_cross_attention: bool = False,
68
+ double_self_attention: bool = False,
69
+ upcast_attention: bool = False,
70
+ adaptive_norm: str = "single_scale_shift", # 'single_scale_shift' or 'single_scale'
71
+ standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm'
72
+ norm_elementwise_affine: bool = True,
73
+ norm_eps: float = 1e-5,
74
+ attention_type: str = "default",
75
+ caption_channels: int = None,
76
+ use_tpu_flash_attention: bool = False, # if True uses the TPU attention offload ('flash attention')
77
+ qk_norm: Optional[str] = None,
78
+ positional_embedding_type: str = "rope",
79
+ positional_embedding_theta: Optional[float] = None,
80
+ positional_embedding_max_pos: Optional[List[int]] = None,
81
+ timestep_scale_multiplier: Optional[float] = None,
82
+ causal_temporal_positioning: bool = False, # For backward compatibility, will be deprecated
83
+ ):
84
+ super().__init__()
85
+ self.use_tpu_flash_attention = (
86
+ use_tpu_flash_attention # FIXME: push config down to the attention modules
87
+ )
88
+ self.use_linear_projection = use_linear_projection
89
+ self.num_attention_heads = num_attention_heads
90
+ self.attention_head_dim = attention_head_dim
91
+ inner_dim = num_attention_heads * attention_head_dim
92
+ self.inner_dim = inner_dim
93
+ self.patchify_proj = nn.Linear(in_channels, inner_dim, bias=True)
94
+ self.positional_embedding_type = positional_embedding_type
95
+ self.positional_embedding_theta = positional_embedding_theta
96
+ self.positional_embedding_max_pos = positional_embedding_max_pos
97
+ self.use_rope = self.positional_embedding_type == "rope"
98
+ self.timestep_scale_multiplier = timestep_scale_multiplier
99
+
100
+ if self.positional_embedding_type == "absolute":
101
+ raise ValueError("Absolute positional embedding is no longer supported")
102
+ elif self.positional_embedding_type == "rope":
103
+ if positional_embedding_theta is None:
104
+ raise ValueError(
105
+ "If `positional_embedding_type` type is rope, `positional_embedding_theta` must also be defined"
106
+ )
107
+ if positional_embedding_max_pos is None:
108
+ raise ValueError(
109
+ "If `positional_embedding_type` type is rope, `positional_embedding_max_pos` must also be defined"
110
+ )
111
+
112
+ # 3. Define transformers blocks
113
+ self.transformer_blocks = nn.ModuleList(
114
+ [
115
+ BasicTransformerBlock(
116
+ inner_dim,
117
+ num_attention_heads,
118
+ attention_head_dim,
119
+ dropout=dropout,
120
+ cross_attention_dim=cross_attention_dim,
121
+ activation_fn=activation_fn,
122
+ num_embeds_ada_norm=num_embeds_ada_norm,
123
+ attention_bias=attention_bias,
124
+ only_cross_attention=only_cross_attention,
125
+ double_self_attention=double_self_attention,
126
+ upcast_attention=upcast_attention,
127
+ adaptive_norm=adaptive_norm,
128
+ standardization_norm=standardization_norm,
129
+ norm_elementwise_affine=norm_elementwise_affine,
130
+ norm_eps=norm_eps,
131
+ attention_type=attention_type,
132
+ use_tpu_flash_attention=use_tpu_flash_attention,
133
+ qk_norm=qk_norm,
134
+ use_rope=self.use_rope,
135
+ )
136
+ for d in range(num_layers)
137
+ ]
138
+ )
139
+
140
+ # 4. Define output layers
141
+ self.out_channels = in_channels if out_channels is None else out_channels
142
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
143
+ self.scale_shift_table = nn.Parameter(
144
+ torch.randn(2, inner_dim) / inner_dim**0.5
145
+ )
146
+ self.proj_out = nn.Linear(inner_dim, self.out_channels)
147
+
148
+ self.adaln_single = AdaLayerNormSingle(
149
+ inner_dim, use_additional_conditions=False
150
+ )
151
+ if adaptive_norm == "single_scale":
152
+ self.adaln_single.linear = nn.Linear(inner_dim, 4 * inner_dim, bias=True)
153
+
154
+ self.caption_projection = None
155
+ if caption_channels is not None:
156
+ self.caption_projection = PixArtAlphaTextProjection(
157
+ in_features=caption_channels, hidden_size=inner_dim
158
+ )
159
+
160
+ self.gradient_checkpointing = False
161
+
162
+ def set_use_tpu_flash_attention(self):
163
+ r"""
164
+ Function sets the flag in this object and propagates down the children. The flag will enforce the usage of TPU
165
+ attention kernel.
166
+ """
167
+ logger.info("ENABLE TPU FLASH ATTENTION -> TRUE")
168
+ self.use_tpu_flash_attention = True
169
+ # push config down to the attention modules
170
+ for block in self.transformer_blocks:
171
+ block.set_use_tpu_flash_attention()
172
+
173
+ def create_skip_layer_mask(
174
+ self,
175
+ batch_size: int,
176
+ num_conds: int,
177
+ ptb_index: int,
178
+ skip_block_list: Optional[List[int]] = None,
179
+ ):
180
+ if skip_block_list is None or len(skip_block_list) == 0:
181
+ return None
182
+ num_layers = len(self.transformer_blocks)
183
+ mask = torch.ones(
184
+ (num_layers, batch_size * num_conds), device=self.device, dtype=self.dtype
185
+ )
186
+ for block_idx in skip_block_list:
187
+ mask[block_idx, ptb_index::num_conds] = 0
188
+ return mask
189
+
190
+ def _set_gradient_checkpointing(self, module, value=False):
191
+ if hasattr(module, "gradient_checkpointing"):
192
+ module.gradient_checkpointing = value
193
+
194
+ def get_fractional_positions(self, indices_grid):
195
+ fractional_positions = torch.stack(
196
+ [
197
+ indices_grid[:, i] / self.positional_embedding_max_pos[i]
198
+ for i in range(3)
199
+ ],
200
+ dim=-1,
201
+ )
202
+ return fractional_positions
203
+
204
+ def precompute_freqs_cis(self, indices_grid, spacing="exp"):
205
+ dtype = torch.float32 # We need full precision in the freqs_cis computation.
206
+ dim = self.inner_dim
207
+ theta = self.positional_embedding_theta
208
+
209
+ fractional_positions = self.get_fractional_positions(indices_grid)
210
+
211
+ start = 1
212
+ end = theta
213
+ device = fractional_positions.device
214
+ if spacing == "exp":
215
+ indices = theta ** (
216
+ torch.linspace(
217
+ math.log(start, theta),
218
+ math.log(end, theta),
219
+ dim // 6,
220
+ device=device,
221
+ dtype=dtype,
222
+ )
223
+ )
224
+ indices = indices.to(dtype=dtype)
225
+ elif spacing == "exp_2":
226
+ indices = 1.0 / theta ** (torch.arange(0, dim, 6, device=device) / dim)
227
+ indices = indices.to(dtype=dtype)
228
+ elif spacing == "linear":
229
+ indices = torch.linspace(start, end, dim // 6, device=device, dtype=dtype)
230
+ elif spacing == "sqrt":
231
+ indices = torch.linspace(
232
+ start**2, end**2, dim // 6, device=device, dtype=dtype
233
+ ).sqrt()
234
+
235
+ indices = indices * math.pi / 2
236
+
237
+ if spacing == "exp_2":
238
+ freqs = (
239
+ (indices * fractional_positions.unsqueeze(-1))
240
+ .transpose(-1, -2)
241
+ .flatten(2)
242
+ )
243
+ else:
244
+ freqs = (
245
+ (indices * (fractional_positions.unsqueeze(-1) * 2 - 1))
246
+ .transpose(-1, -2)
247
+ .flatten(2)
248
+ )
249
+
250
+ cos_freq = freqs.cos().repeat_interleave(2, dim=-1)
251
+ sin_freq = freqs.sin().repeat_interleave(2, dim=-1)
252
+ if dim % 6 != 0:
253
+ cos_padding = torch.ones_like(cos_freq[:, :, : dim % 6])
254
+ sin_padding = torch.zeros_like(cos_freq[:, :, : dim % 6])
255
+ cos_freq = torch.cat([cos_padding, cos_freq], dim=-1)
256
+ sin_freq = torch.cat([sin_padding, sin_freq], dim=-1)
257
+ return cos_freq.to(self.dtype), sin_freq.to(self.dtype)
258
+
259
+ def load_state_dict(
260
+ self,
261
+ state_dict: Dict,
262
+ *args,
263
+ **kwargs,
264
+ ):
265
+ if any([key.startswith("model.diffusion_model.") for key in state_dict.keys()]):
266
+ state_dict = {
267
+ key.replace("model.diffusion_model.", ""): value
268
+ for key, value in state_dict.items()
269
+ if key.startswith("model.diffusion_model.")
270
+ }
271
+ super().load_state_dict(state_dict, *args, **kwargs)
272
+
273
+ @classmethod
274
+ def from_pretrained(
275
+ cls,
276
+ pretrained_model_path: Optional[Union[str, os.PathLike]],
277
+ *args,
278
+ **kwargs,
279
+ ):
280
+ pretrained_model_path = Path(pretrained_model_path)
281
+ if pretrained_model_path.is_dir():
282
+ config_path = pretrained_model_path / "transformer" / "config.json"
283
+ with open(config_path, "r") as f:
284
+ config = make_hashable_key(json.load(f))
285
+
286
+ assert config in diffusers_and_ours_config_mapping, (
287
+ "Provided diffusers checkpoint config for transformer is not suppported. "
288
+ "We only support diffusers configs found in Lightricks/LTX-Video."
289
+ )
290
+
291
+ config = diffusers_and_ours_config_mapping[config]
292
+ state_dict = {}
293
+ ckpt_paths = (
294
+ pretrained_model_path
295
+ / "transformer"
296
+ / "diffusion_pytorch_model*.safetensors"
297
+ )
298
+ dict_list = glob.glob(str(ckpt_paths))
299
+ for dict_path in dict_list:
300
+ part_dict = {}
301
+ with safe_open(dict_path, framework="pt", device="cpu") as f:
302
+ for k in f.keys():
303
+ part_dict[k] = f.get_tensor(k)
304
+ state_dict.update(part_dict)
305
+
306
+ for key in list(state_dict.keys()):
307
+ new_key = key
308
+ for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items():
309
+ new_key = new_key.replace(replace_key, rename_key)
310
+ state_dict[new_key] = state_dict.pop(key)
311
+
312
+ with torch.device("meta"):
313
+ transformer = cls.from_config(config)
314
+ transformer.load_state_dict(state_dict, assign=True, strict=True)
315
+ elif pretrained_model_path.is_file() and str(pretrained_model_path).endswith(
316
+ ".safetensors"
317
+ ):
318
+ comfy_single_file_state_dict = {}
319
+ with safe_open(pretrained_model_path, framework="pt", device="cpu") as f:
320
+ metadata = f.metadata()
321
+ for k in f.keys():
322
+ comfy_single_file_state_dict[k] = f.get_tensor(k)
323
+ configs = json.loads(metadata["config"])
324
+ transformer_config = configs["transformer"]
325
+ with torch.device("meta"):
326
+ transformer = Transformer3DModel.from_config(transformer_config)
327
+ transformer.load_state_dict(comfy_single_file_state_dict, assign=True)
328
+ return transformer
329
+
330
+ def forward(
331
+ self,
332
+ hidden_states: torch.Tensor,
333
+ indices_grid: torch.Tensor,
334
+ encoder_hidden_states: Optional[torch.Tensor] = None,
335
+ timestep: Optional[torch.LongTensor] = None,
336
+ class_labels: Optional[torch.LongTensor] = None,
337
+ cross_attention_kwargs: Dict[str, Any] = None,
338
+ attention_mask: Optional[torch.Tensor] = None,
339
+ encoder_attention_mask: Optional[torch.Tensor] = None,
340
+ skip_layer_mask: Optional[torch.Tensor] = None,
341
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
342
+ return_dict: bool = True,
343
+ ):
344
+ """
345
+ The [`Transformer2DModel`] forward method.
346
+
347
+ Args:
348
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
349
+ Input `hidden_states`.
350
+ indices_grid (`torch.LongTensor` of shape `(batch size, 3, num latent pixels)`):
351
+ encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
352
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
353
+ self-attention.
354
+ timestep ( `torch.LongTensor`, *optional*):
355
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
356
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
357
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
358
+ `AdaLayerZeroNorm`.
359
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
360
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
361
+ `self.processor` in
362
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
363
+ attention_mask ( `torch.Tensor`, *optional*):
364
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
365
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
366
+ negative values to the attention scores corresponding to "discard" tokens.
367
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
368
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
369
+
370
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
371
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
372
+
373
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
374
+ above. This bias will be added to the cross-attention scores.
375
+ skip_layer_mask ( `torch.Tensor`, *optional*):
376
+ A mask of shape `(num_layers, batch)` that indicates which layers to skip. `0` at position
377
+ `layer, batch_idx` indicates that the layer should be skipped for the corresponding batch index.
378
+ skip_layer_strategy ( `SkipLayerStrategy`, *optional*, defaults to `None`):
379
+ Controls which layers are skipped when calculating a perturbed latent for spatiotemporal guidance.
380
+ return_dict (`bool`, *optional*, defaults to `True`):
381
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
382
+ tuple.
383
+
384
+ Returns:
385
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
386
+ `tuple` where the first element is the sample tensor.
387
+ """
388
+ # for tpu attention offload 2d token masks are used. No need to transform.
389
+ if not self.use_tpu_flash_attention:
390
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
391
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
392
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
393
+ # expects mask of shape:
394
+ # [batch, key_tokens]
395
+ # adds singleton query_tokens dimension:
396
+ # [batch, 1, key_tokens]
397
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
398
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
399
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
400
+ if attention_mask is not None and attention_mask.ndim == 2:
401
+ # assume that mask is expressed as:
402
+ # (1 = keep, 0 = discard)
403
+ # convert mask into a bias that can be added to attention scores:
404
+ # (keep = +0, discard = -10000.0)
405
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
406
+ attention_mask = attention_mask.unsqueeze(1)
407
+
408
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
409
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
410
+ encoder_attention_mask = (
411
+ 1 - encoder_attention_mask.to(hidden_states.dtype)
412
+ ) * -10000.0
413
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
414
+
415
+ # 1. Input
416
+ hidden_states = self.patchify_proj(hidden_states)
417
+
418
+ if self.timestep_scale_multiplier:
419
+ timestep = self.timestep_scale_multiplier * timestep
420
+
421
+ freqs_cis = self.precompute_freqs_cis(indices_grid)
422
+
423
+ batch_size = hidden_states.shape[0]
424
+ timestep, embedded_timestep = self.adaln_single(
425
+ timestep.flatten(),
426
+ {"resolution": None, "aspect_ratio": None},
427
+ batch_size=batch_size,
428
+ hidden_dtype=hidden_states.dtype,
429
+ )
430
+ # Second dimension is 1 or number of tokens (if timestep_per_token)
431
+ timestep = timestep.view(batch_size, -1, timestep.shape[-1])
432
+ embedded_timestep = embedded_timestep.view(
433
+ batch_size, -1, embedded_timestep.shape[-1]
434
+ )
435
+
436
+ # 2. Blocks
437
+ if self.caption_projection is not None:
438
+ batch_size = hidden_states.shape[0]
439
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
440
+ encoder_hidden_states = encoder_hidden_states.view(
441
+ batch_size, -1, hidden_states.shape[-1]
442
+ )
443
+
444
+ for block_idx, block in enumerate(self.transformer_blocks):
445
+ if self.training and self.gradient_checkpointing:
446
+
447
+ def create_custom_forward(module, return_dict=None):
448
+ def custom_forward(*inputs):
449
+ if return_dict is not None:
450
+ return module(*inputs, return_dict=return_dict)
451
+ else:
452
+ return module(*inputs)
453
+
454
+ return custom_forward
455
+
456
+ ckpt_kwargs: Dict[str, Any] = (
457
+ {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
458
+ )
459
+ hidden_states = torch.utils.checkpoint.checkpoint(
460
+ create_custom_forward(block),
461
+ hidden_states,
462
+ freqs_cis,
463
+ attention_mask,
464
+ encoder_hidden_states,
465
+ encoder_attention_mask,
466
+ timestep,
467
+ cross_attention_kwargs,
468
+ class_labels,
469
+ (
470
+ skip_layer_mask[block_idx]
471
+ if skip_layer_mask is not None
472
+ else None
473
+ ),
474
+ skip_layer_strategy,
475
+ **ckpt_kwargs,
476
+ )
477
+ else:
478
+ hidden_states = block(
479
+ hidden_states,
480
+ freqs_cis=freqs_cis,
481
+ attention_mask=attention_mask,
482
+ encoder_hidden_states=encoder_hidden_states,
483
+ encoder_attention_mask=encoder_attention_mask,
484
+ timestep=timestep,
485
+ cross_attention_kwargs=cross_attention_kwargs,
486
+ class_labels=class_labels,
487
+ skip_layer_mask=(
488
+ skip_layer_mask[block_idx]
489
+ if skip_layer_mask is not None
490
+ else None
491
+ ),
492
+ skip_layer_strategy=skip_layer_strategy,
493
+ )
494
+
495
+ # 3. Output
496
+ scale_shift_values = (
497
+ self.scale_shift_table[None, None] + embedded_timestep[:, :, None]
498
+ )
499
+ shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1]
500
+ hidden_states = self.norm_out(hidden_states)
501
+ # Modulation
502
+ hidden_states = hidden_states * (1 + scale) + shift
503
+ hidden_states = self.proj_out(hidden_states)
504
+ if not return_dict:
505
+ return (hidden_states,)
506
+
507
+ return Transformer3DModelOutput(sample=hidden_states)
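Before the denoising blocks run, the forward pass above turns 2-D keep/discard masks into additive biases (keep -> 0, discard -> -10000) with a singleton query dimension so they broadcast over attention scores. A standalone sketch of that conversion:

import torch

def mask_to_bias(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # mask: (batch, key_tokens) with 1 = keep, 0 = discard.
    bias = (1 - mask.to(dtype)) * -10000.0
    return bias.unsqueeze(1)  # (batch, 1, key_tokens), broadcastable over query tokens

mask = torch.tensor([[1, 1, 0, 0]])
print(mask_to_bias(mask))  # the two discarded positions become -10000.0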
ltx_video/pipelines/__init__.py ADDED
File without changes
ltx_video/pipelines/ai_studio_code (11).py ADDED
@@ -0,0 +1,157 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py
2
+ # (and with our modification, for science!)
3
+
4
+ import copy
5
+ import inspect
6
+ import math
7
+ import re
8
+ from contextlib import nullcontext
9
+ from dataclasses import dataclass
10
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+ from diffusers.image_processor import VaeImageProcessor
15
+ from diffusers.models import AutoencoderKL
16
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
17
+ from diffusers.schedulers import DPMSolverMultistepScheduler
18
+ from diffusers.utils import deprecate, logging
19
+ from diffusers.utils.torch_utils import randn_tensor
20
+ from einops import rearrange
21
+ from transformers import (
22
+ T5EncoderModel,
23
+ T5Tokenizer,
24
+ AutoModelForCausalLM,
25
+ AutoProcessor,
26
+ AutoTokenizer,
27
+ )
28
+
29
+ from ltx_video.models.autoencoders.causal_video_autoencoder import (
30
+ CausalVideoAutoencoder,
31
+ )
32
+ from ltx_video.models.autoencoders.vae_encode import (
33
+ get_vae_size_scale_factor,
34
+ latent_to_pixel_coords,
35
+ vae_decode,
36
+ vae_encode,
37
+ )
38
+ from ltx_video.models.transformers.symmetric_patchifier import Patchifier
39
+ from ltx_video.models.transformers.transformer3d import Transformer3DModel
40
+ from ltx_video.schedulers.rf import TimestepShifter
41
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
42
+ from ltx_video.utils.prompt_enhance_utils import generate_cinematic_prompt
43
+ from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
44
+ from ltx_video.models.autoencoders.vae_encode import (
45
+ un_normalize_latents,
46
+ normalize_latents,
47
+ )
48
+
49
+ # ... (All of the file's initial code remains the same, including ASPECT_RATIO_BINS, retrieve_timesteps, ConditioningItem, etc.)
50
+ # ... (Skipping ahead to the LTXVideoPipeline class to keep things focused)
51
+
52
+ class LTXVideoPipeline(DiffusionPipeline):
53
+ # ... (The __init__ and other functions such as encode_prompt, check_inputs, etc., remain the same)
54
+ # ... (Jumping to the __call__ function, where we make our modification)
55
+
56
+ @torch.no_grad()
57
+ def __call__(
58
+ self,
59
+ height: int,
60
+ width: int,
61
+ num_frames: int,
62
+ frame_rate: float,
63
+ prompt: Union[str, List[str]] = None,
64
+ negative_prompt: str = "",
65
+ num_inference_steps: int = 20,
66
+ skip_initial_inference_steps: int = 0,
67
+ skip_final_inference_steps: int = 0,
68
+ timesteps: List[int] = None,
69
+ guidance_scale: Union[float, List[float]] = 4.5,
70
+ cfg_star_rescale: bool = False,
71
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
72
+ skip_block_list: Optional[Union[List[List[int]], List[int]]] = None,
73
+ stg_scale: Union[float, List[float]] = 1.0,
74
+ rescaling_scale: Union[float, List[float]] = 0.7,
75
+ guidance_timesteps: Optional[List[int]] = None,
76
+ num_images_per_prompt: Optional[int] = 1,
77
+ eta: float = 0.0,
78
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
79
+ latents: Optional[torch.FloatTensor] = None,
80
+ prompt_embeds: Optional[torch.FloatTensor] = None,
81
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
82
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
83
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
84
+ output_type: Optional[str] = "pil",
85
+ return_dict: bool = True,
86
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
87
+ conditioning_items: Optional[List[ConditioningItem]] = None,
88
+ decode_timestep: Union[List[float], float] = 0.0,
89
+ decode_noise_scale: Optional[List[float]] = None,
90
+ mixed_precision: bool = False,
91
+ offload_to_cpu: bool = False,
92
+ enhance_prompt: bool = False,
93
+ text_encoder_max_tokens: int = 256,
94
+ stochastic_sampling: bool = False,
95
+ media_items: Optional[torch.Tensor] = None,
96
+ tone_map_compression_ratio: float = 0.0,
97
+ **kwargs,
98
+ ) -> Union[ImagePipelineOutput, Tuple]:
99
+
100
+ # --- [OUR MODIFICATION] Capture the original prompt for logging ---
101
+ original_prompt_for_logging = prompt
102
+
103
+ # ... (The rest of the __call__ function's initial code remains the same) ...
104
+ # ... (check_inputs, default height/width, etc.)
105
+
106
+ if enhance_prompt:
107
+ self.prompt_enhancer_image_caption_model = (
108
+ self.prompt_enhancer_image_caption_model.to(self._execution_device)
109
+ )
110
+ self.prompt_enhancer_llm_model = self.prompt_enhancer_llm_model.to(
111
+ self._execution_device
112
+ )
113
+
114
+ # The call to the Assistant Director
115
+ enhanced_prompt = generate_cinematic_prompt(
116
+ self.prompt_enhancer_image_caption_model,
117
+ self.prompt_enhancer_image_caption_processor,
118
+ self.prompt_enhancer_llm_model,
119
+ self.prompt_enhancer_llm_tokenizer,
120
+ prompt,
121
+ conditioning_items,
122
+ max_new_tokens=text_encoder_max_tokens,
123
+ )
124
+
125
+ # --- [OUR SECRET WIRETAP, FOR SCIENCE!] ---
126
+ print("\n" + "="*50)
127
+ print("--- [LOG DO DIRETOR ASSISTENTE (PROMPT ENHANCER)] ---")
128
+ print(f"Prompt Original do Maestro: {original_prompt_for_logging}")
129
+ print(f"PROMPT FINAL APERFEIÇOADO (enviado para o LTX): {enhanced_prompt}")
130
+ print("--- [FIM DO LOG DO DIRETOR ASSISTENTE] ---")
131
+ print("="*50 + "\n")
132
+ # --- [END OF THE WIRETAP] ---
133
+
134
+ # Update the prompt that will be used by the rest of the function
135
+ prompt = enhanced_prompt
136
+
137
+ # ... (The rest of the __call__ function continues from here, using either the new or the original `prompt`)
138
+ # ... (encode_prompt, prepare_latents, denoising loop, etc.)
139
+
140
+ # 3. Encode input prompt
141
+ if self.text_encoder is not None:
142
+ self.text_encoder = self.text_encoder.to(self._execution_device)
143
+
144
+ (
145
+ prompt_embeds,
146
+ prompt_attention_mask,
147
+ negative_prompt_embeds,
148
+ negative_prompt_attention_mask,
149
+ ) = self.encode_prompt(
150
+ prompt,
151
+ True,
152
+ negative_prompt=negative_prompt,
153
+ # ... (rest of the parameters)
154
+ )
155
+
156
+ # ... (the entire rest of the file, with no further modifications) ...
157
+ # ... (denoising_step, prepare_conditioning, etc.)
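The same before/after logging can also live outside the pipeline by wrapping the enhancer call site; a small hedged sketch (the helper name is illustrative):

def log_prompt_enhancement(original_prompt, enhance_fn, *args, **kwargs):
    # Wraps any prompt-enhancement callable and prints the original and enhanced prompts.
    enhanced = enhance_fn(*args, **kwargs)
    print(f"[prompt enhancer] original: {original_prompt}")
    print(f"[prompt enhancer] enhanced: {enhanced}")
    return enhanced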
ltx_video/pipelines/crf_compressor.py ADDED
@@ -0,0 +1,50 @@
1
+ import av
2
+ import torch
3
+ import io
4
+ import numpy as np
5
+
6
+
7
+ def _encode_single_frame(output_file, image_array: np.ndarray, crf):
8
+ container = av.open(output_file, "w", format="mp4")
9
+ try:
10
+ stream = container.add_stream(
11
+ "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
12
+ )
13
+ stream.height = image_array.shape[0]
14
+ stream.width = image_array.shape[1]
15
+ av_frame = av.VideoFrame.from_ndarray(image_array, format="rgb24").reformat(
16
+ format="yuv420p"
17
+ )
18
+ container.mux(stream.encode(av_frame))
19
+ container.mux(stream.encode())
20
+ finally:
21
+ container.close()
22
+
23
+
24
+ def _decode_single_frame(video_file):
25
+ container = av.open(video_file)
26
+ try:
27
+ stream = next(s for s in container.streams if s.type == "video")
28
+ frame = next(container.decode(stream))
29
+ finally:
30
+ container.close()
31
+ return frame.to_ndarray(format="rgb24")
32
+
33
+
34
+ def compress(image: torch.Tensor, crf=29):
35
+ if crf == 0:
36
+ return image
37
+
38
+ image_array = (
39
+ (image[: (image.shape[0] // 2) * 2, : (image.shape[1] // 2) * 2] * 255.0)
40
+ .byte()
41
+ .cpu()
42
+ .numpy()
43
+ )
44
+ with io.BytesIO() as output_file:
45
+ _encode_single_frame(output_file, image_array, crf)
46
+ video_bytes = output_file.getvalue()
47
+ with io.BytesIO(video_bytes) as video_file:
48
+ image_array = _decode_single_frame(video_file)
49
+ tensor = torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0
50
+ return tensor
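compress round-trips a single RGB frame (values in [0, 1], shape H x W x 3 with even height and width) through an in-memory H.264 encode at the given CRF, so downstream code can simulate codec artifacts; crf=0 returns the input unchanged. A usage sketch, assuming PyAV is installed and the module is importable from the path added above:

import torch
from ltx_video.pipelines.crf_compressor import compress

frame = torch.rand(240, 320, 3)      # H x W x RGB in [0, 1]
degraded = compress(frame, crf=29)   # re-encoded copy carrying codec artifacts
untouched = compress(frame, crf=0)   # crf=0 is a no-op
print(degraded.shape, torch.equal(untouched, frame))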
ltx_video/pipelines/pipeline_ltx_video.py ADDED
@@ -0,0 +1,1903 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py
2
+ import copy
3
+ import inspect
4
+ import math
5
+ import re
6
+ from contextlib import nullcontext
7
+ from dataclasses import dataclass
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+ from diffusers.image_processor import VaeImageProcessor
13
+ from diffusers.models import AutoencoderKL
14
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
15
+ from diffusers.schedulers import DPMSolverMultistepScheduler
16
+ from diffusers.utils import deprecate, logging
17
+ from diffusers.utils.torch_utils import randn_tensor
18
+ from einops import rearrange
19
+ from transformers import (
20
+ T5EncoderModel,
21
+ T5Tokenizer,
22
+ AutoModelForCausalLM,
23
+ AutoProcessor,
24
+ AutoTokenizer,
25
+ )
26
+
27
+ from ltx_video.models.autoencoders.causal_video_autoencoder import (
28
+ CausalVideoAutoencoder,
29
+ )
30
+ from ltx_video.models.autoencoders.vae_encode import (
31
+ get_vae_size_scale_factor,
32
+ latent_to_pixel_coords,
33
+ vae_decode,
34
+ vae_encode,
35
+ )
36
+ from ltx_video.models.transformers.symmetric_patchifier import Patchifier
37
+ from ltx_video.models.transformers.transformer3d import Transformer3DModel
38
+ from ltx_video.schedulers.rf import TimestepShifter
39
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
40
+ from ltx_video.utils.prompt_enhance_utils import generate_cinematic_prompt
41
+ from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
42
+ from ltx_video.models.autoencoders.vae_encode import (
43
+ un_normalize_latents,
44
+ normalize_latents,
45
+ )
46
+
47
+
48
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
+
50
+
51
+ ASPECT_RATIO_1024_BIN = {
52
+ "0.25": [512.0, 2048.0],
53
+ "0.28": [512.0, 1856.0],
54
+ "0.32": [576.0, 1792.0],
55
+ "0.33": [576.0, 1728.0],
56
+ "0.35": [576.0, 1664.0],
57
+ "0.4": [640.0, 1600.0],
58
+ "0.42": [640.0, 1536.0],
59
+ "0.48": [704.0, 1472.0],
60
+ "0.5": [704.0, 1408.0],
61
+ "0.52": [704.0, 1344.0],
62
+ "0.57": [768.0, 1344.0],
63
+ "0.6": [768.0, 1280.0],
64
+ "0.68": [832.0, 1216.0],
65
+ "0.72": [832.0, 1152.0],
66
+ "0.78": [896.0, 1152.0],
67
+ "0.82": [896.0, 1088.0],
68
+ "0.88": [960.0, 1088.0],
69
+ "0.94": [960.0, 1024.0],
70
+ "1.0": [1024.0, 1024.0],
71
+ "1.07": [1024.0, 960.0],
72
+ "1.13": [1088.0, 960.0],
73
+ "1.21": [1088.0, 896.0],
74
+ "1.29": [1152.0, 896.0],
75
+ "1.38": [1152.0, 832.0],
76
+ "1.46": [1216.0, 832.0],
77
+ "1.67": [1280.0, 768.0],
78
+ "1.75": [1344.0, 768.0],
79
+ "2.0": [1408.0, 704.0],
80
+ "2.09": [1472.0, 704.0],
81
+ "2.4": [1536.0, 640.0],
82
+ "2.5": [1600.0, 640.0],
83
+ "3.0": [1728.0, 576.0],
84
+ "4.0": [2048.0, 512.0],
85
+ }
86
+
87
+ ASPECT_RATIO_512_BIN = {
88
+ "0.25": [256.0, 1024.0],
89
+ "0.28": [256.0, 928.0],
90
+ "0.32": [288.0, 896.0],
91
+ "0.33": [288.0, 864.0],
92
+ "0.35": [288.0, 832.0],
93
+ "0.4": [320.0, 800.0],
94
+ "0.42": [320.0, 768.0],
95
+ "0.48": [352.0, 736.0],
96
+ "0.5": [352.0, 704.0],
97
+ "0.52": [352.0, 672.0],
98
+ "0.57": [384.0, 672.0],
99
+ "0.6": [384.0, 640.0],
100
+ "0.68": [416.0, 608.0],
101
+ "0.72": [416.0, 576.0],
102
+ "0.78": [448.0, 576.0],
103
+ "0.82": [448.0, 544.0],
104
+ "0.88": [480.0, 544.0],
105
+ "0.94": [480.0, 512.0],
106
+ "1.0": [512.0, 512.0],
107
+ "1.07": [512.0, 480.0],
108
+ "1.13": [544.0, 480.0],
109
+ "1.21": [544.0, 448.0],
110
+ "1.29": [576.0, 448.0],
111
+ "1.38": [576.0, 416.0],
112
+ "1.46": [608.0, 416.0],
113
+ "1.67": [640.0, 384.0],
114
+ "1.75": [672.0, 384.0],
115
+ "2.0": [704.0, 352.0],
116
+ "2.09": [736.0, 352.0],
117
+ "2.4": [768.0, 320.0],
118
+ "2.5": [800.0, 320.0],
119
+ "3.0": [864.0, 288.0],
120
+ "4.0": [1024.0, 256.0],
121
+ }
122
+
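Each bin key is a height/width ratio and maps to a resolution whose pixel count stays near the 512 or 1024 budget. The selection logic is not part of this excerpt; a plausible nearest-bin lookup would be:

def snap_to_aspect_bin(height: int, width: int, bins: dict):
    # Picks the bin whose ratio key is closest to the requested height/width ratio.
    # Note: this lookup is an assumption; the pipeline's own selection code is not shown here.
    ratio = height / width
    closest = min(bins, key=lambda k: abs(float(k) - ratio))
    bin_h, bin_w = bins[closest]
    return int(bin_h), int(bin_w)

# e.g. snap_to_aspect_bin(500, 900, ASPECT_RATIO_512_BIN) -> (384, 672)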
123
+
124
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
125
+ def retrieve_timesteps(
126
+ scheduler,
127
+ num_inference_steps: Optional[int] = None,
128
+ device: Optional[Union[str, torch.device]] = None,
129
+ timesteps: Optional[List[int]] = None,
130
+ skip_initial_inference_steps: int = 0,
131
+ skip_final_inference_steps: int = 0,
132
+ **kwargs,
133
+ ):
134
+ """
135
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
136
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
137
+
138
+ Args:
139
+ scheduler (`SchedulerMixin`):
140
+ The scheduler to get timesteps from.
141
+ num_inference_steps (`int`):
142
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
143
+ `timesteps` must be `None`.
144
+ device (`str` or `torch.device`, *optional*):
145
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
146
+ timesteps (`List[int]`, *optional*):
147
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
148
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
149
+ must be `None`.
150
+ max_timestep ('float', *optional*, defaults to 1.0):
151
+ The initial noising level for image-to-image/video-to-video. The list of timesteps will be
152
+ truncated to start with a timestep greater than or equal to this.
153
+
154
+ Returns:
155
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
156
+ second element is the number of inference steps.
157
+ """
158
+ if timesteps is not None:
159
+ accepts_timesteps = "timesteps" in set(
160
+ inspect.signature(scheduler.set_timesteps).parameters.keys()
161
+ )
162
+ if not accepts_timesteps:
163
+ raise ValueError(
164
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
165
+ f" timestep schedules. Please check whether you are using the correct scheduler."
166
+ )
167
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
168
+ timesteps = scheduler.timesteps
169
+ num_inference_steps = len(timesteps)
170
+ else:
171
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
172
+ timesteps = scheduler.timesteps
173
+
174
+ if (
175
+ skip_initial_inference_steps < 0
176
+ or skip_final_inference_steps < 0
177
+ or skip_initial_inference_steps + skip_final_inference_steps
178
+ >= num_inference_steps
179
+ ):
180
+ raise ValueError(
181
+ "invalid skip inference step values: must be non-negative and the sum of skip_initial_inference_steps and skip_final_inference_steps must be less than the number of inference steps"
182
+ )
183
+
184
+ timesteps = timesteps[
185
+ skip_initial_inference_steps : len(timesteps) - skip_final_inference_steps
186
+ ]
187
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
188
+ num_inference_steps = len(timesteps)
189
+
190
+ return timesteps, num_inference_steps
191
+
192
+
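retrieve_timesteps trims the scheduler's schedule on both ends, which is how the pipeline runs only a middle slice of the denoising trajectory (e.g. for refinement passes). The trimming itself is just list slicing; an illustration with the scheduler plumbing omitted:

timesteps = list(range(1000, 0, -125))   # an 8-step schedule: [1000, 875, ..., 125]
skip_initial, skip_final = 2, 1
trimmed = timesteps[skip_initial:len(timesteps) - skip_final]
print(trimmed)                           # the 5 middle steps the denoising loop will run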
193
+ @dataclass
194
+ class ConditioningItem:
195
+ """
196
+ Defines a single frame-conditioning item - a single frame or a sequence of frames.
197
+
198
+ Attributes:
199
+ media_item (torch.Tensor): shape=(b, 3, f, h, w). The media item to condition on.
200
+ media_frame_number (int): The start-frame number of the media item in the generated video.
201
+ conditioning_strength (float): The strength of the conditioning (1.0 = full conditioning).
202
+ media_x (Optional[int]): Optional left x coordinate of the media item in the generated frame.
203
+ media_y (Optional[int]): Optional top y coordinate of the media item in the generated frame.
204
+ """
205
+
206
+ media_item: torch.Tensor
207
+ media_frame_number: int
208
+ conditioning_strength: float
209
+ media_x: Optional[int] = None
210
+ media_y: Optional[int] = None
211
+
212
+
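A ConditioningItem carries the pixel-space media used for frame conditioning, for example pinning a reference image to the first generated frame at full strength. A construction sketch (tensor contents are placeholders):

import torch
from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem

first_frame = torch.rand(1, 3, 1, 512, 768)  # (b, 3, f, h, w): a single reference frame
item = ConditioningItem(
    media_item=first_frame,
    media_frame_number=0,        # condition the start of the generated video
    conditioning_strength=1.0,   # 1.0 = fully pinned to this frame
)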
213
+ class LTXVideoPipeline(DiffusionPipeline):
214
+ r"""
215
+ Pipeline for text-to-image generation using LTX-Video.
216
+
217
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
218
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
219
+
220
+ Args:
221
+ vae ([`AutoencoderKL`]):
222
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
223
+ text_encoder ([`T5EncoderModel`]):
224
+ Frozen text-encoder. This uses
225
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
226
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
227
+ tokenizer (`T5Tokenizer`):
228
+ Tokenizer of class
229
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
230
+ transformer ([`Transformer2DModel`]):
231
+ A text conditioned `Transformer2DModel` to denoise the encoded image latents.
232
+ scheduler ([`SchedulerMixin`]):
233
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
234
+ """
235
+
236
+ bad_punct_regex = re.compile(
237
+ r"["
238
+ + "#®•©™&@·º½¾¿¡§~"
239
+ + r"\)"
240
+ + r"\("
241
+ + r"\]"
242
+ + r"\["
243
+ + r"\}"
244
+ + r"\{"
245
+ + r"\|"
246
+ + "\\"
247
+ + r"\/"
248
+ + r"\*"
249
+ + r"]{1,}"
250
+ ) # noqa
251
+
252
+ _optional_components = [
253
+ "tokenizer",
254
+ "text_encoder",
255
+ "prompt_enhancer_image_caption_model",
256
+ "prompt_enhancer_image_caption_processor",
257
+ "prompt_enhancer_llm_model",
258
+ "prompt_enhancer_llm_tokenizer",
259
+ ]
260
+ model_cpu_offload_seq = "prompt_enhancer_image_caption_model->prompt_enhancer_llm_model->text_encoder->transformer->vae"
261
+
262
+ def __init__(
263
+ self,
264
+ tokenizer: T5Tokenizer,
265
+ text_encoder: T5EncoderModel,
266
+ vae: AutoencoderKL,
267
+ transformer: Transformer3DModel,
268
+ scheduler: DPMSolverMultistepScheduler,
269
+ patchifier: Patchifier,
270
+ prompt_enhancer_image_caption_model: AutoModelForCausalLM,
271
+ prompt_enhancer_image_caption_processor: AutoProcessor,
272
+ prompt_enhancer_llm_model: AutoModelForCausalLM,
273
+ prompt_enhancer_llm_tokenizer: AutoTokenizer,
274
+ allowed_inference_steps: Optional[List[float]] = None,
275
+ ):
276
+ super().__init__()
277
+
278
+ self.register_modules(
279
+ tokenizer=tokenizer,
280
+ text_encoder=text_encoder,
281
+ vae=vae,
282
+ transformer=transformer,
283
+ scheduler=scheduler,
284
+ patchifier=patchifier,
285
+ prompt_enhancer_image_caption_model=prompt_enhancer_image_caption_model,
286
+ prompt_enhancer_image_caption_processor=prompt_enhancer_image_caption_processor,
287
+ prompt_enhancer_llm_model=prompt_enhancer_llm_model,
288
+ prompt_enhancer_llm_tokenizer=prompt_enhancer_llm_tokenizer,
289
+ )
290
+
291
+ self.video_scale_factor, self.vae_scale_factor, _ = get_vae_size_scale_factor(
292
+ self.vae
293
+ )
294
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
295
+
296
+ self.allowed_inference_steps = allowed_inference_steps
297
+
298
+ def mask_text_embeddings(self, emb, mask):
299
+ if emb.shape[0] == 1:
300
+ keep_index = mask.sum().item()
301
+ return emb[:, :, :keep_index, :], keep_index
302
+ else:
303
+ masked_feature = emb * mask[:, None, :, None]
304
+ return masked_feature, emb.shape[2]
305
+
306
+ # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt
307
+ def encode_prompt(
308
+ self,
309
+ prompt: Union[str, List[str]],
310
+ do_classifier_free_guidance: bool = True,
311
+ negative_prompt: str = "",
312
+ num_images_per_prompt: int = 1,
313
+ device: Optional[torch.device] = None,
314
+ prompt_embeds: Optional[torch.FloatTensor] = None,
315
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
316
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
317
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
318
+ text_encoder_max_tokens: int = 256,
319
+ **kwargs,
320
+ ):
321
+ r"""
322
+ Encodes the prompt into text encoder hidden states.
323
+
324
+ Args:
325
+ prompt (`str` or `List[str]`, *optional*):
326
+ prompt to be encoded
327
+ negative_prompt (`str` or `List[str]`, *optional*):
328
+ The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
329
+ instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
330
+ this pipeline, this should be "".
331
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
332
+ whether to use classifier free guidance or not
333
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
334
+ number of images that should be generated per prompt
335
+ device: (`torch.device`, *optional*):
336
+ torch device to place the resulting embeddings on
337
+ prompt_embeds (`torch.FloatTensor`, *optional*):
338
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
339
+ provided, text embeddings will be generated from `prompt` input argument.
340
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
341
+ Pre-generated negative text embeddings.
342
+ """
343
+
344
+ if "mask_feature" in kwargs:
345
+ deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version."
346
+ deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False)
347
+
348
+ if device is None:
349
+ device = self._execution_device
350
+
351
+ if prompt is not None and isinstance(prompt, str):
352
+ batch_size = 1
353
+ elif prompt is not None and isinstance(prompt, list):
354
+ batch_size = len(prompt)
355
+ else:
356
+ batch_size = prompt_embeds.shape[0]
357
+
358
+ # See Section 3.1. of the paper.
359
+ max_length = (
360
+ text_encoder_max_tokens # TPU supports only lengths multiple of 128
361
+ )
362
+ if prompt_embeds is None:
363
+ assert (
364
+ self.text_encoder is not None
365
+ ), "You should provide either prompt_embeds or self.text_encoder should not be None,"
366
+ text_enc_device = next(self.text_encoder.parameters()).device
367
+ prompt = self._text_preprocessing(prompt)
368
+ text_inputs = self.tokenizer(
369
+ prompt,
370
+ padding="max_length",
371
+ max_length=max_length,
372
+ truncation=True,
373
+ add_special_tokens=True,
374
+ return_tensors="pt",
375
+ )
376
+ text_input_ids = text_inputs.input_ids
377
+ untruncated_ids = self.tokenizer(
378
+ prompt, padding="longest", return_tensors="pt"
379
+ ).input_ids
380
+
381
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[
382
+ -1
383
+ ] and not torch.equal(text_input_ids, untruncated_ids):
384
+ removed_text = self.tokenizer.batch_decode(
385
+ untruncated_ids[:, max_length - 1 : -1]
386
+ )
387
+ logger.warning(
388
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
389
+ f" {max_length} tokens: {removed_text}"
390
+ )
391
+
392
+ prompt_attention_mask = text_inputs.attention_mask
393
+ prompt_attention_mask = prompt_attention_mask.to(text_enc_device)
394
+ prompt_attention_mask = prompt_attention_mask.to(device)
395
+
396
+ prompt_embeds = self.text_encoder(
397
+ text_input_ids.to(text_enc_device), attention_mask=prompt_attention_mask
398
+ )
399
+ prompt_embeds = prompt_embeds[0]
400
+
401
+ if self.text_encoder is not None:
402
+ dtype = self.text_encoder.dtype
403
+ elif self.transformer is not None:
404
+ dtype = self.transformer.dtype
405
+ else:
406
+ dtype = None
407
+
408
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
409
+
410
+ bs_embed, seq_len, _ = prompt_embeds.shape
411
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
412
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
413
+ prompt_embeds = prompt_embeds.view(
414
+ bs_embed * num_images_per_prompt, seq_len, -1
415
+ )
416
+ prompt_attention_mask = prompt_attention_mask.repeat(1, num_images_per_prompt)
417
+ prompt_attention_mask = prompt_attention_mask.view(
418
+ bs_embed * num_images_per_prompt, -1
419
+ )
420
+
421
+ # get unconditional embeddings for classifier free guidance
422
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
423
+ uncond_tokens = self._text_preprocessing(negative_prompt)
424
+ uncond_tokens = uncond_tokens * batch_size
425
+ max_length = prompt_embeds.shape[1]
426
+ uncond_input = self.tokenizer(
427
+ uncond_tokens,
428
+ padding="max_length",
429
+ max_length=max_length,
430
+ truncation=True,
431
+ return_attention_mask=True,
432
+ add_special_tokens=True,
433
+ return_tensors="pt",
434
+ )
435
+ negative_prompt_attention_mask = uncond_input.attention_mask
436
+ negative_prompt_attention_mask = negative_prompt_attention_mask.to(
437
+ text_enc_device
438
+ )
439
+
440
+ negative_prompt_embeds = self.text_encoder(
441
+ uncond_input.input_ids.to(text_enc_device),
442
+ attention_mask=negative_prompt_attention_mask,
443
+ )
444
+ negative_prompt_embeds = negative_prompt_embeds[0]
445
+
446
+ if do_classifier_free_guidance:
447
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
448
+ seq_len = negative_prompt_embeds.shape[1]
449
+
450
+ negative_prompt_embeds = negative_prompt_embeds.to(
451
+ dtype=dtype, device=device
452
+ )
453
+
454
+ negative_prompt_embeds = negative_prompt_embeds.repeat(
455
+ 1, num_images_per_prompt, 1
456
+ )
457
+ negative_prompt_embeds = negative_prompt_embeds.view(
458
+ batch_size * num_images_per_prompt, seq_len, -1
459
+ )
460
+
461
+ negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(
462
+ 1, num_images_per_prompt
463
+ )
464
+ negative_prompt_attention_mask = negative_prompt_attention_mask.view(
465
+ bs_embed * num_images_per_prompt, -1
466
+ )
467
+ else:
468
+ negative_prompt_embeds = None
469
+ negative_prompt_attention_mask = None
470
+
471
+ return (
472
+ prompt_embeds,
473
+ prompt_attention_mask,
474
+ negative_prompt_embeds,
475
+ negative_prompt_attention_mask,
476
+ )
477
+
478
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
479
+ def prepare_extra_step_kwargs(self, generator, eta):
480
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
481
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
482
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
483
+ # and should be between [0, 1]
484
+
485
+ accepts_eta = "eta" in set(
486
+ inspect.signature(self.scheduler.step).parameters.keys()
487
+ )
488
+ extra_step_kwargs = {}
489
+ if accepts_eta:
490
+ extra_step_kwargs["eta"] = eta
491
+
492
+ # check if the scheduler accepts generator
493
+ accepts_generator = "generator" in set(
494
+ inspect.signature(self.scheduler.step).parameters.keys()
495
+ )
496
+ if accepts_generator:
497
+ extra_step_kwargs["generator"] = generator
498
+ return extra_step_kwargs
499
+
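+ # Minimal sketch of the signature introspection above, with illustrative names; only
+ # schedulers whose `step()` actually accepts `eta` / `generator` receive those kwargs.
+ #
+ #   import inspect
+ #   step_params = inspect.signature(scheduler.step).parameters
+ #   extra_kwargs = {k: v for k, v in {"eta": 0.0, "generator": None}.items() if k in step_params}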
500
+ def check_inputs(
501
+ self,
502
+ prompt,
503
+ height,
504
+ width,
505
+ negative_prompt,
506
+ prompt_embeds=None,
507
+ negative_prompt_embeds=None,
508
+ prompt_attention_mask=None,
509
+ negative_prompt_attention_mask=None,
510
+ enhance_prompt=False,
511
+ ):
512
+ if height % 8 != 0 or width % 8 != 0:
513
+ raise ValueError(
514
+ f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
515
+ )
516
+
517
+ if prompt is not None and prompt_embeds is not None:
518
+ raise ValueError(
519
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
520
+ " only forward one of the two."
521
+ )
522
+ elif prompt is None and prompt_embeds is None:
523
+ raise ValueError(
524
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
525
+ )
526
+ elif prompt is not None and (
527
+ not isinstance(prompt, str) and not isinstance(prompt, list)
528
+ ):
529
+ raise ValueError(
530
+ f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
531
+ )
532
+
533
+ if prompt is not None and negative_prompt_embeds is not None:
534
+ raise ValueError(
535
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
536
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
537
+ )
538
+
539
+ if negative_prompt is not None and negative_prompt_embeds is not None:
540
+ raise ValueError(
541
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
542
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
543
+ )
544
+
545
+ if prompt_embeds is not None and prompt_attention_mask is None:
546
+ raise ValueError(
547
+ "Must provide `prompt_attention_mask` when specifying `prompt_embeds`."
548
+ )
549
+
550
+ if (
551
+ negative_prompt_embeds is not None
552
+ and negative_prompt_attention_mask is None
553
+ ):
554
+ raise ValueError(
555
+ "Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`."
556
+ )
557
+
558
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
559
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
560
+ raise ValueError(
561
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
562
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
563
+ f" {negative_prompt_embeds.shape}."
564
+ )
565
+ if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
566
+ raise ValueError(
567
+ "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
568
+ f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
569
+ f" {negative_prompt_attention_mask.shape}."
570
+ )
571
+
572
+ if enhance_prompt:
573
+ assert (
574
+ self.prompt_enhancer_image_caption_model is not None
575
+ ), "Image caption model must be initialized if enhance_prompt is True"
576
+ assert (
577
+ self.prompt_enhancer_image_caption_processor is not None
578
+ ), "Image caption processor must be initialized if enhance_prompt is True"
579
+ assert (
580
+ self.prompt_enhancer_llm_model is not None
581
+ ), "Text prompt enhancer model must be initialized if enhance_prompt is True"
582
+ assert (
583
+ self.prompt_enhancer_llm_tokenizer is not None
584
+ ), "Text prompt enhancer tokenizer must be initialized if enhance_prompt is True"
585
+
586
+ def _text_preprocessing(self, text):
587
+ if not isinstance(text, (tuple, list)):
588
+ text = [text]
589
+
590
+ def process(text: str):
591
+ text = text.strip()
592
+ return text
593
+
594
+ return [process(t) for t in text]
595
+
596
+ @staticmethod
597
+ def add_noise_to_image_conditioning_latents(
598
+ t: float,
599
+ init_latents: torch.Tensor,
600
+ latents: torch.Tensor,
601
+ noise_scale: float,
602
+ conditioning_mask: torch.Tensor,
603
+ generator,
604
+ eps=1e-6,
605
+ ):
606
+ """
607
+ Add timestep-dependent noise to the hard-conditioning latents.
608
+ This helps with motion continuity, especially when conditioned on a single frame.
609
+ """
610
+ noise = randn_tensor(
611
+ latents.shape,
612
+ generator=generator,
613
+ device=latents.device,
614
+ dtype=latents.dtype,
615
+ )
616
+ # Add noise only to hard-conditioning latents (conditioning_mask = 1.0)
617
+ need_to_noise = (conditioning_mask > 1.0 - eps).unsqueeze(-1)
618
+ noised_latents = init_latents + noise_scale * noise * (t**2)
619
+ latents = torch.where(need_to_noise, noised_latents, latents)
620
+ return latents
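+ # Hedged numeric sketch of the schedule above: the perturbation added to
+ # hard-conditioned tokens shrinks quadratically with the timestep.
+ #
+ #   noise_scale = 0.15
+ #   noise_scale * 0.8 ** 2   # 0.096 at an early step (t = 0.8)
+ #   noise_scale * 0.2 ** 2   # 0.006 at a late step (t = 0.2)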
621
+
622
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
623
+ def prepare_latents(
624
+ self,
625
+ latents: torch.Tensor | None,
626
+ media_items: torch.Tensor | None,
627
+ timestep: float,
628
+ latent_shape: torch.Size | Tuple[Any, ...],
629
+ dtype: torch.dtype,
630
+ device: torch.device,
631
+ generator: torch.Generator | List[torch.Generator],
632
+ vae_per_channel_normalize: bool = True,
633
+ ):
634
+ """
635
+ Prepare the initial latent tensor to be denoised.
636
+ The latents are either pure noise or a noised version of the encoded media items.
637
+ Args:
638
+ latents (`torch.FloatTensor` or `None`):
639
+ The latents to use (provided by the user) or `None` to create new latents.
640
+ media_items (`torch.FloatTensor` or `None`):
641
+ An image or video to be updated using img2img or vid2vid. The media item is encoded and noised.
642
+ timestep (`float`):
643
+ The timestep to noise the encoded media_items to.
644
+ latent_shape (`torch.Size`):
645
+ The target latent shape.
646
+ dtype (`torch.dtype`):
647
+ The target dtype.
648
+ device (`torch.device`):
649
+ The target device.
650
+ generator (`torch.Generator` or `List[torch.Generator]`):
651
+ Generator(s) to be used for the noising process.
652
+ vae_per_channel_normalize ('bool'):
653
+ When encoding the media_items, whether to normalize the latents per-channel.
654
+ Returns:
655
+ `torch.FloatTensor`: The latents to be used for the denoising process. This is a tensor of shape
656
+ (batch_size, num_channels, num_frames, height, width).
657
+ """
658
+ if isinstance(generator, list) and len(generator) != latent_shape[0]:
659
+ raise ValueError(
660
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
661
+ f" size of {latent_shape[0]}. Make sure the batch size matches the length of the generators."
662
+ )
663
+
664
+ # Initialize the latents with the given latents or encoded media item, if provided
665
+ assert (
666
+ latents is None or media_items is None
667
+ ), "Cannot provide both latents and media_items. Please provide only one of the two."
668
+
669
+ assert (
670
+ latents is None and media_items is None or timestep < 1.0
671
+ ), "Input media_item or latents are provided, but they will be replaced with noise."
672
+
673
+ if media_items is not None:
674
+ latents = vae_encode(
675
+ media_items.to(dtype=self.vae.dtype, device=self.vae.device),
676
+ self.vae,
677
+ vae_per_channel_normalize=vae_per_channel_normalize,
678
+ )
679
+ if latents is not None:
680
+ assert (
681
+ latents.shape == latent_shape
682
+ ), f"Latents have to be of shape {latent_shape} but are {latents.shape}."
683
+ latents = latents.to(device=device, dtype=dtype)
684
+
685
+ # For backward compatibility, generate in the "patchified" shape and rearrange
686
+ b, c, f, h, w = latent_shape
687
+ noise = randn_tensor(
688
+ (b, f * h * w, c), generator=generator, device=device, dtype=dtype
689
+ )
690
+ noise = rearrange(noise, "b (f h w) c -> b c f h w", f=f, h=h, w=w)
691
+
692
+ # scale the initial noise by the standard deviation required by the scheduler
693
+ noise = noise * self.scheduler.init_noise_sigma
694
+
695
+ if latents is None:
696
+ latents = noise
697
+ else:
698
+ # Noise the latents to the required (first) timestep
699
+ latents = timestep * noise + (1 - timestep) * latents
700
+
701
+ return latents
702
+
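+ # Illustrative sketch of the noising used above, assuming `encoded` already holds VAE
+ # latents of shape (b, c, f, h, w); the tensor names are assumptions for this example.
+ #
+ #   timestep = 0.6
+ #   noisy = timestep * torch.randn_like(encoded) + (1 - timestep) * encoded
+ #   # timestep = 1.0 would give pure noise; timestep = 0.0 returns `encoded` unchanged.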
703
+ @staticmethod
704
+ def classify_height_width_bin(
705
+ height: int, width: int, ratios: dict
706
+ ) -> Tuple[int, int]:
707
+ """Returns binned height and width."""
708
+ ar = float(height / width)
709
+ closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar))
710
+ default_hw = ratios[closest_ratio]
711
+ return int(default_hw[0]), int(default_hw[1])
712
+
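+ # Worked example for the binning above, using a hypothetical ratios table:
+ #
+ #   ratios = {"0.5625": (480, 854), "1.0": (640, 640), "1.7778": (854, 480)}
+ #   LTXVideoPipeline.classify_height_width_bin(720, 1280, ratios)  # -> (480, 854)
+ #   # 720 / 1280 = 0.5625, which matches the "0.5625" bin exactly.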
713
+ @staticmethod
714
+ def resize_and_crop_tensor(
715
+ samples: torch.Tensor, new_width: int, new_height: int
716
+ ) -> torch.Tensor:
717
+ n_frames, orig_height, orig_width = samples.shape[-3:]
718
+
719
+ # Check if resizing is needed
720
+ if orig_height != new_height or orig_width != new_width:
721
+ ratio = max(new_height / orig_height, new_width / orig_width)
722
+ resized_width = int(orig_width * ratio)
723
+ resized_height = int(orig_height * ratio)
724
+
725
+ # Resize
726
+ samples = LTXVideoPipeline.resize_tensor(
727
+ samples, resized_height, resized_width
728
+ )
729
+
730
+ # Center Crop
731
+ start_x = (resized_width - new_width) // 2
732
+ end_x = start_x + new_width
733
+ start_y = (resized_height - new_height) // 2
734
+ end_y = start_y + new_height
735
+ samples = samples[..., start_y:end_y, start_x:end_x]
736
+
737
+ return samples
738
+
739
+ @staticmethod
740
+ def resize_tensor(media_items, height, width):
741
+ n_frames = media_items.shape[2]
742
+ if media_items.shape[-2:] != (height, width):
743
+ media_items = rearrange(media_items, "b c n h w -> (b n) c h w")
744
+ media_items = F.interpolate(
745
+ media_items,
746
+ size=(height, width),
747
+ mode="bilinear",
748
+ align_corners=False,
749
+ )
750
+ media_items = rearrange(media_items, "(b n) c h w -> b c n h w", n=n_frames)
751
+ return media_items
752
+
753
+ @torch.no_grad()
754
+ def __call__(
755
+ self,
756
+ height: int,
757
+ width: int,
758
+ num_frames: int,
759
+ frame_rate: float,
760
+ prompt: Union[str, List[str]] = None,
761
+ negative_prompt: str = "",
762
+ num_inference_steps: int = 20,
763
+ skip_initial_inference_steps: int = 0,
764
+ skip_final_inference_steps: int = 0,
765
+ timesteps: List[int] = None,
766
+ guidance_scale: Union[float, List[float]] = 4.5,
767
+ cfg_star_rescale: bool = False,
768
+ skip_layer_strategy: Optional[SkipLayerStrategy] = None,
769
+ skip_block_list: Optional[Union[List[List[int]], List[int]]] = None,
770
+ stg_scale: Union[float, List[float]] = 1.0,
771
+ rescaling_scale: Union[float, List[float]] = 0.7,
772
+ guidance_timesteps: Optional[List[int]] = None,
773
+ num_images_per_prompt: Optional[int] = 1,
774
+ eta: float = 0.0,
775
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
776
+ latents: Optional[torch.FloatTensor] = None,
777
+ prompt_embeds: Optional[torch.FloatTensor] = None,
778
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
779
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
780
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
781
+ output_type: Optional[str] = "pil",
782
+ return_dict: bool = True,
783
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
784
+ conditioning_items: Optional[List[ConditioningItem]] = None,
785
+ decode_timestep: Union[List[float], float] = 0.0,
786
+ decode_noise_scale: Optional[List[float]] = None,
787
+ mixed_precision: bool = False,
788
+ offload_to_cpu: bool = False,
789
+ enhance_prompt: bool = False,
790
+ text_encoder_max_tokens: int = 256,
791
+ stochastic_sampling: bool = False,
792
+ media_items: Optional[torch.Tensor] = None,
793
+ tone_map_compression_ratio: float = 0.0,
794
+ **kwargs,
795
+ ) -> Union[ImagePipelineOutput, Tuple]:
796
+ """
797
+ Function invoked when calling the pipeline for generation.
798
+
799
+ Args:
800
+ prompt (`str` or `List[str]`, *optional*):
801
+ The prompt or prompts to guide the generation. If not defined, one has to pass `prompt_embeds`
802
+ instead.
803
+ negative_prompt (`str` or `List[str]`, *optional*):
804
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
805
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
806
+ less than `1`).
807
+ num_inference_steps (`int`, *optional*, defaults to 20):
808
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
809
+ expense of slower inference. If `timesteps` is provided, this parameter is ignored.
810
+ skip_initial_inference_steps (`int`, *optional*, defaults to 0):
811
+ The number of initial timesteps to skip. After calculating the timesteps, this number of timesteps will
812
+ be removed from the beginning of the timesteps list, meaning the highest-timestep values will not run.
813
+ skip_final_inference_steps (`int`, *optional*, defaults to 0):
814
+ The number of final timesteps to skip. After calculating the timesteps, this number of timesteps will
815
+ be removed from the end of the timesteps list, meaning the lowest-timestep values will not run.
816
+ timesteps (`List[int]`, *optional*):
817
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
818
+ timesteps are used. Must be in descending order.
819
+ guidance_scale (`float`, *optional*, defaults to 4.5):
820
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
821
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
822
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
823
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
824
+ usually at the expense of lower image quality.
825
+ cfg_star_rescale (`bool`, *optional*, defaults to `False`):
826
+ If set to `True`, applies the CFG star rescale. Scales the negative prediction according to dot
827
+ product between positive and negative.
828
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
829
+ The number of images to generate per prompt.
830
+ height (`int`):
832
+ The height in pixels of the generated video.
833
+ width (`int`):
834
+ The width in pixels of the generated video.
834
+ eta (`float`, *optional*, defaults to 0.0):
835
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
836
+ [`schedulers.DDIMScheduler`], will be ignored for others.
837
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
838
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
839
+ to make generation deterministic.
840
+ latents (`torch.FloatTensor`, *optional*):
841
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
842
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
843
+ tensor will be generated by sampling using the supplied random `generator`.
844
+ prompt_embeds (`torch.FloatTensor`, *optional*):
845
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
846
+ provided, text embeddings will be generated from `prompt` input argument.
847
+ prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings.
848
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
849
+ Pre-generated negative text embeddings. This negative prompt should be "". If not
850
+ provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
851
+ negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
852
+ Pre-generated attention mask for negative text embeddings.
853
+ output_type (`str`, *optional*, defaults to `"pil"`):
854
+ The output format of the generated image. Choose between
855
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
856
+ return_dict (`bool`, *optional*, defaults to `True`):
857
+ Whether to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple.
858
+ callback_on_step_end (`Callable`, *optional*):
859
+ A function that is called at the end of each denoising step during inference. It is called
860
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
861
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
862
+ `callback_on_step_end_tensor_inputs`.
863
+ use_resolution_binning (`bool` defaults to `True`):
864
+ If set to `True`, the requested height and width are first mapped to the closest resolutions using
865
+ `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to
866
+ the requested resolution. Useful for generating non-square images.
867
+ enhance_prompt (`bool`, *optional*, defaults to `False`):
868
+ If set to `True`, the prompt is enhanced using an LLM.
869
+ text_encoder_max_tokens (`int`, *optional*, defaults to `256`):
870
+ The maximum number of tokens to use for the text encoder.
871
+ stochastic_sampling (`bool`, *optional*, defaults to `False`):
872
+ If set to `True`, the sampling is stochastic. If set to `False`, the sampling is deterministic.
873
+ media_items ('torch.Tensor', *optional*):
874
+ The input media item used for image-to-image / video-to-video.
875
+ tone_map_compression_ratio: compression ratio for tone mapping, defaults to 0.0.
876
+ If set to 0.0, no tone mapping is applied. If set to 1.0, full compression is applied.
877
+ Examples:
878
+
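+ A minimal usage sketch (the checkpoint path and the exact resolution/frame values
+ below are assumptions for illustration, not values shipped with this repository):
+
+ ```py
+ >>> pipe = LTXVideoPipeline.from_pretrained("path/to/ltx-video-checkpoint")
+ >>> video = pipe(
+ ...     prompt="a sailboat drifting at sunset",
+ ...     height=480, width=704, num_frames=121, frame_rate=25.0,
+ ...     num_inference_steps=20, is_video=True,
+ ... ).images
+ ```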
879
+ Returns:
880
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
881
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
882
+ returned where the first element is a list with the generated images
883
+ """
884
+ if "mask_feature" in kwargs:
885
+ deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version."
886
+ deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False)
887
+
888
+ is_video = kwargs.get("is_video", False)
889
+ self.check_inputs(
890
+ prompt,
891
+ height,
892
+ width,
893
+ negative_prompt,
894
+ prompt_embeds,
895
+ negative_prompt_embeds,
896
+ prompt_attention_mask,
897
+ negative_prompt_attention_mask,
898
+ )
899
+
900
+ # 2. Default height and width to transformer
901
+ if prompt is not None and isinstance(prompt, str):
902
+ batch_size = 1
903
+ elif prompt is not None and isinstance(prompt, list):
904
+ batch_size = len(prompt)
905
+ else:
906
+ batch_size = prompt_embeds.shape[0]
907
+
908
+ device = self._execution_device
909
+
910
+ self.video_scale_factor = self.video_scale_factor if is_video else 1
911
+ vae_per_channel_normalize = kwargs.get("vae_per_channel_normalize", True)
912
+ image_cond_noise_scale = kwargs.get("image_cond_noise_scale", 0.0)
913
+
914
+ latent_height = height // self.vae_scale_factor
915
+ latent_width = width // self.vae_scale_factor
916
+ latent_num_frames = num_frames // self.video_scale_factor
917
+ if isinstance(self.vae, CausalVideoAutoencoder) and is_video:
918
+ latent_num_frames += 1
919
+ latent_shape = (
920
+ batch_size * num_images_per_prompt,
921
+ self.transformer.config.in_channels,
922
+ latent_num_frames,
923
+ latent_height,
924
+ latent_width,
925
+ )
926
+
927
+ # Prepare the list of denoising time-steps
928
+
929
+ retrieve_timesteps_kwargs = {}
930
+ if isinstance(self.scheduler, TimestepShifter):
931
+ retrieve_timesteps_kwargs["samples_shape"] = latent_shape
932
+
933
+ assert (
934
+ skip_initial_inference_steps == 0
935
+ or latents is not None
936
+ or media_items is not None
937
+ ), (
938
+ f"skip_initial_inference_steps ({skip_initial_inference_steps}) is used for image-to-image/video-to-video - "
939
+ "media_item or latents should be provided."
940
+ )
941
+
942
+ timesteps, num_inference_steps = retrieve_timesteps(
943
+ self.scheduler,
944
+ num_inference_steps,
945
+ device,
946
+ timesteps,
947
+ skip_initial_inference_steps=skip_initial_inference_steps,
948
+ skip_final_inference_steps=skip_final_inference_steps,
949
+ **retrieve_timesteps_kwargs,
950
+ )
951
+
952
+ if self.allowed_inference_steps is not None:
953
+ for timestep in [round(x, 4) for x in timesteps.tolist()]:
954
+ assert (
955
+ timestep in self.allowed_inference_steps
956
+ ), f"Invalid inference timestep {timestep}. Allowed timesteps are {self.allowed_inference_steps}."
957
+
958
+ if guidance_timesteps:
959
+ guidance_mapping = []
960
+ for timestep in timesteps:
961
+ indices = [
962
+ i for i, val in enumerate(guidance_timesteps) if val <= timestep
963
+ ]
964
+ # assert len(indices) > 0, f"No guidance timestep found for {timestep}"
965
+ guidance_mapping.append(
966
+ indices[0] if len(indices) > 0 else (len(guidance_timesteps) - 1)
967
+ )
968
+
969
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
970
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
971
+ # corresponds to doing no classifier free guidance.
972
+ if not isinstance(guidance_scale, List):
973
+ guidance_scale = [guidance_scale] * len(timesteps)
974
+ else:
975
+ guidance_scale = [
976
+ guidance_scale[guidance_mapping[i]] for i in range(len(timesteps))
977
+ ]
978
+
979
+ if not isinstance(stg_scale, List):
980
+ stg_scale = [stg_scale] * len(timesteps)
981
+ else:
982
+ stg_scale = [stg_scale[guidance_mapping[i]] for i in range(len(timesteps))]
983
+
984
+ if not isinstance(rescaling_scale, List):
985
+ rescaling_scale = [rescaling_scale] * len(timesteps)
986
+ else:
987
+ rescaling_scale = [
988
+ rescaling_scale[guidance_mapping[i]] for i in range(len(timesteps))
989
+ ]
990
+
991
+ # Normalize skip_block_list to always be None or a list of lists matching timesteps
992
+ if skip_block_list is not None:
993
+ # Convert single list to list of lists if needed
994
+ if len(skip_block_list) == 0 or not isinstance(skip_block_list[0], list):
995
+ skip_block_list = [skip_block_list] * len(timesteps)
996
+ else:
997
+ new_skip_block_list = []
998
+ for i, timestep in enumerate(timesteps):
999
+ new_skip_block_list.append(skip_block_list[guidance_mapping[i]])
1000
+ skip_block_list = new_skip_block_list
1001
+
1002
+ if enhance_prompt:
1003
+ self.prompt_enhancer_image_caption_model = (
1004
+ self.prompt_enhancer_image_caption_model.to(self._execution_device)
1005
+ )
1006
+ self.prompt_enhancer_llm_model = self.prompt_enhancer_llm_model.to(
1007
+ self._execution_device
1008
+ )
1009
+
1010
+ prompt = generate_cinematic_prompt(
1011
+ self.prompt_enhancer_image_caption_model,
1012
+ self.prompt_enhancer_image_caption_processor,
1013
+ self.prompt_enhancer_llm_model,
1014
+ self.prompt_enhancer_llm_tokenizer,
1015
+ prompt,
1016
+ conditioning_items,
1017
+ max_new_tokens=text_encoder_max_tokens,
1018
+ )
1019
+
1020
+ # --- [PROMPT ENHANCER DEBUG LOG] ---
1021
+ print("--- [ASSISTANT DIRECTOR LOG (PROMPT ENHANCER)] ---")
1022
+ print("Maestro's original prompt:", kwargs.get("original_prompt_for_logging", "N/A")) # We need to pass this in
1023
+ print("FINAL ENHANCED PROMPT (sent to LTX):", prompt)
1024
+ print("--- [END OF ASSISTANT DIRECTOR LOG] ---")
1025
+ # --- [END OF DEBUG LOG] ---
1026
+
1027
+
1028
+ # 3. Encode input prompt
1029
+ if self.text_encoder is not None:
1030
+ self.text_encoder = self.text_encoder.to(self._execution_device)
1031
+
1032
+ (
1033
+ prompt_embeds,
1034
+ prompt_attention_mask,
1035
+ negative_prompt_embeds,
1036
+ negative_prompt_attention_mask,
1037
+ ) = self.encode_prompt(
1038
+ prompt,
1039
+ True,
1040
+ negative_prompt=negative_prompt,
1041
+ num_images_per_prompt=num_images_per_prompt,
1042
+ device=device,
1043
+ prompt_embeds=prompt_embeds,
1044
+ negative_prompt_embeds=negative_prompt_embeds,
1045
+ prompt_attention_mask=prompt_attention_mask,
1046
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
1047
+ text_encoder_max_tokens=text_encoder_max_tokens,
1048
+ )
1049
+
1050
+ if offload_to_cpu and self.text_encoder is not None:
1051
+ self.text_encoder = self.text_encoder.cpu()
1052
+
1053
+ self.transformer = self.transformer.to(self._execution_device)
1054
+
1055
+ prompt_embeds_batch = prompt_embeds
1056
+ prompt_attention_mask_batch = prompt_attention_mask
1057
+ negative_prompt_embeds = (
1058
+ torch.zeros_like(prompt_embeds)
1059
+ if negative_prompt_embeds is None
1060
+ else negative_prompt_embeds
1061
+ )
1062
+ negative_prompt_attention_mask = (
1063
+ torch.zeros_like(prompt_attention_mask)
1064
+ if negative_prompt_attention_mask is None
1065
+ else negative_prompt_attention_mask
1066
+ )
1067
+
1068
+ prompt_embeds_batch = torch.cat(
1069
+ [negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0
1070
+ )
1071
+ prompt_attention_mask_batch = torch.cat(
1072
+ [
1073
+ negative_prompt_attention_mask,
1074
+ prompt_attention_mask,
1075
+ prompt_attention_mask,
1076
+ ],
1077
+ dim=0,
1078
+ )
1079
+ # 4. Prepare the initial latents using the provided media and conditioning items
1080
+
1081
+ # Prepare the initial latents tensor, shape = (b, c, f, h, w)
1082
+ latents = self.prepare_latents(
1083
+ latents=latents,
1084
+ media_items=media_items,
1085
+ timestep=timesteps[0],
1086
+ latent_shape=latent_shape,
1087
+ dtype=prompt_embeds.dtype,
1088
+ device=device,
1089
+ generator=generator,
1090
+ vae_per_channel_normalize=vae_per_channel_normalize,
1091
+ )
1092
+
1093
+ # Update the latents with the conditioning items and patchify them into (b, n, c)
1094
+ latents, pixel_coords, conditioning_mask, num_cond_latents = (
1095
+ self.prepare_conditioning(
1096
+ conditioning_items=conditioning_items,
1097
+ init_latents=latents,
1098
+ num_frames=num_frames,
1099
+ height=height,
1100
+ width=width,
1101
+ vae_per_channel_normalize=vae_per_channel_normalize,
1102
+ generator=generator,
1103
+ )
1104
+ )
1105
+ init_latents = latents.clone() # Used for image_cond_noise_update
1106
+
1107
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1108
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1109
+
1110
+ # 7. Denoising loop
1111
+ num_warmup_steps = max(
1112
+ len(timesteps) - num_inference_steps * self.scheduler.order, 0
1113
+ )
1114
+
1115
+ orig_conditioning_mask = conditioning_mask
1116
+
1117
+ # Before compiling this code, please be aware:
1118
+ # This code might generate different input shapes if some timesteps have no STG or CFG.
1119
+ # This means that the code might need to be compiled multiple times.
1120
+ # To avoid that, use the same STG and CFG values for all timesteps.
1121
+
1122
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1123
+ for i, t in enumerate(timesteps):
1124
+ do_classifier_free_guidance = guidance_scale[i] > 1.0
1125
+ do_spatio_temporal_guidance = stg_scale[i] > 0
1126
+ do_rescaling = rescaling_scale[i] != 1.0
1127
+
1128
+ num_conds = 1
1129
+ if do_classifier_free_guidance:
1130
+ num_conds += 1
1131
+ if do_spatio_temporal_guidance:
1132
+ num_conds += 1
1133
+
1134
+ if do_classifier_free_guidance and do_spatio_temporal_guidance:
1135
+ indices = slice(batch_size * 0, batch_size * 3)
1136
+ elif do_classifier_free_guidance:
1137
+ indices = slice(batch_size * 0, batch_size * 2)
1138
+ elif do_spatio_temporal_guidance:
1139
+ indices = slice(batch_size * 1, batch_size * 3)
1140
+ else:
1141
+ indices = slice(batch_size * 1, batch_size * 2)
1142
+
1143
+ # Prepare skip layer masks
1144
+ skip_layer_mask: Optional[torch.Tensor] = None
1145
+ if do_spatio_temporal_guidance:
1146
+ if skip_block_list is not None:
1147
+ skip_layer_mask = self.transformer.create_skip_layer_mask(
1148
+ batch_size, num_conds, num_conds - 1, skip_block_list[i]
1149
+ )
1150
+
1151
+ batch_pixel_coords = torch.cat([pixel_coords] * num_conds)
1152
+ conditioning_mask = orig_conditioning_mask
1153
+ if conditioning_mask is not None and is_video:
1154
+ assert num_images_per_prompt == 1
1155
+ conditioning_mask = torch.cat([conditioning_mask] * num_conds)
1156
+ fractional_coords = batch_pixel_coords.to(torch.float32)
1157
+ fractional_coords[:, 0] = fractional_coords[:, 0] * (1.0 / frame_rate)
1158
+
1159
+ if conditioning_mask is not None and image_cond_noise_scale > 0.0:
1160
+ latents = self.add_noise_to_image_conditioning_latents(
1161
+ t,
1162
+ init_latents,
1163
+ latents,
1164
+ image_cond_noise_scale,
1165
+ orig_conditioning_mask,
1166
+ generator,
1167
+ )
1168
+
1169
+ latent_model_input = (
1170
+ torch.cat([latents] * num_conds) if num_conds > 1 else latents
1171
+ )
1172
+ latent_model_input = self.scheduler.scale_model_input(
1173
+ latent_model_input, t
1174
+ )
1175
+
1176
+ current_timestep = t
1177
+ if not torch.is_tensor(current_timestep):
1178
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
1179
+ # This would be a good case for the `match` statement (Python 3.10+)
1180
+ is_mps = latent_model_input.device.type == "mps"
1181
+ if isinstance(current_timestep, float):
1182
+ dtype = torch.float32 if is_mps else torch.float64
1183
+ else:
1184
+ dtype = torch.int32 if is_mps else torch.int64
1185
+ current_timestep = torch.tensor(
1186
+ [current_timestep],
1187
+ dtype=dtype,
1188
+ device=latent_model_input.device,
1189
+ )
1190
+ elif len(current_timestep.shape) == 0:
1191
+ current_timestep = current_timestep[None].to(
1192
+ latent_model_input.device
1193
+ )
1194
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1195
+ current_timestep = current_timestep.expand(
1196
+ latent_model_input.shape[0]
1197
+ ).unsqueeze(-1)
1198
+
1199
+ if conditioning_mask is not None:
1200
+ # Conditioning latents have an initial timestep and noising level of (1.0 - conditioning_mask)
1201
+ # and will start to be denoised when the current timestep is lower than their conditioning timestep.
1202
+ current_timestep = torch.min(
1203
+ current_timestep, 1.0 - conditioning_mask
1204
+ )
1205
+
1206
+ # Choose the appropriate context manager based on `mixed_precision`
1207
+ if mixed_precision:
1208
+ context_manager = torch.autocast(device.type, dtype=torch.bfloat16)
1209
+ else:
1210
+ context_manager = nullcontext() # Dummy context manager
1211
+
1212
+ # predict noise model_output
1213
+ with context_manager:
1214
+ noise_pred = self.transformer(
1215
+ latent_model_input.to(self.transformer.dtype),
1216
+ indices_grid=fractional_coords,
1217
+ encoder_hidden_states=prompt_embeds_batch[indices].to(
1218
+ self.transformer.dtype
1219
+ ),
1220
+ encoder_attention_mask=prompt_attention_mask_batch[indices],
1221
+ timestep=current_timestep,
1222
+ skip_layer_mask=skip_layer_mask,
1223
+ skip_layer_strategy=skip_layer_strategy,
1224
+ return_dict=False,
1225
+ )[0]
1226
+
1227
+ # perform guidance
1228
+ if do_spatio_temporal_guidance:
1229
+ noise_pred_text, noise_pred_text_perturb = noise_pred.chunk(
1230
+ num_conds
1231
+ )[-2:]
1232
+ if do_classifier_free_guidance:
1233
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(num_conds)[:2]
1234
+
1235
+ if cfg_star_rescale:
1236
+ # Rescales the unconditional noise prediction using the projection of the conditional prediction onto it:
1237
+ # α = (⟨ε_text, ε_uncond⟩ / ||ε_uncond||²), then ε_uncond ← α * ε_uncond
1238
+ # where ε_text is the conditional noise prediction and ε_uncond is the unconditional one.
1239
+ positive_flat = noise_pred_text.view(batch_size, -1)
1240
+ negative_flat = noise_pred_uncond.view(batch_size, -1)
1241
+ dot_product = torch.sum(
1242
+ positive_flat * negative_flat, dim=1, keepdim=True
1243
+ )
1244
+ squared_norm = (
1245
+ torch.sum(negative_flat**2, dim=1, keepdim=True) + 1e-8
1246
+ )
1247
+ alpha = dot_product / squared_norm
1248
+ noise_pred_uncond = alpha * noise_pred_uncond
1249
+
1250
+ noise_pred = noise_pred_uncond + guidance_scale[i] * (
1251
+ noise_pred_text - noise_pred_uncond
1252
+ )
1253
+ elif do_spatio_temporal_guidance:
1254
+ noise_pred = noise_pred_text
1255
+ if do_spatio_temporal_guidance:
1256
+ noise_pred = noise_pred + stg_scale[i] * (
1257
+ noise_pred_text - noise_pred_text_perturb
1258
+ )
1259
+ if do_rescaling and stg_scale[i] > 0.0:
1260
+ noise_pred_text_std = noise_pred_text.view(batch_size, -1).std(
1261
+ dim=1, keepdim=True
1262
+ )
1263
+ noise_pred_std = noise_pred.view(batch_size, -1).std(
1264
+ dim=1, keepdim=True
1265
+ )
1266
+
1267
+ factor = noise_pred_text_std / noise_pred_std
1268
+ factor = rescaling_scale[i] * factor + (1 - rescaling_scale[i])
1269
+
1270
+ noise_pred = noise_pred * factor.view(batch_size, 1, 1)
1271
+
1272
+ current_timestep = current_timestep[:1]
1273
+ # learned sigma
1274
+ if (
1275
+ self.transformer.config.out_channels // 2
1276
+ == self.transformer.config.in_channels
1277
+ ):
1278
+ noise_pred = noise_pred.chunk(2, dim=1)[0]
1279
+
1280
+ # compute previous image: x_t -> x_t-1
1281
+ latents = self.denoising_step(
1282
+ latents,
1283
+ noise_pred,
1284
+ current_timestep,
1285
+ orig_conditioning_mask,
1286
+ t,
1287
+ extra_step_kwargs,
1288
+ stochastic_sampling=stochastic_sampling,
1289
+ )
1290
+
1291
+ # call the callback, if provided
1292
+ if i == len(timesteps) - 1 or (
1293
+ (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
1294
+ ):
1295
+ progress_bar.update()
1296
+
1297
+ if callback_on_step_end is not None:
1298
+ callback_on_step_end(self, i, t, {})
1299
+
1300
+ if offload_to_cpu:
1301
+ self.transformer = self.transformer.cpu()
1302
+ if self._execution_device == "cuda":
1303
+ torch.cuda.empty_cache()
1304
+
1305
+ # Remove the added conditioning latents
1306
+ latents = latents[:, num_cond_latents:]
1307
+
1308
+ latents = self.patchifier.unpatchify(
1309
+ latents=latents,
1310
+ output_height=latent_height,
1311
+ output_width=latent_width,
1312
+ out_channels=self.transformer.in_channels
1313
+ // math.prod(self.patchifier.patch_size),
1314
+ )
1315
+ if output_type != "latent":
1316
+ if self.vae.decoder.timestep_conditioning:
1317
+ noise = torch.randn_like(latents)
1318
+ if not isinstance(decode_timestep, list):
1319
+ decode_timestep = [decode_timestep] * latents.shape[0]
1320
+ if decode_noise_scale is None:
1321
+ decode_noise_scale = decode_timestep
1322
+ elif not isinstance(decode_noise_scale, list):
1323
+ decode_noise_scale = [decode_noise_scale] * latents.shape[0]
1324
+
1325
+ decode_timestep = torch.tensor(decode_timestep).to(latents.device)
1326
+ decode_noise_scale = torch.tensor(decode_noise_scale).to(
1327
+ latents.device
1328
+ )[:, None, None, None, None]
1329
+ latents = (
1330
+ latents * (1 - decode_noise_scale) + noise * decode_noise_scale
1331
+ )
1332
+ else:
1333
+ decode_timestep = None
1334
+ latents = self.tone_map_latents(latents, tone_map_compression_ratio)
1335
+ image = vae_decode(
1336
+ latents,
1337
+ self.vae,
1338
+ is_video,
1339
+ vae_per_channel_normalize=vae_per_channel_normalize,  # use the value resolved earlier to avoid a KeyError when the kwarg is omitted
1340
+ timestep=decode_timestep,
1341
+ )
1342
+
1343
+ image = self.image_processor.postprocess(image, output_type=output_type)
1344
+
1345
+ else:
1346
+ image = latents
1347
+
1348
+ # Offload all models
1349
+ self.maybe_free_model_hooks()
1350
+
1351
+ if not return_dict:
1352
+ return (image,)
1353
+
1354
+ return ImagePipelineOutput(images=image)
1355
+
1356
+ def denoising_step(
1357
+ self,
1358
+ latents: torch.Tensor,
1359
+ noise_pred: torch.Tensor,
1360
+ current_timestep: torch.Tensor,
1361
+ conditioning_mask: torch.Tensor,
1362
+ t: float,
1363
+ extra_step_kwargs,
1364
+ t_eps=1e-6,
1365
+ stochastic_sampling=False,
1366
+ ):
1367
+ """
1368
+ Perform the denoising step for the required tokens, based on the current timestep and
1369
+ conditioning mask:
1370
+ Conditioning latents have an initial timestep and noising level of (1.0 - conditioning_mask)
1371
+ and will start to be denoised when the current timestep is equal or lower than their
1372
+ conditioning timestep.
1373
+ (hard-conditioning latents with conditioning_mask = 1.0 are never denoised)
1374
+ """
1375
+ # Denoise the latents using the scheduler
1376
+ denoised_latents = self.scheduler.step(
1377
+ noise_pred,
1378
+ t if current_timestep is None else current_timestep,
1379
+ latents,
1380
+ **extra_step_kwargs,
1381
+ return_dict=False,
1382
+ stochastic_sampling=stochastic_sampling,
1383
+ )[0]
1384
+
1385
+ if conditioning_mask is None:
1386
+ return denoised_latents
1387
+
1388
+ tokens_to_denoise_mask = (t - t_eps < (1.0 - conditioning_mask)).unsqueeze(-1)
1389
+ return torch.where(tokens_to_denoise_mask, denoised_latents, latents)
1390
+
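+ # Worked example of the mask threshold above: a token with conditioning_strength 1.0
+ # has conditioning timestep 1.0 - 1.0 = 0.0, so `t - t_eps < 0.0` never holds and the
+ # token is never overwritten; a token with strength 0.6 starts being denoised once the
+ # sampler reaches t < 0.4.
+ #
+ #   denoise_now = (t - 1e-6) < (1.0 - conditioning_strength)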
1391
+ def prepare_conditioning(
1392
+ self,
1393
+ conditioning_items: Optional[List[ConditioningItem]],
1394
+ init_latents: torch.Tensor,
1395
+ num_frames: int,
1396
+ height: int,
1397
+ width: int,
1398
+ vae_per_channel_normalize: bool = False,
1399
+ generator=None,
1400
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
1401
+ """
1402
+ Prepare conditioning tokens based on the provided conditioning items.
1403
+
1404
+ This method encodes provided conditioning items (video frames or single frames) into latents
1405
+ and integrates them with the initial latent tensor. It also calculates corresponding pixel
1406
+ coordinates, a mask indicating the influence of conditioning latents, and the total number of
1407
+ conditioning latents.
1408
+
1409
+ Args:
1410
+ conditioning_items (Optional[List[ConditioningItem]]): A list of ConditioningItem objects.
1411
+ init_latents (torch.Tensor): The initial latent tensor of shape (b, c, f_l, h_l, w_l), where
1412
+ `f_l` is the number of latent frames, and `h_l` and `w_l` are latent spatial dimensions.
1413
+ num_frames, height, width: The dimensions of the generated video.
1414
+ vae_per_channel_normalize (bool, optional): Whether to normalize channels during VAE encoding.
1415
+ Defaults to `False`.
1416
+ generator: The random generator
1417
+
1418
+ Returns:
1419
+ Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
1420
+ - `init_latents` (torch.Tensor): The updated latent tensor including conditioning latents,
1421
+ patchified into (b, n, c) shape.
1422
+ - `init_pixel_coords` (torch.Tensor): The pixel coordinates corresponding to the updated
1423
+ latent tensor.
1424
+ - `conditioning_mask` (torch.Tensor): A mask indicating the conditioning-strength of each
1425
+ latent token.
1426
+ - `num_cond_latents` (int): The total number of latent tokens added from conditioning items.
1427
+
1428
+ Raises:
1429
+ AssertionError: If input shapes, dimensions, or conditions for applying conditioning are invalid.
1430
+ """
1431
+ assert isinstance(self.vae, CausalVideoAutoencoder)
1432
+
1433
+ if conditioning_items:
1434
+ batch_size, _, num_latent_frames = init_latents.shape[:3]
1435
+
1436
+ init_conditioning_mask = torch.zeros(
1437
+ init_latents[:, 0, :, :, :].shape,
1438
+ dtype=torch.float32,
1439
+ device=init_latents.device,
1440
+ )
1441
+
1442
+ extra_conditioning_latents = []
1443
+ extra_conditioning_pixel_coords = []
1444
+ extra_conditioning_mask = []
1445
+ extra_conditioning_num_latents = 0 # Number of extra conditioning latents added (should be removed before decoding)
1446
+
1447
+ # Process each conditioning item
1448
+ for conditioning_item in conditioning_items:
1449
+ conditioning_item = self._resize_conditioning_item(
1450
+ conditioning_item, height, width
1451
+ )
1452
+ media_item = conditioning_item.media_item
1453
+ media_frame_number = conditioning_item.media_frame_number
1454
+ strength = conditioning_item.conditioning_strength
1455
+ assert media_item.ndim == 5 # (b, c, f, h, w)
1456
+ b, c, n_frames, h, w = media_item.shape
1457
+ assert (
1458
+ height == h and width == w
1459
+ ) or media_frame_number == 0, f"Dimensions do not match: {height}x{width} != {h}x{w} - allowed only when media_frame_number == 0"
1460
+ assert n_frames % 8 == 1
1461
+ assert (
1462
+ media_frame_number >= 0
1463
+ and media_frame_number + n_frames <= num_frames
1464
+ )
1465
+
1466
+ # Encode the provided conditioning media item
1467
+ media_item_latents = vae_encode(
1468
+ media_item.to(dtype=self.vae.dtype, device=self.vae.device),
1469
+ self.vae,
1470
+ vae_per_channel_normalize=vae_per_channel_normalize,
1471
+ ).to(dtype=init_latents.dtype)
1472
+
1473
+ # Handle the different conditioning cases
1474
+ if media_frame_number == 0:
1475
+ # Get the target spatial position of the latent conditioning item
1476
+ media_item_latents, l_x, l_y = self._get_latent_spatial_position(
1477
+ media_item_latents,
1478
+ conditioning_item,
1479
+ height,
1480
+ width,
1481
+ strip_latent_border=True,
1482
+ )
1483
+ b, c_l, f_l, h_l, w_l = media_item_latents.shape
1484
+
1485
+ # First frame or sequence - just update the initial noise latents and the mask
1486
+ init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l] = (
1487
+ torch.lerp(
1488
+ init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l],
1489
+ media_item_latents,
1490
+ strength,
1491
+ )
1492
+ )
1493
+ init_conditioning_mask[
1494
+ :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l
1495
+ ] = strength
1496
+ else:
1497
+ # Non-first frame or sequence
1498
+ if n_frames > 1:
1499
+ # Handle non-first sequence.
1500
+ # Encoded latents are either fully consumed, or the prefix is handled separately below.
1501
+ (
1502
+ init_latents,
1503
+ init_conditioning_mask,
1504
+ media_item_latents,
1505
+ ) = self._handle_non_first_conditioning_sequence(
1506
+ init_latents,
1507
+ init_conditioning_mask,
1508
+ media_item_latents,
1509
+ media_frame_number,
1510
+ strength,
1511
+ )
1512
+
1513
+ # Single frame or sequence-prefix latents
1514
+ if media_item_latents is not None:
1515
+ noise = randn_tensor(
1516
+ media_item_latents.shape,
1517
+ generator=generator,
1518
+ device=media_item_latents.device,
1519
+ dtype=media_item_latents.dtype,
1520
+ )
1521
+
1522
+ media_item_latents = torch.lerp(
1523
+ noise, media_item_latents, strength
1524
+ )
1525
+
1526
+ # Patchify the extra conditioning latents and calculate their pixel coordinates
1527
+ media_item_latents, latent_coords = self.patchifier.patchify(
1528
+ latents=media_item_latents
1529
+ )
1530
+ pixel_coords = latent_to_pixel_coords(
1531
+ latent_coords,
1532
+ self.vae,
1533
+ causal_fix=self.transformer.config.causal_temporal_positioning,
1534
+ )
1535
+
1536
+ # Update the frame numbers to match the target frame number
1537
+ pixel_coords[:, 0] += media_frame_number
1538
+ extra_conditioning_num_latents += media_item_latents.shape[1]
1539
+
1540
+ conditioning_mask = torch.full(
1541
+ media_item_latents.shape[:2],
1542
+ strength,
1543
+ dtype=torch.float32,
1544
+ device=init_latents.device,
1545
+ )
1546
+
1547
+ extra_conditioning_latents.append(media_item_latents)
1548
+ extra_conditioning_pixel_coords.append(pixel_coords)
1549
+ extra_conditioning_mask.append(conditioning_mask)
1550
+
1551
+ # Patchify the updated latents and calculate their pixel coordinates
1552
+ init_latents, init_latent_coords = self.patchifier.patchify(
1553
+ latents=init_latents
1554
+ )
1555
+ init_pixel_coords = latent_to_pixel_coords(
1556
+ init_latent_coords,
1557
+ self.vae,
1558
+ causal_fix=self.transformer.config.causal_temporal_positioning,
1559
+ )
1560
+
1561
+ if not conditioning_items:
1562
+ return init_latents, init_pixel_coords, None, 0
1563
+
1564
+ init_conditioning_mask, _ = self.patchifier.patchify(
1565
+ latents=init_conditioning_mask.unsqueeze(1)
1566
+ )
1567
+ init_conditioning_mask = init_conditioning_mask.squeeze(-1)
1568
+
1569
+ if extra_conditioning_latents:
1570
+ # Stack the extra conditioning latents, pixel coordinates and mask
1571
+ init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1)
1572
+ init_pixel_coords = torch.cat(
1573
+ [*extra_conditioning_pixel_coords, init_pixel_coords], dim=2
1574
+ )
1575
+ init_conditioning_mask = torch.cat(
1576
+ [*extra_conditioning_mask, init_conditioning_mask], dim=1
1577
+ )
1578
+
1579
+ if self.transformer.use_tpu_flash_attention:
1580
+ # When flash attention is used, keep the original number of tokens by removing
1581
+ # tokens from the end.
1582
+ init_latents = init_latents[:, :-extra_conditioning_num_latents]
1583
+ init_pixel_coords = init_pixel_coords[
1584
+ :, :, :-extra_conditioning_num_latents
1585
+ ]
1586
+ init_conditioning_mask = init_conditioning_mask[
1587
+ :, :-extra_conditioning_num_latents
1588
+ ]
1589
+
1590
+ return (
1591
+ init_latents,
1592
+ init_pixel_coords,
1593
+ init_conditioning_mask,
1594
+ extra_conditioning_num_latents,
1595
+ )
1596
+
1597
+ @staticmethod
1598
+ def _resize_conditioning_item(
1599
+ conditioning_item: ConditioningItem,
1600
+ height: int,
1601
+ width: int,
1602
+ ):
1603
+ if conditioning_item.media_x or conditioning_item.media_y:
1604
+ raise ValueError(
1605
+ "Provide media_item in the target size for spatial conditioning."
1606
+ )
1607
+ new_conditioning_item = copy.copy(conditioning_item)
1608
+ new_conditioning_item.media_item = LTXVideoPipeline.resize_tensor(
1609
+ conditioning_item.media_item, height, width
1610
+ )
1611
+ return new_conditioning_item
1612
+
1613
+ def _get_latent_spatial_position(
1614
+ self,
1615
+ latents: torch.Tensor,
1616
+ conditioning_item: ConditioningItem,
1617
+ height: int,
1618
+ width: int,
1619
+ strip_latent_border,
1620
+ ):
1621
+ """
1622
+ Get the spatial position of the conditioning item in the latent space.
1623
+ If requested, strip the conditioning latent borders that do not align with target borders.
1624
+ (border latents look different from other latents and might confuse the model)
1625
+ """
1626
+ scale = self.vae_scale_factor
1627
+ h, w = conditioning_item.media_item.shape[-2:]
1628
+ assert (
1629
+ h <= height and w <= width
1630
+ ), f"Conditioning item size {h}x{w} is larger than target size {height}x{width}"
1631
+ assert h % scale == 0 and w % scale == 0
1632
+
1633
+ # Compute the start and end spatial positions of the media item
1634
+ x_start, y_start = conditioning_item.media_x, conditioning_item.media_y
1635
+ x_start = (width - w) // 2 if x_start is None else x_start
1636
+ y_start = (height - h) // 2 if y_start is None else y_start
1637
+ x_end, y_end = x_start + w, y_start + h
1638
+ assert (
1639
+ x_end <= width and y_end <= height
1640
+ ), f"Conditioning item {x_start}:{x_end}x{y_start}:{y_end} is out of bounds for target size {width}x{height}"
1641
+
1642
+ if strip_latent_border:
1643
+ # Strip one latent from left/right and/or top/bottom, update x, y accordingly
1644
+ if x_start > 0:
1645
+ x_start += scale
1646
+ latents = latents[:, :, :, :, 1:]
1647
+
1648
+ if y_start > 0:
1649
+ y_start += scale
1650
+ latents = latents[:, :, :, 1:, :]
1651
+
1652
+ if x_end < width:
1653
+ latents = latents[:, :, :, :, :-1]
1654
+
1655
+ if y_end < height:
1656
+ latents = latents[:, :, :, :-1, :]
1657
+
1658
+ return latents, x_start // scale, y_start // scale
1659
+
1660
+ @staticmethod
1661
+ def _handle_non_first_conditioning_sequence(
1662
+ init_latents: torch.Tensor,
1663
+ init_conditioning_mask: torch.Tensor,
1664
+ latents: torch.Tensor,
1665
+ media_frame_number: int,
1666
+ strength: float,
1667
+ num_prefix_latent_frames: int = 2,
1668
+ prefix_latents_mode: str = "concat",
1669
+ prefix_soft_conditioning_strength: float = 0.15,
1670
+ ):
1671
+ """
1672
+ Special handling for a conditioning sequence that does not start on the first frame.
1673
+ The special handling is required to allow a short encoded video to be used as middle
1674
+ (or last) sequence in a longer video.
1675
+ Args:
1676
+ init_latents (torch.Tensor): The initial noise latents to be updated.
1677
+ init_conditioning_mask (torch.Tensor): The initial conditioning mask to be updated.
1678
+ latents (torch.Tensor): The encoded conditioning item.
1679
+ media_frame_number (int): The target frame number of the first frame in the conditioning sequence.
1680
+ strength (float): The conditioning strength for the conditioning latents.
1681
+ num_prefix_latent_frames (int, optional): The length of the sequence prefix, to be handled
1682
+ separately. Defaults to 2.
1683
+ prefix_latents_mode (str, optional): Special treatment for prefix (boundary) latents.
1684
+ - "drop": Drop the prefix latents.
1685
+ - "soft": Use the prefix latents, but with soft-conditioning
1686
+ - "concat": Add the prefix latents as extra tokens (like single frames)
1687
+ prefix_soft_conditioning_strength (float, optional): The strength of the soft-conditioning for
1688
+ the prefix latents, relevant if `prefix_latents_mode` is "soft". Defaults to 0.15.
1689
+
1690
+ """
1691
+ f_l = latents.shape[2]
1692
+ f_l_p = num_prefix_latent_frames
1693
+ assert f_l >= f_l_p
1694
+ assert media_frame_number % 8 == 0
1695
+ if f_l > f_l_p:
1696
+ # Insert the conditioning latents **excluding the prefix** into the sequence
1697
+ f_l_start = media_frame_number // 8 + f_l_p
1698
+ f_l_end = f_l_start + f_l - f_l_p
1699
+ init_latents[:, :, f_l_start:f_l_end] = torch.lerp(
1700
+ init_latents[:, :, f_l_start:f_l_end],
1701
+ latents[:, :, f_l_p:],
1702
+ strength,
1703
+ )
1704
+ # Mark these latent frames as conditioning latents
1705
+ init_conditioning_mask[:, f_l_start:f_l_end] = strength
1706
+
1707
+ # Handle the prefix-latents
1708
+ if prefix_latents_mode == "soft":
1709
+ if f_l_p > 1:
1710
+ # Drop the first (single-frame) latent and soft-condition the remaining prefix
1711
+ f_l_start = media_frame_number // 8 + 1
1712
+ f_l_end = f_l_start + f_l_p - 1
1713
+ strength = min(prefix_soft_conditioning_strength, strength)
1714
+ init_latents[:, :, f_l_start:f_l_end] = torch.lerp(
1715
+ init_latents[:, :, f_l_start:f_l_end],
1716
+ latents[:, :, 1:f_l_p],
1717
+ strength,
1718
+ )
1719
+ # Mark these latent frames as conditioning latents
1720
+ init_conditioning_mask[:, f_l_start:f_l_end] = strength
1721
+ latents = None # No more latents to handle
1722
+ elif prefix_latents_mode == "drop":
1723
+ # Drop the prefix latents
1724
+ latents = None
1725
+ elif prefix_latents_mode == "concat":
1726
+ # Pass-on the prefix latents to be handled as extra conditioning frames
1727
+ latents = latents[:, :, :f_l_p]
1728
+ else:
1729
+ raise ValueError(f"Invalid prefix_latents_mode: {prefix_latents_mode}")
1730
+ return (
1731
+ init_latents,
1732
+ init_conditioning_mask,
1733
+ latents,
1734
+ )
1735
+
1736
+ def trim_conditioning_sequence(
1737
+ self, start_frame: int, sequence_num_frames: int, target_num_frames: int
1738
+ ):
1739
+ """
1740
+ Trim a conditioning sequence to the allowed number of frames.
1741
+
1742
+ Args:
1743
+ start_frame (int): The target frame number of the first frame in the sequence.
1744
+ sequence_num_frames (int): The number of frames in the sequence.
1745
+ target_num_frames (int): The target number of frames in the generated video.
1746
+
1747
+ Returns:
1748
+ int: updated sequence length
1749
+ """
1750
+ scale_factor = self.video_scale_factor
1751
+ num_frames = min(sequence_num_frames, target_num_frames - start_frame)
1752
+ # Trim down to a multiple of temporal_scale_factor frames plus 1
1753
+ num_frames = (num_frames - 1) // scale_factor * scale_factor + 1
1754
+ return num_frames
1755
+
1756
+ @staticmethod
1757
+ def tone_map_latents(
1758
+ latents: torch.Tensor,
1759
+ compression: float,
1760
+ ) -> torch.Tensor:
1761
+ """
1762
+ Applies a non-linear tone-mapping function to latent values to reduce their dynamic range
1763
+ in a perceptually smooth way using a sigmoid-based compression.
1764
+
1765
+ This is useful for regularizing high-variance latents or for conditioning outputs
1766
+ during generation, especially when controlling dynamic behavior with a `compression` factor.
1767
+
1768
+ Parameters:
1769
+ ----------
1770
+ latents : torch.Tensor
1771
+ Input latent tensor with arbitrary shape. Expected to be roughly in [-1, 1] or [0, 1] range.
1772
+ compression : float
1773
+ Compression strength in the range [0, 1].
1774
+ - 0.0: No tone-mapping (identity transform)
1775
+ - 1.0: Full compression effect
1776
+
1777
+ Returns:
1778
+ -------
1779
+ torch.Tensor
1780
+ The tone-mapped latent tensor of the same shape as input.
1781
+ """
1782
+ if not (0 <= compression <= 1):
1783
+ raise ValueError("Compression must be in the range [0, 1]")
1784
+
1785
+ # Remap [0-1] to [0-0.75] and apply sigmoid compression in one shot
1786
+ scale_factor = compression * 0.75
1787
+ abs_latents = torch.abs(latents)
1788
+
1789
+ # Sigmoid compression: at full compression, large-magnitude values are scaled down toward ~0.4 while small values keep a scale of ~1.0
1791
+ # When scale_factor=0 the compression term vanishes (identity); at scale_factor=0.75 the effect is strongest
1791
+ sigmoid_term = torch.sigmoid(4.0 * scale_factor * (abs_latents - 1.0))
1792
+ scales = 1.0 - 0.8 * scale_factor * sigmoid_term
1793
+
1794
+ filtered = latents * scales
1795
+ return filtered
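For orientation, a quick numeric check of the compression curve above (illustrative only, not part of this commit; it simply re-evaluates the formula from tone_map_latents at full compression):

# Illustrative sketch: the effective scales applied by tone_map_latents at compression=1.0.
import torch

latents = torch.tensor([0.0, 0.5, 1.0, 2.0])
scale_factor = 1.0 * 0.75
sigmoid_term = torch.sigmoid(4.0 * scale_factor * (latents.abs() - 1.0))
scales = 1.0 - 0.8 * scale_factor * sigmoid_term
print(scales)  # approx. [0.97, 0.89, 0.70, 0.43]: large magnitudes are compressed, small ones pass through nearly unchanged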
1796
+
1797
+
1798
+ def adain_filter_latent(
1799
+ latents: torch.Tensor, reference_latents: torch.Tensor, factor=1.0
1800
+ ):
1801
+ """
1802
+ Applies Adaptive Instance Normalization (AdaIN) to a latent tensor based on
1803
+ statistics from a reference latent tensor.
1804
+
1805
+ Args:
1806
+ latents (torch.Tensor): Input latents to normalize.
1807
+ reference_latents (torch.Tensor): The reference latents providing style statistics.
1808
+ factor (float): Blending factor between original and transformed latent.
1809
+ Range: -10.0 to 10.0, Default: 1.0
1810
+
1811
+ Returns:
1812
+ torch.Tensor: The transformed latent tensor
1813
+ """
1814
+ result = latents.clone()
1815
+
1816
+ for i in range(latents.size(0)):
1817
+ for c in range(latents.size(1)):
1818
+ r_sd, r_mean = torch.std_mean(
1819
+ reference_latents[i, c], dim=None
1820
+ ) # index by original dim order
1821
+ i_sd, i_mean = torch.std_mean(result[i, c], dim=None)
1822
+
1823
+ result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean
1824
+
1825
+ result = torch.lerp(latents, result, factor)
1826
+ return result
1827
+
1828
+
1829
+ class LTXMultiScalePipeline:
1830
+ def _upsample_latents(
1831
+ self, latent_upsampler: LatentUpsampler, latents: torch.Tensor
1832
+ ):
1833
+ assert latents.device == latent_upsampler.device
1834
+
1835
+ latents = un_normalize_latents(
1836
+ latents, self.vae, vae_per_channel_normalize=True
1837
+ )
1838
+ upsampled_latents = latent_upsampler(latents)
1839
+ upsampled_latents = normalize_latents(
1840
+ upsampled_latents, self.vae, vae_per_channel_normalize=True
1841
+ )
1842
+ return upsampled_latents
1843
+
1844
+ def __init__(
1845
+ self, video_pipeline: LTXVideoPipeline, latent_upsampler: LatentUpsampler
1846
+ ):
1847
+ self.video_pipeline = video_pipeline
1848
+ self.vae = video_pipeline.vae
1849
+ self.latent_upsampler = latent_upsampler
1850
+
1851
+ def __call__(
1852
+ self,
1853
+ downscale_factor: float,
1854
+ first_pass: dict,
1855
+ second_pass: dict,
1856
+ *args: Any,
1857
+ **kwargs: Any,
1858
+ ) -> Any:
1859
+ original_kwargs = kwargs.copy()
1860
+ original_output_type = kwargs["output_type"]
1861
+ original_width = kwargs["width"]
1862
+ original_height = kwargs["height"]
1863
+
1864
+ x_width = int(kwargs["width"] * downscale_factor)
1865
+ downscaled_width = x_width - (x_width % self.video_pipeline.vae_scale_factor)
1866
+ x_height = int(kwargs["height"] * downscale_factor)
1867
+ downscaled_height = x_height - (x_height % self.video_pipeline.vae_scale_factor)
1868
+
1869
+ kwargs["output_type"] = "latent"
1870
+ kwargs["width"] = downscaled_width
1871
+ kwargs["height"] = downscaled_height
1872
+ kwargs.update(**first_pass)
1873
+ result = self.video_pipeline(*args, **kwargs)
1874
+ latents = result.images
1875
+
1876
+ upsampled_latents = self._upsample_latents(self.latent_upsampler, latents)
1877
+ upsampled_latents = adain_filter_latent(
1878
+ latents=upsampled_latents, reference_latents=latents
1879
+ )
1880
+
1881
+ kwargs = original_kwargs
1882
+
1883
+ kwargs["latents"] = upsampled_latents
1884
+ kwargs["output_type"] = original_output_type
1885
+ kwargs["width"] = downscaled_width * 2
1886
+ kwargs["height"] = downscaled_height * 2
1887
+ kwargs.update(**second_pass)
1888
+
1889
+ result = self.video_pipeline(*args, **kwargs)
1890
+ if original_output_type != "latent":
1891
+ num_frames = result.images.shape[2]
1892
+ videos = rearrange(result.images, "b c f h w -> (b f) c h w")
1893
+
1894
+ videos = F.interpolate(
1895
+ videos,
1896
+ size=(original_height, original_width),
1897
+ mode="bilinear",
1898
+ align_corners=False,
1899
+ )
1900
+ videos = rearrange(videos, "(b f) c h w -> b c f h w", f=num_frames)
1901
+ result.images = videos
1902
+
1903
+ return result
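A hedged usage sketch of the two-pass flow implemented by LTXMultiScalePipeline (illustrative only, not part of this commit; the pre-built `pipeline`/`latent_upsampler` objects, the prompt, and the first/second pass kwargs are assumptions):

# Illustrative sketch: low-resolution first pass, latent upsampling + AdaIN, then a refinement pass.
multi_scale = LTXMultiScalePipeline(video_pipeline=pipeline, latent_upsampler=latent_upsampler)
result = multi_scale(
    downscale_factor=2 / 3,                    # first pass runs at roughly 2/3 of the target resolution
    first_pass={"num_inference_steps": 30},    # assumed LTXVideoPipeline kwargs for the low-res pass
    second_pass={"num_inference_steps": 10},   # assumed kwargs for the refinement pass
    prompt="a red fox running through snow",
    height=704,
    width=1216,
    num_frames=121,
    output_type="pt",
)
video = result.images  # (b, c, f, h, w) tensor, interpolated back to the requested height/width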
ltx_video/schedulers/__init__.py ADDED
File without changes
ltx_video/schedulers/rf.py ADDED
@@ -0,0 +1,386 @@
1
+ import math
2
+ from abc import ABC, abstractmethod
3
+ from dataclasses import dataclass
4
+ from typing import Callable, Optional, Tuple, Union
5
+ import json
6
+ import os
7
+ from pathlib import Path
8
+
9
+ import torch
10
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
11
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
12
+ from diffusers.utils import BaseOutput
13
+ from torch import Tensor
14
+ from safetensors import safe_open
15
+
16
+
17
+ from ltx_video.utils.torch_utils import append_dims
18
+
19
+ from ltx_video.utils.diffusers_config_mapping import (
20
+ diffusers_and_ours_config_mapping,
21
+ make_hashable_key,
22
+ )
23
+
24
+
25
+ def linear_quadratic_schedule(num_steps, threshold_noise=0.025, linear_steps=None):
26
+ if num_steps == 1:
27
+ return torch.tensor([1.0])
28
+ if linear_steps is None:
29
+ linear_steps = num_steps // 2
30
+ linear_sigma_schedule = [
31
+ i * threshold_noise / linear_steps for i in range(linear_steps)
32
+ ]
33
+ threshold_noise_step_diff = linear_steps - threshold_noise * num_steps
34
+ quadratic_steps = num_steps - linear_steps
35
+ quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps**2)
36
+ linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / (
37
+ quadratic_steps**2
38
+ )
39
+ const = quadratic_coef * (linear_steps**2)
40
+ quadratic_sigma_schedule = [
41
+ quadratic_coef * (i**2) + linear_coef * i + const
42
+ for i in range(linear_steps, num_steps)
43
+ ]
44
+ sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule + [1.0]
45
+ sigma_schedule = [1.0 - x for x in sigma_schedule]
46
+ return torch.tensor(sigma_schedule[:-1])
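For orientation, the schedule this produces for a small step count (the values follow directly from the formula above; illustrative only, not part of this commit):

# linear_quadratic_schedule(4) with the defaults (threshold_noise=0.025, linear_steps=2) builds
# sigma values [0.0, 0.0125, 0.025, 0.275, 1.0], flips them to 1 - x and drops the last entry,
# returning tensor([1.0000, 0.9875, 0.9750, 0.7250]): num_steps sigmas decaying from 1.0,
# slowly (linearly) at first and then quadratically toward the end of the schedule.
sigmas = linear_quadratic_schedule(4)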
47
+
48
+
49
+ def simple_diffusion_resolution_dependent_timestep_shift(
50
+ samples_shape: torch.Size,
51
+ timesteps: Tensor,
52
+ n: int = 32 * 32,
53
+ ) -> Tensor:
54
+ if len(samples_shape) == 3:
55
+ _, m, _ = samples_shape
56
+ elif len(samples_shape) in [4, 5]:
57
+ m = math.prod(samples_shape[2:])
58
+ else:
59
+ raise ValueError(
60
+ "Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)"
61
+ )
62
+ snr = (timesteps / (1 - timesteps)) ** 2
63
+ shift_snr = torch.log(snr) + 2 * math.log(m / n)
64
+ shifted_timesteps = torch.sigmoid(0.5 * shift_snr)
65
+
66
+ return shifted_timesteps
67
+
68
+
69
+ def time_shift(mu: float, sigma: float, t: Tensor):
70
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
71
+
72
+
73
+ def get_normal_shift(
74
+ n_tokens: int,
75
+ min_tokens: int = 1024,
76
+ max_tokens: int = 4096,
77
+ min_shift: float = 0.95,
78
+ max_shift: float = 2.05,
79
+ ) -> Callable[[float], float]:
80
+ m = (max_shift - min_shift) / (max_tokens - min_tokens)
81
+ b = min_shift - m * min_tokens
82
+ return m * n_tokens + b
83
+
84
+
85
+ def strech_shifts_to_terminal(shifts: Tensor, terminal=0.1):
86
+ """
87
+ Stretch a function (given as sampled shifts) so that its final value matches the given terminal value
88
+ using the provided formula.
89
+
90
+ Parameters:
91
+ - shifts (Tensor): The samples of the function to be stretched (PyTorch Tensor).
92
+ - terminal (float): The desired terminal value (value at the last sample).
93
+
94
+ Returns:
95
+ - Tensor: The stretched shifts such that the final value equals `terminal`.
96
+ """
97
+ if shifts.numel() == 0:
98
+ raise ValueError("The 'shifts' tensor must not be empty.")
99
+
100
+ # Ensure terminal value is valid
101
+ if terminal <= 0 or terminal >= 1:
102
+ raise ValueError("The terminal value must be between 0 and 1 (exclusive).")
103
+
104
+ # Transform the shifts using the given formula
105
+ one_minus_z = 1 - shifts
106
+ scale_factor = one_minus_z[-1] / (1 - terminal)
107
+ stretched_shifts = 1 - (one_minus_z / scale_factor)
108
+
109
+ return stretched_shifts
110
+
111
+
112
+ def sd3_resolution_dependent_timestep_shift(
113
+ samples_shape: torch.Size,
114
+ timesteps: Tensor,
115
+ target_shift_terminal: Optional[float] = None,
116
+ ) -> Tensor:
117
+ """
118
+ Shifts the timestep schedule as a function of the generated resolution.
119
+
120
+ In the SD3 paper, the authors empirically how to shift the timesteps based on the resolution of the target images.
121
+ For more details: https://arxiv.org/pdf/2403.03206
122
+
123
+ In Flux they later propose a more dynamic resolution dependent timestep shift, see:
124
+ https://github.com/black-forest-labs/flux/blob/87f6fff727a377ea1c378af692afb41ae84cbe04/src/flux/sampling.py#L66
125
+
126
+
127
+ Args:
128
+ samples_shape (torch.Size): The samples batch shape (batch_size, channels, height, width) or
129
+ (batch_size, channels, frame, height, width).
130
+ timesteps (Tensor): A batch of timesteps with shape (batch_size,).
131
+ target_shift_terminal (float): The target terminal value for the shifted timesteps.
132
+
133
+ Returns:
134
+ Tensor: The shifted timesteps.
135
+ """
136
+ if len(samples_shape) == 3:
137
+ _, m, _ = samples_shape
138
+ elif len(samples_shape) in [4, 5]:
139
+ m = math.prod(samples_shape[2:])
140
+ else:
141
+ raise ValueError(
142
+ "Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)"
143
+ )
144
+
145
+ shift = get_normal_shift(m)
146
+ time_shifts = time_shift(shift, 1, timesteps)
147
+ if target_shift_terminal is not None: # Stretch the shifts to the target terminal
148
+ time_shifts = strech_shifts_to_terminal(time_shifts, target_shift_terminal)
149
+ return time_shifts
150
+
151
+
152
+ class TimestepShifter(ABC):
153
+ @abstractmethod
154
+ def shift_timesteps(self, samples_shape: torch.Size, timesteps: Tensor) -> Tensor:
155
+ pass
156
+
157
+
158
+ @dataclass
159
+ class RectifiedFlowSchedulerOutput(BaseOutput):
160
+ """
161
+ Output class for the scheduler's step function output.
162
+
163
+ Args:
164
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
165
+ Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
166
+ denoising loop.
167
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
168
+ The predicted denoised sample (x_{0}) based on the model output from the current timestep.
169
+ `pred_original_sample` can be used to preview progress or for guidance.
170
+ """
171
+
172
+ prev_sample: torch.FloatTensor
173
+ pred_original_sample: Optional[torch.FloatTensor] = None
174
+
175
+
176
+ class RectifiedFlowScheduler(SchedulerMixin, ConfigMixin, TimestepShifter):
177
+ order = 1
178
+
179
+ @register_to_config
180
+ def __init__(
181
+ self,
182
+ num_train_timesteps=1000,
183
+ shifting: Optional[str] = None,
184
+ base_resolution: int = 32**2,
185
+ target_shift_terminal: Optional[float] = None,
186
+ sampler: Optional[str] = "Uniform",
187
+ shift: Optional[float] = None,
188
+ ):
189
+ super().__init__()
190
+ self.init_noise_sigma = 1.0
191
+ self.num_inference_steps = None
192
+ self.sampler = sampler
193
+ self.shifting = shifting
194
+ self.base_resolution = base_resolution
195
+ self.target_shift_terminal = target_shift_terminal
196
+ self.timesteps = self.sigmas = self.get_initial_timesteps(
197
+ num_train_timesteps, shift=shift
198
+ )
199
+ self.shift = shift
200
+
201
+ def get_initial_timesteps(
202
+ self, num_timesteps: int, shift: Optional[float] = None
203
+ ) -> Tensor:
204
+ if self.sampler == "Uniform":
205
+ return torch.linspace(1, 1 / num_timesteps, num_timesteps)
206
+ elif self.sampler == "LinearQuadratic":
207
+ return linear_quadratic_schedule(num_timesteps)
208
+ elif self.sampler == "Constant":
209
+ assert (
210
+ shift is not None
211
+ ), "Shift must be provided for constant time shift sampler."
212
+ return time_shift(
213
+ shift, 1, torch.linspace(1, 1 / num_timesteps, num_timesteps)
214
+ )
215
+
216
+ def shift_timesteps(self, samples_shape: torch.Size, timesteps: Tensor) -> Tensor:
217
+ if self.shifting == "SD3":
218
+ return sd3_resolution_dependent_timestep_shift(
219
+ samples_shape, timesteps, self.target_shift_terminal
220
+ )
221
+ elif self.shifting == "SimpleDiffusion":
222
+ return simple_diffusion_resolution_dependent_timestep_shift(
223
+ samples_shape, timesteps, self.base_resolution
224
+ )
225
+ return timesteps
226
+
227
+ def set_timesteps(
228
+ self,
229
+ num_inference_steps: Optional[int] = None,
230
+ samples_shape: Optional[torch.Size] = None,
231
+ timesteps: Optional[Tensor] = None,
232
+ device: Union[str, torch.device] = None,
233
+ ):
234
+ """
235
+ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
236
+ If `timesteps` are provided, they will be used instead of the scheduled timesteps.
237
+
238
+ Args:
239
+ num_inference_steps (`int` *optional*): The number of diffusion steps used when generating samples.
240
+ samples_shape (`torch.Size` *optional*): The samples batch shape, used for shifting.
241
+ timesteps ('torch.Tensor' *optional*): Specific timesteps to use instead of scheduled timesteps.
242
+ device (`Union[str, torch.device]`, *optional*): The device to which the timesteps tensor will be moved.
243
+ """
244
+ if timesteps is not None and num_inference_steps is not None:
245
+ raise ValueError(
246
+ "You cannot provide both `timesteps` and `num_inference_steps`."
247
+ )
248
+ if timesteps is None:
249
+ num_inference_steps = min(
250
+ self.config.num_train_timesteps, num_inference_steps
251
+ )
252
+ timesteps = self.get_initial_timesteps(
253
+ num_inference_steps, shift=self.shift
254
+ ).to(device)
255
+ timesteps = self.shift_timesteps(samples_shape, timesteps)
256
+ else:
257
+ timesteps = torch.Tensor(timesteps).to(device)
258
+ num_inference_steps = len(timesteps)
259
+ self.timesteps = timesteps
260
+ self.num_inference_steps = num_inference_steps
261
+ self.sigmas = self.timesteps
262
+
263
+ @staticmethod
264
+ def from_pretrained(pretrained_model_path: Union[str, os.PathLike]):
265
+ pretrained_model_path = Path(pretrained_model_path)
266
+ if pretrained_model_path.is_file():
267
+ comfy_single_file_state_dict = {}
268
+ with safe_open(pretrained_model_path, framework="pt", device="cpu") as f:
269
+ metadata = f.metadata()
270
+ for k in f.keys():
271
+ comfy_single_file_state_dict[k] = f.get_tensor(k)
272
+ configs = json.loads(metadata["config"])
273
+ config = configs["scheduler"]
274
+ del comfy_single_file_state_dict
275
+
276
+ elif pretrained_model_path.is_dir():
277
+ diffusers_noise_scheduler_config_path = (
278
+ pretrained_model_path / "scheduler" / "scheduler_config.json"
279
+ )
280
+
281
+ with open(diffusers_noise_scheduler_config_path, "r") as f:
282
+ scheduler_config = json.load(f)
283
+ hashable_config = make_hashable_key(scheduler_config)
284
+ if hashable_config in diffusers_and_ours_config_mapping:
285
+ config = diffusers_and_ours_config_mapping[hashable_config]
286
+ return RectifiedFlowScheduler.from_config(config)
287
+
288
+ def scale_model_input(
289
+ self, sample: torch.FloatTensor, timestep: Optional[int] = None
290
+ ) -> torch.FloatTensor:
291
+ # pylint: disable=unused-argument
292
+ """
293
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
294
+ current timestep.
295
+
296
+ Args:
297
+ sample (`torch.FloatTensor`): input sample
298
+ timestep (`int`, optional): current timestep
299
+
300
+ Returns:
301
+ `torch.FloatTensor`: scaled input sample
302
+ """
303
+ return sample
304
+
305
+ def step(
306
+ self,
307
+ model_output: torch.FloatTensor,
308
+ timestep: torch.FloatTensor,
309
+ sample: torch.FloatTensor,
310
+ return_dict: bool = True,
311
+ stochastic_sampling: Optional[bool] = False,
312
+ **kwargs,
313
+ ) -> Union[RectifiedFlowSchedulerOutput, Tuple]:
314
+ """
315
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
316
+ process from the learned model outputs (most often the predicted noise).
317
+ z_{t_1} = z_t - Delta_t * v
318
+ The method finds the next timestep that is lower than the input timestep(s) and denoises the latents
319
+ to that level. The input timestep(s) are not required to be one of the predefined timesteps.
320
+
321
+ Args:
322
+ model_output (`torch.FloatTensor`):
323
+ The direct output from learned diffusion model - the velocity,
324
+ timestep (`float`):
325
+ The current discrete timestep in the diffusion chain (global or per-token).
326
+ sample (`torch.FloatTensor`):
327
+ A current latent tokens to be de-noised.
328
+ return_dict (`bool`, *optional*, defaults to `True`):
329
+ Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`.
330
+ stochastic_sampling (`bool`, *optional*, defaults to `False`):
331
+ Whether to use stochastic sampling for the sampling process.
332
+
333
+ Returns:
334
+ [`~schedulers.scheduling_utils.RectifiedFlowSchedulerOutput`] or `tuple`:
335
+ If return_dict is `True`, [`~schedulers.rf_scheduler.RectifiedFlowSchedulerOutput`] is returned,
336
+ otherwise a tuple is returned where the first element is the sample tensor.
337
+ """
338
+ if self.num_inference_steps is None:
339
+ raise ValueError(
340
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
341
+ )
342
+ t_eps = 1e-6 # Small epsilon to avoid numerical issues in timestep values
343
+
344
+ timesteps_padded = torch.cat(
345
+ [self.timesteps, torch.zeros(1, device=self.timesteps.device)]
346
+ )
347
+
348
+ # Find the next lower timestep(s) and compute the dt from the current timestep(s)
349
+ if timestep.ndim == 0:
350
+ # Global timestep case
351
+ lower_mask = timesteps_padded < timestep - t_eps
352
+ lower_timestep = timesteps_padded[lower_mask][0] # Closest lower timestep
353
+ dt = timestep - lower_timestep
354
+
355
+ else:
356
+ # Per-token case
357
+ assert timestep.ndim == 2
358
+ lower_mask = timesteps_padded[:, None, None] < timestep[None] - t_eps
359
+ lower_timestep = lower_mask * timesteps_padded[:, None, None]
360
+ lower_timestep, _ = lower_timestep.max(dim=0)
361
+ dt = (timestep - lower_timestep)[..., None]
362
+
363
+ # Compute previous sample
364
+ if stochastic_sampling:
365
+ x0 = sample - timestep[..., None] * model_output
366
+ next_timestep = timestep[..., None] - dt
367
+ prev_sample = self.add_noise(x0, torch.randn_like(sample), next_timestep)
368
+ else:
369
+ prev_sample = sample - dt * model_output
370
+
371
+ if not return_dict:
372
+ return (prev_sample,)
373
+
374
+ return RectifiedFlowSchedulerOutput(prev_sample=prev_sample)
375
+
376
+ def add_noise(
377
+ self,
378
+ original_samples: torch.FloatTensor,
379
+ noise: torch.FloatTensor,
380
+ timesteps: torch.FloatTensor,
381
+ ) -> torch.FloatTensor:
382
+ sigmas = timesteps
383
+ sigmas = append_dims(sigmas, original_samples.ndim)
384
+ alphas = 1 - sigmas
385
+ noisy_samples = alphas * original_samples + sigmas * noise
386
+ return noisy_samples
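A minimal denoising-loop sketch for the scheduler above (illustrative only, not part of this commit; `transformer` and its call signature, as well as the latent token shape, are assumptions, and the snippet relies on this module's imports):

# Illustrative sketch: driving RectifiedFlowScheduler by hand, outside LTXVideoPipeline.
scheduler = RectifiedFlowScheduler(sampler="Uniform")
latents = torch.randn(1, 256, 128)  # (batch, tokens, channels), assumed shape
scheduler.set_timesteps(num_inference_steps=8, samples_shape=latents.shape, device=latents.device)

for t in scheduler.timesteps:
    velocity = transformer(latents, timestep=t)  # hypothetical model call returning the predicted velocity
    latents = scheduler.step(velocity, t, latents, return_dict=False)[0]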
ltx_video/utils/__init__.py ADDED
File without changes
ltx_video/utils/diffusers_config_mapping.py ADDED
@@ -0,0 +1,174 @@
1
+ def make_hashable_key(dict_key):
2
+ def convert_value(value):
3
+ if isinstance(value, list):
4
+ return tuple(value)
5
+ elif isinstance(value, dict):
6
+ return tuple(sorted((k, convert_value(v)) for k, v in value.items()))
7
+ else:
8
+ return value
9
+
10
+ return tuple(sorted((k, convert_value(v)) for k, v in dict_key.items()))
11
+
12
+
13
+ DIFFUSERS_SCHEDULER_CONFIG = {
14
+ "_class_name": "FlowMatchEulerDiscreteScheduler",
15
+ "_diffusers_version": "0.32.0.dev0",
16
+ "base_image_seq_len": 1024,
17
+ "base_shift": 0.95,
18
+ "invert_sigmas": False,
19
+ "max_image_seq_len": 4096,
20
+ "max_shift": 2.05,
21
+ "num_train_timesteps": 1000,
22
+ "shift": 1.0,
23
+ "shift_terminal": 0.1,
24
+ "use_beta_sigmas": False,
25
+ "use_dynamic_shifting": True,
26
+ "use_exponential_sigmas": False,
27
+ "use_karras_sigmas": False,
28
+ }
29
+ DIFFUSERS_TRANSFORMER_CONFIG = {
30
+ "_class_name": "LTXVideoTransformer3DModel",
31
+ "_diffusers_version": "0.32.0.dev0",
32
+ "activation_fn": "gelu-approximate",
33
+ "attention_bias": True,
34
+ "attention_head_dim": 64,
35
+ "attention_out_bias": True,
36
+ "caption_channels": 4096,
37
+ "cross_attention_dim": 2048,
38
+ "in_channels": 128,
39
+ "norm_elementwise_affine": False,
40
+ "norm_eps": 1e-06,
41
+ "num_attention_heads": 32,
42
+ "num_layers": 28,
43
+ "out_channels": 128,
44
+ "patch_size": 1,
45
+ "patch_size_t": 1,
46
+ "qk_norm": "rms_norm_across_heads",
47
+ }
48
+ DIFFUSERS_VAE_CONFIG = {
49
+ "_class_name": "AutoencoderKLLTXVideo",
50
+ "_diffusers_version": "0.32.0.dev0",
51
+ "block_out_channels": [128, 256, 512, 512],
52
+ "decoder_causal": False,
53
+ "encoder_causal": True,
54
+ "in_channels": 3,
55
+ "latent_channels": 128,
56
+ "layers_per_block": [4, 3, 3, 3, 4],
57
+ "out_channels": 3,
58
+ "patch_size": 4,
59
+ "patch_size_t": 1,
60
+ "resnet_norm_eps": 1e-06,
61
+ "scaling_factor": 1.0,
62
+ "spatio_temporal_scaling": [True, True, True, False],
63
+ }
64
+
65
+ OURS_SCHEDULER_CONFIG = {
66
+ "_class_name": "RectifiedFlowScheduler",
67
+ "_diffusers_version": "0.25.1",
68
+ "num_train_timesteps": 1000,
69
+ "shifting": "SD3",
70
+ "base_resolution": None,
71
+ "target_shift_terminal": 0.1,
72
+ }
73
+
74
+ OURS_TRANSFORMER_CONFIG = {
75
+ "_class_name": "Transformer3DModel",
76
+ "_diffusers_version": "0.25.1",
77
+ "_name_or_path": "PixArt-alpha/PixArt-XL-2-256x256",
78
+ "activation_fn": "gelu-approximate",
79
+ "attention_bias": True,
80
+ "attention_head_dim": 64,
81
+ "attention_type": "default",
82
+ "caption_channels": 4096,
83
+ "cross_attention_dim": 2048,
84
+ "double_self_attention": False,
85
+ "dropout": 0.0,
86
+ "in_channels": 128,
87
+ "norm_elementwise_affine": False,
88
+ "norm_eps": 1e-06,
89
+ "norm_num_groups": 32,
90
+ "num_attention_heads": 32,
91
+ "num_embeds_ada_norm": 1000,
92
+ "num_layers": 28,
93
+ "num_vector_embeds": None,
94
+ "only_cross_attention": False,
95
+ "out_channels": 128,
96
+ "project_to_2d_pos": True,
97
+ "upcast_attention": False,
98
+ "use_linear_projection": False,
99
+ "qk_norm": "rms_norm",
100
+ "standardization_norm": "rms_norm",
101
+ "positional_embedding_type": "rope",
102
+ "positional_embedding_theta": 10000.0,
103
+ "positional_embedding_max_pos": [20, 2048, 2048],
104
+ "timestep_scale_multiplier": 1000,
105
+ }
106
+ OURS_VAE_CONFIG = {
107
+ "_class_name": "CausalVideoAutoencoder",
108
+ "dims": 3,
109
+ "in_channels": 3,
110
+ "out_channels": 3,
111
+ "latent_channels": 128,
112
+ "blocks": [
113
+ ["res_x", 4],
114
+ ["compress_all", 1],
115
+ ["res_x_y", 1],
116
+ ["res_x", 3],
117
+ ["compress_all", 1],
118
+ ["res_x_y", 1],
119
+ ["res_x", 3],
120
+ ["compress_all", 1],
121
+ ["res_x", 3],
122
+ ["res_x", 4],
123
+ ],
124
+ "scaling_factor": 1.0,
125
+ "norm_layer": "pixel_norm",
126
+ "patch_size": 4,
127
+ "latent_log_var": "uniform",
128
+ "use_quant_conv": False,
129
+ "causal_decoder": False,
130
+ }
131
+
132
+
133
+ diffusers_and_ours_config_mapping = {
134
+ make_hashable_key(DIFFUSERS_SCHEDULER_CONFIG): OURS_SCHEDULER_CONFIG,
135
+ make_hashable_key(DIFFUSERS_TRANSFORMER_CONFIG): OURS_TRANSFORMER_CONFIG,
136
+ make_hashable_key(DIFFUSERS_VAE_CONFIG): OURS_VAE_CONFIG,
137
+ }
138
+
139
+
140
+ TRANSFORMER_KEYS_RENAME_DICT = {
141
+ "proj_in": "patchify_proj",
142
+ "time_embed": "adaln_single",
143
+ "norm_q": "q_norm",
144
+ "norm_k": "k_norm",
145
+ }
146
+
147
+
148
+ VAE_KEYS_RENAME_DICT = {
149
+ "decoder.up_blocks.3.conv_in": "decoder.up_blocks.7",
150
+ "decoder.up_blocks.3.upsamplers.0": "decoder.up_blocks.8",
151
+ "decoder.up_blocks.3": "decoder.up_blocks.9",
152
+ "decoder.up_blocks.2.upsamplers.0": "decoder.up_blocks.5",
153
+ "decoder.up_blocks.2.conv_in": "decoder.up_blocks.4",
154
+ "decoder.up_blocks.2": "decoder.up_blocks.6",
155
+ "decoder.up_blocks.1.upsamplers.0": "decoder.up_blocks.2",
156
+ "decoder.up_blocks.1": "decoder.up_blocks.3",
157
+ "decoder.up_blocks.0": "decoder.up_blocks.1",
158
+ "decoder.mid_block": "decoder.up_blocks.0",
159
+ "encoder.down_blocks.3": "encoder.down_blocks.8",
160
+ "encoder.down_blocks.2.downsamplers.0": "encoder.down_blocks.7",
161
+ "encoder.down_blocks.2": "encoder.down_blocks.6",
162
+ "encoder.down_blocks.1.downsamplers.0": "encoder.down_blocks.4",
163
+ "encoder.down_blocks.1.conv_out": "encoder.down_blocks.5",
164
+ "encoder.down_blocks.1": "encoder.down_blocks.3",
165
+ "encoder.down_blocks.0.conv_out": "encoder.down_blocks.2",
166
+ "encoder.down_blocks.0.downsamplers.0": "encoder.down_blocks.1",
167
+ "encoder.down_blocks.0": "encoder.down_blocks.0",
168
+ "encoder.mid_block": "encoder.down_blocks.9",
169
+ "conv_shortcut.conv": "conv_shortcut",
170
+ "resnets": "res_blocks",
171
+ "norm3": "norm3.norm",
172
+ "latents_mean": "per_channel_statistics.mean-of-means",
173
+ "latents_std": "per_channel_statistics.std-of-means",
174
+ }
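A short sketch of how this mapping is consumed (it mirrors the lookup done in rf.py's from_pretrained; everything referenced is defined in this file, and the snippet itself is illustrative rather than part of this commit):

# Resolve a diffusers-style config to the native config via the hashable-key mapping.
hashable = make_hashable_key(DIFFUSERS_SCHEDULER_CONFIG)
native_config = diffusers_and_ours_config_mapping.get(hashable)
assert native_config is OURS_SCHEDULER_CONFIG  # the native config can then be passed to from_config()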
ltx_video/utils/prompt_enhance_utils.py ADDED
@@ -0,0 +1,226 @@
1
+ import logging
2
+ from typing import Union, List, Optional
3
+
4
+ import torch
5
+ from PIL import Image
6
+
7
+ logger = logging.getLogger(__name__) # pylint: disable=invalid-name
8
+
9
+ T2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award-winning movies. When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes.
10
+ Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph.
11
+ Start directly with the action, and keep descriptions literal and precise.
12
+ Think like a cinematographer describing a shot list.
13
+ Do not change the user input intent, just enhance it.
14
+ Keep within 150 words.
15
+ For best results, build your prompts using this structure:
16
+ Start with main action in a single sentence
17
+ Add specific details about movements and gestures
18
+ Describe character/object appearances precisely
19
+ Include background and environment details
20
+ Specify camera angles and movements
21
+ Describe lighting and colors
22
+ Note any changes or sudden events
23
+ Do not exceed the 150 word limit!
24
+ Output the enhanced prompt only.
25
+ """
26
+
27
+ I2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award-winning movies. When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes.
28
+ Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph.
29
+ Start directly with the action, and keep descriptions literal and precise.
30
+ Think like a cinematographer describing a shot list.
31
+ Keep within 150 words.
32
+ For best results, build your prompts using this structure:
33
+ Describe the image first and then add the user input. Image description should be in first priority! Align to the image caption if it contradicts the user text input.
34
+ Start with main action in a single sentence
35
+ Add specific details about movements and gestures
36
+ Describe character/object appearances precisely
37
+ Include background and environment details
38
+ Specify camera angles and movements
39
+ Describe lighting and colors
40
+ Note any changes or sudden events
41
+ Align to the image caption if it contradicts the user text input.
42
+ Do not exceed the 150 word limit!
43
+ Output the enhanced prompt only.
44
+ """
45
+
46
+
47
+ def tensor_to_pil(tensor):
48
+ # Ensure tensor is in range [-1, 1]
49
+ assert tensor.min() >= -1 and tensor.max() <= 1
50
+
51
+ # Convert from [-1, 1] to [0, 1]
52
+ tensor = (tensor + 1) / 2
53
+
54
+ # Rearrange from [C, H, W] to [H, W, C]
55
+ tensor = tensor.permute(1, 2, 0)
56
+
57
+ # Convert to numpy array and then to uint8 range [0, 255]
58
+ numpy_image = (tensor.cpu().numpy() * 255).astype("uint8")
59
+
60
+ # Convert to PIL Image
61
+ return Image.fromarray(numpy_image)
62
+
63
+
64
+ def generate_cinematic_prompt(
65
+ image_caption_model,
66
+ image_caption_processor,
67
+ prompt_enhancer_model,
68
+ prompt_enhancer_tokenizer,
69
+ prompt: Union[str, List[str]],
70
+ conditioning_items: Optional[List] = None,
71
+ max_new_tokens: int = 256,
72
+ ) -> List[str]:
73
+ prompts = [prompt] if isinstance(prompt, str) else prompt
74
+
75
+ if conditioning_items is None:
76
+ prompts = _generate_t2v_prompt(
77
+ prompt_enhancer_model,
78
+ prompt_enhancer_tokenizer,
79
+ prompts,
80
+ max_new_tokens,
81
+ T2V_CINEMATIC_PROMPT,
82
+ )
83
+ else:
84
+ if len(conditioning_items) > 1 or conditioning_items[0].media_frame_number != 0:
85
+ logger.warning(
86
+ "prompt enhancement does only support unconditional or first frame of conditioning items, returning original prompts"
87
+ )
88
+ return prompts
89
+
90
+ first_frame_conditioning_item = conditioning_items[0]
91
+ first_frames = _get_first_frames_from_conditioning_item(
92
+ first_frame_conditioning_item
93
+ )
94
+
95
+ assert len(first_frames) == len(
96
+ prompts
97
+ ), "Number of conditioning frames must match number of prompts"
98
+
99
+ prompts = _generate_i2v_prompt(
100
+ image_caption_model,
101
+ image_caption_processor,
102
+ prompt_enhancer_model,
103
+ prompt_enhancer_tokenizer,
104
+ prompts,
105
+ first_frames,
106
+ max_new_tokens,
107
+ I2V_CINEMATIC_PROMPT,
108
+ )
109
+
110
+ return prompts
111
+
112
+
113
+ def _get_first_frames_from_conditioning_item(conditioning_item) -> List[Image.Image]:
114
+ frames_tensor = conditioning_item.media_item
115
+ return [
116
+ tensor_to_pil(frames_tensor[i, :, 0, :, :])
117
+ for i in range(frames_tensor.shape[0])
118
+ ]
119
+
120
+
121
+ def _generate_t2v_prompt(
122
+ prompt_enhancer_model,
123
+ prompt_enhancer_tokenizer,
124
+ prompts: List[str],
125
+ max_new_tokens: int,
126
+ system_prompt: str,
127
+ ) -> List[str]:
128
+ messages = [
129
+ [
130
+ {"role": "system", "content": system_prompt},
131
+ {"role": "user", "content": f"user_prompt: {p}"},
132
+ ]
133
+ for p in prompts
134
+ ]
135
+
136
+ texts = [
137
+ prompt_enhancer_tokenizer.apply_chat_template(
138
+ m, tokenize=False, add_generation_prompt=True
139
+ )
140
+ for m in messages
141
+ ]
142
+ model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to(
143
+ prompt_enhancer_model.device
144
+ )
145
+
146
+ return _generate_and_decode_prompts(
147
+ prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens
148
+ )
149
+
150
+
151
+ def _generate_i2v_prompt(
152
+ image_caption_model,
153
+ image_caption_processor,
154
+ prompt_enhancer_model,
155
+ prompt_enhancer_tokenizer,
156
+ prompts: List[str],
157
+ first_frames: List[Image.Image],
158
+ max_new_tokens: int,
159
+ system_prompt: str,
160
+ ) -> List[str]:
161
+ image_captions = _generate_image_captions(
162
+ image_caption_model, image_caption_processor, first_frames
163
+ )
164
+
165
+ messages = [
166
+ [
167
+ {"role": "system", "content": system_prompt},
168
+ {"role": "user", "content": f"user_prompt: {p}\nimage_caption: {c}"},
169
+ ]
170
+ for p, c in zip(prompts, image_captions)
171
+ ]
172
+
173
+ texts = [
174
+ prompt_enhancer_tokenizer.apply_chat_template(
175
+ m, tokenize=False, add_generation_prompt=True
176
+ )
177
+ for m in messages
178
+ ]
179
+ model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to(
180
+ prompt_enhancer_model.device
181
+ )
182
+
183
+ return _generate_and_decode_prompts(
184
+ prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens
185
+ )
186
+
187
+
188
+ def _generate_image_captions(
189
+ image_caption_model,
190
+ image_caption_processor,
191
+ images: List[Image.Image],
192
+ system_prompt: str = "<DETAILED_CAPTION>",
193
+ ) -> List[str]:
194
+ image_caption_prompts = [system_prompt] * len(images)
195
+ inputs = image_caption_processor(
196
+ image_caption_prompts, images, return_tensors="pt"
197
+ ).to(image_caption_model.device)
198
+
199
+ with torch.inference_mode():
200
+ generated_ids = image_caption_model.generate(
201
+ input_ids=inputs["input_ids"],
202
+ pixel_values=inputs["pixel_values"],
203
+ max_new_tokens=1024,
204
+ do_sample=False,
205
+ num_beams=3,
206
+ )
207
+
208
+ return image_caption_processor.batch_decode(generated_ids, skip_special_tokens=True)
209
+
210
+
211
+ def _generate_and_decode_prompts(
212
+ prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens: int
213
+ ) -> List[str]:
214
+ with torch.inference_mode():
215
+ outputs = prompt_enhancer_model.generate(
216
+ **model_inputs, max_new_tokens=max_new_tokens
217
+ )
218
+ generated_ids = [
219
+ output_ids[len(input_ids) :]
220
+ for input_ids, output_ids in zip(model_inputs.input_ids, outputs)
221
+ ]
222
+ decoded_prompts = prompt_enhancer_tokenizer.batch_decode(
223
+ generated_ids, skip_special_tokens=True
224
+ )
225
+
226
+ return decoded_prompts
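A hedged text-to-video usage sketch for generate_cinematic_prompt (illustrative only, not part of this commit; the checkpoint name is an assumption, and any chat model/tokenizer pair that supports apply_chat_template should work):

# Illustrative sketch: enhance a plain prompt on the T2V path (no conditioning images).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "unsloth/Llama-3.2-3B-Instruct"  # assumed checkpoint, not prescribed by this file
enhancer = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

enhanced = generate_cinematic_prompt(
    image_caption_model=None,       # unused on the T2V path (conditioning_items is None)
    image_caption_processor=None,
    prompt_enhancer_model=enhancer,
    prompt_enhancer_tokenizer=tokenizer,
    prompt="a red fox running through snow",
)[0]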
ltx_video/utils/skip_layer_strategy.py ADDED
@@ -0,0 +1,8 @@
1
+ from enum import Enum, auto
2
+
3
+
4
+ class SkipLayerStrategy(Enum):
5
+ AttentionSkip = auto()
6
+ AttentionValues = auto()
7
+ Residual = auto()
8
+ TransformerBlock = auto()