import json
import os
import shutil
import subprocess
from typing import Union

import cv2
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from decord import AudioReader, VideoReader
from einops import rearrange
from torchvision import transforms
from tqdm import tqdm

eps = np.finfo(np.float32).eps


def read_json(filepath: str):
    with open(filepath) as f:
        json_dict = json.load(f)
    return json_dict


def read_video(video_path: str, change_fps=True, use_decord=True):
    if change_fps:
        temp_dir = "temp"
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
        os.makedirs(temp_dir, exist_ok=True)
        # Quote the input path so paths containing spaces survive the shell.
        command = (
            f"ffmpeg -loglevel error -y -nostdin -i '{video_path}' -r 25 -crf 18 {os.path.join(temp_dir, 'video.mp4')}"
        )
        subprocess.run(command, shell=True)
        target_video_path = os.path.join(temp_dir, "video.mp4")
    else:
        target_video_path = video_path

    if use_decord:
        return read_video_decord(target_video_path)
    else:
        return read_video_cv2(target_video_path)
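
# Illustrative usage (the file name below is a placeholder, not from this repo):
#
#   frames = read_video("example.mp4")   # re-encodes to 25 fps under temp/, then decodes
#   frames.shape                         # -> (num_frames, height, width, 3), uint8 RGB
#
# With change_fps=False the input is decoded as-is; with use_decord=False the
# OpenCV fallback (read_video_cv2 below) is used instead of decord.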


def read_video_decord(video_path: str):
    vr = VideoReader(video_path)
    video_frames = vr[:].asnumpy()
    vr.seek(0)
    return video_frames


def read_video_cv2(video_path: str):
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        print("Error: Could not open video.")
        return np.array([])

    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame_rgb)

    cap.release()
    return np.array(frames)


def read_audio(audio_path: str, audio_sample_rate: int = 16000):
    if audio_path is None:
        raise ValueError("Audio path is required.")
    ar = AudioReader(audio_path, sample_rate=audio_sample_rate, mono=True)

    audio_samples = torch.from_numpy(ar[:].asnumpy())
    audio_samples = audio_samples.squeeze(0)
    return audio_samples
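
# Illustrative usage (file name is a placeholder):
#
#   samples = read_audio("speech.wav")   # -> 1-D tensor of mono samples at the requested 16 kHz
#
# decord's AudioReader yields a (channels, num_samples) array; with mono=True the
# leading channel axis is squeezed away above.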


def write_video(video_output_path: str, video_frames: np.ndarray, fps: int):
    height, width = video_frames[0].shape[:2]
    out = cv2.VideoWriter(video_output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    for frame in video_frames:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        out.write(frame)
    out.release()


def init_dist(backend="nccl", **kwargs):
    """Initializes distributed environment."""
    rank = int(os.environ["RANK"])
    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        raise RuntimeError("No GPUs available for training.")
    local_rank = rank % num_gpus
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend=backend, **kwargs)

    return local_rank
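
# Typical launch (sketch): a launcher such as torchrun sets RANK/WORLD_SIZE in the
# environment, so each process can simply call
#
#   local_rank = init_dist(backend="nccl")
#   device = torch.device(f"cuda:{local_rank}")
#
# The script name below is illustrative:
#   torchrun --nproc_per_node=8 train.py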


def zero_rank_print(s):
    if dist.is_initialized() and dist.get_rank() == 0:
        print("### " + s)


def zero_rank_log(logger, message: str):
    if dist.is_initialized() and dist.get_rank() == 0:
        logger.info(message)


def make_audio_window(audio_embeddings: torch.Tensor, window_size: int):
    audio_window = []
    end_idx = audio_embeddings.shape[1] - window_size + 1
    for i in range(end_idx):
        audio_window.append(audio_embeddings[:, i : i + window_size, :])
    audio_window = torch.stack(audio_window)
    audio_window = rearrange(audio_window, "f b w d -> b f w d")
    return audio_window
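
# Shape sketch (sizes are made up for illustration): given per-frame audio embeddings
# of shape (batch, frames, dim), a sliding window of length window_size is taken at
# every valid start index, giving frames - window_size + 1 windows per sample.
#
#   emb = torch.randn(2, 100, 384)    # (b, f, d), hypothetical sizes
#   win = make_audio_window(emb, 5)   # -> (2, 96, 5, 384): (b, f', w, d)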


def check_video_fps(video_path: str):
    cam = cv2.VideoCapture(video_path)
    fps = cam.get(cv2.CAP_PROP_FPS)
    cam.release()  # release the capture handle before raising or returning
    if fps != 25:
        raise ValueError(f"Video FPS is not 25, it is {fps}. Please convert the video to 25 FPS.")


def tailor_tensor_to_length(tensor: torch.Tensor, length: int):
    if len(tensor) == length:
        return tensor
    elif len(tensor) > length:
        return tensor[:length]
    else:
        return torch.cat([tensor, tensor[-1].repeat(length - len(tensor))])
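
# Quick example (1-D case): shorter inputs are padded by repeating the last element,
# longer inputs are truncated.
#
#   tailor_tensor_to_length(torch.tensor([1, 2, 3]), 5)   # -> tensor([1, 2, 3, 3, 3])
#   tailor_tensor_to_length(torch.tensor([1, 2, 3]), 2)   # -> tensor([1, 2])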


def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):
    videos = rearrange(videos, "b c f h w -> f b c h w")
    outputs = []
    for x in videos:
        x = torchvision.utils.make_grid(x, nrow=n_rows)
        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
        if rescale:
            x = (x + 1.0) / 2.0
        x = (x * 255).numpy().astype(np.uint8)
        outputs.append(x)

    os.makedirs(os.path.dirname(path), exist_ok=True)
    imageio.mimsave(path, outputs, fps=fps)
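
# Illustrative call (shapes and the output path are assumptions, not taken from the
# training code): `videos` is expected as (batch, channels, frames, height, width),
# with values in [0, 1], or in [-1, 1] when rescale=True.
#
#   vids = torch.rand(2, 3, 16, 256, 256)
#   save_videos_grid(vids, "samples/grid.gif", n_rows=2, fps=8)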


def interpolate_features(features: torch.Tensor, output_len: int) -> torch.Tensor:
    features = features.cpu().numpy()
    input_len, num_features = features.shape

    input_timesteps = np.linspace(0, 10, input_len)
    output_timesteps = np.linspace(0, 10, output_len)
    output_features = np.zeros((output_len, num_features))
    for feat in range(num_features):
        output_features[:, feat] = np.interp(output_timesteps, input_timesteps, features[:, feat])
    return torch.from_numpy(output_features)
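
# Sketch: linearly resamples a (input_len, num_features) feature sequence to
# (output_len, num_features) along the time axis, one feature column at a time.
#
#   feats = torch.randn(50, 768)              # hypothetical sizes
#   interpolate_features(feats, 80).shape     # -> torch.Size([80, 768])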


@torch.no_grad()
def init_prompt(prompt, pipeline):
    uncond_input = pipeline.tokenizer(
        [""], padding="max_length", max_length=pipeline.tokenizer.model_max_length, return_tensors="pt"
    )
    uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
    text_input = pipeline.tokenizer(
        [prompt],
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
    context = torch.cat([uncond_embeddings, text_embeddings])

    return context


def reversed_forward(ddim_scheduler, pred_noise, timesteps, x_t):
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timesteps]
    beta_prod_t = 1 - alpha_prod_t

    if ddim_scheduler.config.prediction_type == "epsilon":
        beta_prod_t = beta_prod_t[:, None, None, None, None]
        alpha_prod_t = alpha_prod_t[:, None, None, None, None]
        pred_original_sample = (x_t - beta_prod_t ** (0.5) * pred_noise) / alpha_prod_t ** (0.5)
    else:
        raise NotImplementedError("This prediction type is not implemented yet")

    if ddim_scheduler.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -1, 1)
    return pred_original_sample
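
# For epsilon prediction this is the standard DDPM/DDIM estimate of the clean sample:
#
#   x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_pred) / sqrt(alpha_bar_t)
#
# with alpha_bar_t = alphas_cumprod[t], broadcast above over the five video-latent
# axes (batch plus four channel/temporal/spatial dims).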


def next_step(
    model_output: Union[torch.FloatTensor, np.ndarray],
    timestep: int,
    sample: Union[torch.FloatTensor, np.ndarray],
    ddim_scheduler,
):
    timestep, next_timestep = (
        min(timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999),
        timestep,
    )
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    beta_prod_t = 1 - alpha_prod_t
    next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction
    return next_sample


def get_noise_pred_single(latents, t, context, unet):
    noise_pred = unet(latents, t, encoder_hidden_states=context)["sample"]
    return noise_pred


@torch.no_grad()
def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
    context = init_prompt(prompt, pipeline)
    uncond_embeddings, cond_embeddings = context.chunk(2)
    all_latent = [latent]
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
        noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
        latent = next_step(noise_pred, t, latent, ddim_scheduler)
        all_latent.append(latent)
    return all_latent


@torch.no_grad()
def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
    ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
    return ddim_latents
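
# Usage sketch (assumes a diffusers-style DDIM scheduler and a pipeline exposing
# tokenizer/text_encoder/unet, as used above; variable names are illustrative):
#
#   ddim_scheduler.set_timesteps(num_inv_steps)
#   latents = ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps)
#   noisy_latent = latents[-1]   # element 0 is the input latent, -1 the fully inverted one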


def plot_loss_chart(save_path: str, *args):
    plt.figure()
    for loss_line in args:
        plt.plot(loss_line[1], loss_line[2], label=loss_line[0])
    plt.xlabel("Step")
    plt.ylabel("Loss")
    plt.legend()
    plt.savefig(save_path)
    plt.close()
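
# Each positional argument is expected to be a (label, x_values, y_values) triple,
# where x_values and y_values are equal-length sequences. The names below are made up:
#
#   plot_loss_chart("loss.png", ("train", steps, train_losses), ("val", steps, val_losses))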


CRED = "\033[91m"
CEND = "\033[0m"


def red_text(text: str):
    return f"{CRED}{text}{CEND}"


log_loss = nn.BCELoss(reduction="none")


def cosine_loss(vision_embeds, audio_embeds, y):
    sims = nn.functional.cosine_similarity(vision_embeds, audio_embeds)

    loss = log_loss(sims.unsqueeze(1), y).squeeze()
    return loss
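
# Sketch: `y` holds per-pair targets (e.g. 1 for in-sync audio/visual pairs, 0 for
# off-sync ones) with shape (batch, 1), matching sims.unsqueeze(1). Note that
# nn.BCELoss expects inputs in [0, 1], so the cosine similarities fed to it are
# assumed non-negative here. Sizes below are made up:
#
#   v = torch.rand(4, 512)
#   a = torch.rand(4, 512)
#   y = torch.ones(4, 1)
#   cosine_loss(v, a, y).shape   # -> torch.Size([4])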


def save_image(image, save_path):
    image = (image / 2 + 0.5).clamp(0, 1)
    image = (image * 255).to(torch.uint8)
    image = transforms.ToPILImage()(image)

    image.save(save_path)
    image.close()


def gather_loss(loss, device):
    local_loss = loss.item()
    global_loss = torch.tensor(local_loss, dtype=torch.float32).to(device)
    dist.all_reduce(global_loss, op=dist.ReduceOp.SUM)

    global_average_loss = global_loss.item() / dist.get_world_size()
    return global_average_loss
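
# Typical use in a DDP training loop (sketch): every rank passes its local loss and
# receives the mean over all ranks, e.g. for logging on rank 0. `loss` and `device`
# are assumed to come from the surrounding loop:
#
#   avg_loss = gather_loss(loss, device)
#   zero_rank_print(f"step loss: {avg_loss:.4f}")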


def gather_video_paths_recursively(input_dir):
    print(f"Recursively gathering video paths of {input_dir} ...")
    paths = []
    gather_video_paths(input_dir, paths)
    return paths


def gather_video_paths(input_dir, paths):
    for file in sorted(os.listdir(input_dir)):
        if file.endswith(".mp4"):
            filepath = os.path.join(input_dir, file)
            paths.append(filepath)
        elif os.path.isdir(os.path.join(input_dir, file)):
            gather_video_paths(os.path.join(input_dir, file), paths)


def count_video_time(video_path):
    video = cv2.VideoCapture(video_path)

    frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = video.get(cv2.CAP_PROP_FPS)
    video.release()  # release the capture handle before computing the duration
    return frame_count / fps