Spaces: Running on Zero
import spaces
import torch
import numpy as np
import gradio as gr

from util.file import generate_binary_file, load_numpy_from_binary_bitwise
from latent_utils import generate_ours


def main(prompt, T, K, K_tilde, model_type='512x512', bitstream=None, avail_models=None,
         progress=gr.Progress(track_tqdm=True)):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Decode the transmitted bitstream back into per-step noise indices (None on the encoding path).
    indices = load_numpy_from_binary_bitwise(bitstream, K, T, model_type, T - 1)
    if indices is not None:
        indices = indices.to(device)

    # model, _ = load_model(img_size_to_id[img_size], T, device, float16=True, compile=False)
    model = avail_models[model_type].to(device)
    model.device = device
    model.model.to(device=device)
    model.model.scheduler.device = device
    model.set_timesteps(T, device=device)

    with torch.no_grad():
        x, indices = generate_ours(model,
                                   num_noises=K,
                                   num_noises_to_optimize=K_tilde,
                                   prompt=prompt,
                                   negative_prompt=None,
                                   indices=indices)

    # Map the decoded sample from [-1, 1] to [0, 1] and convert to an HWC numpy image.
    x = (x / 2 + 0.5).clamp(0, 1)
    x = x.detach().cpu().squeeze().numpy()
    x = np.transpose(x, (1, 2, 0))

    torch.cuda.empty_cache()

    if bitstream is None:
        # Encoding path: serialize the selected noise indices into a binary file
        # (the .cpu() guards against GPU-resident indices before .numpy()).
        indices = generate_binary_file(indices.cpu().numpy(), K, T, model_type)
        return x, indices
    return x
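
Since main() takes a gr.Progress argument and the Space runs on ZeroGPU, it is presumably called from a Gradio UI behind a @spaces.GPU-decorated wrapper. Below is a minimal sketch of such wiring, assuming hypothetical component choices, labels, default values, and an avail_models preloading step that are not taken from the original Space.

import gradio as gr
import spaces

# Hypothetical: preload the available diffusion models once at startup,
# e.g. avail_models = {"512x512": load_model_512(), ...}
avail_models = {}


@spaces.GPU  # ZeroGPU: a GPU is attached only for the duration of this call
def run(prompt, T, K, K_tilde, model_type, bitstream):
    out = main(prompt, int(T), int(K), int(K_tilde), model_type=model_type,
               bitstream=bitstream, avail_models=avail_models)
    # main() returns (image, bitstream_path) when encoding and only the image
    # when decoding an existing bitstream; normalize to two outputs for Gradio.
    return out if isinstance(out, tuple) else (out, None)


demo = gr.Interface(
    fn=run,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Number(label="T (denoising steps)", value=50, precision=0),
        gr.Number(label="K (candidate noises per step)", value=2, precision=0),
        gr.Number(label="K_tilde (noises to optimize)", value=2, precision=0),
        gr.Dropdown(["512x512"], label="Model", value="512x512"),
        gr.File(label="Bitstream (optional, decode instead of encode)"),
    ],
    outputs=[gr.Image(label="Image"), gr.File(label="Bitstream")],
)

demo.launch()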