from diffusers import DiffusionPipeline
import gradio as gr
import torch

orig_start_prompt = "A photograph of an adult Lion"
orig_end_prompt = "A photograph of a Lion cub"

model_list = ["kakaobrain/karlo-v1-alpha"]


def unclip_text_interpolation(model_path, start_prompt, end_prompt, steps):
    # Load the checkpoint selected in the dropdown (model_path, not the whole
    # model_list) with the "unclip_text_interpolation" community pipeline.
    pipe = DiffusionPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
        custom_pipeline="unclip_text_interpolation",
    )
    # Run on GPU when one is available (e.g. after upgrading the Space).
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
    # The pipeline interpolates between the two prompt embeddings over `steps`
    # images; per-stage inference step counts keep their defaults. It returns
    # an ImagePipelineOutput, and the gallery needs the list of PIL images.
    output = pipe(start_prompt, end_prompt, steps)
    return output.images


inputs = [
    gr.Dropdown(model_list, value=model_list[0], label="Model"),
    gr.Textbox(lines=5, value=orig_start_prompt, label="Start Prompt"),
    gr.Textbox(lines=1, value=orig_end_prompt, label="End Prompt"),
    gr.Slider(minimum=2, maximum=12, value=5, step=1, label="Steps"),
]

output = gr.Gallery(
    label="Generated images", show_label=False, elem_id="gallery", columns=2, height="auto"
)

examples = [
    ["kakaobrain/karlo-v1-alpha", orig_start_prompt, orig_end_prompt, 6],
]

title = "UnClip Text Interpolation Pipeline"

description = """
For faster inference without waiting in the queue, you may duplicate the Space and upgrade to a GPU in the settings.