# Hugging Face Space: text-to-image demo (running on ZeroGPU).
# NOTE(review): this header replaces web-page chrome captured during scraping
# (status badges, file size, commit hashes, and the line-number gutter), which
# was not part of the program and made the file invalid Python.
from functools import partial
from os import getenv
import gradio as gr
from PIL.Image import Image
from huggingface_hub import InferenceClient
def text_to_image(client: InferenceClient, prompt: str) -> Image:
    """Turn a text prompt into a generated image via the HF Inference API.

    Args:
        client: InferenceClient used to perform the remote generation call.
        prompt: Natural-language description of the image to generate.

    Returns:
        The generated picture as a PIL Image object.

    Note:
        The target model is read from the TEXT_TO_IMAGE_MODEL environment
        variable on every call; if unset, the client's default model is used.
    """
    model_name = getenv("TEXT_TO_IMAGE_MODEL")
    return client.text_to_image(prompt, model=model_name)
def create_text_to_image_tab(client: InferenceClient):
    """Build the text-to-image tab of the Gradio interface.

    Lays out a prompt textbox, a generate button, and a PIL image output,
    then wires the button's click event to ``text_to_image`` with *client*
    pre-bound via ``functools.partial``.

    Args:
        client: Hugging Face InferenceClient forwarded to ``text_to_image``
            on every button click.
    """
    gr.Markdown("Generate an image from a text prompt.")
    prompt_box = gr.Textbox(label="Prompt")
    generate_btn = gr.Button("Generate")
    image_output = gr.Image(label="Image", type="pil")
    generate_btn.click(
        fn=partial(text_to_image, client),
        inputs=prompt_box,
        outputs=image_output,
    )