"""Gradio tab wiring for text-to-image generation via the Hugging Face Inference API."""

from functools import partial
from os import getenv

import gradio as gr
from huggingface_hub import InferenceClient
from PIL.Image import Image


def text_to_image(client: InferenceClient, prompt: str) -> Image:
    """Run a single text-to-image generation request.

    Args:
        client: Configured ``InferenceClient`` used to reach the API.
        prompt: Natural-language description of the desired image.

    Returns:
        The generated picture as a PIL ``Image``.

    Note:
        The target model is read from the ``TEXT_TO_IMAGE_MODEL``
        environment variable on every call.
    """
    return client.text_to_image(prompt, model=getenv("TEXT_TO_IMAGE_MODEL"))


def create_text_to_image_tab(client: InferenceClient):
    """Build the UI widgets for the text-to-image tab.

    Lays out a prompt textbox, a generate button, and an image display,
    then wires the button's click event to :func:`text_to_image` with
    *client* pre-bound.

    Args:
        client: ``InferenceClient`` baked into the click handler.
    """
    gr.Markdown("Generate an image from a text prompt.")
    prompt_box = gr.Textbox(label="Prompt")
    generate_button = gr.Button("Generate")
    image_output = gr.Image(label="Image", type="pil")
    generate_button.click(
        fn=partial(text_to_image, client),
        inputs=prompt_box,
        outputs=image_output,
    )