# Hugging Face Space — hardware: ZeroGPU ("Running on Zero").
from dotenv import load_dotenv
import gradio as gr
from huggingface_hub import InferenceClient

from automatic_speech_recognition import create_asr_tab
from chatbot import create_chatbot_tab
from image_classification import create_image_classification_tab
from image_to_text import create_image_to_text_tab
from text_to_image import create_text_to_image_tab
from text_to_speech import create_text_to_speech_tab
class App:
    """Main application class for the AI Building Blocks Gradio interface.

    This class orchestrates the entire application by creating the Gradio UI
    and integrating all the individual building block tabs.
    """

    def __init__(self, client: InferenceClient):
        """Initialize the App with an InferenceClient instance.

        Args:
            client: Hugging Face InferenceClient instance for making API calls
                to Hugging Face's inference endpoints.
        """
        self.client = client

    def run(self, **launch_kwargs):
        """Launch the Gradio application with all building block tabs.

        Creates a Gradio Blocks interface with multiple tabs, each representing
        a different AI building block. The application blocks until the
        interface is closed.

        Args:
            **launch_kwargs: Optional keyword arguments forwarded verbatim to
                ``gr.Blocks.launch`` (e.g. ``share=True``, ``server_port=7860``).
                Calling ``run()`` with no arguments preserves the original
                behavior exactly.
        """
        with gr.Blocks(title="AI Building Blocks") as demo:
            gr.Markdown("# AI Building Blocks")
            gr.Markdown("A gallery of building blocks for building AI applications")
            with gr.Tabs():
                with gr.Tab("Text-to-image Generation"):
                    create_text_to_image_tab(self.client)
                with gr.Tab("Image-to-text or Image Captioning"):
                    # NOTE(review): some tab builders receive the shared client
                    # while this one (and TTS/Chat) do not — confirm the
                    # asymmetry is intentional in the respective modules.
                    create_image_to_text_tab()
                with gr.Tab("Image Classification"):
                    create_image_classification_tab(self.client)
                with gr.Tab("Text-to-speech (TTS)"):
                    create_text_to_speech_tab()
                with gr.Tab("Automatic Speech Recognition (ASR)"):
                    create_asr_tab(self.client)
                with gr.Tab("Chat"):
                    create_chatbot_tab()
        demo.launch(**launch_kwargs)
def _main() -> None:
    """Script entry point: load environment variables, then start the app."""
    load_dotenv()
    App(InferenceClient()).run()


if __name__ == "__main__":
    _main()