File size: 1,910 Bytes
bb6e1c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import os
import gradio as gr
from PIL import Image
from src.orchestrator import run_task


def ui_generate_code(prompt, language):
    """Gradio handler: forward a code-generation request to the orchestrator.

    Args:
        prompt: Free-text description of what to build.
        language: Target programming language selected in the UI.

    Returns:
        Whatever ``run_task("code", ...)`` produces (generated source text).
    """
    payload = {"prompt": prompt, "language": language}
    return run_task("code", payload)


def ui_generate_image(prompt):
    """Gradio handler: forward an image-generation request to the orchestrator.

    Args:
        prompt: Text prompt describing the desired image.

    Returns:
        The image object produced by ``run_task("image", ...)``
        (a PIL image, given the ``gr.Image(type="pil")`` output widget).
    """
    return run_task("image", {"prompt": prompt})


def ui_create_bot(bot_type):
    """Gradio handler: ask the orchestrator to scaffold a bot template.

    Args:
        bot_type: Kind of bot to create ("telegram" or "discord" in the UI).

    Returns:
        A human-readable confirmation string containing the template path.
    """
    template_path = run_task("bot", {"bot_type": bot_type})
    return "Created bot template at: {}".format(template_path)


def ui_plan(goal):
    """Gradio handler: request a step-by-step plan from the orchestrator.

    Args:
        goal: Free-text description of the goal to plan for.

    Returns:
        The plan steps rendered as a newline-separated bulleted string.
    """
    steps = run_task("plan", {"goal": goal})
    bullets = [f"- {step}" for step in steps]
    return "\n".join(bullets)


# Assemble the four-tab Gradio UI. Widget creation order defines the layout,
# so tabs and their children are built in the same sequence as before.
with gr.Blocks(title="Multi-Agent Orchestrator") as demo:
    gr.Markdown("# Multi-Agent: Code, Vision, Bot, Reasoning")

    with gr.Tab("Code"):
        code_prompt = gr.Textbox(label="What to build?", lines=6)
        code_lang = gr.Dropdown(
            choices=["python", "javascript", "typescript", "go", "rust"],
            value="python",
            label="Language",
        )
        code_out = gr.Code(language="python", label="Generated Code")
        code_btn = gr.Button("Generate")
        code_btn.click(ui_generate_code, inputs=[code_prompt, code_lang], outputs=code_out)

    with gr.Tab("Image"):
        image_prompt = gr.Textbox(label="Image prompt", lines=3)
        image_out = gr.Image(type="pil", label="Output")
        image_btn = gr.Button("Generate Image")
        image_btn.click(ui_generate_image, inputs=[image_prompt], outputs=image_out)

    with gr.Tab("Bot"):
        bot_type = gr.Dropdown(choices=["telegram", "discord"], value="telegram", label="Bot type")
        bot_result = gr.Textbox(label="Result")
        bot_btn = gr.Button("Create Bot Template")
        bot_btn.click(ui_create_bot, inputs=[bot_type], outputs=bot_result)

    with gr.Tab("Reasoning / Plan"):
        plan_goal = gr.Textbox(label="Goal description", lines=4)
        plan_result = gr.Textbox(label="Plan")
        plan_btn = gr.Button("Make Plan")
        plan_btn.click(ui_plan, inputs=[plan_goal], outputs=plan_result)


if __name__ == "__main__":
    # HF_TOKEN must be present in the environment for InferenceClient usage
    # inside the orchestrator agents.
    port = int(os.getenv("PORT", "7860"))
    # Bind on all interfaces so the app is reachable inside containers.
    demo.launch(server_name="0.0.0.0", server_port=port)