# -*- coding: utf-8 -*-
"""

Created on Thu Aug  1 19:04:11 2024



@author: rkram

"""

import os
import gradio as gr
from transformers import HfEngine, Tool,CodeAgent,load_tool
from gradio_tools import StableDiffusionPromptGeneratorTool 
from streaming import stream_to_gradio
from huggingface_hub import login

# turn caching off
#client.headers["x-use-cache"] = "0"

# Authenticate with the Hugging Face Hub using a token from the environment.
# NOTE(review): login(None) if the variable is unset — presumably the token is
# always provided via HUGGINGFACEHUB_API_TOKEN; verify in deployment config.
login(os.getenv("HUGGINGFACEHUB_API_TOKEN"))
# LLM backend that powers the agent (hosted Llama 3.1 70B Instruct endpoint).
llm_engine = HfEngine("meta-llama/Meta-Llama-3.1-70B-Instruct")
# Tools the agent can call: a text-to-image generator and a Stable Diffusion
# prompt-improvement tool wrapped from gradio_tools into the agents Tool API.
image_gen_tool = load_tool("huggingface-tools/text-to-image")
gradio_pg_tool = StableDiffusionPromptGeneratorTool()
pg_tool = Tool.from_gradio(gradio_pg_tool)
# Code-writing agent: capped at 10 reasoning iterations, no extra Python
# imports authorized inside generated code.
agent = CodeAgent(
    tools=[pg_tool,image_gen_tool],
    llm_engine=llm_engine,
    additional_authorized_imports=[],
    max_iterations=10,
)
# Instruction prefix; the user's text is appended after "Prompt:".
base_prompt = """Improve the following prompt and generate an image.

Prompt:"""
# Main generator driving the chat UI: re-yields the message list after each step.
def interact_with_agent(add_prompt):
    """Run the agent on the user's prompt, streaming chat updates to Gradio.

    Parameters
    ----------
    add_prompt : str or None
        The user's image description, appended to ``base_prompt``. If empty,
        None, or whitespace-only, the agent is instead instructed to reply
        with a fixed error message.

    Yields
    ------
    list[gr.ChatMessage]
        The accumulated message history, re-yielded after every agent step
        so the chatbot component refreshes incrementally.
    """
    # Truthiness covers both None and ""; strip() additionally rejects
    # whitespace-only input, which the original length check let through.
    if add_prompt and add_prompt.strip():
        prompt = base_prompt + add_prompt
    else:
        prompt = "There is no prompt made. Reply exactly with:'***ERROR: Please input a prompt.***'"

    # Show an immediate placeholder so the UI responds before the agent starts.
    messages = [gr.ChatMessage(role="assistant", content="⏳ _Generating image..._")]
    yield messages

    for msg in stream_to_gradio(agent, prompt):
        messages.append(msg)
        # Append a transient "still processing" bubble after the real messages;
        # it is not stored in `messages`, so it disappears on the next yield.
        yield messages + [
            gr.ChatMessage(role="assistant", content="⏳ _Still processing..._")
        ]
    # Final yield without the placeholder once streaming completes.
    yield messages

# UI layout: a themed chat window, a free-text prompt box, and a run button.
_theme = gr.themes.Soft(
    primary_hue=gr.themes.colors.blue,
    secondary_hue=gr.themes.colors.yellow,
)

with gr.Blocks(theme=_theme) as demo:
    gr.Markdown("""# Image Generator""")

    # Chat history pane; the assistant side gets a robot avatar.
    chat_window = gr.Chatbot(
        label="ImageBot",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
        ),
    )

    # User's description of the desired image.
    prompt_box = gr.Textbox(label="What image would you like to generate?")

    # Clicking "Run" streams the agent's output into the chat pane.
    run_button = gr.Button("Run", variant="primary")
    run_button.click(interact_with_agent, [prompt_box], [chat_window])

if __name__ == "__main__":
    demo.launch()