import os
import random
from huggingface_hub import InferenceClient
import gradio as gr
#from utils import parse_action, parse_file_content, read_python_module_structure
from datetime import datetime
from PIL import Image
import agent
from models import models
import urllib.request
import uuid
import requests
import io
from chat_models import models as c_models
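
# Pre-load each diffusion model once at startup so the "Generate" path can call
# it directly; chat models are kept as name strings for the Inference API.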
loaded_model = []
chat_model = []
for model in models:
    loaded_model.append(gr.load(f'models/{model}'))
print(loaded_model)
for model_c in c_models:
    chat_model.append(model_c)
print(chat_model)
now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
#client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
history = []
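
# Handler for the "Test" button: generate a prompt with the chat model, then
# render it with the selected diffusion model via the serverless Inference API.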
def gen_from_infer(purpose, history, image, model_drop, chat_drop, choice, seed, im_seed):
    history.clear()
    # A seed of 0 means "randomize this session"
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    if im_seed == 0:
        im_seed = random.randint(1, 1111111111111111)
    out_prompt = generate(purpose, history, chat_drop, seed)
    history.append((purpose, out_prompt))
    yield (history, None)
    infer_model = models[int(model_drop)]
    print(infer_model)
    infer = InferenceClient(infer_model)
    print(infer)
    out_img = infer.text_to_image(
        prompt=out_prompt,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=None,
        guidance_scale=None,
        model=None,
        seed=im_seed,
    )
    yield (history, out_img)
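
# Build a Mixtral-style "<s>[INST] ... [/INST]" prompt from the chat history.
# If the message was already asked this session, the history is dropped and only
# the new message is sent. The seed argument is currently unused here.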
def format_prompt(message, history, seed):
    prompt = "<s>"
    repeat = False
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
        print(f'MESSAGE :: {message}, USER_PROMPT :: {user_prompt}')
        if user_prompt == message:
            repeat = True
    if repeat:
        # The message was already asked this session: resend it without history
        prompt = f"<s>[INST] {message} [/INST]"
    else:
        prompt += f"[INST] {message} [/INST]"
    return prompt
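
# Stream a completion for the formatted prompt from the selected chat model and
# return the concatenated text.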
def run_gpt(in_prompt, history, model_drop, seed):
    client = InferenceClient(c_models[int(model_drop)])
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    if seed == 0:
        seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = agent.GENERATE_PROMPT + prompt
    print(content)
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    for response in stream:
        resp += response.token.text
    return resp
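
# Image-description path backed by IDEFICS. IDEFICS accepts images embedded in
# the prompt as markdown URLs (e.g. "User:![](https://.../img.png)What is in
# this image?<end_of_utterance>"), which would be one way to wire the uploaded
# image through; see the NOTE inside.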
def run_idefics(in_prompt, history, image, model_drop, seed):
    send_list = []
    #client = InferenceClient("HuggingFaceM4/idefics-9b-instruct")
    client = InferenceClient("HuggingFaceM4/idefics-80b-instruct")
    print(f'history :: {history}')
    prompt = format_prompt(in_prompt, history, seed)
    seed = random.randint(1, 1111111111111111)
    print(seed)
    # Sampling settings (currently unused; generation_args below is what gets sent)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=512,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    generation_args = {
        "max_new_tokens": 256,
        "repetition_penalty": 1.0,
        "stop_sequences": ["<end_of_utterance>", "\nUser:"],
    }
    # NOTE: the formatted prompt and the uploaded image are not forwarded yet;
    # a fixed placeholder question is sent instead.
    content = "\nUser: What is in this image?<end_of_utterance>\nAssistant:"
    print(content)
    stream = client.text_generation(prompt=content, **generation_args)
    print(stream)
    return stream
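
# Thin wrappers that route a request to the prompt generator or the describer.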
def generate(purpose, history, chat_drop, seed):
    print(history)
    out_prompt = run_gpt(purpose, history, chat_drop, seed)
    return out_prompt

def describe(purpose, history, image, chat_drop, seed):
    print(history)
    out_prompt = run_idefics(purpose, history, image, chat_drop, seed)
    return out_prompt
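
# Main chat handler: "Generate" turns the message into an image prompt and then
# an image; "Describe" asks IDEFICS about the uploaded image.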
def run(purpose, history, image, model_drop, chat_drop, choice, seed):
    if choice == "Generate":
        out_prompt = generate(purpose, history, chat_drop, seed)
        history.append((purpose, out_prompt))
        yield (history, None)
        model = loaded_model[int(model_drop)]
        out_img = model(out_prompt)
        print(out_img)
        # The loaded model returns a file path inside this Space; fetch it over HTTP
        url = f'https://johann22-chat-diffusion-describe.hf.space/file={out_img}'
        print(url)
        uid = uuid.uuid4()
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            out = Image.open(io.BytesIO(r.content))
            yield (history, out)
        else:
            yield ([(purpose, "an error occurred")], None)
    if choice == "Describe":
        out_prompt = describe(purpose, history, image, chat_drop, seed)
        history.append((purpose, out_prompt))
        yield (history, None)
################################################
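# CSS for the banner image and white page title.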
style=""" | |
.top_head{ | |
background: no-repeat; | |
background-image: url(https://huggingface.co/spaces/johann22/chat-diffusion/resolve/main/image.png); | |
background-position-y: bottom; | |
height: 180px; | |
background-position-x: center; | |
} | |
.top_h1{ | |
color: white!important; | |
-webkit-text-stroke-width: medium; | |
} | |
""" | |
with gr.Blocks(css=style) as iface:
    gr.HTML("""<div class="top_head"><center><br><h1 class="top_h1">Mixtral Chat Diffusion</h1><br><h3 class="top_h1">This chatbot will generate images</h3></center></div>""")
    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(show_copy_button=True, layout='panel')
            with gr.Row():
                agent_choice = gr.Radio(choices=["Generate", "Describe"], value="Generate")
            msg = gr.Textbox()
            with gr.Accordion("Controls", open=False):
                model_drop = gr.Dropdown(label="Diffusion Models", type="index", choices=[m for m in models], value=models[0])
                chat_model_drop = gr.Dropdown(label="Chatbot Models", type="index", choices=[m for m in c_models], value=c_models[0])
                chat_seed = gr.Slider(label="Prompt Seed", minimum=0, maximum=1000000000000,
                                      value=random.randint(1, 1000000000000), step=1,
                                      interactive=True,
                                      info="Set Seed to 0 to randomize the session")
                image_seed = gr.Slider(label="Image Seed", minimum=0, maximum=1000000000000,
                                       value=random.randint(1, 1000000000000), step=1,
                                       interactive=True,
                                       info="Set Seed to 0 to randomize the session")
            with gr.Group():
                with gr.Row():
                    submit_b = gr.Button()
                    stop_b = gr.Button("Stop")
                    clear = gr.ClearButton([msg, chatbot])
                    test_btn = gr.Button("Test")
        with gr.Column(scale=2):
            sumbox = gr.Image(label="Image")
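
    # Event wiring: "Test" uses the Inference API path; Submit/Enter use the
    # pre-loaded models; "Stop" cancels in-flight jobs.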
    run_test = test_btn.click(gen_from_infer, [msg, chatbot, sumbox, model_drop, chat_model_drop, agent_choice, chat_seed, image_seed], [chatbot, sumbox], concurrency_limit=20)
    sub_b = submit_b.click(run, [msg, chatbot, sumbox, model_drop, chat_model_drop, agent_choice, chat_seed], [chatbot, sumbox])
    sub_e = msg.submit(run, [msg, chatbot, sumbox, model_drop, chat_model_drop, agent_choice, chat_seed], [chatbot, sumbox])
    stop_b.click(None, None, None, cancels=[sub_b, sub_e])

iface.queue(default_concurrency_limit=None).launch()