# Spaces:
# Running
# Running
# --- Dependencies and global configuration ---
import logging
import os

import anthropic  # NOTE(review): imported but never used in this file — confirm before removing
import gradio as gr
from openai import OpenAI

# App-level logging at INFO; quiet httpx's per-request chatter down to WARNING.
logging.basicConfig(level=logging.INFO)
logging.getLogger("gradio").setLevel(logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)

# Shared OpenAI client; presumably reads OPENAI_API_KEY from the environment — TODO confirm.
client = OpenAI()
def generate_completion(input, history):
    """Stream an assistant reply for a job-posting-extraction chat.

    Parameters
    ----------
    input : str
        The current user message. (Name shadows the builtin ``input`` but is
        kept unchanged for compatibility with existing callers.)
    history : list | None
        Prior turns as ``[user_message, assistant_reply]`` pairs, the Gradio
        ChatInterface tuples format.

    Yields
    ------
    str
        The accumulated assistant reply so far; Gradio renders each yield,
        producing a streaming effect.
    """
    messages = [
        {
            "role": "system",
            "content": "You are a world-class extractor of information from messy job postings.",
        }
    ]
    # Flatten [user, assistant] pairs into OpenAI chat messages, silently
    # skipping malformed entries rather than crashing mid-conversation.
    for entry in history or []:
        if len(entry) == 2:
            user_msg, assistant_msg = entry
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
    # Current user turn goes last.
    messages.append({"role": "user", "content": input})

    response = client.chat.completions.create(
        model="gpt-3.5-turbo-0125",
        messages=messages,  # type: ignore
        stream=True,
        temperature=0,
        max_tokens=4000,
    )
    # Accumulate streamed deltas and yield the running total after every
    # chunk. Chunks with no content are simply skipped (the original's
    # `else: answer_str += ""` branch was a no-op and has been removed).
    answer_str: str = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            answer_str += delta
        yield answer_str
if __name__ == "__main__":
    # Build the chat UI around the streaming generator; queue() is required
    # for Gradio to deliver the incremental yields to the browser.
    demo = gr.ChatInterface(fn=generate_completion)
    demo.queue()
    demo.launch()