Spaces:
Runtime error
Runtime error
File size: 1,609 Bytes
09728a0 53f445e b216154 53f445e b216154 53f445e 8c8e86a 53f445e af065f5 4a99aee 53f445e 4a99aee 424bead b216154 53f445e a093a2e 53f445e ff3e890 0e428a6 ff3e890 0e428a6 a6217f6 26c884f 53f445e a093a2e 09728a0 4a99aee |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Initialize the DialoGPT-medium model and its matching tokenizer.
# NOTE(fix): the tokenizer was previously loaded from the plain "gpt2"
# checkpoint; DialoGPT ships its own tokenizer files (same GPT-2 BPE vocab,
# but with the checkpoint's special-token config), so load both from the
# same repo to keep model and tokenizer consistent.
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
# Last bot response, kept for reference only (it is never fed back into
# generation, so each call to chat() is a single-turn exchange).
chat_history = None
def chat(message: str) -> str:
    """Generate a single-turn DialoGPT reply to *message*.

    Args:
        message: The user's input text.

    Returns:
        The model's reply, with the echoed prompt and special tokens removed.
    """
    global chat_history
    # Encode the user's message, terminated by EOS as DialoGPT expects.
    input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
    # Generate a continuation. generate() returns prompt + continuation
    # as one sequence, so the reply must be sliced out afterwards.
    response_ids = model.generate(
        input_ids,
        max_length=150,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1,
    )
    # FIX: decode only the newly generated tokens. Decoding response_ids[0]
    # in full echoed the user's message back at the start of every reply,
    # because skip_special_tokens strips EOS but not the prompt text.
    bot_response = tokenizer.decode(
        response_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    chat_history = bot_response  # Stored for reference only; not used in generation.
    return bot_response
# Create and launch the Gradio interface.
# NOTE(fix): the trailing emoji in the description were mojibake
# ("π¬π€", a cp1253 mis-decoding) — restored to the intended 💬🤖.
iface = gr.Interface(
    fn=chat,
    title="UrFriendly Chatbot",
    description="UrFriendly Chatbot is a conversational assistant based on DialoGPT-medium with GPT-2 tokenization. Type or click on one of the examples to get started. Please note that UrFriendly Chatbot is not 100% accurate, so incorrect information may generate. 💬🤖",
    # Clickable starter prompts shown beneath the input box.
    examples=[
        "Howdy!",
        "Tell me a joke.",
        "Explain quantum computing in simple terms.",
        "How are you?",
        "What is an exponent in mathematics?",
        "Does money buy happiness?"
    ],
    inputs="text",
    outputs="text",
    live=True  # Re-run chat() on every input change for a continuous feel.
)
iface.launch()
|