# NLPToolkit Agent — Gradio chat app for NLP tasks, served as a Hugging Face Space.
import gradio as gr
from huggingface_hub import InferenceClient
import langdetect
import json
# Initialize Hugging Face client with the new model
# NOTE(review): confirm this model is still hosted on the serverless
# Inference API before deploying — unavailable models fail at request time.
client = InferenceClient(model="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")
# Default system message to guide the assistant
default_system_message = (
"You are NLPToolkit Agent, an advanced assistant specializing in NLP tasks such as text summarization, "
"sentiment analysis, text classification, and entity recognition. Adapt your responses to the selected task."
)
# Predefined task-specific instructions
# Keys must match the Dropdown choices in create_interface(); respond()
# falls back to 'General NLP task' for unknown task names.
task_instructions = {
"Summarization": "Summarize the text clearly and concisely.",
"Sentiment Analysis": "Analyze the sentiment of the text (positive, neutral, negative).",
"Text Classification": "Classify the text into relevant categories.",
"Entity Recognition": "Identify and list named entities in the text."
}
# Preprocessing user input
def preprocess_text(text):
    """
    Validate the user's input text and return it stripped.

    Non-English or undetectable input is reported via sentinel message
    strings rather than exceptions; respond() checks those prefixes.
    """
    try:
        detected = langdetect.detect(text)
    except Exception:
        return "Unable to detect language. Please provide valid text input."
    if detected != "en":
        return f"Input language detected as {detected}. Please provide input in English."
    return text.strip()
# Respond function for handling user input and generating a response
def respond(task, message, history, system_message, max_tokens, temperature, top_p):
    """
    Stream a model response for the user's message.

    Args:
        task: Selected NLP task name (key into task_instructions).
        message: Raw user input text.
        history: Iterable of (user_message, assistant_message) pairs.
        system_message: Base system prompt; the task instruction is appended.
        max_tokens: Maximum tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The growing partial response text, or a validation/error message.
    """
    # Append the task-specific instruction to the base system prompt.
    system_message = f"{system_message} Task: {task_instructions.get(task, 'General NLP task')}"
    # preprocess_text signals problems with sentinel strings, not exceptions.
    message = preprocess_text(message)
    if message.startswith("Input language detected") or message.startswith("Unable to detect"):
        yield message
        return
    # Rebuild the full conversation for the chat API.
    messages = [{"role": "system", "content": system_message}]
    for user_message, assistant_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})
    messages.append({"role": "user", "content": message})
    response = ""
    # Stream response from the Hugging Face model
    try:
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            # Bug fix: final/keep-alive stream chunks can carry a None delta;
            # skip them instead of raising TypeError on str concatenation.
            if token:
                response += token
                yield response
    except Exception as e:
        yield f"Error generating response: {str(e)}"
# Save conversation history to a JSON file
def save_history(history):
    """
    Persist the conversation history to chat_history.json.

    Args:
        history: JSON-serializable conversation history.

    Returns:
        A human-readable status message (success or failure) — errors are
        reported rather than raised so the UI callback never crashes.
    """
    try:
        with open("chat_history.json", "w", encoding="utf-8") as f:
            json.dump(history, f, ensure_ascii=False, indent=2)
    except OSError as e:
        return f"Failed to save chat history: {e}"
    return "Chat history saved successfully."
# Load conversation history from a JSON file
def load_history():
    """
    Load conversation history from chat_history.json.

    Returns:
        The stored history list, or [] when the file is missing or
        contains invalid JSON (previously a corrupt file crashed the app).
    """
    try:
        with open("chat_history.json", "r", encoding="utf-8") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return []
# Gradio app interface
def create_interface():
    """Build and return the Gradio Blocks UI for the chatbot."""
    with gr.Blocks() as demo:
        gr.Markdown("## 🧠 NLPToolkit Agent\nAn advanced assistant for NLP tasks, powered by Hugging Face.")

        with gr.Row():
            task_selector = gr.Dropdown(
                choices=["Summarization", "Sentiment Analysis", "Text Classification", "Entity Recognition"],
                value="Summarization",
                label="Select NLP Task",
            )
        with gr.Row():
            user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            system_prompt = gr.Textbox(value=default_system_message, label="System Message")
        with gr.Row():
            # NOTE(review): this State is passed into respond() but never
            # written back by any callback, so the model sees an empty
            # history on every turn — confirm whether that is intended.
            conversation_state = gr.State(value=[])
            assistant_output = gr.Textbox(label="Assistant Response", interactive=False)
        with gr.Row():
            max_tokens_slider = gr.Slider(1, 2048, value=512, label="Max Tokens")
            temperature_slider = gr.Slider(0.1, 4.0, value=0.7, label="Temperature")
            top_p_slider = gr.Slider(0.1, 1.0, value=0.95, label="Top-p (Nucleus Sampling)")
        with gr.Row():
            save_button = gr.Button("Save Chat History")
            load_button = gr.Button("Load Chat History")
        with gr.Row():
            submit_button = gr.Button("Generate Response")

        # Wire callbacks: generation streams into the response box;
        # save/load operate on the (currently static) history State.
        submit_button.click(
            fn=respond,
            inputs=[task_selector, user_input, conversation_state, system_prompt,
                    max_tokens_slider, temperature_slider, top_p_slider],
            outputs=assistant_output,
        )
        save_button.click(fn=save_history, inputs=conversation_state, outputs=None)
        load_button.click(fn=load_history, inputs=None, outputs=conversation_state)

        gr.Markdown("### 🚀 Powered by Hugging Face and Gradio | Developed by Canstralian")
    return demo
# Launch the app only when executed as a script, not on import.
if __name__ == "__main__":
    create_interface().launch()