import gradio as gr
import os
from huggingface_hub import InferenceClient
""" | |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
""" | |
# Client for the Hugging Face Inference API, pointed at the Nemotron instruct model.
client = InferenceClient("nvidia/Llama-3.1-Nemotron-8B-UltraLong-4M-Instruct")
# Read from the environment; not used by the InferenceClient itself.
api_key = os.environ.get("NINJA_API")
# System-style personality prompt for the bot, also read from the environment.
PERSONALITY = os.environ.get("CHATBOT_PERSONALITY")
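# Illustrative guess at what CHATBOT_PERSONALITY might contain (the real prompt is
# supplied via the environment, so this is only an assumption about its shape):
#   "You are Shinobi, an assistant specializing in Japanese history, language, and culture."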
def role(message, history):
    # `history` arrives in messages format (a list of {"role", "content"} dicts)
    # because the ChatInterface below is created with type="messages".
    history.append({"role": "user", "content": message})
    # Build a plain-text prompt: the personality first, then each conversation turn.
    prompt = PERSONALITY + "\n\n"
    for msg in history:
        prompt += f"{msg['role'].capitalize()}: {msg['content']}\n"
    prompt += "Assistant:"
    response = client.text_generation(prompt, max_new_tokens=250)
    # ChatInterface manages the displayed history itself, so return only the reply.
    return response
""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
demo = gr.ChatInterface(
    role,
    type="messages",
    title="Shinobi - Japanese Culture Specialist",
    description="A chatbot specializing in Japanese culture.",
    chatbot=gr.Chatbot(type="messages"),
    examples=["Tell me about the history of Samurai", "What is Bushido?", "Recommend a Japanese film"],
)
if __name__ == "__main__":
    demo.launch()
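# Hypothetical local run (on Spaces these variables come from the repo's secrets):
#   CHATBOT_PERSONALITY="You are Shinobi..." NINJA_API="..." python app.py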