import json
import os

import gradio as gr
from huggingface_hub import login
from transformers import AutoTokenizer

# Optional token from the Space's environment, used as a fallback when the
# user does not paste a token into the UI (needed for gated models).
HUGGINGFACEHUB_API_TOKEN = os.environ.get("HF_TOKEN", "")

default_model = "meta-llama/Meta-Llama-3-8B-Instruct"

demo_conversation = """[
    {"role": "system", "content": "You are a helpful chatbot."},
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello, human!"},
    {"role": "user", "content": "Can I ask a question?"}
]"""

description_text = """# Chat Template Viewer
### This Space helps visualize how Hugging Face models format chat conversations with their chat templates.
"""

default_tools = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"},
                "format": {"type": "string", "enum": ["celsius", "fahrenheit"],
                           "description": "The temperature unit to use. Infer this from the user's location."},
            },
            "required": ["location", "format"],
        },
    },
}]


def apply_chat_template(model_name, test_conversation, add_generation_prompt, cleanup_whitespace, hf_token, tools):
    try:
        # Prefer the user-supplied token, falling back to the Space's own token
        # so that gated models can still be downloaded.
        token = hf_token or HUGGINGFACEHUB_API_TOKEN
        if token:
            login(token=token)

        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        return f"Error: Could not load model {model_name} or invalid HF token. {str(e)}"

    try:
        conversation = json.loads(test_conversation)

        # Optionally strip the per-line indentation and newlines that many
        # templates include only for readability.
        chat_template = tokenizer.chat_template
        if cleanup_whitespace and isinstance(chat_template, str):
            chat_template = "".join(line.strip() for line in chat_template.split("\n"))

        formatted = tokenizer.apply_chat_template(
            conversation,
            chat_template=chat_template,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            tools=tools,
        )
        return formatted
    except Exception as e:
        return f"Error: {str(e)}"


with gr.Blocks() as demo:
    gr.Markdown(description_text)

    with gr.Row():
        with gr.Column():
            model_name_input = gr.Textbox(label="Model Name", placeholder="Enter model name", value=default_model)
            hf_token_input = gr.Textbox(label="Hugging Face Token (optional)", placeholder="Enter your HF token", type="password")
            conversation_input = gr.TextArea(value=demo_conversation, lines=8, label="Conversation")
            add_generation_prompt_checkbox = gr.Checkbox(value=False, label="Add generation prompt")
            cleanup_whitespace_checkbox = gr.Checkbox(value=True, label="Cleanup template whitespace")
            format_button = gr.Button("Format Conversation")

        with gr.Column():
            output = gr.TextArea(label="Formatted Conversation", interactive=False, lines=12)

    # Tool definitions are held in hidden state so the template's tool-use
    # formatting can be rendered as well.
    tools_state = gr.State(default_tools)

    format_button.click(
        fn=apply_chat_template,
        inputs=[
            model_name_input,
            conversation_input,
            add_generation_prompt_checkbox,
            cleanup_whitespace_checkbox,
            hf_token_input,
            tools_state,
        ],
        outputs=output,
    )

demo.launch()