Jofthomas HF staff committed on
Commit
9bf50e7
·
verified ·
1 Parent(s): f5e5ce2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -20
app.py CHANGED
@@ -4,9 +4,10 @@ import json
4
  import os
5
  from huggingface_hub import login
6
 
7
- HUGGINGFACEHUB_API_TOKEN = os.environ.get("HF_TOKEN")
 
8
 
9
- default_model="meta-llama/Meta-Llama-3-8B-Instruct"
10
 
11
  demo_conversation = """[
12
  {"role": "system", "content": "You are a helpful chatbot."},
@@ -19,9 +20,19 @@ description_text = """# Chat Template Viewer
19
  ### This space is a helper to learn more about [Chat Templates](https://huggingface.co/docs/transformers/main/en/chat_templating).
20
  """
21
 
22
- default_tools = [{"type": "function", "function": {"name":"get_current_weather", "description": "Get the current weather", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, "format": {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location."}},"required":["location","format"]}}}]
 
 
 
 
 
 
 
 
 
 
 
23
 
24
- # render the tool use prompt as a string:
25
  def get_template_names(model_name):
26
  try:
27
  tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -35,38 +46,55 @@ def get_template_names(model_name):
35
  def update_template_dropdown(model_name):
36
  template_names = get_template_names(model_name)
37
  if template_names:
38
- return gr.update(choices=template_names, value=None)
 
39
 
40
  def apply_chat_template(model_name, test_conversation, add_generation_prompt, cleanup_whitespace, template_name, hf_token, kwargs):
41
  try:
42
- login(token=hf_token)
 
 
43
  tokenizer = AutoTokenizer.from_pretrained(model_name)
44
- except:
45
- return f"model {model_name} could not be loaded or invalid HF token"
 
46
  try:
47
- outputs = []
48
  conversation = json.loads(test_conversation)
49
 
50
- template = tokenizer.chat_template.get(template_name) if template_name else None
51
- print(kwargs)
52
- formatted = tokenizer.apply_chat_template(conversation, chat_template=template, tokenize=False, add_generation_prompt=add_generation_prompt, tools=default_tools)
 
 
 
 
 
 
 
 
 
53
  return formatted
54
  except Exception as e:
55
- return str(e)
56
 
57
  with gr.Blocks() as demo:
 
 
58
  model_name_input = gr.Textbox(label="Model Name", placeholder="Enter model name", value=default_model)
59
  template_dropdown = gr.Dropdown(label="Template Name", choices=[], interactive=True)
60
  conversation_input = gr.TextArea(value=demo_conversation, lines=6, label="Conversation")
61
  add_generation_prompt_checkbox = gr.Checkbox(value=False, label="Add generation prompt")
62
  cleanup_whitespace_checkbox = gr.Checkbox(value=True, label="Cleanup template whitespace")
63
- hf_token_input = gr.Textbox(label="Hugging Face Token (optional)", placeholder="Enter your HF token")
64
- kwargs_input = gr.JSON(label="Additional kwargs", value=default_tools, render=False)
65
- output = gr.TextArea(label="Formatted conversation")
 
 
 
 
 
66
 
67
- model_name_input.change(fn=update_template_dropdown, inputs=model_name_input, outputs=template_dropdown)
68
- gr.Interface(
69
- description=description_text,
70
  fn=apply_chat_template,
71
  inputs=[
72
  model_name_input,
@@ -80,4 +108,4 @@ with gr.Blocks() as demo:
80
  outputs=output
81
  )
82
 
83
- demo.launch()
 
4
  import os
5
  from huggingface_hub import login
6
 
# Fetch HF Token: Hugging Face access token read from the environment.
# Defaults to an empty string so truthiness checks downstream are safe.
HUGGINGFACEHUB_API_TOKEN = os.environ.get("HF_TOKEN", "")

# Model pre-filled in the "Model Name" textbox of the UI.
default_model = "meta-llama/Meta-Llama-3-8B-Instruct"
12
  demo_conversation = """[
13
  {"role": "system", "content": "You are a helpful chatbot."},
 
20
  ### This space is a helper to learn more about [Chat Templates](https://huggingface.co/docs/transformers/main/en/chat_templating).
21
  """
22
 
# JSON-schema parameter spec for the demo tool below.
_weather_parameters = {
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA",
        },
        "format": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"],
            "description": "The temperature unit to use. Infer this from the user's location.",
        },
    },
    "required": ["location", "format"],
}

# Default "tools" payload passed to apply_chat_template: one
# OpenAI-style function spec describing a get_current_weather call.
default_tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": _weather_parameters,
        },
    }
]
35
 
 
36
  def get_template_names(model_name):
37
  try:
38
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
46
def update_template_dropdown(model_name):
    """Refresh the template-name dropdown for *model_name*.

    Returns a Gradio update that lists the model's named chat templates
    and pre-selects the first one, or clears the dropdown when the model
    exposes no named templates.
    """
    template_names = get_template_names(model_name)
    if template_names:
        # Pre-select the first template so the dropdown never keeps a
        # stale value from a previously loaded model.
        return gr.update(choices=template_names, value=template_names[0])
    # Component-specific gr.Dropdown.update() was removed in Gradio 4.x;
    # the generic gr.update() works on both 3.x and 4.x.
    return gr.update(choices=[], value=None)
51
 
52
def apply_chat_template(model_name, test_conversation, add_generation_prompt, cleanup_whitespace, template_name, hf_token, kwargs):
    """Render *test_conversation* (a JSON string) through the model's chat template.

    Parameters mirror the Gradio inputs. Returns the formatted prompt
    string on success, or an "Error: ..." message instead of raising, so
    the UI output textbox always receives displayable text.
    """
    try:
        if hf_token:
            # Needed for gated repos (e.g. Meta Llama 3).
            login(token=hf_token)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        return f"Error: Could not load model {model_name} or invalid HF token. {str(e)}"

    try:
        conversation = json.loads(test_conversation)

        # tokenizer.chat_template may be a plain string (a single,
        # unnamed template) or a dict of named templates. .get() is only
        # valid on the dict form, so guard explicitly instead of letting
        # str.get() raise AttributeError.
        if template_name and isinstance(tokenizer.chat_template, dict):
            template = tokenizer.chat_template.get(template_name, None)
        else:
            template = None

        # NOTE(review): cleanup_whitespace and kwargs are currently
        # unused and tools is pinned to default_tools — confirm intent.
        formatted = tokenizer.apply_chat_template(
            conversation,
            chat_template=template,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            tools=default_tools,
        )
        return formatted
    except Exception as e:
        return f"Error: {str(e)}"
79
 
80
  with gr.Blocks() as demo:
81
+ gr.Markdown(description_text)
82
+
83
  model_name_input = gr.Textbox(label="Model Name", placeholder="Enter model name", value=default_model)
84
  template_dropdown = gr.Dropdown(label="Template Name", choices=[], interactive=True)
85
  conversation_input = gr.TextArea(value=demo_conversation, lines=6, label="Conversation")
86
  add_generation_prompt_checkbox = gr.Checkbox(value=False, label="Add generation prompt")
87
  cleanup_whitespace_checkbox = gr.Checkbox(value=True, label="Cleanup template whitespace")
88
+ hf_token_input = gr.Textbox(label="Hugging Face Token (optional)", placeholder="Enter your HF token", type="password")
89
+ kwargs_input = gr.JSON(label="Additional kwargs", value=default_tools, visible=False)
90
+ output = gr.TextArea(label="Formatted conversation", interactive=False)
91
+
92
+ update_button = gr.Button("Update Template List")
93
+ format_button = gr.Button("Format Conversation")
94
+
95
+ update_button.click(fn=update_template_dropdown, inputs=model_name_input, outputs=template_dropdown)
96
 
97
+ format_button.click(
 
 
98
  fn=apply_chat_template,
99
  inputs=[
100
  model_name_input,
 
108
  outputs=output
109
  )
110
 
111
+ demo.launch()