lcipolina committed on
Commit
8975c07
·
verified ·
1 Parent(s): 8e80ffc

Fixed long names in leaderboards

Browse files
Files changed (1) hide show
  1. ui/utils.py +141 -0
ui/utils.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
UI utilities for the Game Reasoning Arena Gradio app.

This module contains utility functions for the Gradio interface,
including model name cleaning and display formatting.
"""


def _gpt_display_name(lowered: str) -> str:
    """Return the canonical display name for a GPT-family model.

    Args:
        lowered: The full model name, already lower-cased by the caller.

    Returns:
        A canonical display string (e.g. "GPT-4-turbo"), or "" when no
        known GPT variant matches so the caller can fall through to the
        generic cleaning rules.
    """
    if "gpt_3.5" in lowered or "gpt-3.5" in lowered:
        return "GPT-3.5-turbo"
    if "gpt_4" in lowered or "gpt-4" in lowered:
        if "turbo" in lowered:
            return "GPT-4-turbo"
        if "mini" in lowered:
            return "GPT-4-mini"
        return "GPT-4"
    if "gpt_5" in lowered or "gpt-5" in lowered:
        return "GPT-5-mini" if "mini" in lowered else "GPT-5"
    # BUGFIX: "distilgpt2" must be checked before "gpt2" — it contains
    # "gpt2" as a substring, so the old ordering made the DistilGPT-2
    # branch unreachable and mislabeled it as "GPT-2".
    if "distilgpt2" in lowered:
        return "DistilGPT-2"
    if "gpt2" in lowered or "gpt-2" in lowered:
        return "GPT-2"
    if "gpt-neo" in lowered:
        return "GPT-Neo-125M"
    return ""


def _clean_litellm_name(model_name: str) -> str:
    """Extract a short display name from an underscore-flattened
    ``litellm_*`` identifier (the caller has already ruled out names
    containing slashes).

    These flattened names come from database storage, where the original
    slash-separated LiteLLM ids had "/" and "-" replaced by "_".
    """
    parts = model_name.split("_")

    # Fireworks AI: litellm_fireworks_ai_accounts_fireworks_models_<model>
    # -> everything after the "models" segment.
    if ("fireworks" in model_name and "accounts" in model_name
            and "models" in model_name):
        try:
            models_idx = parts.index("models")
            return "-".join(parts[models_idx + 1:])
        except ValueError:
            pass

    # Together AI: litellm_together_ai_meta_llama_<Model_Name_Parts>
    # e.g. ..._meta_llama_Meta_Llama_3.1_8B_Instruct_Turbo
    #   -> "Meta-Llama-3.1-8B-Instruct-Turbo"
    if ("together" in model_name and "meta" in model_name
            and "llama" in model_name):
        for i, part in enumerate(parts):
            if (part == "meta" and i + 1 < len(parts)
                    and parts[i + 1] == "llama"):
                return "-".join(parts[i + 2:])

    # Groq: litellm_groq_<model>, with re-capitalization of well-known
    # model families for nicer display.
    if parts[1] == "groq" and len(parts) >= 3:
        cleaned = "-".join(parts[2:])
        if "llama3" in cleaned.lower():
            cleaned = cleaned.replace("llama3", "Llama-3")
        elif "qwen" in cleaned.lower():
            cleaned = cleaned.replace("qwen", "Qwen")
        elif "gemma" in cleaned.lower():
            cleaned = cleaned.replace("gemma", "Gemma")
        return cleaned

    # Generic fallback: drop the "litellm_<provider>_" prefix
    # (the first two underscore-separated parts).
    if len(parts) >= 3:
        return "-".join(parts[2:])

    # Too short to carry a provider prefix; just make it readable.
    return model_name.replace("_", "-")


def clean_model_name(model_name: str) -> str:
    """
    Clean up long model names to display only the essential model name.

    This function handles various model naming patterns from different
    providers:
    - LiteLLM models with provider prefixes
    - vLLM models with prefixes
    - Models with slash-separated paths
    - GPT model variants

    Args:
        model_name: Full model name from database
            (e.g., "litellm_together_ai_meta_llama_Meta_Llama_3.1...")

    Returns:
        Cleaned model name (e.g., "Meta-Llama-3.1-8B-Instruct-Turbo")

    Examples:
        >>> clean_model_name("litellm_together_ai/meta-llama/Meta-Llama-3.1-8B")
        'Meta-Llama-3.1-8B'
        >>> clean_model_name("litellm_fireworks_ai/accounts/fireworks/models/glm-4p5-air")
        'glm-4p5-air'
        >>> clean_model_name("vllm_Qwen2-7B-Instruct")
        'Qwen2-7B-Instruct'
        >>> clean_model_name("litellm_gpt-4-turbo")
        'GPT-4-turbo'
    """
    # Pass through empty / unknown values unchanged.
    if not model_name or model_name == "Unknown":
        return model_name

    # Placeholder names produced for the random-policy bot.
    if model_name in ("None", "random_None") or model_name.lower() == "random":
        return "Random Bot"

    # GPT-family models get canonical display names; fall through to the
    # generic rules when no known variant matches (e.g. "gpt-oss-...").
    if "gpt" in model_name.lower():
        display = _gpt_display_name(model_name.lower())
        if display:
            return display

    # LiteLLM ids that kept their slashes: the last path component is the
    # model name.
    if "litellm_" in model_name and "/" in model_name:
        return model_name.split("/")[-1].replace("_", "-")

    # vLLM models: strip the "vllm_" prefix.
    if model_name.startswith("vllm_"):
        return model_name[len("vllm_"):].replace("_", "-")

    # LiteLLM ids flattened to underscores (database storage form).
    if model_name.startswith("litellm_"):
        return _clean_litellm_name(model_name)

    # Other slash-separated paths (e.g. direct model paths): keep the
    # final component.
    if "/" in model_name:
        return model_name.split("/")[-1].replace("_", "-")

    # Default: replace underscores with dashes for readability.
    return model_name.replace("_", "-")