ajude committed
Commit 07a2d86 · 1 Parent(s): db1ab48

refactor(model_links and slider):


1. Removed the HTML link-building code from core.py.
2. Removed the secondary lookup table.
3. Now uses the HF dataset to set up the model metadata (see the sketch below).
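
In outline, the model metadata is now derived from columns of the leaderboard dataset itself instead of a hand-maintained table. A minimal sketch of the idea, with toy rows standing in for the real HF dataset (column names as in the core.py diff below):

import pandas as pd

# Toy stand-in for the rows loaded from the leaderboard dataset.
rows = pd.DataFrame({
    "Model_Name": ["Meta-Llama-3-8B", "Mistral-7B-v0.3"],
    "Model_Size": [8, 7],
    "Model_Link": ["meta-llama/Meta-Llama-3-8B", "mistralai/Mistral-7B-v0.3"],
})

# Build the lookups once, at init time, rather than hardcoding them:
model_size_dict = rows.set_index("Model_Name")["Model_Size"].to_dict()
model_link_dict = (
    rows.set_index("Model_Name")["Model_Link"]
        .apply(lambda x: "https://huggingface.co/" + str(x))
        .to_dict()
)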

Files changed (3)
  1. app.py +39 -29
  2. core.py +19 -16
  3. utils.py +7 -193
app.py CHANGED

@@ -17,9 +17,9 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem(
-            "🏅 LLM accuracy benchmark",
-            elem_id="llm-benchmark-tab-table-acc",
-            id=0,
+                "🏅 LLM accuracy benchmark",
+                elem_id="llm-benchmark-tab-table-acc",
+                id=0,
         ) as acc:
             with gr.Column():
                 with gr.Row():
@@ -44,8 +44,8 @@ with demo:
                         value=list(T_SYMBOLS.values()),
                     )
                 with gr.Column():
-                    model_sizes = RangeSlider(minimum=0,maximum=150,value=(7, 10),label="Select the number of parameters (B)")
-
+                    model_sizes = RangeSlider(minimum=0, maximum=150, value=(7, 10),
+                                              label="Select the number of parameters (B)")
 
                 with gr.Row():
                     langs_bar = gr.CheckboxGroup(
@@ -95,7 +95,8 @@ with demo:
                     scale=1,
                 )
                 select.click(
-                    lambda: gr.CheckboxGroup(value=core.get_available_task_groups(core.get_selected_task_type(0), True)),
+                    lambda: gr.CheckboxGroup(
+                        value=core.get_available_task_groups(core.get_selected_task_type(0), True)),
                     inputs=[],
                     outputs=shown_tasks,
                 )
@@ -104,9 +105,9 @@ with demo:
             leaderboard_table = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
 
         with gr.TabItem(
-            "🏅 LLM accuracy benchmark (Zero-Shot)",
-            elem_id="llm-benchmark-tab-table-acc-zeroshot",
-            id=3,
+                "🏅 LLM accuracy benchmark (Zero-Shot)",
+                elem_id="llm-benchmark-tab-table-acc-zeroshot",
+                id=3,
         ) as acc_zero_shot:
             with gr.Column():
                 with gr.Row():
@@ -117,7 +118,6 @@ with demo:
                         elem_id="search-bar",
                     )
 
-
                 with gr.Row():
                     with gr.Column():
                         model_types_zero_shot = gr.CheckboxGroup(
@@ -133,7 +133,7 @@ with demo:
                         )
                     with gr.Column():
                         model_sizes_zero_shot = RangeSlider(minimum=0, maximum=150, value=(7, 10),
-                            label="Select the number of parameters (B)")
+                                                            label="Select the number of parameters (B)")
 
                 with gr.Row():
                     langs_bar_zero_shot = gr.CheckboxGroup(
@@ -183,16 +183,18 @@ with demo:
                     scale=1,
                 )
                 select_zero_shot.click(
-                    lambda: gr.CheckboxGroup(value=core.get_available_task_groups(core.get_selected_task_type(3), False)),
+                    lambda: gr.CheckboxGroup(
+                        value=core.get_available_task_groups(core.get_selected_task_type(3), False)),
                     inputs=[],
                     outputs=shown_tasks_zero_shot,
                 )
-            leaderboard_table_zero_shot = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
+            leaderboard_table_zero_shot = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"],
+                                                       wrap=False)
 
         with gr.TabItem(
-            "🌐 LLM translation benchmark",
-            elem_id="llm-benchmark-tab-table-misc",
-            id=1,
+                "🌐 LLM translation benchmark",
+                elem_id="llm-benchmark-tab-table-misc",
+                id=1,
         ) as misc:
             with gr.Column():
                 with gr.Row():
@@ -218,8 +220,7 @@ with demo:
                         )
                     with gr.Column():
                         model_sizes_misc = RangeSlider(minimum=0, maximum=150, value=(7, 10),
-                            label="Select the number of parameters (B)")
-
+                                                       label="Select the number of parameters (B)")
 
                 with gr.Row():
                     langs_bar_misc = gr.CheckboxGroup(
@@ -269,7 +270,8 @@ with demo:
                     scale=1,
                 )
                 select_all_tasks_misc.click(
-                    lambda: gr.CheckboxGroup(value=core.get_available_task_groups(core.get_selected_task_type(1), False)),
+                    lambda: gr.CheckboxGroup(
+                        value=core.get_available_task_groups(core.get_selected_task_type(1), False)),
                     inputs=[],
                     outputs=shown_tasks_misc,
                 )
@@ -277,9 +279,9 @@ with demo:
             leaderboard_table_misc = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "30%"], wrap=False)
 
         with gr.TabItem(
-            "🌐 LLM MT-Bench benchmark",
-            elem_id="llm-benchmark-tab-table-mtbench",
-            id=2,
+                "🌐 LLM MT-Bench benchmark",
+                elem_id="llm-benchmark-tab-table-mtbench",
+                id=2,
         ) as mtbench:
             with gr.Column():
                 with gr.Row():
@@ -317,7 +319,8 @@ with demo:
                     outputs=langs_bar_mtbench,
                 )
 
-            leaderboard_table_mtbench = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "60%"], wrap=False)
+            leaderboard_table_mtbench = gr.Dataframe(datatype=["str", "markdown"], column_widths=[None, "60%"],
+                                                     wrap=False)
 
     for comp, fn in [
         (search_bar, "submit"),
@@ -342,7 +345,8 @@ with demo:
     ]:
         getattr(comp, fn)(
            core.update_df,
-           [shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, model_sizes_zero_shot, gr.State(value=False)],
+           [shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot,
+            model_sizes_zero_shot, gr.State(value=False)],
            leaderboard_table_zero_shot,
        )
 
@@ -355,7 +359,8 @@ with demo:
     ]:
         getattr(comp, fn)(
            core.update_df,
-           [shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc, gr.State(value=False)],
+           [shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc,
+            gr.State(value=False)],
            leaderboard_table_misc,
        )
 
@@ -365,7 +370,9 @@ with demo:
     ]:
         getattr(comp, fn)(
            core.update_df,
-           [gr.State(value=core.get_available_task_groups(core.get_selected_task_type(2), False)), search_bar_mtbench, langs_bar_mtbench, gr.State(value=[T_SYMBOLS["chat"]]), gr.State(value=False)], # TODO
+           [gr.State(value=core.get_available_task_groups(core.get_selected_task_type(2), False)),
+            search_bar_mtbench, langs_bar_mtbench, gr.State(value=[T_SYMBOLS["chat"]]), gr.State(value=False)],
+           # TODO
            leaderboard_table_mtbench,
        )
 
@@ -380,21 +387,24 @@ with demo:
     gr.Blocks.load(
         block=demo,
         fn=core.update_df,
-        inputs=[shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot, model_sizes_zero_shot, gr.State(value=False)],
+        inputs=[shown_tasks_zero_shot, search_bar_zero_shot, langs_bar_zero_shot, model_types_zero_shot,
+                model_sizes_zero_shot, gr.State(value=False)],
         outputs=leaderboard_table_zero_shot,
     )
 
     gr.Blocks.load(
         block=demo,
         fn=core.update_df,
-        inputs=[shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc, gr.State(value=False)],
+        inputs=[shown_tasks_misc, search_bar_misc, langs_bar_misc, model_types_misc, model_sizes_misc,
+                gr.State(value=False)],
         outputs=leaderboard_table_misc,
     )
 
     gr.Blocks.load(
         block=demo,
         fn=core.update_df,
-        inputs=[gr.State(value=core.get_available_task_groups(core.get_selected_task_type(2), False)), search_bar_mtbench, langs_bar_mtbench, gr.State(value=[T_SYMBOLS["chat"]]), gr.State(value=False)],
+        inputs=[gr.State(value=core.get_available_task_groups(core.get_selected_task_type(2), False)),
+                search_bar_mtbench, langs_bar_mtbench, gr.State(value=[T_SYMBOLS["chat"]]), gr.State(value=False)],
         outputs=leaderboard_table_mtbench,
     )
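
For reference, a minimal, self-contained sketch of the slider-to-table wiring used above, assuming RangeSlider comes from the gradio_rangeslider community component (its value is a (low, high) tuple); filter_by_size is a hypothetical stand-in for core.update_df, and the toy frame replaces the real leaderboard:

import gradio as gr
import pandas as pd
from gradio_rangeslider import RangeSlider  # assumed source of RangeSlider

# Toy leaderboard data.
DATA = pd.DataFrame({"Model_Name": ["A-7B", "B-13B"], "Size": [7, 13], "Score": [0.61, 0.68]})

def filter_by_size(size_range):
    # Keep rows whose size falls inside the selected (low, high) range.
    lo, hi = size_range
    return DATA[(DATA["Size"] >= lo) & (DATA["Size"] <= hi)]

with gr.Blocks() as demo:
    sizes = RangeSlider(minimum=0, maximum=150, value=(7, 10),
                        label="Select the number of parameters (B)")
    table = gr.Dataframe(value=DATA)
    sizes.change(filter_by_size, inputs=sizes, outputs=table)

demo.launch()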
core.py CHANGED

@@ -4,7 +4,7 @@ import os
 import numpy as np
 import pandas as pd
 from datasets import load_dataset
-from utils import model_hf_look_up_table_filter
+from utils import add_model_hyperlink
 
 import style
 
@@ -13,7 +13,7 @@ FEW_SHOT_ONLY = ["GSM8K", "TruthfulQA"]
 
 
 def init():
-    global repo_id, config_name, split_name, hidden_df, task_group_names_list, task_group_type_dict, task_groups_shots_dict, languages_list, model_type_dict, mt_bench_language_list
+    global repo_id, config_name, split_name, hidden_df, task_group_names_list, task_group_type_dict, task_groups_shots_dict, languages_list, model_type_dict, mt_bench_language_list, model_link_dict, model_size_dict
 
     repo_id = os.getenv("OGX_LEADERBOARD_DATASET_NAME")
     config_name = os.getenv("OGX_LEADERBOARD_DATASET_CONFIG")
@@ -33,6 +33,14 @@ def init():
     model_type_df = hidden_df[["Model_Name", "Model_Type"]].drop_duplicates()
     model_type_dict = model_type_df.set_index("Model_Name")["Model_Type"].to_dict()
 
+    model_size_df = hidden_df[["Model_Name", "Model_Size"]].drop_duplicates()
+    model_size_df['Model_Size'] = model_size_df['Model_Size'].fillna(0)
+    model_size_dict = model_size_df.set_index("Model_Name")["Model_Size"].to_dict()
+
+    model_link_df = hidden_df[["Model_Name", "Model_Link"]].drop_duplicates()
+    model_link_df["Model_Link"] = model_link_df["Model_Link"].apply(lambda x: f"https://huggingface.co/" + str(x))
+    model_link_dict = model_link_df.set_index("Model_Name")["Model_Link"].to_dict()
+
     hidden_df = hidden_df.pivot_table(
         columns=["Task_Group", "Few_Shot", "Language"],
         index=["Model_Name"],
@@ -43,19 +51,8 @@
     hidden_df["Type"] = hidden_df["Model_Name"].apply(lambda x: style.T_SYMBOLS[model_type_dict[x]])
 
 
-def model_hyperlink(link, model_name):
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"> {model_name} </a>'
-
-
-def make_clickable_model(model_name):
-    link = f"https://huggingface.co/" + model_hf_look_up_table_filter[model_name]['link']
-    return model_hyperlink(link, model_name)
-
-
 def sort_cols(df: pd.DataFrame, fewshot: bool = False) -> pd.DataFrame:
     task_cols = get_task_columns(df)
-    df['Model_Name'] = df['Model_Name'].apply(
-        lambda x: make_clickable_model(x) if x in model_hf_look_up_table_filter else x)
     return df.reindex(["Type", "Model_Name", "Average"] + sorted(task_cols), axis=1)
 
 
@@ -76,6 +73,13 @@ def filter_type(df: pd.DataFrame, model_types: list[str]) -> pd.DataFrame:
     return df[df["Type"].isin(model_types)]
 
 
+def filter_model_size(df: pd.DataFrame, model_sizes, lookup: dict):
+    filtered_model_size = [model_name for model_name, model_size in lookup.items() if
+                           model_sizes[0] <= model_size <= model_sizes[1]]
+    filtered_df = df[df['Model_Name'].isin(filtered_model_size)]
+    return filtered_df
+
+
 def search_model(df: pd.DataFrame, query: str) -> pd.DataFrame:
     """Keep only rows for which model name matches search query"""
     query = query.replace(";", "|")
@@ -134,10 +138,9 @@ def update_df(
         df = filter_type(df, model_types)
 
     if model_sizes:
-        result = [key for key, value in model_hf_look_up_table_filter.items() if
-                  (value.get("model_size") >= model_sizes[0] and value.get("model_size") <= model_sizes[1])]
-        df = df[df['Model_Name'].isin(result)]
+        df = filter_model_size(df=df, model_sizes=model_sizes, lookup=model_size_dict)
 
+    df = add_model_hyperlink(df, model_link_dict)
     if format:
         return sort_cols(df, fewshot).style.format(precision=2, decimal=".", na_rep="N/A")
     else:
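
The new filter_model_size is a pure function over a name-to-size lookup, so it can be checked in isolation. A quick sketch with toy data (the function body mirrors the one added above):

import pandas as pd

def filter_model_size(df, model_sizes, lookup):  # as added in core.py
    # Keep models whose reported size lies inside the slider range.
    names = [m for m, s in lookup.items() if model_sizes[0] <= s <= model_sizes[1]]
    return df[df["Model_Name"].isin(names)]

df = pd.DataFrame({"Model_Name": ["Aya-23-8B", "Bloom-7b1", "Phi-3-mini-4k-Instruct"]})
sizes = {"Aya-23-8B": 8, "Bloom-7b1": 7, "Phi-3-mini-4k-Instruct": 3.8}
print(filter_model_size(df, (7, 10), sizes)["Model_Name"].tolist())
# ['Aya-23-8B', 'Bloom-7b1']

Note that init() fills missing Model_Size values with 0, so models without a reported size pass this filter only when the slider's lower bound is 0; at the default (7, 10) range they are hidden.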
utils.py CHANGED

@@ -1,193 +1,7 @@
-model_hf_look_up_table_filter = {
-    "Aya-23-8B": {
-        "link": "CohereForAI/aya-23-8B",
-        "model_size": 8
-    },
-    "Bloom-7b1": {
-        "link": "bigscience/bloom-7b1",
-        "model_size": 7,
-    },
-    "Bloomz-7b1": {
-        "link": "bigscience/bloomz-7b1",
-        "model_size": 7,
-    },
-    "Meta-Llama-2-7B": {
-        "link": "meta-llama/Llama-2-7b",
-        "model_size": 7,
-    },
-    "Gemma-7b": {
-        "link": "google/gemma-7b",
-        "model_size": 7,
-    },
-    "Gemma-1.1-7b-Instruct": {
-        "link": "google/gemma-1.1-7b-it",
-        "model_size": 7,
-    },
-    "Meta-Llama-3-8B": {
-        "link": "meta-llama/Meta-Llama-3-8B",
-        "model_size": 8
-    },
-    "Meta-Llama-3-8B-Instruct": {
-        "link": "meta-llama/Meta-Llama-3-8B-Instruct",
-        "model_size": 8
-    },
-    "Mistral-7B-Instruct-v0.3": {
-        "link": "mistralai/Mistral-7B-Instruct-v0.3",
-        "model_size": 7
-    },
-    "Mistral-7B-Instruct-v0.1": {
-        "link": "mistralai/Mistral-7B-Instruct-v0.1",
-        "model_size": 7
-    },
-    "Mistral-7B-Instruct-v0.2": {
-        "link": "mistralai/Mistral-7B-Instruct-v0.2",
-        "model_size": 7
-    },
-    "Mistral-7B-v0.1": {
-        "link": "mistralai/Mistral-7B-v0.1",
-        "model_size": 7
-    },
-    "Mistral-7B-v0.3": {
-        "link": "mistralai/Mistral-7B-v0.3",
-        "model_size": 7
-    },
-    "Occiglot-7b-eu5": {
-        "link": "occiglot/occiglot-7b-eu5",
-        "model_size": 7
-    },
-    "Occiglot-7b-eu5-Instruct": {
-        "link": "occiglot/occiglot-7b-eu5-instruct",
-        "model_size": 7
-    },
-    "Phi-3-mini-4k-Instruct": {
-        "link": "microsoft/Phi-3-mini-4k-instruct",
-        "model_size": 3.8
-    },
-    "Qwen2-7B": {
-        "link": "Qwen/Qwen2-7B-Instruct",
-        "model_size": 7
-    },
-    "Qwen2-7B-Instruct": {
-        "link": "Qwen/Qwen2-7B-Instruct",
-        "model_size": 7
-    },
-    "7B_24EU_2.5T_bactrianx17_bb_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2.5T_bactrianx5_bb_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2.5T_honey_ckp2701": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2T_bactrianx17_bb_ckp2": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2T_bactrianx5_bb_ckp2": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2.86T_EP5_iter_0681300": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_2.86T_iter_0602100": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.45T_bactrianx17_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.45T_bactrianx17_bb_ckp2": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.45T_bactrianx5_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.65T_bactrianx17_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.65T_bactrianx17_bb_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_24EU_1.65T_bactrianx5_ckp1": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EN_200B_iter_0047683": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EQUAL_200B_iter_0046950": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_1.1T_iter_0236250": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_1.45T_iter_0346050": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_1.65T_iter_0393075": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2.5T_DE_213B": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2.5T_DE_262B": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2.5T_iter_0602100": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2T_iter_0477675": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2T_iter_0477900": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_2T_iter_0478125": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_3T_oscar_iter_0715255": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_3T_fw_iter_0715255": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_fw_3T_honey_ckp1350": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_fw_3.1T_iter_0025875": {
-        "link": "",
-        "model_size": 7
-    },
-    "7B_EU24_1.1T_bactrianx_ckp2": {
-        "link": "",
-        "model_size": 7
-    },
-
-
-
-}
+def add_model_hyperlink(df, lookup):
+    df["Model_Name"] = df["Model_Name"].apply(
+        lambda
+            x: f'<a target="_blank" href="{lookup[x]}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"> {x} </a>' if
+        x in lookup else x
+    )
+    return df
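
A quick usage check for the new helper (toy frame; in the app the lookup is the model_link_dict built in core.init):

import pandas as pd
from utils import add_model_hyperlink

df = pd.DataFrame({"Model_Name": ["Aya-23-8B", "7B_EN_200B_iter_0047683"]})
links = {"Aya-23-8B": "https://huggingface.co/CohereForAI/aya-23-8B"}

df = add_model_hyperlink(df, links)
# Names present in the lookup become <a> tags (rendered via the "markdown"
# column type in gr.Dataframe); names without a link pass through unchanged.
print(df["Model_Name"].tolist())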