Update app.py
app.py
CHANGED
@@ -256,7 +256,7 @@ def get_normalized_df(df):
     # final_score = df.drop('name', axis=1).sum(axis=1)
     # df.insert(1, 'Overall Score', final_score)
     normalize_df = df.copy().fillna(0.0)
-    for column in normalize_df.columns[1:-
+    for column in normalize_df.columns[1:-5]:
         min_val = NORMALIZE_DIC[column]['Min']
         max_val = NORMALIZE_DIC[column]['Max']
         normalize_df[column] = (normalize_df[column] - min_val) / (max_val - min_val)
@@ -264,7 +264,7 @@ def get_normalized_df(df):
 
 def get_normalized_i2v_df(df):
     normalize_df = df.copy().fillna(0.0)
-    for column in normalize_df.columns[1:-
+    for column in normalize_df.columns[1:-5]:
         min_val = NORMALIZE_DIC_I2V[column]['Min']
         max_val = NORMALIZE_DIC_I2V[column]['Max']
         normalize_df[column] = (normalize_df[column] - min_val) / (max_val - min_val)
@@ -308,8 +308,12 @@ def calculate_selected_score_i2v(df, selected_columns):
 def get_final_score(df, selected_columns):
     normalize_df = get_normalized_df(df)
     #final_score = normalize_df.drop('name', axis=1).sum(axis=1)
-
-
+    try:
+        for name in normalize_df.drop('Model Name (clickable)', axis=1).drop("Sampled by", axis=1).drop('Mail', axis=1).drop('Date',axis=1).drop("Evaluated by", axis=1).drop("Accessibility", axis=1):
+            normalize_df[name] = normalize_df[name]*DIM_WEIGHT[name]
+    except:
+        for name in normalize_df.drop('Model Name (clickable)', axis=1).drop("Sampled by", axis=1).drop('Mail', axis=1).drop('Date',axis=1):
+            normalize_df[name] = normalize_df[name]*DIM_WEIGHT[name]
     quality_score = normalize_df[QUALITY_LIST].sum(axis=1)/sum([DIM_WEIGHT[i] for i in QUALITY_LIST])
     semantic_score = normalize_df[SEMANTIC_LIST].sum(axis=1)/sum([DIM_WEIGHT[i] for i in SEMANTIC_LIST ])
     final_score = (quality_score * QUALITY_WEIGHT + semantic_score * SEMANTIC_WEIGHT) / (QUALITY_WEIGHT + SEMANTIC_WEIGHT)
@@ -335,7 +339,7 @@ def get_final_score(df, selected_columns):
 def get_final_score_i2v(df, selected_columns):
     normalize_df = get_normalized_i2v_df(df)
     #final_score = normalize_df.drop('name', axis=1).sum(axis=1)
-    for name in normalize_df.drop('Model Name (clickable)', axis=1).drop('Video-Text Camera Motion', axis=1).drop(
+    for name in normalize_df.drop('Model Name (clickable)', axis=1).drop('Video-Text Camera Motion', axis=1).drop("Source", axis=1).drop('Mail', axis=1).drop('Date',axis=1):
         normalize_df[name] = normalize_df[name]*DIM_WEIGHT_I2V[name]
     quality_score = normalize_df[I2V_QUALITY_LIST].sum(axis=1)/sum([DIM_WEIGHT_I2V[i] for i in I2V_QUALITY_LIST])
     i2v_score = normalize_df[I2V_LIST].sum(axis=1)/sum([DIM_WEIGHT_I2V[i] for i in I2V_LIST ])
@@ -388,9 +392,10 @@ def get_baseline_df():
     df = get_final_score(df, checkbox_group.value)
     df = df.sort_values(by="Selected Score", ascending=False)
     present_columns = MODEL_INFO + checkbox_group.value
-    print(present_columns)
+    # print(present_columns)
     df = df[present_columns]
-
+    # Add this line to display the results evaluated by VBench by default
+    df = df[df['Evaluated by'] == 'VBench Team']
     df = convert_scores_to_percentage(df)
     return df
 
@@ -412,7 +417,7 @@ def get_baseline_df_i2v():
     df = get_final_score_i2v(df, checkbox_group_i2v.value)
     df = df.sort_values(by="Selected Score", ascending=False)
     present_columns = MODEL_INFO_TAB_I2V + checkbox_group_i2v.value
-    # df = df[df[
+    # df = df[df["Sampled by"] == 'VBench Team']
     df = df[present_columns]
     df = convert_scores_to_percentage(df)
     return df
@@ -424,7 +429,7 @@ def get_baseline_df_long():
     df = get_final_score(df, checkbox_group.value)
     df = df.sort_values(by="Selected Score", ascending=False)
     present_columns = MODEL_INFO + checkbox_group.value
-    # df = df[df[
+    # df = df[df["Sampled by"] == 'VBench Team']
     df = df[present_columns]
     df = convert_scores_to_percentage(df)
     return df
@@ -463,15 +468,19 @@ def get_all_df_long(selected_columns, dir=LONG_DIR):
 
 
 def convert_scores_to_percentage(df):
-    #
-
-    if 'Source' in df.columns:
+    # Operate on every column in the DataFrame (except the 'name' column)
+    if "Sampled by" in df.columns:
         skip_col =3
     else:
         skip_col =1
+    print(df)
     for column in df.columns[skip_col:]: # assume the first column is 'name'
-
-
+        # if df[column].isdigit():
+            # print(df[column])
+        is_numeric = pd.to_numeric(df[column], errors='coerce').notna().all()
+        if is_numeric:
+            df[column] = round(df[column] * 100,2)
+            df[column] = df[column].apply(lambda x: f"{x:05.2f}") + '%'
     return df
 
 def choose_all_quailty():
@@ -487,10 +496,12 @@ def enable_all():
     return gr.update(value=TASK_INFO)
 
 # select function
-def on_filter_model_size_method_change(selected_columns,
+def on_filter_model_size_method_change(selected_columns, vbench_team_sample, vbench_team_eval=False):
     updated_data = get_all_df(selected_columns, CSV_DIR)
-    if
-        updated_data = updated_data[updated_data[
+    if vbench_team_sample:
+        updated_data = updated_data[updated_data["Sampled by"] == 'VBench Team']
+    if vbench_team_eval:
+        updated_data = updated_data[updated_data['Evaluated by'] == 'VBench Team']
     #print(updated_data)
     # columns:
     selected_columns = [item for item in TASK_INFO if item in selected_columns]
@@ -499,6 +510,7 @@ def on_filter_model_size_method_change(selected_columns, only_vbench_team):
     updated_data = updated_data.sort_values(by="Selected Score", ascending=False)
     updated_data = convert_scores_to_percentage(updated_data)
     updated_headers = present_columns
+    print(COLUMN_NAMES,updated_headers,DATA_TITILE_TYPE )
     update_datatype = [DATA_TITILE_TYPE[COLUMN_NAMES.index(x)] for x in updated_headers]
     # print(updated_data,present_columns,update_datatype)
     filter_component = gr.components.Dataframe(
@@ -533,10 +545,12 @@ def on_filter_model_size_method_change_quality(selected_columns):
     )
     return filter_component#.value
 
-def on_filter_model_size_method_change_i2v(selected_columns,
+def on_filter_model_size_method_change_i2v(selected_columns,vbench_team_sample, vbench_team_eval=False):
     updated_data = get_all_df_i2v(selected_columns, I2V_DIR)
-    if
-        updated_data = updated_data[updated_data[
+    if vbench_team_sample:
+        updated_data = updated_data[updated_data["Sampled by"] == 'VBench Team']
+    # if vbench_team_eval:
+    #     updated_data = updated_data[updated_data['Eval'] == 'VBench Team']
     selected_columns = [item for item in I2V_TAB if item in selected_columns]
     present_columns = MODEL_INFO_TAB_I2V + selected_columns
     updated_data = updated_data[present_columns]
@@ -555,10 +569,12 @@ def on_filter_model_size_method_change_i2v(selected_columns,only_vbench_team):
     )
     return filter_component#.value
 
-def on_filter_model_size_method_change_long(selected_columns,
+def on_filter_model_size_method_change_long(selected_columns, vbench_team_sample, vbench_team_eval=False):
     updated_data = get_all_df_long(selected_columns, LONG_DIR)
-    if
-        updated_data = updated_data[updated_data[
+    if vbench_team_sample:
+        updated_data = updated_data[updated_data["Sampled by"] == 'VBench Team']
+    if vbench_team_eval:
+        updated_data = updated_data[updated_data['Evaluated by'] == 'VBench Team']
     selected_columns = [item for item in TASK_INFO if item in selected_columns]
     present_columns = MODEL_INFO + selected_columns
     updated_data = updated_data[present_columns]
@@ -607,6 +623,11 @@ with block:
 
             with gr.Column(scale=0.8):
                 vbench_team_filter = gr.Checkbox(
+                    label="Sampled by VBench Team (Uncheck to view all submissions)",
+                    value=False,
+                    interactive=True
+                )
+                vbench_validate_filter = gr.Checkbox(
                     label="Evaluated by VBench Team (Uncheck to view all submissions)",
                     value=True,
                     interactive=True
@@ -629,12 +650,13 @@ with block:
                 height=700,
             )
 
-            choosen_q.click(choose_all_quailty, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter], outputs=data_component)
-            choosen_s.click(choose_all_semantic, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter], outputs=data_component)
+            choosen_q.click(choose_all_quailty, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter,vbench_validate_filter], outputs=data_component)
+            choosen_s.click(choose_all_semantic, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter,vbench_validate_filter], outputs=data_component)
             # enable_b.click(enable_all, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter], outputs=data_component)
-            disable_b.click(disable_all, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter], outputs=data_component)
-            checkbox_group.change(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter], outputs=data_component)
-            vbench_team_filter.change(fn=on_filter_model_size_method_change, inputs=[checkbox_group, vbench_team_filter], outputs=data_component)
+            disable_b.click(disable_all, inputs=None, outputs=[checkbox_group]).then(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter, vbench_validate_filter], outputs=data_component)
+            checkbox_group.change(fn=on_filter_model_size_method_change, inputs=[ checkbox_group, vbench_team_filter, vbench_validate_filter], outputs=data_component)
+            vbench_team_filter.change(fn=on_filter_model_size_method_change, inputs=[checkbox_group, vbench_team_filter, vbench_validate_filter], outputs=data_component)
+            vbench_validate_filter.change(fn=on_filter_model_size_method_change, inputs=[checkbox_group, vbench_team_filter, vbench_validate_filter], outputs=data_component)
         # Table 1
         with gr.TabItem("Video Quaity", elem_id="vbench-tab-table", id=2):
             with gr.Accordion("INSTRUCTION", open=False):
@@ -678,11 +700,17 @@ with block:
             with gr.Row():
                 with gr.Column(scale=1.0):
                     # selection for column part:
-
-
-
-
-
+                    with gr.Row():
+                        vbench_team_filter_i2v = gr.Checkbox(
+                            label="Sampled by VBench Team (Uncheck to view all submissions)",
+                            value=False,
+                            interactive=True
+                        )
+                        vbench_validate_filter_i2v = gr.Checkbox(
+                            label="Evaluated by VBench Team (Uncheck to view all submissions)",
+                            value=False,
+                            interactive=True
+                        )
                     checkbox_group_i2v = gr.CheckboxGroup(
                         choices=I2V_TAB,
                         value=I2V_TAB,
@@ -699,8 +727,9 @@ with block:
                 visible=True,
             )
 
-            checkbox_group_i2v.change(fn=on_filter_model_size_method_change_i2v, inputs=[checkbox_group_i2v, vbench_team_filter_i2v], outputs=data_component_i2v)
-            vbench_team_filter_i2v.change(fn=on_filter_model_size_method_change_i2v, inputs=[checkbox_group_i2v, vbench_team_filter_i2v], outputs=data_component_i2v)
+            checkbox_group_i2v.change(fn=on_filter_model_size_method_change_i2v, inputs=[checkbox_group_i2v, vbench_team_filter_i2v,vbench_validate_filter_i2v], outputs=data_component_i2v)
+            vbench_team_filter_i2v.change(fn=on_filter_model_size_method_change_i2v, inputs=[checkbox_group_i2v, vbench_team_filter_i2v,vbench_validate_filter_i2v], outputs=data_component_i2v)
+            vbench_validate_filter_i2v.change(fn=on_filter_model_size_method_change_i2v, inputs=[checkbox_group_i2v, vbench_team_filter_i2v,vbench_validate_filter_i2v], outputs=data_component_i2v)
 
         with gr.TabItem("📊 VBench-Long", elem_id="vbench-tab-table", id=4):
             with gr.Row():
@@ -723,11 +752,17 @@ with block:
                     disable_b_long = gr.Button("Deselect All")
 
                 with gr.Column(scale=0.8):
-
-
-
-
-
+                    with gr.Row():
+                        vbench_team_filter_long = gr.Checkbox(
+                            label="Sampled by VBench Team (Uncheck to view all submissions)",
+                            value=False,
+                            interactive=True
+                        )
+                        vbench_validate_filter_long = gr.Checkbox(
+                            label="Evaluated by VBench Team (Uncheck to view all submissions)",
+                            value=False,
+                            interactive=True
+                        )
                     checkbox_group_long = gr.CheckboxGroup(
                         choices=TASK_INFO,
                         value=DEFAULT_INFO,
@@ -745,12 +780,14 @@ with block:
                 height=700,
             )
 
-            choosen_q_long.click(choose_all_quailty, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long], outputs=data_component)
-            choosen_s_long.click(choose_all_semantic, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long], outputs=data_component)
-            enable_b_long.click(enable_all, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long], outputs=data_component)
-            disable_b_long.click(disable_all, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long], outputs=data_component)
-            checkbox_group_long.change(fn=on_filter_model_size_method_change_long, inputs=[checkbox_group_long, vbench_team_filter_long], outputs=data_component)
-            vbench_team_filter_long.change(fn=on_filter_model_size_method_change_long, inputs=[checkbox_group_long, vbench_team_filter_long], outputs=data_component)
+            choosen_q_long.click(choose_all_quailty, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long, vbench_validate_filter_long], outputs=data_component)
+            choosen_s_long.click(choose_all_semantic, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long, vbench_validate_filter_long], outputs=data_component)
+            enable_b_long.click(enable_all, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long, vbench_validate_filter_long], outputs=data_component)
+            disable_b_long.click(disable_all, inputs=None, outputs=[checkbox_group_long]).then(fn=on_filter_model_size_method_change_long, inputs=[ checkbox_group_long, vbench_team_filter_long, vbench_validate_filter_long], outputs=data_component)
+            checkbox_group_long.change(fn=on_filter_model_size_method_change_long, inputs=[checkbox_group_long, vbench_team_filter_long,vbench_validate_filter_long], outputs=data_component)
+            vbench_team_filter_long.change(fn=on_filter_model_size_method_change_long, inputs=[checkbox_group_long, vbench_team_filter_long,vbench_validate_filter_long], outputs=data_component)
+            vbench_validate_filter_long.change(fn=on_filter_model_size_method_change_long, inputs=[checkbox_group_long, vbench_team_filter_long,vbench_validate_filter_long], outputs=data_component)
+
         # table info
         with gr.TabItem("📝 About", elem_id="mvbench-tab-table", id=5):
             gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")
@@ -775,6 +812,7 @@ with block:
                     revision_name_textbox = gr.Textbox(
                         label="Revision Model Name(Optional)", placeholder="If you need to update the previous results, please fill in this line"
                     )
+                    access_type = gr.Dropdown(["Open Source", "Ready to Open Source", "API", "Close"], label="Please select the way user can access your model. You can update the content by revision_name, or contact the VBench Team.")
 
                 with gr.Column():
                     model_link = gr.Textbox(
@@ -815,6 +853,7 @@ with block:
                     team_name,
                     contact_email,
                     release_time,
+                    access_type,
                     model_resolution,
                     model_fps,
                     model_frame,
@@ -885,6 +924,7 @@ with block:
                    team_name_i2v,
                    contact_email_i2v,
                    release_time_i2v,
+                   access_type,
                    model_resolution_i2v,
                    model_fps_i2v,
                    model_frame_i2v,