koustuvs committed
Commit 5f0fbb6 · 0 Parent(s)

Initial commit
Files changed (11)
  1. .gitattributes +35 -0
  2. .gitignore +13 -0
  3. .pre-commit-config.yaml +53 -0
  4. Makefile +13 -0
  5. README.md +19 -0
  6. app.py +945 -0
  7. content.py +90 -0
  8. pyproject.toml +13 -0
  9. requirements.txt +16 -0
  10. scorer.py +104 -0
  11. utils.py +15 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,13 @@
+ auto_evals/
+ venv/
+ __pycache__/
+ .env
+ .ipynb_checkpoints
+ *ipynb
+ .vscode/
+
+ eval-queue/
+ eval-results/
+ eval-queue-bk/
+ eval-results-bk/
+ logs/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ default_language_version:
+   python: python3
+
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: quarterly
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.3.0
+     hooks:
+       - id: check-yaml
+       - id: check-case-conflict
+       - id: detect-private-key
+       - id: check-added-large-files
+         args: ['--maxkb=1000']
+       - id: requirements-txt-fixer
+       - id: end-of-file-fixer
+       - id: trailing-whitespace
+
+   - repo: https://github.com/PyCQA/isort
+     rev: 5.12.0
+     hooks:
+       - id: isort
+         name: Format imports
+
+   - repo: https://github.com/psf/black
+     rev: 22.12.0
+     hooks:
+       - id: black
+         name: Format code
+         additional_dependencies: ['click==8.0.2']
+
+   - repo: https://github.com/charliermarsh/ruff-pre-commit
+     # Ruff version.
+     rev: 'v0.0.267'
+     hooks:
+       - id: ruff
Makefile ADDED
@@ -0,0 +1,13 @@
+ .PHONY: style quality
+
+
+ style:
+ 	python -m black --line-length 119 .
+ 	python -m isort .
+ 	ruff check --fix .
+
+
+ quality:
+ 	python -m black --check --line-length 119 .
+ 	python -m isort --check-only .
+ 	ruff check .
README.md ADDED
@@ -0,0 +1,19 @@
+ ---
+ title: 'Leaderboard: Physical Reasoning from Video'
+ emoji: 🏃
+ colorFrom: blue
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 5.23.1
+ app_file: app.py
+ pinned: false
+ license: mit
+ hf_oauth: true
+ ---
+
+ # Local debugging
+
+ Run `python app.py` to get a local Gradio link where you can view the leaderboard.
+
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
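For a fully offline sanity check (no access to the private results dataset), a minimal sketch like the following exercises the table-building path using the dummy rows defined in `app.py` below. The flag and helper names (`LOCAL_DEBUG`, `create_dummy_data`, `get_dataframe_from_results`) come from `app.py`; the snippet assumes the pinned requirements are installed and is illustrative rather than an official debug entry point.

```python
# Minimal offline sketch: build the leaderboard table from the dummy rows in app.py,
# without touching the private Hub datasets. Assumes `pip install -r requirements.txt`.
import app

app.LOCAL_DEBUG = True  # mock Hub uploads if you later exercise add_new_eval()
dummy_results, _ = app.create_dummy_data()
df = app.get_dataframe_from_results(eval_results=dummy_results, split="test")
print(df.to_string(index=False))
```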
app.py ADDED
@@ -0,0 +1,945 @@
+ import copy
+ import datetime
+ import json
+ import os
+ from email.utils import parseaddr
+ import re
+
+ import gradio as gr
+ import numpy as np
+ import pandas as pd
+ from apscheduler.schedulers.background import BackgroundScheduler
+ from datasets import Dataset, DatasetDict, VerificationMode, get_dataset_config_names, load_dataset
+ from huggingface_hub import HfApi
+
+ from content import (
+     CITATION_BUTTON_LABEL,
+     CITATION_BUTTON_TEXT,
+     INTRODUCTION_TEXT,
+     SUBMISSION_TEXT,
+     TITLE,
+     format_error,
+     format_log,
+     format_warning,
+     model_hyperlink,
+ )
+
+ TOKEN = os.environ.get("HF_TOKEN", None)
+
+
+ OWNER = "facebook"
+ ## private datasets
+ SUBMISSION_DATASET = f"{OWNER}/pwm_leaderboard_submissions_internal"
+ CONTACT_DATASET = f"{OWNER}/pwm_leaderboard_contact_info_internal"
+ ## public datasets
+ RESULTS_DATASET = f"{OWNER}/pwm_leaderboard_results_public"
+ LEADERBOARD_PATH = f"{OWNER}/pwm_leaderboard"
+ DATA_VERSION = "1.0.0"
+
+ # Dataset paths
+ MVP_DATASET = "facebook/minimal_video_pairs"
+ INTP_DATASET = "facebook/IntPhys2_test"
+ WMQA_DATASET = "facebook/CausalVQA"
+
+ # Dataset names
+ MVP_NAME = "MVPBench"
+ INTP_NAME = "IntPhys 2"
+ WMQA_NAME = "CausalVQA"
+
+ # Dataset keys
+ MVP_KEY = "mvp"
+ MVP_MINI_KEY = "mvp_mini"
+ INTP_KEY = "intphys2"
+ WMQA_KEY = "causalvqa"
+
+ TASKS = [
+     (INTP_KEY, INTP_NAME),
+     (MVP_KEY, MVP_NAME),
+     (WMQA_KEY, WMQA_NAME),
+ ]
+ VISIBLE_TASKS = copy.deepcopy(TASKS)
+ PRE_COL_NAMES = ["Model Name"]
+ POST_COL_NAMES = ["Model Type", "Vision Backbone", "LLM Backbone", "Submission Date"]
+
+
+ api = HfApi()
+
+ os.makedirs("scored", exist_ok=True)
+
+ LOCAL_DEBUG = False
+
+ # Display the results
+
+ LDB_TEXT_KEYS = ["model", "model_type", "vision_backbone", "llm_backbone"]
+ LDB_TEXT_TYPES = ["markdown", "text", "text", "text"]
+ MISSING_VALUE = -1.0
+
+ HUMAN_BASELINES = {
+     "url": "",
+     "model": "Human",
+     "model_type": "Human",
+     "system_prompt": "test",
+     "vision_backbone": " - ",
+     "llm_backbone": " - ",
+     "num_frames": -1,
+     f"score_{INTP_KEY}": 92.44,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 92.9,
+     f"score_{WMQA_KEY}": 84.78,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+
+ GEMINI2_5 = {
+     "url": "https://deepmind.google/models/gemini/flash/",
+     "model": "Gemini 2.5 Flash",
+     "model_type": "Closed",
+     "system_prompt": "test",
+     "vision_backbone": " - ",
+     "llm_backbone": " - ",
+     "num_frames": 10,
+     f"score_{INTP_KEY}": 56.1,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": MISSING_VALUE,
+     f"score_{WMQA_KEY}": 61.66,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ GPT4O = {
+     "url": "https://openai.com/index/gpt-4o-system-card/",
+     "model": "GPT-4o",
+     "model_type": "Closed",
+     "system_prompt": "test",
+     "vision_backbone": " - ",
+     "llm_backbone": " - ",
+     "num_frames": 10,
+     f"score_{INTP_KEY}": 53.19,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 32.5,
+     f"score_{WMQA_KEY}": 50.95,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ INTERN_VL = {
+     "url": "https://internvl.github.io/blog/2024-12-05-InternVL-2.5/",
+     "model": "InternVL2.5",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": "InternViT-300M",
+     "llm_backbone": "InternLM2.5-7B-Chat",
+     "num_frames": 16,
+     f"score_{INTP_KEY}": MISSING_VALUE,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 39.9,
+     f"score_{WMQA_KEY}": 47.54,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ LLAVA = {
+     "url": "https://huggingface.co/lmms-lab/llava-onevision-qwen2-7b-ov",
+     "model": "LLaVA-OneVision",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": "SigLIP",
+     "llm_backbone": "Qwen2-7B",
+     "num_frames": 16,
+     f"score_{INTP_KEY}": MISSING_VALUE,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 20.7,
+     f"score_{WMQA_KEY}": 45.27,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ PLM = {
+     "url": "https://github.com/facebookresearch/perception_models",
+     "model": "Perception Language Model (PLM)",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": "PE",
+     "llm_backbone": "Llama3.1 8B",
+     "num_frames": 16,
+     f"score_{INTP_KEY}": MISSING_VALUE,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 39.7,
+     f"score_{WMQA_KEY}": 50.06,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ QWENVL = {
+     "url": "https://github.com/QwenLM/Qwen2.5-VL",
+     "model": "Qwen2.5-VL",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": "ViT",
+     "llm_backbone": "Qwen2.5-7B-Instruct",
+     "num_frames": 16,
+     f"score_{INTP_KEY}": 49.12,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 36.7,
+     f"score_{WMQA_KEY}": 49.05,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ GEMINI1_5 = {
+     "url": "https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/1-5-pro",
+     "model": "Gemini 1.5 Pro",
+     "model_type": "Closed",
+     "system_prompt": "test",
+     "vision_backbone": " - ",
+     "llm_backbone": " - ",
+     "num_frames": -1,
+     f"score_{INTP_KEY}": 52.1,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 29.6,
+     f"score_{WMQA_KEY}": MISSING_VALUE,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ VJEPA2 = {
+     "url": "https://ai.meta.com/vjepa/",
+     "model": "V-JEPA 2",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": "VJEPA 2",
+     "llm_backbone": "Llama3.1 8B",
+     "num_frames": -1,
+     f"score_{INTP_KEY}": 56.4,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": 44.5,
+     f"score_{WMQA_KEY}": 38.99,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+ COSMOS = {
+     "url": "https://huggingface.co/nvidia/Cosmos-1.0-Autoregressive-4B",
+     "model": "Cosmos-4B",
+     "model_type": "Open",
+     "system_prompt": "test",
+     "vision_backbone": " - ",
+     "llm_backbone": " - ",
+     "num_frames": -1,
+     f"score_{INTP_KEY}": 48.84,
+     f"score_{MVP_KEY}": MISSING_VALUE,
+     f"score_{MVP_MINI_KEY}": MISSING_VALUE,
+     f"score_{WMQA_KEY}": MISSING_VALUE,
+     "date": "2025-06-11",
+     "organization": "Meta",
+     "submitted_by": "user",
+ }
+
+
+ def get_dataframe_from_results(eval_results, split):
+     local_df = eval_results[split]
+     local_df = local_df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
+     local_df = local_df.remove_columns(["system_prompt"])  # , "url"])
+
+     df = pd.DataFrame(local_df)
+     # reformat the data to keep a single row for a given model and organization pair
+     # in case of multiple entries, choose the ones with latest values
+     df["model_org"] = df["model"].str.cat(df["organization"], sep="-")
+     ldb_m2r = {}
+     for i, row in df.iterrows():
+         if row["model_org"] not in ldb_m2r:
+             ldb_m2r[row["model_org"]] = {}
+
+         prev_d = ldb_m2r[row["model_org"]]
+         new_d = {}
+         for key in LDB_TEXT_KEYS:
+             new_d[key] = row[key] if len(row[key]) > 0 else prev_d.get(key, "NA")
+         for tname, _ in TASKS:
+             new_d[f"score_{tname}"] = (
+                 row[f"score_{tname}"] if row[f"score_{tname}"] >= 0 else prev_d.get(f"score_{tname}", MISSING_VALUE)
+             )
+             if tname == "mvp":
+                 new_d[f"score_mvp_mini"] = (
+                     row[f"score_mvp_mini"]
+                     if row[f"score_mvp_mini"] >= 0
+                     else prev_d.get(f"score_mvp_mini", MISSING_VALUE)
+                 )
+         new_d["date"] = row["date"]
+         ldb_m2r[row["model_org"]] = new_d
+
+     # add Human baseline
+     ldb_m2r["human"] = HUMAN_BASELINES
+     ldb_m2r["gemini2.5"] = GEMINI2_5
+     ldb_m2r["gemini1.5"] = GEMINI1_5
+     ldb_m2r["gpt4o"] = GPT4O
+     ldb_m2r["internvl"] = INTERN_VL
+     ldb_m2r["llavaov"] = LLAVA
+     ldb_m2r["plm"] = PLM
+     ldb_m2r["qwen2.5"] = QWENVL
+     ldb_m2r["vjepa2"] = VJEPA2
+     ldb_m2r["cosmos"] = COSMOS
+     # compute average and convert back to rows
+     ldb_rows = []
+     for key, val in ldb_m2r.items():
+         print(ldb_m2r[key])
+         if "url" in ldb_m2r[key].keys() and ldb_m2r[key]["url"] != "":
+             ldb_m2r[key]["model"] = model_hyperlink(ldb_m2r[key]["url"], ldb_m2r[key]["model"])
+         row = copy.deepcopy(val)
+         score_keys = {k for k in val if k.startswith("score_")}
+         row["score"] = np.round(np.mean([row[sk] for sk in score_keys if (row[sk] != MISSING_VALUE and row[sk] != "-")]), 2)
+         tasks_completed = 0
+         for sk in score_keys:
+             if row[sk] == MISSING_VALUE:
+                 row[sk] = "-"
+             else:
+                 tasks_completed += 1
+         row["tasks_completed"] = tasks_completed
+         ldb_rows.append(row)
+
+     df = pd.DataFrame(ldb_rows)
+     df = df.query('date >= "2025-06-11"')
+     # df = df.map(lambda row: {"model": model_hyperlink(row["url"], row["model"])})
+
+     # sort
+     df = df.sort_values(by=["tasks_completed", "score"], ascending=False)
+
+     # format numerics
+     numeric_cols = [c for c in df.columns if c.startswith("score_")]
+     for nc in numeric_cols:
+         df[nc] = df[nc].apply(lambda x: np.round(x, 2) if type(x) == float else x)
+
+     # remove columns and rename
+     df.drop(["tasks_completed"], axis=1, inplace=True)
+     col_mapper = {f"score_{tname}": f"{tdisplay} (%)" for tname, tdisplay in TASKS if tname != "mvp"}
+     col_mapper.update(
+         {
+             "model": "Model Name",
+             "model_type": "Model Type",
+             "vision_backbone": "Vision Backbone",
+             "llm_backbone": "LLM Backbone",
+             # "score": "Average Score (%)",
+             "date": "Submission Date",
+         }
+     )
+     df.rename(col_mapper, axis=1, inplace=True)
+
+     df[f"{MVP_NAME} (%)"] = df.score_mvp_mini.astype(str)
+     df.drop([f"score_{MVP_KEY}", f"score_{MVP_MINI_KEY}"], axis=1, inplace=True)
+     # order columns
+     df = df[PRE_COL_NAMES + [f"{t[1]} (%)" for t in VISIBLE_TASKS] + POST_COL_NAMES]
+
+     return df
+
+
+ def create_dummy_data():
+     # Dummy evals data
+     rows = [
+         {
+             "url": "https://deepmind.google/models/gemini/flash/",
+             "model": "Gemini Test",
+             "model_type": "Closed",
+             "system_prompt": "test",
+             "vision_backbone": " - ",
+             "llm_backbone": " - ",
+             "num_frames": 10,
+             f"score_{INTP_KEY}": 56.1,
+             f"score_{MVP_KEY}": MISSING_VALUE,
+             f"score_{MVP_MINI_KEY}": MISSING_VALUE,
+             f"score_{WMQA_KEY}": 61.66,
+             "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+             "organization": "test",
+             "submitted_by": "octocat",
+         },
+         {
+             "url": "https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf",
+             "model": "Llava 1.6",
+             "model_type": "Open",
+             "system_prompt": "test",
+             "vision_backbone": "CLIP",
+             "llm_backbone": "Mistral",
+             "num_frames": 16,
+             f"score_{INTP_KEY}": MISSING_VALUE,
+             f"score_{MVP_KEY}": MISSING_VALUE,
+             f"score_{MVP_MINI_KEY}": MISSING_VALUE,
+             f"score_{WMQA_KEY}": MISSING_VALUE,
+             "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+             "organization": "test",
+             "submitted_by": "octocat",
+         },
+         {
+             "url": "https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf",
+             "model": "Llava 1.6",
+             "model_type": "Open",
+             "system_prompt": "test",
+             "vision_backbone": "CLIP",
+             "llm_backbone": "Mistral",
+             "num_frames": 16,
+             f"score_{INTP_KEY}": 0.0,
+             f"score_{MVP_KEY}": MISSING_VALUE,
+             f"score_{MVP_MINI_KEY}": MISSING_VALUE,
+             f"score_{WMQA_KEY}": 0.0,
+             "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+             "organization": "test",
+             "submitted_by": "octocat",
+         },
+     ]
+     dt = DatasetDict({"valid": Dataset.from_list(rows), "test": Dataset.from_list(rows)})
+     # Dummy contact
+     contact_info = {
+         "model": "llama",
+         "url": "test",
+         "organization": "test",
+         "username": "test",
+         "mail": "test",
+         "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+     }
+     cdt = DatasetDict({"valid": Dataset.from_list([contact_info]), "test": Dataset.from_list([contact_info])})
+     return dt, cdt
+
+
+ DUMMY_DATA = False
+
+
+ def get_eval_data():
+     if DUMMY_DATA:
+         eval_results, _ = create_dummy_data()
+     else:
+         eval_results = load_dataset(
+             RESULTS_DATASET,
+             token=TOKEN,
+             download_mode="force_redownload",
+             verification_mode=VerificationMode.NO_CHECKS,
+             trust_remote_code=True,
+         )
+     eval_dataframe_val = get_dataframe_from_results(eval_results=eval_results, split="valid")
+     eval_dataframe_test = get_dataframe_from_results(eval_results=eval_results, split="test")
+     return eval_results, eval_dataframe_val, eval_dataframe_test
+
+
+ def restart_space():
+     api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)
+
+
+ # --- MVP functions ---
+
+
+ def validate_mvp(submission_df, split="valid"):
+     subsets = submission_df.data_name.unique()
+     for subset in subsets:
+         assert subset in [MVP_KEY, MVP_MINI_KEY], format_error(
+             f"Wrong tasks, got {subset} but expecting either mvp or mvp_mini"
+         )
+     gold_tasks = get_dataset_config_names(MVP_DATASET, token=TOKEN)
+     for subset in subsets:
+         tasks = submission_df[submission_df.data_name == subset].task.unique()
+         assert len(tasks) == len(gold_tasks), format_error(
+             f"{MVP_NAME} submission must have all tasks, found = {tasks}, expecting = {gold_tasks}"
+         )
+         for task in tasks:
+             sub_df = submission_df[(submission_df.data_name == subset) & (submission_df.task == task)].copy()
+             assert task in gold_tasks, format_error(f"Found unknown task {task} for {MVP_NAME}, check submission")
+             gold_dataset = load_dataset(MVP_DATASET, task, split="full" if subset == MVP_KEY else "mini", token=TOKEN)
+             assert len(sub_df) == len(gold_dataset), format_error(
+                 f"Number of examples do not match in user submission, found {len(sub_df)} but expecting {len(gold_dataset)} for task {task} in split {subset}"
+             )
+             id2answer = {row["video_id"]: row["answer"] for row in gold_dataset}
+             for i, r in sub_df.iterrows():
+                 assert r["row_id"] in id2answer, format_error(
+                     f"Submission contains row_id {r['row_id']} which doesn't match the dataset's video_id"
+                 )
+
+
+ def compute_scores_mvp(submission_df, split="valid"):
+     gold_tasks = get_dataset_config_names(MVP_DATASET, token=TOKEN)
+     subsets = submission_df.data_name.unique()
+     scored_subs = []
+     for subset in subsets:
+         tasks = submission_df[submission_df.data_name == subset].task.unique()
+         assert len(tasks) == len(gold_tasks), format_error(f"{MVP_NAME} submission must have all tasks")
+         for task in tasks:
+             sub_df = submission_df[(submission_df.data_name == subset) & (submission_df.task == task)].copy()
+             gold_dataset = load_dataset(MVP_DATASET, task, split="full" if subset == MVP_KEY else "mini", token=TOKEN)
+             id2answer = {row["video_id"]: row["answer"] for row in gold_dataset}
+             correct = []
+             for i, r in sub_df.iterrows():
+                 gold_answer = id2answer[r["row_id"]]
+                 model_answer = r["model_answer"]
+                 if gold_answer == model_answer:
+                     correct.append(1)
+                 else:
+                     correct.append(0)
+             sub_df["rating"] = correct
+             scored_subs.append(sub_df)
+     return pd.concat(scored_subs)
+
+
+ def aggregate_scores_mvp(scored_submission_df, split="valid"):
+     subsets = scored_submission_df.data_name.unique()
+     subset_scores = {f"score_{s}": 0 for s in subsets}
+     for subset in subsets:
+         tasks = scored_submission_df[scored_submission_df.data_name == subset].task.unique()
+         task_pair_accuracies = []
+         for task in tasks:
+             sub_df = scored_submission_df[
+                 (scored_submission_df.data_name == subset) & (scored_submission_df.task == task)
+             ].copy()
+             result_by_vid = {}
+             pair_correct_count = 0
+             for i, row in sub_df.iterrows():
+                 video_id = "_".join(row["row_id"].split("_")[:-1])
+                 if video_id not in result_by_vid:
+                     result_by_vid[video_id] = [row.to_dict()]
+                 else:
+                     result_by_vid[video_id].append(row.to_dict())
+             for video_id, answer_dict_pair in result_by_vid.items():
+                 answer_dict_1, answer_dict_2 = answer_dict_pair
+                 if answer_dict_1["rating"] == 1 and answer_dict_2["rating"] == 1:
+                     pair_correct_count += 1
+
+             task_pair_accuracies.append((pair_correct_count / len(result_by_vid)) * 100)
+         # compute macro scores
+         subset_scores[f"score_{subset}"] = np.mean(task_pair_accuracies)
+     return subset_scores
+
+
+ # --- CausalVQA functions ---
+
+ def validate_causalvqa(submission_df, split="test"):
+     # assert split == "test", format_error(f"Split {split} not available for dataset {WMQA_NAME}")
+     split = "train"
+     subsets = submission_df.data_name.unique()
+     for subset in subsets:
+         assert subset in [WMQA_KEY], format_error(
+             f"Wrong tasks, got {subset} but expecting causalvqa"
+         )
+     gold_tasks = get_dataset_config_names(WMQA_DATASET, token=TOKEN)
+     for subset in subsets:
+         tasks = "default"  # submission_df[submission_df.data_name == subset].task.unique()
+         sub_df = submission_df[(submission_df.data_name == subset)].copy()
+         gold_dataset = load_dataset(WMQA_DATASET, "", split="train", token=TOKEN)  # note, causalvqa only has a test dataset under hf split 'valid'
+         assert len(sub_df) == len(gold_dataset), format_error(
+             f"Number of examples do not match in user submission, found {len(sub_df)} but expecting {len(gold_dataset)} in split {subset}"
+         )
+         id2answer = {row["id"] + "_" + str(row["n"]): row["answer"] for row in gold_dataset}
+         for i, r in sub_df.iterrows():
+             assert r["row_id"] in id2answer, format_error(
+                 f"Submission contains row_id {r['row_id']} which doesn't match the dataset's qid"
+             )
+     print("validated")
+
+ def compute_scores_causalvqa(submission_df, split="test"):
+     # assert split == "test", format_error(f"Split {split} not available for dataset {WMQA_NAME}")
+     split = "train"
+     gold_tasks = get_dataset_config_names(WMQA_DATASET, token=TOKEN)
+     subsets = submission_df.data_name.unique()
+     scored_subs = []
+     for subset in subsets:
+         sub_df = submission_df[(submission_df.data_name == subset)].copy()
+         sub_df["model_answer"] = sub_df["model_answer"].str.replace(r"[^a-eA-E]", "", regex=True, flags=re.IGNORECASE).str.upper()
+         gold_dataset = load_dataset(WMQA_DATASET, "", split="train", token=TOKEN)
+         gold_dataset = gold_dataset.to_pandas()
+         gold_dataset["row_id"] = gold_dataset.apply(lambda x: x["id"] + "_" + str(x["n"]), axis=1)
+         joined = pd.merge(gold_dataset, sub_df, on="row_id", how="left")
+         correct = []
+         for i, r in joined.iterrows():
+             gold_answer = r["answer"]
+             model_answer = r["model_answer"]
+             if gold_answer == model_answer:
+                 correct.append(1)
+             else:
+                 correct.append(0)
+         joined["rating"] = correct
+         scored_subs.append(joined)
+         print(joined.columns)
+     print("scored")
+     return pd.concat(scored_subs)
+
+ def aggregate_scores_causalvqa(scored_submission_df, split="test"):
+     subsets = scored_submission_df.data_name.unique()
+     subset_scores = {f"score_{s}": 0 for s in subsets}
+     for subset in subsets:
+         sub_df = scored_submission_df[scored_submission_df.data_name == subset].copy()
+         agg_df = sub_df.groupby(["id", "strata"])["rating"].sum().reset_index()
+         agg_df["points"] = 0
+         agg_df.loc[agg_df["rating"] == 2, "points"] = 1
+
+
+         # compute macro scores
+         subset_scores[f"score_{subset}"] = agg_df.points.mean() * 100.00
+     print("aggregated")
+     return subset_scores
+
+
+ # --- IntPhys functions ---
+
+
+ def validate_intphys(submission_df, split="test"):
+     assert split == "test", format_error(f"Split {split} not available for dataset {INTP_NAME}")
+     subsets = submission_df.data_name.unique()
+     for subset in subsets:
+         assert subset in [INTP_KEY], format_error(
+             f"Wrong tasks, got {subset} but expecting " + INTP_KEY
+         )
+     gold_tasks = get_dataset_config_names(INTP_DATASET, token=TOKEN)
+     for subset in subsets:
+         sub_df = submission_df[(submission_df.data_name == subset)].copy()
+         gold_dataset = load_dataset(INTP_DATASET, "", split="test")
+         assert len(sub_df) == len(gold_dataset), format_error(
+             f"Number of examples do not match in user submission, found {len(sub_df)} but expecting {len(gold_dataset)} in split {subset}"
+         )
+         id2answer = {row["name"]: row["answer"] for row in gold_dataset}
+         for i, r in sub_df.iterrows():
+             assert r["row_id"] in id2answer, format_error(
+                 f"Submission contains row_id {r['row_id']} which doesn't match the dataset's video_id"
+             )
+
+
+
+ def compute_scores_intphys(submission_df, split="test"):
+     assert split == "test", format_error(f"Split {split} not available for dataset {INTP_NAME}")
+     gold_tasks = get_dataset_config_names(INTP_DATASET, token=TOKEN)
+     subsets = submission_df.data_name.unique()
+     scored_subs = []
+     for subset in subsets:
+         sub_df = submission_df[(submission_df.data_name == subset)].copy()
+         gold_dataset = load_dataset(INTP_DATASET, "", split="test", token=TOKEN)
+         id2answer = {row["name"]: row["answer"] for row in gold_dataset}
+         correct = []
+         for i, r in sub_df.iterrows():
+             gold_answer = id2answer[r["row_id"]]
+             model_answer = r["model_answer"]
+             if gold_answer == model_answer:
+                 correct.append(1)
+             else:
+                 correct.append(0)
+         sub_df["rating"] = correct
+         scored_subs.append(sub_df)
+     return pd.concat(scored_subs)
+
+
+ def aggregate_scores_intphys(scored_submission_df, split="test"):
+     subsets = scored_submission_df.data_name.unique()
+     subset_scores = {f"score_{s}": 0 for s in subsets}
+     accuracies = []
+     for subset in subsets:
+         sub_df = scored_submission_df[
+             (scored_submission_df.data_name == subset)
+         ].copy()
+         result_by_vid = {}
+         pair_correct_count = 0
+         for i, row in sub_df.iterrows():
+             if row["rating"] == 1:
+                 pair_correct_count += 1
+         accuracies.append((pair_correct_count / len(sub_df)) * 100)
+         # compute macro scores
+         subset_scores[f"score_{subset}"] = np.mean(accuracies)
+     return subset_scores
+
+
+
+
+ VALIDATION_FN = {
+     MVP_KEY: validate_mvp,
+     MVP_MINI_KEY: validate_mvp,
+     INTP_KEY: validate_intphys,
+     WMQA_KEY: validate_causalvqa,
+ }
+
+ SCORER_FN = {
+     MVP_KEY: compute_scores_mvp,
+     MVP_MINI_KEY: compute_scores_mvp,
+     INTP_KEY: compute_scores_intphys,
+     WMQA_KEY: compute_scores_causalvqa,
+ }
+
+ AGGREGATE_FN = {
+     MVP_KEY: aggregate_scores_mvp,
+     MVP_MINI_KEY: aggregate_scores_mvp,
+     INTP_KEY: aggregate_scores_intphys,
+     WMQA_KEY: aggregate_scores_causalvqa,
+ }
+
+
+ def compute_scores(submission_df, split="valid"):
+     """
+     Runs the scorers against the held-out valid/test sets and updates the submission with metrics for each dataset
+     - First, runs validation of the input to ensure the right keys are present
+     - Then, runs the evaluations
+     """
+     tasks = submission_df.data_name.unique()
+     scored_subs = []
+     for t in tasks:
+         task_sub = submission_df[submission_df.data_name == t].copy()
+         scored_subs.append(SCORER_FN[t](task_sub, split))
+     scored_subs = pd.concat(scored_subs)
+     return scored_subs
+
+
+ def aggregate_scores(scored_df, split="valid"):
+     tasks = scored_df.data_name.unique()
+     agg_scores = {}
+     for task in tasks:
+         task_sub = scored_df[scored_df.data_name == task].copy()
+         agg_metrics = AGGREGATE_FN[task](task_sub, split=split)
+         agg_scores.update(agg_metrics)
+     return agg_scores
+
+
+ def validate_submission(submission_df, split="valid"):
+     """
+     Validate user submissions
+     """
+     # Run checks
+     assert "data_name" in submission_df.columns, format_error("Submission missing column data_name")
+     assert "row_id" in submission_df.columns, format_error("Submission missing column row_id")
+     assert "task" in submission_df.columns, format_error("Submission missing column task")
+     assert "model_answer" in submission_df.columns, format_error("Submission missing column model_answer")
+     tasks = submission_df.data_name.unique()
+     valid_tasks = [t[0] for t in TASKS] + [MVP_MINI_KEY]
+     for t in tasks:
+         assert t in valid_tasks, format_error(
+             f"Submission contains one or more rows with data_name={t}, which is not a valid task for this leaderboard (expecting to match a dataset in {valid_tasks})"
+         )
+     # Dataset specific checks
+     for task in tasks:
+         task_sub = submission_df[submission_df.data_name == task].copy()
+         VALIDATION_FN[task](task_sub)
+
+
+ def add_new_eval(
+     model: str,
+     vision_backbone: str,
+     llm_backbone: str,
+     url: str,
+     model_type: str,
+     path_to_file: str,
+     organization: str,
+     mail: str,
+     profile: gr.OAuthProfile,
+     progress=gr.Progress(),
+ ):
+     progress(0, desc="Validating user ...")
+     contact_infos = load_dataset(
+         CONTACT_DATASET,
+         token=TOKEN,
+         download_mode="force_redownload",
+         verification_mode=VerificationMode.NO_CHECKS,
+         trust_remote_code=True,
+     )
+     user_submission_dates = sorted(
+         row["date"] for row in contact_infos["test"] if row["username"] == profile.username
+     )
+     # Logic to limit submissions per day
+     if len(user_submission_dates) > 0 and user_submission_dates[-1] == datetime.datetime.today().strftime("%Y-%m-%d"):
+         return format_error("You already submitted once today, please try again tomorrow.")
+     # Very basic email parsing
+     _, parsed_mail = parseaddr(mail)
+     if "@" not in parsed_mail:
+         return format_warning("Please provide a valid email address.")
+
+     print("Adding new eval")
+     progress(0.1, desc="Fetching recent evals ...")
+
+     eval_results, _, _ = get_eval_data()
+     # # Check if the combination model/org already exists and prints a warning message if yes
+     # if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organization.lower() in set(
+     #     [o.lower() for o in eval_results[val_or_test]["organization"]]
+     # ):
+     #     return format_warning("This model has been already submitted.")
+
+     if path_to_file is None:
+         return format_warning("Please attach a file.")
+
+     # validate submission - do not save the submission until it is fully validated
+     progress(0.3, desc="Validating user submission ...")
+     file_path = path_to_file.name
+     assert file_path.endswith(".jsonl"), format_error("Please submit a jsonl file")
+     submissions_df = pd.read_json(file_path, lines=True, orient="records")
+     validate_submission(submissions_df)
+
+     # Save submitted file
+     if LOCAL_DEBUG:
+         gr.Info("In local debug mode, mock uploading submission dataset.")
+     else:
+         api.upload_file(
+             repo_id=SUBMISSION_DATASET,
+             path_or_fileobj=path_to_file.name,
+             path_in_repo=f"{organization}/{model}/submissions/test_raw_{datetime.datetime.today()}.jsonl",
+             repo_type="dataset",
+             token=TOKEN,
+         )
+
+     # Compute score
+     progress(0.5, desc="Computing scores ...")
+     scored_df = compute_scores(submissions_df, split="test")
+
+     # Save scored file
+     if LOCAL_DEBUG:
+         gr.Info("In local debug mode, mock uploading scored files")
+     else:
+         tasks = scored_df.data_name.unique()
+         for task in tasks:
+             scored_df.to_json(f"scored/{organization}_{model}_{task}.jsonl", lines=True, orient="records")
+             api.upload_file(
+                 repo_id=SUBMISSION_DATASET,
+                 path_or_fileobj=f"scored/{organization}_{model}_{task}.jsonl",
+                 path_in_repo=f"{organization}/{model}/scored/{task}/test_scored_{datetime.datetime.today()}.jsonl",
+                 repo_type="dataset",
+                 token=TOKEN,
+             )
+
+     # Actual submission
+     progress(0.7, desc="Submitting leaderboard entry ...")
+     eval_entry = {
+         "model": model,
+         "model_type": model_type,
+         "vision_backbone": vision_backbone,
+         "llm_backbone": llm_backbone,
+         "url": url,
+         "organization": organization,
+         "submitted_by": profile.username,
+         "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+     }
+     agg_metrics = aggregate_scores(scored_df, split="test")
+     eval_entry.update(agg_metrics)
+     # update missing tasks to MISSING_VALUE
+     task_keys = [t[0] for t in TASKS] + [MVP_MINI_KEY]
+     missing_metrics = {f"score_{task}": MISSING_VALUE for task in task_keys if f"score_{task}" not in eval_entry}
+     eval_entry.update(missing_metrics)
+
+     eval_results["test"] = eval_results["test"].add_item(eval_entry)
+     if LOCAL_DEBUG:
+         print(eval_results["test"][-1])
+         gr.Info("In local debug mode, mock uploading aggregated scores")
+     else:
+         eval_results.push_to_hub(RESULTS_DATASET, token=TOKEN)
+
+     progress(0.9, desc="Updating contacts ...")
+     contact_info = {
+         "model": model,
+         "url": url,
+         "organization": organization,
+         "username": profile.username,
+         "mail": mail,
+         "date": datetime.datetime.today().strftime("%Y-%m-%d"),
+     }
+     contact_infos["test"] = contact_infos["test"].add_item(contact_info)
+     if LOCAL_DEBUG:
+         print("mock uploaded contact info")
+     else:
+         contact_infos.push_to_hub(CONTACT_DATASET, token=TOKEN)
+
+     progress(1.0, desc="Completed evaluation successfully. Please refresh leaderboard")
+     success_str = f"Model {model} submitted by {organization} is successfully evaluated and stored in our database.\nPlease wait a few hours and refresh the leaderboard to see your score displayed."
+     format_log(success_str)
+     return success_str
+
+
+ def on_filter_model_size_method_change():
+     _, eval_dataframe_val, eval_dataframe_test = get_eval_data()
+     # eval_dataframe_val = eval_dataframe_val[PRE_COL_NAMES + [f"{t} (%)" for t in selected_columns] + POST_COL_NAMES]
+     eval_dataframe_test = eval_dataframe_test[PRE_COL_NAMES + [f"{t} (%)" for _, t in VISIBLE_TASKS] + POST_COL_NAMES]
+     datatypes = ["markdown"] + ["number" for _ in VISIBLE_TASKS] + ["text"] + ["text"] + ["text"] + ["date"]
+     # val_ldb = gr.components.Dataframe(
+     #     value=eval_dataframe_val, datatype=datatypes, interactive=False, column_widths=["20%"]
+     # )
+     test_ldb = gr.components.Dataframe(
+         value=eval_dataframe_test, datatype=datatypes, interactive=False, column_widths=["20%"]
+     )
+     return test_ldb
+
+
+ def upload_file(files):
+     file_paths = [file.name for file in files]
+     return file_paths
+
+ if __name__ == "__main__":
+
+     _, eval_dataframe_val, eval_dataframe_test = get_eval_data()
+     demo = gr.Blocks()
+     with demo:
+         gr.HTML(TITLE)
+         gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+         with gr.Row():
+             with gr.Accordion("📙 Citation", open=False):
+                 gr.Markdown(CITATION_BUTTON_LABEL)
+                 gr.Markdown(CITATION_BUTTON_TEXT)
+
+
+         datatypes = ["markdown"] + ["number" for _ in VISIBLE_TASKS] + ["text"] + ["text"] + ["text"] + ["date"]
+
+         with gr.Tab("Results: Test"):
+             leaderboard_table_test = gr.components.Dataframe(
+                 value=eval_dataframe_test, datatype=datatypes, interactive=False, column_widths=["20%"]
+             )
+
+         refresh_button = gr.Button("Refresh")
+         refresh_button.click(
+             # print(task_filter)
+             on_filter_model_size_method_change,
+             # inputs=[VISIBLE_TASKS],
+             # inputs=[],
+             outputs=[
+                 # leaderboard_table_val,
+                 leaderboard_table_test,
+             ],
+         )
+         with gr.Accordion("Submit a new model for evaluation"):
+             with gr.Row():
+                 gr.Markdown(SUBMISSION_TEXT, elem_classes="markdown-text")
+             with gr.Row():
+                 with gr.Column():
+                     # level_of_test = "test"
+                     model_name_textbox = gr.Textbox(label="Model name")
+                     model_url = gr.Textbox(label="Model URL")
+                     model_type = gr.Dropdown(choices=["Open", "Closed"], label="Model Type")
+                     # num_frames = gr.Textbox(label="Number of frames used")
+                     llm_backbone_textbox = gr.Textbox(label="LLM Backbone")
+                     vision_backbone_textbox = gr.Textbox(label="Vision Backbone")
+                     # system_prompt_textbox = gr.Textbox(label="System prompt example")
+                     # url_textbox = gr.Textbox(label="Url to model information")
+                 with gr.Column():
+                     organization = gr.Textbox(label="Organization")
+                     mail = gr.Textbox(
+                         label="Contact email"
+                     )
+                     file_output = gr.File()
+             submission_result = gr.Textbox(label="Status")
+             with gr.Row():
+                 with gr.Column():
+                     gr.LoginButton()
+                 with gr.Column():
+                     submit_button = gr.Button("Submit Eval")
+
+             submit_button.click(
+                 add_new_eval,
+                 [
+                     # level_of_test,
+                     model_name_textbox,
+                     vision_backbone_textbox,
+                     llm_backbone_textbox,
+                     model_url,
+                     model_type,
+                     # num_frames,
+                     file_output,
+                     organization,
+                     mail,
+                 ],
+                 submission_result,
+             )
+
+     scheduler = BackgroundScheduler()
+     scheduler.add_job(restart_space, "interval", seconds=3600)
+     scheduler.start()
+     demo.launch(debug=True)
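The MVP aggregation above (`aggregate_scores_mvp`) only credits a pair of minimally different videos when both of its questions are answered correctly. A small self-contained sketch of that behaviour, with toy `row_id`s and ratings invented purely for illustration:

```python
# Toy illustration of aggregate_scores_mvp: a pair counts only if BOTH of its
# questions are rated correct. Row ids and ratings below are made up.
import pandas as pd
from app import aggregate_scores_mvp

scored = pd.DataFrame(
    {
        "data_name": ["mvp_mini"] * 4,
        "task": ["some_task"] * 4,
        "row_id": ["clip1_0", "clip1_1", "clip2_0", "clip2_1"],
        "rating": [1, 1, 1, 0],  # clip1 pair fully correct, clip2 pair broken
    }
)
print(aggregate_scores_mvp(scored))  # pair accuracy for mvp_mini: 50.0
```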
content.py ADDED
@@ -0,0 +1,90 @@
+ import gradio as gr
+
+ TITLE = """<h1 align="center" id="space-title">Leaderboard: Physical Reasoning from Video</h1>"""
+
+ INTRODUCTION_TEXT = """
+ This leaderboard tracks the progress of frontier models on three physical reasoning benchmark datasets released by Meta FAIR -- **Minimal Video Pairs (MVPBench)**, **IntPhys 2**, and **CausalVQA**. In addition to tracking community progress through public submissions, we also present human scores for each benchmark to show the gap between leading models and human performance on key physical and video reasoning tasks.
+
+ - **[MVPBench](https://github.com/facebookresearch/minimal_video_pairs):** A Video Question Answering (VQA) benchmark for spatio-temporal and intuitive physics video understanding. Videos were sourced from diverse datasets and automatically paired together, such that videos in each pair differ only in minimal ways but have opposing correct answers for the same question. This design ensures that models need to go beyond surface visual or textual biases in order to perform well on the benchmark.
+
+ - **[IntPhys 2](https://github.com/facebookresearch/IntPhys2):** A video benchmark designed to evaluate the intuitive physics understanding of deep learning models. IntPhys 2 focuses on four core principles: Permanence, Immutability, Spatio-Temporal Continuity, and Solidity, and offers a comprehensive suite of tests based on the violation-of-expectation framework, which challenge models to differentiate between possible and impossible events within controlled and diverse virtual environments.
+
+ - **[CausalVQA](https://github.com/facebookresearch/CausalVQA):** A Video Question Answering (VQA) benchmark composed of question-answer pairs that probe models’ understanding of causality in the physical world. Questions were designed to be grounded in real-world scenarios, while focusing on models’ ability to predict the likely outcomes of different actions and events through five question types – counterfactual, hypothetical, anticipation, planning and descriptive.
+
+ Please see the linked GitHub repos for instructions on how to download and run each benchmark, and the instructions below on how to submit results to the leaderboard.
+ """
+
+ SUBMISSION_TEXT = """
+ ## Submissions
+ Scores are calculated according to the metrics defined by each dataset in the leaderboard. Please see each dataset's linked repo below for more details on metric calculation.
+
+ - **[MVPBench](https://github.com/facebookresearch/minimal_video_pairs):** The leaderboard computes and tracks performance on the _"mini"_ split of MVPBench, which contains ~5k items.
+
+ - **[IntPhys 2](https://github.com/facebookresearch/IntPhys2):** The leaderboard computes and tracks performance on the _"held out"_ split of IntPhys 2, a set of 344 simulated videos with moving cameras, realistic objects, and complex backgrounds.
+
+ - **[CausalVQA](https://github.com/facebookresearch/CausalVQA):** The leaderboard computes and tracks performance on the _"held out"_ split of CausalVQA, which contains 793 pairs of curated questions.
+
+ Note that for IntPhys 2 and CausalVQA, submitting to this leaderboard is the only way to compute accuracy on the _held-out_ splits, as the correct answers on those splits are stored privately and not publicly available.
+
+ ## Format
+
+ Each question calls for an answer that is either a string (one or a few words), a number, or a comma-separated list of strings or floats, unless specified otherwise. There is only one correct answer.
+
+
+ We expect submissions to be JSON Lines (`.jsonl`) files with the following format:
+ ```
+ {"data_name": "mvp", "task": "some_task", "row_id": 0, "model_answer": "Answer 1 from your model"}
+ {"data_name": "mvp", "task": "some_task", "row_id": 1, "model_answer": "Answer 2 from your model"}
+ ```
+
+ Here, `data_name` can be one of three valid options: `mvp_mini`, `intphys2`, `causalvqa`. You can merge your results from multiple datasets into a single submission.
+ If the dataset contains multiple tasks (as in the case of MVPBench and CausalVQA), the `task` field is used to identify the subtask. For datasets that do not have tasks, this field can be kept blank; however, the submission file must still include the field in each JSON line.
+ For each submission we receive, we will check that all rows from the dataset are present and valid.
+
+ """
+
+ CITATION_BUTTON_LABEL = "Please cite the following papers if you use these benchmarks"
+ CITATION_BUTTON_TEXT = r"""
+ ```latex
+ @misc{mvpbench,
+     title={A Shortcut-aware Video-QA Benchmark for Physical Understanding via Minimal Video Pairs},
+     author={Benno Krojer and Mojtaba Komeili and Candace Ross and Quentin Garrido and Koustuv Sinha and Nicolas Ballas and Mahmoud Assran},
+     year={2025},
+     eprint={},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ @misc{causalvqa,
+     title={CausalVQA: A Physically Grounded Causal Reasoning Benchmark for Video Models},
+     author={Aaron Foss and Chloe Evans and Sasha Mitts and Koustuv Sinha and Ammar Rizvi and Justine T Kao},
+     year={2025},
+     eprint={},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ @misc{intphys2,
+     title={IntPhys 2: Benchmarking Intuitive Physics Understanding In Complex Synthetic Environments},
+     author={Florian Bordes and Quentin Garrido and Justine T Kao and Adina Williams and Michael Rabbat and Emmanuel Dupoux},
+     year={2025},
+     eprint={},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+ """
+
+
+ def format_error(msg):
+     raise gr.Error(msg)
+
+
+ def format_warning(msg):
+     return gr.Warning(msg)
+
+
+ def format_log(msg):
+     return gr.Info(msg)
+
+
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
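A short sketch of producing a submission file in the format documented in `SUBMISSION_TEXT` above. The row ids and answers below are placeholders; real `row_id`s must match each dataset's identifiers as checked in `app.py` (the `video_id` column for MVPBench, `name` for IntPhys 2, and `id` plus `_` plus `n` for CausalVQA).

```python
# Sketch: write a leaderboard submission as JSON Lines. All values are placeholders.
import json

predictions = [
    {"data_name": "intphys2", "task": "", "row_id": "some_video_name", "model_answer": "placeholder"},
    {"data_name": "mvp_mini", "task": "some_task", "row_id": "some_video_id_0", "model_answer": "placeholder"},
    {"data_name": "causalvqa", "task": "some_task", "row_id": "some_id_0", "model_answer": "A"},
]

with open("submission.jsonl", "w") as f:
    for row in predictions:
        f.write(json.dumps(row) + "\n")
```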
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [tool.ruff]
+ # Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+ select = ["E", "F"]
+ ignore = ["E501"] # line too long (black is taking care of this)
+ line-length = 119
+ fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]

+ [tool.isort]
+ profile = "black"
+ line_length = 119

+ [tool.black]
+ line-length = 119
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ APScheduler
+ black
+ datasets
+ gradio
+ gradio[oauth]
+ gradio_leaderboard==0.0.13
+ gradio_client
+ huggingface-hub>=0.18.0
+ matplotlib
+ numpy
+ pandas
+ python-dateutil
+ tqdm
+ transformers
+ tokenizers>=0.15.0
+ sentencepiece
scorer.py ADDED
@@ -0,0 +1,104 @@
+ import json
+ import re
+ import string
+ import warnings
+
+ import numpy as np
+
+
+ def normalize_number_str(number_str: str) -> float:
+     # we replace these common units and commas to allow
+     # conversion to float
+     for char in ["$", "%", ","]:
+         number_str = number_str.replace(char, "")
+     try:
+         return float(number_str)
+     except ValueError:
+         print(f"String {number_str} cannot be normalized to number str.")
+         return float("inf")
+
+
+ def split_string(
+     s: str,
+     char_list: list[str] = [",", ";"],
+ ) -> list[str]:
+     pattern = f"[{''.join(char_list)}]"
+     return re.split(pattern, s)
+
+
+ def question_scorer(
+     model_answer: str,
+     ground_truth: str,
+ ) -> bool:
+     def is_float(element: any) -> bool:
+         try:
+             float(element)
+             return True
+         except ValueError:
+             return False
+
+     if model_answer is None:
+         model_answer = "None"
+
+     # if gt is a number
+     if is_float(ground_truth):
+         print(f"Evaluating {model_answer} as a number.")
+         normalized_answer = normalize_number_str(model_answer)
+         return normalized_answer == float(ground_truth)
+
+     # if gt is a list
+     elif any(char in ground_truth for char in [",", ";"]):
+         print(f"Evaluating {model_answer} as a comma separated list.")
+         # question with the fish: normalization removes punct
+
+         gt_elems = split_string(ground_truth)
+         ma_elems = split_string(model_answer)
+
+         # check length is the same
+         if len(gt_elems) != len(ma_elems):
+             warnings.warn(
+                 "Answer lists have different lengths, returning False.", UserWarning
+             )
+             return False
+
+         # compare each element as float or str
+         comparisons = []
+         for ma_elem, gt_elem in zip(ma_elems, gt_elems):
+             if is_float(gt_elem):
+                 normalized_ma_elem = normalize_number_str(ma_elem)
+                 comparisons.append(normalized_ma_elem == float(gt_elem))
+             else:
+                 # we do not remove punct since comparisons can include punct
+                 comparisons.append(
+                     normalize_str(ma_elem, remove_punct=False)
+                     == normalize_str(gt_elem, remove_punct=False)
+                 )
+         return all(comparisons)
+
+     # if gt is a str
+     else:
+         print(f"Evaluating {model_answer} as a string.")
+         return normalize_str(model_answer) == normalize_str(ground_truth)
+
+
+ def normalize_str(input_str, remove_punct=True) -> str:
+     """
+     Normalize a string by:
+     - Removing all white spaces
+     - Optionally removing punctuation (if remove_punct is True)
+     - Converting to lowercase
+     Parameters:
+     - input_str: str, the string to normalize
+     - remove_punct: bool, whether to remove punctuation (default: True)
+     Returns:
+     - str, the normalized string
+     """
+     # Remove all white spaces. Required e.g for seagull vs. sea gull
+     no_spaces = re.sub(r"\s", "", input_str)
+
+     # Remove punctuation, if specified.
+     if remove_punct:
+         translator = str.maketrans("", "", string.punctuation)
+         return no_spaces.lower().translate(translator)
+     else:
+         return no_spaces.lower()
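`question_scorer` above normalizes numbers, comma/semicolon-separated lists, and free-form strings before comparing. A few illustrative calls, with inputs invented purely for illustration:

```python
# Illustrative uses of question_scorer (see scorer.py above); the examples are made up.
from scorer import question_scorer

print(question_scorer("$2,000", "2000"))      # True: "$" and "," are stripped, then compared as numbers
print(question_scorer("Sea Gull", "seagull")) # True: whitespace and punctuation removed, lowercased
print(question_scorer("a; 2", "a,2.0"))       # True: element-wise comparison of the split lists
```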
utils.py ADDED
@@ -0,0 +1,15 @@
+ import re
+ from typing import Union
+
+
+ def extract_single_option(video_llm_output) -> Union[str, bool]:
+     video_llm_output = video_llm_output.lower()
+     pattern = r"(Answer|Assistant)?:?\s*([AB])\b"
+     matches = re.findall(pattern, video_llm_output, re.IGNORECASE)
+     if matches:
+         actual_answer = matches[1] if len(matches) > 1 else matches[0]
+         answer = actual_answer[1].lower()
+         return answer
+     elif len(video_llm_output) == 1:
+         answer = video_llm_output.lower()
+         return answer
+     else:
+         return False
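`extract_single_option` pulls a single A/B choice out of a free-form model response, or returns `False` when no option can be found. A couple of hypothetical model outputs to show the behaviour:

```python
# Hypothetical model outputs fed through utils.extract_single_option.
from utils import extract_single_option

print(extract_single_option("Answer: B. The block falls off the table."))  # "b"
print(extract_single_option("a"))                                          # "a"
print(extract_single_option("I am not sure."))                             # False (no A/B option found)
```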