abdoelsayed committed on
Commit
a9c03d7
·
verified ·
1 Parent(s): af4cfc9

Upload 2 files

Browse files
Files changed (2) hide show
  1. FutureQueryEval.py +143 -0
  2. dataset_infos.json +111 -0
FutureQueryEval.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import datasets
3
+
4
+ _CITATION = """\
5
+ @misc{abdallah2025good,
6
+ title={How Good are LLM-based Rerankers? An Empirical Analysis of State-of-the-Art Reranking Models},
7
+ author={Abdelrahman Abdallah and Bhawna Piryani and Jamshid Mozafari and Mohammed Ali and Adam Jatowt},
8
+ year={2025},
9
+ eprint={2508.16757},
10
+ archivePrefix={arXiv},
11
+ primaryClass={cs.CL}
12
+ }
13
+ """
14
+
15
+ _DESCRIPTION = """\
16
+ FutureQueryEval is a novel IR benchmark comprising 148 queries with 2,938 query-document pairs
17
+ across 7 topical categories, designed to evaluate reranker performance on temporal novelty.
18
+ All queries refer to events after April 2025 to ensure zero contamination with LLM pretraining data.
19
+ """
20
+
21
+ _HOMEPAGE = "https://github.com/DataScienceUIBK/llm-reranking-generalization-study"
22
+
23
+ _LICENSE = "Apache-2.0"
24
+
25
+ _URLS = {
26
+ "queries": "queries.csv",
27
+ "corpus": "corpus.tsv",
28
+ "qrels": "qrels.txt",
29
+ }
30
+
31
class FutureQueryEval(datasets.GeneratorBasedBuilder):
    """FutureQueryEval dataset for temporal IR evaluation.

    Three configurations mirror the three raw files declared in ``_URLS``:

    - ``"queries"``: comma-delimited CSV with ``query_id``, ``query_text``,
      ``category`` columns.
    - ``"corpus"``: tab-delimited TSV with ``doc_id``, ``title``, ``text``,
      ``url`` columns.
    - ``"qrels"``: TREC-style whitespace-separated judgments
      (``query_id iteration doc_id relevance``).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="queries",
            version=VERSION,
            description="Query collection with categories",
        ),
        datasets.BuilderConfig(
            name="corpus",
            version=VERSION,
            description="Document corpus",
        ),
        datasets.BuilderConfig(
            name="qrels",
            version=VERSION,
            description="Relevance judgments",
        ),
    ]

    DEFAULT_CONFIG_NAME = "queries"

    def _info(self):
        """Return the ``DatasetInfo`` carrying the active config's features.

        Raises:
            ValueError: if ``self.config.name`` is not one of the declared
                configurations. (Previously an unknown name surfaced as an
                ``UnboundLocalError`` on ``features``.)
        """
        if self.config.name == "queries":
            features = datasets.Features({
                "query_id": datasets.Value("string"),
                "query_text": datasets.Value("string"),
                "category": datasets.Value("string"),
            })
        elif self.config.name == "corpus":
            features = datasets.Features({
                "doc_id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
                "url": datasets.Value("string"),
            })
        elif self.config.name == "qrels":
            features = datasets.Features({
                "query_id": datasets.Value("string"),
                "iteration": datasets.Value("int32"),
                "doc_id": datasets.Value("string"),
                "relevance": datasets.Value("int32"),
            })
        else:
            raise ValueError(f"Unknown config name: {self.config.name!r}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the active config's file and declare its single split.

        Each config exposes exactly one split named after the config and
        backed by the matching ``_URLS`` entry, so the per-config branches
        collapse into one lookup.

        Raises:
            ValueError: for an unknown config name. (Previously this method
                silently returned ``None``, which crashed later inside the
                ``datasets`` machinery.)
        """
        name = self.config.name
        if name not in _URLS:
            raise ValueError(f"Unknown config name: {name!r}")
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={"filepath": downloaded_files[name]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the active config.

        - ``"queries"``: header-row CSV parsed with ``csv.DictReader``.
        - ``"corpus"``: header-row TSV parsed with ``csv.DictReader``.
        - ``"qrels"``: one judgment per line; lines without exactly four
          whitespace-separated fields are skipped (matches the original
          best-effort behavior).
        """
        if self.config.name == "queries":
            with open(filepath, encoding="utf-8") as f:
                for key, row in enumerate(csv.DictReader(f, delimiter=",")):
                    yield key, {
                        "query_id": row["query_id"],
                        "query_text": row["query_text"],
                        "category": row["category"],
                    }

        elif self.config.name == "corpus":
            with open(filepath, encoding="utf-8") as f:
                for key, row in enumerate(csv.DictReader(f, delimiter="\t")):
                    yield key, {
                        "doc_id": row["doc_id"],
                        "title": row["title"],
                        "text": row["text"],
                        "url": row["url"],
                    }

        elif self.config.name == "qrels":
            with open(filepath, encoding="utf-8") as f:
                for key, line in enumerate(f):
                    # str.split() with no args both strips and tokenizes.
                    parts = line.split()
                    if len(parts) == 4:
                        yield key, {
                            "query_id": parts[0],
                            "iteration": int(parts[1]),
                            "doc_id": parts[2],
                            "relevance": int(parts[3]),
                        }

        else:
            raise ValueError(f"Unknown config name: {self.config.name!r}")
dataset_infos.json ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "queries": {
3
+ "description": "FutureQueryEval query collection with 148 queries across 7 categories",
4
+ "citation": "@misc{abdallah2025good, title={How Good are LLM-based Rerankers? An Empirical Analysis of State-of-the-Art Reranking Models}, author={Abdelrahman Abdallah and Bhawna Piryani and Jamshid Mozafari and Mohammed Ali and Adam Jatowt}, year={2025}, eprint={2508.16757}, archivePrefix={arXiv}, primaryClass={cs.CL}}",
5
+ "homepage": "https://github.com/DataScienceUIBK/llm-reranking-generalization-study",
6
+ "license": "Apache-2.0",
7
+ "features": {
8
+ "query_id": {
9
+ "dtype": "string",
10
+ "id": null,
11
+ "_type": "Value"
12
+ },
13
+ "query_text": {
14
+ "dtype": "string",
15
+ "id": null,
16
+ "_type": "Value"
17
+ },
18
+ "category": {
19
+ "dtype": "string",
20
+ "id": null,
21
+ "_type": "Value"
22
+ }
23
+ },
24
+ "splits": {
25
+ "queries": {
26
+ "name": "queries",
27
+ "num_bytes": 45056,
28
+ "num_examples": 148,
29
+ "dataset_name": "future_query_eval"
30
+ }
31
+ },
32
+ "download_size": 45056,
33
+ "dataset_size": 45056
34
+ },
35
+ "corpus": {
36
+ "description": "FutureQueryEval document corpus with 2,787 documents",
37
+ "citation": "@misc{abdallah2025good, title={How Good are LLM-based Rerankers? An Empirical Analysis of State-of-the-Art Reranking Models}, author={Abdelrahman Abdallah and Bhawna Piryani and Jamshid Mozafari and Mohammed Ali and Adam Jatowt}, year={2025}, eprint={2508.16757}, archivePrefix={arXiv}, primaryClass={cs.CL}}",
38
+ "homepage": "https://github.com/DataScienceUIBK/llm-reranking-generalization-study",
39
+ "license": "Apache-2.0",
40
+ "features": {
41
+ "doc_id": {
42
+ "dtype": "string",
43
+ "id": null,
44
+ "_type": "Value"
45
+ },
46
+ "title": {
47
+ "dtype": "string",
48
+ "id": null,
49
+ "_type": "Value"
50
+ },
51
+ "text": {
52
+ "dtype": "string",
53
+ "id": null,
54
+ "_type": "Value"
55
+ },
56
+ "url": {
57
+ "dtype": "string",
58
+ "id": null,
59
+ "_type": "Value"
60
+ }
61
+ },
62
+ "splits": {
63
+ "corpus": {
64
+ "name": "corpus",
65
+ "num_bytes": 964608,
66
+ "num_examples": 2787,
67
+ "dataset_name": "future_query_eval"
68
+ }
69
+ },
70
+ "download_size": 964608,
71
+ "dataset_size": 964608
72
+ },
73
+ "qrels": {
74
+ "description": "FutureQueryEval relevance judgments with 2,938 query-document pairs",
75
+ "citation": "@misc{abdallah2025good, title={How Good are LLM-based Rerankers? An Empirical Analysis of State-of-the-Art Reranking Models}, author={Abdelrahman Abdallah and Bhawna Piryani and Jamshid Mozafari and Mohammed Ali and Adam Jatowt}, year={2025}, eprint={2508.16757}, archivePrefix={arXiv}, primaryClass={cs.CL}}",
76
+ "homepage": "https://github.com/DataScienceUIBK/llm-reranking-generalization-study",
77
+ "license": "Apache-2.0",
78
+ "features": {
79
+ "query_id": {
80
+ "dtype": "string",
81
+ "id": null,
82
+ "_type": "Value"
83
+ },
84
+ "iteration": {
85
+ "dtype": "int32",
86
+ "id": null,
87
+ "_type": "Value"
88
+ },
89
+ "doc_id": {
90
+ "dtype": "string",
91
+ "id": null,
92
+ "_type": "Value"
93
+ },
94
+ "relevance": {
95
+ "dtype": "int32",
96
+ "id": null,
97
+ "_type": "Value"
98
+ }
99
+ },
100
+ "splits": {
101
+ "qrels": {
102
+ "name": "qrels",
103
+ "num_bytes": 99328,
104
+ "num_examples": 2938,
105
+ "dataset_name": "future_query_eval"
106
+ }
107
+ },
108
+ "download_size": 99328,
109
+ "dataset_size": 99328
110
+ }
111
+ }