---
annotations_creators:
- human-annotated
language:
- deu
- eng
- fra
- ita
- por
- spa
- zho
license: other
multilinguality: multilingual
source_datasets:
- forresty/xglue
task_categories:
- text-ranking
task_ids: []
dataset_info:
- config_name: de-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2388440
num_examples: 8007
- name: test
num_bytes: 2297772
num_examples: 7891
download_size: 2479520
dataset_size: 4686212
- config_name: de-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 554364
num_examples: 8007
- name: test
num_bytes: 451701
num_examples: 7891
download_size: 127491
dataset_size: 1006065
- config_name: de-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 36037
num_examples: 717
- name: test
num_bytes: 35094
num_examples: 783
download_size: 47498
dataset_size: 71131
- config_name: de-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 326645
num_examples: 717
- name: test
num_bytes: 271944
num_examples: 783
download_size: 117548
dataset_size: 598589
- config_name: en-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2813122
num_examples: 8232
- name: test
num_bytes: 2751718
num_examples: 8208
download_size: 2930536
dataset_size: 5564840
- config_name: en-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 569556
num_examples: 8232
- name: test
num_bytes: 469954
num_examples: 8208
download_size: 130770
dataset_size: 1039510
- config_name: en-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 37301
num_examples: 687
- name: test
num_bytes: 35030
num_examples: 732
download_size: 48707
dataset_size: 72331
- config_name: en-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 334181
num_examples: 687
- name: test
num_bytes: 281198
num_examples: 732
download_size: 122803
dataset_size: 615379
- config_name: es-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2085965
num_examples: 8497
- name: test
num_bytes: 1981339
num_examples: 8317
download_size: 2022274
dataset_size: 4067304
- config_name: es-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 587538
num_examples: 8497
- name: test
num_bytes: 475824
num_examples: 8317
download_size: 132346
dataset_size: 1063362
- config_name: es-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 37250
num_examples: 704
- name: test
num_bytes: 31006
num_examples: 682
download_size: 44442
dataset_size: 68256
- config_name: es-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 344518
num_examples: 704
- name: test
num_bytes: 283415
num_examples: 682
download_size: 124488
dataset_size: 627933
- config_name: fr-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2560585
num_examples: 8306
- name: test
num_bytes: 2473044
num_examples: 8246
download_size: 2612033
dataset_size: 5033629
- config_name: fr-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 574671
num_examples: 8306
- name: test
num_bytes: 471131
num_examples: 8246
download_size: 131469
dataset_size: 1045802
- config_name: fr-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 38748
num_examples: 749
- name: test
num_bytes: 32083
num_examples: 690
download_size: 46560
dataset_size: 70831
- config_name: fr-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 338610
num_examples: 749
- name: test
num_bytes: 280831
num_examples: 690
download_size: 123258
dataset_size: 619441
- config_name: it-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2016734
num_examples: 8297
- name: test
num_bytes: 1886755
num_examples: 7965
download_size: 1982354
dataset_size: 3903489
- config_name: it-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 573744
num_examples: 8297
- name: test
num_bytes: 455664
num_examples: 7965
download_size: 129629
dataset_size: 1029408
- config_name: it-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 39029
num_examples: 755
- name: test
num_bytes: 35428
num_examples: 777
download_size: 48155
dataset_size: 74457
- config_name: it-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 338034
num_examples: 755
- name: test
num_bytes: 273816
num_examples: 777
download_size: 120736
dataset_size: 611850
- config_name: pt-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2095419
num_examples: 8591
- name: test
num_bytes: 1982691
num_examples: 8313
download_size: 2028030
dataset_size: 4078110
- config_name: pt-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 595109
num_examples: 8591
- name: test
num_bytes: 475704
num_examples: 8313
download_size: 132940
dataset_size: 1070813
- config_name: pt-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 39019
num_examples: 702
- name: test
num_bytes: 33757
num_examples: 677
download_size: 49188
dataset_size: 72776
- config_name: pt-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 348880
num_examples: 702
- name: test
num_bytes: 283343
num_examples: 677
download_size: 125122
dataset_size: 632223
- config_name: zh-corpus
features:
- name: title
dtype: string
- name: text
dtype: string
- name: id
dtype: string
splits:
- name: validation
num_bytes: 2649611
num_examples: 8440
- name: test
num_bytes: 2540926
num_examples: 8212
download_size: 2671177
dataset_size: 5190537
- config_name: zh-qrels
features:
- name: query-id
dtype: string
- name: corpus-id
dtype: string
- name: score
dtype: int64
splits:
- name: validation
num_bytes: 582364
num_examples: 8440
- name: test
num_bytes: 469139
num_examples: 8212
download_size: 130209
dataset_size: 1051503
- config_name: zh-queries
features:
- name: id
dtype: string
- name: text
dtype: string
splits:
- name: validation
num_bytes: 31410
num_examples: 630
- name: test
num_bytes: 29341
num_examples: 660
download_size: 41800
dataset_size: 60751
- config_name: zh-top_ranked
features:
- name: query-id
dtype: string
- name: corpus-ids
sequence: string
splits:
- name: validation
num_bytes: 339271
num_examples: 630
- name: test
num_bytes: 278811
num_examples: 660
download_size: 122740
dataset_size: 618082
configs:
- config_name: de-corpus
data_files:
- split: validation
path: de-corpus/validation-*
- split: test
path: de-corpus/test-*
- config_name: de-qrels
data_files:
- split: validation
path: de-qrels/validation-*
- split: test
path: de-qrels/test-*
- config_name: de-queries
data_files:
- split: validation
path: de-queries/validation-*
- split: test
path: de-queries/test-*
- config_name: de-top_ranked
data_files:
- split: validation
path: de-top_ranked/validation-*
- split: test
path: de-top_ranked/test-*
- config_name: en-corpus
data_files:
- split: validation
path: en-corpus/validation-*
- split: test
path: en-corpus/test-*
- config_name: en-qrels
data_files:
- split: validation
path: en-qrels/validation-*
- split: test
path: en-qrels/test-*
- config_name: en-queries
data_files:
- split: validation
path: en-queries/validation-*
- split: test
path: en-queries/test-*
- config_name: en-top_ranked
data_files:
- split: validation
path: en-top_ranked/validation-*
- split: test
path: en-top_ranked/test-*
- config_name: es-corpus
data_files:
- split: validation
path: es-corpus/validation-*
- split: test
path: es-corpus/test-*
- config_name: es-qrels
data_files:
- split: validation
path: es-qrels/validation-*
- split: test
path: es-qrels/test-*
- config_name: es-queries
data_files:
- split: validation
path: es-queries/validation-*
- split: test
path: es-queries/test-*
- config_name: es-top_ranked
data_files:
- split: validation
path: es-top_ranked/validation-*
- split: test
path: es-top_ranked/test-*
- config_name: fr-corpus
data_files:
- split: validation
path: fr-corpus/validation-*
- split: test
path: fr-corpus/test-*
- config_name: fr-qrels
data_files:
- split: validation
path: fr-qrels/validation-*
- split: test
path: fr-qrels/test-*
- config_name: fr-queries
data_files:
- split: validation
path: fr-queries/validation-*
- split: test
path: fr-queries/test-*
- config_name: fr-top_ranked
data_files:
- split: validation
path: fr-top_ranked/validation-*
- split: test
path: fr-top_ranked/test-*
- config_name: it-corpus
data_files:
- split: validation
path: it-corpus/validation-*
- split: test
path: it-corpus/test-*
- config_name: it-qrels
data_files:
- split: validation
path: it-qrels/validation-*
- split: test
path: it-qrels/test-*
- config_name: it-queries
data_files:
- split: validation
path: it-queries/validation-*
- split: test
path: it-queries/test-*
- config_name: it-top_ranked
data_files:
- split: validation
path: it-top_ranked/validation-*
- split: test
path: it-top_ranked/test-*
- config_name: pt-corpus
data_files:
- split: validation
path: pt-corpus/validation-*
- split: test
path: pt-corpus/test-*
- config_name: pt-qrels
data_files:
- split: validation
path: pt-qrels/validation-*
- split: test
path: pt-qrels/test-*
- config_name: pt-queries
data_files:
- split: validation
path: pt-queries/validation-*
- split: test
path: pt-queries/test-*
- config_name: pt-top_ranked
data_files:
- split: validation
path: pt-top_ranked/validation-*
- split: test
path: pt-top_ranked/test-*
- config_name: zh-corpus
data_files:
- split: validation
path: zh-corpus/validation-*
- split: test
path: zh-corpus/test-*
- config_name: zh-qrels
data_files:
- split: validation
path: zh-qrels/validation-*
- split: test
path: zh-qrels/test-*
- config_name: zh-queries
data_files:
- split: validation
path: zh-queries/validation-*
- split: test
path: zh-queries/test-*
- config_name: zh-top_ranked
data_files:
- split: validation
path: zh-top_ranked/validation-*
- split: test
path: zh-top_ranked/test-*
tags:
- mteb
- text
---
XGLUE is a benchmark dataset for evaluating the performance of cross-lingual pre-trained models on cross-lingual natural language understanding and generation. XGLUE is composed of 11 tasks spanning 19 languages.
|               |                                     |
|---------------|-------------------------------------|
| Task category | t2t                                 |
| Domains       | Written                             |
| Reference     | https://github.com/microsoft/XGLUE  |
Source datasets:

- [forresty/xglue](https://huggingface.co/datasets/forresty/xglue)
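Each language ships four configurations: `{lang}-corpus` (documents with `title`, `text`, `id`), `{lang}-queries` (`id`, `text`), `{lang}-qrels` (`query-id`, `corpus-id`, `score`), and `{lang}-top_ranked` (the candidate `corpus-ids` to rerank for each `query-id`). The sketch below shows how the raw splits could be loaded with `datasets`; the repository id `mteb/XGlueWPRReranking` is an assumption for illustration and should be replaced with this dataset's actual Hub id.

```python
from datasets import load_dataset

# Assumed repository id, for illustration only; substitute the actual Hub id of this dataset.
REPO = "mteb/XGlueWPRReranking"

# German subset, test split (a validation split is also available).
corpus = load_dataset(REPO, "de-corpus", split="test")          # title, text, id
queries = load_dataset(REPO, "de-queries", split="test")        # id, text
qrels = load_dataset(REPO, "de-qrels", split="test")            # query-id, corpus-id, score
top_ranked = load_dataset(REPO, "de-top_ranked", split="test")  # query-id, corpus-ids

print(queries[0]["text"])
print(top_ranked[0]["corpus-ids"][:5])  # candidate documents to rerank for the first query
```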
## How to evaluate on this task

You can evaluate an embedding model on this dataset using the following code:

```python
import mteb

task = mteb.get_task("XGlueWPRReranking")
evaluator = mteb.MTEB([task])

model = mteb.get_model(YOUR_MODEL)
evaluator.run(model)
```
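For example, a minimal end-to-end run might look like the sketch below; the model name is only illustrative (any embedding model supported by `mteb.get_model` works), and `output_folder` controls where the result files are written.

```python
import mteb

# Illustrative model choice; replace with the embedding model you want to evaluate.
model = mteb.get_model("sentence-transformers/all-MiniLM-L6-v2")

task = mteb.get_task("XGlueWPRReranking")
evaluator = mteb.MTEB(tasks=[task])

# Scores per language subset and split are written as JSON under `results/`.
results = evaluator.run(model, output_folder="results")
```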
To learn more about how to run models on `mteb` tasks, check out the [GitHub repository](https://github.com/embeddings-benchmark/mteb).
## Citation

If you use this dataset, please cite the dataset as well as mteb, as this dataset likely includes additional processing as part of the MMTEB contribution.

```bibtex
@misc{11234/1-3105,
author = {Zeman, Daniel and Nivre, Joakim and Abrams, Mitchell and Aepli, No{\"e}mi and Agi{\'c}, {\v Z}eljko and Ahrenberg, Lars and Aleksandravi{\v c}i{\=u}t{\.e}, Gabriel{\.e} and Antonsen, Lene and Aplonova, Katya and Aranzabe, Maria Jesus and Arutie, Gashaw and Asahara, Masayuki and Ateyah, Luma and Attia, Mohammed and Atutxa, Aitziber and Augustinus, Liesbeth and Badmaeva, Elena and Ballesteros, Miguel and Banerjee, Esha and Bank, Sebastian and Barbu Mititelu, Verginica and Basmov, Victoria and Batchelor, Colin and Bauer, John and Bellato, Sandra and Bengoetxea, Kepa and Berzak, Yevgeni and Bhat, Irshad Ahmad and Bhat, Riyaz Ahmad and Biagetti, Erica and Bick, Eckhard and Bielinskien{\.e}, Agn{\.e} and Blokland, Rogier and Bobicev, Victoria and Boizou, Lo{\"{\i}}c and Borges V{\"o}lker, Emanuel and B{\"o}rstell, Carl and Bosco, Cristina and Bouma, Gosse and Bowman, Sam and Boyd, Adriane and Brokait{\.e}, Kristina and Burchardt, Aljoscha and Candito, Marie and Caron, Bernard and Caron, Gauthier and Cavalcanti, Tatiana and Cebiro{\u g}lu Eryi{\u g}it, G{\"u}l{\c s}en and Cecchini, Flavio Massimiliano and Celano, Giuseppe G. A. and {\v C}{\'e}pl{\"o}, Slavom{\'{\i}}r and Cetin, Savas and Chalub, Fabricio and Choi, Jinho and Cho, Yongseok and Chun, Jayeol and Cignarella, Alessandra T. and Cinkov{\'a}, Silvie and Collomb, Aur{\'e}lie and {\c C}{\"o}ltekin, {\c C}a{\u g}r{\i} and Connor, Miriam and Courtin, Marine and Davidson, Elizabeth and de Marneffe, Marie-Catherine and de Paiva, Valeria and de Souza, Elvis and Diaz de Ilarraza, Arantza and Dickerson, Carly and Dione, Bamba and Dirix, Peter and Dobrovoljc, Kaja and Dozat, Timothy and Droganova, Kira and Dwivedi, Puneet and Eckhoff, Hanne and Eli, Marhaba and Elkahky, Ali and Ephrem, Binyam and Erina, Olga and Erjavec, Toma{\v z} and Etienne, Aline and Evelyn, Wograine and Farkas, Rich{\'a}rd and Fernandez Alcalde, Hector and Foster, Jennifer and Freitas, Cl{\'a}udia and Fujita, Kazunori and Gajdo{\v s}ov{\'a}, Katar{\'{\i}}na and Galbraith, Daniel and Garcia, Marcos and G{\"a}rdenfors, Moa and Garza, Sebastian and Gerdes, Kim and Ginter, Filip and Goenaga, Iakes and Gojenola, Koldo and G{\"o}k{\i}rmak, Memduh and Goldberg, Yoav and G{\'o}mez Guinovart, Xavier and Gonz{\'a}lez Saavedra, Berta and Grici{\=u}t{\.e}, Bernadeta and Grioni, Matias and Gr{\=u}z{\={\i}}tis, Normunds and Guillaume, Bruno and Guillot-Barbance, C{\'e}line and Habash, Nizar and Haji{\v c}, Jan and Haji{\v c} jr., Jan and H{\"a}m{\"a}l{\"a}inen, Mika and H{\`a} M{\~y}, Linh and Han, Na-Rae and Harris, Kim and Haug, Dag and Heinecke, Johannes and Hennig, Felix and Hladk{\'a}, Barbora and Hlav{\'a}{\v c}ov{\'a}, Jaroslava and Hociung, Florinel and Hohle, Petter and Hwang, Jena and Ikeda, Takumi and Ion, Radu and Irimia, Elena and Ishola, {\d O}l{\'a}j{\'{\i}}d{\'e} and Jel{\'{\i}}nek, Tom{\'a}{\v s} and Johannsen, Anders and J{\o}rgensen, Fredrik and Juutinen, Markus and Ka{\c s}{\i}kara, H{\"u}ner and Kaasen, Andre and Kabaeva, Nadezhda and Kahane, Sylvain and Kanayama, Hiroshi and Kanerva, Jenna and Katz, Boris and Kayadelen, Tolga and Kenney, Jessica and Kettnerov{\'a}, V{\'a}clava and Kirchner, Jesse and Klementieva, Elena and K{\"o}hn, Arne and Kopacewicz, Kamil and Kotsyba, Natalia and Kovalevskait{\.e}, Jolanta and Krek, Simon and Kwak, Sookyoung and Laippala, Veronika and Lambertino, Lorenzo and Lam, Lucia and Lando, Tatiana and Larasati, Septina Dian and Lavrentiev, Alexei and Lee, John and L{\^e} H{\`{\^o}}ng, Phương and Lenci, Alessandro and Lertpradit, 
Saran and Leung, Herman and Li, Cheuk Ying and Li, Josie and Li, Keying and Lim, {KyungTae} and Liovina, Maria and Li, Yuan and Ljube{\v s}i{\'c}, Nikola and Loginova, Olga and Lyashevskaya, Olga and Lynn, Teresa and Macketanz, Vivien and Makazhanov, Aibek and Mandl, Michael and Manning, Christopher and Manurung, Ruli and M{\u a}r{\u a}nduc, C{\u a}t{\u a}lina and Mare{\v c}ek, David and Marheinecke, Katrin and Mart{\'{\i}}nez Alonso, H{\'e}ctor and Martins, Andr{\'e} and Ma{\v s}ek, Jan and Matsumoto, Yuji and {McDonald}, Ryan and {McGuinness}, Sarah and Mendon{\c c}a, Gustavo and Miekka, Niko and Misirpashayeva, Margarita and Missil{\"a}, Anna and Mititelu, C{\u a}t{\u a}lin and Mitrofan, Maria and Miyao, Yusuke and Montemagni, Simonetta and More, Amir and Moreno Romero, Laura and Mori, Keiko Sophie and Morioka, Tomohiko and Mori, Shinsuke and Moro, Shigeki and Mortensen, Bjartur and Moskalevskyi, Bohdan and Muischnek, Kadri and Munro, Robert and Murawaki, Yugo and M{\"u}{\"u}risep, Kaili and Nainwani, Pinkey and Navarro Hor{\~n}iacek, Juan Ignacio and Nedoluzhko, Anna and Ne{\v s}pore-B{\=e}rzkalne, Gunta and Nguy{\~{\^e}}n Th{\d i}, Lương and Nguy{\~{\^e}}n Th{\d i} Minh, Huy{\`{\^e}}n and Nikaido, Yoshihiro and Nikolaev, Vitaly and Nitisaroj, Rattima and Nurmi, Hanna and Ojala, Stina and Ojha, Atul Kr. and Ol{\'u}{\`o}kun, Ad{\'e}day{\d o}̀ and Omura, Mai and Osenova, Petya and {\"O}stling, Robert and {\O}vrelid, Lilja and Partanen, Niko and Pascual, Elena and Passarotti, Marco and Patejuk, Agnieszka and Paulino-Passos, Guilherme and Peljak-{\L}api{\'n}ska, Angelika and Peng, Siyao and Perez, Cenel-Augusto and Perrier, Guy and Petrova, Daria and Petrov, Slav and Phelan, Jason and Piitulainen, Jussi and Pirinen, Tommi A and Pitler, Emily and Plank, Barbara and Poibeau, Thierry and Ponomareva, Larisa and Popel, Martin and Pretkalni{\c n}a, Lauma and Pr{\'e}vost, Sophie and Prokopidis, Prokopis and Przepi{\'o}rkowski, Adam and Puolakainen, Tiina and Pyysalo, Sampo and Qi, Peng and R{\"a}{\"a}bis, Andriela and Rademaker, Alexandre and Ramasamy, Loganathan and Rama, Taraka and Ramisch, Carlos and Ravishankar, Vinit and Real, Livy and Reddy, Siva and Rehm, Georg and Riabov, Ivan and Rie{\ss}ler, Michael and Rimkut{\.e}, Erika and Rinaldi, Larissa and Rituma, Laura and Rocha, Luisa and Romanenko, Mykhailo and Rosa, Rudolf and Rovati, Davide and Roșca, Valentin and Rudina, Olga and Rueter, Jack and Sadde, Shoval and Sagot, Beno{\^{\i}}t and Saleh, Shadi and Salomoni, Alessio and Samard{\v z}i{\'c}, Tanja and Samson, Stephanie and Sanguinetti, Manuela and S{\"a}rg, Dage and Saul{\={\i}}te, Baiba and Sawanakunanon, Yanin and Schneider, Nathan and Schuster, Sebastian and Seddah, Djam{\'e} and Seeker, Wolfgang and Seraji, Mojgan and Shen, Mo and Shimada, Atsuko and Shirasu, Hiroyuki and Shohibussirri, Muh and Sichinava, Dmitry and Silveira, Aline and Silveira, Natalia and Simi, Maria and Simionescu, Radu and Simk{\'o}, Katalin and {\v S}imkov{\'a}, M{\'a}ria and Simov, Kiril and Smith, Aaron and Soares-Bastos, Isabela and Spadine, Carolyn and Stella, Antonio and Straka, Milan and Strnadov{\'a}, Jana and Suhr, Alane and Sulubacak, Umut and Suzuki, Shingo and Sz{\'a}nt{\'o}, Zsolt and Taji, Dima and Takahashi, Yuta and Tamburini, Fabio and Tanaka, Takaaki and Tellier, Isabelle and Thomas, Guillaume and Torga, Liisi and Trosterud, Trond and Trukhina, Anna and Tsarfaty, Reut and Tyers, Francis and Uematsu, Sumire and Ure{\v s}ov{\'a}, Zde{\v n}ka and Uria, Larraitz and Uszkoreit, Hans and Utka, 
Andrius and Vajjala, Sowmya and van Niekerk, Daniel and van Noord, Gertjan and Varga, Viktor and Villemonte de la Clergerie, Eric and Vincze, Veronika and Wallin, Lars and Walsh, Abigail and Wang, Jing Xian and Washington, Jonathan North and Wendt, Maximilan and Williams, Seyi and Wir{\'e}n, Mats and Wittern, Christian and Woldemariam, Tsegay and Wong, Tak-sum and Wr{\'o}blewska, Alina and Yako, Mary and Yamazaki, Naoki and Yan, Chunxiao and Yasuoka, Koichi and Yavrumyan, Marat M. and Yu, Zhuoran and {\v Z}abokrtsk{\'y}, Zden{\v e}k and Zeldes, Amir and Zhang, Manying and Zhu, Hanzhi},
copyright = {Licence Universal Dependencies v2.5},
note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
title = {Universal Dependencies 2.5},
url = {http://hdl.handle.net/11234/1-3105},
year = {2019},
}
@inproceedings{Conneau2018XNLIEC,
author = {Alexis Conneau and Guillaume Lample and Ruty Rinott and Adina Williams and Samuel R. Bowman and Holger Schwenk and Veselin Stoyanov},
booktitle = {EMNLP},
title = {XNLI: Evaluating Cross-lingual Sentence Representations},
year = {2018},
}
@article{Lewis2019MLQAEC,
author = {Patrick Lewis and Barlas Oguz and Ruty Rinott and Sebastian Riedel and Holger Schwenk},
journal = {ArXiv},
title = {MLQA: Evaluating Cross-lingual Extractive Question Answering},
volume = {abs/1910.07475},
year = {2019},
}
@article{Liang2020XGLUEAN,
author = {Yaobo Liang and Nan Duan and Yeyun Gong and Ning Wu and Fenfei Guo and Weizhen Qi and Ming Gong and Linjun Shou and Daxin Jiang and Guihong Cao and Xiaodong Fan and Ruofei Zhang and Rahul Agrawal and Edward Cui and Sining Wei and Taroon Bharti and Ying Qiao and Jiun-Hung Chen and Winnie Wu and Shuguang Liu and Fan Yang and Daniel Campos and Rangan Majumder and Ming Zhou},
journal = {arXiv},
title = {XGLUE: A New Benchmark Dataset for Cross-lingual Pre-training, Understanding and Generation},
volume = {abs/2004.01401},
year = {2020},
}
@article{Sang2002IntroductionTT,
author = {Erik F. Tjong Kim Sang},
journal = {ArXiv},
title = {Introduction to the CoNLL-2002 Shared Task: Language-Independent Named Entity Recognition},
volume = {cs.CL/0209010},
year = {2002},
}
@article{Sang2003IntroductionTT,
author = {Erik F. Tjong Kim Sang and Fien De Meulder},
journal = {ArXiv},
title = {Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition},
volume = {cs.CL/0306050},
year = {2003},
}
@article{Yang2019PAWSXAC,
author = {Yinfei Yang and Yuan Zhang and Chris Tar and Jason Baldridge},
journal = {ArXiv},
title = {PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification},
volume = {abs/1908.11828},
year = {2019},
}
@article{enevoldsen2025mmtebmassivemultilingualtext,
title={MMTEB: Massive Multilingual Text Embedding Benchmark},
author={Kenneth Enevoldsen and Isaac Chung and Imene Kerboua and Márton Kardos and Ashwin Mathur and David Stap and Jay Gala and Wissam Siblini and Dominik Krzemiński and Genta Indra Winata and Saba Sturua and Saiteja Utpala and Mathieu Ciancone and Marion Schaeffer and Gabriel Sequeira and Diganta Misra and Shreeya Dhakal and Jonathan Rystrøm and Roman Solomatin and Ömer Çağatan and Akash Kundu and Martin Bernstorff and Shitao Xiao and Akshita Sukhlecha and Bhavish Pahwa and Rafał Poświata and Kranthi Kiran GV and Shawon Ashraf and Daniel Auras and Björn Plüster and Jan Philipp Harries and Loïc Magne and Isabelle Mohr and Mariya Hendriksen and Dawei Zhu and Hippolyte Gisserot-Boukhlef and Tom Aarsen and Jan Kostkan and Konrad Wojtasik and Taemin Lee and Marek Šuppa and Crystina Zhang and Roberta Rocca and Mohammed Hamdy and Andrianos Michail and John Yang and Manuel Faysse and Aleksei Vatolin and Nandan Thakur and Manan Dey and Dipam Vasani and Pranjal Chitale and Simone Tedeschi and Nguyen Tai and Artem Snegirev and Michael Günther and Mengzhou Xia and Weijia Shi and Xing Han Lù and Jordan Clive and Gayatri Krishnakumar and Anna Maksimova and Silvan Wehrli and Maria Tikhonova and Henil Panchal and Aleksandr Abramov and Malte Ostendorff and Zheng Liu and Simon Clematide and Lester James Miranda and Alena Fenogenova and Guangyu Song and Ruqiya Bin Safi and Wen-Ding Li and Alessia Borghini and Federico Cassano and Hongjin Su and Jimmy Lin and Howard Yen and Lasse Hansen and Sara Hooker and Chenghao Xiao and Vaibhav Adlakha and Orion Weller and Siva Reddy and Niklas Muennighoff},
publisher = {arXiv},
journal={arXiv preprint arXiv:2502.13595},
year={2025},
url={https://arxiv.org/abs/2502.13595},
doi = {10.48550/arXiv.2502.13595},
}
@article{muennighoff2022mteb,
author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Loïc and Reimers, Nils},
title = {MTEB: Massive Text Embedding Benchmark},
publisher = {arXiv},
journal={arXiv preprint arXiv:2210.07316},
year = {2022},
url = {https://arxiv.org/abs/2210.07316},
doi = {10.48550/ARXIV.2210.07316},
}
```
## Dataset Statistics
The following code contains the descriptive statistics from the task. These can also be obtained using:

```python
import mteb

task = mteb.get_task("XGlueWPRReranking")

desc_stats = task.metadata.descriptive_stats
```

```json
{}
```
*This dataset card was automatically generated using [MTEB](https://github.com/embeddings-benchmark/mteb).*