Datasets:
Update README.md
Browse files
README.md
CHANGED
|
@@ -1229,10 +1229,56 @@ Below we provide a list of language codes. For each language code the amount of
|
|
| 1229 |
### Cite us
|
| 1230 |
|
| 1231 |
```
|
| 1232 |
-
@
|
| 1233 |
-
|
| 1234 |
-
|
| 1235 |
-
|
| 1236 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1237 |
}
|
| 1238 |
```
|
|
|
|
### Cite us

```bibtex
% Canonical ACL Anthology citation for the HPLT v2 dataset paper (ACL 2025,
% Volume 1: Long Papers). Reconstructed from the diff view: extraction had
% interleaved diff gutters (`| 1232 |`, `+`, `|`) into the entry, making it
% unparsable by BibTeX. Field values are byte-identical to the source; the
% only normalization is lowercasing `ISBN` -> `isbn` (field names are
% case-insensitive in BibTeX; every other field here is lowercase).
@inproceedings{burchell-etal-2025-expanded,
    title = "An Expanded Massive Multilingual Dataset for High-Performance Language Technologies ({HPLT})",
    author = {Burchell, Laurie and
      de Gibert, Ona and
      Arefyev, Nikolay and
      Aulamo, Mikko and
      Ba{\~n}{\'o}n, Marta and
      Chen, Pinzhen and
      Fedorova, Mariia and
      Guillou, Liane and
      Haddow, Barry and
      Haji{\v{c}}, Jan and
      Helcl, Jind{\v{r}}ich and
      Henriksson, Erik and
      Klimaszewski, Mateusz and
      Komulainen, Ville and
      Kutuzov, Andrey and
      Kyt{\"o}niemi, Joona and
      Laippala, Veronika and
      M{\ae}hlum, Petter and
      Malik, Bhavitvya and
      Mehryary, Farrokh and
      Mikhailov, Vladislav and
      Moghe, Nikita and
      Myntti, Amanda and
      O{'}Brien, Dayy{\'a}n and
      Oepen, Stephan and
      Pal, Proyag and
      Piha, Jousia and
      Pyysalo, Sampo and
      Ram{\'i}rez-S{\'a}nchez, Gema and
      Samuel, David and
      Stepachev, Pavel and
      Tiedemann, J{\"o}rg and
      Vari{\v{s}}, Du{\v{s}}an and
      Vojt{\v{e}}chov{\'a}, Tereza and
      Zaragoza-Bernabeu, Jaume},
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.acl-long.854/",
    doi = "10.18653/v1/2025.acl-long.854",
    pages = "17452--17485",
    isbn = "979-8-89176-251-0",
    abstract = "Training state-of-the-art large language models requires vast amounts of clean and diverse textual data. However, building suitable multilingual datasets remains a challenge. In this work, we present HPLT v2, a collection of high-quality multilingual monolingual and parallel corpora, extending prior work of the HPLT project. The monolingual portion of the data contains 8T tokens covering 193 languages, while the parallel data contains 380M sentence pairs covering 51 languages. We document the entire data pipeline and release the code to reproduce it. We provide extensive analysis of the quality and characteristics of our data. Finally, we evaluate the performance of language models and machine translation systems trained on HPLT v2, demonstrating its value."
}
```