---
annotations_creators:
  - other
language_creators:
  - other
language:
  - sk
license:
  - other
  - cc-by-sa-4.0
  - cc-by-sa-3.0
  - mit
multilinguality:
  - monolingual
size_categories:
  - 100K<n<1M
source_datasets:
  - original
task_categories:
  - question-answering
  - text-classification
  - token-classification
task_ids:
  - extractive-qa
  - named-entity-recognition
  - acceptability-classification
  - natural-language-inference
  - semantic-similarity-scoring
  - sentiment-classification
  - text-scoring
paperswithcode_id: sklep
pretty_name: skLEP (General Language Understanding Evaluation benchmark for Slovak)
tags:
  - qa-nli
  - coreference-nli
  - paraphrase-identification
config_names:
  - hate-speech
  - sentiment-analysis
  - ner-wikigoldsk
  - ner-uner
  - pos
  - question-answering
  - rte
  - nli
  - sts
dataset_info:
  - config_name: hate-speech
    features:
      - name: text
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              '0': negative
              '1': positive
      - name: id
        dtype: int32
    splits:
      - name: train
        num_bytes: 1393604
        num_examples: 10531
      - name: test
        num_bytes: 150919
        num_examples: 1319
      - name: validation
        num_bytes: 160199
        num_examples: 1339
  - config_name: sentiment-analysis
    features:
      - name: text
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              '0': negative
              '1': positive
      - name: id
        dtype: int32
    splits:
      - name: train
        num_bytes: 387491
        num_examples: 3560
      - name: test
        num_bytes: 117983
        num_examples: 1042
      - name: validation
        num_bytes: 117983
        num_examples: 522
  - config_name: ner-wikigoldsk
    features:
      - name: sentence
        dtype: string
      - name: tokens
        sequence: string
      - name: ner_tags
        sequence:
          class_label:
            names:
              '0': O
              '1': B-LOC
              '2': I-LOC
              '3': B-ORG
              '4': I-ORG
              '5': B-PER
              '6': I-PER
              '7': B-MISC
              '8': I-MISC
      - name: ner_tags_text
        sequence: string
    splits:
      - name: train
        num_bytes: 1885504
        num_examples: 4687
      - name: validation
        num_bytes: 267514
        num_examples: 669
      - name: test
        num_bytes: 532642
        num_examples: 1340
  - config_name: ner-uner
    features:
      - name: sentence
        dtype: string
      - name: tokens
        sequence: string
      - name: ner_tags
        sequence:
          class_label:
            names:
              '0': O
              '1': B-LOC
              '2': I-LOC
              '3': B-ORG
              '4': I-ORG
              '5': B-PER
              '6': I-PER
              '7': B-MISC
              '8': I-MISC
      - name: ner_tags_text
        sequence: string
    splits:
      - name: train
        num_bytes: 1786598
        num_examples: 8483
      - name: validation
        num_bytes: 289084
        num_examples: 1060
      - name: test
        num_bytes: 289026
        num_examples: 1061
  - config_name: pos
    features:
      - name: id
        dtype: string
      - name: tokens
        sequence: string
      - name: pos_tags
        sequence: string
    splits:
      - name: train
        num_bytes: 1786598
        num_examples: 8483
      - name: validation
        num_bytes: 289084
        num_examples: 1060
      - name: test
        num_bytes: 289026
        num_examples: 1061
  - config_name: question-answering
    features:
      - name: id
        dtype: string
      - name: title
        dtype: string
      - name: context
        dtype: string
      - name: question
        dtype: string
      - name: answers
        struct:
          - name: text
            sequence: string
          - name: answer_start
            sequence: int32
    splits:
      - name: train
        num_bytes: 98742578
        num_examples: 71999
      - name: validation
        num_bytes: 13100270
        num_examples: 9583
      - name: test
        num_bytes: 12992195
        num_examples: 9583
  - config_name: rte
    features:
      - name: text1
        dtype: string
      - name: text2
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              '0': not entailment
              '1': entailment
      - name: idx
        dtype: int32
      - name: label_text
        dtype: string
      - name: text1_orig
        dtype: string
      - name: text2_orig
        dtype: string
    splits:
      - name: train
        num_bytes: 2134837
        num_examples: 2490
      - name: validation
        num_bytes: 229013
        num_examples: 277
      - name: test
        num_bytes: 1255739
        num_examples: 1660
  - config_name: nli
    features:
      - name: premise
        dtype: string
      - name: hypothesis
        dtype: string
      - name: label
        dtype:
          class_label:
            names:
              '0': neutral
              '1': entailment
              '2': contradiction
      - name: premise_orig
        dtype: string
      - name: hypothesis_orig
        dtype: string
    splits:
      - name: train
        num_bytes: 142579745
        num_examples: 392702
      - name: validation
        num_bytes: 1138053
        num_examples: 2490
      - name: test
        num_bytes: 2294209
        num_examples: 5004
  - config_name: sts
    features:
      - name: sentence1
        dtype: string
      - name: sentence2
        dtype: string
      - name: similarity_score
        dtype: float64
      - name: sentence1_orig
        dtype: string
      - name: sentence2_orig
        dtype: string
    splits:
      - name: train
        num_examples: 5604
        num_bytes: 2184171
      - name: validation
        num_examples: 1481
        num_bytes: 617309
      - name: test
        num_examples: 1352
        num_bytes: 493116
configs:
  - config_name: hate-speech
    data_files:
      - split: test
        path: hate-speech/test.json
      - split: train
        path: hate-speech/train.json
      - split: validation
        path: hate-speech/validation.json
  - config_name: sentiment-analysis
    data_files:
      - split: test
        path: sentiment-analysis/test.json
      - split: train
        path: sentiment-analysis/train.json
      - split: validation
        path: sentiment-analysis/validation.json
  - config_name: ner-wikigoldsk
    data_files:
      - split: test
        path: ner-wikigoldsk/test.jsonl
      - split: train
        path: ner-wikigoldsk/train.jsonl
      - split: validation
        path: ner-wikigoldsk/dev.jsonl
  - config_name: ner-uner
    data_files:
      - split: test
        path: ner-uner/test.jsonl
      - split: train
        path: ner-uner/train.jsonl
      - split: validation
        path: ner-uner/dev.jsonl
  - config_name: pos
    data_files:
      - split: test
        path: pos/test.jsonl
      - split: validation
        path: pos/dev.jsonl
      - split: train
        path: pos/train.jsonl
  - config_name: question-answering
    data_files:
      - split: test
        path: question-answering/test.json
      - split: validation
        path: question-answering/validation.json
      - split: train
        path: question-answering/train.json
  - config_name: rte
    data_files:
      - split: test
        path: rte/test.json
      - split: validation
        path: rte/validation.json
      - split: train
        path: rte/train.json
  - config_name: nli
    data_files:
      - split: test
        path: nli/test.json
      - split: validation
        path: nli/validation.json
      - split: train
        path: nli/train.json
  - config_name: sts
    data_files:
      - split: test
        path: sts/test.json
      - split: validation
        path: sts/validation.json
      - split: train
        path: sts/train.json
---

# Dataset Card for skLEP

## Dataset Description

skLEP (General Language Understanding Evaluation benchmark for Slovak) is the first comprehensive benchmark specifically designed for evaluating Slovak natural language understanding (NLU) models. The benchmark encompasses nine diverse tasks that span token-level, sentence-pair, and document-level challenges, thereby offering a thorough assessment of model capabilities.

To create this benchmark, we curated new, original datasets tailored for Slovak and meticulously translated established English NLU resources, with native-speaker post-editing to ensure high-quality evaluation.

### Dataset Summary

skLEP, the General Language Understanding Evaluation benchmark for Slovak, is a collection of resources for training, evaluating, and analyzing natural language understanding systems.

### Supported Tasks and Leaderboards

skLEP includes nine tasks across three categories:

**Token-Level Tasks:**

- Part-of-Speech (POS) Tagging using Universal Dependencies
- Named Entity Recognition using Universal NER (UNER)
- Named Entity Recognition using WikiGoldSK (WGSK)

**Sentence-Pair Tasks:**

- Recognizing Textual Entailment (RTE)
- Natural Language Inference (NLI)
- Semantic Textual Similarity (STS)

**Document-Level Tasks:**

- Hate Speech Classification (HS)
- Sentiment Analysis (SA)
- Question Answering (QA) based on SK-QuAD

A public leaderboard is available at https://github.com/slovak-nlp/sklep.
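
A minimal loading sketch using the `datasets` library is shown below; the repository id is a placeholder assumption, so substitute the id under which this dataset is actually hosted on the Hugging Face Hub.

```python
from datasets import load_dataset

# Placeholder repo id -- replace with the actual Hub id of this dataset.
REPO_ID = "slovak-nlp/sklep"

# Each skLEP task is exposed as a separate configuration (see `configs` above).
sa = load_dataset(REPO_ID, "sentiment-analysis")
print(sa)              # DatasetDict with train/validation/test splits
print(sa["train"][0])  # e.g. {'text': '...', 'label': 0, 'id': ...}
```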

### Languages

The language data in skLEP is in Slovak (BCP-47 `sk`).

## Dataset Structure

### Data Instances

The benchmark contains the following data splits:

- `hate-speech`: 10,531 train, 1,339 validation, 1,319 test examples
- `sentiment-analysis`: 3,560 train, 522 validation, 1,042 test examples
- `ner-wikigoldsk`: 4,687 train, 669 validation, 1,340 test examples
- `ner-uner`: 8,483 train, 1,060 validation, 1,061 test examples
- `pos`: 8,483 train, 1,060 validation, 1,061 test examples
- `question-answering`: 71,999 train, 9,583 validation, 9,583 test examples
- `rte`: 2,490 train, 277 validation, 1,660 test examples
- `nli`: 392,702 train, 2,490 validation, 5,004 test examples
- `sts`: 5,604 train, 1,481 validation, 1,352 test examples
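
These counts can be verified programmatically; the sketch below iterates over all nine configurations (same placeholder repo id as above).

```python
from datasets import load_dataset

CONFIGS = [
    "hate-speech", "sentiment-analysis", "ner-wikigoldsk", "ner-uner",
    "pos", "question-answering", "rte", "nli", "sts",
]

for name in CONFIGS:
    ds = load_dataset("slovak-nlp/sklep", name)  # placeholder repo id
    print(name, {split: ds[split].num_rows for split in ds})
```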

### Data Fields

Each task has specific data fields:

**Token-level tasks:** the NER configs (`ner-uner`, `ner-wikigoldsk`) provide `sentence`, `tokens`, `ner_tags`, and `ner_tags_text`; the `pos` config provides `id`, `tokens`, and `pos_tags`.

**Sentence-pair tasks:**

- RTE: `text1`, `text2`, `label`, `idx`, `label_text`, plus `text1_orig` and `text2_orig` holding the untranslated source texts
- NLI: `premise`, `hypothesis`, `label`, plus `premise_orig` and `hypothesis_orig`
- STS: `sentence1`, `sentence2`, `similarity_score`, plus `sentence1_orig` and `sentence2_orig`

**Document-level tasks:**

- Hate Speech/Sentiment: `text`, `label`, `id`
- Question Answering: `id`, `title`, `context`, `question`, and `answers` (a struct of `text` and `answer_start` sequences)
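
As a quick illustration of working with these features, the sketch below decodes the integer `ner_tags` of one UNER example back into tag strings, assuming the `dataset_info` features above are applied on load (placeholder repo id again).

```python
from datasets import load_dataset

ds = load_dataset("slovak-nlp/sklep", "ner-uner")  # placeholder repo id
example = ds["train"][0]

# `ner_tags` is a sequence of ClassLabel ids; the id -> tag-string mapping
# is stored in the split's features.
tag_names = ds["train"].features["ner_tags"].feature.names
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(f"{token}\t{tag_names[tag_id]}")
```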

### Data Splits

All tasks follow a standard train/validation/test split structure. Some datasets (HS and QA) originally had only train/test splits, so validation sets were created by sampling from the training data to match the test set size.
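
A minimal sketch of how such a validation set can be sampled is given below; it only illustrates the idea and is not the authors' exact procedure or seed.

```python
from datasets import load_dataset

ds = load_dataset("slovak-nlp/sklep", "hate-speech")  # placeholder repo id

# Carve a validation set of (roughly) the same size as the test split out of
# the training data; the seed here is arbitrary, for reproducibility only.
split = ds["train"].train_test_split(test_size=len(ds["test"]), seed=42)
train_ds, val_ds = split["train"], split["test"]
print(len(train_ds), len(val_ds), len(ds["test"]))
```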

## Dataset Creation

### Curation Rationale

skLEP was created to address the lack of a comprehensive benchmark for Slovak natural language understanding. While similar benchmarks exist for other Slavic languages (Bulgarian, Polish, Russian, Slovene), no equivalent existed for Slovak despite the emergence of several Slovak-specific large language models.

The benchmark was designed to provide a principled tool for evaluating language understanding capabilities across diverse tasks, enabling systematic comparison of Slovak-specific, multilingual, and English pre-trained models.

### Source Data

#### Initial Data Collection and Normalization

Data was collected from multiple sources:

- Existing Slovak datasets: Universal Dependencies, Universal NER, WikiGoldSK, Slovak Hate Speech Database, Reviews3, SK-QuAD
- Translated datasets: RTE, NLI (XNLI), and STS were translated from English using machine translation services followed by native-speaker post-editing

During preprocessing, duplicates were removed from the XNLI and STS datasets. For STS, sentence pairs with identical text but non-perfect similarity scores were eliminated as translation artifacts.
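
A sketch of that STS cleaning step is shown below, using the field names of the `sts` config; the 5.0 ceiling follows the usual STS-B score range, and the exact rule is an assumption rather than the authors' code.

```python
def clean_sts(examples):
    """Drop duplicate pairs and identical-text pairs with non-perfect scores."""
    seen, kept = set(), []
    for ex in examples:
        key = (ex["sentence1"], ex["sentence2"])
        if key in seen:
            continue  # exact duplicate pair
        seen.add(key)
        # Identical sentences scored below the assumed maximum of 5.0 are
        # treated as translation artifacts and removed.
        if ex["sentence1"] == ex["sentence2"] and ex["similarity_score"] < 5.0:
            continue
        kept.append(ex)
    return kept
```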

#### Who are the source language producers?

The source language producers include:

- Native Slovak speakers for the original Slovak datasets
- Professional translators and native Slovak post-editors for the translated datasets
- Wikipedia contributors for WikiGoldSK and SK-QuAD
- Social media users for the hate speech dataset
- Customer reviewers for the sentiment analysis dataset

### Annotations

#### Annotation process

Annotation processes varied by dataset:

- Token-level tasks: Following Universal Dependencies and Universal NER annotation guidelines
- WikiGoldSK: Manual annotation following BSNLP-2017 guidelines with the CoNLL-2003 NER tagset
- Hate Speech: Expert annotation with quality filtering, removing annotators with >90% uniform responses or <70% agreement (a minimal sketch of such a filter follows this list)
- Sentiment Analysis: Manual labeling by two annotators reaching consensus
- SK-QuAD: Created by 150+ volunteers and 9 part-time annotators, validated by 5 paid reviewers
- Translated datasets: Professional translation followed by native-speaker post-editing
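
A minimal sketch of such an annotator quality filter follows; the per-annotator data layout is hypothetical, and only the 90%/70% thresholds come from the description above.

```python
from collections import Counter

def keep_annotator(labels_by_item, majority_by_item):
    """Return True if an annotator passes both quality checks.

    `labels_by_item` maps item id -> this annotator's label (hypothetical
    layout); `majority_by_item` maps item id -> majority label.
    """
    labels = list(labels_by_item.values())
    # Reject annotators whose responses are >90% a single value.
    uniform_share = Counter(labels).most_common(1)[0][1] / len(labels)
    # Reject annotators agreeing with the majority label on <70% of items.
    agreement = sum(
        label == majority_by_item[item]
        for item, label in labels_by_item.items()
    ) / len(labels_by_item)
    return uniform_share <= 0.90 and agreement >= 0.70
```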

#### Who are the annotators?

Annotators include:

- Expert linguists and NLP researchers for token-level tasks
- Native Slovak speakers for post-editing translated content
- Domain experts for hate speech classification
- Trained volunteers and professional annotators for SK-QuAD
- Customer service experts for sentiment analysis

### Personal and Sensitive Information

The hate speech dataset contains social media posts that may include offensive language by design. Personal information was removed during preprocessing. Other datasets (Wikipedia-based, customer reviews, translated content) have minimal personal information risk.

## Considerations for Using the Data

### Social Impact of Dataset

skLEP enables systematic evaluation and improvement of Slovak NLP models, supporting the development of better language technology for Slovak speakers. The hate speech detection task specifically contributes to online safety tools for Slovak social media platforms.

### Discussion of Biases

Potential biases include:

- **Domain bias:** Wikipedia-heavy content in several tasks may not represent colloquial Slovak
- **Translation bias:** Translated tasks may carry over English linguistic patterns
- **Social media bias:** The hate speech dataset reflects specific online communities
- **Geographic bias:** May favor standard Slovak over regional variants

### Other Known Limitations

- Some test sets differ from their English counterparts due to translation and re-labeling requirements
- Dataset sizes vary significantly across tasks
- Limited coverage of specialized domains outside Wikipedia and social media
- Validation sets for some tasks were created by splitting training data rather than by independent collection

## Additional Information

### Dataset Curators

skLEP was curated by researchers from:

- Comenius University in Bratislava, Slovakia
- Technical University of Košice, Slovakia
- Kempelen Institute of Intelligent Technologies, Bratislava, Slovakia
- Cisco Systems

Lead contact: Marek Šuppa ([email protected])

### Licensing Information

The primary skLEP tasks are built on and derived from existing datasets. We refer users to the original licenses accompanying each dataset.

### Citation Information

If you use skLEP, please cite the following paper:

```bibtex
@inproceedings{suppa-etal-2025-sklep,
    title = "sk{LEP}: A {S}lovak General Language Understanding Benchmark",
    author = "Suppa, Marek  and
      Ridzik, Andrej  and
      Hl{\'a}dek, Daniel  and
      Jav{\r{u}}rek, Tom{\'a}{\v{s}}  and
      Ondrejov{\'a}, Vikt{\'o}ria  and
      S{\'a}sikov{\'a}, Krist{\'i}na  and
      Tamajka, Martin  and
      Simko, Marian",
    editor = "Che, Wanxiang  and
      Nabende, Joyce  and
      Shutova, Ekaterina  and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.1371/",
    pages = "26716--26743",
    ISBN = "979-8-89176-256-5",
    abstract = "In this work, we introduce skLEP, the first comprehensive benchmark specifically designed for evaluating Slovak natural language understanding (NLU) models. We have compiled skLEP to encompass nine diverse tasks that span token-level, sentence-pair, and document-level challenges, thereby offering a thorough assessment of model capabilities. To create this benchmark, we curated new, original datasets tailored for Slovak and meticulously translated established English NLU resources. Within this paper, we also present the first systematic and extensive evaluation of a wide array of Slovak-specific, multilingual, and English pre-trained language models using the skLEP tasks. Finally, we also release the complete benchmark data, an open-source toolkit facilitating both fine-tuning and evaluation of models, and a public leaderboard at \url{https://github.com/slovak-nlp/sklep} in the hopes of fostering reproducibility and drive future research in Slovak NLU."
}
```

### Contributions

Contributions to skLEP include:

- First comprehensive Slovak NLU benchmark with nine diverse tasks
- High-quality translations with native-speaker post-editing
- Extensive baseline evaluations across multiple model types
- Open-source toolkit and standardized leaderboard
- Rigorous evaluation methodology with hyperparameter optimization

Future contributions and improvements are welcome through the project repository.