---
configs:
  - config_name: amh
    data_files:
      - split: train
        path: amh/amh_all.csv
  - config_name: amh_fill_blank
    data_files:
      - split: train_1
        path: amh/amh_fill_1.csv
      - split: train_2
        path: amh/amh_fill_2.csv
      - split: train_3
        path: amh/amh_fill_3.csv
      - split: valid
        path: amh/amh_fill_valid.csv
  - config_name: amh_choice_english
    data_files:
      - split: english_1
        path: amh/amh_english_test_1.csv
      - split: english_2
        path: amh/amh_english_test_2.csv
      - split: english_3
        path: amh/amh_english_test_3.csv
      - split: english_4
        path: amh/amh_english_test_4.csv
      - split: english_5
        path: amh/amh_english_test_5.csv
  - config_name: translate_amh_choice_english
    data_files:
      - split: english_1
        path: translate-test/amh/amh_english_test_1.csv
      - split: english_2
        path: translate-test/amh/amh_english_test_2.csv
      - split: english_3
        path: translate-test/amh/amh_english_test_3.csv
  - config_name: amh_choice_native
    data_files:
      - split: native_1
        path: amh/amh_native_test_1.csv
      - split: native_2
        path: amh/amh_native_test_2.csv
      - split: native_3
        path: amh/amh_native_test_3.csv
      - split: native_4
        path: amh/amh_native_test_4.csv
      - split: native_5
        path: amh/amh_native_test_5.csv
  - config_name: translate_amh_choice_native
    data_files:
      - split: native_1
        path: translate-test/amh/amh_native_test_1.csv
      - split: native_2
        path: translate-test/amh/amh_native_test_2.csv
      - split: native_3
        path: translate-test/amh/amh_native_test_3.csv
  - config_name: amh_generation
    data_files:
      - split: native
        path: amh/amh_meaining_generation_native.csv
      - split: english
        path: amh/amh_meaining_generation_english.csv
  - config_name: eng
    data_files:
      - split: train
        path: eng/eng_all.csv
  - config_name: eng_fill_blank
    data_files:
      - split: train_1
        path: eng/eng_fill_1.csv
      - split: train_2
        path: eng/eng_fill_2.csv
      - split: train_3
        path: eng/eng_fill_3.csv
      - split: valid
        path: eng/eng_fill_valid.csv
  - config_name: eng_generation
    data_files:
      - split: native
        path: eng/eng_meaining_generation_native.csv
  - config_name: eng_choice_native
    data_files:
      - split: native_1
        path: eng/eng_native_test_1.csv
      - split: native_2
        path: eng/eng_native_test_2.csv
      - split: native_3
        path: eng/eng_native_test_3.csv
      - split: native_4
        path: eng/eng_native_test_4.csv
      - split: native_5
        path: eng/eng_native_test_5.csv
  - config_name: gez_fill_blank
    data_files:
      - split: train_1
        path: geez/geez_fill_1.csv
      - split: train_2
        path: geez/geez_fill_2.csv
      - split: train_3
        path: geez/geez_fill_3.csv
      - split: valid
        path: geez/gez_fill_valid.csv
  - config_name: gez_choice_english
    data_files:
      - split: english_1
        path: geez/geez_english_test_1.csv
      - split: english_2
        path: geez/geez_english_test_2.csv
      - split: english_3
        path: geez/geez_english_test_3.csv
      - split: english_4
        path: geez/geez_english_test_4.csv
      - split: english_5
        path: geez/geez_english_test_5.csv
  - config_name: gez_choice_native
    data_files:
      - split: native_1
        path: geez/geez_native_test_1.csv
      - split: native_2
        path: geez/geez_native_test_2.csv
      - split: native_3
        path: geez/geez_native_test_3.csv
      - split: native_4
        path: geez/geez_native_test_4.csv
      - split: native_5
        path: geez/geez_native_test_5.csv
  - config_name: gez_generation
    data_files:
      - split: native
        path: geez/gez-native-description.csv
      - split: english
        path: geez/geez_meaining_generation_english.csv
  - config_name: orm
    data_files:
      - split: train
        path: orm/orm_all.csv
  - config_name: orm_choice_english
    data_files:
      - split: english_1
        path: orm/orm_english_test_1.csv
      - split: english_2
        path: orm/orm_english_test_2.csv
      - split: english_3
        path: orm/orm_english_test_3.csv
      - split: english_4
        path: orm/orm_english_test_4.csv
      - split: english_5
        path: orm/orm_english_test_5.csv
  - config_name: translate_orm_choice_english
    data_files:
      - split: english_1
        path: translate-test/orm/orm_english_test_1.csv
      - split: english_2
        path: translate-test/orm/orm_english_test_2.csv
      - split: english_3
        path: translate-test/orm/orm_english_test_3.csv
  - config_name: orm_choice_native
    data_files:
      - split: native_1
        path: orm/orm_native_test_1.csv
      - split: native_2
        path: orm/orm_native_test_2.csv
      - split: native_3
        path: orm/orm_native_test_3.csv
      - split: native_4
        path: orm/orm_native_test_4.csv
      - split: native_5
        path: orm/orm_native_test_5.csv
  - config_name: translate_orm_choice_native
    data_files:
      - split: native_1
        path: translate-test/orm/orm_native_test_1.csv
      - split: native_2
        path: translate-test/orm/orm_native_test_2.csv
      - split: native_3
        path: translate-test/orm/orm_native_test_3.csv
  - config_name: orm_generation
    data_files:
      - split: native
        path: orm/orm_meaining_generation_native.csv
      - split: english
        path: orm/orm_meaining_generation_english.csv
  - config_name: orm_fill_blank
    data_files:
      - split: train_1
        path: orm/orm_fill_1.csv
      - split: train_2
        path: orm/orm_fill_2.csv
      - split: train_3
        path: orm/orm_fill_3.csv
      - split: valid
        path: orm/orm_fill_valid.csv
  - config_name: tir
    data_files:
      - split: train
        path: tir/tir_all.csv
  - config_name: tir_fill_blank
    data_files:
      - split: train_1
        path: tir/tir_fill_1.csv
      - split: train_2
        path: tir/tir_fill_2.csv
      - split: train_3
        path: tir/tir_fill_3.csv
      - split: valid
        path: tir/tir_fill_valid.csv
  - config_name: tir_generation
    data_files:
      - split: native
        path: tir/tir_meaining_generation_native.csv
      - split: english
        path: tir/tir_meaining_generation_english.csv
  - config_name: tir_choice_english
    data_files:
      - split: english_1
        path: tir/tir_english_test_1.csv
      - split: english_2
        path: tir/tir_english_test_2.csv
      - split: english_3
        path: tir/tir_english_test_3.csv
      - split: english_4
        path: tir/tir_english_test_4.csv
      - split: english_5
        path: tir/tir_english_test_5.csv
  - config_name: tir_choice_native
    data_files:
      - split: native_1
        path: tir/tir_native_test_1.csv
      - split: native_2
        path: tir/tir_native_test_2.csv
      - split: native_3
        path: tir/tir_native_test_3.csv
      - split: native_4
        path: tir/tir_native_test_4.csv
      - split: native_5
        path: tir/tir_native_test_5.csv
  - config_name: translate_tir_choice_english
    data_files:
      - split: english_1
        path: translate-test/tir/tir_english_test_1.csv
      - split: english_2
        path: translate-test/tir/tir_english_test_2.csv
      - split: english_3
        path: translate-test/tir/tir_english_test_3.csv
  - config_name: translate_tir_choice_native
    data_files:
      - split: native_1
        path: translate-test/tir/tir_native_test_1.csv
      - split: native_2
        path: translate-test/tir/tir_native_test_2.csv
      - split: native_3
        path: translate-test/tir/tir_native_test_3.csv
---

# ProverbEval: Benchmark for Evaluating LLMs on Low-Resource Proverbs

This dataset accompanies the paper ["ProverbEval: Exploring LLM Evaluation Challenges for Low-resource Language Understanding"](https://arxiv.org/abs/2411.05049) (arXiv:2411.05049v3).

## Dataset Summary

ProverbEval is a culturally grounded evaluation benchmark designed to assess the language understanding abilities of large language models (LLMs) in low-resource settings. It consists of tasks based on proverbs in five languages:

- Amharic
- Afaan Oromo
- Tigrinya
- Ge'ez
- English

The benchmark focuses on three tasks:

- Task 1: Multiple-Choice Meaning Matching
- Task 2: Fill-in-the-Blank
- Task 3: Proverb Generation

## Supported Tasks and Formats

Each task is formatted to support multilingual and cross-lingual evaluation:

### Task 1: Multiple-Choice Meaning Matching

- Input: A proverb in a given language.
- Output: Select the one correct meaning from four choices.
- Format: Multiple-choice question, with the choices given in either the native language or English.
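
To make the format concrete, here is a minimal Python sketch that turns one Task 1 row into a prompt. The field names (`prompt`, `choices`) follow the Data Structure section below, but the exact CSV column names and the representation of the four choices are assumptions and may differ between configurations.

```python
# Minimal sketch of a Task 1 prompt (multiple-choice meaning matching).
# Field names are assumptions based on the Data Structure section below;
# the actual CSV columns may differ per configuration.
LETTERS = ["A", "B", "C", "D"]

def build_choice_prompt(row: dict) -> str:
    """Format one proverb and its four candidate meanings as a prompt."""
    options = "\n".join(
        f"{letter}. {choice}" for letter, choice in zip(LETTERS, row["choices"])
    )
    return (
        f"Proverb: {row['prompt']}\n"
        "Which option best expresses the meaning of the proverb?\n"
        f"{options}\n"
        "Answer:"
    )

# Hypothetical usage:
# build_choice_prompt({"prompt": "...", "choices": ["m1", "m2", "m3", "m4"]})
```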

### Task 2: Fill-in-the-Blank

- Input: A proverb with one word removed and four candidate words.
- Output: Select the most suitable word to complete the proverb.
- Format: Cloze-style multiple choice.

### Task 3: Proverb Generation

- Input: A detailed description of a proverb, in English or in the native language.
- Output: The matching proverb, generated in the target language.
- Format: Text generation.

## Languages and Statistics

Number of examples per language and task:
| Language     | Task 1 | Task 2 | Task 3 |
|--------------|--------|--------|--------|
| Amharic      | 483    | 494    | 484    |
| Afaan Oromo  | 502    | 493    | 502    |
| Tigrinya     | 380    | 503    | 380    |
| Ge'ez        | 434    | 429    | 434    |
| English      | 437    | 462    | 437    |

Note: The dataset provides test sets only; few-shot example pools are also included for Task 2 (see the sketch below).
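
For example, the Task 2 configurations expose the few-shot pools and the evaluation split under the names declared in the metadata above; a minimal sketch for Amharic (analogous `*_fill_blank` configs exist for the other languages):

```python
from datasets import load_dataset

# Task 2 (fill-in-the-blank) splits for Amharic, named as in the metadata above.
fill = load_dataset("israel/ProverbEval", "amh_fill_blank")
few_shot_pool = fill["train_1"]  # one of three few-shot pools (train_1..train_3)
eval_split = fill["valid"]       # the `valid` split
print(len(few_shot_pool), len(eval_split))
```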

## Data Structure

Each example includes the following fields (depending on the task):

- `language`
- `task`
- `prompt`
- `choices` or `description`
- `answer` or `target_proverb`
- `prompt_type` (native or English)
- `choice_language` (native or English)

## Usage

You can load a specific task configuration with the `datasets` library (a configuration name is required, since the dataset defines multiple configurations):

```python
from datasets import load_dataset

# Load one configuration, e.g. the Amharic multiple-choice task with native-language choices
dataset = load_dataset("israel/ProverbEval", "amh_choice_native")
```
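
Configuration and split names match the `configs` metadata above. A short sketch (assuming network access to the Hub) for discovering them programmatically and loading a single split:

```python
from datasets import get_dataset_config_names, load_dataset

# List every configuration declared in this dataset card.
print(get_dataset_config_names("israel/ProverbEval"))

# Load a single split of one configuration (names as declared above).
tir_native = load_dataset("israel/ProverbEval", "tir_choice_native", split="native_1")
print(tir_native[0])
```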

## Repository Structure

The repository is organized by language; the `translate-test/` files referenced in the metadata above are not shown in this listing.

```
.
├── amh
│   ├── amharic-fill_test.csv
│   ├── amh_english_test_1.csv
│   ├── amh_english_test_2.csv
│   ├── amh_english_test_3.csv
│   ├── amh_fill_1.csv
│   ├── amh_fill_2.csv
│   ├── amh_fill_3.csv
│   ├── amh_meaining_generation_english.csv
│   ├── amh_meaining_generation_native.csv
│   ├── amh_native_test_1.csv
│   ├── amh_native_test_2.csv
│   └── amh_native_test_3.csv
├── eng
│   ├── eng_fill_test.csv
│   ├── eng_meaining_generation_native.csv
│   ├── eng_native_test_1.csv
│   ├── eng_native_test_2.csv
│   └── eng_native_test_3.csv
├── geez
│   ├── geez_english_test_1.csv
│   ├── geez_english_test_2.csv
│   ├── geez_english_test_3.csv
│   ├── geez_fill_1.csv
│   ├── geez_fill_2.csv
│   ├── geez_fill_3.csv
│   └── geez_meaining_generation_english.csv
├── orm
│   ├── orm_english_test_1.csv
│   ├── orm_english_test_2.csv
│   ├── orm_english_test_3.csv
│   ├── orm_fill_1.csv
│   ├── orm_fill_2.csv
│   ├── orm_fill_3.csv
│   ├── orm_meaining_generation_english.csv
│   ├── orm_meaining_generation_native.csv
│   ├── orm_native_test_1.csv
│   ├── orm_native_test_2.csv
│   ├── orm_native_test_3.csv
│   └── oromo_fill_test.csv
└── tir
    ├── tir_fill_1.csv
    ├── tir_fill_2.csv
    └── tir_fill_3.csv
```

## Citation

```bibtex
@article{azime2024proverbeval,
  title={ProverbEval: Exploring LLM Evaluation Challenges for Low-resource Language Understanding},
  author={Azime, Israel Abebe and Tonja, Atnafu Lambebo and Belay, Tadesse Destaw and Chanie, Yonas and Balcha, Bontu Fufa and Abadi, Negasi Haile and Ademtew, Henok Biadglign and Nerea, Mulubrhan Abebe and Yadeta, Debela Desalegn and Geremew, Derartu Dagne and others},
  journal={arXiv preprint arXiv:2411.05049},
  year={2024}
}
```