LEGAR_BENCH / LEGAR_BENCH.py
import json
import datasets
_DESCRIPTION = """\
LEGAR_BENCH is the first large-scale Korean legal case retrieval (LCR) benchmark, covering 411 diverse crime types in queries over 1.2M legal cases.
"""
_HOMEPAGE = "https://huggingface.co/datasets/Chaeeun-Kim/LEGAR_BENCH"
_LICENSE = "Apache 2.0"
# Relative paths to the JSONL data files for each configuration.
_URLS = {
    "standard": "data/standard_train.jsonl",
    "stricter": "data/stricter_train.jsonl",
    "stricter_by_difficulty": "data/stricter_by_difficulty_train.jsonl",
}
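
# Expected shape of one line in the JSONL files above. Field names follow the
# features declared in _info(); the values shown here are illustrative
# placeholders, not real benchmark records:
#
#     {"id": 0, "target_category": "...", "category": {"...": "..."},
#      "question": "...", "question_id": "...", "answer": ["...", "..."],
#      "evidence_id": ["...", "..."], "difficulty": "..."}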
class LegarBench(datasets.GeneratorBasedBuilder):
    """Dataset loading script for the LEGAR BENCH Korean legal case retrieval benchmark."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="standard",
            version=VERSION,
            description="Standard version of LEGAR BENCH",
        ),
        datasets.BuilderConfig(
            name="stricter",
            version=VERSION,
            description="Stricter version of LEGAR BENCH",
        ),
        datasets.BuilderConfig(
            name="stricter_by_difficulty",
            version=VERSION,
            description="Stricter version organized by difficulty",
        ),
    ]

    DEFAULT_CONFIG_NAME = "standard"
    def _info(self):
        features = datasets.Features({
            "id": datasets.Value("int64"),
            "target_category": datasets.Value("string"),
            # Nested category dicts are serialized to a JSON string (see _process_category).
            "category": datasets.Value("string"),
            "question": datasets.Value("string"),
            "question_id": datasets.Value("string"),
            "answer": datasets.Sequence(datasets.Value("string")),
            "evidence_id": datasets.Sequence(datasets.Value("string")),
            "difficulty": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # Each configuration ships a single train split.
        url = _URLS[self.config.name]
        data_file = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                },
            ),
        ]
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                line = line.strip()
                if not line:
                    # Skip blank lines so json.loads does not raise on them.
                    continue
                data = json.loads(line)
                yield key, {
                    "id": int(data.get("id", 0)),
                    "target_category": str(data.get("target_category", "")),
                    "category": self._process_category(data.get("category", {})),
                    "question": str(data.get("question", "")),
                    "question_id": str(data.get("question_id", "")),
                    "answer": self._process_list_field(data.get("answer", [])),
                    "evidence_id": self._process_list_field(data.get("evidence_id", [])),
                    "difficulty": str(data.get("difficulty", "")),
                }
    def _process_category(self, category):
        # Categories may be nested dicts; serialize them so the feature stays a plain string.
        if isinstance(category, dict):
            return json.dumps(category, ensure_ascii=False)
        return str(category)

    def _process_list_field(self, field):
        # Normalize missing or None-containing lists into lists of strings.
        if field is None:
            return []
        return [str(item) if item is not None else "" for item in field]
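
# --- Usage sketch (illustrative, not part of the loading script) ---
# A minimal example of loading the benchmark with the `datasets` library. The
# repository id below is taken from _HOMEPAGE; the config name can be any of
# "standard", "stricter", or "stricter_by_difficulty". Depending on your
# `datasets` version, trust_remote_code=True may be required for script-based
# datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "Chaeeun-Kim/LEGAR_BENCH",
        "standard",
        split="train",
        trust_remote_code=True,
    )
    print(ds[0]["question"])
    print(ds[0]["answer"])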