import glob
import json
import os

import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Ember2018},
author=Christian Williams
},
year={2023}
}
"""

_DESCRIPTION = """\
This dataset is from the EMBER 2018 Malware Analysis dataset
"""
_HOMEPAGE = "https://github.com/elastic/ember"
_LICENSE = ""
_URLS = {
    # Raw files on the Hugging Face Hub are served from the "resolve" endpoint;
    # the "blob" endpoint returns an HTML page rather than the file contents.
    "text_classification": "https://huggingface.co/datasets/cw1521/ember2018-malware/resolve/main/data/"
}


class EMBERConfig(datasets.GeneratorBasedBuilder):
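    """Dataset builder for the EMBER 2018 malware analysis dataset."""
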
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="text_classification",
            version=VERSION,
            description="Text classification configuration: input feature string and its label",
        )
    ]

    DEFAULT_CONFIG_NAME = "text_classification"

    def _info(self):
        if self.config.name == "text_classification":
            # The text classification config only exposes the fields that
            # _generate_examples actually yields for it: the input string and its label.
            features = datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        else:
            # Any other config exposes the full record, including the raw
            # feature vector and the EMBER metadata fields.
            features = datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "x": datasets.features.Sequence(datasets.Value("float32")),
                    "y": datasets.Value("string"),
                    "appeared": datasets.Value("string"),
                    "avclass": datasets.Value("string"),
                    "subset": datasets.Value("string"),
                    "sha256": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features, 
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
        
    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        # NOTE: this assumes the download step yields a local directory that
        # contains the ember2018_*.jsonl shard files; each split generator
        # receives the resolved list of shard paths.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": sorted(
                        glob.glob(os.path.join(data_dir, "ember2018_train_*.jsonl"))
                    ),
                    "split": "train",
                },
            ),
            # datasets.SplitGenerator(
            #     name=datasets.Split.VALIDATION,
            #     gen_kwargs={
            #         "filepaths": sorted(
            #             glob.glob(os.path.join(data_dir, "*_valid_*.jsonl"))
            #         ),
            #         "split": "valid",
            #     },
            # ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": sorted(
                        glob.glob(os.path.join(data_dir, "ember2018_test_*.jsonl"))
                    ),
                    "split": "test",
                },
            ),
        ]


    def _generate_examples(self, filepaths, split):
        key = 0
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                # Each shard is expected to hold a JSON list of example records.
                data_list = json.load(f)
                for data in data_list:
                    if self.config.name == "text_classification":
                        yield key, {
                            "input": data["input"],
                            "label": data["label"],
                        }
                    else:
                        yield key, {
                            "input": data["input"],
                            "label": data["label"],
                            "x": data["x"],
                            "y": data["y"],
                            "appeared": data["appeared"],
                            "avclass": data["avclass"],
                            "subset": data["subset"],
                            "sha256": data["sha256"],
                        }
                    key += 1
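

# Minimal usage sketch (assumes the ember2018_*.jsonl shards are available
# under data/ in the cw1521/ember2018-malware repository; newer versions of
# the datasets library may also require trust_remote_code=True):
#
#     from datasets import load_dataset
#     ds = load_dataset("cw1521/ember2018-malware", "text_classification")
#     print(ds["train"][0])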