# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NaijaSenti: A Nigerian Twitter Sentiment Corpus for Multilingual Sentiment Analysis"""



_HOMEPAGE = "https://github.com/hausanlp/NaijaSenti"

_DESCRIPTION = """\
Naija-Stopwords is a part of the Naija-Senti project. It is a list of collected stopwords from the four most widely spoken languages in Nigeria — Hausa, Igbo, Nigerian-Pidgin, and Yorùbá.
"""


_CITATION = """\
@inproceedings{muhammad-etal-2022-naijasenti,
    title = "{N}aija{S}enti: A {N}igerian {T}witter Sentiment Corpus for Multilingual Sentiment Analysis",
    author = "Muhammad, Shamsuddeen Hassan  and
      Adelani, David Ifeoluwa  and
      Ruder, Sebastian  and
      Ahmad, Ibrahim Sa{'}id  and
      Abdulmumin, Idris  and
      Bello, Bello Shehu  and
      Choudhury, Monojit  and
      Emezue, Chris Chinenye  and
      Abdullahi, Saheed Salahudeen  and
      Aremu, Anuoluwapo  and
      Jorge, Al{\'\i}pio  and
      Brazdil, Pavel",
    booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
    month = jun,
    year = "2022",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://aclanthology.org/2022.lrec-1.63",
    pages = "590--602",
}
"""


import textwrap
import pandas as pd

import datasets

#LANGUAGES = ['hausa', 'igbo', 'yoruba', 'nigerian_pidgin']

class NaijaStopwordsConfig(datasets.BuilderConfig):
    """BuilderConfig for NaijaStopwords."""

    def __init__(
        self,
        text_features,
        hau_url,
        ibo_url,
        pcm_url,
        yor_url,
        citation,
        **kwargs,
    ):
        """BuilderConfig for NaijaStopwords.

        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the csv file
          hau_url: `string`, url of the Hausa stopwords csv file
          ibo_url: `string`, url of the Igbo stopwords csv file
          pcm_url: `string`, url of the Nigerian-Pidgin stopwords csv file
          yor_url: `string`, url of the Yoruba stopwords csv file
          citation: `string`, citation for the data set
          **kwargs: keyword arguments forwarded to super.
        """
        # The version is pinned here so every config shares the same dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.hau_url = hau_url
        self.ibo_url = ibo_url
        self.pcm_url = pcm_url
        self.yor_url = yor_url
        self.citation = citation


class NaijaStopwords(datasets.GeneratorBasedBuilder):
    """NaijaStopwords: stopword lists for Hausa, Igbo, Nigerian-Pidgin, and Yoruba."""

    BUILDER_CONFIGS = [
        NaijaStopwordsConfig(
            description=textwrap.dedent(_DESCRIPTION),
            text_features={"word": "word"},
            hau_url="https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/stopwords/hau.csv",
            ibo_url="https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/stopwords/ibo.csv",
            pcm_url="https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/stopwords/pcm.csv",
            yor_url="https://raw.githubusercontent.com/hausanlp/NaijaSenti/main/data/stopwords/yor.csv",
            citation=textwrap.dedent(_CITATION),
        ),
    ]

    def _info(self):
        """Returns the DatasetInfo: one string feature per configured text column."""
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features}

        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        """Returns one SplitGenerator per language.

        Splits are named after the language rather than train/valid/test,
        since each csv is an independent stopword list.
        """
        # download_and_extract accepts a dict and returns a matching dict of
        # local paths, so all four files are fetched in a single call.
        paths = dl_manager.download_and_extract(
            {
                "hausa": self.config.hau_url,
                "igbo": self.config.ibo_url,
                "nigerian_pidgin": self.config.pcm_url,
                "yoruba": self.config.yor_url,
            }
        )

        return [
            datasets.SplitGenerator(name=split_name, gen_kwargs={"filepath": path})
            for split_name, path in paths.items()
        ]

    def _generate_examples(self, filepath):
        """Yields (id, {"word": stopword}) pairs from a stopword csv file.

        The csv is expected to contain a "word" column; the row index is
        used as the example id.
        """
        df = pd.read_csv(filepath)

        for id_, row in df.iterrows():
            yield id_, {"word": row["word"]}