import multiprocessing
import pathlib
import re
import string
import traceback
from multiprocessing import Queue

import markdownify
import orjson
import tqdm
from bs4 import BeautifulSoup, Comment, NavigableString, Tag
from markdownify import chomp
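
# Pipeline: read JSONL dumps of MediaWiki "parse" API output, strip stub
# notices and navigation/media markup, convert the surviving HTML to markdown
# with a customised markdownify converter, and stream filtered records to a
# JSONL file through a dedicated writer process.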

if __name__ == "__main__":
    # "forkserver" starts pool workers from a freshly imported copy of this
    # module rather than a fork of the full parent state.
    multiprocessing.set_start_method("forkserver")


# Unused module-level queue; the queue actually used by the pipeline is
# created in main() through a Manager so it can be shared with pool workers.
queue = Queue(maxsize=64)


class WikiConverter(markdownify.MarkdownConverter):
    def convert_a(self, el, text, convert_as_inline):
        # Links are reduced to their visible text; URLs are dropped.
        prefix, suffix, text = chomp(text)
        if not text:
            return ""
        return "%s%s%s" % (prefix, text, suffix)

    integer_rgx = re.compile(r"^[0-9]+$")

    @staticmethod
    def is_intable(value: str) -> bool:
        # True only for non-empty, ASCII-digit strings that int() accepts.
        # (str.isdigit alone also passes non-ASCII digits, hence the regex.)
        return bool(
            value and value.isdigit() and WikiConverter.integer_rgx.match(value)
        )

    def convert_img(self, el, text, convert_as_inline):
        # Images are dropped unconditionally; media is typically not covered
        # by the wiki's text license.
        return ""

    def convert_li(self, el, text, convert_as_inline):
        parent = el.parent
        if parent is not None and parent.name == "ol":
            # Numbered list: honour the <ol start="..."> attribute if present.
            start = parent.get("start")
            if start and WikiConverter.is_intable(start.strip()):
                start = int(start.strip())
            else:
                start = 1
            bullet = "%s." % (start + parent.index(el))
        else:
            # Bulleted list: pick the bullet character by nesting depth.
            depth = -1
            while el:
                if el.name == "ul":
                    depth += 1
                el = el.parent
            bullets = self.options["bullets"]
            bullet = bullets[depth % len(bullets)]
        return "%s %s\n" % (bullet, (text or "").strip())


wk = WikiConverter()
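
# Illustrative behaviour (hedged, not executed here): links keep only their
# visible text and images vanish, so for instance
#   wk.convert('<ol start="3"><li><a href="/x">a</a></li></ol>')
# should yield roughly "3. a".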


def table_filtration(input_soup: BeautifulSoup, title):
    # Drop tables that are mostly markup rather than content.
    for table in input_soup.select("table"):
        tds = len(table.find_all("td"))
        text_size = len(table.get_text().replace(" ", ""))
        if tds >= text_size and text_size < 50:
            # More cells than text: almost certainly a layout/navigation table.
            table.decompose()
        elif tds > 20:
            # Excessive cell count relative to any useful content.
            table.decompose()
    return input_soup


def soup_data(data: BeautifulSoup, title: str):
    soup = data

    # Navigation elements.
    for element in soup.find_all("div", attrs={"class": "toc"}):
        element.decompose()
    for element in soup.select("[class*='nav']"):
        element.decompose()
    # "Edit section" links.
    for element in soup.select(".mw-editsection"):
        element.decompose()
    # Inline stylesheets.
    for element in soup.select("style"):
        element.decompose()
    # Citation superscripts.
    for element in soup.select("sup.reference"):
        element.decompose()
    # star-citizen.wiki thumbnail captions.
    for element in soup.select(".thumbcaption"):
        element.decompose()
    # Media elements; images and pictures are typically not under the same
    # license as the text.
    for selector in ("audio", "picture", "img", "[class*='video']", "video"):
        for element in soup.select(selector):
            element.decompose()
    # blazblue.wiki tooltips.
    for element in soup.select("[class*='tooltip']"):
        element.decompose()
    for element in soup.select(".no-featured-video .featured-video-player-container"):
        element.decompose()

    cleaned_soup = table_filtration(soup, title)
    # Convert to markdown, collapse runs of blank lines, and patch up a
    # table-conversion artifact ("\n |\n|" -> " |\n").
    composed_data = (
        re.sub(r"\n\s*\n", "\n\n", wk.convert_soup(cleaned_soup))
        .replace("\n |\n|", " |\n")
        .strip()
    )
    return composed_data


# ASCII punctuation plus common CJK punctuation and brackets.
raw_puncts = string.punctuation + "{}()[]【】、,゠=…‥。「」『』〝〟"

# Translation table that deletes all of the above.
puncts = str.maketrans("", "", raw_puncts)
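# e.g. "Hello, world! 「やあ」".translate(puncts) -> "Hello world やあ"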

cpl = re.compile(r"\n\s*\n")

# Debug queue. Note: with "forkserver", each process re-imports this module
# and gets its own copy of this queue, so puts from pool workers never reach
# the parent's debug_queue() unless the queue is passed to them explicitly.
dbg_q = multiprocessing.Queue()


def debug_queue():
    # Drain dbg_q into debug.txt until a None / "None" sentinel arrives.
    # Currently never started by main().
    try:
        with open("debug.txt", "w", encoding="utf-8") as f:
            while True:
                z = dbg_q.get()
                if z is None or z == "None":
                    break
                f.write(z + "\n")
                f.flush()
    except Exception as e:
        print(e)


# Filters based on classes.
msgbox = {"mbox", "notice", "hidden", "plainlinks"}
msgbox2 = {"mbox", "notice", "stub-box", "plainlinks"}
msgbox3 = {"notice", "metadata", "plainlinks"}
# Aggressive class filter.
msgbox_aggressive = {
    "mbox-w",
    "mbox",
    "msgbox",
    "notice-container",
    "notice",
    "message-box",
    "boilerplate",
    "ambox",
    "ombox",
}

wikistub = {"wikistub"}


def get_text_cleaned(elem):
    # Flatten an element's text to one lower-cased line. The repeated
    # two-space replace only approximates full whitespace collapsing.
    return (
        cpl.sub("\n\n", elem.get_text(" "))
        .replace("\n", " ")
        .replace("  ", " ")
        .replace("  ", " ")
        .lower()
    )


def get_plain_text_clean(data: str):
    # Same flattening for plain strings, with punctuation stripped first.
    return (
        cpl.sub("\n\n", data.translate(puncts))
        .replace("\n", " ")
        .replace("  ", " ")
        .replace("  ", " ")
        .lower()
    )


# Pruning counters. Note: these are per-process. Under "forkserver" every pool
# worker re-imports this module and increments its own copies, so the parent
# only ever sees its own counts.
t_stubs = 0
f_stubs = 0

t_lock = multiprocessing.Lock()


def t_inc(idx: int):
    # Count total pruned elements (within this process).
    global t_stubs
    with t_lock:
        t_stubs += idx


f_lock = multiprocessing.Lock()


def f_inc(idx: int):
    # Count risky prunes (within this process).
    global f_stubs
    with f_lock:
        f_stubs += idx
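

# Minimal sketch of a cross-process alternative (hypothetical, not wired into
# the pipeline): a multiprocessing.Value created in the parent would have to
# be handed to workers (e.g. via a Pool initializer) to survive "forkserver".
def _inc_shared_counter(counter, n: int = 1):
    # "counter" is assumed to be a multiprocessing.Value("i", 0).
    with counter.get_lock():
        counter.value += n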


def stub_removal(html: str, debug=None):
    # Parse the raw page HTML and, if the page self-identifies as a stub, try
    # to prune the stub-notice element. Returns the soup plus a list of labels
    # naming which heuristic fired. "debug" is a human-readable page id, used
    # only by the (currently disabled) dbg_q logging.
    b_soup = BeautifulSoup(html, "lxml")
    page_text = get_text_cleaned(b_soup)
    if "this article is a stub" not in page_text:
        return b_soup, []
    parser = b_soup.select_one(".mw-parser-output")
    if parser is None:
        return b_soup, []
    pruned = []
    for child in parser.children:
        if child is None or isinstance(child, (Comment, NavigableString)):
            continue
        if not isinstance(child, Tag):
            continue
        classes = set(i.lower() for i in child.get("class", []))
        styles = child.get("style", "")
        has_border = False
        if styles:
            # Parse "key: value; ..." declarations, stripping whitespace so
            # lookups like styles.get("border") also match " border".
            styles = {
                decl.split(":")[0].strip(): ":".join(decl.split(":")[1:]).strip()
                for decl in styles.split(";")
            }
            has_border = any(
                [
                    styles.get("border"),
                    styles.get("border-width") and styles.get("border-style"),
                    styles.get("border-top"),
                    styles.get("border-bottom"),
                    styles.get("border-left"),
                    styles.get("border-right"),
                    styles.get("background"),
                ]
            )

        child_text = get_text_cleaned(child)
        has_stub_word = "stub" in child_text and "this article" in child_text
        c_name = child.name or ""

        if not has_stub_word:
            continue
        if (
            len(classes.intersection(msgbox)) == len(msgbox)
            or len(classes.intersection(msgbox2)) == len(msgbox2)
            or len(classes.intersection(msgbox3)) == len(msgbox3)
        ):  # Seems to be safe.
            child.decompose()
            pruned.append("mbox")
        elif classes.intersection(msgbox_aggressive):  # Aggressive.
            child.decompose()
            pruned.append("mbox3_aggressive")
        elif classes.intersection(wikistub):  # Seems to be safe.
            child.decompose()
            pruned.append("wikistub[gods-games-we-play]")
        elif c_name == "table":  # A bit risky, but appears to work.
            pruned.append("table[stub-word]")
            child.decompose()
        elif c_name == "dl":  # Seems to be safe.
            if len(child.find_all("dd", recursive=False)) == 1:
                pruned.append("dl > dd")
                child.decompose()
        elif c_name == "div":
            inner_elements = [
                i for i in child.find_all(recursive=False) if isinstance(i, Tag)
            ]
            if len(inner_elements) == 0 or len(inner_elements) > 2:
                continue
            stub_inner = get_text_cleaned(inner_elements[0])
            has_stub_word = "stub" in stub_inner and "this article" in stub_inner
            if inner_elements[0].name == "table" and has_stub_word:
                pruned.append("table[stub-word]")
                child.decompose()
            elif has_border and inner_elements[0].name.lower() in ("div", "p", "span"):
                # Dry run: recorded as pruned but deliberately not decomposed.
                pruned.append("risky[border]")
            else:
                pruned.append("?")
        elif c_name == "p":  # Really risky; exact tag-name match on purpose.
            child.decompose()
            f_inc(1)
            pruned.append("risky[p]")
        elif c_name == "center":  # Really risky.
            child.decompose()
            f_inc(1)
            pruned.append("risky[center]")
    if pruned:
        t_inc(len(pruned))
    return b_soup, pruned
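
# Usage sketch (illustrative input, not executed here):
#   cleaned, removed = stub_removal('<div class="mw-parser-output">...</div>')
# "removed" lists which heuristic fired, e.g. ["mbox"] or ["risky[p]"].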


def writer(fp: str, writer_queue: multiprocessing.Queue):
    # Single writer process: drain the queue into a JSONL file until the
    # None / "None" sentinel arrives.
    pbar = tqdm.tqdm()
    try:
        with open(fp, "wb") as fl:
            while True:
                q = writer_queue.get(block=True)
                if q is None or (isinstance(q, str) and q == "None"):
                    print("Writer received sentinel, exiting.")
                    break
                elif isinstance(q, bytes):
                    fl.write(q)
                    fl.write(b"\n")
                pbar.update(1)
    except Exception as e:
        print("Writer crashed?")
        traceback.print_exception(e)


def mt_fn(bytes_data: bytes, write_queue: multiprocessing.Queue):
    # Worker: parse one JSONL record, strip stub notices, convert the HTML to
    # markdown, and enqueue the result if enough plain text survives.
    data = orjson.loads(bytes_data)
    dbg = f"domain: {data['domain']} title: {data['page']}"
    if "parse" not in data["content"]:
        print("Missing parse content", "domain", data["domain"], "title", data["page"])
        return
    lower_title = data["page"].lower()
    # Skip discussion-style pages and galleries.
    if lower_title.startswith(
        ("forum:", "discussion:", "thread:", "comments:", "comment:")
    ):
        return
    if lower_title.endswith("gallery"):
        return
    html = data["content"]["parse"]["text"]["*"]
    cats = ",".join(z["*"] for z in data["content"]["parse"]["categories"]).lower()
    unstubbed_soup, removed = stub_removal(html, debug=dbg)
    text = soup_data(unstubbed_soup, data["page"])
    unpunct = get_plain_text_clean(text)
    if len(unpunct) > 64:
        write_queue.put(
            orjson.dumps(
                {
                    "text": text,
                    "meta": {
                        "title": data["page"],
                        "domain": data["domain"],
                        "cats": cats,
                        "removed": removed,
                    },
                }
            )
        )
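
# Expected shape of one input record consumed by mt_fn, reconstructed from
# the field accesses above:
# {
#     "domain": "<wiki domain>",
#     "page": "<page title>",
#     "content": {
#         "parse": {
#             "text": {"*": "<page HTML>"},
#             "categories": [{"*": "<category name>"}, ...],
#         }
#     },
# }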


def err_handler(e):
    # Pool error callback; KeyboardInterrupt noise is suppressed.
    if "KeyboardInterrupt" not in str(e):
        traceback.print_exception(e)


def main():
    with multiprocessing.Pool(processes=64) as pool:
        manager = multiprocessing.Manager()
        # A managed queue can be shared with pool workers across processes.
        writer_out = manager.Queue()
        write_proc = multiprocessing.Process(
            target=writer, args=("test.jsonl", writer_out), daemon=False
        )
        write_proc.start()

        tasks = []
        for file in pathlib.Path("v2.5-chunks-roblox-filter").iterdir():
            with open(file, "rb") as f:
                for line in f:
                    tasks.append(
                        pool.apply_async(
                            mt_fn, args=(line, writer_out), error_callback=err_handler
                        )
                    )
                    # Bound the number of in-flight tasks so pending results
                    # do not accumulate without limit.
                    if len(tasks) >= 100000:
                        print("Waiting for chunked tasks to complete.")
                        for task in tasks:
                            if task.ready():
                                continue
                            task.wait()
                        tasks = []
            print("[I] ========== Task gen done", file)
        print("Waiting for chunked tasks to complete.")
        for task in tasks:
            if task.ready():
                continue
            task.wait()
        print("Cleanup")
        # Sentinels: stop the (optional) debug drainer and the writer process.
        dbg_q.put("None")
        writer_out.put("None")

        write_proc.join()

        pool.close()
        pool.join()


if __name__ == "__main__":
    main()