import gradio as gr
import torch
import uuid
from mario_gpt.dataset import MarioDataset
from mario_gpt.prompter import Prompter
from mario_gpt.lm import MarioLM
from mario_gpt.utils import view_level, convert_level_to_png

import os
import subprocess

from pyngrok import ngrok

# Load the MarioGPT language model and move it onto the GPU
# (hard-requires a CUDA device — torch.device('cuda') with no fallback).
mario_lm = MarioLM()
device = torch.device('cuda')
mario_lm = mario_lm.to(device)
# Directory of tile sprites used by convert_level_to_png to render levels.
TILE_DIR = "data/tiles"

# Serve the working directory (where demo-<uuid>.html files are written) on
# port 7861 so the iframe in the Gradio UI can load generated levels.
# NOTE(review): the child process is never terminated explicitly; it lives
# for the lifetime of this script.
subprocess.Popen(["python3","-m","http.server","7861"])
# Expose that static file server publicly over HTTPS through an ngrok tunnel;
# generate() embeds http_tunnel.public_url in the returned HTML.
ngrok.set_auth_token(os.environ.get('NGROK_TOKEN'))
http_tunnel = ngrok.connect(7861,bind_tls=True)

def make_html_file(generated_level):
    """Write a standalone HTML page that plays *generated_level* in the
    browser via CheerpJ, and return the created file's name.

    The level tokens are rendered to text rows (one row per line), embedded
    in the page as a JS template literal, and mounted as /str/mylevel.txt for
    the bundled mario.jar to read.

    Parameters
    ----------
    generated_level : sampled level tensor as produced by ``mario_lm.sample``.

    Returns
    -------
    str
        The filename of the HTML page, e.g. ``demo-<uuid>.html``, written to
        the current working directory (which the local http.server serves).
    """
    # One text row per line. The original used an obfuscated
    # f-string/triple-quote construct; this is the same join.
    level_text = "\n".join(view_level(generated_level, mario_lm.tokenizer))
    # uuid4 instead of uuid1: uuid1 embeds the host MAC address and a
    # timestamp in a filename that is served publicly through ngrok.
    filename = f"demo-{uuid.uuid4()}.html"
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(f'''<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="utf-8">
    <title>Mario Game</title>
    <script src="https://cjrtnc.leaningtech.com/20230216/loader.js"></script>
</head>

<body>
</body>
<script>
    cheerpjInit().then(function () {{
        cheerpjAddStringFile("/str/mylevel.txt", `{level_text}`);
    }});
    cheerpjCreateDisplay(512, 500);
    cheerpjRunJar("/app/mario.jar");
</script>
</html>''')
    return filename

def generate(pipes, enemies, blocks, elevation, temperature = 2.0, level_size = 1399, prompt = ""):
    """Sample a level from MarioGPT and return ``[image, playable_html]``.

    If *prompt* is empty, one is composed from the four radio selections
    (e.g. "many pipes, no enemies, some blocks, high elevation").

    Parameters
    ----------
    pipes, enemies, blocks, elevation : str
        Quantity words chosen in the "Compose prompt" tab.
    temperature : float
        Sampling temperature; higher is more diverse but lower quality.
    level_size : int
        Number of sampling steps (level length in tokens).
    prompt : str
        Free-text prompt from the "Type prompt" tab; overrides the composed
        prompt when non-empty.

    Returns
    -------
    list
        ``[PIL image of the level, HTML string with a playable iframe]``.
    """
    # `not prompt` also covers None, which a cleared Textbox may yield.
    if not prompt:
        prompt = f"{pipes} pipes, {enemies} enemies, {blocks} blocks, {elevation} elevation"
    print(f"Using prompt: {prompt}")
    generated_level = mario_lm.sample(
        prompts=[prompt],
        num_steps=level_size,
        temperature=temperature,
        use_tqdm=True
    )
    filename = make_html_file(generated_level)
    img = convert_level_to_png(generated_level.squeeze(), TILE_DIR, mario_lm.tokenizer)[0]

    # BUG FIX: the iframe previously pointed at a hard-coded placeholder path,
    # so the generated page was never loaded. It must reference the HTML file
    # just written for THIS level, served by the local http.server and exposed
    # through the ngrok tunnel.
    gradio_html = f'''<div style="border: 2px solid;">
        <iframe width=512 height=512 style="margin: 0 auto" src="{http_tunnel.public_url}/{filename}"></iframe>
        <p style="text-align:center">Press the arrow keys to move. Press <code>s</code> to jump and <code>a</code> to shoot flowers</p>
    </div>'''
    return [img, gradio_html]

# Build and launch the Gradio UI: prompt inputs (typed or composed),
# advanced sampling settings, and paired image + playable-iframe outputs.
with gr.Blocks() as demo:
    gr.Markdown('''### Playable demo for MarioGPT: Open-Ended Text2Level Generation through Large Language Models
    [[Github](https://github.com/shyamsn97/mario-gpt)], [[Paper](https://arxiv.org/abs/2302.05981)]
    ''')
    # Two ways to specify a prompt: free text, or composed from radio choices.
    # generate() prefers the typed prompt when it is non-empty.
    with gr.Tabs():
        with gr.TabItem("Type prompt"):
            text_prompt = gr.Textbox(value="", label="Enter your MarioGPT prompt. ex: 'many pipes, many enemies, some blocks, low elevation'")
        with gr.TabItem("Compose prompt"):
            with gr.Row():
                pipes = gr.Radio(["no", "little", "some", "many"], label="pipes")
                enemies = gr.Radio(["no", "little", "some", "many"], label="enemies")
            with gr.Row():
                blocks = gr.Radio(["little", "some", "many"], label="blocks")
                elevation = gr.Radio(["low", "high"], label="elevation")

    with gr.Accordion(label="Advanced settings", open=False):
        temperature = gr.Number(value=2.0, label="temperature: Increase these for more diverse, but lower quality, generations")
        level_size = gr.Number(value=1399, precision=0, label="level_size")

    btn = gr.Button("Generate level")
    with gr.Row():
        with gr.Box():
            level_play = gr.HTML()
        level_image = gr.Image()
    btn.click(fn=generate, inputs=[pipes, enemies, blocks, elevation, temperature, level_size, text_prompt], outputs=[level_image, level_play])
    # BUG FIX: the example rows previously mixed 4- and 5-column entries while
    # `inputs` listed only 4 components; gr.Examples requires every row to
    # match the inputs list. Each row now carries an explicit temperature and
    # `temperature` is included in `inputs`.
    gr.Examples(
        examples=[
            ["many", "many", "some", "high", 2.0],
            ["no", "some", "many", "high", 2.0],
            ["many", "many", "little", "low", 2.0],
            ["no", "no", "many", "high", 2.4],
        ],
        inputs=[pipes, enemies, blocks, elevation, temperature],
        outputs=[level_image, level_play],
        fn=generate,
        cache_examples=True,
    )
demo.launch()