# DeTikZify / app.py
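# Gradio entry point for the DeTikZify demo on Hugging Face Spaces: builds the
# full web UI, or, on the official Space without a GPU, shows a banner asking
# visitors to duplicate the Space onto a GPU runtime.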
from os import execl, getenv
from sys import argv, exception, executable
from textwrap import dedent
import traceback

import gradio as gr
from torch import cuda

from detikzify.webui import BANNER, build_ui, make_light

def is_official_demo():
    # SPACE_AUTHOR_NAME is set by Hugging Face Spaces; the official demo lives
    # under the "nllg" account.
    return getenv("SPACE_AUTHOR_NAME") == "nllg"

# Hack to temporarily work around memory leak, see:
# * https://huggingface.co/spaces/nllg/DeTikZify/discussions/2
# * https://github.com/gradio-app/gradio/issues/8503
def reload_on_oom_hook(func):
    def wrapper(*args, **kwargs):
        # sys.exception() (Python 3.11+) returns the exception currently being
        # handled. If it is an out-of-memory error, restart the whole process
        # (execl replaces it in place) instead of keeping the leaked memory.
        if isinstance(exception(), (MemoryError, cuda.OutOfMemoryError)):
            execl(executable, executable, *argv)
        return func(*args, **kwargs)
    return wrapper

if is_official_demo() and not cuda.is_available():
    # On the official Space without a GPU, the models cannot run on the free
    # CPU tier, so only show a banner with a "Duplicate this Space" link.
    center = ".gradio-container {text-align: center}"
    with gr.Blocks(css=center, theme=make_light(gr.themes.Soft()), title="DeTikZify") as demo:
        badge = "https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-xl.svg"
        link = "https://huggingface.co/spaces/nllg/DeTikZify?duplicate=true"
        html = f'<a style="display:inline-block" href="{link}"> <img src="{badge}" alt="Duplicate this Space"> </a>'
        message = dedent("""\
            The resources required by our models surpass those provided by Hugging
            Face Spaces' free CPU tier. For full functionality, we suggest
            duplicating this space using a paid private GPU runtime.
        """)
        gr.HTML(f'{BANNER}\n<p>{message}</p>\n{html}')
else:
    # Pick the 7b checkpoint only if the GPU has more than ~14.75 GiB of memory
    # (15835660288 bytes); otherwise fall back to the smaller 1.3b checkpoint.
    use_big_models = cuda.is_available() and cuda.get_device_properties(0).total_memory > 15835660288
    model = f"detikzify-ds-{'7' if use_big_models else '1.3'}b"
    demo = build_ui(lock=is_official_demo(), model=model, light=True).queue()
    # Wrap traceback.print_exc so that an OOM error being handled while a
    # traceback is printed triggers a full restart (see reload_on_oom_hook).
    traceback.print_exc = reload_on_oom_hook(traceback.print_exc)

if __name__ == "__main__":
    # Listen on all interfaces on port 7860, the port Hugging Face Spaces expects.
    demo.launch(server_name="0.0.0.0", server_port=7860)
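
# To run the demo locally (assuming the detikzify package and its dependencies
# are installed): `python app.py`, then open http://localhost:7860.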