Spaces · Runtime error
dung-vpt-uney committed · Commit 799282e · 1 Parent(s): 51b978b

Deploy latest CoRGI Gradio demo

Files changed:
- PROGRESS_LOG.md (+1 -0)
- app.py (+21 -0)
- corgi/__pycache__/gradio_app.cpython-313.pyc (+0 -0, binary)
- corgi/gradio_app.py (+14 -0)
PROGRESS_LOG.md CHANGED

```diff
@@ -11,6 +11,7 @@
 - Authored a metadata-rich `README.md` (with Hugging Face Space front matter) so the deployed Space renders without configuration errors.
 - Updated `app.py` to fall back to `demo.queue()` when `concurrency_count` is unsupported, fixing the runtime error seen on Spaces.
 - Added ZeroGPU support: cached model/processor globals live on CUDA when available, a `@spaces.GPU`-decorated executor handles pipeline runs, and requirements now include the `spaces` SDK.
+- Introduced structured logging for the app (`app.py`) and pipeline execution to trace model loads, cache hits, and Gradio lifecycle events on Spaces.
 
 ## 2024-10-21
 - Updated default checkpoints to `Qwen/Qwen3-VL-8B-Thinking` and verified CLI/Gradio/test coverage.
```
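The ZeroGPU entry above describes a conditional-decoration idiom that the `corgi/gradio_app.py` hunks further down only show in fragments (`if spaces is not None:` / `else:`). As a standalone illustration, here is a minimal sketch of that idiom; `_execute` is a dummy stand-in for the Space's real pipeline executor, not code from this repository:

```python
# Sketch of conditional ZeroGPU decoration; _execute is an illustrative
# stand-in, not the Space's actual executor.
try:
    import spaces  # Hugging Face Spaces SDK; absent in plain local runs
except ImportError:
    spaces = None


def _execute(question: str) -> str:
    # Dummy body; the real executor delegates to the CoRGI pipeline.
    return f"answered: {question}"


if spaces is not None:
    # On ZeroGPU hardware, spaces.GPU attaches a GPU for each decorated call.
    _execute_gpu = spaces.GPU(_execute)
else:
    # Local fallback: run the same function undecorated.
    _execute_gpu = _execute
```

Keeping both paths behind one name lets the rest of the app stay agnostic about whether the SDK is installed, which is how `_execute_pipeline_gpu` is used in the diffs below.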
app.py CHANGED

```diff
@@ -1,13 +1,34 @@
 """Hugging Face Spaces entrypoint for the CoRGI Qwen3-VL demo."""
 
+from __future__ import annotations
+
+import logging
+import os
+
 from corgi.gradio_app import build_demo
 
 
+def _configure_logging() -> logging.Logger:
+    level = os.getenv("CORGI_LOG_LEVEL", "INFO").upper()
+    logging.basicConfig(
+        level=getattr(logging, level, logging.INFO),
+        format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+    )
+    return logging.getLogger("corgi.app")
+
+
+logger = _configure_logging()
+logger.info("Initializing Gradio demo build.")
+
 demo = build_demo()
+logger.info("Gradio Blocks created; configuring queue.")
 try:  # Gradio >=4.29 supports concurrency_count
     demo = demo.queue(concurrency_count=1)
+    logger.info("Queue configured with concurrency_count=1.")
 except TypeError:
+    logger.warning("concurrency_count unsupported; falling back to default queue().")
     demo = demo.queue()
 
 if __name__ == "__main__":
+    logger.info("Launching Gradio demo.")
     demo.launch()
```
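One detail of the new `_configure_logging` worth calling out: `getattr(logging, level, logging.INFO)` silently falls back to `INFO` whenever `CORGI_LOG_LEVEL` names no real logging level. A self-contained check of that lookup:

```python
# Standalone check of the level lookup used by _configure_logging in app.py:
# valid names resolve to their numeric levels, anything else falls back to INFO.
import logging

for name in ("debug", "WARNING", "NOT_A_LEVEL"):
    resolved = getattr(logging, name.upper(), logging.INFO)
    print(f"{name} -> {resolved}")  # debug -> 10, WARNING -> 30, NOT_A_LEVEL -> 20
```

Setting `CORGI_LOG_LEVEL=DEBUG` in the Space's environment therefore surfaces the `logger.debug` cache-hit records added in `corgi/gradio_app.py` below.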
corgi/__pycache__/gradio_app.cpython-313.pyc CHANGED

Binary files a/corgi/__pycache__/gradio_app.cpython-313.pyc and b/corgi/__pycache__/gradio_app.cpython-313.pyc differ
corgi/gradio_app.py CHANGED

```diff
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import logging
 from dataclasses import dataclass
 from typing import Callable, Optional
 
@@ -23,6 +24,7 @@ class PipelineState:
 
 _PIPELINE_CACHE: dict[str, CoRGIPipeline] = {}
 _GLOBAL_FACTORY: Callable[[Optional[str]], CoRGIPipeline] | None = None
+logger = logging.getLogger("corgi.gradio_app")
 
 
 def _default_factory(model_id: Optional[str]) -> CoRGIPipeline:
@@ -33,8 +35,11 @@ def _default_factory(model_id: Optional[str]) -> CoRGIPipeline:
 def _get_pipeline(model_id: str, factory: Callable[[Optional[str]], CoRGIPipeline]) -> CoRGIPipeline:
     pipeline = _PIPELINE_CACHE.get(model_id)
     if pipeline is None:
+        logger.info("Creating new pipeline for model_id=%s", model_id)
         pipeline = factory(model_id)
         _PIPELINE_CACHE[model_id] = pipeline
+    else:
+        logger.debug("Reusing cached pipeline for model_id=%s", model_id)
     return pipeline
 
 
@@ -47,6 +52,12 @@ def _execute_pipeline(
 ) -> PipelineResult:
     factory = _GLOBAL_FACTORY or _default_factory
     pipeline = _get_pipeline(model_id, factory)
+    logger.info(
+        "Executing pipeline for model_id=%s | max_steps=%s | max_regions=%s",
+        model_id,
+        max_steps,
+        max_regions,
+    )
     return pipeline.run(
         image=image.convert("RGB"),
         question=question,
@@ -65,6 +76,7 @@ if spaces is not None:
         max_regions: int,
         model_id: str,
     ) -> PipelineResult:
+        logger.debug("Running GPU-decorated pipeline.")
         return _execute_pipeline(image, question, max_steps, max_regions, model_id)
 
 else:
@@ -131,6 +143,7 @@ def _run_pipeline(
     if not question.strip():
         return state or PipelineState(model_id=model_id or DEFAULT_MODEL_ID, pipeline=None), "Please enter a question before running the demo."
     target_model = (model_id or DEFAULT_MODEL_ID).strip() or DEFAULT_MODEL_ID
+    logger.info("Received request for model_id=%s", target_model)
     result = _execute_pipeline_gpu(
         image=image.convert("RGB"),
         question=question.strip(),
@@ -154,6 +167,7 @@ def build_demo(
     factory = pipeline_factory or _default_factory
     global _GLOBAL_FACTORY
    _GLOBAL_FACTORY = factory
+    logger.info("Registering pipeline factory %s", factory)
 
     with gr.Blocks(title="CoRGI Qwen3-VL Demo") as demo:
         state = gr.State()  # stores PipelineState
```