Spaces:
Sleeping
Sleeping
Commit
·
b26a6dc
1
Parent(s):
5160659
Merge Gradio UI into FastAPI app and standardize port to 7860
Browse files- Dockerfile +1 -3
- app.py +0 -24
- docker-compose.yml +2 -12
- main.py +36 -8
Dockerfile
CHANGED
@@ -21,8 +21,6 @@ ENV HOME=/home/user \
|
|
21 |
|
22 |
# Exposer le port sur lequel l'application va tourner
|
23 |
EXPOSE 7860
|
24 |
-
EXPOSE 8000
|
25 |
-
|
26 |
|
27 |
# RUN --mount=type=secret,id=api_read,mode=0444,required=true \
|
28 |
# cat /run/secrets/api_read > /secret
|
@@ -31,4 +29,4 @@ EXPOSE 8000
|
|
31 |
# git clone $(cat /run/secrets/api_read)
|
32 |
|
33 |
# Commande pour lancer l'application
|
34 |
-
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
|
|
|
21 |
|
22 |
# Exposer le port sur lequel l'application va tourner
|
23 |
EXPOSE 7860
|
|
|
|
|
24 |
|
25 |
# RUN --mount=type=secret,id=api_read,mode=0444,required=true \
|
26 |
# cat /run/secrets/api_read > /secret
|
|
|
29 |
# git clone $(cat /run/secrets/api_read)
|
30 |
|
31 |
# Commande pour lancer l'application
|
32 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
app.py
DELETED
@@ -1,24 +0,0 @@
|
|
1 |
-
# Interface Gradio pour afficher la liste des modèles chargés
|
2 |
-
import gradio as gr
|
3 |
-
import asyncio
|
4 |
-
from db.models import fetch_models_for_group
|
5 |
-
from models.loader import load_models, model_pipelines
|
6 |
-
from config.settings import RESOURCE_GROUP
|
7 |
-
|
8 |
-
async def init_models():
|
9 |
-
"""Charger les modèles au démarrage"""
|
10 |
-
models_data = await fetch_models_for_group(RESOURCE_GROUP)
|
11 |
-
await load_models(models_data)
|
12 |
-
|
13 |
-
# Initialisation des modèles
|
14 |
-
asyncio.run(init_models())
|
15 |
-
|
16 |
-
def get_model_list():
|
17 |
-
"""Retourner la liste des modèles chargés"""
|
18 |
-
return list(model_pipelines.keys())
|
19 |
-
|
20 |
-
with gr.Blocks() as demo:
|
21 |
-
demo.title("Tamis AI - Liste des modèles chargés")
|
22 |
-
gr.JSON(get_model_list)
|
23 |
-
|
24 |
-
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docker-compose.yml
CHANGED
@@ -3,19 +3,9 @@ version: '3.8'
|
|
3 |
services:
|
4 |
api:
|
5 |
build: .
|
6 |
-
|
7 |
ports:
|
8 |
-
- "8000:8000"
|
9 |
-
volumes:
|
10 |
-
- .:/app
|
11 |
-
env_file:
|
12 |
-
- .env
|
13 |
-
|
14 |
-
ui:
|
15 |
-
build: .
|
16 |
-
command: python app.py
|
17 |
-
ports:
|
18 |
-
- "7860:7860"
|
19 |
volumes:
|
20 |
- .:/app
|
21 |
env_file:
|
|
|
3 |
services:
|
4 |
api:
|
5 |
build: .
|
6 |
+
# Command is now defined in Dockerfile's CMD
|
7 |
ports:
|
8 |
+
- "7860:7860" # Expose Gradio/API port
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
volumes:
|
10 |
- .:/app
|
11 |
env_file:
|
main.py
CHANGED
@@ -1,9 +1,11 @@
|
|
1 |
import logging
|
2 |
from fastapi import FastAPI
|
|
|
|
|
3 |
|
4 |
from api.router import router, verify_api_key
|
5 |
from db.models import fetch_models_for_group
|
6 |
-
from models.loader import load_models
|
7 |
from config.settings import RESOURCE_GROUP
|
8 |
|
9 |
# Configuration de base des logs
|
@@ -26,16 +28,42 @@ app.middleware("http")(verify_api_key)
|
|
26 |
# Inclure les routes
|
27 |
app.include_router(router)
|
28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
@app.on_event("startup")
|
30 |
async def startup():
|
31 |
"""Initialiser l'API : charger les modèles depuis la base de données."""
|
32 |
-
|
33 |
-
|
34 |
-
# Charger les modèles depuis la base de données
|
35 |
-
models_data = await fetch_models_for_group(RESOURCE_GROUP)
|
36 |
-
await load_models(models_data)
|
37 |
-
|
38 |
-
logger.info("API initialization complete.")
|
39 |
|
40 |
@app.get("/health")
|
41 |
async def health_check():
|
|
|
1 |
import logging
|
2 |
from fastapi import FastAPI
|
3 |
+
import gradio as gr
|
4 |
+
from gradio.routes import mount_gradio_app
|
5 |
|
6 |
from api.router import router, verify_api_key
|
7 |
from db.models import fetch_models_for_group
|
8 |
+
from models.loader import load_models, model_pipelines
|
9 |
from config.settings import RESOURCE_GROUP
|
10 |
|
11 |
# Configuration de base des logs
|
|
|
28 |
# Inclure les routes
|
29 |
app.include_router(router)
|
30 |
|
31 |
+
async def init_models():
|
32 |
+
"""Charger les modèles au démarrage pour Gradio et FastAPI."""
|
33 |
+
logger.info("Initializing models for Gradio and FastAPI...")
|
34 |
+
try:
|
35 |
+
models_data = await fetch_models_for_group(RESOURCE_GROUP)
|
36 |
+
await load_models(models_data)
|
37 |
+
logger.info("Models loaded successfully.")
|
38 |
+
except Exception as e:
|
39 |
+
logger.error(f"Failed to initialize models: {e}", exc_info=True)
|
40 |
+
# Decide if the app should fail to start or continue without models
|
41 |
+
# raise RuntimeError("Model initialization failed.")
|
42 |
+
# For now, let's log and continue, Gradio will show an empty list
|
43 |
+
pass
|
44 |
+
|
45 |
+
# Définir la fonction pour Gradio qui récupère les modèles chargés
|
46 |
+
def get_loaded_models_list():
|
47 |
+
"""Retourne la liste des noms de modèles actuellement chargés."""
|
48 |
+
return list(model_pipelines.keys())
|
49 |
+
|
50 |
+
# Créer l'interface Gradio
|
51 |
+
gradio_app = gr.Blocks(title="Tamis AI - Modèles Chargés")
|
52 |
+
with gradio_app:
|
53 |
+
gr.Markdown("## Modèles actuellement chargés dans l'API")
|
54 |
+
gr.JSON(get_loaded_models_list, label="Modèles Chargés")
|
55 |
+
|
56 |
+
# Monter l'application Gradio à la racine dans FastAPI
|
57 |
+
app = mount_gradio_app(
|
58 |
+
app, gradio_app, path="/"
|
59 |
+
)
|
60 |
+
|
61 |
+
# Event startup to load models (ensure it runs *after* Gradio is mounted if needed)
|
62 |
+
# We call init_models inside startup
|
63 |
@app.on_event("startup")
|
64 |
async def startup():
|
65 |
"""Initialiser l'API : charger les modèles depuis la base de données."""
|
66 |
+
await init_models() # Call the consolidated init function
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
@app.get("/health")
|
69 |
async def health_check():
|