# gunicorn.conf.py
import os
import multiprocessing

# Hugging Face Spaces standard port
_port = os.environ.get('PORT', '7860')
bind = f"0.0.0.0:{_port}"

# Worker class for ASGI apps
worker_class = 'uvicorn.workers.UvicornWorker'

# Number of workers - use the GUNICORN_WORKERS env var, default to a sensible value.
# Default to 1 for safety on potentially small free-tier instances.
# You might increase this based on available CPU/memory: min(multiprocessing.cpu_count() * 2 + 1, 4)
workers = int(os.environ.get('GUNICORN_WORKERS', '1'))
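
# Optional sketch (not enabled by default): derive the worker count from the CPU
# count using the common (2 * CPUs + 1) Gunicorn heuristic mentioned above, capped
# so a small Space doesn't exhaust memory. The cap of 4 is an assumption, not a
# requirement; tune it to the instance size.
# workers = min(multiprocessing.cpu_count() * 2 + 1, 4)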

# Timeout - must be longer than the longest expected scrape/summary task,
# i.e. the crawl4ai timeout plus the summary timeout plus a buffer.
# Crawl4AI defaults to 60s and Gemini/OpenRouter calls can take 90-120s, so 240s is used here.
timeout = 240  # seconds

# Optional: Log level (can also be set via env var)
# loglevel = os.environ.get('GUNICORN_LOGLEVEL', 'info').lower()

# Optional: Reload for development (usually False in production)
# reload = os.environ.get('GUNICORN_RELOAD', 'False').lower() == 'true'

print("Gunicorn config:")
print(f" Bind: {bind}")
print(f" Workers: {workers}")
print(f" Worker Class: {worker_class}")
print(f" Timeout: {timeout}s")
# print(f" Log Level: {loglevel}")
# print(f" Reload: {reload}")