# gunicorn.conf.py
import os
import multiprocessing  # used only by the optional cpu_count() sizing below

# Hugging Face Spaces expects the app on port 7860 by default
_port = os.environ.get('PORT', '7860')
bind = f"0.0.0.0:{_port}"

# Worker class for ASGI
worker_class = 'uvicorn.workers.UvicornWorker'
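# Note: this worker class lives in the uvicorn package, so uvicorn must be
# installed alongside gunicorn (e.g. `pip install gunicorn uvicorn`).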

# Number of workers. Read from the GUNICORN_WORKERS env var; default to 1
# for safety on potentially small free-tier instances. A common sizing rule
# is min(multiprocessing.cpu_count() * 2 + 1, 4) -- see the commented-out
# alternative below.
workers = int(os.environ.get('GUNICORN_WORKERS', '1'))
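# Optional alternative (uncomment to size workers from the CPU count, capped at 4):
# workers = min(multiprocessing.cpu_count() * 2 + 1, 4)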

# Timeout. Must exceed the longest expected scrape + summary task:
# Crawl4AI's default timeout is 60s and Gemini/OpenRouter calls can take
# 90-120s, so 240s leaves a comfortable buffer.
timeout = 240  # seconds
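# Gunicorn's separate graceful_timeout (default 30s) may also be worth raising
# if in-flight tasks should be allowed to finish during worker restarts.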

# Optional: Log level (can also be set via env var)
# loglevel = os.environ.get('GUNICORN_LOGLEVEL', 'info').lower()

# Optional: Reload for development (usually False in production)
# reload = os.environ.get('GUNICORN_RELOAD', 'False').lower() == 'true'

print(f"Gunicorn config:")
print(f"  Bind: {bind}")
print(f"  Workers: {workers}")
print(f"  Worker Class: {worker_class}")
print(f"  Timeout: {timeout}s")
# print(f"  Log Level: {loglevel}")
# print(f"  Reload: {reload}")