black44 committed
Commit 7054b88 · verified · 1 Parent(s): f11bec0

Update Dockerfile

Files changed (1)
  1. Dockerfile +20 -13
Dockerfile CHANGED
@@ -1,23 +1,29 @@
-# Use the official Python 3.9 slim image
-FROM python:3.9-slim
+# Use official Python 3.10 image with slim variant for smaller size
+FROM python:3.10-slim
 
-# Set the working directory
-WORKDIR /app
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1 \
+    PYTHONUNBUFFERED=1 \
+    HF_HOME=/app/.cache \
+    TRANSFORMERS_CACHE=/app/.cache
 
-# Create and configure the cache directory with recursive permissions
-RUN mkdir -p /app/.cache && chmod -R 777 /app/.cache
+# Create directories and set permissions
+WORKDIR /app
+RUN mkdir -p /app/.cache /app/models && \
+    chmod -R 777 /app/.cache /app/models
 
-# Set environment variables for caching
-ENV TRANSFORMERS_CACHE=/app/.cache
-ENV HF_HOME=/app/.cache
+# Install system dependencies for audio processing
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    libsndfile1 \
+    ffmpeg && \
+    rm -rf /var/lib/apt/lists/*
 
-# Install dependencies
+# Install Python dependencies
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Pre-download the model (replace 'suno/bark' with a valid model if needed)
-RUN mkdir -p /app/models/suno-bark && \
-    python -c "from transformers import AutoTokenizer, AutoProcessor, BarkModel; \
+# Pre-download and cache models
+RUN python -c "from transformers import AutoTokenizer, AutoProcessor, BarkModel; \
     tokenizer = AutoTokenizer.from_pretrained('suno/bark'); \
     processor = AutoProcessor.from_pretrained('suno/bark'); \
     model = BarkModel.from_pretrained('suno/bark'); \
@@ -28,5 +34,6 @@ RUN mkdir -p /app/models/suno-bark && \
 # Copy application code
 COPY app.py .
 
+# Expose port and run
 EXPOSE 7860
 CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]