unclemusclez committed on
Commit
7b1e862
·
verified ·
1 Parent(s): 0922966

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +10 -9
Dockerfile CHANGED
@@ -26,6 +26,7 @@ RUN apt-get update && \
26
  nvidia-driver-550 \
27
  python3.10 \
28
  python3.10-venv \
 
29
  ffmpeg
30
 
31
  ENV USER='user'
@@ -46,18 +47,18 @@ COPY --chown=1000 . ${APPDIR}
46
  # pyenv global ${PYTHON_VERSION} && \
47
  # pyenv rehash && \
48
 
49
- RUN python3.10 -m venv .venv
50
- RUN . .venv/bin/activate
51
- RUN pip install --no-cache-dir -U pip setuptools wheel && \
52
- pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
53
- RUN deactivate
54
 
55
  RUN git clone https://github.com/ollama/ollama
56
  RUN git clone https://github.com/ggerganov/llama.cpp
57
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
58
 
59
- ENV PYTHONPATH=${APPDIR}/.venv/bin \
60
- PYTHONUNBUFFERED=1 \
61
  HF_HUB_ENABLE_HF_TRANSFER=1 \
62
  GRADIO_ALLOW_FLAGGING=never \
63
  GRADIO_NUM_PORTS=1 \
@@ -77,12 +78,12 @@ RUN OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_F
77
  go install . --verbose
78
 
79
  WORKDIR ${APPDIR}/llama.cpp
80
- RUN pip install -r requirements.txt \
81
  LLAMA_CUDA=0 make -j llama-quantize --verbose
82
 
83
  WORKDIR ${APPDIR}
84
 
85
  # EXPOSE map[11434/tcp:{}]
86
  RUN ollama serve --verbose & sleep 5
87
- RUN . .venv/bin/activate
88
  ENTRYPOINT python app.py --verbose
 
26
  nvidia-driver-550 \
27
  python3.10 \
28
  python3.10-venv \
29
+ python-is-python3 \
30
  ffmpeg
31
 
32
  ENV USER='user'
 
47
  # pyenv global ${PYTHON_VERSION} && \
48
  # pyenv rehash && \
49
 
50
+ # RUN python3.10 -m venv .venv
51
+ # RUN . .venv/bin/activate
52
+ RUN python pip install --no-cache-dir -U pip setuptools wheel
53
+ RUN python pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
54
+ # RUN deactivate
55
 
56
  RUN git clone https://github.com/ollama/ollama
57
  RUN git clone https://github.com/ggerganov/llama.cpp
58
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
59
 
60
+ # ENV PYTHONPATH=${APPDIR}/.venv/bin \
61
+ ENV PYTHONUNBUFFERED=1 \
62
  HF_HUB_ENABLE_HF_TRANSFER=1 \
63
  GRADIO_ALLOW_FLAGGING=never \
64
  GRADIO_NUM_PORTS=1 \
 
78
  go install . --verbose
79
 
80
  WORKDIR ${APPDIR}/llama.cpp
81
+ RUN python pip install -r requirements.txt \
82
  LLAMA_CUDA=0 make -j llama-quantize --verbose
83
 
84
  WORKDIR ${APPDIR}
85
 
86
  # EXPOSE map[11434/tcp:{}]
87
  RUN ollama serve --verbose & sleep 5
88
+ # RUN . .venv/bin/activate
89
  ENTRYPOINT python app.py --verbose