unclemusclez committed on
Commit
8c9ed7b
·
verified ·
1 Parent(s): 7afd7f7

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +27 -22
Dockerfile CHANGED
@@ -1,12 +1,9 @@
1
- FROM ollama/ollama:latest
2
  FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
3
 
4
-
5
-
6
  ENV DEBIAN_FRONTEND=noninteractive
7
  RUN apt-get update && \
8
  apt-get upgrade -y && \
9
- apt-get install -y --no-install-recommends \
10
  git \
11
  git-lfs \
12
  wget \
@@ -28,14 +25,28 @@ RUN apt-get update && \
28
  nvidia-driver-515 \
29
  ffmpeg
30
 
 
31
  ENV USER='user'
32
  RUN useradd -m -u 1000 ${USER}
33
  USER ${USER}
 
 
34
  ENV HOME=/home/${USER} \
35
- PATH=/home/${USER}/.local/bin:${PATH} \
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  OLLAMA_HOST=0.0.0.0
37
- WORKDIR ${HOME}/app
38
- EXPOSE 11434
39
 
40
  RUN curl https://pyenv.run | bash
41
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
@@ -45,25 +56,19 @@ RUN pyenv install ${PYTHON_VERSION} && \
45
  pyenv rehash && \
46
  pip install --no-cache-dir -U pip setuptools wheel && \
47
  pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
48
-
49
- COPY --chown=1000 . ${HOME}/app
50
  RUN git clone https://github.com/ggerganov/llama.cpp
51
  RUN pip install -r llama.cpp/requirements.txt
52
-
53
  COPY groups_merged.txt ${HOME}/app/llama.cpp/
54
 
55
- ENV PYTHONPATH=${HOME}/app \
56
- PYTHONUNBUFFERED=1 \
57
- HF_HUB_ENABLE_HF_TRANSFER=1 \
58
- GRADIO_ALLOW_FLAGGING=never \
59
- GRADIO_NUM_PORTS=1 \
60
- GRADIO_SERVER_NAME=0.0.0.0 \
61
- GRADIO_THEME=huggingface \
62
- TQDM_POSITION=-1 \
63
- TQDM_MININTERVAL=1 \
64
- SYSTEM=spaces \
65
- LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
66
- PATH=/usr/local/nvidia/bin:${PATH}
67
 
 
 
 
 
 
 
 
68
  ENTRYPOINT /bin/sh start.sh
69
 
 
 
1
  FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
2
 
 
 
3
  ENV DEBIAN_FRONTEND=noninteractive
4
  RUN apt-get update && \
5
  apt-get upgrade -y && \
6
+ apt-get install -y --no-install-recommends ca-certificates \
7
  git \
8
  git-lfs \
9
  wget \
 
25
  nvidia-driver-515 \
26
  ffmpeg
27
 
28
+
29
  ENV USER='user'
30
  RUN useradd -m -u 1000 ${USER}
31
  USER ${USER}
32
+ COPY --chown=1000 . ${HOME}/app
33
+
34
  ENV HOME=/home/${USER} \
35
+ PATH=/home/${USER}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH} \
36
+ PYTHONPATH=${HOME}/app \
37
+ PYTHONUNBUFFERED=1 \
38
+ HF_HUB_ENABLE_HF_TRANSFER=1 \
39
+ GRADIO_ALLOW_FLAGGING=never \
40
+ GRADIO_NUM_PORTS=1 \
41
+ GRADIO_SERVER_NAME=0.0.0.0 \
42
+ GRADIO_THEME=huggingface \
43
+ TQDM_POSITION=-1 \
44
+ TQDM_MININTERVAL=1 \
45
+ SYSTEM=spaces \
46
+ LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
47
+ NVIDIA_DRIVER_CAPABILITIES=compute,utility \
48
+ NVIDIA_VISIBLE_DEVICES=all \
49
  OLLAMA_HOST=0.0.0.0
 
 
50
 
51
  RUN curl https://pyenv.run | bash
52
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
 
56
  pyenv rehash && \
57
  pip install --no-cache-dir -U pip setuptools wheel && \
58
  pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
59
+
60
+ COPY --from=ollama/ollama:latest /bin/ollama /bin/ollama
61
  RUN git clone https://github.com/ggerganov/llama.cpp
62
  RUN pip install -r llama.cpp/requirements.txt
 
63
  COPY groups_merged.txt ${HOME}/app/llama.cpp/
64
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
+
67
+
68
+
69
+
70
+
71
+
72
+ # EXPOSE 11434
73
  ENTRYPOINT /bin/sh start.sh
74