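# ollamafy Dockerfile: CUDA 12.4.1 + cuDNN devel base with ollama built from source, llama.cpp's llama-quantize, and a Gradio app (app.py)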
FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04
ENV DEBIAN_FRONTEND=noninteractive
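# System toolchain, CPython build libraries, Python 3.10, ffmpeg, and the NVIDIA 550 driver package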
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y --no-install-recommends ca-certificates \
git \
git-lfs \
wget \
curl \
# python build dependencies \
build-essential \
libssl-dev \
zlib1g-dev \
libbz2-dev \
libreadline-dev \
libsqlite3-dev \
libncursesw5-dev \
xz-utils \
tk-dev \
libxml2-dev \
libxmlsec1-dev \
libffi-dev \
liblzma-dev \
nvidia-driver-550 \
python3.10 \
python3.10-venv \
python3-pip \
python-is-python3 \
ffmpeg \
software-properties-common
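# Go toolchain from the longsleep/golang-backports PPA, needed to build ollama from source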
RUN add-apt-repository ppa:longsleep/golang-backports && \
apt-get update && \
apt-get upgrade -y && \
apt-get install -y --no-install-recommends ca-certificates \
golang-go
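# Run as a non-root user (UID 1000), as Hugging Face Spaces expects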
ENV USER='user'
RUN useradd -m -u 1000 ${USER}
USER ${USER}
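# ${HOME}/go/bin is on PATH so the ollama binary produced by 'go install' below can be run by name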
ENV HOME=/home/${USER}
ENV PATH=${HOME}/.local/bin:${HOME}/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH} \
APPDIR=${HOME}/app
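# Copy the Space's source tree into the app directory, owned by the non-root user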
WORKDIR ${APPDIR}
COPY --chown=1000 . ${APPDIR}
# ENV NVIDIA_VISIBLE_DEVICES=all
# RUN curl https://pyenv.run | bash
# ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
# ARG PYTHON_VERSION=3.10.13
# RUN pyenv install ${PYTHON_VERSION} && \
# pyenv global ${PYTHON_VERSION} && \
# pyenv rehash && \
# RUN python3.10 -m venv .venv
# RUN . .venv/bin/activate
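# Python dependencies for the Gradio app, installed into the system Python 3.10 (the pyenv/venv steps above are disabled)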
RUN python -m pip install --no-cache-dir -U pip setuptools wheel
RUN python -m pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
# RUN deactivate
# RUN go install golang.org/x/tools/gopls@latest
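# Build ollama from source with AVX/AVX2/F16C/FMA CPU kernels enabled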
RUN git clone https://github.com/ollama/ollama
WORKDIR ${APPDIR}/ollama
RUN OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate -v ./... && \
go build -v . && \
go install -v .
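# Clone llama.cpp and build the CPU-only llama-quantize tool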
WORKDIR ${APPDIR}
RUN git clone https://github.com/ggerganov/llama.cpp
COPY groups_merged.txt ${HOME}/app/llama.cpp/.
WORKDIR ${APPDIR}/llama.cpp
RUN python -m pip install -r requirements.txt && \
LLAMA_CUDA=0 make -j llama-quantize
# ENV PYTHONPATH=${APPDIR}/.venv/bin \
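# Runtime configuration for Gradio, hf-transfer, CUDA libraries, and the ollama server (bound to all interfaces)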
ENV PYTHONUNBUFFERED=1 \
HF_HUB_ENABLE_HF_TRANSFER=1 \
GRADIO_ALLOW_FLAGGING=never \
GRADIO_NUM_PORTS=1 \
GRADIO_SERVER_NAME=0.0.0.0 \
GRADIO_THEME=huggingface \
TQDM_POSITION=-1 \
TQDM_MININTERVAL=1 \
SYSTEM=spaces \
LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64:${LD_LIBRARY_PATH} \
NVIDIA_DRIVER_CAPABILITIES=compute,utility \
NVIDIA_VISIBLE_DEVICES=all \
OLLAMA_HOST=0.0.0.0
WORKDIR ${APPDIR}
# EXPOSE 11434
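# The server started here only lives for this build step; app.py presumably starts ollama again at runtime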
RUN ollama serve & sleep 5
# RUN . .venv/bin/activate
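# Launch the Gradio app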
ENTRYPOINT ["python", "app.py", "--verbose"]