Upload nvidia/Dockerfile with huggingface_hub
Browse files- nvidia/Dockerfile +21 -0
nvidia/Dockerfile
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1

# BUILDER — single-stage image for oobabooga/text-generation-webui on NVIDIA GPUs.
FROM ubuntu:22.04
WORKDIR /builder

# Build-time knobs (defaults preserved from the original file).
# NOTE(review): TORCH_CUDA_ARCH_LIST, BUILD_EXTENSIONS, APP_UID and APP_GID are
# declared but never referenced below — confirm whether start_linux.sh reads
# them from the environment or whether they can be removed.
ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
ARG BUILD_EXTENSIONS="${BUILD_EXTENSIONS:-}"
ARG APP_UID="${APP_UID:-6972}"
ARG APP_GID="${APP_GID:-6972}"

# OS dependencies. apt-get (not apt: its CLI is not stable for scripts, DL3027);
# the cache mount keeps /var/cache/apt out of the image layer, and the list
# cleanup runs in the same layer so it actually shrinks the image.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
    apt-get update && \
    apt-get install --no-install-recommends -y \
        bash \
        bash-completion \
        build-essential \
        curl \
        git \
        pip \
        python3-dev \
        vim \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /home/app/
# NOTE(review): unpinned clone of the default branch — pin a tag or commit SHA
# for reproducible builds.
RUN git clone https://github.com/oobabooga/text-generation-webui.git

WORKDIR /home/app/text-generation-webui
# One-shot installer: GPU_CHOICE=A picks the NVIDIA option non-interactively,
# LAUNCH_AFTER_INSTALL=FALSE stops it from starting the server during build,
# INSTALL_EXTENSIONS=TRUE pulls in the bundled extensions.
RUN GPU_CHOICE=A USE_CUDA118=FALSE LAUNCH_AFTER_INSTALL=FALSE INSTALL_EXTENSIONS=TRUE ./start_linux.sh --verbose

# Runtime launch flags read by the webui on startup.
COPY CMD_FLAGS.txt /home/app/text-generation-webui/

# Documentation only — ports still need -p/--publish at run time.
EXPOSE ${CONTAINER_PORT:-7860} ${CONTAINER_API_PORT:-5000} ${CONTAINER_API_STREAM_PORT:-5005}

WORKDIR /home/app/text-generation-webui

# set umask to ensure group read / write at runtime
# FIX: the original CMD passed --model /Users/computer/git/... — a macOS host
# path that cannot exist inside the container, so the model load always failed.
# Point at the image's own models/ directory instead; bind-mount the .gguf file
# there at run time (docker run -v ...:/home/app/text-generation-webui/models).
# Shell form is intentional: umask, export, and ${...} expansion need a shell.
CMD umask 0002 && \
    export HOME=/home/app/text-generation-webui && \
    ./start_linux.sh --listen \
        --model /home/app/text-generation-webui/models/LLamandementFineTunéWithoutNotation16Q.gguf \
        --loader llama.cpp \
        --verbose
|