open-webui (main)
Published 2024-09-09 14:24:38 +00:00 by Kabooshki
Installation
docker pull gitea.henriserverack.com/archive-team/open-webui:main
sha256:21e2705c4ee6ba760f09847f38c1f7576ad0ff6637087a66defe31392cc9b3cc
About this package
User-friendly WebUI for LLMs (Formerly Ollama WebUI)
Image Layers
ADD file:d13afefcc2b0b02b598a3ac2598fe2187db41de1e17820e5b600a955b1429d59 in / |
CMD ["bash"] |
ENV PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin |
ENV LANG=C.UTF-8 |
RUN /bin/sh -c set -eux; apt-get update; apt-get install -y --no-install-recommends ca-certificates netbase tzdata ; rm -rf /var/lib/apt/lists/* # buildkit |
ENV GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D |
ENV PYTHON_VERSION=3.11.9 |
RUN /bin/sh -c set -eux; savedAptMark="$(apt-mark showmanual)"; apt-get update; apt-get install -y --no-install-recommends dpkg-dev gcc gnupg libbluetooth-dev libbz2-dev libc6-dev libdb-dev libexpat1-dev libffi-dev libgdbm-dev liblzma-dev libncursesw5-dev libreadline-dev libsqlite3-dev libssl-dev make tk-dev uuid-dev wget xz-utils zlib1g-dev ; wget -O python.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz"; wget -O python.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc"; GNUPGHOME="$(mktemp -d)"; export GNUPGHOME; gpg --batch --keyserver hkps://keys.openpgp.org --recv-keys "$GPG_KEY"; gpg --batch --verify python.tar.xz.asc python.tar.xz; gpgconf --kill all; rm -rf "$GNUPGHOME" python.tar.xz.asc; mkdir -p /usr/src/python; tar --extract --directory /usr/src/python --strip-components=1 --file python.tar.xz; rm python.tar.xz; cd /usr/src/python; gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)"; ./configure --build="$gnuArch" --enable-loadable-sqlite-extensions --enable-optimizations --enable-option-checking=fatal --enable-shared --with-lto --with-system-expat --without-ensurepip ; nproc="$(nproc)"; EXTRA_CFLAGS="$(dpkg-buildflags --get CFLAGS)"; LDFLAGS="$(dpkg-buildflags --get LDFLAGS)"; LDFLAGS="${LDFLAGS:--Wl},--strip-all"; make -j "$nproc" "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" "LDFLAGS=${LDFLAGS:-}" "PROFILE_TASK=${PROFILE_TASK:-}" ; rm python; make -j "$nproc" "EXTRA_CFLAGS=${EXTRA_CFLAGS:-}" "LDFLAGS=${LDFLAGS:--Wl},-rpath='\$\$ORIGIN/../lib'" "PROFILE_TASK=${PROFILE_TASK:-}" python ; make install; cd /; rm -rf /usr/src/python; find /usr/local -depth \( \( -type d -a \( -name test -o -name tests -o -name idle_test \) \) -o \( -type f -a \( -name '*.pyc' -o -name '*.pyo' -o -name 'libpython*.a' \) \) \) -exec rm -rf '{}' + ; ldconfig; apt-mark auto '.*' > /dev/null; apt-mark manual $savedAptMark; find /usr/local -type f -executable -not \( -name '*tkinter*' \) -exec ldd '{}' ';' | awk '/=>/ { so = $(NF-1); if (index(so, "/usr/local/") == 1) { next }; gsub("^/(usr/)?", "", so); printf "*%s\n", so }' | sort -u | xargs -r dpkg-query --search | cut -d: -f1 | sort -u | xargs -r apt-mark manual ; apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; rm -rf /var/lib/apt/lists/*; python3 --version # buildkit |
RUN /bin/sh -c set -eux; for src in idle3 pydoc3 python3 python3-config; do dst="$(echo "$src" | tr -d 3)"; [ -s "/usr/local/bin/$src" ]; [ ! -e "/usr/local/bin/$dst" ]; ln -svT "$src" "/usr/local/bin/$dst"; done # buildkit |
ENV PYTHON_PIP_VERSION=24.0 |
ENV PYTHON_SETUPTOOLS_VERSION=65.5.1 |
ENV PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/def4aec84b261b939137dd1c69eff0aabb4a7bf4/public/get-pip.py |
ENV PYTHON_GET_PIP_SHA256=bc37786ec99618416cc0a0ca32833da447f4d91ab51d2c138dd15b7af21e8e9a |
RUN /bin/sh -c set -eux; savedAptMark="$(apt-mark showmanual)"; apt-get update; apt-get install -y --no-install-recommends wget; wget -O get-pip.py "$PYTHON_GET_PIP_URL"; echo "$PYTHON_GET_PIP_SHA256 *get-pip.py" | sha256sum -c -; apt-mark auto '.*' > /dev/null; [ -z "$savedAptMark" ] || apt-mark manual $savedAptMark > /dev/null; apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; rm -rf /var/lib/apt/lists/*; export PYTHONDONTWRITEBYTECODE=1; python get-pip.py --disable-pip-version-check --no-cache-dir --no-compile "pip==$PYTHON_PIP_VERSION" "setuptools==$PYTHON_SETUPTOOLS_VERSION" wheel ; rm -f get-pip.py; pip --version # buildkit |
CMD ["python3"] |
ARG USE_CUDA=false |
ARG USE_OLLAMA=false |
ARG USE_CUDA_VER=cu121 |
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 |
ARG USE_RERANKING_MODEL= |
ARG UID=0 |
ARG GID=0 |
ENV ENV=prod PORT=8080 USE_OLLAMA_DOCKER=false USE_CUDA_DOCKER=false USE_CUDA_DOCKER_VER=cu121 USE_EMBEDDING_MODEL_DOCKER=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL_DOCKER= |
ENV OLLAMA_BASE_URL=/ollama OPENAI_API_BASE_URL= |
ENV OPENAI_API_KEY= WEBUI_SECRET_KEY= SCARF_NO_ANALYTICS=true DO_NOT_TRACK=true ANONYMIZED_TELEMETRY=false |
ENV WHISPER_MODEL=base WHISPER_MODEL_DIR=/app/backend/data/cache/whisper/models |
ENV RAG_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 RAG_RERANKING_MODEL= SENTENCE_TRANSFORMERS_HOME=/app/backend/data/cache/embedding/models |
ENV HF_HOME=/app/backend/data/cache/embedding/models |
WORKDIR /app/backend |
ENV HOME=/root |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c if [ $UID -ne 0 ]; then if [ $GID -ne 0 ]; then addgroup --gid $GID app; fi; adduser --uid $UID --gid $GID --home $HOME --disabled-password --no-create-home app; fi # buildkit |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c mkdir -p $HOME/.cache/chroma # buildkit |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c echo -n 00000000-0000-0000-0000-000000000000 > $HOME/.cache/chroma/telemetry_user_id # buildkit |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c chown -R $UID:$GID /app $HOME # buildkit |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c if [ "$USE_OLLAMA" = "true" ]; then apt-get update && apt-get install -y --no-install-recommends pandoc netcat-openbsd curl && apt-get install -y --no-install-recommends gcc python3-dev && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && apt-get install -y --no-install-recommends curl jq && curl -fsSL https://ollama.com/install.sh | sh && rm -rf /var/lib/apt/lists/*; else apt-get update && apt-get install -y --no-install-recommends pandoc gcc netcat-openbsd curl jq && apt-get install -y --no-install-recommends gcc python3-dev && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && rm -rf /var/lib/apt/lists/*; fi # buildkit |
COPY --chown=0:0 ./backend/requirements.txt ./requirements.txt # buildkit |
RUN |7 USE_CUDA=false USE_OLLAMA=false USE_CUDA_VER=cu121 USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 USE_RERANKING_MODEL= UID=0 GID=0 /bin/sh -c pip3 install uv && if [ "$USE_CUDA" = "true" ]; then pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && uv pip install --system -r requirements.txt --no-cache-dir && python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; else pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && uv pip install --system -r requirements.txt --no-cache-dir && python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; fi; chown -R $UID:$GID /app/backend/data/ # buildkit |
COPY --chown=0:0 /app/build /app/build # buildkit |
COPY --chown=0:0 /app/CHANGELOG.md /app/CHANGELOG.md # buildkit |
COPY --chown=0:0 /app/package.json /app/package.json # buildkit |
COPY --chown=0:0 ./backend . # buildkit |
EXPOSE map[8080/tcp:{}] |
HEALTHCHECK &{["CMD-SHELL" "curl --silent --fail http://localhost:${PORT:-8080}/health | jq -ne 'input.status == true' || exit 1"] "0s" "0s" "0s" "0s" '\x00'} |
USER 0:0 |
ARG BUILD_HASH=e510c8b11f0f898796635cf09aa93a3d72c4c2ee |
ENV WEBUI_BUILD_VERSION=e510c8b11f0f898796635cf09aa93a3d72c4c2ee |
CMD ["bash" "start.sh"] |
Labels
Key | Value |
---|---|
org.opencontainers.image.created | 2024-09-08T00:52:54.876Z |
org.opencontainers.image.description | User-friendly WebUI for LLMs (Formerly Ollama WebUI) |
org.opencontainers.image.licenses | MIT |
org.opencontainers.image.revision | e510c8b11f0f898796635cf09aa93a3d72c4c2ee |
org.opencontainers.image.source | https://github.com/open-webui/open-webui |
org.opencontainers.image.title | open-webui |
org.opencontainers.image.url | https://github.com/open-webui/open-webui |
org.opencontainers.image.version | main |