# syntax=docker/dockerfile:1.4

#### Web UI ------------------------------------

FROM docker.io/node:22-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
RUN corepack enable

WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/pnpm/store \
    pnpm install --frozen-lockfile
RUN npx vite build

## Backend ---------------------------------------

FROM library/ubuntu:24.04

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt \
    apt update && apt install -y --no-install-recommends \
    ca-certificates \
    git \
    gosu \
    libglib2.0-0 \
    libgl1 \
    libglx-mesa0 \
    build-essential \
    libopencv-dev \
    libstdc++-10-dev \
    wget

ENV \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    VIRTUAL_ENV=/opt/venv \
    INVOKEAI_SRC=/opt/invokeai \
    PYTHON_VERSION=3.12 \
    UV_PYTHON=3.12 \
    UV_COMPILE_BYTECODE=1 \
    UV_MANAGED_PYTHON=1 \
    UV_LINK_MODE=copy \
    UV_PROJECT_ENVIRONMENT=/opt/venv \
    INVOKEAI_ROOT=/invokeai \
    INVOKEAI_HOST=0.0.0.0 \
    INVOKEAI_PORT=9090 \
    PATH="/opt/venv/bin:$PATH" \
    CONTAINER_UID=${CONTAINER_UID:-1000} \
    CONTAINER_GID=${CONTAINER_GID:-1000}

ARG GPU_DRIVER=cuda
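
# Example build invocations (illustrative; the Dockerfile path and image tags are
# assumptions, not fixed by this file). The build context must be the repo root:
#   docker build --build-arg GPU_DRIVER=cuda -f docker/Dockerfile -t invokeai:cuda .
#   docker build --build-arg GPU_DRIVER=rocm -f docker/Dockerfile -t invokeai:rocm .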

# Install `uv` for package management
COPY --from=ghcr.io/astral-sh/uv:0.6.9 /uv /uvx /bin/

# Install python & make it usable by non-root users: /root is made traversable
# (execute-only, no read permission) so the uv-managed interpreter under /root/.local can be reached
RUN --mount=type=cache,target=/root/.cache/uv \
    uv python install ${PYTHON_VERSION} && \
    # chmod --recursive a+rX /root/.local/share/uv/python
    chmod 711 /root

WORKDIR ${INVOKEAI_SRC}

# Install project's dependencies as a separate layer so they aren't rebuilt every commit.
# bind-mount instead of copy to defer adding sources to the image until next layer.
#
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is the default
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=uv.lock,target=uv.lock \
    # this is just to get the package manager to recognize that the project exists, without making changes to the docker layer
    --mount=type=bind,source=invokeai/version,target=invokeai/version \
    ulimit -n 30000 && \
    uv sync --extra $GPU_DRIVER --frozen

RUN --mount=type=cache,target=/var/cache/apt \
    --mount=type=cache,target=/var/lib/apt \
    if [ "$GPU_DRIVER" = "rocm" ]; then \
        wget -O /tmp/amdgpu-install.deb \
        https://repo.radeon.com/amdgpu-install/6.3.4/ubuntu/noble/amdgpu-install_6.3.60304-1_all.deb && \
        apt install -y /tmp/amdgpu-install.deb && \
        apt update && \
        amdgpu-install --usecase=rocm -y && \
        apt-get autoclean && \
        apt clean && \
        rm -rf /tmp/* /var/tmp/* && \
        usermod -a -G render ubuntu && \
        usermod -a -G video ubuntu && \
        echo "\\n/opt/rocm/lib\\n/opt/rocm/lib64" >> /etc/ld.so.conf.d/rocm.conf && \
        ldconfig && \
        update-alternatives --auto rocm; \
    fi
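
# Note: at runtime, a ROCm-enabled container typically also needs the GPU device
# nodes passed through (illustrative; not enforced by this file):
#   docker run --device /dev/kfd --device /dev/dri ...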

## Heathen711: Leaving this for review input, will remove before merge
# RUN --mount=type=cache,target=/var/cache/apt \
#     --mount=type=cache,target=/var/lib/apt \
#     if [ "$GPU_DRIVER" = "rocm" ]; then \
#         groupadd render && \
#         usermod -a -G render ubuntu && \
#         usermod -a -G video ubuntu; \
#     fi

## Link amdgpu.ids for ROCm builds
## contributed by https://github.com/Rubonnek
# RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
#     ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"

# build patchmatch: the opencv pkg-config alias below lets it compile,
# and importing the module triggers the build
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python -c "from patchmatch import patch_match"

RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web"]

# --link requires BuildKit with dockerfile syntax 1.4; does not work with podman
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# add sources last to minimize image changes on code changes
COPY invokeai ${INVOKEAI_SRC}/invokeai

# this should not increase image size because we've already installed dependencies
# in a previous layer
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=pyproject.toml,target=pyproject.toml \
    --mount=type=bind,source=uv.lock,target=uv.lock \
    ulimit -n 30000 && \
    uv pip install -e .[$GPU_DRIVER]
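
# Example of running the finished image (illustrative; the image tag, host data
# directory, and GPU flags are assumptions, not defined by this file):
#   docker run --rm --gpus all -p 9090:9090 \
#     -v /path/to/invokeai-data:/invokeai \
#     invokeai:cuda
# The web server listens on INVOKEAI_PORT (9090), and INVOKEAI_ROOT (/invokeai)
# holds models, configuration, and outputs, per the ENV block above.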