├── .dockerignore ├── .gitignore ├── Dockerfile ├── ubuntu22_cuda12.2.2.Dockerfile ├── ubuntu22_cuda12.3.2.Dockerfile ├── ubuntu22_cuda12.4.1.Dockerfile ├── ubuntu24_cuda12.5.1.Dockerfile ├── ubuntu24_cuda12.6.3.Dockerfile ├── ubuntu24_cuda12.8.Dockerfile └── ubuntu24_cuda12.9.Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── assets ├── FirstRun.png ├── Flux1Dev-run.png ├── ImportFailed-TryFix.png ├── Unraid_CA-ComfyUI-Nvidia-Docker.png ├── flux1-dev-lora.png └── flux1-schnell-lora.png ├── components ├── base-ubuntu22_cuda12.2.2.Dockerfile ├── base-ubuntu22_cuda12.3.2.Dockerfile ├── base-ubuntu22_cuda12.4.1.Dockerfile ├── base-ubuntu24_cuda12.5.1.Dockerfile ├── base-ubuntu24_cuda12.6.3.Dockerfile ├── base-ubuntu24_cuda12.8.Dockerfile ├── base-ubuntu24_cuda12.9.Dockerfile └── part1-common.Dockerfile ├── config.sh ├── extras ├── FAQ.md └── PyTorch2.7-CUDA12.8.sh ├── init.bash ├── userscripts_dir ├── 00-nvidiaDev.sh ├── 10-pip3Dev.sh ├── 20-SageAttention.sh ├── 21-Triton.sh ├── 25-HiDream.sh └── README.md └── workflow ├── ComfyUI-Flux1Dev-ExtendedWorkflow.png ├── ComfyUI_flux1dev-lora.png ├── ComfyUI_flux1schnell-lora.png └── HappyBirthdayComfy-AtomixFlux.png /.dockerignore: -------------------------------------------------------------------------------- 1 | assets/ 2 | components/ 3 | workflow/ 4 | 5 | userscripts_dir/ 6 | extras/ 7 | 8 | basedir/ 9 | run/ 10 | 11 | LICENSE 12 | README.md 13 | Makefile 14 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | *.log.temp 3 | *.cmd 4 | 5 | # Mac cache 6 | .DS_Store 7 | 8 | # version grab 9 | .comfyui_version 10 | 11 | # generation directories 12 | data/* 13 | HF/* 14 | models/* 15 | user/* 16 | 17 | run/* 18 | basedir/* -------------------------------------------------------------------------------- /Dockerfile/ubuntu22_cuda12.2.2.Dockerfile: 
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04
2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04
3 | 
4 | ##### Base
5 | 
6 | # Install system packages (non-interactive: no tzdata/locale prompts during build)
7 | ENV DEBIAN_FRONTEND=noninteractive
8 | RUN apt-get update -y --fix-missing \
9 |   && apt-get install -y \
10 |     apt-utils \
11 |     locales \
12 |     ca-certificates \
13 |   && apt-get upgrade -y \
14 |   && apt-get clean
15 | 
16 | # UTF-8: build the en_US.UTF-8 locale and make it the active one.
17 | # LC_ALL must agree with LANG -- LC_ALL=C would override LANG and disable UTF-8.
18 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
19 | ENV LANG=en_US.utf8
20 | ENV LC_ALL=en_US.UTF-8
21 | 
22 | # Install needed packages
23 | RUN apt-get update -y --fix-missing \
24 |   && apt-get upgrade -y \
25 |   && apt-get install -y \
26 |     build-essential \
27 |     python3-dev \
28 |     unzip \
29 |     wget \
30 |     zip \
31 |     zlib1g \
32 |     zlib1g-dev \
33 |     gnupg \
34 |     rsync \
35 |     python3-pip \
36 |     python3-venv \
37 |     git \
38 |     sudo \
39 |     libglib2.0-0 \
40 |     socat \
41 |   && apt-get clean
42 | 
43 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries
44 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26
45 | # (apt-get, not apt: apt(8)'s CLI is not stable for use in scripts)
46 | RUN apt-get install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \
47 |   && apt-get clean \
48 |   && rm -rf /var/lib/apt/lists/* \
49 |   && mkdir -p /usr/share/glvnd/egl_vendor.d \
50 |   && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \
51 |   && mkdir -p /usr/share/vulkan/icd.d \
52 |   && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json
53 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA"
54 | 
55 | # Record the base image and cuDNN details for runtime introspection
56 | ENV BUILD_FILE="/etc/image_base.txt"
57 | ARG BASE_DOCKER_FROM
58 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE}
59 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE}
60 | 
61 | ARG BUILD_BASE="unknown"
62 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE}
63 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it
64 | 
65 | # Place the init script and its config in / so it can be found by the entrypoint
66 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash
67 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh
68 | 
69 | ##### ComfyUI preparation
70 | # Every sudo group user does not need a password
71 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
72 | 
73 | # Create a new group for the comfy and comfytoo users
74 | RUN groupadd -g 1024 comfy \
75 |   && groupadd -g 1025 comfytoo
76 | 
77 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025),
78 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless)
79 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \
80 |   && usermod -G users comfy \
81 |   && adduser comfy sudo
82 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \
83 |   && usermod -G users comfytoo \
84 |   && adduser comfytoo sudo
85 | 
86 | ENV COMFYUSER_DIR="/comfy"
87 | RUN mkdir -p ${COMFYUSER_DIR}
88 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it
89 | 
90 | ENV NVIDIA_DRIVER_CAPABILITIES="all"
91 | ENV NVIDIA_VISIBLE_DEVICES=all
92 | 
93 | EXPOSE 8188
94 | 
95 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown"
96 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION}
97 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE}
98 | 
99 | # We start as comfytoo and will switch to the comfy user AFTER the container is up
100 | # and after having altered the comfy details to match the requested UID/GID
101 | USER comfytoo
102 | 
103 | # We use ENTRYPOINT to run the init script (from CMD)
104 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ]
105 | 
--------------------------------------------------------------------------------
/Dockerfile/ubuntu22_cuda12.3.2.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:12.3.2-cudnn9-devel-ubuntu22.04
2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.3.2-cudnn9-devel-ubuntu22.04
3 | 
4 | ##### Base
5 | 
6 | # Install system packages (non-interactive: no tzdata/locale prompts during build)
7 | ENV DEBIAN_FRONTEND=noninteractive
8 | RUN apt-get update -y --fix-missing \
9 |   && apt-get install -y \
10 |     apt-utils \
11 |     locales \
12 |     ca-certificates \
13 |   && apt-get upgrade -y \
14 |   && apt-get clean
15 | 
16 | # UTF-8: build the en_US.UTF-8 locale and make it the active one.
17 | # LC_ALL must agree with LANG -- LC_ALL=C would override LANG and disable UTF-8.
18 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
19 | ENV LANG=en_US.utf8
20 | ENV LC_ALL=en_US.UTF-8
21 | 
22 | # Install needed packages
23 | RUN apt-get update -y --fix-missing \
24 |   && apt-get upgrade -y \
25 |   && apt-get install -y \
26 |     build-essential \
27 |     python3-dev \
28 |     unzip \
29 |     wget \
30 |     zip \
31 |     zlib1g \
32 |     zlib1g-dev \
33 |     gnupg \
34 |     rsync \
35 |     python3-pip \
36 |     python3-venv \
37 |     git \
38 |     sudo \
39 |     libglib2.0-0 \
40 |     socat \
41 |   && apt-get clean
42 | 
43 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries
44 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26
45 | # (apt-get, not apt: apt(8)'s CLI is not stable for use in scripts)
46 | RUN apt-get install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \
47 |   && apt-get clean \
48 |   && rm -rf /var/lib/apt/lists/* \
49 |   && mkdir -p /usr/share/glvnd/egl_vendor.d \
50 |   && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \
51 |   && mkdir -p /usr/share/vulkan/icd.d \
52 |   && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json
53 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA"
54 | 
55 | # Record the base image and cuDNN details for runtime introspection
56 | ENV BUILD_FILE="/etc/image_base.txt"
57 | ARG BASE_DOCKER_FROM
58 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE}
59 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE}
60 | 
61 | ARG BUILD_BASE="unknown"
62 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE}
63 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it
64 | 
65 | # Place the init script and its config in / so it can be found by the entrypoint
66 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash
67 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh
68 | 
69 | ##### ComfyUI preparation
70 | # Every sudo group user does not need a password
71 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
72 | 
73 | # Create a new group for the comfy and comfytoo users
74 | RUN groupadd -g 1024 comfy \
75 |   && groupadd -g 1025 comfytoo
76 | 
77 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025),
78 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless)
79 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \
80 |   && usermod -G users comfy \
81 |   && adduser comfy sudo
82 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \
83 |   && usermod -G users comfytoo \
84 |   && adduser comfytoo sudo
85 | 
86 | ENV COMFYUSER_DIR="/comfy"
87 | RUN mkdir -p ${COMFYUSER_DIR}
88 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it
89 | 
90 | ENV NVIDIA_DRIVER_CAPABILITIES="all"
91 | ENV NVIDIA_VISIBLE_DEVICES=all
92 | 
93 | EXPOSE 8188
94 | 
95 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown"
96 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION}
97 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE}
98 | 
99 | # We start as comfytoo and will switch to the comfy user AFTER the container is up
100 | # and after having altered the comfy details to match the requested UID/GID
101 | USER comfytoo
102 | 
103 | # We use ENTRYPOINT to run the init script (from CMD)
104 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ]
105 | 
-------------------------------------------------------------------------------- /Dockerfile/ubuntu22_cuda12.4.1.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 3 | 4 | ##### Base 5 | 6 | # Install system packages 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | RUN apt-get update -y --fix-missing\ 9 | && apt-get install -y \ 10 | apt-utils \ 11 | locales \ 12 | ca-certificates \ 13 | && apt-get upgrade -y \ 14 | && apt-get clean 15 | 16 | # UTF-8 17 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 18 | ENV LANG=en_US.utf8 19 | ENV LC_ALL=C 20 | 21 | # Install needed packages 22 | RUN apt-get update -y --fix-missing \ 23 | && apt-get upgrade -y \ 24 | && apt-get install -y \ 25 | build-essential \ 26 | python3-dev \ 27 | unzip \ 28 | wget \ 29 | zip \ 30 | zlib1g \ 31 | zlib1g-dev \ 32 | gnupg \ 33 | rsync \ 34 | python3-pip \ 35 | python3-venv \ 36 | git \ 37 | sudo \ 38 | libglib2.0-0 \ 39 | socat \ 40 | && apt-get clean 41 | 42 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 43 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 44 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 45 | && apt-get clean \ 46 | && rm -rf /var/lib/apt/lists/* \ 47 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 48 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 49 | && mkdir -p /usr/share/vulkan/icd.d \ 50 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 51 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 52 | 53 | ENV BUILD_FILE="/etc/image_base.txt" 54 | ARG BASE_DOCKER_FROM 55 | RUN echo "DOCKER_FROM: 
${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 56 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 57 | 58 | ARG BUILD_BASE="unknown" 59 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 60 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 61 | 62 | # Place the init script and its config in / so it can be found by the entrypoint 63 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 64 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 65 | 66 | ##### ComfyUI preparation 67 | # Every sudo group user does not need a password 68 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 69 | 70 | # Create a new group for the comfy and comfytoo users 71 | RUN groupadd -g 1024 comfy \ 72 | && groupadd -g 1025 comfytoo 73 | 74 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025), 75 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless) 76 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 77 | && usermod -G users comfy \ 78 | && adduser comfy sudo 79 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 80 | && usermod -G users comfytoo \ 81 | && adduser comfytoo sudo 82 | 83 | ENV COMFYUSER_DIR="/comfy" 84 | RUN mkdir -p ${COMFYUSER_DIR} 85 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 86 | 87 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 88 | ENV NVIDIA_VISIBLE_DEVICES=all 89 | 90 | EXPOSE 8188 91 | 92 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 93 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 94 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 95 | 96 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 97 | # and after having altered the comfy details to match the requested UID/GID 98 | USER comfytoo 99 | 100 | # We use ENTRYPOINT to run the init script (from CMD) 101 | 
ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 102 | -------------------------------------------------------------------------------- /Dockerfile/ubuntu24_cuda12.5.1.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.5.1-devel-ubuntu24.04 2 | 3 | # Extended from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.5.1/ubuntu2404/runtime/Dockerfile 4 | ENV NV_CUDNN_VERSION=9.3.0.75-1 5 | ENV NV_CUDNN_PACKAGE_NAME="libcudnn9" 6 | ENV NV_CUDA_ADD=cuda-12 7 | ENV NV_CUDNN_PACKAGE="$NV_CUDNN_PACKAGE_NAME-$NV_CUDA_ADD=$NV_CUDNN_VERSION" 8 | 9 | LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" 10 | 11 | RUN apt-get update && apt-get install -y --no-install-recommends \ 12 | ${NV_CUDNN_PACKAGE} \ 13 | && apt-mark hold ${NV_CUDNN_PACKAGE_NAME}-${NV_CUDA_ADD} 14 | 15 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.5.1-devel-ubuntu24.04 16 | 17 | ##### Base 18 | 19 | # Install system packages 20 | ENV DEBIAN_FRONTEND=noninteractive 21 | RUN apt-get update -y --fix-missing\ 22 | && apt-get install -y \ 23 | apt-utils \ 24 | locales \ 25 | ca-certificates \ 26 | && apt-get upgrade -y \ 27 | && apt-get clean 28 | 29 | # UTF-8 30 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 31 | ENV LANG=en_US.utf8 32 | ENV LC_ALL=C 33 | 34 | # Install needed packages 35 | RUN apt-get update -y --fix-missing \ 36 | && apt-get upgrade -y \ 37 | && apt-get install -y \ 38 | build-essential \ 39 | python3-dev \ 40 | unzip \ 41 | wget \ 42 | zip \ 43 | zlib1g \ 44 | zlib1g-dev \ 45 | gnupg \ 46 | rsync \ 47 | python3-pip \ 48 | python3-venv \ 49 | git \ 50 | sudo \ 51 | libglib2.0-0 \ 52 | socat \ 53 | && apt-get clean 54 | 55 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 56 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 57 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 58 | && apt-get clean \ 
59 | && rm -rf /var/lib/apt/lists/* \ 60 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 61 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 62 | && mkdir -p /usr/share/vulkan/icd.d \ 63 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 64 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 65 | 66 | ENV BUILD_FILE="/etc/image_base.txt" 67 | ARG BASE_DOCKER_FROM 68 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 69 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 70 | 71 | ARG BUILD_BASE="unknown" 72 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 73 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 74 | 75 | # Place the init script and its config in / so it can be found by the entrypoint 76 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 77 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 78 | 79 | ##### ComfyUI preparation 80 | # Every sudo group user does not need a password 81 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 82 | 83 | # Create a new group for the comfy and comfytoo users 84 | RUN groupadd -g 1024 comfy \ 85 | && groupadd -g 1025 comfytoo 86 | 87 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025), 88 | # be part of the comfy (resp. 
comfytoo) and users groups and be sudo capable (passwordless) 89 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 90 | && usermod -G users comfy \ 91 | && adduser comfy sudo 92 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 93 | && usermod -G users comfytoo \ 94 | && adduser comfytoo sudo 95 | 96 | ENV COMFYUSER_DIR="/comfy" 97 | RUN mkdir -p ${COMFYUSER_DIR} 98 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 99 | 100 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 101 | ENV NVIDIA_VISIBLE_DEVICES=all 102 | 103 | EXPOSE 8188 104 | 105 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 106 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 107 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 108 | 109 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 110 | # and after having altered the comfy details to match the requested UID/GID 111 | USER comfytoo 112 | 113 | # We use ENTRYPOINT to run the init script (from CMD) 114 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 115 | -------------------------------------------------------------------------------- /Dockerfile/ubuntu24_cuda12.6.3.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04##### Base 3 | 4 | # Install system packages 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | RUN apt-get update -y --fix-missing\ 7 | && apt-get install -y \ 8 | apt-utils \ 9 | locales \ 10 | ca-certificates \ 11 | && apt-get upgrade -y \ 12 | && apt-get clean 13 | 14 | # UTF-8 15 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 16 | ENV LANG=en_US.utf8 17 | ENV LC_ALL=C 18 | 19 | # Install needed packages 20 | RUN apt-get update -y --fix-missing \ 21 | && apt-get upgrade -y \ 22 | && 
apt-get install -y \ 23 | build-essential \ 24 | python3-dev \ 25 | unzip \ 26 | wget \ 27 | zip \ 28 | zlib1g \ 29 | zlib1g-dev \ 30 | gnupg \ 31 | rsync \ 32 | python3-pip \ 33 | python3-venv \ 34 | git \ 35 | sudo \ 36 | libglib2.0-0 \ 37 | socat \ 38 | && apt-get clean 39 | 40 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 41 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 42 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 43 | && apt-get clean \ 44 | && rm -rf /var/lib/apt/lists/* \ 45 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 46 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 47 | && mkdir -p /usr/share/vulkan/icd.d \ 48 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 49 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 50 | 51 | ENV BUILD_FILE="/etc/image_base.txt" 52 | ARG BASE_DOCKER_FROM 53 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 54 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 55 | 56 | ARG BUILD_BASE="unknown" 57 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 58 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 59 | 60 | # Place the init script and its config in / so it can be found by the entrypoint 61 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 62 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 63 | 64 | ##### ComfyUI preparation 65 | # Every sudo group user does not need a password 66 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 67 | 68 | # Create a new group for the comfy and comfytoo users 69 | RUN groupadd -g 1024 comfy \ 70 | && groupadd -g 1025 comfytoo 71 | 72 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 
1025), 73 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless) 74 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 75 | && usermod -G users comfy \ 76 | && adduser comfy sudo 77 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 78 | && usermod -G users comfytoo \ 79 | && adduser comfytoo sudo 80 | 81 | ENV COMFYUSER_DIR="/comfy" 82 | RUN mkdir -p ${COMFYUSER_DIR} 83 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 84 | 85 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 86 | ENV NVIDIA_VISIBLE_DEVICES=all 87 | 88 | EXPOSE 8188 89 | 90 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 91 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 92 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 93 | 94 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 95 | # and after having altered the comfy details to match the requested UID/GID 96 | USER comfytoo 97 | 98 | # We use ENTRYPOINT to run the init script (from CMD) 99 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 100 | -------------------------------------------------------------------------------- /Dockerfile/ubuntu24_cuda12.8.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.8.1-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.8.1-cudnn-devel-ubuntu24.04 3 | ##### Base 4 | 5 | # Install system packages 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | RUN apt-get update -y --fix-missing\ 8 | && apt-get install -y \ 9 | apt-utils \ 10 | locales \ 11 | ca-certificates \ 12 | && apt-get upgrade -y \ 13 | && apt-get clean 14 | 15 | # UTF-8 16 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 17 | ENV LANG=en_US.utf8 18 | ENV LC_ALL=C 19 | 20 | # Install needed packages 21 | RUN apt-get update -y --fix-missing \ 22 | 
&& apt-get upgrade -y \ 23 | && apt-get install -y \ 24 | build-essential \ 25 | python3-dev \ 26 | unzip \ 27 | wget \ 28 | zip \ 29 | zlib1g \ 30 | zlib1g-dev \ 31 | gnupg \ 32 | rsync \ 33 | python3-pip \ 34 | python3-venv \ 35 | git \ 36 | sudo \ 37 | libglib2.0-0 \ 38 | socat \ 39 | && apt-get clean 40 | 41 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 42 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 43 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 44 | && apt-get clean \ 45 | && rm -rf /var/lib/apt/lists/* \ 46 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 47 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 48 | && mkdir -p /usr/share/vulkan/icd.d \ 49 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 50 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 51 | 52 | ENV BUILD_FILE="/etc/image_base.txt" 53 | ARG BASE_DOCKER_FROM 54 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 55 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 56 | 57 | ARG BUILD_BASE="unknown" 58 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 59 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 60 | 61 | # Place the init script and its config in / so it can be found by the entrypoint 62 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 63 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 64 | 65 | ##### ComfyUI preparation 66 | # Every sudo group user does not need a password 67 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 68 | 69 | # Create a new group for the comfy and comfytoo users 70 | RUN groupadd -g 1024 comfy \ 71 | && groupadd -g 1025 comfytoo 72 | 73 | # The comfy (resp. 
comfytoo) user will have UID 1024 (resp. 1025), 74 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless) 75 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 76 | && usermod -G users comfy \ 77 | && adduser comfy sudo 78 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 79 | && usermod -G users comfytoo \ 80 | && adduser comfytoo sudo 81 | 82 | ENV COMFYUSER_DIR="/comfy" 83 | RUN mkdir -p ${COMFYUSER_DIR} 84 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 85 | 86 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 87 | ENV NVIDIA_VISIBLE_DEVICES=all 88 | 89 | EXPOSE 8188 90 | 91 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 92 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 93 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 94 | 95 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 96 | # and after having altered the comfy details to match the requested UID/GID 97 | USER comfytoo 98 | 99 | # We use ENTRYPOINT to run the init script (from CMD) 100 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 101 | -------------------------------------------------------------------------------- /Dockerfile/ubuntu24_cuda12.9.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04 3 | ##### Base 4 | 5 | # Install system packages 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | RUN apt-get update -y --fix-missing\ 8 | && apt-get install -y \ 9 | apt-utils \ 10 | locales \ 11 | ca-certificates \ 12 | && apt-get upgrade -y \ 13 | && apt-get clean 14 | 15 | # UTF-8 16 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 17 | ENV LANG=en_US.utf8 18 | ENV LC_ALL=C 19 | 20 | # Install needed packages 21 | 
RUN apt-get update -y --fix-missing \ 22 | && apt-get upgrade -y \ 23 | && apt-get install -y \ 24 | build-essential \ 25 | python3-dev \ 26 | unzip \ 27 | wget \ 28 | zip \ 29 | zlib1g \ 30 | zlib1g-dev \ 31 | gnupg \ 32 | rsync \ 33 | python3-pip \ 34 | python3-venv \ 35 | git \ 36 | sudo \ 37 | libglib2.0-0 \ 38 | socat \ 39 | && apt-get clean 40 | 41 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 42 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 43 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 44 | && apt-get clean \ 45 | && rm -rf /var/lib/apt/lists/* \ 46 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 47 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 48 | && mkdir -p /usr/share/vulkan/icd.d \ 49 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 50 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 51 | 52 | ENV BUILD_FILE="/etc/image_base.txt" 53 | ARG BASE_DOCKER_FROM 54 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 55 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 56 | 57 | ARG BUILD_BASE="unknown" 58 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 59 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 60 | 61 | # Place the init script and its config in / so it can be found by the entrypoint 62 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 63 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 64 | 65 | ##### ComfyUI preparation 66 | # Every sudo group user does not need a password 67 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 68 | 69 | # Create a new group for the comfy and comfytoo users 70 | RUN groupadd -g 1024 comfy \ 71 | && groupadd -g 1025 comfytoo 72 | 
73 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025), 74 | # be part of the comfy (resp. comfytoo) and users groups and be sudo capable (passwordless) 75 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 76 | && usermod -G users comfy \ 77 | && adduser comfy sudo 78 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 79 | && usermod -G users comfytoo \ 80 | && adduser comfytoo sudo 81 | 82 | ENV COMFYUSER_DIR="/comfy" 83 | RUN mkdir -p ${COMFYUSER_DIR} 84 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 85 | 86 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 87 | ENV NVIDIA_VISIBLE_DEVICES=all 88 | 89 | EXPOSE 8188 90 | 91 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 92 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 93 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 94 | 95 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 96 | # and after having altered the comfy details to match the requested UID/GID 97 | USER comfytoo 98 | 99 | # We use ENTRYPOINT to run the init script (from CMD) 100 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 101 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-2025 Martial Michel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this 
permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | .PHONY: all 3 | 4 | DOCKER_CMD=docker 5 | DOCKER_PRE="NVIDIA_VISIBLE_DEVICES=all" 6 | DOCKER_BUILD_ARGS= 7 | 8 | COMFYUI_NVIDIA_DOCKER_VERSION=20250607 9 | 10 | COMFYUI_CONTAINER_NAME=comfyui-nvidia-docker 11 | 12 | COMPONENTS_DIR=components 13 | DOCKERFILE_DIR=Dockerfile 14 | 15 | # Get the list of all the base- files in COMPONENTS_DIR 16 | DOCKER_ALL=$(shell ls -1 ${COMPONENTS_DIR}/base-* | perl -pe 's%^.+/base-%%' | perl -pe 's%\.Dockerfile%%' | sort) 17 | 18 | all: 19 | @if [ `echo ${DOCKER_ALL} | wc -w` -eq 0 ]; then echo "No images candidates to build"; exit 1; fi 20 | @echo "Available ${COMFYUI_CONTAINER_NAME} ${DOCKER_CMD} images to be built (make targets):" 21 | @echo -n " "; echo ${DOCKER_ALL} | sed -e 's/ /\n /g' 22 | @echo "" 23 | @echo "build: builds all" 24 | 25 | build: ${DOCKER_ALL} 26 | 27 | ${DOCKERFILE_DIR}: 28 | @mkdir -p ${DOCKERFILE_DIR} 29 | 30 | ${DOCKER_ALL}: ${DOCKERFILE_DIR} 31 | @echo ""; echo ""; echo "===== Building ${COMFYUI_CONTAINER_NAME}:$@" 32 | @$(eval DOCKERFILE_NAME="${DOCKERFILE_DIR}/$@.Dockerfile") 33 | @cat ${COMPONENTS_DIR}/base-$@.Dockerfile > ${DOCKERFILE_NAME} 34 | @cat ${COMPONENTS_DIR}/part1-common.Dockerfile >> ${DOCKERFILE_NAME} 
35 | @$(eval VAR_NT="${COMFYUI_CONTAINER_NAME}-$@") 36 | @echo "-- Docker command to be run:" 37 | @echo "docker buildx ls | grep -q ${COMFYUI_CONTAINER_NAME} && echo \"builder already exists -- to delete it, use: docker buildx rm ${COMFYUI_CONTAINER_NAME}\" || docker buildx create --name ${COMFYUI_CONTAINER_NAME}" > ${VAR_NT}.cmd 38 | @echo "docker buildx use ${COMFYUI_CONTAINER_NAME} || exit 1" >> ${VAR_NT}.cmd 39 | @echo "BUILDX_EXPERIMENTAL=1 ${DOCKER_PRE} docker buildx debug --on=error build --progress plain --platform linux/amd64 ${DOCKER_BUILD_ARGS} \\" >> ${VAR_NT}.cmd 40 | @echo " --build-arg COMFYUI_NVIDIA_DOCKER_VERSION=\"${COMFYUI_NVIDIA_DOCKER_VERSION}\" \\" >> ${VAR_NT}.cmd 41 | @echo " --build-arg BUILD_BASE=\"$@\" \\" >> ${VAR_NT}.cmd 42 | @echo " --tag=\"${COMFYUI_CONTAINER_NAME}:$@\" \\" >> ${VAR_NT}.cmd 43 | @echo " -f ${DOCKERFILE_NAME} \\" >> ${VAR_NT}.cmd 44 | @echo " --load \\" >> ${VAR_NT}.cmd 45 | @echo " ." >> ${VAR_NT}.cmd 46 | @cat ${VAR_NT}.cmd | tee ${VAR_NT}.log.temp 47 | @echo "" | tee -a ${VAR_NT}.log.temp 48 | @echo "Press Ctl+c within 5 seconds to cancel" 49 | @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" 50 | # Actual build 51 | @chmod +x ./${VAR_NT}.cmd 52 | @script -a -e -c ./${VAR_NT}.cmd ${VAR_NT}.log.temp; exit "$${PIPESTATUS[0]}" 53 | @mv ${VAR_NT}.log.temp ${VAR_NT}.log 54 | @rm -f ./${VAR_NT}.cmd 55 | 56 | ###### clean 57 | 58 | docker_tag_list: 59 | @${DOCKER_CMD} images --filter "label=comfyui-nvidia-docker-build" 60 | 61 | docker_buildx_rm: 62 | @docker buildx rm ${COMFYUI_CONTAINER_NAME} 63 | 64 | # Get the list of all existing Docker images 65 | DOCKERHUB_REPO="mmartial" 66 | DOCKER_PRESENT=$(shell for i in ${DOCKER_ALL}; do image="${COMFYUI_CONTAINER_NAME}:$$i"; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -v ${DOCKERHUB_REPO} | grep -q $$image; then echo $$image; fi; done) 67 | 68 | docker_rmi: 69 | @echo -n "== Images to delete: " 70 | @echo ${DOCKER_PRESENT} | wc -w 71 | @if [ 
`echo ${DOCKER_PRESENT} | wc -w` -eq 0 ]; then echo "No images to delete"; exit 1; fi 72 | @echo ${DOCKER_PRESENT} | sed -e 's/ /\n/g' 73 | @echo "" 74 | @echo "Press Ctl+c within 5 seconds to cancel" 75 | @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" 76 | @for i in ${DOCKER_PRESENT}; do docker rmi $$i; done 77 | @echo ""; echo " ** Remaining image with the build label:" 78 | @make docker_tag_list 79 | 80 | 81 | ############################################### For maintainer only 82 | ###### push -- will only proceed with existing ("present") images 83 | 84 | # use the highest numbered entry 85 | #LATEST_ENTRY=$(shell echo ${DOCKER_ALL} | sed -e 's/ /\n/g' | tail -1) 86 | # use the previous to last entry as the candidate (12.8 is for 50xx series GPUs, not making it the default yet) 87 | #LATEST_ENTRY=$(shell echo ${DOCKER_ALL} | sed -e 's/ /\n/g' | tail -2 | head -1) 88 | # use the 2nd to last entry as the candidate 89 | LATEST_ENTRY=$(shell echo ${DOCKER_ALL} | sed -e 's/ /\n/g' | tail -3 | head -1) 90 | 91 | LATEST_CANDIDATE=$(shell echo ${COMFYUI_CONTAINER_NAME}:${LATEST_ENTRY}) 92 | 93 | docker_tag: 94 | @if [ `echo ${DOCKER_PRESENT} | wc -w` -eq 0 ]; then echo "No images to tag"; exit 1; fi 95 | @echo "== About to tag:" 96 | @for i in ${DOCKER_PRESENT}; do image_out1="${DOCKERHUB_REPO}/$$i-${COMFYUI_NVIDIA_DOCKER_VERSION}"; image_out2="${DOCKERHUB_REPO}/$$i-latest"; echo " ++ $$i -> $$image_out1"; echo " ++ $$i -> $$image_out2"; done 97 | @if echo ${DOCKER_PRESENT} | grep -q ${LATEST_CANDIDATE}; then image_out="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; echo " ++ ${LATEST_CANDIDATE} -> $$image_out"; else echo " -- Unable to find latest candidate: ${LATEST_CANDIDATE}"; fi 98 | @echo "" 99 | @echo "tagging for hub.docker.com upload -- Press Ctl+c within 5 seconds to cancel" 100 | @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" 101 | @for i in ${DOCKER_PRESENT}; do
image_out1="${DOCKERHUB_REPO}/$$i-${COMFYUI_NVIDIA_DOCKER_VERSION}"; image_out2="${DOCKERHUB_REPO}/$$i-latest"; docker tag $$i $$image_out1; docker tag $$i $$image_out2; done 102 | @if echo ${DOCKER_PRESENT} | grep -q ${LATEST_CANDIDATE}; then image_out="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; docker tag ${LATEST_CANDIDATE} $$image_out; fi 103 | 104 | DOCKERHUB_READY=$(shell for i in ${DOCKER_ALL}; do image="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:$$i"; image1=$$image-${COMFYUI_NVIDIA_DOCKER_VERSION}; image2=$$image-latest; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q $$image1; then echo $$image1; fi; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q $$image2; then echo $$image2; fi; done) 105 | DOCKERHUB_READY_LATEST=$(shell image="${DOCKERHUB_REPO}/${COMFYUI_CONTAINER_NAME}:latest"; if docker images --format "{{.Repository}}:{{.Tag}}" | grep -q $$image; then echo $$image; else echo ""; fi) 106 | 107 | docker_push: 108 | @if [ `echo ${DOCKERHUB_READY} | wc -w` -eq 0 ]; then echo "No images to push"; exit 1; fi 109 | @echo "== About to push:" 110 | @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do echo " ++ $$i"; done 111 | @echo "pushing to hub.docker.com -- Press Ctl+c within 5 seconds to cancel" 112 | @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" 113 | @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do docker push $$i; done 114 | 115 | 116 | docker_rmi_hub: 117 | @echo ""; echo " ** Potential images with the build label:" 118 | @make docker_tag_list 119 | @if [ `echo ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST} | wc -w` -eq 0 ]; then echo "No expected images to delete"; exit 1; fi 120 | @echo "== About to delete:" 121 | @for i in ${DOCKERHUB_READY} ${DOCKERHUB_READY_LATEST}; do echo " -- $$i"; done 122 | @echo "deleting -- Press Ctl+c within 5 seconds to cancel" 123 | @for i in 5 4 3 2 1; do echo -n "$$i "; sleep 1; done; echo "" 124 | @for i in ${DOCKERHUB_READY} 
${DOCKERHUB_READY_LATEST}; do docker rmi $$i; done 125 | @echo ""; echo " ** Remaining images with the build label:" 126 | @make docker_tag_list 127 | 128 | ##### Maintainer 129 | # - Create a new branch on GitHub that matches the expected release tag, pull and checkout that branch 130 | # - In the Makefile, update the COMFYUI_NVIDIA_DOCKER_VERSION variable to match the final release tag 131 | # - Build the images: 132 | # % make build 133 | # - Confirm tags are correct, esp. latest (be ready to Ctrl+C before re-running) 134 | # % make docker_tag 135 | # - Push the images (here too be ready to Ctrl+C before re-running) 136 | # % make docker_push 137 | # - Update the README.md file with the new release tag + version history 138 | # - Commit and push the changes to GitHub (in the branch created at the beginning) 139 | # - On GitHub, "Open a pull request", 140 | # use the value of COMFYUI_NVIDIA_DOCKER_VERSION for the release name (ie the YYYYMMDD value) 141 | # add PR modifications as a summary of the content of the commits, 142 | # create the PR, add a self-approve message, merge and delete the branch 143 | # - on the build system, checkout main and pull the changes 144 | # % git checkout main 145 | # % git pull 146 | # - delete the temporary branch (named after the COMFYUI_NVIDIA_DOCKER_VERSION value) 147 | # % git branch -d YYYYMMDD 148 | # - Tag the release on GitHub 149 | # % git tag YYYYMMDD 150 | # % git push origin YYYYMMDD 151 | # - Create a release on GitHub using the YYYYMMDD tag, add the release notes, and publish 152 | # - Erase build logs 153 | # % rm *.log 154 | # - Erase the buildx builder 155 | # % make docker_buildx_rm 156 | # - Manually check for local images to delete if pushed (and not used locally) -- docker rmi [...]
157 | # % make docker_rmi 158 | # % make docker_rmi_hub 159 | # % make docker_tag_list 160 | # - Update the Unraid template if needed with new release and environment variables (remember to push to GitHub) 161 | # - Update the Docker Hub template if needed with tag information(esp when latest changes or is about to change) 162 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

ComfyUI (NVIDIA) Docker

2 | 3 | - runs in [containers](https://www.gkr.one/blg-20240501-docker101) for enhanced host OS separation 4 | - work with `docker` (and `compose`) or `podman` using `Windows Subsystem for Linux 2` (WSL2) on Windows (using a Linux Guest Virtual Machine on your Windows host) 5 | - can run multiple setups with an independent `run` folder (for virtual environment management and source code) shared `basedir` folder (for user files, input, output, custom nodes, models, etc.) 6 | - drops privileges to a regular user/preserves user permissions with custom UID/GID mapping (the running user's `id -u` and `id -g` as specified on the command line) 7 | - Integrated `ComfyUI-Manager` for hassle-free updates 8 | - permits modification of `ComfyUI-Manager`'s security level (`SECURITY_LEVEL`) 9 | - expose to Localhost-only access by default (`-p 127.0.0.1:8188:8188`) 10 | - built on official NVIDIA CUDA containers for optimal GPU performance 11 | - multiple `Ubuntu` + `CUDA` version combinations available --for older hardware: down to `CUDA 12.3.2` / for 50xx GPUs: `CUDA 12.8`-- see the tags list 12 | - separate `run` and `basedir` folders 13 | - `run` folder is used to store the ComfyUI setup (virtual environment, source code) 14 | - `basedir` folder is used to store user files, input, output, custom nodes, models, etc. 15 | - command-line override 16 | - using the `COMFY_CMDLINE_EXTRA` environment variable to pass additional command-line arguments set during the init script 17 | - ability to run `user_script.bash` from within the container for complex customizations, installations (`pip`, `apt`, ...) and command-line overrides 18 | - pre-built container images available on [DockerHub](https://hub.docker.com/r/mmartial/comfyui-nvidia-docker) 19 | - including [Unraid](https://unraid.net) compatible images 20 | - open-source: build it yourself using the corresponding `Dockerfile` present in the directory of the same name and review the `init.bash` (i.e. the setup logic) 21 | 22 |

Blackwell (RTX50xx) note

23 | 24 | When using RTX 50xx GPUs: 25 | - you must use NVIDIA driver 570 (or above). 26 | - use the `ubuntu24_cuda12.8` container tag (or above). 27 | - for a compatible `PyTorch` installation, run `cp extras/PyTorch2.7-CUDA12.8.sh /postvenv_script.bash` as described in the "Blackwell support" section. 28 | 29 |

About "latest" tag

30 | 31 | `latest` now points to the `ubuntu24_cuda12.6.3` tag (as announced in the `20250320` release) 32 | 33 | Some installed `custom_nodes` might need to be fixed (`Try Fix`) for `Import Failed` nodes in `ComfyUI-Manager`. 34 | This manual step is only needed once per the `latest` tag update to a different Ubuntu+CUDA version. 35 | 36 | To avoid `latest` changing your container's Ubuntu or CUDA version, manually select the docker image tag from the list of available tags. 37 | 38 |

Quick Start

39 | 40 | **Windows users, see the "Windows: WSL2 and podman" section** 41 | 42 | **Blackwell (RTX 50xx series GPUs) users, see the "Blackwell support" section** 43 | 44 | Make sure you have the NVIDIA Container Toolkit installed. More details: https://www.gkr.one/blg-20240523-u24-nvidia-docker-podman 45 | 46 | To run the container on an NVIDIA GPU, mount the specified directory, expose only to `localhost` on port `8188` (remove `127.0.0.1` to expose to your subnet, and change the port by altering the `-p local:container` port mapping), pass the calling user's UID and GID to the container, and select the `SECURITY_LEVEL`: 47 | 48 | ```bash 49 | # 'run' will contain your virtual environment(s), ComfyUI source code, and Hugging Face Hub data 50 | # 'basedir' will contain your custom nodes, input, output, user and models directories 51 | mkdir run basedir 52 | 53 | 54 | # Using docker 55 | docker run --rm -it --runtime nvidia --gpus all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 127.0.0.1:8188:8188 --name comfyui-nvidia mmartial/comfyui-nvidia-docker:latest 56 | 57 | # Using podman 58 | podman run --rm -it --userns=keep-id --device nvidia.com/gpu=all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 127.0.0.1:8188:8188 --name comfyui-nvidia docker.io/mmartial/comfyui-nvidia-docker:latest 59 | ``` 60 | 61 |
62 | 63 |

ComfyUI (NVIDIA) Docker

64 | 65 | [ComfyUI](https://github.com/comfyanonymous/ComfyUI/tree/master) is a Stable Diffusion WebUI. 66 | With the addition in August 2024 of a [Flux example](https://comfyanonymous.github.io/ComfyUI_examples/flux/), I created this container builder to test it. 67 | This container was built to benefit from the process isolation that containers bring and to drop the container's main process privileges to that of a regular user (the container's `comfy` user, which is `sudo` capable). 68 | 69 | The base container size is usually over 8GB, as new releases are now based on Nvidia's `devel` images. It contains the required components on an Ubuntu image with Nvidia CUDA and CuDNN (the base container is available from Nvidia's DockerHub); we add the requirements components to support an installation of ComfyUI. 70 | 71 | Multiple images are available. Each image's name contains a tag reflecting its core components. For example, `ubuntu24_cuda12.5.1` is based on Ubuntu 24.04 with CUDA 12.5.1. Depending on the version of the Nvidia drivers installed, the Docker container runtime will only support a certain version of CUDA. For example, Driver 550 supports up to CUDA 12.4 and will not be able to run the CUDA 12.4.1 or 12.5.1 versions. The recently released 570 driver supports up to CUDA 12.8 and RTX 50xx GPUs. 72 | 73 | For more details on CUDA/GPU support, see https://en.wikipedia.org/wiki/CUDA#GPUs_supported 74 | 75 | Use the `nvidia-smi` command on your system to obtain the `CUDA Version:` entry. It will show you the maximum CUDA version supported by your driver. If the printout shows `CUDA Version: 12.6`, your driver will support up to the `cuda12.5.1` version of the container (below the maximum CUDA version supported by the driver) but not `cuda12.8`. With this information, check for a usable `tag` in the table below. 76 | 77 | The `latest` tag will point to a most up-to-date build (i.e., the most recent OS+CUDA). 
78 | If this version is incompatible with your container runtime, please see the list of alternative builds. 79 | 80 | | tag | aka | note | 81 | | --- | --- | --- | 82 | | ubuntu22_cuda12.2.2-latest | | | 83 | | ubuntu22_cuda12.3.2-latest | | | 84 | | ubuntu22_cuda12.4.1-latest | | | 85 | | ubuntu24_cuda12.5.1-latest | | was `latest` up to `20250320` release | 86 | | ubuntu24_cuda12.6.3-latest | `latest` | `latest` as of `20250413` release | 87 | | ubuntu24_cuda12.8-latest | | minimum required for Blackwell (inc RTX 50xx) hardware (see "Blackwell support" section) | 88 | | ubuntu24_cuda12.9-latest | | | 89 | 90 | For more details on driver capabilities and how to update those, please see [Setting up NVIDIA docker & podman (Ubuntu 24.04)](https://www.gkr.one/blg-20240523-u24-nvidia-docker-podman). 91 | 92 | During its first run, the container will download ComfyUI from `git` (into the `run/ComfyUI` folder), create a Python virtual environment (in `run/venv`) for all the Python packages needed by the tool, and install [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) into ComfyUI's `custom_nodes` directory. 93 | This adds about 5GB of content to the installation. The download time depends on your internet connection. 94 | 95 | Given that `venv` (Python virtual environments) might not be compatible from OS+CUDA-version to version and will create a new `venv` when the current one is not for the expected version. 96 | **An installation might end up with multiple `venv`-based directories in the `run` folder, as the tool will rename existing unusable ones as "venv-OS+CUDA" (for example, `venv-ubuntu22_cuda12.3.2`). To support downgrading if needed, the script will not delete the previous version, and this is currently left to the end-user to remove if not needed** 97 | Using alternate `venv` means that some installed custom nodes might have an `import failed` error. 
We are attempting to make use of [`cm-cli`](https://github.com/ltdrdata/ComfyUI-Manager/blob/main/docs/en/cm-cli.md) before starting ComfyUI. If that fails, start the `Manager -> Custom Nodes Manager`, `Filter` by `Import Failed`, and use the `Try fix` button as this will download the required packages and install those in the used `venv`. A `Restart` and UI reload will be required to fix issues with the nodes. 98 | 99 | You will know the ComfyUI WebUI is running when you check the `docker logs` and see `To see the GUI go to: http://0.0.0.0:8188` 100 | 101 | **About 15GB of space between the container and the virtual environment installation is needed.** 102 | This does not consider the models, additional package installations, or custom nodes that the end user might perform. 103 | 104 | **ComfyUI's `security_levels` are not accessible until the configuration file is created during the first run.** 105 | 106 | It is recommended that a container monitoring tool be available to watch the logs and see when installations are completed or other relevant messages. Some installations and updates (updating packages, downloading content, etc.) will take a long time, and the lack of updates on the WebUI is not a sign of failure. 107 | [Dozzle](https://github.com/amir20/dozzle) is a good solution for following the logs from a WebUI. 108 | 109 | 110 | - [1. Preamble](#1-preamble) 111 | - [2. Running the container](#2-running-the-container) 112 | - [2.1. docker run](#21-docker-run) 113 | - [2.2. podman](#22-podman) 114 | - [2.3. Docker compose](#23-docker-compose) 115 | - [2.4. First time use](#24-first-time-use) 116 | - [3. Docker image](#3-docker-image) 117 | - [3.1. Building the image](#31-building-the-image) 118 | - [3.1.1. Using the Makefile](#311-using-the-makefile) 119 | - [3.1.2. Using a Dockerfile](#312-using-a-dockerfile) 120 | - [3.2. Availability on DockerHub](#32-availability-on-dockerhub) 121 | - [3.3. Unraid availability](#33-unraid-availability) 122 | - [3.4. 
Nvidia base container](#34-nvidia-base-container) 123 | - [4. Screenshots](#4-screenshots) 124 | - [4.1. First run: Bottle image](#41-first-run-bottle-image) 125 | - [4.2. FLUX.1\[dev\] example](#42-flux1dev-example) 126 | - [5. FAQ](#5-faq) 127 | - [5.1. API nodes](#51-api-nodes) 128 | - [5.2. Virtualenv](#52-virtualenv) 129 | - [5.2.1. Multiple virtualenv](#521-multiple-virtualenv) 130 | - [5.2.2. Fixing Failed Custom Nodes](#522-fixing-failed-custom-nodes) 131 | - [5.3. postvenv\_script.bash](#53-postvenv_scriptbash) 132 | - [5.4. user\_script.bash](#54-user_scriptbash) 133 | - [5.5. /userscripts\_dir](#55-userscripts_dir) 134 | - [5.6. /comfyui-nvidia\_config.sh](#56-comfyui-nvidia_configsh) 135 | - [5.7. Available environment variables](#57-available-environment-variables) 136 | - [5.7.1. WANTED\_UID and WANTED\_GID](#571-wanted_uid-and-wanted_gid) 137 | - [5.7.2. COMFY\_CMDLINE\_BASE and COMFY\_CMDLINE\_EXTRA](#572-comfy_cmdline_base-and-comfy_cmdline_extra) 138 | - [5.7.3. BASE\_DIRECTORY](#573-base_directory) 139 | - [5.7.4. SECURITY\_LEVEL](#574-security_level) 140 | - [5.7.5. USE\_SOCAT](#575-use_socat) 141 | - [5.7.6. FORCE\_CHOWN](#576-force_chown) 142 | - [5.7.7. USE\_PIPUPGRADE](#577-use_pipupgrade) 143 | - [5.8. run/pip\_cache and run/tmp](#58-runpip_cache-and-runtmp) 144 | - [5.9. ComfyUI Manager \& Security levels](#59-comfyui-manager--security-levels) 145 | - [5.10. Shell within the Docker image](#510-shell-within-the-docker-image) 146 | - [5.10.1. Alternate method](#5101-alternate-method) 147 | - [5.11. Additional FAQ](#511-additional-faq) 148 | - [5.11.1. Windows: WSL2 and podman](#5111-windows-wsl2-and-podman) 149 | - [5.11.2. Blackwell support](#5112-blackwell-support) 150 | - [5.11.2.1. Blackwell support on Unraid](#51121-blackwell-support-on-unraid) 151 | - [5.11.3. 
Specifying alternate folder location (ex: --output\_directory) with BASE\_DIRECTORY](#5113-specifying-alternate-folder-location-ex---output_directory-with-base_directory) 152 | - [6. Troubleshooting](#6-troubleshooting) 153 | - [6.1. Virtual environment](#61-virtual-environment) 154 | - [6.2. run directory](#62-run-directory) 155 | - [6.3. using BASE\_DIRECTORY with an outdated ComfyUI](#63-using-base_directory-with-an-outdated-comfyui) 156 | - [6.3.1. using a specific ComfyUI version or SHA](#631-using-a-specific-comfyui-version-or-sha) 157 | - [6.3.2. Errors with ComfyUI WebUI -- re-installation method with models migration](#632-errors-with-comfyui-webui----re-installation-method-with-models-migration) 158 | - [7. Changelog](#7-changelog) 159 | 160 | # 1. Preamble 161 | 162 | The container is made to run as the `comfy` user, NOT as `root` user. 163 | Within the container, the final user is `comfy` and the UID/GID is requested at `docker run` time; if none are provided, the container will use `1024`/`1024`. 164 | This is done to allow end users to have local directory structures for all the side data (input, output, temp, user), Hugging Face `HF_HOME` if used, and the entire `models`, which are separate from the container and able to be altered by the user. 165 | To request a different UID/GID at run time, use the `WANTED_UID` and `WANTED_GID` environment variables when calling the container. 
166 | 167 | Note: 168 | - for details on how to set up a Docker to support an NVIDIA GPU on an Ubuntu 24.04 system, please see [Setting up NVIDIA docker & podman (Ubuntu 24.04)](https://www.gkr.one/blg-20240523-u24-nvidia-docker-podman) 169 | - If you are new to ComfyUI, see [OpenArt's ComfyUI Academy](https://openart.ai/workflows/academy) 170 | - Some ComfyUI examples: 171 | - [ComfyUI_examples](https://comfyanonymous.github.io/ComfyUI_examples/) 172 | - [ComfyUI FLUX examples](https://comfyanonymous.github.io/ComfyUI_examples/flux/) 173 | - Some additional reads: 174 | - [FLUX.1[dev] with ComfyUI and Stability Matrix](https://www.gkr.one/blg-20240810-flux1dev) 175 | - [FLUX.1 LoRA training](https://www.gkr.one/blg-20240818-flux-lora-training) 176 | 177 | # 2. Running the container 178 | 179 | In the directory where we intend to run the container, create the `run` and `basedir` folders as the user with whom we want to share the UID/GID. **This needs to be done before the container is run (it is started as root, so the folders, if they do not exist, will be created as root)** (or give it another name; adapt the `-v` mapping in the `docker run` below). 180 | 181 | That `run` folder will be populated with a few sub-directories created with the UID/GID passed on the command line (see the command line below). 182 | Among the folders that will be created within `run` are `HF, ComfyUI, venv` 183 | - `HF` is the expected location of the `HF_HOME` (HuggingFace installation directory) 184 | - `ComfyUI` is the git clone version of the tool, with all its sub-directories, among which: 185 | - `custom_nodes` for additional support nodes, for example, ComfyUI-Manager, 186 | - `models` and all its sub-directories is where `checkpoints`, `clip`, `loras`, `unet`, etc have to be placed. 187 | - `input` and `output` are where input images will be placed, and generated images will end up. 
188 | - `user` is where the user's customizations and saved `workflows` (and ComfyUI Manager's configuration) are stored. 189 | - `venv` is the virtual environment where all the required Python packages for ComfyUI and other additions will be placed. A default ComfyUI package installation requires about 5GB of additional installation in addition to the container itself; those packages will be in this `venv` folder. 190 | 191 | **Currently, it is not recommended to volume map folders within the `ComfyUI` folder**. Doing so is likely to prevent proper installation (during the first run) or update, as any volume mapping (`docker ... -v` or `- local_path:container_path` for compose) creates those directories within a directory structure that is not supposed to exist during the initial execution. 192 | 193 | The use of the `basedir` is recommended. This folder will be populated at run time with the content from ComfyUI's `input`, `output`, `user` and `models` folders. This allow for the separation of the run time components (within the `run` folder) from the user files. In particular, if you were to delete the `run` folder, you would still have model files in the `basedir` folder. 194 | This is possible because of a new CLI option `--basedir` that was added to the code at the end of January 2025. This option will not be available unless ComfyUI is updated for existing installations. 195 | 196 | When starting, the container image executes the `init.bash` script (existing as `/comfyui-nvidia_init.bash` within the container) that performs a few operations: 197 | - load the `/comfyui-nvidia_config.sh` script (`source` it). This script is copied from the host at build time (the `config.sh` file), and can contain override for command line environment variables. 198 | - When starting, the container is using the `comfytoo` user. This user has UID/GID 1025/1025 (ie not a value existing by default in a default Ubuntu installation). 
199 | - As the `sudo` capable `comfytoo` user, the script will modify the existing `comfy` user to use the `WANTED_UID` and `WANTED_GID` 200 | - Then, it will re-start the initialization script by becoming the newly modified `comfy` user (which can write in the `run` and `basedir` folders with the provided `WANTED_UID` and `WANTED_GID`). 201 | - Environment variables for the `comfytoo` user will be shared with the `comfy` user. 202 | - After restarting as the `comfy` user... 203 | - Check that the NVIDIA driver is loaded and show details for the seen GPUs 204 | - Obtain the latest version of ComfyUI from GitHub if not already present in the mounted `run` folder. 205 | - Create the virtual environment (`venv`) if one does not already exist 206 | - if one exists, confirm it is the one for this OS+CUDA pair 207 | - if not, rename it and look for a renamed one that would match 208 | - if none is found, create a new one 209 | - Activate this virtual environment 210 | - Install all the ComfyUI-required Python packages. If those are already present, additional content should not need to be downloaded. 211 | - Install ComfyUI-Manager if it is not present. 212 | - During additional runs, we will allow the user to change the `security_level` from `normal` to another value set using the `SECURITY_LEVEL` environment variable passed to the container (see the "Security Levels" section of this document for details) to allow the tool to grant more or less functionality 213 | - Populate the `BASE_DIRECTORY` with the `input`, `output`, `user` and `models` directories from ComfyUI's `run` folder if none are present in the `basedir` folder 214 | - extend the `COMFY_CMDLINE_EXTRA` environment variable with the `--basedir` option. This variable is `export`ed so that it should be used with any `user_script.bash` if the `BASE_DIRECTORY` is used. 215 | - Run independent user scripts if a `/userscripts_dir` is mounted.
216 | - only executable `.sh` scripts are executed, in alphanumerical order 217 | - if any script fails, the container will stop with an error 218 | - environment variables set by the script will be available to the following scripts if they are saved in the `/tmp/comfy_${userscript_name}_env.txt` file, adapting `userscript_name` to the script name (ex: `00-nvidiaDev.sh` would be `/tmp/comfy_00-nvidiaDev_env.txt`) 219 | - Check for a user custom script in the "run" directory. It must be named `user_script.bash`. If one exists, run it. 220 | - **Make sure to use the `COMFY_CMDLINE_EXTRA` environment variable to pass the `--basedir` option to the tool if running the tool from within this script** 221 | - Run the ComfyUI WebUI. For the exact command run, please see the last line of `init.bash` 222 | 223 | If the `FORCE_CHOWN` environment variable is set to any non empty value (ex: "yes"), the script will force change directory ownership as the `comfy` user during script startup (might be slow). 224 | 225 | ## 2.1. docker run 226 | 227 | To run the container on an NVIDIA GPU, mount the specified directory, expose only to `localhost` on port `8188` (remove `127.0.0.1` to expose to your subnet, and change the port by altering the `-p local:container` port mapping), pass the calling user's UID and GID to the container, provide a `BASE_DIRECTORY` and select the `SECURITY_LEVEL`: 228 | 229 | ```bash 230 | mkdir run basedir 231 | docker run --rm -it --runtime nvidia --gpus all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 127.0.0.1:8188:8188 --name comfyui-nvidia mmartial/comfyui-nvidia-docker:latest 232 | ``` 233 | 234 | ## 2.2. podman 235 | 236 | It is also possible to run the tool using `podman`. Before doing so, ensure the Container Device Interface (CDI) is properly set for your driver. 
Please see https://www.gkr.one/blg-20240523-u24-nvidia-docker-podman for instructions. 237 | To run the container on an NVIDIA GPU, mount the specified directory, expose only to `localhost` on port `8188` (remove `127.0.0.1` to expose to your subnet, and change the port by altering the `-p local:container` port mapping), pass the calling user's UID and GID to the container, provide a `BASE_DIRECTORY` and select the `SECURITY_LEVEL`: 238 | 239 | ```bash 240 | mkdir run basedir 241 | podman run --rm -it --userns=keep-id --device nvidia.com/gpu=all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 127.0.0.1:8188:8188 --name comfyui-nvidia docker.io/mmartial/comfyui-nvidia-docker:latest 242 | ``` 243 | 244 | ## 2.3. Docker compose 245 | 246 | In the directory where you want to run the compose stack, create the `compose.yaml` file with the following content: 247 | 248 | ```yaml 249 | services: 250 | comfyui-nvidia: 251 | image: mmartial/comfyui-nvidia-docker:latest 252 | container_name: comfyui-nvidia 253 | ports: 254 | - 8188:8188 255 | volumes: 256 | - ./run:/comfy/mnt 257 | - ./basedir:/basedir 258 | restart: unless-stopped 259 | environment: 260 | # set WANTED_UID and WANTED_GID to your user and group as obtained with `id -u` and `id -g` 261 | - WANTED_UID=1000 262 | - WANTED_GID=1000 263 | - BASE_DIRECTORY=/basedir 264 | - SECURITY_LEVEL=normal 265 | - NVIDIA_VISIBLE_DEVICES=all 266 | - NVIDIA_DRIVER_CAPABILITIES=all 267 | deploy: 268 | resources: 269 | reservations: 270 | devices: 271 | - driver: nvidia 272 | count: all 273 | capabilities: 274 | - gpu 275 | - compute 276 | - utility 277 | ``` 278 | 279 | This will use port 8188 (`host:container`). 
Use a `run` directory local to the directory where this `compose.yml` is, and specify the `WANTED_UID` and `WANTED_GID` to 1000 (adapt to reflect the user and group you want to run as, which can be obtained using the `id` command in a terminal). Make sure to create the `run` and `basedir` directories as the user with the desired uid and gid before running the docker-compose for the first time. 280 | 281 | Start it with `docker compose up` (with `--detach` to run the container in the background) 282 | 283 | Please see [docker compose up](https://docs.docker.com/reference/cli/docker/compose/up/) reference manual for additional details. 284 | 285 | For users interested in adding it to a [Dockge](https://dockge.kuma.pet/) (a self-hosted Docker Compose stacks management tool) stack, please see my [Dockge blog post](https://www.gkr.one/blg-20240706-dockge) where we discuss directory and bind mounts (models take a lot of space). 286 | 287 | ## 2.4. First time use 288 | 289 | The first time we run the container, we will go to our host's IP on port 8188 (likely `http://127.0.0.1:8188`) and see the latest run or the bottle-generating example. 290 | 291 | Before attempting to run this example, restarting the container is recommended. 292 | The default security model of `normal` is used unless specified, but the needed configuration file is created at the first run of the container. As such, the ComfyUI Manager's default `security_level` cannot be modified until the first container restart (after the WebUI ran the first time). 293 | 294 | This example requires the [`v1-5-pruned-emaonly.ckpt`](https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt) file which can be downloaded directly from the `Manager`'s "Model Manager". 295 | 296 | It is also possible to manually install Stable Diffusion checkpoints, upscale, or Loras (and more) by placing them directly in their respective directories under the `models` folder.
For example, to manually install the required "bottle example" checkpoint, as the user with the wanted uid/gid: 297 | 298 | ```bash 299 | cd /models/checkpoints 300 | wget https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt 301 | ``` 302 | 303 | After the download is complete, click "Refresh" on the WebUI and "Queue Prompt" 304 | 305 | Depending on the workflow, some "custom nodes" might be needed. Those should usually be available in the "Manager"'s "Install Missing Custom Nodes". 306 | Other needed files could be found on [HuggingFace](https://huggingface.co/) or [CivitAI](https://civitai.com/). 307 | 308 | "Custom nodes" should be installed using the "Manager". The ability to install those manually depends on the `security_levels` selected. 309 | 310 | # 3. Docker image 311 | 312 | ## 3.1. Building the image 313 | 314 | ### 3.1.1. Using the Makefile 315 | 316 | 317 | Running `make` will show us the different build targets. That list will differ depending on the available `base` files in the `components` directory 318 | 319 | For example, you might see: 320 | 321 | Run: 322 | ```bash 323 | % make 324 | Available comfyui-nvidia-docker docker images to be built (make targets): 325 | ubuntu22_cuda12.3.2 326 | ubuntu22_cuda12.4.1 327 | ubuntu24_cuda12.5.1 328 | 329 | build: builds all 330 | ``` 331 | 332 | It is possible to build a specific target, such as `make ubuntu22_cuda12.3.2`, or all the available containers. 333 | 334 | Running a given target will create a `comfyui-nvidia-docker` `docker buildx`. 335 | As long as none are present, this will initiate a build without caching. 336 | 337 | The process will create the `Dockerfile` used within the `Dockerfile` folder. For example, when using `make ubuntu22_cuda12.3.2` a `Dockerfile/Dockerfile-ubuntu22_cuda12.3.2` file is created that will contain the steps used to build the local `comfyui-nvidia-docker:ubuntu22_cuda12.3.2` Docker image. 338 | 339 | ### 3.1.2. 
Using a Dockerfile 340 | 341 | It is also possible to use one of the generated `Dockerfile` to build a specific image. 342 | After selecting the image to build from the `OS+CUDA` name within the `Dockerfile` folder, proceed with a `docker build` command in the directory where this `README.md` is located. 343 | To build the `ubuntu24_cuda12.5.1` container, run: 344 | 345 | ```bash 346 | docker build --tag comfyui-nvidia-docker:ubuntu24_cuda12.5.1 -f Dockerfile/Dockerfile-ubuntu24_cuda12.5.1 . 347 | ``` 348 | 349 | Upon completion of the build, we will have a newly created local `comfyui-nvidia-docker:ubuntu24_cuda12.5.1` Docker image. 350 | 351 | ## 3.2. Availability on DockerHub 352 | 353 | Builds are available on DockerHub at [mmartial/comfyui-nvidia-docker](https://hub.docker.com/r/mmartial/comfyui-nvidia-docker), built from this repository's `Dockerfile`(s). 354 | 355 | The table at the top of this document shows the list of available versions on DockerHub. Make sure your NVIDIA container runtime supports the proposed CUDA version. This is particularly important if you use the `latest` tag, as it is expected to refer to the most recent OS+CUDA release. 356 | 357 | ## 3.3. Unraid availability 358 | 359 | The container has been tested on Unraid and [added to Community Apps an 2024-09-02](assets/Unraid_CA-ComfyUI-Nvidia-Docker.png). 360 | 361 | FYSA, if interested, you can see the template from https://raw.githubusercontent.com/mmartial/unraid-templates/main/templates/ComfyUI-Nvidia-Docker.xml 362 | 363 | 364 | ## 3.4. Nvidia base container 365 | 366 | Note that the original `Dockerfile` `FROM` is from Nvidia, as such: 367 | 368 | ``` 369 | This container image and its contents are governed by the NVIDIA Deep Learning Container License. 370 | By pulling and using the container, you accept the terms and conditions of this license: 371 | https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license 372 | ``` 373 | 374 | # 4. Screenshots 375 | 376 | ## 4.1. 
First run: Bottle image 377 | 378 | ![First Run](assets/FirstRun.png) 379 | 380 | ## 4.2. FLUX.1[dev] example 381 | 382 | Template at [Flux example](https://comfyanonymous.github.io/ComfyUI_examples/flux/) 383 | 384 | ![Flux Dev example](assets/Flux1Dev-run.png) 385 | 386 | # 5. FAQ 387 | 388 | ## 5.1. API nodes 389 | 390 | "API Nodes are ComfyUI’s new way of calling closed-source models through API requests" 391 | https://docs.comfy.org/tutorials/api-nodes/overview 392 | 393 | **The container is not designed to be used with API nodes (even with API keys), only to run self-hosted models.** 394 | If you want to use those, it is recommended to use the Desktop version of ComfyUI from https://www.comfy.org/download 395 | 396 | ## 5.2. Virtualenv 397 | 398 | The container pip installs all required packages in the container and then creates a virtual environment (in `/comfy/mnt/venv` with `comfy/mnt` mounted with the `docker run [...]—v`). 399 | 400 | This allows for the installation of Python packages using `pip3 install`. 401 | 402 | After running `docker exec -t comfy-nvidia /bin/bash` from the provided `bash`, activate the `venv` with `source /comfy/mnt/venv/bin/activate`. 403 | From this `bash` prompt, you can now run `pip3 freeze` or other `pip3` commands such as `pip3 install civitai` 404 | 405 | ### 5.2.1. Multiple virtualenv 406 | 407 | Because a `venv` is tied to an OS+CUDA version, the tool attempts to create some internal logic so that the `venv` folder matches the OS+CUDA of the started container. 408 | **Starting two `comfyui-nvidia-docker` containers with different OS+CUDA tags at the same time is likely to cause some issues** 409 | 410 | For illustration, let's say we last ran `ubuntu22_cuda12.3.1`, exited the container, and now attempt to run `ubuntu24_cuda12.5.1`. 
The script initialization is as follows: 411 | - check for an existing `venv`; there is one 412 | - check that this `venv` is for `ubuntu24_cuda12.5.1`: it is not, it is for `ubuntu22_cuda12.3.1` 413 | - move `venv` to `venv-ubuntu22_cuda12.3.1` 414 | - check if there is a `venv-ubuntu24_cuda12.5.1` to be renamed as `venv` if present: there is not 415 | - the script continues as if there was no `venv` and a new one for `ubuntu24_cuda12.5.1` is created 416 | 417 | Because of this, it is possible to have multiple `venv`-based folders in the "run" folder. 418 | 419 | ### 5.2.2. Fixing Failed Custom Nodes 420 | 421 | A side effect of the multiple virtual environment integration is that some installed custom nodes might have an `import failed` error when switching from one OS+CUDA version to another. 422 | When the container is initialized, we run `cm-cli.py fix all` to attempt to fix this. 423 | If this does not resolve the issue, start the `Manager -> Custom Nodes Manager`, Filter by `Import Failed`, and use the `Try fix` button. This will download the required packages and install those in the used `venv`. A `Restart` and UI reload will be required, but this ought to fix issues with the nodes. 424 | 425 | ![Import Failed: Try Fix](./assets/ImportFailed-TryFix.png) 426 | 427 | ## 5.3. postvenv_script.bash 428 | 429 | The `run/postvenv_script.bash` script can perform additional operations directly after the virtual environment is created and before ComfyUI is installed (or updated). 430 | 431 | This proves useful for installing Python packages that are required for the ComfyUI installation. 432 | 433 | Please see additional details about the use of user scripts in the `user_script.bash` section. 434 | 435 | ## 5.4. user_script.bash 436 | 437 | The `run/user_script.bash` user script can perform additional operations.
438 | Because this is a Docker container, updating the container will remove any additional installations not in the "run" directory, so it is possible to force a reinstall at runtime. 439 | It is also possible to bypass the ComfyUI command started (for people interested in trying the `--fast`, for example). 440 | 441 | To perform those changes, be aware that: 442 | - The container image is Ubuntu-based. 443 | - The `comfy` user is `sudo` capable. 444 | 445 | An example of one could be: 446 | 447 | ```bash 448 | #!/bin/bash 449 | 450 | echo "== Adding system package" 451 | DEBIAN_FRONTEND=noninteractive sudo apt update 452 | DEBIAN_FRONTEND=noninteractive sudo apt install -y nvtop 453 | 454 | echo "== Adding python package" 455 | source /comfy/mnt/venv/bin/activate 456 | pip3 install pipx 457 | echo "== Adding nvitop" 458 | # nvitop will be installed in the user's .local/bin directory which will be removed when the container is updated 459 | pipx install nvitop 460 | # extend the path to include the installation directory 461 | export PATH=/comfy/.local/bin:${PATH} 462 | # when starting a new docker exec, will still need to be run as ~/.local/bin/nvitop 463 | # but will be in the PATH for commands run from within this script 464 | 465 | echo "== Override ComfyUI launch command" 466 | # Make sure to have 1) activated the venv before running this command 467 | # 2) use the COMFY_CMDLINE_EXTRA environment variable to pass additional command-line arguments set during the init script 468 | cd /comfy/mnt/ComfyUI 469 | python3 ./main.py --listen 0.0.0.0 --disable-auto-launch --fast ${COMFY_CMDLINE_EXTRA} 470 | 471 | echo "== To prevent the regular Comfy command from starting, we 'exit 1'" 472 | echo " If we had not overridden it, we could simply end with an ok exit: 'exit 0'" 473 | exit 1 474 | ``` 475 | 476 | The script will be placed in the `run` directory and must be named `user_script.bash` to be found. 
477 | 478 | If you encounter an error, it is recommended to check the container logs; this script must be executable and readable by the `comfy` user. 479 | If the file is not executable, the tool will attempt to make it executable, but if another user owns it, the step will fail. 480 | 481 | ## 5.5. /userscripts_dir 482 | 483 | 🏗️ Please contribute to the `/userscripts_dir` if you have a script that you think would be useful to others or if you find an issue in the scripts provided. 484 | 485 | ⚠️ **WARNING**: This directory is used to run independent user scripts to perform additional operations that might damage your installation. This was added at the request of users trying to install packages from source. **Use with caution**. No support will be provided for issues resulting from the use of this directory. In case of trouble, it is recommended to delete the `run` folder and start a new container. 486 | 487 | Example scripts (which may not work --please feel free to contribute) are provided to demonstrate the capability. None are executable by default. Those scripts were added to enable end users to install components that needed to be built at the time, not yet supported by ComfyUI Manager, or for which no compatible packages were available. 488 | 489 | If a pip version is available, it is recommended to use it instead of the `/userscripts_dir`. 490 | 491 | 492 | The `/userscripts_dir` is a directory that can be mounted to the container: add it to your command line with `-v /path/to/userscripts_dir:/userscripts_dir`. 493 | 494 | ```bash 495 | docker run [...] -v /path/to/userscripts_dir:/userscripts_dir [...] mmartial/comfyui-nvidia-docker:latest 496 | ``` 497 | 498 | This directory is used to run independent user scripts in order to perform additional operations. 
499 | A few examples scripts are provided in the `userscripts_dir` folder, such as installing `SageAttention` (see [userscripts_dir/20-SageAttention.sh](userscripts_dir/20-SageAttention.sh) for an example). 500 | 501 | FAQ: 502 | - The container will only run executable `.sh` scripts in this directory in alphanumerical order (`chmod -x script.sh` to disable execution of a given script). None of the scripts in the folder are executable by default. 503 | - Reserve its usage for installing custom nodes NOT available in ComfyUI Manager. 504 | - The scripts will be run with the `comfy` user, so you will need to use `sudo` commands if needed. 505 | - Some scripts might depend on previous scripts, so the order of execution is important: confirm that needed dependencies are met before performing installations. 506 | - If any script fails, the container will stop with an error. 507 | - Environment variables set by the script will be available to the calling script if they are saved in the `/tmp/comfy_${userscript_name}_env.txt` file, adapting `userscript_name` to the script name (`00-nvidiaDev.sh` uses this feature and stores its environment variables in `/tmp/comfy_00-nvidiaDev_env.txt`) 508 | - The scripts will be run BEFORE the user script (`user_script.bash` if any). Those scripts should not start ComfyUI. 509 | - See the example scripts for details of what can be done. 510 | 511 | ## 5.6. /comfyui-nvidia_config.sh 512 | 513 | The `/comfyui-nvidia_config.sh` is a file that can be mounted within the container and can be used to load the entire configuration for the container, instead of setting environment variables on the command line. 514 | 515 | Copy and adapt the `config.sh` file to create your own configuration file, uncommenting each section and setting their appropriate values. 
Then it is possible to run something similar to: 515 | 516 | ```bash 517 | docker run -it --runtime nvidia --gpus all -v `pwd`/config.sh:/comfyui-nvidia_config.sh -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -v `pwd`/userscripts_dir:/userscripts_dir -p 8188:8188 mmartial/comfyui-nvidia-docker:latest 518 | ``` 519 | 520 | , i.e. the same command as before, but without any `-e` options (`WANTED_UID`, `WANTED_GID`, `BASE_DIRECTORY` and `SECURITY_LEVEL` are set in the config file) 521 | 522 | Note: the file is loaded AFTER the environment variables set on the command line, so the config file will override any environment variables set on the command line. 523 | 524 | ## 5.7. Available environment variables 525 | 526 | ### 5.7.1. WANTED_UID and WANTED_GID 527 | 528 | The `WANTED_UID` and `WANTED_GID` environment variables will be used to set the `comfy` user within the container. 529 | It is recommended that those be set to the end-user's `uid` and `gid` to allow the addition of files, models, and other content within the `run` directory. 530 | Content to be added within the `run` directory must be created with the `uid` and `gid`. 531 | 532 | The running user's `uid` and `gid` can be obtained using `id -u` and `id -g` in a terminal. 533 | 534 | **Note:** It is not recommended to override the default starting user of the script (`comfytoo`), as it is used to set up the `comfy` user to run with the provided `WANTED_UID` and `WANTED_GID`. The script checks for the `comfytoo` user to do so, then after restarting as the `comfy` user, the script checks that the `comfy` user has the correct `uid` and `gid` and will fail if it has not been able to set it up. 535 | 536 | ### 5.7.2. COMFY_CMDLINE_BASE and COMFY_CMDLINE_EXTRA 537 | 538 | You can add extra parameters by adding ComfyUI-compatible command-line arguments to the `COMFY_CMDLINE_EXTRA` environment variable. 539 | For example: `docker run [...]
-e COMFY_CMDLINE_EXTRA="--fast --reserve-vram 2.0 --lowvram"` 540 | 541 | Note that the `COMFY_CMDLINE_EXTRA` variable might be extended by the init script to match additional parameters such as the `BASE_DIRECTORY` variable. 542 | 543 | The default command line used by the script to start ComfyUI is `python3 ./main.py --listen 0.0.0.0 --disable-auto-launch` 544 | This is also the default value set to the `COMFY_CMDLINE_BASE` variable during the initialization script. **It is recommended not to alter the value of this variable, as this might prevent the tool from starting successfully**. 545 | 546 | The tool will run the combination of COMFY_CMDLINE_BASE followed by COMFY_CMDLINE_EXTRA. In the above example: 547 | ```bash 548 | python3 ./main.py --listen 0.0.0.0 --disable-auto-launch --fast --reserve-vram 2.0 --lowvram 549 | ``` 550 | 551 | In case of container failure, checking the container logs for error messages is recommended. 552 | 553 | The tool does not attempt to resolve quotes or special shell characters, so it is recommended that you prefer the `user_script.bash` method. 554 | 555 | It is also possible to use the environment variables in combination with the `user_script.bash` by 1) not starting ComfyUI from the script and 2) exiting with `exit 0` (i.e., success), which will allow the rest of the script to continue. The following example installs additional Ubuntu packages and allows for the environment variables to be used: 556 | 557 | ```bash 558 | #!/bin/bash 559 | 560 | #echo "== Update installed packages" 561 | DEBIAN_FRONTEND=noninteractive sudo apt-get update 562 | DEBIAN_FRONTEND=noninteractive sudo apt-get upgrade -y 563 | 564 | # Exit with an "okay" status to allow the init script to run the regular ComfyUI command 565 | exit 0 566 | ``` 567 | 568 | Note that `pip install`ation of custom nodes is not possible in `normal` security level, and `weak` should be used instead (see the "Security levels" section for details) 569 | 570 | ### 5.7.3.
BASE_DIRECTORY 571 | 572 | The `BASE_DIRECTORY` environment variable is used to specify the directory where ComfyUI will look for the `models`, `input`, `output`, `user` and `custom_nodes` folders. This is a good option to separate the virtual environment and ComfyUI's code (in the `run` folder) from the end user's files (in the `basedir` folder). For Unraid in particular, you can use this to place the `basedir` on a separate volume, outside of the `appdata` folder. 573 | 574 | **This option was added to ComfyUI at the end of January 2025. If you are using an already existing installation, update ComfyUI using the manager before enabling this option.** 575 | 576 | Once enabled, this option should not be disabled in future runs. 577 | During the first run with this option, the tool will **move** existing content from the `run` directory to the `BASE_DIRECTORY` specified. 578 | This is to avoid having multiple copies of downloaded models (taking multiple GB of storage) in both locations. 579 | **If your `models` directory is large, I recommend doing a manual `mv run/ComfyUI/models basedir/.` before running the container. The volumes are considered separate within the container, so the move operation within the container will 1) perform a file copy for each file within the folder (which will take a while) 2) double the model directory size before it is finished copying before it can delete the previous folder.** 580 | The same logic can be applied to the `input`, `output`, `user`, and `custom_nodes` folders. 581 | 582 | ### 5.7.4. SECURITY_LEVEL 583 | 584 | After the initial run, the `SECURITY_LEVEL` environment variable can be used to alter the default security level imposed by ComfyUI Manager. 585 | 586 | When following the rules defined at https://github.com/ltdrdata/ComfyUI-Manager?tab=readme-ov-file#security-policy the user should decide if `normal` will work for their use case. 587 | You will prefer `weak` if you manually install or alter custom nodes.
588 | **WARNING: Using `normal-` will prevent access to the WebUI unless the USE_SOCAT environment variable is set to `true`.** 589 | 590 | ### 5.7.5. USE_SOCAT 591 | 592 | The `USE_SOCAT` environment variable is used to enable an alternate service behavior: have ComfyUI listen on `127.0.0.1:8181` and use `socat` to expose the service on `0.0.0.0:8188`. 593 | 594 | The default is to run ComfyUI within the container to listen on `0.0.0.0:8188` (i.e., all network interfaces), which is needed to make it accessible on the exposed port outside of the running container. 595 | 596 | Some `SECURITY_LEVEL` settings might prevent access to the WebUI unless the tool is running on `127.0.0.1` (i.e., only the host). The `USE_SOCAT=true` environment variable can be used to support this behavior. 597 | 598 | ### 5.7.6. FORCE_CHOWN 599 | 600 | The `FORCE_CHOWN` environment variable is used to force change directory ownership as the `comfy` user during script startup (this process might be slow). 601 | 602 | This option was added to support users who mount the `run` and `basedir` folders onto other hosts which might not respect the UID/GID of the `comfy` user. 603 | 604 | `FORCE_CHOWN` must be set with a non-empty value (for example "yes": `-e FORCE_CHOWN=yes`) to be enabled. 605 | 606 | When set, it will "force chown" every sub-folder in the `run` and `basedir` folders when it first attempts to access them before verifying they are owned by the proper user. 607 | 608 | ### 5.7.7. USE_PIPUPGRADE 609 | 610 | The `USE_PIPUPGRADE` environment variable is used to enable the use of `pip3 install --upgrade` to upgrade ComfyUI and other Python packages to the latest version during startup. 611 | 612 | This option is enabled by default as with the separation of the UI from the Core ComfyUI code, it is possible to be off-synced with the latest version of the UI. 613 | 614 | It can be disabled by setting `USE_PIPUPGRADE=false`. 615 | 616 | ## 5.8.
run/pip_cache and run/tmp 617 | 618 | If the `run/pip_cache` and `run/tmp` folders are present, they will be used as the cache folder for pip and the temporary directory for the comfy user. They should be created in the `run` folder before running the container starts with the user with the `WANTED_UID` and `WANTED_GID`. 619 | 620 | ```bash 621 | # For example 622 | mkdir -p run basedir run/pip_cache run/tmp 623 | ``` 624 | 625 | If used, various `pip install` commands will write content to those folders (mounted as part of `run`) instead of within the container. 626 | This can be useful to avoid using the container's `writeable` layers, which might be limited in size on Unraid systems. 627 | 628 | Those are temporary folders, and can be deleted when the container is stopped. 629 | 630 | ## 5.9. ComfyUI Manager & Security levels 631 | 632 | [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager/) is installed and available in the container. 633 | 634 | The container is accessible on `0.0.0.0` internally to the container (i.e., all network interfaces), but it is only accessible on the exposed port outside of the running container. 635 | 636 | To modify the `security_level`: 637 | - manually: by going into your "run" folder directory and editing either `ComfyUI/user/default/ComfyUI-Manager/config.ini` if present, otherwise `custom_nodes/ComfyUI-Manager/config.ini` and alter the `security_level = ` to match your requirements (then reload ComfyUI) 638 | - automatically: use the `SECURITY_LEVEL` docker environment variable at run time to set it for this run. 639 | 640 | Note that if this is the first time starting the container, the file will not yet exist; it is created the first time ComfyUI is run. After this step, stop and restart the container; the `config.ini` will be there at consecutive restarts 641 | 642 | To use `cm-cli`, from the virtualenv, use: `python3 /comfy/mnt/custom_nodes/ComfyUI-Manager/cm-cli.py`. 
643 | For example: `python3 /comfy/mnt/custom_nodes/ComfyUI-Manager/cm-cli.py show installed` (`COMFYUI_PATH=/ComfyUI` should be set) 644 | 645 | ## 5.10. Shell within the Docker image 646 | 647 | When starting a `docker exec -it comfyui-nvidia /bin/bash` (or getting a `bash` terminal from `docker compose`), you will be logged in as the `comfytoo` user. 648 | 649 | Switch to the `comfy` user with: 650 | ```bash 651 | sudo su -l comfy 652 | ``` 653 | 654 | As the `comfy` user you will be using the `WANTED_UID` and `WANTED_GID` provided. 655 | You will be able to `cd` into the mounted locations for the `run` and `basedir` folders. 656 | 657 | ```bash 658 | source /comfy/mnt/venv/bin/activate 659 | ``` 660 | 661 | to get the virtual environment activated (allowing you to perfom `pip3 install` operations as those will be done within the `run` folder, so outside of the container), and other operations that the `comfy` user is allowed to perform. 662 | 663 | **Note:** as a reminder the `comfy` user is `sudo` capable, but `apt` commands might not persist a container restart, use the `user_script.bash` method to perform `apt` installs when the container is started. 664 | 665 | ### 5.10.1. Alternate method 666 | 667 | It is possible to pass a command line override to the container by adding it to the `docker run ... mmartial/comfyui-nvidia-docker:latest` command. 668 | 669 | For example: `docker run ... -it ... mmartial/comfyui-nvidia-docker:latest /bin/bash` 670 | 671 | This will start a container and drop you into a bash shell as the `comfy` user with all mounts and permissions set up. 672 | 673 | ## 5.11. Additional FAQ 674 | 675 | See [extras/FAQ.md] for additional FAQ topics, among which: 676 | - Updating ComfyUI 677 | - Updating ComfyUI-Manager 678 | - Installing a custom node from git 679 | 680 | ### 5.11.1. 
Windows: WSL2 and podman 681 | 682 | **Note:** per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26, you must use `-v /usr/lib/wsl:/usr/lib/wsl -e LD_LIBRARY_PATH=/usr/lib/wsl/lib` to passthrough the nvidia drivers related to opengl. 683 | 684 | 685 | The container can be used on Windows using "Windows Subsystem for Linux 2" (WSL2). 686 | For additional details on WSL, please read https://learn.microsoft.com/en-us/windows/wsl/about 687 | For additional details on podman, please read https://docs.podman.io/latest/getting_started/ 688 | 689 | WSL2 is a Linux guest Virtual Machine on a Windows host (for a slightly longer understanding of what this means, please see the first section of https://www.gkr.one/blg-20240501-docker101). 690 | The started container is Linux based (Ubuntu Linux) that will perform a full installation of ComfyUI from sources. 691 | Some experience with the Linux and Python command line interface is relevant for any modifictions of the virtual environment of container post container start. 692 | 693 | 694 | In the following, we will describe the method to use the `podman` command line interface. For Docker Desktop users, please see https://docs.docker.com/desktop/features/gpu/ for details on how to enable GPU support with Docker. 695 | 696 | First, follow the steps in Section 2 ("Getting Started with CUDA on WSL 2") of https://docs.nvidia.com/cuda/wsl-user-guide/index.html 697 | 698 | Once you have your Ubuntu Virtual Machine installed, start its terminal and follow the instructions to create your new user account (in the rest of this section we will use `USER` to refer to it, adapt as needed) and set a password (which you will use for `sudo` commands). Check your UID and GID using `id`; by default those should be `1000` and `1000`. 
699 | 700 | Then, from the terminal, run the following commands (for further details on some of the steps below, see https://www.gkr.one/blg-20240523-u24-nvidia-docker-podman): 701 | 702 | ```bash 703 | # Update the package list & Upgrade the already installed packages 704 | sudo apt update && sudo apt upgrade -y 705 | 706 | # Install podman 707 | sudo apt install -y podman 708 | 709 | # Install the nvidia-container-toolkit 710 | curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ 711 | && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ 712 | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ 713 | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list 714 | 715 | sudo apt-get update 716 | sudo apt-get install -y nvidia-container-toolkit 717 | 718 | # Generate the Container Device Interface (CDI) for podman 719 | sudo nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml 720 | # note that when you update the Nvidia driver, you will need to regenerate the CDI 721 | ``` 722 | 723 | Then you can confirm the CUDA version your driver supports with: 724 | 725 | ```bash 726 | podman run --rm --device nvidia.com/gpu=all ubuntu nvidia-smi 727 | ``` 728 | with the latest driver, you can support CUDA 12.8 or above, which is needed for RTX 50xx GPUs. 729 | 730 | In the following, we will run the `latest` tag but you can modify this depending on the CUDA version you want to support. 
731 | 732 | To run the container: 733 | 734 | ```bash 735 | # Create the needed data directories 736 | # 'run' will contain your virtual environment(s), ComfyUI source code, and Hugging Face Hub data 737 | # 'basedir' will contain your custom nodes, input, output, user and models directories 738 | mkdir run basedir 739 | 740 | # Download and start the container 741 | # - the directories will be written with your user's UID and GID 742 | # - the ComfyUI-Manager security levels will be set to "normal" 743 | # - we will expose the WebUI to http://127.0.0.1:8188 744 | # please see other sections of this README.md for options 745 | podman run --rm -it --userns=keep-id --device nvidia.com/gpu=all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -v /usr/lib/wsl:/usr/lib/wsl -e LD_LIBRARY_PATH=/usr/lib/wsl/lib -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 127.0.0.1:8188:8188 --name comfyui-nvidia docker.io/mmartial/comfyui-nvidia-docker:latest 746 | ``` 747 | 748 | Once started, go to http://127.0.0.1:8188 and enjoy your first workflow (the bottle example). With this workflow, ComfyUI-Manager should offer to download the model, but since your browser runs on the Windows side, we will need to move the downloaded file to the Ubuntu VM. In another `Ubuntu` terminal, run (adapt `USER`): `mv /mnt/c/Users/USER/Downloads/v1-5-pruned-emaonly-fp16.safetensors basedir/models/checkpoints/`. You will see that `basedir` and `run` are owned by your `USER`. 749 | 750 | After using ComfyUI, `Ctrl+C` in the `podman` terminal will terminate the WebUI. Use the `podman run ...` command from the same folder in the Ubuntu terminal to restart it and use the same `run` and `basedir` as before. 751 | 752 | ### 5.11.2. Blackwell support 753 | 754 | To use the Blackwell GPU (RTX 5080/5090), you will need to make sure to install NVIDIA driver 570 or above. This driver brings support for the RTX 50xx series of GPUs and CUDA 12.8.
755 | 756 | On 20250424, PyTorch 2.7.0 was released with support for CUDA 12.8. 757 | 758 | The `postvenv_script.bash` feature was added because with the release of PyTorch 2.7.0, for the time being, when installing on a CUDA 12.8 base image, the PyTorch wheel used appears to be for CUDA 12.6, which is incompatible. 759 | 760 | Until `cu128` is the default, a workaround script [extras/PyTorch2.7-CUDA12.8.sh](./extras/PyTorch2.7-CUDA12.8.sh) is provided that will install PyTorch 2.7.0 with CUDA 12.8 support. 761 | 762 | To use it, copy the [extras/PyTorch2.7-CUDA12.8.sh](./extras/PyTorch2.7-CUDA12.8.sh) file into the `run` folder as `postvenv_script.bash` before starting the container. 763 | 764 | ```bash 765 | ## Adapt to the folder that contains your run folder 766 | cd 767 | 768 | ## Only use one of the wget or curl depending on what is installed on your system 769 | 770 | # wget 771 | wget https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/refs/heads/main/extras/PyTorch2.7-CUDA12.8.sh -O ./run/postvenv_script.bash 772 | 773 | # curl 774 | curl -s -L https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/refs/heads/main/extras/PyTorch2.7-CUDA12.8.sh -o ./run/postvenv_script.bash 775 | ``` 776 | 777 | You can then run the `ubuntu24_cuda12.8` container. 778 | For example: 779 | 780 | ```bash 781 | docker run --rm -it --runtime nvidia --gpus all -v `pwd`/run:/comfy/mnt -v `pwd`/basedir:/basedir -e WANTED_UID=`id -u` -e WANTED_GID=`id -g` -e BASE_DIRECTORY=/basedir -e SECURITY_LEVEL=normal -p 8188:8188 mmartial/comfyui-nvidia-docker:ubuntu24_cuda12.8-latest 782 | ``` 783 | 784 | During initial run, you will see something similar to: 785 | 786 | ``` 787 | [...] 
788 | == Checking for post-venv script: /comfy/mnt/postvenv_script.bash 789 | == Attempting to make user script executable 790 | ++ Running user script: /comfy/mnt/postvenv_script.bash 791 | PyTorch is not installed, need to install 792 | Looking in indexes: https://download.pytorch.org/whl/cu128 793 | Collecting torch==2.7.0 794 | [...] 795 | ``` 796 | 797 | This step is ran after the virtual environment is created and before the ComfyUI installation. 798 | It will install PyTorch 2.7.0 (`torch`, `torchvision`, `torchaudio`) with CUDA 12.8 support. 799 | 800 | In the log, you can confirm ComfyUI uses the proper version of PyTorch: 801 | 802 | ``` 803 | =================== 804 | == Running ComfyUI 805 | [...] 806 | pytorch version: 2.7.0+cu128 807 | ``` 808 | 809 | Additional details on this can be found in [this issue](https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/43). 810 | 811 | #### 5.11.2.1. Blackwell support on Unraid 812 | 813 | When using the `ubuntu24_cuda12.8-latest` image on Unraid, obtain a terminal from your Unraid WebUI and run the following commands: 814 | 815 | ```bash 816 | cd /mnt/user/appdata/comfyui-nvidia/mnt 817 | wget https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/refs/heads/main/extras/PyTorch2.7-CUDA12.8.sh -O ./postvenv_script.bash 818 | chown 99:100 ./postvenv_script.bash 819 | ``` 820 | 821 | Restart the container and it should install PyTorch 2.7.0 with CUDA 12.8 support. 822 | 823 | ### 5.11.3. Specifying alternate folder location (ex: --output_directory) with BASE_DIRECTORY 824 | 825 | The `BASE_DIRECTORY` environment variable can be used to specify an alternate folder location for `input`, `output`, `temp`, `user`, `models` and `custom_nodes`. 826 | The ComfyUI CLI provides means to specify the location of some of those folders from the command line. 
827 | - `--output-directory` for `output` 828 | - `--input-directory` for `input` 829 | - `--temp-directory` for `temp` 830 | - `--user-directory` for `user` 831 | Each one of those options overrides `--base-directory`. 832 | 833 | The logic in `init.bash` moves the content of `input`, `output`, `temp`, `user` and `models` to the specified `BASE_DIRECTORY` the first time it is used if the destination folder does not exist. 834 | 835 | The script logic is based on the `BASE_DIRECTORY` environment variable alone. For end-users who prefer to use one of those alternate folder command lines, those can be added to either the `COMFY_CMDLINE_EXTRA` environment variable or the `user_script.bash` script (please refer to the other sections of this document that describe those options). 836 | 837 | Independent of the method used, the core logic is the same (the example will specify the `output` folder): 838 | 1. you will need to make sure a new folder is mounted within the container (ex: `docker run ... -v /preferredlocation/output:/output`) 839 | 2. tell the ComfyUI command line to use that location for its outputs: `python3 ./main.py [...] --output-directory /output` 840 | 3. (optional) make sure to copy the already existing content of `output` to the new location if you want consistency. 841 | 842 | Please note that an `output` folder will still exist in the `basedir` location (per the `BASE_DIRECTORY` logic) but the command line option will tell ComfyUI to override it. 843 | 844 | For Unraid users, those steps can be done by editing the template from the `Docker` tab, `Edit`ing the container and using `Add another Path, Port, Variable, Label or Device` to: 845 | 1. add a new `Path` entry (name it `output directory`) with a `Container Path` with value `/output`, a `Host Path` with your selected location, for example `/preferredlocation/output`, and an `Access Mode` of `Read/Write`. 846 | 2. 
edit the existing `COMFY_CMDLINE_EXTRA` variable to add the `--output-directory /output` option. 847 | 848 | # 6. Troubleshooting 849 | 850 | ## 6.1. Virtual environment 851 | 852 | The `venv` in the "run" directory contains all the Python packages the tool requires. 853 | In case of an issue, it is recommended that you terminate the container, delete (or rename) the `venv` directory, and restart the container. 854 | The virtual environment will be recreated; any `custom_scripts` should re-install their requirements; please see the "Fixing Failed Custom Nodes" section for additional details. 855 | 856 | ## 6.2. run directory 857 | 858 | It is also possible to rename the entire "run" directory to get a clean installation of ComfyUI and its virtual environment. This method is preferred, compared to deleting the "run" directory—as it will allow us to copy the content of the various downloaded `ComfyUI/models`, `ComfyUI/custom_nodes`, generated `ComfyUI/outputs`, `ComfyUI/user`, added `ComfyUI/inputs`, and other folders present within the old "run" directory. 859 | If using the `BASE_DIRECTORY` environment variable, please note that some of that `run` directory content will be moved to the `BASE_DIRECTORY` specified. 860 | 861 | ## 6.3. using BASE_DIRECTORY with an outdated ComfyUI 862 | 863 | If using the `BASE_DIRECTORY` option and the program exits saying the `--base-directory` option does not exist, this is due to an outdated ComfyUI installation. A possible solution is to disable the option, restart the container and use the ComfyUI-Manager to update ComfyUI. Another option is to manually update the code: `cd run/ComfyUI; git pull` 864 | In some cases, it is easier to create a simple `user_script.bash` to perform those steps; particularly on Unraid. 
865 | The `run/user_script.bash` file content would be (on Unraid it would go in `/mnt/user/appdata/comfyui-nvidia/mnt`) 866 | 867 | ```bash 868 | #!/bin/bash 869 | 870 | cd /comfy/mnt/ComfyUI 871 | git pull 872 | 873 | exit 0 874 | ``` 875 | 876 | Make sure to change file ownership to the user with the `WANTED_UID` and `WANTED_GID` environment variables and to make it executable (on Unraid in the directory, run `chown nobody:users user_script.bash; chmod +x user_script.bash`) 877 | 878 | **After the process completes, you should be presented with the WebUI. Make sure to delete or rename the script to avoid upgrading ComfyUI at start time, and use ComfyUI Manager instead.** 879 | 880 | ### 6.3.1. using a specific ComfyUI version or SHA 881 | 882 | Following the conversation in https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/32 883 | Use a `user_script.bash` to install a specific version of ComfyUI 884 | 885 | ```bash 886 | #!/bin/bash 887 | 888 | # Checkout based on SHA (commit) 889 | cd /comfy/mnt/ComfyUI 890 | git checkout SHAvalue 891 | 892 | # Install required packages (note that this might cause some downgrades -- some might not be possible) 893 | source /comfy/mnt/venv/bin/activate 894 | pip3 install -r requirements.txt 895 | 896 | exit 0 897 | ``` 898 | 899 | Adapt the `SHAvalue` to match your desired version. 900 | 901 | Make sure to change file ownership to the user with the `WANTED_UID` and `WANTED_GID` environment variables and to make it executable 902 | 903 | **After the process completes, you should be presented with the WebUI. Make sure to delete or rename the script to avoid it being run again.** 904 | 905 | ### 6.3.2. Errors with ComfyUI WebUI -- re-installation method with models migration 906 | 907 | Sometimes a `custom_nodes` entry might cause the WebUI to fail to start, or error out with a message (ex: `Loading aborted due to error reloading workflow data`). 
In such cases, it is recommended to start with brand new `run` and `basedir` folders, since `run` contains ComfyUI and the `venv` (virtual environment) that is required to run the WebUI, and `basedir` contains the `models` and `custom_nodes`. Because we would prefer to not have to redownload the models, the following describes a method to do so, such that you will be able to copy the content of the `models` folder from the old `run` and `basedir` folders to the new ones. 908 | 909 | Process: 910 | - `docker stop comfyui-nvidia` and `docker rm comfyui-nvidia` the container. We will need to start a new one so that no cached data is used. This will require a fresh installation of all the packages used by ComfyUI. 911 | - in the folder where your `run` and `basedir` are located, move the old folders to `run_off` and `basedir_off` and recreate new empty ones: `mv run run_off; mv basedir basedir_off; mkdir run basedir` 912 | - `docker run ...` a new container, which will reinstall everything as new. We note that none of the custom nodes will be installed. You will need to install each custom node manually after the process is complete (or redownload them from the ComfyUI-Manager by using older workflows' embedded images) 913 | - after successful installation and confirmation that the WebUI is working, `docker stop comfyui-nvidia` the container but do not delete it 914 | - in the folder where your new `run` and `basedir` are located, replace the models with the `_old` ones: `rm -rf basedir/models; mv basedir_off/models basedir/` 915 | - `docker start comfyui-nvidia` to restart the container, and test custom nodes installation by using the manager to enable `ComfyUI-Crystools`, follow the instructions and reload the WebUI 916 | 917 | You will still have previous content in the `run_off` and `basedir_off` folders, such as `basedir_off/output`, ... 918 | 919 | From `run_off/custom_nodes`, 
you will be able to see the list of custom nodes that were installed in the old container and can decide to reinstall them from the manager. 920 | 921 | Once you are confident that you have migrated content from the old container's folders, you can delete the `run_off` and `basedir_off` folders. 922 | 923 | # 7. Changelog 924 | 925 | - 20250607: Added `USE_PIPUPGRADE` and `USE_SOCAT` environment variables + added CUDA 12.9 build. 926 | - 20250503: Future proof [extras/PyTorch2.7-CUDA12.8.sh](./extras/PyTorch2.7-CUDA12.8.sh) to use `torch>=2.7` instead of `torch==2.7.0` + added `run/pip_cache` and `run/tmp` folders support 927 | - 20250426: Added support for `postvenv_script.bash` script (run after the virtual environment is set but before ComfyUI is installed/updated -- with a direct application to Blackwell GPUs to install PyTorch 2.7.0 with CUDA 12.8 support). See [extras/PyTorch2.7-CUDA12.8.sh](./extras/PyTorch2.7-CUDA12.8.sh) for details. 928 | - 20250424: No RTX50xx special case needed: PyTorch 2.7.0 is available for CUDA 12.8, only re-releasing this image 929 | - 20250418: use ENTRYPOINT to run the init script: replaced previous command line arguments to support command line override + Added content in `README.md` to explain the use of `comfytoo` user & a section on reinstallation without losing our existing models folder. 
930 | - 20250413: Made CUDA 12.6.3 the new `latest` tag + Added support for `/userscripts_dir` and `/comfyui-nvidia_config.sh` 931 | - 20250320: Made CUDA 12.6.3 image which will be the new `latest` as of the next release + Added checks for directory ownership + added `FORCE_CHOWN` + added libEGL/Vulkan ICD loaders and libraries (per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26) including extension to Windows usage section related to this addition 932 | - 20250227: Simplified user switching logic using the `comfytoo` user as the default entry point user that will set up the `comfy` user 933 | - 20250216: Fix issue with empty `BASE_DIRECTORY` variable 934 | - 20250202: Added `BASE_DIRECTORY` variable 935 | - 20250116: Happy 2nd Birthday ComfyUI -- added multiple builds for different base Ubuntu OS and CUDA combinations + added `ffmpeg` into the base container. 936 | - 20250109: Integrated `SECURITY_LEVELS` within the docker arguments + added `libGL` into the base container. 
937 | - 20240915: Added `COMFY_CMDLINE_BASE` and `COMFY_CMDLINE_EXTRA` variable 938 | - 20240824: Tag 0.2: shift to pull at first run-time, user upgradable with lighter base container 939 | - 20240824: Tag 0.1: builds were based on ComfyUI release, not user upgradable 940 | - 20240810: Initial Release 941 | -------------------------------------------------------------------------------- /assets/FirstRun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/FirstRun.png -------------------------------------------------------------------------------- /assets/Flux1Dev-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/Flux1Dev-run.png -------------------------------------------------------------------------------- /assets/ImportFailed-TryFix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/ImportFailed-TryFix.png -------------------------------------------------------------------------------- /assets/Unraid_CA-ComfyUI-Nvidia-Docker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/Unraid_CA-ComfyUI-Nvidia-Docker.png -------------------------------------------------------------------------------- /assets/flux1-dev-lora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/flux1-dev-lora.png 
-------------------------------------------------------------------------------- /assets/flux1-schnell-lora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/assets/flux1-schnell-lora.png -------------------------------------------------------------------------------- /components/base-ubuntu22_cuda12.2.2.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04 3 | 4 | -------------------------------------------------------------------------------- /components/base-ubuntu22_cuda12.3.2.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.3.2-cudnn9-devel-ubuntu22.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.3.2-cudnn9-devel-ubuntu22.04 3 | 4 | -------------------------------------------------------------------------------- /components/base-ubuntu22_cuda12.4.1.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 3 | 4 | -------------------------------------------------------------------------------- /components/base-ubuntu24_cuda12.5.1.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.5.1-devel-ubuntu24.04 2 | 3 | # Extended from https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/12.5.1/ubuntu2404/runtime/Dockerfile 4 | ENV NV_CUDNN_VERSION=9.3.0.75-1 5 | ENV NV_CUDNN_PACKAGE_NAME="libcudnn9" 6 | ENV NV_CUDA_ADD=cuda-12 7 | ENV NV_CUDNN_PACKAGE="$NV_CUDNN_PACKAGE_NAME-$NV_CUDA_ADD=$NV_CUDNN_VERSION" 8 | 9 | LABEL com.nvidia.cudnn.version="${NV_CUDNN_VERSION}" 10 | 11 | RUN 
apt-get update && apt-get install -y --no-install-recommends \ 12 | ${NV_CUDNN_PACKAGE} \ 13 | && apt-mark hold ${NV_CUDNN_PACKAGE_NAME}-${NV_CUDA_ADD} 14 | 15 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.5.1-devel-ubuntu24.04 16 | 17 | -------------------------------------------------------------------------------- /components/base-ubuntu24_cuda12.6.3.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 -------------------------------------------------------------------------------- /components/base-ubuntu24_cuda12.8.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.8.1-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.8.1-cudnn-devel-ubuntu24.04 3 | -------------------------------------------------------------------------------- /components/base-ubuntu24_cuda12.9.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04 2 | ARG BASE_DOCKER_FROM=nvidia/cuda:12.9.0-cudnn-devel-ubuntu24.04 3 | -------------------------------------------------------------------------------- /components/part1-common.Dockerfile: -------------------------------------------------------------------------------- 1 | ##### Base 2 | 3 | # Install system packages 4 | ENV DEBIAN_FRONTEND=noninteractive 5 | RUN apt-get update -y --fix-missing\ 6 | && apt-get install -y \ 7 | apt-utils \ 8 | locales \ 9 | ca-certificates \ 10 | && apt-get upgrade -y \ 11 | && apt-get clean 12 | 13 | # UTF-8 14 | RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 15 | ENV LANG=en_US.utf8 16 | ENV LC_ALL=C 17 | 18 | # Install needed packages 19 | RUN apt-get update -y --fix-missing \ 20 | && apt-get upgrade -y \ 21 | && apt-get install -y \ 22 | build-essential \ 23 | 
python3-dev \ 24 | unzip \ 25 | wget \ 26 | zip \ 27 | zlib1g \ 28 | zlib1g-dev \ 29 | gnupg \ 30 | rsync \ 31 | python3-pip \ 32 | python3-venv \ 33 | git \ 34 | sudo \ 35 | libglib2.0-0 \ 36 | socat \ 37 | && apt-get clean 38 | 39 | # Add libEGL ICD loaders and libraries + Vulkan ICD loaders and libraries 40 | # Per https://github.com/mmartial/ComfyUI-Nvidia-Docker/issues/26 41 | RUN apt install -y libglvnd0 libglvnd-dev libegl1-mesa-dev libvulkan1 libvulkan-dev ffmpeg \ 42 | && apt-get clean \ 43 | && rm -rf /var/lib/apt/lists/* \ 44 | && mkdir -p /usr/share/glvnd/egl_vendor.d \ 45 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libEGL_nvidia.so.0"}}' > /usr/share/glvnd/egl_vendor.d/10_nvidia.json \ 46 | && mkdir -p /usr/share/vulkan/icd.d \ 47 | && echo '{"file_format_version":"1.0.0","ICD":{"library_path":"libGLX_nvidia.so.0","api_version":"1.3"}}' > /usr/share/vulkan/icd.d/nvidia_icd.json 48 | ENV MESA_D3D12_DEFAULT_ADAPTER_NAME="NVIDIA" 49 | 50 | ENV BUILD_FILE="/etc/image_base.txt" 51 | ARG BASE_DOCKER_FROM 52 | RUN echo "DOCKER_FROM: ${BASE_DOCKER_FROM}" | tee ${BUILD_FILE} 53 | RUN echo "CUDNN: ${NV_CUDNN_PACKAGE_NAME} (${NV_CUDNN_VERSION})" | tee -a ${BUILD_FILE} 54 | 55 | ARG BUILD_BASE="unknown" 56 | LABEL comfyui-nvidia-docker-build-from=${BUILD_BASE} 57 | RUN it="/etc/build_base.txt"; echo ${BUILD_BASE} > $it && chmod 555 $it 58 | 59 | # Place the init script and its config in / so it can be found by the entrypoint 60 | COPY --chmod=555 init.bash /comfyui-nvidia_init.bash 61 | COPY --chmod=555 config.sh /comfyui-nvidia_config.sh 62 | 63 | ##### ComfyUI preparation 64 | # Every sudo group user does not need a password 65 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 66 | 67 | # Create a new group for the comfy and comfytoo users 68 | RUN groupadd -g 1024 comfy \ 69 | && groupadd -g 1025 comfytoo 70 | 71 | # The comfy (resp. comfytoo) user will have UID 1024 (resp. 1025), 72 | # be part of the comfy (resp. 
comfytoo) and users groups and be sudo capable (passwordless) 73 | RUN useradd -u 1024 -d /home/comfy -g comfy -s /bin/bash -m comfy \ 74 | && usermod -G users comfy \ 75 | && adduser comfy sudo 76 | RUN useradd -u 1025 -d /home/comfytoo -g comfytoo -s /bin/bash -m comfytoo \ 77 | && usermod -G users comfytoo \ 78 | && adduser comfytoo sudo 79 | 80 | ENV COMFYUSER_DIR="/comfy" 81 | RUN mkdir -p ${COMFYUSER_DIR} 82 | RUN it="/etc/comfyuser_dir"; echo ${COMFYUSER_DIR} > $it && chmod 555 $it 83 | 84 | ENV NVIDIA_DRIVER_CAPABILITIES="all" 85 | ENV NVIDIA_VISIBLE_DEVICES=all 86 | 87 | EXPOSE 8188 88 | 89 | ARG COMFYUI_NVIDIA_DOCKER_VERSION="unknown" 90 | LABEL comfyui-nvidia-docker-build=${COMFYUI_NVIDIA_DOCKER_VERSION} 91 | RUN echo "COMFYUI_NVIDIA_DOCKER_VERSION: ${COMFYUI_NVIDIA_DOCKER_VERSION}" | tee -a ${BUILD_FILE} 92 | 93 | # We start as comfytoo and will switch to the comfy user AFTER the container is up 94 | # and after having altered the comfy details to match the requested UID/GID 95 | USER comfytoo 96 | 97 | # We use ENTRYPOINT to run the init script (from CMD) 98 | ENTRYPOINT [ "/comfyui-nvidia_init.bash" ] 99 | -------------------------------------------------------------------------------- /config.sh: -------------------------------------------------------------------------------- 1 | ## ComfyUI-Nvidia-Docker configuration 2 | # loaded by init.bash as /comfyui-nvidia_config.sh 3 | # ... 
after setting the variables from the command line: will override with the values set here 4 | # 5 | # To use your custom version, duplicate the file and mount it in the container: -v /path/to/your/config.sh:/comfyui-nvidia_config.sh 6 | # 7 | # Can be used to set the other command line variables 8 | # Set using: export VARIABLE=value 9 | 10 | ## Environment variables loaded when passing environment variables from user to user 11 | # Ignore list: variables to ignore when loading environment variables from user to user 12 | export ENV_IGNORELIST="HOME PWD USER SHLVL TERM OLDPWD SHELL _ SUDO_COMMAND HOSTNAME LOGNAME MAIL SUDO_GID SUDO_UID SUDO_USER CHECK_NV_CUDNN_VERSION VIRTUAL_ENV VIRTUAL_ENV_PROMPT ENV_IGNORELIST ENV_OBFUSCATE_PART" 13 | # Obfuscate part: part of the key to obfuscate when loading environment variables from user to user, ex: HF_TOKEN, ... 14 | export ENV_OBFUSCATE_PART="TOKEN API KEY" 15 | 16 | ########## Command line variables 17 | # Uncomment and set as preferred, see README.md for more details 18 | 19 | # User and group id 20 | #export WANTED_UID=1000 21 | #export WANTED_GID=1000 22 | # DO NOT use `id -u` or `id -g` to set the values, use the actual values -- the script is started by comfytoo with 1025/1025 23 | 24 | # Use socat to listen on port 8188 and forward to 127.0.0.1:8181 (ie use an alternate port for Comfy to run) 25 | # default is false, value must set to "true" to enable 26 | #export USE_SOCAT="true" 27 | 28 | # Use pip upgrade: new default is to use pip install --upgrade, set to "false" to use pip install 29 | # default is true, value must set to "false" to disable 30 | #export USE_PIPUPGRADE="false" 31 | 32 | # Base directory 33 | #export BASE_DIRECTORY="/basedir" 34 | 35 | # Security level 36 | #export SECURITY_LEVEL="weak" 37 | 38 | # ComfyUI command line extra 39 | #export COMFY_CMDLINE_EXTRA="--fast --use-sage-attention" 40 | # Force chown: force chown mode enabled, will force change directory ownership as comfy user during 
script rerun (might be slow) 41 | #export FORCE_CHOWN="false" 42 | 43 | 44 | ## NVIDIA specific adds 45 | #export NVIDIA_VISIBLE_DEVICES=all 46 | #export NVIDIA_DRIVER_CAPABILITIES=all 47 | #export NVCC_APPEND_FLAGS='-allow-unsupported-compiler' 48 | 49 | ## User settings 50 | # If adding content to be obfuscated, add it to ENV_OBFUSCATE_PART 51 | #export HF_TOKEN="" 52 | #export OPENAI_API_KEY="" 53 | 54 | 55 | # Do not use an exit code, this is loaded by source 56 | -------------------------------------------------------------------------------- /extras/FAQ.md: -------------------------------------------------------------------------------- 1 |

Extra FAQ

2 | 3 | **It is recommended to have a docker log viewer when performing operations that perform python packages installation. Those will take a long time, and the log will help confirm progress until ComfyUI Manager provides a status update** 4 | 5 | - [1. Installating components](#1-installating-components) 6 | - [1.1. Updating ComfyUI](#11-updating-comfyui) 7 | - [1.2. Updating ComfyUI Manager](#12-updating-comfyui-manager) 8 | - [1.3. Installing a custom node from git](#13-installing-a-custom-node-from-git) 9 | - [1.4. Intalling a know custom node](#14-intalling-a-know-custom-node) 10 | - [2. Short list of custom nodes](#2-short-list-of-custom-nodes) 11 | - [2.1. rgthree-comfy](#21-rgthree-comfy) 12 | - [2.2. Crystools](#22-crystools) 13 | - [2.3. ComfyUI\_bitsandbytes\_NF4](#23-comfyui_bitsandbytes_nf4) 14 | - [2.4. FLUX.1-dev-gguf](#24-flux1-dev-gguf) 15 | 16 | # 1. Installating components 17 | 18 | ## 1.1. Updating ComfyUI 19 | 20 | - From the ComfyUI canvas, in the "Queue Prompt" menu (usually on the bottom right), select `Manager` (ComfyUI Manager) 21 | - From this new menu, in the center row, select `Update ComfyUI` 22 | - Wait for a completion popup 23 | - Click `Restart` and validate if prompted 24 | - Wait for the `Reconnected` (green message in the top right) 25 | - Reload the webpage using your browser 26 | - Display the Manager again. In the news box (within manager, box on the right side), scroll down to see the git commit information from ComfyUI (and the date of that commit) 27 | 28 | ## 1.2. Updating ComfyUI Manager 29 | 30 | - On the ComfyUI canvas, select "Manager" 31 | - Select `Custom Nodes Manager` 32 | - Filter by `Installed` 33 | - Use `Try Update` next to the `ComfyUI-Manager` line 34 | - If watching the logs, you will see a `Update custom node 'ComfyUI-Manager'` entry 35 | - When completed, at the bottom of the WebUI (in red) as message similar to `To apply the installed/updated/disabled/enabled custom node, please restart ComfyUI. 
And refresh browser` will appear 36 | - Click `Restart` and validate if prompted 37 | - Wait for the `Reconnected` (green message in the top right) 38 | - Reload the webpage using your browser 39 | - Display the Manager again. In the news box (within manager, box on the right side), scroll down to see the version of ComfyUI-Manager that was installed. 40 | 41 | ## 1.3. Installing a custom node from git 42 | 43 | - On the ComfyUI canvas, select "Manager" 44 | - Select `Custom Nodes Manager` 45 | - Use the `Install via Git URL` button 46 | - Enter the git repo location of your custom node 47 | - Click `Install` and wait for the process to complete 48 | - If watching the logs, you will see the python packages requirements installation process 49 | - When completed, you will see a message similar to `To apply the installed custom node, please RESTART ComfyUI.` 50 | - Click the `RESTART` button in that message and validate if prompted 51 | - Wait for the `Reconnected` (green message in the top right) 52 | - Reload the webpage using your browser 53 | - After double clicking on the canvas, your custom node should now be searchable 54 | 55 | ## 1.4. Intalling a know custom node 56 | 57 | Many nodes are already added to the search functionality of ComfyUI Manager. 58 | To see what is available: 59 | - On the ComfyUI canvas, select "Manager" 60 | - Select `Custom Nodes Manager` 61 | - Search for an item of interest 62 | - Click `Install` and wait for the process to complete 63 | - Follow similar steps as the steps following "Click Install" from "Installing a custom node from git" 64 | 65 | 66 | # 2. Short list of custom nodes 67 | 68 | ## 2.1. 
rgthree-comfy 69 | 70 | URL: https://github.com/rgthree/rgthree-comfy 71 | 72 | A quality of life node as "a collection of nodes and improvements": Progress Bar (runs alongs the top of the app window), Reroute, Power Lora Loader, Power Prompt(s), 73 | 74 | can be installed directly from ComfyUI Manager using the "known custom node" method: 75 | - Select `Custom Nodes Manager` 76 | - Type `rgthree` from the search bar 77 | - `rgthree` is the "Author" 78 | - `Select` and `Install` 79 | - Follow the steps after "Click Install" from "Installing a custom node from git" 80 | 81 | ## 2.2. Crystools 82 | 83 | URL: https://github.com/crystian/ComfyUI-Crystools.git 84 | 85 | Quality of life node providing "resources monitor, progress bar & time elapsed, ..." 86 | 87 | Can be installed as a "known custom node" from the search menu. 88 | - Select `Custom Nodes Manager` 89 | - Type `crys` from the search bar 90 | - `crystian` is the "Author", decide if you want to install the available custom nodes. 91 | - `Select` and `Install` 92 | - Follow the steps after "Click Install" from "Installing a custom node from git" 93 | 94 | ## 2.3. ComfyUI_bitsandbytes_NF4 95 | 96 | URL: https://github.com/comfyanonymous/ComfyUI_bitsandbytes_NF4.git 97 | (deprecated in favor of GGUF) 98 | 99 | Follow the "Installing a custom node from git" 100 | 101 | If possible, find a test workflow, obtain the required weights and after placing them in the expected location (see the "Running the container" section of the main [README.md](../README.md) for further details), `Queue Prompt` 102 | 103 | ## 2.4. 
FLUX.1-dev-gguf 104 | 105 | URL: https://github.com/city96/ComfyUI-GGUF.git 106 | 107 | GGUF Quantization support for native ComfyUI model 108 | 109 | GGUF can be installed directly from ComfyUI Manager using the "known custom node" method: 110 | - Select `Custom Nodes Manager` 111 | - Type `GGUF` from the search bar 112 | - `city96` is the "Author" 113 | - `Select` and `Install` 114 | - Follow the steps after "Click Install" from "Installing a custom node from git" 115 | 116 | -------------------------------------------------------------------------------- /extras/PyTorch2.7-CUDA12.8.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | error_exit() { 6 | echo -n "!! ERROR: " 7 | echo $* 8 | echo "!! Exiting script (ID: $$)" 9 | exit 1 10 | } 11 | 12 | source /comfy/mnt/venv/bin/activate || error_exit "Failed to activate virtualenv" 13 | 14 | min_torch_version="2.7.0" 15 | cuda_wheel="cu128" 16 | 17 | # bad example: python3 -c 'import torch; print(f"{torch.__version__}")' 18 | # returns: 2.7.0+cu126 19 | # we want cu128 20 | # -> check against both the version and the cuda version 21 | 22 | must_install=0 # increment if torch needs to be installed 23 | if pip3 show torch &>/dev/null; then 24 | # if torch is installed, check the version 25 | full_torch_version=$(python3 -c 'import torch; print(f"{torch.__version__}")') 26 | 27 | # Split version and cuda 28 | torch_version=$(echo "$full_torch_version" | awk -F'+' '{print $1}') 29 | cuda_version=$(echo "$full_torch_version" | awk -F'+' '{print $2}') 30 | 31 | echo "PyTorch is installed with version $full_torch_version -- Torch: $torch_version, CUDA: $cuda_version" 32 | if [ "A$cuda_version" != "A$cuda_wheel" ]; then 33 | echo "Torch CUDA version $cuda_version does not match required version $cuda_wheel, need to install" 34 | must_install=$((must_install+1)) 35 | fi 36 | 37 | # Check if the version matches the minimum version 38 | if [ "$(printf 
'%s\n' "$torch_version" "$min_torch_version" | sort -V | head -n1)" != "$min_torch_version" ]; then 39 | echo "Torch version $torch_version is below minimum version $min_torch_version, need to install" 40 | must_install=$((must_install+1)) 41 | fi 42 | else 43 | echo "PyTorch is not installed, need to install" 44 | must_install=$((must_install+1)) 45 | fi 46 | 47 | if [ $must_install -eq 0 ]; then 48 | echo "Torch is already installed or the version is up to date (version $torch_version), skipping installation" 49 | exit 0 50 | fi 51 | 52 | pip3 install -U --trusted-host pypi.org --trusted-host files.pythonhosted.org "torch>=${min_torch_version}" torchvision torchaudio --index-url https://download.pytorch.org/whl/${cuda_wheel} 53 | 54 | exit 0 55 | -------------------------------------------------------------------------------- /init.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | error_exit() { 6 | echo -n "!! ERROR: " 7 | echo $* 8 | echo "!! 
Exiting script (ID: $$)" 9 | exit 1 10 | } 11 | 12 | ok_exit() { 13 | echo $* 14 | echo "++ Exiting script (ID: $$)" 15 | exit 0 16 | } 17 | 18 | # Load config (must have at least ENV_IGNORELIST and ENV_OBFUSCATE_PART set) 19 | it=/comfyui-nvidia_config.sh 20 | if [ -f $it ]; then 21 | source $it || error_exit "Failed to load config: $it" 22 | else 23 | error_exit "Failed to load config: $it not found" 24 | fi 25 | # Check for ENV_IGNORELIST and ENV_OBFUSCATE_PART 26 | if [ -z "${ENV_IGNORELIST+x}" ]; then error_exit "ENV_IGNORELIST not set"; fi 27 | if [ -z "${ENV_OBFUSCATE_PART+x}" ]; then error_exit "ENV_OBFUSCATE_PART not set"; fi 28 | 29 | whoami=`whoami` 30 | script_dir=$(dirname $0) 31 | script_name=$(basename $0) 32 | echo ""; echo "" 33 | echo "======================================" 34 | echo "=================== Starting script (ID: $$)" 35 | echo "== Running ${script_name} in ${script_dir} as ${whoami}" 36 | script_fullname=$0 37 | echo " - script_fullname: ${script_fullname}" 38 | ## 20250418: Removed previous command line arguments to support command line override 39 | ignore_value="VALUE_TO_IGNORE" 40 | 41 | # everyone can read our files by default 42 | umask 0022 43 | 44 | # Write a world-writeable file (preferably inside /tmp -- ie within the container) 45 | write_worldtmpfile() { 46 | tmpfile=$1 47 | if [ -z "${tmpfile}" ]; then error_exit "write_worldfile: missing argument"; fi 48 | if [ -f $tmpfile ]; then rm -f $tmpfile; fi 49 | echo -n $2 > ${tmpfile} 50 | chmod 777 ${tmpfile} 51 | } 52 | 53 | itdir=/tmp/comfy_init 54 | if [ ! -d $itdir ]; then mkdir $itdir; chmod 777 $itdir; fi 55 | if [ ! 
-d $itdir ]; then error_exit "Failed to create $itdir"; fi 56 | 57 | # Default behavior: listen on 0.0.0.0 58 | USE_SOCAT=${USE_SOCAT:-"false"} 59 | if [ "A${USE_SOCAT}" = "Atrue" ]; then 60 | LISTEN_ADDRESS="127.0.0.1" 61 | LISTEN_PORT="8181" 62 | echo "== Using alternate behavior: socat listens on 0.0.0.0:8188 -> forward to ComfyUI on ${LISTEN_ADDRESS}:${LISTEN_PORT}" 63 | else 64 | USE_SOCAT="false" 65 | LISTEN_ADDRESS="0.0.0.0" 66 | LISTEN_PORT="8188" 67 | echo "== Using default behavior: ComfyUI listens on ${LISTEN_ADDRESS}:${LISTEN_PORT}" 68 | fi 69 | 70 | USE_PIPUPGRADE=${USE_PIPUPGRADE:-"true"} 71 | if [ "A${USE_PIPUPGRADE}" = "Atrue" ]; then 72 | PIP3_CMD="pip3 install --upgrade --trusted-host pypi.org --trusted-host files.pythonhosted.org" 73 | else 74 | PIP3_CMD="pip3 install --trusted-host pypi.org --trusted-host files.pythonhosted.org" 75 | fi 76 | echo "== PIP3_CMD: \"${PIP3_CMD}\"" 77 | 78 | # Set ComfyUI base command line 79 | it=$itdir/comfy_cmdline_base 80 | if [ -f $it ]; then COMFY_CMDLINE_BASE=$(cat $it); fi 81 | COMFY_CMDLINE_BASE=${COMFY_CMDLINE_BASE:-"python3 ./main.py --listen ${LISTEN_ADDRESS} --port ${LISTEN_PORT} --disable-auto-launch"} 82 | if [ ! -f $it ]; then write_worldtmpfile $it "$COMFY_CMDLINE_BASE"; fi 83 | echo "-- COMFY_CMDLINE_BASE: \"${COMFY_CMDLINE_BASE}\"" 84 | 85 | # Set ComfyUI command line extra 86 | if [ ! -z ${COMFY_CMDLINE_XTRA+x} ]; then COMFY_CMDLINE_EXTRA="${COMFY_CMDLINE_XTRA}"; fi # support previous variable 87 | it=$itdir/comfy_cmdline_extra 88 | if [ -f $it ]; then COMFY_CMDLINE_EXTRA=$(cat $it); fi 89 | COMFY_CMDLINE_EXTRA=${COMFY_CMDLINE_EXTRA:-""} 90 | if [ ! -f $it ]; then write_worldtmpfile $it "$COMFY_CMDLINE_EXTRA"; fi 91 | echo "-- COMFY_CMDLINE_EXTRA: \"${COMFY_CMDLINE_EXTRA}\"" 92 | 93 | # Set user and group id 94 | it=$itdir/comfy_user_uid 95 | if [ -f $it ]; then WANTED_UID=$(cat $it); fi 96 | WANTED_UID=${WANTED_UID:-1024} 97 | if [ ! 
-f $it ]; then write_worldtmpfile $it "$WANTED_UID"; fi 98 | echo "-- WANTED_UID: \"${WANTED_UID}\"" 99 | 100 | it=$itdir/comfy_user_gid 101 | if [ -f $it ]; then WANTED_GID=$(cat $it); fi 102 | WANTED_GID=${WANTED_GID:-1024} 103 | if [ ! -f $it ]; then write_worldtmpfile $it "$WANTED_GID"; fi 104 | echo "-- WANTED_GID: \"${WANTED_GID}\"" 105 | 106 | # Set security level 107 | it=$itdir/comfy_security_level 108 | if [ -f $it ]; then SECURITY_LEVEL=$(cat $it); fi 109 | SECURITY_LEVEL=${SECURITY_LEVEL:-"normal"} 110 | if [ ! -f $it ]; then write_worldtmpfile $it "$SECURITY_LEVEL"; fi 111 | echo "-- SECURITY_LEVEL: \"${SECURITY_LEVEL}\"" 112 | 113 | # Set base directory (if not used, set to $ignore_value) 114 | it=$itdir/comfy_base_directory 115 | if [ -f $it ]; then BASE_DIRECTORY=$(cat $it); fi 116 | BASE_DIRECTORY=${BASE_DIRECTORY:-"$ignore_value"} 117 | if [ ! -f $it ]; then write_worldtmpfile $it "$BASE_DIRECTORY"; fi 118 | echo "-- BASE_DIRECTORY: \"${BASE_DIRECTORY}\"" 119 | 120 | # Validate base directory 121 | if [ ! -z "$BASE_DIRECTORY" ]; then if [ $BASE_DIRECTORY != $ignore_value ] && [ ! -d "$BASE_DIRECTORY" ]; then error_exit "BASE_DIRECTORY requested but not found or not a directory ($BASE_DIRECTORY)"; fi; fi 122 | 123 | echo "== Environment variables set" 124 | 125 | # if command line arguments are provided, write them to a file, for example /bin/bash would give us a shell as comfy 126 | cmd_override_file=$itdir/comfy_run.sh 127 | if [ ! -z "$*" ]; then 128 | echo "!! Seeing command line override, placing it in $cmd_override_file: $*" 129 | write_worldtmpfile $cmd_override_file "$*" 130 | fi 131 | 132 | echo "== Extracting base image information" 133 | # extract base image information 134 | it=/etc/image_base.txt 135 | if [ ! -f $it ]; then error_exit "$it missing, exiting"; fi 136 | echo "-- Base image details (from $it):"; cat $it 137 | 138 | # extract comfy user directory 139 | it=/etc/comfyuser_dir 140 | if [ ! 
-f $it ]; then error_exit "$it missing, exiting"; fi 141 | COMFYUSER_DIR=`cat $it` 142 | echo "-- COMFYUIUSER_DIR: \"${COMFYUSER_DIR}\"" 143 | if test -z ${COMFYUSER_DIR}; then error_exit "Empty COMFYUSER_DIR variable"; fi 144 | 145 | # extract build base information 146 | it=/etc/build_base.txt 147 | if [ ! -f $it ]; then error_exit "$it missing, exiting"; fi 148 | BUILD_BASE=`cat $it` 149 | BUILD_BASE_FILE=$it 150 | BUILD_BASE_SPECIAL="ubuntu22_cuda12.3.2" # this is a special value: when this feature was introduced, will be used to mark an existing venv if the marker is not present 151 | echo "-- BUILD_BASE: \"${BUILD_BASE}\"" 152 | if test -z ${BUILD_BASE}; then error_exit "Empty BUILD_BASE variable"; fi 153 | 154 | # Check user id and group id 155 | new_gid=`id -g` 156 | new_uid=`id -u` 157 | echo "== user ($whoami)" 158 | echo " uid: $new_uid / WANTED_UID: $WANTED_UID" 159 | echo " gid: $new_gid / WANTED_GID: $WANTED_GID" 160 | 161 | save_env() { 162 | tosave=$1 163 | echo "-- Saving environment variables to $tosave" 164 | env | sort > "$tosave" 165 | } 166 | 167 | load_env() { 168 | tocheck=$1 169 | overwrite_if_different=$2 170 | ignore_list="${ENV_IGNORELIST}" 171 | obfuscate_part="${ENV_OBFUSCATE_PART}" 172 | if [ -f "$tocheck" ]; then 173 | echo "-- Loading environment variables from $tocheck (overwrite existing: $overwrite_if_different) (ignorelist: $ignore_list) (obfuscate: $obfuscate_part)" 174 | while IFS='=' read -r key value; do 175 | doit=false 176 | # checking if the key is in the ignorelist 177 | for i in $ignore_list; do 178 | if [[ "A$key" == "A$i" ]]; then doit=ignore; break; fi 179 | done 180 | if [[ "A$doit" == "Aignore" ]]; then continue; fi 181 | rvalue=$value 182 | # checking if part of the key is in the obfuscate list 183 | doobs=false 184 | for i in $obfuscate_part; do 185 | if [[ "A$key" == *"$i"* ]]; then doobs=obfuscate; break; fi 186 | done 187 | if [[ "A$doobs" == "Aobfuscate" ]]; then rvalue="**OBFUSCATED**"; fi 188 | 189 | if [ -z
"${!key}" ]; then 190 | echo " ++ Setting environment variable $key [$rvalue]" 191 | doit=true 192 | elif [ "$overwrite_if_different" = true ]; then 193 | cvalue="${!key}" 194 | if [[ "A${doobs}" == "Aobfuscate" ]]; then cvalue="**OBFUSCATED**"; fi 195 | if [[ "A${!key}" != "A${value}" ]]; then 196 | echo " @@ Overwriting environment variable $key [$cvalue] -> [$rvalue]" 197 | doit=true 198 | else 199 | echo " == Environment variable $key [$rvalue] already set and value is unchanged" 200 | fi 201 | fi 202 | if [[ "A$doit" == "Atrue" ]]; then 203 | export "$key=$value" 204 | fi 205 | done < "$tocheck" 206 | fi 207 | } 208 | 209 | # comfytoo is a specfiic user not existing by default on ubuntu, we can check its whomai 210 | if [ "A${whoami}" == "Acomfytoo" ]; then 211 | echo "-- Running as comfytoo, will switch comfy to the desired UID/GID" 212 | # The script is started as comfytoo -- UID/GID 1025/1025 213 | 214 | if [ ! -z $FORCE_CHOWN ]; then # any value works, empty value means disabled 215 | echo "-- Force chown mode enabled, will force change directory ownership as comfy user during script rerun (might be slow)" 216 | sudo touch /etc/comfy_force_chown 217 | fi 218 | 219 | # We are altering the UID/GID of the comfy user to the desired ones and restarting as comfy 220 | # using usermod for the already create comfy user, knowing it is not already in use 221 | # per usermod manual: "You must make certain that the named user is not executing any processes when this command is being executed" 222 | sudo groupmod -o -g ${WANTED_GID} comfy || error_exit "Failed to set GID of comfy user" 223 | sudo usermod -o -u ${WANTED_UID} comfy || error_exit "Failed to set UID of comfy user" 224 | sudo chown -R ${WANTED_UID}:${WANTED_GID} /home/comfy || error_exit "Failed to set owner of /home/comfy" 225 | sudo chown ${WANTED_UID}:${WANTED_GID} ${COMFYUSER_DIR} || error_exit "Failed to set owner of ${COMFYUSER_DIR}" 226 | save_env /tmp/comfytoo_env.txt 227 | # restart the script as 
comfy set with the correct UID/GID this time 228 | echo "-- Restarting as comfy user with UID ${WANTED_UID} GID ${WANTED_GID}" 229 | sudo su comfy $script_fullname || error_exit "subscript failed" 230 | ok_exit "Clean exit" 231 | fi 232 | 233 | # If we are here, the script is started as another user than comfytoo 234 | # because the whoami value for the comfy user can be any existing user, we can not check against it 235 | # instead we check if the UID/GID are the expected ones 236 | if [ "$WANTED_GID" != "$new_gid" ]; then error_exit "comfy MUST be running as UID ${WANTED_UID} GID ${WANTED_GID}, current UID ${new_uid} GID ${new_gid}"; fi 237 | if [ "$WANTED_UID" != "$new_uid" ]; then error_exit "comfy MUST be running as UID ${WANTED_UID} GID ${WANTED_GID}, current UID ${new_uid} GID ${new_gid}"; fi 238 | 239 | ########## 'comfy' specific section below 240 | 241 | # We are therefore running as comfy 242 | echo ""; echo "== Running as comfy" 243 | 244 | # Load environment variables one by one if they do not exist from /tmp/comfytoo_env.txt 245 | it=/tmp/comfytoo_env.txt 246 | if [ -f $it ]; then 247 | echo "-- Loading not already set environment variables from $it" 248 | load_env $it true 249 | fi 250 | 251 | # If a command line override was provided, run it 252 | if [ -f $cmd_override_file ]; then 253 | echo "-- Running provided command line override from $cmd_override_file" 254 | sudo chmod +x $cmd_override_file || error_exit "Failed to make $cmd_override_file executable" 255 | $cmd_override_file 256 | # This is a complete override of the script, exit right after 257 | exit 0 258 | fi 259 | 260 | echo "-- Confirming we have the NVIDIA driver loaded and showing details for the seen GPUs" 261 | if ! 
command -v nvidia-smi &> /dev/null; then 262 | error_exit "nvidia-smi not found" 263 | fi 264 | nvidia-smi || error_exit "Failed to run nvidia-smi" 265 | 266 | dir_validate() { # arg1 = directory to validate / arg2 = "mount" or ""; a "mount" can not be chmod'ed 267 | testdir=$1 268 | 269 | if [ ! -d "$testdir" ]; then error_exit "Directory $testdir not found (or not a directory)"; fi 270 | 271 | if [ "A$2" == "A" ] && [ -f /etc/comfy_force_chown ]; then 272 | echo " ++ Attempting to recursively set ownership of $testdir to ${WANTED_UID}:${WANTED_GID} (might take a long time)" 273 | sudo chown -R ${WANTED_UID}:${WANTED_GID} "$testdir" || error_exit "Failed to set owner of $testdir" 274 | fi 275 | 276 | # check if the directory is owned by WANTED_UID/WANTED_GID 277 | if [ "$(stat -c %u:%g "$testdir")" != "${WANTED_UID}:${WANTED_GID}" ]; then 278 | xtra_txt=" -- recommended to start with the FORCE_CHOWN=yes environment varable enabled" 279 | if [ "A$2" == "Amount" ]; then 280 | xtra_txt=" -- FORCE_CHOWN will not work for this folder, it is a PATH mounted at container startup and requires a manual fix: chown -R ${WANTED_UID}:${WANTED_GID} foldername" 281 | fi 282 | error_exit "Directory $testdir owned by unexpected user/group, expected ${WANTED_UID}:${WANTED_GID}, actual $(stat -c %u:%g "$testdir")$xtra_txt" 283 | fi 284 | 285 | if [ ! -w "$testdir" ]; then error_exit "Directory $testdir not writeable"; fi 286 | if [ ! -x "$testdir" ]; then error_exit "Directory $testdir not executable"; fi 287 | if [ ! 
-r "$testdir" ]; then error_exit "Directory $testdir not readable"; fi 288 | } 289 | 290 | ## Path: ${COMFYUSER_DIR}/mnt 291 | echo "== Testing write access as the comfy user to the run directory" 292 | it_dir="${COMFYUSER_DIR}/mnt" 293 | dir_validate "${it_dir}" "mount" 294 | it="${it_dir}/.testfile"; touch $it && rm -f $it || error_exit "Failed to write to $it_dir" 295 | 296 | ## 297 | it_dir="${COMFYUSER_DIR}/mnt/pip_cache" 298 | if [ -d "${it_dir}" ]; then 299 | echo ""; echo "== ${it_dir} present: Setting the PIP_CACHE_DIR variable to use it" 300 | dir_validate "${it_dir}" 301 | it="${it_dir}/.testfile" && rm -f $it || error_exit "Failed to write to pip cache directory as the comfy user" 302 | export PIP_CACHE_DIR=${COMFYUSER_DIR}/mnt/pip_cache 303 | fi 304 | 305 | ## 306 | it_dir="${COMFYUSER_DIR}/mnt/tmp" 307 | if [ -d "${it_dir}" ]; then 308 | echo ""; echo "== ${it_dir} present: Setting the TMPDIR variable to use it" 309 | dir_validate "${it_dir}" 310 | it="${it_dir}/.testfile" && rm -f $it || error_exit "Failed to write to tmp directory as the comfy user" 311 | export TMPDIR=${COMFYUSER_DIR}/mnt/tmp 312 | fi 313 | 314 | ## 315 | it_dir="${COMFYUSER_DIR}/mnt" 316 | echo ""; echo "== Obtaining the latest version of ComfyUI (if folder not present)" 317 | cd $it_dir # ${COMFYUSER_DIR}/mnt -- stay here for the following checks/setups 318 | if [ ! 
-d "ComfyUI" ]; then 319 | echo ""; echo "== Cloning ComfyUI" 320 | git clone https://github.com/comfyanonymous/ComfyUI.git ComfyUI || error_exit "ComfyUI clone failed" 321 | fi 322 | 323 | ## 324 | echo ""; echo "== Confirm the ComfyUI directory is present and we can write to it" 325 | it_dir="${COMFYUSER_DIR}/mnt/ComfyUI" 326 | dir_validate "${it_dir}" 327 | it="${it_dir}/.testfile" && rm -f $it || error_exit "Failed to write to ComfyUI directory as the comfy user" 328 | 329 | ## 330 | echo ""; echo "== Check on BASE_DIRECTORY (if used / if using \"$ignore_value\" then disable it)" 331 | if [ "$BASE_DIRECTORY" == "$ignore_value" ]; then BASE_DIRECTORY=""; fi 332 | if [ ! -z "$BASE_DIRECTORY" ]; then 333 | it_dir=$BASE_DIRECTORY 334 | dir_validate "${it_dir}" "mount" 335 | it="${it_dir}/.testfile" && touch $it && rm -f $it || error_exit "Failed to write to BASE_DIRECTORY" 336 | fi 337 | 338 | ## 339 | echo ""; echo "== Validate/Create HugginFace directory" 340 | it_dir="${COMFYUSER_DIR}/mnt/HF" 341 | if [ ! 
-d "${it_dir}" ]; then 342 | echo "";echo "== Creating HF directory" 343 | mkdir -p ${it_dir} 344 | fi 345 | dir_validate "${it_dir}" 346 | it=${it_dir}/.testfile && rm -f $it || error_exit "Failed to write to HF directory as the comfy user" 347 | export HF_HOME=${COMFYUSER_DIR}/mnt/HF 348 | 349 | # Attempting to support multiple build bases 350 | # the venv directory is specific to the build base 351 | # we are placing a marker file in the venv directory to match it to a build base 352 | # if the marker is not for container's build base, we rename the venv directory to avoid conflicts 353 | 354 | ## Current path: ${COMFYUSER_DIR}/mnt 355 | echo ""; echo "== if a venv is present, confirm we can write to it" 356 | it_dir="${COMFYUSER_DIR}/mnt/venv" 357 | if [ -d "${it_dir}" ]; then 358 | dir_validate "${it_dir}" 359 | it=${it_dir}/.testfile && rm -f $it || error_exit "Failed to write to venv directory as the comfy user" 360 | # use the special value to mark existing venv if the marker is not present 361 | it=${it_dir}/.build_base.txt; if [ ! -f $it ]; then echo $BUILD_BASE_SPECIAL > $it; fi 362 | fi 363 | 364 | ## 365 | echo ""; echo "== Matching any existing venv to container's BUILD_BASE (${BUILD_BASE})" 366 | SWITCHED_VENV=True # this is a marker to indicate that we have switched to a different venv, which is set unless we re-use the same venv as before (see below) 367 | # Check for an existing venv; if present, is it the proper one -- ie does its .build_base.txt match the container's BUILD_BASE_FILE? 
368 | if [ -d venv ]; then 369 | it=venv/.build_base.txt 370 | venv_bb=`cat $it` 371 | 372 | echo "" 373 | if cmp --silent $it $BUILD_BASE_FILE; then 374 | echo "== venv is for this BUILD_BASE (${BUILD_BASE})" 375 | SWITCHED_VENV=False 376 | else 377 | echo "== venv ($venv_bb) is not for this BUILD_BASE (${BUILD_BASE}), renaming it and seeing if a valid one is present" 378 | mv venv venv-${venv_bb} || error_exit "Failed to rename venv to venv-${venv_bb}" 379 | 380 | if [ -d venv-${BUILD_BASE} ]; then 381 | echo "== Existing venv (${BUILD_BASE}) found, attempting to use it" 382 | mv venv-${BUILD_BASE} venv || error_exit "Failed to rename ven-${BUILD_BASE} to venv" 383 | fi 384 | fi 385 | fi 386 | 387 | ## 388 | echo ""; echo "== Create virtualenv for installation (if not present)" 389 | if [ ! -d "venv" ]; then 390 | echo ""; echo "== Creating virtualenv" 391 | python3 -m venv venv || error_exit "Virtualenv creation failed" 392 | echo $BUILD_BASE > venv/.build_base.txt 393 | fi 394 | 395 | ## 396 | echo ""; echo "== Confirming venv is writeable" 397 | it_dir="${COMFYUSER_DIR}/mnt/venv" 398 | dir_validate "${it_dir}" 399 | it="${it_dir}/.testfile" && rm -f $it || error_exit "Failed to write to venv directory as the comfy user" 400 | 401 | ## 402 | echo ""; echo "== Activate the virtualenv and upgrade pip" 403 | it="${it_dir}/bin/activate" 404 | if [ ! 
-f "$it" ]; then error_exit "virtualenv not created, please erase any venv directory"; fi 405 | echo ""; echo " == Activating virtualenv" 406 | source "$it" || error_exit "Virtualenv activation failed" 407 | echo ""; echo " == Upgrading pip" 408 | pip3 install --upgrade pip || error_exit "Pip upgrade failed" 409 | 410 | # extent the PATH to include the user local bin directory 411 | export PATH=${COMFYUSER_DIR}/.local/bin:${PATH} 412 | 413 | # Verify the variables 414 | echo ""; echo ""; echo "===================" 415 | echo "== Environment details:" 416 | echo -n " PATH: "; echo $PATH 417 | echo -n " Python version: "; python3 --version 418 | echo -n " Pip version: "; pip3 --version 419 | echo -n " python bin: "; which python3 420 | echo -n " pip bin: "; which pip3 421 | echo -n " git bin: "; which git 422 | 423 | 424 | run_userscript() { 425 | userscript=$1 426 | if [ ! -f $userscript ]; then 427 | echo "!! ${userscript} not found, skipping it" 428 | return 429 | fi 430 | 431 | exec_method=$2 432 | if [ "A$exec_method" == "Askip" ]; then 433 | if [ ! -x $userscript ]; then 434 | echo "!! ${userscript} not executable, skipping it" 435 | return 436 | fi 437 | elif [ "A$exec_method" == "Achmod" ]; then 438 | if [ ! -x $userscript ]; then 439 | echo "== Attempting to make user script executable" 440 | chmod +x $userscript || error_exit "Failed to make user script executable" 441 | fi 442 | else 443 | echo "!! 
Invalid exec_method: ${exec_method}, skipping it" 444 | return 445 | fi 446 | userscript_name=$(basename $userscript) 447 | userscript_env="/tmp/comfy_${userscript_name}_env.txt" 448 | if [ -f $userscript_env ]; then 449 | rm -f $userscript_env || error_exit "Failed to remove ${userscript_env}" 450 | fi 451 | 452 | echo "++ Running user script: ${userscript}" 453 | $userscript || error_exit "User script ($userscript) failed or exited with an error, stopping further processing" 454 | 455 | if [ -f $userscript_env ]; then 456 | load_env $userscript_env true 457 | fi 458 | echo "-- User script completed: ${userscript}" 459 | echo "" 460 | } 461 | 462 | 463 | # Check for the post-venv script 464 | it=${COMFYUSER_DIR}/mnt/postvenv_script.bash 465 | echo ""; echo "== Checking for post-venv script: ${it}" 466 | run_userscript $it "chmod" 467 | 468 | 469 | # Install ComfyUI's requirements 470 | cd ComfyUI 471 | it=requirements.txt 472 | echo ""; echo "== Installing/Updating from ComfyUI's requirements" 473 | ${PIP3_CMD} -r $it || error_exit "ComfyUI requirements install/upgrade failed" 474 | echo ""; echo "== Installing Huggingface Hub" 475 | ${PIP3_CMD} "huggingface_hub[cli]" || error_exit "HuggingFace Hub CLI install/upgrade failed" 476 | 477 | export COMFYUI_PATH=`pwd` 478 | echo ""; echo "-- COMFYUI_PATH: ${COMFYUI_PATH}" 479 | 480 | # Install ComfyUI Manager if not already present 481 | echo "" 482 | customnodes_dir=${COMFYUI_PATH}/custom_nodes 483 | if [ ! -z "$BASE_DIRECTORY" ]; then it=${BASE_DIRECTORY}/custom_nodes; if [ -d $it ]; then customnodes_dir=$it; fi; fi 484 | cd ${customnodes_dir} 485 | if [ ! -d ComfyUI-Manager ]; then 486 | echo "== Cloning ComfyUI-Manager (within ${customnodes_dir})" 487 | git clone https://github.com/ltdrdata/ComfyUI-Manager.git || error_exit "ComfyUI-Manager clone failed" 488 | fi 489 | if [ ! 
-d ComfyUI-Manager ]; then error_exit "ComfyUI-Manager not found"; fi 490 | echo "== Installing/Updating ComfyUI-Manager's requirements (from ${customnodes_dir}/ComfyUI-Manager/requirements.txt)" 491 | ${PIP3_CMD} -r ${customnodes_dir}/ComfyUI-Manager/requirements.txt || error_exit "ComfyUI-Manager CLI requirements install/upgrade failed" 492 | 493 | # Please see https://github.com/ltdrdata/ComfyUI-Manager?tab=readme-ov-file#security-policy for details on authorized values 494 | # recent releases of ComfyUI-Manager have a config.ini file in the user folder, if this is not present, we expect it in the default folder 495 | cm_conf_user=${COMFYUI_PATH}/user/default/ComfyUI-Manager/config.ini 496 | cm_conf=${COMFYUI_PATH}/custom_nodes/ComfyUI-Manager/config.ini 497 | if [ ! -z "$BASE_DIRECTORY" ]; then it=${BASE_DIRECTORY}/user/default/ComfyUI-Manager/config.ini ; if [ -f $it ]; then cm_conf_user=$it; fi; fi 498 | if [ -f $cm_conf_user ]; then cm_conf=$cm_conf_user; fi 499 | echo "" 500 | if [ ! -f $cm_conf ]; then 501 | echo "== ComfyUI-Manager $cm_conf file missing, script potentially never run before. You will need to run ComfyUI-Manager a first time for the configuration file to be generated, we can not attempt to update its security level yet -- if this keeps occurring, please let the developer know so he can investigate. 
Thank you" 502 | else 503 | echo " -- Using ComfyUI-Manager config file: $cm_conf" 504 | perl -p -i -e 's%security_level = \w+%security_level = '${SECURITY_LEVEL}'%g' $cm_conf 505 | echo -n " -- ComfyUI-Manager (should show: ${SECURITY_LEVEL}): " 506 | grep security_level $cm_conf 507 | fi 508 | 509 | # Attempt to use ComfyUI Manager CLI to fix all installed nodes -- This must be done within the activated virtualenv 510 | echo "" 511 | if [ "A${SWITCHED_VENV}" == "AFalse" ]; then 512 | echo "== Skipping ComfyUI-Manager CLI fix as we are re-using the same venv as the last execution" 513 | echo " -- If you are experiencing issues with custom nodes, use 'Manager -> Custom Nodes Manager -> Filter: Import Failed -> Try Fix' from the WebUI" 514 | else 515 | cm_cli=${COMFYUI_PATH}/custom_nodes/ComfyUI-Manager/cm-cli.py 516 | if [ ! -z "$BASE_DIRECTORY" ]; then it=${BASE_DIRECTORY}/custom_nodes/ComfyUI-Manager/cm-cli.py ; if [ -f $it ]; then cm_cli=$it; fi; fi 517 | if [ -f $cm_cli ]; then 518 | echo "== Running ComfyUI-Manager CLI to fix installed custom nodes" 519 | python3 $cm_cli fix all || echo "ComfyUI-Manager CLI failed -- in case of issue with custom nodes: use 'Manager -> Custom Nodes Manager -> Filter: Import Failed -> Try Fix' from the WebUI" 520 | else 521 | echo "== ComfyUI-Manager CLI not found, skipping" 522 | fi 523 | fi 524 | 525 | # If we are using a base directory... 526 | if [ ! -z "$BASE_DIRECTORY" ]; then 527 | if [ ! 
-d "$BASE_DIRECTORY" ]; then error_exit "BASE_DIRECTORY ($BASE_DIRECTORY) not found or not a directory"; fi 528 | dir_validate "${BASE_DIRECTORY}" "mount" 529 | it=${BASE_DIRECTORY}/.testfile && rm -f $it || error_exit "Failed to write to BASE_DIRECTORY" 530 | 531 | echo ""; echo "== Setting base_directory: $BASE_DIRECTORY" 532 | 533 | # List of content to process obtained from https://github.com/comfyanonymous/ComfyUI/pull/6600/files 534 | 535 | # we want to MOVE content from the expected directories into the new base_directory (if those directories do not exist yet) 536 | # any git pull on the ComfyUI directory will create new folder structure under the source directories but since we have moved existing 537 | # ones to the new base_directory, the new structure will be ignored 538 | echo "++ Logic to move content from ComfyUI directories to the new base_directory" 539 | for i in models input output temp user custom_nodes; do 540 | in=${COMFYUI_PATH}/$i 541 | out=${BASE_DIRECTORY}/$i 542 | if [ -d $in ]; then 543 | if [ ! -d $out ]; then 544 | echo " ++ Moving $in to $out" 545 | mv $in $out || error_exit "Failed to move $in to $out" 546 | else 547 | echo " -- Both $in (in) and $out (out) exist, skipping move." 548 | echo "FYI attempting to list files in 'in' that are not in 'out' (empty means no differences):" 549 | comm -23 <(find $in -type f -printf "%P\n" | sort) <(find $out -type f -printf "%P\n" | sort) 550 | fi 551 | else 552 | if [ ! -d $out ]; then 553 | echo " ++ $in not found, $out does not exist: creating destination directory" 554 | mkdir -p $out || error_exit "Failed to create $out" 555 | else 556 | echo " -- $in not found, $out exists, skipping" 557 | fi 558 | fi 559 | 560 | dir_validate "$out" 561 | it=${out}/.testfile && rm -f $it || error_exit "Failed to write to $out" 562 | done 563 | 564 | # Next check that all expected directories in models are present. 
Create them otherwise 565 | echo " == Checking models directory" 566 | present_directories="" 567 | if [ -d ${BASE_DIRECTORY}/models ]; then 568 | for i in ${BASE_DIRECTORY}/models/*; do 569 | if [ -d $i ]; then 570 | present_directories+="${i##*/} " 571 | fi 572 | done 573 | fi 574 | 575 | present_directories_unique=$(echo "$present_directories" checkpoints loras vae configs clip_vision style_models diffusers vae_approx gligen upscale_models embeddings hypernetworks photomaker classifiers| tr ' ' '\n' | sort -u | tr '\n' ' ') 576 | 577 | for i in ${present_directories_unique}; do 578 | it=${BASE_DIRECTORY}/models/$i 579 | if [ ! -d $it ]; then 580 | echo " ++ Creating $it" 581 | mkdir -p $it || error_exit "Failed to create $it" 582 | else 583 | echo " -- $it already exists, skipping" 584 | fi 585 | 586 | dir_validate "$it" 587 | it=${it}/.testfile && rm -f $it || error_exit "Failed to write to $it" 588 | done 589 | 590 | # and extend the command line using COMFY_CMDLINE_EXTRA (export to be accessible to child processes such as the user script) 591 | export COMFY_CMDLINE_EXTRA="${COMFY_CMDLINE_EXTRA} --base-directory $BASE_DIRECTORY" 592 | echo "!! 
COMFY_CMDLINE_EXTRA extended, make sure to use it in user script (if any): ${COMFY_CMDLINE_EXTRA}" 593 | fi 594 | 595 | # Final steps before running ComfyUI 596 | cd ${COMFYUI_PATH} 597 | echo "";echo -n "== Container directory: "; pwd 598 | 599 | # Saving environment variables 600 | it=/tmp/comfy_env.txt 601 | save_env $it 602 | 603 | 604 | # Run independent user scripts if a /userscript_dir is mounted 605 | it_dir=/userscripts_dir 606 | if [ -d $it_dir ]; then 607 | echo "== Running user scripts from directory: ${it_dir}" 608 | torun=$(ls $it_dir/*.sh | sort) 609 | # Order the scripts by name to run them in order 610 | for it in $torun; do 611 | run_userscript $it "skip" 612 | done 613 | fi 614 | 615 | 616 | # Check for the main custom user script (usually with command line override) 617 | it=${COMFYUSER_DIR}/mnt/user_script.bash 618 | echo ""; echo "== Checking for primary user script: ${it}" 619 | run_userscript $it "chmod" 620 | 621 | 622 | # Saving environment variables 623 | it=/tmp/comfy_env_final.txt 624 | save_env $it 625 | 626 | # Run socat if requested 627 | if [ "A${USE_SOCAT}" = "Atrue" ]; then 628 | echo ""; echo "===================" 629 | echo "== Running socat" 630 | socat TCP4-LISTEN:8188,fork TCP4:127.0.0.1:8181 & 631 | fi 632 | 633 | echo ""; echo "===================" 634 | echo "== Running ComfyUI" 635 | # Full list of CLI options at https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/cli_args.py 636 | echo "-- Command line run: ${COMFY_CMDLINE_BASE} ${COMFY_CMDLINE_EXTRA}" 637 | ${COMFY_CMDLINE_BASE} ${COMFY_CMDLINE_EXTRA} || error_exit "ComfyUI failed or exited with an error" 638 | 639 | ok_exit "Clean exit" 640 | -------------------------------------------------------------------------------- /userscripts_dir/00-nvidiaDev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=$(basename $0) 4 | 5 | # This script will check for installed NVIDIA development tools 6 | # 
7 | # At the end of the run, we are saving the environment variables to /tmp/comfy_${script_name}_env.txt 8 | # so they can be used by the init.bash script 9 | 10 | set -e 11 | 12 | error_exit() { 13 | echo -n "!! ERROR: " 14 | echo $* 15 | echo "!! Exiting script (ID: $$)" 16 | exit 1 17 | } 18 | 19 | check_nvcc() { 20 | if ! command -v nvcc &> /dev/null; then 21 | return 1 22 | fi 23 | return 0 24 | } 25 | 26 | save_env() { 27 | tosave=$1 28 | echo "-- Saving environment variables to $tosave" 29 | env | sort > "$tosave" 30 | } 31 | 32 | 33 | echo "Obtaining build base" 34 | cd /comfy/mnt 35 | bb="venv/.build_base.txt" 36 | if [ ! -f $bb ]; then error_exit "${bb} not found"; fi 37 | BUILD_BASE=$(cat $bb) 38 | if [ "A$BUILD_BASE" = "A" ]; then error_exit "BUILD_BASE is empty"; fi 39 | 40 | echo " ++ Build base: ${BUILD_BASE}" 41 | 42 | # Attempt to fix the dpkg lock potential issue 43 | # the dpkg step might fail so always exiting with "true" 44 | if [ -f /var/lib/dpkg/lock ]; then 45 | echo "++ Attempting to fix dpkg lock" 46 | sudo rm /var/lib/dpkg/lock 47 | sudo dpkg --configure -a || true 48 | fi 49 | 50 | echo "Checking if nvcc is available" 51 | if ! check_nvcc; then 52 | error_exit " !! nvcc not found, stopping further execution" 53 | fi 54 | 55 | save_env /tmp/comfy_${script_name}_env.txt 56 | exit 0 57 | -------------------------------------------------------------------------------- /userscripts_dir/10-pip3Dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | error_exit() { 6 | echo -n "!! ERROR: " 7 | echo $* 8 | echo "!! 
Exiting script (ID: $$)" 9 | exit 1 10 | } 11 | 12 | source /comfy/mnt/venv/bin/activate || error_exit "Failed to activate virtualenv" 13 | 14 | python3 -m ensurepip --upgrade || error_exit "Failed to upgrade pip" 15 | python3 -m pip install --upgrade setuptools || error_exit "Failed to upgrade setuptools" 16 | 17 | pip3 install ninja cmake wheel pybind11 packaging || error_exit "Failed to install build dependencies" 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /userscripts_dir/20-SageAttention.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | min_sageattention_version="2.1" 4 | 5 | set -e 6 | 7 | error_exit() { 8 | echo -n "!! ERROR: " 9 | echo $* 10 | echo "!! Exiting script (ID: $$)" 11 | exit 1 12 | } 13 | 14 | source /comfy/mnt/venv/bin/activate || error_exit "Failed to activate virtualenv" 15 | 16 | ## requires: 00-nvidiaDev,sh 17 | echo "Checking if nvcc is available" 18 | if ! command -v nvcc &> /dev/null; then 19 | error_exit " !! nvcc not found, canceling run" 20 | fi 21 | 22 | ## requires: 10-pip3Dev.sh 23 | if pip3 show setuptools &>/dev/null; then 24 | echo " ++ setuptools installed" 25 | else 26 | error_exit " !! setuptools not installed, canceling run" 27 | fi 28 | if pip3 show ninja &>/dev/null; then 29 | echo " ++ ninja installed" 30 | else 31 | error_exit " !! 
ninja not installed, canceling run" 32 | fi 33 | 34 | # Adapted from https://github.com/eddiehavila/ComfyUI-Nvidia-Docker/blob/main/user_script.bash 35 | compile_flag=true 36 | if pip3 show sageattention &>/dev/null; then 37 | # Extract the installed version of sageattention 38 | sageattention_version=$(pip3 show sageattention | grep '^Version:' | awk '{print $2}') 39 | echo "SageAttention is installed with version $sageattention_version" 40 | 41 | # Use version sort to check if sageattention_version is below the minimal version 42 | # This command prints the lowest version of the two. 43 | # If the lowest isn't the minimal version, then sageattention_version is below the minimal version. 44 | if [ "$(printf '%s\n' "$sageattention_version" "$min_sageattention_version" | sort -V | head -n1)" != "$min_sageattention_version" ]; then 45 | echo "SageAttention version $sageattention_version is below minimum version $min_sageattention_version, need to compile" 46 | else 47 | compile_flag=false 48 | fi 49 | fi 50 | 51 | if [ "A$compile_flag" = "Afalse" ]; then 52 | echo "SageAttention is already up to date (version $sageattention_version), skipping compilation" 53 | exit 0 54 | fi 55 | 56 | echo "Compiling SageAttention" 57 | 58 | cd /comfy/mnt 59 | bb="venv/.build_base.txt" 60 | if [ ! -f $bb ]; then error_exit "${bb} not found"; fi 61 | BUILD_BASE=$(cat $bb) 62 | 63 | 64 | if [ ! -d src ]; then mkdir src; fi 65 | cd src 66 | 67 | mkdir -p ${BUILD_BASE} 68 | if [ ! -d ${BUILD_BASE} ]; then error_exit "${BUILD_BASE} not found"; fi 69 | cd ${BUILD_BASE} 70 | 71 | dd="/comfy/mnt/src/${BUILD_BASE}/SageAttention" 72 | if [ -d $dd ]; then 73 | echo "SageAttention source already present, deleting $dd to force reinstallation" 74 | rm -rf $dd 75 | fi 76 | git clone https://github.com/thu-ml/SageAttention.git 77 | cd SageAttention 78 | python3 -s -m pip install . 
--no-build-isolation || error_exit "Failed to install SageAttention" 79 | 80 | exit 0 81 | -------------------------------------------------------------------------------- /userscripts_dir/21-Triton.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | min_triton_version="3.3" 4 | 5 | set -e 6 | 7 | error_exit() { 8 | echo -n "!! ERROR: " 9 | echo $* 10 | echo "!! Exiting script (ID: $$)" 11 | exit 1 12 | } 13 | 14 | source /comfy/mnt/venv/bin/activate || error_exit "Failed to activate virtualenv" 15 | 16 | ## requires: 00-nvidiaDev,sh 17 | echo "Checking if nvcc is available" 18 | if ! command -v nvcc &> /dev/null; then 19 | error_exit " !! nvcc not found, canceling run" 20 | fi 21 | 22 | ## requires: 10-pip3Dev.sh 23 | if pip3 show setuptools &>/dev/null; then 24 | echo " ++ setuptools installed" 25 | else 26 | error_exit " !! setuptools not installed, canceling run" 27 | fi 28 | if pip3 show ninja &>/dev/null; then 29 | echo " ++ ninja installed" 30 | else 31 | error_exit " !! ninja not installed, canceling run" 32 | fi 33 | 34 | # Adapted from https://github.com/eddiehavila/ComfyUI-Nvidia-Docker/blob/main/user_script.bash 35 | compile_flag=true 36 | if pip3 show triton &>/dev/null; then 37 | # Extract the installed version of triton 38 | triton_version=$(pip3 show triton | grep '^Version:' | awk '{print $2}') 39 | echo "Triton is installed with version $triton_version" 40 | 41 | # Use version sort to check if triton_version is below the minimal version 42 | # This command prints the lowest version of the two. 43 | # If the lowest isn't the minimal version, then triton_version is below the minimal version. 
44 | if [ "$(printf '%s\n' "$triton_version" "$min_triton_version" | sort -V | head -n1)" != "$min_triton_version" ]; then 45 | echo "Triton version $triton_version is below minimum version $min_triton_version, need to compile" 46 | else 47 | compile_flag=false 48 | fi 49 | fi 50 | 51 | if [ "A$compile_flag" = "Afalse" ]; then 52 | echo "Triton is already up to date (version $triton_version), skipping compilation" 53 | exit 0 54 | fi 55 | 56 | echo "Compiling Triton" 57 | 58 | cd /comfy/mnt 59 | bb="venv/.build_base.txt" 60 | if [ ! -f $bb ]; then error_exit "${bb} not found"; fi 61 | BUILD_BASE=$(cat $bb) 62 | 63 | 64 | if [ ! -d src ]; then mkdir src; fi 65 | cd src 66 | 67 | mkdir -p ${BUILD_BASE} 68 | if [ ! -d ${BUILD_BASE} ]; then error_exit "${BUILD_BASE} not found"; fi 69 | cd ${BUILD_BASE} 70 | 71 | dd="/comfy/mnt/src/${BUILD_BASE}/triton" 72 | 73 | if [ -d $dd ]; then 74 | echo "Triton source already present, deleting $dd to force reinstallation" 75 | rm -rf $dd 76 | fi 77 | git clone https://github.com/triton-lang/triton.git 78 | cd triton 79 | pip3 install -e python || error_exit "Failed to install Triton" 80 | 81 | exit 0 82 | -------------------------------------------------------------------------------- /userscripts_dir/25-HiDream.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | error_exit() { 6 | echo -n "!! ERROR: " 7 | echo $* 8 | echo "!! Exiting script (ID: $$)" 9 | exit 1 10 | } 11 | 12 | source /comfy/mnt/venv/bin/activate || error_exit "Failed to activate virtualenv" 13 | 14 | ## requires: 00-nvidiaDev.sh 15 | echo "Checking if nvcc is available" 16 | if ! command -v nvcc &> /dev/null; then 17 | error_exit " !! nvcc not found, canceling run" 18 | fi 19 | 20 | ## requires: 10-pip3Dev.sh 21 | if pip3 show setuptools &>/dev/null; then 22 | echo " ++ setuptools installed" 23 | else 24 | error_exit " !!
setuptools not installed, canceling run" 25 | fi 26 | if pip3 show ninja &>/dev/null; then 27 | echo " ++ ninja installed" 28 | else 29 | error_exit " !! ninja not installed, canceling run" 30 | fi 31 | 32 | ## requires: 20-SageAttention.sh 33 | if pip3 show sageattention &>/dev/null; then 34 | echo " ++ sageattention installed" 35 | else 36 | error_exit " !! sageattention not installed, canceling run" 37 | fi 38 | 39 | ## requires: 21-Triton.sh 40 | if pip3 show triton &>/dev/null; then 41 | echo " ++ triton installed" 42 | else 43 | error_exit " !! triton not installed, canceling run" 44 | fi 45 | 46 | # HiDream is a custom node, no need to compile, we just needed to confirm dependencies are met 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /userscripts_dir/README.md: -------------------------------------------------------------------------------- 1 | See the `/userscripts_dir` section of the [README.md](../README.md) for details. 2 | 3 | This directory is used to run independent user scripts in order to perform additional operations that might damage your installation. 4 | **Use with caution**. No support will be provided for issues resulting from the use of this directory. In case of trouble, it is recommended to delete the `run` folder and start a new container.
5 | -------------------------------------------------------------------------------- /workflow/ComfyUI-Flux1Dev-ExtendedWorkflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/workflow/ComfyUI-Flux1Dev-ExtendedWorkflow.png -------------------------------------------------------------------------------- /workflow/ComfyUI_flux1dev-lora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/workflow/ComfyUI_flux1dev-lora.png -------------------------------------------------------------------------------- /workflow/ComfyUI_flux1schnell-lora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/workflow/ComfyUI_flux1schnell-lora.png -------------------------------------------------------------------------------- /workflow/HappyBirthdayComfy-AtomixFlux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mmartial/ComfyUI-Nvidia-Docker/bb8c46eb449f1adc409729a4fa3f4877512f6b1b/workflow/HappyBirthdayComfy-AtomixFlux.png --------------------------------------------------------------------------------