├── cmd.bat
├── docker
│   ├── prune.sh
│   ├── run.sh
│   ├── ssh-into-sd.sh
│   ├── stop.sh
│   ├── build.sh
│   ├── prep
│   │   ├── launch_prep.py
│   │   ├── web_prep.sh
│   │   ├── web_prep.py
│   │   └── launch_prep.sh
│   ├── trunc-sd-containers.sh
│   └── Dockerfile
├── .gitignore
├── uninstall.bat
├── start.bat
├── ssh-into-sd.bat
├── truncate.bat
├── stop.bat
├── install.bat
├── install
│   ├── config.json
│   ├── provision.sh
│   ├── webui-user.sh
│   └── install.ps1
├── docker-compose.yml
├── LICENSE
└── README.md
/cmd.bat: -------------------------------------------------------------------------------- 1 | wsl -d ubuntu-stable-diffusion -------------------------------------------------------------------------------- /docker/prune.sh: -------------------------------------------------------------------------------- 1 | docker system prune -f -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.vhdx 2 | .idea 3 | docker-compose.override.yml -------------------------------------------------------------------------------- /docker/run.sh: -------------------------------------------------------------------------------- 1 | echo "Starting container" 2 | docker start sd -------------------------------------------------------------------------------- /uninstall.bat: -------------------------------------------------------------------------------- 1 | wsl --unregister ubuntu-stable-diffusion 2 | -------------------------------------------------------------------------------- /docker/ssh-into-sd.sh: -------------------------------------------------------------------------------- 1 | docker exec -it `docker ps -a | grep sd | head -1 | awk '{print $1}'` /bin/bash -------------------------------------------------------------------------------- /start.bat: -------------------------------------------------------------------------------- 1 | wsl -d ubuntu-stable-diffusion -e sh -c "cd `wslpath -a '%~dp0'`/docker && ./run.sh" 2 | -------------------------------------------------------------------------------- /ssh-into-sd.bat: -------------------------------------------------------------------------------- 1 | wsl -d ubuntu-stable-diffusion -e sh -c "cd `wslpath -a '%~dp0'`/docker && ./ssh-into-sd.sh" 2 | -------------------------------------------------------------------------------- /docker/stop.sh: -------------------------------------------------------------------------------- 1 | docker container stop `docker ps -a | grep sd | awk '{print $1}' | awk 'BEGIN { ORS = " " } { print }'` -------------------------------------------------------------------------------- /truncate.bat: -------------------------------------------------------------------------------- 1 | wsl -d ubuntu-stable-diffusion -e sh -c "cd `wslpath -a '%~dp0'`/docker && ./trunc-sd-containers.sh" 2 | -------------------------------------------------------------------------------- /stop.bat: -------------------------------------------------------------------------------- 1 | wsl -d ubuntu-stable-diffusion -e sh -c "cd `wslpath -a '%~dp0'`/docker && ./stop.sh" 2 | wsl -t ubuntu-stable-diffusion 3 | -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | powershell.exe "Start-Process powershell -Verb RunAs -ArgumentList '-NoExit -ExecutionPolicy Bypass -file %~dp0\install\install.ps1'" 2 | 
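The `.bat` wrappers above delegate to the shell scripts under `docker/`, which locate the Stable Diffusion container by grepping the full `docker ps -a` table for `sd`. As an illustrative aside (this snippet is not part of the repository), the same selection could be expressed with Docker's built-in name filter, which matches only container names rather than every column of the table:

```bash
# Hypothetical alternative to the grep/awk pattern used in ssh-into-sd.sh and stop.sh;
# --filter name=sd matches container names containing "sd" and ignores the IMAGE column.
docker container stop $(docker ps -aq --filter "name=sd")
docker exec -it $(docker ps -q --filter "name=sd" | head -n 1) /bin/bash
```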
-------------------------------------------------------------------------------- /docker/build.sh: -------------------------------------------------------------------------------- 1 | echo "Building docker image" 2 | docker buildx build -t sd:latest . 3 | echo "Creating docker container" 4 | docker create --name sd sd:latest 5 | -------------------------------------------------------------------------------- /install/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "sd_vae": "sdxl_vae.safetensors", 3 | "quicksettings_list": [ 4 | "sd_model_checkpoint", 5 | "sd_vae" 6 | ] 7 | } -------------------------------------------------------------------------------- /docker/prep/launch_prep.py: -------------------------------------------------------------------------------- 1 | from launch import prepare_environment, args 2 | 3 | args.skip_torch_cuda_test=True 4 | args.xformers=True 5 | 6 | prepare_environment() -------------------------------------------------------------------------------- /docker/prep/web_prep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $SD_PROJECT/venv/bin/activate 3 | 4 | mv web_prep.py $SD_PROJECT/web_prep.py 5 | 6 | cd $SD_PROJECT 7 | python3.11 web_prep.py && rm web_prep.py -------------------------------------------------------------------------------- /docker/trunc-sd-containers.sh: -------------------------------------------------------------------------------- 1 | docker container stop `docker ps -a | grep sd | awk '{print $1}' | awk 'BEGIN { ORS = " " } { print }'` 2 | docker container rm `docker ps -a | grep sd | awk '{print $1}' | awk 'BEGIN { ORS = " " } { print }'` -------------------------------------------------------------------------------- /docker/prep/web_prep.py: -------------------------------------------------------------------------------- 1 | from webui import initialize 2 | from modules import shared 3 | from modules.launch_utils import args 4 | 5 | args.skip_torch_cuda_test=True 6 | shared.cmd_opts.no_download_sd_model=True 7 | 8 | initialize.imports() 9 | initialize.initialize() -------------------------------------------------------------------------------- /docker/prep/launch_prep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $SD_PROJECT/venv/bin/activate 3 | 4 | python3.11 -m pip install wheel triton torch==2.1.0 torchvision==0.16 xformers==0.0.22.post7 5 | mv launch_prep.py $SD_PROJECT/launch_prep.py 6 | 7 | cd $SD_PROJECT 8 | python3.11 -m pip install -r requirements.txt 9 | python3.11 launch_prep.py && rm launch_prep.py -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | sd: 3 | # build: docker # Local source 4 | build: https://github.com/rgryta/Stable-Diffusion-WSL2-Docker.git#main:docker 5 | container_name: sd 6 | ports: 7 | - "7860:7860" 8 | deploy: 9 | resources: 10 | reservations: 11 | devices: 12 | - driver: nvidia 13 | count: all 14 | capabilities: [gpu] -------------------------------------------------------------------------------- /install/provision.sh: -------------------------------------------------------------------------------- 1 | cp -rf webui-user.sh /home/sd/stable-diffusion-webui 2 | cp -rf config.json /home/sd/stable-diffusion-webui 3 | 4 | cd 
/home/sd/stable-diffusion-webui/models/Stable-diffusion 5 | curl -OL https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors 6 | curl -OL https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors 7 | 8 | cd /home/sd/stable-diffusion-webui/models/VAE 9 | curl -OL https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl_vae.safetensors -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Radosław Gryta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /install/webui-user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ######################################################### 3 | # Uncomment and change the variables below to your need:# 4 | ######################################################### 5 | 6 | # Install directory without trailing slash 7 | #install_dir="/home/$(whoami)" 8 | 9 | # Name of the subdirectory 10 | #clone_dir="stable-diffusion-webui" 11 | 12 | # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention" 13 | export COMMANDLINE_ARGS="--medvram-sdxl --xformers" 14 | 15 | # python3 executable 16 | #python_cmd="python3" 17 | 18 | # git executable 19 | #export GIT="git" 20 | 21 | # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv) 22 | #venv_dir="venv" 23 | 24 | # script to launch to start the app 25 | #export LAUNCH_SCRIPT="launch.py" 26 | 27 | # install command for torch 28 | #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113" 29 | 30 | # Requirements file to use for stable-diffusion-webui 31 | #export REQS_FILE="requirements_versions.txt" 32 | 33 | # Fixed git repos 34 | #export K_DIFFUSION_PACKAGE="" 35 | #export GFPGAN_PACKAGE="" 36 | 37 | # Fixed git commits 38 | #export STABLE_DIFFUSION_COMMIT_HASH="" 39 | #export CODEFORMER_COMMIT_HASH="" 40 | #export BLIP_COMMIT_HASH="" 41 | 42 | # Uncomment to enable accelerated launch 43 | #export ACCELERATE="True" 44 | 45 | # Uncomment to disable TCMalloc 46 | #export NO_TCMALLOC="True" 47 | 48 | ########################################### 49 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:12.1.1-devel-ubuntu22.04 as sd_webui 2 | 3 | ENV SD_HOME=/home/sd 4 | ENV SD_PROJECT=$SD_HOME/stable-diffusion-webui 5 | 6 | # Install necessary packages 7 | RUN apt-get update && apt-get install software-properties-common -y 8 | RUN add-apt-repository ppa:deadsnakes/ppa -y 9 | RUN ln -snf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && echo $CONTAINER_TIMEZONE > /etc/timezone 10 | RUN apt-get install --no-install-recommends --no-install-suggests -y \ 11 | sudo git g++ python3.11 python3.11-distutils python3.11-venv python3.11-dev ffmpeg libsm6 libxext6 curl wget vim bc 12 | RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.11 13 | 14 | # Create Stable Diffusion user 15 | RUN useradd -m -G sudo -p $(openssl passwd -1 St@bl3D1ff) sd 16 | USER sd 17 | 18 | # Python setup - PYTHONUNBUFFERED set to 1 makes logs pipe into the container output 19 | RUN pip install --upgrade pip 20 | # Torch support list based on https://github.com/pytorch/builder/blob/main/conda/pytorch-nightly/build.sh 21 | # and https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ 22 | ENV PYTHONUNBUFFERED=1 \ 23 | PATH="$PATH:$SD_HOME/.local/bin" \ 24 | TORCH_CUDA_ARCH_LIST=All \ 25 | FORCE_CUDA=1 26 | 27 | # Clone AUTOMATIC1111 repo 28 | RUN cd $SD_HOME && git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git 29 | 30 | # Set up virtual python environment 31 | USER root 32 | RUN cd $SD_PROJECT && python3.11 -m venv venv/ && chown -R sd venv 33 | RUN sudo apt-get install google-perftools -y 34 | USER sd 35 | 36 | COPY 
--chown=sd --chmod=755 prep $SD_HOME/prep 37 | RUN cd $SD_HOME/prep && ./launch_prep.sh 38 | 39 | # Download cache 40 | RUN cd $SD_HOME/prep && ./web_prep.sh 41 | 42 | # Clean prep 43 | RUN rm -r $SD_HOME/prep 44 | 45 | WORKDIR $SD_PROJECT 46 | 47 | EXPOSE 7860/tcp 48 | CMD ["/bin/bash", "-c", "./webui.sh --listen --port 7860 --enable-insecure-extension-access"] 49 | -------------------------------------------------------------------------------- /install/install.ps1: -------------------------------------------------------------------------------- 1 | # Install dependencies for installing Linux 2 | if (-not (Get-Module -ListAvailable -Name WSLTools)) { 3 | Install-Module -Name WSLTools -Force 4 | } 5 | 6 | Import-Module WSLTools -WarningAction SilentlyContinue 7 | if (-not (Ensure-WSL)) { 8 | $question = "Yes","No" 9 | $selected = Get-Select -Prompt "[OPER] Would you like to install HyperV and WSL now?" -Options $question 10 | if ($selected -eq "Yes") { 11 | iex ((New-Object System.Net.WebClient).DownloadString('https://raw.githubusercontent.com/rgryta/PowerShell-WSLTools/main/install-wsl.ps1')) 12 | Write-Host "Reboot your system now and then restart the script" 13 | } 14 | if ($selected -eq "No") { 15 | Write-Host "Please set up HyperV and WSL manually and then relaunch the script" 16 | } 17 | return $false 18 | } 19 | 20 | # Install Ubuntu on WSL2 21 | $scriptPath = Split-Path -Parent $MyInvocation.MyCommand.Definition 22 | 23 | $distro = 'ubuntu-stable-diffusion' 24 | $ignr = wsl --unregister $distro 25 | 26 | WSL-Ubuntu-Install -DistroAlias $distro -InstallPath $scriptPath -Version jammy # lunar has no docker, and kinetic has no nvidia-container-toolkit 27 | $ignr = wsl -d $distro -u root -e sh -c "apt-get install -y apt-utils sudo curl systemd" 28 | 29 | # Creating new user 30 | $ignr = wsl -d $distro -u root -e sh -c "useradd -m -G sudo sd" 31 | $ignr = wsl -d $distro -u root -e sh -c "echo sd:U6aMy0wojraho | sudo chpasswd -e" # No password 32 | 33 | # Enabling Systemd (with fix: https://github.com/microsoft/WSL/issues/9602#issuecomment-1421897547) 34 | $ignr = wsl -d $distro -u root -e sh -c 'echo "[boot]\\nsystemd=true" > /etc/wsl.conf' 35 | $ignr = wsl -d $distro -u root -e sh -c 'echo "[user]\\ndefault=sd" >> /etc/wsl.conf' 36 | $ignr = wsl -d $distro -u root -e sh -c 'ln -s /usr/lib/systemd/systemd /sbin/init' 37 | 38 | Write-Host "Waiting for Ubuntu setup to finish..." 
39 | while ($true) 40 | { 41 | $username = wsl -d $distro -e sh -c "grep -v '/usr/sbin/nologin' /etc/passwd | grep 'sd:' | awk -F: '{print `$1}'" 42 | if ($username -ne $null) { 43 | Write-Host "Created default WSL user: $username" 44 | break 45 | } 46 | } 47 | 48 | # Waiting for processes to finish and restarting 49 | while ($true) 50 | { 51 | $setupstatus = wsl -d $distro -u root -e sh -c "ps -Ao comm --no-headers | grep 'adduser\|passwd' | wc -l" 52 | if ($setupstatus -eq '0') { 53 | break 54 | } 55 | } 56 | $ignr = wsl -t $distro 57 | 58 | # Setting up CUDA drivers 59 | $ignr = wsl -d $distro -u root -e sh -c "apt-key del 7fa2af80 > /dev/null 2>&1 `&`& wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin > /dev/null 2>&1 `&`& mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600 `&`& apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/3bf863cc.pub > /dev/null 2>&1 `&`& add-apt-repository -y 'deb https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/ /' > /dev/null 2>&1 `&`& apt-get update > /dev/null 2>&1 `&`& apt-get -y install cuda > /dev/null 2>&1" 60 | 61 | 62 | Write-Host 'If you can see your GPU listed below, everything went smoothly so far:' 63 | Start-Sleep -Seconds 5 64 | wsl -d $distro -e sh -c 'nvidia-smi' 65 | 66 | # Installing Docker 67 | $ignr = wsl -d $distro -u root -e sh -c "curl https://get.docker.com | sh > /dev/null 2>&1 `&`& systemctl --now enable docker > /dev/null 2>&1 " 68 | 69 | $ignr = wsl -d $distro -u root -e sh -c "distribution=`$(. /etc/os-release;echo `$ID`$VERSION_ID) `&`& curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg > /dev/null 2>&1 `&`& curl -s -L https://nvidia.github.io/libnvidia-container/`$distribution/libnvidia-container.list | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | tee /etc/apt/sources.list.d/nvidia-container-toolkit.list" 70 | 71 | # Installing Nvidia Docker 72 | Write-Host 'Installing nvidia-docker' 73 | 74 | $ignr = wsl -d $distro -u root -e sh -c "apt-get update > /dev/null 2>&1 `&`& apt-get install -y nvidia-docker2 > /dev/null 2>&1" 75 | $ignr = wsl -d $distro -u root -e sh -c 'systemctl restart docker' 76 | $ignr = wsl -d $distro -u root -e sh -c "apt-get upgrade -y > /dev/null 2>&1 `&`& apt-get autoremove -y `&`& apt-get autoclean -y" 77 | 78 | # Testing Nvidia Docker 79 | Write-Host 'Pulling base NVIDIA CUDA container' 80 | $ignr = wsl -d $distro -u root -e sh -c "usermod -aG docker $username" 81 | $ignr = wsl -d $distro -e sh -c "docker pull nvidia/cuda:12.1.1-base-ubuntu22.04" 82 | 83 | Write-Host 'Verify if you are able to see your GPU below - this time within docker container:' 84 | wsl -d $distro -e sh -c "docker run --rm --gpus all nvidia/cuda:12.1.1-base-ubuntu22.04 nvidia-smi" 85 | 86 | Write-Host 'Now local container will be built. This can take about an hour depending on your CPU and internet speed.' 
87 | wsl -d $distro -e sh -c "cd ``wslpath -a '$scriptPath'``/../docker && ./build.sh" 88 | 89 | wsl -d $distro -e sh -c "mkdir -p /home/sd/stable-diffusion-webui" 90 | wsl -d $distro -e sh -c "docker volume create --driver local --opt type=none --opt device=/home/sd/stable-diffusion-webui --opt o=bind sd_vol" 91 | wsl -d $distro -e sh -c "docker create -v sd_vol:/home/sd/stable-diffusion-webui -p 127.0.0.1:7860:7860 --name sd --gpus all sd" 92 | 93 | wsl -d $distro -e sh -c "cd ``wslpath -a '$scriptPath'`` && ./provision.sh" 94 | 95 | Write-Host 'You can now use `start.bat` to launch Stable Diffusion. Closing this window in 5 seconds...' 96 | Start-Sleep -Seconds 5 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Stable Diffusion on Docker (WSL2) 2 | 3 | ## About 4 | 5 | This repository is meant to allow for easy installation of Stable Diffusion on Windows. One click to install. Second click to start. 6 | 7 | This setup is completely dependent on the current versions of AUTOMATIC1111's webui repository and StabilityAI's Stable-Diffusion models. 8 | 9 | In its current configuration, only Nvidia GPUs are supported. This script will also install all dependencies (including xformers) in order to speed up launching the webui. 10 | 11 | ## Prerequisites 12 | 13 | Before following these instructions, please verify the points below. 14 | 15 | 1. You have virtualization support - the easiest way to check is to look for the "Virtualization" section in Windows Task Manager -> Performance -> CPU (it's located under "More details" if you don't see the Performance tab). 16 | 1. You have virtualization enabled - you have to enable it in your BIOS if you don't. 17 | 1. You have Windows 11 Pro - you can also use Windows 11 Home (also Windows 10 above a certain version), but I cannot guarantee that the provided scripts will work their magic. 18 | 1. You have an Nvidia GPU - this is mandatory for the current configuration. Support for AMD is presumably possible, but won't be added until such a request shows up. Make sure you also have the newest drivers! The whole repository is based on CUDA 12 - you will be limited to GTX 900-series or higher. 19 | 1. You need admin access. These scripts use a PowerShell library that I've prepared, called [WSLTools](https://github.com/rgryta/PowerShell-WSLTools) (handles automatic and interactive installation of WSL distributions from source); you need admin privileges to install this module. 20 | 21 | ## How to use 22 | 23 | After installation, simply execute the start.bat file to start the Stable-Diffusion app. You can open it under [http://localhost:7860](http://localhost:7860). 24 | 25 | If you want to close the app, simply launch stop.bat; it will terminate the application and close the terminals. 26 | 27 | Note! Keep in mind that stop.bat will terminate and remove all containers based on the Stable-Diffusion webui image. If you have downloaded additional models while the application was running - e.g. GAN models - they will have to be redownloaded. 28 | 29 | ## Installation 30 | 31 | ### Automatic 32 | 33 | Run install.bat to install Stable Diffusion. This will take a while - as long as you don't see red errors, everything's fine. It takes about 20 minutes to install on my machine. 34 | 35 | ### Manual 36 | 37 | #### WSL 38 | 39 | 1. Install Windows 11 40 | 1. Install WSL from MS Store (https://www.microsoft.com/store/productId/9P9TQF7MRM4R) 41 | 1. 
Search for "Turn Windows features on or off" and enable "Hyper-V" 42 | 1. Set WSL to use v2: `wsl --set-default-version 2` 43 | 1. Install Linux distro of your choice (Ubuntu given as example): `wsl --install Ubuntu` 44 | 1. Set up your username and password 45 | 1. (In distro command line) `sudo sh -c 'echo "[boot]\nsystemd=true" > /etc/wsl.conf'` 46 | 1. Check your distro name using `wsl --list` 47 | 1. Shutdown all distros `wsl --shutdown` and restart the one we're using `wsl --distribution Ubuntu` 48 | 1. Make sure you have nvidia drivers installed on Windows 49 | 1. Now open WSL. From now on, everything is executed from there. 50 | 51 | #### Docker 52 | 1. Execute following scripts (installs cuda drivers): 53 | ```bash 54 | sudo apt-key del 7fa2af80 55 | wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin 56 | sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600 57 | sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/3bf863cc.pub 58 | sudo add-apt-repository 'deb https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/ /' 59 | sudo apt-get update 60 | sudo apt-get -y install cuda 61 | ``` 62 | 1. Check if you're able to see your GPU in WSL: nvidia-smi 63 | 1. Install docker: 64 | ```bash 65 | curl https://get.docker.com | sh \ 66 | && sudo systemctl --now enable docker 67 | ``` 68 | 1. Prepare gpg keys to install nvidia-docker as per https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installing-with-apt 69 | ``` 70 | curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ 71 | && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ 72 | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ 73 | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list 74 | ``` 75 | 1. Now we can install it: `sudo apt-get install -y nvidia-docker2` 76 | 1. Restart docker service: `sudo systemctl restart docker` 77 | 1. Check if docker container also sees your GPU: `sudo docker run --rm --gpus all nvidia/cuda:12.0.1-base-ubuntu22.04 nvidia-smi` 78 | 79 | #### New Docker Container 80 | 1. `docker compose up --force-recreate -d` 81 | 1. If everything started great and no errors you can run the instance in the background `docker compose up -d` 82 | 83 | #### Old Docker Container Method (doesn't work) 84 | 1. Run `sh docker/build.sh` from repo directory to build the container. You can uncomment depth, upscaler, inpainting and gfpgan from Dockerfile (first generated image) but it will take much more space - default installation is ~25GB total. 85 | 1. Run `sh docker/run.sh` to start container. Open http://localhost:7860 to access the webui - you can do so from Windows of course. 86 | 87 | ## Development 88 | If you wish to make changes to this repository and test your changes, the following steps will help. 89 | 90 | 1. Fork this repository 91 | 1. Create a branch called "dev" 92 | 1. Created a file called "docker-compose.override.yml" with the following contents in the repository root. 93 | ``` 94 | services: 95 | sd: 96 | container_name: sd-dev 97 | build: https://github.com/rgryta/Stable-Diffusion-WSL2-Docker.git#dev:docker # Set to your github repostiory fork 98 | ``` 99 | 1. 
Run `docker compose up --force-recreate -d` 100 | 101 | ## Sources 102 | 103 | 1. [StabilityAI Stable-Diffusion GitHub](https://github.com/Stability-AI/stablediffusion) 104 | 1. [StabilityAI Stable-Diffusion HuggingFace](https://huggingface.co/stabilityai/stable-diffusion-2-1) 105 | 1. [AUTOMATIC1111 webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 106 | 1. [Nvidia Container Runtime](https://nvidia.github.io/nvidia-container-runtime/) 107 | 1. [Ubuntu GPU acceleration on WSL2](https://ubuntu.com/tutorials/enabling-gpu-acceleration-on-ubuntu-on-wsl2-with-the-nvidia-cuda-platform#3-install-nvidia-cuda-on-ubuntu) 108 | 1. [MS WSL systemd](https://devblogs.microsoft.com/commandline/systemd-support-is-now-available-in-wsl/) 109 | 1. [Nvidia WSL](https://docs.nvidia.com/cuda/wsl-user-guide/index.html) 110 | --------------------------------------------------------------------------------