├── .gitattributes
├── .github
│   └── workflows
│       ├── build-docker-image.yml
│       └── build-push-image.yml
├── Dockerfile
├── README.md
├── docs
│   ├── .nojekyll
│   ├── README.md
│   ├── _navbar.md
│   ├── _sidebar.md
│   ├── assets
│   │   ├── arcai.svg
│   │   ├── github-connection-443.jpg
│   │   ├── killed.jpg
│   │   ├── port-not-available.png
│   │   └── urlerror.png
│   ├── faq.md
│   ├── getting-started.md
│   ├── index.html
│   ├── release-notes.md
│   └── zh-cn
│       ├── README.md
│       ├── _sidebar.md
│       ├── faq.md
│       ├── getting-started.md
│       └── release-notes.md
├── scripts
│   ├── BOM
│   │   ├── neg.glob
│   │   └── pos.glob
│   ├── check_bom.py
│   ├── install.bat
│   ├── package.py
│   └── 使用说明.txt
└── startup.sh
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.bat text eol=crlf
2 |
--------------------------------------------------------------------------------
/.github/workflows/build-docker-image.yml:
--------------------------------------------------------------------------------
1 | name: Build Docker Image
2 |
3 | on:
4 | push:
5 | branches: [ "main" ]
6 | pull_request:
7 | branches: [ "main" ]
8 |
9 | jobs:
10 |
11 | build:
12 |
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v3
17 | - name: Build the Docker image
18 | run: docker build -t ipex-arc-sd:$(date +%s) -f Dockerfile .
19 |
--------------------------------------------------------------------------------
/.github/workflows/build-push-image.yml:
--------------------------------------------------------------------------------
1 | name: Publish image
2 |
3 | on:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | push_to_registry:
9 | name: Push Docker image to Docker Hub
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Check out the repo
13 | uses: actions/checkout@v3
14 |
15 | - name: Log in to Docker Hub
16 | uses: docker/login-action@v2
17 | with:
18 | username: ${{ secrets.DOCKER_USERNAME }}
19 | password: ${{ secrets.DOCKER_PASSWORD }}
20 |
21 | - name: Build and push
22 | uses: docker/build-push-action@v4
23 | with:
24 | context: .
25 | file: ./Dockerfile
26 | push: true
27 | tags: |
28 | ${{ secrets.DOCKER_USERNAME }}/ipex-arc-sd:${{ github.event.release.tag_name }}
29 | ${{ secrets.DOCKER_USERNAME }}/ipex-arc-sd:latest
30 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # Licensed under the Apache License, Version 2.0 (the "License");
3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at
5 | #
6 | # http://www.apache.org/licenses/LICENSE-2.0
7 | #
8 | # Unless required by applicable law or agreed to in writing, software
9 | # distributed under the License is distributed on an "AS IS" BASIS,
10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | # See the License for the specific language governing permissions and
12 | # limitations under the License.
13 | # ============================================================================
14 |
15 | ARG UBUNTU_VERSION=22.04
16 | FROM ubuntu:${UBUNTU_VERSION} AS oneapi-lib-installer
17 |
18 | RUN apt-get update && \
19 | apt-get install -y --no-install-recommends --fix-missing \
20 | ca-certificates \
21 | gnupg2 \
22 | gpg-agent \
23 | unzip \
24 | wget
25 |
26 | # oneAPI packages
27 | RUN no_proxy=$no_proxy wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
28 | | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
29 | echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" \
30 | | tee /etc/apt/sources.list.d/oneAPI.list
31 |
32 | ARG DPCPP_VER=2023.2.1-16
33 | ARG MKL_VER=2023.2.0-49495
34 | # intel-oneapi-compiler-shared-common provides `sycl-ls`
35 | ARG CMPLR_COMMON_VER=2023.2.1
36 | # Install runtime libs to reduce image size
37 | RUN apt-get update && \
38 | apt-get install -y --no-install-recommends --fix-missing \
39 | intel-oneapi-runtime-dpcpp-cpp=${DPCPP_VER} \
40 | intel-oneapi-runtime-mkl=${MKL_VER} \
41 | intel-oneapi-compiler-shared-common-${CMPLR_COMMON_VER}=${DPCPP_VER}
42 |
43 | # Prepare Intel Graphics driver index
44 | ARG DEVICE=flex
45 | RUN no_proxy=$no_proxy wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | \
46 | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
47 | RUN printf 'deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy %s\n' "$DEVICE" | \
48 | tee /etc/apt/sources.list.d/intel.gpu.jammy.list
49 |
50 | ARG UBUNTU_VERSION=22.04
51 | FROM ubuntu:${UBUNTU_VERSION}
52 |
53 | RUN mkdir /oneapi-lib
54 | COPY --from=oneapi-lib-installer /opt/intel/oneapi/lib /oneapi-lib/
55 | ARG CMPLR_COMMON_VER=2023.2.1
56 | COPY --from=oneapi-lib-installer /opt/intel/oneapi/compiler/${CMPLR_COMMON_VER}/linux/bin/sycl-ls /bin/
57 | COPY --from=oneapi-lib-installer /usr/share/keyrings/intel-graphics.gpg /usr/share/keyrings/intel-graphics.gpg
58 | COPY --from=oneapi-lib-installer /etc/apt/sources.list.d/intel.gpu.jammy.list /etc/apt/sources.list.d/intel.gpu.jammy.list
59 |
60 | # Set oneAPI lib env
61 | ENV LD_LIBRARY_PATH=/oneapi-lib:/oneapi-lib/intel64:$LD_LIBRARY_PATH
62 |
63 | RUN apt-get update && \
64 | apt-get install -y --no-install-recommends --fix-missing \
65 | ca-certificates && \
66 | apt-get clean && \
67 | rm -rf /var/lib/apt/lists/*
68 |
69 | ARG PYTHON=python3.10
70 | RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
71 | ${PYTHON} lib${PYTHON} python3-pip && \
72 | apt-get clean && \
73 | rm -rf /var/lib/apt/lists/*
74 |
75 | RUN pip --no-cache-dir install --upgrade \
76 | pip \
77 | setuptools
78 |
79 | RUN ln -sf $(which ${PYTHON}) /usr/local/bin/python && \
80 | ln -sf $(which ${PYTHON}) /usr/local/bin/python3 && \
81 | ln -sf $(which ${PYTHON}) /usr/bin/python && \
82 | ln -sf $(which ${PYTHON}) /usr/bin/python3
83 |
84 | ARG ICD_VER=23.17.26241.33-647~22.04
85 | ARG LEVEL_ZERO_GPU_VER=1.3.26241.33-647~22.04
86 | ARG LEVEL_ZERO_VER=1.11.0-647~22.04
87 | ARG LEVEL_ZERO_DEV_VER=1.11.0-647~22.04
88 | RUN apt-get update && \
89 | apt-get install -y --no-install-recommends --fix-missing \
90 | intel-opencl-icd=${ICD_VER} \
91 | intel-level-zero-gpu=${LEVEL_ZERO_GPU_VER} \
92 | level-zero=${LEVEL_ZERO_VER} \
93 | level-zero-dev=${LEVEL_ZERO_DEV_VER} && \
94 | apt-get clean && \
95 | rm -rf /var/lib/apt/lists/*
96 |
97 | # Stable Diffusion Web UI dependencies
98 | RUN apt-get update && \
99 | apt-get install -y --no-install-recommends --fix-missing \
100 | libgl1 \
101 | libglib2.0-0 \
102 | libgomp1 \
103 | libjemalloc-dev \
104 | python3-venv \
105 | git \
106 | numactl && \
107 | apt-get clean && \
108 | rm -rf /var/lib/apt/lists/*
109 |
110 | COPY startup.sh /bin/
111 | RUN chmod 755 /bin/startup.sh
112 |
113 | VOLUME [ "/deps" ]
114 | VOLUME [ "/sd-webui" ]
115 | VOLUME [ "/root/.cache/huggingface" ]
116 |
117 | ENV venv_dir=/deps/venv
118 | ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
119 |
120 | # Force 100% available VRAM size for compute-runtime
121 | # See https://github.com/intel/compute-runtime/issues/586
122 | ENV NEOReadDebugKeys=1
123 | ENV ClDeviceGlobalMemSizeAvailablePercent=100
124 |
125 | WORKDIR /sd-webui
126 |
127 | ENTRYPOINT [ "startup.sh", "-f", "--use-ipex", "--listen" ]
128 | CMD [ "--insecure" ]
129 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Stable Diffusion Web UI Docker for Intel Arc GPUs
2 |
3 |
4 |
5 |
6 |
7 | - [Documentation](https://blog.nuullll.com/ipex-sd-docker-for-arc-gpu/#/)
8 | - [Getting Started](https://blog.nuullll.com/ipex-sd-docker-for-arc-gpu/#/getting-started)
9 | - [FAQ](https://blog.nuullll.com/ipex-sd-docker-for-arc-gpu/#/faq)
10 | - [Release Notes](https://blog.nuullll.com/ipex-sd-docker-for-arc-gpu/#/release-notes)
11 |
12 | The [docker image](https://hub.docker.com/r/nuullll/ipex-arc-sd) includes
13 | - Intel oneAPI DPC++ runtime libs _(Note: compiler executables are not included)_
14 | - Intel oneAPI MKL runtime libs
15 | - Intel oneAPI compiler common tool `sycl-ls`
16 | - Intel Graphics driver
17 | - Basic python environment
18 |
19 | The Stable Diffusion Web UI variant used by the image: [SD.Next](https://github.com/vladmandic/automatic)
20 |
21 | - Intel Extension for PyTorch (IPEX) and other python packages will be installed by [SD.Next](https://github.com/vladmandic/automatic) dynamically
22 |
23 | ## (For Developers) Build docker image locally
24 |
25 | ```powershell
26 | docker build -t ipex-arc-sd -f Dockerfile .
27 | ```
28 |
29 | Refer to [Dockerfile](./Dockerfile) for available build arguments.
30 |
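
If you want to customize the build, the `ARG`s declared in the [Dockerfile](./Dockerfile) (e.g. `UBUNTU_VERSION`, `DEVICE`, the oneAPI and driver versions) can be overridden with `--build-arg`. A minimal sketch, assuming you want the `arc` driver channel instead of the default `flex`:

```bash
# Override build arguments declared in the Dockerfile (values here are examples).
docker build \
  --build-arg DEVICE=arc \
  --build-arg UBUNTU_VERSION=22.04 \
  -t ipex-arc-sd:custom \
  -f Dockerfile .
```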
31 | ## Contributors
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nuullll/ipex-sd-docker-for-arc-gpu/0dfba22e53bd164529e40b3f5d6f7cf17270e1f7/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Stable Diffusion Web UI Docker for Arc GPUs
2 |
3 | > Let's release the AI power of Intel Arc GPUs :fire:
4 |
5 | ## What it is
6 |
 7 | A Docker project that containerizes all environment requirements for running Stable Diffusion workloads on **Intel Arc GPUs**.
 8 |
 9 | You can use your Arc GPUs for fancy image generation with minimal setup! Start now: [Getting started](/getting-started)
10 |
11 | ## Docker Image Release
12 |
13 |
14 |
15 |
16 |
17 | [Release Notes](/release-notes)
18 |
19 | ## Stable Diffusion Web UI
20 |
21 | Stable Diffusion Web UI variant used by the image: [SD.Next](https://github.com/vladmandic/automatic) (a.k.a. vladmandic/automatic).
22 |
23 | _Note: [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) doesn't support Arc GPUs, see [#4690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/4690), [#6417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/6417)._
24 |
25 | ## How SD.Next works on Arc GPUs
26 |
27 | [SD.Next](https://github.com/vladmandic/automatic) uses PyTorch as the AI framework for Stable Diffusion applications, and [Intel Extension for PyTorch (IPEX)](https://github.com/intel/intel-extension-for-pytorch) provides GPU acceleration for Intel discrete GPUs with PyTorch. Fortunately, IPEX provides [experimental support](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html) for **Intel Arc A-Series GPUs**!
28 |
29 | Minor code changes are required to actually enable AI workloads with IPEX's XPU backend, and these have already been implemented by [SD.Next](https://github.com/vladmandic/automatic) contributors. [SD.Next](https://github.com/vladmandic/automatic) will also try to install the corresponding IPEX dependencies by default.
30 |
31 | IPEX itself also depends on the [Intel GPU driver](https://dgpu-docs.intel.com/installation-guides/index.html) and the [oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html). Installing them manually can be time-consuming and error-prone (it's not easy to get all the required packages installed with the **proper** versions).
32 |
33 | **Don't worry, the [docker image](https://hub.docker.com/r/nuullll/ipex-arc-sd) will handle all the complexities for you. :hearts:**
34 |
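
If you want to double-check that your GPU is visible to the oneAPI stack inside a running container, the bundled `sycl-ls` tool can be used. A quick check, assuming a container named `sd-server` as in [Getting started](/getting-started):

```bash
# List the SYCL devices seen inside the running container.
docker exec -it sd-server sycl-ls
# A healthy setup reports the Arc GPU as a Level Zero / OpenCL GPU device.
```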
35 | ## Community
36 |
37 | - [Github Discussion](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/discussions)
38 | - :penguin: Group ID: 558[zero]74047
39 |
40 | ## Contributors
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/docs/_navbar.md:
--------------------------------------------------------------------------------
1 | * [En](/)
2 | * [简体中文](/zh-cn/)
--------------------------------------------------------------------------------
/docs/_sidebar.md:
--------------------------------------------------------------------------------
1 | * [Home](/ "Stable Diffusion Web UI docker for Intel Arc")
2 | * [Getting Started](/getting-started "Getting Started with SD.Next Docker")
3 | * [FAQ](/faq "Frequently Asked Questions")
4 | * [Release Notes](/release-notes "Release Notes")
5 |
--------------------------------------------------------------------------------
/docs/assets/arcai.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/assets/github-connection-443.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nuullll/ipex-sd-docker-for-arc-gpu/0dfba22e53bd164529e40b3f5d6f7cf17270e1f7/docs/assets/github-connection-443.jpg
--------------------------------------------------------------------------------
/docs/assets/killed.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nuullll/ipex-sd-docker-for-arc-gpu/0dfba22e53bd164529e40b3f5d6f7cf17270e1f7/docs/assets/killed.jpg
--------------------------------------------------------------------------------
/docs/assets/port-not-available.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nuullll/ipex-sd-docker-for-arc-gpu/0dfba22e53bd164529e40b3f5d6f7cf17270e1f7/docs/assets/port-not-available.png
--------------------------------------------------------------------------------
/docs/assets/urlerror.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nuullll/ipex-sd-docker-for-arc-gpu/0dfba22e53bd164529e40b3f5d6f7cf17270e1f7/docs/assets/urlerror.png
--------------------------------------------------------------------------------
/docs/faq.md:
--------------------------------------------------------------------------------
1 | # Frequently Asked Questions
2 |
3 | ## Environment setup
4 |
5 | ### Network issues when cloning from Github
6 |
7 | 
8 |
9 | > Network issues are common for Chinese users...
10 | >
 11 | > Please set up a stable proxy on your host, enable [TUN mode](https://docs.cfw.lbyczf.com/contents/tun.html) (possibly combined with `System Proxy`), and retry.
 12 | >
 13 | > Or you can specify proxy environment variables when executing `docker run`:
14 | >
15 | > ```powershell
16 | > docker run -it `
17 | > --device /dev/dxg `
18 | > -v /usr/lib/wsl:/usr/lib/wsl `
19 | > -v $home\docker-mount\sd-webui:/sd-webui `
20 | > -v deps:/deps `
21 | > -v huggingface:/root/.cache/huggingface `
22 | > -p 7860:7860 `
23 | > --rm `
 24 | > -e http_proxy=<proxy_host>:<proxy_port> `
 25 | > -e https_proxy=<proxy_host>:<proxy_port> `
26 | > nuullll/ipex-arc-sd:latest
27 | > ```
28 | >
 29 | > For example, `-e http_proxy=http://192.168.1.2:7890`. `localhost` or `127.0.0.1` might not work for WSL2, so please use your real host IP as the `<proxy_host>`.
30 |
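
One way to find a usable host address from WSL2 is to look at the default gateway of the WSL network, which normally points back to the Windows host. This is only a sketch and assumes your proxy actually listens on that interface and is allowed through the Windows firewall:

```bash
# Inside WSL2 (or the container): the Windows host is usually the default gateway.
ip route show default
# e.g. "default via 172.26.48.1 dev eth0" -> try -e http_proxy=http://172.26.48.1:<proxy_port>
```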
31 | ### Network issues when installing python packages
32 |
33 | > Network issues are common for Chinese users... You can always try the methods mentioned [above](#network-issues-when-cloning-from-github).
34 | >
35 | > Or you can specify a Chinese mirror (e.g. THU TUNA) for pip via the environment variable `PIP_EXTRA_INDEX_URL`:
36 | >
37 | > ```powershell
38 | > docker run -it `
39 | > --device /dev/dxg `
40 | > -v /usr/lib/wsl:/usr/lib/wsl `
41 | > -v $home\docker-mount\sd-webui:/sd-webui `
42 | > -v deps:/deps `
43 | > -v huggingface:/root/.cache/huggingface `
44 | > -p 7860:7860 `
45 | > --rm `
46 | > -e PIP_EXTRA_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple `
47 | > nuullll/ipex-arc-sd:latest
48 | > ```
49 |
50 | ### docker run: Ports are not available
51 |
52 | 
53 |
54 | Full error:
55 |
56 | ```txt
57 | docker: Error response from daemon: Ports are not available: exposing port TCP 0.0.0.0:7860 -> 0.0.0.0: listen tcp 0.0.0.0:7860: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
58 | ```
59 |
60 | > This happened to me (on Windows) under certain network configurations.
61 | >
62 | > Solution ([reference](https://github.com/docker/for-win/issues/9272#issuecomment-776225866)):
63 | > 1. Open a cmd/powershell as **administrator**.
64 | > 2. Execute `net stop winnat`.
65 | > 3. [Restart your container](getting-started.md#restart-container).
66 | > 4. Execute `net start winnat` after the container is up.
67 |
68 | ### Killed without further information
69 |
70 | 
71 |
 72 | > Caused by insufficient memory allocated to WSL. The Web UI requires an estimated minimum of 7GB of memory for basic functionality.
73 | >
74 | > By default, 50% of host system memory will be allocated to WSL. You can [execute `free -m` inside container](getting-started.md#open-another-terminal-for-a-running-container) to check.
75 | >
76 | > Solution ([reference](https://learn.microsoft.com/en-us/answers/questions/1296124/how-to-increase-memory-and-cpu-limits-for-wsl2-win)):
 77 | > 1. Create a text file named `.wslconfig` under your host home directory.
 78 | > 2. Edit and save `.wslconfig` as follows (16GB for example):
79 | >
80 | > ```.wslconfig
81 | > # Settings apply across all Linux distros running on WSL 2
82 | > [wsl2]
83 | >
84 | > # Limits VM memory to use no more than 16 GB, this can be set as whole numbers using GB or MB
85 | > memory=16GB
86 | > ```
87 |
88 | ## Web UI running
89 |
90 | ### Container hanging
91 |
92 | > You may not be able to [stop the container](getting-started.md#stop-container) in normal ways.
93 | >
94 | > Try to right-click the Docker Desktop icon in the system tray and click `restart`. Otherwise, reboot your computer. :cold_sweat:
95 |
96 | ### URLError: [Errno 99] Cannot assign requested address
97 |
98 | 
99 |
100 | > This may happen while generating images, when the frontend submits too many progress-querying requests; it has no impact on the generated images.
101 | >
102 | > Solution:
103 | >
104 | > Edit `<host_dir>\javascript\progressBar.js`: change the default value of `once` to `true` for the function `requestProgress()`.
105 |
106 | ### Aborted while generating images
107 |
108 | Full error:
109 |
110 | ```txt
111 | Abort was called at 718 line in file:
112 | ../../neo/shared/source/os_interface/windows/wddm_memory_manager.cpp
113 | ```
114 |
115 | > Solution: disable your iGPU in the device manager or BIOS. See [#1272](https://github.com/vladmandic/automatic/issues/1272).
116 |
117 | ### DPCPP out of memory after a few generations
118 |
119 | Full error:
120 |
121 | ```txt
122 | DPCPP out of memory. Tried to allocate 186.00 MiB (GPU Time taken: 22.18s | GPU active 3754 MB reserved 3888 MB | System peak 3754 MB total 13005 MB
123 | ```
124 |
125 | > See [#8](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/issues/8). Now fixed by latest [SD.Next](https://github.com/vladmandic/automatic/commit/c3a4293f2227fe77b9ea908c99a1bda2aef43175).
126 | >
127 | > Solution: upgrade your SD.Next codebase by following [these instructions](getting-started.md#upgrade-sdnext-source-code).
128 |
129 |
--------------------------------------------------------------------------------
/docs/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting Started
2 |
3 | For Chinese users, there's also a [tutorial video](https://www.bilibili.com/video/BV1Ek4y1u7Z4/) posted on Bilibili.
4 |
5 | ## Requirements
6 |
7 | * Install [Docker Desktop](https://www.docker.com/) on your system.
  8 |   - If you are using Windows, be sure to use the WSL 2 based Docker engine, which is the default setting. You can go to `Settings -> General` in the Docker Desktop GUI to confirm that the `Use the WSL 2 based engine` option is checked (a command-line check is shown below).
9 | * Healthy network connections. :surfer:
10 |
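
If you prefer the command line, one way to double-check that Docker is running on the WSL 2 (Linux) backend is to query `docker info` from a host terminal:

```bash
# Should report a Linux OS type when the WSL 2 based engine is active.
docker info --format '{{.OperatingSystem}} / {{.OSType}}'
```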
11 | ## Launch the docker container with the remote image
12 |
13 |
14 |
15 | ### **Windows**
16 |
17 | ```powershell
18 | docker run -it `
19 | --device /dev/dxg `
20 | -v /usr/lib/wsl:/usr/lib/wsl `
21 | -v $home\docker-mount\sd-webui:/sd-webui `
22 | -v deps:/deps `
23 | -v huggingface:/root/.cache/huggingface `
24 | -p 7860:7860 `
25 | --name sd-server `
26 | nuullll/ipex-arc-sd:latest
27 | ```
28 |
29 | ### **Linux**
30 |
31 | ```bash
32 | docker run -it \
33 | --device /dev/dri \
34 | -v ~/docker-mount/sd-webui:/sd-webui \
35 | -v deps:/deps \
36 | -v huggingface:/root/.cache/huggingface \
37 | -p 7860:7860 \
38 | --name sd-server \
39 | nuullll/ipex-arc-sd:latest
40 | ```
41 |
42 |
43 |
44 | In case you are not familiar with docker, let's see what is going on with this long magical command:
45 |
46 | - [`docker run`](https://docs.docker.com/engine/reference/commandline/run/) creates and runs **a new container** from an image.
 47 | - The last argument `nuullll/ipex-arc-sd:latest` specifies the image, in the format `<image_name>:<tag>`. If the image doesn't exist on your local machine, docker will try to pull it from the [DockerHub remote registry](https://hub.docker.com/r/nuullll/ipex-arc-sd).
48 | - `--name sd-server` assigns a meaningful name (e.g. `sd-server`) to the newly created container. This option is useful but not mandatory.
49 | - `-it` will let you launch the container with an interactive command line. This is highly recommended since we may need to monitor the Web UI status via the command line log output.
 50 | - On Linux, `--device /dev/dri` is required to enable container access to your GPUs. On Windows, `--device /dev/dxg` and `-v /usr/lib/wsl:/usr/lib/wsl` are both required to enable container access to your GPUs. See [wslg samples](https://github.com/microsoft/wslg/blob/main/samples/container/Containers.md#containerized-applications-access-to-the-vgpu) for details.
 51 | - `-v <host_dir>:/sd-webui` specifies a directory on the host to be [bind-mounted](https://docs.docker.com/storage/bind-mounts/) to the `/sd-webui` directory inside the container. When you launch the container for the first time, you should specify an **empty or non-existent** directory on the host as `<host_dir>`, so that the container can pull the [SD.Next](https://github.com/vladmandic/automatic) source code into the corresponding directory. If you want to launch another container (e.g. [overriding the docker entrypoint](https://docs.docker.com/engine/reference/run/#entrypoint-default-command-to-execute-at-runtime)) that shares the initialized Web UI folder, you should specify the same `<host_dir>`.
 52 | - `-v <volume_name>:/deps` specifies a [volume](https://docs.docker.com/storage/volumes/) managed by the docker engine (e.g. a volume named `deps`), to be mounted as the `/deps` directory inside the container. `/deps` is configured as the python virtual environment root directory (see [`Dockerfile: ENV venv_dir`](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/blob/main/Dockerfile)) and stores all dynamic python dependencies (e.g. packages needed by Web UI extensions) required after the Web UI launches. You can mount the `deps` volume to multiple containers so that those dynamic dependencies are downloaded and installed only once. This is useful for users who want to run containers with different Web UI arguments (e.g. `--debug`), and for those who build local docker images (a quick way to inspect these volumes is shown right after this list).
 53 | - `-p <host_port>:7860` specifies the [published port](https://docs.docker.com/network/). The Web UI running on port 7860 inside the container will be forwarded to `http://localhost:<host_port>` on your host system.
54 |
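
To see what the docker engine created for the command above, you can list and inspect the named volumes from a host terminal (the names `deps` and `huggingface` come from the examples above):

```bash
# Named volumes created by the run command above.
docker volume ls
docker volume inspect deps huggingface
```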
55 | ### Expected result
56 |
57 |
58 |
59 | #### **Pulling the image from DockerHub**
60 |
61 | This may take some time, but it's a one-time setup unless you want to upgrade the image to a newer version. Take a coffee break!
62 |
63 | ```txt
64 | docker run -it `
65 | >> --device /dev/dxg `
66 | >> -v /usr/lib/wsl:/usr/lib/wsl `
67 | >> -v $home\docker-mount\sd-webui:/sd-webui `
68 | >> -v deps:/deps `
69 | >> -v huggingface:/root/.cache/huggingface `
70 | >> -p 7860:7860 `
71 | >> --name sd-server `
72 | >> nuullll/ipex-arc-sd:latest
73 | Unable to find image 'nuullll/ipex-arc-sd:latest' locally
74 | latest: Pulling from nuullll/ipex-arc-sd
75 | 6b851dcae6ca: Already exists
76 | 2614e0cfd126: Pull complete
77 | b7f3f70e6e79: Downloading [==========> ] 133.2MB/632.5MB
78 | 855ff6ba44ef: Download complete
79 | 63a57d250c21: Download complete
80 | 27dc2936b164: Download complete
81 | f5b66ca8a170: Download complete
82 | 7a2fe23e57b6: Download complete
83 | b88d41ac9d88: Download complete
84 | ea324f812f19: Download complete
85 | 0e31e3e1e212: Download complete
86 | 93d440be2069: Downloading [========================================> ] 68.26MB/83.51MB
87 | 08aa679dcc94: Download complete
88 | 4315cf4ec169: Download complete
89 | 693801fa781b: Download complete
90 | ```
91 |
92 | You will see the following upon success:
93 |
94 | ```txt
95 | Status: Downloaded newer image for nuullll/ipex-arc-sd:latest
96 | ```
97 |
98 | #### **Cloning [SD.Next](https://github.com/vladmandic/automatic) source code**
99 |
100 | ```txt
101 | fatal: not a git repository (or any parent up to mount point /)
102 | Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
103 | Cloning into '.'...
104 | remote: Enumerating objects: 27569, done.
105 | remote: Counting objects: 100% (214/214), done.
106 | remote: Compressing objects: 100% (99/99), done.
107 | remote: Total 27569 (delta 129), reused 183 (delta 114), pack-reused 27355
108 | Receiving objects: 100% (27569/27569), 34.77 MiB | 3.35 MiB/s, done.
109 | Resolving deltas: 100% (19625/19625), done.
110 | Updating files: 100% (272/272), done.
111 | ```
112 |
113 | You may notice the fatal error on the first line: `fatal: not a git repository`. Don't worry, it's expected since we are bind-mounting an empty or non-existent host directory as the Web UI source code folder for the first time. The script will clone [SD.Next](https://github.com/vladmandic/automatic) into that folder.
114 |
115 | #### **Launching Web UI server**
116 |
117 | The Web UI will try to install python dependencies for the first time.
118 |
119 | All the python packages will be installed into the docker volume `deps` (as specified by the `-v deps:/deps` option) and the data is **persistent**. So each dependency will be downloaded and installed only once, unless you manually remove the `deps` volume or mount a new volume to the container.
120 |
121 | ```txt
122 | Create and activate python venv
123 | Launching launch.py...
124 | 06:37:21-673336 INFO Starting SD.Next
125 | 06:37:21-679480 INFO Python 3.10.6 on Linux
126 | 06:37:21-762667 INFO Version: 205b5164 Fri Jul 7 22:41:26 2023 +0300
127 | 06:37:21-822410 INFO Intel OneAPI Toolkit detected <<<<<------------ oneAPI environment baked in the image is detected!
128 | 06:37:21-825165 INFO Installing package: torch==1.13.0a0 torchvision==0.14.1a0
129 | intel_extension_for_pytorch==1.13.120+xpu -f https://developer.intel.com/ipex-whl-stable-xpu <<<<<------------ installing torch and ipex for Intel XPU!
130 | 06:39:59-352248 INFO Torch 1.13.0a0+gitb1dde16
131 | /deps/venv/lib/python3.10/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
132 | warn(f"Failed to load image Python extension: {e}") <<<<<------------ Warning is a warning
133 | 06:39:59-901569 INFO Torch backend: Intel IPEX 1.13.120+xpu <<<<<------------ IPEX XPU backend is used
134 | /bin/sh: 1: icpx: not found <<<<<------------ Don't worry. This is harmless
135 | 06:39:59-905688 INFO
136 | 06:39:59-908720 INFO Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 13005 Compute Units 512 <<<<<------------ your Arc GPU is detected! (0x56a0 is the device ID for Arc A770)
137 | 06:39:59-910747 INFO Installing package: tensorflow==2.12.0
138 | 06:42:27-193632 INFO Verifying requirements
139 | 06:42:27-199643 INFO Installing package: addict
140 | 06:42:31-869764 INFO Installing package: aenum
141 | 06:42:36-004535 INFO Installing package: aiohttp
142 | 06:42:49-502038 INFO Installing package: anyio
143 | 06:42:54-800093 INFO Installing package: appdirs
144 | 06:42:57-293693 INFO Installing package: astunparse
145 | 06:42:58-683720 INFO Installing package: bitsandbytes
146 | 06:43:32-072033 INFO Installing package: blendmodes
147 | 06:43:41-571773 INFO Installing package: clean-fid
148 | 06:43:45-510400 INFO Installing package: easydev
149 | 06:43:52-898981 INFO Installing package: extcolors
150 | 06:43:55-996995 INFO Installing package: facexlib
151 | 06:45:06-051191 INFO Installing package: filetype
152 | 06:45:08-795192 INFO Installing package: future
153 | 06:45:13-184420 INFO Installing package: gdown
154 | 06:45:18-665094 INFO Installing package: gfpgan
155 | ...
156 | 06:53:42-651633 INFO Installing repositories
157 | 06:53:42-661095 INFO Cloning repository: https://github.com/Stability-AI/stablediffusion.git
158 | 06:54:10-838284 INFO Cloning repository: https://github.com/CompVis/taming-transformers.git
159 | 06:55:29-401387 INFO Cloning repository: https://github.com/crowsonkb/k-diffusion.git
160 | 06:55:33-136352 INFO Cloning repository: https://github.com/sczhou/CodeFormer.git
161 | 06:55:46-026258 INFO Cloning repository: https://github.com/salesforce/BLIP.git
162 | 06:55:51-501455 INFO Installing submodules
163 | 07:20:37-262294 INFO Extension installed packages: sd-webui-agent-scheduler ['SQLAlchemy==2.0.18',
164 | 'greenlet==2.0.2']
165 | 07:21:17-718747 INFO Extension installed packages: sd-webui-controlnet ['lxml==4.9.3', 'reportlab==4.0.4',
166 | 'pycparser==2.21', 'portalocker==2.7.0', 'cffi==1.15.1', 'svglib==1.5.1', 'tinycss2==1.2.1',
167 | 'mediapipe==0.10.1', 'tabulate==0.9.0', 'cssselect2==0.7.0', 'webencodings==0.5.1',
168 | 'sounddevice==0.4.6', 'iopath==0.1.9', 'yacs==0.1.8', 'fvcore==0.1.5.post20221221']
169 | 07:21:17-825317 INFO Extensions enabled: ['a1111-sd-webui-lycoris', 'clip-interrogator-ext', 'LDSR', 'Lora',
170 | 'multidiffusion-upscaler-for-automatic1111', 'ScuNET', 'sd-dynamic-thresholding',
171 | 'sd-extension-system-info', 'sd-webui-agent-scheduler', 'sd-webui-controlnet',
172 | 'stable-diffusion-webui-images-browser', 'stable-diffusion-webui-rembg', 'SwinIR']
173 | 07:21:18-037617 INFO Extension preload: 0.2s /sd-webui/extensions-builtin
174 | 07:21:18-043483 INFO Extension preload: 0.0s /sd-webui/extensions
175 | 07:21:18-049065 INFO Server arguments: ['-f', '--use-ipex', '--listen']
176 | No module 'xformers'. Proceeding without it.
177 | 07:21:25-915862 INFO Libraries loaded
178 | 07:21:26-122879 INFO Using data path: /sd-webui
179 | 07:21:26-219703 INFO Available VAEs: /sd-webui/models/VAE 0
180 | 07:21:26-278684 INFO Available models: /sd-webui/models/Stable-diffusion 0
181 | ```
182 |
183 | #### **Downloading the default model**
184 |
185 | ```txt
186 | Download the default model? (y/N)
187 | ```
188 |
189 | When you see this prompt, input `y` if you need the default model.
190 |
191 | Or just copy your favorite models into `$home\docker-mount\sd-webui\models\Stable-diffusion`. Note that `$home\docker-mount\sd-webui` is the `<host_dir>` specified with `-v <host_dir>:/sd-webui`.
192 |
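
For example, on a Linux host using the default bind-mount path from the run command above, copying a checkpoint you have already downloaded would look like this (the file name below is just the SD 1.5 checkpoint mentioned elsewhere in this guide):

```bash
# Copy a model into the bind-mounted models folder; it shows up in the container immediately.
cp ~/Downloads/v1-5-pruned-emaonly.safetensors \
   ~/docker-mount/sd-webui/models/Stable-diffusion/
```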
193 | #### **All set!**
194 |
195 | Now open the Web UI in your favorite browser on the host!
196 |
197 | [http://localhost:7860/](http://localhost:7860/)
198 |
199 | Enjoy your Stable Diffusion journey! :fire:
200 |
201 | ```txt
202 | 07:42:49-061422 INFO ControlNet v1.1.227
203 | ControlNet preprocessor location: /sd-webui/extensions-builtin/sd-webui-controlnet/annotator/downloads
204 | 07:42:49-687272 INFO ControlNet v1.1.227
205 | 07:42:50-912721 INFO Loading UI theme: name=black-orange style=Auto
206 | Running on local URL: http://0.0.0.0:7860
207 | 07:42:53-611671 INFO Local URL: http://localhost:7860/
208 | 07:42:53-616045 INFO Initializing middleware
209 | 07:42:53-761013 INFO [AgentScheduler] Task queue is empty
210 | 07:42:53-762931 INFO [AgentScheduler] Registering APIs
211 | 07:42:53-837492 INFO Model metadata saved: /sd-webui/metadata.json 1
212 | 07:42:53-947057 WARNING Selected checkpoint not found: model.ckpt
213 | 07:42:54-048539 WARNING Loading fallback checkpoint: v1-5-pruned-emaonly.safetensors
214 | Loading weights: /sd-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors ━━━━━━━━━━━━━━━━━━ 0.0/4.3 GB -:--:--
215 | 07:42:55-568796 INFO Setting Torch parameters: dtype=torch.float16 vae=torch.float16 unet=torch.float16
216 | LatentDiffusion: Running in eps-prediction mode
217 | DiffusionWrapper has 859.52 M params.
218 | Downloading (…)olve/main/vocab.json: 100%|████████████████████████████████████████████| 961k/961k [00:01<00:00, 848kB/s]
219 | Downloading (…)olve/main/merges.txt: 100%|███████████████████████████████████████████| 525k/525k [00:00<00:00, 1.27MB/s]
220 | Downloading (…)cial_tokens_map.json: 100%|██████████████████████████████████████████████| 389/389 [00:00<00:00, 347kB/s]
221 | Downloading (…)okenizer_config.json: 100%|█████████████████████████████████████████████| 905/905 [00:00<00:00, 4.47MB/s]
222 | Downloading (…)lve/main/config.json: 100%|█████████████████████████████████████████| 4.52k/4.52k [00:00<00:00, 1.60MB/s]
223 | Calculating model hash: /sd-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors ━━━━━━━━━━━ 4.3/4.3 GB 0:00:00
224 | 07:43:25-547549 INFO Applying sub-quadratic cross attention optimization
225 | 07:43:25-718775 INFO Applied IPEX Optimize
226 | 07:43:26-115071 INFO Embeddings: loaded=0 skipped=0
227 | 07:43:26-119315 INFO Model loaded in 32.1s (load=0.9s config=0.6s create=5.1s hash=7.7s apply=16.2s vae=0.2s
228 | move=0.8s embeddings=0.6s)
229 | 07:43:26-235786 INFO Model load finished: {'ram': {'used': 7.73, 'total': 23.47}, 'gpu': {'used': 2.02, 'total':
230 | 12.7}, 'retries': 0, 'oom': 0} cached=0
231 | 07:43:26-415000 INFO Startup time: 46.2s (torch=1.3s gradio=0.9s libraries=2.4s vae=0.1s codeformer=0.6s
232 | scripts=4.3s onchange=1.0s ui=2.5s launch=0.2s app-started=0.2s checkpoint=32.6s)
233 | ```
234 |
235 |
236 |
237 | ## Access your SD.Next directory on the host machine
238 |
239 | Remember the `-v <host_dir>:/sd-webui` option of the previous `docker run` command? `<host_dir>` is a host directory that is synced with the `/sd-webui` directory inside the container.
240 |
241 | You can edit the content of `<host_dir>` (default: `$home\docker-mount\sd-webui`) from the host, and all changes will be reflected inside the container in real time.
242 |
243 | For example, you can copy your SD models into `<host_dir>\models\Stable-diffusion`, check generated images in `<host_dir>\outputs`, or even perform `git` operations from the host.
244 |
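
As a small example of the last point, you can query the SD.Next revision directly from the host (Linux default path shown; adjust it to your `<host_dir>`):

```bash
# Show the currently checked-out SD.Next commit from the host side.
git -C ~/docker-mount/sd-webui log --oneline -1
```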
245 | ## Manage your SD.Next containers
246 |
247 | ### Stop container
248 |
249 | Several ways:
250 |
251 | - Press `Ctrl + C` in the interactive command line.
252 | - Close the interactive command line directly.
253 | - Open Docker Desktop Dashboard, then go to the `Containers` tab and find your running container (e.g. named as `sd-server`) in the list. Click the "stop" :black_medium_square: button.
254 | - Quit Docker Desktop.
255 |
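You can also stop it from a host terminal with the docker CLI (assuming the container name `sd-server` used above):

```bash
docker stop sd-server
```
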
256 | ### Restart container
257 |
258 | After you stop the container, there are also several ways to restart it:
259 |
260 | - Open Docker Desktop Dashboard, then go to the `Containers` tab and find your container in the list. Click the "start" button. This will not open an interactive command line for you, but you can check logs by clicking the name of the container and go to the `Logs` tab.
261 | - Open a terminal on your host (e.g. PowerShell or bash) and execute `docker start -i <container_name>`.
262 |
263 | ### Launch your container with different Web UI arguments
264 |
265 | You need to create a new container to achieve this. As long as you specify the same `<host_dir>` and `<volume_name>` for the new container, the Web UI folder and python virtual environment will be reused, so the overhead of creating a new container is little to none.
266 |
267 | Just append your customized Web UI arguments **after the image name** (we are adding the `--debug --lowvram --no-half` options to the new container):
268 |
269 |
270 |
271 | #### **Windows**
272 |
273 | ```powershell
274 | docker run -it `
275 | --device /dev/dxg `
276 | -v /usr/lib/wsl:/usr/lib/wsl `
277 | -v $home\docker-mount\sd-webui:/sd-webui `
278 | -v deps:/deps `
279 | -v huggingface:/root/.cache/huggingface `
280 | -p 7860:7860 `
281 | --name customized-sd-server `
282 | nuullll/ipex-arc-sd:latest `
283 | --debug --lowvram --no-half
284 | ```
285 |
286 | #### **Linux**
287 |
288 | ```bash
289 | docker run -it \
290 | --device /dev/dri \
291 | -v ~/docker-mount/sd-webui:/sd-webui \
292 | -v deps:/deps \
293 | -v huggingface:/root/.cache/huggingface \
294 | -p 7860:7860 \
295 | --name customized-sd-server \
296 | nuullll/ipex-arc-sd:latest \
297 | --debug --lowvram --no-half
298 | ```
299 |
300 |
301 |
302 | You can give a different name to the new container as above, if you plan to use this container in the long run. If you are just debugging or doing some temporary experiments, you could replace `--name customized-sd-server` with `--rm` to tell docker to remove this container when it exits.
303 |
304 | ### Upgrade SD.Next source code
305 |
306 | We could use the technique above to upgrade the [SD.Next](https://github.com/vladmandic/automatic) source code to get the latest features (by specifying the `--upgrade` option):
307 |
308 |
309 |
310 | #### **Windows**
311 |
312 | ```powershell
313 | docker run -it `
314 | --device /dev/dxg `
315 | -v /usr/lib/wsl:/usr/lib/wsl `
316 | -v $home\docker-mount\sd-webui:/sd-webui `
317 | -v deps:/deps `
318 | -v huggingface:/root/.cache/huggingface `
319 | -p 7860:7860 `
320 | --rm `
321 | nuullll/ipex-arc-sd:latest --upgrade
322 | ```
323 |
324 | #### **Linux**
325 |
326 | ```bash
327 | docker run -it \
328 | --device /dev/dri \
329 | -v ~/docker-mount/sd-webui:/sd-webui \
330 | -v deps:/deps \
331 | -v huggingface:/root/.cache/huggingface \
332 | -p 7860:7860 \
333 | --rm \
334 | nuullll/ipex-arc-sd:latest --upgrade
335 | ```
336 |
337 |
338 |
339 | It makes sense to use `--rm` here, because we are actually updating the content of `<host_dir>`, and the changes will be reflected in our original container automatically.
340 |
341 | ### Open another terminal for a running container
342 |
343 | Sometimes you may want to check the status of a running container without stopping it.
344 |
345 | You could either open a terminal on the host and execute
346 |
347 | ```powershell/bash
348 | docker exec -it <container_name> bash
349 | ```
350 |
351 | or open Docker Desktop Dashboard, then go to the `Containers` tab, find your container and click the name, and finally go to the `Terminal` tab.
352 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
(docsify single-page HTML; the markup was stripped in this dump and only the page title "Document" survives)
--------------------------------------------------------------------------------
/docs/release-notes.md:
--------------------------------------------------------------------------------
1 | # Release Notes
2 |
3 | ## [v0.6](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.6/images/sha256-30bdf186bc21abbcbb1d59ee87b4a726af9aa93794543121caf58ba95f44caaa?context=explore) (latest)
4 |
5 | Compressed image size: 845.44 MB
6 |
7 | Major changes:
8 |
9 | - Uplift oneAPI to 2023.2 to support IPEX XPU 2.0
10 |
11 | The [image](https://hub.docker.com/r/nuullll/ipex-arc-sd) (Ubuntu 22.04 based) includes:
12 |
13 | - Intel oneAPI DPC++ runtime libs (2023.2.1) (Note: compiler executables are not included)
14 | - Intel oneAPI MKL runtime libs (2023.2.0)
15 | - Intel oneAPI compiler common tool sycl-ls (2023.2.1)
16 | - Intel Graphics driver (1.3.26241.33-647~22.04)
17 | - Basic python environment (3.10.6)
18 |
19 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)
20 |
21 | ## [v0.5](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.5/images/sha256-bb556a04a3ad6d331582ad1d64e79a123650fd43981d2bdd3c2e1f639bde818c?context=explore)
22 |
23 | Compressed image size: 831.44 MB
24 |
25 | Major changes:
26 |
27 | - Allow extension installation by default (`--insecure`)
28 | - Skip startup git operations by default (`--skip-git`)
29 | - Use faster offline git repo check
30 |
31 | The [image](https://hub.docker.com/r/nuullll/ipex-arc-sd) (Ubuntu 22.04 based) includes:
32 |
33 | - Intel oneAPI DPC++ runtime libs (2023.1) (Note: compiler executables are not included)
34 | - Intel oneAPI MKL runtime libs (2023.1)
35 | - Intel oneAPI compiler common tool sycl-ls (2023.1)
36 | - Intel Graphics driver (1.3.26241.21-647~22.04)
37 | - Basic python environment (3.10.6)
38 |
39 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)
40 |
41 | ## [v0.4](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.4/images/sha256-ca5ba4aab952e6afb3150865b33b03846cf38d1b512fbae575d3f54f7d38a829?context=explore)
42 |
43 | Compressed image size: 831.25 MB
44 |
45 | This release only includes one major change on top of [release v0.3](release-notes#v03):
46 |
47 | - Force 100% available VRAM size for compute runtime.
48 |
49 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)
50 |
 51 | For the A770, the VRAM size now increases from 13005 MB to 16256 MB.
52 |
53 | ```txt
 54 | v0.3 or earlier >>>>> Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 13005 Compute Units 512
55 | v0.4 >>>>>>>>>>>>>>>> Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 16256 Compute Units 512
56 | ```
57 |
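This override corresponds to the compute-runtime environment variables baked into the image (see the Dockerfile in this repository); setting them manually would look like the sketch below:

```bash
# Environment variables set in the Dockerfile to expose 100% of VRAM to compute-runtime.
export NEOReadDebugKeys=1
export ClDeviceGlobalMemSizeAvailablePercent=100
```
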
58 | ## [v0.3](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.3/images/sha256-accb961e63a14b92567e7c594ad5222fd4592a40b8e3c5a76310a70257b1f00e?context=explore)
59 |
60 | Compressed image size: 831.25 MB
61 |
62 | Important notes:
63 |
64 | * The severe VRAM leak problem of IPEX in WSL2 (https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/issues/8, https://github.com/intel/intel-extension-for-pytorch/issues/388) has been worked around in SD.Next https://github.com/vladmandic/automatic/commit/c3a4293f2227fe77b9ea908c99a1bda2aef43175. **If you are using WSL, please be sure to upgrade your SD.Next codebase by following [this instruction](getting-started.md#upgrade-sdnext-source-code).**
65 |
66 | Major changes:
67 |
68 | * Enable TCMalloc to mitigate the RAM leak problem in WSL.
69 | * Mount `/root/.cache/huggingface` to avoid re-downloading huggingface models.
70 |
71 | The image (Ubuntu 22.04 based) includes:
72 |
73 | - Intel oneAPI DPC++ runtime libs (2023.1) (Note: compiler executables are not included)
74 | - Intel oneAPI MKL runtime libs (2023.1)
75 | - Intel oneAPI compiler common tool sycl-ls (2023.1)
76 | - Intel Graphics driver (1.3.26241.21-647~22.04)
77 | - Basic python environment (3.10.6)
78 |
79 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)
80 |
81 | ## [v0.2](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.2/images/sha256-58f7c7ae5b837b427623472a23582c1b4ecbd49460d245ddcb533e721cb396db?context=explore)
82 |
83 | Compressed image size: 827.1 MB
84 |
85 | Major changes:
86 | - Removed IPEX and python packages required by SD.Next from the image to reduce size.
87 | - Removed other unnecessary compiler tools (except sycl-ls) to reduce size.
88 | - Uplift Intel Graphics driver to 1.3.26241.21-647~22.04.
89 |
90 | The image (Ubuntu 22.04 based) includes:
91 |
92 | - Intel oneAPI DPC++ runtime libs (2023.1) (Note: compiler executables are not included)
93 | - Intel oneAPI MKL runtime libs (2023.1)
94 | - Intel oneAPI compiler common tool sycl-ls (2023.1)
95 | - Intel Graphics driver (1.3.26241.21-647~22.04)
96 | - Basic python environment (3.10.6)
97 |
98 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4382)
99 |
100 | ## [v0.1](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.1/images/sha256-5c00e46920a396a2b1c69e5ad24218883ba205afe6d59ce153f12f684ef2c006)
101 |
102 | Compressed image size: 2.11 GB
103 |
104 | Initial release. The image (Ubuntu 22.04 based) includes:
105 |
106 | - Intel oneAPI DPC++ runtime libs (2023.1) (Note: compiler executables are not included)
107 | - Intel oneAPI MKL runtime libs (2023.1)
108 | - Intel oneAPI compiler common tool sycl-ls (2023.1)
109 | - Intel Graphics driver (1.3.25593.18-601~22.04)
110 | - Basic python environment (3.10.6)
111 | - IPEX (1.13.120+xpu) and python packages required by SD.Next requirements.txt
112 |
113 | Tested on Windows 11 22H2 22621.1848 with i9-13900 + Arc A770 (Windows driver: 31.0.101.4382)
114 |
--------------------------------------------------------------------------------
/docs/zh-cn/README.md:
--------------------------------------------------------------------------------
1 | # Stable Diffusion Web UI Docker for Arc GPUs
2 |
3 | > 一起释放英特尔锐炫显卡的AI能量 :fire:
4 |
5 | ## 这是什么
6 |
7 | 一个docker项目,将**英特尔锐炫显卡**运行Stable Diffusion应用所需要的所有环境需求进行容器化。
8 |
9 | 只需要超简单的配置,你就可以使用锐炫显卡生成超酷的图片!现在开始:[开始使用](/zh-cn/getting-started)
10 |
11 | ## Docker镜像发布
12 |
13 |
14 |
15 |
16 |
17 | [更新日志](/zh-cn/release-notes)
18 |
19 | ## Stable Diffusion Web UI
20 |
21 | 此镜像使用的Stable Diffusion Web UI版本为:[SD.Next](https://github.com/vladmandic/automatic) (或者叫 vladmandic/automatic).
22 |
23 | _注:[AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)不支持锐炫显卡,详见[#4690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/4690),[#6417](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/6417)。_
24 |
25 | ## SD.Next是如何支持锐炫显卡的
26 |
27 | 由于[SD.Next](https://github.com/vladmandic/automatic)使用AI框架PyTorch运行Stable Diffusion应用,而[英特尔PyTorch扩展 (IPEX)](https://github.com/intel/intel-extension-for-pytorch)提供了针对英特尔独立显卡的PyTorch加速。幸运的是,IPEX对**英特尔锐炫A系列显卡**也提供了[实验性支持](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html)!
28 |
29 | 要在IPEX的XPU后端上运行AI应用,需要对应用代码做一些小的改动,这些工作[SD.Next](https://github.com/vladmandic/automatic)的贡献者们已经搞定。[SD.Next](https://github.com/vladmandic/automatic)默认也会尝试安装IPEX。
30 |
31 | IPEX本身还对英特尔显卡驱动以及oneAPI基础套件有依赖。手动安装这些依赖项不仅费时,还容易出错(很难将所有需要的软件包安装为**正确的**版本)。
32 |
33 | **别担心,[这个docker镜像](https://hub.docker.com/r/nuullll/ipex-arc-sd)会帮你搞定这些复杂的问题。:hearts:**
34 |
35 | ## 社群
36 |
37 | - [Github讨论区](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/discussions)
38 | - :penguin: 群:558[零]74047
39 |
40 | ## 贡献者
41 |
42 |
43 |
44 |
45 |
--------------------------------------------------------------------------------
/docs/zh-cn/_sidebar.md:
--------------------------------------------------------------------------------
1 | * [首页](/zh-cn/ "Intel Arc显卡|Stable Diffusion Web UI容器")
2 | * [开始使用](/zh-cn/getting-started "开始使用SD.Next容器")
3 | * [常见问题](/zh-cn/faq "常见问题")
4 | * [更新日志](/zh-cn/release-notes "更新日志")
5 |
--------------------------------------------------------------------------------
/docs/zh-cn/faq.md:
--------------------------------------------------------------------------------
1 | # 常见问题
2 |
3 | ## 环境设置
4 |
5 | ### 从Github下载代码的网络问题
6 |
7 | 
8 |
9 | > 网络问题对中国用户很常见。。。
10 | >
11 | > 请在你的宿主机上设置一个稳定的代理,启用[TUN模式](https://docs.cfw.lbyczf.com/contents/tun.html)(可能还需要配合`系统代理`食用)再重试。
12 | >
13 | > 或者你可以在执行`docker run`的时候指定环境变量:
14 | >
15 | > ```powershell
16 | > docker run -it `
17 | > --device /dev/dxg `
18 | > -v /usr/lib/wsl:/usr/lib/wsl `
19 | > -v $home\docker-mount\sd-webui:/sd-webui `
20 | > -v deps:/deps `
21 | > -v huggingface:/root/.cache/huggingface `
22 | > -p 7860:7860 `
23 | > --rm `
 24 | > -e http_proxy=<proxy_host>:<proxy_port> `
 25 | > -e https_proxy=<proxy_host>:<proxy_port> `
26 | > nuullll/ipex-arc-sd:latest
27 | > ```
28 | >
 29 | > 比如,`-e http_proxy=http://192.168.1.2:7890`。在WSL2里`localhost`或者`127.0.0.1`可能不起作用,请把`<proxy_host>`替换为你宿主机的真实IP。
30 |
31 | ### 安装python包的网络问题
32 |
33 | > 网络问题对中国用户很常见。。。你可以尝试[上面](#从github下载代码的网络问题)的办法。
34 | >
35 | > 或者通过环境变量`PIP_EXTRA_INDEX_URL`给pip指定一个国内的镜像源(例如清华的TUNA):
36 | >
37 | > ```powershell
38 | > docker run -it `
39 | > --device /dev/dxg `
40 | > -v /usr/lib/wsl:/usr/lib/wsl `
41 | > -v $home\docker-mount\sd-webui:/sd-webui `
42 | > -v deps:/deps `
43 | > -v huggingface:/root/.cache/huggingface `
44 | > -p 7860:7860 `
45 | > --rm `
46 | > -e PIP_EXTRA_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple `
47 | > nuullll/ipex-arc-sd:latest
48 | > ```
49 |
50 | ### docker run: Ports are not available (端口不可用)
51 |
52 | 
53 |
54 | 完整错误信息:
55 |
56 | ```txt
57 | docker: Error response from daemon: Ports are not available: exposing port TCP 0.0.0.0:7860 -> 0.0.0.0: listen tcp 0.0.0.0:7860: bind: An attempt was made to access a socket in a way forbidden by its access permissions.
58 | ```
59 |
60 | > 我在windows特定的网络配置下遇到过这个问题。
61 | >
62 | > 解决方案([指引](https://github.com/docker/for-win/issues/9272#issuecomment-776225866)):
63 | > 1. 以**管理员**身份打开cmd/powershell。
64 | > 2. 执行`net stop winnat`。
65 | > 3. [重启容器](getting-started.md#重启容器)。
66 | > 4. 容器运行起来后,执行`net start winnat`。
67 |
68 | ### Killed (强制终止)没有其他提示
69 |
70 | 
71 |
 72 | > 通常是分配给WSL的内存不足导致的。Web UI最少需要7GB内存(估算的)才能运行基本功能。
73 | >
74 | > 默认情况下,宿主机的50%内存会分配给WSL。你可以[在容器中执行`free -m`](getting-started.md#在正在运行的容器中新开一个终端)确认一下。
75 | >
76 | > 解决方案([指引](https://learn.microsoft.com/en-us/answers/questions/1296124/how-to-increase-memory-and-cpu-limits-for-wsl2-win)):
77 | > 1. 在宿主机用户目录下创建一个名为`.wslconfig`的文本文件。
78 | > 2. 编辑`.wslconfig`并保存为如下内容(以16GB为例):
79 | >
80 | > ```.wslconfig
81 | > # Settings apply across all Linux distros running on WSL 2
82 | > [wsl2]
83 | >
84 | > # Limits VM memory to use no more than 16 GB, this can be set as whole numbers using GB or MB
85 | > memory=16GB
86 | > ```
87 |
88 | ## Web UI运行中
89 |
90 | ### 容器卡死
91 |
92 | > 你可能无法以正常的方式[终止容器](getting-started.md#终止容器)。
93 | >
94 | > 尝试右键点击系统托盘的Docker Desktop图标并点击`restart(重启)`。不行的话,重启电脑吧。:cold_sweat:
95 |
96 | ### URLError: [Errno 99] Cannot assign requested address(无法赋予请求的地址)
97 |
98 | 
99 |
100 | > 可能会在跑图的时候发生,原因是前端发送了太多查询生成图片进度的请求,但这个问题并不影响图片的生成。
101 | >
102 | > 解决方案:
103 | >
104 | > 编辑`<host_dir>\javascript\progressBar.js`:把函数`requestProgress()`的参数`once`默认值改为`true`。
105 |
106 | ### 生成图片时Aborted(被终止)
107 |
108 | 完整错误信息:
109 |
110 | ```txt
111 | Abort was called at 718 line in file:
112 | ../../neo/shared/source/os_interface/windows/wddm_memory_manager.cpp
113 | ```
114 |
115 | > 解决方案:在设备管理器或者BIOS中禁用核显。见[#1272](https://github.com/vladmandic/automatic/issues/1272)。
116 |
117 | ### 跑图若干次后DPCPP out of memory (内存耗尽)
118 |
119 | 完整错误信息:
120 |
121 | ```txt
122 | DPCPP out of memory. Tried to allocate 186.00 MiB (GPU Time taken: 22.18s | GPU active 3754 MB reserved 3888 MB | System peak 3754 MB total 13005 MB
123 | ```
124 |
125 | > 见[#8](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/issues/8)。最新的[SD.Next](https://github.com/vladmandic/automatic/commit/c3a4293f2227fe77b9ea908c99a1bda2aef43175)已经修复这个问题。
126 | >
127 | > 解决方案:根据[这里的指示](getting-started.md#更新sdnext源代码)将SD.Next代码更新。
128 |
129 |
--------------------------------------------------------------------------------
/docs/zh-cn/getting-started.md:
--------------------------------------------------------------------------------
1 | # 开始使用
2 |
3 | 中国用户看过来,这里还有一版[视频教程](https://www.bilibili.com/video/BV1Ek4y1u7Z4/)在B站。
4 |
5 | ## 前置需求
6 |
7 | * 在你的系统上安装[Docker Desktop](https://www.docker.com/)。
8 | - 如果你使用的是Windows,请确认使用的是默认的基于WSL2的docker引擎。你可以在Docker Desktop图形界面中前往`Setting(设置) -> General(通用)` 检查`Use the WSL 2 based engine`选项是否勾选。
9 | * 良好的(且科学的)网络连接。:surfer:
10 |
11 | ## 使用远程镜像启动docker容器
12 |
13 |
14 |
15 | ### **Windows**
16 |
17 | ```powershell
18 | docker run -it `
19 | --device /dev/dxg `
20 | -v /usr/lib/wsl:/usr/lib/wsl `
21 | -v $home\docker-mount\sd-webui:/sd-webui `
22 | -v deps:/deps `
23 | -v huggingface:/root/.cache/huggingface `
24 | -p 7860:7860 `
25 | --name sd-server `
26 | nuullll/ipex-arc-sd:latest
27 | ```
28 |
29 | ### **Linux**
30 |
31 | ```bash
32 | docker run -it \
33 | --device /dev/dri \
34 | -v ~/docker-mount/sd-webui:/sd-webui \
35 | -v deps:/deps \
36 | -v huggingface:/root/.cache/huggingface \
37 | -p 7860:7860 \
38 | --name sd-server \
39 | nuullll/ipex-arc-sd:latest
40 | ```
41 |
42 |
43 |
44 | 如果你对docker还不太熟悉,我们来看一下这一长串命令究竟在做什么:
45 |
46 | - [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)通过一个镜像来创建并运行**一个新容器**。
 47 | - 最后一个参数`nuullll/ipex-arc-sd:latest`指定了镜像,格式是`<image_name>:<tag>`。如果在本地没有找到这个镜像,docker会尝试从[DockerHub远程](https://hub.docker.com/r/nuullll/ipex-arc-sd)拉取。
48 | - `--name sd-server`给新创建的容器指定了一个有意义的名字(比如`sd-server`)。这个参数很有用但不是必须的。
49 | - `-it`会在容器启动后生成一个可交互的命令行。强烈建议使用这个选项,因为我们可能需要通过命令行的日志输出来监控Web UI的状态。
50 | - 在linux中,需要使用`--device /dev/dri`来授权容器访问你的显卡。在windows中,需要同时使用`--device /dev/dxg`和`-v /usr/lib/wsl:/usr/lib/wsl`来授权容器访问你的显卡。详见[wslg示例](https://github.com/microsoft/wslg/blob/main/samples/container/Containers.md#containerized-applications-access-to-the-vgpu)。
 51 | - `-v <host_dir>:/sd-webui`指定了需要被[绑定挂载](https://docs.docker.com/storage/bind-mounts/)到容器`/sd-webui`路径的宿主机目录。在第一次启动容器时,应当指定一个宿主机上的**空目录或不存在的目录**作为`<host_dir>`,使得容器能够将[SD.Next](https://github.com/vladmandic/automatic)的源代码下载到该目录。如果你想另起一个新容器(比如[覆盖镜像的entrypoint](https://docs.docker.com/engine/reference/run/#entrypoint-default-command-to-execute-at-runtime))同时共享着已经被其他容器初始化过的Web UI目录,应当指定相同的`<host_dir>`。
 52 | - `-v <volume_name>:/deps`指定了一个由docker引擎管理的[数据卷](https://docs.docker.com/storage/volumes/)(例如,名为`deps`的数据卷),并将其挂载到容器内部的`/deps`目录。`/deps`会成为python虚拟环境的根目录(见[`Dockerfile: ENV venv_dir`](https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/blob/main/Dockerfile)),用于存放Web UI启动后需要用到的动态python依赖项(例如Web UI扩展所需要的包)。你可以将`deps`数据卷挂载到多个容器,那么这些动态依赖项只需要下载和安装一次。对于想以不同Web UI参数(比如`--debug`)运行容器的用户以及本地创建docker镜像的开发者来说,这很有用。
 53 | - `-p <host_port>:7860`指定了[端口映射](https://docs.docker.com/network/)。容器内运行于7860端口的Web UI服务会被转发至宿主机上的`http://localhost:<host_port>`。
54 |
55 | ### 预期结果
56 |
57 |
58 |
59 | #### **从DockerHub拉取镜像**
60 |
61 | 这可能会花一些时间,但这是一劳永逸的设置,除非你以后想将镜像升级到新版本。喝杯咖啡休息一下!
62 |
63 | ```txt
64 | docker run -it `
65 | >> --device /dev/dxg `
66 | >> -v /usr/lib/wsl:/usr/lib/wsl `
67 | >> -v $home\docker-mount\sd-webui:/sd-webui `
68 | >> -v deps:/deps `
69 | >> -v huggingface:/root/.cache/huggingface `
70 | >> -p 7860:7860 `
71 | >> --name sd-server `
72 | >> nuullll/ipex-arc-sd:latest
73 | Unable to find image 'nuullll/ipex-arc-sd:latest' locally
74 | latest: Pulling from nuullll/ipex-arc-sd
75 | 6b851dcae6ca: Already exists
76 | 2614e0cfd126: Pull complete
77 | b7f3f70e6e79: Downloading [==========> ] 133.2MB/632.5MB
78 | 855ff6ba44ef: Download complete
79 | 63a57d250c21: Download complete
80 | 27dc2936b164: Download complete
81 | f5b66ca8a170: Download complete
82 | 7a2fe23e57b6: Download complete
83 | b88d41ac9d88: Download complete
84 | ea324f812f19: Download complete
85 | 0e31e3e1e212: Download complete
86 | 93d440be2069: Downloading [========================================> ] 68.26MB/83.51MB
87 | 08aa679dcc94: Download complete
88 | 4315cf4ec169: Download complete
89 | 693801fa781b: Download complete
90 | ```
91 |
92 | 成功后你会看到以下信息:
93 |
94 | ```txt
95 | Status: Downloaded newer image for nuullll/ipex-arc-sd:latest
96 | ```
97 |
98 | #### **下载[SD.Next](https://github.com/vladmandic/automatic)源代码**
99 |
100 | ```txt
101 | fatal: not a git repository (or any parent up to mount point /)
102 | Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).
103 | Cloning into '.'...
104 | remote: Enumerating objects: 27569, done.
105 | remote: Counting objects: 100% (214/214), done.
106 | remote: Compressing objects: 100% (99/99), done.
107 | remote: Total 27569 (delta 129), reused 183 (delta 114), pack-reused 27355
108 | Receiving objects: 100% (27569/27569), 34.77 MiB | 3.35 MiB/s, done.
109 | Resolving deltas: 100% (19625/19625), done.
110 | Updating files: 100% (272/272), done.
111 | ```
112 |
113 | 你可能注意到了第一行有个严重错误`fatal: not a git repository`。没事,这是正常现象。因为我们首次将一个宿主机上的空目录或不存在的目录挂载作为Web UI的目录。脚本会向该目录中下载[SD.Next](https://github.com/vladmandic/automatic)的代码。
114 |
115 | #### **启动Web UI服务**
116 |
117 | Web UI会首次尝试安装python依赖项。
118 |
119 | 所有python包都会安装到docker数据卷`deps`(通过`-v deps:/deps`选项指定的),数据卷里的数据是**持久化的**。因此每个依赖项只会被下载安装一次,除非你手动移除`deps`数据卷或者给容器挂载一个新的数据卷。
120 |
121 | ```txt
122 | Create and activate python venv
123 | Launching launch.py...
124 | 06:37:21-673336 INFO Starting SD.Next
125 | 06:37:21-679480 INFO Python 3.10.6 on Linux
126 | 06:37:21-762667 INFO Version: 205b5164 Fri Jul 7 22:41:26 2023 +0300
127 | 06:37:21-822410 INFO Intel OneAPI Toolkit detected <<<<<------------ 成功检测到了镜像中的oneAPI环境!
128 | 06:37:21-825165 INFO Installing package: torch==1.13.0a0 torchvision==0.14.1a0
129 | intel_extension_for_pytorch==1.13.120+xpu -f https://developer.intel.com/ipex-whl-stable-xpu <<<<<------------ 为英特尔XPU安装torch和ipex!
130 | 06:39:59-352248 INFO Torch 1.13.0a0+gitb1dde16
131 | /deps/venv/lib/python3.10/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension:
132 | warn(f"Failed to load image Python extension: {e}") <<<<<------------ 警告只是警告
133 | 06:39:59-901569 INFO Torch backend: Intel IPEX 1.13.120+xpu <<<<<------------ 使用了IPEX XPU后端
134 | /bin/sh: 1: icpx: not found <<<<<------------ 没事,这个不用管
135 | 06:39:59-905688 INFO
136 | 06:39:59-908720 INFO Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 13005 Compute Units 512 <<<<<------------ 检测到了你的锐炫显卡!(0x56a0是Arc A770的设备识别码)
137 | 06:39:59-910747 INFO Installing package: tensorflow==2.12.0
138 | 06:42:27-193632 INFO Verifying requirements
139 | 06:42:27-199643 INFO Installing package: addict
140 | 06:42:31-869764 INFO Installing package: aenum
141 | 06:42:36-004535 INFO Installing package: aiohttp
142 | 06:42:49-502038 INFO Installing package: anyio
143 | 06:42:54-800093 INFO Installing package: appdirs
144 | 06:42:57-293693 INFO Installing package: astunparse
145 | 06:42:58-683720 INFO Installing package: bitsandbytes
146 | 06:43:32-072033 INFO Installing package: blendmodes
147 | 06:43:41-571773 INFO Installing package: clean-fid
148 | 06:43:45-510400 INFO Installing package: easydev
149 | 06:43:52-898981 INFO Installing package: extcolors
150 | 06:43:55-996995 INFO Installing package: facexlib
151 | 06:45:06-051191 INFO Installing package: filetype
152 | 06:45:08-795192 INFO Installing package: future
153 | 06:45:13-184420 INFO Installing package: gdown
154 | 06:45:18-665094 INFO Installing package: gfpgan
155 | ...
156 | 06:53:42-651633 INFO Installing repositories
157 | 06:53:42-661095 INFO Cloning repository: https://github.com/Stability-AI/stablediffusion.git
158 | 06:54:10-838284 INFO Cloning repository: https://github.com/CompVis/taming-transformers.git
159 | 06:55:29-401387 INFO Cloning repository: https://github.com/crowsonkb/k-diffusion.git
160 | 06:55:33-136352 INFO Cloning repository: https://github.com/sczhou/CodeFormer.git
161 | 06:55:46-026258 INFO Cloning repository: https://github.com/salesforce/BLIP.git
162 | 06:55:51-501455 INFO Installing submodules
163 | 07:20:37-262294 INFO Extension installed packages: sd-webui-agent-scheduler ['SQLAlchemy==2.0.18',
164 | 'greenlet==2.0.2']
165 | 07:21:17-718747 INFO Extension installed packages: sd-webui-controlnet ['lxml==4.9.3', 'reportlab==4.0.4',
166 | 'pycparser==2.21', 'portalocker==2.7.0', 'cffi==1.15.1', 'svglib==1.5.1', 'tinycss2==1.2.1',
167 | 'mediapipe==0.10.1', 'tabulate==0.9.0', 'cssselect2==0.7.0', 'webencodings==0.5.1',
168 | 'sounddevice==0.4.6', 'iopath==0.1.9', 'yacs==0.1.8', 'fvcore==0.1.5.post20221221']
169 | 07:21:17-825317 INFO Extensions enabled: ['a1111-sd-webui-lycoris', 'clip-interrogator-ext', 'LDSR', 'Lora',
170 | 'multidiffusion-upscaler-for-automatic1111', 'ScuNET', 'sd-dynamic-thresholding',
171 | 'sd-extension-system-info', 'sd-webui-agent-scheduler', 'sd-webui-controlnet',
172 | 'stable-diffusion-webui-images-browser', 'stable-diffusion-webui-rembg', 'SwinIR']
173 | 07:21:18-037617 INFO Extension preload: 0.2s /sd-webui/extensions-builtin
174 | 07:21:18-043483 INFO Extension preload: 0.0s /sd-webui/extensions
175 | 07:21:18-049065 INFO Server arguments: ['-f', '--use-ipex', '--listen']
176 | No module 'xformers'. Proceeding without it.
177 | 07:21:25-915862 INFO Libraries loaded
178 | 07:21:26-122879 INFO Using data path: /sd-webui
179 | 07:21:26-219703 INFO Available VAEs: /sd-webui/models/VAE 0
180 | 07:21:26-278684 INFO Available models: /sd-webui/models/Stable-diffusion 0
181 | ```
182 |
183 | #### **下载默认模型**
184 |
185 | ```txt
186 | Download the default model? (y/N)
187 | ```
188 |
189 | 看到这行提示时,如果你需要默认模型,输入`y`即可。
190 |
191 | 或者直接把你喜欢的模型复制到`$home\docker-mount\sd-webui\models\Stable-diffusion`路径下。注意`$home\docker-mount\sd-webui`就是`-v <宿主机目录>:/sd-webui`选项中指定的`<宿主机目录>`。
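
例如,在Linux宿主机上可以这样复制(仅为示意;假设模型文件为`~/Downloads/my-model.safetensors`,挂载目录为后文Linux示例中的`~/docker-mount/sd-webui`):

```bash
cp ~/Downloads/my-model.safetensors ~/docker-mount/sd-webui/models/Stable-diffusion/
```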
192 |
193 | #### **全部搞定!**
194 |
195 | 现在可以用你最爱的浏览器在宿主机上打开Web UI啦!
196 |
197 | [http://localhost:7860/](http://localhost:7860/)
198 |
199 | 享受你的Stable Diffusion之旅!:fire:
200 |
201 | ```txt
202 | 07:42:49-061422 INFO ControlNet v1.1.227
203 | ControlNet preprocessor location: /sd-webui/extensions-builtin/sd-webui-controlnet/annotator/downloads
204 | 07:42:49-687272 INFO ControlNet v1.1.227
205 | 07:42:50-912721 INFO Loading UI theme: name=black-orange style=Auto
206 | Running on local URL: http://0.0.0.0:7860
207 | 07:42:53-611671 INFO Local URL: http://localhost:7860/
208 | 07:42:53-616045 INFO Initializing middleware
209 | 07:42:53-761013 INFO [AgentScheduler] Task queue is empty
210 | 07:42:53-762931 INFO [AgentScheduler] Registering APIs
211 | 07:42:53-837492 INFO Model metadata saved: /sd-webui/metadata.json 1
212 | 07:42:53-947057 WARNING Selected checkpoint not found: model.ckpt
213 | 07:42:54-048539 WARNING Loading fallback checkpoint: v1-5-pruned-emaonly.safetensors
214 | Loading weights: /sd-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors ━━━━━━━━━━━━━━━━━━ 0.0/4.3 GB -:--:--
215 | 07:42:55-568796 INFO Setting Torch parameters: dtype=torch.float16 vae=torch.float16 unet=torch.float16
216 | LatentDiffusion: Running in eps-prediction mode
217 | DiffusionWrapper has 859.52 M params.
218 | Downloading (…)olve/main/vocab.json: 100%|████████████████████████████████████████████| 961k/961k [00:01<00:00, 848kB/s]
219 | Downloading (…)olve/main/merges.txt: 100%|███████████████████████████████████████████| 525k/525k [00:00<00:00, 1.27MB/s]
220 | Downloading (…)cial_tokens_map.json: 100%|██████████████████████████████████████████████| 389/389 [00:00<00:00, 347kB/s]
221 | Downloading (…)okenizer_config.json: 100%|█████████████████████████████████████████████| 905/905 [00:00<00:00, 4.47MB/s]
222 | Downloading (…)lve/main/config.json: 100%|█████████████████████████████████████████| 4.52k/4.52k [00:00<00:00, 1.60MB/s]
223 | Calculating model hash: /sd-webui/models/Stable-diffusion/v1-5-pruned-emaonly.safetensors ━━━━━━━━━━━ 4.3/4.3 GB 0:00:00
224 | 07:43:25-547549 INFO Applying sub-quadratic cross attention optimization
225 | 07:43:25-718775 INFO Applied IPEX Optimize
226 | 07:43:26-115071 INFO Embeddings: loaded=0 skipped=0
227 | 07:43:26-119315 INFO Model loaded in 32.1s (load=0.9s config=0.6s create=5.1s hash=7.7s apply=16.2s vae=0.2s
228 | move=0.8s embeddings=0.6s)
229 | 07:43:26-235786 INFO Model load finished: {'ram': {'used': 7.73, 'total': 23.47}, 'gpu': {'used': 2.02, 'total':
230 | 12.7}, 'retries': 0, 'oom': 0} cached=0
231 | 07:43:26-415000 INFO Startup time: 46.2s (torch=1.3s gradio=0.9s libraries=2.4s vae=0.1s codeformer=0.6s
232 | scripts=4.3s onchange=1.0s ui=2.5s launch=0.2s app-started=0.2s checkpoint=32.6s)
233 | ```
234 |
235 |
236 |
237 | ## 在宿主机上访问SD.Next目录
238 |
239 | 还记得之前`docker run`命令中指定的`-v <宿主机目录>:/sd-webui`选项吧?`<宿主机目录>`就是宿主机上的一个目录,且与容器内的`/sd-webui`目录保持同步。
240 |
241 | 你可以在宿主机上修改`<宿主机目录>`(默认:`$home\docker-mount\sd-webui`)中的内容,所有改动都会实时反映到容器中。
242 |
243 | 比如,你可以将SD模型复制到`<宿主机目录>\models\Stable-diffusion`,在`<宿主机目录>\outputs`目录中查看生成的图片,甚至直接在宿主机上执行`git`相关操作(见下方示例)。
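
例如,下面的操作可以直接在宿主机上查看Web UI目录的git状态(示意;以Linux路径`~/docker-mount/sd-webui`为例,Windows下对应`$home\docker-mount\sd-webui`):

```bash
cd ~/docker-mount/sd-webui
git status            # 查看工作区状态
git log -1 --oneline  # 查看当前SD.Next对应的提交
```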
244 |
245 | ## 管理SD.Next容器
246 |
247 | ### 终止容器
248 |
249 | 几个方法:
250 |
251 | - 在可交互命令行中按`Ctrl + C`。
252 | - 直接关掉可交互命令行。
253 | - 打开Docker Desktop面板,点击`Containers(容器)`标签找到正在运行的容器(比如名为`sd-server`)。点击停止:black_medium_square:按钮。
254 | - 退出Docker Desktop。
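
此外,也可以直接在宿主机的终端里停止容器(示意,假设容器名为`sd-server`):

```bash
docker stop sd-server
```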
255 |
256 | ### 重启容器
257 |
258 | 关掉容器之后,有几种方法可以重启它:
259 |
260 | - 打开Docker Desktop面板,点击`Containers(容器)`标签找到你想运行的容器(比如名为`sd-server`)。点击启动按钮。这不会帮你打开一个可交互的命令行界面,但你可以通过点击容器名字然后进入`Logs(日志)`标签查看日志。
261 | - 在宿主机上打开一个终端(比如PowerShell或者bash),执行`docker start -i <容器名>`,如下面的示例。
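
例如(假设容器名为`sd-server`):

```bash
docker start -i sd-server
```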
262 |
263 | ### 用不同Web UI参数启动容器
264 |
265 | 需要创建一个新容器来实现这个操作。只要你给新容器指定的`<宿主机目录>`和数据卷`<数据卷名>`与之前的容器相同,新容器就可以复用Web UI目录和python虚拟环境,这种情况下新建一个容器的开销小到可以忽略。
266 |
267 | 直接将你定制的Web UI参数加在**镜像名字之后**(比如给新容器加上`--debug --lowvram --no-half`参数):
268 |
269 |
270 |
271 | #### **Windows**
272 |
273 | ```powershell
274 | docker run -it `
275 | --device /dev/dxg `
276 | -v /usr/lib/wsl:/usr/lib/wsl `
277 | -v $home\docker-mount\sd-webui:/sd-webui `
278 | -v deps:/deps `
279 | -v huggingface:/root/.cache/huggingface `
280 | -p 7860:7860 `
281 | --name customized-sd-server `
282 | nuullll/ipex-arc-sd:latest `
283 | --debug --lowvram --no-half
284 | ```
285 |
286 | #### **Linux**
287 |
288 | ```bash
289 | docker run -it \
290 | --device /dev/dri \
291 | -v ~/docker-mount/sd-webui:/sd-webui \
292 | -v deps:/deps \
293 | -v huggingface:/root/.cache/huggingface \
294 | -p 7860:7860 \
295 | --name customized-sd-server \
296 | nuullll/ipex-arc-sd:latest \
297 | --debug --lowvram --no-half
298 | ```
299 |
300 |
301 |
302 | 如果你打算长期使用这个新容器,可以像上面一样给它指定一个新名字。如果你只是在调试或者做一些临时实验,可以把`--name customized-sd-server`替换成`--rm`,让docker在这个容器退出时自动将其删除。
303 |
304 | ### 更新SD.Next源代码
305 |
306 | 我们可以用上面的技巧(给容器传入`--upgrade`选项)更新[SD.Next](https://github.com/vladmandic/automatic)的代码,以体验最新的功能:
307 |
308 |
309 |
310 | #### **Windows**
311 |
312 | ```powershell
313 | docker run -it `
314 | --device /dev/dxg `
315 | -v /usr/lib/wsl:/usr/lib/wsl `
316 | -v $home\docker-mount\sd-webui:/sd-webui `
317 | -v deps:/deps `
318 | -v huggingface:/root/.cache/huggingface `
319 | -p 7860:7860 `
320 | --rm `
321 | nuullll/ipex-arc-sd:latest --upgrade
322 | ```
323 |
324 | #### **Linux**
325 |
326 | ```bash
327 | docker run -it \
328 | --device /dev/dri \
329 | -v ~/docker-mount/sd-webui:/sd-webui \
330 | -v deps:/deps \
331 | -v huggingface:/root/.cache/huggingface \
332 | -p 7860:7860 \
333 | --rm \
334 | nuullll/ipex-arc-sd:latest --upgrade
335 | ```
336 |
337 |
338 |
339 | 这里使用`--rm`很合理,因为我们实际上是在更新`<宿主机目录>`里的内容,而这些改动会自动反映到我们原来的容器中。
340 |
341 | ### 在正在运行的容器中新开一个终端
342 |
343 | 有时我们可能想在不关掉正在运行的容器的前提下,查看一下它的状态。
344 |
345 | 你可以在宿主机上打开终端,执行
346 |
347 | ```bash
348 | docker exec -it <容器名> bash
349 | ```
350 |
351 | 或者打开Docker Desktop面板前往`Containers(容器)`页面,找到你的容器并点击容器名字,最后点击`Terminal(终端)`标签。
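
也可以不进入交互式shell,直接在运行中的容器里执行单条命令来查看状态(示意,假设容器名为`sd-server`):

```bash
docker exec -it sd-server ls /sd-webui/models/Stable-diffusion   # 查看容器内可见的模型文件
docker exec -it sd-server du -sh /deps/venv                      # 查看python虚拟环境占用的空间
```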
352 |
--------------------------------------------------------------------------------
/docs/zh-cn/release-notes.md:
--------------------------------------------------------------------------------
1 | # 更新日志
2 |
3 | ## [v0.6](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.6/images/sha256-30bdf186bc21abbcbb1d59ee87b4a726af9aa93794543121caf58ba95f44caaa?context=explore) (最新)
4 |
5 | 压缩后镜像大小:845.44 MB
6 |
7 | 主要变动:
8 |
9 | - oneAPI版本升级至2023.2,支持IPEX XPU 2.0
10 |
11 | 镜像(基于Ubuntu 22.04)中包含:
12 |
13 | - Intel oneAPI DPC++运行时库 (2023.2.1) (注:不包含编译器可执行文件)
14 | - Intel oneAPI MKL运行时库 (2023.2.0)
15 | - Intel oneAPI 编译器通用工具sycl-ls (2023.2.1)
16 | - Intel显卡驱动 (1.3.26241.33-647~22.04)
17 | - 基础python环境 (3.10.6)
18 |
19 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)环境下测试通过。
20 |
21 | ## [v0.5](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.5/images/sha256-bb556a04a3ad6d331582ad1d64e79a123650fd43981d2bdd3c2e1f639bde818c?context=explore)
22 |
23 | 压缩后镜像大小:831.44 MB
24 |
25 | Major changes:
26 |
27 | - Allow extension installation by default (`--insecure`)
28 | - Skip startup git operations by default (`--skip-git`)
29 | - Use faster offline git repo check
30 |
31 | 镜像(基于Ubuntu 22.04)中包含:
32 |
33 | - Intel oneAPI DPC++运行时库 (2023.1) (注:不包含编译器可执行文件)
34 | - Intel oneAPI MKL运行时库 (2023.1)
35 | - Intel oneAPI 编译器通用工具sycl-ls (2023.1)
36 | - Intel显卡驱动 (1.3.26241.21-647~22.04)
37 | - 基础python环境 (3.10.6)
38 |
39 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)环境下测试通过。
40 |
41 | ## [v0.4](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.4/images/sha256-ca5ba4aab952e6afb3150865b33b03846cf38d1b512fbae575d3f54f7d38a829?context=explore)
42 |
43 | 压缩后镜像大小:831.25 MB
44 |
45 | 本次更新仅在[v0.3版本](release-notes#v03)基础上做了一个重要改动:
46 |
47 | - 强制将compute runtime中可用显存大小设为100%。
48 |
49 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)环境下测试通过。
50 |
51 | 对于A770,现在Web UI中检测到的显存大小从13005MB增加到了16256MB。
52 |
53 | ```txt
54 | v0.3及以前 >>>> Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 13005 Compute Units 512
55 | v0.4 >>>>>>>>> Torch detected GPU: Intel(R) Graphics [0x56a0] VRAM 16256 Compute Units 512
56 | ```
57 |
58 | ## [v0.3](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.3/images/sha256-accb961e63a14b92567e7c594ad5222fd4592a40b8e3c5a76310a70257b1f00e?context=explore)
59 |
60 | 压缩后镜像大小:831.25 MB
61 |
62 | 重要通知:
63 |
64 | * IPEX在WSL2中存在严重的显存泄漏问题( https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/issues/8, https://github.com/intel/intel-extension-for-pytorch/issues/388 ),现已在SD.Next https://github.com/vladmandic/automatic/commit/c3a4293f2227fe77b9ea908c99a1bda2aef43175 中临时解决。**如果你使用的是WSL,请根据[这里的指示](getting-started.md#更新sdnext源代码)将SD.Next代码更新。**
65 |
66 | 主要变动:
67 |
68 | * 使用TCMalloc来缓解WSL中的内存泄漏问题。
69 | * 挂载`/root/.cache/huggingface`数据卷,避免重复下载huggingface模型。
70 |
71 | 镜像(基于Ubuntu 22.04)中包含:
72 |
73 | - Intel oneAPI DPC++运行时库 (2023.1) (注:不包含编译器可执行文件)
74 | - Intel oneAPI MKL运行时库 (2023.1)
75 | - Intel oneAPI 编译器通用工具sycl-ls (2023.1)
76 | - Intel显卡驱动(1.3.26241.21-647~22.04)
77 | - 基础python环境 (3.10.6)
78 |
79 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4502)环境下测试通过。
80 |
81 | ## [v0.2](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.2/images/sha256-58f7c7ae5b837b427623472a23582c1b4ecbd49460d245ddcb533e721cb396db?context=explore)
82 |
83 | 压缩后镜像大小:827.1 MB
84 |
85 | 主要变动:
86 | - 将IPEX和SD.Next所需要的python包从镜像中移除来减小镜像大小。
87 | - 将除了sycl-ls之外没用的编译器工具移除来减小镜像大小。
88 | - 将Intel显卡驱动版本升级为1.3.26241.21-647~22.04。
89 |
90 | 镜像(基于Ubuntu 22.04)中包含:
91 |
92 | - Intel oneAPI DPC++运行时库 (2023.1) (注:不包含编译器可执行文件)
93 | - Intel oneAPI MKL运行时库 (2023.1)
94 | - Intel oneAPI 编译器通用工具sycl-ls (2023.1)
95 | - Intel显卡驱动(1.3.26241.21-647~22.04)
96 | - 基础python环境 (3.10.6)
97 |
98 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4382)环境下测试通过。
99 |
100 | ## [v0.1](https://hub.docker.com/layers/nuullll/ipex-arc-sd/v0.1/images/sha256-5c00e46920a396a2b1c69e5ad24218883ba205afe6d59ce153f12f684ef2c006)
101 |
102 | 压缩后镜像大小:2.11 GB
103 |
104 | 初版。镜像(基于Ubuntu 22.04)中包含:
105 |
106 | - Intel oneAPI DPC++运行时库 (2023.1) (注:不包含编译器可执行文件)
107 | - Intel oneAPI MKL运行时库 (2023.1)
108 | - Intel oneAPI 编译器通用工具sycl-ls (2023.1)
109 | - Intel显卡驱动 (1.3.25593.18-601~22.04)
110 | - 基础python环境 (3.10.6)
111 | - IPEX (1.13.120+xpu)以及SD.Next requirements.txt指定的python包
112 |
113 | 在Windows 11 22H2 22621.1848 + i9-13900 + Arc A770 (Windows driver: 31.0.101.4382)环境下测试通过。
114 |
--------------------------------------------------------------------------------
/scripts/BOM/neg.glob:
--------------------------------------------------------------------------------
1 | *.whl
2 | *.log
3 | __pycache__
4 | *.lock
5 | cache
6 | venv
7 | outputs
8 |
9 | # Large models
10 | models/Stable-diffusion/*
11 |
--------------------------------------------------------------------------------
/scripts/BOM/pos.glob:
--------------------------------------------------------------------------------
1 | extensions/a1111-sd-webui-tagcomplete
2 | extensions/adetailer
3 | extensions/sd-webui-bilingual-localization
4 | extensions/sd-webui-prompt-all-in-one
5 | extensions/ultimate-upscale-for-automatic1111
6 |
7 | localizations/I18N_sd-webui-zh_CN.json
8 |
9 | webui-user.sh
10 | config.json
11 |
12 | # original configs
13 | configs/v1-inference.yaml
14 | configs/v2-inference-v.yaml
15 | configs/sd_xl_base.yaml
16 | configs/sd_xl_refiner.yaml
17 |
18 | # clip configs
19 | openai/clip-vit-large-patch14/* 7
20 | laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/* 9
21 |
22 | # models
23 | ## adetailer models
24 | models/adetailer/*.pt 10
25 |
26 | models/CLIP/*.pt
27 | models/Codeformer/*.pth
28 | models/embeddings/* 3
29 | models/ESRGAN/*.pth 2
30 | models/karlo/*
31 | models/LDSR/* 2
32 | models/RealESRGAN/*.pth 2
33 | models/ScuNET/*
34 | models/Stable-diffusion
35 | models/SwinIR/*
36 | models/torch_deepdanbooru/*
37 | models/VAE/* 2
38 | models/VAE-approx/*
39 |
--------------------------------------------------------------------------------
/scripts/check_bom.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import argparse
3 | import os
4 | import shutil
5 | import sys
6 | import glob
7 | import json
8 |
9 | cwd = os.path.dirname(os.path.abspath(sys.argv[0]))
10 |
11 | logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
12 |
13 | parser = argparse.ArgumentParser(description='Export BOM file for Arc-SDNext-Installer')
14 | parser.add_argument('repo_dir', help='SD.Next directory')
15 |
16 | args = parser.parse_args()
17 |
18 | def check_must_have_files():
19 | glob_file = os.path.join(cwd, 'BOM/pos.glob')
20 | with open(glob_file, 'r') as f:
21 | lines = f.readlines()
22 | ok = True
23 | for line in lines:
24 | line = line.strip()
25 | if line and not line.startswith('#'):
26 | fields = line.split()
27 | count = 1 if len(fields) == 1 else int(fields[1])
28 | files = glob.glob(fields[0], root_dir=args.repo_dir)
29 | if len(files) != count:
30 | logging.error(f"Expect {count} files: {fields[0]}, got {len(files)}")
31 | ok = False
32 | return ok
33 |
34 | def check_must_not_have_files(auto_remove=False):
35 | glob_file = os.path.join(cwd, 'BOM/neg.glob')
36 | with open(glob_file, 'r') as f:
37 | lines = f.readlines()
38 | ok = True
39 | for line in lines:
40 | line = line.strip()
41 | if line and not line.startswith('#'):
42 | files = glob.glob(line, root_dir=args.repo_dir)
43 | if len(files):
44 | logging.error(f"Don't expect file: {line} -> {files}")
45 | if auto_remove:
46 | for f in files:
47 | p = os.path.join(args.repo_dir, f)
48 | if os.path.isdir(p):
49 | shutil.rmtree(p)
50 | else:
51 | os.remove(p)
52 | logging.info(f"Auto removed!")
53 | ok = False
54 | return ok
55 |
56 | def check_config():
57 | config_file = os.path.join(args.repo_dir, 'config.json')
58 | with open(config_file, 'r') as f:
59 | config = json.load(f)
60 | ok = True
61 | if "sd_vae" not in config["quicksettings_list"]:
62 | logging.error("sd_vae not in quicksettings")
63 | ok = False
64 | if "sd_model_refiner" not in config["quicksettings_list"]:
65 | logging.error("sd_model_refiner not in quicksettings")
66 | ok = False
67 | if "Euler" not in config["show_samplers"]:
68 | logging.error("Euler not in show_samplers")
69 | ok = False
70 | if config["gradio_theme"] != "gradio/default":
71 | logging.error("gradio_theme is not gradio/default")
72 | ok = False
73 | if config["bilingual_localization_file"] != "I18N_sd-webui-zh_CN":
74 | logging.error("bilingual_localization_file is not I18N_sd-webui-zh_CN")
75 | ok = False
76 | if "sd_model_checkpoint" in config or "sd_checkpoint_hash" in config:
77 | logging.error("Default model not removed")
78 | ok = False
79 | return ok
80 |
81 | def check_webui_user():
82 | user_sh = os.path.join(args.repo_dir, 'webui-user.sh')
83 | with open(user_sh, 'r') as f:
84 | lines = f.readlines()
85 | for line in lines:
86 | line = line.strip()
87 | if line and not line.startswith('#'):
88 | if line.startswith('export COMMANDLINE_ARGS='):
89 | expected_options = ['--skip-git', '--no-download', '--ad-no-huggingface']
90 | for opt in expected_options:
91 | if opt not in line:
92 | logging.error(f"Missing preset COMMANDLINE_ARGS: {opt}")
93 | return False
94 | return True
95 | return False
96 |
97 | if __name__ == '__main__':
98 |     ok = check_must_have_files()
99 |     ok = check_must_not_have_files(auto_remove=True) and ok
100 |     ok = check_config() and ok
101 |     ok = check_webui_user() and ok
102 |     sys.exit(0 if ok else 1)  # non-zero exit code if any check failed
103 | 
--------------------------------------------------------------------------------
/scripts/install.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | setlocal enableextensions enabledelayedexpansion
3 | chcp 65001 >NUL
4 |
5 | @set delim===================================================
6 | set cwd=%~dp0
7 |
8 | ::Language selection
9 | :LANG_SEL
10 | cls
11 | echo %delim%
12 | echo 请选择此安装脚本的语言:
13 | echo Please select the language for this installation script:
14 | echo %delim%
15 | echo [1] 简体中文
16 | echo [2] English
17 | echo %delim%
18 | echo.
19 | set /p LANG=输入1或2, 然后按回车 (Type 1 or 2 then press ENTER):
20 | if "!LANG!" == "" set LANG=1
21 | if not "!LANG!" == "1" if not "!LANG!" == "2" goto :LANG_SEL
22 |
23 | ::Check System status
24 | echo.
25 | echo %delim%
26 | call :Print "正在检测系统环境 ..." , "Checking system environment ..."
27 |
28 | wmic path win32_VideoController get name | findstr "Arc"
29 | wmic path win32_VideoController get name | findstr "UHD" >NUL
30 | if %ERRORLEVEL% EQU 0 call :PrintRed "核显独显同时打开可能在运行Stable Diffusion时引起错误 建议在设备管理器或BIOS中禁用核显!" , "iGPU + dGPU combination may cause errors when running Stable Diffusion. Suggest to disable iGPU in device manager or BIOS"
31 |
32 | ::Update WSL
33 | call :Print "正在更新WSL ..." , "Updating WSL ..."
34 | wsl --update
35 | if not %ERRORLEVEL% EQU 0 call :PrintRed "WSL更新失败 后续安装Docker Desktop可能失败!", "Failed to update WSL. Docker Desktop installation may fail"
36 |
37 | ::Get total RAM size
38 | set /a UNIT_MB=1024*1024
39 | set /a UNIT_MB1=UNIT_MB/100
40 | for /f "skip=1" %%p in ('wmic computersystem get TotalPhysicalMemory') do (
41 | set TOTAL_RAM=%%p
42 | goto :RAM_DONE
43 | )
44 |
45 | :RAM_DONE
46 | set TOTAL_RAM1=%TOTAL_RAM:~0,-2%
47 | set /a TOTAL_RAM_MB=TOTAL_RAM1/UNIT_MB1
48 | set /a WSL_RAM=TOTAL_RAM_MB/2
49 | call :Print "WSL的默认内存限制为系统物理内存[!TOTAL_RAM_MB! MB]的一半[!WSL_RAM! MB]" , "Default memory limit for WSL is half [!WSL_RAM! MB] of the total physical RAM size [!TOTAL_RAM_MB! MB]"
50 | ::Check minimum WSL RAM requirement
51 | set /a WSL_RAM_REQ=13000
52 | set WSL_CONFIG=%USERPROFILE%\.wslconfig
53 | if !WSL_RAM! LEQ !WSL_RAM_REQ! (
54 | call :PrintRed "WSL的默认内存限制太低. 可能无法正常运行Stable Diffusion Web UI." , "Default memory limit for WSL is too low to run Stable Diffusion Web UI."
55 | if !TOTAL_RAM_MB! LSS !WSL_RAM_REQ! (
56 | set /a WSL_RAM_REQ=TOTAL_RAM_MB
57 | call :PrintRed "警告: 系统内存太低. 可能无法正常运行." , "WARNING: Total physical RAM size is low. Might not work."
58 | )
59 |
60 | call :PrintRed "是否要自动将WSL内存上限设置为 !WSL_RAM_REQ! MB?" , "Do you want to automatically adjust WSL memory limit to !WSL_RAM_REQ! MB?"
61 | set /p AUTO_WSL_RAM=输入y或N, 然后按回车 ^(Type y or N then press ENTER^):
62 | if "!AUTO_WSL_RAM!" == "n" goto :CHECK_DD
63 | if "!AUTO_WSL_RAM!" == "N" goto :CHECK_DD
64 |
65 | if exist "!WSL_CONFIG!" (
66 | call :PrintRed "!WSL_CONFIG!已存在. 请手动修改后继续." , "!WSL_CONFIG! already exist. Please edit it manually."
67 | echo.
68 | call :Print ".wslconfig 示例" , "Example .wslconfig"
69 | echo.
70 | echo ^[wsl2^]
71 | echo memory=!WSL_RAM_REQ!MB
72 | echo.
73 | ) else (
74 | (
75 | echo ^[wsl2^]
76 | echo memory=!WSL_RAM_REQ!MB
77 | )>!WSL_CONFIG!
78 | call :Print "已成功生成文件 !WSL_CONFIG!" , "Successfully generated !WSL_CONFIG!"
79 | )
80 | start "" notepad.exe "!WSL_CONFIG!"
81 |
82 | echo.
83 | call :PrintRed "需要重新启动WSL服务才能生效. 是否现在重启WSL服务?" , "WSL backend needs to be restarted to allow the new config to take effect. Do you want to restart WSL backend now?"
84 |
85 | set /p SHUTDOWN_WSL=输入y或N, 然后按回车 ^(Type y or N then press ENTER^):
86 | if "!SHUTDOWN_WSL!" == "n" goto :CHECK_DD
87 | if "!SHUTDOWN_WSL!" == "N" goto :CHECK_DD
88 |
89 | wsl --shutdown
90 | )
91 | echo.
92 | call :PrintGreen "系统环境检查完成" , "Done checking system environment"
93 | call :Print "按回车继续" , "Press ENTER to continue"
94 | pause >NUL
95 |
96 | ::Install Docker Desktop
97 | :CHECK_DD
98 | echo.
99 | echo %delim%
100 | call :PrintRed "是否要自动安装/更新Docker Desktop?" , "Do you want to install/upgrade Docker Desktop automatically?"
101 | set /p INSTALL_DD=输入y或N, 然后按回车 (Type y or N then press ENTER):
102 | if "!INSTALL_DD!" == "n" goto :LAUNCH_DD
103 | if "!INSTALL_DD!" == "N" goto :LAUNCH_DD
104 |
105 | call :Print "正在检查Docker Desktop环境 ..." , "Checking Docker Desktop environment ..."
106 |
107 | winget install docker.dockerdesktop
108 | echo.
109 | if %ERRORLEVEL% EQU 0 (
110 | call :PrintGreen "成功安装Docker Desktop" , "Installed Docker Desktop successfully"
111 | call :PrintRed "请重启系统后重新执行此脚本" , "Please re-execute this script after REBOOTing your system"
112 | call :PrintRed "按任意键退出脚本" , "Press any key to exit"
113 | pause >NUL
114 | exit
115 | ) else if %ERRORLEVEL% EQU -1978335189 (
116 | call :PrintGreen "Docker Desktop已是最新版" , "Docker Desktop is up-to-date"
117 | )
118 |
119 | ::Launch Docker Desktop
120 | :LAUNCH_DD
121 | echo.
122 | echo %delim%
123 | set dd_exe=C:\Program Files\Docker\Docker\Docker Desktop.exe
124 | call :Print "正在启动Docker Desktop ..." , "Launching Docker Desktop ..."
125 | if exist "!dd_exe!" (
126 | start "" "!dd_exe!"
127 | ) else (
128 | call :PrintRed "未在默认安装位置找到Docker Desktop程序. 请手动启动Docker Desktop" , "Didn't find Docker Desktop executable in the default location. please launch Docker Desktop manually"
129 | )
130 | call :PrintRed "请在图形界面弹出且docker engine启动完成后. 按任意键继续" , "When GUI pops up and docker engine is up. THEN press any key to continue"
131 | pause >NUL
132 |
133 | ::Check docker daemon status
134 | docker image ls 2>&1 | findstr "error during connect" >NUL
135 | if %ERRORLEVEL% EQU 0 (
136 | call :PrintRed "Docker服务未启动. 请等待Docker Desktop完全启动后再继续" , "Docker daemon is not running. Please wait until Docker Desktop is fully launched."
137 | goto :LAUNCH_DD
138 | )
139 |
140 | ::Import image
141 | ::Will automatically skip if the image 'nuullll/ipex-arc-sd:latest' already exists
142 | echo.
143 | echo %delim%
144 | call :Print "正在从image.tar导入nuullll/ipex-arc-sd镜像 ..." , "Importing docker image: nuullll/ipex-arc-sd from image.tar ..."
145 | docker load --input %cwd%\image.tar
146 | if not %ERRORLEVEL% EQU 0 (
147 | call :PrintRed "导入本地镜像失败" , "Failed to import the local image"
148 | call :PrintRed "请确认脚本路径 %cwd% 不包含空格!" , "Please make sure there's no 'space' character in path %cwd%"
149 | call :Print "按任意键退出" , "Press any key to exit"
150 | pause >NUL
151 | exit
152 | )
153 | call :PrintGreen "成功导入镜像" , "Successfully imported the image"
154 |
155 | ::Import volumes
156 | echo.
157 | echo %delim%
158 | call :Print "正在导入数据卷 ..." , "Importing volumes ..."
159 |
160 | ::Check existence first
161 | docker volume ls -f name=deps-%%IMAGE_VER%% | findstr "local" >NUL
162 | if %ERRORLEVEL% EQU 0 (
163 | call :PrintRed "警告: 本地数据卷 deps-%%IMAGE_VER%% 已存在" , "WARNING: local volumes deps-%%IMAGE_VER%% already exist, the content would be overwritten"
164 | call :PrintRed "是否要覆盖本地数据卷?" , "Do you want to overwrite local volumes?"
165 | set /p OVERWRITE_VOLUME=输入y或N, 然后按回车 ^(Type y or N then press ENTER^):
166 | if "!OVERWRITE_VOLUME!" == "n" goto :WEBUI
167 | if "!OVERWRITE_VOLUME!" == "N" goto :WEBUI
168 | docker volume rm deps-%%IMAGE_VER%% -f >NUL
169 | )
170 |
171 | call :Print "解压中... 可能需要几分钟" , "Extracting ... may take several minutes"
172 | docker run --rm ^
173 | -v %cwd%:/backup ^
174 | -v deps-%%IMAGE_VER%%:/deps ^
175 | -v huggingface:/root/.cache/huggingface ^
176 | --entrypoint bash ^
177 | nuullll/ipex-arc-sd:v%%IMAGE_VER%% ^
178 | -c "cd /deps && tar xf /backup/volume-deps.tar --totals --strip 1 && cd /root/.cache/huggingface && tar xf /backup/volume-huggingface.tar --totals --strip 3"
179 | if not %ERRORLEVEL% EQU 0 (
180 | call :PrintRed "导入本地数据卷失败" , "Failed to import local volumes"
181 | call :Print "按任意键退出" , "Press any key to exit"
182 | pause >NUL
183 | exit
184 | )
185 | call :PrintGreen "成功导入数据卷" , "Successfully imported volumes"
186 |
187 | ::Setup Web UI folder
188 | :WEBUI
189 | echo.
190 | echo %delim%
191 | call :Print "正在复制Web UI目录 ..." , "Copying Web UI folder ..."
192 | call :PrintRed "想把Web UI目录安装到哪里? [用于放置Web UI源代码 模型文件 输出图片等]" , "Where do you want to install Stable Diffusion Web UI [to place the Web UI source code | models | outputs etc]?"
193 | call :PrintGreen "默认路径为 %USERPROFILE%\docker-mount\sd-webui" , "Default path %USERPROFILE%\docker-mount\sd-webui"
194 | call :PrintRed "请勿输入带空格的路径" , "Don't use path with spaces"
195 | set /p loc=输入安装路径 (Input install path):
196 | if "!loc!" == "" set loc=%USERPROFILE%\docker-mount\sd-webui
197 | echo !loc! | findstr : >NUL
198 | if not %ERRORLEVEL% EQU 0 (
199 | call :PrintRed "请输入正确的绝对路径. 例如 D:\ARC-AI" , "Please specify a correct absolute path. For example D:\ARC-AI"
200 | goto :WEBUI
201 | )
202 |
203 | ::Check folder status
204 | if exist !loc! (
205 | call :PrintRed "警告: 指定路径已存在 !loc!", "WARNING: Specified path already exists !loc!"
206 | call :PrintRed "是否要用新文件覆盖原有同名文件?" , "Do you want to overwrite conflicting files?"
207 | set /p FORCE_EXTRACT=输入y或N, 然后按回车 ^(Type y or N then press ENTER^):
208 | if "!FORCE_EXTRACT!" == "n" goto :CONFIRM
209 | if "!FORCE_EXTRACT!" == "N" goto :CONFIRM
210 | )
211 | goto :EXTRACT
212 |
213 | :CONFIRM
214 | call :PrintRed "是否要跳过Web UI解压 [输入N重新选择解压路径]" , "Skip extracting Web UI foler [Input N to choose install path again]"
215 | set /p SKIP_EXTRACT=输入y或N, 然后按回车 ^(Type y or N then press ENTER^):
216 | if "!SKIP_EXTRACT!" == "n" goto :WEBUI
217 | if "!SKIP_EXTRACT!" == "N" goto :WEBUI
218 | goto :WARMUP
219 |
220 | :EXTRACT
221 | echo.
222 | echo %delim%
223 | call :Print "正在将Web UI复制至 !loc!" , "Copying Web UI to !loc!"
224 | robocopy %cwd%\webui !loc! /e /mt /z
225 | echo.
226 | call :PrintGreen "复制成功: !loc!" , "Copied to: !loc!"
227 |
228 | echo.
229 | echo %delim%
230 | call :PrintGreen "现在请把你自己下载的SD大模型文件手动复制到 !loc!\models\Stable-diffusion" , "Now you can manually copy your model files into corresponding locations under !loc! [e.g. !loc!\models\Stable-diffusion]"
231 | call :Print "按回车继续" , "Press ENTER to continue"
232 | pause >NUL
233 |
234 | ::Warmup Web UI
235 | :WARMUP
236 | echo.
237 | echo %delim%
238 | call :Print "正在初始化Web UI ... 可能需要几分钟" , "Initializing Web UI ... may take several minutes"
239 |
240 | for /f "tokens=*" %%g in ('docker run -d ^
241 | --device /dev/dxg ^
242 | -v /usr/lib/wsl:/usr/lib/wsl ^
243 | -v !loc!:/sd-webui ^
244 | -v deps-%%IMAGE_VER%%:/deps ^
245 | -v huggingface:/root/.cache/huggingface ^
246 | -p 7860:7860 ^
247 | --rm ^
248 | nuullll/ipex-arc-sd:v%%IMAGE_VER%% ^
249 | --no-hashing') do (set container_id=%%g)
250 |
251 | set /a i=0
252 | :WARMUP_CHECK
253 | findstr "Startup time" "!loc!\sdnext.log" >NUL 2>NUL
254 | if %ERRORLEVEL% EQU 0 (
255 | docker stop !container_id! >NUL
256 | call :PrintGreen "初始化成功" , "Initialized successfully"
257 | goto :WARMUP_DONE
258 | )
259 | timeout /t 10 /nobreak >NUL
260 | set /a i=i+10
261 | if !i! GEQ 120 (
262 | docker stop !container_id! >NUL
263 | call :PrintRed "警告: Web UI初始化超时 [120秒]" , "WARNING: Web UI initialization timeout [120s]"
264 | call :PrintRed "请查看日志 !loc!\sdnext.log" , "Please check the log !loc!\sdnext.log"
265 | goto :WARMUP_DONE
266 | )
267 | goto :WARMUP_CHECK
268 | :WARMUP_DONE
269 |
270 | ::Launch Web UI for the first time
271 | set container_name=sd-server-%%CONTAINER_VER%%
272 | :LAUNCH_WEBUI
273 | echo.
274 | echo %delim%
275 | call :Print "正在创建容器: !container_name! ..." , "Creating container: !container_name! ..."
276 | ::Check name first
277 | docker container ls -a -f name=!container_name! | more | findstr /rc:" !container_name!$">NUL
278 | if %ERRORLEVEL% EQU 0 (
279 | call :PrintRed "已有其他容器占用了'!container_name!'这个名字" , "The name '!container_name!' is used by the other container"
280 | set /p container_name=请指定一个新名字 ^(Please specify a new name^):
281 | if "!container_name!" == "sd-server" set container_name=new-sd-server
282 | goto :LAUNCH_WEBUI
283 | )
284 |
285 | call :PrintGreen "正在启动Web UI [容器: !container_name!]..." , "Launching Web UI [container: !container_name!]..."
286 | docker run -it ^
287 | --device /dev/dxg ^
288 | -v /usr/lib/wsl:/usr/lib/wsl ^
289 | -v !loc!:/sd-webui ^
290 | -v deps-%%IMAGE_VER%%:/deps ^
291 | -v huggingface:/root/.cache/huggingface ^
292 | -p 7860:7860 ^
293 | --name !container_name! ^
294 | nuullll/ipex-arc-sd:v%%IMAGE_VER%%
295 | exit
296 |
297 | :Print
298 | if !LANG! == 1 (echo %~1 ) else echo %~2
299 | exit /b 0
300 |
301 | :PrintRed
302 | if !LANG! == 1 (
303 | powershell write-host -fore Red %~1
304 | ) else (
305 | powershell write-host -fore Red %~2
306 | )
307 | exit /b 0
308 |
309 | :PrintGreen
310 | if !LANG! == 1 (
311 | powershell write-host -fore Green %~1
312 | ) else (
313 | powershell write-host -fore Green %~2
314 | )
315 | exit /b 0
316 |
--------------------------------------------------------------------------------
/scripts/package.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging
3 | import os
4 | from pathlib import Path
5 | import shutil
6 | import sys
7 | from subprocess import Popen, PIPE
8 |
9 | cwd = os.path.dirname(os.path.abspath(sys.argv[0]))
10 |
11 | logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
12 |
13 | parser = argparse.ArgumentParser(description='Packaging script for Arc-SDNext-Installer')
14 | parser.add_argument('output_dir', help='Directory to store the packaged artifacts')
15 | parser.add_argument('--version', type=str, required=True, help='Version of packaged installer: major.minor.patch')
16 | parser.add_argument('--stage', nargs='*', help='Packaging stages: image | volume | webui | final | all')
17 | parser.add_argument('--webui-dir', help='The web ui directory')
18 |
19 | args = parser.parse_args()
20 |
21 | def get_image_version():
22 | major, minor, _ = args.version.split('.')
23 | return f"{major}.{minor}"
24 |
25 | def run(cmd):
26 | with Popen(cmd, stdout=PIPE, stderr=PIPE) as p:
27 | o = p.stdout.read().decode('utf-8').strip()
28 | e = p.stderr.read().decode('utf-8').strip()
29 | if o:
30 | logging.info(o)
31 | if e:
32 | logging.error(e)
33 |
34 |         if p.wait() != 0:  # p.returncode is None until the process is waited on
35 | raise Exception(f"Failed to run {' '.join(cmd)}")
36 |
37 | def export_image():
38 | output_file = os.path.join(args.output_dir, 'image.tar')
39 | image_name = f'nuullll/ipex-arc-sd:v{get_image_version()}'
40 | logging.info(f'Exporting image {image_name} to {output_file}')
41 |
42 | run(['docker', 'save', '--output', output_file, image_name])
43 |
44 | def export_volumes():
45 | logging.info(f'Exporting volumes')
46 | run(['docker', 'pull', 'ubuntu'])
47 | run(['docker', 'run', '--rm', '-v', f'deps-{get_image_version()}:/deps', '-v', f'{args.output_dir}:/backup', 'ubuntu', 'tar', 'cvf', '/backup/volume-deps.tar', '/deps'])
48 | run(['docker', 'run', '--rm', '-v', 'huggingface:/root/.cache/huggingface', '-v', f'{args.output_dir}:/backup', 'ubuntu', 'tar', 'cvf', '/backup/volume-huggingface.tar', '/root/.cache/huggingface'])
49 |
50 | def copy_webui():
51 | dst = os.path.join(args.output_dir, 'webui')
52 | logging.info(f'Copying {args.webui_dir} to {dst}')
53 | shutil.copytree(args.webui_dir, dst)
54 |
55 | def get_dir_size(dir):
56 | return sum(f.stat().st_size for f in Path(dir).glob('**/*') if f.is_file()) / (1024*1024*1024)
57 |
58 | def copy_install_script():
59 | src = os.path.join(cwd, 'install.bat')
60 | dst = os.path.join(args.output_dir, 'install.bat')
61 | shutil.copy2(src, dst)
62 | with open(dst, 'r', encoding='utf-8') as f:
63 | s = f.read()
64 | s = s.replace('%%IMAGE_VER%%', get_image_version())
65 | s = s.replace('%%CONTAINER_VER%%', args.version)
66 | with open(dst, 'w', encoding='utf-8') as f:
67 | f.write(s)
68 |
69 | src = os.path.join(cwd, '使用说明.txt')
70 | dst = os.path.join(args.output_dir, '使用说明.txt')
71 | shutil.copy2(src, dst)
72 |
73 | if __name__ == '__main__':
74 | major, minor, patch = args.version.split('.')
75 | os.makedirs(args.output_dir, exist_ok=True)
76 |
77 |     stages = args.stage or []  # no --stage given: nothing to do
78 | if 'image' in stages or 'all' in stages:
79 | export_image()
80 |
81 | if 'webui' in stages or 'all' in stages:
82 | copy_webui()
83 |
84 | if 'volume' in stages or 'all' in stages:
85 | export_volumes()
86 |
87 | if 'final' in stages or 'all' in stages:
88 | copy_install_script()
89 |
--------------------------------------------------------------------------------
/scripts/使用说明.txt:
--------------------------------------------------------------------------------
1 | # AI绘画 Intel Arc -- Windows一键安装包
2 |
3 | - 安装包版本: v0.7.1
4 | - SD.Next版本: c98a4ddb
5 | - 镜像版本: v0.7
6 | - oneAPI版本: 2023.2
7 | - IPEX版本: 2.0.110
8 | - 作者: Nuullll
9 | - GitHub: https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu
10 | - bilibili: https://space.bilibili.com/18233791
11 | - QQ交流群: 558074047
12 |
13 | ## 更新日志
14 |
15 | 0.7.1
16 | - 修复SDXL离线配置,现在Base+Refiner的基础功能可以在离线环境下运行 [AR: diffusers PR]
17 | - 修复Tiled VAE [AR: SD.Next PR]
18 |
19 | 0.7.0
20 | - 优化docker内存管理
21 | - SD.Next SDXL支持(diffusers)
22 |
23 | ## 使用方法
24 |
25 | 将Arc-AI绘画-安装包.zip解压到任意位置, 然后双击install.bat (Windows批处理文件) 开始安装.
26 | 安装过程中请根据提示以及系统环境做相应选择.
27 |
28 | ## 会自动安装哪些东西?
29 |
30 | 1. WSL 和 Docker Desktop (需要网络连接)
31 | 如果你已经安装过, 可以选择跳过.
32 |
33 | 2. ipex-arc-sd Docker镜像 v0.7 (离线解压)
34 | 本镜像基于WSL2的Ubuntu系统, 可以直接在Windows系统中运行.
35 | 镜像中集成了在Intel Arc上运行Stable Diffusion Web UI所需要的oneAPI环境.
36 | ** 使用的SD Web UI为 SD.Next: https://github.com/vladmandic/automatic **
37 |
38 | 3. Docker镜像需要挂载的数据卷 (离线解压)
39 | 数据卷中主要包含运行SD.Next所必需的python依赖包, 包括Intel Extension for PyTorch (IPEX)等.
40 |
41 | 4. 打包的SD.Next源代码 (离线解压)
42 | 除了SD.Next本身自带的内置插件, 作者还额外打包了:
43 | -- 中英双语对照插件
44 | -- 提示词大礼包(prompt-all-in-one)插件
45 | -- tag自动补全(tagcomplete)插件
46 | -- Adetailer, Ultimate upscaler
47 | -- 常用功能需要的体积较小的模型 (SD大模型等烦请自行下载):
48 | ---- CLIP ViT-L-14.pt
49 | ---- codeformer-v0.1.0.pth
50 | ---- ESRGAN_4x.pth
51 | ---- LDSR model.ckpt
52 | ---- RealESRGAN_x4plus.pth RealESRGAN_x4plus_anime_6B.pth
53 | ---- ScuNet.pth
54 | ---- SwinIR_4x.pth
55 | ---- torch_deepdanbooru model-resnet_custom_v3.pt
56 |
57 | ## 其他
58 | 如果安装遇到问题, 欢迎联系作者 (Q群, GitHub, bilibili).
59 | 安装脚本install.bat以及作者使用的打包脚本已开源: https://github.com/Nuullll/ipex-sd-docker-for-arc-gpu/tree/main/scripts
60 |
61 | 关于此镜像的高阶用法, 请移步进阶教程: https://blog.nuullll.com/ipex-sd-docker-for-arc-gpu/#/zh-cn/
62 |
63 | ## 声明
64 | 此一键安装包仅为了简化Intel Arc运行AI绘画应用的环境配置. 请勿用于不正当用途.
65 |
--------------------------------------------------------------------------------
/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Clone SD.Next into the bind-mounted /sd-webui directory on first run (when it is not a git repo yet)
3 | git rev-parse --git-dir > /dev/null 2>&1 || (git clone https://github.com/vladmandic/automatic.git .)
4 | # Ignore file permission (mode) changes, e.g. when files are edited through a Windows/WSL bind mount
5 | git config core.filemode false
6 | # Forward any extra container arguments (e.g. --debug --lowvram) to the Web UI launcher
7 | ./webui.sh "$@"
8 | 
--------------------------------------------------------------------------------