├── tests
│   ├── __init__.py
│   ├── inference
│   │   ├── __init__.py
│   │   └── graphs
│   │       └── default_graph_sdxl1_0.json
│   ├── README.md
│   ├── conftest.py
│   └── compare
│       └── conftest.py
├── models
│   ├── vae
│   │   └── put_vae_here
│   ├── loras
│   │   └── put_loras_here
│   ├── unet
│   │   └── put_unet_files_here
│   ├── gligen
│   │   └── put_gligen_models_here
│   ├── checkpoints
│   │   └── put_checkpoints_here
│   ├── diffusers
│   │   └── put_diffusers_models_here
│   ├── hypernetworks
│   │   └── put_hypernetworks_here
│   ├── clip
│   │   └── put_clip_or_text_encoder_models_here
│   ├── clip_vision
│   │   └── put_clip_vision_models_here
│   ├── controlnet
│   │   └── put_controlnets_and_t2i_here
│   ├── style_models
│   │   └── put_t2i_style_model_here
│   ├── upscale_models
│   │   └── put_esrgan_and_other_upscale_models_here
│   ├── embeddings
│   │   └── put_embeddings_or_textual_inversion_concepts_here
│   ├── vae_approx
│   │   └── put_taesd_encoder_pth_and_taesd_decoder_pth_here
│   └── configs
│       ├── v2-inference.yaml
│       ├── v2-inference_fp32.yaml
│       ├── v2-inference-v.yaml
│       ├── v2-inference-v_fp32.yaml
│       ├── v1-inference.yaml
│       ├── v1-inference_fp16.yaml
│       ├── anything_v3.yaml
│       ├── v1-inference_clip_skip_2.yaml
│       ├── v1-inference_clip_skip_2_fp16.yaml
│       └── v1-inpainting-inference.yaml
├── tests-ui
│   ├── .gitignore
│   ├── babel.config.json
│   ├── utils
│   │   ├── nopProxy.js
│   │   ├── litegraph.js
│   │   ├── setup.js
│   │   └── index.js
│   ├── jest.config.js
│   ├── globalSetup.js
│   ├── package.json
│   └── setup.js
├── CODEOWNERS
├── comfy
│   ├── ldm
│   │   └── modules
│   │       ├── encoders
│   │       │   ├── __init__.py
│   │       │   └── noise_aug_modules.py
│   │       ├── distributions
│   │       │   ├── __init__.py
│   │       │   └── distributions.py
│   │       ├── diffusionmodules
│   │       │   ├── __init__.py
│   │       │   └── upscaling.py
│   │       └── ema.py
│   ├── options.py
│   ├── checkpoint_pickle.py
│   ├── clip_vision_config_g.json
│   ├── clip_vision_config_h.json
│   ├── clip_vision_config_vitl.json
│   ├── sd1_tokenizer
│   │   ├── special_tokens_map.json
│   │   └── tokenizer_config.json
│   ├── clip_config_bigg.json
│   ├── sd2_clip_config.json
│   ├── sd1_clip_config.json
│   ├── latent_formats.py
│   ├── sd2_clip.py
│   ├── diffusers_load.py
│   ├── ops.py
│   ├── conds.py
│   ├── supported_models_base.py
│   ├── taesd
│   │   └── taesd.py
│   └── sdxl_clip.py
├── output
│   └── _output_images_will_be_put_here
├── comfy_extras
│   ├── chainner_models
│   │   ├── __init__.py
│   │   ├── architecture
│   │   │   ├── __init__.py
│   │   │   ├── timm
│   │   │   │   └── helpers.py
│   │   │   ├── OmniSR
│   │   │   │   ├── pixelshuffle.py
│   │   │   │   ├── OSAG.py
│   │   │   │   ├── layernorm.py
│   │   │   │   └── ChannelAttention.py
│   │   │   ├── LICENSE-HAT
│   │   │   ├── LICENSE-RealESRGAN
│   │   │   ├── face
│   │   │   │   ├── LICENSE-codeformer
│   │   │   │   └── fused_act.py
│   │   │   └── SRVGG.py
│   │   ├── types.py
│   │   └── model_loading.py
│   ├── nodes_perpneg.py
│   ├── nodes_upscale_model.py
│   ├── nodes_model_downscale.py
│   ├── nodes_clip_sdxl.py
│   ├── nodes_hypertile.py
│   ├── nodes_video_model.py
│   ├── nodes_latent.py
│   └── nodes_rebatch.py
├── web
│   ├── user.css
│   ├── jsconfig.json
│   ├── extensions
│   │   ├── core
│   │   │   ├── uploadImage.js
│   │   │   ├── linkRenderMode.js
│   │   │   ├── invertMenuScrolling.js
│   │   │   ├── noteNode.js
│   │   │   ├── saveImageExtraOutput.js
│   │   │   ├── dynamicPrompts.js
│   │   │   ├── keybinds.js
│   │   │   ├── snapToGrid.js
│   │   │   ├── slotDefaults.js
│   │   │   ├── undoRedo.js
│   │   │   └── clipspace.js
│   │   └── logging.js.example
│   ├── lib
│   │   └── litegraph.extensions.js
│   ├── index.html
│   ├── scripts
│   │   ├── utils.js
│   │   ├── ui
│   │   │   └── imagePreview.js
│   │   └── defaultGraph.js
│   └── types
│       └── comfy.d.ts
├── input
│   └── example.png
├── .ci
│   ├── update_windows
│   │   ├── update_comfyui.bat
│   │   ├── update_comfyui_and_python_dependencies.bat
│   │   └── update.py
│   ├── windows_base_files
│   │   ├── run_cpu.bat
│   │   ├── run_nvidia_gpu.bat
│   │   └── README_VERY_IMPORTANT.txt
│   └── update_windows_cu118
│       └── update_comfyui_and_python_dependencies.bat
├── comfyui_screenshot.png
├── pytest.ini
├── requirements.txt
├── .vscode
│   └── settings.json
├── .gitignore
├── .github
│   └── workflows
│       ├── test-ui.yaml
│       ├── test-build.yml
│       ├── windows_release_cu118_dependencies_2.yml
│       ├── windows_release_dependencies.yml
│       ├── windows_release_cu118_package.yml
│       ├── windows_release_cu118_dependencies.yml
│       ├── windows_release_package.yml
│       └── windows_release_nightly_pytorch.yml
├── extra_model_paths.yaml.example
├── script_examples
│   └── basic_api_example.py
├── cuda_malloc.py
├── latent_preview.py
└── custom_nodes
    └── example_node.py.example
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/vae/put_vae_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/loras/put_loras_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/inference/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/unet/put_unet_files_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests-ui/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @comfyanonymous
2 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/encoders/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/gligen/put_gligen_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/output/_output_images_will_be_put_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/distributions/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/checkpoints/put_checkpoints_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/diffusers/put_diffusers_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/hypernetworks/put_hypernetworks_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/web/user.css:
--------------------------------------------------------------------------------
1 | /* Put custom styles here */
--------------------------------------------------------------------------------
/comfy/ldm/modules/diffusionmodules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip/put_clip_or_text_encoder_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip_vision/put_clip_vision_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/controlnet/put_controlnets_and_t2i_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/style_models/put_t2i_style_model_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/upscale_models/put_esrgan_and_other_upscale_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/embeddings/put_embeddings_or_textual_inversion_concepts_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/vae_approx/put_taesd_encoder_pth_and_taesd_decoder_pth_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests-ui/babel.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "presets": ["@babel/preset-env"]
3 | }
4 |
--------------------------------------------------------------------------------
/input/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subtleGradient/ComfyUI/master/input/example.png
--------------------------------------------------------------------------------
/.ci/update_windows/update_comfyui.bat:
--------------------------------------------------------------------------------
1 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\
2 | pause
3 |
--------------------------------------------------------------------------------
/comfyui_screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/subtleGradient/ComfyUI/master/comfyui_screenshot.png
--------------------------------------------------------------------------------
/.ci/windows_base_files/run_cpu.bat:
--------------------------------------------------------------------------------
1 | .\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
2 | pause
3 |
--------------------------------------------------------------------------------
/.ci/windows_base_files/run_nvidia_gpu.bat:
--------------------------------------------------------------------------------
1 | .\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build
2 | pause
3 |
--------------------------------------------------------------------------------
/comfy/options.py:
--------------------------------------------------------------------------------
1 |
2 | args_parsing = False
3 |
4 | def enable_args_parsing(enable=True):
5 |     global args_parsing
6 |     args_parsing = enable
7 |
--------------------------------------------------------------------------------
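A minimal usage sketch, not taken from the repository: an embedder can flip this flag before any argument-defining module runs, and such a module can then decide between parsing `sys.argv` and falling back to defaults. The gating pattern below is an assumption for illustration and assumes the repository root is on `sys.path`.

```python
# Hedged sketch: gate real CLI parsing on comfy.options.args_parsing so that
# embedders importing the package do not inherit its command-line interface.
import argparse

import comfy.options

comfy.options.enable_args_parsing()  # opt in to parsing the real sys.argv

parser = argparse.ArgumentParser()
parser.add_argument("--listen", default="127.0.0.1")

# Parse sys.argv only when parsing is enabled; otherwise keep the defaults.
args = parser.parse_args() if comfy.options.args_parsing else parser.parse_args([])
print(args.listen)
```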
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 |     inference: mark as inference test (deselect with '-m "not inference"')
4 | testpaths = tests
5 | addopts = -s
--------------------------------------------------------------------------------
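The `inference` marker declared above can be attached to tests so that slow sampling runs are easy to deselect. A hypothetical test module for illustration:

```python
# hypothetical_test_example.py -- illustrates the custom marker from pytest.ini.
# Deselect these tests with:  pytest -m "not inference"
import pytest


@pytest.mark.inference
def test_generates_an_image():
    # Placeholder assertion standing in for a real sampling run.
    assert True
```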
/web/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "paths": {
5 | "/*": ["./*"]
6 | }
7 | },
8 | "include": ["."]
9 | }
10 |
--------------------------------------------------------------------------------
/tests-ui/utils/nopProxy.js:
--------------------------------------------------------------------------------
1 | export const nop = new Proxy(function () {}, {
2 | get: () => nop,
3 | set: () => true,
4 | apply: () => nop,
5 | construct: () => nop,
6 | });
7 |
--------------------------------------------------------------------------------
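`nop` absorbs any property access, call, assignment, or construction and keeps returning itself, which lets the UI tests stub out canvas and WebGL APIs. A rough Python analogue of the same pattern, for illustration only (this class is not part of the repository):

```python
# Python analogue of the "nop" proxy: every interaction is swallowed and the
# object returns itself, so arbitrarily long call chains are harmless.
class Nop:
    def __getattr__(self, name):
        return self

    def __setattr__(self, name, value):
        pass  # ignore assignments

    def __call__(self, *args, **kwargs):
        return self


nop = Nop()
nop.canvas.getContext("2d").fillRect(0, 0, 10, 10)  # silently does nothing
```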
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchsde
3 | einops
4 | transformers>=4.25.1
5 | safetensors>=0.3.0
6 | aiohttp
7 | accelerate
8 | pyyaml
9 | Pillow
10 | scipy
11 | tqdm
12 | psutil
13 |
--------------------------------------------------------------------------------
/tests-ui/jest.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('jest').Config} */
2 | const config = {
3 | testEnvironment: "jsdom",
4 | setupFiles: ["./globalSetup.js"],
5 | clearMocks: true,
6 | resetModules: true,
7 | };
8 |
9 | module.exports = config;
10 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "path-intellisense.mappings": {
3 | "../": "${workspaceFolder}/web/extensions/core"
4 | },
5 | "[python]": {
6 | "editor.defaultFormatter": "ms-python.autopep8"
7 | },
8 | "python.formatting.provider": "none"
9 | }
10 |
--------------------------------------------------------------------------------
/.ci/update_windows/update_comfyui_and_python_dependencies.bat:
--------------------------------------------------------------------------------
1 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\
2 | ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2
3 | pause
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | /output/
4 | /input/
5 | !/input/example.png
6 | /models/
7 | /temp/
8 | /custom_nodes/
9 | !custom_nodes/example_node.py.example
10 | extra_model_paths.yaml
11 | /.vs
12 | .idea/
13 | venv/
14 | /web/extensions/*
15 | !/web/extensions/logging.js.example
16 | !/web/extensions/core/
17 | /tests-ui/data/object_info.json
--------------------------------------------------------------------------------
/comfy/checkpoint_pickle.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | load = pickle.load
4 |
5 | class Empty:
6 |     pass
7 | 
8 | class Unpickler(pickle.Unpickler):
9 |     def find_class(self, module, name):
10 |         #TODO: safe unpickle
11 |         if module.startswith("pytorch_lightning"):
12 |             return Empty
13 |         return super().find_class(module, name)
14 |
--------------------------------------------------------------------------------
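Because the module exposes both `load` and `Unpickler`, it can be handed to `torch.load` as a drop-in `pickle_module`, so stray `pytorch_lightning` references in old checkpoints resolve to the harmless `Empty` class instead of importing that package. A hedged sketch with a hypothetical checkpoint path (not necessarily how ComfyUI's own loader invokes it):

```python
# Hedged sketch: route unpickling of a legacy checkpoint through checkpoint_pickle.
import torch

import comfy.checkpoint_pickle

state = torch.load(
    "models/checkpoints/legacy.ckpt",        # hypothetical path
    map_location="cpu",
    pickle_module=comfy.checkpoint_pickle,   # torch.load uses its Unpickler
    weights_only=False,  # newer PyTorch rejects a custom pickle_module otherwise
)
```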
/web/extensions/core/uploadImage.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Adds an upload button to the nodes
4 |
5 | app.registerExtension({
6 | name: "Comfy.UploadImage",
7 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 | if (nodeData?.input?.required?.image?.[1]?.image_upload === true) {
9 | nodeData.input.required.upload = ["IMAGEUPLOAD"];
10 | }
11 | },
12 | });
13 |
--------------------------------------------------------------------------------
/tests-ui/globalSetup.js:
--------------------------------------------------------------------------------
1 | module.exports = async function () {
2 | global.ResizeObserver = class ResizeObserver {
3 | observe() {}
4 | unobserve() {}
5 | disconnect() {}
6 | };
7 |
8 | const { nop } = require("./utils/nopProxy");
9 | global.enableWebGLCanvas = nop;
10 |
11 | HTMLCanvasElement.prototype.getContext = nop;
12 |
13 | localStorage["Comfy.Settings.Comfy.Logging.Enabled"] = "false";
14 | };
15 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_g.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1664,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 8192,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 48,
15 | "patch_size": 14,
16 | "projection_dim": 1280,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_h.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1280,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 5120,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 32,
15 | "patch_size": 14,
16 | "projection_dim": 1024,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_vitl.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "quick_gelu",
5 | "hidden_size": 1024,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 4096,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 24,
15 | "patch_size": 14,
16 | "projection_dim": 768,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
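These three JSON files carry standard `transformers` CLIP vision settings (hidden size, depth, patch size, projection dim). As an illustration only, and assuming the `transformers` package is installed, they can be inspected like this; ComfyUI's own loading code may differ:

```python
# Hedged sketch: read one of the bundled vision configs into a transformers config object.
import json

from transformers import CLIPVisionConfig

with open("comfy/clip_vision_config_vitl.json") as f:
    cfg = CLIPVisionConfig.from_dict(json.load(f))

print(cfg.hidden_size, cfg.num_hidden_layers, cfg.projection_dim)  # 1024 24 768
```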
/comfy/sd1_tokenizer/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "bos_token": {
3 | "content": "<|startoftext|>",
4 | "lstrip": false,
5 | "normalized": true,
6 | "rstrip": false,
7 | "single_word": false
8 | },
9 | "eos_token": {
10 | "content": "<|endoftext|>",
11 | "lstrip": false,
12 | "normalized": true,
13 | "rstrip": false,
14 | "single_word": false
15 | },
16 | "pad_token": "<|endoftext|>",
17 | "unk_token": {
18 | "content": "<|endoftext|>",
19 | "lstrip": false,
20 | "normalized": true,
21 | "rstrip": false,
22 | "single_word": false
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/comfy/clip_config_bigg.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 2,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1280,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 5120,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 20,
18 | "num_hidden_layers": 32,
19 | "pad_token_id": 1,
20 | "projection_dim": 1280,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/comfy/sd2_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 2,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1024,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 4096,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 16,
18 | "num_hidden_layers": 24,
19 | "pad_token_id": 1,
20 | "projection_dim": 1024,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/web/lib/litegraph.extensions.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Changes the background color of the canvas.
3 | *
4 | * @method updateBackground
5 | * @param {image} String
6 | * @param {clearBackgroundColor} String
7 | * @
8 | */
9 | LGraphCanvas.prototype.updateBackground = function (image, clearBackgroundColor) {
10 | this._bg_img = new Image();
11 | this._bg_img.name = image;
12 | this._bg_img.src = image;
13 | this._bg_img.onload = () => {
14 | this.draw(true, true);
15 | };
16 | this.background_image = image;
17 |
18 | this.clear_background = true;
19 | this.clear_background_color = clearBackgroundColor;
20 | this._pattern = null
21 | }
22 |
--------------------------------------------------------------------------------
/web/extensions/core/linkRenderMode.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | const id = "Comfy.LinkRenderMode";
4 | const ext = {
5 | name: id,
6 | async setup(app) {
7 | app.ui.settings.addSetting({
8 | id,
9 | name: "Link Render Mode",
10 | defaultValue: 2,
11 | type: "combo",
12 | options: [...LiteGraph.LINK_RENDER_MODES, "Hidden"].map((m, i) => ({
13 | value: i,
14 | text: m,
15 | selected: i == app.canvas.links_render_mode,
16 | })),
17 | onChange(value) {
18 | app.canvas.links_render_mode = +value;
19 | app.graph.setDirtyCanvas(true);
20 | },
21 | });
22 | },
23 | };
24 |
25 | app.registerExtension(ext);
26 |
--------------------------------------------------------------------------------
/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\
3 | echo
4 | echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff
5 | echo You should not be running this anyways unless you really have to
6 | echo
7 | echo If you just want to update normally, close this and run update_comfyui.bat instead.
8 | echo
9 | pause
10 | ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
11 | pause
12 |
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | # Automated Testing
2 |
3 | ## Running tests locally
4 |
5 | Additional requirements for running tests:
6 | ```
7 | pip install pytest
8 | pip install websocket-client==1.6.1
9 | pip install opencv-python==4.6.0.66
10 | pip install scikit-image==0.21.0
11 | ```
12 | Run inference tests:
13 | ```
14 | pytest tests/inference
15 | ```
16 |
17 | ## Quality regression test
18 | Compares images in 2 directories to ensure they are the same
19 |
20 | 1) Run an inference test to save a directory of "ground truth" images
21 | ```
22 | pytest tests/inference --output_dir tests/inference/baseline
23 | ```
24 | 2) Make code edits
25 |
26 | 3) Run inference and quality comparison tests
27 | ```
28 | pytest
29 | ```
--------------------------------------------------------------------------------
/comfy/sd1_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "_name_or_path": "openai/clip-vit-large-patch14",
3 | "architectures": [
4 | "CLIPTextModel"
5 | ],
6 | "attention_dropout": 0.0,
7 | "bos_token_id": 0,
8 | "dropout": 0.0,
9 | "eos_token_id": 2,
10 | "hidden_act": "quick_gelu",
11 | "hidden_size": 768,
12 | "initializer_factor": 1.0,
13 | "initializer_range": 0.02,
14 | "intermediate_size": 3072,
15 | "layer_norm_eps": 1e-05,
16 | "max_position_embeddings": 77,
17 | "model_type": "clip_text_model",
18 | "num_attention_heads": 12,
19 | "num_hidden_layers": 12,
20 | "pad_token_id": 1,
21 | "projection_dim": 768,
22 | "torch_dtype": "float32",
23 | "transformers_version": "4.24.0",
24 | "vocab_size": 49408
25 | }
26 |
--------------------------------------------------------------------------------
/.github/workflows/test-ui.yaml:
--------------------------------------------------------------------------------
1 | name: Tests CI
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 |   test:
7 |     runs-on: ubuntu-latest
8 |     steps:
9 |       - uses: actions/checkout@v4
10 |       - uses: actions/setup-node@v3
11 |         with:
12 |           node-version: 18
13 |       - uses: actions/setup-python@v4
14 |         with:
15 |           python-version: '3.10'
16 |       - name: Install requirements
17 |         run: |
18 |           python -m pip install --upgrade pip
19 |           pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
20 |           pip install -r requirements.txt
21 |       - name: Run Tests
22 |         run: |
23 |           npm ci
24 |           npm run test:generate
25 |           npm test
26 |         working-directory: ./tests-ui
27 |
--------------------------------------------------------------------------------
/web/index.html:
--------------------------------------------------------------------------------
[HTML markup was stripped from this capture; only the page title "ComfyUI" is recoverable.]
--------------------------------------------------------------------------------
/tests-ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "comfui-tests",
3 | "version": "1.0.0",
4 | "description": "UI tests",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "jest",
8 | "test:generate": "node setup.js"
9 | },
10 | "repository": {
11 | "type": "git",
12 | "url": "git+https://github.com/comfyanonymous/ComfyUI.git"
13 | },
14 | "keywords": [
15 | "comfyui",
16 | "test"
17 | ],
18 | "author": "comfyanonymous",
19 | "license": "GPL-3.0",
20 | "bugs": {
21 | "url": "https://github.com/comfyanonymous/ComfyUI/issues"
22 | },
23 | "homepage": "https://github.com/comfyanonymous/ComfyUI#readme",
24 | "devDependencies": {
25 | "@babel/preset-env": "^7.22.20",
26 | "@types/jest": "^29.5.5",
27 | "jest": "^29.7.0",
28 | "jest-environment-jsdom": "^29.7.0"
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/.github/workflows/test-build.yml:
--------------------------------------------------------------------------------
1 | name: Build package
2 |
3 | #
4 | # This workflow is a test of the python package build.
5 | # Install Python dependencies across different Python versions.
6 | #
7 |
8 | on:
9 |   push:
10 |     paths:
11 |       - "requirements.txt"
12 |       - ".github/workflows/test-build.yml"
13 | 
14 | jobs:
15 |   build:
16 |     name: Build Test
17 |     runs-on: ubuntu-latest
18 |     strategy:
19 |       fail-fast: false
20 |       matrix:
21 |         python-version: ["3.8", "3.9", "3.10", "3.11"]
22 |     steps:
23 |       - uses: actions/checkout@v4
24 |       - name: Set up Python ${{ matrix.python-version }}
25 |         uses: actions/setup-python@v4
26 |         with:
27 |           python-version: ${{ matrix.python-version }}
28 |       - name: Install dependencies
29 |         run: |
30 |           python -m pip install --upgrade pip
31 |           pip install -r requirements.txt
--------------------------------------------------------------------------------
/tests-ui/utils/litegraph.js:
--------------------------------------------------------------------------------
1 | const fs = require("fs");
2 | const path = require("path");
3 | const { nop } = require("../utils/nopProxy");
4 |
5 | function forEachKey(cb) {
6 | for (const k of [
7 | "LiteGraph",
8 | "LGraph",
9 | "LLink",
10 | "LGraphNode",
11 | "LGraphGroup",
12 | "DragAndScale",
13 | "LGraphCanvas",
14 | "ContextMenu",
15 | ]) {
16 | cb(k);
17 | }
18 | }
19 |
20 | export function setup(ctx) {
21 | const lg = fs.readFileSync(path.resolve("../web/lib/litegraph.core.js"), "utf-8");
22 | const globalTemp = {};
23 | (function (console) {
24 | eval(lg);
25 | }).call(globalTemp, nop);
26 |
27 | forEachKey((k) => (ctx[k] = globalTemp[k]));
28 | require(path.resolve("../web/lib/litegraph.extensions.js"));
29 | }
30 |
31 | export function teardown(ctx) {
32 | forEachKey((k) => delete ctx[k]);
33 |
34 | // Clear document after each run
35 | document.getElementsByTagName("html")[0].innerHTML = "";
36 | }
37 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/timm/helpers.py:
--------------------------------------------------------------------------------
1 | """ Layer/Module Helpers
2 | Hacked together by / Copyright 2020 Ross Wightman
3 | """
4 | import collections.abc
5 | from itertools import repeat
6 |
7 |
8 | # From PyTorch internals
9 | def _ntuple(n):
10 |     def parse(x):
11 |         if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
12 |             return x
13 |         return tuple(repeat(x, n))
14 | 
15 |     return parse
16 |
17 |
18 | to_1tuple = _ntuple(1)
19 | to_2tuple = _ntuple(2)
20 | to_3tuple = _ntuple(3)
21 | to_4tuple = _ntuple(4)
22 | to_ntuple = _ntuple
23 |
24 |
25 | def make_divisible(v, divisor=8, min_value=None, round_limit=0.9):
26 |     min_value = min_value or divisor
27 |     new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
28 |     # Make sure that round down does not go down by more than 10%.
29 |     if new_v < round_limit * v:
30 |         new_v += divisor
31 |     return new_v
32 |
--------------------------------------------------------------------------------
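A quick usage sketch of the helpers above (values chosen for illustration; the import assumes the repository root is on `sys.path`):

```python
from comfy_extras.chainner_models.architecture.timm.helpers import make_divisible, to_2tuple

print(to_2tuple(3))        # (3, 3)  scalars are repeated n times
print(to_2tuple((3, 5)))   # (3, 5)  iterables pass through unchanged
print(make_divisible(30))  # 32      rounded to the nearest multiple of 8, never below min_value
```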
/comfy/sd1_tokenizer/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "add_prefix_space": false,
3 | "bos_token": {
4 | "__type": "AddedToken",
5 | "content": "<|startoftext|>",
6 | "lstrip": false,
7 | "normalized": true,
8 | "rstrip": false,
9 | "single_word": false
10 | },
11 | "do_lower_case": true,
12 | "eos_token": {
13 | "__type": "AddedToken",
14 | "content": "<|endoftext|>",
15 | "lstrip": false,
16 | "normalized": true,
17 | "rstrip": false,
18 | "single_word": false
19 | },
20 | "errors": "replace",
21 | "model_max_length": 77,
22 | "name_or_path": "openai/clip-vit-large-patch14",
23 | "pad_token": "<|endoftext|>",
24 | "special_tokens_map_file": "./special_tokens_map.json",
25 | "tokenizer_class": "CLIPTokenizer",
26 | "unk_token": {
27 | "__type": "AddedToken",
28 | "content": "<|endoftext|>",
29 | "lstrip": false,
30 | "normalized": true,
31 | "rstrip": false,
32 | "single_word": false
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/web/extensions/core/invertMenuScrolling.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Inverts the scrolling of context menus
4 |
5 | const id = "Comfy.InvertMenuScrolling";
6 | app.registerExtension({
7 | name: id,
8 | init() {
9 | const ctxMenu = LiteGraph.ContextMenu;
10 | const replace = () => {
11 | LiteGraph.ContextMenu = function (values, options) {
12 | options = options || {};
13 | if (options.scroll_speed) {
14 | options.scroll_speed *= -1;
15 | } else {
16 | options.scroll_speed = -0.1;
17 | }
18 | return ctxMenu.call(this, values, options);
19 | };
20 | LiteGraph.ContextMenu.prototype = ctxMenu.prototype;
21 | };
22 | app.ui.settings.addSetting({
23 | id,
24 | name: "Invert Menu Scrolling",
25 | type: "boolean",
26 | defaultValue: false,
27 | onChange(value) {
28 | if (value) {
29 | replace();
30 | } else {
31 | LiteGraph.ContextMenu = ctxMenu;
32 | }
33 | },
34 | });
35 | },
36 | });
37 |
--------------------------------------------------------------------------------
/.ci/windows_base_files/README_VERY_IMPORTANT.txt:
--------------------------------------------------------------------------------
1 | HOW TO RUN:
2 |
3 | if you have a NVIDIA gpu:
4 |
5 | run_nvidia_gpu.bat
6 |
7 |
8 |
9 | To run it in slow CPU mode:
10 |
11 | run_cpu.bat
12 |
13 |
14 |
15 | IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints
16 |
17 | You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt
18 |
19 |
20 | RECOMMENDED WAY TO UPDATE:
21 | To update the ComfyUI code: update\update_comfyui.bat
22 |
23 |
24 |
25 | To update ComfyUI with the python dependencies, note that you should ONLY run this if you have issues with python dependencies.
26 | update\update_comfyui_and_python_dependencies.bat
27 |
28 |
29 | TO SHARE MODELS BETWEEN COMFYUI AND ANOTHER UI:
30 | In the ComfyUI directory you will find a file: extra_model_paths.yaml.example
31 | Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor.
32 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/OmniSR/pixelshuffle.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: pixelshuffle.py
5 | # Created Date: Friday July 1st 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Friday, 1st July 2022 10:18:39 am
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2022 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 | import torch.nn as nn
14 |
15 |
16 | def pixelshuffle_block(
17 |     in_channels, out_channels, upscale_factor=2, kernel_size=3, bias=False
18 | ):
19 |     """
20 |     Upsample features according to `upscale_factor`.
21 |     """
22 |     padding = kernel_size // 2
23 |     conv = nn.Conv2d(
24 |         in_channels,
25 |         out_channels * (upscale_factor**2),
26 |         kernel_size,
27 |         padding=1,
28 |         bias=bias,
29 |     )
30 |     pixel_shuffle = nn.PixelShuffle(upscale_factor)
31 |     return nn.Sequential(*[conv, pixel_shuffle])
32 |
--------------------------------------------------------------------------------
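A shape check of `pixelshuffle_block` with a dummy tensor, for illustration (channel counts are arbitrary and the import assumes the repository root is on `sys.path`):

```python
import torch

from comfy_extras.chainner_models.architecture.OmniSR.pixelshuffle import pixelshuffle_block

# 8 -> 3*(2**2) channels in the conv, then PixelShuffle(2) doubles H and W.
up = pixelshuffle_block(in_channels=8, out_channels=3, upscale_factor=2)
x = torch.randn(1, 8, 16, 16)
print(up(x).shape)  # torch.Size([1, 3, 32, 32])
```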
/comfy_extras/chainner_models/architecture/LICENSE-HAT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Xiangyu Chen
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_cu118_dependencies_2.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release cu118 dependencies 2"
2 |
3 | on:
4 |   workflow_dispatch:
5 |     inputs:
6 |       xformers:
7 |         description: 'xformers version'
8 |         required: true
9 |         type: string
10 |         default: "xformers"
11 | 
12 | # push:
13 | # branches:
14 | # - master
15 | 
16 | jobs:
17 |   build_dependencies:
18 |     runs-on: windows-latest
19 |     steps:
20 |       - uses: actions/checkout@v3
21 |       - uses: actions/setup-python@v4
22 |         with:
23 |           python-version: '3.10.9'
24 | 
25 |       - shell: bash
26 |         run: |
27 |           python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
28 |           python -m pip install --no-cache-dir ./temp_wheel_dir/*
29 |           echo installed basic
30 |           ls -lah temp_wheel_dir
31 |           mv temp_wheel_dir cu118_python_deps
32 |           tar cf cu118_python_deps.tar cu118_python_deps
33 | 
34 |       - uses: actions/cache/save@v3
35 |         with:
36 |           path: cu118_python_deps.tar
37 |           key: ${{ runner.os }}-build-cu118
38 |
--------------------------------------------------------------------------------
/comfy/latent_formats.py:
--------------------------------------------------------------------------------
1 |
2 | class LatentFormat:
3 |     scale_factor = 1.0
4 |     latent_rgb_factors = None
5 |     taesd_decoder_name = None
6 | 
7 |     def process_in(self, latent):
8 |         return latent * self.scale_factor
9 | 
10 |     def process_out(self, latent):
11 |         return latent / self.scale_factor
12 | 
13 | class SD15(LatentFormat):
14 |     def __init__(self, scale_factor=0.18215):
15 |         self.scale_factor = scale_factor
16 |         self.latent_rgb_factors = [
17 |             # R G B
18 |             [ 0.3512, 0.2297, 0.3227],
19 |             [ 0.3250, 0.4974, 0.2350],
20 |             [-0.2829, 0.1762, 0.2721],
21 |             [-0.2120, -0.2616, -0.7177]
22 |         ]
23 |         self.taesd_decoder_name = "taesd_decoder"
24 | 
25 | class SDXL(LatentFormat):
26 |     def __init__(self):
27 |         self.scale_factor = 0.13025
28 |         self.latent_rgb_factors = [
29 |             # R G B
30 |             [ 0.3920, 0.4054, 0.4549],
31 |             [-0.2634, -0.0196, 0.0653],
32 |             [ 0.0568, 0.1687, -0.0755],
33 |             [-0.3112, -0.2359, -0.2076]
34 |         ]
35 |         self.taesd_decoder_name = "taesdxl_decoder"
36 |
--------------------------------------------------------------------------------
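A hedged sketch of how these formats behave: `process_in`/`process_out` are inverse scalings, and `latent_rgb_factors` maps the four latent channels to a rough RGB preview. The tensors here are random placeholders:

```python
import torch

from comfy.latent_formats import SDXL

fmt = SDXL()
latent = torch.randn(1, 4, 64, 64)

scaled = fmt.process_in(latent)  # multiply by 0.13025
assert torch.allclose(fmt.process_out(scaled), latent, atol=1e-6)

# (B, 4, H, W) x (4, 3) -> (B, H, W, 3) pseudo-RGB preview
rgb = torch.einsum("bchw,cr->bhwr", latent, torch.tensor(fmt.latent_rgb_factors))
print(rgb.shape)  # torch.Size([1, 64, 64, 3])
```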
/web/extensions/core/noteNode.js:
--------------------------------------------------------------------------------
1 | import {app} from "../../scripts/app.js";
2 | import {ComfyWidgets} from "../../scripts/widgets.js";
3 | // Node that adds notes to your project
4 |
5 | app.registerExtension({
6 | name: "Comfy.NoteNode",
7 | registerCustomNodes() {
8 | class NoteNode {
9 | color=LGraphCanvas.node_colors.yellow.color;
10 | bgcolor=LGraphCanvas.node_colors.yellow.bgcolor;
11 | groupcolor = LGraphCanvas.node_colors.yellow.groupcolor;
12 | constructor() {
13 | if (!this.properties) {
14 | this.properties = {};
15 | this.properties.text="";
16 | }
17 |
18 | ComfyWidgets.STRING(this, "", ["", {default:this.properties.text, multiline: true}], app)
19 |
20 | this.serialize_widgets = true;
21 | this.isVirtualNode = true;
22 |
23 | }
24 |
25 |
26 | }
27 |
28 | // Load default visibility
29 |
30 | LiteGraph.registerNodeType(
31 | "Note",
32 | Object.assign(NoteNode, {
33 | title_mode: LiteGraph.NORMAL_TITLE,
34 | title: "Note",
35 | collapsable: true,
36 | })
37 | );
38 |
39 | NoteNode.category = "utils";
40 | },
41 | });
42 |
--------------------------------------------------------------------------------
/comfy/sd2_clip.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | import torch
3 | import os
4 |
5 | class SD2ClipHModel(sd1_clip.SDClipModel):
6 |     def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None):
7 |         if layer == "penultimate":
8 |             layer="hidden"
9 |             layer_idx=-2
10 | 
11 |         textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json")
12 |         super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0})
13 | 
14 | class SD2ClipHTokenizer(sd1_clip.SDTokenizer):
15 |     def __init__(self, tokenizer_path=None, embedding_directory=None):
16 |         super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)
17 | 
18 | class SD2Tokenizer(sd1_clip.SD1Tokenizer):
19 |     def __init__(self, embedding_directory=None):
20 |         super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer)
21 | 
22 | class SD2ClipModel(sd1_clip.SD1ClipModel):
23 |     def __init__(self, device="cpu", dtype=None, **kwargs):
24 |         super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs)
25 |
--------------------------------------------------------------------------------
/extra_model_paths.yaml.example:
--------------------------------------------------------------------------------
1 | #Rename this to extra_model_paths.yaml and ComfyUI will load it
2 |
3 |
4 | #config for a1111 ui
5 | #all you have to do is change the base_path to where yours is installed
6 | a111:
7 |     base_path: path/to/stable-diffusion-webui/
8 | 
9 |     checkpoints: models/Stable-diffusion
10 |     configs: models/Stable-diffusion
11 |     vae: models/VAE
12 |     loras: |
13 |         models/Lora
14 |         models/LyCORIS
15 |     upscale_models: |
16 |         models/ESRGAN
17 |         models/RealESRGAN
18 |         models/SwinIR
19 |     embeddings: embeddings
20 |     hypernetworks: models/hypernetworks
21 |     controlnet: models/ControlNet
22 |
23 | #config for comfyui
24 | #your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc.
25 |
26 | #comfyui:
27 | #    base_path: path/to/comfyui/
28 | #    checkpoints: models/checkpoints/
29 | #    clip: models/clip/
30 | #    clip_vision: models/clip_vision/
31 | #    configs: models/configs/
32 | #    controlnet: models/controlnet/
33 | #    embeddings: models/embeddings/
34 | #    loras: models/loras/
35 | #    upscale_models: models/upscale_models/
36 | #    vae: models/vae/
37 |
38 | #other_ui:
39 | #    base_path: path/to/ui
40 | #    checkpoints: models/checkpoints
41 | #    gligen: models/gligen
42 | #    custom_nodes: path/custom_nodes
43 |
--------------------------------------------------------------------------------
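For illustration only (this is not ComfyUI's real loader), a small PyYAML sketch that reads the renamed `extra_model_paths.yaml` and lists where each section maps its model folders; block-scalar entries such as `loras: |` arrive as newline-separated strings:

```python
import yaml

with open("extra_model_paths.yaml") as f:
    config = yaml.safe_load(f)

for ui_name, paths in (config or {}).items():
    base = paths.pop("base_path", "")
    for kind, value in paths.items():
        # split multi-line entries into individual relative folders
        for sub in str(value).splitlines():
            if sub.strip():
                print(f"{ui_name}: {kind} -> {base}{sub.strip()}")
```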
/web/extensions/core/saveImageExtraOutput.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { applyTextReplacements } from "../../scripts/utils.js";
3 | // Use widget values and dates in output filenames
4 |
5 | app.registerExtension({
6 | name: "Comfy.SaveImageExtraOutput",
7 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 | if (nodeData.name === "SaveImage") {
9 | const onNodeCreated = nodeType.prototype.onNodeCreated;
10 | // When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R
11 | nodeType.prototype.onNodeCreated = function () {
12 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
13 |
14 | const widget = this.widgets.find((w) => w.name === "filename_prefix");
15 | widget.serializeValue = () => {
16 | return applyTextReplacements(app, widget.value);
17 | };
18 |
19 | return r;
20 | };
21 | } else {
22 | // When any other node is created add a property to alias the node
23 | const onNodeCreated = nodeType.prototype.onNodeCreated;
24 | nodeType.prototype.onNodeCreated = function () {
25 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
26 |
27 | if (!this.properties || !("Node name for S&R" in this.properties)) {
28 | this.addProperty("Node name for S&R", this.constructor.type, "string");
29 | }
30 |
31 | return r;
32 | };
33 | }
34 | },
35 | });
36 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 |
4 | # Command line arguments for pytest
5 | def pytest_addoption(parser):
6 |     parser.addoption('--output_dir', action="store", default='tests/inference/samples', help='Output directory for generated images')
7 |     parser.addoption("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
8 |     parser.addoption("--port", type=int, default=8188, help="Set the listen port.")
9 | 
10 | # This initializes args at the beginning of the test session
11 | @pytest.fixture(scope="session", autouse=True)
12 | def args_pytest(pytestconfig):
13 |     args = {}
14 |     args['output_dir'] = pytestconfig.getoption('output_dir')
15 |     args['listen'] = pytestconfig.getoption('listen')
16 |     args['port'] = pytestconfig.getoption('port')
17 | 
18 |     os.makedirs(args['output_dir'], exist_ok=True)
19 | 
20 |     return args
21 | 
22 | def pytest_collection_modifyitems(items):
23 |     # Modifies items so tests run in the correct order
24 | 
25 |     LAST_TESTS = ['test_quality']
26 | 
27 |     # Move the last items to the end
28 |     last_items = []
29 |     for test_name in LAST_TESTS:
30 |         for item in items.copy():
31 |             print(item.module.__name__, item)
32 |             if item.module.__name__ == test_name:
33 |                 last_items.append(item)
34 |                 items.remove(item)
35 | 
36 |     items.extend(last_items)
37 |
--------------------------------------------------------------------------------
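A hypothetical test (file name and assertions invented for illustration) showing how the session-scoped `args_pytest` fixture is consumed by tests placed under `tests/`:

```python
# tests/inference/test_args_example.py -- hypothetical consumer of args_pytest.
import os


def test_output_dir_exists(args_pytest):
    # The fixture above already created the output directory.
    assert os.path.isdir(args_pytest["output_dir"])
    assert isinstance(args_pytest["port"], int)
```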
/comfy/diffusers_load.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 |
4 | import comfy.sd
5 |
6 | def first_file(path, filenames):
7 |     for f in filenames:
8 |         p = os.path.join(path, f)
9 |         if os.path.exists(p):
10 |             return p
11 |     return None
12 | 
13 | def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
14 |     diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
15 |     unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
16 |     vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)
17 | 
18 |     text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
19 |     text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
20 |     text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)
21 | 
22 |     text_encoder_paths = [text_encoder1_path]
23 |     if text_encoder2_path is not None:
24 |         text_encoder_paths.append(text_encoder2_path)
25 | 
26 |     unet = comfy.sd.load_unet(unet_path)
27 | 
28 |     clip = None
29 |     if output_clip:
30 |         clip = comfy.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)
31 | 
32 |     vae = None
33 |     if output_vae:
34 |         sd = comfy.utils.load_torch_file(vae_path)
35 |         vae = comfy.sd.VAE(sd=sd)
36 | 
37 |     return (unet, clip, vae)
38 |
--------------------------------------------------------------------------------
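A hedged calling sketch: the folder path below is hypothetical, and the keyword arguments simply mirror the signature of `load_diffusers` above.

```python
import comfy.diffusers_load

unet, clip, vae = comfy.diffusers_load.load_diffusers(
    "models/diffusers/some-diffusers-checkout",  # hypothetical local path
    output_vae=True,
    output_clip=True,
    embedding_directory="models/embeddings",
)
```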
/tests-ui/utils/setup.js:
--------------------------------------------------------------------------------
1 | require("../../web/scripts/api");
2 |
3 | const fs = require("fs");
4 | const path = require("path");
5 | function* walkSync(dir) {
6 | const files = fs.readdirSync(dir, { withFileTypes: true });
7 | for (const file of files) {
8 | if (file.isDirectory()) {
9 | yield* walkSync(path.join(dir, file.name));
10 | } else {
11 | yield path.join(dir, file.name);
12 | }
13 | }
14 | }
15 |
16 | /**
17 | * @typedef { import("../../web/types/comfy").ComfyObjectInfo } ComfyObjectInfo
18 | */
19 |
20 | /**
21 |  * @param { { mockExtensions?: string[], mockNodeDefs?: Record<string, ComfyObjectInfo> } } config
22 | */
23 | export function mockApi({ mockExtensions, mockNodeDefs } = {}) {
24 | if (!mockExtensions) {
25 | mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core")))
26 | .filter((x) => x.endsWith(".js"))
27 | .map((x) => path.relative(path.resolve("../web"), x));
28 | }
29 | if (!mockNodeDefs) {
30 | mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json")));
31 | }
32 |
33 | const events = new EventTarget();
34 | const mockApi = {
35 | addEventListener: events.addEventListener.bind(events),
36 | removeEventListener: events.removeEventListener.bind(events),
37 | dispatchEvent: events.dispatchEvent.bind(events),
38 | getSystemStats: jest.fn(),
39 | getExtensions: jest.fn(() => mockExtensions),
40 | getNodeDefs: jest.fn(() => mockNodeDefs),
41 | init: jest.fn(),
42 | apiURL: jest.fn((x) => "../../web/" + x),
43 | };
44 | jest.mock("../../web/scripts/api", () => ({
45 | get api() {
46 | return mockApi;
47 | },
48 | }));
49 | }
50 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/encoders/noise_aug_modules.py:
--------------------------------------------------------------------------------
1 | from ..diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
2 | from ..diffusionmodules.openaimodel import Timestep
3 | import torch
4 |
5 | class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
6 |     def __init__(self, *args, clip_stats_path=None, timestep_dim=256, **kwargs):
7 |         super().__init__(*args, **kwargs)
8 |         if clip_stats_path is None:
9 |             clip_mean, clip_std = torch.zeros(timestep_dim), torch.ones(timestep_dim)
10 |         else:
11 |             clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu")
12 |         self.register_buffer("data_mean", clip_mean[None, :], persistent=False)
13 |         self.register_buffer("data_std", clip_std[None, :], persistent=False)
14 |         self.time_embed = Timestep(timestep_dim)
15 | 
16 |     def scale(self, x):
17 |         # re-normalize to centered mean and unit variance
18 |         x = (x - self.data_mean) * 1. / self.data_std
19 |         return x
20 | 
21 |     def unscale(self, x):
22 |         # back to original data stats
23 |         x = (x * self.data_std) + self.data_mean
24 |         return x
25 | 
26 |     def forward(self, x, noise_level=None):
27 |         if noise_level is None:
28 |             noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
29 |         else:
30 |             assert isinstance(noise_level, torch.Tensor)
31 |         x = self.scale(x)
32 |         z = self.q_sample(x, noise_level)
33 |         z = self.unscale(z)
34 |         noise_level = self.time_embed(noise_level)
35 |         return z, noise_level
36 |
--------------------------------------------------------------------------------
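A standalone illustration of the `scale`/`unscale` pair above: embeddings are whitened with stored statistics before `q_sample` and restored afterwards. Dummy mean/std values stand in for the ones loaded from `clip_stats_path`:

```python
import torch

data_mean = torch.full((1, 256), 0.1)  # placeholder statistics
data_std = torch.full((1, 256), 2.0)
x = torch.randn(4, 256)

scaled = (x - data_mean) / data_std       # scale(): centered, unit variance
restored = scaled * data_std + data_mean  # unscale(): back to original stats
assert torch.allclose(restored, x, atol=1e-6)
```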
/comfy_extras/chainner_models/architecture/LICENSE-RealESRGAN:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021, Xintao Wang
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/face/LICENSE-codeformer:
--------------------------------------------------------------------------------
1 | S-Lab License 1.0
2 |
3 | Copyright 2022 S-Lab
4 |
5 | Redistribution and use for non-commercial purpose in source and
6 | binary forms, with or without modification, are permitted provided
7 | that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright
10 | notice, this list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright
13 | notice, this list of conditions and the following disclaimer in
14 | the documentation and/or other materials provided with the
15 | distribution.
16 |
17 | 3. Neither the name of the copyright holder nor the names of its
18 | contributors may be used to endorse or promote products derived
19 | from this software without specific prior written permission.
20 |
21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 |
33 | In the event that redistribution and/or use for commercial purpose in
34 | source or binary forms, with or without modification is required,
35 | please contact the contributor(s) of the work.
36 |
--------------------------------------------------------------------------------
/web/extensions/core/dynamicPrompts.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Allows for simple dynamic prompt replacement
4 | // Inputs in the format {a|b} will have a random value of a or b chosen when the prompt is queued.
5 |
6 | /*
7 | * Strips C-style line and block comments from a string
8 | */
9 | function stripComments(str) {
10 | return str.replace(/\/\*[\s\S]*?\*\/|\/\/.*/g,'');
11 | }
12 |
13 | app.registerExtension({
14 | name: "Comfy.DynamicPrompts",
15 | nodeCreated(node) {
16 | if (node.widgets) {
17 | // Locate dynamic prompt text widgets
18 | // Include any widgets with dynamicPrompts set to true, and customtext
19 | const widgets = node.widgets.filter(
20 | (n) => (n.type === "customtext" && n.dynamicPrompts !== false) || n.dynamicPrompts
21 | );
22 | for (const widget of widgets) {
23 | // Override the serialization of the value to resolve dynamic prompts for all widgets supporting it in this node
24 | widget.serializeValue = (workflowNode, widgetIndex) => {
25 | let prompt = stripComments(widget.value);
26 | while (prompt.replace("\\{", "").includes("{") && prompt.replace("\\}", "").includes("}")) {
27 | const startIndex = prompt.replace("\\{", "00").indexOf("{");
28 | const endIndex = prompt.replace("\\}", "00").indexOf("}");
29 |
30 | const optionsString = prompt.substring(startIndex + 1, endIndex);
31 | const options = optionsString.split("|");
32 |
33 | const randomIndex = Math.floor(Math.random() * options.length);
34 | const randomOption = options[randomIndex];
35 |
36 | prompt = prompt.substring(0, startIndex) + randomOption + prompt.substring(endIndex + 1);
37 | }
38 |
39 | // Overwrite the value in the serialized workflow pnginfo
40 | if (workflowNode?.widgets_values)
41 | workflowNode.widgets_values[widgetIndex] = prompt;
42 |
43 | return prompt;
44 | };
45 | }
46 | }
47 | },
48 | });
49 |
--------------------------------------------------------------------------------
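A Python analogue, for illustration only, of the `{a|b|c}` resolution loop implemented above (the extension's escaped-brace handling is omitted for brevity):

```python
import random
import re


def resolve_dynamic_prompt(prompt: str) -> str:
    # Repeatedly replace the innermost {...} group with one randomly chosen option.
    pattern = re.compile(r"\{([^{}]*)\}")
    while True:
        match = pattern.search(prompt)
        if match is None:
            return prompt
        choice = random.choice(match.group(1).split("|"))
        prompt = prompt[:match.start()] + choice + prompt[match.end():]


print(resolve_dynamic_prompt("a {red|green|blue} house at {dawn|dusk}"))
```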
/tests/compare/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 |
4 | # Command line arguments for pytest
5 | def pytest_addoption(parser):
6 | parser.addoption('--baseline_dir', action="store", default='tests/inference/baseline', help='Directory for ground-truth images')
7 | parser.addoption('--test_dir', action="store", default='tests/inference/samples', help='Directory for images to test')
8 | parser.addoption('--metrics_file', action="store", default='tests/metrics.md', help='Output file for metrics')
9 | parser.addoption('--img_output_dir', action="store", default='tests/compare/samples', help='Output directory for diff metric images')
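  | # Example invocation (hypothetical, using the default paths above):
  | #   pytest tests/compare --baseline_dir tests/inference/baseline --test_dir tests/inference/samples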
10 |
11 | # This initializes args at the beginning of the test session
12 | @pytest.fixture(scope="session", autouse=True)
13 | def args_pytest(pytestconfig):
14 | args = {}
15 | args['baseline_dir'] = pytestconfig.getoption('baseline_dir')
16 | args['test_dir'] = pytestconfig.getoption('test_dir')
17 | args['metrics_file'] = pytestconfig.getoption('metrics_file')
18 | args['img_output_dir'] = pytestconfig.getoption('img_output_dir')
19 |
20 | # Initialize metrics file
21 | with open(args['metrics_file'], 'a') as f:
22 | # if file is empty, write header
23 | if os.stat(args['metrics_file']).st_size == 0:
24 | f.write("| date | run | file | status | value | \n")
25 | f.write("| --- | --- | --- | --- | --- | \n")
26 |
27 | return args
28 |
29 |
30 | def gather_file_basenames(directory: str):
31 | files = []
32 | for file in os.listdir(directory):
33 | if file.endswith(".png"):
34 | files.append(file)
35 | return files
36 |
37 | # Creates the list of baseline file names to use as a fixture
38 | def pytest_generate_tests(metafunc):
39 | if "baseline_fname" in metafunc.fixturenames:
40 | baseline_fnames = gather_file_basenames(metafunc.config.getoption("baseline_dir"))
41 | metafunc.parametrize("baseline_fname", baseline_fnames)
42 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_perpneg.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.model_management
3 | import comfy.sample
4 | import comfy.samplers
5 | import comfy.utils
6 |
7 |
8 | class PerpNeg:
9 | @classmethod
10 | def INPUT_TYPES(s):
11 | return {"required": {"model": ("MODEL", ),
12 | "empty_conditioning": ("CONDITIONING", ),
13 | "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
14 | }}
15 | RETURN_TYPES = ("MODEL",)
16 | FUNCTION = "patch"
17 |
18 | CATEGORY = "_for_testing"
19 |
20 | def patch(self, model, empty_conditioning, neg_scale):
21 | m = model.clone()
22 | nocond = comfy.sample.convert_cond(empty_conditioning)
23 |
24 | def cfg_function(args):
25 | model = args["model"]
26 | noise_pred_pos = args["cond_denoised"]
27 | noise_pred_neg = args["uncond_denoised"]
28 | cond_scale = args["cond_scale"]
29 | x = args["input"]
30 | sigma = args["sigma"]
31 | model_options = args["model_options"]
32 | nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")
33 |
34 | (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)
35 |
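  | # perp is the projection of the positive guidance (pos) onto the negative guidance (neg),
  | # i.e. the component the two directions share; it is scaled by neg_scale and removed below.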
36 | pos = noise_pred_pos - noise_pred_nocond
37 | neg = noise_pred_neg - noise_pred_nocond
38 | perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
39 | perp_neg = perp * neg_scale
40 | cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
41 | cfg_result = x - cfg_result
42 | return cfg_result
43 |
44 | m.set_model_sampler_cfg_function(cfg_function)
45 |
46 | return (m, )
47 |
48 |
49 | NODE_CLASS_MAPPINGS = {
50 | "PerpNeg": PerpNeg,
51 | }
52 |
53 | NODE_DISPLAY_NAME_MAPPINGS = {
54 | "PerpNeg": "Perp-Neg",
55 | }
56 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/types.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | from .architecture.DAT import DAT
4 | from .architecture.face.codeformer import CodeFormer
5 | from .architecture.face.gfpganv1_clean_arch import GFPGANv1Clean
6 | from .architecture.face.restoreformer_arch import RestoreFormer
7 | from .architecture.HAT import HAT
8 | from .architecture.LaMa import LaMa
9 | from .architecture.OmniSR.OmniSR import OmniSR
10 | from .architecture.RRDB import RRDBNet as ESRGAN
11 | from .architecture.SCUNet import SCUNet
12 | from .architecture.SPSR import SPSRNet as SPSR
13 | from .architecture.SRVGG import SRVGGNetCompact as RealESRGANv2
14 | from .architecture.SwiftSRGAN import Generator as SwiftSRGAN
15 | from .architecture.Swin2SR import Swin2SR
16 | from .architecture.SwinIR import SwinIR
17 |
18 | PyTorchSRModels = (
19 | RealESRGANv2,
20 | SPSR,
21 | SwiftSRGAN,
22 | ESRGAN,
23 | SwinIR,
24 | Swin2SR,
25 | HAT,
26 | OmniSR,
27 | SCUNet,
28 | DAT,
29 | )
30 | PyTorchSRModel = Union[
31 | RealESRGANv2,
32 | SPSR,
33 | SwiftSRGAN,
34 | ESRGAN,
35 | SwinIR,
36 | Swin2SR,
37 | HAT,
38 | OmniSR,
39 | SCUNet,
40 | DAT,
41 | ]
42 |
43 |
44 | def is_pytorch_sr_model(model: object):
45 | return isinstance(model, PyTorchSRModels)
46 |
47 |
48 | PyTorchFaceModels = (GFPGANv1Clean, RestoreFormer, CodeFormer)
49 | PyTorchFaceModel = Union[GFPGANv1Clean, RestoreFormer, CodeFormer]
50 |
51 |
52 | def is_pytorch_face_model(model: object):
53 | return isinstance(model, PyTorchFaceModels)
54 |
55 |
56 | PyTorchInpaintModels = (LaMa,)
57 | PyTorchInpaintModel = Union[LaMa]
58 |
59 |
60 | def is_pytorch_inpaint_model(model: object):
61 | return isinstance(model, PyTorchInpaintModels)
62 |
63 |
64 | PyTorchModels = (*PyTorchSRModels, *PyTorchFaceModels, *PyTorchInpaintModels)
65 | PyTorchModel = Union[PyTorchSRModel, PyTorchFaceModel, PyTorchInpaintModel]
66 |
67 |
68 | def is_pytorch_model(model: object):
69 | return isinstance(model, PyTorchModels)
70 |
--------------------------------------------------------------------------------
/models/configs/v2-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: True
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/models/configs/v2-inference_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: False
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/OmniSR/OSAG.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: OSAG.py
5 | # Created Date: Tuesday April 28th 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Sunday, 23rd April 2023 3:08:49 pm
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2020 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 |
14 | import torch.nn as nn
15 |
16 | from .esa import ESA
17 | from .OSA import OSA_Block
18 |
19 |
20 | class OSAG(nn.Module):
21 | def __init__(
22 | self,
23 | channel_num=64,
24 | bias=True,
25 | block_num=4,
26 | ffn_bias=False,
27 | window_size=0,
28 | pe=False,
29 | ):
30 | super(OSAG, self).__init__()
31 |
32 | # print("window_size: %d" % (window_size))
33 | # print("with_pe", pe)
34 | # print("ffn_bias: %d" % (ffn_bias))
35 |
36 | # block_script_name = kwargs.get("block_script_name", "OSA")
37 | # block_class_name = kwargs.get("block_class_name", "OSA_Block")
38 |
39 | # script_name = "." + block_script_name
40 | # package = __import__(script_name, fromlist=True)
41 | block_class = OSA_Block # getattr(package, block_class_name)
42 | group_list = []
43 | for _ in range(block_num):
44 | temp_res = block_class(
45 | channel_num,
46 | bias,
47 | ffn_bias=ffn_bias,
48 | window_size=window_size,
49 | with_pe=pe,
50 | )
51 | group_list.append(temp_res)
52 | group_list.append(nn.Conv2d(channel_num, channel_num, 1, 1, 0, bias=bias))
53 | self.residual_layer = nn.Sequential(*group_list)
54 | esa_channel = max(channel_num // 4, 16)
55 | self.esa = ESA(esa_channel, channel_num)
56 |
57 | def forward(self, x):
58 | out = self.residual_layer(x)
59 | out = out + x
60 | return self.esa(out)
61 |
--------------------------------------------------------------------------------
/models/configs/v2-inference-v.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: True
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/models/configs/v2-inference-v_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: False
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/web/extensions/core/keybinds.js:
--------------------------------------------------------------------------------
1 | import {app} from "../../scripts/app.js";
2 |
3 | app.registerExtension({
4 | name: "Comfy.Keybinds",
5 | init() {
6 | const keybindListener = function (event) {
7 | const modifierPressed = event.ctrlKey || event.metaKey;
8 |
9 | // Queue prompt using ctrl or command + enter
10 | if (modifierPressed && event.key === "Enter") {
11 | app.queuePrompt(event.shiftKey ? -1 : 0).then();
12 | return;
13 | }
14 |
15 | const target = event.composedPath()[0];
16 | if (["INPUT", "TEXTAREA"].includes(target.tagName)) {
17 | return;
18 | }
19 |
20 | const modifierKeyIdMap = {
21 | s: "#comfy-save-button",
22 | o: "#comfy-file-input",
23 | Backspace: "#comfy-clear-button",
24 | Delete: "#comfy-clear-button",
25 | d: "#comfy-load-default-button",
26 | };
27 |
28 | const modifierKeybindId = modifierKeyIdMap[event.key];
29 | if (modifierPressed && modifierKeybindId) {
30 | event.preventDefault();
31 |
32 | const elem = document.querySelector(modifierKeybindId);
33 | elem.click();
34 | return;
35 | }
36 |
37 | // Finished handling all modifier keybinds; now handle the rest
38 | if (event.ctrlKey || event.altKey || event.metaKey) {
39 | return;
40 | }
41 |
42 | // Close out of modals using escape
43 | if (event.key === "Escape") {
44 | const modals = document.querySelectorAll(".comfy-modal");
45 | const modal = Array.from(modals).find(modal => window.getComputedStyle(modal).getPropertyValue("display") !== "none");
46 | if (modal) {
47 | modal.style.display = "none";
48 | }
49 |
50 | [...document.querySelectorAll("dialog")].forEach(d => {
51 | d.close();
52 | });
53 | }
54 |
55 | const keyIdMap = {
56 | q: "#comfy-view-queue-button",
57 | h: "#comfy-view-history-button",
58 | r: "#comfy-refresh-button",
59 | };
60 |
61 | const buttonId = keyIdMap[event.key];
62 | if (buttonId) {
63 | const button = document.querySelector(buttonId);
64 | button.click();
65 | }
66 | }
67 |
68 | window.addEventListener("keydown", keybindListener, true);
69 | }
70 | });
71 |
--------------------------------------------------------------------------------
/web/scripts/utils.js:
--------------------------------------------------------------------------------
1 | // Simple date formatter
2 | const parts = {
3 | d: (d) => d.getDate(),
4 | M: (d) => d.getMonth() + 1,
5 | h: (d) => d.getHours(),
6 | m: (d) => d.getMinutes(),
7 | s: (d) => d.getSeconds(),
8 | };
9 | const format =
10 | Object.keys(parts)
11 | .map((k) => k + k + "?")
12 | .join("|") + "|yyy?y?";
13 |
14 | function formatDate(text, date) {
15 | return text.replace(new RegExp(format, "g"), function (text) {
16 | if (text === "yy") return (date.getFullYear() + "").substring(2);
17 | if (text === "yyyy") return date.getFullYear();
18 | if (text[0] in parts) {
19 | const p = parts[text[0]](date);
20 | return (p + "").padStart(text.length, "0");
21 | }
22 | return text;
23 | });
24 | }
25 |
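  | // Replaces %...% tokens: "%date:FORMAT%" is expanded via formatDate above, while
  | // "%<node S&R name or title>.<widget name>%" is replaced with that widget's current value.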
26 | export function applyTextReplacements(app, value) {
27 | return value.replace(/%([^%]+)%/g, function (match, text) {
28 | const split = text.split(".");
29 | if (split.length !== 2) {
30 | // Special handling for dates
31 | if (split[0].startsWith("date:")) {
32 | return formatDate(split[0].substring(5), new Date());
33 | }
34 |
35 | if (text !== "width" && text !== "height") {
36 | // Don't warn on standard replacements
37 | console.warn("Invalid replacement pattern", text);
38 | }
39 | return match;
40 | }
41 |
42 | // Find node with matching S&R property name
43 | let nodes = app.graph._nodes.filter((n) => n.properties?.["Node name for S&R"] === split[0]);
44 | // If we can't, see if there is a node with that title
45 | if (!nodes.length) {
46 | nodes = app.graph._nodes.filter((n) => n.title === split[0]);
47 | }
48 | if (!nodes.length) {
49 | console.warn("Unable to find node", split[0]);
50 | return match;
51 | }
52 |
53 | if (nodes.length > 1) {
54 | console.warn("Multiple nodes matched", split[0], "using first match");
55 | }
56 |
57 | const node = nodes[0];
58 |
59 | const widget = node.widgets?.find((w) => w.name === split[1]);
60 | if (!widget) {
61 | console.warn("Unable to find widget", split[1], "on node", split[0], node);
62 | return match;
63 | }
64 |
65 | return ((widget.value ?? "") + "").replaceAll(/\/|\\/g, "_");
66 | });
67 | }
68 |
--------------------------------------------------------------------------------
/models/configs/v1-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 |
--------------------------------------------------------------------------------
/web/extensions/logging.js.example:
--------------------------------------------------------------------------------
1 | import { app } from "../scripts/app.js";
2 |
3 | const ext = {
4 | // Unique name for the extension
5 | name: "Example.LoggingExtension",
6 | async init(app) {
7 | // Any initial setup to run as soon as the page loads
8 | console.log("[logging]", "extension init");
9 | },
10 | async setup(app) {
11 | // Any setup to run after the app is created
12 | console.log("[logging]", "extension setup");
13 | },
14 | async addCustomNodeDefs(defs, app) {
15 | // Add custom node definitions
16 | // These definitions will be configured and registered automatically
17 | // defs is a lookup of the core node definitions; add yours to it
18 | console.log("[logging]", "add custom node definitions", "current nodes:", Object.keys(defs));
19 | },
20 | async getCustomWidgets(app) {
21 | // Return custom widget types
22 | // See ComfyWidgets for widget examples
23 | console.log("[logging]", "provide custom widgets");
24 | },
25 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
26 | // Run custom logic before a node definition is registered with the graph
27 | console.log("[logging]", "before register node: ", nodeType, nodeData);
28 |
29 | // This fires for every node definition so only log once
30 | delete ext.beforeRegisterNodeDef;
31 | },
32 | async registerCustomNodes(app) {
33 | // Register any custom node implementations here, allowing for more flexibility than a custom node def
34 | console.log("[logging]", "register custom nodes");
35 | },
36 | loadedGraphNode(node, app) {
37 | // Fires for each node when loading/dragging/etc. a workflow JSON or PNG
38 | // If you break something in the backend and want to patch workflows in the frontend,
39 | // this is the place to do it
40 | console.log("[logging]", "loaded graph node: ", node);
41 |
42 | // This fires for every node on each load so only log once
43 | delete ext.loadedGraphNode;
44 | },
45 | nodeCreated(node, app) {
46 | // Fires every time a node is constructed
47 | // You can modify widgets/add handlers/etc here
48 | console.log("[logging]", "node created: ", node);
49 |
50 | // This fires for every node so only log once
51 | delete ext.nodeCreated;
52 | }
53 | };
54 |
55 | app.registerExtension(ext);
56 |
--------------------------------------------------------------------------------
/models/configs/anything_v3.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 | params:
73 | layer: "hidden"
74 | layer_idx: -2
75 |
--------------------------------------------------------------------------------
/models/configs/v1-inpainting-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 7.5e-05
3 | target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: hybrid # important
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | finetune_keys: null
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 9 # 4 data + 4 downscaled image + 1 mask
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
72 |
--------------------------------------------------------------------------------
/comfy/ops.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from contextlib import contextmanager
3 |
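  | # Layer subclasses whose reset_parameters() is a no-op, so building a model skips the usual
  | # weight initialization; the weights are presumably loaded from a state dict afterwards.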
4 | class disable_weight_init:
5 | class Linear(torch.nn.Linear):
6 | def reset_parameters(self):
7 | return None
8 |
9 | class Conv2d(torch.nn.Conv2d):
10 | def reset_parameters(self):
11 | return None
12 |
13 | class Conv3d(torch.nn.Conv3d):
14 | def reset_parameters(self):
15 | return None
16 |
17 | class GroupNorm(torch.nn.GroupNorm):
18 | def reset_parameters(self):
19 | return None
20 |
21 | class LayerNorm(torch.nn.LayerNorm):
22 | def reset_parameters(self):
23 | return None
24 |
25 | @classmethod
26 | def conv_nd(s, dims, *args, **kwargs):
27 | if dims == 2:
28 | return s.Conv2d(*args, **kwargs)
29 | elif dims == 3:
30 | return s.Conv3d(*args, **kwargs)
31 | else:
32 | raise ValueError(f"unsupported dimensions: {dims}")
33 |
34 | def cast_bias_weight(s, input):
35 | bias = None
36 | if s.bias is not None:
37 | bias = s.bias.to(device=input.device, dtype=input.dtype)
38 | weight = s.weight.to(device=input.device, dtype=input.dtype)
39 | return weight, bias
40 |
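  | # Same layers, but the weight and bias are cast to the input's device and dtype on every forward pass.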
41 | class manual_cast(disable_weight_init):
42 | class Linear(disable_weight_init.Linear):
43 | def forward(self, input):
44 | weight, bias = cast_bias_weight(self, input)
45 | return torch.nn.functional.linear(input, weight, bias)
46 |
47 | class Conv2d(disable_weight_init.Conv2d):
48 | def forward(self, input):
49 | weight, bias = cast_bias_weight(self, input)
50 | return self._conv_forward(input, weight, bias)
51 |
52 | class Conv3d(disable_weight_init.Conv3d):
53 | def forward(self, input):
54 | weight, bias = cast_bias_weight(self, input)
55 | return self._conv_forward(input, weight, bias)
56 |
57 | class GroupNorm(disable_weight_init.GroupNorm):
58 | def forward(self, input):
59 | weight, bias = cast_bias_weight(self, input)
60 | return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
61 |
62 | class LayerNorm(disable_weight_init.LayerNorm):
63 | def forward(self, input):
64 | weight, bias = cast_bias_weight(self, input)
65 | return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
66 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_upscale_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | from comfy_extras.chainner_models import model_loading
3 | from comfy import model_management
4 | import torch
5 | import comfy.utils
6 | import folder_paths
7 |
8 | class UpscaleModelLoader:
9 | @classmethod
10 | def INPUT_TYPES(s):
11 | return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ),
12 | }}
13 | RETURN_TYPES = ("UPSCALE_MODEL",)
14 | FUNCTION = "load_model"
15 |
16 | CATEGORY = "loaders"
17 |
18 | def load_model(self, model_name):
19 | model_path = folder_paths.get_full_path("upscale_models", model_name)
20 | sd = comfy.utils.load_torch_file(model_path, safe_load=True)
21 | if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
22 | sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""})
23 | out = model_loading.load_state_dict(sd).eval()
24 | return (out, )
25 |
26 |
27 | class ImageUpscaleWithModel:
28 | @classmethod
29 | def INPUT_TYPES(s):
30 | return {"required": { "upscale_model": ("UPSCALE_MODEL",),
31 | "image": ("IMAGE",),
32 | }}
33 | RETURN_TYPES = ("IMAGE",)
34 | FUNCTION = "upscale"
35 |
36 | CATEGORY = "image/upscaling"
37 |
38 | def upscale(self, upscale_model, image):
39 | device = model_management.get_torch_device()
40 | upscale_model.to(device)
41 | in_img = image.movedim(-1,-3).to(device)
42 | free_memory = model_management.get_free_memory(device)
43 |
44 | tile = 512
45 | overlap = 32
46 |
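  | # Tiled upscale: on an out-of-memory error, halve the tile size and retry, down to a 128px floor.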
47 | oom = True
48 | while oom:
49 | try:
50 | steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
51 | pbar = comfy.utils.ProgressBar(steps)
52 | s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
53 | oom = False
54 | except model_management.OOM_EXCEPTION as e:
55 | tile //= 2
56 | if tile < 128:
57 | raise e
58 |
59 | upscale_model.cpu()
60 | s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0)
61 | return (s,)
62 |
63 | NODE_CLASS_MAPPINGS = {
64 | "UpscaleModelLoader": UpscaleModelLoader,
65 | "ImageUpscaleWithModel": ImageUpscaleWithModel
66 | }
67 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/OmniSR/layernorm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding:utf-8 -*-
3 | #############################################################
4 | # File: layernorm.py
5 | # Created Date: Tuesday April 28th 2022
6 | # Author: Chen Xuanhong
7 | # Email: chenxuanhongzju@outlook.com
8 | # Last Modified: Thursday, 20th April 2023 9:28:20 am
9 | # Modified By: Chen Xuanhong
10 | # Copyright (c) 2020 Shanghai Jiao Tong University
11 | #############################################################
12 |
13 | import torch
14 | import torch.nn as nn
15 |
16 |
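  | # LayerNorm applied over the channel dimension of NCHW feature maps, with a hand-written backward pass.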
17 | class LayerNormFunction(torch.autograd.Function):
18 | @staticmethod
19 | def forward(ctx, x, weight, bias, eps):
20 | ctx.eps = eps
21 | N, C, H, W = x.size()
22 | mu = x.mean(1, keepdim=True)
23 | var = (x - mu).pow(2).mean(1, keepdim=True)
24 | y = (x - mu) / (var + eps).sqrt()
25 | ctx.save_for_backward(y, var, weight)
26 | y = weight.view(1, C, 1, 1) * y + bias.view(1, C, 1, 1)
27 | return y
28 |
29 | @staticmethod
30 | def backward(ctx, grad_output):
31 | eps = ctx.eps
32 |
33 | N, C, H, W = grad_output.size()
34 | y, var, weight = ctx.saved_variables
35 | g = grad_output * weight.view(1, C, 1, 1)
36 | mean_g = g.mean(dim=1, keepdim=True)
37 |
38 | mean_gy = (g * y).mean(dim=1, keepdim=True)
39 | gx = 1.0 / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
40 | return (
41 | gx,
42 | (grad_output * y).sum(dim=3).sum(dim=2).sum(dim=0),
43 | grad_output.sum(dim=3).sum(dim=2).sum(dim=0),
44 | None,
45 | )
46 |
47 |
48 | class LayerNorm2d(nn.Module):
49 | def __init__(self, channels, eps=1e-6):
50 | super(LayerNorm2d, self).__init__()
51 | self.register_parameter("weight", nn.Parameter(torch.ones(channels)))
52 | self.register_parameter("bias", nn.Parameter(torch.zeros(channels)))
53 | self.eps = eps
54 |
55 | def forward(self, x):
56 | return LayerNormFunction.apply(x, self.weight, self.bias, self.eps)
57 |
58 |
59 | class GRN(nn.Module):
60 | """GRN (Global Response Normalization) layer"""
61 |
62 | def __init__(self, dim):
63 | super().__init__()
64 | self.gamma = nn.Parameter(torch.zeros(1, dim, 1, 1))
65 | self.beta = nn.Parameter(torch.zeros(1, dim, 1, 1))
66 |
67 | def forward(self, x):
68 | Gx = torch.norm(x, p=2, dim=(2, 3), keepdim=True)
69 | Nx = Gx / (Gx.mean(dim=1, keepdim=True) + 1e-6)
70 | return self.gamma * (x * Nx) + self.beta + x
71 |
--------------------------------------------------------------------------------
/tests-ui/setup.js:
--------------------------------------------------------------------------------
1 | const { spawn } = require("child_process");
2 | const { resolve } = require("path");
3 | const { existsSync, mkdirSync, writeFileSync } = require("fs");
4 | const http = require("http");
5 |
6 | async function setup() {
7 | // Wait up to 30s for the ComfyUI server to start
8 | let success = false;
9 | let child;
10 | for (let i = 0; i < 30; i++) {
11 | try {
12 | await new Promise((res, rej) => {
13 | http
14 | .get("http://127.0.0.1:8188/object_info", (resp) => {
15 | let data = "";
16 | resp.on("data", (chunk) => {
17 | data += chunk;
18 | });
19 | resp.on("end", () => {
20 | // Modify the response data to add some checkpoints
21 | const objectInfo = JSON.parse(data);
22 | objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"];
23 | objectInfo.VAELoader.input.required.vae_name[0] = ["vae1.safetensors", "vae2.ckpt"];
24 |
25 | data = JSON.stringify(objectInfo, undefined, "\t");
26 |
27 | const outDir = resolve("./data");
28 | if (!existsSync(outDir)) {
29 | mkdirSync(outDir);
30 | }
31 |
32 | const outPath = resolve(outDir, "object_info.json");
33 | console.log(`Writing ${Object.keys(objectInfo).length} nodes to ${outPath}`);
34 | writeFileSync(outPath, data, {
35 | encoding: "utf8",
36 | });
37 | res();
38 | });
39 | })
40 | .on("error", rej);
41 | });
42 | success = true;
43 | break;
44 | } catch (error) {
45 | console.log(i + "/30", error);
46 | if (i === 0) {
47 | // Start the server on first iteration if it fails to connect
48 | console.log("Starting ComfyUI server...");
49 |
50 | let python = resolve("../../python_embeded/python.exe");
51 | let args;
52 | let cwd;
53 | if (existsSync(python)) {
54 | args = ["-s", "ComfyUI/main.py"];
55 | cwd = "../..";
56 | } else {
57 | python = "python";
58 | args = ["main.py"];
59 | cwd = "..";
60 | }
61 | args.push("--cpu");
62 | console.log(python, ...args);
63 | child = spawn(python, args, { cwd });
64 | child.on("error", (err) => {
65 | console.log(`Server error (${err})`);
66 | i = 30;
67 | });
68 | child.on("exit", (code) => {
69 | if (!success) {
70 | console.log(`Server exited (${code})`);
71 | i = 30;
72 | }
73 | });
74 | }
75 | await new Promise((r) => {
76 | setTimeout(r, 1000);
77 | });
78 | }
79 | }
80 |
81 | child?.kill();
82 |
83 | if (!success) {
84 | throw new Error("Waiting for server failed...");
85 | }
86 | }
87 |
88 | setup();
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/face/fused_act.py:
--------------------------------------------------------------------------------
1 | # pylint: skip-file
2 | # type: ignore
3 | # modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
4 |
5 | import torch
6 | from torch import nn
7 | from torch.autograd import Function
8 |
9 | fused_act_ext = None
10 |
11 |
12 | class FusedLeakyReLUFunctionBackward(Function):
13 | @staticmethod
14 | def forward(ctx, grad_output, out, negative_slope, scale):
15 | ctx.save_for_backward(out)
16 | ctx.negative_slope = negative_slope
17 | ctx.scale = scale
18 |
19 | empty = grad_output.new_empty(0)
20 |
21 | grad_input = fused_act_ext.fused_bias_act(
22 | grad_output, empty, out, 3, 1, negative_slope, scale
23 | )
24 |
25 | dim = [0]
26 |
27 | if grad_input.ndim > 2:
28 | dim += list(range(2, grad_input.ndim))
29 |
30 | grad_bias = grad_input.sum(dim).detach()
31 |
32 | return grad_input, grad_bias
33 |
34 | @staticmethod
35 | def backward(ctx, gradgrad_input, gradgrad_bias):
36 | (out,) = ctx.saved_tensors
37 | gradgrad_out = fused_act_ext.fused_bias_act(
38 | gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
39 | )
40 |
41 | return gradgrad_out, None, None, None
42 |
43 |
44 | class FusedLeakyReLUFunction(Function):
45 | @staticmethod
46 | def forward(ctx, input, bias, negative_slope, scale):
47 | empty = input.new_empty(0)
48 | out = fused_act_ext.fused_bias_act(
49 | input, bias, empty, 3, 0, negative_slope, scale
50 | )
51 | ctx.save_for_backward(out)
52 | ctx.negative_slope = negative_slope
53 | ctx.scale = scale
54 |
55 | return out
56 |
57 | @staticmethod
58 | def backward(ctx, grad_output):
59 | (out,) = ctx.saved_tensors
60 |
61 | grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
62 | grad_output, out, ctx.negative_slope, ctx.scale
63 | )
64 |
65 | return grad_input, grad_bias, None, None
66 |
67 |
68 | class FusedLeakyReLU(nn.Module):
69 | def __init__(self, channel, negative_slope=0.2, scale=2**0.5):
70 | super().__init__()
71 |
72 | self.bias = nn.Parameter(torch.zeros(channel))
73 | self.negative_slope = negative_slope
74 | self.scale = scale
75 |
76 | def forward(self, input):
77 | return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
78 |
79 |
80 | def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5):
81 | return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
82 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_model_downscale.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.utils
3 |
4 | class PatchModelAddDownscale:
5 | upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]
6 | @classmethod
7 | def INPUT_TYPES(s):
8 | return {"required": { "model": ("MODEL",),
9 | "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}),
10 | "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}),
11 | "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
12 | "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}),
13 | "downscale_after_skip": ("BOOLEAN", {"default": True}),
14 | "downscale_method": (s.upscale_methods,),
15 | "upscale_method": (s.upscale_methods,),
16 | }}
17 | RETURN_TYPES = ("MODEL",)
18 | FUNCTION = "patch"
19 |
20 | CATEGORY = "_for_testing"
21 |
22 | def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method):
23 | sigma_start = model.model.model_sampling.percent_to_sigma(start_percent)
24 | sigma_end = model.model.model_sampling.percent_to_sigma(end_percent)
25 |
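  | # Downscale the hidden states at the chosen input block while sigma is inside the configured
  | # start/end window; the output block patch upscales them back to match the skip connection.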
26 | def input_block_patch(h, transformer_options):
27 | if transformer_options["block"][1] == block_number:
28 | sigma = transformer_options["sigmas"][0].item()
29 | if sigma <= sigma_start and sigma >= sigma_end:
30 | h = comfy.utils.common_upscale(h, round(h.shape[-1] * (1.0 / downscale_factor)), round(h.shape[-2] * (1.0 / downscale_factor)), downscale_method, "disabled")
31 | return h
32 |
33 | def output_block_patch(h, hsp, transformer_options):
34 | if h.shape[2] != hsp.shape[2]:
35 | h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled")
36 | return h, hsp
37 |
38 | m = model.clone()
39 | if downscale_after_skip:
40 | m.set_model_input_block_patch_after_skip(input_block_patch)
41 | else:
42 | m.set_model_input_block_patch(input_block_patch)
43 | m.set_model_output_block_patch(output_block_patch)
44 | return (m, )
45 |
46 | NODE_CLASS_MAPPINGS = {
47 | "PatchModelAddDownscale": PatchModelAddDownscale,
48 | }
49 |
50 | NODE_DISPLAY_NAME_MAPPINGS = {
51 | # Sampling
52 | "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)",
53 | }
54 |
--------------------------------------------------------------------------------
/web/scripts/ui/imagePreview.js:
--------------------------------------------------------------------------------
1 | import { $el } from "../ui.js";
2 |
3 | export function calculateImageGrid(imgs, dw, dh) {
4 | let best = 0;
5 | let w = imgs[0].naturalWidth;
6 | let h = imgs[0].naturalHeight;
7 | const numImages = imgs.length;
8 |
9 | let cellWidth, cellHeight, cols, rows, shiftX;
10 | // compact style
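  | // Try every column count and keep the layout that maximizes the total displayed image area.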
11 | for (let c = 1; c <= numImages; c++) {
12 | const r = Math.ceil(numImages / c);
13 | const cW = dw / c;
14 | const cH = dh / r;
15 | const scaleX = cW / w;
16 | const scaleY = cH / h;
17 |
18 | const scale = Math.min(scaleX, scaleY, 1);
19 | const imageW = w * scale;
20 | const imageH = h * scale;
21 | const area = imageW * imageH * numImages;
22 |
23 | if (area > best) {
24 | best = area;
25 | cellWidth = imageW;
26 | cellHeight = imageH;
27 | cols = c;
28 | rows = r;
29 | shiftX = c * ((cW - imageW) / 2);
30 | }
31 | }
32 |
33 | return { cellWidth, cellHeight, cols, rows, shiftX };
34 | }
35 |
36 | export function createImageHost(node) {
37 | const el = $el("div.comfy-img-preview");
38 | let currentImgs;
39 | let first = true;
40 |
41 | function updateSize() {
42 | let w = null;
43 | let h = null;
44 |
45 | if (currentImgs) {
46 | let elH = el.clientHeight;
47 | if (first) {
48 | first = false;
49 | // On first run, if we are small then grow a bit
50 | if (elH < 190) {
51 | elH = 190;
52 | }
53 | el.style.setProperty("--comfy-widget-min-height", elH);
54 | } else {
55 | el.style.setProperty("--comfy-widget-min-height", null);
56 | }
57 |
58 | const nw = node.size[0];
59 | ({ cellWidth: w, cellHeight: h } = calculateImageGrid(currentImgs, nw - 20, elH));
60 | w += "px";
61 | h += "px";
62 |
63 | el.style.setProperty("--comfy-img-preview-width", w);
64 | el.style.setProperty("--comfy-img-preview-height", h);
65 | }
66 | }
67 | return {
68 | el,
69 | updateImages(imgs) {
70 | if (imgs !== currentImgs) {
71 | if (currentImgs == null) {
72 | requestAnimationFrame(() => {
73 | updateSize();
74 | });
75 | }
76 | el.replaceChildren(...imgs);
77 | currentImgs = imgs;
78 | node.onResize(node.size);
79 | node.graph.setDirtyCanvas(true, true);
80 | }
81 | },
82 | getHeight() {
83 | updateSize();
84 | },
85 | onDraw() {
86 | // elementFromPoint uses a hit test to find elements, so we need to toggle pointer events
87 | el.style.pointerEvents = "all";
88 | const over = document.elementFromPoint(app.canvas.mouse[0], app.canvas.mouse[1]);
89 | el.style.pointerEvents = "none";
90 |
91 | if(!over) return;
92 | // Set the overIndex so Open Image etc. work
93 | const idx = currentImgs.indexOf(over);
94 | node.overIndex = idx;
95 | },
96 | };
97 | }
98 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_dependencies.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release dependencies"
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | xformers:
7 | description: 'xformers version'
8 | required: false
9 | type: string
10 | default: ""
11 | cu:
12 | description: 'cuda version'
13 | required: true
14 | type: string
15 | default: "121"
16 |
17 | python_minor:
18 | description: 'python minor version'
19 | required: true
20 | type: string
21 | default: "11"
22 |
23 | python_patch:
24 | description: 'python patch version'
25 | required: true
26 | type: string
27 | default: "6"
28 | # push:
29 | # branches:
30 | # - master
31 |
32 | jobs:
33 | build_dependencies:
34 | runs-on: windows-latest
35 | steps:
36 | - uses: actions/checkout@v3
37 | - uses: actions/setup-python@v4
38 | with:
39 | python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }}
40 |
41 | - shell: bash
42 | run: |
43 | echo "@echo off
44 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\\
45 | echo -
46 | echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff
47 | echo You should not be running this anyways unless you really have to
48 | echo -
49 | echo If you just want to update normally, close this and run update_comfyui.bat instead.
50 | echo -
51 | pause
52 | ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2
53 | pause" > update_comfyui_and_python_dependencies.bat
54 |
55 | python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir
56 | python -m pip install --no-cache-dir ./temp_wheel_dir/*
57 | echo installed basic
58 | ls -lah temp_wheel_dir
59 | mv temp_wheel_dir cu${{ inputs.cu }}_python_deps
60 | tar cf cu${{ inputs.cu }}_python_deps.tar cu${{ inputs.cu }}_python_deps
61 |
62 | - uses: actions/cache/save@v3
63 | with:
64 | path: |
65 | cu${{ inputs.cu }}_python_deps.tar
66 | update_comfyui_and_python_dependencies.bat
67 | key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }}
68 |
--------------------------------------------------------------------------------
/comfy/conds.py:
--------------------------------------------------------------------------------
1 | import enum
2 | import torch
3 | import math
4 | import comfy.utils
5 |
6 |
7 | def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
8 | return abs(a*b) // math.gcd(a, b)
9 |
10 | class CONDRegular:
11 | def __init__(self, cond):
12 | self.cond = cond
13 |
14 | def _copy_with(self, cond):
15 | return self.__class__(cond)
16 |
17 | def process_cond(self, batch_size, device, **kwargs):
18 | return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device))
19 |
20 | def can_concat(self, other):
21 | if self.cond.shape != other.cond.shape:
22 | return False
23 | return True
24 |
25 | def concat(self, others):
26 | conds = [self.cond]
27 | for x in others:
28 | conds.append(x.cond)
29 | return torch.cat(conds)
30 |
31 | class CONDNoiseShape(CONDRegular):
32 | def process_cond(self, batch_size, device, area, **kwargs):
33 | data = self.cond[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
34 | return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device))
35 |
36 |
37 | class CONDCrossAttn(CONDRegular):
38 | def can_concat(self, other):
39 | s1 = self.cond.shape
40 | s2 = other.cond.shape
41 | if s1 != s2:
42 | if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
43 | return False
44 |
45 | mult_min = lcm(s1[1], s2[1])
46 | diff = mult_min // min(s1[1], s2[1])
47 | if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
48 | return False
49 | return True
50 |
51 | def concat(self, others):
52 | conds = [self.cond]
53 | crossattn_max_len = self.cond.shape[1]
54 | for x in others:
55 | c = x.cond
56 | crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
57 | conds.append(c)
58 |
59 | out = []
60 | for c in conds:
61 | if c.shape[1] < crossattn_max_len:
62 | c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
63 | out.append(c)
64 | return torch.cat(out)
65 |
66 | class CONDConstant(CONDRegular):
67 | def __init__(self, cond):
68 | self.cond = cond
69 |
70 | def process_cond(self, batch_size, device, **kwargs):
71 | return self._copy_with(self.cond)
72 |
73 | def can_concat(self, other):
74 | if self.cond != other.cond:
75 | return False
76 | return True
77 |
78 | def concat(self, others):
79 | return self.cond
80 |
--------------------------------------------------------------------------------
/comfy/supported_models_base.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from . import model_base
3 | from . import utils
4 | from . import latent_formats
5 |
6 | class ClipTarget:
7 | def __init__(self, tokenizer, clip):
8 | self.clip = clip
9 | self.tokenizer = tokenizer
10 | self.params = {}
11 |
12 | class BASE:
13 | unet_config = {}
14 | unet_extra_config = {
15 | "num_heads": -1,
16 | "num_head_channels": 64,
17 | }
18 |
19 | clip_prefix = []
20 | clip_vision_prefix = None
21 | noise_aug_config = None
22 | sampling_settings = {}
23 | latent_format = latent_formats.LatentFormat
24 |
25 | manual_cast_dtype = None
26 |
27 | @classmethod
28 | def matches(s, unet_config):
29 | for k in s.unet_config:
30 | if s.unet_config[k] != unet_config[k]:
31 | return False
32 | return True
33 |
34 | def model_type(self, state_dict, prefix=""):
35 | return model_base.ModelType.EPS
36 |
37 | def inpaint_model(self):
38 | return self.unet_config["in_channels"] > 4
39 |
40 | def __init__(self, unet_config):
41 | self.unet_config = unet_config
42 | self.latent_format = self.latent_format()
43 | for x in self.unet_extra_config:
44 | self.unet_config[x] = self.unet_extra_config[x]
45 |
46 | def get_model(self, state_dict, prefix="", device=None):
47 | if self.noise_aug_config is not None:
48 | out = model_base.SD21UNCLIP(self, self.noise_aug_config, model_type=self.model_type(state_dict, prefix), device=device)
49 | else:
50 | out = model_base.BaseModel(self, model_type=self.model_type(state_dict, prefix), device=device)
51 | if self.inpaint_model():
52 | out.set_inpaint()
53 | return out
54 |
55 | def process_clip_state_dict(self, state_dict):
56 | return state_dict
57 |
58 | def process_unet_state_dict(self, state_dict):
59 | return state_dict
60 |
61 | def process_vae_state_dict(self, state_dict):
62 | return state_dict
63 |
64 | def process_clip_state_dict_for_saving(self, state_dict):
65 | replace_prefix = {"": "cond_stage_model."}
66 | return utils.state_dict_prefix_replace(state_dict, replace_prefix)
67 |
68 | def process_unet_state_dict_for_saving(self, state_dict):
69 | replace_prefix = {"": "model.diffusion_model."}
70 | return utils.state_dict_prefix_replace(state_dict, replace_prefix)
71 |
72 | def process_vae_state_dict_for_saving(self, state_dict):
73 | replace_prefix = {"": "first_stage_model."}
74 | return utils.state_dict_prefix_replace(state_dict, replace_prefix)
75 |
76 | def set_manual_cast(self, manual_cast_dtype):
77 | self.manual_cast_dtype = manual_cast_dtype
78 |
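For context, checkpoint detection elsewhere in comfy calls matches() with the UNet config parsed from a state dict, and a subclass only has to pin down the keys that identify it. A minimal hypothetical sketch (the class name and config values below are invented for illustration, not taken from the real supported_models.py):

    from comfy import supported_models_base

    class FakeSD15(supported_models_base.BASE):
        # only the keys listed here are compared by matches()
        unet_config = {"context_dim": 768, "use_linear_in_transformer": False}

    detected = {"context_dim": 768, "use_linear_in_transformer": False, "in_channels": 4}
    print(FakeSD15.matches(detected))   # True: every pinned key matches the detected config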
--------------------------------------------------------------------------------
/comfy_extras/nodes_clip_sdxl.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from nodes import MAX_RESOLUTION
3 |
4 | class CLIPTextEncodeSDXLRefiner:
5 | @classmethod
6 | def INPUT_TYPES(s):
7 | return {"required": {
8 | "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
9 | "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
10 | "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
11 | "text": ("STRING", {"multiline": True}), "clip": ("CLIP", ),
12 | }}
13 | RETURN_TYPES = ("CONDITIONING",)
14 | FUNCTION = "encode"
15 |
16 | CATEGORY = "advanced/conditioning"
17 |
18 | def encode(self, clip, ascore, width, height, text):
19 | tokens = clip.tokenize(text)
20 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
21 | return ([[cond, {"pooled_output": pooled, "aesthetic_score": ascore, "width": width,"height": height}]], )
22 |
23 | class CLIPTextEncodeSDXL:
24 | @classmethod
25 | def INPUT_TYPES(s):
26 | return {"required": {
27 | "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
28 | "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
29 | "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
30 | "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
31 | "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
32 | "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
33 | "text_g": ("STRING", {"multiline": True, "default": "CLIP_G"}), "clip": ("CLIP", ),
34 | "text_l": ("STRING", {"multiline": True, "default": "CLIP_L"}), "clip": ("CLIP", ),
35 | }}
36 | RETURN_TYPES = ("CONDITIONING",)
37 | FUNCTION = "encode"
38 |
39 | CATEGORY = "advanced/conditioning"
40 |
41 | def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
42 | tokens = clip.tokenize(text_g)
43 | tokens["l"] = clip.tokenize(text_l)["l"]
44 | if len(tokens["l"]) != len(tokens["g"]):
45 | empty = clip.tokenize("")
46 | while len(tokens["l"]) < len(tokens["g"]):
47 | tokens["l"] += empty["l"]
48 | while len(tokens["l"]) > len(tokens["g"]):
49 | tokens["g"] += empty["g"]
50 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
51 | return ([[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]], )
52 |
53 | NODE_CLASS_MAPPINGS = {
54 | "CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner,
55 | "CLIPTextEncodeSDXL": CLIPTextEncodeSDXL,
56 | }
57 |
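The while loops in CLIPTextEncodeSDXL.encode only equalize the number of 77-token chunks fed to the two text encoders. A standalone sketch of that balancing step (the token lists here are dummies, not real CLIP tokens):

    empty_chunk = ["<pad>"] * 77                     # stand-in for clip.tokenize("")["l"][0]
    tokens = {"g": [["tok"] * 77, ["tok"] * 77],     # text_g tokenized into two chunks
              "l": [["tok"] * 77]}                   # text_l tokenized into one chunk

    while len(tokens["l"]) < len(tokens["g"]):
        tokens["l"].append(empty_chunk)
    while len(tokens["l"]) > len(tokens["g"]):
        tokens["g"].append(empty_chunk)

    print(len(tokens["g"]), len(tokens["l"]))        # 2 2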
--------------------------------------------------------------------------------
/.github/workflows/windows_release_cu118_package.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release cu118 packaging"
2 |
3 | on:
4 | workflow_dispatch:
5 | # push:
6 | # branches:
7 | # - master
8 |
9 | jobs:
10 | package_comfyui:
11 | permissions:
12 | contents: "write"
13 | packages: "write"
14 | pull-requests: "read"
15 | runs-on: windows-latest
16 | steps:
17 | - uses: actions/cache/restore@v3
18 | id: cache
19 | with:
20 | path: cu118_python_deps.tar
21 | key: ${{ runner.os }}-build-cu118
22 | - shell: bash
23 | run: |
24 | mv cu118_python_deps.tar ../
25 | cd ..
26 | tar xf cu118_python_deps.tar
27 | pwd
28 | ls
29 |
30 | - uses: actions/checkout@v3
31 | with:
32 | fetch-depth: 0
33 | persist-credentials: false
34 | - shell: bash
35 | run: |
36 | cd ..
37 | cp -r ComfyUI ComfyUI_copy
38 | curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
39 | unzip python_embeded.zip -d python_embeded
40 | cd python_embeded
41 | echo 'import site' >> ./python310._pth
42 | curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
43 | ./python.exe get-pip.py
44 | ./python.exe -s -m pip install ../cu118_python_deps/*
45 | sed -i '1i../ComfyUI' ./python310._pth
46 | cd ..
47 |
48 | git clone https://github.com/comfyanonymous/taesd
49 | cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/
50 |
51 | mkdir ComfyUI_windows_portable
52 | mv python_embeded ComfyUI_windows_portable
53 | mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
54 |
55 | cd ComfyUI_windows_portable
56 |
57 | mkdir update
58 | cp -r ComfyUI/.ci/update_windows/* ./update/
59 | cp -r ComfyUI/.ci/update_windows_cu118/* ./update/
60 | cp -r ComfyUI/.ci/windows_base_files/* ./
61 |
62 | cd ..
63 |
64 | "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
65 | mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
66 |
67 | cd ComfyUI_windows_portable
68 | python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
69 |
70 | ls
71 |
72 | - name: Upload binaries to release
73 | uses: svenstaro/upload-release-action@v2
74 | with:
75 | repo_token: ${{ secrets.GITHUB_TOKEN }}
76 | file: new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
77 | tag: "latest"
78 | overwrite: true
79 |
80 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_cu118_dependencies.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release cu118 dependencies"
2 |
3 | on:
4 | workflow_dispatch:
5 | # push:
6 | # branches:
7 | # - master
8 |
9 | jobs:
10 | build_dependencies:
11 | env:
12 | # you need at least CUDA compute capability 5.0 for some of the stuff compiled here.
13 | TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6 8.9"
14 | FORCE_CUDA: 1
15 | MAX_JOBS: 1 # will crash otherwise
16 | DISTUTILS_USE_SDK: 1 # otherwise distutils will complain on windows about multiple versions of msvc
17 | XFORMERS_BUILD_TYPE: "Release"
18 | runs-on: windows-latest
19 | steps:
20 | - name: Cache Built Dependencies
21 | uses: actions/cache@v3
22 | id: cache-cu118_python_stuff
23 | with:
24 | path: cu118_python_deps.tar
25 | key: ${{ runner.os }}-build-cu118
26 |
27 | - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
28 | uses: actions/checkout@v3
29 |
30 | - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
31 | uses: actions/setup-python@v4
32 | with:
33 | python-version: '3.10.9'
34 |
35 | - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
36 | uses: comfyanonymous/cuda-toolkit@test
37 | id: cuda-toolkit
38 | with:
39 | cuda: '11.8.0'
40 | # copied from xformers github
41 | - name: Setup MSVC
42 | uses: ilammy/msvc-dev-cmd@v1
43 | - name: Configure Pagefile
44 | # windows runners will OOM with many CUDA architectures
45 | # we cheat here with a page file
46 | uses: al-cheb/configure-pagefile-action@v1.3
47 | with:
48 | minimum-size: 2GB
49 | # really unfortunate: https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash
50 | - name: Remove link.exe
51 | shell: bash
52 | run: rm /usr/bin/link
53 |
54 | - if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
55 | shell: bash
56 | run: |
57 | python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
58 | python -m pip install --no-cache-dir ./temp_wheel_dir/*
59 | echo installed basic
60 | git clone --recurse-submodules https://github.com/facebookresearch/xformers.git
61 | cd xformers
62 | python -m pip install --no-cache-dir wheel setuptools twine
63 | echo building xformers
64 | python setup.py bdist_wheel -d ../temp_wheel_dir/
65 | cd ..
66 | rm -rf xformers
67 | ls -lah temp_wheel_dir
68 | mv temp_wheel_dir cu118_python_deps
69 | tar cf cu118_python_deps.tar cu118_python_deps
70 |
71 |
72 |
--------------------------------------------------------------------------------
/web/types/comfy.d.ts:
--------------------------------------------------------------------------------
1 | import { LGraphNode, IWidget } from "./litegraph";
2 | import { ComfyApp } from "../../scripts/app";
3 |
4 | export interface ComfyExtension {
5 | /**
6 | * The name of the extension
7 | */
8 | name: string;
9 | /**
10 | * Allows any initialisation, e.g. loading resources. Called after the canvas is created but before nodes are added
11 | * @param app The ComfyUI app instance
12 | */
13 | init(app: ComfyApp): Promise<void>;
14 | /**
15 | * Allows any additional setup, called after the application is fully set up and running
16 | * @param app The ComfyUI app instance
17 | */
18 | setup(app: ComfyApp): Promise<void>;
19 | /**
20 | * Called before nodes are registered with the graph
21 | * @param defs The collection of node definitions, add custom ones or edit existing ones
22 | * @param app The ComfyUI app instance
23 | */
24 | addCustomNodeDefs(defs: Record<string, ComfyObjectInfo>, app: ComfyApp): Promise<void>;
25 | /**
26 | * Allows the extension to add custom widgets
27 | * @param app The ComfyUI app instance
28 | * @returns An array of {[widget name]: widget data}
29 | */
30 | getCustomWidgets(
31 | app: ComfyApp
32 | ): Promise<
33 | Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
34 | >;
35 | /**
36 | * Allows the extension to add additional handling to the node before it is registered with LGraph
37 | * @param nodeType The node class (not an instance)
38 | * @param nodeData The original node object info config object
39 | * @param app The ComfyUI app instance
40 | */
41 | beforeRegisterNodeDef(nodeType: typeof LGraphNode, nodeData: ComfyObjectInfo, app: ComfyApp): Promise<void>;
42 | /**
43 | * Allows the extension to register additional nodes with LGraph after standard nodes are added
44 | * @param app The ComfyUI app instance
45 | */
46 | registerCustomNodes(app: ComfyApp): Promise<void>;
47 | /**
48 | * Allows the extension to modify a node that has been reloaded onto the graph.
49 | * If you break something in the backend and want to patch workflows in the frontend,
50 | * this is the place to do it.
51 | * @param node The node that has been loaded
52 | * @param app The ComfyUI app instance
53 | */
54 | loadedGraphNode(node: LGraphNode, app: ComfyApp);
55 | /**
56 | * Allows the extension to run code after the constructor of the node
57 | * @param node The node that has been created
58 | * @param app The ComfyUI app instance
59 | */
60 | nodeCreated(node: LGraphNode, app: ComfyApp);
61 | }
62 |
63 | export type ComfyObjectInfo = {
64 | name: string;
65 | display_name?: string;
66 | description?: string;
67 | category: string;
68 | input?: {
69 | required?: Record<string, ComfyObjectInfoConfig>;
70 | optional?: Record<string, ComfyObjectInfoConfig>;
71 | };
72 | output?: string[];
73 | output_name: string[];
74 | };
75 |
76 | export type ComfyObjectInfoConfig = [string | any[]] | [string | any[], any];
77 |
--------------------------------------------------------------------------------
/.ci/update_windows/update.py:
--------------------------------------------------------------------------------
1 | import pygit2
2 | from datetime import datetime
3 | import sys
4 |
5 | def pull(repo, remote_name='origin', branch='master'):
6 | for remote in repo.remotes:
7 | if remote.name == remote_name:
8 | remote.fetch()
9 | remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target
10 | merge_result, _ = repo.merge_analysis(remote_master_id)
11 | # Up to date, do nothing
12 | if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE:
13 | return
14 | # We can just fastforward
15 | elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD:
16 | repo.checkout_tree(repo.get(remote_master_id))
17 | try:
18 | master_ref = repo.lookup_reference('refs/heads/%s' % (branch))
19 | master_ref.set_target(remote_master_id)
20 | except KeyError:
21 | repo.create_branch(branch, repo.get(remote_master_id))
22 | repo.head.set_target(remote_master_id)
23 | elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL:
24 | repo.merge(remote_master_id)
25 |
26 | if repo.index.conflicts is not None:
27 | for conflict in repo.index.conflicts:
28 | print('Conflicts found in:', conflict[0].path)
29 | raise AssertionError('Conflicts, ahhhhh!!')
30 |
31 | user = repo.default_signature
32 | tree = repo.index.write_tree()
33 | commit = repo.create_commit('HEAD',
34 | user,
35 | user,
36 | 'Merge!',
37 | tree,
38 | [repo.head.target, remote_master_id])
39 | # We need to do this or git CLI will think we are still merging.
40 | repo.state_cleanup()
41 | else:
42 | raise AssertionError('Unknown merge analysis result')
43 |
44 | pygit2.option(pygit2.GIT_OPT_SET_OWNER_VALIDATION, 0)
45 | repo = pygit2.Repository(str(sys.argv[1]))
46 | ident = pygit2.Signature('comfyui', 'comfy@ui')
47 | try:
48 | print("stashing current changes")
49 | repo.stash(ident)
50 | except KeyError:
51 | print("nothing to stash")
52 | backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S'))
53 | print("creating backup branch: {}".format(backup_branch_name))
54 | repo.branches.local.create(backup_branch_name, repo.head.peel())
55 |
56 | print("checking out master branch")
57 | branch = repo.lookup_branch('master')
58 | ref = repo.lookup_reference(branch.name)
59 | repo.checkout(ref)
60 |
61 | print("pulling latest changes")
62 | pull(repo)
63 |
64 | print("Done!")
65 |
66 |
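The .bat updaters invoke this script with the ComfyUI checkout path as sys.argv[1]; equivalently, pull() can be driven directly on an existing clone. A minimal sketch (the repository path here is an assumption):

    import pygit2

    repo = pygit2.Repository("./ComfyUI")             # path is an assumption
    pull(repo, remote_name="origin", branch="master")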
--------------------------------------------------------------------------------
/comfy/taesd/taesd.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Tiny AutoEncoder for Stable Diffusion
4 | (DNN for encoding / decoding SD's latent space)
5 | """
6 | import torch
7 | import torch.nn as nn
8 |
9 | import comfy.utils
10 |
11 | def conv(n_in, n_out, **kwargs):
12 | return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
13 |
14 | class Clamp(nn.Module):
15 | def forward(self, x):
16 | return torch.tanh(x / 3) * 3
17 |
18 | class Block(nn.Module):
19 | def __init__(self, n_in, n_out):
20 | super().__init__()
21 | self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
22 | self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
23 | self.fuse = nn.ReLU()
24 | def forward(self, x):
25 | return self.fuse(self.conv(x) + self.skip(x))
26 |
27 | def Encoder():
28 | return nn.Sequential(
29 | conv(3, 64), Block(64, 64),
30 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
31 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
32 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
33 | conv(64, 4),
34 | )
35 |
36 | def Decoder():
37 | return nn.Sequential(
38 | Clamp(), conv(4, 64), nn.ReLU(),
39 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
40 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
41 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
42 | Block(64, 64), conv(64, 3),
43 | )
44 |
45 | class TAESD(nn.Module):
46 | latent_magnitude = 3
47 | latent_shift = 0.5
48 |
49 | def __init__(self, encoder_path=None, decoder_path=None):
50 | """Initialize pretrained TAESD on the given device from the given checkpoints."""
51 | super().__init__()
52 | self.taesd_encoder = Encoder()
53 | self.taesd_decoder = Decoder()
54 | self.vae_scale = torch.nn.Parameter(torch.tensor(1.0))
55 | if encoder_path is not None:
56 | self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True))
57 | if decoder_path is not None:
58 | self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True))
59 |
60 | @staticmethod
61 | def scale_latents(x):
62 | """raw latents -> [0, 1]"""
63 | return x.div(2 * TAESD.latent_magnitude).add(TAESD.latent_shift).clamp(0, 1)
64 |
65 | @staticmethod
66 | def unscale_latents(x):
67 | """[0, 1] -> raw latents"""
68 | return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
69 |
70 | def decode(self, x):
71 | x_sample = self.taesd_decoder(x * self.vae_scale)
72 | x_sample = x_sample.sub(0.5).mul(2)
73 | return x_sample
74 |
75 | def encode(self, x):
76 | return self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale
77 |
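A minimal decode-side usage sketch, assuming taesd_decoder.pth has already been placed in models/vae_approx/ (as the release workflow above does); the latent is random here, so the decoded image is just noise and only the shapes are meaningful:

    import torch

    taesd = TAESD(decoder_path="models/vae_approx/taesd_decoder.pth")
    latent = torch.randn(1, 4, 64, 64)        # latent of a 512x512 image
    with torch.no_grad():
        img = taesd.decode(latent)            # (1, 3, 512, 512), values roughly in [-1, 1]
    print(img.shape)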
--------------------------------------------------------------------------------
/comfy/sdxl_clip.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | import torch
3 | import os
4 |
5 | class SDXLClipG(sd1_clip.SDClipModel):
6 | def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None):
7 | if layer == "penultimate":
8 | layer="hidden"
9 | layer_idx=-2
10 |
11 | textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json")
12 | super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype,
13 | special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False)
14 |
15 | def load_sd(self, sd):
16 | return super().load_sd(sd)
17 |
18 | class SDXLClipGTokenizer(sd1_clip.SDTokenizer):
19 | def __init__(self, tokenizer_path=None, embedding_directory=None):
20 | super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g')
21 |
22 |
23 | class SDXLTokenizer:
24 | def __init__(self, embedding_directory=None):
25 | self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
26 | self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory)
27 |
28 | def tokenize_with_weights(self, text:str, return_word_ids=False):
29 | out = {}
30 | out["g"] = self.clip_g.tokenize_with_weights(text, return_word_ids)
31 | out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids)
32 | return out
33 |
34 | def untokenize(self, token_weight_pair):
35 | return self.clip_g.untokenize(token_weight_pair)
36 |
37 | class SDXLClipModel(torch.nn.Module):
38 | def __init__(self, device="cpu", dtype=None):
39 | super().__init__()
40 | self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False)
41 | self.clip_g = SDXLClipG(device=device, dtype=dtype)
42 |
43 | def clip_layer(self, layer_idx):
44 | self.clip_l.clip_layer(layer_idx)
45 | self.clip_g.clip_layer(layer_idx)
46 |
47 | def reset_clip_layer(self):
48 | self.clip_g.reset_clip_layer()
49 | self.clip_l.reset_clip_layer()
50 |
51 | def encode_token_weights(self, token_weight_pairs):
52 | token_weight_pairs_g = token_weight_pairs["g"]
53 | token_weight_pairs_l = token_weight_pairs["l"]
54 | g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g)
55 | l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l)
56 | return torch.cat([l_out, g_out], dim=-1), g_pooled
57 |
58 | def load_sd(self, sd):
59 | if "text_model.encoder.layers.30.mlp.fc1.weight" in sd:
60 | return self.clip_g.load_sd(sd)
61 | else:
62 | return self.clip_l.load_sd(sd)
63 |
64 | class SDXLRefinerClipModel(sd1_clip.SD1ClipModel):
65 | def __init__(self, device="cpu", dtype=None):
66 | super().__init__(device=device, dtype=dtype, clip_name="g", clip_model=SDXLClipG)
67 |
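Roughly how the tokenizer/model pair above is driven. This is an untested sketch meant to be run from inside the repo; the weights are randomly initialized, so the outputs only serve to check shapes:

    import torch
    from comfy import sdxl_clip

    tokenizer = sdxl_clip.SDXLTokenizer()
    model = sdxl_clip.SDXLClipModel(device="cpu", dtype=torch.float32)
    tokens = tokenizer.tokenize_with_weights("a photo of a cat")
    cond, pooled = model.encode_token_weights(tokens)
    print(cond.shape, pooled.shape)   # clip_l (768) and clip_g (1280) features concatenated -> 2048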
--------------------------------------------------------------------------------
/web/extensions/core/snapToGrid.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Shift + drag/resize to snap to grid
4 |
5 | app.registerExtension({
6 | name: "Comfy.SnapToGrid",
7 | init() {
8 | // Add setting to control grid size
9 | app.ui.settings.addSetting({
10 | id: "Comfy.SnapToGrid.GridSize",
11 | name: "Grid Size",
12 | type: "slider",
13 | attrs: {
14 | min: 1,
15 | max: 500,
16 | },
17 | tooltip:
18 | "When dragging and resizing nodes while holding shift they will be aligned to the grid, this controls the size of that grid.",
19 | defaultValue: LiteGraph.CANVAS_GRID_SIZE,
20 | onChange(value) {
21 | LiteGraph.CANVAS_GRID_SIZE = +value;
22 | },
23 | });
24 |
25 | // After moving a node, if the shift key is down align it to grid
26 | const onNodeMoved = app.canvas.onNodeMoved;
27 | app.canvas.onNodeMoved = function (node) {
28 | const r = onNodeMoved?.apply(this, arguments);
29 |
30 | if (app.shiftDown) {
31 | // Ensure all selected nodes are realigned
32 | for (const id in this.selected_nodes) {
33 | this.selected_nodes[id].alignToGrid();
34 | }
35 | }
36 |
37 | return r;
38 | };
39 |
40 | // When a node is added, add a resize handler to it so we can align the size with the grid
41 | const onNodeAdded = app.graph.onNodeAdded;
42 | app.graph.onNodeAdded = function (node) {
43 | const onResize = node.onResize;
44 | node.onResize = function () {
45 | if (app.shiftDown) {
46 | const w = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.size[0] / LiteGraph.CANVAS_GRID_SIZE);
47 | const h = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.size[1] / LiteGraph.CANVAS_GRID_SIZE);
48 | node.size[0] = w;
49 | node.size[1] = h;
50 | }
51 | return onResize?.apply(this, arguments);
52 | };
53 | return onNodeAdded?.apply(this, arguments);
54 | };
55 |
56 | // Draw a preview of where the node will go if holding shift and the node is selected
57 | const origDrawNode = LGraphCanvas.prototype.drawNode;
58 | LGraphCanvas.prototype.drawNode = function (node, ctx) {
59 | if (app.shiftDown && this.node_dragged && node.id in this.selected_nodes) {
60 | const x = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.pos[0] / LiteGraph.CANVAS_GRID_SIZE);
61 | const y = LiteGraph.CANVAS_GRID_SIZE * Math.round(node.pos[1] / LiteGraph.CANVAS_GRID_SIZE);
62 |
63 | const shiftX = x - node.pos[0];
64 | let shiftY = y - node.pos[1];
65 |
66 | let w, h;
67 | if (node.flags.collapsed) {
68 | w = node._collapsed_width;
69 | h = LiteGraph.NODE_TITLE_HEIGHT;
70 | shiftY -= LiteGraph.NODE_TITLE_HEIGHT;
71 | } else {
72 | w = node.size[0];
73 | h = node.size[1];
74 | let titleMode = node.constructor.title_mode;
75 | if (titleMode !== LiteGraph.TRANSPARENT_TITLE && titleMode !== LiteGraph.NO_TITLE) {
76 | h += LiteGraph.NODE_TITLE_HEIGHT;
77 | shiftY -= LiteGraph.NODE_TITLE_HEIGHT;
78 | }
79 | }
80 | const f = ctx.fillStyle;
81 | ctx.fillStyle = "rgba(100, 100, 100, 0.5)";
82 | ctx.fillRect(shiftX, shiftY, w, h);
83 | ctx.fillStyle = f;
84 | }
85 |
86 | return origDrawNode.apply(this, arguments);
87 | };
88 | },
89 | });
90 |
--------------------------------------------------------------------------------
/script_examples/basic_api_example.py:
--------------------------------------------------------------------------------
1 | import json
2 | from urllib import request, parse
3 | import random
4 |
5 | #This is the ComfyUI api prompt format.
6 |
7 | #If you want it for a specific workflow you can "enable dev mode options"
8 | #in the settings of the UI (gear beside the "Queue Size: "); this will enable
9 | #a button on the UI to save workflows in api format.
10 |
11 | #keep in mind ComfyUI is pre-alpha software so this format will change a bit.
12 |
13 | #this is the one for the default workflow
14 | prompt_text = """
15 | {
16 | "3": {
17 | "class_type": "KSampler",
18 | "inputs": {
19 | "cfg": 8,
20 | "denoise": 1,
21 | "latent_image": [
22 | "5",
23 | 0
24 | ],
25 | "model": [
26 | "4",
27 | 0
28 | ],
29 | "negative": [
30 | "7",
31 | 0
32 | ],
33 | "positive": [
34 | "6",
35 | 0
36 | ],
37 | "sampler_name": "euler",
38 | "scheduler": "normal",
39 | "seed": 8566257,
40 | "steps": 20
41 | }
42 | },
43 | "4": {
44 | "class_type": "CheckpointLoaderSimple",
45 | "inputs": {
46 | "ckpt_name": "v1-5-pruned-emaonly.ckpt"
47 | }
48 | },
49 | "5": {
50 | "class_type": "EmptyLatentImage",
51 | "inputs": {
52 | "batch_size": 1,
53 | "height": 512,
54 | "width": 512
55 | }
56 | },
57 | "6": {
58 | "class_type": "CLIPTextEncode",
59 | "inputs": {
60 | "clip": [
61 | "4",
62 | 1
63 | ],
64 | "text": "masterpiece best quality girl"
65 | }
66 | },
67 | "7": {
68 | "class_type": "CLIPTextEncode",
69 | "inputs": {
70 | "clip": [
71 | "4",
72 | 1
73 | ],
74 | "text": "bad hands"
75 | }
76 | },
77 | "8": {
78 | "class_type": "VAEDecode",
79 | "inputs": {
80 | "samples": [
81 | "3",
82 | 0
83 | ],
84 | "vae": [
85 | "4",
86 | 2
87 | ]
88 | }
89 | },
90 | "9": {
91 | "class_type": "SaveImage",
92 | "inputs": {
93 | "filename_prefix": "ComfyUI",
94 | "images": [
95 | "8",
96 | 0
97 | ]
98 | }
99 | }
100 | }
101 | """
102 |
103 | def queue_prompt(prompt):
104 | p = {"prompt": prompt}
105 | data = json.dumps(p).encode('utf-8')
106 | req = request.Request("http://127.0.0.1:8188/prompt", data=data)
107 | request.urlopen(req)
108 |
109 |
110 | prompt = json.loads(prompt_text)
111 | #set the text prompt for our positive CLIPTextEncode
112 | prompt["6"]["inputs"]["text"] = "masterpiece best quality man"
113 |
114 | #set the seed for our KSampler node
115 | prompt["3"]["inputs"]["seed"] = 5
116 |
117 |
118 | queue_prompt(prompt)
119 |
120 |
121 |
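Note that the script imports random but never uses it. A common tweak (not part of the file above) is to randomize the KSampler seed before queueing, so repeated runs produce different images:

    prompt["3"]["inputs"]["seed"] = random.randint(0, 2**32 - 1)
    queue_prompt(prompt)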
--------------------------------------------------------------------------------
/comfy_extras/nodes_hypertile.py:
--------------------------------------------------------------------------------
1 | #Taken from: https://github.com/tfernd/HyperTile/
2 |
3 | import math
4 | from einops import rearrange
5 | # Use torch rng for consistency across generations
6 | from torch import randint
7 |
8 | def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
9 | min_value = min(min_value, value)
10 |
11 | # All big divisors of value (inclusive)
12 | divisors = [i for i in range(min_value, value + 1) if value % i == 0]
13 |
14 | ns = [value // i for i in divisors[:max_options]] # has at least 1 element
15 |
16 | if len(ns) - 1 > 0:
17 | idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()
18 | else:
19 | idx = 0
20 |
21 | return ns[idx]
22 |
23 | class HyperTile:
24 | @classmethod
25 | def INPUT_TYPES(s):
26 | return {"required": { "model": ("MODEL",),
27 | "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
28 | "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
29 | "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
30 | "scale_depth": ("BOOLEAN", {"default": False}),
31 | }}
32 | RETURN_TYPES = ("MODEL",)
33 | FUNCTION = "patch"
34 |
35 | CATEGORY = "_for_testing"
36 |
37 | def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
38 | model_channels = model.model.model_config.unet_config["model_channels"]
39 |
40 | apply_to = set()
41 | temp = model_channels
42 | for x in range(max_depth + 1):
43 | apply_to.add(temp)
44 | temp *= 2
45 |
46 | latent_tile_size = max(32, tile_size) // 8
47 | self.temp = None
48 |
49 | def hypertile_in(q, k, v, extra_options):
50 | if q.shape[-1] in apply_to:
51 | shape = extra_options["original_shape"]
52 | aspect_ratio = shape[-1] / shape[-2]
53 |
54 | hw = q.size(1)
55 | h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
56 |
57 | factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
58 | nh = random_divisor(h, latent_tile_size * factor, swap_size)
59 | nw = random_divisor(w, latent_tile_size * factor, swap_size)
60 |
61 | if nh * nw > 1:
62 | q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
63 | self.temp = (nh, nw, h, w)
64 | return q, k, v
65 |
66 | return q, k, v
67 | def hypertile_out(out, extra_options):
68 | if self.temp is not None:
69 | nh, nw, h, w = self.temp
70 | self.temp = None
71 | out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
72 | out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
73 | return out
74 |
75 |
76 | m = model.clone()
77 | m.set_model_attn1_patch(hypertile_in)
78 | m.set_model_attn1_output_patch(hypertile_out)
79 | return (m, )
80 |
81 | NODE_CLASS_MAPPINGS = {
82 | "HyperTile": HyperTile,
83 | }
84 |
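A quick standalone check of random_divisor(): for a 64-row latent, a minimum tile size of 32 and up to two options, the returned tile count always divides the dimension evenly, which is what the rearrange calls above rely on:

    from torch import manual_seed

    manual_seed(0)                      # torch rng, matching the import at the top of the file
    for _ in range(5):
        nh = random_divisor(64, 32, 2)  # candidate tile sizes >= 32 are 32 and 64
        assert 64 % nh == 0             # required for the rearrange reshapes to work
        print(nh)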
--------------------------------------------------------------------------------
/comfy/ldm/modules/distributions/distributions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | class AbstractDistribution:
6 | def sample(self):
7 | raise NotImplementedError()
8 |
9 | def mode(self):
10 | raise NotImplementedError()
11 |
12 |
13 | class DiracDistribution(AbstractDistribution):
14 | def __init__(self, value):
15 | self.value = value
16 |
17 | def sample(self):
18 | return self.value
19 |
20 | def mode(self):
21 | return self.value
22 |
23 |
24 | class DiagonalGaussianDistribution(object):
25 | def __init__(self, parameters, deterministic=False):
26 | self.parameters = parameters
27 | self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
28 | self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
29 | self.deterministic = deterministic
30 | self.std = torch.exp(0.5 * self.logvar)
31 | self.var = torch.exp(self.logvar)
32 | if self.deterministic:
33 | self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
34 |
35 | def sample(self):
36 | x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
37 | return x
38 |
39 | def kl(self, other=None):
40 | if self.deterministic:
41 | return torch.Tensor([0.])
42 | else:
43 | if other is None:
44 | return 0.5 * torch.sum(torch.pow(self.mean, 2)
45 | + self.var - 1.0 - self.logvar,
46 | dim=[1, 2, 3])
47 | else:
48 | return 0.5 * torch.sum(
49 | torch.pow(self.mean - other.mean, 2) / other.var
50 | + self.var / other.var - 1.0 - self.logvar + other.logvar,
51 | dim=[1, 2, 3])
52 |
53 | def nll(self, sample, dims=[1,2,3]):
54 | if self.deterministic:
55 | return torch.Tensor([0.])
56 | logtwopi = np.log(2.0 * np.pi)
57 | return 0.5 * torch.sum(
58 | logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
59 | dim=dims)
60 |
61 | def mode(self):
62 | return self.mean
63 |
64 |
65 | def normal_kl(mean1, logvar1, mean2, logvar2):
66 | """
67 | source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
68 | Compute the KL divergence between two gaussians.
69 | Shapes are automatically broadcasted, so batches can be compared to
70 | scalars, among other use cases.
71 | """
72 | tensor = None
73 | for obj in (mean1, logvar1, mean2, logvar2):
74 | if isinstance(obj, torch.Tensor):
75 | tensor = obj
76 | break
77 | assert tensor is not None, "at least one argument must be a Tensor"
78 |
79 | # Force variances to be Tensors. Broadcasting helps convert scalars to
80 | # Tensors, but it does not work for torch.exp().
81 | logvar1, logvar2 = [
82 | x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
83 | for x in (logvar1, logvar2)
84 | ]
85 |
86 | return 0.5 * (
87 | -1.0
88 | + logvar2
89 | - logvar1
90 | + torch.exp(logvar1 - logvar2)
91 | + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
92 | )
93 |
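Shape-level illustration of DiagonalGaussianDistribution: a VAE encoder emits 2*C channels (mean and log-variance), which the class splits before sampling. The channel counts below are arbitrary:

    import torch

    params = torch.randn(1, 8, 4, 4)                    # 4 mean channels + 4 logvar channels
    dist = DiagonalGaussianDistribution(params)
    z = dist.sample()                                   # reparameterized sample, (1, 4, 4, 4)
    print(z.shape, dist.mode().shape, dist.kl().shape)  # kl() sums over C,H,W -> shape (1,)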
--------------------------------------------------------------------------------
/comfy/ldm/modules/ema.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 |
5 | class LitEma(nn.Module):
6 | def __init__(self, model, decay=0.9999, use_num_upates=True):
7 | super().__init__()
8 | if decay < 0.0 or decay > 1.0:
9 | raise ValueError('Decay must be between 0 and 1')
10 |
11 | self.m_name2s_name = {}
12 | self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13 | self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
14 | else torch.tensor(-1, dtype=torch.int))
15 |
16 | for name, p in model.named_parameters():
17 | if p.requires_grad:
18 | # remove as '.'-character is not allowed in buffers
19 | s_name = name.replace('.', '')
20 | self.m_name2s_name.update({name: s_name})
21 | self.register_buffer(s_name, p.clone().detach().data)
22 |
23 | self.collected_params = []
24 |
25 | def reset_num_updates(self):
26 | del self.num_updates
27 | self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
28 |
29 | def forward(self, model):
30 | decay = self.decay
31 |
32 | if self.num_updates >= 0:
33 | self.num_updates += 1
34 | decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
35 |
36 | one_minus_decay = 1.0 - decay
37 |
38 | with torch.no_grad():
39 | m_param = dict(model.named_parameters())
40 | shadow_params = dict(self.named_buffers())
41 |
42 | for key in m_param:
43 | if m_param[key].requires_grad:
44 | sname = self.m_name2s_name[key]
45 | shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
46 | shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
47 | else:
48 | assert key not in self.m_name2s_name
49 |
50 | def copy_to(self, model):
51 | m_param = dict(model.named_parameters())
52 | shadow_params = dict(self.named_buffers())
53 | for key in m_param:
54 | if m_param[key].requires_grad:
55 | m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
56 | else:
57 | assert key not in self.m_name2s_name
58 |
59 | def store(self, parameters):
60 | """
61 | Save the current parameters for restoring later.
62 | Args:
63 | parameters: Iterable of `torch.nn.Parameter`; the parameters to be
64 | temporarily stored.
65 | """
66 | self.collected_params = [param.clone() for param in parameters]
67 |
68 | def restore(self, parameters):
69 | """
70 | Restore the parameters stored with the `store` method.
71 | Useful to validate the model with EMA parameters without affecting the
72 | original optimization process. Store the parameters before the
73 | `copy_to` method. After validation (or model saving), use this to
74 | restore the former parameters.
75 | Args:
76 | parameters: Iterable of `torch.nn.Parameter`; the parameters to be
77 | updated with the stored parameters.
78 | """
79 | for c_param, param in zip(self.collected_params, parameters):
80 | param.data.copy_(c_param.data)
81 |
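Minimal sketch of the intended EMA workflow: call the module after each optimizer step to update the shadow weights, then temporarily swap them in for evaluation. The tiny Linear model and the fake "training" update are stand-ins:

    import torch
    from torch import nn

    net = nn.Linear(4, 4)
    ema = LitEma(net)

    for _ in range(3):                     # pretend training steps
        net.weight.data += 0.01
        ema(net)                           # update the shadow parameters

    ema.store(net.parameters())            # stash the raw (trained) weights
    ema.copy_to(net)                       # evaluate / save with the EMA weights ...
    ema.restore(net.parameters())          # ... then put the raw weights back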
--------------------------------------------------------------------------------
/web/extensions/core/slotDefaults.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { ComfyWidgets } from "../../scripts/widgets.js";
3 | // Adds defaults for quickly adding nodes with middle click on the input/output
4 |
5 | app.registerExtension({
6 | name: "Comfy.SlotDefaults",
7 | suggestionsNumber: null,
8 | init() {
9 | LiteGraph.search_filter_enabled = true;
10 | LiteGraph.middle_click_slot_add_default_node = true;
11 | this.suggestionsNumber = app.ui.settings.addSetting({
12 | id: "Comfy.NodeSuggestions.number",
13 | name: "Number of nodes suggestions",
14 | type: "slider",
15 | attrs: {
16 | min: 1,
17 | max: 100,
18 | step: 1,
19 | },
20 | defaultValue: 5,
21 | onChange: (newVal, oldVal) => {
22 | this.setDefaults(newVal);
23 | }
24 | });
25 | },
26 | slot_types_default_out: {},
27 | slot_types_default_in: {},
28 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
29 | var nodeId = nodeData.name;
30 | var inputs = [];
31 | inputs = nodeData["input"]["required"]; //only show required inputs to reduce the mess also not logical to create node with optional inputs
32 | for (const inputKey in inputs) {
33 | var input = (inputs[inputKey]);
34 | if (typeof input[0] !== "string") continue;
35 |
36 | var type = input[0]
37 | if (type in ComfyWidgets) {
38 | var customProperties = input[1]
39 | if (!(customProperties?.forceInput)) continue; //ignore widgets that don't force input
40 | }
41 |
42 | if (!(type in this.slot_types_default_out)) {
43 | this.slot_types_default_out[type] = ["Reroute"];
44 | }
45 | if (this.slot_types_default_out[type].includes(nodeId)) continue;
46 | this.slot_types_default_out[type].push(nodeId);
47 |
48 | // Input types have to be stored as lower case
49 | // Store each node that can handle this input type
50 | const lowerType = type.toLocaleLowerCase();
51 | if (!(lowerType in LiteGraph.registered_slot_in_types)) {
52 | LiteGraph.registered_slot_in_types[lowerType] = { nodes: [] };
53 | }
54 | LiteGraph.registered_slot_in_types[lowerType].nodes.push(nodeType.comfyClass);
55 | }
56 |
57 | var outputs = nodeData["output"];
58 | for (const key in outputs) {
59 | var type = outputs[key];
60 | if (!(type in this.slot_types_default_in)) {
61 | this.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
62 | }
63 |
64 | this.slot_types_default_in[type].push(nodeId);
65 |
66 | // Store each node that can handle this output type
67 | if (!(type in LiteGraph.registered_slot_out_types)) {
68 | LiteGraph.registered_slot_out_types[type] = { nodes: [] };
69 | }
70 | LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass);
71 |
72 | if(!LiteGraph.slot_types_out.includes(type)) {
73 | LiteGraph.slot_types_out.push(type);
74 | }
75 | }
76 | var maxNum = this.suggestionsNumber.value;
77 | this.setDefaults(maxNum);
78 | },
79 | setDefaults(maxNum) {
80 |
81 | LiteGraph.slot_types_default_out = {};
82 | LiteGraph.slot_types_default_in = {};
83 |
84 | for (const type in this.slot_types_default_out) {
85 | LiteGraph.slot_types_default_out[type] = this.slot_types_default_out[type].slice(0, maxNum);
86 | }
87 | for (const type in this.slot_types_default_in) {
88 | LiteGraph.slot_types_default_in[type] = this.slot_types_default_in[type].slice(0, maxNum);
89 | }
90 | }
91 | });
92 |
--------------------------------------------------------------------------------
/tests-ui/utils/index.js:
--------------------------------------------------------------------------------
1 | const { mockApi } = require("./setup");
2 | const { Ez } = require("./ezgraph");
3 | const lg = require("./litegraph");
4 |
5 | /**
6 | *
7 | * @param { Parameters<typeof mockApi>[0] & { resetEnv?: boolean, preSetup?(app): Promise<void> } } config
8 | * @returns
9 | */
10 | export async function start(config = {}) {
11 | if(config.resetEnv) {
12 | jest.resetModules();
13 | jest.resetAllMocks();
14 | lg.setup(global);
15 | }
16 |
17 | mockApi(config);
18 | const { app } = require("../../web/scripts/app");
19 | config.preSetup?.(app);
20 | await app.setup();
21 | return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app };
22 | }
23 |
24 | /**
25 | * @param { ReturnType["graph"] } graph
26 | * @param { (hasReloaded: boolean) => (Promise | void) } cb
27 | */
28 | export async function checkBeforeAndAfterReload(graph, cb) {
29 | await cb(false);
30 | await graph.reload();
31 | await cb(true);
32 | }
33 |
34 | /**
35 | * @param { string } name
36 | * @param { Record } input
37 | * @param { (string | string[])[] | Record } output
38 | * @returns { Record }
39 | */
40 | export function makeNodeDef(name, input, output = {}) {
41 | const nodeDef = {
42 | name,
43 | category: "test",
44 | output: [],
45 | output_name: [],
46 | output_is_list: [],
47 | input: {
48 | required: {},
49 | },
50 | };
51 | for (const k in input) {
52 | nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]];
53 | }
54 | if (output instanceof Array) {
55 | output = output.reduce((p, c) => {
56 | p[c] = c;
57 | return p;
58 | }, {});
59 | }
60 | for (const k in output) {
61 | nodeDef.output.push(output[k]);
62 | nodeDef.output_name.push(k);
63 | nodeDef.output_is_list.push(false);
64 | }
65 |
66 | return { [name]: nodeDef };
67 | }
68 |
69 | /**
71 | * @template { any } T
72 | * @param { T } x
73 | * @returns { x is Exclude<T, null | undefined> }
74 | */
75 | export function assertNotNullOrUndefined(x) {
76 | expect(x).not.toEqual(null);
77 | expect(x).not.toEqual(undefined);
78 | return true;
79 | }
80 |
81 | /**
82 | *
83 | * @param { ReturnType["ez"] } ez
84 | * @param { ReturnType["graph"] } graph
85 | */
86 | export function createDefaultWorkflow(ez, graph) {
87 | graph.clear();
88 | const ckpt = ez.CheckpointLoaderSimple();
89 |
90 | const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" });
91 | const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" });
92 |
93 | const empty = ez.EmptyLatentImage();
94 | const sampler = ez.KSampler(
95 | ckpt.outputs.MODEL,
96 | pos.outputs.CONDITIONING,
97 | neg.outputs.CONDITIONING,
98 | empty.outputs.LATENT
99 | );
100 |
101 | const decode = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE);
102 | const save = ez.SaveImage(decode.outputs.IMAGE);
103 | graph.arrange();
104 |
105 | return { ckpt, pos, neg, empty, sampler, decode, save };
106 | }
107 |
108 | export async function getNodeDefs() {
109 | const { api } = require("../../web/scripts/api");
110 | return api.getNodeDefs();
111 | }
112 |
113 | export async function getNodeDef(nodeId) {
114 | return (await getNodeDefs())[nodeId];
115 | }
--------------------------------------------------------------------------------
/tests/inference/graphs/default_graph_sdxl1_0.json:
--------------------------------------------------------------------------------
1 | {
2 | "4": {
3 | "inputs": {
4 | "ckpt_name": "sd_xl_base_1.0.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderSimple"
7 | },
8 | "5": {
9 | "inputs": {
10 | "width": 1024,
11 | "height": 1024,
12 | "batch_size": 1
13 | },
14 | "class_type": "EmptyLatentImage"
15 | },
16 | "6": {
17 | "inputs": {
18 | "text": "a photo of a cat",
19 | "clip": [
20 | "4",
21 | 1
22 | ]
23 | },
24 | "class_type": "CLIPTextEncode"
25 | },
26 | "10": {
27 | "inputs": {
28 | "add_noise": "enable",
29 | "noise_seed": 42,
30 | "steps": 20,
31 | "cfg": 7.5,
32 | "sampler_name": "euler",
33 | "scheduler": "normal",
34 | "start_at_step": 0,
35 | "end_at_step": 32,
36 | "return_with_leftover_noise": "enable",
37 | "model": [
38 | "4",
39 | 0
40 | ],
41 | "positive": [
42 | "6",
43 | 0
44 | ],
45 | "negative": [
46 | "15",
47 | 0
48 | ],
49 | "latent_image": [
50 | "5",
51 | 0
52 | ]
53 | },
54 | "class_type": "KSamplerAdvanced"
55 | },
56 | "12": {
57 | "inputs": {
58 | "samples": [
59 | "14",
60 | 0
61 | ],
62 | "vae": [
63 | "4",
64 | 2
65 | ]
66 | },
67 | "class_type": "VAEDecode"
68 | },
69 | "13": {
70 | "inputs": {
71 | "filename_prefix": "test_inference",
72 | "images": [
73 | "12",
74 | 0
75 | ]
76 | },
77 | "class_type": "SaveImage"
78 | },
79 | "14": {
80 | "inputs": {
81 | "add_noise": "disable",
82 | "noise_seed": 42,
83 | "steps": 20,
84 | "cfg": 7.5,
85 | "sampler_name": "euler",
86 | "scheduler": "normal",
87 | "start_at_step": 32,
88 | "end_at_step": 10000,
89 | "return_with_leftover_noise": "disable",
90 | "model": [
91 | "16",
92 | 0
93 | ],
94 | "positive": [
95 | "17",
96 | 0
97 | ],
98 | "negative": [
99 | "20",
100 | 0
101 | ],
102 | "latent_image": [
103 | "10",
104 | 0
105 | ]
106 | },
107 | "class_type": "KSamplerAdvanced"
108 | },
109 | "15": {
110 | "inputs": {
111 | "conditioning": [
112 | "6",
113 | 0
114 | ]
115 | },
116 | "class_type": "ConditioningZeroOut"
117 | },
118 | "16": {
119 | "inputs": {
120 | "ckpt_name": "sd_xl_refiner_1.0.safetensors"
121 | },
122 | "class_type": "CheckpointLoaderSimple"
123 | },
124 | "17": {
125 | "inputs": {
126 | "text": "a photo of a cat",
127 | "clip": [
128 | "16",
129 | 1
130 | ]
131 | },
132 | "class_type": "CLIPTextEncode"
133 | },
134 | "20": {
135 | "inputs": {
136 | "text": "",
137 | "clip": [
138 | "16",
139 | 1
140 | ]
141 | },
142 | "class_type": "CLIPTextEncode"
143 | }
144 | }
--------------------------------------------------------------------------------
/cuda_malloc.py:
--------------------------------------------------------------------------------
1 | import os
2 | import importlib.util
3 | from comfy.cli_args import args
4 |
5 | #Can't use pytorch to get the GPU names because the cuda malloc setting has to be applied before torch is first imported.
6 | def get_gpu_names():
7 | if os.name == 'nt':
8 | import ctypes
9 |
10 | # Define necessary C structures and types
11 | class DISPLAY_DEVICEA(ctypes.Structure):
12 | _fields_ = [
13 | ('cb', ctypes.c_ulong),
14 | ('DeviceName', ctypes.c_char * 32),
15 | ('DeviceString', ctypes.c_char * 128),
16 | ('StateFlags', ctypes.c_ulong),
17 | ('DeviceID', ctypes.c_char * 128),
18 | ('DeviceKey', ctypes.c_char * 128)
19 | ]
20 |
21 | # Load user32.dll
22 | user32 = ctypes.windll.user32
23 |
24 | # Call EnumDisplayDevicesA
25 | def enum_display_devices():
26 | device_info = DISPLAY_DEVICEA()
27 | device_info.cb = ctypes.sizeof(device_info)
28 | device_index = 0
29 | gpu_names = set()
30 |
31 | while user32.EnumDisplayDevicesA(None, device_index, ctypes.byref(device_info), 0):
32 | device_index += 1
33 | gpu_names.add(device_info.DeviceString.decode('utf-8'))
34 | return gpu_names
35 | return enum_display_devices()
36 | else:
37 | return set()
38 |
39 | blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
40 | "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
41 | "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
42 | "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
43 | "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
44 | "GeForce GTX 1650", "GeForce GTX 1630"
45 | }
46 |
47 | def cuda_malloc_supported():
48 | try:
49 | names = get_gpu_names()
50 | except:
51 | names = set()
52 | for x in names:
53 | if "NVIDIA" in x:
54 | for b in blacklist:
55 | if b in x:
56 | return False
57 | return True
58 |
59 |
60 | if not args.cuda_malloc:
61 | try:
62 | version = ""
63 | torch_spec = importlib.util.find_spec("torch")
64 | for folder in torch_spec.submodule_search_locations:
65 | ver_file = os.path.join(folder, "version.py")
66 | if os.path.isfile(ver_file):
67 | spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
68 | module = importlib.util.module_from_spec(spec)
69 | spec.loader.exec_module(module)
70 | version = module.__version__
71 | if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
72 | args.cuda_malloc = cuda_malloc_supported()
73 | except:
74 | pass
75 |
76 |
77 | if args.cuda_malloc and not args.disable_cuda_malloc:
78 | env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
79 | if env_var is None:
80 | env_var = "backend:cudaMallocAsync"
81 | else:
82 | env_var += ",backend:cudaMallocAsync"
83 |
84 | os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var
85 |
--------------------------------------------------------------------------------
/web/scripts/defaultGraph.js:
--------------------------------------------------------------------------------
1 | export const defaultGraph = {
2 | last_node_id: 9,
3 | last_link_id: 9,
4 | nodes: [
5 | {
6 | id: 7,
7 | type: "CLIPTextEncode",
8 | pos: [413, 389],
9 | size: { 0: 425.27801513671875, 1: 180.6060791015625 },
10 | flags: {},
11 | order: 3,
12 | mode: 0,
13 | inputs: [{ name: "clip", type: "CLIP", link: 5 }],
14 | outputs: [{ name: "CONDITIONING", type: "CONDITIONING", links: [6], slot_index: 0 }],
15 | properties: {},
16 | widgets_values: ["text, watermark"],
17 | },
18 | {
19 | id: 6,
20 | type: "CLIPTextEncode",
21 | pos: [415, 186],
22 | size: { 0: 422.84503173828125, 1: 164.31304931640625 },
23 | flags: {},
24 | order: 2,
25 | mode: 0,
26 | inputs: [{ name: "clip", type: "CLIP", link: 3 }],
27 | outputs: [{ name: "CONDITIONING", type: "CONDITIONING", links: [4], slot_index: 0 }],
28 | properties: {},
29 | widgets_values: ["beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"],
30 | },
31 | {
32 | id: 5,
33 | type: "EmptyLatentImage",
34 | pos: [473, 609],
35 | size: { 0: 315, 1: 106 },
36 | flags: {},
37 | order: 1,
38 | mode: 0,
39 | outputs: [{ name: "LATENT", type: "LATENT", links: [2], slot_index: 0 }],
40 | properties: {},
41 | widgets_values: [512, 512, 1],
42 | },
43 | {
44 | id: 3,
45 | type: "KSampler",
46 | pos: [863, 186],
47 | size: { 0: 315, 1: 262 },
48 | flags: {},
49 | order: 4,
50 | mode: 0,
51 | inputs: [
52 | { name: "model", type: "MODEL", link: 1 },
53 | { name: "positive", type: "CONDITIONING", link: 4 },
54 | { name: "negative", type: "CONDITIONING", link: 6 },
55 | { name: "latent_image", type: "LATENT", link: 2 },
56 | ],
57 | outputs: [{ name: "LATENT", type: "LATENT", links: [7], slot_index: 0 }],
58 | properties: {},
59 | widgets_values: [156680208700286, true, 20, 8, "euler", "normal", 1],
60 | },
61 | {
62 | id: 8,
63 | type: "VAEDecode",
64 | pos: [1209, 188],
65 | size: { 0: 210, 1: 46 },
66 | flags: {},
67 | order: 5,
68 | mode: 0,
69 | inputs: [
70 | { name: "samples", type: "LATENT", link: 7 },
71 | { name: "vae", type: "VAE", link: 8 },
72 | ],
73 | outputs: [{ name: "IMAGE", type: "IMAGE", links: [9], slot_index: 0 }],
74 | properties: {},
75 | },
76 | {
77 | id: 9,
78 | type: "SaveImage",
79 | pos: [1451, 189],
80 | size: { 0: 210, 1: 26 },
81 | flags: {},
82 | order: 6,
83 | mode: 0,
84 | inputs: [{ name: "images", type: "IMAGE", link: 9 }],
85 | properties: {},
86 | },
87 | {
88 | id: 4,
89 | type: "CheckpointLoaderSimple",
90 | pos: [26, 474],
91 | size: { 0: 315, 1: 98 },
92 | flags: {},
93 | order: 0,
94 | mode: 0,
95 | outputs: [
96 | { name: "MODEL", type: "MODEL", links: [1], slot_index: 0 },
97 | { name: "CLIP", type: "CLIP", links: [3, 5], slot_index: 1 },
98 | { name: "VAE", type: "VAE", links: [8], slot_index: 2 },
99 | ],
100 | properties: {},
101 | widgets_values: ["v1-5-pruned-emaonly.ckpt"],
102 | },
103 | ],
104 | links: [
105 | [1, 4, 0, 3, 0, "MODEL"],
106 | [2, 5, 0, 3, 3, "LATENT"],
107 | [3, 4, 1, 6, 0, "CLIP"],
108 | [4, 6, 0, 3, 1, "CONDITIONING"],
109 | [5, 4, 1, 7, 0, "CLIP"],
110 | [6, 7, 0, 3, 2, "CONDITIONING"],
111 | [7, 3, 0, 8, 0, "LATENT"],
112 | [8, 4, 2, 8, 1, "VAE"],
113 | [9, 8, 0, 9, 0, "IMAGE"],
114 | ],
115 | groups: [],
116 | config: {},
117 | extra: {},
118 | version: 0.4,
119 | };
120 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/diffusionmodules/upscaling.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from functools import partial
5 |
6 | from .util import extract_into_tensor, make_beta_schedule
7 | from comfy.ldm.util import default
8 |
9 |
10 | class AbstractLowScaleModel(nn.Module):
11 | # for concatenating a downsampled image to the latent representation
12 | def __init__(self, noise_schedule_config=None):
13 | super(AbstractLowScaleModel, self).__init__()
14 | if noise_schedule_config is not None:
15 | self.register_schedule(**noise_schedule_config)
16 |
17 | def register_schedule(self, beta_schedule="linear", timesteps=1000,
18 | linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
19 | betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
20 | cosine_s=cosine_s)
21 | alphas = 1. - betas
22 | alphas_cumprod = np.cumprod(alphas, axis=0)
23 | alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
24 |
25 | timesteps, = betas.shape
26 | self.num_timesteps = int(timesteps)
27 | self.linear_start = linear_start
28 | self.linear_end = linear_end
29 | assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
30 |
31 | to_torch = partial(torch.tensor, dtype=torch.float32)
32 |
33 | self.register_buffer('betas', to_torch(betas))
34 | self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
35 | self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
36 |
37 | # calculations for diffusion q(x_t | x_{t-1}) and others
38 | self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
39 | self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
40 | self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
41 | self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
42 | self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
43 |
44 | def q_sample(self, x_start, t, noise=None):
45 | noise = default(noise, lambda: torch.randn_like(x_start))
46 | return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
47 | extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
48 |
49 | def forward(self, x):
50 | return x, None
51 |
52 | def decode(self, x):
53 | return x
54 |
55 |
56 | class SimpleImageConcat(AbstractLowScaleModel):
57 | # no noise level conditioning
58 | def __init__(self):
59 | super(SimpleImageConcat, self).__init__(noise_schedule_config=None)
60 | self.max_noise_level = 0
61 |
62 | def forward(self, x):
63 | # fix to constant noise level
64 | return x, torch.zeros(x.shape[0], device=x.device).long()
65 |
66 |
67 | class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
68 | def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False):
69 | super().__init__(noise_schedule_config=noise_schedule_config)
70 | self.max_noise_level = max_noise_level
71 |
72 | def forward(self, x, noise_level=None):
73 | if noise_level is None:
74 | noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
75 | else:
76 | assert isinstance(noise_level, torch.Tensor)
77 | z = self.q_sample(x, noise_level)
78 | return z, noise_level
79 |
80 |
81 |
82 |
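Usage sketch for the noise-augmentation wrapper. The schedule values below are assumptions chosen for illustration; inside the repo, make_beta_schedule and default resolve via the imports at the top of the file:

    import torch

    aug = ImageConcatWithNoiseAugmentation(
        {"linear_start": 0.00085, "linear_end": 0.012},   # assumed schedule, 1000 steps by default
        max_noise_level=1000,
    )
    lowres = torch.randn(2, 3, 64, 64)          # stand-in for a downsampled conditioning image
    z, noise_level = aug(lowres)                # noised image plus the per-sample level used
    print(z.shape, noise_level.shape)           # torch.Size([2, 3, 64, 64]) torch.Size([2])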
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/OmniSR/ChannelAttention.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import torch.nn as nn
4 |
5 |
6 | class CA_layer(nn.Module):
7 | def __init__(self, channel, reduction=16):
8 | super(CA_layer, self).__init__()
9 | # global average pooling
10 | self.gap = nn.AdaptiveAvgPool2d(1)
11 | self.fc = nn.Sequential(
12 | nn.Conv2d(channel, channel // reduction, kernel_size=(1, 1), bias=False),
13 | nn.GELU(),
14 | nn.Conv2d(channel // reduction, channel, kernel_size=(1, 1), bias=False),
15 | # nn.Sigmoid()
16 | )
17 |
18 | def forward(self, x):
19 | y = self.fc(self.gap(x))
20 | return x * y.expand_as(x)
21 |
22 |
23 | class Simple_CA_layer(nn.Module):
24 | def __init__(self, channel):
25 | super(Simple_CA_layer, self).__init__()
26 | self.gap = nn.AdaptiveAvgPool2d(1)
27 | self.fc = nn.Conv2d(
28 | in_channels=channel,
29 | out_channels=channel,
30 | kernel_size=1,
31 | padding=0,
32 | stride=1,
33 | groups=1,
34 | bias=True,
35 | )
36 |
37 | def forward(self, x):
38 | return x * self.fc(self.gap(x))
39 |
40 |
41 | class ECA_layer(nn.Module):
42 |     """Constructs an ECA module.
43 | Args:
44 | channel: Number of channels of the input feature map
45 | k_size: Adaptive selection of kernel size
46 | """
47 |
48 | def __init__(self, channel):
49 | super(ECA_layer, self).__init__()
50 |
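   |         # adaptive 1D kernel size: scales with log2(channel), then forced to be odd so the conv padding keeps the length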
51 | b = 1
52 | gamma = 2
53 | k_size = int(abs(math.log(channel, 2) + b) / gamma)
54 | k_size = k_size if k_size % 2 else k_size + 1
55 | self.avg_pool = nn.AdaptiveAvgPool2d(1)
56 | self.conv = nn.Conv1d(
57 | 1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False
58 | )
59 | # self.sigmoid = nn.Sigmoid()
60 |
61 | def forward(self, x):
62 | # x: input features with shape [b, c, h, w]
63 | # b, c, h, w = x.size()
64 |
65 | # feature descriptor on the global spatial information
66 | y = self.avg_pool(x)
67 |
68 | # Two different branches of ECA module
69 | y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
70 |
71 | # Multi-scale information fusion
72 | # y = self.sigmoid(y)
73 |
74 | return x * y.expand_as(x)
75 |
76 |
77 | class ECA_MaxPool_layer(nn.Module):
78 |     """Constructs an ECA module.
79 | Args:
80 | channel: Number of channels of the input feature map
81 | k_size: Adaptive selection of kernel size
82 | """
83 |
84 | def __init__(self, channel):
85 | super(ECA_MaxPool_layer, self).__init__()
86 |
87 | b = 1
88 | gamma = 2
89 | k_size = int(abs(math.log(channel, 2) + b) / gamma)
90 | k_size = k_size if k_size % 2 else k_size + 1
91 | self.max_pool = nn.AdaptiveMaxPool2d(1)
92 | self.conv = nn.Conv1d(
93 | 1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False
94 | )
95 | # self.sigmoid = nn.Sigmoid()
96 |
97 | def forward(self, x):
98 | # x: input features with shape [b, c, h, w]
99 | # b, c, h, w = x.size()
100 |
101 | # feature descriptor on the global spatial information
102 | y = self.max_pool(x)
103 |
104 | # Two different branches of ECA module
105 | y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
106 |
107 | # Multi-scale information fusion
108 | # y = self.sigmoid(y)
109 |
110 | return x * y.expand_as(x)
111 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_package.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release packaging"
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | cu:
7 | description: 'cuda version'
8 | required: true
9 | type: string
10 | default: "121"
11 |
12 | python_minor:
13 | description: 'python minor version'
14 | required: true
15 | type: string
16 | default: "11"
17 |
18 | python_patch:
19 | description: 'python patch version'
20 | required: true
21 | type: string
22 | default: "6"
23 | # push:
24 | # branches:
25 | # - master
26 |
27 | jobs:
28 | package_comfyui:
29 | permissions:
30 | contents: "write"
31 | packages: "write"
32 | pull-requests: "read"
33 | runs-on: windows-latest
34 | steps:
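   |     # Restore the pre-built python wheel archive (cu*_python_deps.tar) and the updater .bat previously saved to the actions cache.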
35 | - uses: actions/cache/restore@v3
36 | id: cache
37 | with:
38 | path: |
39 | cu${{ inputs.cu }}_python_deps.tar
40 | update_comfyui_and_python_dependencies.bat
41 | key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }}
42 | - shell: bash
43 | run: |
44 | mv cu${{ inputs.cu }}_python_deps.tar ../
45 | mv update_comfyui_and_python_dependencies.bat ../
46 | cd ..
47 | tar xf cu${{ inputs.cu }}_python_deps.tar
48 | pwd
49 | ls
50 |
51 | - uses: actions/checkout@v3
52 | with:
53 | fetch-depth: 0
54 | persist-credentials: false
55 | - shell: bash
56 | run: |
57 | cd ..
58 | cp -r ComfyUI ComfyUI_copy
59 | curl https://www.python.org/ftp/python/3.${{ inputs.python_minor }}.${{ inputs.python_patch }}/python-3.${{ inputs.python_minor }}.${{ inputs.python_patch }}-embed-amd64.zip -o python_embeded.zip
60 | unzip python_embeded.zip -d python_embeded
61 | cd python_embeded
62 | echo 'import site' >> ./python3${{ inputs.python_minor }}._pth
63 | curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
64 | ./python.exe get-pip.py
65 | ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/*
66 | sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth
67 | cd ..
68 |
69 | git clone https://github.com/comfyanonymous/taesd
70 | cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/
71 |
72 | mkdir ComfyUI_windows_portable
73 | mv python_embeded ComfyUI_windows_portable
74 | mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
75 |
76 | cd ComfyUI_windows_portable
77 |
78 | mkdir update
79 | cp -r ComfyUI/.ci/update_windows/* ./update/
80 | cp -r ComfyUI/.ci/windows_base_files/* ./
81 | cp ../update_comfyui_and_python_dependencies.bat ./update/
82 |
83 | cd ..
84 |
85 | "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
86 | mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z
87 |
88 | cd ComfyUI_windows_portable
89 | python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
90 |
91 | ls
92 |
93 | - name: Upload binaries to release
94 | uses: svenstaro/upload-release-action@v2
95 | with:
96 | repo_token: ${{ secrets.GITHUB_TOKEN }}
97 | file: new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z
98 | tag: "latest"
99 | overwrite: true
100 |
101 |
--------------------------------------------------------------------------------
/latent_preview.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | import struct
4 | import numpy as np
5 | from comfy.cli_args import args, LatentPreviewMethod
6 | from comfy.taesd.taesd import TAESD
7 | import folder_paths
8 | import comfy.utils
9 |
10 | MAX_PREVIEW_RESOLUTION = 512
11 |
12 | class LatentPreviewer:
13 | def decode_latent_to_preview(self, x0):
14 | pass
15 |
16 | def decode_latent_to_preview_image(self, preview_format, x0):
17 | preview_image = self.decode_latent_to_preview(x0)
18 | return ("JPEG", preview_image, MAX_PREVIEW_RESOLUTION)
19 |
20 | class TAESDPreviewerImpl(LatentPreviewer):
21 | def __init__(self, taesd):
22 | self.taesd = taesd
23 |
24 | def decode_latent_to_preview(self, x0):
25 | x_sample = self.taesd.decode(x0[:1])[0].detach()
26 | x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
27 | x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
28 | x_sample = x_sample.astype(np.uint8)
29 |
30 | preview_image = Image.fromarray(x_sample)
31 | return preview_image
32 |
33 |
34 | class Latent2RGBPreviewer(LatentPreviewer):
35 | def __init__(self, latent_rgb_factors):
36 | self.latent_rgb_factors = torch.tensor(latent_rgb_factors, device="cpu")
37 |
38 | def decode_latent_to_preview(self, x0):
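   |         # project the latent channels onto RGB with the per-model factor matrix, then map [-1, 1] to 0..255 bytes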
39 | latent_image = x0[0].permute(1, 2, 0).cpu() @ self.latent_rgb_factors
40 |
41 | latents_ubyte = (((latent_image + 1) / 2)
42 | .clamp(0, 1) # change scale from -1..1 to 0..1
43 | .mul(0xFF) # to 0..255
44 | .byte()).cpu()
45 |
46 | return Image.fromarray(latents_ubyte.numpy())
47 |
48 |
49 | def get_previewer(device, latent_format):
50 | previewer = None
51 | method = args.preview_method
52 | if method != LatentPreviewMethod.NoPreviews:
53 | # TODO previewer methods
54 | taesd_decoder_path = None
55 | if latent_format.taesd_decoder_name is not None:
56 | taesd_decoder_path = next(
57 | (fn for fn in folder_paths.get_filename_list("vae_approx")
58 | if fn.startswith(latent_format.taesd_decoder_name)),
59 | ""
60 | )
61 | taesd_decoder_path = folder_paths.get_full_path("vae_approx", taesd_decoder_path)
62 |
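   |         # Auto prefers TAESD previews when a decoder file is available, otherwise falls back to the cheap latent->RGB projection.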
63 | if method == LatentPreviewMethod.Auto:
64 | method = LatentPreviewMethod.Latent2RGB
65 | if taesd_decoder_path:
66 | method = LatentPreviewMethod.TAESD
67 |
68 | if method == LatentPreviewMethod.TAESD:
69 | if taesd_decoder_path:
70 | taesd = TAESD(None, taesd_decoder_path).to(device)
71 | previewer = TAESDPreviewerImpl(taesd)
72 | else:
73 | print("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name))
74 |
75 | if previewer is None:
76 | if latent_format.latent_rgb_factors is not None:
77 | previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors)
78 | return previewer
79 |
80 | def prepare_callback(model, steps, x0_output_dict=None):
81 | preview_format = "JPEG"
82 | if preview_format not in ["JPEG", "PNG"]:
83 | preview_format = "JPEG"
84 |
85 | previewer = get_previewer(model.load_device, model.model.latent_format)
86 |
87 | pbar = comfy.utils.ProgressBar(steps)
88 | def callback(step, x0, x, total_steps):
89 | if x0_output_dict is not None:
90 | x0_output_dict["x0"] = x0
91 |
92 | preview_bytes = None
93 | if previewer:
94 | preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
95 | pbar.update_absolute(step + 1, total_steps, preview_bytes)
96 | return callback
97 |
98 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_nightly_pytorch.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release Nightly pytorch"
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | cu:
7 | description: 'cuda version'
8 | required: true
9 | type: string
10 | default: "121"
11 |
12 | python_minor:
13 | description: 'python minor version'
14 | required: true
15 | type: string
16 | default: "12"
17 |
18 | python_patch:
19 | description: 'python patch version'
20 | required: true
21 | type: string
22 | default: "1"
23 | # push:
24 | # branches:
25 | # - master
26 |
27 | jobs:
28 | build:
29 | permissions:
30 | contents: "write"
31 | packages: "write"
32 | pull-requests: "read"
33 | runs-on: windows-latest
34 | steps:
35 | - uses: actions/checkout@v3
36 | with:
37 | fetch-depth: 0
38 | persist-credentials: false
39 | - uses: actions/setup-python@v4
40 | with:
41 | python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }}
42 | - shell: bash
43 | run: |
44 | cd ..
45 | cp -r ComfyUI ComfyUI_copy
46 | curl https://www.python.org/ftp/python/3.${{ inputs.python_minor }}.${{ inputs.python_patch }}/python-3.${{ inputs.python_minor }}.${{ inputs.python_patch }}-embed-amd64.zip -o python_embeded.zip
47 | unzip python_embeded.zip -d python_embeded
48 | cd python_embeded
49 | echo 'import site' >> ./python3${{ inputs.python_minor }}._pth
50 | curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
51 | ./python.exe get-pip.py
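   |         # build nightly torch wheels plus ComfyUI's requirements into a temp dir, then install them into the embedded python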
52 | python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir
53 | ls ../temp_wheel_dir
54 | ./python.exe -s -m pip install --pre ../temp_wheel_dir/*
55 | sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth
56 | cd ..
57 |
58 | git clone https://github.com/comfyanonymous/taesd
59 | cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/
60 |
61 | mkdir ComfyUI_windows_portable_nightly_pytorch
62 | mv python_embeded ComfyUI_windows_portable_nightly_pytorch
63 | mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI
64 |
65 | cd ComfyUI_windows_portable_nightly_pytorch
66 |
67 | mkdir update
68 | cp -r ComfyUI/.ci/update_windows/* ./update/
69 | cp -r ComfyUI/.ci/windows_base_files/* ./
70 |
71 | echo "..\python_embeded\python.exe .\update.py ..\ComfyUI\\
72 | ..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2
73 | pause" > ./update/update_comfyui_and_python_dependencies.bat
74 | cd ..
75 |
76 | "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch
77 | mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z
78 |
79 | cd ComfyUI_windows_portable_nightly_pytorch
80 | python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
81 |
82 | ls
83 |
84 | - name: Upload binaries to release
85 | uses: svenstaro/upload-release-action@v2
86 | with:
87 | repo_token: ${{ secrets.GITHUB_TOKEN }}
88 | file: ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z
89 | tag: "latest"
90 | overwrite: true
91 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/model_loading.py:
--------------------------------------------------------------------------------
1 | import logging as logger
2 |
3 | from .architecture.DAT import DAT
4 | from .architecture.face.codeformer import CodeFormer
5 | from .architecture.face.gfpganv1_clean_arch import GFPGANv1Clean
6 | from .architecture.face.restoreformer_arch import RestoreFormer
7 | from .architecture.HAT import HAT
8 | from .architecture.LaMa import LaMa
9 | from .architecture.OmniSR.OmniSR import OmniSR
10 | from .architecture.RRDB import RRDBNet as ESRGAN
11 | from .architecture.SCUNet import SCUNet
12 | from .architecture.SPSR import SPSRNet as SPSR
13 | from .architecture.SRVGG import SRVGGNetCompact as RealESRGANv2
14 | from .architecture.SwiftSRGAN import Generator as SwiftSRGAN
15 | from .architecture.Swin2SR import Swin2SR
16 | from .architecture.SwinIR import SwinIR
17 | from .types import PyTorchModel
18 |
19 |
20 | class UnsupportedModel(Exception):
21 | pass
22 |
23 |
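   | # The architecture is inferred purely from characteristic state-dict keys, so no sidecar metadata is needed.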
24 | def load_state_dict(state_dict) -> PyTorchModel:
25 |     logger.debug("Loading state dict into pytorch model arch")
26 |
27 | state_dict_keys = list(state_dict.keys())
28 |
29 | if "params_ema" in state_dict_keys:
30 | state_dict = state_dict["params_ema"]
31 | elif "params-ema" in state_dict_keys:
32 | state_dict = state_dict["params-ema"]
33 | elif "params" in state_dict_keys:
34 | state_dict = state_dict["params"]
35 |
36 | state_dict_keys = list(state_dict.keys())
37 | # SRVGGNet Real-ESRGAN (v2)
38 | if "body.0.weight" in state_dict_keys and "body.1.weight" in state_dict_keys:
39 | model = RealESRGANv2(state_dict)
40 | # SPSR (ESRGAN with lots of extra layers)
41 | elif "f_HR_conv1.0.weight" in state_dict:
42 | model = SPSR(state_dict)
43 | # Swift-SRGAN
44 | elif (
45 | "model" in state_dict_keys
46 | and "initial.cnn.depthwise.weight" in state_dict["model"].keys()
47 | ):
48 | model = SwiftSRGAN(state_dict)
49 | # SwinIR, Swin2SR, HAT
50 | elif "layers.0.residual_group.blocks.0.norm1.weight" in state_dict_keys:
51 | if (
52 | "layers.0.residual_group.blocks.0.conv_block.cab.0.weight"
53 | in state_dict_keys
54 | ):
55 | model = HAT(state_dict)
56 | elif "patch_embed.proj.weight" in state_dict_keys:
57 | model = Swin2SR(state_dict)
58 | else:
59 | model = SwinIR(state_dict)
60 | # GFPGAN
61 | elif (
62 | "toRGB.0.weight" in state_dict_keys
63 | and "stylegan_decoder.style_mlp.1.weight" in state_dict_keys
64 | ):
65 | model = GFPGANv1Clean(state_dict)
66 | # RestoreFormer
67 | elif (
68 | "encoder.conv_in.weight" in state_dict_keys
69 | and "encoder.down.0.block.0.norm1.weight" in state_dict_keys
70 | ):
71 | model = RestoreFormer(state_dict)
72 | elif (
73 | "encoder.blocks.0.weight" in state_dict_keys
74 | and "quantize.embedding.weight" in state_dict_keys
75 | ):
76 | model = CodeFormer(state_dict)
77 | # LaMa
78 | elif (
79 | "model.model.1.bn_l.running_mean" in state_dict_keys
80 | or "generator.model.1.bn_l.running_mean" in state_dict_keys
81 | ):
82 | model = LaMa(state_dict)
83 | # Omni-SR
84 | elif "residual_layer.0.residual_layer.0.layer.0.fn.0.weight" in state_dict_keys:
85 | model = OmniSR(state_dict)
86 | # SCUNet
87 | elif "m_head.0.weight" in state_dict_keys and "m_tail.0.weight" in state_dict_keys:
88 | model = SCUNet(state_dict)
89 | # DAT
90 | elif "layers.0.blocks.2.attn.attn_mask_0" in state_dict_keys:
91 | model = DAT(state_dict)
92 | # Regular ESRGAN, "new-arch" ESRGAN, Real-ESRGAN v1
93 | else:
94 | try:
95 | model = ESRGAN(state_dict)
96 | except:
97 | # pylint: disable=raise-missing-from
98 | raise UnsupportedModel
99 | return model
100 |
--------------------------------------------------------------------------------
/web/extensions/core/undoRedo.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | const MAX_HISTORY = 50;
4 |
5 | let undo = [];
6 | let redo = [];
7 | let activeState = null;
8 | let isOurLoad = false;
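   | // Snapshot the serialized graph; if it changed, push the previous state onto the undo stack and clear redo.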
9 | function checkState() {
10 | const currentState = app.graph.serialize();
11 | if (!graphEqual(activeState, currentState)) {
12 | undo.push(activeState);
13 | if (undo.length > MAX_HISTORY) {
14 | undo.shift();
15 | }
16 | activeState = clone(currentState);
17 | redo.length = 0;
18 | }
19 | }
20 |
21 | const loadGraphData = app.loadGraphData;
22 | app.loadGraphData = async function () {
23 | const v = await loadGraphData.apply(this, arguments);
24 | if (isOurLoad) {
25 | isOurLoad = false;
26 | } else {
27 | checkState();
28 | }
29 | return v;
30 | };
31 |
32 | function clone(obj) {
33 | try {
34 | if (typeof structuredClone !== "undefined") {
35 | return structuredClone(obj);
36 | }
37 | } catch (error) {
38 | // structuredClone is stricter than using JSON.parse/stringify so fallback to that
39 | }
40 |
41 | return JSON.parse(JSON.stringify(obj));
42 | }
43 |
44 | function graphEqual(a, b, root = true) {
45 | if (a === b) return true;
46 |
47 | if (typeof a == "object" && a && typeof b == "object" && b) {
48 | const keys = Object.getOwnPropertyNames(a);
49 |
50 | if (keys.length != Object.getOwnPropertyNames(b).length) {
51 | return false;
52 | }
53 |
54 | for (const key of keys) {
55 | let av = a[key];
56 | let bv = b[key];
57 | if (root && key === "nodes") {
58 | // Nodes need to be sorted as the order changes when selecting nodes
59 | av = [...av].sort((a, b) => a.id - b.id);
60 | bv = [...bv].sort((a, b) => a.id - b.id);
61 | }
62 | if (!graphEqual(av, bv, false)) {
63 | return false;
64 | }
65 | }
66 |
67 | return true;
68 | }
69 |
70 | return false;
71 | }
72 |
73 | const undoRedo = async (e) => {
74 | const updateState = async (source, target) => {
75 | const prevState = source.pop();
76 | if (prevState) {
77 | target.push(activeState);
78 | isOurLoad = true;
79 | await app.loadGraphData(prevState, false);
80 | activeState = prevState;
81 | }
82 | }
83 | if (e.ctrlKey || e.metaKey) {
84 | if (e.key === "y") {
85 | updateState(redo, undo);
86 | return true;
87 | } else if (e.key === "z") {
88 | updateState(undo, redo);
89 | return true;
90 | }
91 | }
92 | };
93 |
94 | const bindInput = (activeEl) => {
95 | if (activeEl?.tagName !== "CANVAS" && activeEl?.tagName !== "BODY") {
96 | for (const evt of ["change", "input", "blur"]) {
97 | if (`on${evt}` in activeEl) {
98 | const listener = () => {
99 | checkState();
100 | activeEl.removeEventListener(evt, listener);
101 | };
102 | activeEl.addEventListener(evt, listener);
103 | return true;
104 | }
105 | }
106 | }
107 | };
108 |
109 | window.addEventListener(
110 | "keydown",
111 | (e) => {
112 | requestAnimationFrame(async () => {
113 | const activeEl = document.activeElement;
114 | if (activeEl?.tagName === "INPUT" || activeEl?.type === "textarea") {
115 | // Ignore events on inputs, they have their native history
116 | return;
117 | }
118 |
119 | // Check if this is a ctrl+z ctrl+y
120 | if (await undoRedo(e)) return;
121 |
122 | // If our active element is some type of input then handle changes after they're done
123 | if (bindInput(activeEl)) return;
124 | checkState();
125 | });
126 | },
127 | true
128 | );
129 |
130 | // Handle clicking DOM elements (e.g. widgets)
131 | window.addEventListener("mouseup", () => {
132 | checkState();
133 | });
134 |
135 | // Handle litegraph clicks
136 | const processMouseUp = LGraphCanvas.prototype.processMouseUp;
137 | LGraphCanvas.prototype.processMouseUp = function (e) {
138 | const v = processMouseUp.apply(this, arguments);
139 | checkState();
140 | return v;
141 | };
142 | const processMouseDown = LGraphCanvas.prototype.processMouseDown;
143 | LGraphCanvas.prototype.processMouseDown = function (e) {
144 | const v = processMouseDown.apply(this, arguments);
145 | checkState();
146 | return v;
147 | };
148 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_video_model.py:
--------------------------------------------------------------------------------
1 | import nodes
2 | import torch
3 | import comfy.utils
4 | import comfy.sd
5 | import folder_paths
6 |
7 |
8 | class ImageOnlyCheckpointLoader:
9 | @classmethod
10 | def INPUT_TYPES(s):
11 | return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
12 | }}
13 | RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE")
14 | FUNCTION = "load_checkpoint"
15 |
16 | CATEGORY = "loaders/video_models"
17 |
18 | def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
19 | ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
20 | out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=False, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
21 | return (out[0], out[3], out[2])
22 |
23 |
24 | class SVD_img2vid_Conditioning:
25 | @classmethod
26 | def INPUT_TYPES(s):
27 | return {"required": { "clip_vision": ("CLIP_VISION",),
28 | "init_image": ("IMAGE",),
29 | "vae": ("VAE",),
30 | "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
31 | "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}),
32 | "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}),
33 | "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}),
34 | "fps": ("INT", {"default": 6, "min": 1, "max": 1024}),
35 | "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01})
36 | }}
37 | RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
38 | RETURN_NAMES = ("positive", "negative", "latent")
39 |
40 | FUNCTION = "encode"
41 |
42 | CATEGORY = "conditioning/video_models"
43 |
44 | def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level):
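   |         # The CLIP vision embedding of the init image is the pooled conditioning; the (optionally noise-augmented)
   |         # VAE-encoded image is attached as concat_latent_image, and an empty latent with video_frames frames is returned.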
45 | output = clip_vision.encode_image(init_image)
46 | pooled = output.image_embeds.unsqueeze(0)
47 | pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1)
48 | encode_pixels = pixels[:,:,:,:3]
49 | if augmentation_level > 0:
50 | encode_pixels += torch.randn_like(pixels) * augmentation_level
51 | t = vae.encode(encode_pixels)
52 | positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]]
53 | negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]]
54 | latent = torch.zeros([video_frames, 4, height // 8, width // 8])
55 | return (positive, negative, {"samples":latent})
56 |
57 | class VideoLinearCFGGuidance:
58 | @classmethod
59 | def INPUT_TYPES(s):
60 | return {"required": { "model": ("MODEL",),
61 | "min_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}),
62 | }}
63 | RETURN_TYPES = ("MODEL",)
64 | FUNCTION = "patch"
65 |
66 | CATEGORY = "sampling/video_models"
67 |
68 | def patch(self, model, min_cfg):
69 | def linear_cfg(args):
70 | cond = args["cond"]
71 | uncond = args["uncond"]
72 | cond_scale = args["cond_scale"]
73 |
74 | scale = torch.linspace(min_cfg, cond_scale, cond.shape[0], device=cond.device).reshape((cond.shape[0], 1, 1, 1))
75 | return uncond + scale * (cond - uncond)
76 |
77 | m = model.clone()
78 | m.set_model_sampler_cfg_function(linear_cfg)
79 | return (m, )
80 |
81 | NODE_CLASS_MAPPINGS = {
82 | "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader,
83 | "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning,
84 | "VideoLinearCFGGuidance": VideoLinearCFGGuidance,
85 | }
86 |
87 | NODE_DISPLAY_NAME_MAPPINGS = {
88 | "ImageOnlyCheckpointLoader": "Image Only Checkpoint Loader (img2vid model)",
89 | }
90 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_latent.py:
--------------------------------------------------------------------------------
1 | import comfy.utils
2 | import torch
3 |
4 | def reshape_latent_to(target_shape, latent):
5 | if latent.shape[1:] != target_shape[1:]:
6 | latent = comfy.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center")
7 | return comfy.utils.repeat_to_batch_size(latent, target_shape[0])
8 |
9 |
10 | class LatentAdd:
11 | @classmethod
12 | def INPUT_TYPES(s):
13 | return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
14 |
15 | RETURN_TYPES = ("LATENT",)
16 | FUNCTION = "op"
17 |
18 | CATEGORY = "latent/advanced"
19 |
20 | def op(self, samples1, samples2):
21 | samples_out = samples1.copy()
22 |
23 | s1 = samples1["samples"]
24 | s2 = samples2["samples"]
25 |
26 | s2 = reshape_latent_to(s1.shape, s2)
27 | samples_out["samples"] = s1 + s2
28 | return (samples_out,)
29 |
30 | class LatentSubtract:
31 | @classmethod
32 | def INPUT_TYPES(s):
33 | return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
34 |
35 | RETURN_TYPES = ("LATENT",)
36 | FUNCTION = "op"
37 |
38 | CATEGORY = "latent/advanced"
39 |
40 | def op(self, samples1, samples2):
41 | samples_out = samples1.copy()
42 |
43 | s1 = samples1["samples"]
44 | s2 = samples2["samples"]
45 |
46 | s2 = reshape_latent_to(s1.shape, s2)
47 | samples_out["samples"] = s1 - s2
48 | return (samples_out,)
49 |
50 | class LatentMultiply:
51 | @classmethod
52 | def INPUT_TYPES(s):
53 | return {"required": { "samples": ("LATENT",),
54 | "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
55 | }}
56 |
57 | RETURN_TYPES = ("LATENT",)
58 | FUNCTION = "op"
59 |
60 | CATEGORY = "latent/advanced"
61 |
62 | def op(self, samples, multiplier):
63 | samples_out = samples.copy()
64 |
65 | s1 = samples["samples"]
66 | samples_out["samples"] = s1 * multiplier
67 | return (samples_out,)
68 |
69 | class LatentInterpolate:
70 | @classmethod
71 | def INPUT_TYPES(s):
72 | return {"required": { "samples1": ("LATENT",),
73 | "samples2": ("LATENT",),
74 | "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
75 | }}
76 |
77 | RETURN_TYPES = ("LATENT",)
78 | FUNCTION = "op"
79 |
80 | CATEGORY = "latent/advanced"
81 |
82 | def op(self, samples1, samples2, ratio):
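   |         # Interpolate directions and magnitudes separately: normalize both latents along the channel dim,
   |         # lerp the unit directions, renormalize, then scale by the lerped norms.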
83 | samples_out = samples1.copy()
84 |
85 | s1 = samples1["samples"]
86 | s2 = samples2["samples"]
87 |
88 | s2 = reshape_latent_to(s1.shape, s2)
89 |
90 | m1 = torch.linalg.vector_norm(s1, dim=(1))
91 | m2 = torch.linalg.vector_norm(s2, dim=(1))
92 |
93 | s1 = torch.nan_to_num(s1 / m1)
94 | s2 = torch.nan_to_num(s2 / m2)
95 |
96 | t = (s1 * ratio + s2 * (1.0 - ratio))
97 | mt = torch.linalg.vector_norm(t, dim=(1))
98 | st = torch.nan_to_num(t / mt)
99 |
100 | samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
101 | return (samples_out,)
102 |
103 | class LatentBatch:
104 | @classmethod
105 | def INPUT_TYPES(s):
106 | return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}}
107 |
108 | RETURN_TYPES = ("LATENT",)
109 | FUNCTION = "batch"
110 |
111 | CATEGORY = "latent/batch"
112 |
113 | def batch(self, samples1, samples2):
114 | samples_out = samples1.copy()
115 | s1 = samples1["samples"]
116 | s2 = samples2["samples"]
117 |
118 | if s1.shape[1:] != s2.shape[1:]:
119 | s2 = comfy.utils.common_upscale(s2, s1.shape[3], s1.shape[2], "bilinear", "center")
120 | s = torch.cat((s1, s2), dim=0)
121 | samples_out["samples"] = s
122 | samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])])
123 | return (samples_out,)
124 |
125 | NODE_CLASS_MAPPINGS = {
126 | "LatentAdd": LatentAdd,
127 | "LatentSubtract": LatentSubtract,
128 | "LatentMultiply": LatentMultiply,
129 | "LatentInterpolate": LatentInterpolate,
130 | "LatentBatch": LatentBatch,
131 | }
132 |
--------------------------------------------------------------------------------
/comfy_extras/chainner_models/architecture/SRVGG.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | import math
5 |
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 |
9 |
10 | class SRVGGNetCompact(nn.Module):
11 | """A compact VGG-style network structure for super-resolution.
12 |     It is a compact network structure which performs upsampling in the last layer, so no convolution is
13 |     conducted on the HR feature space.
14 | Args:
15 | num_in_ch (int): Channel number of inputs. Default: 3.
16 | num_out_ch (int): Channel number of outputs. Default: 3.
17 | num_feat (int): Channel number of intermediate features. Default: 64.
18 | num_conv (int): Number of convolution layers in the body network. Default: 16.
19 | upscale (int): Upsampling factor. Default: 4.
20 | act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
21 | """
22 |
23 | def __init__(
24 | self,
25 | state_dict,
26 | act_type: str = "prelu",
27 | ):
28 | super(SRVGGNetCompact, self).__init__()
29 | self.model_arch = "SRVGG (RealESRGAN)"
30 | self.sub_type = "SR"
31 |
32 | self.act_type = act_type
33 |
34 | self.state = state_dict
35 |
36 | if "params" in self.state:
37 | self.state = self.state["params"]
38 |
39 | self.key_arr = list(self.state.keys())
40 |
41 | self.in_nc = self.get_in_nc()
42 | self.num_feat = self.get_num_feats()
43 | self.num_conv = self.get_num_conv()
44 | self.out_nc = self.in_nc # :(
45 | self.pixelshuffle_shape = None # Defined in get_scale()
46 | self.scale = self.get_scale()
47 |
48 | self.supports_fp16 = True
49 | self.supports_bfp16 = True
50 | self.min_size_restriction = None
51 |
52 | self.body = nn.ModuleList()
53 | # the first conv
54 | self.body.append(nn.Conv2d(self.in_nc, self.num_feat, 3, 1, 1))
55 | # the first activation
56 | if act_type == "relu":
57 | activation = nn.ReLU(inplace=True)
58 | elif act_type == "prelu":
59 | activation = nn.PReLU(num_parameters=self.num_feat)
60 | elif act_type == "leakyrelu":
61 | activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
62 | self.body.append(activation) # type: ignore
63 |
64 | # the body structure
65 | for _ in range(self.num_conv):
66 | self.body.append(nn.Conv2d(self.num_feat, self.num_feat, 3, 1, 1))
67 | # activation
68 | if act_type == "relu":
69 | activation = nn.ReLU(inplace=True)
70 | elif act_type == "prelu":
71 | activation = nn.PReLU(num_parameters=self.num_feat)
72 | elif act_type == "leakyrelu":
73 | activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
74 | self.body.append(activation) # type: ignore
75 |
76 | # the last conv
77 | self.body.append(nn.Conv2d(self.num_feat, self.pixelshuffle_shape, 3, 1, 1)) # type: ignore
78 | # upsample
79 | self.upsampler = nn.PixelShuffle(self.scale)
80 |
81 | self.load_state_dict(self.state, strict=False)
82 |
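   |     # body layout: conv(0), activation(1), then num_conv conv/activation pairs, then the final conv feeding the
   |     # pixel shuffle at index 2*num_conv + 2; the helpers below recover num_conv, num_feat and in_nc from the keys.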
83 | def get_num_conv(self) -> int:
84 | return (int(self.key_arr[-1].split(".")[1]) - 2) // 2
85 |
86 | def get_num_feats(self) -> int:
87 | return self.state[self.key_arr[0]].shape[0]
88 |
89 | def get_in_nc(self) -> int:
90 | return self.state[self.key_arr[0]].shape[1]
91 |
92 | def get_scale(self) -> int:
93 | self.pixelshuffle_shape = self.state[self.key_arr[-1]].shape[0]
94 | # Assume out_nc is the same as in_nc
95 |         # I can't think of a better way to do that
96 | self.out_nc = self.in_nc
97 | scale = math.sqrt(self.pixelshuffle_shape / self.out_nc)
98 | if scale - int(scale) > 0:
99 | print(
100 | "out_nc is probably different than in_nc, scale calculation might be wrong"
101 | )
102 | scale = int(scale)
103 | return scale
104 |
105 | def forward(self, x):
106 | out = x
107 | for i in range(0, len(self.body)):
108 | out = self.body[i](out)
109 |
110 | out = self.upsampler(out)
111 | # add the nearest upsampled image, so that the network learns the residual
112 | base = F.interpolate(x, scale_factor=self.scale, mode="nearest")
113 | out += base
114 | return out
115 |
--------------------------------------------------------------------------------
/custom_nodes/example_node.py.example:
--------------------------------------------------------------------------------
1 | class Example:
2 | """
3 |     An example node
4 |
5 | Class methods
6 | -------------
7 | INPUT_TYPES (dict):
8 |         Tells the main program the input parameters of the node.
9 |
10 | Attributes
11 | ----------
12 | RETURN_TYPES (`tuple`):
13 |         The type of each element in the output tuple.
14 | RETURN_NAMES (`tuple`):
15 |         Optional: The name of each output in the output tuple.
16 | FUNCTION (`str`):
17 | The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute()
18 | OUTPUT_NODE ([`bool`]):
19 | If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example.
20 |         The backend iterates over these output nodes and tries to execute all of their parents if their parent graph is properly connected.
21 | Assumed to be False if not present.
22 | CATEGORY (`str`):
23 | The category the node should appear in the UI.
24 | execute(s) -> tuple || None:
25 | The entry point method. The name of this method must be the same as the value of property `FUNCTION`.
26 | For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`.
27 | """
28 | def __init__(self):
29 | pass
30 |
31 | @classmethod
32 | def INPUT_TYPES(s):
33 | """
34 | Return a dictionary which contains config for all input fields.
35 | Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT".
36 | Input types "INT", "STRING" or "FLOAT" are special values for fields on the node.
37 | The type can be a list for selection.
38 |
39 | Returns: `dict`:
40 | - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required`
41 | - Value input_fields (`dict`): Contains input fields config:
42 |             * Key field_name (`string`): Name of an entry-point method's argument
43 | * Value field_config (`tuple`):
44 |                 + First value is a string indicating the type of the field or a list for selection.
45 |                 + Second value is a config for type "INT", "STRING" or "FLOAT".
46 | """
47 | return {
48 | "required": {
49 | "image": ("IMAGE",),
50 | "int_field": ("INT", {
51 | "default": 0,
52 | "min": 0, #Minimum value
53 | "max": 4096, #Maximum value
54 | "step": 64, #Slider's step
55 | "display": "number" # Cosmetic only: display as "number" or "slider"
56 | }),
57 | "float_field": ("FLOAT", {
58 | "default": 1.0,
59 | "min": 0.0,
60 | "max": 10.0,
61 | "step": 0.01,
62 |                     "round": 0.001, #The value representing the precision to round to; it will be set to the step value by default. Can be set to False to disable rounding.
63 | "display": "number"}),
64 | "print_to_screen": (["enable", "disable"],),
65 | "string_field": ("STRING", {
66 | "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node
67 | "default": "Hello World!"
68 | }),
69 | },
70 | }
71 |
72 | RETURN_TYPES = ("IMAGE",)
73 | #RETURN_NAMES = ("image_output_name",)
74 |
75 | FUNCTION = "test"
76 |
77 | #OUTPUT_NODE = False
78 |
79 | CATEGORY = "Example"
80 |
81 | def test(self, image, string_field, int_field, float_field, print_to_screen):
82 | if print_to_screen == "enable":
83 | print(f"""Your input contains:
84 | string_field aka input text: {string_field}
85 | int_field: {int_field}
86 | float_field: {float_field}
87 | """)
88 | #do some processing on the image, in this example I just invert it
89 | image = 1.0 - image
90 | return (image,)
91 |
92 |
93 | # A dictionary that contains all nodes you want to export with their names
94 | # NOTE: names should be globally unique
95 | NODE_CLASS_MAPPINGS = {
96 | "Example": Example
97 | }
98 |
99 | # A dictionary that contains the friendly/human-readable titles for the nodes
100 | NODE_DISPLAY_NAME_MAPPINGS = {
101 | "Example": "Example Node"
102 | }
103 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_rebatch.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class LatentRebatch:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {"required": { "latents": ("LATENT",),
7 | "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
8 | }}
9 | RETURN_TYPES = ("LATENT",)
10 | INPUT_IS_LIST = True
11 | OUTPUT_IS_LIST = (True, )
12 |
13 | FUNCTION = "rebatch"
14 |
15 | CATEGORY = "latent/batch"
16 |
17 | @staticmethod
18 | def get_batch(latents, list_ind, offset):
19 | '''prepare a batch out of the list of latents'''
20 | samples = latents[list_ind]['samples']
21 | shape = samples.shape
22 | mask = latents[list_ind]['noise_mask'] if 'noise_mask' in latents[list_ind] else torch.ones((shape[0], 1, shape[2]*8, shape[3]*8), device='cpu')
23 |         if mask.shape[-1] != shape[-1] * 8 or mask.shape[-2] != shape[-2] * 8:
24 |             mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[-2]*8, shape[-1]*8), mode="bilinear")
25 | if mask.shape[0] < samples.shape[0]:
26 | mask = mask.repeat((shape[0] - 1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]]
27 | if 'batch_index' in latents[list_ind]:
28 | batch_inds = latents[list_ind]['batch_index']
29 | else:
30 | batch_inds = [x+offset for x in range(shape[0])]
31 | return samples, mask, batch_inds
32 |
33 | @staticmethod
34 | def get_slices(indexable, num, batch_size):
35 | '''divides an indexable object into num slices of length batch_size, and a remainder'''
36 | slices = []
37 | for i in range(num):
38 | slices.append(indexable[i*batch_size:(i+1)*batch_size])
39 | if num * batch_size < len(indexable):
40 | return slices, indexable[num * batch_size:]
41 | else:
42 | return slices, None
43 |
44 | @staticmethod
45 | def slice_batch(batch, num, batch_size):
46 | result = [LatentRebatch.get_slices(x, num, batch_size) for x in batch]
47 | return list(zip(*result))
48 |
49 | @staticmethod
50 | def cat_batch(batch1, batch2):
51 | if batch1[0] is None:
52 | return batch2
53 | result = [torch.cat((b1, b2)) if torch.is_tensor(b1) else b1 + b2 for b1, b2 in zip(batch1, batch2)]
54 | return result
55 |
56 | def rebatch(self, latents, batch_size):
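   |         # Merge the incoming list of latent batches, then re-slice it into chunks of at most batch_size samples.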
57 | batch_size = batch_size[0]
58 |
59 | output_list = []
60 | current_batch = (None, None, None)
61 | processed = 0
62 |
63 | for i in range(len(latents)):
64 | # fetch new entry of list
65 | #samples, masks, indices = self.get_batch(latents, i)
66 | next_batch = self.get_batch(latents, i, processed)
67 | processed += len(next_batch[2])
68 | # set to current if current is None
69 | if current_batch[0] is None:
70 | current_batch = next_batch
71 | # add previous to list if dimensions do not match
72 | elif next_batch[0].shape[-1] != current_batch[0].shape[-1] or next_batch[0].shape[-2] != current_batch[0].shape[-2]:
73 | sliced, _ = self.slice_batch(current_batch, 1, batch_size)
74 | output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
75 | current_batch = next_batch
76 | # cat if everything checks out
77 | else:
78 | current_batch = self.cat_batch(current_batch, next_batch)
79 |
80 |             # add to the output list if the batch has grown above the target batch size
81 | if current_batch[0].shape[0] > batch_size:
82 | num = current_batch[0].shape[0] // batch_size
83 | sliced, remainder = self.slice_batch(current_batch, num, batch_size)
84 |
85 | for i in range(num):
86 | output_list.append({'samples': sliced[0][i], 'noise_mask': sliced[1][i], 'batch_index': sliced[2][i]})
87 |
88 | current_batch = remainder
89 |
90 | #add remainder
91 | if current_batch[0] is not None:
92 | sliced, _ = self.slice_batch(current_batch, 1, batch_size)
93 | output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
94 |
95 | #get rid of empty masks
96 | for s in output_list:
97 | if s['noise_mask'].mean() == 1.0:
98 | del s['noise_mask']
99 |
100 | return (output_list,)
101 |
102 | NODE_CLASS_MAPPINGS = {
103 | "RebatchLatents": LatentRebatch,
104 | }
105 |
106 | NODE_DISPLAY_NAME_MAPPINGS = {
107 | "RebatchLatents": "Rebatch Latents",
108 | }
--------------------------------------------------------------------------------
/web/extensions/core/clipspace.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { ComfyDialog, $el } from "../../scripts/ui.js";
3 | import { ComfyApp } from "../../scripts/app.js";
4 |
5 | export class ClipspaceDialog extends ComfyDialog {
6 | static items = [];
7 | static instance = null;
8 |
9 | static registerButton(name, contextPredicate, callback) {
10 | const item =
11 | $el("button", {
12 | type: "button",
13 | textContent: name,
14 | contextPredicate: contextPredicate,
15 | onclick: callback
16 | })
17 |
18 | ClipspaceDialog.items.push(item);
19 | }
20 |
21 | static invalidatePreview() {
22 | if(ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0) {
23 | const img_preview = document.getElementById("clipspace_preview");
24 | if(img_preview) {
25 | img_preview.src = ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src;
26 | img_preview.style.maxHeight = "100%";
27 | img_preview.style.maxWidth = "100%";
28 | }
29 | }
30 | }
31 |
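   |     // Rebuild the dialog body (image settings table + buttons) from the current clipspace contents.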
32 | static invalidate() {
33 | if(ClipspaceDialog.instance) {
34 | const self = ClipspaceDialog.instance;
35 |             // allow the controls to be reconstructed when copying from non-image to image content.
36 | const children = $el("div.comfy-modal-content", [ self.createImgSettings(), ...self.createButtons() ]);
37 |
38 | if(self.element) {
39 | // update
40 | self.element.removeChild(self.element.firstChild);
41 | self.element.appendChild(children);
42 | }
43 | else {
44 | // new
45 | self.element = $el("div.comfy-modal", { parent: document.body }, [children,]);
46 | }
47 |
48 | if(self.element.children[0].children.length <= 1) {
49 |                 self.element.children[0].appendChild($el("p", {}, ["Unable to find an editor for the content format currently stored in the Clipspace."]));
50 | }
51 |
52 | ClipspaceDialog.invalidatePreview();
53 | }
54 | }
55 |
56 | constructor() {
57 | super();
58 | }
59 |
60 | createButtons(self) {
61 | const buttons = [];
62 |
63 | for(let idx in ClipspaceDialog.items) {
64 | const item = ClipspaceDialog.items[idx];
65 | if(!item.contextPredicate || item.contextPredicate())
66 | buttons.push(ClipspaceDialog.items[idx]);
67 | }
68 |
69 | buttons.push(
70 | $el("button", {
71 | type: "button",
72 | textContent: "Close",
73 | onclick: () => { this.close(); }
74 | })
75 | );
76 |
77 | return buttons;
78 | }
79 |
80 | createImgSettings() {
81 | if(ComfyApp.clipspace.imgs) {
82 | const combo_items = [];
83 | const imgs = ComfyApp.clipspace.imgs;
84 |
85 | for(let i=0; i < imgs.length; i++) {
86 | combo_items.push($el("option", {value:i}, [`${i}`]));
87 | }
88 |
89 | const combo1 = $el("select",
90 | {id:"clipspace_img_selector", onchange:(event) => {
91 | ComfyApp.clipspace['selectedIndex'] = event.target.selectedIndex;
92 | ClipspaceDialog.invalidatePreview();
93 | } }, combo_items);
94 |
95 | const row1 =
96 | $el("tr", {},
97 | [
98 | $el("td", {}, [$el("font", {color:"white"}, ["Select Image"])]),
99 | $el("td", {}, [combo1])
100 | ]);
101 |
102 |
103 | const combo2 = $el("select",
104 | {id:"clipspace_img_paste_mode", onchange:(event) => {
105 | ComfyApp.clipspace['img_paste_mode'] = event.target.value;
106 | } },
107 | [
108 | $el("option", {value:'selected'}, 'selected'),
109 | $el("option", {value:'all'}, 'all')
110 | ]);
111 | combo2.value = ComfyApp.clipspace['img_paste_mode'];
112 |
113 | const row2 =
114 | $el("tr", {},
115 | [
116 | $el("td", {}, [$el("font", {color:"white"}, ["Paste Mode"])]),
117 | $el("td", {}, [combo2])
118 | ]);
119 |
120 | const td = $el("td", {align:'center', width:'100px', height:'100px', colSpan:'2'},
121 | [ $el("img",{id:"clipspace_preview", ondragstart:() => false},[]) ]);
122 |
123 | const row3 =
124 | $el("tr", {}, [td]);
125 |
126 | return $el("table", {}, [row1, row2, row3]);
127 | }
128 | else {
129 | return [];
130 | }
131 | }
132 |
133 | createImgPreview() {
134 | if(ComfyApp.clipspace.imgs) {
135 | return $el("img",{id:"clipspace_preview", ondragstart:() => false});
136 | }
137 | else
138 | return [];
139 | }
140 |
141 | show() {
142 | const img_preview = document.getElementById("clipspace_preview");
143 | ClipspaceDialog.invalidate();
144 |
145 | this.element.style.display = "block";
146 | }
147 | }
148 |
149 | app.registerExtension({
150 | name: "Comfy.Clipspace",
151 | init(app) {
152 | app.openClipspace =
153 | function () {
154 | if(!ClipspaceDialog.instance) {
155 | ClipspaceDialog.instance = new ClipspaceDialog(app);
156 | ComfyApp.clipspace_invalidate_handler = ClipspaceDialog.invalidate;
157 | }
158 |
159 | if(ComfyApp.clipspace) {
160 | ClipspaceDialog.instance.show();
161 | }
162 | else
163 | app.ui.dialog.show("Clipspace is Empty!");
164 | };
165 | }
166 | });
--------------------------------------------------------------------------------