├── app
│   ├── __init__.py
│   └── app_settings.py
├── tests
│   ├── __init__.py
│   ├── inference
│   │   ├── __init__.py
│   │   └── graphs
│   │       └── default_graph_sdxl1_0.json
│   ├── README.md
│   ├── conftest.py
│   └── compare
│       └── conftest.py
├── models
│   ├── vae
│   │   └── put_vae_here
│   ├── loras
│   │   └── put_loras_here
│   ├── unet
│   │   └── put_unet_files_here
│   ├── gligen
│   │   └── put_gligen_models_here
│   ├── checkpoints
│   │   └── put_checkpoints_here
│   ├── diffusers
│   │   └── put_diffusers_models_here
│   ├── hypernetworks
│   │   └── put_hypernetworks_here
│   ├── clip
│   │   └── put_clip_or_text_encoder_models_here
│   ├── clip_vision
│   │   └── put_clip_vision_models_here
│   ├── controlnet
│   │   └── put_controlnets_and_t2i_here
│   ├── photomaker
│   │   └── put_photomaker_models_here
│   ├── style_models
│   │   └── put_t2i_style_model_here
│   ├── upscale_models
│   │   └── put_esrgan_and_other_upscale_models_here
│   ├── embeddings
│   │   └── put_embeddings_or_textual_inversion_concepts_here
│   ├── vae_approx
│   │   └── put_taesd_encoder_pth_and_taesd_decoder_pth_here
│   └── configs
│       ├── v2-inference.yaml
│       ├── v2-inference_fp32.yaml
│       ├── v2-inference-v.yaml
│       ├── v2-inference-v_fp32.yaml
│       ├── v1-inference.yaml
│       ├── v1-inference_fp16.yaml
│       ├── anything_v3.yaml
│       ├── v1-inference_clip_skip_2.yaml
│       ├── v1-inference_clip_skip_2_fp16.yaml
│       └── v1-inpainting-inference.yaml
├── tests-ui
│   ├── .gitignore
│   ├── babel.config.json
│   ├── utils
│   │   ├── nopProxy.js
│   │   ├── litegraph.js
│   │   └── setup.js
│   ├── jest.config.js
│   ├── afterSetup.js
│   ├── globalSetup.js
│   ├── package.json
│   └── setup.js
├── tests-unit
│   ├── app_test
│   │   ├── __init__.py
│   │   └── frontend_manager_test.py
│   ├── requirements.txt
│   └── README.md
├── CODEOWNERS
├── comfy
│   ├── ldm
│   │   ├── modules
│   │   │   ├── encoders
│   │   │   │   ├── __init__.py
│   │   │   │   └── noise_aug_modules.py
│   │   │   ├── distributions
│   │   │   │   ├── __init__.py
│   │   │   │   └── distributions.py
│   │   │   ├── diffusionmodules
│   │   │   │   └── __init__.py
│   │   │   └── ema.py
│   │   ├── common_dit.py
│   │   ├── flux
│   │   │   └── math.py
│   │   └── hydit
│   │       └── poolers.py
│   ├── options.py
│   ├── text_encoders
│   │   ├── t5_pile_tokenizer
│   │   │   └── tokenizer.model
│   │   ├── hydit_clip_tokenizer
│   │   │   ├── special_tokens_map.json
│   │   │   └── tokenizer_config.json
│   │   ├── t5_config_base.json
│   │   ├── t5_config_xxl.json
│   │   ├── mt5_config_xl.json
│   │   ├── t5_pile_config_xl.json
│   │   ├── sd2_clip_config.json
│   │   ├── hydit_clip.json
│   │   ├── spiece_tokenizer.py
│   │   ├── sd2_clip.py
│   │   ├── sa_t5.py
│   │   ├── aura_t5.py
│   │   ├── t5_tokenizer
│   │   │   └── special_tokens_map.json
│   │   └── flux.py
│   ├── cldm
│   │   ├── control_types.py
│   │   └── mmdit.py
│   ├── checkpoint_pickle.py
│   ├── clip_vision_config_g.json
│   ├── clip_vision_config_h.json
│   ├── clip_vision_config_vitl.json
│   ├── clip_vision_config_vitl_336.json
│   ├── sd1_tokenizer
│   │   ├── special_tokens_map.json
│   │   └── tokenizer_config.json
│   ├── clip_config_bigg.json
│   ├── sd1_clip_config.json
│   ├── types.py
│   ├── diffusers_load.py
│   ├── conds.py
│   ├── sampler_helpers.py
│   ├── sample.py
│   └── taesd
│       └── taesd.py
├── output
│   └── _output_images_will_be_put_here
├── web
│   ├── user.css
│   ├── fonts
│   │   └── materialdesignicons-webfont.woff2
│   ├── jsconfig.json
│   ├── scripts
│   │   └── ui
│   │       ├── spinner.js
│   │       ├── menu
│   │       │   ├── interruptButton.js
│   │       │   ├── viewHistory.js
│   │       │   ├── viewQueue.js
│   │       │   ├── queueOptions.js
│   │       │   └── queueButton.js
│   │       ├── spinner.css
│   │       ├── dialog.js
│   │       ├── components
│   │       │   ├── buttonGroup.js
│   │       │   ├── splitButton.js
│   │       │   └── asyncDialog.js
│   │       ├── utils.js
│   │       ├── toggleSwitch.js
│   │       ├── imagePreview.js
│   │       └── userSelection.css
│   ├── extensions
│   │   ├── core
│   │   │   ├── uploadImage.js
│   │   │   ├── linkRenderMode.js
│   │   │   ├── invertMenuScrolling.js
│   │   │   ├── noteNode.js
│   │   │   ├── saveImageExtraOutput.js
│   │   │   ├── dynamicPrompts.js
│   │   │   ├── keybinds.js
│   │   │   ├── simpleTouchSupport.js
│   │   │   └── slotDefaults.js
│   │   └── logging.js.example
│   ├── lib
│   │   └── litegraph.extensions.js
│   ├── index.html
│   └── types
│       └── comfy.d.ts
├── .pylintrc
├── input
│   └── example.png
├── comfyui_screenshot.png
├── .ci
│   ├── windows_base_files
│   │   ├── run_cpu.bat
│   │   ├── run_nvidia_gpu.bat
│   │   └── README_VERY_IMPORTANT.txt
│   └── update_windows
│       └── update_comfyui.bat
├── pytest.ini
├── comfy_extras
│   ├── chainner_models
│   │   └── model_loading.py
│   ├── nodes_hunyuan.py
│   ├── nodes_cond.py
│   ├── nodes_controlnet.py
│   ├── nodes_webcam.py
│   ├── nodes_canny.py
│   ├── nodes_differential_diffusion.py
│   ├── nodes_ip2p.py
│   ├── nodes_flux.py
│   ├── nodes_sdupscale.py
│   ├── nodes_pag.py
│   ├── nodes_morphology.py
│   ├── nodes_align_your_steps.py
│   ├── nodes_model_downscale.py
│   ├── nodes_clip_sdxl.py
│   ├── nodes_upscale_model.py
│   └── nodes_hypertile.py
├── requirements.txt
├── .gitignore
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── user-support.yml
│   │   ├── feature-request.yml
│   │   └── bug-report.yml
│   └── workflows
│       ├── pylint.yml
│       ├── test-build.yml
│       ├── test-ui.yaml
│       ├── pullrequest-ci-run.yml
│       ├── windows_release_dependencies.yml
│       ├── test-browser.yml
│       └── test-ci.yml
├── fix_torch.py
├── node_helpers.py
├── new_updater.py
├── extra_model_paths.yaml.example
├── custom_nodes
│   └── websocket_image_save.py
├── CONTRIBUTING.md
└── script_examples
    └── basic_api_example.py
/app/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/vae/put_vae_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/loras/put_loras_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/inference/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/unet/put_unet_files_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests-ui/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
--------------------------------------------------------------------------------
/tests-unit/app_test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @comfyanonymous
2 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/encoders/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/gligen/put_gligen_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/output/_output_images_will_be_put_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/distributions/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/checkpoints/put_checkpoints_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/diffusers/put_diffusers_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/hypernetworks/put_hypernetworks_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/web/user.css:
--------------------------------------------------------------------------------
1 | /* Put custom styles here */
--------------------------------------------------------------------------------
/comfy/ldm/modules/diffusionmodules/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip/put_clip_or_text_encoder_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/clip_vision/put_clip_vision_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/controlnet/put_controlnets_and_t2i_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/photomaker/put_photomaker_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/style_models/put_t2i_style_model_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests-unit/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest>=7.8.0
2 |
--------------------------------------------------------------------------------
/models/upscale_models/put_esrgan_and_other_upscale_models_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/embeddings/put_embeddings_or_textual_inversion_concepts_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/models/vae_approx/put_taesd_encoder_pth_and_taesd_decoder_pth_here:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MESSAGES CONTROL]
2 | disable=all
3 | enable=eval-used
4 |
--------------------------------------------------------------------------------
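Note on the configuration above: it silences every pylint check except eval-used (W0123). A minimal sketch of what that means in practice:

```python
# With the .pylintrc above, pylint stays quiet about everything else
# (unused imports, missing docstrings, ...) and reports only eval-used.
import os  # unused-import (W0611) would normally fire; suppressed here

value = eval("1 + 1")  # W0123 (eval-used): the one check left enabled
```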
/input/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eust-w/ComfyUI/HEAD/input/example.png
--------------------------------------------------------------------------------
/comfyui_screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eust-w/ComfyUI/HEAD/comfyui_screenshot.png
--------------------------------------------------------------------------------
/.ci/windows_base_files/run_cpu.bat:
--------------------------------------------------------------------------------
1 | .\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build
2 | pause
3 |
--------------------------------------------------------------------------------
/.ci/windows_base_files/run_nvidia_gpu.bat:
--------------------------------------------------------------------------------
1 | .\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build
2 | pause
3 |
--------------------------------------------------------------------------------
/tests-ui/babel.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "presets": ["@babel/preset-env"],
3 | "plugins": ["babel-plugin-transform-import-meta"]
4 | }
5 |
--------------------------------------------------------------------------------
/web/fonts/materialdesignicons-webfont.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eust-w/ComfyUI/HEAD/web/fonts/materialdesignicons-webfont.woff2
--------------------------------------------------------------------------------
/comfy/options.py:
--------------------------------------------------------------------------------
1 |
2 | args_parsing = False
3 |
4 | def enable_args_parsing(enable=True):
5 | global args_parsing
6 | args_parsing = enable
7 |
--------------------------------------------------------------------------------
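A short usage sketch for the flag above (the caller shown is hypothetical; embedders flip it before importing modules that read sys.argv):

```python
# Minimal sketch, assuming the comfy package is importable.
import comfy.options

comfy.options.enable_args_parsing()       # opt in to CLI argument parsing
assert comfy.options.args_parsing is True

comfy.options.enable_args_parsing(False)  # and back off again
assert comfy.options.args_parsing is False
```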
/comfy/text_encoders/t5_pile_tokenizer/tokenizer.model:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/eust-w/ComfyUI/HEAD/comfy/text_encoders/t5_pile_tokenizer/tokenizer.model
--------------------------------------------------------------------------------
/tests-ui/utils/nopProxy.js:
--------------------------------------------------------------------------------
1 | export const nop = new Proxy(function () {}, {
2 | get: () => nop,
3 | set: () => true,
4 | apply: () => nop,
5 | construct: () => nop,
6 | });
7 |
--------------------------------------------------------------------------------
/tests-unit/README.md:
--------------------------------------------------------------------------------
1 | # Pytest Unit Tests
2 |
3 | ## Install test dependencies
4 |
5 | `pip install -r tests-unit/requirements.txt`
6 |
7 | ## Run tests
8 | `pytest tests-unit/`
9 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | markers =
3 | inference: mark as inference test (deselect with '-m "not inference"')
4 | testpaths =
5 | tests
6 | tests-unit
7 | addopts = -s
8 | pythonpath = .
9 |
--------------------------------------------------------------------------------
/comfy/text_encoders/hydit_clip_tokenizer/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "cls_token": "[CLS]",
3 | "mask_token": "[MASK]",
4 | "pad_token": "[PAD]",
5 | "sep_token": "[SEP]",
6 | "unk_token": "[UNK]"
7 | }
8 |
--------------------------------------------------------------------------------
/web/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "baseUrl": ".",
4 | "paths": {
5 | "/*": ["./*"]
6 | },
7 | "lib": ["DOM", "ES2022", "DOM.Iterable"],
8 | "target": "ES2015",
9 | "module": "es2020"
10 | },
11 | "include": ["."]
12 | }
13 |
--------------------------------------------------------------------------------
/comfy/cldm/control_types.py:
--------------------------------------------------------------------------------
1 | UNION_CONTROLNET_TYPES = {
2 | "openpose": 0,
3 | "depth": 1,
4 | "hed/pidi/scribble/ted": 2,
5 | "canny/lineart/anime_lineart/mlsd": 3,
6 | "normal": 4,
7 | "segment": 5,
8 | "tile": 6,
9 | "repaint": 7,
10 | }
11 |
--------------------------------------------------------------------------------
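This table maps human-readable control types to the integer ids a union ControlNet expects; comfy_extras/nodes_controlnet.py further down performs exactly this lookup. A standalone sketch:

```python
from comfy.cldm.control_types import UNION_CONTROLNET_TYPES

# Unknown names fall back to -1, which callers treat as "auto"/unset.
type_number = UNION_CONTROLNET_TYPES.get("depth", -1)  # 1
auto_number = UNION_CONTROLNET_TYPES.get("auto", -1)   # -1
```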
/comfy_extras/chainner_models/model_loading.py:
--------------------------------------------------------------------------------
1 | from spandrel import ModelLoader
2 |
3 | def load_state_dict(state_dict):
4 | print("WARNING: comfy_extras.chainner_models is deprecated and has been replaced by the spandrel library.")
5 | return ModelLoader().load_from_state_dict(state_dict).eval()
6 |
--------------------------------------------------------------------------------
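A hedged usage sketch for the shim above; the checkpoint path is hypothetical, and spandrel must recognize the architecture in the state dict:

```python
# Sketch: route an upscale-model state dict through the deprecated shim,
# which simply defers to spandrel's ModelLoader.
import torch
from comfy_extras.chainner_models import model_loading

state_dict = torch.load("models/upscale_models/4x_model.pth", map_location="cpu")
model = model_loading.load_state_dict(state_dict)  # prints the deprecation warning
```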
/tests-ui/jest.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('jest').Config} */
2 | const config = {
3 | testEnvironment: "jsdom",
4 | setupFiles: ["./globalSetup.js"],
5 | setupFilesAfterEnv: ["./afterSetup.js"],
6 | clearMocks: true,
7 | resetModules: true,
8 | testTimeout: 10000
9 | };
10 |
11 | module.exports = config;
12 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchsde
3 | torchvision
4 | torchaudio
5 | einops
6 | transformers>=4.28.1
7 | tokenizers>=0.13.3
8 | sentencepiece
9 | safetensors>=0.4.2
10 | aiohttp
11 | pyyaml
12 | Pillow
13 | scipy
14 | tqdm
15 | psutil
16 |
17 | # non-essential dependencies:
18 | kornia>=0.7.1
19 | spandrel
20 | soundfile
21 |
--------------------------------------------------------------------------------
/tests-ui/afterSetup.js:
--------------------------------------------------------------------------------
1 | const { start } = require("./utils");
2 | const lg = require("./utils/litegraph");
3 |
4 | // Load things once per test file beforehand to ensure it's all warmed up for the tests
5 | beforeAll(async () => {
6 | lg.setup(global);
7 | await start({ resetEnv: true });
8 | lg.teardown(global);
9 | });
10 |
--------------------------------------------------------------------------------
/.ci/update_windows/update_comfyui.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\
3 | if exist update_new.py (
4 | move /y update_new.py update.py
5 | echo Running updater again since it got updated.
6 | ..\python_embeded\python.exe .\update.py ..\ComfyUI\ --skip_self_update
7 | )
8 | if "%~1"=="" pause
9 |
--------------------------------------------------------------------------------
/web/scripts/ui/spinner.js:
--------------------------------------------------------------------------------
1 | import { addStylesheet } from "../utils.js";
2 |
3 | addStylesheet(import.meta.url);
4 |
5 | export function createSpinner() {
6 | const div = document.createElement("div");
7 | div.innerHTML = `<div class="lds-ring"><div></div><div></div><div></div><div></div></div>`;
8 | return div.firstElementChild;
9 | }
10 |
--------------------------------------------------------------------------------
/comfy/checkpoint_pickle.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | load = pickle.load
4 |
5 | class Empty:
6 | pass
7 |
8 | class Unpickler(pickle.Unpickler):
9 | def find_class(self, module, name):
10 | #TODO: safe unpickle
11 | if module.startswith("pytorch_lightning"):
12 | return Empty
13 | return super().find_class(module, name)
14 |
--------------------------------------------------------------------------------
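How this module is meant to be wired up (a sketch; the actual call site, comfy/utils.py upstream, is not part of this dump): torch.load accepts any pickle-compatible module, and the Unpickler above maps pytorch_lightning class references to the inert Empty so legacy Lightning checkpoints load without the dependency.

```python
# Sketch, assuming a legacy .ckpt exists at the (hypothetical) path below.
import torch
import comfy.checkpoint_pickle

state = torch.load("models/checkpoints/legacy.ckpt", map_location="cpu",
                   pickle_module=comfy.checkpoint_pickle)
```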
/web/extensions/core/uploadImage.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Adds an upload button to the nodes
4 |
5 | app.registerExtension({
6 | name: "Comfy.UploadImage",
7 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 | if (nodeData?.input?.required?.image?.[1]?.image_upload === true) {
9 | nodeData.input.required.upload = ["IMAGEUPLOAD"];
10 | }
11 | },
12 | });
13 |
--------------------------------------------------------------------------------
/tests-ui/globalSetup.js:
--------------------------------------------------------------------------------
1 | module.exports = async function () {
2 | global.ResizeObserver = class ResizeObserver {
3 | observe() {}
4 | unobserve() {}
5 | disconnect() {}
6 | };
7 |
8 | const { nop } = require("./utils/nopProxy");
9 | global.enableWebGLCanvas = nop;
10 |
11 | HTMLCanvasElement.prototype.getContext = nop;
12 |
13 | localStorage["Comfy.Settings.Comfy.Logging.Enabled"] = "false";
14 | };
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | /output/
4 | /input/
5 | !/input/example.png
6 | /models/
7 | /temp/
8 | /custom_nodes/
9 | !custom_nodes/example_node.py.example
10 | extra_model_paths.yaml
11 | /.vs
12 | .vscode/
13 | .idea/
14 | venv/
15 | /web/extensions/*
16 | !/web/extensions/logging.js.example
17 | !/web/extensions/core/
18 | /tests-ui/data/object_info.json
19 | /user/
20 | *.log
21 | web_custom_versions/
22 | .DS_Store
23 |
--------------------------------------------------------------------------------
/comfy/ldm/common_dit.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def pad_to_patch_size(img, patch_size=(2, 2), padding_mode="circular"):
4 | if padding_mode == "circular" and (torch.jit.is_tracing() or torch.jit.is_scripting()):
5 | padding_mode = "reflect"
6 | pad_h = (patch_size[0] - img.shape[-2] % patch_size[0]) % patch_size[0]
7 | pad_w = (patch_size[1] - img.shape[-1] % patch_size[1]) % patch_size[1]
8 | return torch.nn.functional.pad(img, (0, pad_w, 0, pad_h), mode=padding_mode)
9 |
--------------------------------------------------------------------------------
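A worked example of the padding arithmetic above: a height already divisible by the patch size gets no padding, while width 31 gets one column on the right.

```python
import torch
from comfy.ldm.common_dit import pad_to_patch_size

img = torch.zeros(1, 4, 30, 31)                    # [B, C, H, W]
padded = pad_to_patch_size(img, patch_size=(2, 2))
print(padded.shape)                                # torch.Size([1, 4, 30, 32])
```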
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: ComfyUI Matrix Space
4 | url: https://app.element.io/#/room/%23comfyui_space%3Amatrix.org
5 | about: The ComfyUI Matrix Space is available for support and general discussion related to ComfyUI (Matrix is like Discord but open source).
6 | - name: Comfy Org Discord
7 | url: https://discord.gg/comfyorg
8 | about: The Comfy Org Discord is available for support and general discussion related to ComfyUI.
9 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_g.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1664,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 8192,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 48,
15 | "patch_size": 14,
16 | "projection_dim": 1280,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_h.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "gelu",
5 | "hidden_size": 1280,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 5120,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 32,
15 | "patch_size": 14,
16 | "projection_dim": 1024,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_vitl.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "quick_gelu",
5 | "hidden_size": 1024,
6 | "image_size": 224,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 4096,
10 | "layer_norm_eps": 1e-05,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 24,
15 | "patch_size": 14,
16 | "projection_dim": 768,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
/comfy/clip_vision_config_vitl_336.json:
--------------------------------------------------------------------------------
1 | {
2 | "attention_dropout": 0.0,
3 | "dropout": 0.0,
4 | "hidden_act": "quick_gelu",
5 | "hidden_size": 1024,
6 | "image_size": 336,
7 | "initializer_factor": 1.0,
8 | "initializer_range": 0.02,
9 | "intermediate_size": 4096,
10 | "layer_norm_eps": 1e-5,
11 | "model_type": "clip_vision_model",
12 | "num_attention_heads": 16,
13 | "num_channels": 3,
14 | "num_hidden_layers": 24,
15 | "patch_size": 14,
16 | "projection_dim": 768,
17 | "torch_dtype": "float32"
18 | }
19 |
--------------------------------------------------------------------------------
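The four JSON files above are plain CLIP vision-tower configuration dictionaries. As an illustration only (an assumption about field compatibility, not how ComfyUI itself consumes them), they line up with Hugging Face's CLIPVisionConfig fields:

```python
# Sketch, assuming transformers is installed.
import json
from transformers import CLIPVisionConfig

with open("comfy/clip_vision_config_vitl.json") as f:
    cfg = CLIPVisionConfig(**json.load(f))
print(cfg.hidden_size, cfg.num_hidden_layers)  # 1024 24
```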
/.github/workflows/pylint.yml:
--------------------------------------------------------------------------------
1 | name: Python Linting
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | pylint:
7 | name: Run Pylint
8 | runs-on: ubuntu-latest
9 |
10 | steps:
11 | - name: Checkout repository
12 | uses: actions/checkout@v4
13 |
14 | - name: Set up Python
15 | uses: actions/setup-python@v2
16 | with:
17 | python-version: 3.x
18 |
19 | - name: Install Pylint
20 | run: pip install pylint
21 |
22 | - name: Run Pylint
23 | run: pylint --rcfile=.pylintrc $(find . -type f -name "*.py")
24 |
--------------------------------------------------------------------------------
/comfy/sd1_tokenizer/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "bos_token": {
3 | "content": "<|startoftext|>",
4 | "lstrip": false,
5 | "normalized": true,
6 | "rstrip": false,
7 | "single_word": false
8 | },
9 | "eos_token": {
10 | "content": "<|endoftext|>",
11 | "lstrip": false,
12 | "normalized": true,
13 | "rstrip": false,
14 | "single_word": false
15 | },
16 | "pad_token": "<|endoftext|>",
17 | "unk_token": {
18 | "content": "<|endoftext|>",
19 | "lstrip": false,
20 | "normalized": true,
21 | "rstrip": false,
22 | "single_word": false
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/comfy/text_encoders/t5_config_base.json:
--------------------------------------------------------------------------------
1 | {
2 | "d_ff": 3072,
3 | "d_kv": 64,
4 | "d_model": 768,
5 | "decoder_start_token_id": 0,
6 | "dropout_rate": 0.1,
7 | "eos_token_id": 1,
8 | "dense_act_fn": "relu",
9 | "initializer_factor": 1.0,
10 | "is_encoder_decoder": true,
11 | "is_gated_act": false,
12 | "layer_norm_epsilon": 1e-06,
13 | "model_type": "t5",
14 | "num_decoder_layers": 12,
15 | "num_heads": 12,
16 | "num_layers": 12,
17 | "output_past": true,
18 | "pad_token_id": 0,
19 | "relative_attention_num_buckets": 32,
20 | "tie_word_embeddings": false,
21 | "vocab_size": 32128
22 | }
23 |
--------------------------------------------------------------------------------
/comfy/text_encoders/t5_config_xxl.json:
--------------------------------------------------------------------------------
1 | {
2 | "d_ff": 10240,
3 | "d_kv": 64,
4 | "d_model": 4096,
5 | "decoder_start_token_id": 0,
6 | "dropout_rate": 0.1,
7 | "eos_token_id": 1,
8 | "dense_act_fn": "gelu_pytorch_tanh",
9 | "initializer_factor": 1.0,
10 | "is_encoder_decoder": true,
11 | "is_gated_act": true,
12 | "layer_norm_epsilon": 1e-06,
13 | "model_type": "t5",
14 | "num_decoder_layers": 24,
15 | "num_heads": 64,
16 | "num_layers": 24,
17 | "output_past": true,
18 | "pad_token_id": 0,
19 | "relative_attention_num_buckets": 32,
20 | "tie_word_embeddings": false,
21 | "vocab_size": 32128
22 | }
23 |
--------------------------------------------------------------------------------
/comfy/text_encoders/mt5_config_xl.json:
--------------------------------------------------------------------------------
1 | {
2 | "d_ff": 5120,
3 | "d_kv": 64,
4 | "d_model": 2048,
5 | "decoder_start_token_id": 0,
6 | "dropout_rate": 0.1,
7 | "eos_token_id": 1,
8 | "dense_act_fn": "gelu_pytorch_tanh",
9 | "initializer_factor": 1.0,
10 | "is_encoder_decoder": true,
11 | "is_gated_act": true,
12 | "layer_norm_epsilon": 1e-06,
13 | "model_type": "mt5",
14 | "num_decoder_layers": 24,
15 | "num_heads": 32,
16 | "num_layers": 24,
17 | "output_past": true,
18 | "pad_token_id": 0,
19 | "relative_attention_num_buckets": 32,
20 | "tie_word_embeddings": false,
21 | "vocab_size": 250112
22 | }
23 |
--------------------------------------------------------------------------------
/comfy/text_encoders/t5_pile_config_xl.json:
--------------------------------------------------------------------------------
1 | {
2 | "d_ff": 5120,
3 | "d_kv": 64,
4 | "d_model": 2048,
5 | "decoder_start_token_id": 0,
6 | "dropout_rate": 0.1,
7 | "eos_token_id": 2,
8 | "dense_act_fn": "gelu_pytorch_tanh",
9 | "initializer_factor": 1.0,
10 | "is_encoder_decoder": true,
11 | "is_gated_act": true,
12 | "layer_norm_epsilon": 1e-06,
13 | "model_type": "umt5",
14 | "num_decoder_layers": 24,
15 | "num_heads": 32,
16 | "num_layers": 24,
17 | "output_past": true,
18 | "pad_token_id": 1,
19 | "relative_attention_num_buckets": 32,
20 | "tie_word_embeddings": false,
21 | "vocab_size": 32128
22 | }
23 |
--------------------------------------------------------------------------------
/web/lib/litegraph.extensions.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Changes the background color of the canvas.
3 | *
4 | * @method updateBackground
5 |  * @param {String} image
6 |  * @param {String} clearBackgroundColor
7 |  *
8 | */
9 | LGraphCanvas.prototype.updateBackground = function (image, clearBackgroundColor) {
10 | this._bg_img = new Image();
11 | this._bg_img.name = image;
12 | this._bg_img.src = image;
13 | this._bg_img.onload = () => {
14 | this.draw(true, true);
15 | };
16 | this.background_image = image;
17 |
18 | this.clear_background = true;
19 | this.clear_background_color = clearBackgroundColor;
20 | this._pattern = null;
21 | }
22 |
--------------------------------------------------------------------------------
/comfy/clip_config_bigg.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 49407,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1280,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 5120,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 20,
18 | "num_hidden_layers": 32,
19 | "pad_token_id": 1,
20 | "projection_dim": 1280,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/comfy/text_encoders/sd2_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "architectures": [
3 | "CLIPTextModel"
4 | ],
5 | "attention_dropout": 0.0,
6 | "bos_token_id": 0,
7 | "dropout": 0.0,
8 | "eos_token_id": 49407,
9 | "hidden_act": "gelu",
10 | "hidden_size": 1024,
11 | "initializer_factor": 1.0,
12 | "initializer_range": 0.02,
13 | "intermediate_size": 4096,
14 | "layer_norm_eps": 1e-05,
15 | "max_position_embeddings": 77,
16 | "model_type": "clip_text_model",
17 | "num_attention_heads": 16,
18 | "num_hidden_layers": 24,
19 | "pad_token_id": 1,
20 | "projection_dim": 1024,
21 | "torch_dtype": "float32",
22 | "vocab_size": 49408
23 | }
24 |
--------------------------------------------------------------------------------
/web/scripts/ui/menu/interruptButton.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { api } from "../../api.js";
4 | import { ComfyButton } from "../components/button.js";
5 |
6 | export function getInteruptButton(visibility) {
7 | const btn = new ComfyButton({
8 | icon: "close",
9 | tooltip: "Cancel current generation",
10 | enabled: false,
11 | action: () => {
12 | api.interrupt();
13 | },
14 | classList: ["comfyui-button", "comfyui-interrupt-button", visibility],
15 | });
16 |
17 | api.addEventListener("status", ({ detail }) => {
18 | const sz = detail?.exec_info?.queue_remaining;
19 | btn.enabled = sz > 0;
20 | });
21 |
22 | return btn;
23 | }
24 |
--------------------------------------------------------------------------------
/web/extensions/core/linkRenderMode.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | const id = "Comfy.LinkRenderMode";
4 | const ext = {
5 | name: id,
6 | async setup(app) {
7 | app.ui.settings.addSetting({
8 | id,
9 | name: "Link Render Mode",
10 | defaultValue: 2,
11 | type: "combo",
12 | options: [...LiteGraph.LINK_RENDER_MODES, "Hidden"].map((m, i) => ({
13 | value: i,
14 | text: m,
15 | selected: i == app.canvas.links_render_mode,
16 | })),
17 | onChange(value) {
18 | app.canvas.links_render_mode = +value;
19 | app.graph.setDirtyCanvas(true);
20 | },
21 | });
22 | },
23 | };
24 |
25 | app.registerExtension(ext);
26 |
--------------------------------------------------------------------------------
/comfy/text_encoders/hydit_clip_tokenizer/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "cls_token": "[CLS]",
3 | "do_basic_tokenize": true,
4 | "do_lower_case": true,
5 | "mask_token": "[MASK]",
6 | "name_or_path": "hfl/chinese-roberta-wwm-ext",
7 | "never_split": null,
8 | "pad_token": "[PAD]",
9 | "sep_token": "[SEP]",
10 | "special_tokens_map_file": "/home/chenweifeng/.cache/huggingface/hub/models--hfl--chinese-roberta-wwm-ext/snapshots/5c58d0b8ec1d9014354d691c538661bf00bfdb44/special_tokens_map.json",
11 | "strip_accents": null,
12 | "tokenize_chinese_chars": true,
13 | "tokenizer_class": "BertTokenizer",
14 | "unk_token": "[UNK]",
15 | "model_max_length": 77
16 | }
17 |
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | # Automated Testing
2 |
3 | ## Running tests locally
4 |
5 | Additional requirements for running tests:
6 | ```
7 | pip install pytest
8 | pip install websocket-client==1.6.1
9 | pip install opencv-python==4.6.0.66
10 | pip install scikit-image==0.21.0
11 | ```
12 | Run inference tests:
13 | ```
14 | pytest tests/inference
15 | ```
16 |
17 | ## Quality regression test
18 | Compares images in 2 directories to ensure they are the same
19 |
20 | 1) Run an inference test to save a directory of "ground truth" images
21 | ```
22 | pytest tests/inference --output_dir tests/inference/baseline
23 | ```
24 | 2) Make code edits
25 |
26 | 3) Run inference and quality comparison tests
27 | ```
28 | pytest
29 | ```
--------------------------------------------------------------------------------
/comfy/sd1_clip_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "_name_or_path": "openai/clip-vit-large-patch14",
3 | "architectures": [
4 | "CLIPTextModel"
5 | ],
6 | "attention_dropout": 0.0,
7 | "bos_token_id": 0,
8 | "dropout": 0.0,
9 | "eos_token_id": 49407,
10 | "hidden_act": "quick_gelu",
11 | "hidden_size": 768,
12 | "initializer_factor": 1.0,
13 | "initializer_range": 0.02,
14 | "intermediate_size": 3072,
15 | "layer_norm_eps": 1e-05,
16 | "max_position_embeddings": 77,
17 | "model_type": "clip_text_model",
18 | "num_attention_heads": 12,
19 | "num_hidden_layers": 12,
20 | "pad_token_id": 1,
21 | "projection_dim": 768,
22 | "torch_dtype": "float32",
23 | "transformers_version": "4.24.0",
24 | "vocab_size": 49408
25 | }
26 |
--------------------------------------------------------------------------------
/web/scripts/ui/menu/viewHistory.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { ComfyButton } from "../components/button.js";
4 | import { ComfyViewList, ComfyViewListButton } from "./viewList.js";
5 |
6 | export class ComfyViewHistoryButton extends ComfyViewListButton {
7 | constructor(app) {
8 | super(app, {
9 | button: new ComfyButton({
10 | content: "View History",
11 | icon: "history",
12 | tooltip: "View history",
13 | classList: "comfyui-button comfyui-history-button",
14 | }),
15 | list: ComfyViewHistoryList,
16 | mode: "History",
17 | });
18 | }
19 | }
20 |
21 | export class ComfyViewHistoryList extends ComfyViewList {
22 | async loadItems() {
23 | const items = await super.loadItems();
24 | items["History"].reverse();
25 | return items;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/web/scripts/ui/spinner.css:
--------------------------------------------------------------------------------
1 | .lds-ring {
2 | display: inline-block;
3 | position: relative;
4 | width: 1em;
5 | height: 1em;
6 | }
7 | .lds-ring div {
8 | box-sizing: border-box;
9 | display: block;
10 | position: absolute;
11 | width: 100%;
12 | height: 100%;
13 | border: 0.15em solid #fff;
14 | border-radius: 50%;
15 | animation: lds-ring 1.2s cubic-bezier(0.5, 0, 0.5, 1) infinite;
16 | border-color: #fff transparent transparent transparent;
17 | }
18 | .lds-ring div:nth-child(1) {
19 | animation-delay: -0.45s;
20 | }
21 | .lds-ring div:nth-child(2) {
22 | animation-delay: -0.3s;
23 | }
24 | .lds-ring div:nth-child(3) {
25 | animation-delay: -0.15s;
26 | }
27 | @keyframes lds-ring {
28 | 0% {
29 | transform: rotate(0deg);
30 | }
31 | 100% {
32 | transform: rotate(360deg);
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/fix_torch.py:
--------------------------------------------------------------------------------
1 | import importlib.util
2 | import shutil
3 | import os
4 | import ctypes
5 | import logging
6 |
7 |
8 | torch_spec = importlib.util.find_spec("torch")
9 | for folder in torch_spec.submodule_search_locations:
10 | lib_folder = os.path.join(folder, "lib")
11 | test_file = os.path.join(lib_folder, "fbgemm.dll")
12 | dest = os.path.join(lib_folder, "libomp140.x86_64.dll")
13 | if os.path.exists(dest):
14 | break
15 |
16 | if not os.path.exists(test_file):
17 | break
18 |
19 | with open(test_file, 'rb') as f:
20 | contents = f.read()
21 | if b"libomp140.x86_64.dll" not in contents:
22 | break
23 | try:
24 | mydll = ctypes.cdll.LoadLibrary(test_file)
25 | except FileNotFoundError:
26 | logging.warning("Detected pytorch version with libomp issue, patching.")
27 | shutil.copyfile(os.path.join(lib_folder, "libiomp5md.dll"), dest)
25 |
--------------------------------------------------------------------------------
/tests-ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "comfui-tests",
3 | "version": "1.0.0",
4 | "description": "UI tests",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "jest",
8 | "test:generate": "node setup.js"
9 | },
10 | "repository": {
11 | "type": "git",
12 | "url": "git+https://github.com/comfyanonymous/ComfyUI.git"
13 | },
14 | "keywords": [
15 | "comfyui",
16 | "test"
17 | ],
18 | "author": "comfyanonymous",
19 | "license": "GPL-3.0",
20 | "bugs": {
21 | "url": "https://github.com/comfyanonymous/ComfyUI/issues"
22 | },
23 | "homepage": "https://github.com/comfyanonymous/ComfyUI#readme",
24 | "devDependencies": {
25 | "@babel/preset-env": "^7.22.20",
26 | "@types/jest": "^29.5.5",
27 | "babel-plugin-transform-import-meta": "^2.2.1",
28 | "jest": "^29.7.0",
29 | "jest-environment-jsdom": "^29.7.0"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/.github/workflows/test-build.yml:
--------------------------------------------------------------------------------
1 | name: Build package
2 |
3 | #
4 | # This workflow is a test of the python package build.
5 | # Install Python dependencies across different Python versions.
6 | #
7 |
8 | on:
9 | push:
10 | paths:
11 | - "requirements.txt"
12 | - ".github/workflows/test-build.yml"
13 |
14 | jobs:
15 | build:
16 | name: Build Test
17 | runs-on: ubuntu-latest
18 | strategy:
19 | fail-fast: false
20 | matrix:
21 | python-version: ["3.8", "3.9", "3.10", "3.11"]
22 | steps:
23 | - uses: actions/checkout@v4
24 | - name: Set up Python ${{ matrix.python-version }}
25 | uses: actions/setup-python@v4
26 | with:
27 | python-version: ${{ matrix.python-version }}
28 | - name: Install dependencies
29 | run: |
30 | python -m pip install --upgrade pip
31 | pip install -r requirements.txt
--------------------------------------------------------------------------------
/comfy_extras/nodes_hunyuan.py:
--------------------------------------------------------------------------------
1 | class CLIPTextEncodeHunyuanDiT:
2 | @classmethod
3 | def INPUT_TYPES(s):
4 | return {"required": {
5 | "clip": ("CLIP", ),
6 | "bert": ("STRING", {"multiline": True, "dynamicPrompts": True}),
7 | "mt5xl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
8 | }}
9 | RETURN_TYPES = ("CONDITIONING",)
10 | FUNCTION = "encode"
11 |
12 | CATEGORY = "advanced/conditioning"
13 |
14 | def encode(self, clip, bert, mt5xl):
15 | tokens = clip.tokenize(bert)
16 | tokens["mt5xl"] = clip.tokenize(mt5xl)["mt5xl"]
17 |
18 | output = clip.encode_from_tokens(tokens, return_pooled=True, return_dict=True)
19 | cond = output.pop("cond")
20 | return ([[cond, output]], )
21 |
22 |
23 | NODE_CLASS_MAPPINGS = {
24 | "CLIPTextEncodeHunyuanDiT": CLIPTextEncodeHunyuanDiT,
25 | }
26 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_cond.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class CLIPTextEncodeControlnet:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {"required": {"clip": ("CLIP", ), "conditioning": ("CONDITIONING", ), "text": ("STRING", {"multiline": True, "dynamicPrompts": True})}}
7 | RETURN_TYPES = ("CONDITIONING",)
8 | FUNCTION = "encode"
9 |
10 | CATEGORY = "_for_testing/conditioning"
11 |
12 | def encode(self, clip, conditioning, text):
13 | tokens = clip.tokenize(text)
14 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
15 | c = []
16 | for t in conditioning:
17 | n = [t[0], t[1].copy()]
18 | n[1]['cross_attn_controlnet'] = cond
19 | n[1]['pooled_output_controlnet'] = pooled
20 | c.append(n)
21 | return (c, )
22 |
23 | NODE_CLASS_MAPPINGS = {
24 | "CLIPTextEncodeControlnet": CLIPTextEncodeControlnet
25 | }
26 |
--------------------------------------------------------------------------------
/.github/workflows/test-ui.yaml:
--------------------------------------------------------------------------------
1 | name: Tests CI
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | test:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v4
10 | - uses: actions/setup-node@v3
11 | with:
12 | node-version: 18
13 | - uses: actions/setup-python@v4
14 | with:
15 | python-version: '3.10'
16 | - name: Install requirements
17 | run: |
18 | python -m pip install --upgrade pip
19 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
20 | pip install -r requirements.txt
21 | - name: Run Tests
22 | run: |
23 | npm ci
24 | npm run test:generate
25 | npm test -- --verbose
26 | working-directory: ./tests-ui
27 | - name: Run Unit Tests
28 | run: |
29 | pip install -r tests-unit/requirements.txt
30 | python -m pytest tests-unit
31 |
--------------------------------------------------------------------------------
/tests-ui/utils/litegraph.js:
--------------------------------------------------------------------------------
1 | const fs = require("fs");
2 | const path = require("path");
3 | const { nop } = require("../utils/nopProxy");
4 |
5 | function forEachKey(cb) {
6 | for (const k of [
7 | "LiteGraph",
8 | "LGraph",
9 | "LLink",
10 | "LGraphNode",
11 | "LGraphGroup",
12 | "DragAndScale",
13 | "LGraphCanvas",
14 | "ContextMenu",
15 | ]) {
16 | cb(k);
17 | }
18 | }
19 |
20 | export function setup(ctx) {
21 | const lg = fs.readFileSync(path.resolve("../web/lib/litegraph.core.js"), "utf-8");
22 | const globalTemp = {};
23 | (function (console) {
24 | eval(lg);
25 | }).call(globalTemp, nop);
26 |
27 | forEachKey((k) => (ctx[k] = globalTemp[k]));
28 | require(path.resolve("../web/lib/litegraph.extensions.js"));
29 | }
30 |
31 | export function teardown(ctx) {
32 | forEachKey((k) => delete ctx[k]);
33 |
34 | // Clear document after each run
35 | document.getElementsByTagName("html")[0].innerHTML = "";
36 | }
37 |
--------------------------------------------------------------------------------
/comfy/sd1_tokenizer/tokenizer_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "add_prefix_space": false,
3 | "bos_token": {
4 | "__type": "AddedToken",
5 | "content": "<|startoftext|>",
6 | "lstrip": false,
7 | "normalized": true,
8 | "rstrip": false,
9 | "single_word": false
10 | },
11 | "do_lower_case": true,
12 | "eos_token": {
13 | "__type": "AddedToken",
14 | "content": "<|endoftext|>",
15 | "lstrip": false,
16 | "normalized": true,
17 | "rstrip": false,
18 | "single_word": false
19 | },
20 | "errors": "replace",
21 | "model_max_length": 77,
22 | "name_or_path": "openai/clip-vit-large-patch14",
23 | "pad_token": "<|endoftext|>",
24 | "special_tokens_map_file": "./special_tokens_map.json",
25 | "tokenizer_class": "CLIPTokenizer",
26 | "unk_token": {
27 | "__type": "AddedToken",
28 | "content": "<|endoftext|>",
29 | "lstrip": false,
30 | "normalized": true,
31 | "rstrip": false,
32 | "single_word": false
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/web/extensions/core/invertMenuScrolling.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Inverts the scrolling of context menus
4 |
5 | const id = "Comfy.InvertMenuScrolling";
6 | app.registerExtension({
7 | name: id,
8 | init() {
9 | const ctxMenu = LiteGraph.ContextMenu;
10 | const replace = () => {
11 | LiteGraph.ContextMenu = function (values, options) {
12 | options = options || {};
13 | if (options.scroll_speed) {
14 | options.scroll_speed *= -1;
15 | } else {
16 | options.scroll_speed = -0.1;
17 | }
18 | return ctxMenu.call(this, values, options);
19 | };
20 | LiteGraph.ContextMenu.prototype = ctxMenu.prototype;
21 | };
22 | app.ui.settings.addSetting({
23 | id,
24 | name: "Invert Menu Scrolling",
25 | type: "boolean",
26 | defaultValue: false,
27 | onChange(value) {
28 | if (value) {
29 | replace();
30 | } else {
31 | LiteGraph.ContextMenu = ctxMenu;
32 | }
33 | },
34 | });
35 | },
36 | });
37 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_controlnet.py:
--------------------------------------------------------------------------------
1 | from comfy.cldm.control_types import UNION_CONTROLNET_TYPES
2 |
3 | class SetUnionControlNetType:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {"required": {"control_net": ("CONTROL_NET", ),
7 | "type": (["auto"] + list(UNION_CONTROLNET_TYPES.keys()),)
8 | }}
9 |
10 | CATEGORY = "conditioning/controlnet"
11 | RETURN_TYPES = ("CONTROL_NET",)
12 |
13 | FUNCTION = "set_controlnet_type"
14 |
15 | def set_controlnet_type(self, control_net, type):
16 | control_net = control_net.copy()
17 | type_number = UNION_CONTROLNET_TYPES.get(type, -1)
18 | if type_number >= 0:
19 | control_net.set_extra_arg("control_type", [type_number])
20 | else:
21 | control_net.set_extra_arg("control_type", [])
22 |
23 | return (control_net,)
24 |
25 | NODE_CLASS_MAPPINGS = {
26 | "SetUnionControlNetType": SetUnionControlNetType,
27 | }
28 |
--------------------------------------------------------------------------------
/web/scripts/ui/dialog.js:
--------------------------------------------------------------------------------
1 | import { $el } from "../ui.js";
2 |
3 | export class ComfyDialog extends EventTarget {
4 | #buttons;
5 |
6 | constructor(type = "div", buttons = null) {
7 | super();
8 | this.#buttons = buttons;
9 | this.element = $el(type + ".comfy-modal", { parent: document.body }, [
10 | $el("div.comfy-modal-content", [$el("p", { $: (p) => (this.textElement = p) }), ...this.createButtons()]),
11 | ]);
12 | }
13 |
14 | createButtons() {
15 | return (
16 | this.#buttons ?? [
17 | $el("button", {
18 | type: "button",
19 | textContent: "Close",
20 | onclick: () => this.close(),
21 | }),
22 | ]
23 | );
24 | }
25 |
26 | close() {
27 | this.element.style.display = "none";
28 | }
29 |
30 | show(html) {
31 | if (typeof html === "string") {
32 | this.textElement.innerHTML = html;
33 | } else {
34 | this.textElement.replaceChildren(...(html instanceof Array ? html : [html]));
35 | }
36 | this.element.style.display = "flex";
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_webcam.py:
--------------------------------------------------------------------------------
1 | import nodes
2 | import folder_paths
3 |
4 | MAX_RESOLUTION = nodes.MAX_RESOLUTION
5 |
6 |
7 | class WebcamCapture(nodes.LoadImage):
8 | @classmethod
9 | def INPUT_TYPES(s):
10 | return {
11 | "required": {
12 | "image": ("WEBCAM", {}),
13 | "width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
14 | "height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
15 | "capture_on_queue": ("BOOLEAN", {"default": True}),
16 | }
17 | }
18 | RETURN_TYPES = ("IMAGE",)
19 | FUNCTION = "load_capture"
20 |
21 | CATEGORY = "image"
22 |
23 | def load_capture(self, image, **kwargs):
24 | return super().load_image(folder_paths.get_annotated_filepath(image))
25 |
26 |
27 | NODE_CLASS_MAPPINGS = {
28 | "WebcamCapture": WebcamCapture,
29 | }
30 |
31 | NODE_DISPLAY_NAME_MAPPINGS = {
32 | "WebcamCapture": "Webcam Capture",
33 | }
--------------------------------------------------------------------------------
/comfy_extras/nodes_canny.py:
--------------------------------------------------------------------------------
1 | from kornia.filters import canny
2 | import comfy.model_management
3 |
4 |
5 | class Canny:
6 | @classmethod
7 | def INPUT_TYPES(s):
8 | return {"required": {"image": ("IMAGE",),
9 | "low_threshold": ("FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}),
10 | "high_threshold": ("FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01})
11 | }}
12 |
13 | RETURN_TYPES = ("IMAGE",)
14 | FUNCTION = "detect_edge"
15 |
16 | CATEGORY = "image/preprocessors"
17 |
18 | def detect_edge(self, image, low_threshold, high_threshold):
19 | output = canny(image.to(comfy.model_management.get_torch_device()).movedim(-1, 1), low_threshold, high_threshold)
20 | img_out = output[1].to(comfy.model_management.intermediate_device()).repeat(1, 3, 1, 1).movedim(1, -1)
21 | return (img_out,)
22 |
23 | NODE_CLASS_MAPPINGS = {
24 | "Canny": Canny,
25 | }
26 |
--------------------------------------------------------------------------------
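For reference, the same kornia call outside the node wrapper. ComfyUI IMAGE tensors are [B, H, W, C] floats in 0..1 while kornia expects [B, C, H, W], hence the movedim round trip; canny returns a (magnitude, edges) pair and the node keeps the edges.

```python
# Standalone sketch with a dummy image batch (no ComfyUI device management).
import torch
from kornia.filters import canny

image = torch.rand(1, 64, 64, 3)                    # [B, H, W, C], like ComfyUI
magnitude, edges = canny(image.movedim(-1, 1), 0.4, 0.8)
img_out = edges.repeat(1, 3, 1, 1).movedim(1, -1)   # back to [B, H, W, C]
print(img_out.shape)                                # torch.Size([1, 64, 64, 3])
```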
/.ci/windows_base_files/README_VERY_IMPORTANT.txt:
--------------------------------------------------------------------------------
1 | HOW TO RUN:
2 |
3 | If you have an NVIDIA GPU:
4 |
5 | run_nvidia_gpu.bat
6 |
7 |
8 |
9 | To run it in slow CPU mode:
10 |
11 | run_cpu.bat
12 |
13 |
14 |
15 | IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints
16 |
17 | You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt
18 |
19 |
20 | RECOMMENDED WAY TO UPDATE:
21 | To update the ComfyUI code: update\update_comfyui.bat
22 |
23 |
24 |
25 | To update ComfyUI along with the python dependencies (ONLY run this if you have issues with the python dependencies):
26 | update\update_comfyui_and_python_dependencies.bat
27 |
28 |
29 | TO SHARE MODELS BETWEEN COMFYUI AND ANOTHER UI:
30 | In the ComfyUI directory you will find a file: extra_model_paths.yaml.example
31 | Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor.
32 |
--------------------------------------------------------------------------------
/comfy/types.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from typing import Callable, Protocol, TypedDict, Optional, List
3 |
4 |
5 | class UnetApplyFunction(Protocol):
6 | """Function signature protocol on comfy.model_base.BaseModel.apply_model"""
7 |
8 | def __call__(self, x: torch.Tensor, t: torch.Tensor, **kwargs) -> torch.Tensor:
9 | pass
10 |
11 |
12 | class UnetApplyConds(TypedDict):
13 | """Optional conditions for unet apply function."""
14 |
15 | c_concat: Optional[torch.Tensor]
16 | c_crossattn: Optional[torch.Tensor]
17 | control: Optional[torch.Tensor]
18 | transformer_options: Optional[dict]
19 |
20 |
21 | class UnetParams(TypedDict):
22 | # Tensor of shape [B, C, H, W]
23 | input: torch.Tensor
24 | # Tensor of shape [B]
25 | timestep: torch.Tensor
26 | c: UnetApplyConds
27 | # List of [0, 1], [0], [1], ...
28 | # 0 means conditional, 1 means unconditional
29 | cond_or_uncond: List[int]
30 |
31 |
32 | UnetWrapperFunction = Callable[[UnetApplyFunction, UnetParams], torch.Tensor]
33 |
--------------------------------------------------------------------------------
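A hypothetical wrapper conforming to UnetWrapperFunction above (hedged: the upstream invocation site is not part of this dump, but it passes apply_model plus a UnetParams dict in this shape):

```python
# Sketch: a pass-through wrapper that could log or edit params before the call.
import torch
from comfy.types import UnetApplyFunction, UnetParams

def my_unet_wrapper(apply_model: UnetApplyFunction, params: UnetParams) -> torch.Tensor:
    x, t, c = params["input"], params["timestep"], params["c"]
    # e.g. inspect params["cond_or_uncond"] here before delegating
    return apply_model(x, t, **c)
```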
/comfy/text_encoders/hydit_clip.json:
--------------------------------------------------------------------------------
1 | {
2 | "_name_or_path": "hfl/chinese-roberta-wwm-ext-large",
3 | "architectures": [
4 | "BertModel"
5 | ],
6 | "attention_probs_dropout_prob": 0.1,
7 | "bos_token_id": 0,
8 | "classifier_dropout": null,
9 | "directionality": "bidi",
10 | "eos_token_id": 2,
11 | "hidden_act": "gelu",
12 | "hidden_dropout_prob": 0.1,
13 | "hidden_size": 1024,
14 | "initializer_range": 0.02,
15 | "intermediate_size": 4096,
16 | "layer_norm_eps": 1e-12,
17 | "max_position_embeddings": 512,
18 | "model_type": "bert",
19 | "num_attention_heads": 16,
20 | "num_hidden_layers": 24,
21 | "output_past": true,
22 | "pad_token_id": 0,
23 | "pooler_fc_size": 768,
24 | "pooler_num_attention_heads": 12,
25 | "pooler_num_fc_layers": 3,
26 | "pooler_size_per_head": 128,
27 | "pooler_type": "first_token_transform",
28 | "position_embedding_type": "absolute",
29 | "torch_dtype": "float32",
30 | "transformers_version": "4.22.1",
31 | "type_vocab_size": 2,
32 | "use_cache": true,
33 | "vocab_size": 47020
34 | }
35 |
36 |
--------------------------------------------------------------------------------
/node_helpers.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 |
3 | from comfy.cli_args import args
4 |
5 | from PIL import ImageFile, UnidentifiedImageError
6 |
7 | def conditioning_set_values(conditioning, values={}):
8 | c = []
9 | for t in conditioning:
10 | n = [t[0], t[1].copy()]
11 | for k in values:
12 | n[1][k] = values[k]
13 | c.append(n)
14 |
15 | return c
16 |
17 | def pillow(fn, arg):
18 | prev_value = None
19 | try:
20 | x = fn(arg)
21 | except (OSError, UnidentifiedImageError, ValueError): #PIL issues #4472 and #2445, also fixes ComfyUI issue #3416
22 | prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
23 | ImageFile.LOAD_TRUNCATED_IMAGES = True
24 | x = fn(arg)
25 | finally:
26 | if prev_value is not None:
27 | ImageFile.LOAD_TRUNCATED_IMAGES = prev_value
28 | return x
29 |
30 | def hasher():
31 | hashfuncs = {
32 | "md5": hashlib.md5,
33 | "sha1": hashlib.sha1,
34 | "sha256": hashlib.sha256,
35 | "sha512": hashlib.sha512
36 | }
37 | return hashfuncs[args.default_hashing_function]
38 |
--------------------------------------------------------------------------------
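Typical use of the pillow() guard above (a sketch; upstream image-loading nodes call it like this): wrap the PIL operation so a truncated file is retried once with LOAD_TRUNCATED_IMAGES enabled, then the flag is restored.

```python
# Sketch: open an image defensively through the guard instead of PIL directly.
from PIL import Image, ImageOps
import node_helpers

img = node_helpers.pillow(Image.open, "input/example.png")
img = node_helpers.pillow(ImageOps.exif_transpose, img)  # same guard for EXIF rotation
```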
/comfy/text_encoders/spiece_tokenizer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 | class SPieceTokenizer:
5 | add_eos = True
6 |
7 | @staticmethod
8 | def from_pretrained(path):
9 | return SPieceTokenizer(path)
10 |
11 | def __init__(self, tokenizer_path):
12 | import sentencepiece
13 | if torch.is_tensor(tokenizer_path):
14 | tokenizer_path = tokenizer_path.numpy().tobytes()
15 |
16 | if isinstance(tokenizer_path, bytes):
17 | self.tokenizer = sentencepiece.SentencePieceProcessor(model_proto=tokenizer_path, add_eos=self.add_eos)
18 | else:
19 | self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path, add_eos=self.add_eos)
20 |
21 | def get_vocab(self):
22 | out = {}
23 | for i in range(self.tokenizer.get_piece_size()):
24 | out[self.tokenizer.id_to_piece(i)] = i
25 | return out
26 |
27 | def __call__(self, string):
28 | out = self.tokenizer.encode(string)
29 | return {"input_ids": out}
30 |
31 | def serialize_model(self):
32 | return torch.ByteTensor(list(self.tokenizer.serialized_model_proto()))
33 |
--------------------------------------------------------------------------------
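A usage sketch for the class above, pointed at the bundled tokenizer model (assumes sentencepiece is installed):

```python
from comfy.text_encoders.spiece_tokenizer import SPieceTokenizer

tok = SPieceTokenizer.from_pretrained("comfy/text_encoders/t5_pile_tokenizer/tokenizer.model")
ids = tok("a photo of a cat")["input_ids"]  # token ids, EOS appended (add_eos = True)
proto = tok.serialize_model()               # torch.ByteTensor of the model proto
tok2 = SPieceTokenizer(proto)               # __init__ also accepts the tensor form
```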
/web/scripts/ui/components/buttonGroup.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { $el } from "../../ui.js";
4 | import { ComfyButton } from "./button.js";
5 | import { prop } from "../../utils.js";
6 |
7 | export class ComfyButtonGroup {
8 | element = $el("div.comfyui-button-group");
9 |
10 | /** @param {ComfyButton[]} buttons */

11 | constructor(...buttons) {
12 | this.buttons = prop(this, "buttons", buttons, () => this.update());
13 | }
14 |
15 | /**
16 | * @param {ComfyButton} button
17 | * @param {number} index
18 | */
19 | insert(button, index) {
20 | this.buttons.splice(index, 0, button);
21 | this.update();
22 | }
23 |
24 | /** @param {ComfyButton} button */
25 | append(button) {
26 | this.buttons.push(button);
27 | this.update();
28 | }
29 |
30 | /** @param {ComfyButton|number} indexOrButton */
31 | remove(indexOrButton) {
32 | if (typeof indexOrButton !== "number") {
33 | indexOrButton = this.buttons.indexOf(indexOrButton);
34 | }
35 | if (indexOrButton > -1) {
36 | const r = this.buttons.splice(indexOrButton, 1);
37 | this.update();
38 | return r;
39 | }
40 | }
41 |
42 | update() {
43 | this.element.replaceChildren(...this.buttons.map((b) => b["element"] ?? b));
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/web/extensions/core/noteNode.js:
--------------------------------------------------------------------------------
1 | import {app} from "../../scripts/app.js";
2 | import {ComfyWidgets} from "../../scripts/widgets.js";
3 | // Node that adds notes to your project
4 |
5 | app.registerExtension({
6 | name: "Comfy.NoteNode",
7 | registerCustomNodes() {
8 | class NoteNode {
9 | color=LGraphCanvas.node_colors.yellow.color;
10 | bgcolor=LGraphCanvas.node_colors.yellow.bgcolor;
11 | groupcolor = LGraphCanvas.node_colors.yellow.groupcolor;
12 | constructor() {
13 | if (!this.properties) {
14 | this.properties = {};
15 | this.properties.text="";
16 | }
17 |
18 | ComfyWidgets.STRING(this, "", ["", {default:this.properties.text, multiline: true}], app)
19 |
20 | this.serialize_widgets = true;
21 | this.isVirtualNode = true;
22 |
23 | }
24 |
25 |
26 | }
27 |
28 | // Load default visibility
29 |
30 | LiteGraph.registerNodeType(
31 | "Note",
32 | Object.assign(NoteNode, {
33 | title_mode: LiteGraph.NORMAL_TITLE,
34 | title: "Note",
35 | collapsable: true,
36 | })
37 | );
38 |
39 | NoteNode.category = "utils";
40 | },
41 | });
42 |
--------------------------------------------------------------------------------
/new_updater.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 |
4 | base_path = os.path.dirname(os.path.realpath(__file__))
5 |
6 |
7 | def update_windows_updater():
8 | top_path = os.path.dirname(base_path)
9 | updater_path = os.path.join(base_path, ".ci/update_windows/update.py")
10 | bat_path = os.path.join(base_path, ".ci/update_windows/update_comfyui.bat")
11 |
12 | dest_updater_path = os.path.join(top_path, "update/update.py")
13 | dest_bat_path = os.path.join(top_path, "update/update_comfyui.bat")
14 | dest_bat_deps_path = os.path.join(top_path, "update/update_comfyui_and_python_dependencies.bat")
15 |
16 | try:
17 | with open(dest_bat_path, 'rb') as f:
18 | contents = f.read()
19 | except Exception:
20 | return
21 |
22 | if not contents.startswith(b"..\\python_embeded\\python.exe .\\update.py"):
23 | return
24 |
25 | shutil.copy(updater_path, dest_updater_path)
26 | try:
27 | with open(dest_bat_deps_path, 'rb') as f:
28 | contents = f.read()
29 | contents = contents.replace(b'..\\python_embeded\\python.exe .\\update.py ..\\ComfyUI\\', b'call update_comfyui.bat nopause')
30 | with open(dest_bat_deps_path, 'wb') as f:
31 | f.write(contents)
32 | except Exception:
33 | pass
34 | shutil.copy(bat_path, dest_bat_path)
35 | print("Updated the windows standalone package updater.")
36 |
--------------------------------------------------------------------------------
/extra_model_paths.yaml.example:
--------------------------------------------------------------------------------
1 | #Rename this to extra_model_paths.yaml and ComfyUI will load it
2 |
3 |
4 | #config for a1111 ui
5 | #all you have to do is change the base_path to where yours is installed
6 | a111:
7 | base_path: path/to/stable-diffusion-webui/
8 |
9 | checkpoints: models/Stable-diffusion
10 | configs: models/Stable-diffusion
11 | vae: models/VAE
12 | loras: |
13 | models/Lora
14 | models/LyCORIS
15 | upscale_models: |
16 | models/ESRGAN
17 | models/RealESRGAN
18 | models/SwinIR
19 | embeddings: embeddings
20 | hypernetworks: models/hypernetworks
21 | controlnet: models/ControlNet
22 |
23 | #config for comfyui
24 | #your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc.
25 |
26 | #comfyui:
27 | # base_path: path/to/comfyui/
28 | # checkpoints: models/checkpoints/
29 | # clip: models/clip/
30 | # clip_vision: models/clip_vision/
31 | # configs: models/configs/
32 | # controlnet: models/controlnet/
33 | # embeddings: models/embeddings/
34 | # loras: models/loras/
35 | # upscale_models: models/upscale_models/
36 | # vae: models/vae/
37 |
38 | #other_ui:
39 | # base_path: path/to/ui
40 | # checkpoints: models/checkpoints
41 | # gligen: models/gligen
42 | # custom_nodes: path/custom_nodes
43 |
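
For illustration only, a sketch of how these entries could be resolved (this is not ComfyUI's actual loader): each value joins onto `base_path`, and a multi-line block like the `loras` entry above contributes one search path per line. Requires `pyyaml`.

# Illustrative resolver sketch, run after renaming the example file.
import os
import yaml

with open("extra_model_paths.yaml") as f:
    config = yaml.safe_load(f)

for ui_name, section in config.items():
    base = section.pop("base_path", "")
    for model_type, value in section.items():
        # Multi-line block scalars yield one path per line.
        for p in str(value).splitlines():
            p = p.strip()
            if p:
                print(ui_name, model_type, os.path.join(base, p))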
--------------------------------------------------------------------------------
/comfy/text_encoders/sd2_clip.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | import os
3 |
4 | class SD2ClipHModel(sd1_clip.SDClipModel):
5 | def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None):
6 | if layer == "penultimate":
7 | layer="hidden"
8 | layer_idx=-2
9 |
10 | textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json")
11 | super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0})
12 |
13 | class SD2ClipHTokenizer(sd1_clip.SDTokenizer):
14 | def __init__(self, tokenizer_path=None, embedding_directory=None, tokenizer_data={}):
15 | super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)
16 |
17 | class SD2Tokenizer(sd1_clip.SD1Tokenizer):
18 | def __init__(self, embedding_directory=None, tokenizer_data={}):
19 | super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="h", tokenizer=SD2ClipHTokenizer)
20 |
21 | class SD2ClipModel(sd1_clip.SD1ClipModel):
22 | def __init__(self, device="cpu", dtype=None, **kwargs):
23 | super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs)
24 |
--------------------------------------------------------------------------------
/web/extensions/core/saveImageExtraOutput.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { applyTextReplacements } from "../../scripts/utils.js";
3 | // Use widget values and dates in output filenames
4 |
5 | app.registerExtension({
6 | name: "Comfy.SaveImageExtraOutput",
7 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
8 | if (nodeData.name === "SaveImage") {
9 | const onNodeCreated = nodeType.prototype.onNodeCreated;
10 | // When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R
11 | nodeType.prototype.onNodeCreated = function () {
12 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
13 |
14 | const widget = this.widgets.find((w) => w.name === "filename_prefix");
15 | widget.serializeValue = () => {
16 | return applyTextReplacements(app, widget.value);
17 | };
18 |
19 | return r;
20 | };
21 | } else {
22 | // When any other node is created add a property to alias the node
23 | const onNodeCreated = nodeType.prototype.onNodeCreated;
24 | nodeType.prototype.onNodeCreated = function () {
25 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
26 |
27 | if (!this.properties || !("Node name for S&R" in this.properties)) {
28 | this.addProperty("Node name for S&R", this.constructor.type, "string");
29 | }
30 |
31 | return r;
32 | };
33 | }
34 | },
35 | });
36 |
--------------------------------------------------------------------------------
/custom_nodes/websocket_image_save.py:
--------------------------------------------------------------------------------
1 | from PIL import Image, ImageOps
2 | from io import BytesIO
3 | import numpy as np
4 | import struct
5 | import comfy.utils
6 | import time
7 |
8 | #You can use this node to save full size images through the websocket. The
9 | #images will be sent in exactly the same format as the image previews: as
10 | #binary images on the websocket with an 8 byte header indicating the type
11 | #of binary message (first 4 bytes) and the image format (next 4 bytes).
12 |
13 | #Note that no metadata will be put in the images saved with this node.
14 |
15 | class SaveImageWebsocket:
16 | @classmethod
17 | def INPUT_TYPES(s):
18 | return {"required":
19 | {"images": ("IMAGE", ),}
20 | }
21 |
22 | RETURN_TYPES = ()
23 | FUNCTION = "save_images"
24 |
25 | OUTPUT_NODE = True
26 |
27 | CATEGORY = "api/image"
28 |
29 | def save_images(self, images):
30 | pbar = comfy.utils.ProgressBar(images.shape[0])
31 | step = 0
32 | for image in images:
33 | i = 255. * image.cpu().numpy()
34 | img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
35 | pbar.update_absolute(step, images.shape[0], ("PNG", img, None))
36 | step += 1
37 |
38 | return {}
39 |
40 | def IS_CHANGED(s, images):
41 | return time.time()
42 |
43 | NODE_CLASS_MAPPINGS = {
44 | "SaveImageWebsocket": SaveImageWebsocket,
45 | }
46 |
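
A client-side sketch for decoding one of these binary frames. The big-endian byte order and the two header words follow the convention described in the comment above; treat the concrete code values as assumptions and verify them against the server version you run.

# Sketch: parse the 8 byte header, then decode the remaining bytes as an image.
# Assumption: both header words are big-endian unsigned 32-bit integers.
import struct
from io import BytesIO

from PIL import Image

def decode_frame(data: bytes) -> Image.Image:
    event_type, image_format = struct.unpack(">II", data[:8])
    print("event:", event_type, "format:", image_format)
    return Image.open(BytesIO(data[8:]))  # everything after the header is the encoded image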
--------------------------------------------------------------------------------
/web/scripts/ui/components/splitButton.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { $el } from "../../ui.js";
4 | import { ComfyButton } from "./button.js";
5 | import { prop } from "../../utils.js";
6 | import { ComfyPopup } from "./popup.js";
7 |
8 | export class ComfySplitButton {
9 | /**
10 | * @param {{
11 | * primary: ComfyButton,
12 | * mode?: "hover" | "click",
13 | * horizontal?: "left" | "right",
14 | * position?: "relative" | "absolute"
15 | * }} param0
16 | * @param {Array<ComfyButton> | Array<HTMLElement>} items
17 | */
18 | constructor({ primary, mode, horizontal = "left", position = "relative" }, ...items) {
19 | this.arrow = new ComfyButton({
20 | icon: "chevron-down",
21 | });
22 | this.element = $el("div.comfyui-split-button" + (mode === "hover" ? ".hover" : ""), [
23 | $el("div.comfyui-split-primary", primary.element),
24 | $el("div.comfyui-split-arrow", this.arrow.element),
25 | ]);
26 | this.popup = new ComfyPopup({
27 | target: this.element,
28 | container: position === "relative" ? this.element : document.body,
29 | classList: "comfyui-split-button-popup" + (mode === "hover" ? " hover" : ""),
30 | closeOnEscape: mode === "click",
31 | position,
32 | horizontal,
33 | });
34 |
35 | this.arrow.withPopup(this.popup, mode);
36 |
37 | this.items = prop(this, "items", items, () => this.update());
38 | }
39 |
40 | update() {
41 | this.popup.element.replaceChildren(...this.items.map((b) => b.element ?? b));
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/web/scripts/ui/menu/viewQueue.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { ComfyButton } from "../components/button.js";
4 | import { ComfyViewList, ComfyViewListButton } from "./viewList.js";
5 | import { api } from "../../api.js";
6 |
7 | export class ComfyViewQueueButton extends ComfyViewListButton {
8 | constructor(app) {
9 | super(app, {
10 | button: new ComfyButton({
11 | content: "View Queue",
12 | icon: "format-list-numbered",
13 | tooltip: "View queue",
14 | classList: "comfyui-button comfyui-queue-button",
15 | }),
16 | list: ComfyViewQueueList,
17 | mode: "Queue",
18 | });
19 | }
20 | }
21 |
22 | export class ComfyViewQueueList extends ComfyViewList {
23 | getRow = (item, section) => {
24 | if (section !== "Running") {
25 | return super.getRow(item, section);
26 | }
27 | return {
28 | text: item.prompt[0] + "",
29 | actions: [
30 | {
31 | text: "Load",
32 | action: async () => {
33 | try {
34 | await this.app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
35 | if (item.outputs) {
36 | this.app.nodeOutputs = item.outputs;
37 | }
38 | } catch (error) {
39 | alert("Error loading workflow: " + error.message);
40 | console.error(error);
41 | }
42 | },
43 | },
44 | {
45 | text: "Cancel",
46 | action: async () => {
47 | try {
48 | await api.interrupt();
49 | } catch (error) {}
50 | },
51 | },
52 | ],
53 | };
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/web/scripts/ui/utils.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @typedef { string | string[] | Record<string, boolean> } ClassList
3 | */
4 |
5 | /**
6 | * @param { HTMLElement } element
7 | * @param { ClassList } classList
8 | * @param { string[] } requiredClasses
9 | */
10 | export function applyClasses(element, classList, ...requiredClasses) {
11 | classList ??= "";
12 |
13 | let str;
14 | if (typeof classList === "string") {
15 | str = classList;
16 | } else if (classList instanceof Array) {
17 | str = classList.join(" ");
18 | } else {
19 | str = Object.entries(classList).reduce((p, c) => {
20 | if (c[1]) {
21 | p += (p.length ? " " : "") + c[0];
22 | }
23 | return p;
24 | }, "");
25 | }
26 | element.className = str;
27 | if (requiredClasses) {
28 | element.classList.add(...requiredClasses);
29 | }
30 | }
31 |
32 | /**
33 | * @param { HTMLElement } element
34 | * @param { { onHide?: (el: HTMLElement) => void, onShow?: (el: HTMLElement, value) => void } } [param1]
35 | * @returns
36 | */
37 | export function toggleElement(element, { onHide, onShow } = {}) {
38 | let placeholder;
39 | let hidden;
40 | return (value) => {
41 | if (value) {
42 | if (hidden) {
43 | hidden = false;
44 | placeholder.replaceWith(element);
45 | }
46 | onShow?.(element, value);
47 | } else {
48 | if (!placeholder) {
49 | placeholder = document.createComment("");
50 | }
51 | hidden = true;
52 | element.replaceWith(placeholder);
53 | onHide?.(element);
54 | }
55 | };
56 | }
57 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_differential_diffusion.py:
--------------------------------------------------------------------------------
1 | # code adapted from https://github.com/exx8/differential-diffusion
2 |
3 | import torch
4 |
5 | class DifferentialDiffusion():
6 | @classmethod
7 | def INPUT_TYPES(s):
8 | return {"required": {"model": ("MODEL", ),
9 | }}
10 | RETURN_TYPES = ("MODEL",)
11 | FUNCTION = "apply"
12 | CATEGORY = "_for_testing"
13 | INIT = False
14 |
15 | def apply(self, model):
16 | model = model.clone()
17 | model.set_model_denoise_mask_function(self.forward)
18 | return (model,)
19 |
20 | def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict):
21 | model = extra_options["model"]
22 | step_sigmas = extra_options["sigmas"]
23 | sigma_to = model.inner_model.model_sampling.sigma_min
24 | if step_sigmas[-1] > sigma_to:
25 | sigma_to = step_sigmas[-1]
26 | sigma_from = step_sigmas[0]
27 |
28 | ts_from = model.inner_model.model_sampling.timestep(sigma_from)
29 | ts_to = model.inner_model.model_sampling.timestep(sigma_to)
30 | current_ts = model.inner_model.model_sampling.timestep(sigma[0])
31 |
32 | threshold = (current_ts - ts_to) / (ts_from - ts_to)
33 |
34 | return (denoise_mask >= threshold).to(denoise_mask.dtype)
35 |
36 |
37 | NODE_CLASS_MAPPINGS = {
38 | "DifferentialDiffusion": DifferentialDiffusion,
39 | }
40 | NODE_DISPLAY_NAME_MAPPINGS = {
41 | "DifferentialDiffusion": "Differential Diffusion",
42 | }
43 |
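
A toy numeric sketch of the thresholding in forward(); the timestep values are stand-ins for model_sampling.timestep() outputs. Early in sampling current_ts is near ts_from, so the threshold is close to 1 and only the strongest mask regions denoise; it relaxes toward 0 as sampling proceeds.

import torch

ts_from, ts_to, current_ts = 999.0, 0.0, 750.0        # made-up timesteps
threshold = (current_ts - ts_to) / (ts_from - ts_to)  # ~0.75

denoise_mask = torch.tensor([0.2, 0.5, 0.8, 1.0])
binary = (denoise_mask >= threshold).to(denoise_mask.dtype)
print(binary)  # tensor([0., 0., 1., 1.]) -- only regions above the threshold denoise this step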
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 |
4 | # Command line arguments for pytest
5 | def pytest_addoption(parser):
6 | parser.addoption('--output_dir', action="store", default='tests/inference/samples', help='Output directory for generated images')
7 | parser.addoption("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)")
8 | parser.addoption("--port", type=int, default=8188, help="Set the listen port.")
9 |
10 | # This initializes args at the beginning of the test session
11 | @pytest.fixture(scope="session", autouse=True)
12 | def args_pytest(pytestconfig):
13 | args = {}
14 | args['output_dir'] = pytestconfig.getoption('output_dir')
15 | args['listen'] = pytestconfig.getoption('listen')
16 | args['port'] = pytestconfig.getoption('port')
17 |
18 | os.makedirs(args['output_dir'], exist_ok=True)
19 |
20 | return args
21 |
22 | def pytest_collection_modifyitems(items):
23 | # Modifies items so tests run in the correct order
24 |
25 | LAST_TESTS = ['test_quality']
26 |
27 | # Move the last items to the end
28 | last_items = []
29 | for test_name in LAST_TESTS:
30 | for item in items.copy():
31 | print(item.module.__name__, item)
32 | if item.module.__name__ == test_name:
33 | last_items.append(item)
34 | items.remove(item)
35 |
36 | items.extend(last_items)
37 |
--------------------------------------------------------------------------------
/web/scripts/ui/components/asyncDialog.js:
--------------------------------------------------------------------------------
1 | import { ComfyDialog } from "../dialog.js";
2 | import { $el } from "../../ui.js";
3 |
4 | export class ComfyAsyncDialog extends ComfyDialog {
5 | #resolve;
6 |
7 | constructor(actions) {
8 | super(
9 | "dialog.comfy-dialog.comfyui-dialog",
10 | actions?.map((opt) => {
11 | if (typeof opt === "string") {
12 | opt = { text: opt };
13 | }
14 | return $el("button.comfyui-button", {
15 | type: "button",
16 | textContent: opt.text,
17 | onclick: () => this.close(opt.value ?? opt.text),
18 | });
19 | })
20 | );
21 | }
22 |
23 | show(html) {
24 | this.element.addEventListener("close", () => {
25 | this.close();
26 | });
27 |
28 | super.show(html);
29 |
30 | return new Promise((resolve) => {
31 | this.#resolve = resolve;
32 | });
33 | }
34 |
35 | showModal(html) {
36 | this.element.addEventListener("close", () => {
37 | this.close();
38 | });
39 |
40 | super.show(html);
41 | this.element.showModal();
42 |
43 | return new Promise((resolve) => {
44 | this.#resolve = resolve;
45 | });
46 | }
47 |
48 | close(result = null) {
49 | this.#resolve(result);
50 | this.element.close();
51 | super.close();
52 | }
53 |
54 | static async prompt({ title = null, message, actions }) {
55 | const dialog = new ComfyAsyncDialog(actions);
56 | const content = [$el("span", message)];
57 | if (title) {
58 | content.unshift($el("h3", title));
59 | }
60 | const res = await dialog.showModal(content);
61 | dialog.element.remove();
62 | return res;
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/comfy/diffusers_load.py:
--------------------------------------------------------------------------------
1 | import os
2 | import comfy.utils
3 | import comfy.sd
4 |
5 | def first_file(path, filenames):
6 | for f in filenames:
7 | p = os.path.join(path, f)
8 | if os.path.exists(p):
9 | return p
10 | return None
11 |
12 | def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
13 | diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
14 | unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
15 | vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)
16 |
17 | text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
18 | text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
19 | text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)
20 |
21 | text_encoder_paths = [text_encoder1_path]
22 | if text_encoder2_path is not None:
23 | text_encoder_paths.append(text_encoder2_path)
24 |
25 | unet = comfy.sd.load_diffusion_model(unet_path)
26 |
27 | clip = None
28 | if output_clip:
29 | clip = comfy.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)
30 |
31 | vae = None
32 | if output_vae:
33 | sd = comfy.utils.load_torch_file(vae_path)
34 | vae = comfy.sd.VAE(sd=sd)
35 |
36 | return (unet, clip, vae)
37 |
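
A hypothetical call, with a made-up checkpoint directory that follows the diffusers layout probed above (unet/, vae/, text_encoder/, optional text_encoder_2/):

from comfy.diffusers_load import load_diffusers

# The directory name is illustrative; any diffusers-format checkpoint works.
unet, clip, vae = load_diffusers(
    "models/diffusers/example-checkpoint",
    output_vae=True,
    output_clip=True,
    embedding_directory="models/embeddings",
)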
--------------------------------------------------------------------------------
/comfy/text_encoders/sa_t5.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | from transformers import T5TokenizerFast
3 | import comfy.text_encoders.t5
4 | import os
5 |
6 | class T5BaseModel(sd1_clip.SDClipModel):
7 | def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
8 | textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_base.json")
9 | super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=True)
10 |
11 | class T5BaseTokenizer(sd1_clip.SDTokenizer):
12 | def __init__(self, embedding_directory=None, tokenizer_data={}):
13 | tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
14 | super().__init__(tokenizer_path, pad_with_end=False, embedding_size=768, embedding_key='t5base', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=128)
15 |
16 | class SAT5Tokenizer(sd1_clip.SD1Tokenizer):
17 | def __init__(self, embedding_directory=None, tokenizer_data={}):
18 | super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="t5base", tokenizer=T5BaseTokenizer)
19 |
20 | class SAT5Model(sd1_clip.SD1ClipModel):
21 | def __init__(self, device="cpu", dtype=None, **kwargs):
22 | super().__init__(device=device, dtype=dtype, name="t5base", clip_model=T5BaseModel, **kwargs)
23 |
--------------------------------------------------------------------------------
/comfy/ldm/flux/math.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from einops import rearrange
3 | from torch import Tensor
4 | from comfy.ldm.modules.attention import optimized_attention
5 | import comfy.model_management
6 |
7 | def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
8 | q, k = apply_rope(q, k, pe)
9 |
10 | heads = q.shape[1]
11 | x = optimized_attention(q, k, v, heads, skip_reshape=True)
12 | return x
13 |
14 |
15 | def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
16 | assert dim % 2 == 0
17 | if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu():
18 | device = torch.device("cpu")
19 | else:
20 | device = pos.device
21 |
22 | scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device)
23 | omega = 1.0 / (theta**scale)
24 | out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega)
25 | out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
26 | out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
27 | return out.to(dtype=torch.float32, device=pos.device)
28 |
29 |
30 | def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor):
31 | xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
32 | xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
33 | xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
34 | xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
35 | return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
36 |
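
Each frequency pair in the table built by rope() is a 2x2 rotation, so apply_rope() must preserve per-token norms. A quick property check, assuming a ComfyUI checkout on the path (the extra unsqueeze(1) adds the head-broadcast dimension the Flux model normally supplies):

import torch
from comfy.ldm.flux.math import rope, apply_rope

b, h, n, d = 1, 2, 8, 64
q, k = torch.randn(b, h, n, d), torch.randn(b, h, n, d)
pos = torch.arange(n, dtype=torch.float32)[None, :]  # (b, n) position ids
pe = rope(pos, d, theta=10000).unsqueeze(1)          # (b, 1, n, d//2, 2, 2)

q_rot, k_rot = apply_rope(q, k, pe)
print(torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-4))  # True: rotations preserve norms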
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/user-support.yml:
--------------------------------------------------------------------------------
1 | name: User Support
2 | description: "Use this if you need help with something, or you're experiencing an issue."
3 | labels: [ "User Support" ]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Before submitting a **User Report** issue, please ensure the following:
9 |
10 | **1:** You are running the latest version of ComfyUI.
11 | **2:** You have made an effort to find public answers to your question before asking here. In other words, you googled it first, and scrolled through recent help topics.
12 |
13 | If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first.
14 | - type: textarea
15 | attributes:
16 | label: Your question
17 | description: "Post your question here. Please be as detailed as possible."
18 | validations:
19 | required: true
20 | - type: textarea
21 | attributes:
22 | label: Logs
23 | description: "If your question relates to an issue you're experiencing, please go to `Server` -> `Logs`, set `View Type` to `Debug` if needed, then copy and paste all the text here."
24 | render: powershell
25 | validations:
26 | required: false
27 | - type: textarea
28 | attributes:
29 | label: Other
30 | description: "Any other additional information you think might be helpful."
31 | validations:
32 | required: false
33 |
--------------------------------------------------------------------------------
/comfy/text_encoders/aura_t5.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | from .spiece_tokenizer import SPieceTokenizer
3 | import comfy.text_encoders.t5
4 | import os
5 |
6 | class PT5XlModel(sd1_clip.SDClipModel):
7 | def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
8 | textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_pile_config_xl.json")
9 | super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 2, "pad": 1}, model_class=comfy.text_encoders.t5.T5, enable_attention_masks=True, zero_out_masked=True)
10 |
11 | class PT5XlTokenizer(sd1_clip.SDTokenizer):
12 | def __init__(self, embedding_directory=None, tokenizer_data={}):
13 | tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_pile_tokenizer", "tokenizer.model")
14 | super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='pile_t5xl', tokenizer_class=SPieceTokenizer, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256, pad_token=1)
15 |
16 | class AuraT5Tokenizer(sd1_clip.SD1Tokenizer):
17 | def __init__(self, embedding_directory=None, tokenizer_data={}):
18 | super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, clip_name="pile_t5xl", tokenizer=PT5XlTokenizer)
19 |
20 | class AuraT5Model(sd1_clip.SD1ClipModel):
21 | def __init__(self, device="cpu", dtype=None, **kwargs):
22 | super().__init__(device=device, dtype=dtype, name="pile_t5xl", clip_model=PT5XlModel, **kwargs)
23 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_ip2p.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | class InstructPixToPixConditioning:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {"required": {"positive": ("CONDITIONING", ),
7 | "negative": ("CONDITIONING", ),
8 | "vae": ("VAE", ),
9 | "pixels": ("IMAGE", ),
10 | }}
11 |
12 | RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT")
13 | RETURN_NAMES = ("positive", "negative", "latent")
14 | FUNCTION = "encode"
15 |
16 | CATEGORY = "conditioning/instructpix2pix"
17 |
18 | def encode(self, positive, negative, pixels, vae):
19 | x = (pixels.shape[1] // 8) * 8
20 | y = (pixels.shape[2] // 8) * 8
21 |
22 | if pixels.shape[1] != x or pixels.shape[2] != y:
23 | x_offset = (pixels.shape[1] % 8) // 2
24 | y_offset = (pixels.shape[2] % 8) // 2
25 | pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:]
26 |
27 | concat_latent = vae.encode(pixels)
28 |
29 | out_latent = {}
30 | out_latent["samples"] = torch.zeros_like(concat_latent)
31 |
32 | out = []
33 | for conditioning in [positive, negative]:
34 | c = []
35 | for t in conditioning:
36 | d = t[1].copy()
37 | d["concat_latent_image"] = concat_latent
38 | n = [t[0], d]
39 | c.append(n)
40 | out.append(c)
41 | return (out[0], out[1], out_latent)
42 |
43 | NODE_CLASS_MAPPINGS = {
44 | "InstructPixToPixConditioning": InstructPixToPixConditioning,
45 | }
46 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_flux.py:
--------------------------------------------------------------------------------
1 | import node_helpers
2 |
3 | class CLIPTextEncodeFlux:
4 | @classmethod
5 | def INPUT_TYPES(s):
6 | return {"required": {
7 | "clip": ("CLIP", ),
8 | "clip_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
9 | "t5xxl": ("STRING", {"multiline": True, "dynamicPrompts": True}),
10 | "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
11 | }}
12 | RETURN_TYPES = ("CONDITIONING",)
13 | FUNCTION = "encode"
14 |
15 | CATEGORY = "advanced/conditioning/flux"
16 |
17 | def encode(self, clip, clip_l, t5xxl, guidance):
18 | tokens = clip.tokenize(clip_l)
19 | tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
20 |
21 | output = clip.encode_from_tokens(tokens, return_pooled=True, return_dict=True)
22 | cond = output.pop("cond")
23 | output["guidance"] = guidance
24 | return ([[cond, output]], )
25 |
26 | class FluxGuidance:
27 | @classmethod
28 | def INPUT_TYPES(s):
29 | return {"required": {
30 | "conditioning": ("CONDITIONING", ),
31 | "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
32 | }}
33 |
34 | RETURN_TYPES = ("CONDITIONING",)
35 | FUNCTION = "append"
36 |
37 | CATEGORY = "advanced/conditioning/flux"
38 |
39 | def append(self, conditioning, guidance):
40 | c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
41 | return (c, )
42 |
43 |
44 | NODE_CLASS_MAPPINGS = {
45 | "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
46 | "FluxGuidance": FluxGuidance,
47 | }
48 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/encoders/noise_aug_modules.py:
--------------------------------------------------------------------------------
1 | from ..diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
2 | from ..diffusionmodules.openaimodel import Timestep
3 | import torch
4 |
5 | class CLIPEmbeddingNoiseAugmentation(ImageConcatWithNoiseAugmentation):
6 | def __init__(self, *args, clip_stats_path=None, timestep_dim=256, **kwargs):
7 | super().__init__(*args, **kwargs)
8 | if clip_stats_path is None:
9 | clip_mean, clip_std = torch.zeros(timestep_dim), torch.ones(timestep_dim)
10 | else:
11 | clip_mean, clip_std = torch.load(clip_stats_path, map_location="cpu")
12 | self.register_buffer("data_mean", clip_mean[None, :], persistent=False)
13 | self.register_buffer("data_std", clip_std[None, :], persistent=False)
14 | self.time_embed = Timestep(timestep_dim)
15 |
16 | def scale(self, x):
17 | # re-normalize to centered mean and unit variance
18 | x = (x - self.data_mean.to(x.device)) / self.data_std.to(x.device)
19 | return x
20 |
21 | def unscale(self, x):
22 | # back to original data stats
23 | x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device)
24 | return x
25 |
26 | def forward(self, x, noise_level=None, seed=None):
27 | if noise_level is None:
28 | noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
29 | else:
30 | assert isinstance(noise_level, torch.Tensor)
31 | x = self.scale(x)
32 | z = self.q_sample(x, noise_level, seed=seed)
33 | z = self.unscale(z)
34 | noise_level = self.time_embed(noise_level)
35 | return z, noise_level
36 |
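
A round-trip sketch of scale()/unscale() with random stand-in statistics (real checkpoints load these from clip_stats_path):

import torch

mean, std = torch.randn(1, 256), torch.rand(1, 256) + 0.5  # stand-in CLIP stats
x = torch.randn(4, 256)

scaled = (x - mean) / std        # what scale() computes
restored = scaled * std + mean   # what unscale() computes
print(torch.allclose(restored, x, atol=1e-5))  # True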
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.yml:
--------------------------------------------------------------------------------
1 | name: Feature Request
2 | description: "You have an idea for something new you would like to see added to ComfyUI's core."
3 | labels: [ "Feature" ]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Before submitting a **Feature Request**, please ensure the following:
9 |
10 | **1:** You are running the latest version of ComfyUI.
11 | **2:** You have looked to make sure there is not already a feature that does what you need, and there is not already a Feature Request listed for the same idea.
12 | **3:** This is something that makes sense to add to ComfyUI Core, and wouldn't make more sense as a custom node.
13 |
14 | If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first.
15 | - type: textarea
16 | attributes:
17 | label: Feature Idea
18 | description: "Describe the feature you want to see."
19 | validations:
20 | required: true
21 | - type: textarea
22 | attributes:
23 | label: Existing Solutions
24 | description: "Please search through available custom nodes / extensions to see if there are existing custom solutions for this. If so, please link the options you found here as a reference."
25 | validations:
26 | required: false
27 | - type: textarea
28 | attributes:
29 | label: Other
30 | description: "Any other additional information you think might be helpful."
31 | validations:
32 | required: false
33 |
--------------------------------------------------------------------------------
/web/scripts/ui/toggleSwitch.js:
--------------------------------------------------------------------------------
1 | import { $el } from "../ui.js";
2 |
3 | /**
4 | * @typedef { { text: string, value?: string, tooltip?: string } } ToggleSwitchItem
5 | */
6 | /**
7 | * Creates a toggle switch element
8 | * @param { string } name
9 | * @param { Array<string | ToggleSwitchItem> } items
10 | * @param { Object } [opts]
11 | * @param { (e: { item: ToggleSwitchItem, prev?: ToggleSwitchItem }) => void } [opts.onChange]
12 | */
13 | export function toggleSwitch(name, items, { onChange } = {}) {
14 | let selectedIndex;
15 | let elements;
16 |
17 | function updateSelected(index) {
18 | if (selectedIndex != null) {
19 | elements[selectedIndex].classList.remove("comfy-toggle-selected");
20 | }
21 | onChange?.({ item: items[index], prev: selectedIndex == null ? undefined : items[selectedIndex] });
22 | selectedIndex = index;
23 | elements[selectedIndex].classList.add("comfy-toggle-selected");
24 | }
25 |
26 | elements = items.map((item, i) => {
27 | if (typeof item === "string") item = { text: item };
28 | if (!item.value) item.value = item.text;
29 |
30 | const toggle = $el(
31 | "label",
32 | {
33 | textContent: item.text,
34 | title: item.tooltip ?? "",
35 | },
36 | $el("input", {
37 | name,
38 | type: "radio",
39 | value: item.value ?? item.text,
40 | checked: item.selected,
41 | onchange: () => {
42 | updateSelected(i);
43 | },
44 | })
45 | );
46 | if (item.selected) {
47 | updateSelected(i);
48 | }
49 | return toggle;
50 | });
51 |
52 | const container = $el("div.comfy-toggle-switch", elements);
53 |
54 | if (selectedIndex == null) {
55 | elements[0].children[0].checked = true;
56 | updateSelected(0);
57 | }
58 |
59 | return container;
60 | }
61 |
--------------------------------------------------------------------------------
/web/index.html:
--------------------------------------------------------------------------------
[HTML markup lost in extraction; only the text content survived: the page title "ComfyUI" and a matching on-page heading]
--------------------------------------------------------------------------------
/web/extensions/core/dynamicPrompts.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | // Allows for simple dynamic prompt replacement
4 | // Inputs in the format {a|b} will have a random value of a or b chosen when the prompt is queued.
5 |
6 | /*
7 | * Strips C-style line and block comments from a string
8 | */
9 | function stripComments(str) {
10 | return str.replace(/\/\*[\s\S]*?\*\/|\/\/.*/g,'');
11 | }
12 |
13 | app.registerExtension({
14 | name: "Comfy.DynamicPrompts",
15 | nodeCreated(node) {
16 | if (node.widgets) {
17 | // Locate dynamic prompt text widgets
18 | // Include any widgets with dynamicPrompts set to true
19 | const widgets = node.widgets.filter(
20 | (n) => n.dynamicPrompts
21 | );
22 | for (const widget of widgets) {
23 | // Override the serialization of the value to resolve dynamic prompts for all widgets supporting it in this node
24 | widget.serializeValue = (workflowNode, widgetIndex) => {
25 | let prompt = stripComments(widget.value);
26 | while (prompt.replace("\\{", "").includes("{") && prompt.replace("\\}", "").includes("}")) {
27 | const startIndex = prompt.replace("\\{", "00").indexOf("{");
28 | const endIndex = prompt.replace("\\}", "00").indexOf("}");
29 |
30 | const optionsString = prompt.substring(startIndex + 1, endIndex);
31 | const options = optionsString.split("|");
32 |
33 | const randomIndex = Math.floor(Math.random() * options.length);
34 | const randomOption = options[randomIndex];
35 |
36 | prompt = prompt.substring(0, startIndex) + randomOption + prompt.substring(endIndex + 1);
37 | }
38 |
39 | // Overwrite the value in the serialized workflow pnginfo
40 | if (workflowNode?.widgets_values)
41 | workflowNode.widgets_values[widgetIndex] = prompt;
42 |
43 | return prompt;
44 | };
45 | }
46 | }
47 | },
48 | });
49 |
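
The same expansion, sketched in Python for illustration (this mirrors the loop above but, unlike the widget code, skips the handling of escaped braces):

import random
import re

INNERMOST = re.compile(r"\{([^{}]*)\}")  # a brace pair with no nested braces

def expand_dynamic_prompt(prompt: str) -> str:
    # Resolve one innermost {a|b|c} group at a time until none remain.
    while (m := INNERMOST.search(prompt)) is not None:
        choice = random.choice(m.group(1).split("|"))
        prompt = prompt[:m.start()] + choice + prompt[m.end():]
    return prompt

print(expand_dynamic_prompt("a {red|green|blue} ball on a {table|chair}"))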
--------------------------------------------------------------------------------
/comfy_extras/nodes_sdupscale.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.utils
3 |
4 | class SD_4XUpscale_Conditioning:
5 | @classmethod
6 | def INPUT_TYPES(s):
7 | return {"required": { "images": ("IMAGE",),
8 | "positive": ("CONDITIONING",),
9 | "negative": ("CONDITIONING",),
10 | "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
11 | "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
12 | }}
13 | RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
14 | RETURN_NAMES = ("positive", "negative", "latent")
15 |
16 | FUNCTION = "encode"
17 |
18 | CATEGORY = "conditioning/upscale_diffusion"
19 |
20 | def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
21 | width = max(1, round(images.shape[-2] * scale_ratio))
22 | height = max(1, round(images.shape[-3] * scale_ratio))
23 |
24 | pixels = comfy.utils.common_upscale((images.movedim(-1,1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center")
25 |
26 | out_cp = []
27 | out_cn = []
28 |
29 | for t in positive:
30 | n = [t[0], t[1].copy()]
31 | n[1]['concat_image'] = pixels
32 | n[1]['noise_augmentation'] = noise_augmentation
33 | out_cp.append(n)
34 |
35 | for t in negative:
36 | n = [t[0], t[1].copy()]
37 | n[1]['concat_image'] = pixels
38 | n[1]['noise_augmentation'] = noise_augmentation
39 | out_cn.append(n)
40 |
41 | latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
42 | return (out_cp, out_cn, {"samples":latent})
43 |
44 | NODE_CLASS_MAPPINGS = {
45 | "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
46 | }
47 |
--------------------------------------------------------------------------------
/tests/compare/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 |
4 | # Command line arguments for pytest
5 | def pytest_addoption(parser):
6 | parser.addoption('--baseline_dir', action="store", default='tests/inference/baseline', help='Directory for ground-truth images')
7 | parser.addoption('--test_dir', action="store", default='tests/inference/samples', help='Directory for images to test')
8 | parser.addoption('--metrics_file', action="store", default='tests/metrics.md', help='Output file for metrics')
9 | parser.addoption('--img_output_dir', action="store", default='tests/compare/samples', help='Output directory for diff metric images')
10 |
11 | # This initializes args at the beginning of the test session
12 | @pytest.fixture(scope="session", autouse=True)
13 | def args_pytest(pytestconfig):
14 | args = {}
15 | args['baseline_dir'] = pytestconfig.getoption('baseline_dir')
16 | args['test_dir'] = pytestconfig.getoption('test_dir')
17 | args['metrics_file'] = pytestconfig.getoption('metrics_file')
18 | args['img_output_dir'] = pytestconfig.getoption('img_output_dir')
19 |
20 | # Initialize metrics file
21 | with open(args['metrics_file'], 'a') as f:
22 | # if file is empty, write header
23 | if os.stat(args['metrics_file']).st_size == 0:
24 | f.write("| date | run | file | status | value | \n")
25 | f.write("| --- | --- | --- | --- | --- | \n")
26 |
27 | return args
28 |
29 |
30 | def gather_file_basenames(directory: str):
31 | files = []
32 | for file in os.listdir(directory):
33 | if file.endswith(".png"):
34 | files.append(file)
35 | return files
36 |
37 | # Creates the list of baseline file names to use as a fixture
38 | def pytest_generate_tests(metafunc):
39 | if "baseline_fname" in metafunc.fixturenames:
40 | baseline_fnames = gather_file_basenames(metafunc.config.getoption("baseline_dir"))
41 | metafunc.parametrize("baseline_fname", baseline_fnames)
42 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_pag.py:
--------------------------------------------------------------------------------
1 | #Modified/simplified version of the node from: https://github.com/pamparamm/sd-perturbed-attention
2 | #If you want the one with more options, see the repo above.
3 |
4 | #My modified one here is more basic but is less likely to break with ComfyUI updates.
5 |
6 | import comfy.model_patcher
7 | import comfy.samplers
8 |
9 | class PerturbedAttentionGuidance:
10 | @classmethod
11 | def INPUT_TYPES(s):
12 | return {
13 | "required": {
14 | "model": ("MODEL",),
15 | "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 0.01}),
16 | }
17 | }
18 |
19 | RETURN_TYPES = ("MODEL",)
20 | FUNCTION = "patch"
21 |
22 | CATEGORY = "model_patches/unet"
23 |
24 | def patch(self, model, scale):
25 | unet_block = "middle"
26 | unet_block_id = 0
27 | m = model.clone()
28 |
29 | def perturbed_attention(q, k, v, extra_options, mask=None):
30 | return v
31 |
32 | def post_cfg_function(args):
33 | model = args["model"]
34 | cond_pred = args["cond_denoised"]
35 | cond = args["cond"]
36 | cfg_result = args["denoised"]
37 | sigma = args["sigma"]
38 | model_options = args["model_options"].copy()
39 | x = args["input"]
40 |
41 | if scale == 0:
42 | return cfg_result
43 |
44 | # Replace Self-attention with PAG
45 | model_options = comfy.model_patcher.set_model_options_patch_replace(model_options, perturbed_attention, "attn1", unet_block, unet_block_id)
46 | (pag,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)
47 |
48 | return cfg_result + (cond_pred - pag) * scale
49 |
50 | m.set_model_sampler_post_cfg_function(post_cfg_function)
51 |
52 | return (m,)
53 |
54 | NODE_CLASS_MAPPINGS = {
55 | "PerturbedAttentionGuidance": PerturbedAttentionGuidance,
56 | }
57 |
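
A toy numeric sketch of the guidance combination on the last line of post_cfg_function(); the one-element tensors stand in for denoised latents:

import torch

scale = 3.0
cfg_result = torch.tensor([0.50])  # ordinary CFG output
cond_pred = torch.tensor([0.60])   # conditional prediction
pag = torch.tensor([0.40])         # prediction with attn1 replaced by the identity above

print(cfg_result + (cond_pred - pag) * scale)  # tensor([1.1000])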
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to ComfyUI
2 |
3 | Welcome, and thank you for your interest in contributing to ComfyUI!
4 |
5 | There are several ways in which you can contribute, beyond writing code. The goal of this document is to provide a high-level overview of how you can get involved.
6 |
7 | ## Asking Questions
8 |
9 | Have a question? Instead of opening an issue, please ask on [Discord](https://comfy.org/discord) or [Matrix](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) channels. Our team and the community will help you.
10 |
11 | ## Providing Feedback
12 |
13 | Your comments and feedback are welcome, and the development team is available via a handful of different channels.
14 |
15 | See the `#bug-report`, `#feature-request` and `#feedback` channels on Discord.
16 |
17 | ## Reporting Issues
18 |
19 | Have you identified a reproducible problem in ComfyUI? Do you have a feature request? We want to hear about it! Here's how you can report your issue as effectively as possible.
20 |
21 |
22 | ### Look For an Existing Issue
23 |
24 | Before you create a new issue, please do a search in [open issues](https://github.com/comfyanonymous/ComfyUI/issues) to see if the issue or feature request has already been filed.
25 |
26 | If you find your issue already exists, make relevant comments and add your [reaction](https://github.com/blog/2119-add-reactions-to-pull-requests-issues-and-comments). Use a reaction in place of a "+1" comment:
27 |
28 | * 👍 - upvote
29 | * 👎 - downvote
30 |
31 | If you cannot find an existing issue that describes your bug or feature, create a new issue. We have an issue template in place to organize new issues.
32 |
33 |
34 | ### Creating Pull Requests
35 |
36 | * Please refer to the article on [creating pull requests](https://github.com/comfyanonymous/ComfyUI/wiki/How-to-Contribute-Code) and contributing to this project.
37 |
38 |
39 | ## Thank You
40 |
41 | Your contributions to open source, large or small, make great projects like this possible. Thank you for taking the time to contribute.
42 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_morphology.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.model_management
3 |
4 | from kornia.morphology import dilation, erosion, opening, closing, gradient, top_hat, bottom_hat
5 |
6 |
7 | class Morphology:
8 | @classmethod
9 | def INPUT_TYPES(s):
10 | return {"required": {"image": ("IMAGE",),
11 | "operation": (["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],),
12 | "kernel_size": ("INT", {"default": 3, "min": 3, "max": 999, "step": 1}),
13 | }}
14 |
15 | RETURN_TYPES = ("IMAGE",)
16 | FUNCTION = "process"
17 |
18 | CATEGORY = "image/postprocessing"
19 |
20 | def process(self, image, operation, kernel_size):
21 | device = comfy.model_management.get_torch_device()
22 | kernel = torch.ones(kernel_size, kernel_size, device=device)
23 | image_k = image.to(device).movedim(-1, 1)
24 | if operation == "erode":
25 | output = erosion(image_k, kernel)
26 | elif operation == "dilate":
27 | output = dilation(image_k, kernel)
28 | elif operation == "open":
29 | output = opening(image_k, kernel)
30 | elif operation == "close":
31 | output = closing(image_k, kernel)
32 | elif operation == "gradient":
33 | output = gradient(image_k, kernel)
34 | elif operation == "top_hat":
35 | output = top_hat(image_k, kernel)
36 | elif operation == "bottom_hat":
37 | output = bottom_hat(image_k, kernel)
38 | else:
39 | raise ValueError(f"Invalid operation {operation} for morphology. Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'top_hat', 'bottom_hat'")
40 | img_out = output.to(comfy.model_management.intermediate_device()).movedim(1, -1)
41 | return (img_out,)
42 |
43 | NODE_CLASS_MAPPINGS = {
44 | "Morphology": Morphology,
45 | }
46 |
47 | NODE_DISPLAY_NAME_MAPPINGS = {
48 | "Morphology": "ImageMorphology",
49 | }
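
A hypothetical direct call, assuming kornia is installed and a ComfyUI checkout is on the path; the input uses ComfyUI's [batch, height, width, channels] image layout:

import torch
from comfy_extras.nodes_morphology import Morphology

image = torch.rand(1, 64, 64, 3)  # B, H, W, C in 0..1
(dilated,) = Morphology().process(image, "dilate", kernel_size=3)
print(dilated.shape)  # torch.Size([1, 64, 64, 3])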
--------------------------------------------------------------------------------
/web/scripts/ui/menu/queueOptions.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { $el } from "../../ui.js";
4 | import { prop } from "../../utils.js";
5 |
6 | export class ComfyQueueOptions extends EventTarget {
7 | element = $el("div.comfyui-queue-options");
8 |
9 | constructor(app) {
10 | super();
11 | this.app = app;
12 |
13 | this.batchCountInput = $el("input", {
14 | className: "comfyui-queue-batch-value",
15 | type: "number",
16 | min: "1",
17 | value: "1",
18 | oninput: () => (this.batchCount = +this.batchCountInput.value),
19 | });
20 |
21 | this.batchCountRange = $el("input", {
22 | type: "range",
23 | min: "1",
24 | max: "100",
25 | value: "1",
26 | oninput: () => (this.batchCount = +this.batchCountRange.value),
27 | });
28 |
29 | this.element.append(
30 | $el("div.comfyui-queue-batch", [
31 | $el(
32 | "label",
33 | {
34 | textContent: "Batch count: ",
35 | },
36 | this.batchCountInput
37 | ),
38 | this.batchCountRange,
39 | ])
40 | );
41 |
42 | const createOption = (text, value, checked = false) =>
43 | $el(
44 | "label",
45 | { textContent: text },
46 | $el("input", {
47 | type: "radio",
48 | name: "AutoQueueMode",
49 | checked,
50 | value,
51 | oninput: (e) => (this.autoQueueMode = e.target["value"]),
52 | })
53 | );
54 |
55 | this.autoQueueEl = $el("div.comfyui-queue-mode", [
56 | $el("span", "Auto Queue:"),
57 | createOption("Disabled", "", true),
58 | createOption("Instant", "instant"),
59 | createOption("On Change", "change"),
60 | ]);
61 |
62 | this.element.append(this.autoQueueEl);
63 |
64 | this.batchCount = prop(this, "batchCount", 1, () => {
65 | this.batchCountInput.value = this.batchCount + "";
66 | this.batchCountRange.value = this.batchCount + "";
67 | });
68 |
69 | this.autoQueueMode = prop(this, "autoQueueMode", "Disabled", () => {
70 | this.dispatchEvent(
71 | new CustomEvent("autoQueueMode", {
72 | detail: this.autoQueueMode,
73 | })
74 | );
75 | });
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/models/configs/v2-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: True
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/comfy/ldm/hydit/poolers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from comfy.ldm.modules.attention import optimized_attention
5 | import comfy.ops
6 |
7 | class AttentionPool(nn.Module):
8 | def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None, dtype=None, device=None, operations=None):
9 | super().__init__()
10 | self.positional_embedding = nn.Parameter(torch.empty(spacial_dim + 1, embed_dim, dtype=dtype, device=device))
11 | self.k_proj = operations.Linear(embed_dim, embed_dim, dtype=dtype, device=device)
12 | self.q_proj = operations.Linear(embed_dim, embed_dim, dtype=dtype, device=device)
13 | self.v_proj = operations.Linear(embed_dim, embed_dim, dtype=dtype, device=device)
14 | self.c_proj = operations.Linear(embed_dim, output_dim or embed_dim, dtype=dtype, device=device)
15 | self.num_heads = num_heads
16 | self.embed_dim = embed_dim
17 |
18 | def forward(self, x):
19 | x = x[:,:self.positional_embedding.shape[0] - 1]
20 | x = x.permute(1, 0, 2) # NLC -> LNC
21 | x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (L+1)NC
22 | x = x + comfy.ops.cast_to_input(self.positional_embedding[:, None, :], x) # (L+1)NC
23 |
24 | q = self.q_proj(x[:1])
25 | k = self.k_proj(x)
26 | v = self.v_proj(x)
27 |
28 | batch_size = q.shape[1]
29 | head_dim = self.embed_dim // self.num_heads
30 | q = q.view(1, batch_size * self.num_heads, head_dim).transpose(0, 1).view(batch_size, self.num_heads, -1, head_dim)
31 | k = k.view(k.shape[0], batch_size * self.num_heads, head_dim).transpose(0, 1).view(batch_size, self.num_heads, -1, head_dim)
32 | v = v.view(v.shape[0], batch_size * self.num_heads, head_dim).transpose(0, 1).view(batch_size, self.num_heads, -1, head_dim)
33 |
34 | attn_output = optimized_attention(q, k, v, self.num_heads, skip_reshape=True).transpose(0, 1)
35 |
36 | attn_output = self.c_proj(attn_output)
37 | return attn_output.squeeze(0)
38 |
--------------------------------------------------------------------------------
/models/configs/v2-inference_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False # we set this to false because this is an inference only config
19 |
20 | unet_config:
21 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
22 | params:
23 | use_checkpoint: True
24 | use_fp16: False
25 | image_size: 32 # unused
26 | in_channels: 4
27 | out_channels: 4
28 | model_channels: 320
29 | attention_resolutions: [ 4, 2, 1 ]
30 | num_res_blocks: 2
31 | channel_mult: [ 1, 2, 4, 4 ]
32 | num_head_channels: 64 # need to fix for flash-attn
33 | use_spatial_transformer: True
34 | use_linear_in_transformer: True
35 | transformer_depth: 1
36 | context_dim: 1024
37 | legacy: False
38 |
39 | first_stage_config:
40 | target: ldm.models.autoencoder.AutoencoderKL
41 | params:
42 | embed_dim: 4
43 | monitor: val/rec_loss
44 | ddconfig:
45 | #attn_type: "vanilla-xformers"
46 | double_z: true
47 | z_channels: 4
48 | resolution: 256
49 | in_channels: 3
50 | out_ch: 3
51 | ch: 128
52 | ch_mult:
53 | - 1
54 | - 2
55 | - 4
56 | - 4
57 | num_res_blocks: 2
58 | attn_resolutions: []
59 | dropout: 0.0
60 | lossconfig:
61 | target: torch.nn.Identity
62 |
63 | cond_stage_config:
64 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
65 | params:
66 | freeze: True
67 | layer: "penultimate"
68 |
--------------------------------------------------------------------------------
/web/extensions/core/keybinds.js:
--------------------------------------------------------------------------------
1 | import {app} from "../../scripts/app.js";
2 |
3 | app.registerExtension({
4 | name: "Comfy.Keybinds",
5 | init() {
6 | const keybindListener = function (event) {
7 | const modifierPressed = event.ctrlKey || event.metaKey;
8 |
9 | // Queue prompt using ctrl or command + enter
10 | if (modifierPressed && event.key === "Enter") {
11 | app.queuePrompt(event.shiftKey ? -1 : 0).then();
12 | return;
13 | }
14 |
15 | const target = event.composedPath()[0];
16 | if (["INPUT", "TEXTAREA"].includes(target.tagName)) {
17 | return;
18 | }
19 |
20 | const modifierKeyIdMap = {
21 | s: "#comfy-save-button",
22 | o: "#comfy-file-input",
23 | Backspace: "#comfy-clear-button",
24 | d: "#comfy-load-default-button",
25 | };
26 |
27 | const modifierKeybindId = modifierKeyIdMap[event.key];
28 | if (modifierPressed && modifierKeybindId) {
29 | event.preventDefault();
30 |
31 | const elem = document.querySelector(modifierKeybindId);
32 | elem.click();
33 | return;
34 | }
35 |
36 | // Finished handling all modifier keybinds; now handle the rest
37 | if (event.ctrlKey || event.altKey || event.metaKey) {
38 | return;
39 | }
40 |
41 | // Close out of modals using escape
42 | if (event.key === "Escape") {
43 | const modals = document.querySelectorAll(".comfy-modal");
44 | const modal = Array.from(modals).find(modal => window.getComputedStyle(modal).getPropertyValue("display") !== "none");
45 | if (modal) {
46 | modal.style.display = "none";
47 | }
48 |
49 | [...document.querySelectorAll("dialog")].forEach(d => {
50 | d.close();
51 | });
52 | }
53 |
54 | const keyIdMap = {
55 | q: "#comfy-view-queue-button",
56 | h: "#comfy-view-history-button",
57 | r: "#comfy-refresh-button",
58 | };
59 |
60 | const buttonId = keyIdMap[event.key];
61 | if (buttonId) {
62 | const button = document.querySelector(buttonId);
63 | button.click();
64 | }
65 | }
66 |
67 | window.addEventListener("keydown", keybindListener, true);
68 | }
69 | });
70 |
--------------------------------------------------------------------------------
/app/app_settings.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | from aiohttp import web
4 |
5 |
6 | class AppSettings():
7 | def __init__(self, user_manager):
8 | self.user_manager = user_manager
9 |
10 | def get_settings(self, request):
11 | file = self.user_manager.get_request_user_filepath(
12 | request, "comfy.settings.json")
13 | if os.path.isfile(file):
14 | with open(file) as f:
15 | return json.load(f)
16 | else:
17 | return {}
18 |
19 | def save_settings(self, request, settings):
20 | file = self.user_manager.get_request_user_filepath(
21 | request, "comfy.settings.json")
22 | with open(file, "w") as f:
23 | f.write(json.dumps(settings, indent=4))
24 |
25 | def add_routes(self, routes):
26 | @routes.get("/settings")
27 | async def get_settings(request):
28 | return web.json_response(self.get_settings(request))
29 |
30 | @routes.get("/settings/{id}")
31 | async def get_setting(request):
32 | value = None
33 | settings = self.get_settings(request)
34 | setting_id = request.match_info.get("id", None)
35 | if setting_id and setting_id in settings:
36 | value = settings[setting_id]
37 | return web.json_response(value)
38 |
39 | @routes.post("/settings")
40 | async def post_settings(request):
41 | settings = self.get_settings(request)
42 | new_settings = await request.json()
43 | self.save_settings(request, {**settings, **new_settings})
44 | return web.Response(status=200)
45 |
46 | @routes.post("/settings/{id}")
47 | async def post_setting(request):
48 | setting_id = request.match_info.get("id", None)
49 | if not setting_id:
50 | return web.Response(status=400)
51 | settings = self.get_settings(request)
52 | settings[setting_id] = await request.json()
53 | self.save_settings(request, settings)
54 | return web.Response(status=200)
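Because the routes above keep settings as one plain JSON dictionary per user, they can be exercised with nothing but the standard library. A quick sketch, assuming a local server on the default 127.0.0.1:8188 and using a hypothetical setting id for illustration:

```python
import json
import urllib.request

BASE = "http://127.0.0.1:8188"

# GET /settings returns the whole dictionary ({} if the file doesn't exist yet)
with urllib.request.urlopen(f"{BASE}/settings") as resp:
    print(json.load(resp))

# POST /settings/{id} stores the request body as the value for that id
req = urllib.request.Request(
    f"{BASE}/settings/Comfy.ExampleSetting",  # hypothetical id
    data=json.dumps(True).encode(),
    method="POST",
)
urllib.request.urlopen(req)

# GET /settings/{id} reads a single value back (null for unknown ids)
with urllib.request.urlopen(f"{BASE}/settings/Comfy.ExampleSetting") as resp:
    print(json.load(resp))  # True
```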
--------------------------------------------------------------------------------
/models/configs/v2-inference-v.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: True
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/models/configs/v2-inference-v_fp32.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-4
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | parameterization: "v"
6 | linear_start: 0.00085
7 | linear_end: 0.0120
8 | num_timesteps_cond: 1
9 | log_every_t: 200
10 | timesteps: 1000
11 | first_stage_key: "jpg"
12 | cond_stage_key: "txt"
13 | image_size: 64
14 | channels: 4
15 | cond_stage_trainable: false
16 | conditioning_key: crossattn
17 | monitor: val/loss_simple_ema
18 | scale_factor: 0.18215
19 | use_ema: False # we set this to false because this is an inference only config
20 |
21 | unet_config:
22 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
23 | params:
24 | use_checkpoint: True
25 | use_fp16: False
26 | image_size: 32 # unused
27 | in_channels: 4
28 | out_channels: 4
29 | model_channels: 320
30 | attention_resolutions: [ 4, 2, 1 ]
31 | num_res_blocks: 2
32 | channel_mult: [ 1, 2, 4, 4 ]
33 | num_head_channels: 64 # need to fix for flash-attn
34 | use_spatial_transformer: True
35 | use_linear_in_transformer: True
36 | transformer_depth: 1
37 | context_dim: 1024
38 | legacy: False
39 |
40 | first_stage_config:
41 | target: ldm.models.autoencoder.AutoencoderKL
42 | params:
43 | embed_dim: 4
44 | monitor: val/rec_loss
45 | ddconfig:
46 | #attn_type: "vanilla-xformers"
47 | double_z: true
48 | z_channels: 4
49 | resolution: 256
50 | in_channels: 3
51 | out_ch: 3
52 | ch: 128
53 | ch_mult:
54 | - 1
55 | - 2
56 | - 4
57 | - 4
58 | num_res_blocks: 2
59 | attn_resolutions: []
60 | dropout: 0.0
61 | lossconfig:
62 | target: torch.nn.Identity
63 |
64 | cond_stage_config:
65 | target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
66 | params:
67 | freeze: True
68 | layer: "penultimate"
69 |
--------------------------------------------------------------------------------
/.github/workflows/pullrequest-ci-run.yml:
--------------------------------------------------------------------------------
1 | # This is the GitHub Workflow that drives full-GPU-enabled tests of pull requests to ComfyUI, when the 'Run-CI-Test' label is added
2 | # Results are reported as checkmarks on the commits, as well as onto https://ci.comfy.org/
3 | name: Pull Request CI Workflow Runs
4 | on:
5 | pull_request_target:
6 | types: [labeled]
7 |
8 | jobs:
9 | pr-test-stable:
10 | if: ${{ github.event.label.name == 'Run-CI-Test' }}
11 | strategy:
12 | fail-fast: false
13 | matrix:
14 | os: [macos, linux, windows]
15 | python_version: ["3.9", "3.10", "3.11", "3.12"]
16 | cuda_version: ["12.1"]
17 | torch_version: ["stable"]
18 | include:
19 | - os: macos
20 | runner_label: [self-hosted, macOS]
21 | flags: "--use-pytorch-cross-attention"
22 | - os: linux
23 | runner_label: [self-hosted, Linux]
24 | flags: ""
25 | - os: windows
26 | runner_label: [self-hosted, win]
27 | flags: ""
28 | runs-on: ${{ matrix.runner_label }}
29 | steps:
30 | - name: Test Workflows
31 | uses: comfy-org/comfy-action@main
32 | with:
33 | os: ${{ matrix.os }}
34 | python_version: ${{ matrix.python_version }}
35 | torch_version: ${{ matrix.torch_version }}
36 | google_credentials: ${{ secrets.GCS_SERVICE_ACCOUNT_JSON }}
37 | comfyui_flags: ${{ matrix.flags }}
38 | use_prior_commit: 'true'
39 | comment:
40 | if: ${{ github.event.label.name == 'Run-CI-Test' }}
41 | runs-on: ubuntu-latest
42 | permissions:
43 | pull-requests: write
44 | steps:
45 | - uses: actions/github-script@v6
46 | with:
47 | script: |
48 | github.rest.issues.createComment({
49 | issue_number: context.issue.number,
50 | owner: context.repo.owner,
51 | repo: context.repo.repo,
52 | body: '(Automated Bot Message) CI Tests are running, you can view the results at https://ci.comfy.org/?branch=${{ github.event.pull_request.number }}%2Fmerge'
53 | })
54 |
--------------------------------------------------------------------------------
/models/configs/v1-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 |
--------------------------------------------------------------------------------
/web/extensions/logging.js.example:
--------------------------------------------------------------------------------
1 | import { app } from "../scripts/app.js";
2 |
3 | const ext = {
4 | // Unique name for the extension
5 | name: "Example.LoggingExtension",
6 | async init(app) {
7 | // Any initial setup to run as soon as the page loads
8 | console.log("[logging]", "extension init");
9 | },
10 | async setup(app) {
11 | // Any setup to run after the app is created
12 | console.log("[logging]", "extension setup");
13 | },
14 | async addCustomNodeDefs(defs, app) {
15 | // Add custom node definitions
16 | // These definitions will be configured and registered automatically
17 | // defs is a lookup table of core node definitions; add yours to it
18 | console.log("[logging]", "add custom node definitions", "current nodes:", Object.keys(defs));
19 | },
20 | async getCustomWidgets(app) {
21 | // Return custom widget types
22 | // See ComfyWidgets for widget examples
23 | console.log("[logging]", "provide custom widgets");
24 | },
25 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
26 | // Run custom logic before a node definition is registered with the graph
27 | console.log("[logging]", "before register node: ", nodeType, nodeData);
28 |
29 | // This fires for every node definition so only log once
30 | delete ext.beforeRegisterNodeDef;
31 | },
32 | async registerCustomNodes(app) {
33 | // Register any custom node implementations here, allowing for more flexibility than a custom node def
34 | console.log("[logging]", "register custom nodes");
35 | },
36 | loadedGraphNode(node, app) {
37 | // Fires for each node when loading/dragging/etc a workflow json or png
38 | // If you break something in the backend and want to patch workflows in the frontend,
39 | // this is the place to do it
40 | console.log("[logging]", "loaded graph node: ", node);
41 |
42 | // This fires for every node on each load so only log once
43 | delete ext.loadedGraphNode;
44 | },
45 | nodeCreated(node, app) {
46 | // Fires every time a node is constructed
47 | // You can modify widgets/add handlers/etc here
48 | console.log("[logging]", "node created: ", node);
49 |
50 | // This fires for every node so only log once
51 | delete ext.nodeCreated;
52 | }
53 | };
54 |
55 | app.registerExtension(ext);
56 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_align_your_steps.py:
--------------------------------------------------------------------------------
1 | #from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
2 | import numpy as np
3 | import torch
4 |
5 | def loglinear_interp(t_steps, num_steps):
6 | """
7 | Performs log-linear interpolation of a given array of decreasing numbers.
8 | """
9 | xs = np.linspace(0, 1, len(t_steps))
10 | ys = np.log(t_steps[::-1])
11 |
12 | new_xs = np.linspace(0, 1, num_steps)
13 | new_ys = np.interp(new_xs, xs, ys)
14 |
15 | interped_ys = np.exp(new_ys)[::-1].copy()
16 | return interped_ys
17 |
18 | NOISE_LEVELS = {"SD1": [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177, 1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582],
19 | "SDXL":[14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945, 0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582],
20 | "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002]}
21 |
22 | class AlignYourStepsScheduler:
23 | @classmethod
24 | def INPUT_TYPES(s):
25 | return {"required":
26 | {"model_type": (["SD1", "SDXL", "SVD"], ),
27 | "steps": ("INT", {"default": 10, "min": 10, "max": 10000}),
28 | "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
29 | }
30 | }
31 | RETURN_TYPES = ("SIGMAS",)
32 | CATEGORY = "sampling/custom_sampling/schedulers"
33 |
34 | FUNCTION = "get_sigmas"
35 |
36 | def get_sigmas(self, model_type, steps, denoise):
37 | total_steps = steps
38 | if denoise < 1.0:
39 | if denoise <= 0.0:
40 | return (torch.FloatTensor([]),)
41 | total_steps = round(steps * denoise)
42 |
43 | sigmas = NOISE_LEVELS[model_type][:]
44 | if (steps + 1) != len(sigmas):
45 | sigmas = loglinear_interp(sigmas, steps + 1)
46 |
47 | sigmas = sigmas[-(total_steps + 1):]
48 | sigmas[-1] = 0
49 | return (torch.FloatTensor(sigmas), )
50 |
51 | NODE_CLASS_MAPPINGS = {
52 | "AlignYourStepsScheduler": AlignYourStepsScheduler,
53 | }
54 |
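A worked run of the logic in `get_sigmas`: the 11-entry AYS table is log-linearly resampled whenever the requested step count differs from 10, and `denoise < 1.0` keeps only the tail of the schedule before zeroing the final sigma:

```python
import numpy as np

SD1 = [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177,
       1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662,
       0.0291671582]

def loglinear_interp(t_steps, num_steps):  # same as above
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])
    new_ys = np.interp(np.linspace(0, 1, num_steps), xs, ys)
    return np.exp(new_ys)[::-1].copy()

sigmas = loglinear_interp(SD1, 20 + 1)          # steps=20 -> 21 sigma values
print(len(sigmas), round(float(sigmas[0]), 4))  # 21 14.6146 (endpoints are preserved)

# denoise=0.5 at steps=20 -> total_steps=10: keep the last 11 sigmas, zero the last
tail = list(sigmas[-(10 + 1):])
tail[-1] = 0.0
```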
--------------------------------------------------------------------------------
/models/configs/anything_v3.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 4
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 | params:
72 | layer: "hidden"
73 | layer_idx: -2
74 |
--------------------------------------------------------------------------------
/models/configs/v1-inference_clip_skip_2_fp16.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 1.0e-04
3 | target: ldm.models.diffusion.ddpm.LatentDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: crossattn
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | use_ema: False
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 10000 ]
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | use_fp16: True
33 | image_size: 32 # unused
34 | in_channels: 4
35 | out_channels: 4
36 | model_channels: 320
37 | attention_resolutions: [ 4, 2, 1 ]
38 | num_res_blocks: 2
39 | channel_mult: [ 1, 2, 4, 4 ]
40 | num_heads: 8
41 | use_spatial_transformer: True
42 | transformer_depth: 1
43 | context_dim: 768
44 | use_checkpoint: True
45 | legacy: False
46 |
47 | first_stage_config:
48 | target: ldm.models.autoencoder.AutoencoderKL
49 | params:
50 | embed_dim: 4
51 | monitor: val/rec_loss
52 | ddconfig:
53 | double_z: true
54 | z_channels: 4
55 | resolution: 256
56 | in_channels: 3
57 | out_ch: 3
58 | ch: 128
59 | ch_mult:
60 | - 1
61 | - 2
62 | - 4
63 | - 4
64 | num_res_blocks: 2
65 | attn_resolutions: []
66 | dropout: 0.0
67 | lossconfig:
68 | target: torch.nn.Identity
69 |
70 | cond_stage_config:
71 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
72 | params:
73 | layer: "hidden"
74 | layer_idx: -2
75 |
--------------------------------------------------------------------------------
/models/configs/v1-inpainting-inference.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | base_learning_rate: 7.5e-05
3 | target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
4 | params:
5 | linear_start: 0.00085
6 | linear_end: 0.0120
7 | num_timesteps_cond: 1
8 | log_every_t: 200
9 | timesteps: 1000
10 | first_stage_key: "jpg"
11 | cond_stage_key: "txt"
12 | image_size: 64
13 | channels: 4
14 | cond_stage_trainable: false # Note: different from the one we trained before
15 | conditioning_key: hybrid # important
16 | monitor: val/loss_simple_ema
17 | scale_factor: 0.18215
18 | finetune_keys: null
19 |
20 | scheduler_config: # 10000 warmup steps
21 | target: ldm.lr_scheduler.LambdaLinearScheduler
22 | params:
23 | warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
24 | cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25 | f_start: [ 1.e-6 ]
26 | f_max: [ 1. ]
27 | f_min: [ 1. ]
28 |
29 | unet_config:
30 | target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31 | params:
32 | image_size: 32 # unused
33 | in_channels: 9 # 4 data + 4 downscaled image + 1 mask
34 | out_channels: 4
35 | model_channels: 320
36 | attention_resolutions: [ 4, 2, 1 ]
37 | num_res_blocks: 2
38 | channel_mult: [ 1, 2, 4, 4 ]
39 | num_heads: 8
40 | use_spatial_transformer: True
41 | transformer_depth: 1
42 | context_dim: 768
43 | use_checkpoint: True
44 | legacy: False
45 |
46 | first_stage_config:
47 | target: ldm.models.autoencoder.AutoencoderKL
48 | params:
49 | embed_dim: 4
50 | monitor: val/rec_loss
51 | ddconfig:
52 | double_z: true
53 | z_channels: 4
54 | resolution: 256
55 | in_channels: 3
56 | out_ch: 3
57 | ch: 128
58 | ch_mult:
59 | - 1
60 | - 2
61 | - 4
62 | - 4
63 | num_res_blocks: 2
64 | attn_resolutions: []
65 | dropout: 0.0
66 | lossconfig:
67 | target: torch.nn.Identity
68 |
69 | cond_stage_config:
70 | target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
71 |
72 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: "Something is broken inside of ComfyUI. (Do not use this if you're just having issues and need help, or if the issue relates to a custom node)"
3 | labels: ["Potential Bug"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Before submitting a **Bug Report**, please ensure the following:
9 |
10 | - **1:** You are running the latest version of ComfyUI.
11 | - **2:** You have looked at the existing bug reports and made sure this isn't already reported.
12 | - **3:** You confirmed that the bug is not caused by a custom node. You can disable all custom nodes by passing the
13 | `--disable-all-custom-nodes` command line argument.
14 | - **4:** This is an actual bug in ComfyUI, not just a support question. A bug means you can specify exact
15 | steps to reproduce what went wrong, and others can repeat those steps and see the same issue happen.
16 |
17 | If unsure, ask on the [ComfyUI Matrix Space](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org) or the [Comfy Org Discord](https://discord.gg/comfyorg) first.
18 | - type: textarea
19 | attributes:
20 | label: Expected Behavior
21 | description: "What you expected to happen."
22 | validations:
23 | required: true
24 | - type: textarea
25 | attributes:
26 | label: Actual Behavior
27 | description: "What actually happened. Please include a screenshot of the issue if possible."
28 | validations:
29 | required: true
30 | - type: textarea
31 | attributes:
32 | label: Steps to Reproduce
33 | description: "Describe how to reproduce the issue. Please be sure to attach a workflow JSON or PNG, ideally one that doesn't require custom nodes to test. If the bug open happens when certain custom nodes are used, most likely that custom node is what has the bug rather than ComfyUI, in which case it should be reported to the node's author."
34 | validations:
35 | required: true
36 | - type: textarea
37 | attributes:
38 | label: Debug Logs
39 | description: "Please copy the output from your terminal logs here."
40 | render: powershell
41 | validations:
42 | required: true
43 | - type: textarea
44 | attributes:
45 | label: Other
46 | description: "Any other additional information you think might be helpful."
47 | validations:
48 | required: false
49 |
--------------------------------------------------------------------------------
/tests-ui/setup.js:
--------------------------------------------------------------------------------
1 | const { spawn } = require("child_process");
2 | const { resolve } = require("path");
3 | const { existsSync, mkdirSync, writeFileSync } = require("fs");
4 | const http = require("http");
5 |
6 | async function setup() {
7 | // Wait up to 30s for it to start
8 | let success = false;
9 | let child;
10 | for (let i = 0; i < 30; i++) {
11 | try {
12 | await new Promise((res, rej) => {
13 | http
14 | .get("http://127.0.0.1:8188/object_info", (resp) => {
15 | let data = "";
16 | resp.on("data", (chunk) => {
17 | data += chunk;
18 | });
19 | resp.on("end", () => {
20 | // Modify the response data to add some checkpoints
21 | const objectInfo = JSON.parse(data);
22 | objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"];
23 | objectInfo.VAELoader.input.required.vae_name[0] = ["vae1.safetensors", "vae2.ckpt"];
24 |
25 | data = JSON.stringify(objectInfo, undefined, "\t");
26 |
27 | const outDir = resolve("./data");
28 | if (!existsSync(outDir)) {
29 | mkdirSync(outDir);
30 | }
31 |
32 | const outPath = resolve(outDir, "object_info.json");
33 | console.log(`Writing ${Object.keys(objectInfo).length} nodes to ${outPath}`);
34 | writeFileSync(outPath, data, {
35 | encoding: "utf8",
36 | });
37 | res();
38 | });
39 | })
40 | .on("error", rej);
41 | });
42 | success = true;
43 | break;
44 | } catch (error) {
45 | console.log(i + "/30", error);
46 | if (i === 0) {
47 | // Start the server on first iteration if it fails to connect
48 | console.log("Starting ComfyUI server...");
49 |
50 | let python = resolve("../../python_embeded/python.exe");
51 | let args;
52 | let cwd;
53 | if (existsSync(python)) {
54 | args = ["-s", "ComfyUI/main.py"];
55 | cwd = "../..";
56 | } else {
57 | python = "python";
58 | args = ["main.py"];
59 | cwd = "..";
60 | }
61 | args.push("--cpu");
62 | console.log(python, ...args);
63 | child = spawn(python, args, { cwd });
64 | child.on("error", (err) => {
65 | console.log(`Server error (${err})`);
66 | i = 30;
67 | });
68 | child.on("exit", (code) => {
69 | if (!success) {
70 | console.log(`Server exited (${code})`);
71 | i = 30;
72 | }
73 | });
74 | }
75 | await new Promise((r) => {
76 | setTimeout(r, 1000);
77 | });
78 | }
79 | }
80 |
81 | child?.kill();
82 |
83 | if (!success) {
84 | throw new Error("Waiting for server failed...");
85 | }
86 | }
87 |
88 | setup();
--------------------------------------------------------------------------------
/comfy/cldm/mmdit.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from typing import Dict, Optional
3 | import comfy.ldm.modules.diffusionmodules.mmdit
4 |
5 | class ControlNet(comfy.ldm.modules.diffusionmodules.mmdit.MMDiT):
6 | def __init__(
7 | self,
8 | num_blocks = None,
9 | dtype = None,
10 | device = None,
11 | operations = None,
12 | **kwargs,
13 | ):
14 | super().__init__(dtype=dtype, device=device, operations=operations, final_layer=False, num_blocks=num_blocks, **kwargs)
15 | # controlnet_blocks
16 | self.controlnet_blocks = torch.nn.ModuleList([])
17 | for _ in range(len(self.joint_blocks)):
18 | self.controlnet_blocks.append(operations.Linear(self.hidden_size, self.hidden_size, device=device, dtype=dtype))
19 |
20 | self.pos_embed_input = comfy.ldm.modules.diffusionmodules.mmdit.PatchEmbed(
21 | None,
22 | self.patch_size,
23 | self.in_channels,
24 | self.hidden_size,
25 | bias=True,
26 | strict_img_size=False,
27 | dtype=dtype,
28 | device=device,
29 | operations=operations
30 | )
31 |
32 | def forward(
33 | self,
34 | x: torch.Tensor,
35 | timesteps: torch.Tensor,
36 | y: Optional[torch.Tensor] = None,
37 | context: Optional[torch.Tensor] = None,
38 | hint = None,
39 | ) -> torch.Tensor:
40 |
41 | #weird sd3 controlnet specific stuff
42 | y = torch.zeros_like(y)
43 |
44 | if self.context_processor is not None:
45 | context = self.context_processor(context)
46 |
47 | hw = x.shape[-2:]
48 | x = self.x_embedder(x) + self.cropped_pos_embed(hw, device=x.device).to(dtype=x.dtype, device=x.device)
49 | x += self.pos_embed_input(hint)
50 |
51 | c = self.t_embedder(timesteps, dtype=x.dtype)
52 | if y is not None and self.y_embedder is not None:
53 | y = self.y_embedder(y)
54 | c = c + y
55 |
56 | if context is not None:
57 | context = self.context_embedder(context)
58 |
59 | output = []
60 |
61 | blocks = len(self.joint_blocks)
62 | for i in range(blocks):
63 | context, x = self.joint_blocks[i](
64 | context,
65 | x,
66 | c=c,
67 | use_checkpoint=self.use_checkpoint,
68 | )
69 |
70 | out = self.controlnet_blocks[i](x)
71 | count = self.depth // blocks
72 | if i == blocks - 1:
73 | count -= 1
74 | for j in range(count):
75 | output.append(out)
76 |
77 | return {"output": output}
78 |
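The fan-out at the end of `forward` is what lets a controlnet with fewer blocks drive a deeper base model: each control output is emitted `depth // blocks` times, with the last block emitted once fewer. The loop in isolation, with illustrative numbers (not taken from any real checkpoint):

```python
depth, blocks = 24, 12  # illustrative: base-model depth vs. controlnet joint blocks
output = []
for i in range(blocks):
    out = f"ctrl_{i}"         # stands in for self.controlnet_blocks[i](x)
    count = depth // blocks   # 2: each control output covers two base blocks
    if i == blocks - 1:
        count -= 1            # the final output is emitted once fewer
    output.extend([out] * count)
print(len(output), output[:4])  # 23 ['ctrl_0', 'ctrl_0', 'ctrl_1', 'ctrl_1']
```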
--------------------------------------------------------------------------------
/web/scripts/ui/imagePreview.js:
--------------------------------------------------------------------------------
1 | import { $el } from "../ui.js";
2 | import { app } from "../app.js"; // used below in onDraw for hit testing
3 | export function calculateImageGrid(imgs, dw, dh) {
4 | let best = 0;
5 | let w = imgs[0].naturalWidth;
6 | let h = imgs[0].naturalHeight;
7 | const numImages = imgs.length;
8 |
9 | let cellWidth, cellHeight, cols, rows, shiftX;
10 | // compact style
11 | for (let c = 1; c <= numImages; c++) {
12 | const r = Math.ceil(numImages / c);
13 | const cW = dw / c;
14 | const cH = dh / r;
15 | const scaleX = cW / w;
16 | const scaleY = cH / h;
17 |
18 | const scale = Math.min(scaleX, scaleY, 1);
19 | const imageW = w * scale;
20 | const imageH = h * scale;
21 | const area = imageW * imageH * numImages;
22 |
23 | if (area > best) {
24 | best = area;
25 | cellWidth = imageW;
26 | cellHeight = imageH;
27 | cols = c;
28 | rows = r;
29 | shiftX = c * ((cW - imageW) / 2);
30 | }
31 | }
32 |
33 | return { cellWidth, cellHeight, cols, rows, shiftX };
34 | }
35 |
36 | export function createImageHost(node) {
37 | const el = $el("div.comfy-img-preview");
38 | let currentImgs;
39 | let first = true;
40 |
41 | function updateSize() {
42 | let w = null;
43 | let h = null;
44 |
45 | if (currentImgs) {
46 | let elH = el.clientHeight;
47 | if (first) {
48 | first = false;
49 | // On first run, if we are small then grow a bit
50 | if (elH < 190) {
51 | elH = 190;
52 | }
53 | el.style.setProperty("--comfy-widget-min-height", elH);
54 | } else {
55 | el.style.setProperty("--comfy-widget-min-height", null);
56 | }
57 |
58 | const nw = node.size[0];
59 | ({ cellWidth: w, cellHeight: h } = calculateImageGrid(currentImgs, nw - 20, elH));
60 | w += "px";
61 | h += "px";
62 |
63 | el.style.setProperty("--comfy-img-preview-width", w);
64 | el.style.setProperty("--comfy-img-preview-height", h);
65 | }
66 | }
67 | return {
68 | el,
69 | updateImages(imgs) {
70 | if (imgs !== currentImgs) {
71 | if (currentImgs == null) {
72 | requestAnimationFrame(() => {
73 | updateSize();
74 | });
75 | }
76 | el.replaceChildren(...imgs);
77 | currentImgs = imgs;
78 | node.onResize(node.size);
79 | node.graph.setDirtyCanvas(true, true);
80 | }
81 | },
82 | getHeight() {
83 | updateSize();
84 | },
85 | onDraw() {
86 | // elementFromPoint uses a hit test to find elements, so we need to toggle pointer events
87 | el.style.pointerEvents = "all";
88 | const over = document.elementFromPoint(app.canvas.mouse[0], app.canvas.mouse[1]);
89 | el.style.pointerEvents = "none";
90 |
91 | if(!over) return;
92 | // Set the overIndex so Open Image etc work
93 | const idx = currentImgs.indexOf(over);
94 | node.overIndex = idx;
95 | },
96 | };
97 | }
98 |
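`calculateImageGrid` brute-forces the column count: for each candidate it scales images uniformly to fit the cells (never upscaling) and keeps the layout with the largest total displayed area. The same search re-expressed in Python as a worked example (a re-expression for illustration, not shipped code):

```python
import math

def calculate_image_grid(img_w, img_h, n, dw, dh):
    best, result = 0, None
    for c in range(1, n + 1):                 # candidate column counts
        r = math.ceil(n / c)                  # rows needed for n images
        scale = min((dw / c) / img_w, (dh / r) / img_h, 1)  # fit the cell, never upscale
        area = (img_w * scale) * (img_h * scale) * n        # total displayed image area
        if area > best:
            best, result = area, (c, r, img_w * scale, img_h * scale)
    return result

# Four 512x512 previews in a 400x400 region -> a 2x2 grid of 200px cells
print(calculate_image_grid(512, 512, 4, 400, 400))  # (2, 2, 200.0, 200.0)
```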
--------------------------------------------------------------------------------
/tests-ui/utils/setup.js:
--------------------------------------------------------------------------------
1 | require("../../web/scripts/api");
2 |
3 | const fs = require("fs");
4 | const path = require("path");
5 | function* walkSync(dir) {
6 | const files = fs.readdirSync(dir, { withFileTypes: true });
7 | for (const file of files) {
8 | if (file.isDirectory()) {
9 | yield* walkSync(path.join(dir, file.name));
10 | } else {
11 | yield path.join(dir, file.name);
12 | }
13 | }
14 | }
15 |
16 | /**
17 | * @typedef { import("../../web/types/comfy").ComfyObjectInfo } ComfyObjectInfo
18 | */
19 |
20 | /**
21 | * @param {{
22 | * mockExtensions?: string[],
23 |  * mockNodeDefs?: Record<string, ComfyObjectInfo>,
24 |  * settings?: Record<string, string>
25 |  * userConfig?: {storage: "server" | "browser", users?: Record<string, any>, migrated?: boolean },
26 |  * userData?: Record<string, any>
27 | * }} config
28 | */
29 | export function mockApi(config = {}) {
30 | let { mockExtensions, mockNodeDefs, userConfig, settings, userData } = {
31 | userConfig: undefined, // the shorthand "userConfig" here would read the let binding before initialization
32 | settings: {},
33 | userData: {},
34 | ...config,
35 | };
36 | if (!mockExtensions) {
37 | mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core")))
38 | .filter((x) => x.endsWith(".js"))
39 | .map((x) => path.relative(path.resolve("../web"), x));
40 | }
41 | if (!mockNodeDefs) {
42 | mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json")));
43 | }
44 |
45 | const events = new EventTarget();
46 | const mockApi = {
47 | addEventListener: events.addEventListener.bind(events),
48 | removeEventListener: events.removeEventListener.bind(events),
49 | dispatchEvent: events.dispatchEvent.bind(events),
50 | getSystemStats: jest.fn(),
51 | getExtensions: jest.fn(() => mockExtensions),
52 | getNodeDefs: jest.fn(() => mockNodeDefs),
53 | init: jest.fn(),
54 | apiURL: jest.fn((x) => "../../web/" + x),
55 | createUser: jest.fn((username) => {
56 | if(username in userConfig.users) {
57 | return { status: 400, json: () => "Duplicate" }
58 | }
59 | userConfig.users[username + "!"] = username;
60 | return { status: 200, json: () => username + "!" }
61 | }),
62 | getUserConfig: jest.fn(() => userConfig ?? { storage: "browser", migrated: false }),
63 | getSettings: jest.fn(() => settings),
64 | storeSettings: jest.fn((v) => Object.assign(settings, v)),
65 | getUserData: jest.fn((f) => {
66 | if (f in userData) {
67 | return { status: 200, json: () => userData[f] };
68 | } else {
69 | return { status: 404 };
70 | }
71 | }),
72 | storeUserData: jest.fn((file, data) => {
73 | userData[file] = data;
74 | }),
75 | listUserData: jest.fn(() => [])
76 | };
77 | jest.mock("../../web/scripts/api", () => ({
78 | get api() {
79 | return mockApi;
80 | },
81 | }));
82 | }
83 |
--------------------------------------------------------------------------------
/.github/workflows/windows_release_dependencies.yml:
--------------------------------------------------------------------------------
1 | name: "Windows Release dependencies"
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | xformers:
7 | description: 'xformers version'
8 | required: false
9 | type: string
10 | default: ""
11 | extra_dependencies:
12 | description: 'extra dependencies'
13 | required: false
14 | type: string
15 | default: "\"numpy<2\""
16 | cu:
17 | description: 'cuda version'
18 | required: true
19 | type: string
20 | default: "124"
21 |
22 | python_minor:
23 | description: 'python minor version'
24 | required: true
25 | type: string
26 | default: "11"
27 |
28 | python_patch:
29 | description: 'python patch version'
30 | required: true
31 | type: string
32 | default: "9"
33 | # push:
34 | # branches:
35 | # - master
36 |
37 | jobs:
38 | build_dependencies:
39 | runs-on: windows-latest
40 | steps:
41 | - uses: actions/checkout@v4
42 | - uses: actions/setup-python@v5
43 | with:
44 | python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }}
45 |
46 | - shell: bash
47 | run: |
48 | echo "@echo off
49 | call update_comfyui.bat nopause
50 | echo -
51 | echo This will try to update pytorch and all python dependencies.
52 | echo -
53 | echo If you just want to update normally, close this and run update_comfyui.bat instead.
54 | echo -
55 | pause
56 | ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2
57 | pause" > update_comfyui_and_python_dependencies.bat
58 |
59 | python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} ${{ inputs.extra_dependencies }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir
60 | python -m pip install --no-cache-dir ./temp_wheel_dir/*
61 | echo installed basic
62 | ls -lah temp_wheel_dir
63 | mv temp_wheel_dir cu${{ inputs.cu }}_python_deps
64 | tar cf cu${{ inputs.cu }}_python_deps.tar cu${{ inputs.cu }}_python_deps
65 |
66 | - uses: actions/cache/save@v4
67 | with:
68 | path: |
69 | cu${{ inputs.cu }}_python_deps.tar
70 | update_comfyui_and_python_dependencies.bat
71 | key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }}
72 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_model_downscale.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.utils
3 |
4 | class PatchModelAddDownscale:
5 | upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]
6 | @classmethod
7 | def INPUT_TYPES(s):
8 | return {"required": { "model": ("MODEL",),
9 | "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}),
10 | "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}),
11 | "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
12 | "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}),
13 | "downscale_after_skip": ("BOOLEAN", {"default": True}),
14 | "downscale_method": (s.upscale_methods,),
15 | "upscale_method": (s.upscale_methods,),
16 | }}
17 | RETURN_TYPES = ("MODEL",)
18 | FUNCTION = "patch"
19 |
20 | CATEGORY = "_for_testing"
21 |
22 | def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method):
23 | model_sampling = model.get_model_object("model_sampling")
24 | sigma_start = model_sampling.percent_to_sigma(start_percent)
25 | sigma_end = model_sampling.percent_to_sigma(end_percent)
26 |
27 | def input_block_patch(h, transformer_options):
28 | if transformer_options["block"][1] == block_number:
29 | sigma = transformer_options["sigmas"][0].item()
30 | if sigma <= sigma_start and sigma >= sigma_end:
31 | h = comfy.utils.common_upscale(h, round(h.shape[-1] * (1.0 / downscale_factor)), round(h.shape[-2] * (1.0 / downscale_factor)), downscale_method, "disabled")
32 | return h
33 |
34 | def output_block_patch(h, hsp, transformer_options):
35 | if h.shape[2] != hsp.shape[2]:
36 | h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled")
37 | return h, hsp
38 |
39 | m = model.clone()
40 | if downscale_after_skip:
41 | m.set_model_input_block_patch_after_skip(input_block_patch)
42 | else:
43 | m.set_model_input_block_patch(input_block_patch)
44 | m.set_model_output_block_patch(output_block_patch)
45 | return (m, )
46 |
47 | NODE_CLASS_MAPPINGS = {
48 | "PatchModelAddDownscale": PatchModelAddDownscale,
49 | }
50 |
51 | NODE_DISPLAY_NAME_MAPPINGS = {
52 | # Sampling
53 | "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)",
54 | }
55 |
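The patches above only ever resize feature maps: inside the sigma window the chosen input block's hidden states are shrunk, and the output-block patch grows them back once their shape stops matching the skip tensor. The resize arithmetic in isolation, with `torch.nn.functional.interpolate` as an approximate stand-in for `comfy.utils.common_upscale` (an assumption; `common_upscale` also offers "bislerp" and crop handling):

```python
import torch
import torch.nn.functional as F

h = torch.randn(1, 320, 64, 64)       # hidden states at some UNet input block
downscale_factor = 2.0
size = (round(h.shape[-2] / downscale_factor), round(h.shape[-1] / downscale_factor))
h = F.interpolate(h, size=size, mode="bicubic")
print(h.shape)                        # torch.Size([1, 320, 32, 32])

hsp = torch.randn(1, 320, 64, 64)     # skip connection arriving at the output block
if h.shape[2] != hsp.shape[2]:        # shapes diverged -> upscale back to match
    h = F.interpolate(h, size=hsp.shape[-2:], mode="bicubic")
print(h.shape)                        # torch.Size([1, 320, 64, 64])
```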
--------------------------------------------------------------------------------
/.github/workflows/test-browser.yml:
--------------------------------------------------------------------------------
1 | # This is a temporary action during frontend TS migration.
2 | # This file should be removed after TS migration is completed.
3 | # The browser test is here to ensure TS repo is working the same way as the
4 | # current JS code.
5 | # If you are adding a UI feature, please sync your changes to the TS repo:
6 | # huchenlei/ComfyUI_frontend and update test expectation files accordingly.
7 | name: Playwright Browser Tests CI
8 |
9 | on:
10 | push:
11 | branches: [ main, master ]
12 | pull_request:
13 | branches: [ main, master ]
14 |
15 | jobs:
16 | test:
17 | runs-on: ubuntu-latest
18 | steps:
19 | - name: Checkout ComfyUI
20 | uses: actions/checkout@v4
21 | with:
22 | repository: "comfyanonymous/ComfyUI"
23 | path: "ComfyUI"
24 | - name: Checkout ComfyUI_frontend
25 | uses: actions/checkout@v4
26 | with:
27 | repository: "huchenlei/ComfyUI_frontend"
28 | path: "ComfyUI_frontend"
29 | ref: "fcc54d803e5b6a9b08a462a1d94899318c96dcbb"
30 | - uses: actions/setup-node@v3
31 | with:
32 | node-version: lts/*
33 | - uses: actions/setup-python@v4
34 | with:
35 | python-version: '3.8'
36 | - name: Install requirements
37 | run: |
38 | python -m pip install --upgrade pip
39 | pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
40 | pip install -r requirements.txt
41 | pip install wait-for-it
42 | working-directory: ComfyUI
43 | - name: Start ComfyUI server
44 | run: |
45 | python main.py --cpu 2>&1 | tee console_output.log &
46 | wait-for-it --service 127.0.0.1:8188 -t 600
47 | working-directory: ComfyUI
48 | - name: Install ComfyUI_frontend dependencies
49 | run: |
50 | npm ci
51 | working-directory: ComfyUI_frontend
52 | - name: Install Playwright Browsers
53 | run: npx playwright install --with-deps
54 | working-directory: ComfyUI_frontend
55 | - name: Run Playwright tests
56 | run: npx playwright test
57 | working-directory: ComfyUI_frontend
58 | - name: Check for unhandled exceptions in server log
59 | run: |
60 | if grep -qE "Exception|Error" console_output.log; then
61 | echo "Unhandled exception/error found in server log."
62 | exit 1
63 | fi
64 | working-directory: ComfyUI
65 | - uses: actions/upload-artifact@v4
66 | if: always()
67 | with:
68 | name: playwright-report
69 | path: ComfyUI_frontend/playwright-report/
70 | retention-days: 30
71 | - uses: actions/upload-artifact@v4
72 | if: always()
73 | with:
74 | name: console-output
75 | path: ComfyUI/console_output.log
76 | retention-days: 30
77 |
--------------------------------------------------------------------------------
/comfy/conds.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | import comfy.utils
4 |
5 |
6 | def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
7 | return abs(a*b) // math.gcd(a, b)
8 |
9 | class CONDRegular:
10 | def __init__(self, cond):
11 | self.cond = cond
12 |
13 | def _copy_with(self, cond):
14 | return self.__class__(cond)
15 |
16 | def process_cond(self, batch_size, device, **kwargs):
17 | return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device))
18 |
19 | def can_concat(self, other):
20 | if self.cond.shape != other.cond.shape:
21 | return False
22 | return True
23 |
24 | def concat(self, others):
25 | conds = [self.cond]
26 | for x in others:
27 | conds.append(x.cond)
28 | return torch.cat(conds)
29 |
30 | class CONDNoiseShape(CONDRegular):
31 | def process_cond(self, batch_size, device, area, **kwargs):
32 | data = self.cond
33 | if area is not None:
34 | dims = len(area) // 2
35 | for i in range(dims):
36 | data = data.narrow(i + 2, area[i + dims], area[i])
37 |
38 | return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device))
39 |
40 |
41 | class CONDCrossAttn(CONDRegular):
42 | def can_concat(self, other):
43 | s1 = self.cond.shape
44 | s2 = other.cond.shape
45 | if s1 != s2:
46 | if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
47 | return False
48 |
49 | mult_min = lcm(s1[1], s2[1])
50 | diff = mult_min // min(s1[1], s2[1])
51 | if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
52 | return False
53 | return True
54 |
55 | def concat(self, others):
56 | conds = [self.cond]
57 | crossattn_max_len = self.cond.shape[1]
58 | for x in others:
59 | c = x.cond
60 | crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
61 | conds.append(c)
62 |
63 | out = []
64 | for c in conds:
65 | if c.shape[1] < crossattn_max_len:
66 | c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
67 | out.append(c)
68 | return torch.cat(out)
69 |
70 | class CONDConstant(CONDRegular):
71 | def __init__(self, cond):
72 | self.cond = cond
73 |
74 | def process_cond(self, batch_size, device, **kwargs):
75 | return self._copy_with(self.cond)
76 |
77 | def can_concat(self, other):
78 | if self.cond != other.cond:
79 | return False
80 | return True
81 |
82 | def concat(self, others):
83 | return self.cond
84 |
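A worked example of the `CONDCrossAttn` rule above: cross-attention conds of 77 and 154 tokens can be batched by repeating each up to lcm(77, 154) = 154 tokens, which (as the comment notes) leaves attention results unchanged, while `can_concat` refuses anything needing more than 4x padding:

```python
import math
import torch

a = torch.randn(1, 77, 768)   # e.g. a one-chunk prompt
b = torch.randn(1, 154, 768)  # e.g. a two-chunk prompt

max_len = (77 * 154) // math.gcd(77, 154)      # lcm = 154
padded = [c.repeat(1, max_len // c.shape[1], 1) for c in (a, b)]
print(torch.cat(padded).shape)                 # torch.Size([2, 154, 768])
print(max_len // min(77, 154) <= 4)            # True -> within can_concat's limit
```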
--------------------------------------------------------------------------------
/comfy_extras/nodes_clip_sdxl.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from nodes import MAX_RESOLUTION
3 |
4 | class CLIPTextEncodeSDXLRefiner:
5 | @classmethod
6 | def INPUT_TYPES(s):
7 | return {"required": {
8 | "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
9 | "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
10 | "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
11 | "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
12 | }}
13 | RETURN_TYPES = ("CONDITIONING",)
14 | FUNCTION = "encode"
15 |
16 | CATEGORY = "advanced/conditioning"
17 |
18 | def encode(self, clip, ascore, width, height, text):
19 | tokens = clip.tokenize(text)
20 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
21 | return ([[cond, {"pooled_output": pooled, "aesthetic_score": ascore, "width": width,"height": height}]], )
22 |
23 | class CLIPTextEncodeSDXL:
24 | @classmethod
25 | def INPUT_TYPES(s):
26 | return {"required": {
27 | "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
28 | "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
29 | "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
30 | "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
31 | "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
32 | "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
33 | "text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
34 | "text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
35 | }}
36 | RETURN_TYPES = ("CONDITIONING",)
37 | FUNCTION = "encode"
38 |
39 | CATEGORY = "advanced/conditioning"
40 |
41 | def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
42 | tokens = clip.tokenize(text_g)
43 | tokens["l"] = clip.tokenize(text_l)["l"]
44 | if len(tokens["l"]) != len(tokens["g"]):
45 | empty = clip.tokenize("")
46 | while len(tokens["l"]) < len(tokens["g"]):
47 | tokens["l"] += empty["l"]
48 | while len(tokens["l"]) > len(tokens["g"]):
49 | tokens["g"] += empty["g"]
50 | cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
51 | return ([[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]], )
52 |
53 | NODE_CLASS_MAPPINGS = {
54 | "CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner,
55 | "CLIPTextEncodeSDXL": CLIPTextEncodeSDXL,
56 | }
57 |
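The while-loops in `CLIPTextEncodeSDXL.encode` balance the two token streams: tokenizers return prompts as lists of fixed-size chunks, and the shorter of the "g"/"l" streams is padded with empty-prompt chunks until both encoders see the same count. With plain lists as illustrative stand-ins for real token chunks:

```python
tokens = {"g": ["g_chunk_1", "g_chunk_2"], "l": ["l_chunk_1"]}  # stand-in chunks
empty = {"g": ["empty_g"], "l": ["empty_l"]}                    # tokenized ""

while len(tokens["l"]) < len(tokens["g"]):
    tokens["l"] += empty["l"]
while len(tokens["l"]) > len(tokens["g"]):
    tokens["g"] += empty["g"]

print(tokens["l"])  # ['l_chunk_1', 'empty_l'] -> both streams now have 2 chunks
```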
--------------------------------------------------------------------------------
/web/scripts/ui/menu/queueButton.js:
--------------------------------------------------------------------------------
1 | // @ts-check
2 |
3 | import { ComfyButton } from "../components/button.js";
4 | import { $el } from "../../ui.js";
5 | import { api } from "../../api.js";
6 | import { ComfySplitButton } from "../components/splitButton.js";
7 | import { ComfyQueueOptions } from "./queueOptions.js";
8 | import { prop } from "../../utils.js";
9 |
10 | export class ComfyQueueButton {
11 | element = $el("div.comfyui-queue-button");
12 | #internalQueueSize = 0;
13 |
14 | queuePrompt = async (e) => {
15 | this.#internalQueueSize += this.queueOptions.batchCount;
16 | // Hold shift to queue front, event is undefined when auto-queue is enabled
17 | await this.app.queuePrompt(e?.shiftKey ? -1 : 0, this.queueOptions.batchCount);
18 | };
19 |
20 | constructor(app) {
21 | this.app = app;
22 | this.queueSizeElement = $el("span.comfyui-queue-count", {
23 | textContent: "?",
24 | });
25 |
26 | const queue = new ComfyButton({
27 | content: $el("div", [
28 | $el("span", {
29 | textContent: "Queue",
30 | }),
31 | this.queueSizeElement,
32 | ]),
33 | icon: "play",
34 | classList: "comfyui-button",
35 | action: this.queuePrompt,
36 | });
37 |
38 | this.queueOptions = new ComfyQueueOptions(app);
39 |
40 | const btn = new ComfySplitButton(
41 | {
42 | primary: queue,
43 | mode: "click",
44 | position: "absolute",
45 | horizontal: "right",
46 | },
47 | this.queueOptions.element
48 | );
49 | btn.element.classList.add("primary");
50 | this.element.append(btn.element);
51 |
52 | this.autoQueueMode = prop(this, "autoQueueMode", "", () => {
53 | switch (this.autoQueueMode) {
54 | case "instant":
55 | queue.icon = "infinity";
56 | break;
57 | case "change":
58 | queue.icon = "auto-mode";
59 | break;
60 | default:
61 | queue.icon = "play";
62 | break;
63 | }
64 | });
65 |
66 | this.queueOptions.addEventListener("autoQueueMode", (e) => (this.autoQueueMode = e["detail"]));
67 |
68 | api.addEventListener("graphChanged", () => {
69 | if (this.autoQueueMode === "change") {
70 | if (this.#internalQueueSize) {
71 | this.graphHasChanged = true;
72 | } else {
73 | this.graphHasChanged = false;
74 | this.queuePrompt();
75 | }
76 | }
77 | });
78 |
79 | api.addEventListener("status", ({ detail }) => {
80 | this.#internalQueueSize = detail?.exec_info?.queue_remaining;
81 | if (this.#internalQueueSize != null) {
82 | this.queueSizeElement.textContent = this.#internalQueueSize > 99 ? "99+" : this.#internalQueueSize + "";
83 | this.queueSizeElement.title = `${this.#internalQueueSize} prompts in queue`;
84 | if (!this.#internalQueueSize && !app.lastExecutionError) {
85 | if (this.autoQueueMode === "instant" || (this.autoQueueMode === "change" && this.graphHasChanged)) {
86 | this.graphHasChanged = false;
87 | this.queuePrompt();
88 | }
89 | }
90 | }
91 | });
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/web/types/comfy.d.ts:
--------------------------------------------------------------------------------
1 | import { LGraphNode, IWidget } from "./litegraph";
2 | import { ComfyApp } from "../../scripts/app";
3 |
4 | export interface ComfyExtension {
5 | /**
6 | * The name of the extension
7 | */
8 | name: string;
9 | /**
10 | * Allows any initialisation, e.g. loading resources. Called after the canvas is created but before nodes are added
11 | * @param app The ComfyUI app instance
12 | */
13 | init?(app: ComfyApp): Promise<void>;
14 | /**
15 |  * Allows any additional setup, called after the application is fully set up and running
16 | * @param app The ComfyUI app instance
17 | */
18 | setup?(app: ComfyApp): Promise<void>;
19 | /**
20 | * Called before nodes are registered with the graph
21 | * @param defs The collection of node definitions, add custom ones or edit existing ones
22 | * @param app The ComfyUI app instance
23 | */
24 | addCustomNodeDefs?(defs: Record<string, ComfyObjectInfo>, app: ComfyApp): Promise<void>;
25 | /**
26 | * Allows the extension to add custom widgets
27 | * @param app The ComfyUI app instance
28 | * @returns An array of {[widget name]: widget data}
29 | */
30 | getCustomWidgets?(
31 | app: ComfyApp
32 | ): Promise<
33 | Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
34 | >;
35 | /**
36 | * Allows the extension to add additional handling to the node before it is registered with LGraph
37 | * @param nodeType The node class (not an instance)
38 | * @param nodeData The original node object info config object
39 | * @param app The ComfyUI app instance
40 | */
41 | beforeRegisterNodeDef?(nodeType: typeof LGraphNode, nodeData: ComfyObjectInfo, app: ComfyApp): Promise<void>;
42 | /**
43 | * Allows the extension to register additional nodes with LGraph after standard nodes are added
44 | * @param app The ComfyUI app instance
45 | */
46 |     registerCustomNodes?(app: ComfyApp): Promise<void>;
47 | /**
48 |      * Allows the extension to modify a node that has been reloaded onto the graph.
49 |      * If you break something in the backend and want to patch workflows in the frontend,
50 |      * this is the place to do it.
51 | * @param node The node that has been loaded
52 | * @param app The ComfyUI app instance
53 | */
54 | loadedGraphNode?(node: LGraphNode, app: ComfyApp);
55 | /**
56 | * Allows the extension to run code after the constructor of the node
57 | * @param node The node that has been created
58 | * @param app The ComfyUI app instance
59 | */
60 | nodeCreated?(node: LGraphNode, app: ComfyApp);
61 | }
62 |
63 | export type ComfyObjectInfo = {
64 | name: string;
65 | display_name?: string;
66 | description?: string;
67 | category: string;
68 | input?: {
69 |         required?: Record<string, ComfyObjectInfoConfig>;
70 |         optional?: Record<string, ComfyObjectInfoConfig>;
71 | };
72 | output?: string[];
73 | output_name: string[];
74 | };
75 |
76 | export type ComfyObjectInfoConfig = [string | any[]] | [string | any[], any];
77 |
--------------------------------------------------------------------------------
/comfy/sampler_helpers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.model_management
3 | import comfy.conds
4 | import comfy.utils  # used by prepare_mask below
5 | def prepare_mask(noise_mask, shape, device):
6 | """ensures noise mask is of proper dimensions"""
7 | noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
8 | noise_mask = torch.cat([noise_mask] * shape[1], dim=1)
9 | noise_mask = comfy.utils.repeat_to_batch_size(noise_mask, shape[0])
10 | noise_mask = noise_mask.to(device)
11 | return noise_mask
12 |
13 | def get_models_from_cond(cond, model_type):
14 | models = []
15 | for c in cond:
16 | if model_type in c:
17 | models += [c[model_type]]
18 | return models
19 |
20 | def convert_cond(cond):
21 | out = []
22 | for c in cond:
23 | temp = c[1].copy()
24 | model_conds = temp.get("model_conds", {})
25 | if c[0] is not None:
26 | model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) #TODO: remove
27 | temp["cross_attn"] = c[0]
28 | temp["model_conds"] = model_conds
29 | out.append(temp)
30 | return out
31 |
32 | def get_additional_models(conds, dtype):
33 | """loads additional models in conditioning"""
34 | cnets = []
35 | gligen = []
36 |
37 | for k in conds:
38 | cnets += get_models_from_cond(conds[k], "control")
39 | gligen += get_models_from_cond(conds[k], "gligen")
40 |
41 | control_nets = set(cnets)
42 |
43 | inference_memory = 0
44 | control_models = []
45 | for m in control_nets:
46 | control_models += m.get_models()
47 | inference_memory += m.inference_memory_requirements(dtype)
48 |
49 | gligen = [x[1] for x in gligen]
50 | models = control_models + gligen
51 | return models, inference_memory
52 |
53 | def cleanup_additional_models(models):
54 | """cleanup additional models that were loaded"""
55 | for m in models:
56 | if hasattr(m, 'cleanup'):
57 | m.cleanup()
58 |
59 |
60 | def prepare_sampling(model, noise_shape, conds):
61 | device = model.load_device
62 | real_model = None
63 | models, inference_memory = get_additional_models(conds, model.model_dtype())
64 |     memory_required = model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory  # batch dimension doubled to cover the cond + uncond passes
65 | minimum_memory_required = model.memory_required([noise_shape[0]] + list(noise_shape[1:])) + inference_memory
66 | comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required)
67 | real_model = model.model
68 |
69 | return real_model, conds, models
70 |
71 | def cleanup_models(conds, models):
72 | cleanup_additional_models(models)
73 |
74 | control_cleanup = []
75 | for k in conds:
76 | control_cleanup += get_models_from_cond(conds[k], "control")
77 |
78 | cleanup_additional_models(set(control_cleanup))
79 |
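80 | if __name__ == "__main__":
81 |     # Minimal usage sketch with illustrative values (not part of the sampling
82 |     # path): prepare_mask broadcasts a single-channel mask to the latent shape.
83 |     mask = torch.ones((64, 64))
84 |     latent_shape = (2, 4, 32, 32)  # (batch, channels, height, width)
85 |     out = prepare_mask(mask, latent_shape, device="cpu")
86 |     assert out.shape == latent_shape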
--------------------------------------------------------------------------------
/web/extensions/core/simpleTouchSupport.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 |
3 | let touchZooming;
4 | let touchCount = 0;
5 |
6 | app.registerExtension({
7 | name: "Comfy.SimpleTouchSupport",
8 | setup() {
9 | let zoomPos;
10 | let touchTime;
11 | let lastTouch;
12 |
13 | function getMultiTouchPos(e) {
14 | return Math.hypot(e.touches[0].clientX - e.touches[1].clientX, e.touches[0].clientY - e.touches[1].clientY);
15 | }
16 |
17 | app.canvasEl.addEventListener(
18 | "touchstart",
19 | (e) => {
20 | touchCount++;
21 | lastTouch = null;
22 | if (e.touches?.length === 1) {
23 | // Store start time for press+hold for context menu
24 | touchTime = new Date();
25 | lastTouch = e.touches[0];
26 | } else {
27 | touchTime = null;
28 | if (e.touches?.length === 2) {
29 |         // Store distance between touches for zoom
30 | zoomPos = getMultiTouchPos(e);
31 | app.canvas.pointer_is_down = false;
32 | }
33 | }
34 | },
35 | true
36 | );
37 |
38 | app.canvasEl.addEventListener("touchend", (e) => {
39 | touchZooming = false;
40 | touchCount = e.touches?.length ?? touchCount - 1;
41 | if (touchTime && !e.touches?.length) {
42 | if (new Date() - touchTime > 600) {
43 | try {
44 | // hack to get litegraph to use this event
45 | e.constructor = CustomEvent;
46 | } catch (error) {}
47 | e.clientX = lastTouch.clientX;
48 | e.clientY = lastTouch.clientY;
49 |
50 | app.canvas.pointer_is_down = true;
51 | app.canvas._mousedown_callback(e);
52 | }
53 | touchTime = null;
54 | }
55 | });
56 |
57 | app.canvasEl.addEventListener(
58 | "touchmove",
59 | (e) => {
60 | touchTime = null;
61 | if (e.touches?.length === 2) {
62 | app.canvas.pointer_is_down = false;
63 | touchZooming = true;
64 | LiteGraph.closeAllContextMenus();
65 | app.canvas.search_box?.close();
66 | const newZoomPos = getMultiTouchPos(e);
67 |
68 | const midX = (e.touches[0].clientX + e.touches[1].clientX) / 2;
69 | const midY = (e.touches[0].clientY + e.touches[1].clientY) / 2;
70 |
71 | let scale = app.canvas.ds.scale;
72 | const diff = zoomPos - newZoomPos;
73 | if (diff > 0.5) {
74 | scale *= 1 / 1.07;
75 | } else if (diff < -0.5) {
76 | scale *= 1.07;
77 | }
78 | app.canvas.ds.changeScale(scale, [midX, midY]);
79 | app.canvas.setDirty(true, true);
80 | zoomPos = newZoomPos;
81 | }
82 | },
83 | true
84 | );
85 | },
86 | });
87 |
88 | const processMouseDown = LGraphCanvas.prototype.processMouseDown;
89 | LGraphCanvas.prototype.processMouseDown = function (e) {
90 | if (touchZooming || touchCount) {
91 | return;
92 | }
93 | return processMouseDown.apply(this, arguments);
94 | };
95 |
96 | const processMouseMove = LGraphCanvas.prototype.processMouseMove;
97 | LGraphCanvas.prototype.processMouseMove = function (e) {
98 | if (touchZooming || touchCount > 1) {
99 | return;
100 | }
101 | return processMouseMove.apply(this, arguments);
102 | };
103 |
--------------------------------------------------------------------------------
/comfy/text_encoders/t5_tokenizer/special_tokens_map.json:
--------------------------------------------------------------------------------
1 | {
2 | "additional_special_tokens": [
3 | "",
4 | "",
5 | "",
6 | "",
7 | "",
8 | "",
9 | "",
10 | "",
11 | "",
12 | "",
13 | "",
14 | "",
15 | "",
16 | "",
17 | "",
18 | "",
19 | "",
20 | "",
21 | "",
22 | "",
23 | "",
24 | "",
25 | "",
26 | "",
27 | "",
28 | "",
29 | "",
30 | "",
31 | "",
32 | "",
33 | "",
34 | "",
35 | "",
36 | "",
37 | "",
38 | "",
39 | "",
40 | "",
41 | "",
42 | "",
43 | "",
44 | "",
45 | "",
46 | "",
47 | "",
48 | "",
49 | "",
50 | "",
51 | "",
52 | "",
53 | "",
54 | "",
55 | "",
56 | "",
57 | "",
58 | "",
59 | "",
60 | "",
61 | "",
62 | "",
63 | "",
64 | "",
65 | "",
66 | "",
67 | "",
68 | "",
69 | "",
70 | "",
71 | "",
72 | "",
73 | "",
74 | "",
75 | "",
76 | "",
77 | "",
78 | "",
79 | "",
80 | "",
81 | "",
82 | "",
83 | "",
84 | "",
85 | "",
86 | "",
87 | "",
88 | "",
89 | "",
90 | "",
91 | "",
92 | "",
93 | "",
94 | "",
95 | "",
96 | "",
97 | "",
98 | "",
99 | "",
100 | "",
101 | "",
102 | ""
103 | ],
104 | "eos_token": {
105 | "content": "",
106 | "lstrip": false,
107 | "normalized": false,
108 | "rstrip": false,
109 | "single_word": false
110 | },
111 | "pad_token": {
112 | "content": "",
113 | "lstrip": false,
114 | "normalized": false,
115 | "rstrip": false,
116 | "single_word": false
117 | },
118 | "unk_token": {
119 | "content": "",
120 | "lstrip": false,
121 | "normalized": false,
122 | "rstrip": false,
123 | "single_word": false
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/comfy/sample.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import comfy.model_management
3 | import comfy.samplers
4 | import comfy.utils
5 | import numpy as np
6 | import logging
7 |
8 | def prepare_noise(latent_image, seed, noise_inds=None):
9 | """
10 | creates random noise given a latent image and a seed.
11 | optional arg skip can be used to skip and discard x number of noise generations for a given seed
12 | """
13 | generator = torch.manual_seed(seed)
14 | if noise_inds is None:
15 | return torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
16 |
17 | unique_inds, inverse = np.unique(noise_inds, return_inverse=True)
18 | noises = []
19 | for i in range(unique_inds[-1]+1):
20 | noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
21 | if i in unique_inds:
22 | noises.append(noise)
23 | noises = [noises[i] for i in inverse]
24 | noises = torch.cat(noises, axis=0)
25 | return noises
26 |
27 | def fix_empty_latent_channels(model, latent_image):
28 | latent_channels = model.get_model_object("latent_format").latent_channels #Resize the empty latent image so it has the right number of channels
29 | if latent_channels != latent_image.shape[1] and torch.count_nonzero(latent_image) == 0:
30 | latent_image = comfy.utils.repeat_to_batch_size(latent_image, latent_channels, dim=1)
31 | return latent_image
32 |
33 | def prepare_sampling(model, noise_shape, positive, negative, noise_mask):
34 | logging.warning("Warning: comfy.sample.prepare_sampling isn't used anymore and can be removed")
35 | return model, positive, negative, noise_mask, []
36 |
37 | def cleanup_additional_models(models):
38 | logging.warning("Warning: comfy.sample.cleanup_additional_models isn't used anymore and can be removed")
39 |
40 | def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
41 | sampler = comfy.samplers.KSampler(model, steps=steps, device=model.load_device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
42 |
43 | samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
44 | samples = samples.to(comfy.model_management.intermediate_device())
45 | return samples
46 |
47 | def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None):
48 | samples = comfy.samplers.sample(model, noise, positive, negative, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
49 | samples = samples.to(comfy.model_management.intermediate_device())
50 | return samples
51 |
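52 | if __name__ == "__main__":
53 |     # Minimal usage sketch with illustrative values: the same seed yields the
54 |     # same noise, and noise_inds picks per-index noise from the seeded
55 |     # sequence (duplicate indices share the same noise).
56 |     latent = torch.zeros((2, 4, 8, 8))
57 |     assert torch.equal(prepare_noise(latent, seed=42), prepare_noise(latent, seed=42))
58 |     n = prepare_noise(latent, seed=42, noise_inds=[0, 0, 2])
59 |     assert n.shape[0] == 3 and torch.equal(n[0], n[1])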
--------------------------------------------------------------------------------
/script_examples/basic_api_example.py:
--------------------------------------------------------------------------------
1 | import json
2 | from urllib import request, parse
3 | import random
4 |
5 | #This is the ComfyUI api prompt format.
6 |
7 | #If you want it for a specific workflow you can "enable dev mode options"
8 | #in the settings of the UI (gear icon beside "Queue Size:"); this enables
9 | #a button in the UI to save workflows in the api format.
10 | 
11 | #keep in mind ComfyUI is pre-alpha software, so this format may change a bit.
12 |
13 | #this is the one for the default workflow
14 | prompt_text = """
15 | {
16 | "3": {
17 | "class_type": "KSampler",
18 | "inputs": {
19 | "cfg": 8,
20 | "denoise": 1,
21 | "latent_image": [
22 | "5",
23 | 0
24 | ],
25 | "model": [
26 | "4",
27 | 0
28 | ],
29 | "negative": [
30 | "7",
31 | 0
32 | ],
33 | "positive": [
34 | "6",
35 | 0
36 | ],
37 | "sampler_name": "euler",
38 | "scheduler": "normal",
39 | "seed": 8566257,
40 | "steps": 20
41 | }
42 | },
43 | "4": {
44 | "class_type": "CheckpointLoaderSimple",
45 | "inputs": {
46 | "ckpt_name": "v1-5-pruned-emaonly.ckpt"
47 | }
48 | },
49 | "5": {
50 | "class_type": "EmptyLatentImage",
51 | "inputs": {
52 | "batch_size": 1,
53 | "height": 512,
54 | "width": 512
55 | }
56 | },
57 | "6": {
58 | "class_type": "CLIPTextEncode",
59 | "inputs": {
60 | "clip": [
61 | "4",
62 | 1
63 | ],
64 | "text": "masterpiece best quality girl"
65 | }
66 | },
67 | "7": {
68 | "class_type": "CLIPTextEncode",
69 | "inputs": {
70 | "clip": [
71 | "4",
72 | 1
73 | ],
74 | "text": "bad hands"
75 | }
76 | },
77 | "8": {
78 | "class_type": "VAEDecode",
79 | "inputs": {
80 | "samples": [
81 | "3",
82 | 0
83 | ],
84 | "vae": [
85 | "4",
86 | 2
87 | ]
88 | }
89 | },
90 | "9": {
91 | "class_type": "SaveImage",
92 | "inputs": {
93 | "filename_prefix": "ComfyUI",
94 | "images": [
95 | "8",
96 | 0
97 | ]
98 | }
99 | }
100 | }
101 | """
102 |
103 | def queue_prompt(prompt):
104 | p = {"prompt": prompt}
105 | data = json.dumps(p).encode('utf-8')
106 | req = request.Request("http://127.0.0.1:8188/prompt", data=data)
107 | request.urlopen(req)
108 |
109 |
110 | prompt = json.loads(prompt_text)
111 | #set the text prompt for our positive CLIPTextEncode
112 | prompt["6"]["inputs"]["text"] = "masterpiece best quality man"
113 |
114 | #set the seed for our KSampler node
115 | prompt["3"]["inputs"]["seed"] = 5
116 |
117 |
118 | queue_prompt(prompt)
119 |
120 |
121 |
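122 | #hedged sketch, not part of the original example: in current builds the JSON
123 | #response to POST /prompt includes a "prompt_id" you can use with the /history
124 | #endpoint to fetch results once execution finishes (this API may change)
125 | def queue_prompt_and_get_id(prompt):
126 |     p = {"prompt": prompt}
127 |     data = json.dumps(p).encode('utf-8')
128 |     req = request.Request("http://127.0.0.1:8188/prompt", data=data)
129 |     with request.urlopen(req) as resp:
130 |         return json.loads(resp.read())["prompt_id"]
131 | 
132 | #e.g. request.urlopen("http://127.0.0.1:8188/history/" + prompt_id)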
--------------------------------------------------------------------------------
/.github/workflows/test-ci.yml:
--------------------------------------------------------------------------------
1 | # This is the GitHub Workflow that drives automatic full-GPU-enabled tests of all new commits to the master branch of ComfyUI
2 | # Results are reported as checkmarks on the commits, as well as onto https://ci.comfy.org/
3 | name: Full Comfy CI Workflow Runs
4 | on:
5 | push:
6 | branches:
7 | - master
8 | paths-ignore:
9 | - 'app/**'
10 | - 'input/**'
11 | - 'output/**'
12 | - 'notebooks/**'
13 | - 'script_examples/**'
14 | - '.github/**'
15 | - 'web/**'
16 | workflow_dispatch:
17 |
18 | jobs:
19 | test-stable:
20 | strategy:
21 | fail-fast: false
22 | matrix:
23 | os: [macos, linux, windows]
24 | python_version: ["3.9", "3.10", "3.11", "3.12"]
25 | cuda_version: ["12.1"]
26 | torch_version: ["stable"]
27 | include:
28 | - os: macos
29 | runner_label: [self-hosted, macOS]
30 | flags: "--use-pytorch-cross-attention"
31 | - os: linux
32 | runner_label: [self-hosted, Linux]
33 | flags: ""
34 | - os: windows
35 | runner_label: [self-hosted, win]
36 | flags: ""
37 | runs-on: ${{ matrix.runner_label }}
38 | steps:
39 | - name: Test Workflows
40 | uses: comfy-org/comfy-action@main
41 | with:
42 | os: ${{ matrix.os }}
43 | python_version: ${{ matrix.python_version }}
44 | torch_version: ${{ matrix.torch_version }}
45 | google_credentials: ${{ secrets.GCS_SERVICE_ACCOUNT_JSON }}
46 | comfyui_flags: ${{ matrix.flags }}
47 |
48 | test-win-nightly:
49 | strategy:
50 | fail-fast: true
51 | matrix:
52 | os: [windows]
53 | python_version: ["3.9", "3.10", "3.11", "3.12"]
54 | cuda_version: ["12.1"]
55 | torch_version: ["nightly"]
56 | include:
57 | - os: windows
58 | runner_label: [self-hosted, win]
59 | flags: ""
60 | runs-on: ${{ matrix.runner_label }}
61 | steps:
62 | - name: Test Workflows
63 | uses: comfy-org/comfy-action@main
64 | with:
65 | os: ${{ matrix.os }}
66 | python_version: ${{ matrix.python_version }}
67 | torch_version: ${{ matrix.torch_version }}
68 | google_credentials: ${{ secrets.GCS_SERVICE_ACCOUNT_JSON }}
69 | comfyui_flags: ${{ matrix.flags }}
70 |
71 | test-unix-nightly:
72 | strategy:
73 | fail-fast: false
74 | matrix:
75 | os: [macos, linux]
76 | python_version: ["3.11"]
77 | cuda_version: ["12.1"]
78 | torch_version: ["nightly"]
79 | include:
80 | - os: macos
81 | runner_label: [self-hosted, macOS]
82 | flags: "--use-pytorch-cross-attention"
83 | - os: linux
84 | runner_label: [self-hosted, Linux]
85 | flags: ""
86 | runs-on: ${{ matrix.runner_label }}
87 | steps:
88 | - name: Test Workflows
89 | uses: comfy-org/comfy-action@main
90 | with:
91 | os: ${{ matrix.os }}
92 | python_version: ${{ matrix.python_version }}
93 | torch_version: ${{ matrix.torch_version }}
94 | google_credentials: ${{ secrets.GCS_SERVICE_ACCOUNT_JSON }}
95 | comfyui_flags: ${{ matrix.flags }}
96 |
--------------------------------------------------------------------------------
/comfy/ldm/modules/distributions/distributions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 |
5 | class AbstractDistribution:
6 | def sample(self):
7 | raise NotImplementedError()
8 |
9 | def mode(self):
10 | raise NotImplementedError()
11 |
12 |
13 | class DiracDistribution(AbstractDistribution):
14 | def __init__(self, value):
15 | self.value = value
16 |
17 | def sample(self):
18 | return self.value
19 |
20 | def mode(self):
21 | return self.value
22 |
23 |
24 | class DiagonalGaussianDistribution(object):
25 | def __init__(self, parameters, deterministic=False):
26 | self.parameters = parameters
27 | self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
28 | self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
29 | self.deterministic = deterministic
30 | self.std = torch.exp(0.5 * self.logvar)
31 | self.var = torch.exp(self.logvar)
32 | if self.deterministic:
33 | self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
34 |
35 | def sample(self):
36 | x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
37 | return x
38 |
39 | def kl(self, other=None):
40 | if self.deterministic:
41 | return torch.Tensor([0.])
42 | else:
43 | if other is None:
44 | return 0.5 * torch.sum(torch.pow(self.mean, 2)
45 | + self.var - 1.0 - self.logvar,
46 | dim=[1, 2, 3])
47 | else:
48 | return 0.5 * torch.sum(
49 | torch.pow(self.mean - other.mean, 2) / other.var
50 | + self.var / other.var - 1.0 - self.logvar + other.logvar,
51 | dim=[1, 2, 3])
52 |
53 | def nll(self, sample, dims=[1,2,3]):
54 | if self.deterministic:
55 | return torch.Tensor([0.])
56 | logtwopi = np.log(2.0 * np.pi)
57 | return 0.5 * torch.sum(
58 | logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
59 | dim=dims)
60 |
61 | def mode(self):
62 | return self.mean
63 |
64 |
65 | def normal_kl(mean1, logvar1, mean2, logvar2):
66 | """
67 | source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
68 | Compute the KL divergence between two gaussians.
69 | Shapes are automatically broadcasted, so batches can be compared to
70 | scalars, among other use cases.
71 | """
72 | tensor = None
73 | for obj in (mean1, logvar1, mean2, logvar2):
74 | if isinstance(obj, torch.Tensor):
75 | tensor = obj
76 | break
77 | assert tensor is not None, "at least one argument must be a Tensor"
78 |
79 | # Force variances to be Tensors. Broadcasting helps convert scalars to
80 | # Tensors, but it does not work for torch.exp().
81 | logvar1, logvar2 = [
82 | x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
83 | for x in (logvar1, logvar2)
84 | ]
85 |
86 | return 0.5 * (
87 | -1.0
88 | + logvar2
89 | - logvar1
90 | + torch.exp(logvar1 - logvar2)
91 | + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
92 | )
93 |
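94 | if __name__ == "__main__":
95 |     # Minimal usage sketch with illustrative shapes: `parameters` packs mean
96 |     # and logvar along dim 1, so a (B, 2*C, H, W) input gives (B, C, H, W) samples.
97 |     params = torch.randn(2, 8, 4, 4)
98 |     dist = DiagonalGaussianDistribution(params)
99 |     assert dist.sample().shape == (2, 4, 4, 4)
100 |     assert dist.kl().shape == (2,)  # per-example KL against a standard normal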
--------------------------------------------------------------------------------
/tests-unit/app_test/frontend_manager_test.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import pytest
3 | from requests.exceptions import HTTPError
4 |
5 | from app.frontend_management import (
6 | FrontendManager,
7 | FrontEndProvider,
8 | Release,
9 | )
10 | from comfy.cli_args import DEFAULT_VERSION_STRING
11 |
12 |
13 | @pytest.fixture
14 | def mock_releases():
15 | return [
16 | Release(
17 | id=1,
18 | tag_name="1.0.0",
19 | name="Release 1.0.0",
20 | prerelease=False,
21 | created_at="2022-01-01T00:00:00Z",
22 | published_at="2022-01-01T00:00:00Z",
23 | body="Release notes for 1.0.0",
24 | assets=[{"name": "dist.zip", "url": "https://example.com/dist.zip"}],
25 | ),
26 | Release(
27 | id=2,
28 | tag_name="2.0.0",
29 | name="Release 2.0.0",
30 | prerelease=False,
31 | created_at="2022-02-01T00:00:00Z",
32 | published_at="2022-02-01T00:00:00Z",
33 | body="Release notes for 2.0.0",
34 | assets=[{"name": "dist.zip", "url": "https://example.com/dist.zip"}],
35 | ),
36 | ]
37 |
38 |
39 | @pytest.fixture
40 | def mock_provider(mock_releases):
41 | provider = FrontEndProvider(
42 | owner="test-owner",
43 | repo="test-repo",
44 | )
45 | provider.all_releases = mock_releases
46 | provider.latest_release = mock_releases[1]
47 | FrontendManager.PROVIDERS = [provider]
48 | return provider
49 |
50 |
51 | def test_get_release(mock_provider, mock_releases):
52 | version = "1.0.0"
53 | release = mock_provider.get_release(version)
54 | assert release == mock_releases[0]
55 |
56 |
57 | def test_get_release_latest(mock_provider, mock_releases):
58 | version = "latest"
59 | release = mock_provider.get_release(version)
60 | assert release == mock_releases[1]
61 |
62 |
63 | def test_get_release_invalid_version(mock_provider):
64 | version = "invalid"
65 | with pytest.raises(ValueError):
66 | mock_provider.get_release(version)
67 |
68 |
69 | def test_init_frontend_default():
70 | version_string = DEFAULT_VERSION_STRING
71 | frontend_path = FrontendManager.init_frontend(version_string)
72 | assert frontend_path == FrontendManager.DEFAULT_FRONTEND_PATH
73 |
74 |
75 | def test_init_frontend_invalid_version():
76 | version_string = "test-owner/test-repo@1.100.99"
77 | with pytest.raises(HTTPError):
78 | FrontendManager.init_frontend_unsafe(version_string)
79 |
80 |
81 | def test_init_frontend_invalid_provider():
82 | version_string = "invalid/invalid@latest"
83 | with pytest.raises(HTTPError):
84 | FrontendManager.init_frontend_unsafe(version_string)
85 |
86 |
87 | def test_parse_version_string():
88 | version_string = "owner/repo@1.0.0"
89 | repo_owner, repo_name, version = FrontendManager.parse_version_string(
90 | version_string
91 | )
92 | assert repo_owner == "owner"
93 | assert repo_name == "repo"
94 | assert version == "1.0.0"
95 |
96 |
97 | def test_parse_version_string_invalid():
98 | version_string = "invalid"
99 | with pytest.raises(argparse.ArgumentTypeError):
100 | FrontendManager.parse_version_string(version_string)
101 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_upscale_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | from spandrel import ModelLoader, ImageModelDescriptor
4 | from comfy import model_management
5 | import torch
6 | import comfy.utils
7 | import folder_paths
8 |
9 | try:
10 | from spandrel_extra_arches import EXTRA_REGISTRY
11 | from spandrel import MAIN_REGISTRY
12 | MAIN_REGISTRY.add(*EXTRA_REGISTRY)
13 | logging.info("Successfully imported spandrel_extra_arches: support for non commercial upscale models.")
14 | except:
15 | pass
16 |
17 | class UpscaleModelLoader:
18 | @classmethod
19 | def INPUT_TYPES(s):
20 | return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ),
21 | }}
22 | RETURN_TYPES = ("UPSCALE_MODEL",)
23 | FUNCTION = "load_model"
24 |
25 | CATEGORY = "loaders"
26 |
27 | def load_model(self, model_name):
28 | model_path = folder_paths.get_full_path("upscale_models", model_name)
29 | sd = comfy.utils.load_torch_file(model_path, safe_load=True)
30 | if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
31 | sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""})
32 | out = ModelLoader().load_from_state_dict(sd).eval()
33 |
34 | if not isinstance(out, ImageModelDescriptor):
35 | raise Exception("Upscale model must be a single-image model.")
36 |
37 | return (out, )
38 |
39 |
40 | class ImageUpscaleWithModel:
41 | @classmethod
42 | def INPUT_TYPES(s):
43 | return {"required": { "upscale_model": ("UPSCALE_MODEL",),
44 | "image": ("IMAGE",),
45 | }}
46 | RETURN_TYPES = ("IMAGE",)
47 | FUNCTION = "upscale"
48 |
49 | CATEGORY = "image/upscaling"
50 |
51 | def upscale(self, upscale_model, image):
52 | device = model_management.get_torch_device()
53 |
54 | memory_required = model_management.module_size(upscale_model.model)
55 | memory_required += (512 * 512 * 3) * image.element_size() * max(upscale_model.scale, 1.0) * 384.0 #The 384.0 is an estimate of how much some of these models take, TODO: make it more accurate
56 | memory_required += image.nelement() * image.element_size()
57 | model_management.free_memory(memory_required, device)
58 |
59 | upscale_model.to(device)
60 | in_img = image.movedim(-1,-3).to(device)
61 |
62 | tile = 512
63 | overlap = 32
64 |
65 |         oom = True  # retry the tiled upscale, halving the tile size on OOM until it fits (or tiles get smaller than 128px)
66 | while oom:
67 | try:
68 | steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap)
69 | pbar = comfy.utils.ProgressBar(steps)
70 | s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar)
71 | oom = False
72 | except model_management.OOM_EXCEPTION as e:
73 | tile //= 2
74 | if tile < 128:
75 | raise e
76 |
77 | upscale_model.to("cpu")
78 | s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0)
79 | return (s,)
80 |
81 | NODE_CLASS_MAPPINGS = {
82 | "UpscaleModelLoader": UpscaleModelLoader,
83 | "ImageUpscaleWithModel": ImageUpscaleWithModel
84 | }
85 |
--------------------------------------------------------------------------------
/comfy_extras/nodes_hypertile.py:
--------------------------------------------------------------------------------
1 | #Taken from: https://github.com/tfernd/HyperTile/
2 |
3 | import math
4 | from einops import rearrange
5 | # Use torch rng for consistency across generations
6 | from torch import randint
7 |
8 | def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
9 | min_value = min(min_value, value)
10 |
11 |     # All divisors of value that are >= min_value, in ascending order
12 | divisors = [i for i in range(min_value, value + 1) if value % i == 0]
13 |
14 | ns = [value // i for i in divisors[:max_options]] # has at least 1 element
15 |
16 |     if len(ns) > 1:
17 | idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()
18 | else:
19 | idx = 0
20 |
21 | return ns[idx]
22 |
23 | class HyperTile:
24 | @classmethod
25 | def INPUT_TYPES(s):
26 | return {"required": { "model": ("MODEL",),
27 | "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
28 | "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
29 | "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
30 | "scale_depth": ("BOOLEAN", {"default": False}),
31 | }}
32 | RETURN_TYPES = ("MODEL",)
33 | FUNCTION = "patch"
34 |
35 | CATEGORY = "model_patches/unet"
36 |
37 | def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
38 | model_channels = model.model.model_config.unet_config["model_channels"]
39 |
40 | latent_tile_size = max(32, tile_size) // 8
41 | self.temp = None
42 |
43 | def hypertile_in(q, k, v, extra_options):
44 | model_chans = q.shape[-2]
45 | orig_shape = extra_options['original_shape']
46 | apply_to = []
47 | for i in range(max_depth + 1):
48 | apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))
49 |
50 | if model_chans in apply_to:
51 | shape = extra_options["original_shape"]
52 | aspect_ratio = shape[-1] / shape[-2]
53 |
54 | hw = q.size(1)
55 | h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
56 |
57 | factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
58 | nh = random_divisor(h, latent_tile_size * factor, swap_size)
59 | nw = random_divisor(w, latent_tile_size * factor, swap_size)
60 |
61 | if nh * nw > 1:
62 | q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
63 | self.temp = (nh, nw, h, w)
64 | return q, k, v
65 |
66 | return q, k, v
67 | def hypertile_out(out, extra_options):
68 | if self.temp is not None:
69 | nh, nw, h, w = self.temp
70 | self.temp = None
71 | out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
72 | out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
73 | return out
74 |
75 |
76 | m = model.clone()
77 | m.set_model_attn1_patch(hypertile_in)
78 | m.set_model_attn1_output_patch(hypertile_out)
79 | return (m, )
80 |
81 | NODE_CLASS_MAPPINGS = {
82 | "HyperTile": HyperTile,
83 | }
84 |
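85 | if __name__ == "__main__":
86 |     # Minimal usage sketch with illustrative values: random_divisor picks one
87 |     # of the first max_options divisors of `value` that are >= min_value and
88 |     # returns the matching quotient, so the result always divides `value`.
89 |     for _ in range(8):
90 |         n = random_divisor(64, 8, max_options=3)
91 |         assert 64 % n == 0 and n <= 64 // 8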
--------------------------------------------------------------------------------
/comfy/ldm/modules/ema.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 |
5 | class LitEma(nn.Module):
6 |     def __init__(self, model, decay=0.9999, use_num_updates=True):
7 | super().__init__()
8 | if decay < 0.0 or decay > 1.0:
9 | raise ValueError('Decay must be between 0 and 1')
10 |
11 | self.m_name2s_name = {}
12 | self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13 |         self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_updates
14 | else torch.tensor(-1, dtype=torch.int))
15 |
16 | for name, p in model.named_parameters():
17 | if p.requires_grad:
18 | # remove as '.'-character is not allowed in buffers
19 | s_name = name.replace('.', '')
20 | self.m_name2s_name.update({name: s_name})
21 | self.register_buffer(s_name, p.clone().detach().data)
22 |
23 | self.collected_params = []
24 |
25 | def reset_num_updates(self):
26 | del self.num_updates
27 | self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
28 |
29 | def forward(self, model):
30 | decay = self.decay
31 |
32 | if self.num_updates >= 0:
33 | self.num_updates += 1
34 | decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
35 |
36 | one_minus_decay = 1.0 - decay
37 |
38 | with torch.no_grad():
39 | m_param = dict(model.named_parameters())
40 | shadow_params = dict(self.named_buffers())
41 |
42 | for key in m_param:
43 | if m_param[key].requires_grad:
44 | sname = self.m_name2s_name[key]
45 | shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
46 | shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
47 | else:
48 |                     assert key not in self.m_name2s_name
49 |
50 | def copy_to(self, model):
51 | m_param = dict(model.named_parameters())
52 | shadow_params = dict(self.named_buffers())
53 | for key in m_param:
54 | if m_param[key].requires_grad:
55 | m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
56 | else:
57 |                 assert key not in self.m_name2s_name
58 |
59 | def store(self, parameters):
60 | """
61 | Save the current parameters for restoring later.
62 | Args:
63 | parameters: Iterable of `torch.nn.Parameter`; the parameters to be
64 | temporarily stored.
65 | """
66 | self.collected_params = [param.clone() for param in parameters]
67 |
68 | def restore(self, parameters):
69 | """
70 | Restore the parameters stored with the `store` method.
71 | Useful to validate the model with EMA parameters without affecting the
72 | original optimization process. Store the parameters before the
73 | `copy_to` method. After validation (or model saving), use this to
74 | restore the former parameters.
75 | Args:
76 | parameters: Iterable of `torch.nn.Parameter`; the parameters to be
77 | updated with the stored parameters.
78 | """
79 | for c_param, param in zip(self.collected_params, parameters):
80 | param.data.copy_(c_param.data)
81 |
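82 | if __name__ == "__main__":
83 |     # Minimal sketch of the store/copy_to/restore protocol documented above
84 |     # (toy model; not how a trainer actually wires this up).
85 |     model = nn.Linear(4, 4)
86 |     ema = LitEma(model)
87 |     ema(model)                       # one EMA update after an optimizer step
88 |     ema.store(model.parameters())    # stash the live weights
89 |     ema.copy_to(model)               # evaluate with the EMA weights
90 |     ema.restore(model.parameters())  # put the live weights back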
--------------------------------------------------------------------------------
/comfy/text_encoders/flux.py:
--------------------------------------------------------------------------------
1 | from comfy import sd1_clip
2 | import comfy.text_encoders.t5
3 | import comfy.model_management
4 | from transformers import T5TokenizerFast
5 | import torch
6 | import os
7 |
8 | class T5XXLModel(sd1_clip.SDClipModel):
9 | def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
10 | textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json")
11 | super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5)
12 |
13 | class T5XXLTokenizer(sd1_clip.SDTokenizer):
14 | def __init__(self, embedding_directory=None, tokenizer_data={}):
15 | tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
16 | super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl', tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False, max_length=99999999, min_length=256)
17 |
18 |
19 | class FluxTokenizer:
20 | def __init__(self, embedding_directory=None, tokenizer_data={}):
21 | self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
22 | self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory)
23 |
24 | def tokenize_with_weights(self, text:str, return_word_ids=False):
25 | out = {}
26 | out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids)
27 | out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids)
28 | return out
29 |
30 | def untokenize(self, token_weight_pair):
31 | return self.clip_l.untokenize(token_weight_pair)
32 |
33 | def state_dict(self):
34 | return {}
35 |
36 |
37 | class FluxClipModel(torch.nn.Module):
38 | def __init__(self, dtype_t5=None, device="cpu", dtype=None):
39 | super().__init__()
40 | dtype_t5 = comfy.model_management.pick_weight_dtype(dtype_t5, dtype, device)
41 | self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False)
42 | self.t5xxl = T5XXLModel(device=device, dtype=dtype_t5)
43 | self.dtypes = set([dtype, dtype_t5])
44 |
45 | def set_clip_options(self, options):
46 | self.clip_l.set_clip_options(options)
47 | self.t5xxl.set_clip_options(options)
48 |
49 | def reset_clip_options(self):
50 | self.clip_l.reset_clip_options()
51 | self.t5xxl.reset_clip_options()
52 |
53 | def encode_token_weights(self, token_weight_pairs):
54 | token_weight_pairs_l = token_weight_pairs["l"]
55 | token_weight_pairs_t5 = token_weight_pairs["t5xxl"]
56 |
57 | t5_out, t5_pooled = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
58 | l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l)
59 |         return t5_out, l_pooled  # Flux conditioning: T5 token embeddings plus the CLIP-L pooled vector
60 |
61 | def load_sd(self, sd):
62 | if "text_model.encoder.layers.1.mlp.fc1.weight" in sd:
63 | return self.clip_l.load_sd(sd)
64 | else:
65 | return self.t5xxl.load_sd(sd)
66 |
67 | def flux_clip(dtype_t5=None):
68 | class FluxClipModel_(FluxClipModel):
69 | def __init__(self, device="cpu", dtype=None):
70 | super().__init__(dtype_t5=dtype_t5, device=device, dtype=dtype)
71 | return FluxClipModel_
72 |
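73 | if __name__ == "__main__":
74 |     # Minimal sketch: flux_clip() bakes dtype_t5 into a subclass so loaders
75 |     # can keep the usual (device, dtype) constructor signature.
76 |     FluxClip = flux_clip(dtype_t5=torch.float16)
77 |     assert issubclass(FluxClip, FluxClipModel)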
--------------------------------------------------------------------------------
/web/extensions/core/slotDefaults.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | import { ComfyWidgets } from "../../scripts/widgets.js";
3 | // Adds defaults for quickly adding nodes with middle click on the input/output
4 |
5 | app.registerExtension({
6 | name: "Comfy.SlotDefaults",
7 | suggestionsNumber: null,
8 | init() {
9 | LiteGraph.search_filter_enabled = true;
10 | LiteGraph.middle_click_slot_add_default_node = true;
11 | this.suggestionsNumber = app.ui.settings.addSetting({
12 | id: "Comfy.NodeSuggestions.number",
13 | name: "Number of nodes suggestions",
14 | type: "slider",
15 | attrs: {
16 | min: 1,
17 | max: 100,
18 | step: 1,
19 | },
20 | defaultValue: 5,
21 | onChange: (newVal, oldVal) => {
22 | this.setDefaults(newVal);
23 | }
24 | });
25 | },
26 | slot_types_default_out: {},
27 | slot_types_default_in: {},
28 | async beforeRegisterNodeDef(nodeType, nodeData, app) {
29 | var nodeId = nodeData.name;
30 | var inputs = [];
31 |         inputs = nodeData["input"]["required"]; // only show required inputs to reduce the mess; creating a node from its optional inputs makes little sense
32 | for (const inputKey in inputs) {
33 | var input = (inputs[inputKey]);
34 | if (typeof input[0] !== "string") continue;
35 |
36 | var type = input[0]
37 | if (type in ComfyWidgets) {
38 | var customProperties = input[1]
39 | if (!(customProperties?.forceInput)) continue; //ignore widgets that don't force input
40 | }
41 |
42 | if (!(type in this.slot_types_default_out)) {
43 | this.slot_types_default_out[type] = ["Reroute"];
44 | }
45 | if (this.slot_types_default_out[type].includes(nodeId)) continue;
46 | this.slot_types_default_out[type].push(nodeId);
47 |
48 | // Input types have to be stored as lower case
49 | // Store each node that can handle this input type
50 | const lowerType = type.toLocaleLowerCase();
51 | if (!(lowerType in LiteGraph.registered_slot_in_types)) {
52 | LiteGraph.registered_slot_in_types[lowerType] = { nodes: [] };
53 | }
54 | LiteGraph.registered_slot_in_types[lowerType].nodes.push(nodeType.comfyClass);
55 | }
56 |
57 | var outputs = nodeData["output"];
58 | for (const key in outputs) {
59 | var type = outputs[key];
60 | if (!(type in this.slot_types_default_in)) {
61 | this.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
62 | }
63 |
64 | this.slot_types_default_in[type].push(nodeId);
65 |
66 | // Store each node that can handle this output type
67 | if (!(type in LiteGraph.registered_slot_out_types)) {
68 | LiteGraph.registered_slot_out_types[type] = { nodes: [] };
69 | }
70 | LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass);
71 |
72 | if(!LiteGraph.slot_types_out.includes(type)) {
73 | LiteGraph.slot_types_out.push(type);
74 | }
75 | }
76 | var maxNum = this.suggestionsNumber.value;
77 | this.setDefaults(maxNum);
78 | },
79 | setDefaults(maxNum) {
80 |
81 | LiteGraph.slot_types_default_out = {};
82 | LiteGraph.slot_types_default_in = {};
83 |
84 | for (const type in this.slot_types_default_out) {
85 | LiteGraph.slot_types_default_out[type] = this.slot_types_default_out[type].slice(0, maxNum);
86 | }
87 | for (const type in this.slot_types_default_in) {
88 | LiteGraph.slot_types_default_in[type] = this.slot_types_default_in[type].slice(0, maxNum);
89 | }
90 | }
91 | });
92 |
--------------------------------------------------------------------------------
/comfy/taesd/taesd.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Tiny AutoEncoder for Stable Diffusion
4 | (DNN for encoding / decoding SD's latent space)
5 | """
6 | import torch
7 | import torch.nn as nn
8 |
9 | import comfy.utils
10 | import comfy.ops
11 |
12 | def conv(n_in, n_out, **kwargs):
13 | return comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
14 |
15 | class Clamp(nn.Module):
16 | def forward(self, x):
17 | return torch.tanh(x / 3) * 3
18 |
19 | class Block(nn.Module):
20 | def __init__(self, n_in, n_out):
21 | super().__init__()
22 | self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
23 | self.skip = comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
24 | self.fuse = nn.ReLU()
25 | def forward(self, x):
26 | return self.fuse(self.conv(x) + self.skip(x))
27 |
28 | def Encoder(latent_channels=4):
29 | return nn.Sequential(
30 | conv(3, 64), Block(64, 64),
31 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
32 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
33 | conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
34 | conv(64, latent_channels),
35 | )
36 |
37 |
38 | def Decoder(latent_channels=4):
39 | return nn.Sequential(
40 | Clamp(), conv(latent_channels, 64), nn.ReLU(),
41 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
42 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
43 | Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
44 | Block(64, 64), conv(64, 3),
45 | )
46 |
47 | class TAESD(nn.Module):
48 | latent_magnitude = 3
49 | latent_shift = 0.5
50 |
51 | def __init__(self, encoder_path=None, decoder_path=None, latent_channels=4):
52 | """Initialize pretrained TAESD on the given device from the given checkpoints."""
53 | super().__init__()
54 | self.taesd_encoder = Encoder(latent_channels=latent_channels)
55 | self.taesd_decoder = Decoder(latent_channels=latent_channels)
56 | self.vae_scale = torch.nn.Parameter(torch.tensor(1.0))
57 | self.vae_shift = torch.nn.Parameter(torch.tensor(0.0))
58 | if encoder_path is not None:
59 | self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True))
60 | if decoder_path is not None:
61 | self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True))
62 |
63 | @staticmethod
64 | def scale_latents(x):
65 | """raw latents -> [0, 1]"""
66 | return x.div(2 * TAESD.latent_magnitude).add(TAESD.latent_shift).clamp(0, 1)
67 |
68 | @staticmethod
69 | def unscale_latents(x):
70 | """[0, 1] -> raw latents"""
71 | return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)
72 |
73 | def decode(self, x):
74 | x_sample = self.taesd_decoder((x - self.vae_shift) * self.vae_scale)
75 | x_sample = x_sample.sub(0.5).mul(2)
76 | return x_sample
77 |
78 | def encode(self, x):
79 | return (self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale) + self.vae_shift
80 |
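81 | if __name__ == "__main__":
82 |     # Minimal shape sketch with untrained weights: images in [-1, 1] map to
83 |     # latents at 1/8 resolution, and scale_latents clamps previews into [0, 1].
84 |     taesd = TAESD()
85 |     img = torch.rand(1, 3, 64, 64) * 2 - 1
86 |     lat = taesd.encode(img)
87 |     assert lat.shape == (1, 4, 8, 8)
88 |     assert TAESD.scale_latents(lat).max() <= 1
89 |     assert taesd.decode(lat).shape == (1, 3, 64, 64)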
--------------------------------------------------------------------------------
/web/scripts/ui/userSelection.css:
--------------------------------------------------------------------------------
1 | .comfy-user-selection {
2 | width: 100vw;
3 | height: 100vh;
4 | position: absolute;
5 | top: 0;
6 | left: 0;
7 | z-index: 999;
8 | display: flex;
9 | align-items: center;
10 | justify-content: center;
11 | font-family: sans-serif;
12 | background: linear-gradient(var(--tr-even-bg-color), var(--tr-odd-bg-color));
13 | }
14 |
15 | .comfy-user-selection-inner {
16 | background: var(--comfy-menu-bg);
17 | margin-top: -30vh;
18 | padding: 20px 40px;
19 | border-radius: 10px;
20 | min-width: 365px;
21 | position: relative;
22 | box-shadow: 0 0 20px rgba(0, 0, 0, 0.3);
23 | }
24 |
25 | .comfy-user-selection-inner form {
26 | width: 100%;
27 | display: flex;
28 | flex-direction: column;
29 | align-items: center;
30 | }
31 |
32 | .comfy-user-selection-inner h1 {
33 | margin: 10px 0 30px 0;
34 | font-weight: normal;
35 | }
36 |
37 | .comfy-user-selection-inner label {
38 | display: flex;
39 | flex-direction: column;
40 | width: 100%;
41 | }
42 |
43 | .comfy-user-selection input,
44 | .comfy-user-selection select {
45 | background-color: var(--comfy-input-bg);
46 | color: var(--input-text);
47 | border: 0;
48 | border-radius: 5px;
49 | padding: 5px;
50 | margin-top: 10px;
51 | }
52 |
53 | .comfy-user-selection input::placeholder {
54 | color: var(--descrip-text);
55 | opacity: 1;
56 | }
57 |
58 | .comfy-user-existing {
59 | width: 100%;
60 | }
61 |
62 | .no-users .comfy-user-existing {
63 | display: none;
64 | }
65 |
66 | .comfy-user-selection-inner .or-separator {
67 | margin: 10px 0;
68 | padding: 10px;
69 | display: block;
70 | text-align: center;
71 | width: 100%;
72 | color: var(--descrip-text);
73 | }
74 |
75 | .comfy-user-selection-inner .or-separator {
76 | overflow: hidden;
77 | text-align: center;
78 | margin-left: -10px;
79 | }
80 |
81 | .comfy-user-selection-inner .or-separator::before,
82 | .comfy-user-selection-inner .or-separator::after {
83 | content: "";
84 | background-color: var(--border-color);
85 | position: relative;
86 | height: 1px;
87 | vertical-align: middle;
88 | display: inline-block;
89 | width: calc(50% - 20px);
90 | top: -1px;
91 | }
92 |
93 | .comfy-user-selection-inner .or-separator::before {
94 | right: 10px;
95 | margin-left: -50%;
96 | }
97 |
98 | .comfy-user-selection-inner .or-separator::after {
99 | left: 10px;
100 | margin-right: -50%;
101 | }
102 |
103 | .comfy-user-selection-inner section {
104 | width: 100%;
105 | padding: 10px;
106 | margin: -10px;
107 | transition: background-color 0.2s;
108 | }
109 |
110 | .comfy-user-selection-inner section.selected {
111 | background: var(--border-color);
112 | border-radius: 5px;
113 | }
114 |
115 | .comfy-user-selection-inner footer {
116 | display: flex;
117 | flex-direction: column;
118 | align-items: center;
119 | margin-top: 20px;
120 | }
121 |
122 | .comfy-user-selection-inner .comfy-user-error {
123 | color: var(--error-text);
124 | margin-bottom: 10px;
125 | }
126 |
127 | .comfy-user-button-next {
128 | font-size: 16px;
129 | padding: 6px 10px;
130 | width: 100px;
131 | display: flex;
132 | gap: 5px;
133 | align-items: center;
134 | justify-content: center;
135 | }
--------------------------------------------------------------------------------
/tests/inference/graphs/default_graph_sdxl1_0.json:
--------------------------------------------------------------------------------
1 | {
2 | "4": {
3 | "inputs": {
4 | "ckpt_name": "sd_xl_base_1.0.safetensors"
5 | },
6 | "class_type": "CheckpointLoaderSimple"
7 | },
8 | "5": {
9 | "inputs": {
10 | "width": 1024,
11 | "height": 1024,
12 | "batch_size": 1
13 | },
14 | "class_type": "EmptyLatentImage"
15 | },
16 | "6": {
17 | "inputs": {
18 | "text": "a photo of a cat",
19 | "clip": [
20 | "4",
21 | 1
22 | ]
23 | },
24 | "class_type": "CLIPTextEncode"
25 | },
26 | "10": {
27 | "inputs": {
28 | "add_noise": "enable",
29 | "noise_seed": 42,
30 | "steps": 20,
31 | "cfg": 7.5,
32 | "sampler_name": "euler",
33 | "scheduler": "normal",
34 | "start_at_step": 0,
35 | "end_at_step": 32,
36 | "return_with_leftover_noise": "enable",
37 | "model": [
38 | "4",
39 | 0
40 | ],
41 | "positive": [
42 | "6",
43 | 0
44 | ],
45 | "negative": [
46 | "15",
47 | 0
48 | ],
49 | "latent_image": [
50 | "5",
51 | 0
52 | ]
53 | },
54 | "class_type": "KSamplerAdvanced"
55 | },
56 | "12": {
57 | "inputs": {
58 | "samples": [
59 | "14",
60 | 0
61 | ],
62 | "vae": [
63 | "4",
64 | 2
65 | ]
66 | },
67 | "class_type": "VAEDecode"
68 | },
69 | "13": {
70 | "inputs": {
71 | "filename_prefix": "test_inference",
72 | "images": [
73 | "12",
74 | 0
75 | ]
76 | },
77 | "class_type": "SaveImage"
78 | },
79 | "14": {
80 | "inputs": {
81 | "add_noise": "disable",
82 | "noise_seed": 42,
83 | "steps": 20,
84 | "cfg": 7.5,
85 | "sampler_name": "euler",
86 | "scheduler": "normal",
87 | "start_at_step": 32,
88 | "end_at_step": 10000,
89 | "return_with_leftover_noise": "disable",
90 | "model": [
91 | "16",
92 | 0
93 | ],
94 | "positive": [
95 | "17",
96 | 0
97 | ],
98 | "negative": [
99 | "20",
100 | 0
101 | ],
102 | "latent_image": [
103 | "10",
104 | 0
105 | ]
106 | },
107 | "class_type": "KSamplerAdvanced"
108 | },
109 | "15": {
110 | "inputs": {
111 | "conditioning": [
112 | "6",
113 | 0
114 | ]
115 | },
116 | "class_type": "ConditioningZeroOut"
117 | },
118 | "16": {
119 | "inputs": {
120 | "ckpt_name": "sd_xl_refiner_1.0.safetensors"
121 | },
122 | "class_type": "CheckpointLoaderSimple"
123 | },
124 | "17": {
125 | "inputs": {
126 | "text": "a photo of a cat",
127 | "clip": [
128 | "16",
129 | 1
130 | ]
131 | },
132 | "class_type": "CLIPTextEncode"
133 | },
134 | "20": {
135 | "inputs": {
136 | "text": "",
137 | "clip": [
138 | "16",
139 | 1
140 | ]
141 | },
142 | "class_type": "CLIPTextEncode"
143 | }
144 | }
--------------------------------------------------------------------------------