├── py ├── modules │ ├── briaai │ │ └── __init__.py │ ├── dit │ │ ├── pixArt │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ └── utils.py │ │ │ └── config.py │ │ ├── __init__.py │ │ ├── utils.py │ │ └── config.py │ ├── kolors │ │ ├── __init__.py │ │ ├── chatglm │ │ │ ├── __init__.py │ │ │ ├── tokenizer │ │ │ │ ├── vocab.txt │ │ │ │ ├── tokenizer.model │ │ │ │ └── tokenizer_config.json │ │ │ ├── config_chatglm.json │ │ │ └── configuration_chatglm.py │ │ ├── clip_vision_config_vitl_336.json │ │ ├── model_patch.py │ │ └── text_encode.py │ ├── human_parsing │ │ ├── __init__.py │ │ ├── simple_extractor_dataset.py │ │ ├── run_parsing.py │ │ └── transforms.py │ ├── ipadapter │ │ ├── flux │ │ │ ├── __init__.py │ │ │ └── math.py │ │ ├── sd3 │ │ │ └── __init__.py │ │ ├── attention_processor.py │ │ └── utils.py │ └── brushnet │ │ ├── config │ │ ├── powerpaint.json │ │ ├── brushnet.json │ │ └── brushnet_xl.json │ │ └── model_patch.py ├── __init__.py ├── libs │ ├── easing.py │ ├── api │ │ └── fluxai.py │ ├── messages.py │ ├── model.py │ ├── log.py │ ├── cache.py │ ├── conditioning.py │ ├── controlnet.py │ ├── add_resources.py │ ├── colorfix.py │ ├── styleAlign.py │ └── chooser.py ├── nodes │ ├── seed.py │ ├── util.py │ └── api.py └── server.py ├── resources ├── OpenSans-Medium.ttf ├── mmb-preset.custom.txt.example └── mmb-preset.txt ├── .github ├── FUNDING.yml └── workflows │ └── publish.yml ├── repair_dependency_list.txt ├── .gitmodules ├── requirements.txt ├── .gitignore ├── web_version ├── v1 │ ├── css │ │ ├── index.css │ │ ├── theme.css │ │ ├── contextmenu.css │ │ ├── chooser.css │ │ ├── groupmap.css │ │ ├── dropdown.css │ │ ├── sliderControl.css │ │ ├── account.css │ │ ├── toast.css │ │ ├── easy.css │ │ ├── selector.css │ │ └── toolbar.css │ └── js │ │ ├── image_chooser │ │ ├── messaging.js │ │ ├── state.js │ │ ├── preview.js │ │ └── prompt.js │ │ ├── easy │ │ ├── easySaveImage.js │ │ └── easySeg.js │ │ ├── seed.js │ │ ├── image.js │ │ ├── bookmark.js │ │ └── common │ │ └── i18n.js └── v2 │ └── assets │ └── vueuse-CqzKat4r.js ├── pyproject.toml ├── locales ├── ko │ ├── main.json │ └── settings.json ├── ja │ ├── main.json │ └── settings.json ├── zh │ ├── main.json │ └── settings.json ├── en │ ├── main.json │ └── settings.json ├── ru │ ├── main.json │ └── settings.json └── fr │ ├── main.json │ └── settings.json ├── install.bat ├── install.sh ├── tools ├── convert_locale_format.py └── combine_autocomplete.py ├── prestartup_script.py └── __init__.py /py/modules/briaai/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/dit/pixArt/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/kolors/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/human_parsing/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/ipadapter/flux/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/ipadapter/sd3/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/dit/pixArt/models/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /py/modules/dit/__init__.py: -------------------------------------------------------------------------------- 1 | #credit to city96 for this module 2 | #from https://github.com/city96/ComfyUI_ExtraModels/ -------------------------------------------------------------------------------- /resources/OpenSans-Medium.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/HEAD/resources/OpenSans-Medium.ttf -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | custom: ["https://space.bilibili.com/1840885116"] -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/tokenizer/vocab.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/HEAD/py/modules/kolors/chatglm/tokenizer/vocab.txt -------------------------------------------------------------------------------- /repair_dependency_list.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.19.0 2 | diffusers>=0.32.2 3 | huggingface_hub>=0.25.0 4 | transformers>=4.48.0 5 | peft>=0.14.0 6 | protobuf>=4.25.3 7 | -------------------------------------------------------------------------------- /py/__init__.py: -------------------------------------------------------------------------------- 1 | from .libs.loader import easyLoader 2 | from .libs.sampler import easySampler 3 | 4 | sampler = easySampler() 5 | easyCache = easyLoader() 6 | 7 | -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/tokenizer/tokenizer.model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yolain/ComfyUI-Easy-Use/HEAD/py/modules/kolors/chatglm/tokenizer/tokenizer.model -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ComfyUI-Easy-Use-Frontend"] 2 | path = ComfyUI-Easy-Use-Frontend 3 | url = https://github.com/yolain/ComfyUI-Easy-Use-Frontend.git 4 | branch = main -------------------------------------------------------------------------------- /resources/mmb-preset.custom.txt.example: -------------------------------------------------------------------------------- 1 | MMB-ALL:1,1,1 2 | MMB-IN:1,0,0 3 | MMB-MID:0,1,0 4 | MMB-OUT:0,0,1 5 | MMB-INMID:1,1,0 6 | MMB-INOUT:1,0,1 7 | MMB-MIDOUT:0,1,1 8 | MMB-NONE:0,0,0 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | 
diffusers 2 | accelerate 3 | clip_interrogator>=0.6.0 4 | lark 5 | onnxruntime 6 | opencv-python-headless 7 | sentencepiece 8 | spandrel 9 | matplotlib 10 | peft 11 | -------------------------------------------------------------------------------- /resources/mmb-preset.txt: -------------------------------------------------------------------------------- 1 | MMB-ALL:1,1,1 2 | MMB-IN:1,0,0 3 | MMB-MID:0,1,0 4 | MMB-OUT:0,0,1 5 | MMB-INMID:1,1,0 6 | MMB-INOUT:1,0,1 7 | MMB-MIDOUT:0,1,1 8 | MMB-NONE:0,0,0 9 | @MMBN-FULL-TEST:27 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | .DS_Store 3 | *.cache 4 | *.ini 5 | *.bak 6 | wildcards/** 7 | styles/** 8 | workflow/** 9 | autocomplete/** 10 | web_beta/** 11 | web_version/dev/** 12 | docs/** 13 | .vscode/ 14 | .idea/ 15 | mmb-preset.custom.txt 16 | config.yaml 17 | node.tar.gz 18 | 19 | .cursorrules 20 | tools/ComfyUI-Easy-Use.json -------------------------------------------------------------------------------- /web_version/v1/css/index.css: -------------------------------------------------------------------------------- 1 | @import "theme.css"; 2 | @import "dropdown.css"; 3 | @import "selector.css"; 4 | @import "groupmap.css"; 5 | @import "contextmenu.css"; 6 | @import "modelinfo.css"; 7 | @import "toast.css"; 8 | @import "account.css"; 9 | @import "chooser.css"; 10 | @import "toolbar.css"; 11 | @import "sliderControl.css"; -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/tokenizer/tokenizer_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name_or_path": "THUDM/chatglm3-6b-base", 3 | "remove_space": false, 4 | "do_lower_case": false, 5 | "tokenizer_class": "ChatGLMTokenizer", 6 | "auto_map": { 7 | "AutoTokenizer": [ 8 | "tokenization_chatglm.ChatGLMTokenizer", 9 | null 10 | ] 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /web_version/v1/css/theme.css: -------------------------------------------------------------------------------- 1 | :root { 2 | /*--theme-color:#3f3eed;*/ 3 | /*--theme-color-light: #008ecb;*/ 4 | --theme-color:#236692; 5 | --theme-color-light: #3485bb; 6 | --success-color: #52c41a; 7 | --error-color: #ff4d4f; 8 | --warning-color: #faad14; 9 | --font-family: Inter, -apple-system, BlinkMacSystemFont, Helvetica Neue, sans-serif; 10 | } 11 | -------------------------------------------------------------------------------- /web_version/v1/css/contextmenu.css: -------------------------------------------------------------------------------- 1 | .easyuse-model{ 2 | position:relative; 3 | } 4 | .easyuse-model:hover img{ 5 | display: block; 6 | opacity: 1; 7 | } 8 | .easyuse-model img{ 9 | position: absolute; 10 | z-index:1; 11 | right:-155px; 12 | top:0; 13 | width:150px; 14 | height:auto; 15 | display: none; 16 | filter:brightness(70%); 17 | -webkit-filter: brightness(70%); 18 | opacity: 0; 19 | transition:all 0.5s ease-in-out; 20 | } -------------------------------------------------------------------------------- /py/modules/kolors/clip_vision_config_vitl_336.json: -------------------------------------------------------------------------------- 1 | { 2 | "attention_dropout": 0.0, 3 | "dropout": 0.0, 4 | "hidden_act": "quick_gelu", 5 | "hidden_size": 1024, 6 | "image_size": 336, 7 | "initializer_factor": 1.0, 8 | 
"initializer_range": 0.02, 9 | "intermediate_size": 4096, 10 | "layer_norm_eps": 1e-05, 11 | "model_type": "clip_vision_model", 12 | "num_attention_heads": 16, 13 | "num_channels": 3, 14 | "num_hidden_layers": 24, 15 | "patch_size": 14, 16 | "projection_dim": 768, 17 | "torch_dtype": "float32" 18 | } 19 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "comfyui-easy-use" 3 | description = "To enhance the usability of ComfyUI, optimizations and integrations have been implemented for several commonly used nodes." 4 | version = "1.3.4" 5 | license = { file = "LICENSE" } 6 | dependencies = ["diffusers", "accelerate", "clip_interrogator>=0.6.0", "sentencepiece", "lark", "onnxruntime", "spandrel", "opencv-python-headless", "matplotlib", "peft"] 7 | 8 | [project.urls] 9 | Repository = "https://github.com/yolain/ComfyUI-Easy-Use" 10 | # Used by Comfy Registry https://comfyregistry.org 11 | 12 | [tool.comfy] 13 | PublisherId = "yolain" 14 | DisplayName = "ComfyUI-Easy-Use" 15 | Icon = "" 16 | -------------------------------------------------------------------------------- /locales/ko/main.json: -------------------------------------------------------------------------------- 1 | { 2 | "settingsCategories": { 3 | "Hotkeys": "단축키", 4 | "Nodes": "노드", 5 | "NodesMap": "노드 맵" 6 | }, 7 | "nodeCategories": { 8 | "Util": "유틸", 9 | "Seed": "시드", 10 | "Prompt": "프롬프트", 11 | "Loaders": "로더", 12 | "Adapter": "어댑터", 13 | "Inpaint": "인페인트", 14 | "PreSampling": "사전 샘플링", 15 | "Sampler": "샘플러", 16 | "Fix": "픽스", 17 | "Pipe": "파이프", 18 | "XY Inputs": "XY 입력", 19 | "Image": "이미지", 20 | "Segmentation": "분할", 21 | "\uD83D\uDEAB Deprecated": "\uD83D\uDEAB 사용 중단", 22 | "Type": "유형", 23 | "Math": "수학", 24 | "Switch": "스위치", 25 | "Index Switch": "인덱스 스위치", 26 | "While Loop": "while 루프", 27 | "For Loop": "for 루프", 28 | "LoadImage": "이미지 로드" 29 | } 30 | } -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | permissions: 11 | issues: write 12 | 13 | jobs: 14 | publish-node: 15 | name: Publish Custom Node to registry 16 | runs-on: ubuntu-latest 17 | if: ${{ github.repository_owner == 'yolain' }} 18 | steps: 19 | - name: Check out code 20 | uses: actions/checkout@v4 21 | - name: Publish Custom Node 22 | uses: Comfy-Org/publish-node-action@v1 23 | with: 24 | ## Add your own personal access token to your Github Repository secrets and reference it here. 
25 |         personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
26 | 
--------------------------------------------------------------------------------
/py/libs/easing.py:
--------------------------------------------------------------------------------
 1 | # Quadratic easing curves; plain module-level functions so EasingBase can call them directly.
 2 | def easyIn(t: float) -> float:
 3 |     return t*t
 4 | 
 5 | def easyOut(t: float) -> float:
 6 |     return -(t * (t - 2))
 7 | 
 8 | def easyInOut(t: float) -> float:
 9 |     if t < 0.5:
10 |         return 2*t*t
11 |     else:
12 |         return (-2*t*t) + (4*t) - 1
13 | 
14 | class EasingBase:
15 | 
16 |     def easing(self, t: float, function='linear') -> float:
17 |         if function == 'easyIn':
18 |             return easyIn(t)
19 |         elif function == 'easyOut':
20 |             return easyOut(t)
21 |         elif function == 'easyInOut':
22 |             return easyInOut(t)
23 |         else:
24 |             return t
25 | 
26 |     def ease(self, start, end, t) -> float:
27 |         # linear interpolation between start and end at eased position t
28 |         return end * t + start * (1 - t)
--------------------------------------------------------------------------------
/locales/ja/main.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "settingsCategories": {
 3 |     "Hotkeys": "ショートカットキー",
 4 |     "Nodes": "ノード",
 5 |     "NodesMap": "ノードマップ"
 6 |   },
 7 |   "nodeCategories": {
 8 |     "Util": "ユーティリティ",
 9 |     "Seed": "シード",
10 |     "Prompt": "プロンプト",
11 |     "Loaders": "ローダー",
12 |     "Adapter": "アダプター",
13 |     "Inpaint": "インペイント",
14 |     "PreSampling": "プリサンプリング",
15 |     "Sampler": "サンプラー",
16 |     "Fix": "フィックス",
17 |     "Pipe": "パイプ",
18 |     "XY Inputs": "XY入力",
19 |     "Image": "画像",
20 |     "Segmentation": "セグメンテーション",
21 |     "\uD83D\uDEAB Deprecated": "🚫 非推奨",
22 |     "Type": "タイプ",
23 |     "Math": "数学",
24 |     "Switch": "スイッチ",
25 |     "Index Switch": "インデックススイッチ",
26 |     "While Loop": "Whileループ",
27 |     "For Loop": "Forループ",
28 |     "LoadImage": "画像読み込み"
29 |   }
30 | }
--------------------------------------------------------------------------------
/locales/zh/main.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "settingsCategories": {
 3 |     "Hotkeys": "快捷键",
 4 |     "Nodes": "节点相关",
 5 |     "NodesMap": "管理节点组",
 6 |     "StylesSelector": "样式选择器"
 7 |   },
 8 |   "nodeCategories": {
 9 |     "Util": "工具",
10 |     "Seed": "随机种",
11 |     "Prompt": "提示词",
12 |     "Loaders": "模型加载器",
13 |     "Adapter": "模型适配器",
14 |     "Inpaint": "内补重绘",
15 |     "PreSampling": "预采样参数",
16 |     "Sampler": "采样器",
17 |     "Fix": "修复相关",
18 |     "Pipe": "节点束",
19 |     "XY Inputs": "XY图表输入项",
20 |     "Image": "图像",
21 |     "Segmentation": "分割",
22 |     "Logic": "逻辑",
23 |     "\uD83D\uDEAB Deprecated": "\uD83D\uDEAB 已弃用",
24 |     "Type": "类型",
25 |     "Math": "数学计算",
26 |     "Switch": "开关",
27 |     "Index Switch": "索引开关",
28 |     "While Loop": "While循环",
29 |     "For Loop": "For循环",
30 |     "LoadImage": "加载图像"
31 |   }
32 | }
--------------------------------------------------------------------------------
/web_version/v1/css/chooser.css:
--------------------------------------------------------------------------------
 1 | .easyuse-chooser-dialog{
 2 |     max-width: 600px;
 3 | }
 4 | .easyuse-chooser-dialog-title{
 5 |     font-size: 18px;
 6 |     font-weight: 700;
 7 |     text-align: center;
 8 |     color:var(--input-text);
 9 |     margin:0;
10 | }
11 | .easyuse-chooser-dialog-images{
12 |     margin-top:10px;
13 |     display: flex;
14 |     flex-wrap: wrap;
15 |     width: 100%;
16 |     box-sizing: border-box;
17 | }
18 | .easyuse-chooser-dialog-images img{
19 |     width: 50%;
20 |     height: auto;
21 |     cursor: pointer;
22 |     box-sizing: border-box;
23 |     filter:brightness(80%);
24 | }
25 | .easyuse-chooser-dialog-images img:hover{
26 |     filter:brightness(100%);
27 | }
28 | .easyuse-chooser-dialog-images img.selected{
29 | 
border: 4px solid var(--success-color); 30 | } 31 | 32 | .easyuse-chooser-hidden{ 33 | display: none; 34 | height:0; 35 | } -------------------------------------------------------------------------------- /locales/en/main.json: -------------------------------------------------------------------------------- 1 | { 2 | "settingsCategories": { 3 | "Hotkeys": "Hotkeys", 4 | "Nodes": "Nodes", 5 | "NodesMap": "NodesMap", 6 | "StylesSelector": "StylesSelector" 7 | }, 8 | "nodeCategories": { 9 | "Util": "Util", 10 | "Seed": "Seed", 11 | "Prompt": "Prompt", 12 | "Loaders": "Loaders", 13 | "Adapter": "Adapter", 14 | "Inpaint": "Inpaint", 15 | "PreSampling": "PreSampling", 16 | "Sampler": "Sampler", 17 | "Fix": "Fix", 18 | "Pipe": "Pipe", 19 | "XY Inputs": "XY Inputs", 20 | "Image": "Image", 21 | "Segmentation": "Segmentation", 22 | "\uD83D\uDEAB Deprecated": "\uD83D\uDEAB Deprecated", 23 | "Type": "Type", 24 | "Math": "Math", 25 | "Switch": "Switch", 26 | "Index Switch": "Index Switch", 27 | "While Loop": "While Loop", 28 | "For Loop": "For Loop", 29 | "LoadImage": "Load Image" 30 | } 31 | } -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set "requirements_txt=%~dp0\requirements.txt" 4 | set "requirements_repair_txt=%~dp0\repair_dependency_list.txt" 5 | set "python_exec=..\..\..\python_embeded\python.exe" 6 | set "aki_python_exec=..\..\python\python.exe" 7 | 8 | echo Installing EasyUse Requirements... 9 | 10 | if exist "%python_exec%" ( 11 | echo Installing with ComfyUI Portable 12 | "%python_exec%" -s -m pip install -r "%requirements_txt%" 13 | )^ 14 | else if exist "%aki_python_exec%" ( 15 | echo Installing with ComfyUI Aki 16 | "%aki_python_exec%" -s -m pip install -r "%requirements_txt%" 17 | for /f "delims=" %%i in (%requirements_repair_txt%) do ( 18 | %aki_python_exec% -s -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple "%%i" 19 | ) 20 | )^ 21 | else ( 22 | echo Installing with system Python 23 | pip install -r "%requirements_txt%" 24 | ) 25 | 26 | pause -------------------------------------------------------------------------------- /locales/ru/main.json: -------------------------------------------------------------------------------- 1 | { 2 | "settingsCategories": { 3 | "Hotkeys": "Горячие клавиши", 4 | "Nodes": "Узлы", 5 | "NodesMap": "Карта узлов" 6 | }, 7 | "nodeCategories": { 8 | "Util": "Утилиты", 9 | "Seed": "Сид", 10 | "Prompt": "Подсказка", 11 | "Loaders": "Загрузчики", 12 | "Adapter": "Адаптер", 13 | "Inpaint": "Ретушь", 14 | "PreSampling": "Предвыборка", 15 | "Sampler": "Сэмплер", 16 | "Fix": "Исправление", 17 | "Pipe": "Конвейер", 18 | "XY Inputs": "Ввод XY", 19 | "Image": "Изображение", 20 | "Segmentation": "Сегментация", 21 | "\uD83D\uDEAB Deprecated": "\uD83D\uDEAB Устарело", 22 | "Type": "Тип", 23 | "Math": "Математика", 24 | "Switch": "Переключатель", 25 | "Index Switch": "Переключатель индексов", 26 | "While Loop": "Цикл while", 27 | "For Loop": "Цикл for", 28 | "LoadImage": "Загрузка изображения" 29 | } 30 | } -------------------------------------------------------------------------------- /locales/fr/main.json: -------------------------------------------------------------------------------- 1 | { 2 | "settingsCategories": { 3 | "Hotkeys": "Raccourcis", 4 | "Nodes": "Nœuds", 5 | "NodesMap": "Carte des nœuds" 6 | }, 7 | "nodeCategories": { 8 | "Util": "Utilitaire", 9 | "Seed": "Graine", 10 | "Prompt": "Prompt", 11 | 
"Loaders": "Chargeurs", 12 | "Adapter": "Adaptateur", 13 | "Inpaint": "Retouche", 14 | "PreSampling": "Pré-échantillonnage", 15 | "Sampler": "Échantillonneur", 16 | "Fix": "Correction", 17 | "Pipe": "Pipeline", 18 | "XY Inputs": "Entrées XY", 19 | "Image": "Image", 20 | "Segmentation": "Segmentation", 21 | "\uD83D\uDEAB Deprecated": "\uD83D\uDEAB Obsolète", 22 | "Type": "Type", 23 | "Math": "Mathématiques", 24 | "Switch": "Interrupteur", 25 | "Index Switch": "Interrupteur d'index", 26 | "While Loop": "Boucle While", 27 | "For Loop": "Boucle For", 28 | "LoadImage": "Charger l'image" 29 | } 30 | } -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | requirements_txt="$(dirname "$0")/requirements.txt" 4 | requirements_repair_txt="$(dirname "$0")/repair_dependency_list.txt" 5 | python_exec="../../../python_embeded/python.exe" 6 | aki_python_exec="../../python/python.exe" 7 | 8 | echo "Installing EasyUse Requirements..." 9 | 10 | if [ -f "$python_exec" ]; then 11 | echo "Installing with ComfyUI Portable" 12 | "$python_exec" -s -m pip install -r "$requirements_txt" 13 | elif [ -f "$aki_python_exec" ]; then 14 | echo "Installing with ComfyUI Aki" 15 | "$aki_python_exec" -s -m pip install -r "$requirements_txt" 16 | while IFS= read -r line; do 17 | "$aki_python_exec" -s -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple "$line" 18 | done < "$requirements_repair_txt" 19 | else 20 | echo "Installing with system Python" 21 | pip install -r "$requirements_txt" 22 | fi 23 | 24 | read -p "Press any key to continue..." -------------------------------------------------------------------------------- /web_version/v1/css/groupmap.css: -------------------------------------------------------------------------------- 1 | #easyuse_groups_map{ 2 | flex-direction: column; 3 | align-items: end; 4 | display:flex;position: absolute; 5 | top: 50px; left: 10px; width: 180px; 6 | border-radius:12px; 7 | min-height:100px; 8 | max-height:400px; 9 | color: var(--descrip-text); 10 | background-color: var(--comfy-menu-bg); 11 | padding: 10px 4px; 12 | border: 1px solid var(--border-color); 13 | z-index: 399; 14 | padding-top: 0; 15 | } 16 | #easyuse_groups_map .icon{ 17 | width: 12px; 18 | height:12px; 19 | } 20 | #easyuse_groups_map .closeBtn{ 21 | float: right; 22 | color: var(--input-text); 23 | border-radius:30px; 24 | background-color: var(--comfy-input-bg); 25 | border: 1px solid var(--border-color); 26 | cursor: pointer; 27 | aspect-ratio: 1 / 1; 28 | display: flex; 29 | justify-content: center; 30 | align-items: center; 31 | } 32 | #easyuse_groups_map .closeBtn:hover{ 33 | filter:brightness(120%); 34 | } -------------------------------------------------------------------------------- /web_version/v1/js/image_chooser/messaging.js: -------------------------------------------------------------------------------- 1 | import { api } from "../../../../scripts/api.js"; 2 | import { FlowState } from "./state.js"; 3 | 4 | function send_message_from_pausing_node(message) { 5 | const id = app.runningNodeId; 6 | send_message(id, message); 7 | } 8 | 9 | function send_message(id, message) { 10 | const body = new FormData(); 11 | body.append('message',message); 12 | body.append('id', id); 13 | api.fetchApi("/easyuse/image_chooser_message", { method: "POST", body, }); 14 | } 15 | 16 | function send_cancel() { 17 | send_message(-1,'__cancel__'); 18 | FlowState.cancelling = true; 19 
| api.interrupt(); 20 | FlowState.cancelling = false; 21 | } 22 | 23 | var skip_next = 0; 24 | function skip_next_restart_message() { skip_next += 1; } 25 | function send_onstart() { 26 | if (skip_next>0) { 27 | skip_next -= 1; 28 | return false; 29 | } 30 | send_message(-1,'__start__'); 31 | return true; 32 | } 33 | 34 | export { send_message_from_pausing_node, send_cancel, send_message, send_onstart, skip_next_restart_message } -------------------------------------------------------------------------------- /py/modules/dit/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from comfy import model_management 3 | 4 | def string_to_dtype(s="none", mode=None): 5 | s = s.lower().strip() 6 | if s in ["default", "as-is"]: 7 | return None 8 | elif s in ["auto", "auto (comfy)"]: 9 | if mode == "vae": 10 | return model_management.vae_device() 11 | elif mode == "text_encoder": 12 | return model_management.text_encoder_dtype() 13 | elif mode == "unet": 14 | return model_management.unet_dtype() 15 | else: 16 | raise NotImplementedError(f"Unknown dtype mode '{mode}'") 17 | elif s in ["none", "auto (hf)", "auto (hf/bnb)"]: 18 | return None 19 | elif s in ["fp32", "float32", "float"]: 20 | return torch.float32 21 | elif s in ["bf16", "bfloat16"]: 22 | return torch.bfloat16 23 | elif s in ["fp16", "float16", "half"]: 24 | return torch.float16 25 | elif "fp8" in s or "float8" in s: 26 | if "e5m2" in s: 27 | return torch.float8_e5m2 28 | elif "e4m3" in s: 29 | return torch.float8_e4m3fn 30 | else: 31 | raise NotImplementedError(f"Unknown 8bit dtype '{s}'") 32 | elif "bnb" in s: 33 | assert s in ["bnb8bit", "bnb4bit"], f"Unknown bnb mode '{s}'" 34 | return s 35 | elif s is None: 36 | return None 37 | else: 38 | raise NotImplementedError(f"Unknown dtype '{s}'") -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/config_chatglm.json: -------------------------------------------------------------------------------- 1 | { 2 | "_name_or_path": "THUDM/chatglm3-6b-base", 3 | "model_type": "chatglm", 4 | "architectures": [ 5 | "ChatGLMModel" 6 | ], 7 | "auto_map": { 8 | "AutoConfig": "configuration_chatglm.ChatGLMConfig", 9 | "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", 10 | "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration", 11 | "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration", 12 | "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification" 13 | }, 14 | "add_bias_linear": false, 15 | "add_qkv_bias": true, 16 | "apply_query_key_layer_scaling": true, 17 | "apply_residual_connection_post_layernorm": false, 18 | "attention_dropout": 0.0, 19 | "attention_softmax_in_fp32": true, 20 | "bias_dropout_fusion": true, 21 | "ffn_hidden_size": 13696, 22 | "fp32_residual_connection": false, 23 | "hidden_dropout": 0.0, 24 | "hidden_size": 4096, 25 | "kv_channels": 128, 26 | "layernorm_epsilon": 1e-05, 27 | "multi_query_attention": true, 28 | "multi_query_group_num": 2, 29 | "num_attention_heads": 32, 30 | "num_layers": 28, 31 | "original_rope": true, 32 | "padded_vocab_size": 65024, 33 | "post_layer_norm": true, 34 | "rmsnorm": true, 35 | "seq_length": 32768, 36 | "use_cache": true, 37 | "torch_dtype": "float16", 38 | "transformers_version": "4.30.2", 39 | "tie_word_embeddings": false, 40 | "eos_token_id": 2, 41 | "pad_token_id": 0 42 | } 
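A minimal sketch of how config_chatglm.json is typically consumed (an illustration, not repository code; the relative path below is assumed, and only the standard library is used — the assertions restate relationships already visible in the values above):

import json
from pathlib import Path

cfg = json.loads(Path("py/modules/kolors/chatglm/config_chatglm.json").read_text())
# per-head width times head count matches the hidden size: 128 * 32 == 4096
assert cfg["kv_channels"] * cfg["num_attention_heads"] == cfg["hidden_size"]
# multi-query attention: 32 query heads share 2 key/value groups
assert cfg["multi_query_attention"] and cfg["multi_query_group_num"] == 2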
-------------------------------------------------------------------------------- /web_version/v1/js/image_chooser/state.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../../scripts/app.js"; 2 | 3 | 4 | class HUD { 5 | constructor() { 6 | this.current_node_id = undefined; 7 | this.class_of_current_node = null; 8 | this.current_node_is_chooser = false; 9 | } 10 | 11 | update() { 12 | if (app.runningNodeId==this.current_node_id) return false; 13 | 14 | this.current_node_id = app.runningNodeId; 15 | 16 | if (this.current_node_id) { 17 | this.class_of_current_node = app.graph?._nodes_by_id[app.runningNodeId.toString()]?.comfyClass; 18 | this.current_node_is_chooser = this.class_of_current_node === "easy imageChooser" 19 | } else { 20 | this.class_of_current_node = undefined; 21 | this.current_node_is_chooser = false; 22 | } 23 | return true; 24 | } 25 | } 26 | 27 | const hud = new HUD(); 28 | 29 | 30 | class FlowState { 31 | constructor(){} 32 | static idle() { 33 | return (!app.runningNodeId); 34 | } 35 | static paused() { 36 | return true; 37 | } 38 | static paused_here(node_id) { 39 | return (FlowState.paused() && FlowState.here(node_id)) 40 | } 41 | static running() { 42 | return (!FlowState.idle()); 43 | } 44 | static here(node_id) { 45 | return (app.runningNodeId==node_id); 46 | } 47 | static state() { 48 | if (FlowState.paused()) return "Paused"; 49 | if (FlowState.running()) return "Running"; 50 | return "Idle"; 51 | } 52 | static cancelling = false; 53 | } 54 | 55 | export { hud, FlowState} -------------------------------------------------------------------------------- /py/modules/brushnet/config/powerpaint.json: -------------------------------------------------------------------------------- 1 | { 2 | "_class_name": "BrushNetModel", 3 | "_diffusers_version": "0.27.2", 4 | "act_fn": "silu", 5 | "addition_embed_type": null, 6 | "addition_embed_type_num_heads": 64, 7 | "addition_time_embed_dim": null, 8 | "attention_head_dim": 8, 9 | "block_out_channels": [ 10 | 320, 11 | 640, 12 | 1280, 13 | 1280 14 | ], 15 | "brushnet_conditioning_channel_order": "rgb", 16 | "class_embed_type": null, 17 | "conditioning_channels": 5, 18 | "conditioning_embedding_out_channels": [ 19 | 16, 20 | 32, 21 | 96, 22 | 256 23 | ], 24 | "cross_attention_dim": 768, 25 | "down_block_types": [ 26 | "CrossAttnDownBlock2D", 27 | "CrossAttnDownBlock2D", 28 | "CrossAttnDownBlock2D", 29 | "DownBlock2D" 30 | ], 31 | "downsample_padding": 1, 32 | "encoder_hid_dim": null, 33 | "encoder_hid_dim_type": null, 34 | "flip_sin_to_cos": true, 35 | "freq_shift": 0, 36 | "global_pool_conditions": false, 37 | "in_channels": 4, 38 | "layers_per_block": 2, 39 | "mid_block_scale_factor": 1, 40 | "mid_block_type": "UNetMidBlock2DCrossAttn", 41 | "norm_eps": 1e-05, 42 | "norm_num_groups": 32, 43 | "num_attention_heads": null, 44 | "num_class_embeds": null, 45 | "only_cross_attention": false, 46 | "projection_class_embeddings_input_dim": null, 47 | "resnet_time_scale_shift": "default", 48 | "transformer_layers_per_block": 1, 49 | "up_block_types": [ 50 | "UpBlock2D", 51 | "CrossAttnUpBlock2D", 52 | "CrossAttnUpBlock2D", 53 | "CrossAttnUpBlock2D" 54 | ], 55 | "upcast_attention": false, 56 | "use_linear_projection": false 57 | } -------------------------------------------------------------------------------- /py/modules/ipadapter/flux/math.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from einops import rearrange 
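# RoPE (rotary position embedding) helpers for the Flux IP-Adapter attention path:
# rope() below builds the per-position rotation table and apply_rope() rotates q/k with it.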
3 | from torch import Tensor 4 | from comfy.ldm.modules.attention import optimized_attention 5 | import comfy.model_management 6 | 7 | def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor: 8 | q, k = apply_rope(q, k, pe) 9 | 10 | heads = q.shape[1] 11 | x = optimized_attention(q, k, v, heads, skip_reshape=True, mask=mask) 12 | return x 13 | 14 | 15 | def rope(pos: Tensor, dim: int, theta: int) -> Tensor: 16 | assert dim % 2 == 0 17 | if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu(): 18 | device = torch.device("cpu") 19 | else: 20 | device = pos.device 21 | 22 | scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device) 23 | omega = 1.0 / (theta**scale) 24 | out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega) 25 | out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) 26 | out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) 27 | return out.to(dtype=torch.float32, device=pos.device) 28 | 29 | 30 | def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor): 31 | xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2) 32 | xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2) 33 | xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1] 34 | xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1] 35 | return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk) -------------------------------------------------------------------------------- /web_version/v1/js/easy/easySaveImage.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../../scripts/app.js"; 2 | import { applyTextReplacements } from "../../../../scripts/utils.js"; 3 | 4 | const extraNodes = ["easy imageSave", "easy fullkSampler", "easy kSampler", "easy kSamplerTiled","easy kSamplerInpainting", "easy kSamplerDownscaleUnet", "easy kSamplerSDTurbo","easy detailerFix"] 5 | 6 | app.registerExtension({ 7 | name: "Comfy.Easy.SaveImageExtraOutput", 8 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 9 | if (extraNodes.includes(nodeData.name)) { 10 | const onNodeCreated = nodeType.prototype.onNodeCreated; 11 | // When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R 12 | nodeType.prototype.onNodeCreated = function () { 13 | const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; 14 | 15 | const widget = this.widgets.find((w) => w.name === "filename_prefix" || w.name === 'save_prefix'); 16 | widget.serializeValue = () => { 17 | return applyTextReplacements(app, widget.value); 18 | }; 19 | 20 | return r; 21 | }; 22 | } else { 23 | // When any other node is created add a property to alias the node 24 | const onNodeCreated = nodeType.prototype.onNodeCreated; 25 | nodeType.prototype.onNodeCreated = function () { 26 | const r = onNodeCreated ? 
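// chain the original onNodeCreated so any existing setup for this node type still runs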
onNodeCreated.apply(this, arguments) : undefined;
27 | 
28 |                 if (!this.properties || !("Node name for S&R" in this.properties)) {
29 |                     this.addProperty("Node name for S&R", this.constructor.type, "string");
30 |                 }
31 | 
32 |                 return r;
33 |             };
34 |         }
35 |     },
36 | });
37 | 
--------------------------------------------------------------------------------
/py/modules/brushnet/config/brushnet.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "_class_name": "BrushNetModel",
 3 |   "_diffusers_version": "0.27.0.dev0",
 4 |   "_name_or_path": "runs/logs/brushnet_randommask/checkpoint-100000",
 5 |   "act_fn": "silu",
 6 |   "addition_embed_type": null,
 7 |   "addition_embed_type_num_heads": 64,
 8 |   "addition_time_embed_dim": null,
 9 |   "attention_head_dim": 8,
10 |   "block_out_channels": [
11 |     320,
12 |     640,
13 |     1280,
14 |     1280
15 |   ],
16 |   "brushnet_conditioning_channel_order": "rgb",
17 |   "class_embed_type": null,
18 |   "conditioning_channels": 5,
19 |   "conditioning_embedding_out_channels": [
20 |     16,
21 |     32,
22 |     96,
23 |     256
24 |   ],
25 |   "cross_attention_dim": 768,
26 |   "down_block_types": [
27 |     "DownBlock2D",
28 |     "DownBlock2D",
29 |     "DownBlock2D",
30 |     "DownBlock2D"
31 |   ],
32 |   "downsample_padding": 1,
33 |   "encoder_hid_dim": null,
34 |   "encoder_hid_dim_type": null,
35 |   "flip_sin_to_cos": true,
36 |   "freq_shift": 0,
37 |   "global_pool_conditions": false,
38 |   "in_channels": 4,
39 |   "layers_per_block": 2,
40 |   "mid_block_scale_factor": 1,
41 |   "mid_block_type": "MidBlock2D",
42 |   "norm_eps": 1e-05,
43 |   "norm_num_groups": 32,
44 |   "num_attention_heads": null,
45 |   "num_class_embeds": null,
46 |   "only_cross_attention": false,
47 |   "projection_class_embeddings_input_dim": null,
48 |   "resnet_time_scale_shift": "default",
49 |   "transformer_layers_per_block": 1,
50 |   "up_block_types": [
51 |     "UpBlock2D",
52 |     "UpBlock2D",
53 |     "UpBlock2D",
54 |     "UpBlock2D"
55 |   ],
56 |   "upcast_attention": false,
57 |   "use_linear_projection": false
58 | }
--------------------------------------------------------------------------------
/web_version/v1/js/seed.js:
--------------------------------------------------------------------------------
 1 | import { api } from "../../../scripts/api.js"; import { app } from "../../../scripts/app.js";
 2 | 
 3 | // Global seed handler
 4 | function globalSeedHandler(event) {
 5 |     let nodes = app.graph._nodes_by_id;
 6 |     for(let i in nodes) {
 7 |         let node = nodes[i];
 8 |         if(node.type == 'easy globalSeed') {
 9 |             if(node.widgets) {
10 |                 const w = node.widgets.find((w) => w.name == 'value');
11 |                 const last_w = node.widgets.find((w) => w.name == 'last_seed');
12 |                 last_w.value = w.value;
13 |                 w.value = event.detail.value;
14 |             }
15 |         }
16 |         else{
17 |             if(node.widgets) {
18 |                 const w = node.widgets.find((w) => w.name == 'seed_num' || w.name == 'seed' || w.name == 'noise_seed');
19 |                 if(w && event.detail.seed_map[node.id] != undefined) {
20 |                     w.value = event.detail.seed_map[node.id];
21 |                 }
22 |             }
23 |         }
24 | 
25 |     }
26 | }
27 | 
28 | api.addEventListener("easyuse-global-seed", globalSeedHandler);
29 | 
30 | const original_queuePrompt = api.queuePrompt;
31 | async function queuePrompt_with_seed(number, { output, workflow }) {
32 |     workflow.seed_widgets = {};
33 | 
34 |     for(let i in app.graph._nodes_by_id) {
35 |         let widgets = app.graph._nodes_by_id[i].widgets;
36 |         if(widgets) {
37 |             for(let j in widgets) {
38 |                 if((widgets[j].name == 'seed_num' || widgets[j].name == 'seed' || widgets[j].name == 'noise_seed') && widgets[j].type != 'converted-widget')
39 |                     workflow.seed_widgets[i] = parseInt(j);
40 |             }
41 |         }
42 |     }
43 | 
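    // workflow.seed_widgets now maps each node id to the index of its seed widget,
    // so per-node seed values can be sent back and applied by the
    // easyuse-global-seed handler above.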
44 | return await original_queuePrompt.call(api, number, { output, workflow }); 45 | } 46 | 47 | api.queuePrompt = queuePrompt_with_seed; 48 | -------------------------------------------------------------------------------- /py/modules/brushnet/config/brushnet_xl.json: -------------------------------------------------------------------------------- 1 | { 2 | "_class_name": "BrushNetModel", 3 | "_diffusers_version": "0.27.0.dev0", 4 | "_name_or_path": "runs/logs/brushnetsdxl_randommask/checkpoint-80000", 5 | "act_fn": "silu", 6 | "addition_embed_type": "text_time", 7 | "addition_embed_type_num_heads": 64, 8 | "addition_time_embed_dim": 256, 9 | "attention_head_dim": [ 10 | 5, 11 | 10, 12 | 20 13 | ], 14 | "block_out_channels": [ 15 | 320, 16 | 640, 17 | 1280 18 | ], 19 | "brushnet_conditioning_channel_order": "rgb", 20 | "class_embed_type": null, 21 | "conditioning_channels": 5, 22 | "conditioning_embedding_out_channels": [ 23 | 16, 24 | 32, 25 | 96, 26 | 256 27 | ], 28 | "cross_attention_dim": 2048, 29 | "down_block_types": [ 30 | "DownBlock2D", 31 | "DownBlock2D", 32 | "DownBlock2D" 33 | ], 34 | "downsample_padding": 1, 35 | "encoder_hid_dim": null, 36 | "encoder_hid_dim_type": null, 37 | "flip_sin_to_cos": true, 38 | "freq_shift": 0, 39 | "global_pool_conditions": false, 40 | "in_channels": 4, 41 | "layers_per_block": 2, 42 | "mid_block_scale_factor": 1, 43 | "mid_block_type": "MidBlock2D", 44 | "norm_eps": 1e-05, 45 | "norm_num_groups": 32, 46 | "num_attention_heads": null, 47 | "num_class_embeds": null, 48 | "only_cross_attention": false, 49 | "projection_class_embeddings_input_dim": 2816, 50 | "resnet_time_scale_shift": "default", 51 | "transformer_layers_per_block": [ 52 | 1, 53 | 2, 54 | 10 55 | ], 56 | "up_block_types": [ 57 | "UpBlock2D", 58 | "UpBlock2D", 59 | "UpBlock2D" 60 | ], 61 | "upcast_attention": null, 62 | "use_linear_projection": true 63 | } -------------------------------------------------------------------------------- /web_version/v1/css/dropdown.css: -------------------------------------------------------------------------------- 1 | .easy-dropdown, .easy-nested-dropdown { 2 | position: relative; 3 | box-sizing: border-box; 4 | background-color: #171717; 5 | box-shadow: 0 4px 4px rgba(255, 255, 255, .25); 6 | padding: 0; 7 | margin: 0; 8 | list-style: none; 9 | z-index: 1000; 10 | overflow: visible; 11 | max-height: fit-content; 12 | max-width: fit-content; 13 | } 14 | 15 | .easy-dropdown { 16 | position: absolute; 17 | border-radius: 0; 18 | } 19 | 20 | /* Style for final items */ 21 | .easy-dropdown li.item, .easy-nested-dropdown li.item { 22 | font-weight: normal; 23 | min-width: max-content; 24 | } 25 | 26 | /* Style for folders (parent items) */ 27 | .easy-dropdown li.folder, .easy-nested-dropdown li.folder { 28 | cursor: default; 29 | position: relative; 30 | border-right: 3px solid cyan; 31 | } 32 | 33 | .easy-dropdown li.folder::after, .easy-nested-dropdown li.folder::after { 34 | content: ">"; 35 | position: absolute; 36 | right: 2px; 37 | font-weight: normal; 38 | } 39 | 40 | .easy-dropdown li, .easy-nested-dropdown li { 41 | padding: 4px 10px; 42 | cursor: pointer; 43 | font-family: system-ui; 44 | font-size: 0.7rem; 45 | position: relative; 46 | } 47 | 48 | /* Style for nested dropdowns */ 49 | .easy-nested-dropdown { 50 | position: absolute; 51 | top: 0; 52 | left: 100%; 53 | margin: 0; 54 | border: none; 55 | display: none; 56 | } 57 | 58 | .easy-dropdown li.selected > .easy-nested-dropdown, 59 | .easy-nested-dropdown li.selected > 
.easy-nested-dropdown {
60 |     display: block;
61 |     border: none;
62 | }
63 | 
64 | .easy-dropdown li.selected,
65 | .easy-nested-dropdown li.selected {
66 |     background-color: #e5e5e5;
67 |     border: none;
68 | }
--------------------------------------------------------------------------------
/tools/convert_locale_format.py:
--------------------------------------------------------------------------------
 1 | # For developers only (do not run)
 2 | # Converts translation files from https://github.com/AIGODLIKE/AIGODLIKE-ComfyUI-Translation into the ComfyUI locales format
 3 | 
 4 | import json
 5 | import os
 6 | import pathlib
 7 | 
 8 | old_json_path = 'ComfyUI-Easy-Use.json'
 9 | root_path = pathlib.Path(__file__).parent.parent
10 | new_json_path = os.path.join(root_path,'locales/zh/nodeDefs.json')
11 | 
12 | def transform_dict(data):
13 |     new_dict = {}
14 |     for k, v in data.items():
15 |         new_dict[k] = {
16 |             "display_name": "",
17 |             "inputs": {}
18 |         }
19 |         if isinstance(v, dict):
20 |             for key, value in v.items():
21 |                 if key == 'title':
22 |                     new_dict[k]['display_name'] = value
23 |                 elif key in ['inputs','widgets']:
24 |                     for _key, _value in value.items():
25 |                         new_dict[k]['inputs'] = {
26 |                             **new_dict[k]['inputs'],
27 |                             _key: {"name": _value}
28 |                         }
29 |                 elif key == 'outputs':
30 |                     if not new_dict[k].get('outputs'):
31 |                         new_dict[k]['outputs'] = {}
32 |                     for idx, (out_key, out_value) in enumerate(value.items()):
33 |                         new_dict[k]['outputs'][idx] = {"name": out_value}
34 |     return new_dict
35 | 
36 | def main():
37 | 
38 |     # Read the original JSON file
39 |     with open(old_json_path, 'r', encoding='utf-8') as f:
40 |         data = json.load(f)
41 | 
42 |     # Transform the data
43 |     transformed_data = transform_dict(data)
44 | 
45 |     # Write the new JSON file
46 |     with open(new_json_path, 'w', encoding='utf-8') as f:
47 |         json.dump(transformed_data, f, ensure_ascii=False, indent=2)
48 | 
49 | if __name__ == '__main__':
50 |     main()
51 | 
--------------------------------------------------------------------------------
/web_version/v1/css/sliderControl.css:
--------------------------------------------------------------------------------
 1 | .easyuse-slider{
 2 |     width:100%;
 3 |     height:100%;
 4 |     display: flex;
 5 |     flex-direction: row;
 6 |     justify-content: space-between;
 7 |     position: relative;
 8 | }
 9 | .easyuse-slider-item{
10 |     height: inherit;
11 |     min-width: 25px;
12 |     justify-content: center;
13 |     display: flex;
14 |     flex-direction: column;
15 |     align-items: center;
16 | }
17 | .easyuse-slider-item.positive .easyuse-slider-item-label{
18 |     color: var(--success-color);
19 | }
20 | .easyuse-slider-item.negative .easyuse-slider-item-label{
21 |     color: var(--error-color);
22 | }
23 | .easyuse-slider-item-input{
24 |     height:15px;
25 |     font-size: 10px;
26 |     color: var(--input-text);
27 | }
28 | .easyuse-slider-item-label{
29 |     height:15px;
30 |     border: none;
31 |     color: var(--descrip-text);
32 |     font-size: 8px;
33 | }
34 | .easyuse-slider-item-scroll {
35 |     width: 5px;
36 |     height: calc(100% - 30px);
37 |     background: var(--comfy-input-bg);
38 |     border-radius: 10px;
39 |     position: relative;
40 | }
41 | .easyuse-slider-item-bar{
42 |     width: 10px;
43 |     height: 10px;
44 |     background: linear-gradient(to bottom, var(--input-text), var(--descrip-text));
45 |     border-radius:100%;
46 |     box-shadow: 0 2px 10px var(--bg-color);
47 |     position: absolute;
48 |     top: 0;
49 |     left:-2.5px;
50 |     cursor: pointer;
51 |     z-index:1;
52 | }
53 | .easyuse-slider-item-area{
54 |     width: 100%;
55 |     border-radius:20px;
56 |     position: absolute;
57 |     bottom: 0;
58 |     background: var(--input-text);
59 |     z-index:0;
60 | }
61 | .easyuse-slider-item.positive .easyuse-slider-item-area{
62 |     background: var(--success-color);
63 | }
64 | .easyuse-slider-item.negative .easyuse-slider-item-area{
65 |     background: var(--error-color);
66 | }
67 | 
--------------------------------------------------------------------------------
/tools/combine_autocomplete.py:
--------------------------------------------------------------------------------
 1 | # Custom prompt autocomplete tool
 2 | 
 3 | import os, sys
 4 | import glob
 5 | import shutil
 6 | 
 7 | cwd_path = os.path.dirname(os.path.realpath(__file__))
 8 | pyssss_path = os.path.join(cwd_path, "..", "ComfyUI-Custom-Scripts", "user")
 9 | combine_folder = os.path.join(cwd_path, "autocomplete")
10 | output_file = os.path.join(pyssss_path, "autocomplete.txt")
11 | 
12 | def backup_autocomplete():
13 |     bak_file = os.path.join(pyssss_path, "autocomplete.txt.bak")
14 |     if os.path.exists(bak_file):
15 |         pass
16 |     elif os.path.exists(output_file):
17 |         shutil.copy(output_file, bak_file)
18 | 
19 | def combine_autocomplete():
20 |     if not os.path.exists(combine_folder):
21 |         os.mkdir(combine_folder)
22 |     if os.path.exists(pyssss_path):
23 |         # Walk every txt file under the combine folder, read it and merge the contents
24 |         merged_content = ''
25 |         for file_path in glob.glob(os.path.join(combine_folder, '*.txt')):
26 |             with open(file_path, 'r', encoding='utf-8', errors='ignore') as file:
27 |                 try:
28 |                     file_content = file.read()
29 |                     merged_content += file_content + '\n'
30 |                 except UnicodeDecodeError:
31 |                     pass
32 |         if merged_content != '':
33 |             # Write the merged content into the target autocomplete.txt, encoded as utf-8
34 |             with open(output_file, 'w', encoding='utf-8') as target_file:
35 |                 target_file.write(merged_content)
36 | 
37 | if __name__ == "__main__":
38 |     arg = sys.argv[0]
39 |     if 'combine_autocomplete' in arg:
40 |         arg = sys.argv[1]
41 |     if arg == 'backup':
42 |         backup_autocomplete()
43 |     elif arg == 'combine':
44 |         combine_autocomplete()
45 |     else:
46 |         print("Usage: python combine_autocomplete.py [backup|combine]")
47 |         sys.exit(1)
--------------------------------------------------------------------------------
/py/libs/api/fluxai.py:
--------------------------------------------------------------------------------
 1 | import json
 2 | import os
 3 | import yaml
 4 | import requests
 5 | import pathlib
 6 | from aiohttp import web
 7 | 
 8 | root_path = pathlib.Path(__file__).parent.parent.parent.parent
 9 | config_path = os.path.join(root_path,'config.yaml')
10 | class FluxAIAPI:
11 |     def __init__(self):
12 |         self.api_url = "https://fluxaiimagegenerator.com/api"
13 |         self.origin = "https://fluxaiimagegenerator.com"
14 |         self.user_agent = None
15 |         self.cookie = None
16 | 
17 |     def promptGenerate(self, text, cookies=None):
18 |         cookie = self.cookie if cookies is None else cookies
19 |         if cookie is None:
20 |             if os.path.isfile(config_path):
21 |                 with open(config_path, 'r') as f:
22 |                     data = yaml.load(f, Loader=yaml.FullLoader)
23 |                     if 'FLUXAI_COOKIE' not in data:
24 |                         raise Exception("Please add FLUXAI_COOKIE to config.yaml")
25 |                     if "FLUXAI_USER_AGENT" in data:
26 |                         self.user_agent = data["FLUXAI_USER_AGENT"]
27 |                     self.cookie = cookie = data['FLUXAI_COOKIE']
28 | 
29 |         headers = {
30 |             "Cookie": cookie,
31 |             "Referer": "https://fluxaiimagegenerator.com/flux-prompt-generator",
32 |             "Origin": self.origin,
33 |             "Content-Type": "application/json",
34 |         }
35 |         if self.user_agent is not None:
36 |             headers['User-Agent'] = self.user_agent
37 | 
38 |         url = self.api_url + '/prompt'
39 |         payload = {
40 |             "prompt": text
41 |         }
42 | 
43 |         response = requests.post(url, json=payload,
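                                 # auth is cookie-based: the Cookie header was assembled from config.yaml above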
headers=headers) 44 | res = response.json() 45 | if "error" in res: 46 | return res['error'] 47 | elif "data" in res and "prompt" in res['data']: 48 | return res['data']['prompt'] 49 | 50 | fluxaiAPI = FluxAIAPI() 51 | 52 | -------------------------------------------------------------------------------- /py/libs/messages.py: -------------------------------------------------------------------------------- 1 | from server import PromptServer 2 | from aiohttp import web 3 | import time 4 | import json 5 | 6 | class MessageCancelled(Exception): 7 | pass 8 | 9 | class Message: 10 | stash = {} 11 | messages = {} 12 | cancelled = False 13 | 14 | @classmethod 15 | def addMessage(cls, id, message): 16 | if message == '__cancel__': 17 | cls.messages = {} 18 | cls.cancelled = True 19 | elif message == '__start__': 20 | cls.messages = {} 21 | cls.stash = {} 22 | cls.cancelled = False 23 | else: 24 | cls.messages[str(id)] = message 25 | 26 | @classmethod 27 | def waitForMessage(cls, id, period=0.1, asList=False): 28 | sid = str(id) 29 | while not (sid in cls.messages) and not ("-1" in cls.messages): 30 | if cls.cancelled: 31 | cls.cancelled = False 32 | raise MessageCancelled() 33 | time.sleep(period) 34 | if cls.cancelled: 35 | cls.cancelled = False 36 | raise MessageCancelled() 37 | message = cls.messages.pop(str(id), None) or cls.messages.pop("-1") 38 | try: 39 | if asList: 40 | return [str(x.strip()) for x in message.split(",")] 41 | else: 42 | try: 43 | return json.loads(message) 44 | except ValueError: 45 | return message 46 | except ValueError: 47 | print( f"ERROR IN MESSAGE - failed to parse '${message}' as ${'comma separated list of strings' if asList else 'string'}") 48 | return [message] if asList else message 49 | 50 | 51 | @PromptServer.instance.routes.post('/easyuse/message_callback') 52 | async def message_callback(request): 53 | post = await request.post() 54 | Message.addMessage(post.get("id"), post.get("message")) 55 | return web.json_response({}) -------------------------------------------------------------------------------- /locales/ja/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "Shift+gを使用して選択したノードをグループに追加する", 4 | "tooltip": "v1.2.39以降、Ctrl+gが使用できます" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "Shift+rを使用してモデルおよびノードキャッシュをアンロードする" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "Shift+mを使用してノードマップを表示/非表示にします" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "Shift+上/下/左/右およびShift+Ctrl+Alt+左/右を使用して選択したノードを整列する", 14 | "tooltip": "Shift+上/下/左/右で選択したノードを整列し、Shift+Ctrl+Alt+左/右で水平方向/垂直方向に分布させる" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "Shift+Ctrl+左/右を使用して選択したノードのサイズを正規化する", 18 | "tooltip": "Shift+Ctrl+左で幅を、Shift+Ctrl+右で高さを正規化する" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "Alt+1~9を使用してワークフローにノードテンプレートを貼り付ける" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "上/下/左/右を使用して最も近いノードにジャンプする" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "コンテキストメニューでサブディレクトリを自動でネストする" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "モデルプレビューサムネイルを有効にする" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "コンテキストメニューで新規ノードをA~Z順に並べ替える" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "コンテキストメニューで3つのクイックボタンを使用する", 37 | "options": { 38 | "At the forefront": "最前面に", 39 | "At the end": "最後に", 40 | "Disable": "無効" 41 | } 42 | }, 43 | 
"EasyUse_Nodes_Runtime": { 44 | "name": "ノードの実行時間表示を有効にする" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "親ノードと取得/設定ポイントを連結することを有効にする" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "ノードグループの並べ替えモードを管理する", 51 | "tooltip": "デフォルトで自動的に並べ替えます。マニュアルに設定した場合、グループをドラッグアンドドロップで並べ替え、順序が保存されます。", 52 | "options": { 53 | "Auto sorting": "自動並べ替え", 54 | "Manual drag&drop sorting": "手動ドラッグアンドドロップによる並べ替え" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "ノードIDの表示を有効にする" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "グループのみ表示する" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "グループマップを有効にする", 65 | "tooltip": "ページを更新する必要があります" 66 | } 67 | } -------------------------------------------------------------------------------- /web_version/v1/js/image.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../scripts/app.js"; 2 | 3 | 4 | app.registerExtension({ 5 | name: "comfy.easyUse.imageWidgets", 6 | 7 | nodeCreated(node) { 8 | if (["easy imageSize","easy imageSizeBySide","easy imageSizeByLongerSide","easy imageSizeShow", "easy imageRatio", "easy imagePixelPerfect"].includes(node.comfyClass)) { 9 | 10 | const inputEl = document.createElement("textarea"); 11 | inputEl.className = "comfy-multiline-input"; 12 | inputEl.readOnly = true 13 | 14 | const widget = node.addDOMWidget("info", "customtext", inputEl, { 15 | getValue() { 16 | return inputEl.value; 17 | }, 18 | setValue(v) { 19 | inputEl.value = v; 20 | }, 21 | serialize: false 22 | }); 23 | widget.inputEl = inputEl; 24 | 25 | inputEl.addEventListener("input", () => { 26 | widget.callback?.(widget.value); 27 | }); 28 | } 29 | }, 30 | 31 | beforeRegisterNodeDef(nodeType, nodeData, app) { 32 | if (["easy imageSize","easy imageSizeBySide","easy imageSizeByLongerSide", "easy imageSizeShow", "easy imageRatio", "easy imagePixelPerfect"].includes(nodeData.name)) { 33 | function populate(arr_text) { 34 | var text = ''; 35 | for (let i = 0; i < arr_text.length; i++){ 36 | text += arr_text[i]; 37 | } 38 | if (this.widgets) { 39 | const pos = this.widgets.findIndex((w) => w.name === "info"); 40 | if (pos !== -1 && this.widgets[pos]) { 41 | const w = this.widgets[pos] 42 | w.value = text; 43 | } 44 | } 45 | requestAnimationFrame(() => { 46 | const sz = this.computeSize(); 47 | if (sz[0] < this.size[0]) { 48 | sz[0] = this.size[0]; 49 | } 50 | if (sz[1] < this.size[1]) { 51 | sz[1] = this.size[1]; 52 | } 53 | this.onResize?.(sz); 54 | app.graph.setDirtyCanvas(true, false); 55 | }); 56 | } 57 | 58 | // When the node is executed we will be sent the input text, display this in the widget 59 | const onExecuted = nodeType.prototype.onExecuted; 60 | nodeType.prototype.onExecuted = function (message) { 61 | onExecuted?.apply(this, arguments); 62 | populate.call(this, message.text); 63 | }; 64 | } 65 | } 66 | }) -------------------------------------------------------------------------------- /locales/zh/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "启用 Shift+g 键将选中的节点添加一个组", 4 | "tooltip": "从v1.2.39开始,可以使用Ctrl+g代替" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "启用 Shift+r 键卸载模型和节点缓存" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "启用 Shift+m 键显隐管理节点组" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "启用 Shift+上/下/左/右 和 Shift+Ctrl+Alt+左/右 键对齐选中的节点", 14 | "tooltip": "Shift+上/下/左/右 可以对齐选中的节点, Shift+Ctrl+Alt+左/右 
可以水平/垂直分布节点" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "启用 Shift+Ctrl+左/右 键规范化选中的节点", 18 | "tooltip": "启用 Shift+Ctrl+左 键规范化宽度和 Shift+Ctrl+右 键规范化高度" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "启用 Alt+1~9 从节点模板粘贴到工作流中" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "启用 上/下/左/右 键跳转到最近的前后节点" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "启用上下文菜单自动嵌套子目录" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "启动模型预览图显示" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "启用右键菜单中新建节点A~Z排序" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "在右键菜单中使用三个快捷按钮", 37 | "options": { 38 | "At the forefront": "在最前面", 39 | "At the end": "在最后面", 40 | "Disable": "禁用" 41 | } 42 | }, 43 | "EasyUse_Nodes_Runtime": { 44 | "name": "启动节点运行时间显示" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "启用将获取点和设置点与父节点链在一起" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "管理节点组排序模式", 51 | "tooltip": "默认自动排序,如果设置为手动,组可以拖放并保存排序结果。", 52 | "options": { 53 | "Auto sorting": "自动排序", 54 | "Manual drag&drop sorting": "手动拖拽排序" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "启用节点ID显示" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "仅显示组" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "启用管理节点组", 65 | "tooltip": "您需要刷新页面以成功更新" 66 | }, 67 | "EasyUse_StylesSelector_DisplayType": { 68 | "name": "样式选择器显示类型", 69 | "tooltip": "样式选择器显示类型,如果设置为“网格”,则显示为网格,如果设置为“列表”,则显示为列表", 70 | "options": { 71 | "Grid": "网格", 72 | "List": "列表" 73 | } 74 | } 75 | } -------------------------------------------------------------------------------- /locales/ko/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "Shift+g 를 사용하여 선택된 노드를 그룹에 추가합니다", 4 | "tooltip": "v1.2.39부터는 Ctrl+g 를 사용할 수 있습니다" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "Shift+r 를 사용하여 모델 및 노드 캐시를 언로드합니다" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "Shift+m 를 사용하여 노드 맵을 전환합니다" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "Shift+Up/Down/Left/Right 와 Shift+Ctrl+Alt+Left/Right 를 사용하여 선택된 노드를 정렬합니다", 14 | "tooltip": "Shift+Up/Down/Left/Right 는 선택된 노드를 정렬하며, Shift+Ctrl+Alt+Left/Right 는 노드를 수평/수직으로 분배합니다" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "Shift+Ctrl+Left/Right 를 사용하여 선택된 노드를 정규화합니다", 18 | "tooltip": "Shift+Ctrl+Left 는 너비를, Shift+Ctrl+Right 는 높이를 정규화합니다" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "Alt+1~9 를 사용하여 워크플로우에 노드 템플릿을 붙여넣습니다" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "Up/Down/Left/Right 를 사용하여 가장 가까운 노드로 이동합니다" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "컨텍스트 메뉴에서 자동으로 하위 디렉토리를 중첩합니다" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "모델 미리보기 썸네일을 활성화합니다" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "컨텍스트 메뉴에서 새로운 노드를 A~Z 순으로 정렬합니다" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "컨텍스트 메뉴에 3개의 빠른 옵션 버튼을 사용합니다", 37 | "options": { 38 | "At the forefront": "앞쪽에", 39 | "At the end": "뒤쪽에", 40 | "Disable": "비활성화" 41 | } 42 | }, 43 | "EasyUse_Nodes_Runtime": { 44 | "name": "노드 실행 시간 표시를 활성화합니다" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "부모 노드와 연결된 get/ set 포인트 체이닝을 활성화합니다" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "노드 그룹 정렬 모드를 관리합니다", 51 | "tooltip": "기본값은 자동 정렬입니다. 
수동으로 설정하면 그룹을 드래그 앤 드롭할 수 있으며 순서가 저장됩니다.", 52 | "options": { 53 | "Auto sorting": "자동 정렬", 54 | "Manual drag&drop sorting": "수동 드래그 앤 드롭 정렬" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "노드 ID 표시를 활성화합니다" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "그룹만 표시합니다" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "그룹 맵을 활성화합니다", 65 | "tooltip": "업데이트를 위해 페이지를 새로고침해야 합니다" 66 | } 67 | } -------------------------------------------------------------------------------- /py/libs/model.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import folder_paths 4 | import server 5 | from .utils import find_tags 6 | 7 | class easyModelManager: 8 | 9 | def __init__(self): 10 | self.img_suffixes = [".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".tiff", ".svg", ".tif", ".tiff"] 11 | self.default_suffixes = [".ckpt", ".pt", ".bin", ".pth", ".safetensors"] 12 | self.models_config = { 13 | "checkpoints": {"suffix": self.default_suffixes}, 14 | "loras": {"suffix": self.default_suffixes}, 15 | "unet": {"suffix": self.default_suffixes}, 16 | } 17 | self.model_lists = {} 18 | 19 | def find_thumbnail(self, model_type, name): 20 | file_no_ext = os.path.splitext(name)[0] 21 | for ext in self.img_suffixes: 22 | full_path = folder_paths.get_full_path(model_type, file_no_ext + ext) 23 | if os.path.isfile(str(full_path)): 24 | return full_path 25 | return None 26 | 27 | def get_model_lists(self, model_type): 28 | if model_type not in self.models_config: 29 | return [] 30 | filenames = folder_paths.get_filename_list(model_type) 31 | model_lists = [] 32 | for name in filenames: 33 | model_suffix = os.path.splitext(name)[-1] 34 | if model_suffix not in self.models_config[model_type]["suffix"]: 35 | continue 36 | else: 37 | cfg = { 38 | "name": os.path.basename(os.path.splitext(name)[0]), 39 | "full_name": name, 40 | "remark": '', 41 | "file_path": folder_paths.get_full_path(model_type, name), 42 | "type": model_type, 43 | "suffix": model_suffix, 44 | "dir_tags": find_tags(name), 45 | "cover": self.find_thumbnail(model_type, name), 46 | "metadata": None, 47 | "sha256": None 48 | } 49 | model_lists.append(cfg) 50 | 51 | return model_lists 52 | 53 | def get_model_info(self, model_type, model_name): 54 | pass 55 | 56 | # if __name__ == "__main__": 57 | # manager = easyModelManager() 58 | # print(manager.get_model_lists("checkpoints")) -------------------------------------------------------------------------------- /py/libs/log.py: -------------------------------------------------------------------------------- 1 | COLORS_FG = { 2 | 'BLACK': '\33[30m', 3 | 'RED': '\33[31m', 4 | 'GREEN': '\33[32m', 5 | 'YELLOW': '\33[33m', 6 | 'BLUE': '\33[34m', 7 | 'MAGENTA': '\33[35m', 8 | 'CYAN': '\33[36m', 9 | 'WHITE': '\33[37m', 10 | 'GREY': '\33[90m', 11 | 'BRIGHT_RED': '\33[91m', 12 | 'BRIGHT_GREEN': '\33[92m', 13 | 'BRIGHT_YELLOW': '\33[93m', 14 | 'BRIGHT_BLUE': '\33[94m', 15 | 'BRIGHT_MAGENTA': '\33[95m', 16 | 'BRIGHT_CYAN': '\33[96m', 17 | 'BRIGHT_WHITE': '\33[97m', 18 | } 19 | COLORS_STYLE = { 20 | 'RESET': '\33[0m', 21 | 'BOLD': '\33[1m', 22 | 'NORMAL': '\33[22m', 23 | 'ITALIC': '\33[3m', 24 | 'UNDERLINE': '\33[4m', 25 | 'BLINK': '\33[5m', 26 | 'BLINK2': '\33[6m', 27 | 'SELECTED': '\33[7m', 28 | } 29 | COLORS_BG = { 30 | 'BLACK': '\33[40m', 31 | 'RED': '\33[41m', 32 | 'GREEN': '\33[42m', 33 | 'YELLOW': '\33[43m', 34 | 'BLUE': '\33[44m', 35 | 'MAGENTA': '\33[45m', 36 | 'CYAN': '\33[46m', 37 | 'WHITE': '\33[47m', 38 | 
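# Standard ANSI background codes (40-47) above; the bright variants (100-107) follow below.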
'GREY': '\33[100m', 39 | 'BRIGHT_RED': '\33[101m', 40 | 'BRIGHT_GREEN': '\33[102m', 41 | 'BRIGHT_YELLOW': '\33[103m', 42 | 'BRIGHT_BLUE': '\33[104m', 43 | 'BRIGHT_MAGENTA': '\33[105m', 44 | 'BRIGHT_CYAN': '\33[106m', 45 | 'BRIGHT_WHITE': '\33[107m', 46 | } 47 | 48 | def log_node_success(node_name, message=None): 49 | """Logs a success message.""" 50 | _log_node(COLORS_FG["GREEN"], node_name, message) 51 | 52 | def log_node_info(node_name, message=None): 53 | """Logs an info message.""" 54 | _log_node(COLORS_FG["CYAN"], node_name, message) 55 | 56 | 57 | def log_node_warn(node_name, message=None): 58 | """Logs a warning message.""" 59 | _log_node(COLORS_FG["YELLOW"], node_name, message) 60 | 61 | def log_node_error(node_name, message=None): 62 | """Logs an error message.""" 63 | _log_node(COLORS_FG["RED"], node_name, message) 64 | 65 | def log_node(node_name, message=None): 66 | """Logs a message.""" 67 | _log_node(COLORS_FG["CYAN"], node_name, message) 68 | 69 | 70 | def _log_node(color, node_name, message=None, prefix=''): 71 | print(_get_log_msg(color, node_name, message, prefix=prefix)) 72 | 73 | def _get_log_msg(color, node_name, message=None, prefix=''): 74 | msg = f'{COLORS_STYLE["BOLD"]}{color}{prefix}[EasyUse] {node_name.replace(" (EasyUse)", "")}' 75 | msg += f':{COLORS_STYLE["RESET"]} {message}' if message is not None else f'{COLORS_STYLE["RESET"]}' 76 | return msg 77 | 78 | -------------------------------------------------------------------------------- /web_version/v1/css/account.css: -------------------------------------------------------------------------------- 1 | .easyuse-account{ 2 | 3 | } 4 | .easyuse-account-user{ 5 | font-size: 10px; 6 | color:var(--descrip-text); 7 | text-align: center; 8 | } 9 | .easyuse-account-user-info{ 10 | display: flex; 11 | justify-content: space-between; 12 | align-items: center; 13 | padding-bottom:10px; 14 | cursor: pointer; 15 | } 16 | .easyuse-account-user-info .user{ 17 | display: flex; 18 | align-items: center; 19 | } 20 | .easyuse-account-user-info .edit{ 21 | padding:5px 10px; 22 | background: var(--comfy-menu-bg); 23 | border-radius:4px; 24 | } 25 | .easyuse-account-user-info:hover{ 26 | filter:brightness(110%); 27 | } 28 | .easyuse-account-user-info h5{ 29 | margin:0; 30 | font-size: 10px; 31 | text-align: left; 32 | } 33 | .easyuse-account-user-info h6{ 34 | margin:0; 35 | font-size: 8px; 36 | text-align: left; 37 | font-weight: 300; 38 | } 39 | .easyuse-account-user-info .remark{ 40 | margin-top: 4px; 41 | } 42 | .easyuse-account-user-info .avatar{ 43 | width: 36px; 44 | height: 36px; 45 | background: var(--comfy-input-bg); 46 | border-radius: 50%; 47 | margin-right: 5px; 48 | display: flex; 49 | justify-content: center; 50 | align-items: center; 51 | font-size: 16px; 52 | overflow: hidden; 53 | } 54 | .easyuse-account-user-info .avatar img{ 55 | width: 100%; 56 | height: 100%; 57 | } 58 | .easyuse-account-dialog{ 59 | width: 600px; 60 | } 61 | .easyuse-account-dialog-main a, .easyuse-account-dialog-main a:visited{ 62 | font-weight: 400; 63 | color: var(--theme-color-light); 64 | } 65 | .easyuse-account-dialog-item{ 66 | display: flex; 67 | justify-content: flex-start; 68 | align-items: center; 69 | padding: 10px 0; 70 | border-bottom: 1px solid var(--border-color); 71 | } 72 | .easyuse-account-dialog-item input{ 73 | padding:5px; 74 | margin-right:5px; 75 | } 76 | .easyuse-account-dialog-item input.key{ 77 | flex:1; 78 | } 79 | .easyuse-account-dialog-item button{ 80 | cursor: pointer; 81 | margin-left:5px!important; 82 | 
padding:5px!important; 83 | font-size: 16px!important; 84 | } 85 | .easyuse-account-dialog-item button:hover{ 86 | filter:brightness(120%); 87 | } 88 | .easyuse-account-dialog-item button.choose { 89 | background: var(--theme-color); 90 | } 91 | .easyuse-account-dialog-item button.delete{ 92 | background: var(--error-color); 93 | } -------------------------------------------------------------------------------- /py/modules/kolors/chatglm/configuration_chatglm.py: -------------------------------------------------------------------------------- 1 | from transformers import PretrainedConfig 2 | 3 | class ChatGLMConfig(PretrainedConfig): 4 | model_type = "chatglm" 5 | def __init__( 6 | self, 7 | num_layers=28, 8 | padded_vocab_size=65024, 9 | hidden_size=4096, 10 | ffn_hidden_size=13696, 11 | kv_channels=128, 12 | num_attention_heads=32, 13 | seq_length=2048, 14 | hidden_dropout=0.0, 15 | classifier_dropout=None, 16 | attention_dropout=0.0, 17 | layernorm_epsilon=1e-5, 18 | rmsnorm=True, 19 | apply_residual_connection_post_layernorm=False, 20 | post_layer_norm=True, 21 | add_bias_linear=False, 22 | add_qkv_bias=False, 23 | bias_dropout_fusion=True, 24 | multi_query_attention=False, 25 | multi_query_group_num=1, 26 | apply_query_key_layer_scaling=True, 27 | attention_softmax_in_fp32=True, 28 | fp32_residual_connection=False, 29 | quantization_bit=0, 30 | pre_seq_len=None, 31 | prefix_projection=False, 32 | **kwargs 33 | ): 34 | self.num_layers = num_layers 35 | self.vocab_size = padded_vocab_size 36 | self.padded_vocab_size = padded_vocab_size 37 | self.hidden_size = hidden_size 38 | self.ffn_hidden_size = ffn_hidden_size 39 | self.kv_channels = kv_channels 40 | self.num_attention_heads = num_attention_heads 41 | self.seq_length = seq_length 42 | self.hidden_dropout = hidden_dropout 43 | self.classifier_dropout = classifier_dropout 44 | self.attention_dropout = attention_dropout 45 | self.layernorm_epsilon = layernorm_epsilon 46 | self.rmsnorm = rmsnorm 47 | self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm 48 | self.post_layer_norm = post_layer_norm 49 | self.add_bias_linear = add_bias_linear 50 | self.add_qkv_bias = add_qkv_bias 51 | self.bias_dropout_fusion = bias_dropout_fusion 52 | self.multi_query_attention = multi_query_attention 53 | self.multi_query_group_num = multi_query_group_num 54 | self.apply_query_key_layer_scaling = apply_query_key_layer_scaling 55 | self.attention_softmax_in_fp32 = attention_softmax_in_fp32 56 | self.fp32_residual_connection = fp32_residual_connection 57 | self.quantization_bit = quantization_bit 58 | self.pre_seq_len = pre_seq_len 59 | self.prefix_projection = prefix_projection 60 | super().__init__(**kwargs) -------------------------------------------------------------------------------- /locales/en/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "Enable Shift+g to add the selected nodes to a group", 4 | "tooltip": "From v1.2.39, you can use Ctrl+g instead" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "Enable Shift+r to unload model and node cache" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "Enable Shift+m to toggle nodes map" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "Enable Shift+Up/Down/Left/Right and Shift+Ctrl+Alt+Left/Right to align selected nodes", 14 | "tooltip": "Shift+Up/Down/Left/Right can align selected nodes, Shift+Ctrl+Alt+Left/Right can distribute nodes 
horizontally/vertically" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "Enable Shift+Ctrl+Left/Right to normalize selected nodes", 18 | "tooltip": "Enable Shift+Ctrl+Left to normalize width and Shift+Ctrl+Right to normalize height" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "Enable Alt+1~9 to paste node templates into the workflow" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "Enable Up/Down/Left/Right to jump to the nearest node" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "Enable automatic nesting of subdirectories in the context menu" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "Enable model preview thumbnails" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "Enable A~Z sorting of new nodes in the context menu" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "Use three quick buttons in the context menu", 37 | "options": { 38 | "At the forefront": "At the forefront", 39 | "At the end": "At the end", 40 | "Disable": "Disable" 41 | } 42 | }, 43 | "EasyUse_Nodes_Runtime": { 44 | "name": "Enable node runtime display" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "Enable chaining of get and set points with the parent node" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "Manage nodes group sorting mode", 51 | "tooltip": "Automatically sort by default. If set to manual, groups can be drag and dropped and the order will be saved.", 52 | "options": { 53 | "Auto sorting": "Auto sorting", 54 | "Manual drag&drop sorting": "Manual drag&drop sorting" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "Enable node ID display" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "Show groups only" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "Enable Group Map", 65 | "tooltip": "You need to refresh the page to update successfully" 66 | } 67 | } -------------------------------------------------------------------------------- /py/modules/kolors/model_patch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import Linear 3 | from types import MethodType 4 | import comfy.model_management 5 | import comfy.samplers 6 | from comfy.cldm.cldm import ControlNet 7 | from comfy.controlnet import ControlLora 8 | 9 | def patch_controlnet(model, control_net): 10 | import comfy.controlnet 11 | if isinstance(control_net, ControlLora): 12 | del_keys = [] 13 | for k in control_net.control_weights: 14 | if k.startswith("label_emb.0.0."): 15 | del_keys.append(k) 16 | 17 | for k in del_keys: 18 | control_net.control_weights.pop(k) 19 | 20 | super_pre_run = ControlLora.pre_run 21 | super_copy = ControlLora.copy 22 | 23 | super_forward = ControlNet.forward 24 | 25 | def KolorsControlNet_forward(self, x, hint, timesteps, context, **kwargs): 26 | with torch.cuda.amp.autocast(enabled=True): 27 | context = model.model.diffusion_model.encoder_hid_proj(context) 28 | return super_forward(self, x, hint, timesteps, context, **kwargs) 29 | 30 | def KolorsControlLora_pre_run(self, *args, **kwargs): 31 | result = super_pre_run(self, *args, **kwargs) 32 | 33 | if hasattr(self, "control_model"): 34 | self.control_model.forward = MethodType( 35 | KolorsControlNet_forward, self.control_model) 36 | return result 37 | 38 | control_net.pre_run = MethodType( 39 | KolorsControlLora_pre_run, control_net) 40 | 41 | def KolorsControlLora_copy(self, *args, **kwargs): 42 | c = super_copy(self, 
*args, **kwargs) 43 | c.pre_run = MethodType( 44 | KolorsControlLora_pre_run, c) 45 | return c 46 | 47 | control_net.copy = MethodType(KolorsControlLora_copy, control_net) 48 | 49 | elif isinstance(control_net, comfy.controlnet.ControlNet): 50 | model_label_emb = model.model.diffusion_model.label_emb 51 | control_net.control_model.label_emb = model_label_emb 52 | control_net.control_model_wrapped.model.label_emb = model_label_emb 53 | super_forward = ControlNet.forward 54 | 55 | def KolorsControlNet_forward(self, x, hint, timesteps, context, **kwargs): 56 | with torch.cuda.amp.autocast(enabled=True): 57 | context = model.model.diffusion_model.encoder_hid_proj(context) 58 | return super_forward(self, x, hint, timesteps, context, **kwargs) 59 | 60 | control_net.control_model.forward = MethodType( 61 | KolorsControlNet_forward, control_net.control_model) 62 | 63 | else: 64 | raise NotImplementedError(f"Type {control_net} not supported for KolorsControlNetPatch") 65 | 66 | return control_net 67 | -------------------------------------------------------------------------------- /py/libs/cache.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | from typing import Optional 3 | 4 | class TaggedCache: 5 | def __init__(self, tag_settings: Optional[dict]=None): 6 | self._tag_settings = tag_settings or {} # tag cache size 7 | self._data = {} 8 | 9 | def __getitem__(self, key): 10 | for tag_data in self._data.values(): 11 | if key in tag_data: 12 | return tag_data[key] 13 | raise KeyError(f'Key `{key}` does not exist') 14 | 15 | def __setitem__(self, key, value: tuple): 16 | # value: (tag: str, (islist: bool, data: *)) 17 | 18 | # if key already exists, pop old value 19 | for tag_data in self._data.values(): 20 | if key in tag_data: 21 | tag_data.pop(key, None) 22 | break 23 | 24 | tag = value[0] 25 | if tag not in self._data: 26 | 27 | try: 28 | from cachetools import LRUCache 29 | 30 | default_size = 20 31 | if 'ckpt' in tag: 32 | default_size = 5 33 | elif tag in ['latent', 'image']: 34 | default_size = 100 35 | 36 | self._data[tag] = LRUCache(maxsize=self._tag_settings.get(tag, default_size)) 37 | 38 | except (ImportError, ModuleNotFoundError): 39 | # TODO: implement a simple lru dict 40 | self._data[tag] = {} 41 | self._data[tag][key] = value 42 | 43 | def __delitem__(self, key): 44 | for tag_data in self._data.values(): 45 | if key in tag_data: 46 | del tag_data[key] 47 | return 48 | raise KeyError(f'Key `{key}` does not exist') 49 | 50 | def __contains__(self, key): 51 | return any(key in tag_data for tag_data in self._data.values()) 52 | 53 | def items(self): 54 | yield from itertools.chain(*map(lambda x :x.items(), self._data.values())) 55 | 56 | def get(self, key, default=None): 57 | """D.get(k[,d]) -> D[k] if k in D, else d. 
d defaults to None.""" 58 | for tag_data in self._data.values(): 59 | if key in tag_data: 60 | return tag_data[key] 61 | return default 62 | 63 | def clear(self): 64 | # clear all cache 65 | self._data = {} 66 | 67 | cache_settings = {} 68 | cache = TaggedCache(cache_settings) 69 | cache_count = {} 70 | 71 | def update_cache(k, tag, v): 72 | cache[k] = (tag, v) 73 | cnt = cache_count.get(k) 74 | if cnt is None: 75 | cnt = 0 76 | cache_count[k] = cnt 77 | else: 78 | cache_count[k] += 1 79 | def remove_cache(key): 80 | global cache 81 | if key == '*': 82 | cache = TaggedCache(cache_settings) 83 | elif key in cache: 84 | del cache[key] 85 | else: 86 | print(f"invalid {key}") -------------------------------------------------------------------------------- /locales/ru/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "Включить Shift+g для добавления выделенных узлов в группу", 4 | "tooltip": "Начиная с версии v1.2.39, можно использовать Ctrl+g" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "Включить Shift+r для выгрузки модели и кэша узлов" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "Включить Shift+m для переключения карты узлов" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "Включить Shift+Стрелки для выравнивания выделенных узлов и Shift+Ctrl+Alt+Стрелки для распределения узлов по горизонтали/вертикали", 14 | "tooltip": "Shift+Стрелки выравнивают выделенные узлы, Shift+Ctrl+Alt+Стрелки распределяют узлы по горизонтали/вертикали" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "Включить Shift+Ctrl+Стрелки для нормализации выделенных узлов", 18 | "tooltip": "Включить Shift+Ctrl+Лево для нормализации ширины и Shift+Ctrl+Право для нормализации высоты" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "Включить Alt+1~9 для вставки шаблонов узлов в рабочий процесс" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "Включить Стрелки для перехода к ближайшему узлу" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "Включить автоматическое вложение подкаталогов в контекстном меню" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "Включить превью миниатюр моделей" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "Включить A~Z сортировку новых узлов в контекстном меню" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "Использовать три быстрых кнопки в контекстном меню", 37 | "options": { 38 | "At the forefront": "В начале", 39 | "At the end": "В конце", 40 | "Disable": "Отключено" 41 | } 42 | }, 43 | "EasyUse_Nodes_Runtime": { 44 | "name": "Включить отображение времени выполнения узлов" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "Включить связывание точек получения и установки с родительским узлом" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "Управление режимом сортировки групп узлов", 51 | "tooltip": "По умолчанию автоматическая сортировка. 
При ручном режиме группы можно перемещать методом перетаскивания, и порядок будет сохранён.", 52 | "options": { 53 | "Auto sorting": "Автоматическая сортировка", 54 | "Manual drag&drop sorting": "Ручная сортировка перетаскиванием" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "Включить отображение ID узлов" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "Показывать только группы" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "Включить карту групп", 65 | "tooltip": "Необходимо обновить страницу для успешного обновления" 66 | } 67 | } -------------------------------------------------------------------------------- /web_version/v1/css/toast.css: -------------------------------------------------------------------------------- 1 | .easyuse-toast-container{ 2 | position: fixed; 3 | z-index: 99999; 4 | top: 0; 5 | left: 0; 6 | width: 100%; 7 | height: 0; 8 | display: flex; 9 | flex-direction: column; 10 | align-items: center; 11 | justify-content: start; 12 | padding:10px 0; 13 | } 14 | .easyuse-toast-container > div { 15 | position: relative; 16 | height: fit-content; 17 | padding: 4px; 18 | margin-top: -100px; /* re-set by JS */ 19 | opacity: 0; 20 | transition: all 0.33s ease-in-out; 21 | z-index: 3; 22 | } 23 | 24 | .easyuse-toast-container > div:last-child { 25 | z-index: 2; 26 | } 27 | 28 | .easyuse-toast-container > div:not(.-show) { 29 | z-index: 1; 30 | } 31 | 32 | .easyuse-toast-container > div.-show { 33 | opacity: 1; 34 | margin-top: 0px !important; 35 | } 36 | 37 | .easyuse-toast-container > div.-show { 38 | opacity: 1; 39 | transform: translateY(0%); 40 | } 41 | 42 | .easyuse-toast-container > div > div { 43 | position: relative; 44 | background: var(--comfy-menu-bg); 45 | color: var(--input-text); 46 | display: flex; 47 | flex-direction: row; 48 | align-items: center; 49 | justify-content: center; 50 | height: fit-content; 51 | box-shadow: 0 0 10px rgba(0, 0, 0, 0.88); 52 | padding: 9px 12px; 53 | border-radius: 8px; 54 | font-family: Arial, sans-serif; 55 | font-size: 14px; 56 | pointer-events: all; 57 | } 58 | 59 | .easyuse-toast-container > div > div > span { 60 | display: flex; 61 | flex-direction: row; 62 | align-items: center; 63 | justify-content: center; 64 | } 65 | 66 | .easyuse-toast-container > div > div > span svg { 67 | width: 16px; 68 | height: auto; 69 | margin-right: 8px; 70 | } 71 | 72 | .easyuse-toast-container > div > div > span svg[data-icon=info-circle]{ 73 | fill: var(--theme-color-light); 74 | } 75 | .easyuse-toast-container > div > div > span svg[data-icon=check-circle]{ 76 | fill: var(--success-color); 77 | } 78 | .easyuse-toast-container > div > div > span svg[data-icon=close-circle]{ 79 | fill: var(--error-color); 80 | } 81 | .easyuse-toast-container > div > div > span svg[data-icon=exclamation-circle]{ 82 | fill: var(--warning-color); 83 | } 84 | /*rotate animation*/ 85 | @keyframes rotate { 86 | 0% { 87 | transform: rotate(0deg); 88 | } 89 | 100% { 90 | transform: rotate(360deg); 91 | } 92 | } 93 | .easyuse-toast-container > div > div > span svg[data-icon=loading]{ 94 | fill: var(--theme-color); 95 | animation: rotate 1s linear infinite; 96 | } 97 | 98 | .easyuse-toast-container a { 99 | cursor: pointer; 100 | text-decoration: underline; 101 | color: var(--theme-color-light); 102 | margin-left: 4px; 103 | display: inline-block; 104 | line-height: 1; 105 | } 106 | 107 | .easyuse-toast-container a:hover { 108 | color: var(--theme-color-light); 109 | text-decoration: none; 110 | } 
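The locales/*/settings.json files in this dump all share one schema: each setting ID maps to an object with a "name", an optional "tooltip", and an optional "options" table whose keys are the English option values and whose values are the translated labels. A minimal sketch of reading one of these files follows; the load_setting_labels helper is hypothetical and illustrative only, not part of this repo:

    import json

    def load_setting_labels(locale_dir, setting_id):
        # Hypothetical helper: read one locale file, e.g. locales/fr/settings.json,
        # and return the pieces a settings UI needs for a single setting ID.
        with open(f"{locale_dir}/settings.json", encoding="utf-8") as f:
            settings = json.load(f)
        entry = settings.get(setting_id, {})
        return entry.get("name"), entry.get("tooltip"), entry.get("options", {})

    # For locales/fr, options maps "Auto sorting" -> "Tri automatique".
    name, tooltip, options = load_setting_labels("locales/fr", "EasyUse_NodesMap_Sorting")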
-------------------------------------------------------------------------------- /locales/fr/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "EasyUse_Hotkeys_AddGroup": { 3 | "name": "Activer Shift+g pour ajouter les nœuds sélectionnés à un groupe", 4 | "tooltip": "Depuis la v1.2.39, vous pouvez utiliser Ctrl+g à la place" 5 | }, 6 | "EasyUse_Hotkeys_cleanVRAMUsed": { 7 | "name": "Activer Shift+r pour décharger le cache du modèle et des nœuds" 8 | }, 9 | "EasyUse_Hotkeys_toggleNodesMap": { 10 | "name": "Activer Shift+m pour basculer la carte des nœuds" 11 | }, 12 | "EasyUse_Hotkeys_AlignSelectedNodes": { 13 | "name": "Activer Shift+Up/Down/Left/Right et Shift+Ctrl+Alt+Left/Right pour aligner les nœuds sélectionnés", 14 | "tooltip": "Shift+Up/Down/Left/Right peut aligner les nœuds sélectionnés, Shift+Ctrl+Alt+Left/Right peut les répartir horizontalement/verticalement" 15 | }, 16 | "EasyUse_Hotkeys_NormalizeSelectedNodes": { 17 | "name": "Activer Shift+Ctrl+Left/Right pour normaliser les nœuds sélectionnés", 18 | "tooltip": "Activer Shift+Ctrl+Left pour normaliser la largeur et Shift+Ctrl+Right pour normaliser la hauteur" 19 | }, 20 | "EasyUse_Hotkeys_NodesTemplate": { 21 | "name": "Activer Alt+1~9 pour coller les modèles de nœuds dans le workflow" 22 | }, 23 | "EasyUse_Hotkeys_JumpNearestNodes": { 24 | "name": "Activer Up/Down/Left/Right pour passer au nœud le plus proche" 25 | }, 26 | "EasyUse_ContextMenu_SubDirectories": { 27 | "name": "Activer l'imbrication automatique des sous-répertoires dans le menu contextuel" 28 | }, 29 | "EasyUse_ContextMenu_ModelsThumbnails": { 30 | "name": "Activer les vignettes d'aperçu du modèle" 31 | }, 32 | "EasyUse_ContextMenu_NodesSort": { 33 | "name": "Activer le tri A~Z des nouveaux nœuds dans le menu contextuel" 34 | }, 35 | "EasyUse_ContextMenu_QuickOptions": { 36 | "name": "Utiliser trois boutons rapides dans le menu contextuel", 37 | "options": { 38 | "At the forefront": "À l'avant-plan", 39 | "At the end": "À la fin", 40 | "Disable": "Désactiver" 41 | } 42 | }, 43 | "EasyUse_Nodes_Runtime": { 44 | "name": "Activer l'affichage du temps d'exécution des nœuds" 45 | }, 46 | "EasyUse_Nodes_ChainGetSet": { 47 | "name": "Activer le chaînage des points get et set avec le nœud parent" 48 | }, 49 | "EasyUse_NodesMap_Sorting": { 50 | "name": "Gérer le mode de tri des groupes de nœuds", 51 | "tooltip": "Tri automatique par défaut. 
Si défini sur manuel, les groupes peuvent être glissés-déposés et l'ordre sera sauvegardé.", 52 | "options": { 53 | "Auto sorting": "Tri automatique", 54 | "Manual drag&drop sorting": "Tri manuel par glisser-déposer" 55 | } 56 | }, 57 | "EasyUse_NodesMap_DisplayNodeID": { 58 | "name": "Activer l'affichage de l'ID du nœud" 59 | }, 60 | "EasyUse_NodesMap_DisplayGroupOnly": { 61 | "name": "Afficher uniquement les groupes" 62 | }, 63 | "EasyUse_NodesMap_Enable": { 64 | "name": "Activer la carte des groupes", 65 | "tooltip": "Vous devez actualiser la page pour mettre à jour" 66 | } 67 | } -------------------------------------------------------------------------------- /py/modules/human_parsing/simple_extractor_dataset.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | """ 5 | @Author : Peike Li 6 | @Contact : peike.li@yahoo.com 7 | @File : dataset.py 8 | @Time : 8/30/19 9:12 PM 9 | @Desc : Dataset Definition 10 | @License : This source code is licensed under the license found in the 11 | LICENSE file in the root directory of this source tree. 12 | """ 13 | 14 | import os 15 | 16 | import cv2 17 | import numpy as np 18 | from PIL import Image 19 | from torch.utils import data 20 | from .transforms import get_affine_transform 21 | 22 | 23 | class SimpleFolderDataset(data.Dataset): 24 | def __init__(self, root, input_size=[512, 512], transform=None): 25 | self.root = root 26 | self.input_size = input_size 27 | self.transform = transform 28 | self.aspect_ratio = input_size[1] * 1.0 / input_size[0] 29 | self.input_size = np.asarray(input_size) 30 | self.is_pil_image = False 31 | if isinstance(root, Image.Image): 32 | self.file_list = [root] 33 | self.is_pil_image = True 34 | elif os.path.isfile(root): 35 | self.file_list = [os.path.basename(root)] 36 | self.root = os.path.dirname(root) 37 | else: 38 | self.file_list = os.listdir(self.root) 39 | 40 | def __len__(self): 41 | return len(self.file_list) 42 | 43 | def _box2cs(self, box): 44 | x, y, w, h = box[:4] 45 | return self._xywh2cs(x, y, w, h) 46 | 47 | def _xywh2cs(self, x, y, w, h): 48 | center = np.zeros((2), dtype=np.float32) 49 | center[0] = x + w * 0.5 50 | center[1] = y + h * 0.5 51 | if w > self.aspect_ratio * h: 52 | h = w * 1.0 / self.aspect_ratio 53 | elif w < self.aspect_ratio * h: 54 | w = h * self.aspect_ratio 55 | scale = np.array([w, h], dtype=np.float32) 56 | return center, scale 57 | 58 | def __getitem__(self, index): 59 | if self.is_pil_image: 60 | img = np.asarray(self.file_list[index])[:, :, [2, 1, 0]] 61 | else: 62 | img_name = self.file_list[index] 63 | img_path = os.path.join(self.root, img_name) 64 | img = cv2.imread(img_path, cv2.IMREAD_COLOR) 65 | h, w, _ = img.shape 66 | 67 | # Get person center and scale 68 | person_center, s = self._box2cs([0, 0, w - 1, h - 1]) 69 | r = 0 70 | trans = get_affine_transform(person_center, s, r, self.input_size) 71 | input = cv2.warpAffine( 72 | img, 73 | trans, 74 | (int(self.input_size[1]), int(self.input_size[0])), 75 | flags=cv2.INTER_LINEAR, 76 | borderMode=cv2.BORDER_CONSTANT, 77 | borderValue=(0, 0, 0)) 78 | 79 | input = self.transform(input) 80 | meta = { 81 | 'center': person_center, 82 | 'height': h, 83 | 'width': w, 84 | 'scale': s, 85 | 'rotation': r 86 | } 87 | 88 | return input, meta 89 | -------------------------------------------------------------------------------- /py/modules/dit/config.py: -------------------------------------------------------------------------------- 1 | 
""" 2 | List of all DiT model types / settings 3 | """ 4 | sampling_settings = { 5 | "beta_schedule" : "sqrt_linear", 6 | "linear_start" : 0.0001, 7 | "linear_end" : 0.02, 8 | "timesteps" : 1000, 9 | } 10 | 11 | dit_conf = { 12 | "XL/2": { # DiT_XL_2 13 | "unet_config": { 14 | "depth" : 28, 15 | "num_heads" : 16, 16 | "patch_size" : 2, 17 | "hidden_size" : 1152, 18 | }, 19 | "sampling_settings" : sampling_settings, 20 | }, 21 | "XL/4": { # DiT_XL_4 22 | "unet_config": { 23 | "depth" : 28, 24 | "num_heads" : 16, 25 | "patch_size" : 4, 26 | "hidden_size" : 1152, 27 | }, 28 | "sampling_settings" : sampling_settings, 29 | }, 30 | "XL/8": { # DiT_XL_8 31 | "unet_config": { 32 | "depth" : 28, 33 | "num_heads" : 16, 34 | "patch_size" : 8, 35 | "hidden_size" : 1152, 36 | }, 37 | "sampling_settings" : sampling_settings, 38 | }, 39 | "L/2": { # DiT_L_2 40 | "unet_config": { 41 | "depth" : 24, 42 | "num_heads" : 16, 43 | "patch_size" : 2, 44 | "hidden_size" : 1024, 45 | }, 46 | "sampling_settings" : sampling_settings, 47 | }, 48 | "L/4": { # DiT_L_4 49 | "unet_config": { 50 | "depth" : 24, 51 | "num_heads" : 16, 52 | "patch_size" : 4, 53 | "hidden_size" : 1024, 54 | }, 55 | "sampling_settings" : sampling_settings, 56 | }, 57 | "L/8": { # DiT_L_8 58 | "unet_config": { 59 | "depth" : 24, 60 | "num_heads" : 16, 61 | "patch_size" : 8, 62 | "hidden_size" : 1024, 63 | }, 64 | "sampling_settings" : sampling_settings, 65 | }, 66 | "B/2": { # DiT_B_2 67 | "unet_config": { 68 | "depth" : 12, 69 | "num_heads" : 12, 70 | "patch_size" : 2, 71 | "hidden_size" : 768, 72 | }, 73 | "sampling_settings" : sampling_settings, 74 | }, 75 | "B/4": { # DiT_B_4 76 | "unet_config": { 77 | "depth" : 12, 78 | "num_heads" : 12, 79 | "patch_size" : 4, 80 | "hidden_size" : 768, 81 | }, 82 | "sampling_settings" : sampling_settings, 83 | }, 84 | "B/8": { # DiT_B_8 85 | "unet_config": { 86 | "depth" : 12, 87 | "num_heads" : 12, 88 | "patch_size" : 8, 89 | "hidden_size" : 768, 90 | }, 91 | "sampling_settings" : sampling_settings, 92 | }, 93 | "S/2": { # DiT_S_2 94 | "unet_config": { 95 | "depth" : 12, 96 | "num_heads" : 6, 97 | "patch_size" : 2, 98 | "hidden_size" : 384, 99 | }, 100 | "sampling_settings" : sampling_settings, 101 | }, 102 | "S/4": { # DiT_S_4 103 | "unet_config": { 104 | "depth" : 12, 105 | "num_heads" : 6, 106 | "patch_size" : 4, 107 | "hidden_size" : 384, 108 | }, 109 | "sampling_settings" : sampling_settings, 110 | }, 111 | "S/8": { # DiT_S_8 112 | "unet_config": { 113 | "depth" : 12, 114 | "num_heads" : 6, 115 | "patch_size" : 8, 116 | "hidden_size" : 384, 117 | }, 118 | "sampling_settings" : sampling_settings, 119 | }, 120 | } -------------------------------------------------------------------------------- /py/libs/conditioning.py: -------------------------------------------------------------------------------- 1 | from .utils import find_wildcards_seed, find_nearest_steps, is_linked_styles_selector 2 | from .log import log_node_warn 3 | from .translate import zh_to_en, has_chinese 4 | from .wildcards import process_with_loras 5 | from .adv_encode import advanced_encode 6 | 7 | from nodes import ConditioningConcat, ConditioningCombine, ConditioningAverage, ConditioningSetTimestepRange, CLIPTextEncode 8 | 9 | def prompt_to_cond(type, model, clip, clip_skip, lora_stack, text, prompt_token_normalization, prompt_weight_interpretation, a1111_prompt_style ,my_unique_id, prompt, easyCache, can_load_lora=True, steps=None, model_type=None): 10 | styles_selector = is_linked_styles_selector(prompt, my_unique_id, type) 11 | 
title = "Positive encoding" if type == 'positive' else "Negative encoding" 12 | 13 | # Translate cn to en 14 | if model_type not in ['hydit'] and text is not None and has_chinese(text): 15 | text = zh_to_en([text])[0] 16 | 17 | if model_type in ['hydit', 'flux', 'mochi']: 18 | log_node_warn(title + "...") 19 | embeddings_final, = CLIPTextEncode().encode(clip, text) if text is not None else (None,) 20 | 21 | return (embeddings_final, "", model, clip) 22 | 23 | log_node_warn(title + "...") 24 | 25 | positive_seed = find_wildcards_seed(my_unique_id, text, prompt) 26 | model, clip, text, cond_decode, show_prompt, pipe_lora_stack = process_with_loras( 27 | text, model, clip, type, positive_seed, can_load_lora, lora_stack, easyCache) 28 | wildcard_prompt = cond_decode if show_prompt or styles_selector else "" 29 | 30 | clipped = clip.clone() 31 | # 当clip模型不存在t5xxl时,可执行跳过层 32 | if not hasattr(clip.cond_stage_model, 't5xxl'): 33 | if clip_skip != 0: 34 | clipped.clip_layer(clip_skip) 35 | 36 | steps = steps if steps is not None else find_nearest_steps(my_unique_id, prompt) 37 | return (advanced_encode(clipped, text, prompt_token_normalization, 38 | prompt_weight_interpretation, w_max=1.0, 39 | apply_to_pooled='enable', 40 | a1111_prompt_style=a1111_prompt_style, steps=steps) if text is not None else None, wildcard_prompt, model, clipped) 41 | 42 | def set_cond(old_cond, new_cond, mode, average_strength, old_cond_start, old_cond_end, new_cond_start, new_cond_end): 43 | if not old_cond: 44 | return new_cond 45 | else: 46 | if mode == "replace": 47 | return new_cond 48 | elif mode == "concat": 49 | return ConditioningConcat().concat(new_cond, old_cond)[0] 50 | elif mode == "combine": 51 | return ConditioningCombine().combine(old_cond, new_cond)[0] 52 | elif mode == 'average': 53 | return ConditioningAverage().addWeighted(new_cond, old_cond, average_strength)[0] 54 | elif mode == 'timestep': 55 | cond_1 = ConditioningSetTimestepRange().set_range(old_cond, old_cond_start, old_cond_end)[0] 56 | cond_2 = ConditioningSetTimestepRange().set_range(new_cond, new_cond_start, new_cond_end)[0] 57 | return ConditioningCombine().combine(cond_1, cond_2)[0] -------------------------------------------------------------------------------- /prestartup_script.py: -------------------------------------------------------------------------------- 1 | import folder_paths 2 | import os 3 | def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions): 4 | for full_folder_path in full_folder_paths: 5 | folder_paths.add_model_folder_path(folder_name, full_folder_path) 6 | if folder_name in folder_paths.folder_names_and_paths: 7 | current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name] 8 | updated_extensions = current_extensions | extensions 9 | folder_paths.folder_names_and_paths[folder_name] = (current_paths, updated_extensions) 10 | else: 11 | folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions) 12 | 13 | image_suffixs = set([".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".tiff", ".svg", ".ico", ".apng", ".tif", ".hdr", ".exr"]) 14 | 15 | model_path = folder_paths.models_dir 16 | add_folder_path_and_extensions("ultralytics_bbox", [os.path.join(model_path, "ultralytics", "bbox")], folder_paths.supported_pt_extensions) 17 | add_folder_path_and_extensions("ultralytics_segm", [os.path.join(model_path, "ultralytics", "segm")], folder_paths.supported_pt_extensions) 18 | add_folder_path_and_extensions("ultralytics", [os.path.join(model_path, 
"ultralytics")], folder_paths.supported_pt_extensions) 19 | add_folder_path_and_extensions("mmdets_bbox", [os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions) 20 | add_folder_path_and_extensions("mmdets_segm", [os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions) 21 | add_folder_path_and_extensions("mmdets", [os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions) 22 | add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions) 23 | add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'}) 24 | add_folder_path_and_extensions("instantid", [os.path.join(model_path, "instantid")], folder_paths.supported_pt_extensions) 25 | add_folder_path_and_extensions("pulid", [os.path.join(model_path, "pulid")], folder_paths.supported_pt_extensions) 26 | add_folder_path_and_extensions("layer_model", [os.path.join(model_path, "layer_model")], folder_paths.supported_pt_extensions) 27 | add_folder_path_and_extensions("rembg", [os.path.join(model_path, "rembg")], folder_paths.supported_pt_extensions) 28 | add_folder_path_and_extensions("ipadapter", [os.path.join(model_path, "ipadapter")], folder_paths.supported_pt_extensions) 29 | add_folder_path_and_extensions("dynamicrafter_models", [os.path.join(model_path, "dynamicrafter_models")], folder_paths.supported_pt_extensions) 30 | add_folder_path_and_extensions("mediapipe", [os.path.join(model_path, "mediapipe")], set(['.tflite','.pth'])) 31 | add_folder_path_and_extensions("inpaint", [os.path.join(model_path, "inpaint")], folder_paths.supported_pt_extensions) 32 | add_folder_path_and_extensions("prompt_generator", [os.path.join(model_path, "prompt_generator")], folder_paths.supported_pt_extensions) 33 | add_folder_path_and_extensions("t5", [os.path.join(model_path, "t5")], folder_paths.supported_pt_extensions) 34 | add_folder_path_and_extensions("llm", [os.path.join(model_path, "LLM")], folder_paths.supported_pt_extensions) -------------------------------------------------------------------------------- /web_version/v1/css/easy.css: -------------------------------------------------------------------------------- 1 | 2 | .pysssss-workflow-popup{ 3 | min-width:220px!important; 4 | /*right:0px!important;*/ 5 | /*left:auto!important;*/ 6 | } 7 | body{ 8 | font-family: var(--font-family)!important; 9 | -webkit-font-smoothing: antialiased; 10 | -moz-osx-font-smoothing: grayscale; 11 | } 12 | textarea{ 13 | font-family: var(--font-family)!important; 14 | } 15 | 16 | .comfy-multiline-input{ 17 | background-color: transparent; 18 | border:1px solid var(--border-color); 19 | border-radius:8px; 20 | padding: 8px; 21 | font-size: 12px; 22 | } 23 | .comfy-modal { 24 | border:1px solid var(--border-color); 25 | box-shadow:none; 26 | backdrop-filter: blur(8px) brightness(120%); 27 | } 28 | .comfy-menu{ 29 | border-radius:16px; 30 | box-shadow:0 0 1px var(--descrip-text); 31 | backdrop-filter: blur(8px) brightness(120%); 32 | } 33 | .comfy-menu button,.comfy-modal button { 34 | font-size: 14px; 35 | padding:4px 0; 36 | margin-bottom:4px; 37 | } 38 | .comfy-menu button.comfy-settings-btn{ 39 | font-size: 12px; 40 | } 41 | .comfy-menu-btns { 42 | margin-bottom: 4px; 43 | } 44 | .comfy-menu-btns button,.comfy-list-actions button{ 45 | font-size: 10px; 46 | } 47 | .comfy-menu > button, 48 | .comfy-menu-btns button, 49 | .comfy-menu .comfy-list button, 50 | .comfy-modal button { 51 | border-width:1px; 52 | } 53 | 
.comfy-modal-content{ 54 | width: 100%; 55 | } 56 | 57 | 58 | dialog{ 59 | border:1px solid var(--border-color); 60 | background:transparent; 61 | backdrop-filter: blur(8px) brightness(120%); 62 | box-shadow:none; 63 | } 64 | .cm-title{ 65 | background-color:transparent!important; 66 | } 67 | .cm-notice-board{ 68 | border-radius:10px!important; 69 | border:1px solid var(--border-color)!important; 70 | } 71 | .cm-menu-container{ 72 | margin-bottom:50px!important; 73 | } 74 | hr{ 75 | border:1px solid var(--border-color); 76 | } 77 | #comfy-dev-save-api-button{ 78 | justify-content: center; 79 | } 80 | #shareButton{ 81 | background:linear-gradient(to left,var(--theme-color),var(--theme-color-light))!important; 82 | color:white!important; 83 | } 84 | #queue-button{ 85 | position:relative; 86 | overflow:hidden; 87 | min-height:30px; 88 | z-index:1; 89 | } 90 | 91 | #queue-button:after{ 92 | clear: both; 93 | content:attr(data-attr); 94 | background:green; 95 | color:#FFF; 96 | width:var(--process-bar-width); 97 | height:100%; 98 | position:absolute; 99 | top:0; 100 | left:0; 101 | z-index:0; 102 | text-align:center; 103 | display:flex; 104 | justify-content:center; 105 | align-items:center; 106 | } 107 | 108 | .litegraph .litemenu-entry.has_submenu { 109 | border-right: 2px solid var(--theme-color); 110 | } 111 | ::-webkit-scrollbar { 112 | width: 0em; 113 | } 114 | ::-webkit-scrollbar-track { 115 | background-color: transparent; 116 | } 117 | ::-webkit-scrollbar-thumb { 118 | background-color: transparent; 119 | border-radius: 2px; 120 | } 121 | ::-webkit-scrollbar-thumb:hover { 122 | background-color: transparent; 123 | } 124 | 125 | [data-theme="dark"] .workspace_manager .chakra-card{ 126 | background-color:var(--comfy-menu-bg)!important; 127 | } 128 | .workspace_manager .chakra-card{ 129 | width: 400px; 130 | } -------------------------------------------------------------------------------- /web_version/v1/css/selector.css: -------------------------------------------------------------------------------- 1 | .easyuse-prompt-styles{ 2 | overflow: auto; 3 | } 4 | .easyuse-prompt-styles .tools{ 5 | display:flex; 6 | justify-content:space-between; 7 | height:30px; 8 | padding-bottom:10px; 9 | border-bottom:2px solid var(--border-color); 10 | } 11 | .easyuse-prompt-styles .tools button.delete{ 12 | height:30px; 13 | border-radius: 8px; 14 | border: 2px solid var(--border-color); 15 | font-size:11px; 16 | background:var(--comfy-input-bg); 17 | color:var(--error-text); 18 | box-shadow:none; 19 | cursor:pointer; 20 | } 21 | .easyuse-prompt-styles .tools button.delete:hover{ 22 | filter: brightness(1.2); 23 | } 24 | .easyuse-prompt-styles .tools textarea.search{ 25 | flex:1; 26 | margin-left:10px; 27 | height:20px; 28 | line-height:20px; 29 | border-radius: 8px; 30 | border: 2px solid var(--border-color); 31 | font-size:11px; 32 | background:var(--comfy-input-bg); 33 | color:var(--input-text); 34 | box-shadow:none; 35 | padding:4px 10px; 36 | outline: none; 37 | resize: none; 38 | appearance:none; 39 | } 40 | .easyuse-prompt-styles-list{ 41 | list-style: none; 42 | padding: 0; 43 | margin: 0; 44 | min-height: 150px; 45 | height: calc(100% - 40px); 46 | overflow: auto; 47 | /*display: flex;*/ 48 | /*flex-wrap: wrap;*/ 49 | } 50 | .easyuse-prompt-styles-list.no-top{ 51 | height: auto; 52 | } 53 | 54 | .easyuse-prompt-styles-tag{ 55 | display: inline-block; 56 | vertical-align: middle; 57 | margin-top: 8px; 58 | margin-right: 8px; 59 | padding:4px; 60 | color: var(--input-text); 61 | background-color: 
var(--comfy-input-bg); 62 | border-radius: 8px; 63 | border: 2px solid var(--border-color); 64 | font-size:11px; 65 | cursor:pointer; 66 | } 67 | .easyuse-prompt-styles-tag.hide{ 68 | display:none; 69 | } 70 | .easyuse-prompt-styles-tag:hover{ 71 | filter: brightness(1.2); 72 | } 73 | .easyuse-prompt-styles-tag input{ 74 | --ring-color: transparent; 75 | position: relative; 76 | box-shadow: none; 77 | border: 2px solid var(--border-color); 78 | border-radius: 2px; 79 | background: linear-gradient(135deg, var(--comfy-menu-bg) 0%, var(--comfy-input-bg) 60%); 80 | } 81 | .easyuse-prompt-styles-tag input[type=checkbox]:checked{ 82 | border: 1px solid var(--theme-color-light); 83 | background-color: var(--theme-color-light); 84 | background-image: url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e"); 85 | } 86 | .easyuse-prompt-styles-tag input[type=checkbox]{ 87 | color-adjust: exact; 88 | display: inline-block; 89 | flex-shrink: 0; 90 | vertical-align: middle; 91 | appearance: none; 92 | border: 2px solid var(--border-color); 93 | background-origin: border-box; 94 | padding: 0; 95 | width: 1rem; 96 | height: 1rem; 97 | border-radius:4px; 98 | color:var(--theme-color-light); 99 | user-select: none; 100 | } 101 | .easyuse-prompt-styles-tag span{ 102 | margin:0 4px; 103 | vertical-align: middle; 104 | } 105 | #show_image_id{ 106 | width:128px; 107 | height:128px; 108 | } -------------------------------------------------------------------------------- /web_version/v1/js/image_chooser/preview.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../../scripts/app.js"; 2 | 3 | const kSampler = ['easy kSampler', 'easy kSamplerTiled', 'easy fullkSampler'] 4 | 5 | function display_preview_images(event) { 6 | const node = app.graph._nodes_by_id[event.detail.id]; 7 | if (node) { 8 | node.selected = new Set(); 9 | node.anti_selected = new Set(); 10 | const image = showImages(node, event.detail.urls); 11 | return {node,image,isKSampler:kSampler.includes(node.type)} 12 | } else { 13 | console.log(`Image Chooser Preview - failed to find ${event.detail.id}`) 14 | } 15 | } 16 | 17 | function showImages(node, urls) { 18 | node.imgs = []; 19 | urls.forEach((u)=> { 20 | const img = new Image(); 21 | node.imgs.push(img); 22 | img.onload = () => { app.graph.setDirtyCanvas(true); }; 23 | img.src = `/view?filename=${encodeURIComponent(u.filename)}&type=temp&subfolder=${app.getPreviewFormatParam()}` 24 | }) 25 | node.setSizeForImage?.(); 26 | return node.imgs 27 | } 28 | 29 | function drawRect(node, s, ctx) { 30 | const padding = 1; 31 | var rect; 32 | if (node.imageRects) { 33 | rect = node.imageRects[s]; 34 | } else { 35 | const y = node.imagey; 36 | rect = [padding,y+padding,node.size[0]-2*padding,node.size[1]-y-2*padding]; 37 | } 38 | ctx.strokeRect(rect[0]+padding, rect[1]+padding, rect[2]-padding*2, rect[3]-padding*2); 39 | } 40 | 41 | function additionalDrawBackground(node, ctx) { 42 | if (!node.imgs) return; 43 | if (node.imageRects) { 44 | for (let i = 0; i < node.imgs.length; i++) { 45 | // delete underlying image 46 | ctx.fillStyle = "#000"; 47 | ctx.fillRect(...node.imageRects[i]) 48 | // draw the new one 49 | const img = node.imgs[i]; 50 | const cellWidth = node.imageRects[i][2]; 51 | const cellHeight = node.imageRects[i][3]; 52 | 53 | let wratio = 
cellWidth/img.width; 54 | let hratio = cellHeight/img.height; 55 | var ratio = Math.min(wratio, hratio); 56 | 57 | let imgHeight = ratio * img.height; 58 | let imgWidth = ratio * img.width; 59 | 60 | const imgX = node.imageRects[i][0] + (cellWidth - imgWidth)/2; 61 | const imgY = node.imageRects[i][1] + (cellHeight - imgHeight)/2; 62 | const cell_padding = 2; 63 | ctx.drawImage(img, imgX+cell_padding, imgY+cell_padding, imgWidth-cell_padding*2, imgHeight-cell_padding*2); 64 | 65 | } 66 | } 67 | ctx.lineWidth = 2; 68 | ctx.strokeStyle = "green"; 69 | node?.selected?.forEach((s) => { drawRect(node,s, ctx) }) 70 | ctx.strokeStyle = "#F88"; 71 | node?.anti_selected?.forEach((s) => { drawRect(node,s, ctx) }) 72 | } 73 | 74 | function click_is_in_image(node, pos) { 75 | if (node.imgs?.length>1) { 76 | for (var i = 0; i < node.imgs.length; i++) { 77 | const dx = pos[0] - node.imageRects[i][0]; 78 | const dy = pos[1] - node.imageRects[i][1]; 79 | if ( dx > 0 && dx < node.imageRects[i][2] && 80 | dy > 0 && dy < node.imageRects[i][3] ) { 81 | return i; 82 | } 83 | } 84 | } else if (node.imgs?.length==1) { 85 | if (pos[1]>node.imagey) return 0; 86 | } 87 | return -1; 88 | } 89 | 90 | export { display_preview_images, additionalDrawBackground, click_is_in_image } -------------------------------------------------------------------------------- /web_version/v1/js/bookmark.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../scripts/app.js"; 2 | 3 | 4 | app.registerExtension({ 5 | name: "easy bookmark", 6 | registerCustomNodes() { 7 | class Bookmark { 8 | type = 'easy bookmark' 9 | title = "🔖"; 10 | 11 | slot_start_y = -20; 12 | 13 | ___collapsed_width = 0; 14 | 15 | get _collapsed_width() { 16 | return this.___collapsed_width; 17 | } 18 | 19 | set _collapsed_width(width){ 20 | const canvas = app.canvas ; 21 | const ctx = canvas.canvas.getContext('2d'); 22 | if(ctx){ 23 | const oldFont = ctx.font; 24 | ctx.font = canvas.title_text_font; 25 | this.___collapsed_width = 40 + ctx.measureText(this.title).width; 26 | ctx.font = oldFont; 27 | } 28 | } 29 | 30 | isVirtualNode = true; 31 | serialize_widgets = true; 32 | keypressBound = null; 33 | 34 | constructor() { 35 | 36 | this.addWidget('text', 'shortcut_key', '1', (value) => { 37 | value = value.trim()[0] || '1'; 38 | if(value !== ''){ 39 | this.title = "🔖 " + value; 40 | } 41 | },{ 42 | y: 8, 43 | }); 44 | this.addWidget('number', 'zoom', 1, (value) => {}, { 45 | y: 8 + LiteGraph.NODE_WIDGET_HEIGHT + 4, 46 | max: 2, 47 | min: 0.5, 48 | precision: 2, 49 | }); 50 | this.keypressBound = this.onKeypress.bind(this); 51 | } 52 | 53 | onAdded(){ 54 | setTimeout(_=>{ 55 | const value = this.widgets[0].value 56 | if(value){ 57 | this.title = "🔖 " + value; 58 | } 59 | },1) 60 | window.addEventListener("keydown", this.keypressBound); 61 | } 62 | 63 | onRemoved() { 64 | window.removeEventListener("keydown", this.keypressBound); 65 | } 66 | 67 | onKeypress(event){ 68 | const target = event.target; 69 | if (['input','textarea'].includes(target.localName)) { 70 | return; 71 | } 72 | if (this.widgets[0] && event.key.toLocaleLowerCase() === this.widgets[0].value.toLocaleLowerCase()) { 73 | this.canvasToBookmark(); 74 | } 75 | } 76 | 77 | canvasToBookmark() { 78 | const canvas = app.canvas; 79 | // ComfyUI seemed to break us again, but couldn't repro. No reason to not check, I guess. 
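// Descriptive note: canvasToBookmark recenters the graph view on this node and applies the zoom stored
// in the second widget; the optional chaining below guards against builds where canvas.ds is not yet initialized.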
80 | // https://github.com/rgthree/rgthree-comfy/issues/71 81 | if (canvas?.ds?.offset) { 82 | canvas.ds.offset[0] = -this.pos[0] + 16; 83 | canvas.ds.offset[1] = -this.pos[1] + 40; 84 | } 85 | if (canvas?.ds?.scale != null) { 86 | canvas.ds.scale = Number(this.widgets[1].value || 1); 87 | } 88 | canvas.setDirty(true, true); 89 | } 90 | } 91 | 92 | LiteGraph.registerNodeType( 93 | "easy bookmark", 94 | Object.assign(Bookmark,{ 95 | title: "Bookmark 🔖", 96 | }) 97 | ); 98 | 99 | Bookmark.category = "EasyUse/Util" 100 | } 101 | }) -------------------------------------------------------------------------------- /web_version/v1/js/common/i18n.js: -------------------------------------------------------------------------------- 1 | import {getLocale} from './utils.js' 2 | const locale = getLocale() 3 | 4 | const zhCN = { 5 | "Workflow created by": "工作流创建者", 6 | "Watch more video content": "观看更多视频内容", 7 | "Workflow Guide":"工作流指南", 8 | // ExtraMenu 9 | "💎 View Checkpoint Info...": "💎 查看 Checkpoint 信息...", 10 | "💎 View Lora Info...": "💎 查看 Lora 信息...", 11 | "🔃 Reload Node": "🔃 刷新节点", 12 | // ModelInfo 13 | "Updated At:": "最近更新:", 14 | "Created At:": "首次发布:", 15 | "✏️ Edit": "✏️ 编辑", 16 | "💾 Save": "💾 保存", 17 | "No notes": "当前还没有备注内容", 18 | "Saving Notes...": "正在保存备注...", 19 | "Type your notes here":"在这里输入备注内容", 20 | "ModelName":"模型名称", 21 | "Models Required":"所需模型", 22 | "Download Model": "下载模型", 23 | "Source Url": "模型源地址", 24 | "Notes": "备注", 25 | "Type": "类型", 26 | "Trained Words": "训练词", 27 | "BaseModel": "基础算法", 28 | "Details": "详情", 29 | "Description": "描述", 30 | "Download": "下载量", 31 | "Source": "来源", 32 | "Saving Preview...": "正在保存预览图...", 33 | "Saving Succeed":"保存成功", 34 | "Clean SuccessFully":"清理成功", 35 | "Clean Failed": "清理失败", 36 | "Saving Failed":"保存失败", 37 | "No COMBO link": "沒有找到COMBO连接", 38 | "Reboot ComfyUI":"重启ComfyUI", 39 | "Are you sure you'd like to reboot the server?": "是否要重启ComfyUI?", 40 | // GroupMap 41 | "Groups Map": "管理组", 42 | "Cleanup Of GPU Usage": "清理GPU占用", 43 | "Please stop all running tasks before cleaning GPU": "请在清理GPU之前停止所有运行中的任务", 44 | "Always": "启用中", 45 | "Bypass": "已忽略", 46 | "Never": "已停用", 47 | "Auto Sorting": "自动排序", 48 | "Toggle `Show/Hide` can set mode of group, LongPress can set group nodes to never": "点击`启用中/已忽略`可设置组模式, 长按可停用该组节点", 49 | // Quick 50 | "Enable ALT+1~9 to paste nodes from nodes template (ComfyUI-Easy-Use)": "启用ALT1~9从节点模板粘贴到工作流 (ComfyUI-Easy-Use)", 51 | "Enable process bar in queue button (ComfyUI-Easy-Use)": "启用提示词队列进度显示条 (ComfyUI-Easy-Use)", 52 | "Enable ContextMenu Auto Nest Subdirectories (ComfyUI-Easy-Use)": "启用上下文菜单自动嵌套子目录 (ComfyUI-Easy-Use)", 53 | "Enable tool bar fixed on the left-bottom (ComfyUI-Easy-Use)": "启用工具栏固定在左下角 (ComfyUI-Easy-Use)", 54 | "Too many thumbnails, have closed the display": "模型缩略图太多啦,为您关闭了显示", 55 | // selector 56 | "Empty All": "清空所有", 57 | "🔎 Type here to search styles ...": "🔎 在此处输入以搜索样式 ...", 58 | // account 59 | "Loading UserInfo...": "正在获取用户信息...", 60 | "Please set the APIKEY first": "请先设置APIKEY", 61 | "Setting APIKEY": "设置APIKEY", 62 | "Save Account Info": "保存账号信息", 63 | "Choose": "选择", 64 | "Delete": "删除", 65 | "Edit": "编辑", 66 | "At least one account is required": "删除失败: 至少需要一个账户", 67 | "APIKEY is not Empty": "APIKEY 不能为空", 68 | "Add Account": "添加账号", 69 | "Getting Your APIKEY": "获取您的APIKEY", 70 | // choosers 71 | "Choose Selected Images": "选择选中的图片", 72 | "Choose images to continue": "选择图片以继续", 73 | // seg 74 | "Background": "背景", 75 | "Hat": "帽子", 76 | "Hair": "头发", 77 | "Body": "身体", 78 | "Face": 
"脸部", 79 | "Clothes": "衣服", 80 | "Others": "其他", 81 | "Glove": "手套", 82 | "Sunglasses": "太阳镜", 83 | "Upper-clothes": "上衣", 84 | "Dress": "连衣裙", 85 | "Coat": "外套", 86 | "Socks": "袜子", 87 | "Pants": "裤子", 88 | "Jumpsuits": "连体衣", 89 | "Scarf": "围巾", 90 | "Skirt": "裙子", 91 | "Left-arm": "左臂", 92 | "Right-arm": "右臂", 93 | "Left-leg": "左腿", 94 | "Right-leg": "右腿", 95 | "Left-shoe": "左鞋", 96 | "Right-shoe": "右鞋", 97 | } 98 | export const $t = (key) => { 99 | const cn = zhCN[key] 100 | return locale === 'zh-CN' && cn ? cn : key 101 | } -------------------------------------------------------------------------------- /py/nodes/seed.py: -------------------------------------------------------------------------------- 1 | from ..config import MAX_SEED_NUM 2 | import hashlib 3 | import random 4 | 5 | class easySeed: 6 | @classmethod 7 | def INPUT_TYPES(s): 8 | return { 9 | "required": { 10 | "seed": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}), 11 | }, 12 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, 13 | } 14 | 15 | RETURN_TYPES = ("INT",) 16 | RETURN_NAMES = ("seed",) 17 | FUNCTION = "doit" 18 | 19 | CATEGORY = "EasyUse/Seed" 20 | 21 | def doit(self, seed=0, prompt=None, extra_pnginfo=None, my_unique_id=None): 22 | return seed, 23 | 24 | class seedList: 25 | @classmethod 26 | def INPUT_TYPES(s): 27 | return { 28 | "required": { 29 | "min_num": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}), 30 | "max_num": ("INT", {"default": MAX_SEED_NUM, "max": MAX_SEED_NUM, "min": 0 }), 31 | "method": (["random", "increment", "decrement"], {"default": "random"}), 32 | "total": ("INT", {"default": 1, "min": 1, "max": 100000}), 33 | "seed": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM,}), 34 | }, 35 | "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID"}, 36 | } 37 | 38 | RETURN_TYPES = ("INT", "INT") 39 | RETURN_NAMES = ("seed", "total") 40 | FUNCTION = "doit" 41 | DESCRIPTION = "Random number seed that can be used in a for loop, by connecting index and easy indexAny node to realize different seed values in the loop." 
42 | 43 | CATEGORY = "EasyUse/Seed" 44 | 45 | def doit(self, min_num, max_num, method, total, seed=0, prompt=None, extra_pnginfo=None, my_unique_id=None): 46 | random.seed(seed) 47 | 48 | seed_list = [] 49 | if min_num > max_num: 50 | min_num, max_num = max_num, min_num 51 | for i in range(total): 52 | if method == 'random': 53 | s = random.randint(min_num, max_num) 54 | elif method == 'increment': 55 | s = min_num + i 56 | if s > max_num: 57 | s = max_num 58 | elif method == 'decrement': 59 | s = max_num - i 60 | if s < min_num: 61 | s = min_num 62 | seed_list.append(s) 63 | return seed_list, total 64 | 65 | @classmethod 66 | def IS_CHANGED(s, seed, **kwargs): 67 | m = hashlib.sha256() 68 | m.update(str(seed).encode()) 69 | return m.digest().hex() 70 | 71 | # Global random seed 72 | class globalSeed: 73 | @classmethod 74 | def INPUT_TYPES(s): 75 | return { 76 | "required": { 77 | "value": ("INT", {"default": 0, "min": 0, "max": MAX_SEED_NUM}), 78 | "mode": ("BOOLEAN", {"default": True, "label_on": "control_before_generate", "label_off": "control_after_generate"}), 79 | "action": (["fixed", "increment", "decrement", "randomize", 80 | "increment for each node", "decrement for each node", "randomize for each node"], ), 81 | "last_seed": ("STRING", {"default": ""}), 82 | } 83 | } 84 | 85 | RETURN_TYPES = () 86 | FUNCTION = "doit" 87 | 88 | CATEGORY = "EasyUse/Seed" 89 | 90 | OUTPUT_NODE = True 91 | 92 | def doit(self, **kwargs): 93 | return {} 94 | 95 | 96 | NODE_CLASS_MAPPINGS = { 97 | "easy seed": easySeed, 98 | "easy seedList": seedList, 99 | "easy globalSeed": globalSeed, 100 | } 101 | 102 | NODE_DISPLAY_NAME_MAPPINGS = { 103 | "easy seed": "EasySeed", 104 | "easy seedList": "EasySeedList", 105 | "easy globalSeed": "EasyGlobalSeed", 106 | } -------------------------------------------------------------------------------- /py/modules/ipadapter/attention_processor.py: -------------------------------------------------------------------------------- 1 | import numbers 2 | from typing import Dict, Optional, Tuple 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from einops import rearrange 7 | 8 | class RMSNorm(nn.Module): 9 | def __init__(self, dim, eps: float, elementwise_affine: bool = True): 10 | super().__init__() 11 | 12 | self.eps = eps 13 | 14 | if isinstance(dim, numbers.Integral): 15 | dim = (dim,) 16 | 17 | self.dim = torch.Size(dim) 18 | 19 | if elementwise_affine: 20 | self.weight = nn.Parameter(torch.ones(dim)) 21 | else: 22 | self.weight = None 23 | 24 | def forward(self, hidden_states): 25 | input_dtype = hidden_states.dtype 26 | variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) 27 | hidden_states = hidden_states * torch.rsqrt(variance + self.eps) 28 | 29 | if self.weight is not None: 30 | # convert into half-precision if necessary 31 | if self.weight.dtype in [torch.float16, torch.bfloat16]: 32 | hidden_states = hidden_states.to(self.weight.dtype) 33 | hidden_states = hidden_states * self.weight 34 | else: 35 | hidden_states = hidden_states.to(input_dtype) 36 | 37 | return hidden_states 38 | 39 | class IPAFluxAttnProcessor2_0(nn.Module): 40 | """Attention processor used typically in processing the SD3-like self-attention projections.""" 41 | 42 | def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4, timestep_range=None): 43 | super().__init__() 44 | 45 | self.hidden_size = hidden_size # 3072 46 | self.cross_attention_dim = cross_attention_dim # 4096 47 | self.scale = scale 48 | self.num_tokens = num_tokens 49 | 50 | 
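# The two linear layers below project the image embedding (cross_attention_dim, 4096 here) into the
# attention hidden size (3072) to form the extra IP-adapter key/value states; __call__ then reshapes
# them to (B, heads, L, 128), applies the RMSNorm instances defined underneath, and runs
# scaled-dot-product attention against the incoming query, scaling the result by self.scale.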
self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) 51 | self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) 52 | 53 | self.norm_added_k = RMSNorm(128, eps=1e-5, elementwise_affine=False) 54 | self.norm_added_v = RMSNorm(128, eps=1e-5, elementwise_affine=False) 55 | self.timestep_range = timestep_range 56 | 57 | def __call__( 58 | self, 59 | num_heads, 60 | query, 61 | image_emb: torch.FloatTensor, 62 | t: torch.FloatTensor 63 | ) -> torch.FloatTensor: 64 | # only apply IPA if timestep is within range 65 | if self.timestep_range is not None: 66 | if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]: 67 | return None 68 | # `ip-adapter` projections 69 | ip_hidden_states = image_emb 70 | ip_hidden_states_key_proj = self.to_k_ip(ip_hidden_states) 71 | ip_hidden_states_value_proj = self.to_v_ip(ip_hidden_states) 72 | 73 | ip_hidden_states_key_proj = rearrange(ip_hidden_states_key_proj, 'B L (H D) -> B H L D', H=num_heads) 74 | ip_hidden_states_value_proj = rearrange(ip_hidden_states_value_proj, 'B L (H D) -> B H L D', H=num_heads) 75 | 76 | ip_hidden_states_key_proj = self.norm_added_k(ip_hidden_states_key_proj) 77 | ip_hidden_states_value_proj = self.norm_added_v(ip_hidden_states_value_proj) 78 | 79 | ip_hidden_states = F.scaled_dot_product_attention(query.to(image_emb.device).to(image_emb.dtype), 80 | ip_hidden_states_key_proj, 81 | ip_hidden_states_value_proj, 82 | dropout_p=0.0, is_causal=False) 83 | 84 | ip_hidden_states = rearrange(ip_hidden_states, "B H L D -> B L (H D)", H=num_heads) 85 | ip_hidden_states = ip_hidden_states.to(query.dtype).to(query.device) 86 | 87 | return self.scale * ip_hidden_states -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.3.4" 2 | 3 | import yaml 4 | import json 5 | import os 6 | import folder_paths 7 | import importlib 8 | 9 | cwd_path = os.path.dirname(os.path.realpath(__file__)) 10 | comfy_path = folder_paths.base_path 11 | 12 | NODE_CLASS_MAPPINGS = {} 13 | NODE_DISPLAY_NAME_MAPPINGS = {} 14 | 15 | importlib.import_module('.py.routes', __name__) 16 | importlib.import_module('.py.server', __name__) 17 | nodes_list = ["util", "seed", "prompt", "loaders", "adapter", "inpaint", "preSampling", "samplers", "fix", "pipe", "xyplot", "image", "logic", "api", "deprecated"] 18 | for module_name in nodes_list: 19 | imported_module = importlib.import_module(".py.nodes.{}".format(module_name), __name__) 20 | NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS} 21 | NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS} 22 | 23 | #Wildcards 24 | from .py.libs.wildcards import read_wildcard_dict 25 | wildcards_path = os.path.join(os.path.dirname(__file__), "wildcards") 26 | if not os.path.exists(wildcards_path): 27 | os.mkdir(wildcards_path) 28 | 29 | # Add custom wildcards example 30 | example_path = os.path.join(wildcards_path, "example.txt") 31 | if not os.path.exists(example_path): 32 | with open(example_path, 'w') as f: 33 | text = "blue\nred\nyellow\ngreen\nbrown\npink\npurple\norange\nblack\nwhite" 34 | f.write(text) 35 | read_wildcard_dict(wildcards_path) 36 | 37 | #Styles 38 | styles_path = os.path.join(os.path.dirname(__file__), "styles") 39 | samples_path = os.path.join(os.path.dirname(__file__), "styles", "samples") 40 | if 
os.path.exists(styles_path): 41 | if not os.path.exists(samples_path): 42 | os.mkdir(samples_path) 43 | else: 44 | os.mkdir(styles_path) 45 | os.mkdir(samples_path) 46 | 47 | # Add custom styles example 48 | example_path = os.path.join(styles_path, "your_styles.json.example") 49 | if not os.path.exists(example_path): 50 | import json 51 | data = [ 52 | { 53 | "name": "Example Style", 54 | "name_cn": "示例样式", 55 | "prompt": "(masterpiece), (best quality), (ultra-detailed), {prompt} ", 56 | "negative_prompt": "text, watermark, logo" 57 | }, 58 | ] 59 | # Write to file 60 | with open(example_path, 'w', encoding='utf-8') as f: 61 | json.dump(data, f, indent=4, ensure_ascii=False) 62 | 63 | 64 | web_default_version = 'v2' 65 | # web directory 66 | config_path = os.path.join(cwd_path, "config.yaml") 67 | if os.path.isfile(config_path): 68 | with open(config_path, 'r') as f: 69 | data = yaml.load(f, Loader=yaml.FullLoader) 70 | if data and "WEB_VERSION" in data: 71 | directory = f"web_version/{data['WEB_VERSION']}" 72 | with open(config_path, 'w') as f: 73 | yaml.dump(data, f) 74 | elif web_default_version != 'v1': 75 | if not data: 76 | data = {'WEB_VERSION': web_default_version} 77 | elif 'WEB_VERSION' not in data: 78 | data = {**data, 'WEB_VERSION': web_default_version} 79 | with open(config_path, 'w') as f: 80 | yaml.dump(data, f) 81 | directory = f"web_version/{web_default_version}" 82 | else: 83 | directory = f"web_version/v1" 84 | if not os.path.exists(os.path.join(cwd_path, directory)): 85 | print(f"web root {directory} not found, using default")  # use directory: data may be None or lack WEB_VERSION here 86 | directory = f"web_version/{web_default_version}" 87 | WEB_DIRECTORY = directory 88 | else: 89 | directory = f"web_version/{web_default_version}" 90 | WEB_DIRECTORY = directory 91 | 92 | __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', "WEB_DIRECTORY"] 93 | 94 | print(f'\033[34m[ComfyUI-Easy-Use] server: \033[0mv{__version__} \033[92mLoaded\033[0m') 95 | print(f'\033[34m[ComfyUI-Easy-Use] web root: \033[0m{os.path.join(cwd_path, directory)} \033[92mLoaded\033[0m') 96 | -------------------------------------------------------------------------------- /py/modules/kolors/text_encode.py: -------------------------------------------------------------------------------- 1 | import re 2 | import random 3 | import gc 4 | import comfy.model_management as mm 5 | from nodes import ConditioningConcat, ConditioningZeroOut, ConditioningSetTimestepRange, ConditioningCombine 6 | 7 | def chatglm3_text_encode(chatglm3_model, prompt, clean_gpu=False): 8 | device = mm.get_torch_device() 9 | offload_device = mm.unet_offload_device() 10 | if clean_gpu: 11 | mm.unload_all_models() 12 | mm.soft_empty_cache() 13 | # Function to randomly select an option from the brackets 14 | 15 | def choose_random_option(match): 16 | options = match.group(1).split('|') 17 | return random.choice(options) 18 | 19 | prompt = re.sub(r'\{([^{}]*)\}', choose_random_option, prompt) 20 | 21 | if "|" in prompt: 22 | prompt = prompt.split("|") 23 | 24 | if prompt is not None and isinstance(prompt, str): 25 | batch_size = 1 26 | elif prompt is not None and isinstance(prompt, list): 27 | batch_size = len(prompt) 28 | 29 | # Define tokenizers and text encoders 30 | tokenizer = chatglm3_model['tokenizer'] 31 | text_encoder = chatglm3_model['text_encoder'] 32 | text_encoder.to(device) 33 | text_inputs = tokenizer( 34 | prompt, 35 | padding="max_length", 36 | max_length=256, 37 | truncation=True, 38 | return_tensors="pt", 39 | ).to(device) 40 | 41 | output = text_encoder( 42 | 
input_ids=text_inputs['input_ids'], 43 | attention_mask=text_inputs['attention_mask'], 44 | position_ids=text_inputs['position_ids'], 45 | output_hidden_states=True) 46 | 47 | # [batch_size, 77, 4096] 48 | prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone() 49 | text_proj = output.hidden_states[-1][-1, :, :].clone() # [batch_size, 4096] 50 | bs_embed, seq_len, _ = prompt_embeds.shape 51 | prompt_embeds = prompt_embeds.repeat(1, 1, 1) 52 | prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1) 53 | 54 | bs_embed = text_proj.shape[0] 55 | text_proj = text_proj.repeat(1, 1).view(bs_embed, -1) 56 | text_encoder.to(offload_device) 57 | if clean_gpu: 58 | mm.soft_empty_cache() 59 | gc.collect() 60 | return [[prompt_embeds, {"pooled_output": text_proj},]] 61 | 62 | def chatglm3_adv_text_encode(chatglm3_model, text, clean_gpu=False): 63 | time_start = 0 64 | time_end = 1 65 | match = re.search(r'TIMESTEP.*$', text) 66 | if match: 67 | timestep = match.group() 68 | timestep = timestep.split(' ') 69 | timestep = timestep[0] 70 | text = text.replace(timestep, '') 71 | value = timestep.split(':') 72 | if len(value) >= 3: 73 | time_start = float(value[1]) 74 | time_end = float(value[2]) 75 | elif len(value) == 2: 76 | time_start = float(value[1]) 77 | time_end = 1 78 | elif len(value) == 1: 79 | time_start = 0.1 80 | time_end = 1 81 | 82 | 83 | pass3 = [x.strip() for x in text.split("BREAK")] 84 | pass3 = [x for x in pass3 if x != ''] 85 | 86 | if len(pass3) == 0: 87 | pass3 = [''] 88 | 89 | conditioning = None 90 | 91 | for text in pass3: 92 | cond = chatglm3_text_encode(chatglm3_model, text, clean_gpu) 93 | if conditioning is not None: 94 | conditioning = ConditioningConcat().concat(conditioning, cond)[0] 95 | else: 96 | conditioning = cond 97 | 98 | # setTimeStepRange 99 | if time_start > 0 or time_end < 1: 100 | conditioning_2, = ConditioningSetTimestepRange().set_range(conditioning, 0, time_start) 101 | conditioning_1, = ConditioningZeroOut().zero_out(conditioning) 102 | conditioning_1, = ConditioningSetTimestepRange().set_range(conditioning_1, time_start, time_end) 103 | conditioning, = ConditioningCombine().combine(conditioning_1, conditioning_2) 104 | 105 | return conditioning -------------------------------------------------------------------------------- /py/libs/controlnet.py: -------------------------------------------------------------------------------- 1 | import folder_paths 2 | import comfy.controlnet 3 | import comfy.model_management 4 | from nodes import NODE_CLASS_MAPPINGS 5 | 6 | union_controlnet_types = {"auto": -1, "openpose": 0, "depth": 1, "hed/pidi/scribble/ted": 2, "canny/lineart/anime_lineart/mlsd": 3, "normal": 4, "segment": 5, "tile": 6, "repaint": 7} 7 | 8 | class easyControlnet: 9 | def __init__(self): 10 | pass 11 | 12 | def apply(self, control_net_name, image, positive, negative, strength, start_percent=0, end_percent=1, control_net=None, scale_soft_weights=1, mask=None, union_type=None, easyCache=None, use_cache=True, model=None, vae=None): 13 | if strength == 0: 14 | return (positive, negative) 15 | 16 | # kolors controlnet patch 17 | from ..modules.kolors.loader import is_kolors_model, applyKolorsUnet 18 | if is_kolors_model(model): 19 | from ..modules.kolors.model_patch import patch_controlnet 20 | if control_net is None: 21 | with applyKolorsUnet(): 22 | control_net = easyCache.load_controlnet(control_net_name, scale_soft_weights, use_cache) 23 | control_net = patch_controlnet(model, control_net) 24 | else: 25 | if control_net is None: 26 | if 
easyCache is not None: 27 | control_net = easyCache.load_controlnet(control_net_name, scale_soft_weights, use_cache) 28 | else: 29 | controlnet_path = folder_paths.get_full_path("controlnet", control_net_name) 30 | control_net = comfy.controlnet.load_controlnet(controlnet_path) 31 | 32 | # union controlnet 33 | if union_type is not None: 34 | control_net = control_net.copy() 35 | type_number = union_controlnet_types[union_type] 36 | if type_number >= 0: 37 | control_net.set_extra_arg("control_type", [type_number]) 38 | else: 39 | control_net.set_extra_arg("control_type", []) 40 | 41 | if mask is not None: 42 | mask = mask.to(comfy.model_management.get_torch_device())  # move the mask to the active torch device (easyControlnet defines no self.device) 43 | 44 | if mask is not None and len(mask.shape) < 3: 45 | mask = mask.unsqueeze(0) 46 | 47 | control_hint = image.movedim(-1, 1) 48 | 49 | is_cond = True 50 | if negative is None: 51 | p = [] 52 | for t in positive: 53 | n = [t[0], t[1].copy()] 54 | c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent)) 55 | if 'control' in t[1]: 56 | c_net.set_previous_controlnet(t[1]['control']) 57 | n[1]['control'] = c_net 58 | n[1]['control_apply_to_uncond'] = True 59 | if mask is not None: 60 | n[1]['mask'] = mask 61 | n[1]['set_area_to_bounds'] = False 62 | p.append(n) 63 | positive = p 64 | else: 65 | cnets = {} 66 | out = [] 67 | for conditioning in [positive, negative]: 68 | c = [] 69 | for t in conditioning: 70 | d = t[1].copy() 71 | 72 | prev_cnet = d.get('control', None) 73 | if prev_cnet in cnets: 74 | c_net = cnets[prev_cnet] 75 | else: 76 | c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), vae) 77 | c_net.set_previous_controlnet(prev_cnet) 78 | cnets[prev_cnet] = c_net 79 | 80 | d['control'] = c_net 81 | d['control_apply_to_uncond'] = False 82 | 83 | if mask is not None: 84 | d['mask'] = mask 85 | d['set_area_to_bounds'] = False 86 | 87 | n = [t[0], d] 88 | c.append(n) 89 | out.append(c) 90 | positive = out[0] 91 | negative = out[1] 92 | 93 | return (positive, negative) -------------------------------------------------------------------------------- /py/modules/human_parsing/run_parsing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from PIL import Image 4 | from .parsing_api import onnx_inference 5 | from ...libs.utils import install_package 6 | 7 | class HumanParsing: 8 | def __init__(self, model_path): 9 | self.model_path = model_path 10 | self.session = None 11 | 12 | def __call__(self, input_image, mask_components): 13 | if self.session is None: 14 | install_package('onnxruntime') 15 | import onnxruntime as ort 16 | 17 | session_options = ort.SessionOptions() 18 | session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL 19 | session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL 20 | # session_options.add_session_config_entry('gpu_id', str(gpu_id)) 21 | self.session = ort.InferenceSession(self.model_path, sess_options=session_options, 22 | providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) 23 | 24 | parsed_image, mask = onnx_inference(self.session, input_image, mask_components) 25 | return parsed_image, mask 26 | 27 | 28 | class HumanParts: 29 | 30 | def __init__(self, model_path): 31 | self.model_path = model_path 32 | self.session = None 33 | # self.classes_dict = { 34 | # "background": 0, 35 | # "hair": 2, 36 | # "glasses": 4, 37 | # "top-clothes": 5, 38 | # "bottom-clothes": 9, 39 | # "torso-skin": 10, 40 | # "face": 13, 41 | # 
"left-arm": 14, 42 | # "right-arm": 15, 43 | # "left-leg": 16, 44 | # "right-leg": 17, 45 | # "left-foot": 18, 46 | # "right-foot": 19, 47 | # }, 48 | self.classes = [0, 13, 2, 4, 5, 9, 10, 14, 15, 16, 17, 18, 19] 49 | 50 | 51 | def __call__(self, input_image, mask_components): 52 | if self.session is None: 53 | install_package('onnxruntime') 54 | import onnxruntime as ort 55 | 56 | self.session = ort.InferenceSession(self.model_path, providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']) 57 | 58 | mask, = self.get_mask(self.session, input_image, 0, mask_components) 59 | return mask 60 | 61 | def get_mask(self, model, image, rotation, mask_components): 62 | image = image.squeeze(0) 63 | image_np = image.numpy() * 255 64 | 65 | pil_image = Image.fromarray(image_np.astype(np.uint8)) 66 | original_size = pil_image.size # to resize the mask later 67 | # resize to 512x512 as the model expects 68 | pil_image = pil_image.resize((512, 512)) 69 | center = (256, 256) 70 | 71 | if rotation != 0: 72 | pil_image = pil_image.rotate(rotation, center=center) 73 | 74 | # normalize the image 75 | image_np = np.array(pil_image).astype(np.float32) / 127.5 - 1 76 | image_np = np.expand_dims(image_np, axis=0) 77 | 78 | # use the onnx model to get the mask 79 | input_name = model.get_inputs()[0].name 80 | output_name = model.get_outputs()[0].name 81 | result = model.run([output_name], {input_name: image_np}) 82 | result = np.array(result[0]).argmax(axis=3).squeeze(0) 83 | 84 | score: int = 0 85 | 86 | mask = np.zeros_like(result) 87 | for class_index in mask_components: 88 | detected = result == self.classes[class_index] 89 | mask[detected] = 255 90 | score += mask.sum() 91 | 92 | # back to the original size 93 | mask_image = Image.fromarray(mask.astype(np.uint8), mode="L") 94 | if rotation != 0: 95 | mask_image = mask_image.rotate(-rotation, center=center) 96 | 97 | mask_image = mask_image.resize(original_size) 98 | 99 | # and back to numpy... 100 | mask = np.array(mask_image).astype(np.float32) / 255 101 | 102 | # add 2 dimensions to match the expected output 103 | mask = np.expand_dims(mask, axis=0) 104 | mask = np.expand_dims(mask, axis=0) 105 | # ensure to return a "binary mask_image" 106 | 107 | del image_np, result # free up memory, maybe not necessary 108 | 109 | return (torch.from_numpy(mask.astype(np.uint8)),) -------------------------------------------------------------------------------- /web_version/v1/js/image_chooser/prompt.js: -------------------------------------------------------------------------------- 1 | import { app } from "../../../../scripts/app.js"; 2 | 3 | function links_with(p, node_id, down, up) { 4 | const links_with = []; 5 | p.workflow.links.forEach((l) => { 6 | if (down && l[1]===node_id && !links_with.includes(l[3])) links_with.push(l[3]) 7 | if (up && l[3]===node_id && !links_with.includes(l[1])) links_with.push(l[1]) 8 | }); 9 | return links_with; 10 | } 11 | 12 | function _all_v_nodes(p, here_id) { 13 | /* 14 | Make a list of all downstream nodes. 15 | */ 16 | const downstream = []; 17 | const to_process = [here_id] 18 | while(to_process.length>0) { 19 | const id = to_process.pop(); 20 | downstream.push(id); 21 | to_process.push( 22 | ...links_with(p,id,true,false).filter((nid)=>{ 23 | return !(downstream.includes(nid) || to_process.includes(nid)) 24 | }) 25 | ) 26 | } 27 | 28 | /* 29 | Now all upstream nodes from any of the downstream nodes (except us). 
30 | Put us on the result list so we don't flow up through us 31 | */ 32 | to_process.push(...downstream.filter((n)=>{ return n!=here_id})); 33 | const back_upstream = [here_id]; 34 | while(to_process.length>0) { 35 | const id = to_process.pop(); 36 | back_upstream.push(id); 37 | to_process.push( 38 | ...links_with(p,id,false,true).filter((nid)=>{ 39 | return !(back_upstream.includes(nid) || to_process.includes(nid)) 40 | }) 41 | ) 42 | } 43 | 44 | const keep = []; 45 | keep.push(...downstream); 46 | keep.push(...back_upstream.filter((n)=>{return !keep.includes(n)})); 47 | 48 | console.log(`Nodes to keep: ${keep}`); 49 | return keep; 50 | } 51 | 52 | async function all_v_nodes(here_id) { 53 | const p = structuredClone(await app.graphToPrompt()); 54 | const all_nodes = []; 55 | p.workflow.nodes.forEach((node)=>{all_nodes.push(node.id)}) 56 | p.workflow.links = p.workflow.links.filter((l)=>{ return (all_nodes.includes(l[1]) && all_nodes.includes(l[3]))} ) 57 | return _all_v_nodes(p,here_id); 58 | } 59 | 60 | async function restart_from_here(here_id, go_down_to_chooser=false) { 61 | const p = structuredClone(await app.graphToPrompt()); 62 | /* 63 | Make a list of all nodes, and filter out links that are no longer valid 64 | */ 65 | const all_nodes = []; 66 | p.workflow.nodes.forEach((node)=>{all_nodes.push(node.id)}) 67 | p.workflow.links = p.workflow.links.filter((l)=>{ return (all_nodes.includes(l[1]) && all_nodes.includes(l[3]))} ) 68 | 69 | /* Move downstream to a chooser */ 70 | if (go_down_to_chooser) { 71 | while (!app.graph._nodes_by_id[here_id].isChooser) { 72 | here_id = links_with(p, here_id, true, false)[0]; 73 | } 74 | } 75 | 76 | const keep = _all_v_nodes(p, here_id); 77 | 78 | /* 79 | Filter p.workflow.nodes and p.workflow.links 80 | */ 81 | p.workflow.nodes = p.workflow.nodes.filter((node) => { 82 | if (node.id===here_id) node.inputs.forEach((i)=>{i.link=null}) // remove our upstream links 83 | return (keep.includes(node.id)) // only keep keepers 84 | }) 85 | p.workflow.links = p.workflow.links.filter((l) => {return (keep.includes(l[1]) && keep.includes(l[3]))}) 86 | 87 | /* 88 | Filter the p.output object to only include nodes we're keeping 89 | */ 90 | const new_output = {} 91 | for (const [key, value] of Object.entries(p.output)) { 92 | if (keep.includes(parseInt(key))) new_output[key] = value; 93 | } 94 | /* 95 | Filter the p.output entry for the start node to remove any list (ie link) inputs 96 | */ 97 | const new_inputs = {}; 98 | for (const [key, value] of Object.entries(new_output[here_id.toString()].inputs)) { 99 | if (!Array.isArray(value)) new_inputs[key] = value; 100 | } 101 | new_output[here_id.toString()].inputs = new_inputs; 102 | 103 | p.output = new_output; 104 | 105 | // temporarily hijack graph_to_prompt with a version that restores the old one but returns this prompt 106 | const gtp_was = app.graphToPrompt; 107 | app.graphToPrompt = () => { 108 | app.graphToPrompt = gtp_was; 109 | return p; 110 | } 111 | app.queuePrompt(0); 112 | } 113 | 114 | export { restart_from_here, all_v_nodes } -------------------------------------------------------------------------------- /py/libs/add_resources.py: -------------------------------------------------------------------------------- 1 | import urllib.parse 2 | from os import PathLike 3 | from aiohttp import web 4 | from aiohttp.web_urldispatcher import AbstractRoute, UrlDispatcher 5 | from server import PromptServer 6 | from pathlib import Path 7 | 8 | # File size limit (MB) 9 | max_size = 50 10 | def suffix_limiter(self: 
web.StaticResource, request: web.Request): 11 | suffixes = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".tiff", ".svg", ".ico", ".apng", ".tif", ".hdr", ".exr"} 12 | rel_url = request.match_info["filename"] 13 | try: 14 | filename = Path(rel_url) 15 | if filename.anchor: 16 | raise web.HTTPForbidden() 17 | filepath = self._directory.joinpath(filename).resolve() 18 | if filepath.exists() and filepath.suffix.lower() not in suffixes: 19 | raise web.HTTPForbidden(reason="File type is not allowed") 20 | finally: 21 | pass 22 | 23 | def filesize_limiter(self: web.StaticResource, request: web.Request): 24 | rel_url = request.match_info["filename"] 25 | try: 26 | filename = Path(rel_url) 27 | filepath = self._directory.joinpath(filename).resolve() 28 | if filepath.exists() and filepath.stat().st_size > max_size * 1024 * 1024: 29 | raise web.HTTPForbidden(reason="File size is too large") 30 | finally: 31 | pass 32 | class LimitResource(web.StaticResource): 33 | limiters = [] 34 | 35 | def push_limiter(self, limiter): 36 | self.limiters.append(limiter) 37 | 38 | async def _handle(self, request: web.Request) -> web.StreamResponse: 39 | try: 40 | for limiter in self.limiters: 41 | limiter(self, request) 42 | except (ValueError, FileNotFoundError) as error: 43 | raise web.HTTPNotFound() from error 44 | 45 | return await super()._handle(request) 46 | 47 | def __repr__(self) -> str: 48 | name = "'" + self.name + "'" if self.name is not None else "" 49 | return f'<LimitResource {name} -> {self._directory!r}>' 50 | 51 | class LimitRouter(web.StaticDef): 52 | def __repr__(self) -> str: 53 | info = [] 54 | for name, value in sorted(self.kwargs.items()): 55 | info.append(f", {name}={value!r}") 56 | return f'<LimitRouter {self.prefix} -> {self.path}{"".join(info)}>' 57 | 58 | def register(self, router: UrlDispatcher) -> list[AbstractRoute]: 59 | # resource = router.add_static(self.prefix, self.path, **self.kwargs) 60 | def add_static( 61 | self: UrlDispatcher, 62 | prefix: str, 63 | path: PathLike, 64 | *, 65 | name=None, 66 | expect_handler=None, 67 | chunk_size: int = 256 * 1024, 68 | show_index: bool = False, 69 | follow_symlinks: bool = False, 70 | append_version: bool = False, 71 | ) -> web.AbstractResource: 72 | assert prefix.startswith("/") 73 | if prefix.endswith("/"): 74 | prefix = prefix[:-1] 75 | resource = LimitResource( 76 | prefix, 77 | path, 78 | name=name, 79 | expect_handler=expect_handler, 80 | chunk_size=chunk_size, 81 | show_index=show_index, 82 | follow_symlinks=follow_symlinks, 83 | append_version=append_version, 84 | ) 85 | resource.push_limiter(suffix_limiter) 86 | resource.push_limiter(filesize_limiter) 87 | self.register_resource(resource) 88 | return resource 89 | resource = add_static(router, self.prefix, self.path, **self.kwargs) 90 | routes = resource.get_info().get("routes", {}) 91 | return list(routes.values()) 92 | 93 | def path_to_url(path): 94 | if not path: 95 | return path 96 | path = path.replace("\\", "/") 97 | if not path.startswith("/"): 98 | path = "/" + path 99 | while path.startswith("//"): 100 | path = path[1:] 101 | path = path.replace("//", "/") 102 | return path 103 | 104 | def add_static_resource(prefix, path, limit=False): 105 | app = PromptServer.instance.app 106 | prefix = path_to_url(prefix) 107 | prefix = urllib.parse.quote(prefix) 108 | prefix = path_to_url(prefix) 109 | if limit: 110 | route = LimitRouter(prefix, path, {"follow_symlinks": True}) 111 | else: 112 | route = web.static(prefix, path, follow_symlinks=True) 113 | app.add_routes([route]) 
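# Usage sketch (illustrative only; the prefix and path below are assumptions, not values used by this module): add_static_resource("/easyuse/styles", "/path/to/styles", limit=True) registers a static route that, because limit=True, serves only the whitelisted image suffixes and answers 403 Forbidden for files larger than max_size (50 MB).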
-------------------------------------------------------------------------------- /py/libs/colorfix.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from torch import Tensor 4 | from torch.nn import functional as F 5 | 6 | from torchvision.transforms import ToTensor, ToPILImage 7 | 8 | def adain_color_fix(target: Image, source: Image): 9 | # Convert images to tensors 10 | to_tensor = ToTensor() 11 | target_tensor = to_tensor(target).unsqueeze(0) 12 | source_tensor = to_tensor(source).unsqueeze(0) 13 | 14 | # Apply adaptive instance normalization 15 | result_tensor = adaptive_instance_normalization(target_tensor, source_tensor) 16 | 17 | # Convert tensor back to image 18 | to_image = ToPILImage() 19 | result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0)) 20 | 21 | return result_image 22 | 23 | def wavelet_color_fix(target: Image, source: Image): 24 | source = source.resize(target.size, resample=Image.Resampling.LANCZOS) 25 | 26 | # Convert images to tensors 27 | to_tensor = ToTensor() 28 | target_tensor = to_tensor(target).unsqueeze(0) 29 | source_tensor = to_tensor(source).unsqueeze(0) 30 | 31 | # Apply wavelet reconstruction 32 | result_tensor = wavelet_reconstruction(target_tensor, source_tensor) 33 | 34 | # Convert tensor back to image 35 | to_image = ToPILImage() 36 | result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0)) 37 | 38 | return result_image 39 | 40 | def calc_mean_std(feat: Tensor, eps=1e-5): 41 | """Calculate mean and std for adaptive_instance_normalization. 42 | Args: 43 | feat (Tensor): 4D tensor. 44 | eps (float): A small value added to the variance to avoid 45 | divide-by-zero. Default: 1e-5. 46 | """ 47 | size = feat.size() 48 | assert len(size) == 4, 'The input feature should be 4D tensor.' 49 | b, c = size[:2] 50 | feat_var = feat.view(b, c, -1).var(dim=2) + eps 51 | feat_std = feat_var.sqrt().view(b, c, 1, 1) 52 | feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1) 53 | return feat_mean, feat_std 54 | 55 | def adaptive_instance_normalization(content_feat:Tensor, style_feat:Tensor): 56 | """Adaptive instance normalization. 57 | Adjust the reference features to have the similar color and illuminations 58 | as those in the degradate features. 59 | Args: 60 | content_feat (Tensor): The reference feature. 61 | style_feat (Tensor): The degradate features. 62 | """ 63 | size = content_feat.size() 64 | style_mean, style_std = calc_mean_std(style_feat) 65 | content_mean, content_std = calc_mean_std(content_feat) 66 | normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) 67 | return normalized_feat * style_std.expand(size) + style_mean.expand(size) 68 | 69 | def wavelet_blur(image: Tensor, radius: int): 70 | """ 71 | Apply wavelet blur to the input tensor. 
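Note (added for clarity): the 3x3 binomial kernel below is applied with dilation=radius (an "a trous" convolution), so larger radii blur over a wider area while the kernel itself stays 3x3.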
72 | """ 73 | # input shape: (1, 3, H, W) 74 | # convolution kernel 75 | kernel_vals = [ 76 | [0.0625, 0.125, 0.0625], 77 | [0.125, 0.25, 0.125], 78 | [0.0625, 0.125, 0.0625], 79 | ] 80 | kernel = torch.tensor(kernel_vals, dtype=image.dtype, device=image.device) 81 | # add channel dimensions to the kernel to make it a 4D tensor 82 | kernel = kernel[None, None] 83 | # repeat the kernel across all input channels 84 | kernel = kernel.repeat(3, 1, 1, 1) 85 | image = F.pad(image, (radius, radius, radius, radius), mode='replicate') 86 | # apply convolution 87 | output = F.conv2d(image, kernel, groups=3, dilation=radius) 88 | return output 89 | 90 | def wavelet_decomposition(image: Tensor, levels=5): 91 | """ 92 | Apply wavelet decomposition to the input tensor. 93 | This function only returns the low frequency & the high frequency. 94 | """ 95 | high_freq = torch.zeros_like(image) 96 | for i in range(levels): 97 | radius = 2 ** i 98 | low_freq = wavelet_blur(image, radius) 99 | high_freq += (image - low_freq) 100 | image = low_freq 101 | 102 | return high_freq, low_freq 103 | 104 | def wavelet_reconstruction(content_feat:Tensor, style_feat:Tensor): 105 | """ 106 | Apply wavelet decomposition, so that the content will have the same color as the style. 107 | """ 108 | # calculate the wavelet decomposition of the content feature 109 | content_high_freq, content_low_freq = wavelet_decomposition(content_feat) 110 | del content_low_freq 111 | # calculate the wavelet decomposition of the style feature 112 | style_high_freq, style_low_freq = wavelet_decomposition(style_feat) 113 | del style_high_freq 114 | # reconstruct the content feature with the style's high frequency 115 | return content_high_freq + style_low_freq -------------------------------------------------------------------------------- /py/nodes/util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import folder_paths 3 | from ..libs.utils import AlwaysEqualProxy 4 | 5 | class showLoaderSettingsNames: 6 | @classmethod 7 | def INPUT_TYPES(s): 8 | return { 9 | "required": { 10 | "pipe": ("PIPE_LINE",), 11 | }, 12 | "hidden": { 13 | "unique_id": "UNIQUE_ID", 14 | "extra_pnginfo": "EXTRA_PNGINFO", 15 | }, 16 | } 17 | 18 | RETURN_TYPES = ("STRING", "STRING", "STRING",) 19 | RETURN_NAMES = ("ckpt_name", "vae_name", "lora_name") 20 | 21 | FUNCTION = "notify" 22 | OUTPUT_NODE = True 23 | 24 | CATEGORY = "EasyUse/Util" 25 | 26 | def notify(self, pipe, names=None, unique_id=None, extra_pnginfo=None): 27 | if unique_id and extra_pnginfo and "workflow" in extra_pnginfo: 28 | workflow = extra_pnginfo["workflow"] 29 | node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id), None) 30 | if node: 31 | ckpt_name = pipe['loader_settings']['ckpt_name'] if 'ckpt_name' in pipe['loader_settings'] else '' 32 | vae_name = pipe['loader_settings']['vae_name'] if 'vae_name' in pipe['loader_settings'] else '' 33 | lora_name = pipe['loader_settings']['lora_name'] if 'lora_name' in pipe['loader_settings'] else '' 34 | 35 | if ckpt_name: 36 | ckpt_name = os.path.basename(os.path.splitext(ckpt_name)[0]) 37 | if vae_name: 38 | vae_name = os.path.basename(os.path.splitext(vae_name)[0]) 39 | if lora_name: 40 | lora_name = os.path.basename(os.path.splitext(lora_name)[0]) 41 | 42 | names = "ckpt_name: " + ckpt_name + '\n' + "vae_name: " + vae_name + '\n' + "lora_name: " + lora_name 43 | node["widgets_values"] = names 44 | 45 | return {"ui": {"text": [names]}, "result": (ckpt_name, vae_name, lora_name)} 46 | 47 
| class sliderControl: 48 | @classmethod 49 | def INPUT_TYPES(s): 50 | return { 51 | "required": { 52 | "mode": (['ipadapter layer weights'],), 53 | "model_type": (['sdxl', 'sd1'],), 54 | }, 55 | "hidden": { 56 | "prompt": "PROMPT", 57 | "my_unique_id": "UNIQUE_ID", 58 | "extra_pnginfo": "EXTRA_PNGINFO", 59 | }, 60 | } 61 | 62 | RETURN_TYPES = ("STRING",) 63 | RETURN_NAMES = ("layer_weights",) 64 | 65 | FUNCTION = "control" 66 | 67 | CATEGORY = "EasyUse/Util" 68 | 69 | def control(self, mode, model_type, prompt=None, my_unique_id=None, extra_pnginfo=None): 70 | values = '' 71 | if my_unique_id in prompt: 72 | if 'values' in prompt[my_unique_id]["inputs"]: 73 | values = prompt[my_unique_id]["inputs"]['values'] 74 | 75 | return (values,) 76 | 77 | class setCkptName: 78 | @classmethod 79 | def INPUT_TYPES(cls): 80 | return {"required": { 81 | "ckpt_name": (folder_paths.get_filename_list("checkpoints"),), 82 | } 83 | } 84 | 85 | RETURN_TYPES = (AlwaysEqualProxy('*'),) 86 | RETURN_NAMES = ("ckpt_name",) 87 | FUNCTION = "set_name" 88 | CATEGORY = "EasyUse/Util" 89 | 90 | def set_name(self, ckpt_name): 91 | return (ckpt_name,) 92 | 93 | class setControlName: 94 | 95 | @classmethod 96 | def INPUT_TYPES(cls): 97 | return {"required": { 98 | "controlnet_name": (folder_paths.get_filename_list("controlnet"),), 99 | } 100 | } 101 | 102 | RETURN_TYPES = (AlwaysEqualProxy('*'),) 103 | RETURN_NAMES = ("controlnet_name",) 104 | FUNCTION = "set_name" 105 | CATEGORY = "EasyUse/Util" 106 | 107 | def set_name(self, controlnet_name): 108 | return (controlnet_name,) 109 | 110 | class setLoraName: 111 | 112 | @classmethod 113 | def INPUT_TYPES(cls): 114 | return {"required": { 115 | "lora_name": (folder_paths.get_filename_list("loras"),), 116 | } 117 | } 118 | 119 | RETURN_TYPES = (AlwaysEqualProxy('*'),) 120 | RETURN_NAMES = ("lora_name",) 121 | FUNCTION = "set_name" 122 | CATEGORY = "EasyUse/Util" 123 | 124 | def set_name(self, lora_name): 125 | return (lora_name,) 126 | 127 | 128 | NODE_CLASS_MAPPINGS = { 129 | "easy showLoaderSettingsNames": showLoaderSettingsNames, 130 | "easy sliderControl": sliderControl, 131 | "easy ckptNames": setCkptName, 132 | "easy controlnetNames": setControlName, 133 | "easy loraNames": setLoraName, 134 | } 135 | 136 | NODE_DISPLAY_NAME_MAPPINGS = { 137 | "easy showLoaderSettingsNames": "Show Loader Settings Names", 138 | "easy sliderControl": "Easy Slider Control", 139 | "easy ckptNames": "Ckpt Names", 140 | "easy controlnetNames": "ControlNet Names", 141 | "easy loraNames": "Lora Names", 142 | } 143 | -------------------------------------------------------------------------------- /py/modules/dit/pixArt/models/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.utils.checkpoint import checkpoint, checkpoint_sequential 5 | from collections.abc import Iterable 6 | from itertools import repeat 7 | 8 | def _ntuple(n): 9 | def parse(x): 10 | if isinstance(x, Iterable) and not isinstance(x, str): 11 | return x 12 | return tuple(repeat(x, n)) 13 | return parse 14 | 15 | to_1tuple = _ntuple(1) 16 | to_2tuple = _ntuple(2) 17 | 18 | def set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1): 19 | assert isinstance(model, nn.Module) 20 | 21 | def set_attr(module): 22 | module.grad_checkpointing = True 23 | module.fp32_attention = use_fp32_attention 24 | module.grad_checkpointing_step = gc_step 25 | model.apply(set_attr) 26 | 27 | def 
auto_grad_checkpoint(module, *args, **kwargs): 28 | if getattr(module, 'grad_checkpointing', False): 29 | if isinstance(module, Iterable): 30 | gc_step = module[0].grad_checkpointing_step 31 | return checkpoint_sequential(module, gc_step, *args, **kwargs) 32 | else: 33 | return checkpoint(module, *args, **kwargs) 34 | return module(*args, **kwargs) 35 | 36 | def checkpoint_sequential(functions, step, input, *args, **kwargs): 37 | 38 | # Hack for keyword-only parameter in a python 2.7-compliant way 39 | preserve = kwargs.pop('preserve_rng_state', True) 40 | if kwargs: 41 | raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)) 42 | 43 | def run_function(start, end, functions): 44 | def forward(input): 45 | for j in range(start, end + 1): 46 | input = functions[j](input, *args) 47 | return input 48 | return forward 49 | 50 | if isinstance(functions, torch.nn.Sequential): 51 | functions = list(functions.children()) 52 | 53 | # the last chunk has to be non-volatile 54 | end = -1 55 | segment = len(functions) // step 56 | for start in range(0, step * (segment - 1), step): 57 | end = start + step - 1 58 | input = checkpoint(run_function(start, end, functions), input, preserve_rng_state=preserve) 59 | return run_function(end + 1, len(functions) - 1, functions)(input) 60 | 61 | def get_rel_pos(q_size, k_size, rel_pos): 62 | """ 63 | Get relative positional embeddings according to the relative positions of 64 | query and key sizes. 65 | Args: 66 | q_size (int): size of query q. 67 | k_size (int): size of key k. 68 | rel_pos (Tensor): relative position embeddings (L, C). 69 | 70 | Returns: 71 | Extracted positional embeddings according to relative positions. 72 | """ 73 | max_rel_dist = int(2 * max(q_size, k_size) - 1) 74 | # Interpolate rel pos if needed. 75 | if rel_pos.shape[0] != max_rel_dist: 76 | # Interpolate rel pos. 77 | rel_pos_resized = F.interpolate( 78 | rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), 79 | size=max_rel_dist, 80 | mode="linear", 81 | ) 82 | rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) 83 | else: 84 | rel_pos_resized = rel_pos 85 | 86 | # Scale the coords with short length if shapes for q and k are different. 87 | q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) 88 | k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) 89 | relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) 90 | 91 | return rel_pos_resized[relative_coords.long()] 92 | 93 | def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): 94 | """ 95 | Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. 96 | https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 97 | Args: 98 | attn (Tensor): attention map. 99 | q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). 100 | rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. 101 | rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. 102 | q_size (Tuple): spatial sequence size of query q with (q_h, q_w). 103 | k_size (Tuple): spatial sequence size of key k with (k_h, k_w). 104 | 105 | Returns: 106 | attn (Tensor): attention map with added relative positional embeddings. 
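Shape walkthrough (illustrative): for q_size=(4, 4) and k_size=(4, 4), Rh and Rw each resolve to shape (4, 4, C), rel_h and rel_w to (B, 4, 4, 4), and attn keeps its (B, 16, 16) shape after the broadcast addition.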
107 | """ 108 | q_h, q_w = q_size 109 | k_h, k_w = k_size 110 | Rh = get_rel_pos(q_h, k_h, rel_pos_h) 111 | Rw = get_rel_pos(q_w, k_w, rel_pos_w) 112 | 113 | B, _, dim = q.shape 114 | r_q = q.reshape(B, q_h, q_w, dim) 115 | rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) 116 | rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) 117 | 118 | attn = ( 119 | attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] 120 | ).view(B, q_h * q_w, k_h * k_w) 121 | 122 | return attn 123 | -------------------------------------------------------------------------------- /py/nodes/api.py: -------------------------------------------------------------------------------- 1 | import comfy.utils 2 | from ..libs.api.fluxai import fluxaiAPI 3 | from ..libs.api.bizyair import bizyairAPI, encode_data 4 | from nodes import NODE_CLASS_MAPPINGS as ALL_NODE_CLASS_MAPPINGS 5 | 6 | class joyCaption2API: 7 | API_URL = f"/supernode/joycaption2" 8 | 9 | @classmethod 10 | def INPUT_TYPES(s): 11 | return { 12 | "required": { 13 | "image": ("IMAGE",), 14 | "do_sample": ([True, False],), 15 | "temperature": ( 16 | "FLOAT", 17 | { 18 | "default": 0.5, 19 | "min": 0.0, 20 | "max": 2.0, 21 | "step": 0.01, 22 | "round": 0.001, 23 | "display": "number", 24 | }, 25 | ), 26 | "max_tokens": ( 27 | "INT", 28 | { 29 | "default": 256, 30 | "min": 16, 31 | "max": 512, 32 | "step": 16, 33 | "display": "number", 34 | }, 35 | ), 36 | "caption_type": ( 37 | [ 38 | "Descriptive", 39 | "Descriptive (Informal)", 40 | "Training Prompt", 41 | "MidJourney", 42 | "Booru tag list", 43 | "Booru-like tag list", 44 | "Art Critic", 45 | "Product Listing", 46 | "Social Media Post", 47 | ], 48 | ), 49 | "caption_length": ( 50 | ["any", "very short", "short", "medium-length", "long", "very long"] 51 | + [str(i) for i in range(20, 261, 10)], 52 | ), 53 | "extra_options": ( 54 | "STRING", 55 | { 56 | "placeholder": "Extra options(e.g):\nIf there is a person/character in the image you must refer to them as {name}.", 57 | "tooltip": "Extra options for the model", 58 | "multiline": True, 59 | }, 60 | ), 61 | "name_input": ( 62 | "STRING", 63 | { 64 | "default": "", 65 | "tooltip": "Name input is only used if an Extra Option is selected that requires it.", 66 | }, 67 | ), 68 | "custom_prompt": ( 69 | "STRING", 70 | { 71 | "default": "", 72 | "multiline": True, 73 | }, 74 | ), 75 | }, 76 | "optional":{ 77 | "apikey_override": ("STRING", {"default": "", "forceInput": True, "tooltip":"Override the API key in the local config"}), 78 | } 79 | } 80 | 81 | RETURN_TYPES = ("STRING",) 82 | RETURN_NAMES = ("caption",) 83 | 84 | FUNCTION = "joycaption" 85 | OUTPUT_NODE = False 86 | 87 | CATEGORY = "EasyUse/API" 88 | 89 | def joycaption( 90 | self, 91 | image, 92 | do_sample, 93 | temperature, 94 | max_tokens, 95 | caption_type, 96 | caption_length, 97 | extra_options, 98 | name_input, 99 | custom_prompt, 100 | apikey_override=None 101 | ): 102 | pbar = comfy.utils.ProgressBar(100) 103 | pbar.update_absolute(10) 104 | SIZE_LIMIT = 1536 105 | _, w, h, c = image.shape 106 | if w > SIZE_LIMIT or h > SIZE_LIMIT: 107 | node_class = ALL_NODE_CLASS_MAPPINGS['easy imageScaleDownToSize'] 108 | image, = node_class().image_scale_down_to_size(image, SIZE_LIMIT, True) 109 | 110 | payload = { 111 | "image": None, 112 | "do_sample": do_sample == True, 113 | "temperature": temperature, 114 | "max_new_tokens": max_tokens, 115 | "caption_type": caption_type, 116 | "caption_length": caption_length, 117 | "extra_options": [extra_options], 118 | "name_input": name_input, 
119 | "custom_prompt": custom_prompt, 120 | } 121 | 122 | pbar.update_absolute(30) 123 | caption = bizyairAPI.joyCaption(payload, image, apikey_override, API_URL=self.API_URL) 124 | 125 | pbar.update_absolute(100) 126 | return (caption,) 127 | 128 | class joyCaption3API(joyCaption2API): 129 | API_URL = f"/supernode/joycaption3" 130 | 131 | NODE_CLASS_MAPPINGS = { 132 | "easy joyCaption2API": joyCaption2API, 133 | "easy joyCaption3API": joyCaption3API, 134 | } 135 | 136 | NODE_DISPLAY_NAME_MAPPINGS = { 137 | "easy joyCaption2API": "JoyCaption2 (BizyAIR)", 138 | "easy joyCaption3API": "JoyCaption3 (BizyAIR)", 139 | } -------------------------------------------------------------------------------- /py/libs/styleAlign.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from comfy.model_patcher import ModelPatcher 4 | from typing import Union 5 | 6 | T = torch.Tensor 7 | 8 | 9 | def exists(val): 10 | return val is not None 11 | 12 | 13 | def default(val, d): 14 | if exists(val): 15 | return val 16 | return d 17 | 18 | 19 | class StyleAlignedArgs: 20 | def __init__(self, share_attn: str) -> None: 21 | self.adain_keys = "k" in share_attn 22 | self.adain_values = "v" in share_attn 23 | self.adain_queries = "q" in share_attn 24 | 25 | share_attention: bool = True 26 | adain_queries: bool = True 27 | adain_keys: bool = True 28 | adain_values: bool = True 29 | 30 | 31 | def expand_first( 32 | feat: T, 33 | scale=1.0, 34 | ) -> T: 35 | """ 36 | Expand the first element so it has the same shape as the rest of the batch. 37 | """ 38 | b = feat.shape[0] 39 | feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1) 40 | if scale == 1: 41 | feat_style = feat_style.expand(2, b // 2, *feat.shape[1:]) 42 | else: 43 | feat_style = feat_style.repeat(1, b // 2, 1, 1, 1) 44 | feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1) 45 | return feat_style.reshape(*feat.shape) 46 | 47 | 48 | def concat_first(feat: T, dim=2, scale=1.0) -> T: 49 | """ 50 | concat the the feature and the style feature expanded above 51 | """ 52 | feat_style = expand_first(feat, scale=scale) 53 | return torch.cat((feat, feat_style), dim=dim) 54 | 55 | 56 | def calc_mean_std(feat, eps: float = 1e-5) -> "tuple[T, T]": 57 | feat_std = (feat.var(dim=-2, keepdims=True) + eps).sqrt() 58 | feat_mean = feat.mean(dim=-2, keepdims=True) 59 | return feat_mean, feat_std 60 | 61 | def adain(feat: T) -> T: 62 | feat_mean, feat_std = calc_mean_std(feat) 63 | feat_style_mean = expand_first(feat_mean) 64 | feat_style_std = expand_first(feat_std) 65 | feat = (feat - feat_mean) / feat_std 66 | feat = feat * feat_style_std + feat_style_mean 67 | return feat 68 | 69 | class SharedAttentionProcessor: 70 | def __init__(self, args: StyleAlignedArgs, scale: float): 71 | self.args = args 72 | self.scale = scale 73 | 74 | def __call__(self, q, k, v, extra_options): 75 | if self.args.adain_queries: 76 | q = adain(q) 77 | if self.args.adain_keys: 78 | k = adain(k) 79 | if self.args.adain_values: 80 | v = adain(v) 81 | if self.args.share_attention: 82 | k = concat_first(k, -2, scale=self.scale) 83 | v = concat_first(v, -2) 84 | 85 | return q, k, v 86 | 87 | 88 | def get_norm_layers( 89 | layer: nn.Module, 90 | norm_layers_: "dict[str, list[Union[nn.GroupNorm, nn.LayerNorm]]]", 91 | share_layer_norm: bool, 92 | share_group_norm: bool, 93 | ): 94 | if isinstance(layer, nn.LayerNorm) and share_layer_norm: 95 | norm_layers_["layer"].append(layer) 96 | if 
isinstance(layer, nn.GroupNorm) and share_group_norm: 97 | norm_layers_["group"].append(layer) 98 | else: 99 | for child_layer in layer.children(): 100 | get_norm_layers( 101 | child_layer, norm_layers_, share_layer_norm, share_group_norm 102 | ) 103 | 104 | 105 | def register_norm_forward( 106 | norm_layer: Union[nn.GroupNorm, nn.LayerNorm], 107 | ) -> Union[nn.GroupNorm, nn.LayerNorm]: 108 | if not hasattr(norm_layer, "orig_forward"): 109 | setattr(norm_layer, "orig_forward", norm_layer.forward) 110 | orig_forward = norm_layer.orig_forward 111 | 112 | def forward_(hidden_states: T) -> T: 113 | n = hidden_states.shape[-2] 114 | hidden_states = concat_first(hidden_states, dim=-2) 115 | hidden_states = orig_forward(hidden_states) # type: ignore 116 | return hidden_states[..., :n, :] 117 | 118 | norm_layer.forward = forward_ # type: ignore 119 | return norm_layer 120 | 121 | 122 | def register_shared_norm( 123 | model: ModelPatcher, 124 | share_group_norm: bool = True, 125 | share_layer_norm: bool = True, 126 | ): 127 | norm_layers = {"group": [], "layer": []} 128 | get_norm_layers(model.model, norm_layers, share_layer_norm, share_group_norm) 129 | print( 130 | f"Patching {len(norm_layers['group'])} group norms, {len(norm_layers['layer'])} layer norms." 131 | ) 132 | return [register_norm_forward(layer) for layer in norm_layers["group"]] + [ 133 | register_norm_forward(layer) for layer in norm_layers["layer"] 134 | ] 135 | 136 | 137 | SHARE_NORM_OPTIONS = ["both", "group", "layer", "disabled"] 138 | SHARE_ATTN_OPTIONS = ["q+k", "q+k+v", "disabled"] 139 | 140 | 141 | def styleAlignBatch(model, share_norm, share_attn, scale=1.0): 142 | m = model.clone() 143 | share_group_norm = share_norm in ["group", "both"] 144 | share_layer_norm = share_norm in ["layer", "both"] 145 | register_shared_norm(model, share_group_norm, share_layer_norm) 146 | args = StyleAlignedArgs(share_attn) 147 | m.set_model_attn1_patch(SharedAttentionProcessor(args, scale)) 148 | return m -------------------------------------------------------------------------------- /py/modules/dit/pixArt/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | List of all PixArt model types / settings 3 | """ 4 | sampling_settings = { 5 | "beta_schedule" : "sqrt_linear", 6 | "linear_start" : 0.0001, 7 | "linear_end" : 0.02, 8 | "timesteps" : 1000, 9 | } 10 | 11 | pixart_conf = { 12 | "PixArtMS_XL_2": { # models/PixArtMS 13 | "target": "PixArtMS", 14 | "unet_config": { 15 | "input_size" : 1024//8, 16 | "depth" : 28, 17 | "num_heads" : 16, 18 | "patch_size" : 2, 19 | "hidden_size" : 1152, 20 | "pe_interpolation": 2, 21 | }, 22 | "sampling_settings" : sampling_settings, 23 | }, 24 | "PixArtMS_Sigma_XL_2": { 25 | "target": "PixArtMSSigma", 26 | "unet_config": { 27 | "input_size" : 1024//8, 28 | "token_num" : 300, 29 | "depth" : 28, 30 | "num_heads" : 16, 31 | "patch_size" : 2, 32 | "hidden_size" : 1152, 33 | "micro_condition": False, 34 | "pe_interpolation": 2, 35 | "model_max_length": 300, 36 | }, 37 | "sampling_settings" : sampling_settings, 38 | }, 39 | "PixArtMS_Sigma_XL_2_900M": { 40 | "target": "PixArtMSSigma", 41 | "unet_config": { 42 | "input_size": 1024 // 8, 43 | "token_num": 300, 44 | "depth": 42, 45 | "num_heads": 16, 46 | "patch_size": 2, 47 | "hidden_size": 1152, 48 | "micro_condition": False, 49 | "pe_interpolation": 2, 50 | "model_max_length": 300, 51 | }, 52 | "sampling_settings": sampling_settings, 53 | }, 54 | "PixArtMS_Sigma_XL_2_2K": { 55 | "target": "PixArtMSSigma", 56 | 
"unet_config": { 57 | "input_size" : 2048//8, 58 | "token_num" : 300, 59 | "depth" : 28, 60 | "num_heads" : 16, 61 | "patch_size" : 2, 62 | "hidden_size" : 1152, 63 | "micro_condition": False, 64 | "pe_interpolation": 4, 65 | "model_max_length": 300, 66 | }, 67 | "sampling_settings" : sampling_settings, 68 | }, 69 | "PixArt_XL_2": { # models/PixArt 70 | "target": "PixArt", 71 | "unet_config": { 72 | "input_size" : 512//8, 73 | "token_num" : 120, 74 | "depth" : 28, 75 | "num_heads" : 16, 76 | "patch_size" : 2, 77 | "hidden_size" : 1152, 78 | "pe_interpolation": 1, 79 | }, 80 | "sampling_settings" : sampling_settings, 81 | }, 82 | } 83 | 84 | pixart_conf.update({ # controlnet models 85 | "ControlPixArtHalf": { 86 | "target": "ControlPixArtHalf", 87 | "unet_config": pixart_conf["PixArt_XL_2"]["unet_config"], 88 | "sampling_settings": pixart_conf["PixArt_XL_2"]["sampling_settings"], 89 | }, 90 | "ControlPixArtMSHalf": { 91 | "target": "ControlPixArtMSHalf", 92 | "unet_config": pixart_conf["PixArtMS_XL_2"]["unet_config"], 93 | "sampling_settings": pixart_conf["PixArtMS_XL_2"]["sampling_settings"], 94 | } 95 | }) 96 | 97 | pixart_res = { 98 | "PixArtMS_XL_2": { # models/PixArtMS 1024x1024 99 | '0.25': [512, 2048], '0.26': [512, 1984], '0.27': [512, 1920], '0.28': [512, 1856], 100 | '0.32': [576, 1792], '0.33': [576, 1728], '0.35': [576, 1664], '0.40': [640, 1600], 101 | '0.42': [640, 1536], '0.48': [704, 1472], '0.50': [704, 1408], '0.52': [704, 1344], 102 | '0.57': [768, 1344], '0.60': [768, 1280], '0.68': [832, 1216], '0.72': [832, 1152], 103 | '0.78': [896, 1152], '0.82': [896, 1088], '0.88': [960, 1088], '0.94': [960, 1024], 104 | '1.00': [1024,1024], '1.07': [1024, 960], '1.13': [1088, 960], '1.21': [1088, 896], 105 | '1.29': [1152, 896], '1.38': [1152, 832], '1.46': [1216, 832], '1.67': [1280, 768], 106 | '1.75': [1344, 768], '2.00': [1408, 704], '2.09': [1472, 704], '2.40': [1536, 640], 107 | '2.50': [1600, 640], '2.89': [1664, 576], '3.00': [1728, 576], '3.11': [1792, 576], 108 | '3.62': [1856, 512], '3.75': [1920, 512], '3.88': [1984, 512], '4.00': [2048, 512], 109 | }, 110 | "PixArt_XL_2": { # models/PixArt 512x512 111 | '0.25': [256,1024], '0.26': [256, 992], '0.27': [256, 960], '0.28': [256, 928], 112 | '0.32': [288, 896], '0.33': [288, 864], '0.35': [288, 832], '0.40': [320, 800], 113 | '0.42': [320, 768], '0.48': [352, 736], '0.50': [352, 704], '0.52': [352, 672], 114 | '0.57': [384, 672], '0.60': [384, 640], '0.68': [416, 608], '0.72': [416, 576], 115 | '0.78': [448, 576], '0.82': [448, 544], '0.88': [480, 544], '0.94': [480, 512], 116 | '1.00': [512, 512], '1.07': [512, 480], '1.13': [544, 480], '1.21': [544, 448], 117 | '1.29': [576, 448], '1.38': [576, 416], '1.46': [608, 416], '1.67': [640, 384], 118 | '1.75': [672, 384], '2.00': [704, 352], '2.09': [736, 352], '2.40': [768, 320], 119 | '2.50': [800, 320], '2.89': [832, 288], '3.00': [864, 288], '3.11': [896, 288], 120 | '3.62': [928, 256], '3.75': [960, 256], '3.88': [992, 256], '4.00': [1024,256] 121 | }, 122 | "PixArtMS_Sigma_XL_2_2K": { 123 | '0.25': [1024, 4096], '0.26': [1024, 3968], '0.27': [1024, 3840], '0.28': [1024, 3712], 124 | '0.32': [1152, 3584], '0.33': [1152, 3456], '0.35': [1152, 3328], '0.40': [1280, 3200], 125 | '0.42': [1280, 3072], '0.48': [1408, 2944], '0.50': [1408, 2816], '0.52': [1408, 2688], 126 | '0.57': [1536, 2688], '0.60': [1536, 2560], '0.68': [1664, 2432], '0.72': [1664, 2304], 127 | '0.78': [1792, 2304], '0.82': [1792, 2176], '0.88': [1920, 2176], '0.94': [1920, 2048], 128 | '1.00': [2048, 
2048], '1.07': [2048, 1920], '1.13': [2176, 1920], '1.21': [2176, 1792], 129 | '1.29': [2304, 1792], '1.38': [2304, 1664], '1.46': [2432, 1664], '1.67': [2560, 1536], 130 | '1.75': [2688, 1536], '2.00': [2816, 1408], '2.09': [2944, 1408], '2.40': [3072, 1280], 131 | '2.50': [3200, 1280], '2.89': [3328, 1152], '3.00': [3456, 1152], '3.11': [3584, 1152], 132 | '3.62': [3712, 1024], '3.75': [3840, 1024], '3.88': [3968, 1024], '4.00': [4096, 1024] 133 | } 134 | } 135 | # These should be the same 136 | pixart_res.update({ 137 | "PixArtMS_Sigma_XL_2": pixart_res["PixArtMS_XL_2"], 138 | "PixArtMS_Sigma_XL_2_512": pixart_res["PixArt_XL_2"], 139 | }) -------------------------------------------------------------------------------- /py/modules/human_parsing/transforms.py: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Copyright (c) Microsoft 3 | # Licensed under the MIT License. 4 | # Written by Bin Xiao (Bin.Xiao@microsoft.com) 5 | # ------------------------------------------------------------------------------ 6 | 7 | from __future__ import absolute_import 8 | from __future__ import division 9 | from __future__ import print_function 10 | 11 | import numpy as np 12 | import cv2 13 | import torch 14 | 15 | class BRG2Tensor_transform(object): 16 | def __call__(self, pic): 17 | img = torch.from_numpy(pic.transpose((2, 0, 1))) 18 | if isinstance(img, torch.ByteTensor): 19 | return img.float() 20 | else: 21 | return img 22 | 23 | class BGR2RGB_transform(object): 24 | def __call__(self, tensor): 25 | return tensor[[2,1,0],:,:] 26 | 27 | def flip_back(output_flipped, matched_parts): 28 | ''' 29 | output_flipped: numpy.ndarray(batch_size, num_joints, height, width) 30 | ''' 31 | assert output_flipped.ndim == 4,\ 32 | 'output_flipped should be [batch_size, num_joints, height, width]' 33 | 34 | output_flipped = output_flipped[:, :, :, ::-1] 35 | 36 | for pair in matched_parts: 37 | tmp = output_flipped[:, pair[0], :, :].copy() 38 | output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :] 39 | output_flipped[:, pair[1], :, :] = tmp 40 | 41 | return output_flipped 42 | 43 | 44 | def fliplr_joints(joints, joints_vis, width, matched_parts): 45 | """ 46 | flip coords 47 | """ 48 | # Flip horizontal 49 | joints[:, 0] = width - joints[:, 0] - 1 50 | 51 | # Change left-right parts 52 | for pair in matched_parts: 53 | joints[pair[0], :], joints[pair[1], :] = \ 54 | joints[pair[1], :], joints[pair[0], :].copy() 55 | joints_vis[pair[0], :], joints_vis[pair[1], :] = \ 56 | joints_vis[pair[1], :], joints_vis[pair[0], :].copy() 57 | 58 | return joints*joints_vis, joints_vis 59 | 60 | 61 | def transform_preds(coords, center, scale, input_size): 62 | target_coords = np.zeros(coords.shape) 63 | trans = get_affine_transform(center, scale, 0, input_size, inv=1) 64 | for p in range(coords.shape[0]): 65 | target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans) 66 | return target_coords 67 | 68 | def transform_parsing(pred, center, scale, width, height, input_size): 69 | 70 | trans = get_affine_transform(center, scale, 0, input_size, inv=1) 71 | target_pred = cv2.warpAffine( 72 | pred, 73 | trans, 74 | (int(width), int(height)), #(int(width), int(height)), 75 | flags=cv2.INTER_NEAREST, 76 | borderMode=cv2.BORDER_CONSTANT, 77 | borderValue=(0)) 78 | 79 | return target_pred 80 | 81 | def transform_logits(logits, center, scale, width, height, input_size): 82 | 83 | trans = 
get_affine_transform(center, scale, 0, input_size, inv=1) 84 | channel = logits.shape[2] 85 | target_logits = [] 86 | for i in range(channel): 87 | target_logit = cv2.warpAffine( 88 | logits[:,:,i], 89 | trans, 90 | (int(width), int(height)), #(int(width), int(height)), 91 | flags=cv2.INTER_LINEAR, 92 | borderMode=cv2.BORDER_CONSTANT, 93 | borderValue=(0)) 94 | target_logits.append(target_logit) 95 | target_logits = np.stack(target_logits,axis=2) 96 | 97 | return target_logits 98 | 99 | 100 | def get_affine_transform(center, 101 | scale, 102 | rot, 103 | output_size, 104 | shift=np.array([0, 0], dtype=np.float32), 105 | inv=0): 106 | if not isinstance(scale, np.ndarray) and not isinstance(scale, list): 107 | print(scale) 108 | scale = np.array([scale, scale]) 109 | 110 | scale_tmp = scale 111 | 112 | src_w = scale_tmp[0] 113 | dst_w = output_size[1] 114 | dst_h = output_size[0] 115 | 116 | rot_rad = np.pi * rot / 180 117 | src_dir = get_dir([0, src_w * -0.5], rot_rad) 118 | dst_dir = np.array([0, (dst_w-1) * -0.5], np.float32) 119 | 120 | src = np.zeros((3, 2), dtype=np.float32) 121 | dst = np.zeros((3, 2), dtype=np.float32) 122 | src[0, :] = center + scale_tmp * shift 123 | src[1, :] = center + src_dir + scale_tmp * shift 124 | dst[0, :] = [(dst_w-1) * 0.5, (dst_h-1) * 0.5] 125 | dst[1, :] = np.array([(dst_w-1) * 0.5, (dst_h-1) * 0.5]) + dst_dir 126 | 127 | src[2:, :] = get_3rd_point(src[0, :], src[1, :]) 128 | dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :]) 129 | 130 | if inv: 131 | trans = cv2.getAffineTransform(np.float32(dst), np.float32(src)) 132 | else: 133 | trans = cv2.getAffineTransform(np.float32(src), np.float32(dst)) 134 | 135 | return trans 136 | 137 | 138 | def affine_transform(pt, t): 139 | new_pt = np.array([pt[0], pt[1], 1.]).T 140 | new_pt = np.dot(t, new_pt) 141 | return new_pt[:2] 142 | 143 | 144 | def get_3rd_point(a, b): 145 | direct = a - b 146 | return b + np.array([-direct[1], direct[0]], dtype=np.float32) 147 | 148 | 149 | def get_dir(src_point, rot_rad): 150 | sn, cs = np.sin(rot_rad), np.cos(rot_rad) 151 | 152 | src_result = [0, 0] 153 | src_result[0] = src_point[0] * cs - src_point[1] * sn 154 | src_result[1] = src_point[0] * sn + src_point[1] * cs 155 | 156 | return src_result 157 | 158 | 159 | def crop(img, center, scale, output_size, rot=0): 160 | trans = get_affine_transform(center, scale, rot, output_size) 161 | 162 | dst_img = cv2.warpAffine(img, 163 | trans, 164 | (int(output_size[1]), int(output_size[0])), 165 | flags=cv2.INTER_LINEAR) 166 | 167 | return dst_img 168 | -------------------------------------------------------------------------------- /web_version/v2/assets/vueuse-CqzKat4r.js: -------------------------------------------------------------------------------- 1 | import{I as e,J as t,o as n,A as o,j as s,z as l,U as u,i,V as a,W as r,e as c}from"./vue-DAoaJb2i.js";function v(n){return!!e()&&(t(n),!0)}const d="undefined"!=typeof window&&"undefined"!=typeof document;"undefined"!=typeof WorkerGlobalScope&&(globalThis,WorkerGlobalScope);const f=e=>null!=e,p=Object.prototype.toString,m=e=>"[object Object]"===p.call(e),h=()=>{};function g(e){return Array.isArray(e)?e:[e]}function w(e,t=!0,s){l()?n(e,s):t?e():o(e)}const y=d?window:void 0;function b(e){var t;const n=a(e);return null!=(t=null==n?void 0:n.$el)?t:n}function O(...e){const t=[],n=()=>{t.forEach((e=>e())),t.length=0},o=i((()=>{const t=g(a(e[0])).filter((e=>null!=e));return t.every((e=>"string"!=typeof e))?t:void 0})),l=(u=([e,o,s,l])=>{if(n(),!(null==e?void 0:e.length)||!(null==o?void 
0:o.length)||!(null==s?void 0:s.length))return;const u=m(l)?{...l}:l;t.push(...e.flatMap((e=>o.flatMap((t=>s.map((n=>((e,t,n,o)=>(e.addEventListener(t,n,o),()=>e.removeEventListener(t,n,o)))(e,t,n,u))))))))},r={flush:"post"},s((()=>{var t,n;return[null!=(n=null==(t=o.value)?void 0:t.map((e=>b(e))))?n:[y].filter((e=>null!=e)),g(a(o.value?e[1]:e[0])),g(c(o.value?e[2]:e[1])),a(o.value?e[3]:e[2])]}),u,{...r,immediate:!0}));var u,r;return v(n),()=>{l(),n()}}function x(e){const t=function(){const e=u(!1),t=l();return t&&n((()=>{e.value=!0}),t),e}();return i((()=>(t.value,Boolean(e()))))}function E(e,t,n={}){const{window:o=y,...l}=n;let u;const r=x((()=>o&&"MutationObserver"in o)),c=()=>{u&&(u.disconnect(),u=void 0)},d=i((()=>{const t=g(a(e)).map(b).filter(f);return new Set(t)})),p=s((()=>d.value),(e=>{c(),r.value&&e.size&&(u=new MutationObserver(t),e.forEach((e=>u.observe(e,l))))}),{immediate:!0,flush:"post"}),m=()=>{p(),c()};return v(m),{isSupported:r,stop:m,takeRecords:()=>null==u?void 0:u.takeRecords()}}function R(e,t,n={}){const{window:o=y,...l}=n;let u;const r=x((()=>o&&"ResizeObserver"in o)),c=()=>{u&&(u.disconnect(),u=void 0)},d=i((()=>{const t=a(e);return Array.isArray(t)?t.map((e=>b(e))):[b(t)]})),f=s(d,(e=>{if(c(),r.value&&o){u=new ResizeObserver(t);for(const t of e)t&&u.observe(t,l)}}),{immediate:!0,flush:"post"}),p=()=>{c(),f()};return v(p),{isSupported:r,stop:p}}function S(e,t={}){const{reset:n=!0,windowResize:o=!0,windowScroll:l=!0,immediate:i=!0,updateTiming:a="sync"}=t,r=u(0),c=u(0),v=u(0),d=u(0),f=u(0),p=u(0),m=u(0),h=u(0);function g(){const t=b(e);if(!t)return void(n&&(r.value=0,c.value=0,v.value=0,d.value=0,f.value=0,p.value=0,m.value=0,h.value=0));const o=t.getBoundingClientRect();r.value=o.height,c.value=o.bottom,v.value=o.left,d.value=o.right,f.value=o.top,p.value=o.width,m.value=o.x,h.value=o.y}function y(){"sync"===a?g():"next-frame"===a&&requestAnimationFrame((()=>g()))}return R(e,y),s((()=>b(e)),(e=>!e&&y())),E(e,y,{attributeFilter:["style","class"]}),l&&O("scroll",y,{capture:!0,passive:!0}),o&&O("resize",y,{passive:!0}),w((()=>{i&&y()})),{height:r,bottom:c,left:v,right:d,top:f,width:p,x:m,y:h,update:y}}function X(e,t={}){const{delayEnter:n=0,delayLeave:o=0,triggerOnRemoval:s=!1,window:l=y}=t,a=u(!1);let c;const d=e=>{const t=e?n:o;c&&(clearTimeout(c),c=void 0),t?c=setTimeout((()=>a.value=e),t):a.value=e};return l?(O(e,"mouseenter",(()=>d(!0)),{passive:!0}),O(e,"mouseleave",(()=>d(!1)),{passive:!0}),s&&function(e,t,n={}){const{window:o=y,document:s=(null==o?void 0:o.document),flush:l="sync"}=n;if(!o||!s)return h;let u;const i=e=>{null==u||u(),u=e},a=r((()=>{const n=b(e);if(n){const{stop:e}=E(s,(e=>{e.map((e=>[...e.removedNodes])).flat().some((e=>e===n||e.contains(n)))&&t(e)}),{window:o,childList:!0,subtree:!0});i(e)}}),{flush:l}),c=()=>{a(),i()};v(c)}(i((()=>b(e))),(()=>d(!1))),a):a}function Y(e,t,n={}){const{root:o,rootMargin:l="0px",threshold:r=0,window:c=y,immediate:d=!0}=n,p=x((()=>c&&"IntersectionObserver"in c)),m=i((()=>g(a(e)).map(b).filter(f)));let w=h;const O=u(d),E=p.value?s((()=>[m.value,b(o),O.value]),(([e,n])=>{if(w(),!O.value)return;if(!e.length)return;const o=new IntersectionObserver(t,{root:b(n),rootMargin:l,threshold:r});e.forEach((e=>e&&o.observe(e))),w=()=>{o.disconnect(),w=h}}),{immediate:d,flush:"post"}):h,R=()=>{w(),E(),O.value=!1};return v(R),{isSupported:p,isActive:O,pause(){w(),O.value=!1},resume(){O.value=!0},stop:R}}const z={page:e=>[e.pageX,e.pageY],client:e=>[e.clientX,e.clientY],screen:e=>[e.screenX,e.screenY],movement:e=>e instanceof 
MouseEvent?[e.movementX,e.movementY]:null};function M(e,t={}){const{windowResize:n=!0,windowScroll:o=!0,handleOutside:l=!0,window:i=y}=t,a=t.type||"page",{x:r,y:c,sourceType:v}=function(e={}){const{type:t="page",touch:n=!0,resetOnTouchEnds:o=!1,initialValue:s={x:0,y:0},window:l=y,target:i=l,scroll:a=!0,eventFilter:r}=e;let c=null,v=0,d=0;const f=u(s.x),p=u(s.y),m=u(null),h="function"==typeof t?t:z[t],g=e=>{const t=h(e);c=e,t&&([f.value,p.value]=t,m.value="mouse"),l&&(v=l.scrollX,d=l.scrollY)},w=e=>{if(e.touches.length>0){const t=h(e.touches[0]);t&&([f.value,p.value]=t,m.value="touch")}},b=()=>{if(!c||!l)return;const e=h(c);c instanceof MouseEvent&&e&&(f.value=e[0]+l.scrollX-v,p.value=e[1]+l.scrollY-d)},x=()=>{f.value=s.x,p.value=s.y},E=r?e=>r((()=>g(e)),{}):e=>g(e),R=r?e=>r((()=>w(e)),{}):e=>w(e),S=r?()=>r((()=>b()),{}):()=>b();if(i){const e={passive:!0};O(i,["mousemove","dragover"],E,e),n&&"movement"!==t&&(O(i,["touchstart","touchmove"],R,e),o&&O(i,"touchend",x,e)),a&&"page"===t&&O(l,"scroll",S,e)}return{x:f,y:p,sourceType:m}}(t),d=u(null!=e?e:null==i?void 0:i.document.body),f=u(0),p=u(0),m=u(0),h=u(0),g=u(0),x=u(0),S=u(!0);function X(){if(!i)return;const e=b(d);if(!(e&&e instanceof Element))return;const{left:t,top:n,width:o,height:s}=e.getBoundingClientRect();m.value=t+("page"===a?i.pageXOffset:0),h.value=n+("page"===a?i.pageYOffset:0),g.value=s,x.value=o;const u=r.value-m.value,v=c.value-h.value;S.value=0===o||0===s||u<0||v<0||u>o||v>s,l&&(f.value=u,p.value=v)}const Y=[];if(w((()=>{X()})),i){const{stop:e}=R(d,X),{stop:t}=E(d,X,{attributeFilter:["style","class"]}),l=s([d,r,c],X);Y.push(e,t,l),O(document,"mouseleave",(()=>S.value=!0),{passive:!0}),o&&Y.push(O("scroll",X,{capture:!0,passive:!0})),n&&Y.push(O("resize",X,{passive:!0}))}return{x:r,y:c,sourceType:v,elementX:f,elementY:p,elementPositionX:m,elementPositionY:h,elementHeight:g,elementWidth:x,isOutside:S,stop:function(){Y.forEach((e=>e())),Y.length=0}}}export{Y as a,M as b,X as c,S as d,O as u}; 2 | -------------------------------------------------------------------------------- /web_version/v1/css/toolbar.css: -------------------------------------------------------------------------------- 1 | .easyuse-toolbar{ 2 | background: rgba(15,15,15,.5); 3 | backdrop-filter: blur(4px) brightness(120%); 4 | border-radius:0 12px 12px 0; 5 | min-width:50px; 6 | height:24px; 7 | position: fixed; 8 | bottom:85px; 9 | left:0px; 10 | display: flex; 11 | align-items: center; 12 | z-index:10000; 13 | } 14 | .easyuse-toolbar.disable-render-info{ 15 | bottom: 55px; 16 | } 17 | .easyuse-toolbar-item{ 18 | border-radius:20px; 19 | height: 20px; 20 | width:20px; 21 | cursor: pointer; 22 | display: flex; 23 | justify-content: center; 24 | align-items: center; 25 | transition: all 0.3s ease-in-out; 26 | margin-left:2.5px; 27 | } 28 | .easyuse-toolbar-icon{ 29 | width: 14px; 30 | height: 14px; 31 | display: flex; 32 | justify-content: center; 33 | align-items: center; 34 | font-size: 12px; 35 | color:white; 36 | transition: all 0.3s ease-in-out; 37 | } 38 | .easyuse-toolbar-icon svg{ 39 | width: 14px; 40 | height: 14px; 41 | } 42 | .easyuse-toolbar-tips{ 43 | visibility: hidden; 44 | opacity: 0; 45 | position: absolute; 46 | top: -25px; 47 | left: 0; 48 | color: var(--descrip-text); 49 | padding: 2px 5px; 50 | border-radius: 5px; 51 | font-size: 11px; 52 | min-width:100px; 53 | transition: all 0.3s ease-in-out; 54 | } 55 | .easyuse-toolbar-item:hover{ 56 | background:rgba(12,12,12,1); 57 | } 58 | .easyuse-toolbar-item:hover .easyuse-toolbar-tips{ 59 | 
opacity: 1; 60 | visibility: visible; 61 | } 62 | .easyuse-toolbar-item:hover .easyuse-toolbar-icon.group{ 63 | color:var(--warning-color); 64 | } 65 | .easyuse-toolbar-item:hover .easyuse-toolbar-icon.rocket{ 66 | color:var(--theme-color-light); 67 | } 68 | .easyuse-toolbar-item:hover .easyuse-toolbar-icon.question{ 69 | color:var(--success-color); 70 | } 71 | 72 | 73 | .easyuse-guide-dialog{ 74 | max-width: 300px; 75 | font-family: var(--font-family); 76 | position: absolute; 77 | z-index:100; 78 | left:0; 79 | bottom:140px; 80 | background: rgba(25,25,25,.85); 81 | backdrop-filter: blur(8px) brightness(120%); 82 | border-radius:0 12px 12px 0; 83 | padding:10px; 84 | transition: .5s all ease-in-out; 85 | visibility: visible; 86 | opacity: 1; 87 | transform: translateX(0%); 88 | } 89 | .easyuse-guide-dialog.disable-render-info{ 90 | bottom:110px; 91 | } 92 | .easyuse-guide-dialog-top{ 93 | display: flex; 94 | justify-content: space-between; 95 | align-items: center; 96 | } 97 | .easyuse-guide-dialog-top .icon{ 98 | width: 12px; 99 | height:12px; 100 | } 101 | .easyuse-guide-dialog.hidden{ 102 | opacity: 0; 103 | transform: translateX(-50%); 104 | visibility: hidden; 105 | } 106 | .easyuse-guide-dialog .closeBtn{ 107 | float: right; 108 | color: var(--input-text); 109 | border-radius:30px; 110 | background-color: var(--comfy-input-bg); 111 | border: 1px solid var(--border-color); 112 | cursor: pointer; 113 | aspect-ratio: 1 / 1; 114 | display: flex; 115 | justify-content: center; 116 | align-items: center; 117 | } 118 | .easyuse-guide-dialog .closeBtn:hover{ 119 | filter:brightness(120%); 120 | } 121 | .easyuse-guide-dialog-title{ 122 | color:var(--input-text); 123 | font-size: 16px; 124 | font-weight: bold; 125 | margin-bottom: 5px; 126 | } 127 | .easyuse-guide-dialog-remark{ 128 | color: var(--input-text); 129 | font-size: 12px; 130 | margin-top: 5px; 131 | } 132 | .easyuse-guide-dialog-content{ 133 | max-height: 600px; 134 | overflow: auto; 135 | } 136 | .easyuse-guide-dialog a, .easyuse-guide-dialog a:visited{ 137 | color: var(--theme-color-light); 138 | cursor: pointer; 139 | } 140 | .easyuse-guide-dialog-note{ 141 | margin-top: 20px; 142 | color:white; 143 | } 144 | .easyuse-guide-dialog p{ 145 | margin:4px 0; 146 | font-size: 12px; 147 | font-weight: 300; 148 | } 149 | .markdown-body h1, .markdown-body h2, .markdown-body h3, .markdown-body h4, .markdown-body h5, .markdown-body h6 { 150 | margin-top: 12px; 151 | margin-bottom: 8px; 152 | font-weight: 600; 153 | line-height: 1.25; 154 | padding-bottom: 5px; 155 | border-bottom: 1px solid var(--border-color); 156 | color: var(--input-text); 157 | } 158 | .markdown-body h1{ 159 | font-size: 18px; 160 | } 161 | .markdown-body h2{ 162 | font-size: 16px; 163 | } 164 | .markdown-body h3{ 165 | font-size: 14px; 166 | } 167 | .markdown-body h4{ 168 | font-size: 13px; 169 | } 170 | .markdown-body table { 171 | display: block; 172 | /*width: 100%;*/ 173 | /*width: max-content;*/ 174 | max-width: 300px; 175 | overflow: auto; 176 | color:var(--input-text); 177 | box-sizing: border-box; 178 | border: 1px solid var(--border-color); 179 | text-align: left; 180 | width: 100%; 181 | } 182 | .markdown-body table th, .markdown-body table td { 183 | padding: 6px 13px; 184 | font-size: 12px; 185 | margin:0; 186 | border-right: 1px solid var(--border-color); 187 | border-bottom: 1px solid var(--border-color); 188 | } 189 | .markdown-body table td { 190 | font-size: 12px; 191 | } 192 | .markdown-body table th:last-child, .markdown-body table td:last-child{ 
193 | border-right: none; 194 | } 195 | .markdown-body table tr:last-child td{ 196 | border-bottom: none; 197 | } 198 | .markdown-body table th{ 199 | font-weight: bold; 200 | width: auto; 201 | min-width: 70px; 202 | } 203 | .markdown-body table th:last-child{ 204 | width:100%; 205 | } 206 | .markdown-body .warning{ 207 | color:var(--warning-color) 208 | } 209 | .markdown-body .error{ 210 | color:var(--error-color) 211 | } 212 | .markdown-body .success{ 213 | color:var(--success-color) 214 | } 215 | .markdown-body .link{ 216 | color:var(--theme-color-light) 217 | } 218 | 219 | #comfyui-menu-monitor{ 220 | width:120px; 221 | } 222 | #comfyui-menu-monitor #crystools-monitor-container{ 223 | margin:0 auto!important; 224 | } 225 | #comfyui-menu-monitor #crystools-monitor-container > div{ 226 | margin:2px 0!important; 227 | } 228 | #comfyui-menu-monitor #crystools-monitor-container > div > div > div{ 229 | padding:0 4px!important; 230 | } -------------------------------------------------------------------------------- /web_version/v1/js/easy/easySeg.js: -------------------------------------------------------------------------------- 1 | import {app} from "../../../../scripts/app.js"; 2 | import {$el} from "../../../../scripts/ui.js"; 3 | import {$t} from "../common/i18n.js"; 4 | import {findWidgetByName, toggleWidget} from "../common/utils.js"; 5 | 6 | 7 | const tags = { 8 | "selfie_multiclass_256x256": ["Background", "Hair", "Body", "Face", "Clothes", "Others",], 9 | "human_parsing_lip":["Background","Hat","Hair","Glove","Sunglasses","Upper-clothes","Dress","Coat","Socks","Pants","Jumpsuits","Scarf","Skirt","Face","Left-arm","Right-arm","Left-leg","Right-leg","Left-shoe","Right-shoe"], 10 | } 11 | function getTagList(tags) { 12 | let rlist=[] 13 | tags.forEach((k,i) => { 14 | rlist.push($el( 15 | "label.easyuse-prompt-styles-tag", 16 | { 17 | dataset: { 18 | tag: i, 19 | name: $t(k), 20 | index: i 21 | }, 22 | $: (el) => { 23 | el.children[0].onclick = () => { 24 | el.classList.toggle("easyuse-prompt-styles-tag-selected"); 25 | }; 26 | }, 27 | }, 28 | [ 29 | $el("input",{ 30 | type: 'checkbox', 31 | name: i 32 | }), 33 | $el("span",{ 34 | textContent: $t(k), 35 | }) 36 | ] 37 | )) 38 | }); 39 | return rlist 40 | } 41 | 42 | 43 | app.registerExtension({ 44 | name: 'comfy.easyUse.seg', 45 | async beforeRegisterNodeDef(nodeType, nodeData, app) { 46 | 47 | if (nodeData.name == 'easy humanSegmentation') { 48 | // on node creation 49 | const onNodeCreated = nodeType.prototype.onNodeCreated; 50 | nodeType.prototype.onNodeCreated = function () { 51 | onNodeCreated?.apply(this, arguments);
52 | const method = this.widgets.findIndex((w) => w.name == 'method'); 53 | const list = $el("ul.easyuse-prompt-styles-list.no-top", []); 54 | let method_values = '' 55 | this.setProperty("values", []) 56 | 57 | let selector = this.addDOMWidget('mask_components',"btn",$el('div.easyuse-prompt-styles',[list])) 58 | 59 | Object.defineProperty(this.widgets[method],'value',{ 60 | set:(value)=>{ 61 | method_values = value 62 | if(method_values){ 63 | selector.element.children[0].innerHTML = '' 64 | if(method_values == 'selfie_multiclass_256x256'){ 65 | toggleWidget(this, findWidgetByName(this, 'confidence'), true) 66 | this.setSize([300, 260]); 67 | }else{ 68 | toggleWidget(this, findWidgetByName(this, 'confidence')) 69 | this.setSize([300, 500]); 70 | } 71 | let list = getTagList(tags[method_values]); 72 | selector.element.children[0].append(...list) 73 | } 74 | }, 75 | get: () => { 76 | return method_values 77 | } 78 | }) 79 | 80 | let mask_select_values = '' 81 | 82 | Object.defineProperty(selector, "value", { 83 | set: (value) => { 84 | setTimeout(_=>{ 85 | selector.element.children[0].querySelectorAll(".easyuse-prompt-styles-tag").forEach(el => { 86 | let arr = value.split(',') 87 | if (arr.includes(el.dataset.tag)) { 88 | el.classList.add("easyuse-prompt-styles-tag-selected"); 89 | el.children[0].checked = true 90 | } 91 | }) 92 | },100) 93 | }, 94 | get: () => { 95 | selector.element.children[0].querySelectorAll(".easyuse-prompt-styles-tag").forEach(el => { 96 | if(el.classList.value.indexOf("easyuse-prompt-styles-tag-selected")>=0){ 97 | if(!this.properties["values"].includes(el.dataset.tag)){ 98 | this.properties["values"].push(el.dataset.tag); 99 | } 100 | }else{ 101 | if(this.properties["values"].includes(el.dataset.tag)){ 102 | this.properties["values"]= this.properties["values"].filter(v=>v!=el.dataset.tag); 103 | } 104 | } 105 | }); 106 | mask_select_values = this.properties["values"].join(','); 107 | return mask_select_values; 108 | } 109 | }); 110 | 111 | let old_values = '' 112 | let mask_lists_dom = selector.element.children[0] 113 | 114 | // initialization 115 | setTimeout(_=>{ 116 | if(!method_values) { 117 | method_values = 'selfie_multiclass_256x256' 118 | selector.element.children[0].innerHTML = '' 119 | // re-sort the tag list 120 | let list = getTagList(tags[method_values]); 121 | selector.element.children[0].append(...list) 122 | } 123 | if(method_values == 'selfie_multiclass_256x256'){ 124 | toggleWidget(this, findWidgetByName(this, 'confidence'), true) 125 | this.setSize([300, 260]); 126 | }else{ 127 | toggleWidget(this, findWidgetByName(this, 'confidence')) 128 | this.setSize([300, 500]); 129 | } 130 | },1) 131 | 132 | return onNodeCreated; 133 | } 134 | } 135 | } 136 | }) -------------------------------------------------------------------------------- /py/server.py: -------------------------------------------------------------------------------- 1 | import random 2 | import server 3 | from enum import Enum 4 | 5 | class SGmode(Enum): 6 | FIX = 1 7 | INCR = 2 8 | DECR = 3 9 | RAND = 4 10 | 11 | 12 | class SeedGenerator: 13 | def __init__(self, base_value, action): 14 | self.base_value = base_value 15 | 16 | if action == "fixed" or action == "increment" or action == "decrement" or action == "randomize": # one-shot actions are applied once in control_seed(), so every node reuses the same value 17 | self.action = SGmode.FIX 18 | elif action == 'increment for each node': 19 | self.action = SGmode.INCR 20 | elif action == 'decrement for each node': 21 | self.action = SGmode.DECR 22 | elif action == 'randomize for each node': 23 | 
self.action = SGmode.RAND 24 | 25 | def next(self): 26 | seed = self.base_value 27 | 28 | if self.action == SGmode.INCR: 29 | self.base_value += 1 30 | if self.base_value > 1125899906842624: 31 | self.base_value = 0 32 | elif self.action == SGmode.DECR: 33 | self.base_value -= 1 34 | if self.base_value < 0: 35 | self.base_value = 1125899906842624 36 | elif self.action == SGmode.RAND: 37 | self.base_value = random.randint(0, 1125899906842624) 38 | 39 | return seed 40 | 41 | 42 | def control_seed(v, action, seed_is_global): 43 | action = v['inputs']['action'] if seed_is_global else action 44 | value = v['inputs']['value'] if seed_is_global else v['inputs']['seed_num'] 45 | 46 | if action == 'increment' or action == 'increment for each node': 47 | value = value + 1 48 | if value > 1125899906842624: 49 | value = 0 50 | elif action == 'decrement' or action == 'decrement for each node': 51 | value = value - 1 52 | if value < 0: 53 | value = 1125899906842624 54 | elif action == 'randomize' or action == 'randomize for each node': 55 | value = random.randint(0, 1125899906842624) 56 | if seed_is_global: 57 | v['inputs']['value'] = value 58 | 59 | return value 60 | 61 | 62 | def prompt_seed_update(json_data): 63 | try: 64 | seed_widget_map = json_data['extra_data']['extra_pnginfo']['workflow']['seed_widgets'] 65 | except: 66 | return None 67 | 68 | workflow = json_data['extra_data']['extra_pnginfo']['workflow'] 69 | seed_widget_map = workflow['seed_widgets'] 70 | value = None 71 | mode = None 72 | node = None 73 | action = None 74 | seed_is_global = False 75 | 76 | for k, v in json_data['prompt'].items(): 77 | if 'class_type' not in v: 78 | continue 79 | 80 | cls = v['class_type'] 81 | 82 | if cls == 'easy globalSeed': 83 | mode = v['inputs']['mode'] 84 | action = v['inputs']['action'] 85 | value = v['inputs']['value'] 86 | node = k, v 87 | seed_is_global = True 88 | 89 | # control before generated 90 | if mode is not None and mode and seed_is_global: 91 | value = control_seed(node[1], action, seed_is_global) 92 | 93 | if seed_is_global: 94 | if value is not None: 95 | seed_generator = SeedGenerator(value, action) 96 | 97 | for k, v in json_data['prompt'].items(): 98 | for k2, v2 in v['inputs'].items(): 99 | if isinstance(v2, str) and '$GlobalSeed.value$' in v2: 100 | v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value)) 101 | 102 | if k not in seed_widget_map: 103 | continue 104 | 105 | if 'seed_num' in v['inputs']: 106 | if isinstance(v['inputs']['seed_num'], int): 107 | v['inputs']['seed_num'] = seed_generator.next() 108 | 109 | if 'seed' in v['inputs']: 110 | if isinstance(v['inputs']['seed'], int): 111 | v['inputs']['seed'] = seed_generator.next() 112 | 113 | if 'noise_seed' in v['inputs']: 114 | if isinstance(v['inputs']['noise_seed'], int): 115 | v['inputs']['noise_seed'] = seed_generator.next() 116 | 117 | for k2, v2 in v['inputs'].items(): 118 | if isinstance(v2, str) and '$GlobalSeed.value$' in v2: 119 | v['inputs'][k2] = v2.replace('$GlobalSeed.value$', str(value)) 120 | # control after generated 121 | if mode is not None and not mode: 122 | control_seed(node[1], action, seed_is_global) 123 | 124 | return value is not None 125 | 126 | 127 | def workflow_seed_update(json_data): 128 | nodes = json_data['extra_data']['extra_pnginfo']['workflow']['nodes'] 129 | seed_widget_map = json_data['extra_data']['extra_pnginfo']['workflow']['seed_widgets'] 130 | prompt = json_data['prompt'] 131 | 132 | updated_seed_map = {} 133 | value = None 134 | 135 | for node in nodes: 136 | node_id = 
str(node['id']) 137 | if node_id in prompt: 138 | if node['type'] == 'easy globalSeed': 139 | value = prompt[node_id]['inputs']['value'] 140 | length = len(node['widgets_values']) 141 | node['widgets_values'][length-1] = node['widgets_values'][0] 142 | node['widgets_values'][0] = value 143 | elif node_id in seed_widget_map: 144 | widget_idx = seed_widget_map[node_id] 145 | 146 | if 'seed_num' in prompt[node_id]['inputs']: 147 | seed = prompt[node_id]['inputs']['seed_num'] 148 | elif 'noise_seed' in prompt[node_id]['inputs']: 149 | seed = prompt[node_id]['inputs']['noise_seed'] 150 | else: 151 | seed = prompt[node_id]['inputs']['seed'] 152 | 153 | node['widgets_values'][widget_idx] = seed 154 | updated_seed_map[node_id] = seed 155 | 156 | server.PromptServer.instance.send_sync("easyuse-global-seed", {"id": node_id, "value": value, "seed_map": updated_seed_map}) 157 | 158 | 159 | def onprompt(json_data): 160 | is_changed = prompt_seed_update(json_data) 161 | if is_changed: 162 | workflow_seed_update(json_data) 163 | 164 | return json_data 165 | 166 | server.PromptServer.instance.add_on_prompt_handler(onprompt) -------------------------------------------------------------------------------- /py/modules/brushnet/model_patch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import comfy 3 | 4 | # Check and add 'model_patch' to model.model_options['transformer_options'] 5 | def add_model_patch_option(model): 6 | if 'transformer_options' not in model.model_options: 7 | model.model_options['transformer_options'] = {} 8 | to = model.model_options['transformer_options'] 9 | if "model_patch" not in to: 10 | to["model_patch"] = {} 11 | return to 12 | 13 | 14 | # Patch model with model_function_wrapper 15 | def patch_model_function_wrapper(model, forward_patch, remove=False): 16 | def brushnet_model_function_wrapper(apply_model_method, options_dict): 17 | to = options_dict['c']['transformer_options'] 18 | 19 | control = None 20 | if 'control' in options_dict['c']: 21 | control = options_dict['c']['control'] 22 | 23 | x = options_dict['input'] 24 | timestep = options_dict['timestep'] 25 | 26 | # check if there are patches to execute 27 | if 'model_patch' not in to or 'forward' not in to['model_patch']: 28 | return apply_model_method(x, timestep, **options_dict['c']) 29 | 30 | mp = to['model_patch'] 31 | unet = mp['unet'] 32 | 33 | all_sigmas = mp['all_sigmas'] 34 | sigma = to['sigmas'][0].item() 35 | total_steps = all_sigmas.shape[0] - 1 36 | step = torch.argmin((all_sigmas - sigma).abs()).item() 37 | 38 | mp['step'] = step 39 | mp['total_steps'] = total_steps 40 | 41 | # comfy.model_base.apply_model 42 | xc = model.model.model_sampling.calculate_input(timestep, x) 43 | if 'c_concat' in options_dict['c'] and options_dict['c']['c_concat'] is not None: 44 | xc = torch.cat([xc] + [options_dict['c']['c_concat']], dim=1) 45 | t = model.model.model_sampling.timestep(timestep).float() 46 | # execute all patches 47 | for method in mp['forward']: 48 | method(unet, xc, t, to, control) 49 | 50 | return apply_model_method(x, timestep, **options_dict['c']) 51 | 52 | if "model_function_wrapper" in model.model_options and model.model_options["model_function_wrapper"]: 53 | print('BrushNet is going to replace existing model_function_wrapper:', 54 | model.model_options["model_function_wrapper"]) 55 | model.set_model_unet_function_wrapper(brushnet_model_function_wrapper) 56 | 57 | to = add_model_patch_option(model) 58 | mp = to['model_patch'] 59 | 60 | if 
isinstance(model.model.model_config, comfy.supported_models.SD15): 61 | mp['SDXL'] = False 62 | elif isinstance(model.model.model_config, comfy.supported_models.SDXL): 63 | mp['SDXL'] = True 64 | else: 65 | print('Base model type: ', type(model.model.model_config)) 66 | raise Exception("Unsupported model type: ", type(model.model.model_config)) 67 | 68 | if 'forward' not in mp: 69 | mp['forward'] = [] 70 | 71 | if remove: 72 | if forward_patch in mp['forward']: 73 | mp['forward'].remove(forward_patch) 74 | else: 75 | mp['forward'].append(forward_patch) 76 | 77 | mp['unet'] = model.model.diffusion_model 78 | mp['step'] = 0 79 | mp['total_steps'] = 1 80 | 81 | # apply patches to code 82 | if comfy.samplers.sample.__doc__ is None or 'BrushNet' not in comfy.samplers.sample.__doc__: 83 | comfy.samplers.original_sample = comfy.samplers.sample 84 | comfy.samplers.sample = modified_sample 85 | 86 | if comfy.ldm.modules.diffusionmodules.openaimodel.apply_control.__doc__ is None or \ 87 | 'BrushNet' not in comfy.ldm.modules.diffusionmodules.openaimodel.apply_control.__doc__: 88 | comfy.ldm.modules.diffusionmodules.openaimodel.original_apply_control = comfy.ldm.modules.diffusionmodules.openaimodel.apply_control 89 | comfy.ldm.modules.diffusionmodules.openaimodel.apply_control = modified_apply_control 90 | 91 | 92 | # Model needs current step number and cfg at inference step. It is possible to write a custom KSampler but I'd like to use ComfyUI's one. 93 | # The first versions had modified_common_ksampler, but it broke custom KSampler nodes 94 | def modified_sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, 95 | latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None): 96 | ''' Modified by BrushNet nodes''' 97 | cfg_guider = comfy.samplers.CFGGuider(model) 98 | cfg_guider.set_conds(positive, negative) 99 | cfg_guider.set_cfg(cfg) 100 | 101 | ### Modified part ###################################################################### 102 | to = add_model_patch_option(model) 103 | to['model_patch']['all_sigmas'] = sigmas 104 | ####################################################################################### 105 | 106 | return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) 107 | 108 | # To use Controlnet with RAUNet it is much easier to modify apply_control a little 109 | def modified_apply_control(h, control, name): 110 | '''Modified by BrushNet nodes''' 111 | if control is not None and name in control and len(control[name]) > 0: 112 | ctrl = control[name].pop() 113 | if ctrl is not None: 114 | if h.shape[2] != ctrl.shape[2] or h.shape[3] != ctrl.shape[3]: 115 | ctrl = torch.nn.functional.interpolate(ctrl, size=(h.shape[2], h.shape[3]), mode='bicubic').to( 116 | h.dtype).to(h.device) 117 | try: 118 | h += ctrl 119 | except: 120 | print("warning: control could not be applied {} {}".format(h.shape, ctrl.shape)) 121 | return h 122 | 123 | def add_model_patch(model): 124 | to = add_model_patch_option(model) 125 | mp = to['model_patch'] 126 | if "brushnet" in mp: 127 | if isinstance(model.model.model_config, comfy.supported_models.SD15): 128 | mp['SDXL'] = False 129 | elif isinstance(model.model.model_config, comfy.supported_models.SDXL): 130 | mp['SDXL'] = True 131 | else: 132 | print('Base model type: ', type(model.model.model_config)) 133 | raise Exception("Unsupported model type: ", type(model.model.model_config)) 134 | 135 | mp['unet'] = model.model.diffusion_model 136 | 
mp['step'] = 0 137 | mp['total_steps'] = 1 -------------------------------------------------------------------------------- /py/modules/ipadapter/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import Tensor 3 | from .flux.layers import DoubleStreamBlockIPA, SingleStreamBlockIPA 4 | from comfy.ldm.flux.layers import timestep_embedding 5 | from types import MethodType 6 | 7 | def FluxUpdateModules(bi, ip_attn_procs, image_emb): 8 | flux_model = bi.model 9 | bi.add_object_patch(f"diffusion_model.forward_orig", MethodType(forward_orig_ipa, flux_model.diffusion_model)) 10 | for i, original in enumerate(flux_model.diffusion_model.double_blocks): 11 | patch_name = f"double_blocks.{i}" 12 | maybe_patched_layer = bi.get_model_object(f"diffusion_model.{patch_name}") 13 | # if there's already a patch there, collect its adapters and replace it 14 | procs = [ip_attn_procs[patch_name]] 15 | embs = [image_emb] 16 | if isinstance(maybe_patched_layer, DoubleStreamBlockIPA): 17 | procs = maybe_patched_layer.ip_adapter + procs 18 | embs = maybe_patched_layer.image_emb + embs 19 | # initialize ipa models with image embeddings 20 | new_layer = DoubleStreamBlockIPA(original, procs, embs) 21 | # for example, ComfyUI internally uses model.add_patches to add loras 22 | bi.add_object_patch(f"diffusion_model.{patch_name}", new_layer) 23 | for i, original in enumerate(flux_model.diffusion_model.single_blocks): 24 | patch_name = f"single_blocks.{i}" 25 | maybe_patched_layer = bi.get_model_object(f"diffusion_model.{patch_name}") 26 | procs = [ip_attn_procs[patch_name]] 27 | embs = [image_emb] 28 | if isinstance(maybe_patched_layer, SingleStreamBlockIPA): 29 | procs = maybe_patched_layer.ip_adapter + procs 30 | embs = maybe_patched_layer.image_emb + embs 31 | # initialize ipa models with image embeddings 32 | new_layer = SingleStreamBlockIPA(original, procs, embs) 33 | bi.add_object_patch(f"diffusion_model.{patch_name}", new_layer) 34 | 35 | def is_model_pathched(model): 36 | def test(mod): 37 | if isinstance(mod, DoubleStreamBlockIPA): 38 | return True 39 | else: 40 | for p in mod.children(): 41 | if test(p): 42 | return True 43 | return False 44 | 45 | result = test(model) 46 | return result 47 | 48 | 49 | def forward_orig_ipa( 50 | self, 51 | img: Tensor, 52 | img_ids: Tensor, 53 | txt: Tensor, 54 | txt_ids: Tensor, 55 | timesteps: Tensor, 56 | y: Tensor, 57 | guidance: Tensor|None = None, 58 | control=None, 59 | transformer_options={}, 60 | attn_mask: Tensor = None, 61 | ) -> Tensor: 62 | patches_replace = transformer_options.get("patches_replace", {}) 63 | if img.ndim != 3 or txt.ndim != 3: 64 | raise ValueError("Input img and txt tensors must have 3 dimensions.") 65 | 66 | # running on sequences img 67 | img = self.img_in(img) 68 | vec = self.time_in(timestep_embedding(timesteps, 256).to(img.dtype)) 69 | if self.params.guidance_embed: 70 | if guidance is None: 71 | raise ValueError("Didn't get guidance strength for guidance distilled model.") 72 | vec = vec + self.guidance_in(timestep_embedding(guidance, 256).to(img.dtype)) 73 | 74 | vec = vec + self.vector_in(y[:,:self.params.vec_in_dim]) 75 | txt = self.txt_in(txt) 76 | 77 | ids = torch.cat((txt_ids, img_ids), dim=1) 78 | pe = self.pe_embedder(ids) 79 | 80 | blocks_replace = patches_replace.get("dit", {}) 81 | for i, block in enumerate(self.double_blocks): 82 | if ("double_block", i) in blocks_replace: 83 | def block_wrap(args): 84 | out = {} 85 | if isinstance(block, DoubleStreamBlockIPA): # ipadapter
86 | out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"], t=args["timesteps"], attn_mask=args.get("attn_mask")) 87 | else: 88 | out["img"], out["txt"] = block(img=args["img"], txt=args["txt"], vec=args["vec"], pe=args["pe"], attn_mask=args.get("attn_mask")) 89 | return out 90 | out = blocks_replace[("double_block", i)]({"img": img, "txt": txt, "vec": vec, "pe": pe, "timesteps": timesteps, "attn_mask": attn_mask}, {"original_block": block_wrap}) 91 | txt = out["txt"] 92 | img = out["img"] 93 | else: 94 | if isinstance(block, DoubleStreamBlockIPA): # ipadapter 95 | img, txt = block(img=img, txt=txt, vec=vec, pe=pe, t=timesteps, attn_mask=attn_mask) 96 | else: 97 | img, txt = block(img=img, txt=txt, vec=vec, pe=pe, attn_mask=attn_mask) 98 | 99 | if control is not None: # Controlnet 100 | control_i = control.get("input") 101 | if i < len(control_i): 102 | add = control_i[i] 103 | if add is not None: 104 | img += add 105 | 106 | img = torch.cat((txt, img), 1) 107 | 108 | for i, block in enumerate(self.single_blocks): 109 | if ("single_block", i) in blocks_replace: 110 | def block_wrap(args): 111 | out = {} 112 | if isinstance(block, SingleStreamBlockIPA): # ipadapter 113 | out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], t=args["timesteps"], attn_mask=args.get("attn_mask")) 114 | else: 115 | out["img"] = block(args["img"], vec=args["vec"], pe=args["pe"], attn_mask=args.get("attn_mask")) 116 | return out 117 | 118 | out = blocks_replace[("single_block", i)]({"img": img, "vec": vec, "pe": pe, "timesteps": timesteps, "attn_mask": attn_mask}, {"original_block": block_wrap}) 119 | img = out["img"] 120 | else: 121 | if isinstance(block, SingleStreamBlockIPA): # ipadapter 122 | img = block(img, vec=vec, pe=pe, t=timesteps, attn_mask=attn_mask) 123 | else: 124 | img = block(img, vec=vec, pe=pe, attn_mask=attn_mask) 125 | 126 | if control is not None: # Controlnet 127 | control_o = control.get("output") 128 | if i < len(control_o): 129 | add = control_o[i] 130 | if add is not None: 131 | img[:, txt.shape[1] :, ...] += add 132 | 133 | img = img[:, txt.shape[1] :, ...] 134 | 135 | img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) 136 | return img -------------------------------------------------------------------------------- /py/libs/chooser.py: -------------------------------------------------------------------------------- 1 | from threading import Event 2 | 3 | import torch 4 | 5 | from server import PromptServer 6 | from aiohttp import web 7 | from comfy import model_management as mm 8 | from comfy_execution.graph import ExecutionBlocker 9 | import time 10 | 11 | class ChooserCancelled(Exception): 12 | pass 13 | 14 | def get_chooser_cache(): 15 | """Get the per-node chooser cache stored on the PromptServer instance""" 16 | if not hasattr(PromptServer.instance, '_easyuse_chooser_node'): 17 | PromptServer.instance._easyuse_chooser_node = {} 18 | return PromptServer.instance._easyuse_chooser_node 19 | 20 | def cleanup_session_data(node_id): 21 | """Clean up the session data of a node""" 22 | node_data = get_chooser_cache() 23 | if node_id in node_data: 24 | session_keys = ["event", "selected", "images", "total_count", "cancelled"] 25 | for key in session_keys: 26 | if key in node_data[node_id]: 27 | del node_data[node_id][key] 28 | 29 | def wait_for_chooser(id, images, mode, period=0.1): 30 | try: 31 | node_data = get_chooser_cache() 32 | images = [images[i:i + 1, ...] 
for i in range(images.shape[0])] 33 | if mode == "Keep Last Selection": 34 | if id in node_data and "last_selection" in node_data[id]: 35 | last_selection = node_data[id]["last_selection"] 36 | if last_selection and len(last_selection) > 0: 37 | valid_indices = [idx for idx in last_selection if 0 <= idx < len(images)] 38 | if valid_indices: 39 | try: 40 | PromptServer.instance.send_sync("easyuse-image-keep-selection", { 41 | "id": id, 42 | "selected": valid_indices 43 | }) 44 | except Exception as e: 45 | pass 46 | cleanup_session_data(id) 47 | indices_str = ','.join(str(i) for i in valid_indices) 48 | images = [images[idx] for idx in valid_indices] 49 | images = torch.cat(images, dim=0) 50 | return {"result": (images,)} 51 | 52 | if id in node_data: 53 | del node_data[id] 54 | 55 | event = Event() 56 | node_data[id] = { 57 | "event": event, 58 | "images": images, 59 | "selected": None, 60 | "total_count": len(images), 61 | "cancelled": False, 62 | } 63 | 64 | while id in node_data: 65 | node_info = node_data[id] 66 | if node_info.get("cancelled", False): 67 | cleanup_session_data(id) 68 | raise ChooserCancelled("Manual selection cancelled") 69 | 70 | if "selected" in node_info and node_info["selected"] is not None: 71 | break 72 | 73 | time.sleep(period) 74 | 75 | if id in node_data: 76 | node_info = node_data[id] 77 | selected_indices = node_info.get("selected") 78 | 79 | if selected_indices is not None and len(selected_indices) > 0: 80 | valid_indices = [idx for idx in selected_indices if 0 <= idx < len(images)] 81 | if valid_indices: 82 | selected_images = [images[idx] for idx in valid_indices] 83 | 84 | if id not in node_data: 85 | node_data[id] = {} 86 | node_data[id]["last_selection"] = valid_indices 87 | cleanup_session_data(id) 88 | selected_images = torch.cat(selected_images, dim=0) 89 | return {"result": (selected_images,)} 90 | else: 91 | cleanup_session_data(id) 92 | return {"result": (images[0] if len(images) > 0 else ExecutionBlocker(None),)} 93 | else: 94 | cleanup_session_data(id) 95 | return { 96 | "result": (images[0] if len(images) > 0 else ExecutionBlocker(None),)} 97 | else: 98 | return {"result": (images[0] if len(images) > 0 else ExecutionBlocker(None),)} 99 | 100 | except ChooserCancelled: 101 | raise mm.InterruptProcessingException() 102 | except Exception as e: 103 | node_data = get_chooser_cache() 104 | if id in node_data: 105 | cleanup_session_data(id) 106 | if 'images' in locals() and len(images) > 0: 107 | return {"result": (images[0],)} 108 | else: 109 | return {"result": (ExecutionBlocker(None),)} 110 | 111 | 112 | @PromptServer.instance.routes.post('/easyuse/image_chooser_message') 113 | async def handle_image_selection(request): 114 | try: 115 | data = await request.json() 116 | node_id = data.get("node_id") 117 | selected = data.get("selected", []) 118 | action = data.get("action") 119 | 120 | node_data = get_chooser_cache() 121 | 122 | if node_id not in node_data: 123 | return web.json_response({"code": -1, "error": "Node data does not exist"}) 124 | 125 | try: 126 | node_info = node_data[node_id] 127 | 128 | if "total_count" not in node_info: 129 | return web.json_response({"code": -1, "error": "The node has been processed"}) 130 | 131 | if action == "cancel": 132 | node_info["cancelled"] = True 133 | node_info["selected"] = [] 134 | elif action == "select" and isinstance(selected, list): 135 | valid_indices = [idx for idx in selected if isinstance(idx, int) and 0 <= idx < node_info["total_count"]] 136 | if valid_indices: 137 | node_info["selected"] 
= valid_indices 138 | node_info["cancelled"] = False 139 | else: 140 | return web.json_response({"code": -1, "error": "Invalid Selection Index"}) 141 | else: 142 | return web.json_response({"code": -1, "error": "Invalid operation"}) 143 | 144 | node_info["event"].set() 145 | return web.json_response({"code": 1}) 146 | 147 | except Exception as e: 148 | if node_id in node_data and "event" in node_data[node_id]: 149 | node_data[node_id]["event"].set() 150 | return web.json_response({"code": -1, "message": "Processing Failed"}) 151 | 152 | except Exception as e: 153 | return web.json_response({"code": -1, "message": "Request Failed"}) 154 | --------------------------------------------------------------------------------
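
A note on the chooser route above: the following is a minimal client-side sketch of how /easyuse/image_chooser_message can be driven from a test script while wait_for_chooser is blocking. The payload fields and response codes mirror handle_image_selection in py/libs/chooser.py; the host/port (ComfyUI's default 127.0.0.1:8188) and the use of the requests library are assumptions, not part of this repository.

import requests  # assumption: any HTTP client would do; requests is used for brevity

BASE_URL = "http://127.0.0.1:8188"  # assumption: local ComfyUI server on the default port

def send_selection(node_id: str, indices: list) -> bool:
    # Confirm a selection for a node currently blocked in wait_for_chooser().
    # The handler validates each index against the node's "total_count".
    resp = requests.post(
        f"{BASE_URL}/easyuse/image_chooser_message",
        json={"node_id": node_id, "selected": indices, "action": "select"},
    )
    return resp.json().get("code") == 1  # the route returns {"code": 1} on success

def cancel_selection(node_id: str) -> bool:
    # Mark the session cancelled; the wait loop then raises ChooserCancelled,
    # which surfaces as an InterruptProcessingException.
    resp = requests.post(
        f"{BASE_URL}/easyuse/image_chooser_message",
        json={"node_id": node_id, "selected": [], "action": "cancel"},
    )
    return resp.json().get("code") == 1

# Example: keep the first and third preview image of node "12".
# send_selection("12", [0, 2])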