├── .gitignore
├── 404.html
├── CNAME
├── Gemfile
├── Gemfile.lock
├── _config.yml
├── _posts
│   ├── 2023-05-18-ComfyUi-is-4-months-old.markdown
│   ├── 2023-05-23-Bislerp-slightly-better-latent-upscaling.md
│   ├── 2023-08-27-Weekly-update.md
│   ├── 2023-09-02-Weekly-update.md
│   ├── 2023-09-10-Weekly-update.md
│   ├── 2023-09-17-Weekly-update.md
│   ├── 2023-09-24-Weekly-update.md
│   ├── 2023-10-08-Weekly-update.md
│   ├── 2023-10-30-Update.md
│   ├── 2023-11-24-Update.md
│   ├── 2023-12-19-Update.md
│   └── 2024-06-18-Next-Chapter.md
├── about.markdown
├── assets
│   ├── b_bilinear.png
│   ├── b_bislerp.png
│   ├── b_nearest_exact.png
│   ├── comfyui.png
│   ├── custom_sampler.png
│   ├── img2vid.webp
│   ├── sdxl_turbo.webm
│   └── ssd1b.webp
└── index.markdown

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
_site
.sass-cache
.jekyll-cache
.jekyll-metadata
vendor

--------------------------------------------------------------------------------
/404.html:
--------------------------------------------------------------------------------
---
permalink: /404.html
layout: default
---
404

Page not found :(

The requested page could not be found.
26 | -------------------------------------------------------------------------------- /CNAME: -------------------------------------------------------------------------------- 1 | blog.comfyui.ca -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | # Hello! This is where you manage which Jekyll version is used to run. 3 | # When you want to use a different version, change it below, save the 4 | # file and run `bundle install`. Run Jekyll with `bundle exec`, like so: 5 | # 6 | # bundle exec jekyll serve 7 | # 8 | # This will help ensure the proper Jekyll version is running. 9 | # Happy Jekylling! 10 | gem "jekyll", "~> 4.3.2" 11 | # This is the default theme for new Jekyll sites. You may change this to anything you like. 12 | gem "minima", "~> 2.5" 13 | # If you want to use GitHub Pages, remove the "gem "jekyll"" above and 14 | # uncomment the line below. To upgrade, run `bundle update github-pages`. 15 | # gem "github-pages", group: :jekyll_plugins 16 | # If you have any plugins, put them here! 17 | group :jekyll_plugins do 18 | gem "jekyll-feed", "~> 0.12" 19 | end 20 | 21 | # Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem 22 | # and associated library. 23 | platforms :mingw, :x64_mingw, :mswin, :jruby do 24 | gem "tzinfo", ">= 1", "< 3" 25 | gem "tzinfo-data" 26 | end 27 | 28 | # Performance-booster for watching directories on Windows 29 | gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin] 30 | 31 | # Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem 32 | # do not have a Java counterpart. 33 | gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby] 34 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | addressable (2.8.4) 5 | public_suffix (>= 2.0.2, < 6.0) 6 | colorator (1.1.0) 7 | concurrent-ruby (1.2.2) 8 | em-websocket (0.5.3) 9 | eventmachine (>= 0.12.9) 10 | http_parser.rb (~> 0) 11 | eventmachine (1.2.7) 12 | ffi (1.15.5) 13 | forwardable-extended (2.6.0) 14 | google-protobuf (3.23.1-x86_64-linux) 15 | http_parser.rb (0.8.0) 16 | i18n (1.13.0) 17 | concurrent-ruby (~> 1.0) 18 | jekyll (4.3.2) 19 | addressable (~> 2.4) 20 | colorator (~> 1.0) 21 | em-websocket (~> 0.5) 22 | i18n (~> 1.0) 23 | jekyll-sass-converter (>= 2.0, < 4.0) 24 | jekyll-watch (~> 2.0) 25 | kramdown (~> 2.3, >= 2.3.1) 26 | kramdown-parser-gfm (~> 1.0) 27 | liquid (~> 4.0) 28 | mercenary (>= 0.3.6, < 0.5) 29 | pathutil (~> 0.9) 30 | rouge (>= 3.0, < 5.0) 31 | safe_yaml (~> 1.0) 32 | terminal-table (>= 1.8, < 4.0) 33 | webrick (~> 1.7) 34 | jekyll-feed (0.17.0) 35 | jekyll (>= 3.7, < 5.0) 36 | jekyll-sass-converter (3.0.0) 37 | sass-embedded (~> 1.54) 38 | jekyll-seo-tag (2.8.0) 39 | jekyll (>= 3.8, < 5.0) 40 | jekyll-watch (2.2.1) 41 | listen (~> 3.0) 42 | kramdown (2.4.0) 43 | rexml 44 | kramdown-parser-gfm (1.1.0) 45 | kramdown (~> 2.0) 46 | liquid (4.0.4) 47 | listen (3.8.0) 48 | rb-fsevent (~> 0.10, >= 0.10.3) 49 | rb-inotify (~> 0.9, >= 0.9.10) 50 | mercenary (0.4.0) 51 | minima (2.5.1) 52 | jekyll (>= 3.5, < 5.0) 53 | jekyll-feed (~> 0.9) 54 | jekyll-seo-tag (~> 2.1) 55 | pathutil (0.16.2) 56 | forwardable-extended (~> 2.6) 57 | public_suffix (5.0.1) 58 | rb-fsevent (0.11.2) 59 | 
rb-inotify (0.10.1) 60 | ffi (~> 1.0) 61 | rexml (3.2.5) 62 | rouge (4.1.1) 63 | safe_yaml (1.0.5) 64 | sass-embedded (1.62.1-x86_64-linux-gnu) 65 | google-protobuf (~> 3.21) 66 | terminal-table (3.0.2) 67 | unicode-display_width (>= 1.1.1, < 3) 68 | unicode-display_width (2.4.2) 69 | webrick (1.8.1) 70 | 71 | PLATFORMS 72 | x86_64-linux 73 | 74 | DEPENDENCIES 75 | http_parser.rb (~> 0.6.0) 76 | jekyll (~> 4.3.2) 77 | jekyll-feed (~> 0.12) 78 | minima (~> 2.5) 79 | tzinfo (>= 1, < 3) 80 | tzinfo-data 81 | wdm (~> 0.1.1) 82 | 83 | BUNDLED WITH 84 | 2.4.10 85 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | # Welcome to Jekyll! 2 | # 3 | # This config file is meant for settings that affect your whole blog, values 4 | # which you are expected to set up once and rarely edit after that. If you find 5 | # yourself editing this file very often, consider using Jekyll's data files 6 | # feature for the data you need to update frequently. 7 | # 8 | # For technical reasons, this file is *NOT* reloaded automatically when you use 9 | # 'bundle exec jekyll serve'. If you change this file, please restart the server process. 10 | # 11 | # If you need help with YAML syntax, here are some quick references for you: 12 | # https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml 13 | # https://learnxinyminutes.com/docs/yaml/ 14 | # 15 | # Site settings 16 | # These are used to personalize your new site. If you look in the HTML files, 17 | # you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. 18 | # You can create any custom variable you would like, and they will be accessible 19 | # in the templates via {{ site.myvariable }}. 20 | 21 | title: ComfyUI blog 22 | # email: your-email@example.com 23 | description: >- # this means to ignore newlines until "baseurl:" 24 | A blog for ComfyUI related news. 25 | baseurl: "" # the subpath of your site, e.g. /blog 26 | url: "" # the base hostname & protocol for your site, e.g. http://example.com 27 | github_username: comfyanonymous 28 | 29 | # Build settings 30 | theme: minima 31 | plugins: 32 | - jekyll-feed 33 | 34 | # Exclude from processing. 35 | # The following items will not be processed, by default. 36 | # Any item listed under the `exclude:` key here will be automatically added to 37 | # the internal "default list". 38 | # 39 | # Excluded items can be processed by explicitly listing the directories or 40 | # their entries' file path in the `include:` list. 41 | # 42 | # exclude: 43 | # - .sass-cache/ 44 | # - .jekyll-cache/ 45 | # - gemfiles/ 46 | # - Gemfile 47 | # - Gemfile.lock 48 | # - node_modules/ 49 | # - vendor/bundle/ 50 | # - vendor/cache/ 51 | # - vendor/gems/ 52 | # - vendor/ruby/ 53 | -------------------------------------------------------------------------------- /_posts/2023-05-18-ComfyUi-is-4-months-old.markdown: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI is now 4 months old!" 4 | date: 2023-05-18 20:00:00 +0100 5 | categories: comfyui update 6 | --- 7 | 8 | The original goal of [ComfyUI](https://github.com/comfyanonymous/ComfyUI) was to create a powerful and flexible stable diffusion backend/interface. I think it's going pretty well. 9 | 10 | The reason I started writing ComfyUI is that I got a bit too addicted to generating images with Stable Diffusion. 
The other tools available started to lack the flexibility and power that I desired. I also wanted to learn the core functions of Stable Diffusion and how it works, and I wanted to do better than everyone else, especially in terms of software design. ComfyUI is far from perfect, but I feel it has matured over the months and is more stable and bug free than the alternatives.

![ComfyUI screenshot]({{site.baseurl}}/assets/comfyui.png)

In only 4 months, thanks to everyone who has contributed, ComfyUI grew into an amazing piece of software that in many ways surpasses other stable diffusion graphical interfaces: in flexibility, base features, overall stability, and the power it gives users over the diffusion pipeline.

It is also by far the easiest stable diffusion interface to install. If you have an Nvidia card on Windows, all you have to do is download the [portable build on this page and read the very short instructions.](https://github.com/comfyanonymous/ComfyUI#installing)

Then, for information on how to use it, you can take a look at the [examples page](https://comfyanonymous.github.io/ComfyUI_examples/).

I often see questions about what ComfyUI has over most other UIs:

- Saving your entire workflow is easy: the entire workflow is saved to every image you generate. This makes loading past workflows and sharing them super easy.

- A real queue system built into the backend. Queue any combination of workflows you want, go do something else and come back to all your generated images.

- Important things like ControlNet and support for all the LoRA/LoCon/LoHa/LyCORIS/LoKr flavors are core features.

- Much higher performance out of the box on most systems. There are also easy to use [standalone packages with nightly pytorch for windows](https://github.com/comfyanonymous/ComfyUI/releases) for those who want max performance with the latest bleeding edge python 3.11 + nightly pytorch 2.1 cu121.

- Writing custom nodes is extremely easy and they are automatically accessible through the API (see the short sketch after this list).

- Supports a few things not implemented in other SD interfaces like [Noisy Latent Composition](https://comfyanonymous.github.io/ComfyUI_examples/noisy_latent_composition/), [GLiGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) and proper [unCLIP model](https://comfyanonymous.github.io/ComfyUI_examples/unclip/) support (including using multiple images as input).
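To give an idea of what writing a custom node looks like, here is a minimal sketch. The node itself (a latent scaling node) and its file/class names are made-up examples, but the class layout follows the same pattern the built-in nodes use: drop a `.py` file like this into `ComfyUI/custom_nodes/` and restart.

```python
# latent_scale_example.py -- hypothetical example node: scales a latent by a constant.

class LatentScaleExample:
    @classmethod
    def INPUT_TYPES(cls):
        # Declares the sockets and widgets the node exposes in the UI.
        return {
            "required": {
                "samples": ("LATENT",),
                "factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
            }
        }

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "scale"          # name of the method ComfyUI calls
    CATEGORY = "latent"         # where the node appears in the menu

    def scale(self, samples, factor):
        # A LATENT is a dict whose "samples" key holds the tensor.
        s = samples.copy()
        s["samples"] = samples["samples"] * factor
        return (s,)

NODE_CLASS_MAPPINGS = {"LatentScaleExample": LatentScaleExample}
NODE_DISPLAY_NAME_MAPPINGS = {"LatentScaleExample": "Latent Scale (Example)"}
```

Once loaded, the node shows up in the menu under the category you set, and because workflows are just JSON it can be queued through the API like any built-in node.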
There is still a lot to do, especially when it comes to overall user experience and documentation, but I think that the future is Comfy.

My vision for the future is to keep ComfyUI updated with all the cool new things that come out, improve performance and make the software better. I and others in the ComfyUI community also have a few ideas of our own for some things that could improve Stable Diffusion for everyone, so stay tuned.

## Third Party Resources

To show off the ComfyUI community, here's a list of some third party resources, alternative frontends and extensions.

### Information and workflows

A lot of various information/workflows/etc...

A list of Custom Nodes, Extensions, and Tools for ComfyUI

Node Guide (under construction at the time of writing)

### Alternative Frontends

In ComfyUI the backend and frontend are decoupled from each other, which makes writing an alternative frontend relatively easy. ComfyUI is designed so backend extensions/custom nodes work with other frontends without issues.

CushyStudio (control ComfyUI in Visual Studio Code):

ComfyBox (a more standard user interface):

Integration for Blender:

ComfyUI serves as the backend for the Stable Horde:

Not really an alternative frontend, but an extension to embed ComfyUI in the A1111 interface:

### Custom nodes/extensions

ComfyUI is very extensible. If you want to explore the ecosystem, here are some repos that contain nodes/extensions.

* Most of these repos have more than one custom node.
* Keep in mind some are WIP.

Extensive node suite with 100+ nodes for advanced workflows:

Cutoff implementation:

Tiled sampling (also called multidiffusion):

Different implementations of how to do prompt weighting:

Unsampling and other noise related tricks:

Making the "conditioning set area" less of a pain to use:

Conveniently enhance images through Detector, Detailer, Upscaler, Pipe, and more:

WIP custom node manager:

DeepFloyd IF in ComfyUI:

Civitai also has some:

--------------------------------------------------------------------------------
/_posts/2023-05-23-Bislerp-slightly-better-latent-upscaling.md:
--------------------------------------------------------------------------------
---
layout: post
title: "Bislerp: Slightly Better Latent Upscaling with Slerp"
categories: research
---

I noticed slerp was commonly used for interpolating between latents to make animations with Stable Diffusion. Since it seemed to work for latent interpolation, I decided to try applying it to scaling latents. It's bilinear, but with slerp instead of linear interpolation, so it's bislerp.

I know the math doesn't make that much sense, but it seems to give some good results:

Here is a non cherry picked example with a simple "hiresfix" or 2 pass type workflow with a 2x upscale using nearest-exact, bislerp and bilinear, in that order (if you want to see the exact workflows you can open these images in [ComfyUI](https://github.com/comfyanonymous/ComfyUI)):

![nearest-exact]({{site.baseurl}}/assets/b_nearest_exact.png)
![bislerp]({{site.baseurl}}/assets/b_bislerp.png)
![bilinear]({{site.baseurl}}/assets/b_bilinear.png)
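For reference, here is a rough, unoptimized sketch of what "bilinear but with slerp" means, assuming the latent is a PyTorch tensor in NCHW layout and treating the channel vector at each spatial position as the thing being slerped. This is only an illustration of the idea, not the actual ComfyUI function (linked further down), which also handles the vector magnitudes more carefully.

```python
import torch

def slerp(a, b, t, eps=1e-7):
    # Spherical interpolation along the last dimension; degrades gracefully
    # towards lerp when a and b are nearly parallel.
    a_n = a / a.norm(dim=-1, keepdim=True).clamp_min(eps)
    b_n = b / b.norm(dim=-1, keepdim=True).clamp_min(eps)
    omega = torch.acos((a_n * b_n).sum(dim=-1, keepdim=True).clamp(-1 + eps, 1 - eps))
    so = torch.sin(omega)
    return (torch.sin((1.0 - t) * omega) / so) * a + (torch.sin(t * omega) / so) * b

def bislerp_upscale(latent, out_h, out_w):
    # latent: (N, C, H, W). Slerp between neighbouring positions,
    # first along width, then along height (like separable bilinear).
    v = latent.permute(0, 2, 3, 1)  # (N, H, W, C)

    def interp_dim2(v, out_size):
        size = v.shape[2]
        pos = torch.linspace(0, size - 1, out_size, device=v.device)
        lo = pos.floor().long().clamp(0, size - 1)
        hi = (lo + 1).clamp(max=size - 1)
        t = (pos - lo.to(pos.dtype)).view(1, 1, out_size, 1)
        return slerp(v[:, :, lo, :], v[:, :, hi, :], t)

    v = interp_dim2(v, out_w)                                   # stretch width
    v = interp_dim2(v.transpose(1, 2), out_h).transpose(1, 2)   # stretch height
    return v.permute(0, 3, 1, 2).contiguous()

# Example: 2x upscale of an SD latent.
# latent = torch.randn(1, 4, 64, 64)
# upscaled = bislerp_upscale(latent, 128, 128)
```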

You'll notice that the image in the middle seems more alive. With bislerp the woman actually has some color in her and looks healthy. Bislerp is similar to bilinear but in my opinion produces better colors. From my testing it seems to produce consistently better results than the other simple latent upscaling methods.

For how to use this in [ComfyUI](https://github.com/comfyanonymous/ComfyUI), make sure you are updated to the latest ComfyUI (update/update_comfyui.bat on the standalone; make sure you update even if you have freshly downloaded the standalone) and "bislerp" will be a valid option on the "Upscale Latent" node. You can load the images in this post in ComfyUI to get the workflows.

For other interfaces, my [currently very unoptimized function](https://github.com/comfyanonymous/ComfyUI/blob/c00bb1a0b78f0d2cf2e4ec2dd9ae7d61cb07a637/comfy/utils.py#L50) should be very easy to port.

Try it out and let me know what you think.

--------------------------------------------------------------------------------
/_posts/2023-08-27-Weekly-update.md:
--------------------------------------------------------------------------------
---
layout: post
title: "ComfyUI Weekly Update: Better memory management, Control Loras, ReVision and T2I adapters for SDXL"
categories: comfyui update
---

I have decided to start posting weekly updates of what is new in ComfyUI.

Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI):

### Better Memory Management

ComfyUI will now try to keep weights in vram when possible. The default behavior before was to aggressively move things out of vram. This should make it use less regular ram and speed up overall gen times a bit. It makes it work better on free Colab, computers with only 16GB of ram, and computers with high end GPUs with a lot of vram.

Because of this improvement, on my 3090 TI the generation time for the default ComfyUI workflow (512x512, batch size 1, 20 steps euler, SD1.5) with the default ComfyUI settings went from 1.38 seconds to 1.03 seconds.

### Control Loras

In case you missed it, stability.ai released [Control Loras](https://huggingface.co/stabilityai/control-lora) for SDXL. In ComfyUI these are used exactly like [ControlNets](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/).

### T2I adapters for SDXL

TencentARC released their [T2I adapters for SDXL](https://huggingface.co/TencentARC/T2I-Adapter/tree/main/models_XL). These are also used exactly like ControlNets in ComfyUI. T2I adapters are faster and more efficient than controlnets but might give lower quality. You should definitely try them out if you care about generation speed.

### ReVision

ReVision is high level concept mixing that only works on SDXL models. You can find workflows for it on the [SDXL examples page](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/#revision).

### Some Cool Recent Custom Nodes (available in the [ComfyUI manager](https://github.com/ltdrdata/ComfyUI-Manager))

[IP adapter implementation for ComfyUI](https://github.com/laksjdjf/IPAdapter-ComfyUI): [IP adapter](https://github.com/tencent-ailab/IP-Adapter) for ComfyUI.

[Neural network latent upscale](https://github.com/Ttl/ComfyUi_NNLatentUpscale): Better way of upscaling latents.
35 | 36 | -------------------------------------------------------------------------------- /_posts/2023-09-02-Weekly-update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Weekly Update: Faster VAE, Speed increases, Early inpaint models and more." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/08/27/Weekly-update.html) 10 | 11 | Along with the regular bug fixes what's new is: 12 | 13 | ### Faster VAE on Nvidia 3000 series and up 14 | 15 | The VAE is now run in bfloat16 by default on Nvidia 3000 series and up. This should reduce memory and improve speed for the VAE on these cards. People using other GPUs that don't natively support bfloat16 can run ComfyUI with `--fp16-vae` to get a similar speedup by running the VAE in float16 however this isn't the default because it can cause black images due to the values in the VAE overflowing (bfloat16 doesn't have that issue) which is why the default for other cards is full precision float32. 16 | 17 | ### Speed increases 18 | 19 | The CLIPVision model is now run on the GPU unless you have a low vram system which should speed up ReVision and other workflows that depend on it. The Canny preprocessor node is now also run on the GPU so it should be fast now. 20 | 21 | ### Support for SDXL inpaint models 22 | 23 | Huggingface has released an early [inpaint model based on SDXL](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1). It is in huggingface format so to use it in ComfyUI, download [this file](https://huggingface.co/diffusers/stable-diffusion-xl-1.0-inpainting-0.1/blob/main/unet/diffusion_pytorch_model.fp16.safetensors) and put it in the ComfyUI/models/unet directory. Then you can use the advanced->loaders->UNETLoader node to load it. 24 | 25 | [Here's an example workflow](https://gist.github.com/comfyanonymous/6792ae5460c2cadb16d4bd60381c7e06). Note that I renamed diffusion_pytorch_model.fp16.safetensors to diffusers_sdxl_inpaint_0.1.safetensors to make things more clear. 26 | 27 | Note that in ComfyUI you can right click the Load image node and "Open in Mask Editor" to add or edit the mask for inpainting. You can also copy images from the save image to the load image node by right clicking the save image node and "Copy (clipspace)" and then right clicking the load image node and "Paste (clipspace)". 28 | 29 | ### UI changes 30 | 31 | Groups can now be used to mute or bypass multiple nodes at a time. [See the pull request for more information](https://github.com/comfyanonymous/ComfyUI/pull/1358). As a reminder bypassing a node (CTRL-B or right click->bypass) can be used to disable a node while keeping connections though the node intact. 32 | 33 | The middle mouse button can now be used to drag the canvas. 34 | 35 | The history is now displayed in reverse order to make it more easy to load the last processed workflows. 36 | 37 | ### Node changes 38 | 39 | New Node: mask->ImageColorToMask: convert a specific color in an image to a mask. 40 | 41 | VAEDecodeTiled and VAEEncodeTiled: you can now set the tile size. 
42 | 43 | ### For custom node developers 44 | 45 | If your custom node broke this week it's probably because of [this commit](https://github.com/comfyanonymous/ComfyUI/commit/1c012d69afa8bd92a007a3e468e2a1f874365d39) 46 | 47 | ### Some Cool recent custom nodes 48 | 49 | [FABRIC for ComfyUI](https://github.com/ssitu/ComfyUI_fabric) 50 | 51 | [AnimateDiff for ComfyUI with ControlNet](https://github.com/Kosinkadink/ComfyUI-AnimateDiff/) 52 | 53 | -------------------------------------------------------------------------------- /_posts/2023-09-10-Weekly-update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Weekly Update: DAT upscale model support and more T2I adapters." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/09/02/Weekly-update.html) 10 | 11 | ### New upscale model supported: DAT 12 | 13 | [DAT](https://github.com/zhengchen1999/DAT) is a recent upscale model architecture that seems to give pretty decent results. That link contains a link to download some pretrained model files if you want to give it a try. 14 | 15 | Upscale models can be used [like this](https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/) in ComfyUI. 16 | 17 | ComfyUI borrows [code from chaiNNer](https://github.com/chaiNNer-org/chaiNNer/tree/main/backend/src/nodes/impl/pytorch/architecture) for the upscale models so thanks to their great work ComfyUI supports almost all the models that they support. Some interesting upscale models to try that you may be less familiar with are [HAT](https://github.com/XPixelGroup/HAT#how-to-test) and [Omni-SR](https://github.com/Francis0625/Omni-SR#preparation). 18 | 19 | 20 | ### Support for T2I adapters in diffusers format 21 | 22 | TencentARC and HuggingFace released [these T2I adapter model files.](https://huggingface.co/collections/TencentARC/t2i-adapter-sdxl-64fac9cbf393f30370eeb02f) ComfyUI has been updated to support this file format. As a reminder T2I adapters are used exactly like [ControlNets in ComfyUI](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/). T2I adapters take much less processing power than controlnets but might give worse results. 23 | 24 | ### New nodes 25 | 26 | Conditioning (Set Area with Percentage): Set prompt areas for [Area Composition](https://comfyanonymous.github.io/ComfyUI_examples/area_composition/) with coordinates and sizes with values 0.0 to 1.0 instead of pixel values. 27 | 28 | ### UI changes 29 | 30 | You can now paste (CTRL-V) images onto the canvas and it will automatically create a LoadImage node. You can also paste (CTRL-V) images to a selected Load Image node. 31 | 32 | Less trailing zeros are now shown in the UI. 33 | 34 | Cancelling a generation now works in every progress bar. 35 | 36 | ### Notable Bug fixes 37 | 38 | If you run a publicly accessible instance of ComfyUI which I don't really recommend in the first place please update it to the latest version because a [security bug was fixed](https://github.com/comfyanonymous/ComfyUI/commit/d6d1a8998fa60da9265ea3e9db35d80441cac6fd). 39 | 40 | This does not affect regular local installs so if you use the standalone windows download for example this does not affect you at all. 41 | 42 | 43 | Big thanks to the one who found and reported the issue. 
44 | 45 | 46 | ### Some Cool custom nodes 47 | 48 | [ControlNet-LLLite](https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI): nodes to use [controlnet-lllite models](https://huggingface.co/kohya-ss/controlnet-lllite/tree/main) 49 | 50 | [ComfyUI Translation](https://github.com/AIGODLIKE/AIGODLIKE-ComfyUI-Translation): Translating ComfyUI to different languages like Chinese, Japanese and others. 51 | 52 | -------------------------------------------------------------------------------- /_posts/2023-09-17-Weekly-update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Weekly Update: New Model Merging nodes." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/09/10/Weekly-update.html) 10 | 11 | Not many new features this week but I'm working on a few things that are not yet ready for release. 12 | 13 | ### Model Merging 14 | 15 | There are two new model merging nodes: 16 | 17 | ModelSubtract: `(model1 - model2) * multiplier` 18 | 19 | ModelAdd: `model1 + model2` 20 | 21 | They can be used to replicate the "add difference" model merging function present in other UIs. 22 | 23 | As a reminder Models can be merged on the fly in ComfyUI in an extremely efficient way. [See this page for some examples including one with the new nodes.](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) 24 | 25 | ### New Sampler: DDPM 26 | 27 | Gives outputs that are extremely close to euler_ancestral. 28 | 29 | ### Some Cool new ComfyUI related projects 30 | 31 | [A simple prompt generator for ComfyUI utilizing ExLlama](https://github.com/Zuellni/ComfyUI-ExLlama-Nodes) 32 | 33 | [New Krita plugin that uses ComfyUI](https://github.com/Acly/krita-ai-diffusion) 34 | -------------------------------------------------------------------------------- /_posts/2023-09-24-Weekly-update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Weekly Update: Free Lunch and more." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/09/17/Weekly-update.html) 10 | 11 | ## New Nodes 12 | 13 | FreeU: An implementation of [FreeU: Free Lunch in Diffusion U-Net](https://github.com/ChenyangSi/FreeU). The node can be found in the _for_testing category. The default options on the node are the suggested settings for SD2.1. To decrease the chaos in the image/increase contrast increase b1 and b2 and decrease s1 and s2. b1/b2 have stronger effects than s1/s2. You should play around with the values and see what works the best for your favorite model and type of image. 14 | 15 | LatentAdd, LatentSubtract and LatentMultiply: These new nodes can be found in latent->advanced. They can be used to add the values of two latents together, subtract them or multiply the values of a latent by a number. Interestingly it looks like multiplying a latent by -1 can be used to invert the colors of an image in latent space. I implemented them more for experimental reasons than practical reasons. 16 | 17 | ## Updates 18 | 19 | Lora weights are now converted on the GPU when possible so they should apply faster. 
20 | 21 | ImageScale related nodes can now use lanczos. 22 | 23 | Added support for controlnet models that are a different architecture from the base one. 24 | 25 | 26 | ## Some Cool ComfyUI related things 27 | 28 | [Dynamic Thresholding](https://github.com/mcmonkeyprojects/sd-dynamic-thresholding) now works on ComfyUI. 29 | 30 | [CushyStudio](https://github.com/rvion/CushyStudio) an interesting frontend for ComfyUI that lets you code workflows is back in active development. 31 | 32 | [ComfyUI is now on RunDiffusion](https://rundiffusion.com/comfyui-workflows), this is a paid service to run stable diffusion in the cloud but they seem to have put effort into supporting ComfyUI. 33 | 34 | -------------------------------------------------------------------------------- /_posts/2023-10-08-Weekly-update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Weekly Update: Pytorch 2.1, New Sampler nodes, Primitive node improvements." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/09/24/Weekly-update.html) 10 | 11 | ## Standalone Windows Package Updates 12 | 13 | The [standalone windows package](https://github.com/comfyanonymous/ComfyUI/releases) now uses python 3.11 with pytorch 2.1 cu121. Xformers has been removed. If you want to update to it you have to download a new version of the standalone. On my 3090 TI I get a 5-10% performance increase versus the old standalone. 14 | 15 | If you are happy with python 3.10 and pytorch cu118 with xformers you can continue using the update scripts in the update folder on the old standalone to keep ComfyUI up to date. 16 | 17 | To migrate from one standalone to another you can move the ComfyUI\models, ComfyUI\custom_nodes and ComfyUI\extra_model_paths.yaml (if you have one) to your new standalone. 18 | 19 | ## Primitive Node Improvements 20 | 21 | Primitive nodes can now be connected to different inputs even when the type does not match exactly. As a reminder the easiest way to use primitive nodes you can right click node -> convert to input -> double click on the new input. 22 | 23 | Since this is a major rework of how Primitive nodes work it seems to have broken some custom scripts so if you get errors make sure you update everything and if that doesn't fix it try disabling any custom scripts that are erroring out. You can see script errors by looking in your browser console or in settings->View Logs. 24 | 25 | ## New Nodes 26 | 27 | ### SamplerCustom 28 | 29 | This node and all the related ones can be found in sampling->custom_sampling. With this node it is now possible for samplers and schedulers to be their own node. This makes it much easier for people to write their own schedulers or samplers because they can now be written as standalone nodes. It also makes it easier to expose advanced options that are specific to a single sampler or scheduler by making a node for that specific sampler with those options exposed. 30 | 31 | There are new nodes like KarrasScheduler and SamplerDPMPP_SDE, these are examples of nodes that implement a single scheduler or sampler and expose some advanced options. 32 | 33 | The SamplerCustom has two different latent outputs: output and denoised_output, those correspond to the "return_with_leftover_noise" option on the advanced sampler. 
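To make the "schedulers can be their own node" point above concrete, here is a sketch of a standalone scheduler node that outputs a SIGMAS value for the SamplerCustom node, using the standard Karras et al. (2022) schedule. The class and node name are made up for this post; the built-in KarrasScheduler node is the real reference, but writing your own is roughly this much code:

```python
import torch

class MyKarrasStyleScheduler:
    # Illustrative custom scheduler node: produces the list of noise levels (sigmas)
    # that gets plugged into the SamplerCustom node's "sigmas" input.
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "steps": ("INT", {"default": 20, "min": 1, "max": 1000}),
                "sigma_max": ("FLOAT", {"default": 14.6, "min": 0.0, "max": 1000.0, "step": 0.01}),
                "sigma_min": ("FLOAT", {"default": 0.03, "min": 0.0, "max": 1000.0, "step": 0.01}),
                "rho": ("FLOAT", {"default": 7.0, "min": 0.1, "max": 100.0, "step": 0.1}),
            }
        }

    RETURN_TYPES = ("SIGMAS",)
    FUNCTION = "get_sigmas"
    CATEGORY = "sampling/custom_sampling/schedulers"

    def get_sigmas(self, steps, sigma_max, sigma_min, rho):
        # Karras et al. schedule: interpolate sigma^(1/rho) linearly between the
        # max and min sigma, raise back to the power rho, then append a final 0.0.
        ramp = torch.linspace(0, 1, steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return (torch.cat([sigmas, torch.zeros(1)]),)

NODE_CLASS_MAPPINGS = {"MyKarrasStyleScheduler": MyKarrasStyleScheduler}
```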
Here is the basic way of using the SamplerCustom node:

![SamplerCustom basic workflow]({{site.baseurl}}/assets/custom_sampler.png)


40 | 41 | 42 | ### Others 43 | 44 | PorterDuffImageComposite: Porter-Duff Image Composite node. 45 | 46 | SplitImageWithAlpha, JoinImageWithAlpha: nodes to add and remove an alpha channel from an image. Can be useful if you want to save an image with an alpha channel. 47 | 48 | ## Other Updates 49 | 50 | The mask nodes have been updated to handle batches of masks which makes them useful for animations. 51 | 52 | LatentUpscale and ImageScale now accept a width or height of zero if you want to scale images proportionally. 53 | 54 | 55 | ## Some Cool ComfyUI Related Projects 56 | 57 | [AnimateDiff For ComfyUI](https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) and the related [Prompt Scheduling Nodes](https://github.com/FizzleDorf/ComfyUI_FizzNodes) now support SDXL. 58 | 59 | [A better IPAdapter implementation for ComfyUI](https://github.com/cubiq/ComfyUI_IPAdapter_plus), watch the [Youtube Video](https://www.youtube.com/watch?v=7m9ZZFU3HWo) for a detailed view of it. 60 | -------------------------------------------------------------------------------- /_posts/2023-10-30-Update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Update: SSD-1B, Hypertile, FreeU V2" 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/10/08/Weekly-update.html) 10 | 11 | I have not written an update for a while and this is because most of the changes have been fixing bugs, improving memory usage/performance and cleaning up/refactoring (breaking) things to make it easier to implement future models from both the community and stability. 12 | 13 | 14 | ## SSD-1B support 15 | 16 | A model distilled from SDXL that claims to be "the same quality" as SDXL however they removed all the middle transformers which means consistency is going to be worse even if the "quality" is similar. It's an interesting model that you should give a try because the unet is asymmetric so I went ahead and implemented it. 17 | 18 | Download the checkpoint from [here](https://huggingface.co/segmind/SSD-1B/blob/main/SSD-1B.safetensors), put it in the models/checkpoints folder and use it like a regular checkpoint with the Load Checkpoint node. 19 | 20 | Here's a semi complex workflow using SSD-1B as a first pass model and WD1.5 as the second pass to generate the ComfyUI mascot in an autumn scenery: 21 |

![SSD-1B first pass example]({{site.baseurl}}/assets/ssd1b.webp)

24 | 25 | Yes this is a webp file and ComfyUI can load workflows from webp files now. A node to save them is coming soon. 26 | 27 | ## Hypertile node 28 | 29 | [Hypertile](https://github.com/tfernd/HyperTile) a method of tiling the self attention of the upper layers to speed up sampling at the cost of consistency has been implemented as _for_testing->HyperTile, max_depth controls which layers it is applied to, 0 meaning only the toppest layer, 1 meaning the top two layers and so on. 30 | 31 | ## FreeU V2 32 | 33 | The authors of [FreeU](https://github.com/ChenyangSi/FreeU) updated it, this update has been implemented as a FreeU_V2 node that can be found in _for_testing->FreeU_V2 34 | 35 | ## New Save nodes 36 | 37 | You can now save VAEs and CLIP models to standalone files with the VAESave and CLIPSave nodes. 38 | 39 | ## UI stuff 40 | 41 | Node templates can now be imported and exported. 42 | Saving a template: Select nodes -> right click canvas -> Save Selected as Template 43 | Using a template: Right click on canvas -> Node templates. 44 | 45 | Alt-C can now be used to collapse/uncollapse nodes. 46 | 47 | You can now add nodes to a group by: 48 | Select nodes -> right click on canvas -> Add selected nodes to group. 49 | 50 | 51 | -------------------------------------------------------------------------------- /_posts/2023-11-24-Update.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "ComfyUI Update: Stable Video Diffusion on 8GB vram with 25 frames and more." 4 | categories: comfyui update 5 | --- 6 | 7 | Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). 8 | 9 | [Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/10/30/Update.html) 10 | 11 | ## Stable Video Diffusion 12 | 13 |

![Stable Video Diffusion example]({{site.baseurl}}/assets/img2vid.webp)

16 | 17 | 18 | ComfyUI now supports the new Stable Video Diffusion image to video model. With ComfyUI you can generate 1024x576 videos of 25 frames long on a GTX 1080 with 8GB vram. I can confirm that it also works on my AMD 6800XT with ROCm on Linux. 19 | 20 | For workflows and explanations how to use these models see: [the video examples page.](https://comfyanonymous.github.io/ComfyUI_examples/video/) 21 | 22 | If you want the workflow I used to generate the video above you can save it and drag it on ComfyUI. 23 | 24 | ## LCM 25 | 26 | LCM models are models that can be sampled in very few steps. Recently loras have been released to convert regular SDXL and SD1.x models to LCM. 27 | 28 | For how to use them in ComfyUI see: [LCM examples page.](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) 29 | 30 | ## Kohya Deep Shrink 31 | 32 | The _for_testing->PatchModelAddDownscale node adds a downscale to the unet that can be scheduled so that it only happens during the first timesteps of the model. This lets you generate consistent images at higher resolutions without having to do a second pass. 33 | 34 | To use it add the node to your workflow and double the resolution you are generating at. 35 | 36 | ## Support for ZSNR V Prediction Models 37 | 38 | The new ModelSamplingDiscrete node lets you set a model as v_prediction zsnr to sample them properly. 39 | 40 | The new RescaleCFG node implements the rescale cfg algorithm in the zsnr paper and should also be used with these models to sample them properly. 41 | 42 | To use a ZSNR v_pred model use the regular checkpoint loader node to load it then chain the ModelSamplingDiscrete with v_pred and zsnr selected. You can then add the RescaleCFG node. 43 | 44 | ## Other 45 | 46 | The Load VAE node now supports TAESD. TAESD is a fast and small VAE implementation that is used for the high quality previews. If you have taesd_encoder and taesd_decoder or taesdxl_encoder and taesdxl_decoder in models/vae_approx the options "taesd" and "taesdxl" will show up on the Load VAE node. The reason I implemented it is because I thought it might go well with LCM for those that want maximum speed. 47 | 48 | SaveAnimatedWEBP: A node to save batches of images as an animated webp. These webp files contain metadata and can be loaded in ComfyUI to get the workflow. 49 | 50 | DOM element clipping: Nodes can now render on top of multi line text boxes. If your UI got slower you can disable it in the UI settings. 51 | 52 | The UI can now load workflows that are in api format. 53 | 54 | New color schemes in the UI: settings -> color palette 55 | 56 | RepeatImageBatch: for repeating a batch of images. 57 | 58 | ImageCrop: node to crop images. 59 | 60 | latent->advanced->LatentInterpolate: uses nlerp to interpolate between two different latents. 61 | 62 | heunpp2 sampler: A sampler that comes from [this repo](https://github.com/Carzit/sd-webui-samplers-scheduler). The paper mentions how using different samplers in a single generation can improve quality. Using different samplers for different steps has been natively supported in ComfyUI for a long time now by chaining advanced sampler nodes together. You can see a complex example of chaining different advanced samplers together on the [Noise Latent Composition Example page](https://comfyanonymous.github.io/ComfyUI_examples/noisy_latent_composition/) page. 63 | 64 | COMBO primitive nodes now have a filter so you can filter what they will increment or randomize to. 
The new FlipSigmas node can be used to flip the sigmas passed to the custom sampler node. It can be used to "unsample" an image.

## Cool ComfyUI related projects

A very cool and novel UI that uses ComfyUI for its backend: [https://github.com/rvion/CushyStudio](https://github.com/rvion/CushyStudio)

The Krita addon that everyone is talking about right now: [https://github.com/Acly/krita-ai-diffusion](https://github.com/Acly/krita-ai-diffusion)

If stable video feels a bit lacking, here's the best implementation of AnimateDiff: [https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved](https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved)

--------------------------------------------------------------------------------
/_posts/2023-12-19-Update.md:
--------------------------------------------------------------------------------
---
layout: post
title: "ComfyUI Update: What's new from the last few weeks, SD Turbo, Stable Zero123, Group Nodes, FP8, and more."
categories: comfyui update
---

Here's what's new recently in [ComfyUI](https://github.com/comfyanonymous/ComfyUI). Since this is most likely going to be the final post this year, I would like to give a big thanks to everyone who contributes, reports bugs, and to Stability AI.

[Here's the link to the previous update in case you missed it.]({{site.baseurl}}/comfyui/update/2023/11/24/Update.html)

## SDXL Turbo

[video: {{site.baseurl}}/assets/sdxl_turbo.webm]

The workflow for live prompting with SDXL Turbo can be found on the [SDXL Turbo examples page](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/). There is also an SD2.1 turbo model that can be found [here](https://huggingface.co/stabilityai/sd-turbo/blob/main/sd_turbo.safetensors) and used in the same workflow if SDXL is too slow on your machine.

## Frontend Improvements

Group nodes: you can now select multiple nodes and Right Click -> Convert to Group Node. This allows you to combine multiple nodes into a single one.

Undo Redo: CTRL-Z and CTRL-Y can now be used to undo and redo.

Reroute nodes can now be used with Primitive Nodes.

## Stable Zero123

Stable Zero123 is an SD1.x model that can generate different views of an object. Given a photograph of an object on a simple color background, it can generate images of what that object looks like from different angles. Normally this model is meant to be used to generate 3d models from a photograph: you generate a bunch of images at different angles, then use photogrammetry to create a 3d model from those images. In ComfyUI I only implemented the diffusion model itself.

An example of how to use it can be found on the [examples page here](https://comfyanonymous.github.io/ComfyUI_examples/3d/).

## FP8 support

Pytorch supports storing weights in two different fp8 formats: e4m3fn and e5m2. I only recommend using fp8 if you are running out of memory.

`--fp8_e4m3fn-unet` or `--fp8_e5m2-unet` can be used to store the MODEL (unet) weights in memory in either of those fp8 formats.

`--fp8_e4m3fn-text-enc` or `--fp8_e5m2-text-enc` can be used to store the CLIP (text encoder) weights in memory in either of those fp8 formats.

fp8 has a slight performance penalty and makes generated images slightly lower quality, but it greatly reduces memory usage. I think e4m3fn looks best but I have seen [some people](https://github.com/comfyanonymous/ComfyUI/discussions/2180) prefer e5m2, so if you want to use fp8 try both and pick the one you like best.

To have both CLIP and MODEL store their weights in fp8 e4m3fn you would launch ComfyUI like:

`python main.py --fp8_e4m3fn-text-enc --fp8_e4m3fn-unet`

If you are using the standalone windows package, copy run_nvidia_gpu.bat to run_nvidia_fp8.bat, edit it with notepad, add those arguments to it, and then use it to run ComfyUI.

## Python 3.12 and Pytorch nightly 2.3

For those who want bleeding edge maximum performance, the standalone package with nightly pytorch for Windows has been updated to Python 3.12 and Pytorch Nightly 2.3. It can be found on [the releases page](https://github.com/comfyanonymous/ComfyUI/releases). The regular standalone uses Python 3.11 and Pytorch 2.1.2.

## Self Attention Guidance

_for_testing->SelfAttentionGuidance implements Self Attention Guidance: it makes images sharper and more consistent at the cost of slower generation times.

## PerpNeg

_for_testing->PerpNeg implements PerpNeg. This makes some images more consistent by making the negative prompt have a more precise effect, at the expense of performance. To use it properly make sure "empty_conditioning" is connected to an empty CLIP Text Encode node.
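For the curious, my loose reading of the Perp-Neg idea is that the negative prompt's guidance direction is first made perpendicular to the positive one (relative to the empty-prompt prediction), so it can no longer cancel out the parts of the positive guidance it happens to share — which is why the node needs that empty conditioning. A rough sketch in PyTorch, as an illustration of the idea only; the actual node's math may differ in details like how the scales are applied:

```python
import torch

def perp_neg_cfg(e_pos, e_neg, e_empty, cfg_scale, neg_scale=1.0):
    # e_pos, e_neg, e_empty: the model's noise predictions for the positive prompt,
    # the negative prompt and the empty prompt, all with the same shape (N, C, H, W).
    pos = e_pos - e_empty
    neg = e_neg - e_empty
    # Project out the component of the negative direction that is parallel to the
    # positive direction, leaving only the part orthogonal to the positive guidance.
    dims = tuple(range(1, pos.dim()))
    dot = (neg * pos).sum(dim=dims, keepdim=True)
    sq = (pos * pos).sum(dim=dims, keepdim=True).clamp_min(1e-8)
    neg_perp = neg - (dot / sq) * pos
    return e_empty + cfg_scale * (pos - neg_scale * neg_perp)
```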
63 | 64 | ## Segmind Vega Model 65 | 66 | To use it just download the [segmind vega checkpoint](https://huggingface.co/segmind/Segmind-Vega/blob/main/segmind-vega.safetensors) and put it in the ComfyUI/models/checkpoints/ folder. You can then use it like a regular SDXL checkpoint. 67 | 68 | The segmind vega LCM lora is also supported, to use it [download it from the official repo](https://huggingface.co/segmind/Segmind-VegaRT/blob/main/pytorch_lora_weights.safetensors), rename it and put it in ComfyUI/models/loras/ folder. You can then use the [LCM Lora example](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) with the segmind vega LCM lora and the segmind vega checkpoint. You might have to lower the cfg to 1.2 to get good images. 69 | 70 | ## Other 71 | 72 | A SaveAnimatedPNG node that can save animated PNGs which is a format that works well with ffmpeg unlike animated webp. 73 | 74 | `--deterministic` can now be used to make pytorch try to use slower deterministic algorithms which might help reproducing images. 75 | 76 | `--gpu-only` now puts intermediate values in GPU memory instead of CPU memory. 77 | 78 | GLora files are now supported. 79 | 80 | ## Interesting ComfyUI projects 81 | 82 | ComfyUI in Blender: [https://github.com/AIGODLIKE/ComfyUI-BlenderAI-node](https://github.com/AIGODLIKE/ComfyUI-BlenderAI-node) 83 | 84 | PixArt, PixArt LCM for ComfyUI and more: [https://github.com/city96/ComfyUI_ExtraModels](https://github.com/city96/ComfyUI_ExtraModels) 85 | 86 | [ComfyUI Portrait Master Custom Node](https://github.com/florestefano1975/comfyui-portrait-master) 87 | 88 | [Marigold depth estimation in ComfyUI](https://github.com/kijai/ComfyUI-Marigold) 89 | 90 | [StyleAligned for ComfyUI](https://github.com/brianfitzgerald/style_aligned_comfy) 91 | 92 | [ComfyUI Stable Fast](https://github.com/gameltb/ComfyUI_stable_fast) 93 | 94 | [Sigmas Tools and The Golden Scheduler](https://github.com/Extraltodeus/sigmas_tools_and_the_golden_scheduler) 95 | -------------------------------------------------------------------------------- /_posts/2024-06-18-Next-Chapter.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: "The next chapter for ComfyUI" 4 | categories: comfyui update 5 | --- 6 | 7 | As some of you already know, I have resigned from Stability AI and am starting a new chapter. I am partnering with mcmonkey4eva, Dr.Lt.Data, pythongossssss, robinken, and yoland68 to start [Comfy Org](https://comfy.org). We will continue to develop and improve ComfyUI with a lot more resources. 8 | 9 | As you might have noticed, I am a bit overwhelmed by everything so far. Going forward, the team will work on solving many of the issues around ComfyUI while we continue to focus on keeping ComfyUI on the cutting edge. 10 | 11 | Some of the main focuses: 12 | 13 | * The primary focus will be to develop ComfyUI to be the best free open source software project to inference AI models. 14 | 15 | * The focus will mainly be on image/video/audio models in that order, with the potential to add more modalities in the future. 16 | 17 | * The team will focus on making ComfyUI more comfortable to use. This includes iterating on the custom node registry and enforcing some basic standards to make custom nodes safer to install. 18 | 19 | I believe that true open source is the best way forward and hope to make ComfyUI succeed so well that it will inspire companies to join the open source effort. 
I personally believe that closed source AI is a dead end and a waste of time. 20 | 21 | Thank you, everyone, for supporting Comfy, for contributing, for writing custom nodes and for being part of the Comfy ecosystem. The future is truly Comfy. 22 | 23 | Be sure to check the Comfy Org blog for more updates: [https://blog.comfy.org/](https://blog.comfy.org/) 24 | 25 | # What's new in ComfyUI 26 | 27 | For those that missed them these are the major updates in ComfyUI in the last few weeks. 28 | 29 | ### SD3 support 30 | 31 | You can find basic examples on the page here: 32 | [https://comfyanonymous.github.io/ComfyUI_examples/sd3/](https://comfyanonymous.github.io/ComfyUI_examples/sd3/) 33 | 34 | ### Stable Audio Support 35 | 36 | I have not put an example for this one on an example page yet because there's still a few things left to do but it should work and if you want to give it a try you can find a workflow on: [https://gist.github.com/comfyanonymous/0e04181f7fd01301230adc106b691cc2](https://gist.github.com/comfyanonymous/0e04181f7fd01301230adc106b691cc2) 37 | 38 | ### TensorRT support 39 | 40 | Thanks to Nvidia there are now [TensorRT nodes for ComfyUI](https://github.com/comfyanonymous/ComfyUI_TensorRT), these can be used to compile models to TensorRT engine files to get a massive speed boost during inference. 41 | 42 | 43 | -------------------------------------------------------------------------------- /about.markdown: -------------------------------------------------------------------------------- 1 | --- 2 | layout: page 3 | title: About 4 | permalink: /about/ 5 | --- 6 | 7 | This is a blog for everything related to [ComfyUI](https://github.com/comfyanonymous/ComfyUI) 8 | -------------------------------------------------------------------------------- /assets/b_bilinear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/b_bilinear.png -------------------------------------------------------------------------------- /assets/b_bislerp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/b_bislerp.png -------------------------------------------------------------------------------- /assets/b_nearest_exact.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/b_nearest_exact.png -------------------------------------------------------------------------------- /assets/comfyui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/comfyui.png -------------------------------------------------------------------------------- /assets/custom_sampler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/custom_sampler.png -------------------------------------------------------------------------------- /assets/img2vid.webp: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/img2vid.webp -------------------------------------------------------------------------------- /assets/sdxl_turbo.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/sdxl_turbo.webm -------------------------------------------------------------------------------- /assets/ssd1b.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/comfyanonymous/ComfyUI_Blog/30d9ddb4e3d46002329b784a5057bcfa175baaf1/assets/ssd1b.webp -------------------------------------------------------------------------------- /index.markdown: -------------------------------------------------------------------------------- 1 | --- 2 | # Feel free to add content and custom Front Matter to this file. 3 | # To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults 4 | 5 | layout: home 6 | --- 7 | --------------------------------------------------------------------------------