├── hakuimg
│   ├── __init__.py
│   ├── blur.py
│   ├── custom_exif.py
│   ├── color
│   │   ├── sharpness.py
│   │   ├── noise.py
│   │   ├── exposure_offset.py
│   │   ├── vignette.py
│   │   ├── hdr.py
│   │   └── __init__.py
│   ├── flip.py
│   ├── tilt_shift
│   │   ├── __init__.py
│   │   └── utils.py
│   ├── image_preprocess.py
│   ├── lens_distortion.py
│   ├── chromatic
│   │   ├── __init__.py
│   │   └── kromo.py
│   ├── pre_resize.py
│   ├── outline_expansion.py
│   ├── neon.py
│   ├── dither.py
│   ├── sketch.py
│   ├── pixeloe.py
│   ├── curve.py
│   ├── blend.py
│   └── pixel.py
├── inoutpaint
│   ├── __init__.py
│   ├── utils.py
│   └── main.py
├── .gitignore
├── assets
│   ├── Blur.jpg
│   ├── Flip.jpg
│   ├── Glow.jpg
│   ├── Color.jpg
│   ├── Curve.jpg
│   ├── HakuImg.jpg
│   ├── PixelOE.jpg
│   ├── Sketch.jpg
│   ├── Chromatic.jpg
│   ├── Pixelize.jpg
│   ├── PreResize.jpg
│   ├── TiltShift.jpg
│   ├── BlendImage.jpg
│   ├── CustomExif.jpg
│   ├── InOutPaint.jpg
│   ├── LenDistortion.jpg
│   └── OutlineExpansion.jpg
├── example_workflows
│   ├── Blur.jpg
│   ├── Color.jpg
│   ├── Curve.jpg
│   ├── Flip.jpg
│   ├── Glow.jpg
│   ├── Sketch.jpg
│   ├── PixelOE.jpg
│   ├── Pixelize.jpg
│   ├── BlendImage.jpg
│   ├── Chromatic.jpg
│   ├── CustomExif.jpg
│   ├── InOutPaint.jpg
│   ├── PreResize.jpg
│   ├── TiltShift.jpg
│   ├── LenDistortion.jpg
│   ├── OutlineExpansion.jpg
│   ├── Blur.json
│   ├── Flip.json
│   ├── LenDistortion.json
│   ├── TiltShift.json
│   ├── Chromatic.json
│   ├── Glow.json
│   ├── Pixelize.json
│   ├── InOutPaint.json
│   ├── Sketch.json
│   ├── Color.json
│   ├── PreResize.json
│   ├── OutlineExpansion.json
│   ├── Curve.json
│   ├── CustomExif.json
│   ├── PixelOE.json
│   └── BlendImage.json
├── requirements.txt
├── pyproject.toml
├── .github
│   └── workflows
│       └── publish.yml
├── nodes
│   ├── pre_resize.py
│   ├── outline_expansion.py
│   ├── flip.py
│   ├── custom_exif.py
│   ├── blur.py
│   ├── chromatic.py
│   ├── tilt_shift.py
│   ├── lens_distortion.py
│   ├── neon.py
│   ├── pixeloe_.py
│   ├── inoutpaint.py
│   ├── save_image.py
│   ├── pixelize.py
│   ├── sketch.py
│   ├── color.py
│   ├── blend.py
│   └── curve.py
├── utils
│   ├── logging.py
│   ├── utils.py
│   └── check.py
├── __init__.py
├── README-zh.md
├── README.md
├── js
│   └── widgethider.js
└── LICENSE

/hakuimg/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/inoutpaint/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.vscode
.idea
venv
__pycache__
node.zip
--------------------------------------------------------------------------------
/assets/Blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Blur.jpg
--------------------------------------------------------------------------------
/assets/Flip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Flip.jpg
--------------------------------------------------------------------------------
/assets/Glow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Glow.jpg
--------------------------------------------------------------------------------
/assets/Color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Color.jpg
--------------------------------------------------------------------------------
/assets/Curve.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Curve.jpg
--------------------------------------------------------------------------------
/assets/HakuImg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/HakuImg.jpg
--------------------------------------------------------------------------------
/assets/PixelOE.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/PixelOE.jpg
--------------------------------------------------------------------------------
/assets/Sketch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Sketch.jpg
--------------------------------------------------------------------------------
/assets/Chromatic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Chromatic.jpg
--------------------------------------------------------------------------------
/assets/Pixelize.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/Pixelize.jpg
--------------------------------------------------------------------------------
/assets/PreResize.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/PreResize.jpg
--------------------------------------------------------------------------------
/assets/TiltShift.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/TiltShift.jpg
--------------------------------------------------------------------------------
/assets/BlendImage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/BlendImage.jpg
--------------------------------------------------------------------------------
/assets/CustomExif.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/CustomExif.jpg
--------------------------------------------------------------------------------
/assets/InOutPaint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/InOutPaint.jpg
--------------------------------------------------------------------------------
/assets/LenDistortion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/LenDistortion.jpg
--------------------------------------------------------------------------------
/assets/OutlineExpansion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/assets/OutlineExpansion.jpg
--------------------------------------------------------------------------------
/example_workflows/Blur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Blur.jpg
--------------------------------------------------------------------------------
/example_workflows/Color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Color.jpg
--------------------------------------------------------------------------------
/example_workflows/Curve.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Curve.jpg
--------------------------------------------------------------------------------
/example_workflows/Flip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Flip.jpg
--------------------------------------------------------------------------------
/example_workflows/Glow.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Glow.jpg
--------------------------------------------------------------------------------
/example_workflows/Sketch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Sketch.jpg
--------------------------------------------------------------------------------
/example_workflows/PixelOE.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/PixelOE.jpg
--------------------------------------------------------------------------------
/example_workflows/Pixelize.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Pixelize.jpg
--------------------------------------------------------------------------------
/example_workflows/BlendImage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/BlendImage.jpg
--------------------------------------------------------------------------------
/example_workflows/Chromatic.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/Chromatic.jpg
--------------------------------------------------------------------------------
/example_workflows/CustomExif.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/CustomExif.jpg
--------------------------------------------------------------------------------
/example_workflows/InOutPaint.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/InOutPaint.jpg
--------------------------------------------------------------------------------
/example_workflows/PreResize.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/PreResize.jpg
--------------------------------------------------------------------------------
/example_workflows/TiltShift.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/TiltShift.jpg
--------------------------------------------------------------------------------
/example_workflows/LenDistortion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/LenDistortion.jpg
--------------------------------------------------------------------------------
/example_workflows/OutlineExpansion.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/licyk/ComfyUI-HakuImg/HEAD/example_workflows/OutlineExpansion.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
torch
torchvision
pillow
numpy
opencv-python
scipy
matplotlib
blendmodes
pixeloe>=0.1.4
--------------------------------------------------------------------------------
/hakuimg/blur.py:
--------------------------------------------------------------------------------
from PIL import Image, ImageFilter


def run(img, img_blur):
    blur = ImageFilter.GaussianBlur(img_blur)
    return img.filter(blur)
--------------------------------------------------------------------------------
/hakuimg/custom_exif.py:
--------------------------------------------------------------------------------
from PIL import Image


def run(img, text):
    if not text:
        return img

    img.info["parameters"] = text
    return img
--------------------------------------------------------------------------------
/hakuimg/color/sharpness.py:
--------------------------------------------------------------------------------
from PIL import ImageEnhance


def get_sharpness(img, value):
    if value <= 0:
        return img

    return ImageEnhance.Sharpness(img).enhance((value + 1) * 1.5)
--------------------------------------------------------------------------------
/hakuimg/color/noise.py:
--------------------------------------------------------------------------------
from PIL import ImageChops, Image
import numpy as np


def get_noise(img, value):
    if value <= 0:
        return img

    noise = np.random.randint(0, value * 100, img.size, np.uint8)
    noise_img = Image.fromarray(noise, "L").resize(img.size).convert(img.mode)
    return ImageChops.add(img, noise_img)
--------------------------------------------------------------------------------
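Usage sketch (an illustrative addition, not a file from the repository): the helpers above all operate directly on PIL images, so they can be chained with ordinary PIL code. The checkout path and the input/output file names below are placeholders.
--------------------------------------------------------------------------------
import sys

from PIL import Image

# Assumes the ComfyUI-HakuImg checkout is importable; adjust the placeholder path.
sys.path.append("/path/to/ComfyUI-HakuImg")

from hakuimg import blur
from hakuimg.color.sharpness import get_sharpness
from hakuimg.color.noise import get_noise

img = Image.open("input.png").convert("RGB")  # "input.png" is a placeholder

img = blur.run(img, img_blur=2)      # Gaussian blur with radius 2
img = get_sharpness(img, value=0.5)  # ImageEnhance.Sharpness with factor (0.5 + 1) * 1.5
img = get_noise(img, value=0.2)      # add random luminance noise

img.save("output.png")
--------------------------------------------------------------------------------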
/pyproject.toml:
--------------------------------------------------------------------------------
[project]
name = "comfyui-hakuimg"
description = "Image processing tool for ComfyUI."
version = "1.0.6"
license = { text = "Apache-2.0" }

[project.urls]
Repository = "https://github.com/licyk/ComfyUI-HakuImg"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "licyk"
DisplayName = "ComfyUI-HakuImg"
Icon = ""
--------------------------------------------------------------------------------
/hakuimg/color/exposure_offset.py:
--------------------------------------------------------------------------------
from PIL import ImageEnhance, Image
import numpy as np


def get_exposure_offset(img, value, brightness_value):
    if value <= 0:
        return img

    np_img = np.array(img).astype(float) + value * 75
    np_img = np.clip(np_img, 0, 255).astype(np.uint8)
    img = Image.fromarray(np_img)
    return ImageEnhance.Brightness(img).enhance((brightness_value + 1) - value / 4)
--------------------------------------------------------------------------------
/hakuimg/flip.py:
--------------------------------------------------------------------------------
import enum

import numpy as np
from PIL import Image


class Axis(str, enum.Enum):
    VERTICAL = "vertical"
    HORIZONTAL = "horizontal"


def run(pil_img, axis):
    np_img = np.array(pil_img)
    if axis == Axis.VERTICAL:
        np_img = np.flipud(np_img)
    elif axis == Axis.HORIZONTAL:
        np_img = np.fliplr(np_img)

    return Image.fromarray(np_img)
--------------------------------------------------------------------------------
/hakuimg/tilt_shift/__init__.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from PIL import Image

from .utils import tilt_shift


def run(pil_img, focus_ratio: float, dof: int):
    focus_ratio += 5

    np_img = np.array(pil_img)
    height = np_img.shape[0]

    focus_height = round(height * (focus_ratio / 10))
    np_img = tilt_shift(np_img, dof=dof, focus_height=focus_height)

    return Image.fromarray(np_img)
--------------------------------------------------------------------------------
/hakuimg/image_preprocess.py:
--------------------------------------------------------------------------------
import torch


def image_preprocess(img: torch.Tensor, device: str):
    use_channel_last = False
    if img.ndim == 3:
        img = img.unsqueeze(0)
    if img.size(3) <= 4:
        # ComfyUI IMAGE tensors are channel-last (B, H, W, C); convert to (B, C, H, W).
        img = img.permute(0, 3, 1, 2)
        use_channel_last = True
    if img.size(1) == 4:
        # Drop the alpha channel.
        img = img[:, :3]
    org_device = img.device
    if device != "default":
        img = img.to(device)
    return img, use_channel_last, org_device
--------------------------------------------------------------------------------
/inoutpaint/utils.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import numpy as np
import cv2
from PIL import Image


def resize_with_mask(
    img: Image.Image, w, h, t, b, l, r,
) -> tuple[Image.Image, Image.Image]:
    new_img = Image.new("RGB", (w, h))
    new_img.paste(img, (l, t))

    mask = Image.new("L", (w, h), 255)
    mask_none = Image.new("L", (r - l, b - t), 0)
    mask.paste(mask_none, (l, t))
    return new_img, mask
--------------------------------------------------------------------------------
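Usage sketch (illustrative, not a repository file): resize_with_mask pastes the source image onto a larger blank canvas and returns a mask that is white over the newly added border (the region to outpaint) and black over the original pixels. The 128 px padding, file names, and checkout path are placeholders.
--------------------------------------------------------------------------------
import sys

from PIL import Image

sys.path.append("/path/to/ComfyUI-HakuImg")  # placeholder checkout path
from inoutpaint.utils import resize_with_mask

src = Image.open("input.png").convert("RGB")  # placeholder input image
pad = 128
w, h = src.width + 2 * pad, src.height + 2 * pad

# Place the source in the middle of a larger canvas; l/t/r/b describe where
# the original pixels end up on that canvas.
canvas, mask = resize_with_mask(
    src,
    w, h,
    t=pad, b=pad + src.height,
    l=pad, r=pad + src.width,
)

canvas.save("outpaint_canvas.png")
mask.save("outpaint_mask.png")
--------------------------------------------------------------------------------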
/hakuimg/color/vignette.py:
--------------------------------------------------------------------------------
from PIL import ImageDraw, Image, ImageFilter


def get_vignette(img, value):
    if value <= 0:
        return img

    width, height = img.size
    mask = Image.new("L", (width, height), 0)
    draw = ImageDraw.Draw(mask)
    padding = 100 - value * 100
    draw.ellipse((-padding, -padding, width + padding, height + padding), fill=255)
    mask = mask.filter(ImageFilter.GaussianBlur(radius=100))
    return Image.composite(img, Image.new("RGB", img.size, "black"), mask)
--------------------------------------------------------------------------------
/hakuimg/lens_distortion.py:
--------------------------------------------------------------------------------
import enum

import cv2
import numpy as np
from PIL import Image


def run(pil_img, k1, k2):
    np_img = np.array(pil_img)
    height, width = np_img.shape[:2]
    focal_length = width
    center_x = width / 2
    center_y = height / 2

    K = np.array(
        [[focal_length, 0, center_x], [0, focal_length, center_y], [0, 0, 1]],
        dtype=np.float64,
    )
    D = np.array([k1, k2, 0, 0], dtype=np.float64)
    img = cv2.fisheye.undistortImage(np_img, K, D, Knew=K)

    return Image.fromarray(img)
--------------------------------------------------------------------------------
/hakuimg/chromatic/__init__.py:
--------------------------------------------------------------------------------
from PIL import Image

from .kromo import add_chromatic


def run(pil_img, strength, blur=False):
    if strength <= 0:
        return pil_img

    img = pil_img

    if img.size[0] % 2 == 0 or img.size[1] % 2 == 0:
        if img.size[0] % 2 == 0:
            img = img.crop((0, 0, img.size[0] - 1, img.size[1]))
            img.load()
        if img.size[1] % 2 == 0:
            img = img.crop((0, 0, img.size[0], img.size[1] - 1))
            img.load()

    img = add_chromatic(img, strength + 0.12, not blur)
    return img
--------------------------------------------------------------------------------
/hakuimg/pre_resize.py:
--------------------------------------------------------------------------------
import torch
from torchvision.transforms.functional import to_pil_image
from pixeloe.torch.utils import pre_resize
from .image_preprocess import image_preprocess


def run(
    img: torch.Tensor,
    target_pixels: int,
    pixel_size: int,
    device: str,
):
    img, use_channel_last, org_device = image_preprocess(img, device)
    img = to_pil_image(img[0])
    img = pre_resize(img, target_size=target_pixels, patch_size=pixel_size)
    img = img.to(org_device)
    if use_channel_last:
        img = img.permute(0, 2, 3, 1)
    return (img,)
--------------------------------------------------------------------------------
/hakuimg/outline_expansion.py:
--------------------------------------------------------------------------------
import torch
from .image_preprocess import image_preprocess
from pixeloe.torch.outline import outline_expansion


def run(
    img: torch.Tensor,
    pixel_size: int,
    thickness: int,
    device: str,
):
    img, use_channel_last, org_device = image_preprocess(img, device)
    oe_image, oe_weight = outline_expansion(img, thickness, thickness, pixel_size)
    oe_image = oe_image.to(org_device)
    oe_weight = oe_weight.to(org_device).repeat(1, 3, 1, 1)
    if use_channel_last:
        oe_image = oe_image.permute(0, 2, 3, 1)
        oe_weight = oe_weight.permute(0, 2, 3, 1)

    return oe_image, oe_weight
--------------------------------------------------------------------------------
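Usage sketch (illustrative, not a repository file): pre_resize and outline_expansion are the PixelOE preprocessing steps wrapped by the PreResize and OutlineExpansion nodes, and they can be chained directly on a ComfyUI-style image tensor. The random tensor, parameter values, and checkout path below are placeholders, and exact behaviour depends on the installed pixeloe version.
--------------------------------------------------------------------------------
import sys

import torch

sys.path.append("/path/to/ComfyUI-HakuImg")  # placeholder checkout path
from hakuimg.pre_resize import run as pre_resize_run
from hakuimg.outline_expansion import run as outline_expansion_run

# ComfyUI IMAGE tensors are float batches in channel-last layout: (B, H, W, C), values in [0, 1].
img = torch.rand(1, 512, 512, 3)  # dummy image standing in for real input

# Resize toward the PixelOE target size, then thicken outlines before pixelization.
(resized,) = pre_resize_run(img, target_pixels=256, pixel_size=4, device="cpu")
oe_image, oe_weight = outline_expansion_run(resized, pixel_size=4, thickness=3, device="cpu")

print(resized.shape, oe_image.shape, oe_weight.shape)
--------------------------------------------------------------------------------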
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
name: Publish to Comfy registry
on:
  workflow_dispatch:
  push:
    branches:
      - main
      - master
    paths:
      - "pyproject.toml"

permissions:
  issues: write

jobs:
  publish-node:
    name: Publish Custom Node to registry
    runs-on: ubuntu-latest
    if: ${{ github.repository_owner == 'licyk' }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Publish Custom Node
        uses: Comfy-Org/publish-node-action@v1
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
--------------------------------------------------------------------------------
/hakuimg/neon.py:
--------------------------------------------------------------------------------
import cv2
from PIL import Image
import numpy as np
import scipy as sp

# from hakuimg.blend import Blend
from .blend import Blend


def run(pil_img, blur, strength, mode="BS"):
    img = np.array(pil_img)
    img = img / 255

    if mode == "BS":
        img_blur = cv2.GaussianBlur(img, (0, 0), blur)
        img_glow = Blend.screen(img_blur, img, strength)
    elif mode == "BMBL":
        img_blur = cv2.GaussianBlur(img, (0, 0), blur)
        img_mul = Blend.multiply(img_blur, img)
        img_mul_blur = cv2.GaussianBlur(img_mul, (0, 0), blur)
        img_glow = Blend.lighten(img_mul_blur, img, strength)
    else:
        raise NotImplementedError

    return (img_glow * 255).astype(np.uint8)
--------------------------------------------------------------------------------
/hakuimg/dither.py:
--------------------------------------------------------------------------------
from __future__ import annotations

from itertools import product

from PIL import Image
import numpy as np


def dithering(img: Image, find_new_color):
    img = np.array(img)
    d_h, d_w, c = img.shape
    new_res = np.array(img, dtype=np.float32) / 255
    for i, j in product(range(d_h), range(d_w)):
        old_val = new_res[i, j].copy()
        new_val = find_new_color(old_val)
        new_res[i, j] = new_val
        err = old_val - new_val

        # Floyd-Steinberg error diffusion; targets on row i + 1 must stay inside the image.
        if j < d_w - 1:
            new_res[i, j + 1] += err * 7 / 16
        if i < d_h - 1:
            new_res[i + 1, j] += err * 5 / 16
        if i < d_h - 1 and j > 0:
            new_res[i + 1, j - 1] += err * 3 / 16
        if i < d_h - 1 and j < d_w - 1:
            new_res[i + 1, j + 1] += err * 1 / 16
    return np.clip(new_res / np.max(new_res, axis=(0, 1)) * 255, 0, 255)
--------------------------------------------------------------------------------
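Usage sketch (illustrative, not a repository file): dithering implements Floyd-Steinberg error diffusion and leaves the palette choice to the caller via find_new_color. The 3-level quantizer, checkout path, and file names below are placeholders; the pure-Python loop is slow, so keep test images small.
--------------------------------------------------------------------------------
import sys

import numpy as np
from PIL import Image

sys.path.append("/path/to/ComfyUI-HakuImg")  # placeholder checkout path
from hakuimg.dither import dithering

levels = 3  # quantize each channel to the values 0, 0.5, 1


def find_new_color(pixel: np.ndarray) -> np.ndarray:
    # Snap each RGB channel to the nearest allowed level; dithering() diffuses
    # the remaining error to neighbouring pixels.
    return np.round(np.clip(pixel, 0, 1) * (levels - 1)) / (levels - 1)


img = Image.open("input.png").convert("RGB")  # placeholder input
result = dithering(img, find_new_color)       # float array scaled to 0-255
Image.fromarray(result.astype(np.uint8)).save("dithered.png")
--------------------------------------------------------------------------------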
/nodes/pre_resize.py:
--------------------------------------------------------------------------------
import torch
from ..hakuimg.pre_resize import run


class PreResize:
    INPUT_TYPES = lambda: {
        "required": {
            "img": ("IMAGE",),
            "target_pixels": ("INT", {"default": 256, "min": 1, "max": 1024}),
            "pixel_size": ("INT", {"default": 4, "min": 1, "max": 32}),
            "device": (["default", "cpu", "cuda", "mps"],),
        },
    }
    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("img",)
    FUNCTION = "execute"
    CATEGORY = "image/HakuImg"

    def execute(
        self,
        img: torch.Tensor,
        target_pixels: int,
        pixel_size: int,
        device: str,
    ):
        img = run(
            img=img,
            target_pixels=target_pixels,
            pixel_size=pixel_size,
            device=device,
        )

        return img
--------------------------------------------------------------------------------
/nodes/outline_expansion.py:
--------------------------------------------------------------------------------
import torch
from ..hakuimg.outline_expansion import run


class OutlineExpansion:
    INPUT_TYPES = lambda: {
        "required": {
            "img": ("IMAGE",),
            "pixel_size": ("INT", {"default": 4, "min": 1, "max": 32}),
            "thickness": ("INT", {"default": 3, "min": 1, "max": 6}),
            "device": (["default", "cpu", "cuda", "mps"],),
        },
    }
    RETURN_TYPES = ("IMAGE", "IMAGE")
    RETURN_NAMES = (
        "oe_image",
        "oe_weight",
    )
    FUNCTION = "execute"
    CATEGORY = "image/HakuImg"

    def execute(
        self,
        img: torch.Tensor,
        pixel_size: int,
        thickness: int,
        device: str,
    ):
        oe_image, oe_weight = run(
            img=img,
            pixel_size=pixel_size,
            thickness=thickness,
            device=device,
        )

        return oe_image, oe_weight
--------------------------------------------------------------------------------
/inoutpaint/main.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import numpy as np
import cv2
from PIL import Image

try:
    from .utils import *
except ImportError:
    from utils import *


HTML_TEMPLATES = {"resolution": """
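--------------------------------------------------------------------------------
Registration sketch (illustrative, not a repository file): the classes under nodes/ follow ComfyUI's custom-node pattern, and the repository's top-level __init__.py (listed in the tree but not reproduced in this dump) is what exposes them to ComfyUI. The mapping keys and display names below are assumptions, not the repository's actual values.
--------------------------------------------------------------------------------
# Hypothetical sketch of a ComfyUI custom-node __init__.py; the real
# registration lives in the repository's top-level __init__.py, not shown here.
from .nodes.pre_resize import PreResize
from .nodes.outline_expansion import OutlineExpansion

NODE_CLASS_MAPPINGS = {
    "HakuImg_PreResize": PreResize,               # key names are assumptions
    "HakuImg_OutlineExpansion": OutlineExpansion,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "HakuImg_PreResize": "Pre Resize (HakuImg)",
    "HakuImg_OutlineExpansion": "Outline Expansion (HakuImg)",
}

WEB_DIRECTORY = "./js"  # lets ComfyUI serve js/widgethider.js

__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
--------------------------------------------------------------------------------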