├── .gitattributes ├── workflows ├── workflow_comparison.png └── workflow_image_autotone.png ├── .github └── workflows │ └── publish_action.yml ├── pyproject.toml ├── README.md └── __init__.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /workflows/workflow_comparison.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparknightLLC/ComfyUI-ImageAutotone/HEAD/workflows/workflow_comparison.png -------------------------------------------------------------------------------- /workflows/workflow_image_autotone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SparknightLLC/ComfyUI-ImageAutotone/HEAD/workflows/workflow_image_autotone.png -------------------------------------------------------------------------------- /.github/workflows/publish_action.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Comfy registry 2 | on: 3 | workflow_dispatch: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "pyproject.toml" 9 | 10 | permissions: 11 | issues: write 12 | 13 | jobs: 14 | publish-node: 15 | name: Publish Custom Node to registry 16 | runs-on: ubuntu-latest 17 | if: ${{ github.repository_owner == 'SparknightLLC' }} 18 | steps: 19 | - name: Check out code 20 | uses: actions/checkout@v4 21 | - name: Publish Custom Node 22 | uses: Comfy-Org/publish-node-action@v1 23 | with: 24 | personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }} ## Add your own personal access token to your Github Repository secrets and reference it here. 
25 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # pyproject.toml 2 | [project] 3 | name = "comfyui-imageautotone" # Unique identifier for your node. Immutable after creation. 4 | description = "A node for ComfyUI that takes an input image and clips the color channels independently to increase contrast and alter color cast. This is a reinterpretation of Photoshop's 'Auto Tone' algorithm." 5 | version = "0.0.2" # Custom Node version. Must be semantically versioned. 6 | license = { file = "LICENSE.txt" } 7 | dependencies = [] # Filled in from requirements.txt 8 | 9 | [project.urls] 10 | Repository = "https://github.com/SparknightLLC/ComfyUI-ImageAutotone" 11 | 12 | [tool.comfy] 13 | PublisherId = "sparknight" # Publisher ID from the Comfy Registry website. 14 | DisplayName = "ComfyUI-ImageAutotone" # Display name for the Custom Node. Can be changed later. 15 | Icon = "https://example.com/icon.png" # SVG, PNG, JPG or GIF (MAX. 800x400px) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ComfyUI-ImageAutotone 2 | 3 | A node for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) that takes an input image and clips the color channels independently to increase contrast and alter color cast. This is a reinterpretation of Photoshop's "Auto Tone" algorithm. 4 | 5 | Special thank you to Gerald Bakker for his writeup on Adobe's various algorithms: 6 | 7 | - https://geraldbakker.nl/psnumbers/auto-options.html 8 | 9 | ![workflow_image_autotone](workflows/workflow_image_autotone.png) 10 | 11 | ### Installation 12 | 13 | Simply drag the image above into ComfyUI and use [ComfyUI Manager » Install Missing Custom Nodes](https://github.com/ltdrdata/ComfyUI-Manager). 
14 | 15 | ### Inputs 16 | 17 | - `image`: The input image(s) to process with the autotone algorithm 18 | - `shadows`: The color that defines the shadows of the image. This should be a comma-separated RGB value (e.g., '0,0,0' for black) or HEX string (e.g. '#000000'). 19 | - `highlights`: The color that defines the highlights in the image. This should be a comma-separated RGB value (e.g., '255,255,255' for white) or HEX string (e.g. '#FFFFFF'). 20 | - `shadow_clip`: The percentage of pixels to clip from the shadows. This is a value between 0 and 1. 21 | - `highlight_clip`: The percentage of pixels to clip from the highlights. This is a value between 0 and 1. 22 | 23 | ### Bonus Comparison 24 | 25 | Below is a workflow that demonstrates the difference between Image Autotone and a few other image contrast nodes on the ComfyUI Registry. 26 | 27 | ![workflow_comparison](workflows/workflow_comparison.png) 28 | 29 | --- 30 | 31 | This node was adapted from the `[image_edit]` shortcode of [Unprompted](https://github.com/ThereforeGames/unprompted), my Automatic1111 extension. -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | class ImageAutotone: 6 | 7 | @classmethod 8 | def INPUT_TYPES(s): 9 | return { 10 | "required": { 11 | "image": ("IMAGE", ), 12 | "shadows": ("STRING", { 13 | "default": "0,0,0", 14 | "tooltip": "The color to use for the shadows in the image. This should be a comma-separated RGB value (e.g., '0,0,0' for black) or HEX string (e.g. '#000000')." 15 | }), 16 | "highlights": ("STRING", { 17 | "default": "255,255,255", 18 | "tooltip": "The color to use for the highlights in the image. This should be a comma-separated RGB value (e.g., '255,255,255' for white) or HEX string (e.g. '#FFFFFF')." 
19 | }), 20 | "shadow_clip": ("FLOAT", { 21 | "default": 0.001, 22 | "min": 0.0, 23 | "max": 1.0, 24 | "step": 0.001, 25 | "tooltip": "The percentage of pixels to clip from the shadows. This is a value between 0 and 1." 26 | }), 27 | "highlight_clip": ("FLOAT", { 28 | "default": 0.001, 29 | "min": 0.0, 30 | "max": 1.0, 31 | "step": 0.001, 32 | "tooltip": "The percentage of pixels to clip from the highlights. This is a value between 0 and 1." 33 | }), 34 | } 35 | } 36 | 37 | RETURN_TYPES = ("IMAGE", ) 38 | FUNCTION = "op" 39 | CATEGORY = "image" 40 | DESCRIPTION = """Clip color channels independently to increase contrast and alter color cast. This is a reinterpretation of Photoshop's "Auto Tone" algorithm.""" 41 | 42 | # Thank you to Gerald Bakker for the following writeup on the algorithm: 43 | # https://geraldbakker.nl/psnumbers/auto-options.html 44 | 45 | def op(self, image, highlights, shadows, shadow_clip, highlight_clip): 46 | 47 | def str_to_rgb(color_string): 48 | """Converts a color string to a tuple of RGB values""" 49 | if color_string[0].isdigit(): 50 | return tuple(map(int, color_string.split(","))) 51 | elif color_string.startswith("#"): 52 | return bytes.fromhex(color_string[1:]) 53 | 54 | def calculate_adjustment_values(hist, total_pixels, clip_percent): 55 | clip_threshold = total_pixels * clip_percent 56 | cumulative_hist = hist.cumsum() 57 | 58 | # Find the first and last indices where the cumulative histogram exceeds the clip thresholds 59 | lower_bound_idx = np.where(cumulative_hist > clip_threshold)[0][0] 60 | upper_bound_idx = np.where(cumulative_hist < (total_pixels - clip_threshold))[0][-1] 61 | 62 | return lower_bound_idx, upper_bound_idx 63 | 64 | shadows = np.array(str_to_rgb(shadows)) 65 | # midtones are only used in other algorithms 66 | # midtones = str_to_rgb(self.Unprompted.parse_arg("midtones", "128,128,128")) 67 | highlights = np.array(str_to_rgb(highlights)) 68 | 69 | total_images = image.shape[0] 70 | out_images = [] 71 | 72 | 
for i in range(total_images): 73 | # image is a 4d tensor array in the format of [B, H, W, C] 74 | img_array = 255. * image[i].cpu().numpy() 75 | # img_array = np.clip(this_img, 0, 255).astype(np.uint8) 76 | 77 | # Process each channel (R, G, B) separately 78 | for channel in range(3): 79 | # Calculate the histogram of the current channel 80 | hist, _ = np.histogram(img_array[:, :, channel].flatten(), bins=256, range=[0, 255]) 81 | 82 | # Total number of pixels 83 | total_pixels = img_array.shape[0] * img_array.shape[1] 84 | 85 | # Calculate the adjustment values based on clipping percentages 86 | dark_value, light_value = calculate_adjustment_values(hist, total_pixels, shadow_clip) 87 | _, upper_light_value = calculate_adjustment_values(hist, total_pixels, highlight_clip) 88 | 89 | # Adjust light_value using upper_light_value for highlights 90 | light_value = max(light_value, upper_light_value) 91 | 92 | # Avoid division by zero 93 | if light_value == dark_value: 94 | continue 95 | 96 | # Scale and clip the channel values 97 | img_array[:, :, channel] = (img_array[:, :, channel] - dark_value) * (highlights[channel] - shadows[channel]) / (light_value - dark_value) + shadows[channel] 98 | img_array[:, :, channel] = np.clip(img_array[:, :, channel], 0, 255) 99 | 100 | img_array = np.clip(img_array, 0, 255).astype(np.uint8) 101 | 102 | out_images.append(img_array) 103 | 104 | restored_img_np = np.array(out_images).astype(np.float32) / 255.0 105 | restored_img_tensor = torch.from_numpy(restored_img_np) 106 | 107 | return (restored_img_tensor, ) 108 | 109 | 110 | NODE_CLASS_MAPPINGS = { 111 | "ImageAutotone": ImageAutotone, 112 | } 113 | 114 | # A dictionary that contains the friendly/humanly readable titles for the nodes 115 | NODE_DISPLAY_NAME_MAPPINGS = { 116 | "ImageAutotone": "Image Autotone", 117 | } 118 | --------------------------------------------------------------------------------