├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── LICENSE
├── README.md
├── __init__.py
├── examples
│   ├── propost-compound.jpg
│   ├── propost-depthmapblur-workflow.png
│   ├── propost-depthmapblur.jpg
│   ├── propost-filmgrain.jpg
│   ├── propost-lut.jpg
│   ├── propost-radialblur-dreamy.jpg
│   ├── propost-radialblur.jpg
│   ├── propost-vignette.jpg
│   ├── propost.jpg
│   └── workflow.png
├── filmgrainer
│   ├── __init__.py
│   ├── filmgrainer.py
│   ├── graingamma.py
│   └── graingen.py
├── nodes.py
├── pyproject.toml
├── requirements.txt
└── utils
    ├── __init__.py
    ├── loading.py
    └── processing.py

/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
 1 | name: Publish to Comfy registry
 2 | on:
 3 |   workflow_dispatch:
 4 |   push:
 5 |     branches:
 6 |       - master
 7 |     paths:
 8 |       - "pyproject.toml"
 9 | 
10 | jobs:
11 |   publish-node:
12 |     name: Publish Custom Node to registry
13 |     runs-on: ubuntu-latest
14 |     steps:
15 |       - name: Check out code
16 |         uses: actions/checkout@v4
17 |       - name: Publish Custom Node
18 |         uses: Comfy-Org/publish-node-action@main
19 |         with:
20 |           ## Add your own personal access token to your GitHub repository secrets and reference it here.
21 |           personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | .vscode/
3 | .DS_Store
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2024 John Richard Chipps-Harding
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # ComfyUI ProPost
 2 | 
 3 | A set of custom ComfyUI nodes for performing basic post-processing effects. These effects can help take the edge off AI imagery and make it feel more natural. We only have five nodes at the moment, but we plan to add more over time.
 4 | 
 5 | ![ComfyUI Screenshot using Pro Post](./examples/propost.jpg)
 6 | 
 7 | 
 8 | ## Installation
 9 | 
10 | The easiest way to install this package is via the [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager). Just search for `propost` and click install.
11 | 
12 | 
13 | ### Manual Installation
14 | 
15 | - Navigate to the `/ComfyUI/custom_nodes/` folder
16 | - Run `git clone https://github.com/digitaljohn/comfyui-propost.git`
17 | - Run `pip install -r requirements.txt`
18 | - Restart ComfyUI
19 | 
20 | 
21 | ## Nodes
22 | 
23 | ### Film Grain
24 | 
25 | A film grain effect with many parameters to control the look of the grain. It can generate several noise types and patterns, letting you recreate a wide variety of film grain looks.
26 | 
27 | ![Pro Post Film Grain Example](./examples/propost-filmgrain.jpg)
28 | 
29 | | Parameter   | Default   | Type    | Description                                                                               |
30 | |-------------|-----------|---------|-------------------------------------------------------------------------------------------|
31 | | gray_scale  | `false`   | Boolean | Enables grayscale mode. If true, the output will be in grayscale.                         |
32 | | grain_type  | `Fine`    | String  | Sets the grain type. Values can be Fine, Fine Simple, Coarse, or Coarser.                 |
33 | | grain_sat   | `0.5`     | Float   | Grain color saturation, with a range of 0.0 to 1.0.                                       |
34 | | grain_power | `0.7`     | Float   | Overall intensity of the grain effect.                                                    |
35 | | shadows     | `0.2`     | Float   | Intensity of grain in the shadows.                                                        |
36 | | highs       | `0.2`     | Float   | Intensity of grain in the highlights.                                                     |
37 | | scale       | `1.0`     | Float   | Image scaling ratio. Scales the image before applying grain and scales back afterwards.   |
38 | | sharpen     | `0`       | Integer | Number of sharpening passes.                                                              |
39 | | src_gamma   | `1.0`     | Float   | Gamma compensation applied to the input.                                                  |
40 | | seed        | `1`       | Integer | Seed for the grain random generator.                                                      |
41 | 
42 | > Note: This code is a direct port of the versatile `Filmgrainer` library, available here: https://github.com/larspontoppidan/filmgrainer
43 | 
44 | 
45 | ### Vignette
46 | 
47 | A simple vignette effect that darkens the edges of the image. It supports very subtle vignettes as well as more pronounced ones.
48 | 
49 | ![Pro Post Vignette Example](./examples/propost-vignette.jpg)
50 | 
51 | | Parameter   | Default   | Type    | Description                                                        |
52 | |-------------|-----------|---------|--------------------------------------------------------------------|
53 | | intensity   | `1.0`     | Float   | The intensity of the vignette effect. The input accepts 0.0 to 10.0, but values above 1.0 are currently clamped to 1.0. |
54 | | center_x    | `0.5`     | Float   | The x-coordinate of the center of the vignette. 0.0 to 1.0.        |
55 | | center_y    | `0.5`     | Float   | The y-coordinate of the center of the vignette. 0.0 to 1.0.        |
56 | 
57 | 
58 | ### Radial Blur
59 | 
60 | This filter allows you to blur the edges of the image. It has a few different options that allow you to achieve a variety of effects.
61 | 
62 | ![Pro Post Radial Blur Example](./examples/propost-radialblur.jpg)
63 | 
64 | 
65 | | Parameter            | Default   | Type    | Description                                                                      |
66 | |----------------------|-----------|---------|----------------------------------------------------------------------------------|
67 | | blur_strength        | `64.0`    | Float   | The intensity of the blur at the edges, with a range of 0.0 to 256.0.            |
68 | | center_x             | `0.5`     | Float   | The x-coordinate of the center of the blur. 0.0 to 1.0.                          |
69 | | center_y             | `0.5`     | Float   | The y-coordinate of the center of the blur. 0.0 to 1.0.                          |
70 | | focus_spread         | `1.0`     | Float   | The spread of the area of focus. A larger value makes more of the image sharp.   |
71 | | steps                | `5`       | Integer | The number of steps to use when blurring the image. Higher numbers are slower.   |
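Under the hood, the blur nodes build a stack of progressively blurred copies of the image and blend between them using a mask. A minimal sketch of the per-step kernel maths, mirroring `utils/processing.py` (parameter names match the table above):

```python
import cv2

def blur_stack(image, blur_strength=64.0, steps=5, focus_spread=1.0):
    """Build `steps` progressively blurred copies of `image`."""
    blurred = []
    for step in range(1, steps + 1):
        # focus_spread shapes the falloff: larger values keep more of the image sharp
        size = max(1, int((step / steps) ** focus_spread * blur_strength))
        size += 1 - size % 2  # Gaussian kernel sizes must be odd
        blurred.append(cv2.GaussianBlur(image, (size, size), 0))
    return blurred
```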
72 | 
73 | > Note: Setting steps to `1` with a high blur_strength can create some dreamy effects, as seen below.
74 | 
75 | ![Pro Post Dreamy Radial Blur Example](./examples/propost-radialblur-dreamy.jpg)
76 | 
77 | 
78 | ### Depth Map Blur
79 | 
80 | This filter allows you to blur the image based on a depth map. You can use it in combination with existing depth-map nodes such as `Depth Anything` or `MiDaS`.
81 | 
82 | ![Pro Post Depth Map Blur Example](./examples/propost-depthmapblur.jpg)
83 | 
84 | #### Example workflow
85 | ![Pro Post Depth Map Blur Workflow](./examples/propost-depthmapblur-workflow.png)
86 | 
87 | 
88 | | Parameter            | Default   | Type    | Description                                                                      |
89 | |----------------------|-----------|---------|----------------------------------------------------------------------------------|
90 | | depth_map            | n/a       | Image   | The depth map to use for the blur.                                               |
91 | | blur_strength        | `64.0`    | Float   | The intensity of the blur, with a range of 0.0 to 256.0.                         |
92 | | focal_depth          | `1.0`     | Float   | The focal depth of the blur. `1.0` is the closest, `0.0` is the farthest.        |
93 | | focus_spread         | `1.0`     | Float   | The spread of the area of focus. A larger value makes more of the image sharp.   |
94 | | steps                | `5`       | Integer | The number of steps to use when blurring the image. Higher numbers are slower.   |
95 | | focal_range          | `0.0`     | Float   | `1.0` means all areas clear, `0.0` means only the focal point is clear.          |
96 | | mask_blur            | `1`       | Integer | Mask blur strength (odd values from 1 to 127). `1` means no blurring.            |
97 | 
98 | ### Apply LUT
99 | 
100 | This filter applies a 3D LUT to the image. Currently it only supports LUTs in the CUBE format. Upon installation, a sub-folder called `luts` will be created inside `/ComfyUI/models/`. I know it's not strictly a 'model', but it was the best place to put it for now. You can place your `.cube` files in this folder and they will be listed in the node's dropdown.
101 | 
102 | ![Pro Post Apply LUT Example](./examples/propost-lut.jpg)
103 | Here we apply a LUT called `Waves.cube` to the image in LOG color space.
104 | 
105 | | Parameter  | Default | Type    | Description                                                     |
106 | |------------|---------|---------|-----------------------------------------------------------------|
107 | | lut_name   | n/a     | LUT     | The `*.cube` file from the `luts` folder to apply to the image. |
108 | | strength   | `1.0`   | Float   | The strength of the LUT effect, with a range of 0.0 to 1.0.     |
109 | | log        | `false` | Boolean | If true, the image is processed in LOG color space.             |
110 | 
111 | > Note: This code is heavily inspired by BilboX's work here: https://github.com/syllebra/bilbox-comfyui
112 | 
113 | 
114 | ## Putting it all together
115 | 
116 | Thanks to the composable nature of ComfyUI, you can chain these effects together. Below is an example of all the effects combined to create a more controlled and striking image.
117 | 
118 | ![Pro Post Compound Example](./examples/propost-compound.jpg)
119 | 
120 | 
121 | ## Example
122 | 
123 | Check out the workflow below, which uses the `Film Grain`, `Vignette`, `Radial Blur`, and `Apply LUT` nodes to create the image above. Just drag the image into ComfyUI.
124 | 
125 | ![Sample Workflow](./examples/workflow.png)
126 | 
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
1 | from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
2 | 
3 | __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
--------------------------------------------------------------------------------
/examples/propost-compound.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-compound.jpg
--------------------------------------------------------------------------------
/examples/propost-depthmapblur-workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-depthmapblur-workflow.png
--------------------------------------------------------------------------------
/examples/propost-depthmapblur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-depthmapblur.jpg
--------------------------------------------------------------------------------
/examples/propost-filmgrain.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-filmgrain.jpg
--------------------------------------------------------------------------------
/examples/propost-lut.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-lut.jpg
--------------------------------------------------------------------------------
/examples/propost-radialblur-dreamy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-radialblur-dreamy.jpg
--------------------------------------------------------------------------------
/examples/propost-radialblur.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-radialblur.jpg
--------------------------------------------------------------------------------
/examples/propost-vignette.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost-vignette.jpg
--------------------------------------------------------------------------------
/examples/propost.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/propost.jpg
--------------------------------------------------------------------------------
/examples/workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digitaljohn/comfyui-propost/df6a6d122498f57ad7195d58e07701a501c9dcb6/examples/workflow.png
--------------------------------------------------------------------------------
/filmgrainer/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.0.2"
--------------------------------------------------------------------------------
/filmgrainer/filmgrainer.py:
--------------------------------------------------------------------------------
 1 | # Filmgrainer - by Lars Ole Pontoppidan - MIT License
 2 | 
 3 | from PIL import Image, ImageFilter
 4 | import os
 5 | import tempfile
 6 | import numpy as np
 7 | 
 8 | import filmgrainer.graingamma as graingamma
 9 | import filmgrainer.graingen as graingen
10 | 
11 | 
12 | def _grainTypes(typ):
13 |     # After rescaling to make different grain sizes, the standard deviation
14 |     # of the pixel values changes. The following values of grain size and power
15 |     # have been empirically chosen to end up with approximately the same standard
16 |     # deviation in the result:
17 |     if typ == 1:
18 |         return (0.8, 63) # more interesting fine grain
19 |     elif typ == 2:
20 |         return (1, 45) # basic fine grain
21 |     elif typ == 3:
22 |         return (1.5, 50) # coarse grain
23 |     elif typ == 4:
24 |         return (1.6666, 50) # coarser grain
25 |     else:
26 |         raise ValueError("Unknown grain type: " + str(typ))
27 | 
28 | # Grain mask cache
29 | MASK_CACHE_PATH = os.path.join(tempfile.gettempdir(), "mask-cache")
30 | 
31 | def _getGrainMask(img_width:int, img_height:int, saturation:float, grayscale:bool, grain_size:float, grain_gauss:float, seed):
32 |     if grayscale:
33 |         str_sat = "BW"
34 |         sat = -1.0 # Graingen makes a grayscale image if sat is negative
35 |     else:
36 |         str_sat = str(saturation)
37 |         sat = saturation
38 | 
39 |     filename = os.path.join(MASK_CACHE_PATH, "grain-%d-%d-%s-%s-%s-%d.png" % (
40 |         img_width, img_height, str_sat, str(grain_size), str(grain_gauss), seed))
41 |     if os.path.isfile(filename):
42 |         print("Reusing: %s" % filename)
43 |         mask = Image.open(filename)
44 |     else:
45 |         mask = graingen.grainGen(img_width, img_height, grain_size, grain_gauss, sat, seed)
46 |         print("Saving: %s" % filename)
47 |         if not os.path.isdir(MASK_CACHE_PATH):
48 |             os.mkdir(MASK_CACHE_PATH)
49 |         mask.save(filename, format="png", compress_level=1)
50 |     return mask
51 | 
52 | 
53 | def process(image, scale:float, src_gamma:float, grain_power:float, shadows:float,
54 |             highs:float, grain_type:int, grain_sat:float, gray_scale:bool, sharpen:int, seed:int):
55 | 
56 |     image = np.clip(image, 0, 1) # Ensure the values are within [0, 1]
57 |     image = (image * 255).astype(np.uint8)
58 |     img = Image.fromarray(image).convert("RGB")
59 |     org_width = img.size[0]
60 |     org_height = img.size[1]
61 | 
62 |     if scale != 1.0:
63 |         print("Scaling source image ...")
64 |         img = img.resize((int(org_width / scale), int(org_height / scale)),
65 |                          resample = Image.LANCZOS)
66 | 
67 |     img_width = img.size[0]
68 |     img_height = img.size[1]
69 |     print("Size: %d x %d" % (img_width, img_height))
70 | 
71 |     print("Calculating map ...")
72 |     map = graingamma.Map.calculate(src_gamma, grain_power, shadows, highs)
73 |     # map.saveToFile("map.png")
74 | 
75 |     print("Calculating grain stock ...")
76 |     (grain_size, grain_gauss) = _grainTypes(grain_type)
77 |     mask = _getGrainMask(img_width, img_height, grain_sat, gray_scale, grain_size, grain_gauss, seed)
78 | 
79 |     mask_pixels = mask.load()
80 |     img_pixels = img.load()
81 | 
82 |     # Instead of calling map.lookup(a, b) for each pixel, use the
map directly: 83 | lookup = map.map 84 | 85 | if gray_scale: 86 | print("Film graining image ... (grayscale)") 87 | for y in range(0, img_height): 88 | for x in range(0, img_width): 89 | m = mask_pixels[x, y] 90 | (r, g, b) = img_pixels[x, y] 91 | gray = int(0.21*r + 0.72*g + 0.07*b) 92 | #gray_lookup = map.lookup(gray, m) 93 | gray_lookup = lookup[gray, m] 94 | img_pixels[x, y] = (gray_lookup, gray_lookup, gray_lookup) 95 | else: 96 | print("Film graining image ...") 97 | for y in range(0, img_height): 98 | for x in range(0, img_width): 99 | (mr, mg, mb) = mask_pixels[x, y] 100 | (r, g, b) = img_pixels[x, y] 101 | r = lookup[r, mr] 102 | g = lookup[g, mg] 103 | b = lookup[b, mb] 104 | img_pixels[x, y] = (r, g, b) 105 | 106 | if scale != 1.0: 107 | print("Scaling image back to original size ...") 108 | img = img.resize((org_width, org_height), resample = Image.LANCZOS) 109 | 110 | if sharpen > 0: 111 | print("Sharpening image: %d pass ..." % sharpen) 112 | for x in range(sharpen): 113 | img = img.filter(ImageFilter.SHARPEN) 114 | 115 | return np.array(img).astype('float32') / 255.0 -------------------------------------------------------------------------------- /filmgrainer/graingamma.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | _ShadowEnd = 160 4 | _HighlightStart = 200 5 | 6 | 7 | def _gammaCurve(gamma, x): 8 | """ Returns from 0.0 to 1.0""" 9 | return pow((x / 255.0), (1.0 / gamma)) 10 | 11 | 12 | def _calcDevelopment(shadow_level, high_level, x): 13 | """ 14 | This function returns a development like this: 15 | 16 | (return) 17 | ^ 18 | | 19 | 0.5 | o - o <-- mids level, always 0.5 20 | | - - 21 | | - - 22 | | - o <-- high_level eg. 0.25 23 | | - 24 | | o <-- shadow_level eg. 
0.15 25 | | 26 | 0 -+-----------------|-------|------------|-----> x (input) 27 | 0 160 200 255 28 | """ 29 | if x < _ShadowEnd: 30 | power = 0.5 - (_ShadowEnd - x) * (0.5 - shadow_level) / _ShadowEnd 31 | elif x < _HighlightStart: 32 | power = 0.5 33 | else: 34 | power = 0.5 - (x - _HighlightStart) * (0.5 - high_level) / (255 - _HighlightStart) 35 | 36 | return power 37 | 38 | class Map: 39 | def __init__(self, map): 40 | self.map = map 41 | 42 | @staticmethod 43 | def calculate(src_gamma, noise_power, shadow_level, high_level) -> 'Map': 44 | map = np.zeros([256, 256], dtype=np.uint8) 45 | 46 | # We need to level off top end and low end to leave room for the noise to breathe 47 | crop_top = noise_power * high_level / 12 48 | crop_low = noise_power * shadow_level / 20 49 | 50 | pic_scale = 1 - (crop_top + crop_low) 51 | pic_offs = 255 * crop_low 52 | 53 | for src_value in range(0, 256): 54 | # Gamma compensate picture source value itself 55 | pic_value = _gammaCurve(src_gamma, src_value) * 255.0 56 | 57 | # In the shadows we want noise gamma to be 0.5, in the highs, 2.0: 58 | gamma = pic_value * (1.5 / 256) + 0.5 59 | gamma_offset = _gammaCurve(gamma, 128) 60 | 61 | # Power is determined by the development 62 | power = _calcDevelopment(shadow_level, high_level, pic_value) 63 | 64 | for noise_value in range(0, 256): 65 | gamma_compensated = _gammaCurve(gamma, noise_value) - gamma_offset 66 | value = pic_value * pic_scale + pic_offs + 255.0 * power * noise_power * gamma_compensated 67 | if value < 0: 68 | value = 0 69 | elif value < 255.0: 70 | value = int(value) 71 | else: 72 | value = 255 73 | map[src_value, noise_value] = value 74 | 75 | return Map(map) 76 | 77 | def lookup(self, pic_value, noise_value): 78 | return self.map[pic_value, noise_value] 79 | 80 | def saveToFile(self, filename): 81 | from PIL import Image 82 | img = Image.fromarray(self.map) 83 | img.save(filename) 84 | 85 | if __name__ == "__main__": 86 | import matplotlib.pyplot as plt 87 | import numpy as np 88 | 89 | def plotfunc(x_min, x_max, step, func): 90 | x_all = np.arange(x_min, x_max, step) 91 | y = [] 92 | for x in x_all: 93 | y.append(func(x)) 94 | 95 | plt.figure() 96 | plt.plot(x_all, y) 97 | plt.grid() 98 | 99 | def development1(x): 100 | return _calcDevelopment(0.2, 0.3, x) 101 | 102 | def gamma05(x): 103 | return _gammaCurve(0.5, x) 104 | def gamma1(x): 105 | return _gammaCurve(1, x) 106 | def gamma2(x): 107 | return _gammaCurve(2, x) 108 | 109 | plotfunc(0.0, 255.0, 1.0, development1) 110 | plotfunc(0.0, 255.0, 1.0, gamma05) 111 | plotfunc(0.0, 255.0, 1.0, gamma1) 112 | plotfunc(0.0, 255.0, 1.0, gamma2) 113 | plt.show() -------------------------------------------------------------------------------- /filmgrainer/graingen.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import random 3 | import numpy as np 4 | 5 | def _makeGrayNoise(width, height, power): 6 | buffer = np.zeros([height, width], dtype=int) 7 | 8 | for y in range(0, height): 9 | for x in range(0, width): 10 | buffer[y, x] = random.gauss(128, power) 11 | buffer = buffer.clip(0, 255) 12 | return Image.fromarray(buffer.astype(dtype=np.uint8)) 13 | 14 | def _makeRgbNoise(width, height, power, saturation): 15 | buffer = np.zeros([height, width, 3], dtype=int) 16 | intens_power = power * (1.0 - saturation) 17 | for y in range(0, height): 18 | for x in range(0, width): 19 | intens = random.gauss(128, intens_power) 20 | buffer[y, x, 0] = random.gauss(0, power) * saturation + intens 21 | 
buffer[y, x, 1] = random.gauss(0, power) * saturation + intens 22 | buffer[y, x, 2] = random.gauss(0, power) * saturation + intens 23 | 24 | buffer = buffer.clip(0, 255) 25 | return Image.fromarray(buffer.astype(dtype=np.uint8)) 26 | 27 | 28 | def grainGen(width, height, grain_size, power, saturation, seed = 1): 29 | # A grain_size of 1 means the noise buffer will be made 1:1 30 | # A grain_size of 2 means the noise buffer will be resampled 1:2 31 | noise_width = int(width / grain_size) 32 | noise_height = int(height / grain_size) 33 | random.seed(seed) 34 | 35 | if saturation < 0.0: 36 | print("Making B/W grain, width: %d, height: %d, grain-size: %s, power: %s, seed: %d" % ( 37 | noise_width, noise_height, str(grain_size), str(power), seed)) 38 | img = _makeGrayNoise(noise_width, noise_height, power) 39 | else: 40 | print("Making RGB grain, width: %d, height: %d, saturation: %s, grain-size: %s, power: %s, seed: %d" % ( 41 | noise_width, noise_height, str(saturation), str(grain_size), str(power), seed)) 42 | img = _makeRgbNoise(noise_width, noise_height, power, saturation) 43 | 44 | # Resample 45 | if grain_size != 1.0: 46 | img = img.resize((width, height), resample = Image.LANCZOS) 47 | 48 | return img 49 | 50 | 51 | if __name__ == "__main__": 52 | import sys 53 | if len(sys.argv) == 8: 54 | width = int(sys.argv[2]) 55 | height = int(sys.argv[3]) 56 | grain_size = float(sys.argv[4]) 57 | power = float(sys.argv[5]) 58 | sat = float(sys.argv[6]) 59 | seed = int(sys.argv[7]) 60 | out = grainGen(width, height, grain_size, power, sat, seed) 61 | out.save(sys.argv[1]) -------------------------------------------------------------------------------- /nodes.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import cv2 4 | import torch 5 | import numpy as np 6 | import folder_paths 7 | from .utils import processing as processing_utils 8 | from .utils import loading as loading_utils 9 | 10 | # Get the directory of the current file and add it to the system path 11 | current_file_directory = os.path.dirname(os.path.abspath(__file__)) 12 | sys.path.append(current_file_directory) 13 | 14 | import filmgrainer.filmgrainer as filmgrainer 15 | 16 | # Create the directory for the LUTs 17 | dir_luts = os.path.join(folder_paths.models_dir, "luts") 18 | os.makedirs(dir_luts, exist_ok=True) 19 | folder_paths.folder_names_and_paths["luts"] = ([dir_luts], set(['.cube'])) 20 | 21 | 22 | class ProPostVignette: 23 | def __init__(self): 24 | pass 25 | 26 | @classmethod 27 | def INPUT_TYPES(s): 28 | return { 29 | "required": { 30 | "image": ("IMAGE",), 31 | "intensity": ("FLOAT", { 32 | "default": 1.0, 33 | "min": 0.0, 34 | "max": 10.0, 35 | "step": 0.01 36 | }), 37 | "center_x": ("FLOAT", { 38 | "default": 0.5, 39 | "min": 0.0, 40 | "max": 1.0, 41 | "step": 0.01 42 | }), 43 | "center_y": ("FLOAT", { 44 | "default": 0.5, 45 | "min": 0.0, 46 | "max": 1.0, 47 | "step": 0.01 48 | }), 49 | }, 50 | } 51 | 52 | RETURN_TYPES = ("IMAGE",) 53 | RETURN_NAMES = () 54 | 55 | FUNCTION = "vignette_image" 56 | 57 | #OUTPUT_NODE = False 58 | 59 | CATEGORY = "Pro Post/Camera Effects" 60 | 61 | def vignette_image(self, image: torch.Tensor, intensity: float, center_x: float, center_y: float): 62 | batch_size, height, width, _ = image.shape 63 | result = torch.zeros_like(image) 64 | 65 | if intensity == 0: 66 | return image 67 | 68 | # Generate the vignette for each image in the batch 69 | # Create linear space but centered around the provided center point ratios 70 | x = 
np.linspace(-1, 1, width)
71 |         y = np.linspace(-1, 1, height)
72 |         X, Y = np.meshgrid(x - (2 * center_x - 1), y - (2 * center_y - 1))
73 | 
74 |         # Calculate distances to the furthest corner
75 |         distances_to_corners = [
76 |             np.sqrt((0 - center_x) ** 2 + (0 - center_y) ** 2),
77 |             np.sqrt((1 - center_x) ** 2 + (0 - center_y) ** 2),
78 |             np.sqrt((0 - center_x) ** 2 + (1 - center_y) ** 2),
79 |             np.sqrt((1 - center_x) ** 2 + (1 - center_y) ** 2)
80 |         ]
81 |         max_distance_to_corner = np.max(distances_to_corners)
82 | 
83 |         radius = np.sqrt(X ** 2 + Y ** 2)
84 |         radius = radius / (max_distance_to_corner * np.sqrt(2)) # Normalize radius
85 |         opacity = np.clip(intensity, 0, 1)
86 |         vignette = 1 - radius * opacity
87 | 
88 |         for b in range(batch_size):
89 |             tensor_image = image[b].numpy()
90 | 
91 |             # Apply vignette
92 |             vignette_image = self.apply_vignette(tensor_image, vignette)
93 | 
94 |             tensor = torch.from_numpy(vignette_image).unsqueeze(0)
95 |             result[b] = tensor
96 | 
97 |         return (result,)
98 | 
99 |     def apply_vignette(self, image, vignette):
100 |         # Normalize to the 0-1 range if the input is 8-bit
101 |         needs_normalization = image.max() > 1
102 |         if needs_normalization:
103 |             image = image.astype(np.float32) / 255
104 | 
105 |         final_image = np.clip(image * vignette[..., np.newaxis], 0, 1)
106 | 
107 |         if needs_normalization:
108 |             final_image = (final_image * 255).astype(np.uint8)
109 | 
110 |         return final_image
111 | 
112 | class ProPostFilmGrain:
113 |     grain_types = ["Fine", "Fine Simple", "Coarse", "Coarser"]
114 | 
115 |     def __init__(self):
116 |         pass
117 | 
118 |     @classmethod
119 |     def INPUT_TYPES(s):
120 |         return {
121 |             "required": {
122 |                 "image": ("IMAGE",),
123 |                 "gray_scale": ("BOOLEAN", {
124 |                     "default": False
125 |                 }),
126 |                 "grain_type": (s.grain_types,),
127 |                 "grain_sat": ("FLOAT", {
128 |                     "default": 0.5,
129 |                     "min": 0.0,
130 |                     "max": 1.0,
131 |                     "step": 0.01
132 |                 }),
133 |                 "grain_power": ("FLOAT", {
134 |                     "default": 0.7,
135 |                     "min": 0.0,
136 |                     "max": 1.0,
137 |                     "step": 0.01
138 |                 }),
139 |                 "shadows": ("FLOAT", {
140 |                     "default": 0.2,
141 |                     "min": 0.0,
142 |                     "max": 1.0,
143 |                     "step": 0.01
144 |                 }),
145 |                 "highs": ("FLOAT", {
146 |                     "default": 0.2,
147 |                     "min": 0.0,
148 |                     "max": 1.0,
149 |                     "step": 0.01
150 |                 }),
151 |                 "scale": ("FLOAT", {
152 |                     "default": 1.0,
153 |                     "min": 0.0,
154 |                     "max": 10.0,
155 |                     "step": 0.01
156 |                 }),
157 |                 "sharpen": ("INT", {
158 |                     "default": 0,
159 |                     "min": 0,
160 |                     "max": 10
161 |                 }),
162 |                 "src_gamma": ("FLOAT", {
163 |                     "default": 1.0,
164 |                     "min": 0.0,
165 |                     "max": 10.0,
166 |                     "step": 0.01
167 |                 }),
168 |                 "seed": ("INT", {
169 |                     "default": 1,
170 |                     "min": 1,
171 |                     "max": 0xffffffffffffffff
172 |                 }),
173 |             },
174 |         }
175 | 
176 |     RETURN_TYPES = ("IMAGE",)
177 |     RETURN_NAMES = ()
178 | 
179 |     FUNCTION = "filmgrain_image"
180 | 
181 |     #OUTPUT_NODE = False
182 | 
183 |     CATEGORY = "Pro Post/Camera Effects"
184 | 
185 |     def filmgrain_image(self, image: torch.Tensor, gray_scale: bool, grain_type: str, grain_sat: float, grain_power: float, shadows: float, highs: float, scale: float, sharpen: int, src_gamma: float, seed: int):
186 |         batch_size, height, width, _ = image.shape
187 |         result = torch.zeros_like(image)
188 | 
189 |         # Find the 1-based index of grain_type (filmgrainer expects 1-4)
190 |         grain_type_index = self.grain_types.index(grain_type) + 1
191 | 
192 | 
193 |         for b in range(batch_size):
194 |             tensor_image = image[b].numpy()
195 | 
196 |             # Apply film grain, offsetting the seed per batch item
197 |             grain_image = self.apply_filmgrain(tensor_image, gray_scale, grain_type_index, grain_sat, grain_power, shadows, highs, scale, sharpen, src_gamma, seed+b)
198 | 
199 |             tensor = torch.from_numpy(grain_image).unsqueeze(0)
200 |             result[b] = tensor
201 | 
202 |         return (result,)
203 | 
204 |     def apply_filmgrain(self, image, gray_scale, grain_type, grain_sat, grain_power, shadows, highs, scale, sharpen, src_gamma, seed):
205 |         out_image = filmgrainer.process(image, scale, src_gamma,
206 |             grain_power, shadows, highs, grain_type,
207 |             grain_sat, gray_scale, sharpen, seed)
208 | 
209 |         return out_image
210 | 
211 | 
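# For reference: ProPostFilmGrain ultimately delegates to filmgrainer.process,
# which can also be called directly outside ComfyUI. A rough sketch (assumes
# `img` is a float32 HxWx3 numpy array in [0, 1]; grain_type runs from
# 1 (Fine) to 4 (Coarser), mirroring ProPostFilmGrain.grain_types above):
#
#   import filmgrainer.filmgrainer as filmgrainer
#   grained = filmgrainer.process(img, scale=1.0, src_gamma=1.0,
#       grain_power=0.7, shadows=0.2, highs=0.2, grain_type=1,
#       grain_sat=0.5, gray_scale=False, sharpen=0, seed=1)
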
212 | class ProPostRadialBlur:
213 |     def __init__(self):
214 |         pass
215 | 
216 |     @classmethod
217 |     def INPUT_TYPES(s):
218 |         return {
219 |             "required": {
220 |                 "image": ("IMAGE",),
221 |                 "blur_strength": ("FLOAT", {
222 |                     "default": 64.0,
223 |                     "min": 0.0,
224 |                     "max": 256.0,
225 |                     "step": 1.0
226 |                 }),
227 |                 "center_x": ("FLOAT", {
228 |                     "default": 0.5,
229 |                     "min": 0.0,
230 |                     "max": 1.0,
231 |                     "step": 0.01
232 |                 }),
233 |                 "center_y": ("FLOAT", {
234 |                     "default": 0.5,
235 |                     "min": 0.0,
236 |                     "max": 1.0,
237 |                     "step": 0.01
238 |                 }),
239 |                 "focus_spread": ("FLOAT", {
240 |                     "default": 1,
241 |                     "min": 0.1,
242 |                     "max": 8.0,
243 |                     "step": 0.1
244 |                 }),
245 |                 "steps": ("INT", {
246 |                     "default": 5,
247 |                     "min": 1,
248 |                     "max": 32,
249 |                 }),
250 |             },
251 |         }
252 | 
253 |     RETURN_TYPES = ("IMAGE",)
254 |     RETURN_NAMES = ()
255 | 
256 |     FUNCTION = "radialblur_image"
257 | 
258 |     #OUTPUT_NODE = False
259 | 
260 |     CATEGORY = "Pro Post/Blur Effects"
261 | 
262 |     def radialblur_image(self, image: torch.Tensor, blur_strength: float, center_x: float, center_y:float, focus_spread:float, steps: int):
263 |         batch_size, height, width, _ = image.shape
264 |         result = torch.zeros_like(image)
265 | 
266 |         # Build the radial mask once; it is shared by every image in the batch
267 |         c_x, c_y = int(width * center_x), int(height * center_y)
268 | 
269 |         # Calculate distances to all corners from the center
270 |         distances_to_corners = [
271 |             np.sqrt((c_x - 0)**2 + (c_y - 0)**2),
272 |             np.sqrt((c_x - width)**2 + (c_y - 0)**2),
273 |             np.sqrt((c_x - 0)**2 + (c_y - height)**2),
274 |             np.sqrt((c_x - width)**2 + (c_y - height)**2)
275 |         ]
276 |         max_distance_to_corner = max(distances_to_corners)
277 | 
278 |         # Create and adjust radial mask
279 |         X, Y = np.meshgrid(np.arange(width) - c_x, np.arange(height) - c_y)
280 |         radial_mask = np.sqrt(X**2 + Y**2) / max_distance_to_corner
281 | 
282 |         for b in range(batch_size):
283 |             tensor_image = image[b].numpy()
284 | 
285 |             # Apply blur
286 |             blur_image = self.apply_radialblur(tensor_image, blur_strength, radial_mask, focus_spread, steps)
287 | 
288 |             tensor = torch.from_numpy(blur_image).unsqueeze(0)
289 |             result[b] = tensor
290 | 
291 |         return (result,)
292 | 
293 |     def apply_radialblur(self, image, blur_strength, radial_mask, focus_spread, steps):
294 |         needs_normalization = image.max() > 1
295 |         if needs_normalization:
296 |             image = image.astype(np.float32) / 255
297 | 
298 |         blurred_images = processing_utils.generate_blurred_images(image, blur_strength, steps, focus_spread)
299 |         final_image = processing_utils.apply_blurred_images(image, blurred_images, radial_mask)
300 | 
301 |         if needs_normalization:
302 |             return np.clip(final_image * 255, 0, 255).astype(np.uint8)
303 | 
304 |         return np.clip(final_image, 0, 1)
305 | 
306 | 
307 | class ProPostDepthMapBlur:
308 |     def __init__(self):
309 |         pass
310 | 
311 |     @classmethod
312 |     def INPUT_TYPES(s):
313 |         return {
314 |             "required": {
315 |                 "image": ("IMAGE",),
316 |                 "depth_map": ("IMAGE",),
317 |                 "blur_strength": ("FLOAT", {
318 |                     "default": 64.0,
319 |                     "min": 0.0,
320 |                     "max": 256.0,
321 | 
"step": 1.0 322 | }), 323 | "focal_depth": ("FLOAT", { 324 | "default": 1.0, 325 | "min": 0.0, 326 | "max": 1.0, 327 | "step": 0.01 328 | }), 329 | "focus_spread": ("FLOAT", { 330 | "default": 1, 331 | "min": 1.0, 332 | "max": 8.0, 333 | "step": 0.1 334 | }), 335 | "steps": ("INT", { 336 | "default": 5, 337 | "min": 1, 338 | "max": 32, 339 | }), 340 | "focal_range": ("FLOAT", { 341 | "default": 0.0, 342 | "min": 0.0, 343 | "max": 1.0, 344 | "step": 0.01 345 | }), 346 | "mask_blur": ("INT", { 347 | "default": 1, 348 | "min": 1, 349 | "max": 127, 350 | "step": 2 351 | }), 352 | }, 353 | } 354 | 355 | RETURN_TYPES = ("IMAGE","MASK") 356 | RETURN_NAMES = () 357 | 358 | FUNCTION = "depthblur_image" 359 | DESCRIPTION = """ 360 | blur_strength: Represents the blur strength. This parameter controls the overall intensity of the blur effect; the higher the value, the more blurred the image becomes. 361 | 362 | focal_depth: Represents the focal depth. This parameter is used to determine which depth level in the image should remain sharp, while other levels are blurred based on depth differences. 363 | 364 | focus_spread: Represents the focus spread range. This parameter controls the size of the blur transition area near the focal depth; the larger the value, the wider the transition area, and the smoother the blur effect spreads around the focus. 365 | 366 | steps: Represents the number of steps in the blur process. This parameter determines the calculation precision of the blur effect; the more steps, the finer the blur effect, but this also increases the computational load. 367 | 368 | focal_range: Represents the focal range. This parameter is used to adjust the depth range within the focal depth that remains sharp; the larger the value, the wider the area around the focal depth that remains sharp. 369 | 370 | mask_blur: Represents the mask blur strength for blurring the depth map. This parameter controls the intensity of the depth map's blur treatment, used for preprocessing the depth map before calculating the final blur effect, to achieve a more natural blur transition. 
371 | """ 372 | #OUTPUT_NODE = False 373 | 374 | CATEGORY = "Pro Post/Blur Effects" 375 | 376 | def depthblur_image(self, image: torch.Tensor, depth_map: torch.Tensor, blur_strength: float, focal_depth: float, focus_spread:float, steps: int, focal_range: float, mask_blur: int): 377 | batch_size, height, width, _ = image.shape 378 | image_result = torch.zeros_like(image) 379 | mask_result = torch.zeros((batch_size, height, width), dtype=torch.float32) 380 | 381 | for b in range(batch_size): 382 | tensor_image = image[b].numpy() 383 | tensor_image_depth = depth_map[b].numpy() 384 | 385 | # Apply blur 386 | blur_image,depth_mask = self.apply_depthblur(tensor_image, tensor_image_depth, blur_strength, focal_depth, focus_spread, steps, focal_range, mask_blur) 387 | 388 | tensor_image = torch.from_numpy(blur_image).unsqueeze(0) 389 | tensor_mask = torch.from_numpy(depth_mask).unsqueeze(0) 390 | 391 | image_result[b] = tensor_image 392 | mask_result[b] = tensor_mask 393 | 394 | return (image_result,mask_result) 395 | 396 | def apply_depthblur(self, image, depth_map, blur_strength, focal_depth, focus_spread, steps, focal_range, mask_blur): 397 | # Normalize the input image if needed 398 | needs_normalization = image.max() > 1 399 | if needs_normalization: 400 | image = image.astype(np.float32) / 255 401 | 402 | # Normalize the depth map if needed 403 | depth_map = depth_map.astype(np.float32) / 255 if depth_map.max() > 1 else depth_map 404 | 405 | # Resize depth map to match the image dimensions 406 | depth_map_resized = cv2.resize(depth_map, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_LINEAR) 407 | if len(depth_map_resized.shape) > 2: 408 | depth_map_resized = cv2.cvtColor(depth_map_resized, cv2.COLOR_BGR2GRAY) 409 | 410 | # Adjust the depth map based on the focal plane 411 | depth_mask = np.abs(depth_map_resized - focal_depth) 412 | depth_mask = np.clip(depth_mask / np.max(depth_mask), 0, 1) 413 | 414 | # Process the depth_mask 415 | depth_mask[depth_mask < focal_range] = 0 416 | depth_mask[depth_mask >= focal_range] = (depth_mask[depth_mask >= focal_range] - focal_range) / (1 - focal_range) 417 | 418 | # Apply mask blur 419 | depth_mask = cv2.GaussianBlur(depth_mask, (mask_blur, mask_blur), 0) 420 | 421 | # Generate blurred versions of the image 422 | blurred_images = processing_utils.generate_blurred_images(image, blur_strength, steps, focus_spread) 423 | 424 | # Use the adjusted depth map as a mask for applying blurred images 425 | final_image = processing_utils.apply_blurred_images(image, blurred_images, depth_mask) 426 | 427 | # Convert back to original range if the image was normalized 428 | if needs_normalization: 429 | final_image = np.clip(final_image * 255, 0, 255).astype(np.uint8) 430 | 431 | return final_image, depth_mask 432 | 433 | 434 | class ProPostApplyLUT: 435 | def __init__(self): 436 | pass 437 | 438 | @classmethod 439 | def INPUT_TYPES(s): 440 | return { 441 | "required": { 442 | "image": ("IMAGE",), 443 | "lut_name": (folder_paths.get_filename_list("luts"), ), 444 | "strength": ("FLOAT", { 445 | "default": 1.0, 446 | "min": 0.0, 447 | "max": 1.0, 448 | "step": 0.01 449 | }), 450 | "log": ("BOOLEAN", { 451 | "default": False 452 | }), 453 | }, 454 | } 455 | 456 | RETURN_TYPES = ("IMAGE",) 457 | RETURN_NAMES = () 458 | 459 | FUNCTION = "lut_image" 460 | 461 | #OUTPUT_NODE = False 462 | 463 | CATEGORY = "Pro Post/Color Grading" 464 | 465 | def lut_image(self, image: torch.Tensor, lut_name, strength: float, log: bool): 466 | batch_size, height, width, _ = 
image.shape
467 |         result = torch.zeros_like(image)
468 | 
469 |         # Read the LUT
470 |         lut_path = os.path.join(dir_luts, lut_name)
471 |         lut = loading_utils.read_lut(lut_path, clip=True)
472 | 
473 |         for b in range(batch_size):
474 |             tensor_image = image[b].numpy()
475 | 
476 |             # Apply LUT
477 |             lut_image = self.apply_lut(tensor_image, lut, strength, log)
478 | 
479 |             tensor = torch.from_numpy(lut_image).unsqueeze(0)
480 |             result[b] = tensor
481 | 
482 |         return (result,)
483 | 
484 |     def apply_lut(self, image, lut, strength, log):
485 |         if strength == 0:
486 |             return image
487 | 
488 |         # Apply the LUT
489 |         is_non_default_domain = not np.array_equal(lut.domain, np.array([[0., 0., 0.], [1., 1., 1.]]))
490 |         dom_scale = None
491 | 
492 |         im_array = image.copy()
493 | 
494 |         if is_non_default_domain:
495 |             dom_scale = lut.domain[1] - lut.domain[0]
496 |             im_array = im_array * dom_scale + lut.domain[0]
497 |         if log:
498 |             im_array = im_array ** (1/2.2)
499 | 
500 |         im_array = lut.apply(im_array)
501 | 
502 |         if log:
503 |             im_array = im_array ** (2.2)
504 |         if is_non_default_domain:
505 |             im_array = (im_array - lut.domain[0]) / dom_scale
506 | 
507 |         # Blend the original image and the LUT-applied image based on the strength
508 |         blended_image = (1 - strength) * image + strength * im_array
509 | 
510 |         return blended_image
511 | 
512 | 
513 | # A dictionary that contains all nodes you want to export with their names
514 | # NOTE: names should be globally unique
515 | NODE_CLASS_MAPPINGS = {
516 |     "ProPostVignette": ProPostVignette,
517 |     "ProPostFilmGrain": ProPostFilmGrain,
518 |     "ProPostRadialBlur": ProPostRadialBlur,
519 |     "ProPostDepthMapBlur": ProPostDepthMapBlur,
520 |     "ProPostApplyLUT": ProPostApplyLUT
521 | }
522 | 
523 | # A dictionary that contains the friendly, human-readable titles for the nodes
524 | NODE_DISPLAY_NAME_MAPPINGS = {
525 |     "ProPostVignette": "ProPost Vignette",
526 |     "ProPostFilmGrain": "ProPost Film Grain",
527 |     "ProPostRadialBlur": "ProPost Radial Blur",
528 |     "ProPostDepthMapBlur": "ProPost Depth Map Blur",
529 |     "ProPostApplyLUT": "ProPost Apply LUT"
530 | }
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "comfyui-propost"
3 | description = "A set of custom ComfyUI nodes for performing basic post-processing effects including Film Grain and Vignette. These effects can help take the edge off AI imagery and make it feel more natural."
 4 | version = "1.1.3"
 5 | license = { text = "MIT License" }
 6 | dependencies = ["numpy", "Pillow", "opencv-python", "colour-science"]
 7 | 
 8 | [project.urls]
 9 | Repository = "https://github.com/digitaljohn/comfyui-propost"
10 | # Used by Comfy Registry https://comfyregistry.org
11 | 
12 | [tool.comfy]
13 | PublisherId = "digital"
14 | DisplayName = "comfyui-propost"
15 | Icon = ""
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | Pillow
3 | opencv-python
4 | colour-science
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "1.0.2"
--------------------------------------------------------------------------------
/utils/loading.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import numpy as np
 3 | from typing import Union
 4 | from colour.io.luts.iridas_cube import read_LUT_IridasCube, LUT3D, LUT3x1D
 5 | 
 6 | def read_lut(lut_path, clip=False):
 7 |     """
 8 |     Reads a LUT from the specified path, returning an instance of LUT3D or LUT3x1D.
 9 | 
10 |     :param lut_path: the path of the file from which to read the LUT
11 |     :param clip: whether to clip LUT values, limiting them to the domain's lower and upper bounds
12 |     """
13 |     lut: Union[LUT3x1D, LUT3D] = read_LUT_IridasCube(lut_path)
14 |     lut.name = os.path.splitext(os.path.basename(lut_path))[0]  # use base filename instead of internal LUT name
15 | 
16 |     if clip:
17 |         if lut.domain[0].max() == lut.domain[0].min() and lut.domain[1].max() == lut.domain[1].min():
18 |             lut.table = np.clip(lut.table, lut.domain[0, 0], lut.domain[1, 0])
19 |         else:
20 |             if len(lut.table.shape) == 2:  # 3x1D
21 |                 for dim in range(3):
22 |                     lut.table[:, dim] = np.clip(lut.table[:, dim], lut.domain[0, dim], lut.domain[1, dim])
23 |             else:  # 3D
24 |                 for dim in range(3):
25 |                     lut.table[:, :, :, dim] = np.clip(lut.table[:, :, :, dim], lut.domain[0, dim], lut.domain[1, dim])
26 | 
27 |     return lut
--------------------------------------------------------------------------------
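For context, `read_lut` returns a `colour-science` LUT object whose `apply` method performs the actual color transform; `ProPostApplyLUT.lut_image` in `nodes.py` consumes it exactly this way. A minimal standalone sketch (assumes you run from the repository root; the LUT filename is illustrative):

```python
import numpy as np
from utils.loading import read_lut

# Load a .cube LUT, clipping table values to the LUT's declared domain
lut = read_lut("ComfyUI/models/luts/Waves.cube", clip=True)

# LUT3D / LUT3x1D objects expose .apply() for RGB arrays in [0, 1]
img = np.random.rand(32, 32, 3).astype(np.float32)
graded = lut.apply(img)
```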
/utils/processing.py:
--------------------------------------------------------------------------------
 1 | import cv2
 2 | import numpy as np
 3 | 
 4 | def generate_blurred_images(image, blur_strength, steps, focus_spread=1):
 5 |     blurred_images = []
 6 |     for step in range(1, steps + 1):
 7 |         # Shape the blur ramp with focus_spread: larger values keep more of the image sharp
 8 |         blur_factor = (step / steps) ** focus_spread * blur_strength
 9 |         blur_size = max(1, int(blur_factor))
10 |         blur_size = blur_size if blur_size % 2 == 1 else blur_size + 1  # Gaussian kernel sizes must be odd
11 | 
12 |         # Apply Gaussian Blur
13 |         blurred_image = cv2.GaussianBlur(image, (blur_size, blur_size), 0)
14 |         blurred_images.append(blurred_image)
15 |     return blurred_images
16 | 
17 | def apply_blurred_images(image, blurred_images, mask):
18 |     steps = len(blurred_images)  # Calculate the number of steps based on the blurred images provided
19 |     final_image = np.zeros_like(image)
20 |     step_size = 1.0 / steps
21 |     for i, blurred_image in enumerate(blurred_images):
22 |         # Calculate the blend weight for the current step
23 |         current_mask = np.clip((mask - i * step_size) * steps, 0, 1)
24 |         next_mask = np.clip((mask - (i + 1) * step_size) * steps, 0, 1)
25 |         blend_mask = current_mask - next_mask
26 | 
27 |         # Apply the blend mask
28 |         final_image += blend_mask[:, :, np.newaxis] * blurred_image
29 | 
30 |     # Blend the original, unblurred image back in where the mask falls below the first step
31 |     final_image += (1 - np.clip(mask * steps, 0, 1))[:, :, np.newaxis] * image
32 |     return final_image
--------------------------------------------------------------------------------
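Taken together, these two helpers implement the mask-driven progressive blur shared by the Radial Blur and Depth Map Blur nodes. A minimal end-to-end sketch (assumes you run from the repository root; the image and radial mask are illustrative stand-ins):

```python
import numpy as np
from utils.processing import generate_blurred_images, apply_blurred_images

h, w = 256, 256
image = np.random.rand(h, w, 3).astype(np.float32)  # stand-in for a real frame

# Mask: 0.0 at the centre (sharp), rising to 1.0 at the corners (fully blurred)
yy, xx = np.mgrid[0:h, 0:w]
mask = np.sqrt((xx / w - 0.5) ** 2 + (yy / h - 0.5) ** 2) / np.sqrt(0.5)

stack = generate_blurred_images(image, blur_strength=64.0, steps=5, focus_spread=1.0)
out = apply_blurred_images(image, stack, mask)  # float32 HxWx3, roughly in [0, 1]
```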