├── .gitignore
├── LICENSE
├── README.MD
├── README_CN.MD
├── __init__.py
├── custom_size.ini.example
├── font
│   └── Alibaba-PuHuiTi-Heavy.ttf
├── image
│   ├── HSV_node.jpg
│   ├── LAB_node.jpg
│   ├── RGB_node.jpg
│   ├── RGB_value_example.jpg
│   ├── YUV_node.jpg
│   ├── add_grain_example.jpg
│   ├── add_grain_node.jpg
│   ├── any_rerouter_node.jpg
│   ├── auto_adjust_example.jpg
│   ├── auto_adjust_node.jpg
│   ├── auto_adjust_v2_example.jpg
│   ├── auto_adjust_v2_node.jpg
│   ├── auto_brightness_example.jpg
│   ├── auto_brightness_node.jpg
│   ├── batch_selector_example.jpg
│   ├── batch_selector_node.jpg
│   ├── ben_ultra_example.jpg
│   ├── ben_ultra_node.jpg
│   ├── blend_mode_result.jpg
│   ├── blend_mode_v2_example.jpg
│   ├── blendif_mask_example.jpg
│   ├── blendif_mask_node.jpg
│   ├── boolean_node.jpg
│   ├── boolean_operator_node.jpg
│   ├── boolean_operator_v2_node.jpg
│   ├── boolean_oprator_v2_node.jpg
│   ├── brightness_&_contrast_node.jpg
│   ├── channel_shake_example.jpg
│   ├── channel_shake_node.jpg
│   ├── check_mask_node.jpg
│   ├── check_mask_v2_node.jpg
│   ├── chioce_text_preset_node.jpg
│   ├── choice_text_preset_example.jpg
│   ├── color_adapter_example.jpg
│   ├── color_adapter_node.jpg
│   ├── color_balance_example.jpg
│   ├── color_balance_node.jpg
│   ├── color_image_example.jpg
│   ├── color_image_node.jpg
│   ├── color_image_v2_node.jpg
│   ├── color_map_node.jpg
│   ├── color_name_example.jpg
│   ├── color_name_node.jpg
│   ├── color_of_shadow_and_highlight_example.jpg
│   ├── color_of_shadow_and_highlight_node.jpg
│   ├── color_overlay_example.jpg
│   ├── color_overlay_node.jpg
│   ├── color_picker.jpg
│   ├── color_temperature_example.jpg
│   ├── color_temperature_node.jpg
│   ├── colormap_result.jpg
│   ├── corp_box_resolve_node.jpg
│   ├── corp_by_mask_example.jpg
│   ├── corp_by_mask_node.jpg
│   ├── corp_by_mask_v2_node.jpg
│   ├── create_gradient_mask_example.jpg
│   ├── create_gradient_mask_example2.jpg
│   ├── create_gradient_mask_node.jpg
│   ├── data_nodes_example.jpg
│   ├── drop_shadow_example.jpg
│   ├── drop_shadow_node.jpg
│   ├── exposure_example.jpg
│   ├── extend_canvas_example.jpg
│   ├── extend_canvas_node.jpg
│   ├── extend_canvas_v2_node.jpg
│   ├── film_example.jpg
│   ├── film_node.jpg
│   ├── film_v2_node.jpg
│   ├── float_node.jpg
│   ├── gamma_node.jpg
│   ├── gaussian_blur_example.jpg
│   ├── gaussian_blur_v2_node.jpg
│   ├── get_image_size_node.jpg
│   ├── get_main_color_and_color_name_example.jpg
│   ├── get_main_color_node.jpg
│   ├── get_main_color_v2_example.jpg
│   ├── get_main_colors_example.jpg
│   ├── gradient_image_example.jpg
│   ├── gradient_image_node.jpg
│   ├── gradient_image_node_v2.jpg
│   ├── gradient_overlay_example.jpg
│   ├── gradient_overlay_node.jpg
│   ├── gray_value_node.jpg
│   ├── halftone_example.jpg
│   ├── halftone_node.jpg
│   ├── hdr_effects_example.jpg
│   ├── hdr_effects_node.jpg
│   ├── hl_frequency_detail_restore_example.jpg
│   ├── hl_frequency_detail_restore_node.jpg
│   ├── hsv_value_node.jpg
│   ├── icmask_crop_back_node.jpg
│   ├── icmask_example.jpg
│   ├── icmask_node.jpg
│   ├── if_example.jpg
│   ├── if_node.jpg
│   ├── image_auto_crop_example.jpg
│   ├── image_auto_crop_node.jpg
│   ├── image_auto_crop_v2_node.jpg
│   ├── image_auto_crop_v3_node.jpg
│   ├── image_blend_advance_example.jpg
│   ├── image_blend_advance_node.jpg
│   ├── image_blend_example.jpg
│   ├── image_blend_node.jpg
│   ├── image_channel_merge_example.jpg
│   ├── image_channel_merge_node.jpg
│   ├── image_channel_split_example.jpg
│   ├── image_channel_split_node.jpg
│   ├── image_combine_alpha_node.jpg
│   ├── image_hub_example.jpg
│   ├── image_hub_node.jpg
│   ├── image_mask_scale_as_example.jpg
│   ├── image_mask_scale_as_node.jpg
│   ├── image_mask_scale_as_v2_example.jpg
│   ├── image_mask_scale_as_v2_node.jpg
│   ├── image_opacity_example.jpg
│   ├── image_reel_composit_node.jpg
│   ├── image_reel_example.jpg
│   ├── image_reel_node.jpg
│   ├── image_remove_alpha_example.jpg
│   ├── image_remove_alpha_node.jpg
│   ├── image_scale_by_aspect_ratio_example.jpg
│   ├── image_scale_by_aspect_ratio_node.jpg
│   ├── image_scale_by_aspect_ratio_v2_node.jpg
│   ├── image_scale_restore_example.jpg
│   ├── image_scale_restore_node.jpg
│   ├── image_scale_restore_v2_node.jpg
│   ├── image_shift_example.jpg
│   ├── image_shift_node.jpg
│   ├── image_tagger_save_example.jpg
│   ├── image_tagger_save_node.jpg
│   ├── image_to_mask_example.jpg
│   ├── image_to_mask_node.jpg
│   ├── inner_glow_example.jpg
│   ├── inner_glow_node.jpg
│   ├── inner_shadow_example.jpg
│   ├── inner_shadow_node.jpg
│   ├── integer_node.jpg
│   ├── layer_image_transform_example.jpg
│   ├── layer_image_transform_node.jpg
│   ├── layer_mask_transform_node.jpg
│   ├── layercolor_nodes.jpg
│   ├── layercolor_title.jpg
│   ├── layerfilter_nodes.jpg
│   ├── layermask_nodes.jpg
│   ├── layerstyle_nodes.jpg
│   ├── layerstyle_title.jpg
│   ├── layerutility_nodes.jpg
│   ├── levels_example.jpg
│   ├── levels_node.jpg
│   ├── light_leak_example.jpg
│   ├── light_leak_node.jpg
│   ├── load_ben_model_node.jpg
│   ├── load_segfromer_model_node.jpg
│   ├── load_vqa_model_node.jpg
│   ├── lut_apply_example.jpg
│   ├── lut_apply_node.jpg
│   ├── mask_box_detect_example.jpg
│   ├── mask_box_detect_node.jpg
│   ├── mask_by_color_example.jpg
│   ├── mask_by_color_node.jpg
│   ├── mask_edge_compare.jpg
│   ├── mask_edge_shrink_example.jpg
│   ├── mask_edge_shrink_node.jpg
│   ├── mask_edge_ultra_detail_example.jpg
│   ├── mask_edge_ultra_detail_node.jpg
│   ├── mask_edge_ultra_detail_v2_example.jpg
│   ├── mask_edge_ultra_detail_v2_node.jpg
│   ├── mask_gradient_example.jpg
│   ├── mask_gradient_node.jpg
│   ├── mask_grain_example.jpg
│   ├── mask_grain_node.jpg
│   ├── mask_grow_example.jpg
│   ├── mask_grow_node.jpg
│   ├── mask_invert.jpg
│   ├── mask_invert_node.jpg
│   ├── mask_motion_blur_example.jpg
│   ├── mask_motion_blur_node.jpg
│   ├── mask_preview_example.jpg
│   ├── mask_stroke_example.jpg
│   ├── mask_stroke_node.jpg
│   ├── menu_layer_color.jpg
│   ├── menu_layer_filter.jpg
│   ├── menu_layer_mask.jpg
│   ├── menu_layer_style.jpg
│   ├── menu_layer_utility.jpg
│   ├── motion_blur_example.jpg
│   ├── name_to_color_example.jpg
│   ├── name_to_color_node.jpg
│   ├── node-menu.jpg
│   ├── node-search.jpg
│   ├── number_calculator_node.jpg
│   ├── number_calculator_v2_node.jpg
│   ├── outer_glow_example.jpg
│   ├── outer_glow_node.jpg
│   ├── pixel_spread_example.jpg
│   ├── pixel_spread_node.jpg
│   ├── print_info_node.jpg
│   ├── purge_vram_example.jpg
│   ├── purge_vram_v2_example.jpg
│   ├── queue_stop_example.jpg
│   ├── queue_stop_node.jpg
│   ├── random_generator_example.jpg
│   ├── random_generator_node.jpg
│   ├── random_generator_v2_node.jpg
│   ├── rembg_ultra_example.jpg
│   ├── rembg_ultra_node.jpg
│   ├── restore_crop_box_node.jpg
│   ├── rmbg_ultra_v2_node.jpg
│   ├── rounded_rectangle_example.jpg
│   ├── rounded_rectangle_node.jpg
│   ├── segformer_clothes_example.jpg
│   ├── segformer_clothes_pipeline_node.jpg
│   ├── segformer_clothes_setting_node.jpg
│   ├── segformer_fashion_example.jpg
│   ├── segformer_fashion_pipeline_node.jpg
│   ├── segformer_fashion_setting_node.jpg
│   ├── segformer_ultra_example.jpg
│   ├── segformer_ultra_node.jpg
│   ├── segformer_ultra_v2_node.jpg
│   ├── segfromer_ultra_v3_node.jpg
│   ├── segment_anything_ultra_compare.jpg
│   ├── segment_anything_ultra_example.jpg
│   ├── segment_anything_ultra_node.jpg
│   ├── segment_anything_ultra_v2_node.jpg
│   ├── shadow_and_highlight_mask_example.jpg
│   ├── shadow_and_highlight_mask_node.jpg
│   ├── sharp_and_soft_example.jpg
│   ├── sharp_and_soft_node.jpg
│   ├── show_blind_watermark_node.jpg
│   ├── simple_text_image_example.jpg
│   ├── simple_text_image_node.jpg
│   ├── skin_beauty_example.jpg
│   ├── skin_beauty_node.jpg
│   ├── soft_light_example.jpg
│   ├── soft_light_node.jpg
│   ├── string_condition_example.jpg
│   ├── string_condition_node.jpg
│   ├── string_node.jpg
│   ├── stroke_example.jpg
│   ├── stroke_node.jpg
│   ├── switch_case_example.jpg
│   ├── switch_case_node.jpg
│   ├── text_box_node.jpg
│   ├── text_image_example.jpg
│   ├── text_image_node.jpg
│   ├── text_image_v2_node.jpg
│   ├── text_join_example.jpg
│   ├── text_join_v2_node.jpg
│   ├── text_preseter_node.jpg
│   ├── title.jpg
│   ├── ultra_nodes.jpg
│   ├── ultra_v2_nodes_example.jpg
│   ├── vqa_prompt_example.jpg
│   ├── vqa_prompt_node.jpg
│   ├── water_color_example.jpg
│   ├── water_color_node.jpg
│   ├── xy2percent_example.jpg
│   └── xy2percent_node.jpg
├── install_requirements.bat
├── install_requirements_aki.bat
├── js
│   ├── dz_comfy_shared.js
│   ├── dz_mtb_widgets.js
│   ├── dz_node_palette.js
│   └── dz_parse-css.js
├── locales
│   ├── fr
│   │   └── nodeDefs.json
│   ├── ja
│   │   └── nodeDefs.json
│   ├── ko
│   │   └── nodeDefs.json
│   ├── ru
│   │   └── nodeDefs.json
│   └── zh
│       └── nodeDefs.json
├── lut
│   └── BlueArchitecture.cube
├── py
│   ├── add_grain.py
│   ├── any_rerouter.py
│   ├── batch_selector.py
│   ├── blend_if_mask.py
│   ├── blendmodes.py
│   ├── briarmbg.py
│   ├── channel_shake.py
│   ├── check_mask.py
│   ├── check_mask_v2.py
│   ├── color_adapter.py
│   ├── color_correct_HSV.py
│   ├── color_correct_LAB.py
│   ├── color_correct_LUTapply.py
│   ├── color_correct_RGB.py
│   ├── color_correct_YUV.py
│   ├── color_correct_auto_adjust.py
│   ├── color_correct_auto_adjust_v2.py
│   ├── color_correct_auto_brightness.py
│   ├── color_correct_brightness_and_contrast.py
│   ├── color_correct_color_balance.py
│   ├── color_correct_color_temperature.py
│   ├── color_correct_exposure.py
│   ├── color_correct_gamma.py
│   ├── color_correct_levels.py
│   ├── color_correct_shadow_and_highlight.py
│   ├── color_image.py
│   ├── color_image_v2.py
│   ├── color_map.py
│   ├── color_name.py
│   ├── color_overlay _v2.py
│   ├── color_overlay.py
│   ├── color_picker.py
│   ├── color_to_HSVvalue.py
│   ├── color_to_RGBvalue.py
│   ├── color_to_gray_value.py
│   ├── create_gradient_mask.py
│   ├── crop_box_resolve.py
│   ├── crop_by_mask.py
│   ├── crop_by_mask_v2.py
│   ├── crop_by_mask_v3.py
│   ├── data_nodes.py
│   ├── drop_shadow.py
│   ├── drop_shadow_v2.py
│   ├── drop_shadow_v3.py
│   ├── extend_canvas.py
│   ├── extend_canvas_v2.py
│   ├── film_post.py
│   ├── film_post_v2.py
│   ├── filmgrainer
│   │   ├── __init__.py
│   │   ├── filmgrainer.py
│   │   ├── graingamma.py
│   │   ├── graingen.py
│   │   └── processing.py
│   ├── gaussian_blur.py
│   ├── get_image_size.py
│   ├── get_main_colors.py
│   ├── gradient_image.py
│   ├── gradient_image_v2.py
│   ├── gradient_map.py
│   ├── gradient_overlay.py
│   ├── gradient_overlay_v2.py
│   ├── halftone.py
│   ├── hdr_effects.py
│   ├── hl_frequency_detail_restore.py
│   ├── ic_mask.py
│   ├── image_blend.py
│   ├── image_blend_advance.py
│   ├── image_blend_advance_v2.py
│   ├── image_blend_advance_v3.py
│   ├── image_blend_v2.py
│   ├── image_channel_merge.py
│   ├── image_channel_split.py
│   ├── image_combine_alpha.py
│   ├── image_hub.py
│   ├── image_mask_scale_as.py
│   ├── image_opacity.py
│   ├── image_reel.py
│   ├── image_remove_alpha.py
│   ├── image_scale_by_aspect_ratio.py
│   ├── image_scale_by_aspect_ratio_v2.py
│   ├── image_scale_restore.py
│   ├── image_scale_restore_v2.py
│   ├── image_shift.py
│   ├── image_tagger_save.py
│   ├── image_to_mask.py
│   ├── imagefunc.py
│   ├── inner_glow.py
│   ├── inner_glow_v2.py
│   ├── inner_shadow.py
│   ├── inner_shadow_v2.py
│   ├── layer_image_transform.py
│   ├── layer_mask_transform.py
│   ├── light_leak.py
│   ├── mask_box_detect.py
│   ├── mask_by_color.py
│   ├── mask_edge_shrink.py
│   ├── mask_edge_ultra_detail.py
│   ├── mask_edge_ultra_detail_v2.py
│   ├── mask_gradient.py
│   ├── mask_grain.py
│   ├── mask_grow.py
│   ├── mask_invert.py
│   ├── mask_motion_blur.py
│   ├── mask_preview.py
│   ├── mask_stroke.py
│   ├── motion_blur.py
│   ├── outer_glow.py
│   ├── outer_glow_v2.py
│   ├── pixel_spread.py
│   ├── print_info.py
│   ├── purge_vram.py
│   ├── random_generator.py
│   ├── restore_crop_box.py
│   ├── rmbg_ultra.py
│   ├── rmbg_ultra_v2.py
│   ├── rounded_rectangle.py
│   ├── segformer_ultra.py
│   ├── shadow_highlight_mask.py
│   ├── sharp_soft.py
│   ├── simple_text_image.py
│   ├── skin_beauty.py
│   ├── soft_light.py
│   ├── stroke.py
│   ├── stroke_v2.py
│   ├── text_image.py
│   ├── text_image_v2.py
│   ├── text_join.py
│   ├── text_node.py
│   ├── vqa_prompt.py
│   ├── water_color.py
│   └── xy2percent.py
├── pyproject.toml
├── repair_dependency.bat
├── repair_dependency_aki.bat
├── repair_dependency_list.txt
├── requirements.txt
├── resource_dir.ini.example
└── workflow
    ├── 1280x720_seven_person.jpg
    ├── 1280x720car.jpg
    ├── 1280x768_city.png
    ├── 1344x768_beach.png
    ├── 1344x768_girl2.png
    ├── 1344x768_hair.png
    ├── 1344x768_redcar.png
    ├── 1920x1080table.png
    ├── 3840x2160car.jpg
    ├── 512x512.png
    ├── 512x512bkgd.jpg
    ├── 768x1344_beach.png
    ├── 768x1344_dress.png
    ├── auto_adjust_v2_example.json
    ├── auto_brightness_example.json
    ├── blend_mode_v2_example.json
    ├── blendif_mask_example.json
    ├── choice_text_preset_example.json
    ├── color_of_shadow_and_highlight_example.json
    ├── color_overlay_example.json
    ├── create_gradient_mask_example.json
    ├── crop_by_mask_&_restore_crop_box_example.json
    ├── data_nodes_example.json
    ├── drop_shadow_example.json
    ├── extend_canvas_example.json
    ├── fox_512x512.png
    ├── get_main_color_and_color_name_example.json
    ├── get_main_color_example.json
    ├── girl_dino_1024.png
    ├── gradident_overlay_example.json
    ├── hl_frequency_detail_restore_example.json
    ├── icmask_example.json
    ├── if_example.json
    ├── image_blend_advance_example.json
    ├── image_blend_example.json
    ├── image_hub_example.json
    ├── image_mask_scale_as_example.json
    ├── image_reel_example.json
    ├── image_remove_alpha & image_combine_alpha_example.json
    ├── image_scale_restore_example.json
    ├── image_tagger_save_example.json
    ├── image_to_mask_example.json
    ├── inner_glow_example.json
    ├── inner_shadow_example.json
    ├── layer_color_title.json
    ├── layer_image_transform_example.json
    ├── layerstyle_all_nodes.json
    ├── mask_box_detect_example.json
    ├── mask_by_color_example.json
    ├── mask_edge_ultra_detail_example.json
    ├── mask_edge_ultra_detail_v2_example.json
    ├── mask_gradient_example.json
    ├── outer_glow_example.json
    ├── pixel_spread_example.json
    ├── queue_stop_example.json
    ├── rembg_ultra_example.json
    ├── rounded_rectangle_example.json
    ├── segformet_clothes_example.json
    ├── segformet_fashion_example.json
    ├── simple_text_example.json
    ├── stroke_example.json
    ├── switch_case_example.json
    ├── text_image_example.json
    ├── title_example_workflow.json
    ├── ultra_v2_nodes_example.json
    ├── vqa_example.json
    └── xy2percent_example.json

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
_test_*.*
__pycache__
.venv
.idea
*.pth
*.ini

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 chflame163

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
import importlib.util
import os
import sys
import json

NODE_CLASS_MAPPINGS = {}
NODE_DISPLAY_NAME_MAPPINGS = {}

python = sys.executable

def get_ext_dir(subpath=None, mkdir=False):
    dir = os.path.dirname(__file__)
    if subpath is not None:
        dir = os.path.join(dir, subpath)

    dir = os.path.abspath(dir)

    if mkdir and not os.path.exists(dir):
        os.makedirs(dir)
    return dir

def serialize(obj):
    if isinstance(obj, (str, int, float, bool, list, dict, type(None))):
        return obj
    return str(obj)  # fall back to a string representation


# Import every module under py/ and merge its node mappings into the
# package-level registries that ComfyUI reads from this __init__.py.
py = get_ext_dir("py")
files = os.listdir(py)
all_nodes = {}
for file in files:
    if not file.endswith(".py"):
        continue
    name = os.path.splitext(file)[0]
    imported_module = importlib.import_module(".py.{}".format(name), __name__)
    try:
        NODE_CLASS_MAPPINGS = {**NODE_CLASS_MAPPINGS, **imported_module.NODE_CLASS_MAPPINGS}
        NODE_DISPLAY_NAME_MAPPINGS = {**NODE_DISPLAY_NAME_MAPPINGS, **imported_module.NODE_DISPLAY_NAME_MAPPINGS}
        serialized_CLASS_MAPPINGS = {k: serialize(v) for k, v in imported_module.NODE_CLASS_MAPPINGS.items()}
        serialized_DISPLAY_NAME_MAPPINGS = {k: serialize(v) for k, v in imported_module.NODE_DISPLAY_NAME_MAPPINGS.items()}
        all_nodes[file] = {"NODE_CLASS_MAPPINGS": serialized_CLASS_MAPPINGS, "NODE_DISPLAY_NAME_MAPPINGS": serialized_DISPLAY_NAME_MAPPINGS}
    except AttributeError:
        # Shared helper modules (e.g. imagefunc.py, blendmodes.py) define no
        # node mappings; skip them.
        pass


WEB_DIRECTORY = "./js"

__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
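The loader above only requires that each module under py/ export the two dicts it merges; everything else about a node follows the usual ComfyUI class convention. A minimal sketch of a module this loader would register — the file name, class, and display name below are hypothetical and not part of this repository, and the CATEGORY string is an assumption modeled on the pack's menu names:

# _example_node.py -- hypothetical; any .py dropped into py/ that exports
# these two dicts is picked up by the importlib loop in __init__.py.

class ExampleInvert:
    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI passes IMAGE as a float tensor shaped [batch, H, W, C] in 0..1.
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"
    CATEGORY = "😺dzNodes/LayerUtility"  # assumed category string

    def invert(self, image):
        # Invert the image; returned as a one-element tuple per RETURN_TYPES.
        return (1.0 - image,)

NODE_CLASS_MAPPINGS = {"LayerUtility: ExampleInvert": ExampleInvert}
NODE_DISPLAY_NAME_MAPPINGS = {"LayerUtility: ExampleInvert": "LayerUtility: Example Invert"}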
--------------------------------------------------------------------------------
/custom_size.ini.example:
--------------------------------------------------------------------------------
# LayerStyle Custom_size
1024 x 1024
768 x 512
512 x 768
1280 x 720
720 x 1280
1344 x 768
768 x 1344
1536 x 640
640 x 1536
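Each non-comment line of this file is a "<width> x <height>" preset. A minimal parsing sketch under that assumption — load_custom_sizes is a hypothetical helper; the repository's actual loader is not shown in this dump:

def load_custom_sizes(path="custom_size.ini"):
    # Lines starting with '#' are comments; the rest are "W x H" pairs.
    sizes = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            w, h = (int(v) for v in line.split("x"))  # int() tolerates spaces
            sizes.append((w, h))
    return sizes  # e.g. [(1024, 1024), (768, 512), ...]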
--------------------------------------------------------------------------------
/font/Alibaba-PuHuiTi-Heavy.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/font/Alibaba-PuHuiTi-Heavy.ttf

--------------------------------------------------------------------------------
/image/*.jpg:
--------------------------------------------------------------------------------
[Binary assets: each .jpg listed under image/ in the tree above resolves to
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/<filename>
at the same pinned commit as the font file.]
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/light_leak_example.jpg -------------------------------------------------------------------------------- /image/light_leak_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/light_leak_node.jpg -------------------------------------------------------------------------------- /image/load_ben_model_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/load_ben_model_node.jpg -------------------------------------------------------------------------------- /image/load_segfromer_model_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/load_segfromer_model_node.jpg -------------------------------------------------------------------------------- /image/load_vqa_model_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/load_vqa_model_node.jpg -------------------------------------------------------------------------------- /image/lut_apply_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/lut_apply_example.jpg -------------------------------------------------------------------------------- /image/lut_apply_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/lut_apply_node.jpg -------------------------------------------------------------------------------- /image/mask_box_detect_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_box_detect_example.jpg -------------------------------------------------------------------------------- /image/mask_box_detect_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_box_detect_node.jpg -------------------------------------------------------------------------------- /image/mask_by_color_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_by_color_example.jpg -------------------------------------------------------------------------------- /image/mask_by_color_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_by_color_node.jpg 
-------------------------------------------------------------------------------- /image/mask_edge_compare.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_compare.jpg -------------------------------------------------------------------------------- /image/mask_edge_shrink_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_shrink_example.jpg -------------------------------------------------------------------------------- /image/mask_edge_shrink_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_shrink_node.jpg -------------------------------------------------------------------------------- /image/mask_edge_ultra_detail_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_ultra_detail_example.jpg -------------------------------------------------------------------------------- /image/mask_edge_ultra_detail_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_ultra_detail_node.jpg -------------------------------------------------------------------------------- /image/mask_edge_ultra_detail_v2_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_ultra_detail_v2_example.jpg -------------------------------------------------------------------------------- /image/mask_edge_ultra_detail_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_edge_ultra_detail_v2_node.jpg -------------------------------------------------------------------------------- /image/mask_gradient_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_gradient_example.jpg -------------------------------------------------------------------------------- /image/mask_gradient_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_gradient_node.jpg -------------------------------------------------------------------------------- /image/mask_grain_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_grain_example.jpg -------------------------------------------------------------------------------- /image/mask_grain_node.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_grain_node.jpg -------------------------------------------------------------------------------- /image/mask_grow_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_grow_example.jpg -------------------------------------------------------------------------------- /image/mask_grow_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_grow_node.jpg -------------------------------------------------------------------------------- /image/mask_invert.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_invert.jpg -------------------------------------------------------------------------------- /image/mask_invert_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_invert_node.jpg -------------------------------------------------------------------------------- /image/mask_motion_blur_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_motion_blur_example.jpg -------------------------------------------------------------------------------- /image/mask_motion_blur_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_motion_blur_node.jpg -------------------------------------------------------------------------------- /image/mask_preview_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_preview_example.jpg -------------------------------------------------------------------------------- /image/mask_stroke_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_stroke_example.jpg -------------------------------------------------------------------------------- /image/mask_stroke_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/mask_stroke_node.jpg -------------------------------------------------------------------------------- /image/menu_layer_color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/menu_layer_color.jpg -------------------------------------------------------------------------------- 
/image/menu_layer_filter.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/menu_layer_filter.jpg -------------------------------------------------------------------------------- /image/menu_layer_mask.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/menu_layer_mask.jpg -------------------------------------------------------------------------------- /image/menu_layer_style.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/menu_layer_style.jpg -------------------------------------------------------------------------------- /image/menu_layer_utility.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/menu_layer_utility.jpg -------------------------------------------------------------------------------- /image/motion_blur_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/motion_blur_example.jpg -------------------------------------------------------------------------------- /image/name_to_color_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/name_to_color_example.jpg -------------------------------------------------------------------------------- /image/name_to_color_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/name_to_color_node.jpg -------------------------------------------------------------------------------- /image/node-menu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/node-menu.jpg -------------------------------------------------------------------------------- /image/node-search.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/node-search.jpg -------------------------------------------------------------------------------- /image/number_calculator_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/number_calculator_node.jpg -------------------------------------------------------------------------------- /image/number_calculator_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/number_calculator_v2_node.jpg 
-------------------------------------------------------------------------------- /image/outer_glow_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/outer_glow_example.jpg -------------------------------------------------------------------------------- /image/outer_glow_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/outer_glow_node.jpg -------------------------------------------------------------------------------- /image/pixel_spread_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/pixel_spread_example.jpg -------------------------------------------------------------------------------- /image/pixel_spread_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/pixel_spread_node.jpg -------------------------------------------------------------------------------- /image/print_info_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/print_info_node.jpg -------------------------------------------------------------------------------- /image/purge_vram_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/purge_vram_example.jpg -------------------------------------------------------------------------------- /image/purge_vram_v2_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/purge_vram_v2_example.jpg -------------------------------------------------------------------------------- /image/queue_stop_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/queue_stop_example.jpg -------------------------------------------------------------------------------- /image/queue_stop_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/queue_stop_node.jpg -------------------------------------------------------------------------------- /image/random_generator_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/random_generator_example.jpg -------------------------------------------------------------------------------- /image/random_generator_node.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/random_generator_node.jpg -------------------------------------------------------------------------------- /image/random_generator_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/random_generator_v2_node.jpg -------------------------------------------------------------------------------- /image/rembg_ultra_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/rembg_ultra_example.jpg -------------------------------------------------------------------------------- /image/rembg_ultra_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/rembg_ultra_node.jpg -------------------------------------------------------------------------------- /image/restore_crop_box_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/restore_crop_box_node.jpg -------------------------------------------------------------------------------- /image/rmbg_ultra_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/rmbg_ultra_v2_node.jpg -------------------------------------------------------------------------------- /image/rounded_rectangle_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/rounded_rectangle_example.jpg -------------------------------------------------------------------------------- /image/rounded_rectangle_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/rounded_rectangle_node.jpg -------------------------------------------------------------------------------- /image/segformer_clothes_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_clothes_example.jpg -------------------------------------------------------------------------------- /image/segformer_clothes_pipeline_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_clothes_pipeline_node.jpg -------------------------------------------------------------------------------- /image/segformer_clothes_setting_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_clothes_setting_node.jpg 
-------------------------------------------------------------------------------- /image/segformer_fashion_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_fashion_example.jpg -------------------------------------------------------------------------------- /image/segformer_fashion_pipeline_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_fashion_pipeline_node.jpg -------------------------------------------------------------------------------- /image/segformer_fashion_setting_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_fashion_setting_node.jpg -------------------------------------------------------------------------------- /image/segformer_ultra_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_ultra_example.jpg -------------------------------------------------------------------------------- /image/segformer_ultra_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_ultra_node.jpg -------------------------------------------------------------------------------- /image/segformer_ultra_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segformer_ultra_v2_node.jpg -------------------------------------------------------------------------------- /image/segfromer_ultra_v3_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segfromer_ultra_v3_node.jpg -------------------------------------------------------------------------------- /image/segment_anything_ultra_compare.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segment_anything_ultra_compare.jpg -------------------------------------------------------------------------------- /image/segment_anything_ultra_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segment_anything_ultra_example.jpg -------------------------------------------------------------------------------- /image/segment_anything_ultra_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segment_anything_ultra_node.jpg -------------------------------------------------------------------------------- 
/image/segment_anything_ultra_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/segment_anything_ultra_v2_node.jpg -------------------------------------------------------------------------------- /image/shadow_and_highlight_mask_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/shadow_and_highlight_mask_example.jpg -------------------------------------------------------------------------------- /image/shadow_and_highlight_mask_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/shadow_and_highlight_mask_node.jpg -------------------------------------------------------------------------------- /image/sharp_and_soft_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/sharp_and_soft_example.jpg -------------------------------------------------------------------------------- /image/sharp_and_soft_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/sharp_and_soft_node.jpg -------------------------------------------------------------------------------- /image/show_blind_watermark_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/show_blind_watermark_node.jpg -------------------------------------------------------------------------------- /image/simple_text_image_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/simple_text_image_example.jpg -------------------------------------------------------------------------------- /image/simple_text_image_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/simple_text_image_node.jpg -------------------------------------------------------------------------------- /image/skin_beauty_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/skin_beauty_example.jpg -------------------------------------------------------------------------------- /image/skin_beauty_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/skin_beauty_node.jpg -------------------------------------------------------------------------------- /image/soft_light_example.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/soft_light_example.jpg -------------------------------------------------------------------------------- /image/soft_light_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/soft_light_node.jpg -------------------------------------------------------------------------------- /image/string_condition_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/string_condition_example.jpg -------------------------------------------------------------------------------- /image/string_condition_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/string_condition_node.jpg -------------------------------------------------------------------------------- /image/string_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/string_node.jpg -------------------------------------------------------------------------------- /image/stroke_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/stroke_example.jpg -------------------------------------------------------------------------------- /image/stroke_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/stroke_node.jpg -------------------------------------------------------------------------------- /image/switch_case_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/switch_case_example.jpg -------------------------------------------------------------------------------- /image/switch_case_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/switch_case_node.jpg -------------------------------------------------------------------------------- /image/text_box_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_box_node.jpg -------------------------------------------------------------------------------- /image/text_image_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_image_example.jpg -------------------------------------------------------------------------------- /image/text_image_node.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_image_node.jpg -------------------------------------------------------------------------------- /image/text_image_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_image_v2_node.jpg -------------------------------------------------------------------------------- /image/text_join_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_join_example.jpg -------------------------------------------------------------------------------- /image/text_join_v2_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_join_v2_node.jpg -------------------------------------------------------------------------------- /image/text_preseter_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/text_preseter_node.jpg -------------------------------------------------------------------------------- /image/title.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/title.jpg -------------------------------------------------------------------------------- /image/ultra_nodes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/ultra_nodes.jpg -------------------------------------------------------------------------------- /image/ultra_v2_nodes_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/ultra_v2_nodes_example.jpg -------------------------------------------------------------------------------- /image/vqa_prompt_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/vqa_prompt_example.jpg -------------------------------------------------------------------------------- /image/vqa_prompt_node.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/vqa_prompt_node.jpg -------------------------------------------------------------------------------- /image/water_color_example.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/water_color_example.jpg -------------------------------------------------------------------------------- /image/water_color_node.jpg: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/water_color_node.jpg
--------------------------------------------------------------------------------
/image/xy2percent_example.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/xy2percent_example.jpg
--------------------------------------------------------------------------------
/image/xy2percent_node.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/image/xy2percent_node.jpg
--------------------------------------------------------------------------------
/install_requirements.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | 
3 | set "python_exec=..\..\..\python_embeded\python.exe"
4 | set "repair_dependency_txt=%~dp0\repair_dependency_list.txt"
5 | set "requirements_txt=%~dp0\requirements.txt"
6 | 
7 | echo Installing with ComfyUI Portable
8 | echo .
9 | echo Installing requirements.txt...
10 | 
11 | for /f "delims=" %%i in (%requirements_txt%) do (
12 |     %python_exec% -s -m pip install "%%i"
13 | )
14 | 
15 | echo .
16 | echo Fixing dependency packages...
17 | %python_exec% -s -m pip uninstall -y opencv-python opencv-contrib-python opencv-python-headless opencv-contrib-python-headless
18 | for /f "delims=" %%i in (%repair_dependency_txt%) do (
19 |     %python_exec% -s -m pip install "%%i"
20 | )
21 | 
22 | echo .
23 | echo Installation finished!
24 | pause
25 | 
26 | 
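27 | REM NOTE (illustration only, not part of the original script): the "repair"
28 | REM step above uninstalls all four OpenCV wheel variants and then reinstalls
29 | REM the ones listed in repair_dependency_list.txt, so that cv2 ends up being
30 | REM provided by a single, consistent package rather than conflicting ones.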
--------------------------------------------------------------------------------
/install_requirements_aki.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | 
3 | set "python_exec=..\..\python\python.exe"
4 | set "repair_dependency_txt=%~dp0\repair_dependency_list.txt"
5 | set "requirements_txt=%~dp0\requirements.txt"
6 | 
7 | echo Installing with ComfyUI Aki
8 | echo .
9 | echo Installing requirements.txt...
10 | for /f "delims=" %%i in (%requirements_txt%) do (
11 |     %python_exec% -s -m pip install "%%i"
12 | )
13 | 
14 | echo .
15 | echo Fixing dependency packages...
16 | %python_exec% -s -m pip uninstall -y opencv-python opencv-contrib-python opencv-python-headless opencv-contrib-python-headless
17 | for /f "delims=" %%i in (%repair_dependency_txt%) do (
18 |     %python_exec% -s -m pip install "%%i"
19 | )
20 | 
21 | echo .
22 | echo Installation finished!
23 | pause
24 | 
25 | 
--------------------------------------------------------------------------------
/js/dz_node_palette.js:
--------------------------------------------------------------------------------
1 | import { app } from "../../scripts/app.js";
2 | 
3 | 
4 | app.registerExtension({
5 |     name: "ColorOverlay",
6 |     async nodeCreated(node) {
7 |         // Check whether this is a Layer node
8 |         if(!node.comfyClass.startsWith("Layer")) {
9 |             return;
10 |         }
11 | 
12 |         if(node.comfyClass.startsWith("LayerStyle:")) {
13 |             node.color = "rgba(20, 95, 121, 0.7)";
14 |             // node.bgcolor = "rgba(50, 241, 255, 0.15)";
15 |         }
16 | 
17 |         if(node.comfyClass.startsWith("LayerColor:")) {
18 |             node.color = "rgba(27, 89, 123, 0.7)";
19 |             // node.bgcolor = "rgba(43, 209, 255, 0.15)";
20 |         }
21 | 
22 |         if(node.comfyClass.startsWith("LayerMask:")) {
23 |             node.color = "rgba(27, 80, 119, 0.7)";
24 |             // node.bgcolor = "rgba(4, 174, 255, 0.15)";
25 |         }
26 | 
27 |         if(node.comfyClass.startsWith("LayerUtility:")) {
28 |             node.color = "rgba(38, 73, 116, 0.7)";
29 |             // node.bgcolor = "rgba(23, 113, 255, 0.15)";
30 |         }
31 | 
32 |         if(node.comfyClass.startsWith("LayerFilter:")) {
33 |             node.color = "rgba(34, 67, 111, 0.7)";
34 |             // node.bgcolor = "rgba(19, 85, 255, 0.15)";
35 |         }
36 | 
37 | 
38 |         // if(node.comfyClass === "LayerStyle: ColorOverlay"){
39 |         //     node.setSize([600, 120]);
40 |         // }
41 |     }
42 | });
--------------------------------------------------------------------------------
/py/add_grain.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import time
3 | from .imagefunc import log, tensor2pil, image_add_grain, pil2tensor
4 | 
5 | 
6 | 
7 | class AddGrain:
8 | 
9 |     def __init__(self):
10 |         self.NODE_NAME = 'AddGrain'
11 | 
12 | 
13 |     @classmethod
14 |     def INPUT_TYPES(self):
15 | 
16 |         return {
17 |             "required": {
18 |                 "image": ("IMAGE", ), #
19 |                 "grain_power": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}),
20 |                 "grain_scale": ("FLOAT", {"default": 1, "min": 0.1, "max": 10, "step": 0.1}),
21 |                 "grain_sat": ("FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01}),
22 |             },
23 |             "optional": {
24 |             }
25 |         }
26 | 
27 |     RETURN_TYPES = ("IMAGE",)
28 |     RETURN_NAMES = ("image",)
29 |     FUNCTION = 'add_grain'
30 |     CATEGORY = '😺dzNodes/LayerFilter'
31 | 
32 |     def add_grain(self, image, grain_power, grain_scale, grain_sat):
33 | 
34 |         ret_images = []
35 | 
36 |         for i in range(len(image)):
37 |             _canvas = tensor2pil(torch.unsqueeze(image[i], 0)).convert('RGB')
38 |             _canvas = image_add_grain(_canvas, grain_scale, grain_power, grain_sat, toe=0, seed=int(time.time()) + i)
39 |             ret_images.append(pil2tensor(_canvas))
40 | 
41 |         log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
42 |         return (torch.cat(ret_images, dim=0),)
43 | 
44 | NODE_CLASS_MAPPINGS = {
45 |     "LayerFilter: AddGrain": AddGrain
46 | }
47 | 
48 | NODE_DISPLAY_NAME_MAPPINGS = {
49 |     "LayerFilter: AddGrain": "LayerFilter: Add Grain"
50 | }
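51 | 
52 | # NOTE (illustrative sketch, not part of the original file): grain is seeded with
53 | # int(time.time()) + i, so each frame in a batch receives a different noise
54 | # pattern. Standalone, the core call (positional args: scale, power, saturation,
55 | # mirroring line 38 above) would look roughly like:
56 | #     from PIL import Image
57 | #     img = Image.open("photo.jpg").convert("RGB")   # "photo.jpg" is a placeholder
58 | #     grained = image_add_grain(img, 1.0, 0.5, 1.0, toe=0, seed=42)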
--------------------------------------------------------------------------------
/py/any_rerouter.py:
--------------------------------------------------------------------------------
1 | from .imagefunc import AnyType
2 | 
3 | anything = AnyType('*')
4 | 
5 | class LS_AnyRerouter():
6 | 
7 |     def __init__(self):
8 |         self.NODE_NAME = 'AnyRerouter'
9 | 
10 | 
11 |     @classmethod
12 |     def INPUT_TYPES(self):
13 |         return {
14 |             "required": {
15 |                 "any": (anything, {}),
16 |             },
17 |             "optional": { #
18 |             }
19 |         }
20 | 
21 |     RETURN_TYPES = (anything,)
22 |     RETURN_NAMES = ('any',)
23 |     FUNCTION = 'any_rerouter'
24 |     CATEGORY = '😺dzNodes/LayerUtility/Data'
25 | 
26 |     def any_rerouter(self, any,):
27 |         return (any,)
28 | 
29 | NODE_CLASS_MAPPINGS = {
30 |     "LayerUtility: AnyRerouter": LS_AnyRerouter
31 | }
32 | 
33 | NODE_DISPLAY_NAME_MAPPINGS = {
34 |     "LayerUtility: AnyRerouter": "LayerUtility: Any Rerouter"
35 | }
--------------------------------------------------------------------------------
/py/batch_selector.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .imagefunc import log, pil2tensor, image2mask, extract_numbers
3 | from PIL import Image
4 | 
5 | 
6 | 
7 | class BatchSelector:
8 | 
9 |     def __init__(self):
10 |         self.NODE_NAME = 'BatchSelector'
11 |         pass
12 | 
13 |     @classmethod
14 |     def INPUT_TYPES(self):
15 | 
16 |         return {
17 |             "required": {
18 |                 "select": ("STRING", {"default": "0,"},),
19 |             },
20 |             "optional": {
21 |                 "images": ("IMAGE",), #
22 |                 "masks": ("MASK",), #
23 |             }
24 |         }
25 | 
26 |     RETURN_TYPES = ("IMAGE", "MASK",)
27 |     RETURN_NAMES = ("image", "mask",)
28 |     FUNCTION = 'batch_selector'
29 |     CATEGORY = '😺dzNodes/LayerUtility/SystemIO'
30 | 
31 |     def batch_selector(self, select, images=None, masks=None
32 |                        ):
33 |         ret_images = []
34 |         ret_masks = []
35 |         empty_image = pil2tensor(Image.new("RGBA", (64, 64), (0, 0, 0, 0)))
36 |         empty_mask = image2mask(Image.new("L", (64, 64), color="black"))
37 | 
38 |         indexs = extract_numbers(select)
39 |         for i in indexs:
40 |             if images is not None:
41 |                 if i < len(images):
42 |                     ret_images.append(images[i].unsqueeze(0))
43 |                 else:
44 |                     ret_images.append(images[-1].unsqueeze(0))
45 |             if masks is not None:
46 |                 if i < len(masks):
47 |                     ret_masks.append(masks[i].unsqueeze(0))
48 |                 else:
49 |                     ret_masks.append(masks[-1].unsqueeze(0))
50 | 
51 |         if len(ret_images) == 0:
52 |             ret_images.append(empty_image)
53 |         if len(ret_masks) == 0:
54 |             ret_masks.append(empty_mask)
55 | 
56 |         log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
57 |         return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),)
58 | 
59 | NODE_CLASS_MAPPINGS = {
60 |     "LayerUtility: BatchSelector": BatchSelector
61 | }
62 | 
63 | NODE_DISPLAY_NAME_MAPPINGS = {
64 |     "LayerUtility: BatchSelector": "LayerUtility: Batch Selector"
65 | }
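66 | 
67 | # NOTE (illustration, not part of the original file): "select" is parsed by
68 | # extract_numbers(), so an input such as "0,2,5" picks batch items 0, 2 and 5.
69 | # Indices past the end of the batch fall back to the last item, and when no
70 | # images/masks are connected the node returns a 64x64 transparent image and a
71 | # black mask as placeholders.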
--------------------------------------------------------------------------------
/py/channel_shake.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import math
3 | from PIL import Image
4 | from .imagefunc import log, tensor2pil, pil2tensor
5 | from .imagefunc import shift_image
6 | 
7 | 
8 | 
9 | 
10 | class ChannelShake:
11 | 
12 |     def __init__(self):
13 |         self.NODE_NAME = 'ChannelShake'
14 | 
15 | 
16 |     @classmethod
17 |     def INPUT_TYPES(self):
18 |         channel_mode = ['RGB', 'RBG', 'BGR', 'BRG', 'GBR', 'GRB']
19 |         return {
20 |             "required": {
21 |                 "image": ("IMAGE", ), #
22 |                 "distance": ("INT", {"default": 20, "min": 1, "max": 999, "step": 1}), # shift distance in pixels
23 |                 "angle": ("FLOAT", {"default": 40, "min": -360, "max": 360, "step": 0.1}), # shift angle in degrees
24 |                 "mode": (channel_mode,), # channel order
25 |             },
26 |             "optional": {
27 |             }
28 |         }
29 | 
30 |     RETURN_TYPES = ("IMAGE",)
31 |     RETURN_NAMES = ("image",)
32 |     FUNCTION = 'channel_shake'
33 |     CATEGORY = '😺dzNodes/LayerFilter'
34 | 
35 |     def channel_shake(self, image, distance, angle, mode, ):
36 | 
37 |         ret_images = []
38 | 
39 |         for i in image:
40 |             i = torch.unsqueeze(i, 0)
41 |             _canvas = tensor2pil(i).convert('RGB')
42 |             R, G, B = _canvas.split()
43 |             x = int(math.cos(math.radians(angle)) * distance)  # angle is given in degrees
44 |             y = int(math.sin(math.radians(angle)) * distance)
45 |             if mode.startswith('R'):
46 |                 R = shift_image(R.convert('RGB'), -x, -y).convert('L')
47 |             if mode.startswith('G'):
48 |                 G = shift_image(G.convert('RGB'), -x, -y).convert('L')
49 |             if mode.startswith('B'):
50 |                 B = shift_image(B.convert('RGB'), -x, -y).convert('L')
51 |             if mode.endswith('R'):
52 |                 R = shift_image(R.convert('RGB'), x, y).convert('L')
53 |             if mode.endswith('G'):
54 |                 G = shift_image(G.convert('RGB'), x, y).convert('L')
55 |             if mode.endswith('B'):
56 |                 B = shift_image(B.convert('RGB'), x, y).convert('L')
57 | 
58 |             ret_image = Image.merge('RGB', [R, G, B])
59 |             ret_images.append(pil2tensor(ret_image))
60 | 
61 |         log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
62 |         return (torch.cat(ret_images, dim=0),)
63 | 
64 | NODE_CLASS_MAPPINGS = {
65 |     "LayerFilter: ChannelShake": ChannelShake
66 | }
67 | 
68 | NODE_DISPLAY_NAME_MAPPINGS = {
69 |     "LayerFilter: ChannelShake": "LayerFilter: ChannelShake"
70 | }
--------------------------------------------------------------------------------
/py/check_mask.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | from .imagefunc import log, tensor2pil, pil2tensor
4 | from .imagefunc import mask_white_area
5 | 
6 | 
7 | 
8 | 
9 | # Check whether the mask is valid: if its white area is below the given percentage, the mask is judged invalid.
10 | class CheckMask:
11 | 
12 |     def __init__(self):
13 |         self.NODE_NAME = 'CheckMask'
14 | 
15 | 
16 |     @classmethod
17 |     def INPUT_TYPES(self):
18 |         blank_mask_list = ['white', 'black']
19 |         return {
20 |             "required": {
21 |                 "mask": ("MASK",), #
22 |                 "white_point": ("INT", {"default": 1, "min": 1, "max": 254, "step": 1}), # white point threshold; pixel values above it count as valid
23 |                 "area_percent": ("INT", {"default": 1, "min": 1, "max": 99, "step": 1}), # area percentage; the mask is judged invalid below this value
24 |             },
25 |             "optional": { #
26 |             }
27 |         }
28 | 
29 |     RETURN_TYPES = ("BOOLEAN",)
30 |     RETURN_NAMES = ('bool',)
31 |     FUNCTION = 'check_mask'
32 |     CATEGORY = '😺dzNodes/LayerUtility'
33 | 
34 |     def check_mask(self, mask, white_point, area_percent,):
35 | 
36 |         if mask.dim() == 2:
37 |             mask = torch.unsqueeze(mask, 0)
38 |         mask = tensor2pil(mask[0])
39 |         if mask.width * mask.height > 262144:
40 |             target_width = 512
41 |             target_height = int(target_width * mask.height / mask.width)
42 |             mask = mask.resize((target_width, target_height), Image.LANCZOS)
43 |         ret = mask_white_area(mask, white_point) * 100 > area_percent
44 |         log(f"{self.NODE_NAME}:{ret}", message_type="finish")
45 |         return (ret,)
46 | 
47 | NODE_CLASS_MAPPINGS = {
48 |     "LayerUtility: CheckMask": CheckMask
49 | }
50 | 
51 | NODE_DISPLAY_NAME_MAPPINGS = {
52 |     "LayerUtility: CheckMask": "LayerUtility: Check Mask"
53 | }
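54 | 
55 | # NOTE (illustration, not part of the original file): masks larger than
56 | # 262144 px (512*512) are downscaled to 512 px wide before measuring;
57 | # mask_white_area() returns an area ratio, so the result is essentially
58 | # resolution-independent while the check runs much faster.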
--------------------------------------------------------------------------------
/py/check_mask_v2.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | from .imagefunc import log, tensor2pil, pil2tensor
4 | from .imagefunc import mask_white_area, is_valid_mask
5 | 
6 | 
7 | 
8 | # Check whether the mask is valid: if its white area is below the given percentage, the mask is judged invalid.
9 | class CheckMaskV2:
10 | 
11 |     def __init__(self):
12 |         self.NODE_NAME = 'CheckMaskV2'
13 |         pass
14 | 
15 |     @classmethod
16 |     def INPUT_TYPES(self):
17 |         method_list = ['simple', 'detect_percent']
18 |         blank_mask_list = ['white', 'black']
19 |         return {
20 |             "required": {
21 |                 "mask": ("MASK",), #
22 |                 "method": (method_list,), #
23 |                 "white_point": ("INT", {"default": 1, "min": 1, "max": 254, "step": 1}), # white point threshold; pixel values above it count as valid
24 |                 "area_percent": ("FLOAT", {"default": 0.01, "min": 0, "max": 100, "step": 0.01}), # area percentage; the mask is judged invalid below this value
25 |             },
26 |             "optional": { #
27 |             }
28 |         }
29 | 
30 |     RETURN_TYPES = ("BOOLEAN",)
31 |     RETURN_NAMES = ('bool',)
32 |     FUNCTION = 'check_mask_v2'
33 |     CATEGORY = '😺dzNodes/LayerUtility'
34 | 
35 |     def check_mask_v2(self, mask, method, white_point, area_percent,):
36 | 
37 |         if mask.dim() == 2:
38 |             mask = torch.unsqueeze(mask, 0)
39 |         tensor_mask = mask[0]
40 | 
41 |         pil_mask = tensor2pil(tensor_mask)
42 |         if pil_mask.width * pil_mask.height > 262144:
43 |             target_width = 512
44 |             target_height = int(target_width * pil_mask.height / pil_mask.width)
45 |             pil_mask = pil_mask.resize((target_width, target_height), Image.LANCZOS)
46 |         ret_bool = False
47 |         if method == 'simple':
48 |             ret_bool = is_valid_mask(tensor_mask)
49 |         else:
50 |             ret_bool = mask_white_area(pil_mask, white_point) * 100 > area_percent
51 |         log(f"{self.NODE_NAME}: {ret_bool}", message_type='finish')
52 |         return (ret_bool,)
53 | 
54 | NODE_CLASS_MAPPINGS = {
55 |     "LayerUtility: CheckMaskV2": CheckMaskV2
56 | }
57 | 
58 | NODE_DISPLAY_NAME_MAPPINGS = {
59 |     "LayerUtility: CheckMaskV2": "LayerUtility: Check Mask V2"
60 | }
--------------------------------------------------------------------------------
/py/color_adapter.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from PIL import Image
3 | from .imagefunc import log, tensor2pil, pil2tensor
4 | from .imagefunc import color_adapter, chop_image, RGB2RGBA
5 | 
6 | 
7 | 
8 | class ColorAdapter:
9 | 
10 |     def __init__(self):
11 |         self.NODE_NAME = 'ColorAdapter'
12 | 
13 | 
14 |     @classmethod
15 |     def INPUT_TYPES(self):
16 | 
17 |         return {
18 |             "required": {
19 |                 "image": ("IMAGE", ), #
20 |                 "color_ref_image": ("IMAGE", ), #
21 |                 "opacity": ("INT", {"default": 75, "min": 0, "max": 100, "step": 1}), # opacity
22 |             },
23 |             "optional": {
24 |             }
25 |         }
26 | 
27 |     RETURN_TYPES = ("IMAGE",)
28 |     RETURN_NAMES = ("image",)
29 |     FUNCTION = 'color_adapter'
30 |     CATEGORY = '😺dzNodes/LayerColor'
31 | 
32 |     def color_adapter(self, image, color_ref_image, opacity):
33 |         ret_images = []
34 | 
35 |         l_images = []
36 |         r_images = []
37 |         for l in image:
38 |             l_images.append(torch.unsqueeze(l, 0))
39 |         for r in color_ref_image:
40 |             r_images.append(torch.unsqueeze(r, 0))
41 |         for i in range(len(l_images)):
42 |             _image = l_images[i]
43 |             _ref = r_images[i] if len(r_images) > i else r_images[-1]  # reuse the last reference image when its batch is shorter
44 | 
45 |             __image = tensor2pil(_image)
46 |             _canvas = __image.convert('RGB')
47 |             ret_image = color_adapter(_canvas, tensor2pil(_ref).convert('RGB'))
48 |             ret_image = chop_image(_canvas, ret_image, blend_mode='normal', opacity=opacity)
49 |             if __image.mode == 'RGBA':
50 |                 ret_image = RGB2RGBA(ret_image, __image.split()[-1])
51 |             ret_images.append(pil2tensor(ret_image))
52 | 
53 |         log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
54 |         return (torch.cat(ret_images, dim=0),)
55 | 
56 | NODE_CLASS_MAPPINGS = {
57 |     "LayerColor: ColorAdapter": ColorAdapter
58 | }
59 | 
60 | NODE_DISPLAY_NAME_MAPPINGS = {
61 |     "LayerColor: ColorAdapter": "LayerColor: ColorAdapter"
62 | }
--------------------------------------------------------------------------------
/py/color_correct_HSV.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .imagefunc import log, tensor2pil, pil2tensor
3 | from .imagefunc import image_hue_offset, image_gray_offset, image_channel_merge, RGB2RGBA
RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectHSV: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'HSV' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), # 18 | "H": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 19 | "S": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 20 | "V": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 21 | }, 22 | "optional": { 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("image",) 28 | FUNCTION = 'color_correct_HSV' 29 | CATEGORY = '😺dzNodes/LayerColor' 30 | 31 | def color_correct_HSV(self, image, H, S, V): 32 | 33 | ret_images = [] 34 | 35 | for i in image: 36 | i = torch.unsqueeze(i,0) 37 | __image = tensor2pil(i) 38 | _h, _s, _v = tensor2pil(i).convert('HSV').split() 39 | if H != 0 : 40 | _h = image_hue_offset(_h, H) 41 | if S != 0 : 42 | _s = image_gray_offset(_s, S) 43 | if V != 0 : 44 | _v = image_gray_offset(_v, V) 45 | ret_image = image_channel_merge((_h, _s, _v), 'HSV') 46 | 47 | if __image.mode == 'RGBA': 48 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 49 | 50 | ret_images.append(pil2tensor(ret_image)) 51 | 52 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 53 | return (torch.cat(ret_images, dim=0),) 54 | 55 | NODE_CLASS_MAPPINGS = { 56 | "LayerColor: HSV": ColorCorrectHSV 57 | } 58 | 59 | NODE_DISPLAY_NAME_MAPPINGS = { 60 | "LayerColor: HSV": "LayerColor: HSV" 61 | } -------------------------------------------------------------------------------- /py/color_correct_LAB.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor 3 | from .imagefunc import image_gray_offset, image_channel_merge, RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectLAB: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'LAB' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), # 18 | "L": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 19 | "A": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 20 | "B": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 21 | }, 22 | "optional": { 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("image",) 28 | FUNCTION = 'color_correct_LAB' 29 | CATEGORY = '😺dzNodes/LayerColor' 30 | 31 | def color_correct_LAB(self, image, L, A, B): 32 | 33 | ret_images = [] 34 | 35 | for i in image: 36 | i = torch.unsqueeze(i, 0) 37 | __image = tensor2pil(i) 38 | _l, _a, _b = tensor2pil(i).convert('LAB').split() 39 | if L != 0 : 40 | _l = image_gray_offset(_l, L) 41 | if A != 0 : 42 | _a = image_gray_offset(_a, A) 43 | if B != 0 : 44 | _b = image_gray_offset(_b, B) 45 | ret_image = image_channel_merge((_l, _a, _b), 'LAB') 46 | 47 | if __image.mode == 'RGBA': 48 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 49 | 50 | ret_images.append(pil2tensor(ret_image)) 51 | 52 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 53 | return (torch.cat(ret_images, dim=0),) 54 | 55 | NODE_CLASS_MAPPINGS = { 56 | "LayerColor: LAB": ColorCorrectLAB 57 | } 58 | 59 | NODE_DISPLAY_NAME_MAPPINGS = { 60 | "LayerColor: LAB": "LayerColor: LAB" 61 | } -------------------------------------------------------------------------------- /py/color_correct_LUTapply.py: -------------------------------------------------------------------------------- 1 | import torch 
2 | from .imagefunc import log, tensor2pil, pil2tensor 3 | from .imagefunc import get_resource_dir, apply_lut, RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectLUTapply: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'LUT Apply' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | (LUT_DICT, _) = get_resource_dir() 15 | LUT_LIST = list(LUT_DICT.keys()) 16 | 17 | color_space_list = ['linear', 'log'] 18 | 19 | return { 20 | "required": { 21 | "image": ("IMAGE", ), # 22 | "LUT": (LUT_LIST,), # LUT file 23 | "color_space": (color_space_list,), 24 | "strength": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}), 25 | }, 26 | "optional": { 27 | } 28 | } 29 | 30 | RETURN_TYPES = ("IMAGE",) 31 | RETURN_NAMES = ("image",) 32 | FUNCTION = 'color_correct_LUTapply' 33 | CATEGORY = '😺dzNodes/LayerColor' 34 | 35 | def color_correct_LUTapply(self, image, LUT, color_space, strength): 36 | 37 | (LUT_DICT, _) = get_resource_dir() 38 | log(f"LUT_DICT={LUT_DICT}") 39 | ret_images = [] 40 | for i in image: 41 | i = torch.unsqueeze(i, 0) 42 | _image = tensor2pil(i) 43 | 44 | lut_file = LUT_DICT[LUT] 45 | ret_image = apply_lut(_image, lut_file=lut_file, colorspace=color_space, strength=strength) 46 | 47 | if _image.mode == 'RGBA': 48 | ret_image = RGB2RGBA(ret_image, _image.split()[-1]) 49 | ret_images.append(pil2tensor(ret_image)) 50 | 51 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 52 | return (torch.cat(ret_images, dim=0),) 53 | 54 | 55 | 56 | 57 | NODE_CLASS_MAPPINGS = { 58 | "LayerColor: LUT Apply": ColorCorrectLUTapply 59 | } 60 | 61 | NODE_DISPLAY_NAME_MAPPINGS = { 62 | "LayerColor: LUT Apply": "LayerColor: LUT Apply" 63 | } -------------------------------------------------------------------------------- /py/color_correct_RGB.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor 3 | from .imagefunc import image_gray_offset, image_channel_merge, RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectRGB: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'RGB' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), # 18 | "R": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 19 | "G": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 20 | "B": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 21 | }, 22 | "optional": { 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("image",) 28 | FUNCTION = 'color_correct_RGB' 29 | CATEGORY = '😺dzNodes/LayerColor' 30 | 31 | def color_correct_RGB(self, image, R, G, B): 32 | 33 | ret_images = [] 34 | 35 | for i in image: 36 | i = torch.unsqueeze(i,0) 37 | __image = tensor2pil(i) 38 | _r, _g, _b = tensor2pil(i).convert('RGB').split() 39 | if R != 0 : 40 | _r = image_gray_offset(_r, R) 41 | if G != 0 : 42 | _g = image_gray_offset(_g, G) 43 | if B != 0 : 44 | _b = image_gray_offset(_b, B) 45 | ret_image = image_channel_merge((_r, _g, _b), 'RGB') 46 | 47 | if __image.mode == 'RGBA': 48 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 49 | 50 | ret_images.append(pil2tensor(ret_image)) 51 | 52 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 53 | return (torch.cat(ret_images, dim=0),) 54 | 55 | NODE_CLASS_MAPPINGS = { 56 | "LayerColor: RGB": ColorCorrectRGB 57 | } 58 | 59 | NODE_DISPLAY_NAME_MAPPINGS = { 60 | "LayerColor: RGB": "LayerColor: RGB" 61 | }
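# The LayerColor channel nodes above and below (RGB, HSV, LAB, YUV) all share
# one pattern: split the image into channels, offset the selected channels,
# then merge them back. A minimal standalone sketch of that pattern, assuming
# image_gray_offset (defined in imagefunc.py, not shown here) adds a clamped
# constant to an 8-bit channel -- its real implementation may differ:
from PIL import Image

def gray_offset_sketch(channel: Image.Image, amount: int) -> Image.Image:
    # clamp each 8-bit value after adding the offset (assumed behaviour)
    return channel.point(lambda v: max(0, min(255, v + amount)))

img = Image.new('RGB', (64, 64), '#336699')
_r, _g, _b = img.split()
_r = gray_offset_sketch(_r, 30)  # lift the red channel by 30 levels
result = Image.merge('RGB', (_r, _g, _b))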
-------------------------------------------------------------------------------- /py/color_correct_YUV.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor 3 | from .imagefunc import image_gray_offset, image_channel_merge, RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectYUV: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'YUV' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), # 18 | "Y": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 19 | "U": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 20 | "V": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}), 21 | }, 22 | "optional": { 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE",) 27 | RETURN_NAMES = ("image",) 28 | FUNCTION = 'color_correct_YUV' 29 | CATEGORY = '😺dzNodes/LayerColor' 30 | 31 | def color_correct_YUV(self, image, Y, U, V): 32 | 33 | ret_images = [] 34 | 35 | for i in image: 36 | i = torch.unsqueeze(i, 0) 37 | __image = tensor2pil(i) 38 | _y, _u, _v = tensor2pil(i).convert('YCbCr').split() 39 | if Y != 0 : 40 | _y = image_gray_offset(_y, Y) 41 | if U != 0 : 42 | _u = image_gray_offset(_u, U) 43 | if V != 0 : 44 | _v = image_gray_offset(_v, V) 45 | ret_image = image_channel_merge((_y, _u, _v), 'YCbCr') 46 | 47 | if __image.mode == 'RGBA': 48 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 49 | 50 | ret_images.append(pil2tensor(ret_image)) 51 | 52 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 53 | return (torch.cat(ret_images, dim=0),) 54 | 55 | NODE_CLASS_MAPPINGS = { 56 | "LayerColor: YUV": ColorCorrectYUV 57 | } 58 | 59 | NODE_DISPLAY_NAME_MAPPINGS = { 60 | "LayerColor: YUV": "LayerColor: YUV" 61 | } -------------------------------------------------------------------------------- /py/color_correct_auto_brightness.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, pil2tensor 4 | from .imagefunc import histogram_equalization, chop_image, image_channel_merge, image_gray_offset, RGB2RGBA 5 | 6 | 7 | 8 | class AutoBrightness: 9 | 10 | def __init__(self): 11 | self.NODE_NAME = 'AutoBrightness' 12 | 13 | 14 | @classmethod 15 | def INPUT_TYPES(self): 16 | 17 | return { 18 | "required": { 19 | "image": ("IMAGE", ), # 20 | "strength": ("INT", {"default": 75, "min": 0, "max": 100, "step": 1}), 21 | "saturation": ("INT", {"default": 8, "min": -255, "max": 255, "step": 1}), 22 | }, 23 | "optional": { 24 | "mask": ("MASK", ), 25 | } 26 | } 27 | 28 | RETURN_TYPES = ("IMAGE",) 29 | RETURN_NAMES = ("image",) 30 | FUNCTION = 'auto_brightness' 31 | CATEGORY = '😺dzNodes/LayerColor' 32 | 33 | def auto_brightness(self, image, strength, saturation, mask=None): 34 | 35 | l_images = [] 36 | l_masks = [] 37 | ret_images = [] 38 | 39 | for l in image: 40 | l_images.append(torch.unsqueeze(l, 0)) 41 | m = tensor2pil(l) 42 | if m.mode == 'RGBA': 43 | l_masks.append(m.split()[-1]) 44 | else: 45 | l_masks.append(Image.new('L', m.size, 'white')) 46 | if mask is not None: 47 | if mask.dim() == 2: 48 | mask = torch.unsqueeze(mask, 0) 49 | l_masks = [] 50 | for m in mask: 51 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 52 | max_batch = max(len(l_images), len(l_masks)) 53 | for i in range(max_batch): 54 | _image = l_images[i] if i < len(l_images) else l_images[-1] 55 | _mask = 
l_masks[i] if i < len(l_masks) else l_masks[-1] 56 | orig_image = tensor2pil(_image) 57 | 58 | _l, _a, _b = orig_image.convert('LAB').split() 59 | _histogram = histogram_equalization(_l, _mask, gamma_strength=strength/100) 60 | _l = chop_image(_l, _histogram, 'normal', strength) 61 | ret_image = image_channel_merge((_l, _a, _b), 'LAB') 62 | if saturation != 0 : 63 | _h, _s, _v = ret_image.convert('HSV').split() 64 | _s = image_gray_offset(_s, saturation) 65 | ret_image = image_channel_merge((_h, _s, _v), 'HSV') 66 | 67 | if orig_image.mode == 'RGBA': 68 | ret_image = RGB2RGBA(ret_image, orig_image.split()[-1]) 69 | 70 | ret_images.append(pil2tensor(ret_image)) 71 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 72 | return (torch.cat(ret_images, dim=0),) 73 | 74 | NODE_CLASS_MAPPINGS = { 75 | "LayerColor: AutoBrightness": AutoBrightness 76 | } 77 | 78 | NODE_DISPLAY_NAME_MAPPINGS = { 79 | "LayerColor: AutoBrightness": "LayerColor: AutoBrightness" 80 | } -------------------------------------------------------------------------------- /py/color_correct_color_balance.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image, ImageEnhance 3 | from .imagefunc import log, tensor2pil, pil2tensor 4 | from .imagefunc import color_balance, RGB2RGBA 5 | 6 | 7 | 8 | 9 | class ColorBalance: 10 | 11 | def __init__(self): 12 | self.NODE_NAME = 'ColorBalance' 13 | 14 | 15 | @classmethod 16 | def INPUT_TYPES(self): 17 | 18 | return { 19 | "required": { 20 | "image": ("IMAGE", ), # 21 | "cyan_red": ("FLOAT", {"default": 0, "min": -1.0, "max": 1.0, "step": 0.001}), 22 | "magenta_green": ("FLOAT", {"default": 0, "min": -1.0, "max": 1.0, "step": 0.001}), 23 | "yellow_blue": ("FLOAT", {"default": 0, "min": -1.0, "max": 1.0, "step": 0.001}) 24 | }, 25 | "optional": { 26 | } 27 | } 28 | 29 | RETURN_TYPES = ("IMAGE",) 30 | RETURN_NAMES = ("image",) 31 | FUNCTION = 'color_balance' 32 | CATEGORY = '😺dzNodes/LayerColor' 33 | 34 | def color_balance(self, image, cyan_red, magenta_green, yellow_blue): 35 | 36 | l_images = [] 37 | l_masks = [] 38 | ret_images = [] 39 | 40 | for l in image: 41 | l_images.append(torch.unsqueeze(l, 0)) 42 | m = tensor2pil(l) 43 | if m.mode == 'RGBA': 44 | l_masks.append(m.split()[-1]) 45 | else: 46 | l_masks.append(Image.new('L', m.size, 'white')) 47 | 48 | 49 | for i in range(len(l_images)): 50 | _image = l_images[i] 51 | _mask = l_masks[i] 52 | orig_image = tensor2pil(_image) 53 | 54 | ret_image = color_balance(orig_image, 55 | [cyan_red, magenta_green, yellow_blue], 56 | [cyan_red, magenta_green, yellow_blue], 57 | [cyan_red, magenta_green, yellow_blue], 58 | shadow_center=0.15, 59 | midtone_center=0.5, 60 | midtone_max=1, 61 | preserve_luminosity=True) 62 | 63 | if orig_image.mode == 'RGBA': 64 | ret_image = RGB2RGBA(ret_image, orig_image.split()[-1]) 65 | 66 | ret_images.append(pil2tensor(ret_image)) 67 | 68 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 69 | return (torch.cat(ret_images, dim=0),) 70 | 71 | 72 | NODE_CLASS_MAPPINGS = { 73 | "LayerColor: ColorBalance": ColorBalance 74 | } 75 | 76 | NODE_DISPLAY_NAME_MAPPINGS = { 77 | "LayerColor: ColorBalance": "LayerColor: ColorBalance" 78 | } -------------------------------------------------------------------------------- /py/color_correct_color_temperature.py: -------------------------------------------------------------------------------- 1 | # Adapt from 
https://github.com/EllangoK/ComfyUI-post-processing-nodes/blob/master/post_processing/color_correct.py 2 | 3 | import torch 4 | import numpy as np 5 | from PIL import Image 6 | from .imagefunc import log 7 | 8 | 9 | class ColorTemperature: 10 | def __init__(self): 11 | self.NODE_NAME = 'ColorTemperature' 12 | @classmethod 13 | def INPUT_TYPES(s): 14 | return { 15 | "required": { 16 | "image": ("IMAGE",), 17 | "temperature": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 1},), 18 | }, 19 | } 20 | 21 | RETURN_TYPES = ("IMAGE",) 22 | RETURN_NAMES = ("image",) 23 | FUNCTION = "color_temperature" 24 | CATEGORY = '😺dzNodes/LayerColor' 25 | 26 | def color_temperature(self, image, temperature, 27 | ): 28 | 29 | batch_size, height, width, _ = image.shape 30 | result = torch.zeros_like(image) 31 | 32 | temperature /= -100 33 | 34 | for b in range(batch_size): 35 | tensor_image = image[b].numpy() 36 | modified_image = Image.fromarray((tensor_image * 255).astype(np.uint8)) 37 | modified_image = np.array(modified_image).astype(np.float32) 38 | 39 | if temperature > 0: 40 | modified_image[:, :, 0] *= 1 + temperature 41 | modified_image[:, :, 1] *= 1 + temperature * 0.4 42 | elif temperature < 0: 43 | modified_image[:, :, 0] *= 1 + temperature * 0.2 44 | modified_image[:, :, 2] *= 1 - temperature 45 | 46 | modified_image = np.clip(modified_image, 0, 255) 47 | modified_image = modified_image.astype(np.uint8) 48 | modified_image = modified_image / 255 49 | modified_image = torch.from_numpy(modified_image).unsqueeze(0) 50 | result[b] = modified_image 51 | 52 | log(f"{self.NODE_NAME} Processed {len(result)} image(s).", message_type='finish') 53 | return (result,) 54 | 55 | NODE_CLASS_MAPPINGS = { 56 | "LayerColor: ColorTemperature": ColorTemperature 57 | } 58 | 59 | NODE_DISPLAY_NAME_MAPPINGS = { 60 | "LayerColor: ColorTemperature": "LayerColor: ColorTemperature" 61 | } -------------------------------------------------------------------------------- /py/color_correct_exposure.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from PIL import Image, ImageEnhance 4 | from .imagefunc import log, tensor2pil, pil2tensor 5 | from .imagefunc import RGB2RGBA 6 | 7 | 8 | 9 | class ColorCorrectExposure: 10 | 11 | def __init__(self): 12 | self.NODE_NAME = 'Exposure' 13 | pass 14 | 15 | @classmethod 16 | def INPUT_TYPES(self): 17 | 18 | return { 19 | "required": { 20 | "image": ("IMAGE", ), # 21 | "exposure": ("INT", {"default": 20, "min": -100, "max": 100, "step": 1}), 22 | }, 23 | "optional": { 24 | } 25 | } 26 | 27 | RETURN_TYPES = ("IMAGE",) 28 | RETURN_NAMES = ("image",) 29 | FUNCTION = 'color_correct_exposure' 30 | CATEGORY = '😺dzNodes/LayerColor' 31 | 32 | def color_correct_exposure(self, image, exposure): 33 | 34 | ret_images = [] 35 | 36 | for i in image: 37 | i = torch.unsqueeze(i, 0) 38 | __image = tensor2pil(i) 39 | t = i.detach().clone().cpu().numpy().astype(np.float32) 40 | more = t[:, :, :, :3] > 0 41 | t[:, :, :, :3][more] *= pow(2, exposure / 32) 42 | if exposure < 0: 43 | bp = -exposure / 250 44 | scale = 1 / (1 - bp) 45 | t = np.clip((t - bp) * scale, 0.0, 1.0) 46 | ret_image = tensor2pil(torch.from_numpy(t)) 47 | 48 | if __image.mode == 'RGBA': 49 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 50 | 51 | ret_images.append(pil2tensor(ret_image)) 52 | 53 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 54 | return (torch.cat(ret_images, dim=0),) 55 | 56 | 57 | 
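# A worked check of the exposure math above (a hedged reading of the code, not
# a spec): non-zero values are scaled by 2 ** (exposure / 32), so exposure=32
# is one stop brighter and the default exposure=20 gives a gain of 2 ** 0.625,
# roughly 1.54. For exposure=-32 the gain is 0.5 and a black point
# bp = 32 / 250 = 0.128 is subtracted, then the result is rescaled by
# 1 / (1 - 0.128), roughly 1.147, and clipped back to [0.0, 1.0].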
NODE_CLASS_MAPPINGS = { 58 | "LayerColor: Exposure": ColorCorrectExposure 59 | } 60 | 61 | NODE_DISPLAY_NAME_MAPPINGS = { 62 | "LayerColor: Exposure": "LayerColor: Exposure" 63 | } -------------------------------------------------------------------------------- /py/color_correct_gamma.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor 3 | from .imagefunc import gamma_trans, RGB2RGBA 4 | 5 | 6 | 7 | class ColorCorrectGamma: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'Gamma' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), 18 | "gamma": ("FLOAT", {"default": 1, "min": 0.1, "max": 10, "step": 0.01}), 19 | }, 20 | "optional": { 21 | } 22 | } 23 | 24 | RETURN_TYPES = ("IMAGE",) 25 | RETURN_NAMES = ("image",) 26 | FUNCTION = 'color_correct_gamma' 27 | CATEGORY = '😺dzNodes/LayerColor' 28 | 29 | def color_correct_gamma(self, image, gamma): 30 | 31 | ret_images = [] 32 | 33 | for i in image: 34 | i = torch.unsqueeze(i, 0) 35 | __image = tensor2pil(i) 36 | ret_image = gamma_trans(tensor2pil(i), gamma) 37 | 38 | if __image.mode == 'RGBA': 39 | ret_image = RGB2RGBA(ret_image, __image.split()[-1]) 40 | 41 | ret_images.append(pil2tensor(ret_image)) 42 | 43 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 44 | return (torch.cat(ret_images, dim=0),) 45 | 46 | NODE_CLASS_MAPPINGS = { 47 | "LayerColor: Gamma": ColorCorrectGamma 48 | } 49 | 50 | NODE_DISPLAY_NAME_MAPPINGS = { 51 | "LayerColor: Gamma": "LayerColor: Gamma" 52 | } -------------------------------------------------------------------------------- /py/color_image.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from .imagefunc import log, pil2tensor 3 | 4 | 5 | class ColorImage: 6 | 7 | def __init__(self): 8 | pass 9 | 10 | @classmethod 11 | def INPUT_TYPES(self): 12 | 13 | return { 14 | "required": { 15 | "width": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 16 | "height": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 17 | "color": ("STRING", {"default": "#000000"},), 18 | }, 19 | "optional": { 20 | } 21 | } 22 | 23 | RETURN_TYPES = ("IMAGE", ) 24 | RETURN_NAMES = ("image", ) 25 | FUNCTION = 'color_image' 26 | CATEGORY = '😺dzNodes/LayerUtility' 27 | 28 | def color_image(self, width, height, color, ): 29 | 30 | ret_image = Image.new('RGB', (width, height), color=color) 31 | return (pil2tensor(ret_image), ) 32 | 33 | NODE_CLASS_MAPPINGS = { 34 | "LayerUtility: ColorImage": ColorImage 35 | } 36 | 37 | NODE_DISPLAY_NAME_MAPPINGS = { 38 | "LayerUtility: ColorImage": "LayerUtility: ColorImage" 39 | } -------------------------------------------------------------------------------- /py/color_image_v2.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from .imagefunc import log, tensor2pil, pil2tensor, AnyType, load_custom_size 3 | 4 | 5 | 6 | any = AnyType("*") 7 | 8 | class ColorImageV2: 9 | 10 | def __init__(self): 11 | self.NODE_NAME = 'ColorImage V2' 12 | 13 | @classmethod 14 | def INPUT_TYPES(self): 15 | size_list = ['custom'] 16 | size_list.extend(load_custom_size()) 17 | return { 18 | "required": { 19 | "size": (size_list,), 20 | "custom_width": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 21 | "custom_height": ("INT", {"default": 512, "min": 4, "max": 
99999, "step": 1}), 22 | "color": ("STRING", {"default": "#000000"},), 23 | }, 24 | "optional": { 25 | "size_as": (any, {}), 26 | } 27 | } 28 | 29 | RETURN_TYPES = ("IMAGE", ) 30 | RETURN_NAMES = ("image", ) 31 | FUNCTION = 'color_image_v2' 32 | CATEGORY = '😺dzNodes/LayerUtility' 33 | 34 | def color_image_v2(self, size, custom_width, custom_height, color, size_as=None ): 35 | 36 | if size_as is not None: 37 | if size_as.shape[0] > 0: 38 | _asimage = tensor2pil(size_as[0]) 39 | else: 40 | _asimage = tensor2pil(size_as) 41 | width, height = _asimage.size 42 | else: 43 | if size == 'custom': 44 | width = custom_width 45 | height = custom_height 46 | else: 47 | try: 48 | _s = size.split('x') 49 | width = int(_s[0].strip()) 50 | height = int(_s[1].strip()) 51 | except Exception as e: 52 | log(f'Warning: {self.NODE_NAME} invalid size, check "custom_size.ini"', message_type='warning') 53 | width = custom_width 54 | height = custom_height 55 | 56 | ret_image = Image.new('RGB', (width, height), color=color) 57 | return (pil2tensor(ret_image), ) 58 | 59 | NODE_CLASS_MAPPINGS = { 60 | "LayerUtility: ColorImage V2": ColorImageV2 61 | } 62 | 63 | NODE_DISPLAY_NAME_MAPPINGS = { 64 | "LayerUtility: ColorImage V2": "LayerUtility: ColorImage V2" 65 | } -------------------------------------------------------------------------------- /py/color_map.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor, chop_image 3 | from .imagefunc import image_to_colormap 4 | 5 | 6 | 7 | colormap_list = ['autumn', 'bone', 'jet', 'winter', 'rainbow', 'ocean', 8 | 'summer', 'sprint', 'cool', 'HSV', 'pink', 'hot', 9 | 'parula', 'magma', 'inferno', 'plasma', 'viridis', 'cividis', 10 | 'twilight', 'twilight_shifted', 'turbo', 'deepgreen'] 11 | 12 | class ColorMap: 13 | 14 | def __init__(self): 15 | self.NODE_NAME = 'ColorMap' 16 | 17 | @classmethod 18 | def INPUT_TYPES(self): 19 | 20 | return { 21 | "required": { 22 | "image": ("IMAGE", ), 23 | "color_map": (colormap_list,), 24 | "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}), # opacity 25 | }, 26 | "optional": { 27 | } 28 | } 29 | 30 | RETURN_TYPES = ("IMAGE",) 31 | RETURN_NAMES = ("image",) 32 | FUNCTION = 'color_map' 33 | CATEGORY = '😺dzNodes/LayerFilter' 34 | 35 | def color_map(self, image, color_map, opacity 36 | ): 37 | 38 | ret_images = [] 39 | 40 | for i in image: 41 | i = torch.unsqueeze(i, 0) 42 | _canvas = tensor2pil(i) 43 | _image = image_to_colormap(_canvas, colormap_list.index(color_map)) 44 | ret_image = chop_image(_canvas, _image, 'normal', opacity) 45 | 46 | ret_images.append(pil2tensor(ret_image)) 47 | 48 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 49 | return (torch.cat(ret_images, dim=0),) 50 | 51 | NODE_CLASS_MAPPINGS = { 52 | "LayerFilter: ColorMap": ColorMap 53 | } 54 | 55 | NODE_DISPLAY_NAME_MAPPINGS = { 56 | "LayerFilter: ColorMap": "LayerFilter: ColorMap" 57 | } -------------------------------------------------------------------------------- /py/color_picker.py: -------------------------------------------------------------------------------- 1 | 2 | from .imagefunc import Hex_to_RGB 3 | 4 | 5 | class ColorPicker: 6 | 7 | def __init__(self): 8 | pass 9 | 10 | @classmethod 11 | def INPUT_TYPES(self): 12 | mode_list = ['HEX', 'DEC'] 13 | return { 14 | "required": { 15 | "color": ("COLOR", {"default": "#FFFFFF"},), 16 | "mode": (mode_list,), # output mode 17 | }, 18 | "optional": { 19 | } 20 | } 21 | 
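    # Example (assuming Hex_to_RGB in imagefunc.py parses "#RRGGBB" into an RGB
    # tuple): with mode='HEX' the string "#FF8000" passes through unchanged;
    # with mode='DEC' it is returned as the tuple (255, 128, 0).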
22 | RETURN_TYPES = ("STRING",) 23 | RETURN_NAMES = ("value",) 24 | FUNCTION = 'picker' 25 | CATEGORY = '😺dzNodes/LayerUtility' 26 | 27 | def picker(self, color, mode): 28 | ret = color 29 | if mode == 'DEC': 30 | ret = Hex_to_RGB(ret) 31 | return (ret,) 32 | 33 | 34 | NODE_CLASS_MAPPINGS = { 35 | "LayerUtility: ColorPicker": ColorPicker 36 | } 37 | 38 | NODE_DISPLAY_NAME_MAPPINGS = { 39 | "LayerUtility: ColorPicker": "LayerUtility: ColorPicker" 40 | } -------------------------------------------------------------------------------- /py/color_to_HSVvalue.py: -------------------------------------------------------------------------------- 1 | from .imagefunc import AnyType, Hex_to_HSV_255level, RGB_to_Hex, log 2 | 3 | any = AnyType("*") 4 | 5 | class ColorValuetoHSVValue: 6 | 7 | def __init__(self): 8 | self.NODE_NAME = 'HSV Value' 9 | 10 | @classmethod 11 | def INPUT_TYPES(self): 12 | 13 | return { 14 | "required": { 15 | "color_value": (any, {}), 16 | }, 17 | "optional": { 18 | } 19 | } 20 | 21 | RETURN_TYPES = ("INT", "INT", "INT") 22 | RETURN_NAMES = ("H", "S", "V") 23 | FUNCTION = 'color_value_to_hsv_value' 24 | CATEGORY = '😺dzNodes/LayerUtility/Data' 25 | 26 | def color_value_to_hsv_value(self, color_value,): 27 | H, S, V = 0, 0, 0 28 | if isinstance(color_value, str): 29 | H, S, V = Hex_to_HSV_255level(color_value) 30 | elif isinstance(color_value, tuple): 31 | H, S, V = Hex_to_HSV_255level(RGB_to_Hex(color_value)) 32 | else: 33 | log(f"{self.NODE_NAME}: color_value input type must be tuple or string.", message_type="error") 34 | 35 | return (H, S, V,) 36 | 37 | NODE_CLASS_MAPPINGS = { 38 | "LayerUtility: HSV Value": ColorValuetoHSVValue 39 | } 40 | 41 | NODE_DISPLAY_NAME_MAPPINGS = { 42 | "LayerUtility: HSV Value": "LayerUtility: HSV Value" 43 | } -------------------------------------------------------------------------------- /py/color_to_RGBvalue.py: -------------------------------------------------------------------------------- 1 | from .imagefunc import AnyType, Hex_to_RGB, log 2 | 3 | 4 | any = AnyType("*") 5 | 6 | class ColorValuetoRGBValue: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'RGB Value' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | 14 | return { 15 | "required": { 16 | "color_value": (any, {}), 17 | }, 18 | "optional": { 19 | } 20 | } 21 | 22 | RETURN_TYPES = ("INT", "INT", "INT") 23 | RETURN_NAMES = ("R", "G", "B") 24 | FUNCTION = 'color_value_to_rgb_value' 25 | CATEGORY = '😺dzNodes/LayerUtility/Data' 26 | 27 | def color_value_to_rgb_value(self, color_value,): 28 | R, G, B = 0, 0, 0 29 | if isinstance(color_value, str): 30 | color = Hex_to_RGB(color_value) 31 | R, G, B = color[0], color[1], color[2] 32 | elif isinstance(color_value, tuple): 33 | R, G, B = color_value[0], color_value[1], color_value[2] 34 | else: 35 | log(f"{self.NODE_NAME}: color_value input type must be tuple or string.", message_type="error") 36 | 37 | return (R, G, B,) 38 | 39 | NODE_CLASS_MAPPINGS = { 40 | "LayerUtility: RGB Value": ColorValuetoRGBValue 41 | } 42 | 43 | NODE_DISPLAY_NAME_MAPPINGS = { 44 | "LayerUtility: RGB Value": "LayerUtility: RGB Value" 45 | } -------------------------------------------------------------------------------- /py/color_to_gray_value.py: -------------------------------------------------------------------------------- 1 | from .imagefunc import AnyType, rgb2gray 2 | 3 | 4 | any = AnyType("*") 5 | 6 | class ColorValuetoGrayValue: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'Gray Value' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | 14 | return { 15 | "required": { 16 | "color_value": (any, {}), 17 | }, 18 | "optional": { 19 | } 20 | } 21 | 22 | RETURN_TYPES = ("INT", "INT",) 23 | RETURN_NAMES = ("gray(256_level)", "gray(100_level)",) 24 | FUNCTION = 'color_value_to_gray_value' 25 | CATEGORY = '😺dzNodes/LayerUtility/Data' 26 | 27 | def color_value_to_gray_value(self, color_value,): 28 | gray = rgb2gray(color_value) 29 | return (gray, int(gray / 2.55),) 30 | 31 | NODE_CLASS_MAPPINGS = { 32 | "LayerUtility: GrayValue": ColorValuetoGrayValue 33 | } 34 | 35 | NODE_DISPLAY_NAME_MAPPINGS = { 36 | "LayerUtility: GrayValue": "LayerUtility: Gray Value" 37 | } -------------------------------------------------------------------------------- /py/crop_box_resolve.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | class CropBoxResolve: 5 | 6 | def __init__(self): 7 | self.NODE_NAME = 'CropBoxResolve' 8 | 9 | @classmethod 10 | def INPUT_TYPES(self): 11 | 12 | return { 13 | "required": { 14 | "crop_box": ("BOX",), 15 | }, 16 | "optional": { 17 | } 18 | } 19 | 20 | RETURN_TYPES = ("INT", "INT", "INT", "INT") 21 | RETURN_NAMES = ("x", "y", "width", "height") 22 | FUNCTION = 'crop_box_resolve' 23 | CATEGORY = '😺dzNodes/LayerUtility' 24 | 25 | def crop_box_resolve(self, crop_box 26 | ): 27 | 28 | (x1, y1, x2, y2) = crop_box 29 | x1 = int(x1) 30 | y1 = int(y1) 31 | x2 = int(x2) 32 | y2 = int(y2) 33 | 34 | return (x1, y1, x2 - x1, y2 - y1,) 35 | 36 | 37 | NODE_CLASS_MAPPINGS = { 38 | "LayerUtility: CropBoxResolve": CropBoxResolve 39 | } 40 | 41 | NODE_DISPLAY_NAME_MAPPINGS = { 42 | "LayerUtility: CropBoxResolve": "LayerUtility: CropBoxResolve" 43 | } -------------------------------------------------------------------------------- /py/extend_canvas.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, pil2tensor, image2mask 4 | 5 | 6 | class ExtendCanvas: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'ExtendCanvas' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | 14 | return { 15 | "required": { 16 | "image": ("IMAGE", ), 17 | "invert_mask": ("BOOLEAN", {"default": True}), # invert mask 18 | "top": ("INT", {"default": 0, "min": 0, "max": 99999, "step": 1}), 19 | "bottom": ("INT", {"default": 0, "min": 0, "max": 99999, "step": 1}), 20 | "left": ("INT", {"default": 0, "min": 0, "max": 99999, "step": 1}), 21 | "right": ("INT", {"default": 0, "min": 0, "max": 99999, "step": 1}), 22 | "color": ("COLOR", {"default": "#000000"},), 23 | }, 24 | "optional": { 25 | "mask": ("MASK",), # 26 | } 27 | } 28 | 29 | RETURN_TYPES = ("IMAGE", "MASK",) 30 | RETURN_NAMES = ("image", "mask") 31 | FUNCTION = 'extend_canvas' 32 | CATEGORY = '😺dzNodes/LayerUtility' 33 | 34 | def extend_canvas(self, image, invert_mask, 35 | top, bottom, left, right, color, 36 | mask=None, 37 | ): 38 | 39 | l_images = [] 40 | l_masks = [] 41 | ret_images = [] 42 | ret_masks = [] 43 | 44 | for l in image: 45 | l_images.append(torch.unsqueeze(l, 0)) 46 | m = tensor2pil(l) 47 | if m.mode == 'RGBA': 48 | l_masks.append(m.split()[-1]) 49 | 50 | if mask is not None: 51 | if mask.dim() == 2: 52 | mask = torch.unsqueeze(mask, 0) 53 | l_masks = [] 54 | for m in mask: 55 | if invert_mask: 56 | m = 1 - m 57 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 58 | else: 59 | if len(l_masks) == 0: 60 | l_masks.append(Image.new('L', size=tensor2pil(l_images[0]).size, color='white')) 61 | 62 | max_batch = max(len(l_images),
len(l_masks)) 63 | for i in range(max_batch): 64 | 65 | _image = l_images[i] if i < len(l_images) else l_images[-1] 66 | _image = tensor2pil(_image).convert('RGB') 67 | _mask = l_masks[i] if i < len(l_masks) else l_masks[-1] 68 | 69 | width = _image.width + left + right 70 | height = _image.height + top + bottom 71 | _canvas = Image.new('RGB', (width, height), color) 72 | _mask_canvas = Image.new('L', (width, height), "black") 73 | 74 | _canvas.paste(_image, box=(left,top)) 75 | _mask_canvas.paste(_mask.convert('L'), box=(left, top)) 76 | 77 | ret_images.append(pil2tensor(_canvas)) 78 | ret_masks.append(image2mask(_mask_canvas)) 79 | 80 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 81 | return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),) 82 | 83 | 84 | NODE_CLASS_MAPPINGS = { 85 | "LayerUtility: ExtendCanvas": ExtendCanvas 86 | } 87 | 88 | NODE_DISPLAY_NAME_MAPPINGS = { 89 | "LayerUtility: ExtendCanvas": "LayerUtility: ExtendCanvas" 90 | } -------------------------------------------------------------------------------- /py/filmgrainer/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.2" -------------------------------------------------------------------------------- /py/filmgrainer/graingen.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import random 3 | import numpy as np 4 | 5 | def _makeGrayNoise(width, height, power): 6 | buffer = np.zeros([height, width], dtype=int) 7 | 8 | for y in range(0, height): 9 | for x in range(0, width): 10 | buffer[y, x] = random.gauss(128, power) 11 | buffer = buffer.clip(0, 255) 12 | return Image.fromarray(buffer.astype(dtype=np.uint8)) 13 | 14 | def _makeRgbNoise(width, height, power, saturation): 15 | buffer = np.zeros([height, width, 3], dtype=int) 16 | intens_power = power * (1.0 - saturation) 17 | for y in range(0, height): 18 | for x in range(0, width): 19 | intens = random.gauss(128, intens_power) 20 | buffer[y, x, 0] = random.gauss(0, power) * saturation + intens 21 | buffer[y, x, 1] = random.gauss(0, power) * saturation + intens 22 | buffer[y, x, 2] = random.gauss(0, power) * saturation + intens 23 | 24 | buffer = buffer.clip(0, 255) 25 | return Image.fromarray(buffer.astype(dtype=np.uint8)) 26 | 27 | 28 | def grainGen(width, height, grain_size, power, saturation, seed = 1): 29 | # A grain_size of 1 means the noise buffer will be made 1:1 30 | # A grain_size of 2 means the noise buffer will be resampled 1:2 31 | noise_width = int(width / grain_size) 32 | noise_height = int(height / grain_size) 33 | random.seed(seed) 34 | 35 | if saturation < 0.0: 36 | print("Making B/W grain, width: %d, height: %d, grain-size: %s, power: %s, seed: %d" % ( 37 | noise_width, noise_height, str(grain_size), str(power), seed)) 38 | img = _makeGrayNoise(noise_width, noise_height, power) 39 | else: 40 | print("Making RGB grain, width: %d, height: %d, saturation: %s, grain-size: %s, power: %s, seed: %d" % ( 41 | noise_width, noise_height, str(saturation), str(grain_size), str(power), seed)) 42 | img = _makeRgbNoise(noise_width, noise_height, power, saturation) 43 | 44 | # Resample 45 | if grain_size != 1.0: 46 | img = img.resize((width, height), resample = Image.LANCZOS) 47 | 48 | return img 49 | 50 | 51 | if __name__ == "__main__": 52 | import sys 53 | if len(sys.argv) == 8: 54 | width = int(sys.argv[2]) 55 | height = int(sys.argv[3]) 56 | grain_size = float(sys.argv[4]) 57 | power 
= float(sys.argv[5]) 58 | sat = float(sys.argv[6]) 59 | seed = int(sys.argv[7]) 60 | out = grainGen(width, height, grain_size, power, sat, seed) 61 | out.save(sys.argv[1]) -------------------------------------------------------------------------------- /py/filmgrainer/processing.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | def generate_blurred_images(image, blur_strength, steps, focus_spread=1): 5 | blurred_images = [] 6 | for step in range(1, steps + 1): 7 | # Adjust the curve based on the curve_weight 8 | blur_factor = (step / steps) ** focus_spread * blur_strength 9 | blur_size = max(1, int(blur_factor)) 10 | blur_size = blur_size if blur_size % 2 == 1 else blur_size + 1 # Ensure blur_size is odd 11 | 12 | # Apply Gaussian Blur 13 | blurred_image = cv2.GaussianBlur(image, (blur_size, blur_size), 0) 14 | blurred_images.append(blurred_image) 15 | return blurred_images 16 | 17 | def apply_blurred_images(image, blurred_images, mask): 18 | steps = len(blurred_images) # Calculate the number of steps based on the blurred images provided 19 | final_image = np.zeros_like(image) 20 | step_size = 1.0 / steps 21 | for i, blurred_image in enumerate(blurred_images): 22 | # Calculate the mask for the current step 23 | current_mask = np.clip((mask - i * step_size) * steps, 0, 1) 24 | next_mask = np.clip((mask - (i + 1) * step_size) * steps, 0, 1) 25 | blend_mask = current_mask - next_mask 26 | 27 | # Apply the blend mask 28 | final_image += blend_mask[:, :, np.newaxis] * blurred_image 29 | 30 | # Ensure no division by zero; add the original image for areas without blurring 31 | final_image += (1 - np.clip(mask * steps, 0, 1))[:, :, np.newaxis] * image 32 | return final_image -------------------------------------------------------------------------------- /py/gaussian_blur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor, gaussian_blur 3 | 4 | 5 | 6 | class GaussianBlur: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'GaussianBlur' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | 14 | return { 15 | "required": { 16 | "image": ("IMAGE", ), # 17 | "blur": ("INT", {"default": 20, "min": 1, "max": 999, "step": 1}), # blur amount 18 | }, 19 | "optional": { 20 | } 21 | } 22 | 23 | RETURN_TYPES = ("IMAGE",) 24 | RETURN_NAMES = ("image",) 25 | FUNCTION = 'gaussian_blur' 26 | CATEGORY = '😺dzNodes/LayerFilter' 27 | 28 | def gaussian_blur(self, image, blur): 29 | 30 | ret_images = [] 31 | 32 | for i in image: 33 | _canvas = tensor2pil(torch.unsqueeze(i, 0)).convert('RGB') 34 | 35 | ret_images.append(pil2tensor(gaussian_blur(_canvas, blur))) 36 | 37 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 38 | return (torch.cat(ret_images, dim=0),) 39 | 40 | 41 | class LS_GaussianBlurV2: 42 | 43 | def __init__(self): 44 | self.NODE_NAME = 'GaussianBlurV2' 45 | 46 | @classmethod 47 | def INPUT_TYPES(self): 48 | 49 | return { 50 | "required": { 51 | "image": ("IMAGE", ), # 52 | "blur": ("FLOAT", {"default": 20, "min": 0, "max": 1000, "step": 0.05}), # blur amount 53 | }, 54 | "optional": { 55 | } 56 | } 57 | 58 | RETURN_TYPES = ("IMAGE",) 59 | RETURN_NAMES = ("image",) 60 | FUNCTION = 'gaussian_blur_v2' 61 | CATEGORY = '😺dzNodes/LayerFilter' 62 | 63 | def gaussian_blur_v2(self, image, blur): 64 | 65 | ret_images = [] 66 | 67 | if blur: 68 | for i in image: 69 | _canvas = tensor2pil(torch.unsqueeze(i,
0)).convert('RGB') 70 | 71 | ret_images.append(pil2tensor(gaussian_blur(_canvas, blur))) 72 | else: 73 | return (image,) 74 | 75 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 76 | return (torch.cat(ret_images, dim=0),) 77 | 78 | NODE_CLASS_MAPPINGS = { 79 | "LayerFilter: GaussianBlur": GaussianBlur, 80 | "LayerFilter: GaussianBlurV2": LS_GaussianBlurV2 81 | } 82 | 83 | NODE_DISPLAY_NAME_MAPPINGS = { 84 | "LayerFilter: GaussianBlur": "LayerFilter: GaussianBlur", 85 | "LayerFilter: GaussianBlurV2": "LayerFilter: Gaussian Blur V2" 86 | } -------------------------------------------------------------------------------- /py/get_image_size.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import tensor2pil 3 | 4 | class GetImageSize: 5 | 6 | def __init__(self): 7 | self.NODE_NAME = 'GetImageSize' 8 | 9 | @classmethod 10 | def INPUT_TYPES(self): 11 | 12 | return { 13 | "required": { 14 | "image": ("IMAGE", ), 15 | }, 16 | "optional": { 17 | } 18 | } 19 | 20 | RETURN_TYPES = ("INT", "INT", "BOX") 21 | RETURN_NAMES = ("width", "height", "original_size") 22 | FUNCTION = 'get_image_size' 23 | CATEGORY = '😺dzNodes/LayerUtility' 24 | 25 | def get_image_size(self, image,): 26 | 27 | if image.shape[0] > 0: 28 | image = torch.unsqueeze(image[0], 0) 29 | _image = tensor2pil(image) 30 | 31 | return (_image.width, _image.height, [_image.width, _image.height],) 32 | 33 | NODE_CLASS_MAPPINGS = { 34 | "LayerUtility: GetImageSize": GetImageSize 35 | } 36 | 37 | NODE_DISPLAY_NAME_MAPPINGS = { 38 | "LayerUtility: GetImageSize": "LayerUtility: GetImageSize" 39 | } -------------------------------------------------------------------------------- /py/gradient_image.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import gradient, pil2tensor 3 | 4 | class GradientImage: 5 | 6 | def __init__(self): 7 | self.NODE_NAME = 'GradientImage' 8 | 9 | @classmethod 10 | def INPUT_TYPES(self): 11 | 12 | return { 13 | "required": { 14 | "width": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 15 | "height": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 16 | "angle": ("INT", {"default": 0, "min": -360, "max": 360, "step": 1}), 17 | "start_color": ("STRING", {"default": "#FFFFFF"},), 18 | "end_color": ("STRING", {"default": "#000000"},), 19 | }, 20 | "optional": { 21 | } 22 | } 23 | 24 | RETURN_TYPES = ("IMAGE", ) 25 | RETURN_NAMES = ("image", ) 26 | FUNCTION = 'gradient_image' 27 | CATEGORY = '😺dzNodes/LayerUtility' 28 | 29 | def gradient_image(self, width, height, angle, start_color, end_color, ): 30 | 31 | ret_image = gradient(start_color, end_color, width, height, angle) 32 | 33 | return (pil2tensor(ret_image), ) 34 | 35 | NODE_CLASS_MAPPINGS = { 36 | "LayerUtility: GradientImage": GradientImage 37 | } 38 | 39 | NODE_DISPLAY_NAME_MAPPINGS = { 40 | "LayerUtility: GradientImage": "LayerUtility: GradientImage" 41 | } -------------------------------------------------------------------------------- /py/gradient_image_v2.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, AnyType, gradient, pil2tensor, tensor2pil, load_custom_size 3 | 4 | 5 | 6 | 7 | any = AnyType("*") 8 | 9 | 10 | class GradientImageV2: 11 | 12 | def __init__(self): 13 | self.NODE_NAME = 'GradientImage V2' 14 | 15 | @classmethod 16 | def INPUT_TYPES(self): 17 | size_list = 
['custom'] 18 | size_list.extend(load_custom_size()) 19 | return { 20 | "required": { 21 | "size": (size_list,), 22 | "custom_width": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 23 | "custom_height": ("INT", {"default": 512, "min": 4, "max": 99999, "step": 1}), 24 | "angle": ("INT", {"default": 0, "min": -360, "max": 360, "step": 1}), 25 | "start_color": ("STRING", {"default": "#FFFFFF"},), 26 | "end_color": ("STRING", {"default": "#000000"},), 27 | }, 28 | "optional": { 29 | "size_as": (any, {}), 30 | } 31 | } 32 | 33 | RETURN_TYPES = ("IMAGE", ) 34 | RETURN_NAMES = ("image", ) 35 | FUNCTION = 'gradient_image_v2' 36 | CATEGORY = '😺dzNodes/LayerUtility' 37 | 38 | def gradient_image_v2(self, size, custom_width, custom_height, angle, start_color, end_color, size_as=None): 39 | 40 | if size_as is not None: 41 | if size_as.shape[0] > 0: 42 | _asimage = tensor2pil(size_as[0]) 43 | else: 44 | _asimage = tensor2pil(size_as) 45 | width, height = _asimage.size 46 | else: 47 | if size == 'custom': 48 | width = custom_width 49 | height = custom_height 50 | else: 51 | try: 52 | _s = size.split('x') 53 | width = int(_s[0].strip()) 54 | height = int(_s[1].strip()) 55 | except Exception as e: 56 | log(f'Warning: {self.NODE_NAME} invalid size, check "custom_size.ini"', message_type='warning') 57 | width = custom_width 58 | height = custom_height 59 | 60 | 61 | ret_image = gradient(start_color, end_color, width, height, angle) 62 | 63 | return (pil2tensor(ret_image), ) 64 | 65 | NODE_CLASS_MAPPINGS = { 66 | "LayerUtility: GradientImage V2": GradientImageV2 67 | } 68 | 69 | NODE_DISPLAY_NAME_MAPPINGS = { 70 | "LayerUtility: GradientImage V2": "LayerUtility: GradientImage V2" 71 | } -------------------------------------------------------------------------------- /py/image_blend.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, pil2tensor, tensor2pil, image2mask, mask2image, chop_image, chop_mode 4 | 5 | 6 | 7 | class ImageBlend: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'ImageBlend' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "background_image": ("IMAGE", ), # 18 | "layer_image": ("IMAGE",), # 19 | "invert_mask": ("BOOLEAN", {"default": True}), # invert mask 20 | "blend_mode": (chop_mode,), # blend mode 21 | "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}), # opacity 22 | }, 23 | "optional": { 24 | "layer_mask": ("MASK",), # 25 | } 26 | } 27 | 28 | RETURN_TYPES = ("IMAGE",) 29 | RETURN_NAMES = ("image",) 30 | FUNCTION = 'image_blend' 31 | CATEGORY = '😺dzNodes/LayerUtility' 32 | 33 | def image_blend(self, background_image, layer_image, 34 | invert_mask, blend_mode, opacity, 35 | layer_mask=None 36 | ): 37 | 38 | b_images = [] 39 | l_images = [] 40 | l_masks = [] 41 | ret_images = [] 42 | for b in background_image: 43 | b_images.append(torch.unsqueeze(b, 0)) 44 | for l in layer_image: 45 | l_images.append(torch.unsqueeze(l, 0)) 46 | m = tensor2pil(l) 47 | if m.mode == 'RGBA': 48 | l_masks.append(m.split()[-1]) 49 | else: 50 | l_masks.append(Image.new('L', m.size, 'white')) 51 | if layer_mask is not None: 52 | if layer_mask.dim() == 2: 53 | layer_mask = torch.unsqueeze(layer_mask, 0) 54 | l_masks = [] 55 | for m in layer_mask: 56 | if invert_mask: 57 | m = 1 - m 58 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 59 | max_batch = max(len(b_images), len(l_images), len(l_masks)) 60 | for i in range(max_batch): 61 | background_image = b_images[i] if i < len(b_images) else b_images[-1] 62 | layer_image = l_images[i] if i < len(l_images) else l_images[-1] 63 | _mask = l_masks[i] if i < len(l_masks) else l_masks[-1] 64 | 65 | _canvas = tensor2pil(background_image).convert('RGB') 66 | _layer = tensor2pil(layer_image).convert('RGB') 67 | 68 | if _mask.size != _layer.size: 69 | _mask = Image.new('L', _layer.size, 'white') 70 | log(f"Warning: {self.NODE_NAME} mask mismatch, dropped!", message_type='warning') 71 | 72 | # composite the layer 73 | _comp = chop_image(_canvas, _layer, blend_mode, opacity) 74 | _canvas.paste(_comp, mask=_mask) 75 | 76 | ret_images.append(pil2tensor(_canvas)) 77 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 78 | return (torch.cat(ret_images, dim=0),) 79 | 80 | NODE_CLASS_MAPPINGS = { 81 | "LayerUtility: ImageBlend": ImageBlend 82 | } 83 | 84 | NODE_DISPLAY_NAME_MAPPINGS = { 85 | "LayerUtility: ImageBlend": "LayerUtility: ImageBlend" 86 | } -------------------------------------------------------------------------------- /py/image_channel_merge.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, pil2tensor, image_channel_merge 4 | 5 | 6 | 7 | class ImageChannelMerge: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'ImageChannelMerge' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | channel_mode = ['RGBA', 'YCbCr', 'LAB', 'HSV'] 15 | return { 16 | "required": { 17 | "channel_1": ("IMAGE", ), # 18 | "channel_2": ("IMAGE",), # 19 | "channel_3": ("IMAGE",), # 20 | "mode": (channel_mode,), # channel mode 21 | }, 22 | "optional": { 23 | "channel_4": ("IMAGE",), # 24 | } 25 | } 26 | 27 | RETURN_TYPES = ("IMAGE",) 28 | RETURN_NAMES = ("image",) 29 | FUNCTION = 'image_channel_merge' 30 | CATEGORY = '😺dzNodes/LayerUtility' 31 | 32 | def image_channel_merge(self, channel_1, channel_2, channel_3, mode, channel_4=None): 33 | 34 | c1_images = [] 35 | c2_images = [] 36 | c3_images = [] 37 | c4_images = [] 38 | ret_images = [] 39 | 40 | width, height = tensor2pil(torch.unsqueeze(channel_1[0], 0)).size 41 | for c in channel_1: 42 | c1_images.append(torch.unsqueeze(c, 0)) 43 | for c in channel_2: 44 | c2_images.append(torch.unsqueeze(c, 0)) 45 | for c in channel_3: 46 | c3_images.append(torch.unsqueeze(c, 0)) 47 | if channel_4 is not None: 48 | for c in channel_4: 49 | c4_images.append(torch.unsqueeze(c, 0)) 50 | else: 51 | c4_images.append(pil2tensor(Image.new('L', size=(width, height), color='white'))) 52 | 53 | max_batch = max(len(c1_images), len(c2_images), len(c3_images), len(c4_images)) 54 | for i in range(max_batch): 55 | c_1 = c1_images[i] if i < len(c1_images) else c1_images[-1] 56 | c_2 = c2_images[i] if i < len(c2_images) else c2_images[-1] 57 | c_3 = c3_images[i] if i < len(c3_images) else c3_images[-1] 58 | c_4 = c4_images[i] if i < len(c4_images) else c4_images[-1] 59 | ret_image = image_channel_merge((tensor2pil(c_1), tensor2pil(c_2), tensor2pil(c_3), tensor2pil(c_4)), mode) 60 | 61 | ret_images.append(pil2tensor(ret_image)) 62 | 63 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 64 | return (torch.cat(ret_images, dim=0),) 65 | 66 | NODE_CLASS_MAPPINGS = { 67 | "LayerUtility: ImageChannelMerge": ImageChannelMerge 68 | } 69 | 70 | NODE_DISPLAY_NAME_MAPPINGS = { 71 | "LayerUtility: ImageChannelMerge": "LayerUtility: ImageChannelMerge" 72 | }
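# Nearly every batch loop in these nodes broadcasts mismatched batch sizes by
# reusing the last element, as in `c_1 = c1_images[i] if i < len(c1_images)
# else c1_images[-1]` above. A minimal sketch of that convention in isolation:
def pick_broadcast(items, i):
    # once the shorter batch runs out, keep repeating its final element
    return items[i] if i < len(items) else items[-1]

masks = ['m0', 'm1']
picked = [pick_broadcast(masks, i) for i in range(4)]  # ['m0', 'm1', 'm1', 'm1']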
-------------------------------------------------------------------------------- /py/image_channel_split.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor, image_channel_split 3 | 4 | 5 | 6 | class ImageChannelSplit: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'ImageChannelSplit' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | channel_mode = ['RGBA', 'YCbCr', 'LAB', 'HSV'] 14 | return { 15 | "required": { 16 | "image": ("IMAGE", ), # 17 | "mode": (channel_mode,), # channel mode 18 | }, 19 | "optional": { 20 | } 21 | } 22 | 23 | RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE",) 24 | RETURN_NAMES = ("channel_1", "channel_2", "channel_3", "channel_4",) 25 | FUNCTION = 'image_channel_split' 26 | CATEGORY = '😺dzNodes/LayerUtility' 27 | 28 | def image_channel_split(self, image, mode): 29 | 30 | c1_images = [] 31 | c2_images = [] 32 | c3_images = [] 33 | c4_images = [] 34 | 35 | for i in image: 36 | i = torch.unsqueeze(i, 0) 37 | _image = tensor2pil(i).convert('RGBA') 38 | channel1, channel2, channel3, channel4 = image_channel_split(_image, mode) 39 | c1_images.append(pil2tensor(channel1)) 40 | c2_images.append(pil2tensor(channel2)) 41 | c3_images.append(pil2tensor(channel3)) 42 | c4_images.append(pil2tensor(channel4)) 43 | 44 | log(f"{self.NODE_NAME} Processed {len(c1_images)} image(s).", message_type='finish') 45 | return (torch.cat(c1_images, dim=0), torch.cat(c2_images, dim=0), torch.cat(c3_images, dim=0), torch.cat(c4_images, dim=0),) 46 | 47 | NODE_CLASS_MAPPINGS = { 48 | "LayerUtility: ImageChannelSplit": ImageChannelSplit 49 | } 50 | 51 | NODE_DISPLAY_NAME_MAPPINGS = { 52 | "LayerUtility: ImageChannelSplit": "LayerUtility: ImageChannelSplit" 53 | } -------------------------------------------------------------------------------- /py/image_combine_alpha.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from .imagefunc import log, tensor2pil, pil2tensor, image_channel_split, image_channel_merge 3 | 4 | 5 | 6 | class ImageCombineAlpha: 7 | 8 | def __init__(self): 9 | self.NODE_NAME = 'ImageCombineAlpha' 10 | 11 | @classmethod 12 | def INPUT_TYPES(self): 13 | channel_mode = ['RGBA', 'YCbCr', 'LAB', 'HSV'] 14 | return { 15 | "required": { 16 | "RGB_image": ("IMAGE", ), # 17 | "mask": ("MASK",), # 18 | }, 19 | "optional": { 20 | } 21 | } 22 | 23 | RETURN_TYPES = ("IMAGE",) 24 | RETURN_NAMES = ("RGBA_image",) 25 | FUNCTION = 'image_combine_alpha' 26 | CATEGORY = '😺dzNodes/LayerUtility' 27 | 28 | def image_combine_alpha(self, RGB_image, mask): 29 | 30 | ret_images = [] 31 | input_images = [] 32 | input_masks = [] 33 | 34 | for i in RGB_image: 35 | input_images.append(torch.unsqueeze(i, 0)) 36 | if mask.dim() == 2: 37 | mask = torch.unsqueeze(mask, 0) 38 | for m in mask: 39 | input_masks.append(torch.unsqueeze(m, 0)) 40 | 41 | max_batch = max(len(input_images), len(input_masks)) 42 | for i in range(max_batch): 43 | _image = input_images[i] if i < len(input_images) else input_images[-1] 44 | _mask = input_masks[i] if i < len(input_masks) else input_masks[-1] 45 | r, g, b, _ = image_channel_split(tensor2pil(_image).convert('RGB'), 'RGB') 46 | ret_image = image_channel_merge((r, g, b, tensor2pil(_mask).convert('L')), 'RGBA') 47 | 48 | ret_images.append(pil2tensor(ret_image)) 49 | 50 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 51 | return (torch.cat(ret_images, dim=0),) 52 | 53 | 
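# For a single image the split/merge above is equivalent to plain PIL's
# putalpha (a hedged equivalent for illustration, not the node's code path):
#   rgba = tensor2pil(_image).convert('RGB')
#   rgba.putalpha(tensor2pil(_mask).convert('L'))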
NODE_CLASS_MAPPINGS = { 54 | "LayerUtility: ImageCombineAlpha": ImageCombineAlpha 55 | } 56 | 57 | NODE_DISPLAY_NAME_MAPPINGS = { 58 | "LayerUtility: ImageCombineAlpha": "LayerUtility: ImageCombineAlpha" 59 | } -------------------------------------------------------------------------------- /py/image_opacity.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image, ImageChops 3 | from .imagefunc import log, tensor2pil, pil2tensor, image2mask 4 | 5 | 6 | 7 | class ImageOpacity: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'ImageOpacity' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), # 18 | "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}), # opacity 19 | "invert_mask": ("BOOLEAN", {"default": True}), # invert mask 20 | }, 21 | "optional": { 22 | "mask": ("MASK",), # 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE", "MASK",) 27 | RETURN_NAMES = ("image", "mask",) 28 | FUNCTION = 'image_opacity' 29 | CATEGORY = '😺dzNodes/LayerUtility' 30 | 31 | def image_opacity(self, image, opacity, invert_mask, 32 | mask=None, 33 | ): 34 | 35 | ret_images = [] 36 | ret_masks = [] 37 | l_images = [] 38 | l_masks = [] 39 | for l in image: 40 | l_images.append(torch.unsqueeze(l, 0)) 41 | m = tensor2pil(l) 42 | if m.mode == 'RGBA': 43 | l_masks.append(m.split()[-1]) 44 | else: 45 | l_masks.append(Image.new('L', size=m.size, color='white')) 46 | 47 | if mask is not None: 48 | if mask.dim() == 2: 49 | mask = torch.unsqueeze(mask, 0) 50 | l_masks = [] 51 | for m in mask: 52 | if invert_mask: 53 | m = 1 - m 54 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 55 | 56 | max_batch = max(len(l_images), len(l_masks)) 57 | 58 | for i in range(max_batch): 59 | _image = l_images[i] if i < len(l_images) else l_images[-1] 60 | _image = tensor2pil(_image) 61 | _mask = l_masks[i] if i < len(l_masks) else l_masks[-1] 62 | if invert_mask: 63 | _color = Image.new("L", _image.size, color=('white')) 64 | _mask = ImageChops.invert(_mask) 65 | else: 66 | _color = Image.new("L", _image.size, color=('black')) 67 | 68 | alpha = 1 - opacity / 100.0 69 | ret_mask = Image.blend(_mask, _color, alpha) 70 | R, G, B, = _image.convert('RGB').split() 71 | if invert_mask: 72 | ret_mask = ImageChops.invert(ret_mask) 73 | ret_image = Image.merge('RGBA', (R, G, B, ret_mask)) 74 | 75 | ret_images.append(pil2tensor(ret_image)) 76 | ret_masks.append(image2mask(ret_mask)) 77 | 78 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 79 | return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),) 80 | 81 | NODE_CLASS_MAPPINGS = { 82 | "LayerUtility: ImageOpacity": ImageOpacity 83 | } 84 | 85 | NODE_DISPLAY_NAME_MAPPINGS = { 86 | "LayerUtility: ImageOpacity": "LayerUtility: ImageOpacity" 87 | } -------------------------------------------------------------------------------- /py/image_remove_alpha.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, pil2tensor 4 | 5 | 6 | 7 | class ImageRemoveAlpha: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'ImageRemoveAlpha' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "RGBA_image": ("IMAGE", ), # 18 | "fill_background": ("BOOLEAN", {"default": False}), 19 | "background_color": ("STRING", {"default": "#000000"}), 20 | }, 21 | "optional": { 22 | "mask": ("MASK",), # 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("IMAGE", ) 27 | RETURN_NAMES = ("RGB_image", ) 28 | FUNCTION = 'image_remove_alpha' 29 | CATEGORY = '😺dzNodes/LayerUtility' 30 | 31 | def image_remove_alpha(self, RGBA_image, fill_background, background_color, mask=None): 32 | 33 | ret_images = [] 34 | 35 | for index, img in enumerate(RGBA_image): 36 | _image = tensor2pil(img) 37 | 38 | if fill_background: 39 | if mask is not None: 40 | m = mask[index].unsqueeze(0) if index < len(mask) else mask[-1].unsqueeze(0) 41 | alpha = tensor2pil(m).convert('L') 42 | elif _image.mode == "RGBA": 43 | alpha = _image.split()[-1] 44 | else: 45 | log(f"Error: {self.NODE_NAME} skipped, because the input image is not RGBA and mask is None.", 46 | message_type='error') 47 | return (RGBA_image,) 48 | ret_image = Image.new('RGB', size=_image.size, color=background_color) 49 | ret_image.paste(_image, mask=alpha) 50 | ret_images.append(pil2tensor(ret_image)) 51 | 52 | else: 53 | ret_images.append(pil2tensor(tensor2pil(img).convert('RGB'))) 54 | 55 | log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish') 56 | return (torch.cat(ret_images, dim=0), ) 57 | 58 | NODE_CLASS_MAPPINGS = { 59 | "LayerUtility: ImageRemoveAlpha": ImageRemoveAlpha 60 | } 61 | 62 | NODE_DISPLAY_NAME_MAPPINGS = { 63 | "LayerUtility: ImageRemoveAlpha": "LayerUtility: ImageRemoveAlpha" 64 | } -------------------------------------------------------------------------------- /py/mask_by_color.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, pil2tensor, image2mask, create_mask_from_color_tensor, mask_fix 4 | 5 | 6 | 7 | 8 | class MaskByColor: 9 | 10 | def __init__(self): 11 | self.NODE_NAME = 'MaskByColor' 12 | 13 | @classmethod 14 | def INPUT_TYPES(s): 15 | return { 16 | "required": { 17 | "image": ("IMAGE", ), 18 | "color": ("COLOR", {"default": "#FFFFFF"},), 19 | "color_in_HEX": ("STRING", {"default": ""}), 20 | "threshold": ("INT", { "default": 50, "min": 0, "max": 100, "step": 1, }), 21 | "fix_gap": ("INT", {"default": 2, "min": 0, "max": 32, "step": 1}), 22 | "fix_threshold": ("FLOAT", {"default": 0.75, "min": 0.01, "max": 0.99, "step": 0.01}), 23 | "invert_mask": ("BOOLEAN", {"default": False}), # invert mask 24 | }, 25 | "optional": { 26 | "mask": ("MASK",), # 27 | } 28 | } 29 | 30 | RETURN_TYPES = ("MASK",) 31 | RETURN_NAMES = ("mask",) 32 | FUNCTION = "mask_by_color" 33 | CATEGORY = '😺dzNodes/LayerMask' 34 | 35 | def mask_by_color(self, image, color, color_in_HEX, threshold, 36 | fix_gap, fix_threshold, invert_mask, mask=None): 37 | 38 | if color_in_HEX != "" and color_in_HEX.startswith('#') and len(color_in_HEX) == 7: 39 | color = color_in_HEX 40 | 41 | ret_masks = [] 42 | l_images = [] 43 | l_masks = [] 44 | 45 | for l in image: 46 | l_images.append(torch.unsqueeze(l, 0)) 47 | m = tensor2pil(l) 48 | if m.mode == 'RGBA': 49 | l_masks.append(m.split()[-1]) 50 | else: 51 | l_masks.append(Image.new('L', m.size, 'white')) 52 | if mask is not None: 53 | if mask.dim() == 2: 54 | mask = torch.unsqueeze(mask, 0) 55 | l_masks = [] 56 | for m in mask: 57 | if invert_mask: 58 | m = 1 - m 59 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 60 | 61 | for i in range(len(l_images)): 62 | img = l_images[i] if i < len(l_images) else l_images[-1] 63 | img = tensor2pil(img) 64 | _mask = l_masks[i] if i < len(l_masks) else l_masks[-1] 65 | 66 | mask = Image.new('L', _mask.size,
--------------------------------------------------------------------------------
/py/mask_by_color.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, create_mask_from_color_tensor, mask_fix


class MaskByColor:

    def __init__(self):
        self.NODE_NAME = 'MaskByColor'

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE", ),
                "color": ("COLOR", {"default": "#FFFFFF"},),
                "color_in_HEX": ("STRING", {"default": ""}),
                "threshold": ("INT", {"default": 50, "min": 0, "max": 100, "step": 1}),
                "fix_gap": ("INT", {"default": 2, "min": 0, "max": 32, "step": 1}),
                "fix_threshold": ("FLOAT", {"default": 0.75, "min": 0.01, "max": 0.99, "step": 0.01}),
                "invert_mask": ("BOOLEAN", {"default": False}),  # invert the mask
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = "mask_by_color"
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_by_color(self, image, color, color_in_HEX, threshold,
                      fix_gap, fix_threshold, invert_mask, mask=None):

        if color_in_HEX != "" and color_in_HEX.startswith('#') and len(color_in_HEX) == 7:
            color = color_in_HEX

        ret_masks = []
        l_images = []
        l_masks = []

        for l in image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])
            else:
                l_masks.append(Image.new('L', m.size, 'white'))
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            l_masks = []
            for m in mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        for i in range(len(l_images)):
            img = l_images[i] if i < len(l_images) else l_images[-1]
            img = tensor2pil(img)
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]

            mask = Image.new('L', _mask.size, 'black')
            mask.paste(create_mask_from_color_tensor(img, color, threshold), mask=_mask)
            mask = image2mask(mask)
            if invert_mask:
                mask = 1 - mask
            if fix_gap:
                mask = mask_fix(mask, 1, fix_gap, fix_threshold, fix_threshold)
            ret_masks.append(mask)

        return (torch.cat(ret_masks, dim=0), )


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskByColor": MaskByColor
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskByColor": "LayerMask: Mask by Color"
}

--------------------------------------------------------------------------------
/py/mask_edge_shrink.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image, ImageChops
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, mask_invert, step_color, expand_mask, step_value, chop_image


class MaskEdgeShrink:

    def __init__(self):
        self.NODE_NAME = 'MaskEdgeShrink'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "mask": ("MASK", ),
                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
                "shrink_level": ("INT", {"default": 4, "min": 0, "max": 16, "step": 1}),
                "soft": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),
                "edge_shrink": ("INT", {"default": 1, "min": 0, "max": 999, "step": 1}),
                "edge_reserve": ("INT", {"default": 25, "min": 0, "max": 100, "step": 1}),  # opacity used when compositing the reserved edge
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'mask_edge_shrink'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_edge_shrink(self, mask, invert_mask, shrink_level, soft, edge_shrink, edge_reserve):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)

        for m in mask:
            if invert_mask:
                m = 1 - m
            l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        glow_range = shrink_level * soft
        blur = 12

        for i in range(len(l_masks)):
            _mask = l_masks[i]
            _canvas = Image.new('RGB', size=_mask.size, color='black')
            _layer = Image.new('RGB', size=_mask.size, color='white')
            loop_grow = glow_range
            inner_mask = _mask
            for x in range(shrink_level):
                _color = step_color('#FFFFFF', '#000000', shrink_level, x)
                glow_mask = expand_mask(image2mask(inner_mask), -loop_grow, blur / (x + 0.1))  # shrink and blur
                # composite
                color_image = Image.new("RGB", _layer.size, color=_color)
                alpha = tensor2pil(mask_invert(glow_mask)).convert('L')
                _glow = chop_image(_layer, color_image, 'subtract', int(step_value(1, 100, shrink_level, x)))
                _layer.paste(_glow, mask=alpha)
                loop_grow = loop_grow - int(glow_range / shrink_level)
            # composite the layer
            _edge = tensor2pil(expand_mask(image2mask(_mask), -edge_shrink, 0)).convert('RGB')
            _layer = chop_image(_layer, _edge, 'normal', edge_reserve)
            _layer.paste(_canvas, mask=ImageChops.invert(_mask))

            ret_masks.append(image2mask(_layer))

        log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskEdgeShrink": MaskEdgeShrink
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskEdgeShrink": "LayerMask: MaskEdgeShrink"
}
--------------------------------------------------------------------------------
/py/mask_grain.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, expand_mask, chop_image_v2


class MaskGrain:

    def __init__(self):
        self.NODE_NAME = 'MaskGrain'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "mask": ("MASK", ),
                "grain": ("INT", {"default": 6, "min": 0, "max": 127, "step": 1}),
                "invert_mask": ("BOOLEAN", {"default": False}),  # invert the mask
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'mask_grain'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_grain(self, mask, grain, invert_mask):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)
        for m in mask:
            if invert_mask:
                m = 1 - m
            l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        for mask in l_masks:
            if grain:
                white_mask = Image.new('L', mask.size, color="white")
                inner_mask = tensor2pil(expand_mask(image2mask(mask), 0 - grain, int(grain))).convert('L')
                outer_mask = tensor2pil(expand_mask(image2mask(mask), grain, int(grain * 2))).convert('L')
                ret_mask = Image.new('L', mask.size, color="black")
                ret_mask = chop_image_v2(ret_mask, outer_mask, blend_mode="dissolve", opacity=50).convert('L')
                ret_mask.paste(white_mask, mask=inner_mask)
                ret_masks.append(image2mask(ret_mask))
            else:
                ret_masks.append(image2mask(mask))

        log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskGrain": MaskGrain
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskGrain": "LayerMask: Mask Grain"
}

--------------------------------------------------------------------------------
/py/mask_grow.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, image2mask, expand_mask


class MaskGrow:

    def __init__(self):
        self.NODE_NAME = 'MaskGrow'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "mask": ("MASK", ),
                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
                "grow": ("INT", {"default": 4, "min": -999, "max": 999, "step": 1}),
                "blur": ("INT", {"default": 4, "min": 0, "max": 999, "step": 1}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'mask_grow'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_grow(self, mask, invert_mask, grow, blur,):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)

        for m in mask:
            if invert_mask:
                m = 1 - m
            l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        for i in range(len(l_masks)):
            _mask = l_masks[i]
            ret_masks.append(expand_mask(image2mask(_mask), grow, blur))

        log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskGrow": MaskGrow
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskGrow": "LayerMask: MaskGrow"
}
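
expand_mask is internal to imagefunc, but the grow/blur semantics MaskGrow exposes can be approximated with plain PIL morphology: a MaxFilter dilates (positive grow), a MinFilter erodes (negative grow), and a Gaussian blur softens the edge. A rough standalone sketch under that assumption; the real implementation may differ:

# Approximation of grow-then-blur mask expansion with PIL only. This mirrors
# the assumed semantics of imagefunc.expand_mask, not its actual code.
from PIL import Image, ImageFilter

def grow_mask(mask: Image.Image, grow: int, blur: int) -> Image.Image:
    mask = mask.convert("L")
    if grow > 0:
        mask = mask.filter(ImageFilter.MaxFilter(2 * grow + 1))   # dilate
    elif grow < 0:
        mask = mask.filter(ImageFilter.MinFilter(2 * -grow + 1))  # erode
    if blur > 0:
        mask = mask.filter(ImageFilter.GaussianBlur(blur))
    return mask
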
"LayerMask: MaskGrow" 59 | } -------------------------------------------------------------------------------- /py/mask_invert.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, image2mask, mask_invert 4 | 5 | 6 | 7 | class MaskInvert: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'MaskInvert' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | return { 15 | "required": { 16 | "mask": ("MASK", ), # 17 | }, 18 | "optional": { 19 | } 20 | } 21 | 22 | RETURN_TYPES = ("MASK",) 23 | RETURN_NAMES = ("mask",) 24 | FUNCTION = 'mask_invert' 25 | CATEGORY = '😺dzNodes/LayerMask' 26 | 27 | def mask_invert(self,mask): 28 | l_masks = [] 29 | ret_masks = [] 30 | 31 | if mask.dim() == 2: 32 | mask = torch.unsqueeze(mask, 0) 33 | 34 | for m in mask: 35 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 36 | 37 | for i in range(len(l_masks)): 38 | _mask = l_masks[i] 39 | ret_masks.append(mask_invert(image2mask(_mask))) 40 | 41 | return (torch.cat(ret_masks, dim=0),) 42 | 43 | NODE_CLASS_MAPPINGS = { 44 | "LayerMask: MaskInvert": MaskInvert 45 | } 46 | 47 | NODE_DISPLAY_NAME_MAPPINGS = { 48 | "LayerMask: MaskInvert": "LayerMask: MaskInvert" 49 | } -------------------------------------------------------------------------------- /py/mask_motion_blur.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from PIL import Image 3 | from .imagefunc import log, tensor2pil, image2mask, motion_blur 4 | 5 | 6 | 7 | class MaskMotionBlur: 8 | 9 | def __init__(self): 10 | self.NODE_NAME = 'MaskMotionBlur' 11 | 12 | @classmethod 13 | def INPUT_TYPES(self): 14 | 15 | return { 16 | "required": { 17 | "mask": ("MASK",), 18 | "invert_mask": ("BOOLEAN", {"default": True}), # 反转mask 19 | "blur": ("INT", {"default": 20, "min": 1, "max": 9999, "step": 1}), 20 | "angle": ("FLOAT", {"default": 0, "min": -360, "max": 360, "step": 0.1}), 21 | }, 22 | "optional": { 23 | } 24 | } 25 | 26 | RETURN_TYPES = ("MASK",) 27 | RETURN_NAMES = ("mask",) 28 | FUNCTION = 'mask_motion_blur' 29 | CATEGORY = '😺dzNodes/LayerMask' 30 | 31 | def mask_motion_blur(self, mask, invert_mask, blur, angle,): 32 | 33 | l_masks = [] 34 | ret_masks = [] 35 | 36 | if mask.dim() == 2: 37 | mask = torch.unsqueeze(mask, 0) 38 | 39 | for m in mask: 40 | if invert_mask: 41 | m = 1 - m 42 | l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L')) 43 | 44 | for i in range(len(l_masks)): 45 | _mask = l_masks[i] 46 | _blurimage = motion_blur(_mask, angle, blur) 47 | ret_masks.append(image2mask(_blurimage)) 48 | 49 | log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish') 50 | return (torch.cat(ret_masks, dim=0),) 51 | 52 | NODE_CLASS_MAPPINGS = { 53 | "LayerMask: MaskMotionBlur": MaskMotionBlur 54 | } 55 | 56 | NODE_DISPLAY_NAME_MAPPINGS = { 57 | "LayerMask: MaskMotionBlur": "LayerMask: MaskMotionBlur" 58 | } -------------------------------------------------------------------------------- /py/mask_preview.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import random 3 | from nodes import SaveImage 4 | import folder_paths 5 | 6 | 7 | class MaskPreview(SaveImage): 8 | def __init__(self): 9 | self.output_dir = folder_paths.get_temp_directory() 10 | self.type = "temp" 11 | self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz1234567890") for x in range(5)) 12 | 
--------------------------------------------------------------------------------
/py/mask_stroke.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, image2mask, expand_mask, subtract_mask


class MaskStroke:

    def __init__(self):
        self.NODE_NAME = 'MaskStroke'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "mask": ("MASK", ),
                "invert_mask": ("BOOLEAN", {"default": True}),  # invert the mask
                "stroke_grow": ("INT", {"default": 0, "min": -999, "max": 999, "step": 1}),  # grow/shrink offset
                "stroke_width": ("INT", {"default": 20, "min": 0, "max": 999, "step": 1}),  # stroke width
                "blur": ("INT", {"default": 6, "min": 0, "max": 100, "step": 1}),  # blur
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'mask_stroke'
    CATEGORY = '😺dzNodes/LayerMask'

    def mask_stroke(self, mask, invert_mask, stroke_grow, stroke_width, blur,):

        l_masks = []
        ret_masks = []

        if mask.dim() == 2:
            mask = torch.unsqueeze(mask, 0)

        for m in mask:
            if invert_mask:
                m = 1 - m
            l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        for i in range(len(l_masks)):
            _mask = l_masks[i]
            grow_offset = int(stroke_width / 2)
            inner_stroke = stroke_grow - grow_offset
            outer_stroke = inner_stroke + stroke_width
            inner_mask = expand_mask(image2mask(_mask), inner_stroke, blur)
            outer_mask = expand_mask(image2mask(_mask), outer_stroke, blur)
            stroke_mask = subtract_mask(outer_mask, inner_mask)
            ret_masks.append(stroke_mask)

        log(f"{self.NODE_NAME} Processed {len(ret_masks)} mask(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerMask: MaskStroke": MaskStroke
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: MaskStroke": "LayerMask: MaskStroke"
}
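
The stroke above is classic ring construction: expand the mask by an outer offset, expand by an inner offset, and subtract; centering comes from shifting both offsets by half the stroke width. A self-contained sketch of the same arithmetic using a pure-PIL dilate/erode approximation (the node's expand_mask/subtract_mask may differ):

# Ring-style stroke from a binary mask, mirroring MaskStroke's offsets.
from PIL import Image, ImageChops, ImageDraw, ImageFilter

def dilate(mask, px):
    if px > 0:
        return mask.filter(ImageFilter.MaxFilter(2 * px + 1))
    if px < 0:
        return mask.filter(ImageFilter.MinFilter(2 * -px + 1))
    return mask

mask = Image.new("L", (256, 256), 0)
ImageDraw.Draw(mask).ellipse((64, 64, 192, 192), fill=255)

stroke_grow, stroke_width = 0, 20
inner = stroke_grow - stroke_width // 2
outer = inner + stroke_width
stroke = ImageChops.subtract(dilate(mask, outer), dilate(mask, inner))
stroke.save("stroke.png")
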
--------------------------------------------------------------------------------
/py/motion_blur.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, motion_blur


class MotionBlur:

    def __init__(self):
        self.NODE_NAME = 'MotionBlur'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image": ("IMAGE", ),
                "angle": ("INT", {"default": 0, "min": -90, "max": 90, "step": 1}),  # angle
                "blur": ("INT", {"default": 20, "min": 1, "max": 999, "step": 1}),  # blur
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'motion_blur'
    CATEGORY = '😺dzNodes/LayerFilter'

    def motion_blur(self, image, angle, blur):

        ret_images = []

        for i in image:
            _canvas = tensor2pil(torch.unsqueeze(i, 0)).convert('RGB')
            ret_images.append(pil2tensor(motion_blur(_canvas, angle, blur)))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerFilter: MotionBlur": MotionBlur
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: MotionBlur": "LayerFilter: MotionBlur"
}

--------------------------------------------------------------------------------
/py/pixel_spread.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, mask2image, expand_mask, pixel_spread


class PixelSpread:

    def __init__(self):
        self.NODE_NAME = 'PixelSpread'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image": ("IMAGE", ),
                "invert_mask": ("BOOLEAN", {"default": False}),  # invert the mask
                "mask_grow": ("INT", {"default": 0, "min": -999, "max": 999, "step": 1}),
            },
            "optional": {
                "mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", )
    RETURN_NAMES = ("image", )
    FUNCTION = 'pixel_spread'
    CATEGORY = '😺dzNodes/LayerMask'

    def pixel_spread(self, image, invert_mask, mask_grow, mask=None):

        l_images = []
        l_masks = []
        ret_images = []

        for l in image:
            i = tensor2pil(torch.unsqueeze(l, 0))
            l_images.append(i)
            if i.mode == 'RGBA':
                l_masks.append(i.split()[-1])
            else:
                l_masks.append(Image.new('L', i.size, 'white'))
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            l_masks = []
            for m in mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
        max_batch = max(len(l_images), len(l_masks))

        for i in range(max_batch):
            _image = l_images[i] if i < len(l_images) else l_images[-1]
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]
            if mask_grow != 0:
                _mask = expand_mask(image2mask(_mask), mask_grow, 0)  # expand the mask
                _mask = mask2image(_mask)

            if _image.size != _mask.size:
                log(f"Error: {self.NODE_NAME} skipped, because the mask does not match the image size.", message_type='error')
                return (image,)
            ret_image = pixel_spread(_image.convert('RGB'), _mask.convert('RGB'))
            ret_images.append(pil2tensor(ret_image))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerMask: PixelSpread": PixelSpread
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: PixelSpread": "LayerMask: PixelSpread"
}
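
The pixel_spread function itself lives in imagefunc, so its exact algorithm is not visible here. The general technique (pushing edge colors out into masked-off regions so downstream matting gets clean borders) can be approximated with OpenCV inpainting; this is an analogy under stated assumptions, not the node's actual algorithm:

# Approximates a pixel-spread effect by inpainting the region outside the
# mask from nearby edge pixels. File names are hypothetical.
import cv2

img = cv2.imread("input.png")                        # hypothetical test image
mask = cv2.imread("mask.png", cv2.IMREAD_GRAYSCALE)  # white = keep

# Inpaint everything the mask does not cover, spreading nearby colors inward.
hole = cv2.threshold(255 - mask, 127, 255, cv2.THRESH_BINARY)[1]
spread = cv2.inpaint(img, hole, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
cv2.imwrite("spread.png", spread)
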
--------------------------------------------------------------------------------
/py/print_info.py:
--------------------------------------------------------------------------------

import json
import torch
from .imagefunc import log, tensor2pil
from .imagefunc import AnyType

any = AnyType("*")

class PrintInfo:

    @classmethod
    def INPUT_TYPES(cls):  # pylint: disable = invalid-name, missing-function-docstring
        return {
            "required": {
                "anything": (any, {}),
            },
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)
    FUNCTION = "print_info"
    CATEGORY = '😺dzNodes/LayerUtility/Data'
    OUTPUT_NODE = True

    def print_info(self, anything=None):
        value = f'PrintInfo:\nInput type = {type(anything)}'
        if isinstance(anything, torch.Tensor):
            # value += f"\n Input dim = {anything.dim()}, shape[0] = {anything.shape[0]}"
            for i in range(anything.shape[0]):
                t = anything[i]
                image = tensor2pil(t)
                value += f'index {i}: size={image.size}, mode="{image.mode}", dim={t.dim()}; '
                # for j in range(t.dim()):
                #     value += f'shape[{j}] = {t.shape[j]}, '
                # value += f'\n {t} \n'
        elif isinstance(anything, str):
            value = f"{value}{anything.encode('unicode-escape').decode('unicode-escape')}"
        elif anything is not None:
            try:
                value = value + json.dumps(anything) + "\n"
            except Exception:
                try:
                    value = value + str(anything) + "\n"
                except Exception:
                    value = 'source exists, but could not be serialized.'
        else:
            value = 'source does not exist.'

        log(value)

        return (value,)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: PrintInfo": PrintInfo
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: PrintInfo": "LayerUtility: PrintInfo"
}

--------------------------------------------------------------------------------
/py/purge_vram.py:
--------------------------------------------------------------------------------

import torch.cuda
import gc
import comfy.model_management
from .imagefunc import AnyType, clear_memory

any = AnyType("*")

class PurgeVRAM:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "anything": (any, {}),
                "purge_cache": ("BOOLEAN", {"default": True}),
                "purge_models": ("BOOLEAN", {"default": True}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ()
    FUNCTION = "purge_vram"
    CATEGORY = '😺dzNodes/LayerUtility/SystemIO'
    OUTPUT_NODE = True

    def purge_vram(self, anything, purge_cache, purge_models):
        clear_memory()
        if purge_models:
            comfy.model_management.unload_all_models()
            comfy.model_management.soft_empty_cache()
        return (None,)

class PurgeVRAM_V2:

    def __init__(self):
        self.NODE_NAME = 'PurgeVRAM V2'

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "anything": (any, {}),
                "purge_cache": ("BOOLEAN", {"default": True}),
                "purge_models": ("BOOLEAN", {"default": True}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = (any,)
    RETURN_NAMES = ("any",)
    FUNCTION = "purge_vram_v2"
    CATEGORY = '😺dzNodes/LayerUtility/SystemIO'
    OUTPUT_NODE = True

    def purge_vram_v2(self, anything, purge_cache, purge_models):
        clear_memory()
        if purge_models:
            comfy.model_management.unload_all_models()
            comfy.model_management.soft_empty_cache()
        return (anything,)


NODE_CLASS_MAPPINGS = {
    "LayerUtility: PurgeVRAM": PurgeVRAM,
    "LayerUtility: PurgeVRAM V2": PurgeVRAM_V2,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: PurgeVRAM": "LayerUtility: Purge VRAM",
    "LayerUtility: PurgeVRAM V2": "LayerUtility: Purge VRAM V2",
}
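
Both utilities above rely on imagefunc.AnyType to accept any input socket. The usual ComfyUI wildcard trick is a str subclass whose inequality test always fails, so type validation never rejects a connection. A plausible sketch of such a class; the repo's actual definition may differ:

# A wildcard socket type in the style commonly used by ComfyUI extensions.
# Hypothetical reconstruction of imagefunc.AnyType, shown for illustration.
class AnyType(str):
    """A string that never compares unequal, so any socket type matches."""
    def __ne__(self, other):
        return False

any_type = AnyType("*")
print(any_type != "IMAGE", any_type != "MASK")  # False False
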
--------------------------------------------------------------------------------
/py/restore_crop_box.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask


class RestoreCropBox:

    def __init__(self):
        self.NODE_NAME = 'RestoreCropBox'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "background_image": ("IMAGE", ),
                "croped_image": ("IMAGE",),
                "invert_mask": ("BOOLEAN", {"default": False}),  # invert the mask
                "crop_box": ("BOX",),
            },
            "optional": {
                "croped_mask": ("MASK",),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", )
    RETURN_NAMES = ("image", "mask", )
    FUNCTION = 'restore_crop_box'
    CATEGORY = '😺dzNodes/LayerUtility'

    def restore_crop_box(self, background_image, croped_image, invert_mask, crop_box,
                         croped_mask=None
                         ):

        b_images = []
        l_images = []
        l_masks = []
        ret_images = []
        ret_masks = []
        for b in background_image:
            b_images.append(torch.unsqueeze(b, 0))
        for l in croped_image:
            l_images.append(torch.unsqueeze(l, 0))
            m = tensor2pil(l)
            if m.mode == 'RGBA':
                l_masks.append(m.split()[-1])
            else:
                l_masks.append(Image.new('L', size=m.size, color='white'))
        if croped_mask is not None:
            if croped_mask.dim() == 2:
                croped_mask = torch.unsqueeze(croped_mask, 0)
            l_masks = []
            for m in croped_mask:
                if invert_mask:
                    m = 1 - m
                l_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))

        max_batch = max(len(b_images), len(l_images), len(l_masks))
        for i in range(max_batch):
            background_image = b_images[i] if i < len(b_images) else b_images[-1]
            croped_image = l_images[i] if i < len(l_images) else l_images[-1]
            _mask = l_masks[i] if i < len(l_masks) else l_masks[-1]

            _canvas = tensor2pil(background_image).convert('RGB')
            _layer = tensor2pil(croped_image).convert('RGB')

            ret_mask = Image.new('L', size=_canvas.size, color='black')
            _canvas.paste(_layer, box=tuple(crop_box), mask=_mask)
            ret_mask.paste(_mask, box=tuple(crop_box))
            ret_images.append(pil2tensor(_canvas))
            ret_masks.append(image2mask(ret_mask))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerUtility: RestoreCropBox": RestoreCropBox
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: RestoreCropBox": "LayerUtility: RestoreCropBox"
}
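
The paste-back logic above is plain PIL: crop_box carries the original region coordinates, and the cropped layer plus its mask are pasted back at that box. A minimal round-trip sketch with hypothetical file names:

# Crop a region, process it, then paste it back at the recorded crop box,
# as RestoreCropBox does after an upstream crop node.
from PIL import Image, ImageOps

image = Image.open("photo.png").convert("RGB")
crop_box = (100, 80, 356, 336)  # (left, top, right, bottom)

patch = image.crop(crop_box)
patch = ImageOps.autocontrast(patch)  # stand-in for any per-patch processing

restored = image.copy()
restored.paste(patch, box=crop_box)
restored.save("restored.png")
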
--------------------------------------------------------------------------------
/py/rmbg_ultra.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, mask2image, RMBG, RGB2RGBA, mask_edge_detail


class RemBgUltra:
    def __init__(self):
        self.NODE_NAME = 'RemBgUltra'

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),
                "detail_range": ("INT", {"default": 8, "min": 1, "max": 256, "step": 1}),
                "black_point": ("FLOAT", {"default": 0.01, "min": 0.01, "max": 0.98, "step": 0.01}),
                "white_point": ("FLOAT", {"default": 0.99, "min": 0.02, "max": 0.99, "step": 0.01}),
                "process_detail": ("BOOLEAN", {"default": True}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", )
    RETURN_NAMES = ("image", "mask", )
    FUNCTION = "rembg_ultra"
    CATEGORY = '😺dzNodes/LayerMask'

    def rembg_ultra(self, image, detail_range, black_point, white_point, process_detail):
        ret_images = []
        ret_masks = []

        for i in image:
            i = torch.unsqueeze(i, 0)
            i = pil2tensor(tensor2pil(i).convert('RGB'))
            orig_image = tensor2pil(i).convert('RGB')
            _mask = RMBG(orig_image)
            if process_detail:
                _mask = tensor2pil(mask_edge_detail(i, pil2tensor(_mask), detail_range, black_point, white_point))
            ret_image = RGB2RGBA(orig_image, _mask.convert('L'))
            ret_images.append(pil2tensor(ret_image))
            ret_masks.append(image2mask(_mask))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerMask: RemBgUltra": RemBgUltra,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: RemBgUltra": "LayerMask: RemBgUltra",
}

--------------------------------------------------------------------------------
/py/sharp_soft.py:
--------------------------------------------------------------------------------

import torch
import copy
import cv2
import numpy as np
from PIL import Image
from .imagefunc import log


class SharpAndSoft:

    def __init__(self):
        self.NODE_NAME = 'Sharp & Soft'

    @classmethod
    def INPUT_TYPES(self):

        enhance_list = ['very sharp', 'sharp', 'soft', 'very soft', 'None']

        return {
            "required": {
                "images": ("IMAGE",),
                "enhance": (enhance_list, ),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'sharp_and_soft'
    CATEGORY = '😺dzNodes/LayerFilter'

    def sharp_and_soft(self, images, enhance, ):

        # An if/elif chain is required here: with separate if statements, the
        # trailing else would run for every preset except 'very soft' and
        # return the input unchanged.
        if enhance == 'very sharp':
            filter_radius = 1
            denoise = 0.6
            detail_mult = 2.8
        elif enhance == 'sharp':
            filter_radius = 3
            denoise = 0.12
            detail_mult = 1.8
        elif enhance == 'soft':
            filter_radius = 8
            denoise = 0.08
            detail_mult = 0.5
        elif enhance == 'very soft':
            filter_radius = 15
            denoise = 0.06
            detail_mult = 0.01
        else:
            return (images,)

        d = int(filter_radius * 2) + 1
        s = 0.02
        n = denoise / 10
        dup = copy.deepcopy(images.cpu().numpy())

        from cv2.ximgproc import guidedFilter  # requires opencv-contrib-python
        for index, image in enumerate(dup):
            imgB = image
            if denoise > 0.0:
                imgB = cv2.bilateralFilter(image, d, n, d)
            imgG = np.clip(guidedFilter(image, image, d, s), 0.001, 1)
            details = (imgB / imgG - 1) * detail_mult + 1
            dup[index] = np.clip(details * imgG - imgB + image, 0, 1)

        log(f"{self.NODE_NAME} Processed {dup.shape[0]} image(s).", message_type='finish')
        return (torch.from_numpy(dup),)


NODE_CLASS_MAPPINGS = {
    "LayerFilter: Sharp & Soft": SharpAndSoft
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: Sharp & Soft": "LayerFilter: Sharp & Soft"
}
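
The core of Sharp & Soft is detail amplification around a guided-filter base layer. Ignoring the bilateral denoise step (imgB == image), the update reduces algebraically to base + detail_mult · (image − base): deviations from the smooth base are scaled by detail_mult. A NumPy-only illustration with random data standing in for an image:

# Shows how detail_mult scales deviations from the smoothed base layer in
# SharpAndSoft's formula, with the denoise step disabled.
import numpy as np

rng = np.random.default_rng(0)
image = rng.random((4, 4)).astype(np.float32)
base = np.full_like(image, image.mean())  # stand-in for the guided-filter base

for detail_mult in (0.5, 1.8, 2.8):
    out = np.clip(base + detail_mult * (image - base), 0, 1)
    print(detail_mult, float(np.abs(out - base).mean()))  # grows with detail_mult
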
--------------------------------------------------------------------------------
/py/skin_beauty.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image2mask, gaussian_blur, chop_image
from .imagefunc import image_channel_split, gray_threshold, remove_background, get_image_bright_average, image_beauty


class SkinBeauty:

    def __init__(self):
        self.NODE_NAME = 'SkinBeauty'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image": ("IMAGE", ),
                "smooth": ("INT", {"default": 20, "min": 1, "max": 64, "step": 1}),  # skin smoothing level
                "threshold": ("INT", {"default": -10, "min": -255, "max": 255, "step": 1}),  # highlight threshold
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    RETURN_NAMES = ("image", "beauty_mask")
    FUNCTION = 'skin_beauty'
    CATEGORY = '😺dzNodes/LayerFilter'

    def skin_beauty(self, image, smooth, threshold, opacity
                    ):

        ret_images = []
        ret_masks = []
        for i in image:
            i = torch.unsqueeze(i, 0)
            _canvas = tensor2pil(i).convert('RGB')
            _R, _, _, _ = image_channel_split(_canvas, mode='RGB')
            _otsumask = gray_threshold(_R, otsu=True)
            _removebkgd = remove_background(_R, _otsumask, '#000000')
            auto_threshold = get_image_bright_average(_removebkgd) - 16
            light_mask = gray_threshold(_canvas, auto_threshold + threshold)
            blur = int((_canvas.width + _canvas.height) / 2000 * smooth)
            _image = image_beauty(_canvas, level=smooth)
            _image = gaussian_blur(_image, blur)
            _image = chop_image(_canvas, _image, 'normal', opacity)
            light_mask = gaussian_blur(light_mask, blur).convert('L')
            _canvas.paste(_image, mask=light_mask)

            ret_images.append(pil2tensor(_canvas))
            ret_masks.append(image2mask(light_mask))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0), torch.cat(ret_masks, dim=0),)


NODE_CLASS_MAPPINGS = {
    "LayerFilter: SkinBeauty": SkinBeauty
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: SkinBeauty": "LayerFilter: SkinBeauty"
}

--------------------------------------------------------------------------------
/py/soft_light.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, gaussian_blur, chop_image
from .imagefunc import gray_threshold, remove_background, get_image_bright_average


class SoftLight:

    def __init__(self):
        self.NODE_NAME = 'SoftLight'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image": ("IMAGE", ),
                "soft": ("FLOAT", {"default": 1, "min": 0.2, "max": 10, "step": 0.01}),  # blur strength
                "threshold": ("INT", {"default": -10, "min": -255, "max": 255, "step": 1}),  # highlight threshold
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'soft_light'
    CATEGORY = '😺dzNodes/LayerFilter'

    def soft_light(self, image, soft, threshold, opacity,):

        ret_images = []

        for i in image:
            i = torch.unsqueeze(i, 0)
            blend_mode = 'screen'
            _canvas = tensor2pil(i).convert('RGB')
            blur = int((_canvas.width + _canvas.height) / 200 * soft)
            _otsumask = gray_threshold(_canvas, otsu=True)
            _removebkgd = remove_background(_canvas, _otsumask, '#000000').convert('L')
            auto_threshold = get_image_bright_average(_removebkgd)
            light_mask = gray_threshold(_canvas, auto_threshold + threshold)
            highlight_mask = gray_threshold(_canvas, auto_threshold + (255 - auto_threshold) // 2 + threshold // 2)
            blurimage = gaussian_blur(_canvas, soft).convert('RGB')
            light = chop_image(_canvas, blurimage, blend_mode=blend_mode, opacity=opacity)
            highlight = chop_image(light, blurimage, blend_mode=blend_mode, opacity=opacity)
            _canvas.paste(highlight, mask=gaussian_blur(light_mask, blur * 2).convert('L'))
            _canvas.paste(highlight, mask=gaussian_blur(highlight_mask, blur).convert('L'))

            ret_images.append(pil2tensor(_canvas))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerFilter: SoftLight": SoftLight
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: SoftLight": "LayerFilter: SoftLight"
}
--------------------------------------------------------------------------------
/py/text_join.py:
--------------------------------------------------------------------------------


class TextJoin:

    def __init__(self):
        self.NODE_NAME = 'TextJoin'

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "text_1": ("STRING", {"default": "", "multiline": False, "forceInput": False}),
            },
            "optional": {
                "text_2": ("STRING", {"default": "", "multiline": False, "forceInput": False}),
                "text_3": ("STRING", {"default": "", "multiline": False, "forceInput": False}),
                "text_4": ("STRING", {"default": "", "multiline": False, "forceInput": False}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)
    FUNCTION = "text_join"
    CATEGORY = '😺dzNodes/LayerUtility/Data'

    def text_join(self, text_1, text_2="", text_3="", text_4=""):

        texts = []
        if text_1 != "":
            texts.append(text_1)
        if text_2 != "":
            texts.append(text_2)
        if text_3 != "":
            texts.append(text_3)
        if text_4 != "":
            texts.append(text_4)
        if len(texts) > 0:
            combined_text = ', '.join(texts)
            return (combined_text.encode('unicode-escape').decode('unicode-escape'),)
        else:
            return ('',)


class LS_TextJoinV2:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "text_1": ("STRING", {"default": "", "multiline": False, "forceInput": True}),
                "delimiter": ("STRING", {"default": ",", "multiline": False}),
            },
            "optional": {
                "text_2": ("STRING", {"default": "", "multiline": False, "forceInput": True}),
                "text_3": ("STRING", {"default": "", "multiline": False, "forceInput": True}),
                "text_4": ("STRING", {"default": "", "multiline": False, "forceInput": True}),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("text",)
    FUNCTION = "text_join"
    CATEGORY = '😺dzNodes/LayerUtility/Data'

    def text_join(self, text_1, delimiter, text_2="", text_3="", text_4=""):

        texts = []
        if text_1 != "":
            texts.append(text_1)
        if text_2 != "":
            texts.append(text_2)
        if text_3 != "":
            texts.append(text_3)
        if text_4 != "":
            texts.append(text_4)
        if len(texts) > 0:
            combined_text = delimiter.join(texts)
            return (combined_text.encode('unicode-escape').decode('unicode-escape'),)
        else:
            return ('',)
NODE_CLASS_MAPPINGS = {
    "LayerUtility: TextJoin": TextJoin,
    "LayerUtility: TextJoinV2": LS_TextJoinV2
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: TextJoin": "LayerUtility: TextJoin",
    "LayerUtility: TextJoinV2": "LayerUtility: TextJoinV2"
}

--------------------------------------------------------------------------------
/py/text_node.py:
--------------------------------------------------------------------------------

import os
import json
import random
from .imagefunc import AnyType, log, extract_all_numbers_from_str, extract_numbers, extract_substr_from_str
from .imagefunc import tokenize_string, find_best_match_by_similarity, remove_empty_lines, remove_duplicate_string
from .imagefunc import get_files, file_is_extension, is_contain_chinese

any = AnyType("*")

class LS_TextPreseter:
    def __init__(self):
        self.NODE_NAME = "TextPreseter"

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "title": ("STRING", {"default": "", "multiline": False}),
                "content": ("STRING", {"default": '', "multiline": True}),
            },
            "optional": {
                "text_preset": ("LS_TEXT_PRESET", ),
            }
        }

    RETURN_TYPES = ("LS_TEXT_PRESET",)
    RETURN_NAMES = ("text_preset",)
    FUNCTION = 'text_preseter'
    CATEGORY = '😺dzNodes/LayerUtility/Data'

    def text_preseter(self, title, content, text_preset=None):

        if text_preset is None:
            text_preset = {}

        if title:
            text_preset[title] = content

        return (text_preset,)

class LS_ChoiceTextPreset:
    def __init__(self):
        self.NODE_NAME = "ChoicePresetText"

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "text_preset": ("LS_TEXT_PRESET", ),
                "choice_title": ("STRING", {"default": '', "multiline": False}),
                "random_choice": ("BOOLEAN", {"default": False}),
                "default": ("INT", {"default": 0, "min": 0, "max": 1e4, "step": 1}),
                "seed": ("INT", {"default": 0, "min": 0, "max": 1e18, "step": 1}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("STRING", "STRING",)
    RETURN_NAMES = ("title", "content",)
    FUNCTION = 'choice_preset_text'
    CATEGORY = '😺dzNodes/LayerUtility/Data'

    def choice_preset_text(self, text_preset, choice_title, random_choice, default, seed):
        keys = list(text_preset.keys())
        if not keys:
            return ('', '')
        # clamp the default index so an out-of-range value cannot raise IndexError
        ret_key = keys[min(default, len(keys) - 1)]

        if choice_title in text_preset and not random_choice:
            ret_key = choice_title
        elif random_choice:
            random.seed(seed)
            ret_key = random.choice(keys)

        ret_value = text_preset[ret_key]

        return (ret_key, ret_value)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: ChoiceTextPreset": LS_ChoiceTextPreset,
    "LayerUtility: TextPreseter": LS_TextPreseter,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: ChoiceTextPreset": "LayerUtility: Choice Text Preset",
    "LayerUtility: TextPreseter": "LayerUtility: Text Preseter",
}
--------------------------------------------------------------------------------
/py/water_color.py:
--------------------------------------------------------------------------------

import torch
from PIL import Image
from .imagefunc import log, tensor2pil, pil2tensor, image_watercolor, chop_image


class WaterColor:

    def __init__(self):
        self.NODE_NAME = 'WaterColor'

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "image": ("IMAGE", ),
                "line_density": ("INT", {"default": 50, "min": 1, "max": 100, "step": 1}),  # line density
                "opacity": ("INT", {"default": 100, "min": 0, "max": 100, "step": 1}),  # opacity
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = 'water_color'
    CATEGORY = '😺dzNodes/LayerFilter'

    def water_color(self, image, line_density, opacity
                    ):

        ret_images = []

        for i in image:
            i = torch.unsqueeze(i, 0)
            _canvas = tensor2pil(i).convert('RGB')
            _image = image_watercolor(_canvas, level=101 - line_density)
            ret_image = chop_image(_canvas, _image, 'normal', opacity)

            ret_images.append(pil2tensor(ret_image))

        log(f"{self.NODE_NAME} Processed {len(ret_images)} image(s).", message_type='finish')
        return (torch.cat(ret_images, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerFilter: WaterColor": WaterColor
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerFilter: WaterColor": "LayerFilter: WaterColor"
}

--------------------------------------------------------------------------------
/py/xy2percent.py:
--------------------------------------------------------------------------------

from .imagefunc import tensor2pil

class XYtoPercent:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(self):
        return {
            "required": {
                "background_image": ("IMAGE", ),
                "layer_image": ("IMAGE",),
                "x": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
                "y": ("INT", {"default": 0, "min": -99999, "max": 99999, "step": 1}),
            },
            "optional": {
            }
        }

    RETURN_TYPES = ("FLOAT", "FLOAT",)
    RETURN_NAMES = ("x_percent", "y_percent",)
    FUNCTION = 'xy_to_percent'
    CATEGORY = '😺dzNodes/LayerUtility/Data'

    def xy_to_percent(self, background_image, layer_image, x, y,):

        _canvas = tensor2pil(background_image).convert('RGB')
        _layer = tensor2pil(layer_image).convert('RGB')
        # convert a top-left pixel offset into the layer-center position as a
        # percentage of the canvas size
        x_percent = (x + _layer.width / 2) / _canvas.width * 100.0
        y_percent = (y + _layer.height / 2) / _canvas.height * 100.0

        return (x_percent, y_percent,)

NODE_CLASS_MAPPINGS = {
    "LayerUtility: XY to Percent": XYtoPercent
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerUtility: XY to Percent": "LayerUtility: XY to Percent"
}
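
The conversion above maps a layer's top-left pixel offset to its center position in canvas percent. A worked check of the arithmetic:

# Worked example of XYtoPercent's math: a 200x100 layer placed at pixel
# (300, 250) on a 1000x500 canvas has its center at 40% / 60%.
canvas_w, canvas_h = 1000, 500
layer_w, layer_h = 200, 100
x, y = 300, 250

x_percent = (x + layer_w / 2) / canvas_w * 100.0  # (300 + 100) / 1000 * 100
y_percent = (y + layer_h / 2) / canvas_h * 100.0  # (250 + 50) / 500 * 100
print(x_percent, y_percent)  # 40.0 60.0
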
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

[project]
name = "comfyui_layerstyle"
description = "A set of nodes for ComfyUI that generate image effects like Adobe Photoshop's Layer Styles. Drop Shadow was the first completed node; follow-up work is in progress."
version = "2.0.20"
license = {text = "MIT License"}
dependencies = ["numpy", "pillow", "torch", "matplotlib", "Scipy", "scikit_image", "scikit_learn", "opencv-contrib-python", "pymatting", "timm", "colour-science", "transformers", "blend_modes", "huggingface_hub", "loguru"]

[project.urls]
Repository = "https://github.com/chflame163/ComfyUI_LayerStyle"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "chflame163"
DisplayName = "ComfyUI_LayerStyle"
Icon = ""

--------------------------------------------------------------------------------
/repair_dependency.bat:
--------------------------------------------------------------------------------

@echo off

set "requirements_txt=%~dp0\repair_dependency_list.txt"
set "python_exec=..\..\..\python_embeded\python.exe"

echo Fixing Dependency Package...

echo Installing with ComfyUI Portable
%python_exec% -s -m pip uninstall -y onnxruntime
%python_exec% -s -m pip uninstall -y opencv-python opencv-contrib-python opencv-python-headless opencv-contrib-python-headless

for /f "delims=" %%i in (%requirements_txt%) do (
    %python_exec% -s -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple "%%i"
)

pause

--------------------------------------------------------------------------------
/repair_dependency_aki.bat:
--------------------------------------------------------------------------------

@echo off

set "requirements_txt=%~dp0\repair_dependency_list.txt"
set "python_exec=..\..\python\python.exe"

echo Fixing Dependency Package...

echo Installing with ComfyUI Portable
%python_exec% -s -m pip uninstall -y onnxruntime
%python_exec% -s -m pip uninstall -y opencv-python opencv-contrib-python opencv-python-headless opencv-contrib-python-headless

for /f "delims=" %%i in (%requirements_txt%) do (
    %python_exec% -s -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple "%%i"
)

pause

--------------------------------------------------------------------------------
/repair_dependency_list.txt:
--------------------------------------------------------------------------------

opencv-contrib-python>=4.9.0.80

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------

numpy
pillow
torch
matplotlib
Scipy
scikit_image
scikit_learn
opencv-contrib-python
pymatting
timm
colour-science
transformers>=4.43.2
blend_modes
huggingface_hub>=0.23.4
loguru

--------------------------------------------------------------------------------
/resource_dir.ini.example:
--------------------------------------------------------------------------------

FONT_dir=C:\font,D:\other_font
LUT_dir=C:\lut,D:\other_lut

--------------------------------------------------------------------------------
/workflow/1280x720_seven_person.jpg:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1280x720_seven_person.jpg
--------------------------------------------------------------------------------
/workflow/1280x720car.jpg:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1280x720car.jpg

--------------------------------------------------------------------------------
/workflow/1280x768_city.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1280x768_city.png

--------------------------------------------------------------------------------
/workflow/1344x768_beach.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1344x768_beach.png

--------------------------------------------------------------------------------
/workflow/1344x768_girl2.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1344x768_girl2.png

--------------------------------------------------------------------------------
/workflow/1344x768_hair.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1344x768_hair.png

--------------------------------------------------------------------------------
/workflow/1344x768_redcar.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1344x768_redcar.png

--------------------------------------------------------------------------------
/workflow/1920x1080table.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/1920x1080table.png

--------------------------------------------------------------------------------
/workflow/3840x2160car.jpg:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/3840x2160car.jpg

--------------------------------------------------------------------------------
/workflow/512x512.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/512x512.png

--------------------------------------------------------------------------------
/workflow/512x512bkgd.jpg:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/512x512bkgd.jpg

--------------------------------------------------------------------------------
/workflow/768x1344_beach.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/768x1344_beach.png

--------------------------------------------------------------------------------
/workflow/768x1344_dress.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/768x1344_dress.png
--------------------------------------------------------------------------------
/workflow/color_of_shadow_and_highlight_example.json:
--------------------------------------------------------------------------------

{
  "last_node_id": 25,
  "last_link_id": 45,
  "nodes": [
    {
      "id": 15,
      "type": "PreviewImage",
      "pos": [1190, 158],
      "size": {"0": 649.0001220703125, "1": 385.00006103515625},
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 42}],
      "properties": {"Node name for S&R": "PreviewImage"}
    },
    {
      "id": 12,
      "type": "LoadImage",
      "pos": [194, 149],
      "size": {"0": 504.2838134765625, "1": 408.71527099609375},
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [41], "shape": 3, "slot_index": 0},
        {"name": "MASK", "type": "MASK", "links": null, "shape": 3}
      ],
      "properties": {"Node name for S&R": "LoadImage"},
      "widgets_values": ["image (6) (1).png", "image"]
    },
    {
      "id": 22,
      "type": "LayerColor: Color of Shadow & Highlight",
      "pos": [788, 233],
      "size": {"0": 327.6000061035156, "1": 294},
      "flags": {},
      "order": 1,
      "mode": 0,
      "inputs": [
        {"name": "image", "type": "IMAGE", "link": 41},
        {"name": "mask", "type": "MASK", "link": null}
      ],
      "outputs": [{"name": "image", "type": "IMAGE", "links": [42], "shape": 3, "slot_index": 0}],
      "properties": {"Node name for S&R": "LayerColor: Color of Shadow & Highlight"},
      "widgets_values": [1, 1, 0, 0, 0.25, 0.88, 1.36, -125, 12, 0.28]
    }
  ],
  "links": [
    [41, 12, 0, 22, 0, "IMAGE"],
    [42, 22, 0, 15, 0, "IMAGE"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}

--------------------------------------------------------------------------------
/workflow/fox_512x512.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/fox_512x512.png

--------------------------------------------------------------------------------
/workflow/girl_dino_1024.png:
--------------------------------------------------------------------------------

https://raw.githubusercontent.com/chflame163/ComfyUI_LayerStyle/a46b1e6d26d45be9784c49f7065ba44700ef2b63/workflow/girl_dino_1024.png